From 253d0b026dbd55f38787d8e7334261b044b8c703 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 20 Jun 2025 10:34:47 +0100 Subject: [PATCH 0001/1093] cmd/k8s-operator: remove conffile hashing mechanism (#16335) Proxies know how to reload configfile on changes since 1.80, which is going to be the earliest supported proxy version with 1.84 operator, so remove the mechanism that was updating configfile hash to force proxy Pod restarts on config changes. Updates #13032 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/connector_test.go | 24 ++++---- cmd/k8s-operator/ingress_test.go | 16 ++--- cmd/k8s-operator/operator_test.go | 42 ++++++------- cmd/k8s-operator/proxygroup.go | 91 +++-------------------------- cmd/k8s-operator/proxygroup_test.go | 35 +++-------- cmd/k8s-operator/sts.go | 85 +++++---------------------- cmd/k8s-operator/testutils_test.go | 19 ------ 7 files changed, 74 insertions(+), 238 deletions(-) diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index f32fe3282020c..d5829c37fe596 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -80,7 +80,7 @@ func TestConnector(t *testing.T) { app: kubetypes.AppConnector, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Connector status should get updated with the IP/hostname info when available. const hostname = "foo.tailnetxyz.ts.net" @@ -106,7 +106,7 @@ func TestConnector(t *testing.T) { opts.subnetRoutes = "10.40.0.0/14,10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Remove a route. 
mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -114,7 +114,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Remove the subnet router. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -122,7 +122,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Re-add the subnet router. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -132,7 +132,7 @@ func TestConnector(t *testing.T) { }) opts.subnetRoutes = "10.44.0.0/20" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Delete the Connector. if err = fc.Delete(context.Background(), cn); err != nil { @@ -176,7 +176,7 @@ func TestConnector(t *testing.T) { app: kubetypes.AppConnector, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Add an exit node. mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { @@ -184,7 +184,7 @@ func TestConnector(t *testing.T) { }) opts.isExitNode = true expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Delete the Connector. 
if err = fc.Delete(context.Background(), cn); err != nil { @@ -262,7 +262,7 @@ func TestConnectorWithProxyClass(t *testing.T) { app: kubetypes.AppConnector, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. Update Connector to specify a ProxyClass. ProxyClass is not yet // ready, so its configuration is NOT applied to the Connector @@ -271,7 +271,7 @@ func TestConnectorWithProxyClass(t *testing.T) { conn.Spec.ProxyClass = "custom-metadata" }) expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 3. ProxyClass is set to Ready by proxy-class reconciler. Connector // get reconciled and configuration from the ProxyClass is applied to @@ -286,7 +286,7 @@ func TestConnectorWithProxyClass(t *testing.T) { }) opts.proxyClass = pc.Name expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 4. 
Connector.spec.proxyClass field is unset, Connector gets // reconciled and configuration from the ProxyClass is removed from the @@ -296,7 +296,7 @@ func TestConnectorWithProxyClass(t *testing.T) { }) opts.proxyClass = "" expectReconciled(t, cr, "", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) } func TestConnectorWithAppConnector(t *testing.T) { @@ -352,7 +352,7 @@ func TestConnectorWithAppConnector(t *testing.T) { isAppConnector: true, } expectEqual(t, fc, expectedSecret(t, fc, opts)) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // Connector's ready condition should be set to true cn.ObjectMeta.Finalizers = append(cn.ObjectMeta.Finalizers, "tailscale.com/finalizer") diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index dbd6961d7d7ff..aacf27d8e6600 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -71,7 +71,7 @@ func TestTailscaleIngress(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 2. Ingress status gets updated with ingress proxy's MagicDNS name // once that becomes available. @@ -98,7 +98,7 @@ func TestTailscaleIngress(t *testing.T) { }) opts.shouldEnableForwardingClusterTrafficViaIngress = true expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 4. 
Resources get cleaned up when Ingress class is unset mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { @@ -162,7 +162,7 @@ func TestTailscaleIngressHostname(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 2. Ingress proxy with capability version >= 110 does not have an HTTPS endpoint set mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { @@ -280,7 +280,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 2. Ingress is updated to specify a ProxyClass, ProxyClass is not yet // ready, so proxy resource configuration does not change. @@ -288,7 +288,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { mak.Set(&ing.ObjectMeta.Labels, LabelProxyClass, "custom-metadata") }) expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 3. ProxyClass is set to Ready by proxy-class reconciler. Ingress get // reconciled and configuration from the ProxyClass is applied to the @@ -303,7 +303,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = pc.Name - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) // 4. 
tailscale.com/proxy-class label is removed from the Ingress, the // Ingress gets reconciled and the custom ProxyClass configuration is @@ -313,7 +313,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = "" - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) } func TestTailscaleIngressWithServiceMonitor(t *testing.T) { @@ -608,7 +608,7 @@ func TestEmptyPath(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) expectEvents(t, fr, tt.expectedEvents) }) diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 33bf23e844d9a..ff6ba4f952749 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -130,7 +130,7 @@ func TestLoadBalancerClass(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) want.Annotations = nil want.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"} @@ -268,7 +268,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -291,7 +291,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { 
expectEqual(t, fc, want) expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Change the tailscale-target-fqdn annotation which should update the // StatefulSet @@ -380,7 +380,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -403,7 +403,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { expectEqual(t, fc, want) expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Change the tailscale-target-ip annotation which should update the // StatefulSet @@ -631,7 +631,7 @@ func TestAnnotations(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -737,7 +737,7 @@ func TestAnnotationIntoLB(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Normally the Tailscale proxy pod would come up here and write its info // into the secret. 
Simulate that, since it would have normally happened at @@ -781,7 +781,7 @@ func TestAnnotationIntoLB(t *testing.T) { expectReconciled(t, sr, "default", "test") // None of the proxy machinery should have changed... expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // ... but the service should have a LoadBalancer status. want = &corev1.Service{ @@ -867,7 +867,7 @@ func TestLBIntoAnnotation(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) // Normally the Tailscale proxy pod would come up here and write its info // into the secret. Simulate that, then verify reconcile again and verify @@ -927,7 +927,7 @@ func TestLBIntoAnnotation(t *testing.T) { expectReconciled(t, sr, "default", "test") expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want = &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -1007,7 +1007,7 @@ func TestCustomHostname(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, o)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -1118,7 +1118,7 @@ func TestCustomPriorityClassName(t *testing.T) { app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } func 
TestProxyClassForService(t *testing.T) { @@ -1188,7 +1188,7 @@ func TestProxyClassForService(t *testing.T) { } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. The Service gets updated with tailscale.com/proxy-class label // pointing at the 'custom-metadata' ProxyClass. The ProxyClass is not @@ -1197,7 +1197,7 @@ func TestProxyClassForService(t *testing.T) { mak.Set(&svc.Labels, LabelProxyClass, "custom-metadata") }) expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) expectEqual(t, fc, expectedSecret(t, fc, opts)) // 3. ProxyClass is set to Ready, the Service gets reconciled by the @@ -1213,7 +1213,7 @@ func TestProxyClassForService(t *testing.T) { }) opts.proxyClass = pc.Name expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) expectEqual(t, fc, expectedSecret(t, fc, opts), removeAuthKeyIfExistsModifier(t)) // 4. 
tailscale.com/proxy-class label is removed from the Service, the @@ -1224,7 +1224,7 @@ func TestProxyClassForService(t *testing.T) { }) opts.proxyClass = "" expectReconciled(t, sr, "default", "test") - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) } func TestDefaultLoadBalancer(t *testing.T) { @@ -1280,7 +1280,7 @@ func TestDefaultLoadBalancer(t *testing.T) { clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } func TestProxyFirewallMode(t *testing.T) { @@ -1336,7 +1336,7 @@ func TestProxyFirewallMode(t *testing.T) { clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, } - expectEqual(t, fc, expectedSTS(t, fc, o), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } func Test_isMagicDNSName(t *testing.T) { @@ -1617,7 +1617,7 @@ func Test_authKeyRemoval(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. Apply update to the Secret that imitates the proxy setting device_id. s := expectedSecret(t, fc, opts) @@ -1691,7 +1691,7 @@ func Test_externalNameService(t *testing.T) { expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) // 2. Change the ExternalName and verify that changes get propagated. 
mustUpdate(t, sr, "default", "test", func(s *corev1.Service) { @@ -1699,7 +1699,7 @@ func Test_externalNameService(t *testing.T) { }) expectReconciled(t, sr, "default", "test") opts.clusterTargetDNS = "bar.com" - expectEqual(t, fc, expectedSTS(t, fc, opts), removeHashAnnotation, removeResourceReqs) + expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) } func Test_metricsResourceCreation(t *testing.T) { diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index e7c0590b0dbb9..0d5eff551e8de 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -7,7 +7,6 @@ package main import ( "context" - "crypto/sha256" "encoding/json" "errors" "fmt" @@ -237,8 +236,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.ensureAddedToGaugeForProxyGroup(pg) r.mu.Unlock() - cfgHash, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass) - if err != nil { + if err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass); err != nil { return fmt.Errorf("error provisioning config Secrets: %w", err) } // State secrets are precreated so we can use the ProxyGroup CR as their owner ref. @@ -306,33 +304,10 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro proxyType: string(pg.Spec.Type), } ss = applyProxyClassToStatefulSet(proxyClass, ss, cfg, logger) - capver, err := r.capVerForPG(ctx, pg, logger) - if err != nil { - return fmt.Errorf("error getting device info: %w", err) - } updateSS := func(s *appsv1.StatefulSet) { - // This is a temporary workaround to ensure that egress ProxyGroup proxies with capver older than 110 - // are restarted when tailscaled configfile contents have changed. - // This workaround ensures that: - // 1. The hash mechanism is used to trigger pod restarts for proxies below capver 110. - // 2. Proxies above capver are not unnecessarily restarted when the configfile contents change. - // 3. 
If the hash has alreay been set, but the capver is above 110, the old hash is preserved to avoid - // unnecessary pod restarts that could result in an update loop where capver cannot be determined for a - // restarting Pod and the hash is re-added again. - // Note that this workaround is only applied to egress ProxyGroups, because ingress ProxyGroup was added after capver 110. - // Note also that the hash annotation is only set on updates, not creation, because if the StatefulSet is - // being created, there is no need for a restart. - // TODO(irbekrm): remove this in 1.84. - hash := cfgHash - if capver >= 110 { - hash = s.Spec.Template.GetAnnotations()[podAnnotationLastSetConfigFileHash] - } s.Spec = ss.Spec - if hash != "" && pg.Spec.Type == tsapi.ProxyGroupTypeEgress { - mak.Set(&s.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, hash) - } s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations @@ -449,9 +424,8 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailc return nil } -func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (hash string, err error) { +func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (err error) { logger := r.logger(pg.Name) - var configSHA256Sum string for i := range pgReplicas(pg) { cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -467,7 +441,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p logger.Debugf("Secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) existingCfgSecret = cfgSecret.DeepCopy() } else if !apierrors.IsNotFound(err) { - return "", err + return err } var authKey string @@ -479,65 +453,39 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } authKey, err = newAuthKey(ctx, 
r.tsClient, tags) if err != nil { - return "", err + return err } } configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret) if err != nil { - return "", fmt.Errorf("error creating tailscaled config: %w", err) + return fmt.Errorf("error creating tailscaled config: %w", err) } for cap, cfg := range configs { cfgJSON, err := json.Marshal(cfg) if err != nil { - return "", fmt.Errorf("error marshalling tailscaled config: %w", err) + return fmt.Errorf("error marshalling tailscaled config: %w", err) } mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } - // The config sha256 sum is a value for a hash annotation used to trigger - // pod restarts when tailscaled config changes. Any config changes apply - // to all replicas, so it is sufficient to only hash the config for the - // first replica. - // - // In future, we're aiming to eliminate restarts altogether and have - // pods dynamically reload their config when it changes. - if i == 0 { - sum := sha256.New() - for _, cfg := range configs { - // Zero out the auth key so it doesn't affect the sha256 hash when we - // remove it from the config after the pods have all authed. Otherwise - // all the pods will need to restart immediately after authing. 
- cfg.AuthKey = nil - b, err := json.Marshal(cfg) - if err != nil { - return "", err - } - if _, err := sum.Write(b); err != nil { - return "", err - } - } - - configSHA256Sum = fmt.Sprintf("%x", sum.Sum(nil)) - } - if existingCfgSecret != nil { if !apiequality.Semantic.DeepEqual(existingCfgSecret, cfgSecret) { logger.Debugf("Updating the existing ProxyGroup config Secret %s", cfgSecret.Name) if err := r.Update(ctx, cfgSecret); err != nil { - return "", err + return err } } } else { logger.Debugf("Creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) if err := r.Create(ctx, cfgSecret); err != nil { - return "", err + return err } } } - return configSHA256Sum, nil + return nil } // ensureAddedToGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource is updated when the ProxyGroup @@ -707,24 +655,3 @@ type nodeMetadata struct { tsID tailcfg.StableNodeID dnsName string } - -// capVerForPG returns best effort capability version for the given ProxyGroup. It attempts to find it by looking at the -// Secret + Pod for the replica with ordinal 0. Returns -1 if it is not possible to determine the capability version -// (i.e there is no Pod yet). 
-func (r *ProxyGroupReconciler) capVerForPG(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (tailcfg.CapabilityVersion, error) { - metas, err := r.getNodeMetadata(ctx, pg) - if err != nil { - return -1, fmt.Errorf("error getting node metadata: %w", err) - } - if len(metas) == 0 { - return -1, nil - } - dev, err := deviceInfo(metas[0].stateSecret, metas[0].podUID, logger) - if err != nil { - return -1, fmt.Errorf("error getting device info: %w", err) - } - if dev == nil { - return -1, nil - } - return dev.capver, nil -} diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index f3f87aaacf663..c556ae94a0de4 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -30,7 +30,6 @@ import ( "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/types/ptr" - "tailscale.com/util/mak" ) const testProxyImage = "tailscale/tailscale:test" @@ -40,7 +39,6 @@ var defaultProxyClassAnnotations = map[string]string{ } func TestProxyGroup(t *testing.T) { - const initialCfgHash = "6632726be70cf224049580deb4d317bba065915b5fd415461d60ed621c91b196" pc := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{ @@ -98,7 +96,7 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, false, "", pc) + expectProxyGroupResources(t, fc, pg, false, pc) }) t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) { @@ -119,11 +117,11 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, "", pc) + expectProxyGroupResources(t, fc, 
pg, true, pc) if expected := 1; reconciler.egressProxyGroups.Len() != expected { t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) } - expectProxyGroupResources(t, fc, pg, true, "", pc) + expectProxyGroupResources(t, fc, pg, true, pc) keyReq := tailscale.KeyCapabilities{ Devices: tailscale.KeyDeviceCapabilities{ Create: tailscale.KeyDeviceCreateCapabilities{ @@ -155,7 +153,7 @@ func TestProxyGroup(t *testing.T) { } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) + expectProxyGroupResources(t, fc, pg, true, pc) }) t.Run("scale_up_to_3", func(t *testing.T) { @@ -166,7 +164,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) + expectProxyGroupResources(t, fc, pg, true, pc) addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) @@ -176,7 +174,7 @@ func TestProxyGroup(t *testing.T) { TailnetIPs: []string{"1.2.3.4", "::1"}, }) expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) + expectProxyGroupResources(t, fc, pg, true, pc) }) t.Run("scale_down_to_1", func(t *testing.T) { @@ -189,21 +187,7 @@ func TestProxyGroup(t *testing.T) { pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. 
expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, initialCfgHash, pc) - }) - - t.Run("trigger_config_change_and_observe_new_config_hash", func(t *testing.T) { - pc.Spec.TailscaleConfig = &tsapi.TailscaleConfig{ - AcceptRoutes: true, - } - mustUpdate(t, fc, "", pc.Name, func(p *tsapi.ProxyClass) { - p.Spec = pc.Spec - }) - - expectReconciled(t, reconciler, "", pg.Name) - - expectEqual(t, fc, pg) - expectProxyGroupResources(t, fc, pg, true, "518a86e9fae64f270f8e0ec2a2ea6ca06c10f725035d3d6caca132cd61e42a74", pc) + expectProxyGroupResources(t, fc, pg, true, pc) }) t.Run("enable_metrics", func(t *testing.T) { @@ -608,7 +592,7 @@ func verifyEnvVarNotPresent(t *testing.T, sts *appsv1.StatefulSet, name string) } } -func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, cfgHash string, proxyClass *tsapi.ProxyClass) { +func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup, shouldExist bool, proxyClass *tsapi.ProxyClass) { t.Helper() role := pgRole(pg, tsNamespace) @@ -619,9 +603,6 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox t.Fatal(err) } statefulSet.Annotations = defaultProxyClassAnnotations - if cfgHash != "" { - mak.Set(&statefulSet.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, cfgHash) - } if shouldExist { expectEqual(t, fc, role) diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 70b25f2d28784..4c7c3ac6741a2 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -7,7 +7,6 @@ package main import ( "context" - "crypto/sha256" _ "embed" "encoding/json" "errors" @@ -91,8 +90,6 @@ const ( podAnnotationLastSetClusterDNSName = "tailscale.com/operator-last-set-cluster-dns-name" podAnnotationLastSetTailnetTargetIP = "tailscale.com/operator-last-set-ts-tailnet-target-ip" podAnnotationLastSetTailnetTargetFQDN = "tailscale.com/operator-last-set-ts-tailnet-target-fqdn" - // 
podAnnotationLastSetConfigFileHash is sha256 hash of the current tailscaled configuration contents. - podAnnotationLastSetConfigFileHash = "tailscale.com/operator-last-set-config-file-hash" proxyTypeEgress = "egress_service" proxyTypeIngressService = "ingress_service" @@ -110,7 +107,7 @@ var ( // tailscaleManagedLabels are label keys that tailscale operator sets on StatefulSets and Pods. tailscaleManagedLabels = []string{kubetypes.LabelManaged, LabelParentType, LabelParentName, LabelParentNamespace, "app"} // tailscaleManagedAnnotations are annotation keys that tailscale operator sets on StatefulSets and Pods. - tailscaleManagedAnnotations = []string{podAnnotationLastSetClusterIP, podAnnotationLastSetTailnetTargetIP, podAnnotationLastSetTailnetTargetFQDN, podAnnotationLastSetConfigFileHash} + tailscaleManagedAnnotations = []string{podAnnotationLastSetClusterIP, podAnnotationLastSetTailnetTargetIP, podAnnotationLastSetTailnetTargetFQDN} ) type tailscaleSTSConfig struct { @@ -201,11 +198,11 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga } sts.ProxyClass = proxyClass - secretName, tsConfigHash, _, err := a.createOrGetSecret(ctx, logger, sts, hsvc) + secretName, _, err := a.createOrGetSecret(ctx, logger, sts, hsvc) if err != nil { return nil, fmt.Errorf("failed to create or get API key secret: %w", err) } - _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName, tsConfigHash) + _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName) if err != nil { return nil, fmt.Errorf("failed to reconcile statefulset: %w", err) } @@ -335,7 +332,7 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) } -func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName, hash string, configs 
tailscaledConfigs, _ error) { +func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName string, configs tailscaledConfigs, _ error) { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ // Hardcode a -0 suffix so that in future, if we support @@ -351,7 +348,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName()) orig = secret.DeepCopy() } else if !apierrors.IsNotFound(err) { - return "", "", nil, err + return "", nil, err } var authKey string @@ -361,13 +358,13 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * // ACME account key. sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels) if err != nil { - return "", "", nil, err + return "", nil, err } if sts != nil { // StatefulSet exists, so we have already created the secret. // If the secret is missing, they should delete the StatefulSet. logger.Errorf("Tailscale proxy secret doesn't exist, but the corresponding StatefulSet %s/%s already does. Something is wrong, please delete the StatefulSet.", sts.GetNamespace(), sts.GetName()) - return "", "", nil, nil + return "", nil, nil } // Create API Key secret which is going to be used by the statefulset // to authenticate with Tailscale. 
@@ -378,25 +375,20 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * } authKey, err = newAuthKey(ctx, a.tsClient, tags) if err != nil { - return "", "", nil, err + return "", nil, err } } configs, err := tailscaledConfig(stsC, authKey, orig) if err != nil { - return "", "", nil, fmt.Errorf("error creating tailscaled config: %w", err) + return "", nil, fmt.Errorf("error creating tailscaled config: %w", err) } - hash, err = tailscaledConfigHash(configs) - if err != nil { - return "", "", nil, fmt.Errorf("error calculating hash of tailscaled configs: %w", err) - } - latest := tailcfg.CapabilityVersion(-1) var latestConfig ipn.ConfigVAlpha for key, val := range configs { fn := tsoperator.TailscaledConfigFileName(key) b, err := json.Marshal(val) if err != nil { - return "", "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + return "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err) } mak.Set(&secret.StringData, fn, string(b)) if key > latest { @@ -408,7 +400,7 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * if stsC.ServeConfig != nil { j, err := json.Marshal(stsC.ServeConfig) if err != nil { - return "", "", nil, err + return "", nil, err } mak.Set(&secret.StringData, "serve-config", string(j)) } @@ -416,15 +408,15 @@ func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger * if orig != nil { logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { - return "", "", nil, err + return "", nil, err } } else { logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) if err := a.Create(ctx, secret); err != nil { - return "", "", nil, err + return "", nil, err } } - return secret.Name, hash, configs, nil + return secret.Name, configs, nil } // sanitizeConfigBytes returns 
ipn.ConfigVAlpha in string form with redacted @@ -535,7 +527,7 @@ var proxyYaml []byte //go:embed deploy/manifests/userspace-proxy.yaml var userspaceProxyYaml []byte -func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret, tsConfigHash string) (*appsv1.StatefulSet, error) { +func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret string) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil { @@ -662,11 +654,6 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S }) } - dev, err := a.DeviceInfo(ctx, sts.ChildResourceLabels, logger) - if err != nil { - return nil, fmt.Errorf("failed to get device info: %w", err) - } - app, err := appInfoForProxy(sts) if err != nil { // No need to error out if now or in future we end up in a @@ -685,25 +672,7 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S ss = applyProxyClassToStatefulSet(sts.ProxyClass, ss, sts, logger) } updateSS := func(s *appsv1.StatefulSet) { - // This is a temporary workaround to ensure that proxies with capver older than 110 - // are restarted when tailscaled configfile contents have changed. - // This workaround ensures that: - // 1. The hash mechanism is used to trigger pod restarts for proxies below capver 110. - // 2. Proxies above capver are not unnecessarily restarted when the configfile contents change. - // 3. 
If the hash has alreay been set, but the capver is above 110, the old hash is preserved to avoid - // unnecessary pod restarts that could result in an update loop where capver cannot be determined for a - // restarting Pod and the hash is re-added again. - // Note that the hash annotation is only set on updates not creation, because if the StatefulSet is - // being created, there is no need for a restart. - // TODO(irbekrm): remove this in 1.84. - hash := tsConfigHash - if dev == nil || dev.capver >= 110 { - hash = s.Spec.Template.GetAnnotations()[podAnnotationLastSetConfigFileHash] - } s.Spec = ss.Spec - if hash != "" { - mak.Set(&s.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash, hash) - } s.ObjectMeta.Labels = ss.Labels s.ObjectMeta.Annotations = ss.Annotations } @@ -937,8 +906,7 @@ func readAuthKey(secret *corev1.Secret, key string) (*string, error) { } // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy -// state and auth key and returns tailscaled config files for currently supported proxy versions and a hash of that -// configuration. +// state and auth key and returns tailscaled config files for currently supported proxy versions. func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", @@ -1031,27 +999,6 @@ type ptrObject[T any] interface { type tailscaledConfigs map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha -// hashBytes produces a hash for the provided tailscaled config that is the same across -// different invocations of this code. We do not use the -// tailscale.com/deephash.Hash here because that produces a different hash for -// the same value in different tailscale builds. The hash we are producing here -// is used to determine if the container running the Connector Tailscale node -// needs to be restarted. 
The container does not need restarting when the only -// thing that changed is operator version (the hash is also exposed to users via -// an annotation and might be confusing if it changes without the config having -// changed). -func tailscaledConfigHash(c tailscaledConfigs) (string, error) { - b, err := json.Marshal(c) - if err != nil { - return "", fmt.Errorf("error marshalling tailscaled configs: %w", err) - } - h := sha256.New() - if _, err = h.Write(b); err != nil { - return "", fmt.Errorf("error calculating hash: %w", err) - } - return fmt.Sprintf("%x", h.Sum(nil)), nil -} - // createOrMaybeUpdate adds obj to the k8s cluster, unless the object already exists, // in which case update is called to make changes to it. If update is nil or returns // an error, the object is returned unmodified. diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 619aecc56816e..56542700d951c 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -62,7 +62,6 @@ type configOpts struct { subnetRoutes string isExitNode bool isAppConnector bool - confFileHash string serveConfig *ipn.ServeConfig shouldEnableForwardingClusterTrafficViaIngress bool proxyClass string // configuration from the named ProxyClass should be applied to proxy resources @@ -120,9 +119,6 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef ReadOnly: true, MountPath: "/etc/tsconfig", }} - if opts.confFileHash != "" { - mak.Set(&annots, "tailscale.com/operator-last-set-config-file-hash", opts.confFileHash) - } if opts.firewallMode != "" { tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_DEBUG_FIREWALL_MODE", @@ -358,10 +354,6 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps }, }, } - ss.Spec.Template.Annotations = map[string]string{} - if opts.confFileHash != "" { - ss.Spec.Template.Annotations["tailscale.com/operator-last-set-config-file-hash"] = 
opts.confFileHash - } // If opts.proxyClass is set, retrieve the ProxyClass and apply // configuration from that to the StatefulSet. if opts.proxyClass != "" { @@ -842,17 +834,6 @@ func (c *fakeTSClient) Deleted() []string { return c.deleted } -// removeHashAnnotation can be used to remove declarative tailscaled config hash -// annotation from proxy StatefulSets to make the tests more maintainable (so -// that we don't have to change the annotation in each test case after any -// change to the configfile contents). -func removeHashAnnotation(sts *appsv1.StatefulSet) { - delete(sts.Spec.Template.Annotations, podAnnotationLastSetConfigFileHash) - if len(sts.Spec.Template.Annotations) == 0 { - sts.Spec.Template.Annotations = nil - } -} - func removeResourceReqs(sts *appsv1.StatefulSet) { if sts != nil { sts.Spec.Template.Spec.Resources = nil From 5a52f80c4cb4fc231faec2790a088c8cb856397f Mon Sep 17 00:00:00 2001 From: okunamayanad Date: Tue, 17 Jun 2025 04:50:01 +0300 Subject: [PATCH 0002/1093] docs: fix typo in commit-messages.md Updates: #cleanup Signed-off-by: okunamayanad --- docs/commit-messages.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/commit-messages.md b/docs/commit-messages.md index b3881eaeb9fbb..79b16e4c6f6f2 100644 --- a/docs/commit-messages.md +++ b/docs/commit-messages.md @@ -65,7 +65,7 @@ Notably, for the subject (the first line of description): | `foo/bar:fix memory leak` | BAD: no space after colon | | `foo/bar : fix memory leak` | BAD: space before colon | | `foo/bar: fix memory leak Fixes #123` | BAD: the "Fixes" shouldn't be part of the title | - | `!fixup reviewer feedback` | BAD: we don't check in fixup commits; the history should always bissect to a clean, working tree | + | `!fixup reviewer feedback` | BAD: we don't check in fixup commits; the history should always bisect to a clean, working tree | For the body (the rest of the description): From 9af42f425ca48ca2e0dee9b3524ea586675069c6 Mon Sep 17 00:00:00 2001 
From: Brad Fitzpatrick Date: Thu, 19 Jun 2025 10:56:15 -0700 Subject: [PATCH 0003/1093] .github/workflows: shard the Windows builder It's one of the slower ones, so split it up into chunks. Updates tailscale/corp#28679 Change-Id: I16a5ba667678bf238c84417a51dda61baefbecf7 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 21 +++++++++++++++++---- cmd/testwrapper/testwrapper.go | 10 ++++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 6d8ab863ce422..722a73f93ce33 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -210,6 +210,17 @@ jobs: windows: runs-on: windows-2022 needs: gomod-cache + name: Windows (${{ matrix.name || matrix.shard}}) + strategy: + fail-fast: false # don't abort the entire matrix if one element fails + matrix: + include: + - key: "win-bench" + name: "benchmarks" + - key: "win-shard-1-2" + shard: "1/2" + - key: "win-shard-2-2" + shard: "2/2" steps: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -237,14 +248,16 @@ jobs: ~\AppData\Local\go-build # The -2- here should be incremented when the scheme of data to be # cached changes (e.g. path above changes). - key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} + key: ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} restore-keys: | - ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-go-2- + ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }} + ${{ github.job }}-${{ matrix.key }}-go-2- - name: test + if: matrix.key != 'win-bench' # skip on bench builder working-directory: src - run: go run ./cmd/testwrapper ./... + run: go run ./cmd/testwrapper sharded:${{ matrix.shard }} - name: bench all + if: matrix.key == 'win-bench' working-directory: src # Don't use -bench=. -benchtime=1x. 
# Somewhere in the layers (powershell?) diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 53c1b1d05f7ca..173edee733f04 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -213,6 +213,16 @@ func main() { return } + // As a special case, if the package list looks like "sharded:1/2" then shell out to + // ./tool/listpkgs to cut up the package list pieces for each sharded builder. + if nOfM, ok := strings.CutPrefix(packages[0], "sharded:"); ok && len(packages) == 1 { + out, err := exec.Command("go", "run", "tailscale.com/tool/listpkgs", "-shard", nOfM, "./...").Output() + if err != nil { + log.Fatalf("failed to list packages for sharded test: %v", err) + } + packages = strings.Split(strings.TrimSpace(string(out)), "\n") + } + ctx := context.Background() type nextRun struct { tests []*packageTests From ca06d944c5622e89ce1ae8e507149af2f858d2a0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 19 Jun 2025 18:35:49 -0700 Subject: [PATCH 0004/1093] .github/workflows: try running Windows jobs on bigger VMs Updates tailscale/corp#28679 Change-Id: Iee3f3820d2d8308fff3494e300ad3939e3ed2598 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 722a73f93ce33..2ebb82a8582c8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -208,7 +208,10 @@ jobs: windows: - runs-on: windows-2022 + # windows-8vcpu is a 2022 GitHub-managed runner in our + # org with 8 cores and 32 GB of RAM: + # https://github.com/organizations/tailscale/settings/actions/github-hosted-runners/1 + runs-on: windows-8vcpu needs: gomod-cache name: Windows (${{ matrix.name || matrix.shard}}) strategy: From bb085cfa3e434a5a8da2d27eca6e94c49bebc036 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 19 Jun 2025 20:48:50 -0700 Subject: 
[PATCH 0005/1093] tool: add go toolchain wrapper for Windows go.cmd lets you run just "./tool/go" on Windows the same as Linux/Darwin. The batch script (go.md) then just invokes PowerShell which is more powerful than batch. I wanted this while debugging Windows CI performance by reproducing slow tests on my local Windows laptop. Updates tailscale/corp#28679 Change-Id: I6e520968da3cef3032091c1c4f4237f663cefcab Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 16 +++++++++- tool/go.cmd | 2 ++ tool/go.ps1 | 64 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 tool/go.cmd create mode 100644 tool/go.ps1 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2ebb82a8582c8..2e80b44dcc4d3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -220,6 +220,8 @@ jobs: include: - key: "win-bench" name: "benchmarks" + - key: "win-tool-go" + name: "./tool/go" - key: "win-shard-1-2" shard: "1/2" - key: "win-shard-2-2" @@ -231,12 +233,14 @@ jobs: path: src - name: Install Go + if: matrix.key != 'win-tool-go' uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version-file: src/go.mod cache: false - name: Restore Go module cache + if: matrix.key != 'win-tool-go' uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: path: gomodcache @@ -244,6 +248,7 @@ jobs: enableCrossOsArchive: true - name: Restore Cache + if: matrix.key != 'win-tool-go' uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: path: | @@ -255,10 +260,17 @@ jobs: restore-keys: | ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ matrix.key }}-go-2- + + - name: test-tool-go + if: matrix.key == 'win-tool-go' + working-directory: src + run: ./tool/go version + - name: test - if: matrix.key != 'win-bench' # skip on bench builder + if: matrix.key != 'win-bench' && matrix.key != 
'win-tool-go' # skip on bench builder working-directory: src run: go run ./cmd/testwrapper sharded:${{ matrix.shard }} + - name: bench all if: matrix.key == 'win-bench' working-directory: src @@ -266,7 +278,9 @@ jobs: # Somewhere in the layers (powershell?) # the equals signs cause great confusion. run: go test ./... -bench . -benchtime 1x -run "^$" + - name: Tidy cache + if: matrix.key != 'win-tool-go' working-directory: src shell: bash run: | diff --git a/tool/go.cmd b/tool/go.cmd new file mode 100644 index 0000000000000..51bace110d59b --- /dev/null +++ b/tool/go.cmd @@ -0,0 +1,2 @@ +@echo off +powershell -NoProfile -ExecutionPolicy Bypass -File "%~dp0go.ps1" %* diff --git a/tool/go.ps1 b/tool/go.ps1 new file mode 100644 index 0000000000000..49313ffbabee9 --- /dev/null +++ b/tool/go.ps1 @@ -0,0 +1,64 @@ +<# + go.ps1 – Tailscale Go toolchain fetching wrapper for Windows/PowerShell + • Reads go.toolchain.rev one dir above this script + • If the requested commit hash isn't cached, downloads and unpacks + https://github.com/tailscale/go/releases/download/build-${REV}/${OS}-${ARCH}.tar.gz + • Finally execs the toolchain's "go" binary, forwarding all args & exit-code +#> + +param( + [Parameter(ValueFromRemainingArguments = $true)] + [string[]] $Args +) + +Set-StrictMode -Version Latest +$ErrorActionPreference = 'Stop' + +if ($env:CI -eq 'true' -and $env:NODEBUG -ne 'true') { + $VerbosePreference = 'Continue' +} + +$repoRoot = Resolve-Path (Join-Path $PSScriptRoot '..') +$REV = (Get-Content (Join-Path $repoRoot 'go.toolchain.rev') -Raw).Trim() + +if ([IO.Path]::IsPathRooted($REV)) { + $toolchain = $REV +} else { + if (-not [string]::IsNullOrWhiteSpace($env:TSGO_CACHE_ROOT)) { + $cacheRoot = $env:TSGO_CACHE_ROOT + } else { + $cacheRoot = Join-Path $env:USERPROFILE '.cache\tsgo' + } + + $toolchain = Join-Path $cacheRoot $REV + $marker = "$toolchain.extracted" + + if (-not (Test-Path $marker)) { + Write-Host "# Downloading Go toolchain $REV" -ForegroundColor Cyan + if 
(Test-Path $toolchain) { Remove-Item -Recurse -Force $toolchain } + + # Removing the marker file again (even though it shouldn't still exist) + # because the equivalent Bash script also does so (to guard against + # concurrent cache fills?). + # TODO(bradfitz): remove this and add some proper locking instead? + if (Test-Path $marker ) { Remove-Item -Force $marker } + + New-Item -ItemType Directory -Path $cacheRoot -Force | Out-Null + + $url = "https://github.com/tailscale/go/releases/download/build-$REV/windows-amd64.tar.gz" + $tgz = "$toolchain.tar.gz" + Invoke-WebRequest -Uri $url -OutFile $tgz -UseBasicParsing -ErrorAction Stop + + New-Item -ItemType Directory -Path $toolchain -Force | Out-Null + tar --strip-components=1 -xzf $tgz -C $toolchain + Remove-Item $tgz + Set-Content -Path $marker -Value $REV + } +} + +$goExe = Join-Path $toolchain 'bin\go.exe' +if (-not (Test-Path $goExe)) { throw "go executable not found at $goExe" } + +& $goExe @Args +exit $LASTEXITCODE + From 12e92b1b085b72e900e001d2bd5c827ed395bd57 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 20 Jun 2025 10:25:42 -0700 Subject: [PATCH 0006/1093] tsconsensus: skipping slow non-applicable tests on Windows for now Updates #16340 Change-Id: I61b0186295c095f99c5be81dc4dced5853025d35 Signed-off-by: Brad Fitzpatrick --- tsconsensus/tsconsensus_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index d1b92f8a489f7..bfb6b3e0688cc 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -17,6 +17,7 @@ import ( "net/netip" "os" "path/filepath" + "runtime" "strings" "sync" "testing" @@ -37,6 +38,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/views" + "tailscale.com/util/cibuild" "tailscale.com/util/racebuild" ) @@ -113,6 +115,9 @@ func (f *fsm) Restore(rc io.ReadCloser) error { } func testConfig(t *testing.T) { + if runtime.GOOS == "windows" && 
cibuild.On() { + t.Skip("cmd/natc isn't supported on Windows, so skipping tsconsensus tests on CI for now; see https://github.com/tailscale/tailscale/issues/16340") + } // -race AND Parallel makes things start to take too long. if !racebuild.On { t.Parallel() From d3bb34c628b01953c1f064d75d01c0a41e4d41ab Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 20 Jun 2025 15:00:28 -0700 Subject: [PATCH 0007/1093] wgengine/magicsock: generate relay server set from tailnet policy (#16331) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 173 +++++++++++++++++--- wgengine/magicsock/magicsock_test.go | 202 +++++++++++++++++++++++- wgengine/magicsock/relaymanager.go | 29 ++++ wgengine/magicsock/relaymanager_test.go | 6 + 4 files changed, 386 insertions(+), 24 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index bfc7afba95149..0679a4ebd049e 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -14,6 +14,7 @@ import ( "expvar" "fmt" "io" + "math" "net" "net/netip" "reflect" @@ -348,17 +349,19 @@ type Conn struct { // magicsock could do with any complexity reduction it can get. 
netInfoLast *tailcfg.NetInfo - derpMap *tailcfg.DERPMap // nil (or zero regions/nodes) means DERP is disabled - peers views.Slice[tailcfg.NodeView] // from last onNodeViewsUpdate update - lastFlags debugFlags // at time of last onNodeViewsUpdate - firstAddrForTest netip.Addr // from last onNodeViewsUpdate update; for tests only - privateKey key.NodePrivate // WireGuard private key for this node - everHadKey bool // whether we ever had a non-zero private key - myDerp int // nearest DERP region ID; 0 means none/unknown - homeless bool // if true, don't try to find & stay conneted to a DERP home (myDerp will stay 0) - derpStarted chan struct{} // closed on first connection to DERP; for tests & cleaner Close - activeDerp map[int]activeDerp // DERP regionID -> connection to a node in that region - prevDerp map[int]*syncs.WaitGroupChan + derpMap *tailcfg.DERPMap // nil (or zero regions/nodes) means DERP is disabled + self tailcfg.NodeView // from last onNodeViewsUpdate + peers views.Slice[tailcfg.NodeView] // from last onNodeViewsUpdate, sorted by Node.ID; Note: [netmap.NodeMutation]'s rx'd in onNodeMutationsUpdate are never applied + filt *filter.Filter // from last onFilterUpdate + relayClientEnabled bool // whether we can allocate UDP relay endpoints on UDP relay servers + lastFlags debugFlags // at time of last onNodeViewsUpdate + privateKey key.NodePrivate // WireGuard private key for this node + everHadKey bool // whether we ever had a non-zero private key + myDerp int // nearest DERP region ID; 0 means none/unknown + homeless bool // if true, don't try to find & stay connected to a DERP home (myDerp will stay 0) + derpStarted chan struct{} // closed on first connection to DERP; for tests & cleaner Close + activeDerp map[int]activeDerp // DERP regionID -> connection to a node in that region + prevDerp map[int]*syncs.WaitGroupChan // derpRoute contains optional alternate routes to use as an // optimization instead of contacting a peer via their home @@ -2555,38 +2558,160 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { // TODO(jwhited): implement once capVer is bumped - return false + return version == math.MinInt32 +} + +func capVerIsRelayServerCapable(version tailcfg.CapabilityVersion) bool { + // TODO(jwhited): implement once capVer is bumped + return version == math.MinInt32 } +// onFilterUpdate is called when a [FilterUpdate] is received over the +// [eventbus.Bus]. func (c *Conn) onFilterUpdate(f FilterUpdate) { - // TODO(jwhited): implement + c.mu.Lock() + c.filt = f.Filter + self := c.self + peers := c.peers + relayClientEnabled := c.relayClientEnabled + c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) + + if !relayClientEnabled { + // Early return if we cannot operate as a relay client. + return + } + + // The filter has changed, and we are operating as a relay server client. + // Re-evaluate it in order to produce an updated relay server set. + c.updateRelayServersSet(f.Filter, self, peers) +} + +// updateRelayServersSet iterates all peers, evaluating filt for each one in +// order to determine which peers are relay server candidates. filt, self, and +// peers are passed as args (vs c.mu-guarded fields) to enable callers to +// release c.mu before calling as this is O(m * n) (we iterate all cap rules 'm' +// in filt for every peer 'n'). +// TODO: Optimize this so that it's not O(m * n). This might involve: +// 1. Changes to [filter.Filter], e.g. adding a CapsWithValues() to check for +// a given capability instead of building and returning a map of all of +// them. +// 2. 
@@ func (o *Options) derpActiveFunc() func() { // this type out of magicsock. type NodeViewsUpdate struct { SelfNode tailcfg.NodeView - Peers []tailcfg.NodeView + Peers []tailcfg.NodeView // sorted by Node.ID } // NodeMutationsUpdate represents an update event of one or more @@ -2555,38 +2558,160 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { // TODO(jwhited): implement once capVer is bumped - return false + return version == math.MinInt32 +} + +func capVerIsRelayServerCapable(version tailcfg.CapabilityVersion) bool { + // TODO(jwhited): implement once capVer is bumped + return version == math.MinInt32 } +// onFilterUpdate is called when a [FilterUpdate] is received over the +// [eventbus.Bus]. func (c *Conn) onFilterUpdate(f FilterUpdate) { - // TODO(jwhited): implement + c.mu.Lock() + c.filt = f.Filter + self := c.self + peers := c.peers + relayClientEnabled := c.relayClientEnabled + c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) + + if !relayClientEnabled { + // Early return if we cannot operate as a relay client. + return + } + + // The filter has changed, and we are operating as a relay server client. + // Re-evaluate it in order to produce an updated relay server set. + c.updateRelayServersSet(f.Filter, self, peers) +} + +// updateRelayServersSet iterates all peers, evaluating filt for each one in +// order to determine which peers are relay server candidates. filt, self, and +// peers are passed as args (vs c.mu-guarded fields) to enable callers to +// release c.mu before calling as this is O(m * n) (we iterate all cap rules 'm' +// in filt for every peer 'n'). +// TODO: Optimize this so that it's not O(m * n). This might involve: +// 1. Changes to [filter.Filter], e.g. adding a CapsWithValues() to check for +// a given capability instead of building and returning a map of all of +// them. +// 2. 
Moving this work upstream into [nodeBackend] or similar, and publishing +// the computed result over the eventbus instead. +func (c *Conn) updateRelayServersSet(filt *filter.Filter, self tailcfg.NodeView, peers views.Slice[tailcfg.NodeView]) { + relayServers := make(set.Set[netip.AddrPort]) + for _, peer := range peers.All() { + peerAPI := peerAPIIfCandidateRelayServer(filt, self, peer) + if peerAPI.IsValid() { + relayServers.Add(peerAPI) + } + } + c.relayManager.handleRelayServersSet(relayServers) +} + +// peerAPIIfCandidateRelayServer returns the peer API address of peer if it +// is considered to be a candidate relay server upon evaluation against filt and +// self, otherwise it returns a zero value. +func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, peer tailcfg.NodeView) netip.AddrPort { + if filt == nil || + !self.Valid() || + !peer.Valid() || + !capVerIsRelayServerCapable(peer.Cap()) || + !peer.Hostinfo().Valid() { + return netip.AddrPort{} + } + for _, peerPrefix := range peer.Addresses().All() { + if !peerPrefix.IsSingleIP() { + continue + } + peerAddr := peerPrefix.Addr() + for _, selfPrefix := range self.Addresses().All() { + if !selfPrefix.IsSingleIP() { + continue + } + selfAddr := selfPrefix.Addr() + if selfAddr.BitLen() == peerAddr.BitLen() { // same address family + if filt.CapsWithValues(peerAddr, selfAddr).HasCapability(tailcfg.PeerCapabilityRelayTarget) { + for _, s := range peer.Hostinfo().Services().All() { + if peerAddr.Is4() && s.Proto == tailcfg.PeerAPI4 || + peerAddr.Is6() && s.Proto == tailcfg.PeerAPI6 { + return netip.AddrPortFrom(peerAddr, s.Port) + } + } + return netip.AddrPort{} // no peerAPI + } else { + // [nodeBackend.peerCapsLocked] only returns/considers the + // [tailcfg.PeerCapMap] between the passed src and the + // _first_ host (/32 or /128) address for self. We are + // consistent with that behavior here. 
If self and peer + // host addresses are of the same address family they either + // have the capability or not. We do not check against + // additional host addresses of the same address family. + return netip.AddrPort{} + } + } + } + } + return netip.AddrPort{} } // onNodeViewsUpdate is called when a [NodeViewsUpdate] is received over the // [eventbus.Bus]. func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { + peersChanged := c.updateNodes(update) + + relayClientEnabled := update.SelfNode.Valid() && + update.SelfNode.HasCap(tailcfg.NodeAttrRelayClient) && + envknob.UseWIPCode() + + c.mu.Lock() + relayClientChanged := c.relayClientEnabled != relayClientEnabled + c.relayClientEnabled = relayClientEnabled + filt := c.filt + self := c.self + peers := c.peers + c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) + + if peersChanged || relayClientChanged { + if !relayClientEnabled { + c.relayManager.handleRelayServersSet(nil) + } else { + c.updateRelayServersSet(filt, self, peers) + } + } +} + +// updateNodes updates [Conn] to reflect the [tailcfg.NodeView]'s contained +// in update. It returns true if update.Peers was unequal to c.peers, otherwise +// false. +func (c *Conn) updateNodes(update NodeViewsUpdate) (peersChanged bool) { c.mu.Lock() defer c.mu.Unlock() if c.closed { - return + return false } priorPeers := c.peers metricNumPeers.Set(int64(len(update.Peers))) - // Update c.netMap regardless, before the following early return. + // Update c.self & c.peers regardless, before the following early return. + c.self = update.SelfNode curPeers := views.SliceOf(update.Peers) c.peers = curPeers + // [debugFlags] are mutable in [Conn.SetSilentDisco] & + // [Conn.SetProbeUDPLifetime]. These setters are passed [controlknobs.Knobs] + // values by [ipnlocal.LocalBackend] around netmap reception. + // [controlknobs.Knobs] are simply self [tailcfg.NodeCapability]'s. 
They are + // useful as a global view of notable feature toggles, but the magicsock + // setters are completely unnecessary as we have the same values right here + // (update.SelfNode.Capabilities) at a time they are considered most + // up-to-date. + // TODO: mutate [debugFlags] here instead of in various [Conn] setters. flags := c.debugFlagsLocked() - if update.SelfNode.Valid() && update.SelfNode.Addresses().Len() > 0 { - c.firstAddrForTest = update.SelfNode.Addresses().At(0).Addr() - } else { - c.firstAddrForTest = netip.Addr{} - } - if nodesEqual(priorPeers, curPeers) && c.lastFlags == flags { + peersChanged = !nodesEqual(priorPeers, curPeers) + if !peersChanged && c.lastFlags == flags { // The rest of this function is all adjusting state for peers that have // changed. But if the set of peers is equal and the debug flags (for // silent disco and probe UDP lifetime) haven't changed, there is no @@ -2728,6 +2853,8 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { delete(c.discoInfo, dk) } } + + return peersChanged } func devPanicf(format string, a ...any) { @@ -3245,7 +3372,7 @@ func simpleDur(d time.Duration) time.Duration { } // onNodeMutationsUpdate is called when a [NodeMutationsUpdate] is received over -// the [eventbus.Bus]. +// the [eventbus.Bus]. Note: It does not apply these mutations to c.peers. 
func (c *Conn) onNodeMutationsUpdate(update NodeMutationsUpdate) { c.mu.Lock() defer c.mu.Unlock() diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 7fa062fa87df2..8aa9a09d2c15a 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -19,6 +19,7 @@ import ( "net/http/httptest" "net/netip" "os" + "reflect" "runtime" "strconv" "strings" @@ -71,6 +72,7 @@ import ( "tailscale.com/util/slicesx" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/filter/filtertype" "tailscale.com/wgengine/wgcfg" "tailscale.com/wgengine/wgcfg/nmcfg" "tailscale.com/wgengine/wglog" @@ -275,7 +277,10 @@ func (s *magicStack) Status() *ipnstate.Status { func (s *magicStack) IP() netip.Addr { for deadline := time.Now().Add(5 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) { s.conn.mu.Lock() - addr := s.conn.firstAddrForTest + var addr netip.Addr + if s.conn.self.Valid() && s.conn.self.Addresses().Len() > 0 { + addr = s.conn.self.Addresses().At(0).Addr() + } s.conn.mu.Unlock() if addr.IsValid() { return addr @@ -3378,3 +3383,198 @@ func Test_virtualNetworkID(t *testing.T) { }) } } + +func Test_peerAPIIfCandidateRelayServer(t *testing.T) { + selfOnlyIPv4 := &tailcfg.Node{ + Cap: math.MinInt32, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), + }, + } + selfOnlyIPv6 := selfOnlyIPv4.Clone() + selfOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") + + peerHostinfo := &tailcfg.Hostinfo{ + Services: []tailcfg.Service{ + { + Proto: tailcfg.PeerAPI4, + Port: 4, + }, + { + Proto: tailcfg.PeerAPI6, + Port: 6, + }, + }, + } + peerOnlyIPv4 := &tailcfg.Node{ + Cap: math.MinInt32, + CapMap: map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + tailcfg.NodeAttrRelayServer: nil, + }, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("2.2.2.2/32"), + }, + Hostinfo: peerHostinfo.View(), + } + + peerOnlyIPv6 := peerOnlyIPv4.Clone() + 
peerOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::2/128") + + peerOnlyIPv4ZeroCapVer := peerOnlyIPv4.Clone() + peerOnlyIPv4ZeroCapVer.Cap = 0 + + peerOnlyIPv4NilHostinfo := peerOnlyIPv4.Clone() + peerOnlyIPv4NilHostinfo.Hostinfo = tailcfg.HostinfoView{} + + tests := []struct { + name string + filt *filter.Filter + self tailcfg.NodeView + peer tailcfg.NodeView + want netip.AddrPort + }{ + { + name: "match v4", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv4.View(), + peer: peerOnlyIPv4.View(), + want: netip.MustParseAddrPort("2.2.2.2:4"), + }, + { + name: "match v6", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("::1/128"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv6.View(), + peer: peerOnlyIPv6.View(), + want: netip.MustParseAddrPort("[::2]:6"), + }, + { + name: "no match dst", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("::3/128"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv6.View(), + peer: peerOnlyIPv6.View(), + }, + { + name: "no match peer cap", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("::1/128"), + Cap: tailcfg.PeerCapabilityIngress, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv6.View(), + peer: peerOnlyIPv6.View(), + }, + { + name: "cap ver not relay capable", + filt: filter.New([]filtertype.Match{ + { + Srcs: 
[]netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: peerOnlyIPv4.View(), + peer: peerOnlyIPv4ZeroCapVer.View(), + }, + { + name: "nil filt", + filt: nil, + self: selfOnlyIPv4.View(), + peer: peerOnlyIPv4.View(), + }, + { + name: "nil self", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: tailcfg.NodeView{}, + peer: peerOnlyIPv4.View(), + }, + { + name: "nil peer", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv4.View(), + peer: tailcfg.NodeView{}, + }, + { + name: "nil peer hostinfo", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv4.View(), + peer: peerOnlyIPv4NilHostinfo.View(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := peerAPIIfCandidateRelayServer(tt.filt, tt.self, tt.peer); !reflect.DeepEqual(got, tt.want) { + t.Errorf("peerAPIIfCandidateRelayServer() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 2b636dc5758d1..3c8ceb2de8625 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -51,6 +51,7 @@ type relayManager 
struct { cancelWorkCh chan *endpoint newServerEndpointCh chan newRelayServerEndpointEvent rxHandshakeDiscoMsgCh chan relayHandshakeDiscoMsgEvent + serversCh chan set.Set[netip.AddrPort] discoInfoMu sync.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo @@ -174,7 +175,29 @@ func (r *relayManager) runLoop() { if !r.hasActiveWorkRunLoop() { return } + case serversUpdate := <-r.serversCh: + r.handleServersUpdateRunLoop(serversUpdate) + if !r.hasActiveWorkRunLoop() { + return + } + } + } +} + +func (r *relayManager) handleServersUpdateRunLoop(update set.Set[netip.AddrPort]) { + for k, v := range r.serversByAddrPort { + if !update.Contains(k) { + delete(r.serversByAddrPort, k) + delete(r.serversByDisco, v) + } + } + for _, v := range update.Slice() { + _, ok := r.serversByAddrPort[v] + if ok { + // don't zero known disco keys + continue } + r.serversByAddrPort[v] = key.DiscoPublic{} } } @@ -215,6 +238,7 @@ func (r *relayManager) init() { r.cancelWorkCh = make(chan *endpoint) r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) r.rxHandshakeDiscoMsgCh = make(chan relayHandshakeDiscoMsgEvent) + r.serversCh = make(chan set.Set[netip.AddrPort]) r.runLoopStoppedCh = make(chan struct{}, 1) r.runLoopStoppedCh <- struct{}{} }) @@ -299,6 +323,11 @@ func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(dm disco.Message, di relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) } +// handleRelayServersSet handles an update of the complete relay server set. +func (r *relayManager) handleRelayServersSet(servers set.Set[netip.AddrPort]) { + relayManagerInputEvent(r, nil, &r.serversCh, servers) +} + // relayManagerInputEvent initializes [relayManager] if necessary, starts // relayManager.runLoop() if it is not running, and writes 'event' on 'eventCh'. 
// diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index be0582669c964..6055c2d72b4ef 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -4,10 +4,12 @@ package magicsock import ( + "net/netip" "testing" "tailscale.com/disco" "tailscale.com/types/key" + "tailscale.com/util/set" ) func TestRelayManagerInitAndIdle(t *testing.T) { @@ -26,4 +28,8 @@ func TestRelayManagerInitAndIdle(t *testing.T) { rm = relayManager{} rm.handleGeneveEncapDiscoMsgNotBestAddr(&disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.handleRelayServersSet(make(set.Set[netip.AddrPort])) + <-rm.runLoopStoppedCh } From cd9b9a8cadfd03b9e304ca8a2ff0900d016387fc Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 20 Jun 2025 19:23:52 -0700 Subject: [PATCH 0008/1093] wgengine/magicsock: fix relay endpoint allocation URL (#16344) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 3c8ceb2de8625..81a71b20e22f1 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -737,7 +737,7 @@ func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGr const reqTimeout = time.Second * 10 reqCtx, cancel := context.WithTimeout(ctx, reqTimeout) defer cancel() - req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/relay/endpoint", &b) + req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/v0/relay/endpoint", &b) if err != nil { return } From e935a28a196f4ccb212ed44c23b62f4e40a7f243 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Sat, 21 Jun 2025 19:09:19 -0700 Subject: [PATCH 0009/1093] wgengine/magicsock: set rxDiscoMsgCh field in 
relayHandshakeWork (#16349) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 81a71b20e22f1..3e72ff0f08ae3 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -567,11 +567,12 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay // We're ready to start a new handshake. ctx, cancel := context.WithCancel(context.Background()) work := &relayHandshakeWork{ - ep: newServerEndpoint.ep, - se: newServerEndpoint.se, - doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), - ctx: ctx, - cancel: cancel, + ep: newServerEndpoint.ep, + se: newServerEndpoint.se, + rxDiscoMsgCh: make(chan relayHandshakeDiscoMsgEvent), + doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), + ctx: ctx, + cancel: cancel, } if byServerDisco == nil { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) From 61958f531c5c6a004415b46eb341f2dc289288cd Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Sat, 21 Jun 2025 19:09:36 -0700 Subject: [PATCH 0010/1093] wgengine/magicsock: set conn field in relayHandshakeDiscoMsgEvent (#16348) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 2 +- wgengine/magicsock/magicsock.go | 6 +++--- wgengine/magicsock/relaymanager.go | 4 ++-- wgengine/magicsock/relaymanager_test.go | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 23316dcb454cf..fb5a28c2832fd 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1601,7 +1601,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd if src.vni.isSet() && src != de.bestAddr.epAddr { // "src" is not our bestAddr, but [relayManager] might be in the // 
middle of probing it, awaiting pong reception. Make it aware. - de.c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(m, di, src) + de.c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(de.c, m, di, src) return false } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 0679a4ebd049e..a96eaf3d800b9 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2053,7 +2053,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) return } - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(challenge, di, src) + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, challenge, di, src) return } @@ -2075,7 +2075,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return true }) if !knownTxID && src.vni.isSet() { - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src) + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, dm, di, src) } case *disco.CallMeMaybe, *disco.CallMeMaybeVia: var via *disco.CallMeMaybeVia @@ -2221,7 +2221,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // using it as a bestAddr. [relayManager] might be in the middle of // probing it or attempting to set it as best via // [endpoint.relayEndpointReady()]. Make [relayManager] aware. 
- c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(dm, di, src) + c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, dm, di, src) return } default: // no VNI diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 3e72ff0f08ae3..e655ec99230a3 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -319,8 +319,8 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeV // handleGeneveEncapDiscoMsgNotBestAddr handles reception of Geneve-encapsulated // disco messages if they are not associated with any known // [*endpoint.bestAddr]. -func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(dm disco.Message, di *discoInfo, src epAddr) { - relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) +func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(conn *Conn, dm disco.Message, di *discoInfo, src epAddr) { + relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{conn: conn, msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) } // handleRelayServersSet handles an update of the complete relay server set. 
diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 6055c2d72b4ef..de282b4990637 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -26,7 +26,7 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleGeneveEncapDiscoMsgNotBestAddr(&disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) + rm.handleGeneveEncapDiscoMsgNotBestAddr(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) <-rm.runLoopStoppedCh rm = relayManager{} From 0905936c45b6380d65d347e3cb9037f64991b8f4 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Sat, 21 Jun 2025 21:14:42 -0700 Subject: [PATCH 0011/1093] wgengine/magicsock: set Geneve header protocol for WireGuard (#16350) Otherwise receives interpret as naked WireGuard. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/batching_conn_linux.go | 2 ++ wgengine/magicsock/rebinding_conn.go | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/batching_conn_linux.go b/wgengine/magicsock/batching_conn_linux.go index c9aaff168b5b6..a0607c624445c 100644 --- a/wgengine/magicsock/batching_conn_linux.go +++ b/wgengine/magicsock/batching_conn_linux.go @@ -114,6 +114,7 @@ func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, vni virtualNetwo vniIsSet := vni.isSet() var gh packet.GeneveHeader if vniIsSet { + gh.Protocol = packet.GeneveProtocolWireGuard gh.VNI = vni.get() } for i, buff := range buffs { @@ -202,6 +203,7 @@ retry: vniIsSet := addr.vni.isSet() var gh packet.GeneveHeader if vniIsSet { + gh.Protocol = packet.GeneveProtocolWireGuard gh.VNI = addr.vni.get() offset -= packet.GeneveFixedHeaderLength } diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index 51e97c8ccae2e..8b9ad4bb0bead 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ 
b/wgengine/magicsock/rebinding_conn.go @@ -85,7 +85,8 @@ func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) var gh packet.GeneveHeader if vniIsSet { gh = packet.GeneveHeader{ - VNI: addr.vni.get(), + Protocol: packet.GeneveProtocolWireGuard, + VNI: addr.vni.get(), } } for _, buf := range buffs { From b3e74367d84650600b25162510d8beaf8a460240 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 22 Jun 2025 21:15:20 -0700 Subject: [PATCH 0012/1093] tool: rename go.ps1 to go-win.ps1 for cmd.exe+Powershell compat This tweaks the just-added ./tool/go.{cmd,ps1} port of ./tool/go for Windows. Otherwise in Windows Terminal in Powershell, running just ".\tool\go" picks up go.ps1 before go.cmd, which means execution gets denied without the cmd script's -ExecutionPolicy Bypass part letting it work. This makes it work in both cmd.exe and in Powershell. Updates tailscale/corp#28679 Change-Id: Iaf628a9fd6cb95670633b2dbdb635dfb8afaa006 Signed-off-by: Brad Fitzpatrick --- tool/{go.ps1 => go-win.ps1} | 0 tool/go.cmd | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename tool/{go.ps1 => go-win.ps1} (100%) diff --git a/tool/go.ps1 b/tool/go-win.ps1 similarity index 100% rename from tool/go.ps1 rename to tool/go-win.ps1 diff --git a/tool/go.cmd b/tool/go.cmd index 51bace110d59b..04172a28d5b25 100644 --- a/tool/go.cmd +++ b/tool/go.cmd @@ -1,2 +1,2 @@ @echo off -powershell -NoProfile -ExecutionPolicy Bypass -File "%~dp0go.ps1" %* +powershell -NoProfile -ExecutionPolicy Bypass -File "%~dp0go-win.ps1" %* From 9309760263e7c7c34522871752cf1da08b82b72a Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Thu, 19 Jun 2025 11:31:47 +0200 Subject: [PATCH 0013/1093] util/prompt: make yes/no prompt reusable Updates #19445 Signed-off-by: Kristoffer Dalby --- cmd/tailscale/cli/serve_v2.go | 3 ++- cmd/tailscale/cli/update.go | 18 ++---------------- cmd/tailscale/depaware.txt | 1 + util/prompt/prompt.go | 24 ++++++++++++++++++++++++ 4 files changed, 29 
insertions(+), 17 deletions(-) create mode 100644 util/prompt/prompt.go diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 3e173ce28d8c1..bb51fb7d0e131 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -28,6 +28,7 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" "tailscale.com/util/mak" + "tailscale.com/util/prompt" "tailscale.com/util/slicesx" "tailscale.com/version" ) @@ -757,7 +758,7 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort u if len(mounts) > 1 { msg := fmt.Sprintf("Are you sure you want to delete %d handlers under port %s?", len(mounts), portStr) - if !e.yes && !promptYesNo(msg) { + if !e.yes && !prompt.YesNo(msg) { return nil } } diff --git a/cmd/tailscale/cli/update.go b/cmd/tailscale/cli/update.go index 69d1aa97b43f7..7c0269f6a7687 100644 --- a/cmd/tailscale/cli/update.go +++ b/cmd/tailscale/cli/update.go @@ -9,10 +9,10 @@ import ( "flag" "fmt" "runtime" - "strings" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/clientupdate" + "tailscale.com/util/prompt" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -87,19 +87,5 @@ func confirmUpdate(ver string) bool { } msg := fmt.Sprintf("This will update Tailscale from %v to %v. Continue?", version.Short(), ver) - return promptYesNo(msg) -} - -// PromptYesNo takes a question and prompts the user to answer the -// question with a yes or no. It appends a [y/n] to the message. 
-func promptYesNo(msg string) bool { - fmt.Print(msg + " [y/n] ") - var resp string - fmt.Scanln(&resp) - resp = strings.ToLower(resp) - switch resp { - case "y", "yes", "sure": - return true - } - return false + return prompt.YesNo(msg) } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 69d054ea42fb6..e44e20e8c92b2 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -172,6 +172,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/multierr from tailscale.com/control/controlhttp+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/set from tailscale.com/derp+ diff --git a/util/prompt/prompt.go b/util/prompt/prompt.go new file mode 100644 index 0000000000000..4e589ceb32b52 --- /dev/null +++ b/util/prompt/prompt.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package prompt provides a simple way to prompt the user for input. +package prompt + +import ( + "fmt" + "strings" +) + +// YesNo takes a question and prompts the user to answer the +// question with a yes or no. It appends a [y/n] to the message. 
+func YesNo(msg string) bool { + fmt.Print(msg + " [y/n] ") + var resp string + fmt.Scanln(&resp) + resp = strings.ToLower(resp) + switch resp { + case "y", "yes", "sure": + return true + } + return false +} From 01982552663848378ba6cd6ac27013fe4d65f84b Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Thu, 19 Jun 2025 11:32:54 +0200 Subject: [PATCH 0014/1093] cmd/tailscale: warn user about nllock key removal without resigning Fixes #19445 Signed-off-by: Kristoffer Dalby --- cmd/tailscale/cli/network-lock.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index ae1e90bbfaea9..871a931b54ba5 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -17,12 +17,14 @@ import ( "strings" "time" + "github.com/mattn/go-isatty" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/ipn/ipnstate" "tailscale.com/tka" "tailscale.com/tsconst" "tailscale.com/types/key" "tailscale.com/types/tkatype" + "tailscale.com/util/prompt" ) var netlockCmd = &ffcli.Command{ @@ -369,6 +371,18 @@ func runNetworkLockRemove(ctx context.Context, args []string) error { } } } + } else { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Printf(`Warning +Removal of a signing key(s) without resigning nodes (--re-sign=false) +will cause any nodes signed by the the given key(s) to be locked out +of the Tailscale network. Proceed with caution. +`) + if !prompt.YesNo("Are you sure you want to remove the signing key(s)?") { + fmt.Printf("aborting removal of signing key(s)\n") + os.Exit(0) + } + } } return localClient.NetworkLockModify(ctx, nil, removeKeys) From 9288efe592d45c2578278c61ac0bddd4db57e901 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 23 Jun 2025 08:53:29 -0700 Subject: [PATCH 0015/1093] wgengine/magicsock: remove premature return in handshakeServerEndpoint (#16351) Any return underneath this select case must belong to a type switch case. 
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 1 - 1 file changed, 1 deletion(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index e655ec99230a3..4ccfbb501ed94 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -691,7 +691,6 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { // unexpected message type, silently discard continue } - return case <-timer.C: // The handshake timed out. return From a589863d61725bf027bb03a1389c7900dce611b8 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 23 Jun 2025 15:50:43 -0700 Subject: [PATCH 0016/1093] feature/relayserver,net/udprelay,wgengine/magicsock: implement retry (#16347) udprelay.Server is lazily initialized when the first request is received over peerAPI. These early requests have a high chance of failure until the first address discovery cycle has completed. Return an ErrServerNotReady error until the first address discovery cycle has completed, and plumb retry handling for this error all the way back to the client in relayManager. relayManager can now retry after a few seconds instead of waiting for the next path discovery cycle, which could take another minute or longer. 
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 8 +++ net/udprelay/server.go | 37 +++++++++---- wgengine/magicsock/relaymanager.go | 85 ++++++++++++++++++++++-------- 3 files changed, 96 insertions(+), 34 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index a38587aa37b3a..4634f3ac27151 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -8,9 +8,11 @@ package relayserver import ( "encoding/json" "errors" + "fmt" "io" "net/http" "sync" + "time" "tailscale.com/envknob" "tailscale.com/feature" @@ -184,6 +186,12 @@ func handlePeerAPIRelayAllocateEndpoint(h ipnlocal.PeerAPIHandler, w http.Respon } ep, err := rs.AllocateEndpoint(allocateEndpointReq.DiscoKeys[0], allocateEndpointReq.DiscoKeys[1]) if err != nil { + var notReady udprelay.ErrServerNotReady + if errors.As(err, ¬Ready) { + w.Header().Set("Retry-After", fmt.Sprintf("%d", notReady.RetryAfter.Round(time.Second)/time.Second)) + httpErrAndLog(err.Error(), http.StatusServiceUnavailable) + return + } httpErrAndLog(err.Error(), http.StatusInternalServerError) return } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index f7f5868c06f21..8b9e95fb1e728 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -63,13 +63,14 @@ type Server struct { closeCh chan struct{} netChecker *netcheck.Client - mu sync.Mutex // guards the following fields - addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints - closed bool - lamportID uint64 - vniPool []uint32 // the pool of available VNIs - byVNI map[uint32]*serverEndpoint - byDisco map[pairOfDiscoPubKeys]*serverEndpoint + mu sync.Mutex // guards the following fields + addrDiscoveryOnce bool // addrDiscovery completed once (successfully or unsuccessfully) + addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints + closed bool + lamportID uint64 + vniPool []uint32 // the 
pool of available VNIs + byVNI map[uint32]*serverEndpoint + byDisco map[pairOfDiscoPubKeys]*serverEndpoint } // pairOfDiscoPubKeys is a pair of key.DiscoPublic. It must be constructed via @@ -321,8 +322,7 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve s.wg.Add(1) go s.endpointGCLoop() if len(overrideAddrs) > 0 { - var addrPorts set.Set[netip.AddrPort] - addrPorts.Make() + addrPorts := make(set.Set[netip.AddrPort], len(overrideAddrs)) for _, addr := range overrideAddrs { if addr.IsValid() { addrPorts.Add(netip.AddrPortFrom(addr, boundPort)) @@ -401,12 +401,12 @@ func (s *Server) addrDiscoveryLoop() { } s.mu.Lock() s.addrPorts = addrPorts + s.addrDiscoveryOnce = true s.mu.Unlock() case <-s.closeCh: return } } - } func (s *Server) listenOn(port int) (uint16, error) { @@ -521,10 +521,22 @@ func (s *Server) packetReadLoop() { var ErrServerClosed = errors.New("server closed") +// ErrServerNotReady indicates the server is not ready. Allocation should be +// requested after waiting for at least RetryAfter duration. +type ErrServerNotReady struct { + RetryAfter time.Duration +} + +func (e ErrServerNotReady) Error() string { + return fmt.Sprintf("server not ready, retry after %v", e.RetryAfter) +} + // AllocateEndpoint allocates an [endpoint.ServerEndpoint] for the provided pair // of [key.DiscoPublic]'s. If an allocation already exists for discoA and discoB // it is returned without modification/reallocation. AllocateEndpoint returns -// [ErrServerClosed] if the server has been closed. +// the following notable errors: +// 1. [ErrServerClosed] if the server has been closed. +// 2. [ErrServerNotReady] if the server is not ready. 
func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) { s.mu.Lock() defer s.mu.Unlock() @@ -533,6 +545,9 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv } if len(s.addrPorts) == 0 { + if !s.addrDiscoveryOnce { + return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: 3 * time.Second} + } return endpoint.ServerEndpoint{}, errors.New("server addrPorts are not yet known") } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 4ccfbb501ed94..d149d0c595e70 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -7,9 +7,12 @@ import ( "bytes" "context" "encoding/json" + "errors" + "fmt" "io" "net/http" "net/netip" + "strconv" "sync" "time" @@ -716,46 +719,82 @@ func (r *relayManager) allocateAllServersRunLoop(ep *endpoint) { }() } -func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, ep *endpoint) { - // TODO(jwhited): introduce client metrics counters for notable failures - defer wg.Done() - var b bytes.Buffer - remoteDisco := ep.disco.Load() - if remoteDisco == nil { - return - } +type errNotReady struct{ retryAfter time.Duration } + +func (e errNotReady) Error() string { + return fmt.Sprintf("server not ready, retry after %v", e.retryAfter) +} + +const reqTimeout = time.Second * 10 + +func doAllocate(ctx context.Context, server netip.AddrPort, discoKeys [2]key.DiscoPublic) (udprelay.ServerEndpoint, error) { + var reqBody bytes.Buffer type allocateRelayEndpointReq struct { DiscoKeys []key.DiscoPublic } a := &allocateRelayEndpointReq{ - DiscoKeys: []key.DiscoPublic{ep.c.discoPublic, remoteDisco.key}, + DiscoKeys: []key.DiscoPublic{discoKeys[0], discoKeys[1]}, } - err := json.NewEncoder(&b).Encode(a) + err := json.NewEncoder(&reqBody).Encode(a) if err != nil { - return + return udprelay.ServerEndpoint{}, err } - const reqTimeout = time.Second * 10 reqCtx, 
cancel := context.WithTimeout(ctx, reqTimeout) defer cancel() - req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/v0/relay/endpoint", &b) + req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/v0/relay/endpoint", &reqBody) if err != nil { - return + return udprelay.ServerEndpoint{}, err } resp, err := http.DefaultClient.Do(req) if err != nil { - return + return udprelay.ServerEndpoint{}, err } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { + switch resp.StatusCode { + case http.StatusOK: + var se udprelay.ServerEndpoint + err = json.NewDecoder(io.LimitReader(resp.Body, 4096)).Decode(&se) + return se, err + case http.StatusServiceUnavailable: + raHeader := resp.Header.Get("Retry-After") + raSeconds, err := strconv.ParseUint(raHeader, 10, 32) + if err == nil { + return udprelay.ServerEndpoint{}, errNotReady{retryAfter: time.Second * time.Duration(raSeconds)} + } + fallthrough + default: + return udprelay.ServerEndpoint{}, fmt.Errorf("non-200 status: %d", resp.StatusCode) + } +} + +func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, ep *endpoint) { + // TODO(jwhited): introduce client metrics counters for notable failures + defer wg.Done() + remoteDisco := ep.disco.Load() + if remoteDisco == nil { return } - var se udprelay.ServerEndpoint - err = json.NewDecoder(io.LimitReader(resp.Body, 4096)).Decode(&se) - if err != nil { + firstTry := true + for { + se, err := doAllocate(ctx, server, [2]key.DiscoPublic{ep.c.discoPublic, remoteDisco.key}) + if err == nil { + relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ + ep: ep, + se: se, + }) + return + } + ep.c.logf("[v1] magicsock: relayManager: error allocating endpoint on %v for %v: %v", server, ep.discoShort(), err) + var notReady errNotReady + if firstTry && errors.As(err, ¬Ready) { + select { + case <-ctx.Done(): + return + case 
<-time.After(min(notReady.retryAfter, reqTimeout)): + firstTry = false + continue + } + } return } - relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - ep: ep, - se: se, - }) } From 31eebdb0f8b42d40f0360a835e25d4d35c1cf420 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 23 Jun 2025 16:13:58 -0700 Subject: [PATCH 0017/1093] wgengine/magicsock: send CallMeMaybeVia for relay endpoints (#16360) If we acted as the allocator we are responsible for signaling it to the remote peer in a CallMeMaybeVia message over DERP. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 38 ++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index d149d0c595e70..f22e281e6944b 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -30,8 +30,9 @@ import ( // // [relayManager] methods can be called by [Conn] and [endpoint] while their .mu // mutexes are held. Therefore, in order to avoid deadlocks, [relayManager] must -// never attempt to acquire those mutexes, including synchronous calls back -// towards [Conn] or [endpoint] methods that acquire them. +// never attempt to acquire those mutexes synchronously from its runLoop(), +// including synchronous calls back towards [Conn] or [endpoint] methods that +// acquire them. type relayManager struct { initOnce sync.Once @@ -584,9 +585,37 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay byServerDisco[newServerEndpoint.se.ServerDisco] = work r.handshakeWorkByServerDiscoVNI[sdv] = work + if newServerEndpoint.server.IsValid() { + // Send CallMeMaybeVia to the remote peer if we allocated this endpoint. + go r.sendCallMeMaybeVia(work.ep, work.se) + } + go r.handshakeServerEndpoint(work) } +// sendCallMeMaybeVia sends a [disco.CallMeMaybeVia] to ep over DERP. 
It must be +// called as part of a goroutine independent from runLoop(), for 2 reasons: +// 1. it acquires ep.mu (refer to [relayManager] docs for reasoning) +// 2. it makes a networking syscall, which can introduce unwanted backpressure +func (r *relayManager) sendCallMeMaybeVia(ep *endpoint, se udprelay.ServerEndpoint) { + ep.mu.Lock() + derpAddr := ep.derpAddr + ep.mu.Unlock() + epDisco := ep.disco.Load() + if epDisco == nil || !derpAddr.IsValid() { + return + } + callMeMaybeVia := &disco.CallMeMaybeVia{ + ServerDisco: se.ServerDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + } + ep.c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, epDisco.key, callMeMaybeVia, discoVerboseLog) +} + func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { done := relayEndpointHandshakeWorkDoneEvent{work: work} r.ensureDiscoInfoFor(work) @@ -779,8 +808,9 @@ func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGr se, err := doAllocate(ctx, server, [2]key.DiscoPublic{ep.c.discoPublic, remoteDisco.key}) if err == nil { relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - ep: ep, - se: se, + ep: ep, + se: se, + server: server, // we allocated this endpoint (vs CallMeMaybeVia reception), mark it as such }) return } From 4a1fc378d1a8fa4d7f5beef318830d8354f76d1c Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 23 Jun 2025 17:55:23 -0500 Subject: [PATCH 0018/1093] release/dist: switch back to Ubuntu 20.04 for building QNAP packages After the switch to 24.04, unsigned packages did not build correctly (came out as only a few KBs). 
Fixes tailscale/tailscale-qpkg#148 Signed-off-by: Percy Wegmann --- release/dist/qnap/files/scripts/Dockerfile.qpkg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/dist/qnap/files/scripts/Dockerfile.qpkg b/release/dist/qnap/files/scripts/Dockerfile.qpkg index 1f4c2406d7642..542eb95e1a7cc 100644 --- a/release/dist/qnap/files/scripts/Dockerfile.qpkg +++ b/release/dist/qnap/files/scripts/Dockerfile.qpkg @@ -1,4 +1,4 @@ -FROM ubuntu:24.04 +FROM ubuntu:20.04 RUN apt-get update -y && \ apt-get install -y --no-install-recommends \ From 9e28bfc69c0127a21fbce6beeaee2d763fe78d2a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 24 Jun 2025 13:39:29 -0500 Subject: [PATCH 0019/1093] ipn/ipnlocal,wgengine/magicsock: wait for magicsock to process pending events on authReconfig Updates #16369 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 5 ++++ ipn/ipnlocal/local_test.go | 6 ++++ ipn/ipnlocal/state_test.go | 51 ++++++++++++++++++++++++++++++++- wgengine/magicsock/magicsock.go | 34 ++++++++++++++++++++++ 4 files changed, 95 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 908418d4aad2c..5467088f7c91a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4853,6 +4853,11 @@ func (b *LocalBackend) readvertiseAppConnectorRoutes() { // updates are not currently blocked, based on the cached netmap and // user prefs. func (b *LocalBackend) authReconfig() { + // Wait for magicsock to process pending [eventbus] events, + // such as netmap updates. This should be completed before + // wireguard-go is reconfigured. See tailscale/tailscale#16369. 
+ b.MagicConn().Synchronize() + b.mu.Lock() blocked := b.blocked prefs := b.pm.CurrentPrefs() diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 6e24f43006bd4..6e62786883d1a 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -85,6 +85,12 @@ func makeNodeKeyFromID(nodeID tailcfg.NodeID) key.NodePublic { return key.NodePublicFromRaw32(memro.B(raw)) } +func makeDiscoKeyFromID(nodeID tailcfg.NodeID) (ret key.DiscoPublic) { + raw := make([]byte, 32) + binary.BigEndian.PutUint64(raw[24:], uint64(nodeID)) + return key.DiscoPublicFromRaw32(memro.B(raw)) +} + func TestShrinkDefaultRoute(t *testing.T) { tests := []struct { route string diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 5d9e8b169f0a5..2921de2032913 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1114,6 +1114,8 @@ func TestEngineReconfigOnStateChange(t *testing.T) { disconnect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: false}, WantRunningSet: true} node1 := testNetmapForNode(1, "node-1", []netip.Prefix{netip.MustParsePrefix("100.64.1.1/32")}) node2 := testNetmapForNode(2, "node-2", []netip.Prefix{netip.MustParsePrefix("100.64.1.2/32")}) + node3 := testNetmapForNode(3, "node-3", []netip.Prefix{netip.MustParsePrefix("100.64.1.3/32")}) + node3.Peers = []tailcfg.NodeView{node1.SelfNode, node2.SelfNode} routesWithQuad100 := func(extra ...netip.Prefix) []netip.Prefix { return append(extra, netip.MustParsePrefix("100.100.100.100/32")) } @@ -1308,6 +1310,40 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Hosts: hostsFor(node1), }, }, + { + name: "Start/Connect/Login/WithPeers", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node3) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node3.SelfNode.StableID(), + Peers: []wgcfg.Peer{ + { + PublicKey: 
node1.SelfNode.Key(), + DiscoKey: node1.SelfNode.DiscoKey(), + }, + { + PublicKey: node2.SelfNode.Key(), + DiscoKey: node2.SelfNode.DiscoKey(), + }, + }, + Addresses: node3.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node3.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node3), + }, + }, } for _, tt := range tests { @@ -1322,8 +1358,18 @@ func TestEngineReconfigOnStateChange(t *testing.T) { t.Errorf("State: got %v; want %v", gotState, tt.wantState) } + if engine.Config() != nil { + for _, p := range engine.Config().Peers { + pKey := p.PublicKey.UntypedHexString() + _, err := lb.MagicConn().ParseEndpoint(pKey) + if err != nil { + t.Errorf("ParseEndpoint(%q) failed: %v", pKey, err) + } + } + } + opts := []cmp.Option{ - cmpopts.EquateComparable(key.NodePublic{}, netip.Addr{}, netip.Prefix{}), + cmpopts.EquateComparable(key.NodePublic{}, key.DiscoPublic{}, netip.Addr{}, netip.Prefix{}), } if diff := cmp.Diff(tt.wantCfg, engine.Config(), opts...); diff != "" { t.Errorf("wgcfg.Config(+got -want): %v", diff) @@ -1356,6 +1402,8 @@ func testNetmapForNode(userID tailcfg.UserID, name string, addresses []netip.Pre Addresses: addresses, MachineAuthorized: true, } + self.Key = makeNodeKeyFromID(self.ID) + self.DiscoKey = makeDiscoKeyFromID(self.ID) return &netmap.NetworkMap{ SelfNode: self.View(), Name: self.Name, @@ -1403,6 +1451,7 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( magicConn, err := magicsock.NewConn(magicsock.Options{ Logf: logf, + EventBus: sys.Bus.Get(), NetMon: dialer.NetMon(), Metrics: sys.UserMetricsRegistry(), HealthTracker: sys.HealthTracker(), diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a96eaf3d800b9..d7b52269984da 100644 --- a/wgengine/magicsock/magicsock.go +++ 
b/wgengine/magicsock/magicsock.go @@ -167,6 +167,8 @@ type Conn struct { filterSub *eventbus.Subscriber[FilterUpdate] nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] + syncSub *eventbus.Subscriber[syncPoint] + syncPub *eventbus.Publisher[syncPoint] subsDoneCh chan struct{} // closed when consumeEventbusTopics returns // pconn4 and pconn6 are the underlying UDP sockets used to @@ -538,6 +540,21 @@ type FilterUpdate struct { *filter.Filter } +// syncPoint is an event published over an [eventbus.Bus] by [Conn.Synchronize]. +// It serves as a synchronization point, allowing to wait until magicsock +// has processed all pending events. +type syncPoint chan struct{} + +// Wait blocks until [syncPoint.Signal] is called. +func (s syncPoint) Wait() { + <-s +} + +// Signal signals the sync point, unblocking the [syncPoint.Wait] call. +func (s syncPoint) Signal() { + close(s) +} + // newConn is the error-free, network-listening-side-effect-free based // of NewConn. Mostly for tests. func newConn(logf logger.Logf) *Conn { @@ -593,10 +610,25 @@ func (c *Conn) consumeEventbusTopics() { c.onNodeViewsUpdate(nodeViews) case nodeMuts := <-c.nodeMutsSub.Events(): c.onNodeMutationsUpdate(nodeMuts) + case syncPoint := <-c.syncSub.Events(): + c.dlogf("magicsock: received sync point after reconfig") + syncPoint.Signal() } } } +// Synchronize waits for all [eventbus] events published +// prior to this call to be processed by the receiver. +func (c *Conn) Synchronize() { + if c.syncPub == nil { + // Eventbus is not used; no need to synchronize (in certain tests). + return + } + sp := syncPoint(make(chan struct{})) + c.syncPub.Publish(sp) + sp.Wait() +} + // NewConn creates a magic Conn listening on opts.Port. // As the set of possible endpoints for a Conn changes, the // callback opts.EndpointsFunc is called. 
@@ -624,6 +656,8 @@ func NewConn(opts Options) (*Conn, error) { c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) + c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) + c.syncPub = eventbus.Publish[syncPoint](c.eventClient) c.subsDoneCh = make(chan struct{}) go c.consumeEventbusTopics() } From 83cd446b5d2cd136e87023187949bbd45710be7a Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 24 Jun 2025 16:56:28 -0500 Subject: [PATCH 0020/1093] release/dist/qnap: upgrade to Ubuntu 24.04 Docker image 20.04 is no longer supported. This pulls in changes to the QDK package that were required to make build succeed on 24.04. Updates https://github.com/tailscale/corp/issues/29849 Signed-off-by: Percy Wegmann --- release/dist/qnap/files/scripts/Dockerfile.qpkg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/release/dist/qnap/files/scripts/Dockerfile.qpkg b/release/dist/qnap/files/scripts/Dockerfile.qpkg index 542eb95e1a7cc..dbcaac11668f0 100644 --- a/release/dist/qnap/files/scripts/Dockerfile.qpkg +++ b/release/dist/qnap/files/scripts/Dockerfile.qpkg @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 +FROM ubuntu:24.04 RUN apt-get update -y && \ apt-get install -y --no-install-recommends \ @@ -10,7 +10,7 @@ RUN apt-get update -y && \ patch # Install QNAP QDK (force a specific version to pick up updates) -RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 9a31a67387c583d19a81a378dcf7c25e2abe231d +RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 6aba74f6b4c8ea0c30b8aec9f3476f428f6a58a1 RUN cd /QDK && ./InstallToUbuntu.sh install ENV PATH="/usr/share/QDK/bin:${PATH}" From f2f1236ad4174ca46402f26139cca71dd1c94c2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 25 Jun 2025 09:00:34 -0400 Subject: [PATCH 0021/1093] util/eventbus: 
add test helpers to simplify testing events (#16294) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of every module having to come up with a set of test methods for the event bus, this handful of test helpers hides a lot of the needed setup for the testing of the event bus. The tests in portmapper is also ported over to the new helpers. Updates #15160 Signed-off-by: Claus Lensbøl --- net/portmapper/portmapper.go | 2 +- net/portmapper/portmapper_test.go | 17 +- util/eventbus/doc.go | 10 + util/eventbus/eventbustest/doc.go | 45 +++ util/eventbus/eventbustest/eventbustest.go | 203 ++++++++++ .../eventbustest/eventbustest_test.go | 366 ++++++++++++++++++ util/eventbus/eventbustest/examples_test.go | 201 ++++++++++ 7 files changed, 831 insertions(+), 13 deletions(-) create mode 100644 util/eventbus/eventbustest/doc.go create mode 100644 util/eventbus/eventbustest/eventbustest.go create mode 100644 util/eventbus/eventbustest/eventbustest_test.go create mode 100644 util/eventbus/eventbustest/examples_test.go diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 59f88e96604a5..1c6c7634bf34a 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -515,7 +515,7 @@ func (c *Client) createMapping() { GoodUntil: mapping.GoodUntil(), }) } - if c.onChange != nil { + if c.onChange != nil && c.pubClient == nil { go c.onChange() } } diff --git a/net/portmapper/portmapper_test.go b/net/portmapper/portmapper_test.go index 515a0c28c993f..e66d3c159eccb 100644 --- a/net/portmapper/portmapper_test.go +++ b/net/portmapper/portmapper_test.go @@ -12,7 +12,7 @@ import ( "time" "tailscale.com/control/controlknobs" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) func TestCreateOrGetMapping(t *testing.T) { @@ -142,22 +142,15 @@ func TestUpdateEvent(t *testing.T) { t.Fatalf("Create test gateway: %v", err) } - bus := eventbus.New() - defer bus.Close() + bus := 
eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) - sub := eventbus.Subscribe[Mapping](bus.Client("TestUpdateEvent")) c := newTestClient(t, igd, bus) if _, err := c.Probe(t.Context()); err != nil { t.Fatalf("Probe failed: %v", err) } c.GetCachedMappingOrStartCreatingOne() - - select { - case evt := <-sub.Events(): - t.Logf("Received portmap update: %+v", evt) - case <-sub.Done(): - t.Error("Subscriber closed prematurely") - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for an update event") + if err := eventbustest.Expect(tw, eventbustest.Type[Mapping]()); err != nil { + t.Error(err.Error()) } } diff --git a/util/eventbus/doc.go b/util/eventbus/doc.go index 964a686eae109..f95f9398c8de9 100644 --- a/util/eventbus/doc.go +++ b/util/eventbus/doc.go @@ -89,4 +89,14 @@ // The [Debugger], obtained through [Bus.Debugger], provides // introspection facilities to monitor events flowing through the bus, // and inspect publisher and subscriber state. +// +// Additionally, a debug command exists for monitoring the eventbus: +// +// tailscale debug daemon-bus-events +// +// # Testing facilities +// +// Helpers for testing code with the eventbus can be found in: +// +// eventbus/eventbustest package eventbus diff --git a/util/eventbus/eventbustest/doc.go b/util/eventbus/eventbustest/doc.go new file mode 100644 index 0000000000000..9e39504a83521 --- /dev/null +++ b/util/eventbus/eventbustest/doc.go @@ -0,0 +1,45 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package eventbustest provides helper methods for testing an [eventbus.Bus]. +// +// # Usage +// +// A [Watcher] presents a set of generic helpers for testing events. +// +// To test code that generates events, create a [Watcher] from the [eventbus.Bus] +// used by the code under test, run the code to generate events, then use the watcher +// to verify that the expected events were produced. 
In outline: +// +// bus := eventbustest.NewBus(t) +// tw := eventbustest.NewWatcher(t, bus) +// somethingThatEmitsSomeEvent() +// if err := eventbustest.Expect(tw, eventbustest.Type[EventFoo]()); err != nil { +// t.Error(err.Error()) +// } +// +// As shown, [Expect] checks that at least one event of the given type occurs +// in the stream generated by the code under test. +// +// The following functions all take an any parameter representing a function. +// This function will take an argument of the expected type and is used to test +// for the events on the eventbus being of the given type. The function can +// take the shape described in [Expect]. +// +// [Type] is a helper for only testing event type. +// +// To check for specific properties of an event, use [Expect], and pass a function +// as the second argument that tests for those properties. +// +// To test for multiple events, use [Expect], which checks that the stream +// contains the given events in the given order, possibly with other events +// interspersed. +// +// To test the complete contents of the stream, use [ExpectExactly], which +// checks that the stream contains exactly the given events in the given order, +// and no others. +// +// See the [usage examples]. +// +// [usage examples]: https://github.com/tailscale/tailscale/blob/main/util/eventbus/eventbustest/examples_test.go +package eventbustest diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go new file mode 100644 index 0000000000000..75d430d53683e --- /dev/null +++ b/util/eventbus/eventbustest/eventbustest.go @@ -0,0 +1,203 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbustest + +import ( + "errors" + "fmt" + "reflect" + "testing" + "time" + + "tailscale.com/util/eventbus" +) + +// NewBus constructs an [eventbus.Bus] that will be shut automatically when +// its controlling test ends. 
+func NewBus(t *testing.T) *eventbus.Bus { + bus := eventbus.New() + t.Cleanup(bus.Close) + return bus +} + +// NewTestWatcher constructs a [Watcher] that can be used to check the stream of +// events generated by code under test. After construction the caller may use +// [Expect] and [ExpectExactly], to verify that the desired events were captured. +func NewWatcher(t *testing.T, bus *eventbus.Bus) *Watcher { + tw := &Watcher{ + mon: bus.Debugger().WatchBus(), + TimeOut: 5 * time.Second, + chDone: make(chan bool, 1), + events: make(chan any, 100), + } + if deadline, ok := t.Deadline(); ok { + tw.TimeOut = deadline.Sub(time.Now()) + } + t.Cleanup(tw.done) + go tw.watch() + return tw +} + +// Watcher monitors and holds events for test expectations. +type Watcher struct { + mon *eventbus.Subscriber[eventbus.RoutedEvent] + events chan any + chDone chan bool + // TimeOut defines when the Expect* functions should stop looking for events + // coming from the Watcher. The value is set by [NewWatcher] and defaults to + // the deadline passed in by [testing.T]. If looking to verify the absence + // of an event, the TimeOut can be set to a lower value after creating the + // Watcher. + TimeOut time.Duration +} + +// Type is a helper representing the expectation to see an event of type T, without +// caring about the content of the event. +// It makes it possible to use helpers like: +// +// eventbustest.ExpectFilter(tw, eventbustest.Type[EventFoo]()) +func Type[T any]() func(T) { return func(T) {} } + +// Expect verifies that the given events are a subsequence of the events +// observed by tw. That is, tw must contain at least one event matching the type +// of each argument in the given order, other event types are allowed to occur in +// between without error. 
The given events are represented by a function +// that must have one of the following forms: +// +// // Tests for the event type only +// func(e ExpectedType) +// +// // Tests for event type and whatever is defined in the body. +// // If return is false, the test will look for other events of that type +// // If return is true, the test will look for the next given event +// // if a list is given +// func(e ExpectedType) bool +// +// // Tests for event type and whatever is defined in the body. +// // The boolean return works as above. +// // The if error != nil, the test helper will return that error immediately. +// func(e ExpectedType) (bool, error) +// +// If the list of events must match exactly with no extra events, +// use [ExpectExactly]. +func Expect(tw *Watcher, filters ...any) error { + if len(filters) == 0 { + return errors.New("no event filters were provided") + } + eventCount := 0 + head := 0 + for head < len(filters) { + eventFunc := eventFilter(filters[head]) + select { + case event := <-tw.events: + eventCount++ + if ok, err := eventFunc(event); err != nil { + return err + } else if ok { + head++ + } + case <-time.After(tw.TimeOut): + return fmt.Errorf( + "timed out waiting for event, saw %d events, %d was expected", + eventCount, head) + case <-tw.chDone: + return errors.New("watcher closed while waiting for events") + } + } + return nil +} + +// ExpectExactly checks for some number of events showing up on the event bus +// in a given order, returning an error if the events does not match the given list +// exactly. The given events are represented by a function as described in +// [Expect]. Use [Expect] if other events are allowed. 
+func ExpectExactly(tw *Watcher, filters ...any) error { + if len(filters) == 0 { + return errors.New("no event filters were provided") + } + eventCount := 0 + for pos, next := range filters { + eventFunc := eventFilter(next) + fnType := reflect.TypeOf(next) + argType := fnType.In(0) + select { + case event := <-tw.events: + eventCount++ + typeEvent := reflect.TypeOf(event) + if typeEvent != argType { + return fmt.Errorf( + "expected event type %s, saw %s, at index %d", + argType, typeEvent, pos) + } else if ok, err := eventFunc(event); err != nil { + return err + } else if !ok { + return fmt.Errorf( + "expected test ok for type %s, at index %d", argType, pos) + } + case <-time.After(tw.TimeOut): + return fmt.Errorf( + "timed out waiting for event, saw %d events, %d was expected", + eventCount, pos) + case <-tw.chDone: + return errors.New("watcher closed while waiting for events") + } + } + return nil +} + +func (tw *Watcher) watch() { + for { + select { + case event := <-tw.mon.Events(): + tw.events <- event.Event + case <-tw.chDone: + tw.mon.Close() + return + } + } +} + +// done tells the watcher to stop monitoring for new events. 
+func (tw *Watcher) done() { + close(tw.chDone) +} + +type filter = func(any) (bool, error) + +func eventFilter(f any) filter { + ft := reflect.TypeOf(f) + if ft.Kind() != reflect.Func { + panic("filter is not a function") + } else if ft.NumIn() != 1 { + panic(fmt.Sprintf("function takes %d arguments, want 1", ft.NumIn())) + } + var fixup func([]reflect.Value) []reflect.Value + switch ft.NumOut() { + case 0: + fixup = func([]reflect.Value) []reflect.Value { + return []reflect.Value{reflect.ValueOf(true), reflect.Zero(reflect.TypeFor[error]())} + } + case 1: + if ft.Out(0) != reflect.TypeFor[bool]() { + panic(fmt.Sprintf("result is %T, want bool", ft.Out(0))) + } + fixup = func(vals []reflect.Value) []reflect.Value { + return append(vals, reflect.Zero(reflect.TypeFor[error]())) + } + case 2: + if ft.Out(0) != reflect.TypeFor[bool]() || ft.Out(1) != reflect.TypeFor[error]() { + panic(fmt.Sprintf("results are %T, %T; want bool, error", ft.Out(0), ft.Out(1))) + } + fixup = func(vals []reflect.Value) []reflect.Value { return vals } + default: + panic(fmt.Sprintf("function returns %d values", ft.NumOut())) + } + fv := reflect.ValueOf(f) + return reflect.MakeFunc(reflect.TypeFor[filter](), func(args []reflect.Value) []reflect.Value { + if !args[0].IsValid() || args[0].Elem().Type() != ft.In(0) { + return []reflect.Value{reflect.ValueOf(false), reflect.Zero(reflect.TypeFor[error]())} + } + return fixup(fv.Call([]reflect.Value{args[0].Elem()})) + }).Interface().(filter) +} diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go new file mode 100644 index 0000000000000..fd95973e5538d --- /dev/null +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -0,0 +1,366 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbustest_test + +import ( + "fmt" + "testing" + "time" + + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" +) + +type EventFoo 
struct { + Value int +} + +type EventBar struct { + Value string +} + +type EventBaz struct { + Value []float64 +} + +func TestExpectFilter(t *testing.T) { + tests := []struct { + name string + events []int + expectFunc any + wantErr bool + }{ + { + name: "single event", + events: []int{42}, + expectFunc: eventbustest.Type[EventFoo](), + wantErr: false, + }, + { + name: "multiple events, single expectation", + events: []int{42, 1, 2, 3, 4, 5}, + expectFunc: eventbustest.Type[EventFoo](), + wantErr: false, + }, + { + name: "filter on event with function", + events: []int{24, 42}, + expectFunc: func(event EventFoo) (bool, error) { + if event.Value == 42 { + return true, nil + } + return false, nil + }, + wantErr: false, + }, + { + name: "first event has to be func", + events: []int{24, 42}, + expectFunc: func(event EventFoo) (bool, error) { + if event.Value != 42 { + return false, fmt.Errorf("expected 42, got %d", event.Value) + } + return false, nil + }, + wantErr: true, + }, + { + name: "no events", + events: []int{}, + expectFunc: func(event EventFoo) (bool, error) { + return true, nil + }, + wantErr: true, + }, + } + + bus := eventbustest.NewBus(t) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tw := eventbustest.NewWatcher(t, bus) + // TODO(cmol): When synctest is out of experimental, use that instead: + // https://go.dev/blog/synctest + tw.TimeOut = 10 * time.Millisecond + + client := bus.Client("testClient") + defer client.Close() + updater := eventbus.Publish[EventFoo](client) + + for _, i := range tt.events { + updater.Publish(EventFoo{i}) + } + + if err := eventbustest.Expect(tw, tt.expectFunc); (err != nil) != tt.wantErr { + t.Errorf("ExpectFilter[EventFoo]: error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestExpectEvents(t *testing.T) { + tests := []struct { + name string + events []any + expectEvents []any + wantErr bool + }{ + { + name: "No expectations", + events: []any{EventFoo{}}, + expectEvents: []any{}, + 
wantErr: true, + }, + { + name: "One event", + events: []any{EventFoo{}}, + expectEvents: []any{eventbustest.Type[EventFoo]()}, + wantErr: false, + }, + { + name: "Two events", + events: []any{EventFoo{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: false, + }, + { + name: "Two expected events with another in the middle", + events: []any{EventFoo{}, EventBaz{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: false, + }, + { + name: "Missing event", + events: []any{EventFoo{}, EventBaz{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: true, + }, + { + name: "One event with specific value", + events: []any{EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + { + name: "Two event with one specific value", + events: []any{EventFoo{43}, EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + { + name: "One event with wrong value", + events: []any{EventFoo{43}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + { + name: "Two events with specific values", + events: []any{EventFoo{42}, EventFoo{42}, EventBar{"42"}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + func(ev EventBar) (bool, error) { + if ev.Value == "42" { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + } + + bus := eventbustest.NewBus(t) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tw := eventbustest.NewWatcher(t, bus) + // TODO(cmol): When synctest is out of 
experimental, use that instead: + // https://go.dev/blog/synctest + tw.TimeOut = 10 * time.Millisecond + + client := bus.Client("testClient") + defer client.Close() + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) + + for _, ev := range tt.events { + switch ev.(type) { + case EventFoo: + evCast := ev.(EventFoo) + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev.(EventBar) + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev.(EventBaz) + updaterBaz.Publish(evCast) + } + } + + if err := eventbustest.Expect(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestExpectExactlyEventsFilter(t *testing.T) { + tests := []struct { + name string + events []any + expectEvents []any + wantErr bool + }{ + { + name: "No expectations", + events: []any{EventFoo{}}, + expectEvents: []any{}, + wantErr: true, + }, + { + name: "One event", + events: []any{EventFoo{}}, + expectEvents: []any{eventbustest.Type[EventFoo]()}, + wantErr: false, + }, + { + name: "Two events", + events: []any{EventFoo{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: false, + }, + { + name: "Two expected events with another in the middle", + events: []any{EventFoo{}, EventBaz{}, EventBar{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: true, + }, + { + name: "Missing event", + events: []any{EventFoo{}, EventBaz{}}, + expectEvents: []any{eventbustest.Type[EventFoo](), eventbustest.Type[EventBar]()}, + wantErr: true, + }, + { + name: "One event with value", + events: []any{EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: false, + }, + { + name: "Two event with one specific 
value", + events: []any{EventFoo{43}, EventFoo{42}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + { + name: "One event with wrong value", + events: []any{EventFoo{43}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + { + name: "Two events with specific values", + events: []any{EventFoo{42}, EventFoo{42}, EventBar{"42"}}, + expectEvents: []any{ + func(ev EventFoo) (bool, error) { + if ev.Value == 42 { + return true, nil + } + return false, nil + }, + func(ev EventBar) (bool, error) { + if ev.Value == "42" { + return true, nil + } + return false, nil + }, + }, + wantErr: true, + }, + } + + bus := eventbustest.NewBus(t) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tw := eventbustest.NewWatcher(t, bus) + // TODO(cmol): When synctest is out of experimental, use that instead: + // https://go.dev/blog/synctest + tw.TimeOut = 10 * time.Millisecond + + client := bus.Client("testClient") + defer client.Close() + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) + + for _, ev := range tt.events { + switch ev.(type) { + case EventFoo: + evCast := ev.(EventFoo) + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev.(EventBar) + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev.(EventBaz) + updaterBaz.Publish(evCast) + } + } + + if err := eventbustest.ExpectExactly(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/util/eventbus/eventbustest/examples_test.go b/util/eventbus/eventbustest/examples_test.go new file mode 100644 index 0000000000000..914e29933b2a2 --- /dev/null +++ b/util/eventbus/eventbustest/examples_test.go 
@@ -0,0 +1,201 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbustest_test + +import ( + "testing" + + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" +) + +func TestExample_Expect(t *testing.T) { + type eventOfInterest struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updater := eventbus.Publish[eventOfInterest](client) + updater.Publish(eventOfInterest{}) + + if err := eventbustest.Expect(tw, eventbustest.Type[eventOfInterest]()); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_Expect_WithFunction(t *testing.T) { + type eventOfInterest struct { + value int + } + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updater := eventbus.Publish[eventOfInterest](client) + updater.Publish(eventOfInterest{43}) + updater.Publish(eventOfInterest{42}) + + // Look for an event of eventOfInterest with a specific value + if err := eventbustest.Expect(tw, func(event eventOfInterest) (bool, error) { + if event.value != 42 { + return false, nil // Look for another event with the expected value. 
+ // You could alternatively return an error here to ensure that the + // first seen eventOfInterest matches the value: + // return false, fmt.Errorf("expected 42, got %d", event.value) + } + return true, nil + }); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_Expect_MultipleEvents(t *testing.T) { + type eventOfInterest struct{} + type eventOfNoConcern struct{} + type eventOfCuriosity struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{}) + + // Even though three events was published, we just care about the two + if err := eventbustest.Expect(tw, + eventbustest.Type[eventOfInterest](), + eventbustest.Type[eventOfCuriosity]()); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_ExpectExactly_MultipleEvents(t *testing.T) { + type eventOfInterest struct{} + type eventOfNoConcern struct{} + type eventOfCuriosity struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{}) + + // Will fail as more events than the two expected comes in + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[eventOfInterest](), + eventbustest.Type[eventOfCuriosity]()); err != nil { + 
t.Log(err.Error()) + } else { + t.Log("OK") + } +} + +func TestExample_Expect_WithMultipleFunctions(t *testing.T) { + type eventOfInterest struct { + value int + } + type eventOfNoConcern struct{} + type eventOfCuriosity struct { + value string + } + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{42}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{"42"}) + + interest := func(event eventOfInterest) (bool, error) { + if event.value == 42 { + return true, nil + } + return false, nil + } + curiosity := func(event eventOfCuriosity) (bool, error) { + if event.value == "42" { + return true, nil + } + return false, nil + } + + // Will fail as more events than the two expected comes in + if err := eventbustest.Expect(tw, interest, curiosity); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK +} + +func TestExample_ExpectExactly_WithMultipleFuncions(t *testing.T) { + type eventOfInterest struct { + value int + } + type eventOfNoConcern struct{} + type eventOfCuriosity struct { + value string + } + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updaterInterest := eventbus.Publish[eventOfInterest](client) + updaterConcern := eventbus.Publish[eventOfNoConcern](client) + updaterCuriosity := eventbus.Publish[eventOfCuriosity](client) + updaterInterest.Publish(eventOfInterest{42}) + updaterConcern.Publish(eventOfNoConcern{}) + updaterCuriosity.Publish(eventOfCuriosity{"42"}) + + interest := func(event eventOfInterest) (bool, error) { + if event.value == 42 { + return true, nil + } + return false, nil + } + curiosity := func(event eventOfCuriosity) 
(bool, error) { + if event.value == "42" { + return true, nil + } + return false, nil + } + + // Will fail as more events than the two expected comes in + if err := eventbustest.ExpectExactly(tw, interest, curiosity); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // expected event type eventbustest.eventOfCuriosity, saw eventbustest.eventOfNoConcern, at index 1 +} From b75fe9eeca13d7ff651c83d8202d22cce466dc08 Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 25 Jun 2025 14:14:17 +0100 Subject: [PATCH 0022/1093] cmd/k8s-operator: Add NOTES.txt to Helm chart (#16364) This commit adds a NOTES.txt to the operator helm chart that will be written to the terminal upon successful installation of the operator. It includes a small list of knowledgebase articles with possible next steps for the actor that installed the operator to the cluster. It also provides possible commands to use for explaining the custom resources. Fixes #13427 Signed-off-by: David Bond --- .gitignore | 3 +++ .../deploy/chart/templates/NOTES.txt | 25 +++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 cmd/k8s-operator/deploy/chart/templates/NOTES.txt diff --git a/.gitignore b/.gitignore index 47d2bbe959ae1..3941fd06ef6d5 100644 --- a/.gitignore +++ b/.gitignore @@ -49,3 +49,6 @@ client/web/build/assets *.xcworkspacedata /tstest/tailmac/bin /tstest/tailmac/build + +# Ignore personal IntelliJ settings +.idea/ diff --git a/cmd/k8s-operator/deploy/chart/templates/NOTES.txt b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt new file mode 100644 index 0000000000000..5678e597a6824 --- /dev/null +++ b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt @@ -0,0 +1,25 @@ +You have successfully installed the Tailscale Kubernetes Operator! 
+ +Once connected, the operator should appear as a device within the Tailscale admin console: +https://login.tailscale.com/admin/machines + +If you have not used the Tailscale operator before, here are some examples to try out: + +* Private Kubernetes API access and authorization using the API server proxy + https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy + +* Private access to cluster Services using an ingress proxy + https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress + +* Private access to the cluster's available subnets using a subnet router + https://tailscale.com/kb/1441/kubernetes-operator-connector + +You can also explore the CRDs, operator, and associated resources within the {{ .Release.Namespace }} namespace: + +$ kubectl explain connector +$ kubectl explain proxygroup +$ kubectl explain proxyclass +$ kubectl explain recorder +$ kubectl explain dnsconfig + +$ kubectl --namespace={{ .Release.Namespace }} get pods From 35b11e7be55088e282b5e240b9473968eebeb002 Mon Sep 17 00:00:00 2001 From: Laszlo Magyar Date: Wed, 25 Jun 2025 20:26:11 +0300 Subject: [PATCH 0023/1093] envknob/featureknob: restore SSH and exit-node capability for Home Assistant (#16263) SSH was disabled in #10538 Exit node was disabled in #13726 This enables ssh and exit-node options in case of Home Assistant. 
Fixes #15552 Signed-off-by: Laszlo Magyar --- envknob/featureknob/featureknob.go | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/envknob/featureknob/featureknob.go b/envknob/featureknob/featureknob.go index e9b871f74a8c0..5a54a1c42978d 100644 --- a/envknob/featureknob/featureknob.go +++ b/envknob/featureknob/featureknob.go @@ -10,7 +10,6 @@ import ( "runtime" "tailscale.com/envknob" - "tailscale.com/hostinfo" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -26,14 +25,6 @@ func CanRunTailscaleSSH() error { if distro.Get() == distro.QNAP && !envknob.UseWIPCode() { return errors.New("The Tailscale SSH server does not run on QNAP.") } - - // Setting SSH on Home Assistant causes trouble on startup - // (since the flag is not being passed to `tailscale up`). - // Although Tailscale SSH does work here, - // it's not terribly useful since it's running in a separate container. - if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn { - return errors.New("The Tailscale SSH server does not run on HomeAssistant.") - } // otherwise okay case "darwin": // okay only in tailscaled mode for now. 
@@ -58,10 +49,5 @@ func CanUseExitNode() error { distro.QNAP: return errors.New("Tailscale exit nodes cannot be used on " + string(dist)) } - - if hostinfo.GetEnvType() == hostinfo.HomeAssistantAddOn { - return errors.New("Tailscale exit nodes cannot be used on HomeAssistant.") - } - return nil } From 37eca1785c280311b16133e6bd455fa062df29e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 25 Jun 2025 14:44:01 -0400 Subject: [PATCH 0024/1093] net/netmon: add tests for the events over the eventbus (#16382) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #15160 Signed-off-by: Claus Lensbøl --- net/netmon/netmon_test.go | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index a9af8fb004af3..b8ec1b75f97ec 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -12,6 +12,7 @@ import ( "time" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" ) @@ -68,6 +69,23 @@ func TestMonitorInjectEvent(t *testing.T) { } } +func TestMonitorInjectEventOnBus(t *testing.T) { + bus := eventbustest.NewBus(t) + + mon, err := New(bus, t.Logf) + if err != nil { + t.Fatal(err) + } + defer mon.Close() + tw := eventbustest.NewWatcher(t, bus) + + mon.Start() + mon.InjectEvent() + if err := eventbustest.Expect(tw, eventbustest.Type[*ChangeDelta]()); err != nil { + t.Error(err) + } +} + var ( monitor = flag.String("monitor", "", `go into monitor mode like 'route monitor'; test never terminates. Value can be either "raw" or "callback"`) monitorDuration = flag.Duration("monitor-duration", 0, "if non-zero, how long to run TestMonitorMode. 
Zero means forever.") @@ -77,13 +95,13 @@ func TestMonitorMode(t *testing.T) { switch *monitor { case "": t.Skip("skipping non-test without --monitor") - case "raw", "callback": + case "raw", "callback", "eventbus": default: - t.Skipf(`invalid --monitor value: must be "raw" or "callback"`) + t.Skipf(`invalid --monitor value: must be "raw", "callback" or "eventbus"`) } - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) mon, err := New(bus, t.Logf) if err != nil { @@ -124,6 +142,16 @@ func TestMonitorMode(t *testing.T) { mon.Start() <-done t.Logf("%v callbacks", n) + case "eventbus": + tw.TimeOut = *monitorDuration + n := 0 + mon.Start() + eventbustest.Expect(tw, func(event *ChangeDelta) (bool, error) { + n++ + t.Logf("cb: changed=%v, ifSt=%v", event.Major, event.New) + return false, nil // Return false, indicating we wanna look for more events + }) + t.Logf("%v events", n) } } From 51d00e135b6c5775f60f77ecd2a94e327aabd1f6 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 25 Jun 2025 19:13:02 -0700 Subject: [PATCH 0025/1093] wgengine/magicsock: fix relayManager alloc work cleanup (#16387) Premature cancellation was preventing the work from ever being cleaned up in runLoop(). 
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index f22e281e6944b..7b378838a145c 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -743,8 +743,11 @@ func (r *relayManager) allocateAllServersRunLoop(ep *endpoint) { r.allocWorkByEndpoint[ep] = started go func() { started.wg.Wait() - started.cancel() relayManagerInputEvent(r, ctx, &r.allocateWorkDoneCh, relayEndpointAllocWorkDoneEvent{work: started}) + // cleanup context cancellation must come after the + // relayManagerInputEvent call, otherwise it returns early without + // writing the event to runLoop(). + started.cancel() }() } From aa106c92a4ed6d66b26d455dc4bff23516514af1 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 25 Jun 2025 21:30:44 -0700 Subject: [PATCH 0026/1093] .github/workflows: request @tailscale/dataplane review DERP changes (#16372) For any changes that involve DERP, automatically add the @tailscale/dataplane team as a reviewer. 
Updates #cleanup Signed-off-by: Simon Law --- .../workflows/request-dataplane-review.yml | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 .github/workflows/request-dataplane-review.yml diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml new file mode 100644 index 0000000000000..836fef6fbce7c --- /dev/null +++ b/.github/workflows/request-dataplane-review.yml @@ -0,0 +1,31 @@ +name: request-dataplane-review + +on: + pull_request: + branches: + - "*" + paths: + - ".github/workflows/request-dataplane-review.yml" + - "**/*derp*" + - "**/derp*/**" + +jobs: + request-dataplane-review: + name: Request Dataplane Review + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Get access token + uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 + id: generate-token + with: + # Get token for app: https://github.com/apps/change-visibility-bot + app-id: ${{ secrets.VISIBILITY_BOT_APP_ID }} + private-key: ${{ secrets.VISIBILITY_BOT_APP_PRIVATE_KEY }} + - name: Add reviewers + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + url: ${{ github.event.pull_request.html_url }} + run: | + gh pr edit "$url" --add-reviewer tailscale/dataplane From 47dff33eac2441003a1d8ca4e98d56660f8119d4 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 25 Jun 2025 18:07:49 -0700 Subject: [PATCH 0027/1093] tool/gocross: remove GOROOT to ensure correct toolchain use go(1) repsects GOROOT if set, but tool/go / gocross-wrapper.sh are explicitly intending to use our toolchain. We don't need to set GOROOT, just unset it, and then go(1) handles the rest. 
Updates tailscale/corp#26717 Signed-off-by: James Tucker --- tool/gocross/gocross-wrapper.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index e9fca2aea71b5..90485d31b95af 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -133,6 +133,12 @@ fi repo_root="${BASH_SOURCE%/*}/../.." +# Some scripts/package systems set GOROOT even though they should only be +# setting $PATH. Stop them from breaking builds - go(1) respects GOROOT and +# so if it is left on here, compilation units depending on our Go fork will +# fail (such as those which depend on our net/ patches). +unset GOROOT + # gocross is opt-in as of 2025-06-16. See tailscale/corp#26717 # and comment above in this file. if [ "${TS_USE_GOCROSS:-}" != "1" ]; then From 99aaa6e92cda572896503538e1716851358e42d6 Mon Sep 17 00:00:00 2001 From: JerryYan Date: Fri, 27 Jun 2025 00:43:48 +0800 Subject: [PATCH 0028/1093] ipn/ipnlocal: update PeerByID to return SelfNode and rename it to NodeByID (#16096) Like NodeByKey, add an if stmt for checking the NodeId is SelfNode. 
Updates #16052 Signed-off-by: Jerry Yan <792602257@qq.com> --- ipn/ipnlocal/drive.go | 2 +- ipn/ipnlocal/local.go | 14 +++++--------- ipn/ipnlocal/local_test.go | 2 +- ipn/ipnlocal/node_backend.go | 7 ++++++- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index a06ea5e8c41ba..6a6f9bcd2b24a 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -318,7 +318,7 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // - They are online // - They are allowed to share at least one folder with us cn := b.currentNode() - peer, ok := cn.PeerByID(peerID) + peer, ok := cn.NodeByID(peerID) if !ok { return false } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5467088f7c91a..9cec088f1f28b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1391,7 +1391,7 @@ func profileFromView(v tailcfg.UserProfileView) tailcfg.UserProfile { func (b *LocalBackend) WhoIsNodeKey(k key.NodePublic) (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) { cn := b.currentNode() if nid, ok := cn.NodeByKey(k); ok { - if n, ok := cn.PeerByID(nid); ok { + if n, ok := cn.NodeByID(nid); ok { up, ok := cn.NetMap().UserProfiles[n.User()] u = profileFromView(up) return n, u, ok @@ -1457,13 +1457,9 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi if nm == nil { return failf("no netmap") } - n, ok = cn.PeerByID(nid) + n, ok = cn.NodeByID(nid) if !ok { - // Check if this the self-node, which would not appear in peers. 
- if !nm.SelfNode.Valid() || nid != nm.SelfNode.ID() { - return zero, u, false - } - n = nm.SelfNode + return zero, u, false } up, ok := cn.UserByID(n.User()) if !ok { @@ -1968,7 +1964,7 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo if !ok || mo.Online { continue } - n, ok := cn.PeerByID(m.NodeIDBeingMutated()) + n, ok := cn.NodeByID(m.NodeIDBeingMutated()) if !ok || n.StableID() != exitNodeID { continue } @@ -7724,7 +7720,7 @@ func (b *LocalBackend) srcIPHasCapForFilter(srcIP netip.Addr, cap tailcfg.NodeCa if !ok { return false } - n, ok := cn.PeerByID(nodeID) + n, ok := cn.NodeByID(nodeID) if !ok { return false } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 6e62786883d1a..16dbef62a4190 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1005,7 +1005,7 @@ func TestUpdateNetmapDelta(t *testing.T) { }, } for _, want := range wants { - gotv, ok := b.currentNode().PeerByID(want.ID) + gotv, ok := b.currentNode().NodeByID(want.ID) if !ok { t.Errorf("netmap.Peer %v missing from b.profile.Peers", want.ID) continue diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 05389a677d4f5..ec503f1300ca5 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -206,9 +206,14 @@ func (nb *nodeBackend) NodeByKey(k key.NodePublic) (_ tailcfg.NodeID, ok bool) { return 0, false } -func (nb *nodeBackend) PeerByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { +func (nb *nodeBackend) NodeByID(id tailcfg.NodeID) (_ tailcfg.NodeView, ok bool) { nb.mu.Lock() defer nb.mu.Unlock() + if nb.netMap != nil { + if self := nb.netMap.SelfNode; self.Valid() && self.ID() == id { + return self, true + } + } n, ok := nb.peers[id] return n, ok } From d2c1ed22c39096f11cfd7920449ff746b865a025 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 26 Jun 2025 13:37:21 -0700 Subject: [PATCH 0029/1093] .github/workflows: replace tibdex with official GitHub Action 
(#16385) GitHub used to recommend the tibdex/github-app-token GitHub Action until they wrote their own actions/create-github-app-token. This patch replaces the use of the third-party action with the official one. Updates #cleanup Signed-off-by: Simon Law --- .github/workflows/update-flake.yml | 9 ++++----- .github/workflows/update-webclient-prebuilt.yml | 11 ++++------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index af7bdff1ee66d..61a09cea1c990 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -27,13 +27,12 @@ jobs: run: ./update-flake.sh - name: Get access token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 id: generate-token with: - app_id: ${{ secrets.LICENSING_APP_ID }} - installation_retrieval_mode: "id" - installation_retrieval_payload: ${{ secrets.LICENSING_APP_INSTALLATION_ID }} - private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} + # Get token for app: https://github.com/apps/tailscale-code-updater + app-id: ${{ secrets.CODE_UPDATER_APP_ID }} + private-key: ${{ secrets.CODE_UPDATER_APP_PRIVATE_KEY }} - name: Send pull request uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8 diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index f1c2b0c3b9368..5565b8c86c4bf 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -23,15 +23,12 @@ jobs: ./tool/go mod tidy - name: Get access token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 id: generate-token with: - # TODO(will): this should use the code updater app rather than 
licensing. - # It has the same permissions, so not a big deal, but still. - app_id: ${{ secrets.LICENSING_APP_ID }} - installation_retrieval_mode: "id" - installation_retrieval_payload: ${{ secrets.LICENSING_APP_INSTALLATION_ID }} - private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} + # Get token for app: https://github.com/apps/tailscale-code-updater + app-id: ${{ secrets.CODE_UPDATER_APP_ID }} + private-key: ${{ secrets.CODE_UPDATER_APP_PRIVATE_KEY }} - name: Send pull request id: pull-request From 6feb3c35cb851d54b613236c31f2dd3c03dbd6b7 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 26 Jun 2025 17:09:13 -0700 Subject: [PATCH 0030/1093] ipn/store: automatically migrate between plaintext and encrypted state (#16318) Add a new `--encrypt-state` flag to `cmd/tailscaled`. Based on that flag, migrate the existing state file to/from encrypted format if needed. Updates #15830 Signed-off-by: Andrew Lytvynov --- atomicfile/atomicfile.go | 6 +- cmd/tailscaled/tailscaled.go | 49 +++++- cmd/tsconnect/src/lib/js-state-store.ts | 3 + cmd/tsconnect/src/types/wasm_js.d.ts | 1 + cmd/tsconnect/wasm/wasm_js.go | 24 +++ docs/windows/policy/en-US/tailscale.adml | 11 +- docs/windows/policy/tailscale.admx | 14 ++ feature/tpm/tpm.go | 20 ++- feature/tpm/tpm_test.go | 165 +++++++++++++++++- ipn/ipnlocal/state_test.go | 8 +- ipn/store.go | 6 + ipn/store/awsstore/store_aws.go | 5 + ipn/store/kubestore/store_kube.go | 5 + ipn/store/mem/store_mem.go | 14 ++ ipn/store/stores.go | 140 +++++++++++++++ ipn/store_test.go | 14 ++ tailcfg/tailcfg.go | 3 + tstest/integration/integration.go | 16 +- tstest/integration/integration_test.go | 59 +++++++ .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + util/syspolicy/policy_keys.go | 5 + 24 files changed, 546 insertions(+), 26 deletions(-) diff --git a/atomicfile/atomicfile.go b/atomicfile/atomicfile.go index 
b3c8c93da2af9..9cae9bb750fa8 100644 --- a/atomicfile/atomicfile.go +++ b/atomicfile/atomicfile.go @@ -48,5 +48,9 @@ func WriteFile(filename string, data []byte, perm os.FileMode) (err error) { if err := f.Close(); err != nil { return err } - return rename(tmpName, filename) + return Rename(tmpName, filename) } + +// Rename srcFile to dstFile, similar to [os.Rename] but preserving file +// attributes and ACLs on Windows. +func Rename(srcFile, dstFile string) error { return rename(srcFile, dstFile) } diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 61b811c129454..3987b0c26927f 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -64,6 +64,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/multierr" "tailscale.com/util/osshare" + "tailscale.com/util/syspolicy" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" @@ -126,6 +127,7 @@ var args struct { debug string port uint16 statepath string + encryptState bool statedir string socketpath string birdSocketPath string @@ -193,6 +195,7 @@ func main() { flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. 
Default: "+paths.DefaultTailscaledStateFile()) + flag.BoolVar(&args.encryptState, "encrypt-state", defaultEncryptState(), "encrypt the state file on disk; uses TPM on Linux and Windows, on all other platforms this flag is not supported") flag.StringVar(&args.statedir, "statedir", "", "path to directory for storage of config state, TLS certs, temporary incoming Taildrop files, etc. If empty, it's derived from --state when possible.") flag.StringVar(&args.socketpath, "socket", paths.DefaultTailscaledSocket(), "path of the service unix socket") flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") @@ -268,6 +271,28 @@ func main() { } } + if args.encryptState { + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + log.SetFlags(0) + log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) + } + // Check if we have TPM support in this build. + if !store.HasKnownProviderPrefix(store.TPMPrefix + "/") { + log.SetFlags(0) + log.Fatal("--encrypt-state is not supported in this build of tailscaled") + } + // Check if we have TPM access. + if !hostinfo.New().TPM.Present() { + log.SetFlags(0) + log.Fatal("--encrypt-state is not supported on this device or a TPM is not accessible") + } + // Check for conflicting prefix in --state, like arn: or kube:. 
+ if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { + log.SetFlags(0) + log.Fatal("--encrypt-state can only be used with --state set to a local file path") + } + } + if args.disableLogs { envknob.SetNoLogsNoSupport() } @@ -315,13 +340,17 @@ func trySynologyMigration(p string) error { } func statePathOrDefault() string { + var path string if args.statepath != "" { - return args.statepath + path = args.statepath } - if args.statedir != "" { - return filepath.Join(args.statedir, "tailscaled.state") + if path == "" && args.statedir != "" { + path = filepath.Join(args.statedir, "tailscaled.state") } - return "" + if path != "" && !store.HasKnownProviderPrefix(path) && args.encryptState { + path = store.TPMPrefix + path + } + return path } // serverOptions is the configuration of the Tailscale node agent. @@ -974,3 +1003,15 @@ func applyIntegrationTestEnvKnob() { } } } + +func defaultEncryptState() bool { + if runtime.GOOS != "windows" && runtime.GOOS != "linux" { + // TPM encryption is only configurable on Windows and Linux. Other + // platforms either use system APIs and are not configurable + // (Android/Apple), or don't support any form of encryption yet + // (plan9/FreeBSD/etc). 
+ return false + } + v, _ := syspolicy.GetBoolean(syspolicy.EncryptState, false) + return v +} diff --git a/cmd/tsconnect/src/lib/js-state-store.ts b/cmd/tsconnect/src/lib/js-state-store.ts index e57dfd98efabd..7f2fc8087e768 100644 --- a/cmd/tsconnect/src/lib/js-state-store.ts +++ b/cmd/tsconnect/src/lib/js-state-store.ts @@ -10,4 +10,7 @@ export const sessionStateStorage: IPNStateStorage = { getState(id) { return window.sessionStorage[`ipn-state-${id}`] || "" }, + all() { + return JSON.stringify(window.sessionStorage) + }, } diff --git a/cmd/tsconnect/src/types/wasm_js.d.ts b/cmd/tsconnect/src/types/wasm_js.d.ts index 492197ccb1a9b..f47a972b03fba 100644 --- a/cmd/tsconnect/src/types/wasm_js.d.ts +++ b/cmd/tsconnect/src/types/wasm_js.d.ts @@ -44,6 +44,7 @@ declare global { interface IPNStateStorage { setState(id: string, value: string): void getState(id: string): string + all(): string } type IPNConfig = { diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 779a87e49dec9..c5ff56120f492 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -15,6 +15,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "iter" "log" "math/rand/v2" "net" @@ -579,6 +580,29 @@ func (s *jsStateStore) WriteState(id ipn.StateKey, bs []byte) error { return nil } +func (s *jsStateStore) All() iter.Seq2[ipn.StateKey, []byte] { + return func(yield func(ipn.StateKey, []byte) bool) { + jsValue := s.jsStateStorage.Call("all") + if jsValue.String() == "" { + return + } + buf, err := hex.DecodeString(jsValue.String()) + if err != nil { + return + } + var state map[string][]byte + if err := json.Unmarshal(buf, &state); err != nil { + return + } + + for k, v := range state { + if !yield(ipn.StateKey(k), v) { + break + } + } + } +} + func mapSlice[T any, M any](a []T, f func(T) M) []M { n := make([]M, len(a)) for i, e := range a { diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 
62ff94da7096d..c09d847bc7c0d 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -19,6 +19,7 @@ Tailscale version 1.80.0 and later Tailscale version 1.82.0 and later Tailscale version 1.84.0 and later + Tailscale version 1.86.0 and later Tailscale UI customization Settings @@ -67,7 +68,7 @@ If you disable or do not configure this policy setting, an interactive user logi See https://tailscale.com/kb/1315/mdm-keys#set-an-auth-key for more details.]]> Require using a specific Exit Node + Encrypt client state file stored on disk + diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index d97b24c36b5df..0a8aa1a75eb50 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -66,6 +66,10 @@ displayName="$(string.SINCE_V1_84)"> + + + @@ -365,5 +369,15 @@ + + + + + + + + + + diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 6feac85e35ecb..64656d412a2e7 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -10,6 +10,7 @@ import ( "encoding/json" "errors" "fmt" + "iter" "log" "os" "path/filepath" @@ -37,7 +38,7 @@ func init() { hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { hi.TPM = infoOnce() }) - store.Register(storePrefix, newStore) + store.Register(store.TPMPrefix, newStore) } func info() *tailcfg.TPMInfo { @@ -103,10 +104,8 @@ func propToString(v uint32) string { return string(slices.DeleteFunc(chars, func(b byte) bool { return b < ' ' || b > '~' })) } -const storePrefix = "tpmseal:" - func newStore(logf logger.Logf, path string) (ipn.StateStore, error) { - path = strings.TrimPrefix(path, storePrefix) + path = strings.TrimPrefix(path, store.TPMPrefix) if err := paths.MkStateDir(filepath.Dir(path)); err != nil { return nil, fmt.Errorf("creating state directory: %w", err) } @@ -205,6 +204,19 @@ func (s *tpmStore) writeSealed() error { return atomicfile.WriteFile(s.path, buf, 0600) } +func (s *tpmStore) All() 
iter.Seq2[ipn.StateKey, []byte] { + return func(yield func(ipn.StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.cache { + if !yield(k, v) { + break + } + } + } +} + // The nested levels of encoding and encryption are confusing, so here's what's // going on in plain English. // diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index a022b69b2bf04..b08681354a1e4 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -6,13 +6,22 @@ package tpm import ( "bytes" "crypto/rand" + "encoding/json" "errors" + "fmt" + "maps" + "os" "path/filepath" + "slices" "strconv" + "strings" "testing" + "github.com/google/go-cmp/cmp" "tailscale.com/ipn" "tailscale.com/ipn/store" + "tailscale.com/ipn/store/mem" + "tailscale.com/types/logger" ) func TestPropToString(t *testing.T) { @@ -29,11 +38,9 @@ func TestPropToString(t *testing.T) { } func skipWithoutTPM(t testing.TB) { - tpm, err := open() - if err != nil { + if !tpmSupported() { t.Skip("TPM not available") } - tpm.Close() } func TestSealUnseal(t *testing.T) { @@ -67,7 +74,7 @@ func TestSealUnseal(t *testing.T) { func TestStore(t *testing.T) { skipWithoutTPM(t) - path := storePrefix + filepath.Join(t.TempDir(), "state") + path := store.TPMPrefix + filepath.Join(t.TempDir(), "state") store, err := newStore(t.Logf, path) if err != nil { t.Fatal(err) @@ -180,3 +187,153 @@ func BenchmarkStore(b *testing.B) { }) } } + +func TestMigrateStateToTPM(t *testing.T) { + if !tpmSupported() { + t.Logf("using mock tpmseal provider") + store.RegisterForTest(t, store.TPMPrefix, newMockTPMSeal) + } + + storePath := filepath.Join(t.TempDir(), "store") + // Make sure migration doesn't cause a failure when no state file exists. 
+ if _, err := store.New(t.Logf, store.TPMPrefix+storePath); err != nil { + t.Fatalf("store.New failed for new tpmseal store: %v", err) + } + os.Remove(storePath) + + initial, err := store.New(t.Logf, storePath) + if err != nil { + t.Fatalf("store.New failed for new file store: %v", err) + } + + // Populate initial state file. + content := map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + "baz": []byte("qux"), + } + for k, v := range content { + if err := initial.WriteState(k, v); err != nil { + t.Fatal(err) + } + } + // Expected file keys for plaintext and sealed versions of state. + keysPlaintext := []string{"foo", "baz"} + keysTPMSeal := []string{"key", "nonce", "data"} + + for _, tt := range []struct { + desc string + path string + wantKeys []string + }{ + { + desc: "plaintext-to-plaintext", + path: storePath, + wantKeys: keysPlaintext, + }, + { + desc: "plaintext-to-tpmseal", + path: store.TPMPrefix + storePath, + wantKeys: keysTPMSeal, + }, + { + desc: "tpmseal-to-tpmseal", + path: store.TPMPrefix + storePath, + wantKeys: keysTPMSeal, + }, + { + desc: "tpmseal-to-plaintext", + path: storePath, + wantKeys: keysPlaintext, + }, + } { + t.Run(tt.desc, func(t *testing.T) { + s, err := store.New(t.Logf, tt.path) + if err != nil { + t.Fatalf("migration failed: %v", err) + } + gotContent := maps.Collect(s.All()) + if diff := cmp.Diff(content, gotContent); diff != "" { + t.Errorf("unexpected content after migration, diff:\n%s", diff) + } + + buf, err := os.ReadFile(storePath) + if err != nil { + t.Fatal(err) + } + var data map[string]any + if err := json.Unmarshal(buf, &data); err != nil { + t.Fatal(err) + } + gotKeys := slices.Collect(maps.Keys(data)) + slices.Sort(gotKeys) + slices.Sort(tt.wantKeys) + if diff := cmp.Diff(gotKeys, tt.wantKeys); diff != "" { + t.Errorf("unexpected content keys after migration, diff:\n%s", diff) + } + }) + } +} + +func tpmSupported() bool { + tpm, err := open() + if err != nil { + return false + } + tpm.Close() + return true +} + 
+type mockTPMSealProvider struct { + path string + mem.Store +} + +func newMockTPMSeal(logf logger.Logf, path string) (ipn.StateStore, error) { + path, ok := strings.CutPrefix(path, store.TPMPrefix) + if !ok { + return nil, fmt.Errorf("%q missing tpmseal: prefix", path) + } + s := &mockTPMSealProvider{path: path, Store: mem.Store{}} + buf, err := os.ReadFile(path) + if errors.Is(err, os.ErrNotExist) { + return s, s.flushState() + } + if err != nil { + return nil, err + } + var data struct { + Key string + Nonce string + Data map[ipn.StateKey][]byte + } + if err := json.Unmarshal(buf, &data); err != nil { + return nil, err + } + if data.Key == "" || data.Nonce == "" { + return nil, fmt.Errorf("%q missing key or nonce", path) + } + for k, v := range data.Data { + s.Store.WriteState(k, v) + } + return s, nil +} + +func (p *mockTPMSealProvider) WriteState(k ipn.StateKey, v []byte) error { + if err := p.Store.WriteState(k, v); err != nil { + return err + } + return p.flushState() +} + +func (p *mockTPMSealProvider) flushState() error { + data := map[string]any{ + "key": "foo", + "nonce": "bar", + "data": maps.Collect(p.Store.All()), + } + buf, err := json.Marshal(data) + if err != nil { + return err + } + return os.WriteFile(p.path, buf, 0600) +} diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 2921de2032913..eb36643856f82 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1013,17 +1013,13 @@ func TestEditPrefsHasNoKeys(t *testing.T) { } type testStateStorage struct { - mem mem.Store + mem.Store written atomic.Bool } -func (s *testStateStorage) ReadState(id ipn.StateKey) ([]byte, error) { - return s.mem.ReadState(id) -} - func (s *testStateStorage) WriteState(id ipn.StateKey, bs []byte) error { s.written.Store(true) - return s.mem.WriteState(id, bs) + return s.Store.WriteState(id, bs) } // awaitWrite clears the "I've seen writes" bit, in prep for a future diff --git a/ipn/store.go b/ipn/store.go index 
550aa8cba819a..e176e48421216 100644 --- a/ipn/store.go +++ b/ipn/store.go @@ -8,6 +8,7 @@ import ( "context" "errors" "fmt" + "iter" "net" "strconv" ) @@ -83,6 +84,11 @@ type StateStore interface { // instead, which only writes if the value is different from what's // already in the store. WriteState(id StateKey, bs []byte) error + // All returns an iterator over all StateStore keys. Using ReadState or + // WriteState is not safe while iterating and can lead to a deadlock. + // The order of keys in the iterator is not specified and may change + // between runs. + All() iter.Seq2[StateKey, []byte] } // WriteState is a wrapper around store.WriteState that only writes if diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 40bbbf0370822..523d1657b109d 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -10,6 +10,7 @@ import ( "context" "errors" "fmt" + "iter" "net/url" "regexp" "strings" @@ -253,3 +254,7 @@ func (s *awsStore) persistState() error { _, err = s.ssmClient.PutParameter(context.TODO(), in) return err } + +func (s *awsStore) All() iter.Seq2[ipn.StateKey, []byte] { + return s.memory.All() +} diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 14025bbb4150a..f6bedbf0b8054 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -7,6 +7,7 @@ package kubestore import ( "context" "fmt" + "iter" "log" "net" "os" @@ -428,3 +429,7 @@ func sanitizeKey[T ~string](k T) string { return '_' }, string(k)) } + +func (s *Store) All() iter.Seq2[ipn.StateKey, []byte] { + return s.memory.All() +} diff --git a/ipn/store/mem/store_mem.go b/ipn/store/mem/store_mem.go index 6f474ce993b43..6c22aefd547f8 100644 --- a/ipn/store/mem/store_mem.go +++ b/ipn/store/mem/store_mem.go @@ -7,6 +7,7 @@ package mem import ( "bytes" "encoding/json" + "iter" "sync" xmaps "golang.org/x/exp/maps" @@ -85,3 +86,16 @@ func (s *Store) ExportToJSON() ([]byte, error) { 
} return json.MarshalIndent(s.cache, "", " ") } + +func (s *Store) All() iter.Seq2[ipn.StateKey, []byte] { + return func(yield func(ipn.StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.cache { + if !yield(k, v) { + break + } + } + } +} diff --git a/ipn/store/stores.go b/ipn/store/stores.go index 1a98574c91cf9..43c79639934b8 100644 --- a/ipn/store/stores.go +++ b/ipn/store/stores.go @@ -7,10 +7,14 @@ package store import ( "bytes" "encoding/json" + "errors" "fmt" + "iter" + "maps" "os" "path/filepath" "runtime" + "slices" "strings" "sync" @@ -20,6 +24,7 @@ import ( "tailscale.com/paths" "tailscale.com/types/logger" "tailscale.com/util/mak" + "tailscale.com/util/testenv" ) // Provider returns a StateStore for the provided path. @@ -32,6 +37,9 @@ func init() { var knownStores map[string]Provider +// TPMPrefix is the path prefix used for TPM-encrypted StateStore. +const TPMPrefix = "tpmseal:" + // New returns a StateStore based on the provided arg // and registered stores. // The arg is of the form "prefix:rest", where prefix was previously @@ -53,12 +61,23 @@ func New(logf logger.Logf, path string) (ipn.StateStore, error) { if strings.HasPrefix(path, prefix) { // We can't strip the prefix here as some NewStoreFunc (like arn:) // expect the prefix. 
+ if prefix == TPMPrefix { + if runtime.GOOS == "windows" { + path = TPMPrefix + TryWindowsAppDataMigration(logf, strings.TrimPrefix(path, TPMPrefix)) + } + if err := maybeMigrateLocalStateFile(logf, path); err != nil { + return nil, fmt.Errorf("failed to migrate existing state file to TPM-sealed format: %w", err) + } + } return sf(logf, path) } } if runtime.GOOS == "windows" { path = TryWindowsAppDataMigration(logf, path) } + if err := maybeMigrateLocalStateFile(logf, path); err != nil { + return nil, fmt.Errorf("failed to migrate existing TPM-sealed state file to plaintext format: %w", err) + } return NewFileStore(logf, path) } @@ -77,6 +96,29 @@ func Register(prefix string, fn Provider) { mak.Set(&knownStores, prefix, fn) } +// RegisterForTest registers a prefix to be used for NewStore in tests. An +// existing registered prefix will be replaced. +func RegisterForTest(t testenv.TB, prefix string, fn Provider) { + if len(prefix) == 0 { + panic("prefix is empty") + } + old := maps.Clone(knownStores) + t.Cleanup(func() { knownStores = old }) + + mak.Set(&knownStores, prefix, fn) +} + +// HasKnownProviderPrefix reports whether path uses one of the registered +// Provider prefixes. +func HasKnownProviderPrefix(path string) bool { + for prefix := range knownStores { + if strings.HasPrefix(path, prefix) { + return true + } + } + return false +} + // TryWindowsAppDataMigration attempts to copy the Windows state file // from its old location to the new location. 
(Issue 2856) // @@ -179,3 +221,101 @@ func (s *FileStore) WriteState(id ipn.StateKey, bs []byte) error { } return atomicfile.WriteFile(s.path, bs, 0600) } + +func (s *FileStore) All() iter.Seq2[ipn.StateKey, []byte] { + return func(yield func(ipn.StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.cache { + if !yield(k, v) { + break + } + } + } +} + +func maybeMigrateLocalStateFile(logf logger.Logf, path string) error { + path, toTPM := strings.CutPrefix(path, TPMPrefix) + + // Extract JSON keys from the file on disk and guess what kind it is. + bs, err := os.ReadFile(path) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } + var content map[string]any + if err := json.Unmarshal(bs, &content); err != nil { + return fmt.Errorf("failed to unmarshal %q: %w", path, err) + } + keys := slices.Sorted(maps.Keys(content)) + tpmKeys := []string{"key", "nonce", "data"} + slices.Sort(tpmKeys) + // TPM-sealed files will have exactly these keys. + existingFileSealed := slices.Equal(keys, tpmKeys) + // Plaintext files for nodes that registered at least once will have this + // key, plus other dynamic ones. + _, existingFilePlaintext := content["_machinekey"] + isTPM := existingFileSealed && !existingFilePlaintext + + if isTPM == toTPM { + // No migration needed. + return nil + } + + newTPMStore, ok := knownStores[TPMPrefix] + if !ok { + return errors.New("this build does not support TPM integration") + } + + // Open from (old format) and to (new format) stores for migration. The + // "to" store will be at tmpPath. + var from, to ipn.StateStore + tmpPath := path + ".tmp" + if toTPM { + // Migrate plaintext file to be TPM-sealed. 
+ from, err = NewFileStore(logf, path) + if err != nil { + return fmt.Errorf("NewFileStore(%q): %w", path, err) + } + to, err = newTPMStore(logf, TPMPrefix+tmpPath) + if err != nil { + return fmt.Errorf("newTPMStore(%q): %w", tmpPath, err) + } + } else { + // Migrate TPM-sealed file to plaintext. + from, err = newTPMStore(logf, TPMPrefix+path) + if err != nil { + return fmt.Errorf("newTPMStore(%q): %w", path, err) + } + to, err = NewFileStore(logf, tmpPath) + if err != nil { + return fmt.Errorf("NewFileStore(%q): %w", tmpPath, err) + } + } + defer os.Remove(tmpPath) + + // Copy all the items. This is pretty inefficient, because both stores + // write the file to disk for each WriteState, but that's ok for a one-time + // migration. + for k, v := range from.All() { + if err := to.WriteState(k, v); err != nil { + return err + } + } + + // Finally, overwrite the state file with the new one we created at + // tmpPath. + if err := atomicfile.Rename(tmpPath, path); err != nil { + return err + } + + if toTPM { + logf("migrated %q from plaintext to TPM-sealed format", path) + } else { + logf("migrated %q from TPM-sealed to plaintext format", path) + } + return nil +} diff --git a/ipn/store_test.go b/ipn/store_test.go index fcc082d8a8a87..4dd7321b9048d 100644 --- a/ipn/store_test.go +++ b/ipn/store_test.go @@ -5,6 +5,7 @@ package ipn import ( "bytes" + "iter" "sync" "testing" @@ -31,6 +32,19 @@ func (s *memStore) WriteState(k StateKey, v []byte) error { return nil } +func (s *memStore) All() iter.Seq2[StateKey, []byte] { + return func(yield func(StateKey, []byte) bool) { + s.mu.Lock() + defer s.mu.Unlock() + + for k, v := range s.m { + if !yield(k, v) { + break + } + } + } +} + func TestWriteState(t *testing.T) { var ss StateStore = new(memStore) WriteState(ss, "foo", []byte("bar")) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 4679609f3e9d4..23f3cc49b152b 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -908,6 +908,9 @@ type TPMInfo struct {
SpecRevision int `json:",omitempty"` } +// Present reports whether a TPM device is present on this machine. +func (t *TPMInfo) Present() bool { return t != nil } + // ServiceName is the name of a service, of the form `svc:dns-label`. Services // represent some kind of application provided for users of the tailnet with a // MagicDNS name and possibly dedicated IP addresses. Currently (2024-01-21), diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index d64bfbbd9d755..987bb569a4f66 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -569,11 +569,12 @@ type TestNode struct { env *TestEnv tailscaledParser *nodeOutputParser - dir string // temp dir for sock & state - configFile string // or empty for none - sockFile string - stateFile string - upFlagGOOS string // if non-empty, sets TS_DEBUG_UP_FLAG_GOOS for cmd/tailscale CLI + dir string // temp dir for sock & state + configFile string // or empty for none + sockFile string + stateFile string + upFlagGOOS string // if non-empty, sets TS_DEBUG_UP_FLAG_GOOS for cmd/tailscale CLI + encryptState bool mu sync.Mutex onLogLine []func([]byte) @@ -640,7 +641,7 @@ func (n *TestNode) diskPrefs() *ipn.Prefs { if _, err := os.ReadFile(n.stateFile); err != nil { t.Fatalf("reading prefs: %v", err) } - fs, err := store.NewFileStore(nil, n.stateFile) + fs, err := store.New(nil, n.stateFile) if err != nil { t.Fatalf("reading prefs, NewFileStore: %v", err) } @@ -822,6 +823,9 @@ func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { if n.configFile != "" { cmd.Args = append(cmd.Args, "--config="+n.configFile) } + if n.encryptState { + cmd.Args = append(cmd.Args, "--encrypt-state") + } cmd.Env = append(os.Environ(), "TS_DEBUG_PERMIT_HTTP_C2N=1", "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 90cc7e443b5d3..7cb251f31c344 100644 --- 
a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -21,6 +21,7 @@ import ( "os/exec" "path/filepath" "regexp" + "runtime" "strconv" "sync/atomic" "testing" @@ -32,6 +33,7 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/clientupdate" "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" @@ -1470,3 +1472,60 @@ func TestNetstackUDPLoopback(t *testing.T) { d1.MustCleanShutdown(t) } + +func TestEncryptStateMigration(t *testing.T) { + if !hostinfo.New().TPM.Present() { + t.Skip("TPM not available") + } + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + t.Skip("--encrypt-state for tailscaled state not supported on this platform") + } + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + n := NewTestNode(t, env) + + runNode := func(t *testing.T, wantStateKeys []string) { + t.Helper() + + // Run the node. + d := n.StartDaemon() + n.AwaitResponding() + n.MustUp() + n.AwaitRunning() + + // Check the contents of the state file. + buf, err := os.ReadFile(n.stateFile) + if err != nil { + t.Fatalf("reading %q: %v", n.stateFile, err) + } + t.Logf("state file content:\n%s", buf) + var content map[string]any + if err := json.Unmarshal(buf, &content); err != nil { + t.Fatalf("parsing %q: %v", n.stateFile, err) + } + for _, k := range wantStateKeys { + if _, ok := content[k]; !ok { + t.Errorf("state file is missing key %q", k) + } + } + + // Stop the node. 
+ d.MustCleanShutdown(t) + } + + wantPlaintextStateKeys := []string{"_machinekey", "_current-profile", "_profiles"} + wantEncryptedStateKeys := []string{"key", "nonce", "data"} + t.Run("regular-state", func(t *testing.T) { + n.encryptState = false + runNode(t, wantPlaintextStateKeys) + }) + t.Run("migrate-to-encrypted", func(t *testing.T) { + n.encryptState = true + runNode(t, wantEncryptedStateKeys) + }) + t.Run("migrate-to-plaintext", func(t *testing.T) { + n.encryptState = false + runNode(t, wantPlaintextStateKeys) + }) +} diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 321ba25668c1f..a73c6ebf649f2 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -51,6 +51,7 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 321ba25668c1f..a73c6ebf649f2 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -51,6 +51,7 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 321ba25668c1f..a73c6ebf649f2 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -51,6 +51,7 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy" _ 
"tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 321ba25668c1f..a73c6ebf649f2 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -51,6 +51,7 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" + _ "tailscale.com/util/syspolicy" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ed00d0004abb1..b19a3e7fec61f 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -120,6 +120,10 @@ const ( LogSCMInteractions Key = "LogSCMInteractions" FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" + // EncryptState is a boolean setting that specifies whether to encrypt the + // tailscaled state file with a TPM device. + EncryptState Key = "EncryptState" + // PostureChecking indicates if posture checking is enabled and the client shall gather // posture data. // Key is a string value that specifies an option: "always", "never", "user-decides". 
@@ -186,6 +190,7 @@ var implicitDefinitions = []*setting.Definition{ setting.NewDefinition(ExitNodeID, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(ExitNodeIP, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(EncryptState, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(Hostname, setting.DeviceSetting, setting.StringValue), setting.NewDefinition(LogSCMInteractions, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(LogTarget, setting.DeviceSetting, setting.StringValue), From b2bf7e988e110e2a7245ac67792f666e1cd114f1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 26 Jun 2025 18:39:47 -0700 Subject: [PATCH 0031/1093] wgengine/magicsock: add envknob to toggle UDP relay feature (#16396) Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/debugknobs.go | 6 ++++++ wgengine/magicsock/debugknobs_stubs.go | 1 + wgengine/magicsock/magicsock.go | 6 +++--- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/wgengine/magicsock/debugknobs.go b/wgengine/magicsock/debugknobs.go index f8fd9f0407d44..0558953887ae0 100644 --- a/wgengine/magicsock/debugknobs.go +++ b/wgengine/magicsock/debugknobs.go @@ -62,6 +62,12 @@ var ( // //lint:ignore U1000 used on Linux/Darwin only debugPMTUD = envknob.RegisterBool("TS_DEBUG_PMTUD") + // debugAssumeUDPRelayCapable forces magicsock to assume that all peers are + // UDP relay capable clients and servers. This will eventually be replaced + // by a [tailcfg.CapabilityVersion] comparison. It enables early testing of + // the UDP relay feature before we have established related + // [tailcfg.CapabilityVersion]'s. + debugAssumeUDPRelayCapable = envknob.RegisterBool("TS_DEBUG_ASSUME_UDP_RELAY_CAPABLE") // Hey you! Adding a new debugknob? Make sure to stub it out in the // debugknobs_stubs.go file too. 
) diff --git a/wgengine/magicsock/debugknobs_stubs.go b/wgengine/magicsock/debugknobs_stubs.go index 336d7baa19645..3d23b1f8e8f01 100644 --- a/wgengine/magicsock/debugknobs_stubs.go +++ b/wgengine/magicsock/debugknobs_stubs.go @@ -31,3 +31,4 @@ func debugRingBufferMaxSizeBytes() int { return 0 } func inTest() bool { return false } func debugPeerMap() bool { return false } func pretendpoints() []netip.AddrPort { return []netip.AddrPort{} } +func debugAssumeUDPRelayCapable() bool { return false } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index d7b52269984da..e76d0054f04c2 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2592,12 +2592,12 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { // TODO(jwhited): implement once capVer is bumped - return version == math.MinInt32 + return version == math.MinInt32 || debugAssumeUDPRelayCapable() } func capVerIsRelayServerCapable(version tailcfg.CapabilityVersion) bool { - // TODO(jwhited): implement once capVer is bumped - return version == math.MinInt32 + // TODO(jwhited): implement once capVer is bumped & update Test_peerAPIIfCandidateRelayServer + return version == math.MinInt32 || debugAssumeUDPRelayCapable() } // onFilterUpdate is called when a [FilterUpdate] is received over the From b32a01b2dc44986ce83d2dd091f53c31e9a25391 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 26 Jun 2025 19:30:14 -0700 Subject: [PATCH 0032/1093] disco,net/udprelay,wgengine/magicsock: support relay re-binding (#16388) Relay handshakes may now occur multiple times over the lifetime of a relay server endpoint. Handshake messages now include a handshake generation, which is client specified, as a means to trigger safe challenge reset server-side. Relay servers continue to enforce challenge values as single use. 
They will only send a given value once, in reply to the first arriving bind message for a handshake generation. VNI has been added to the handshake messages, and we expect the outer Geneve header value to match the sealed value upon reception. Remote peer disco pub key is now also included in handshake messages, and it must match the receiver's expectation for the remote, participating party. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- disco/disco.go | 110 ++++++++++++++++------ disco/disco_test.go | 27 ++++-- net/udprelay/server.go | 146 +++++++++++++++-------------- net/udprelay/server_test.go | 80 +++++++++++++--- wgengine/magicsock/relaymanager.go | 46 +++++++-- 5 files changed, 276 insertions(+), 133 deletions(-) diff --git a/disco/disco.go b/disco/disco.go index 0854eb4c0af5a..d4623c119dbcb 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -321,79 +321,131 @@ const ( BindUDPRelayHandshakeStateAnswerReceived ) -// bindUDPRelayEndpointLen is the length of a marshalled BindUDPRelayEndpoint -// message, without the message header. -const bindUDPRelayEndpointLen = BindUDPRelayEndpointChallengeLen +// bindUDPRelayEndpointCommonLen is the length of a marshalled +// [BindUDPRelayEndpointCommon], without the message header. +const bindUDPRelayEndpointCommonLen = 72 + +// BindUDPRelayChallengeLen is the length of the Challenge field carried in +// [BindUDPRelayEndpointChallenge] & [BindUDPRelayEndpointAnswer] messages. +const BindUDPRelayChallengeLen = 32 + +// BindUDPRelayEndpointCommon contains fields that are common across all 3 +// UDP relay handshake message types. All 4 field values are expected to be +// consistent for the lifetime of a handshake besides Challenge, which is +// irrelevant in a [BindUDPRelayEndpoint] message. +type BindUDPRelayEndpointCommon struct { + // VNI is the Geneve header Virtual Network Identifier field value, which + // must match this disco-sealed value upon reception. 
If they are + // non-matching it indicates the cleartext Geneve header was tampered with + // and/or mangled. + VNI uint32 + // Generation represents the handshake generation. Clients must set a new, + // nonzero value at the start of every handshake. + Generation uint32 + // RemoteKey is the disco key of the remote peer participating over this + // relay endpoint. + RemoteKey key.DiscoPublic + // Challenge is set by the server in a [BindUDPRelayEndpointChallenge] + // message, and expected to be echoed back by the client in a + // [BindUDPRelayEndpointAnswer] message. Its value is irrelevant in a + // [BindUDPRelayEndpoint] message, where it simply serves a padding purpose + // ensuring all handshake messages are equal in size. + Challenge [BindUDPRelayChallengeLen]byte +} + +// encode encodes m in b. b must be at least bindUDPRelayEndpointCommonLen bytes +// long. +func (m *BindUDPRelayEndpointCommon) encode(b []byte) { + binary.BigEndian.PutUint32(b, m.VNI) + b = b[4:] + binary.BigEndian.PutUint32(b, m.Generation) + b = b[4:] + m.RemoteKey.AppendTo(b[:0]) + b = b[key.DiscoPublicRawLen:] + copy(b, m.Challenge[:]) +} + +// decode decodes m from b. +func (m *BindUDPRelayEndpointCommon) decode(b []byte) error { + if len(b) < bindUDPRelayEndpointCommonLen { + return errShort + } + m.VNI = binary.BigEndian.Uint32(b) + b = b[4:] + m.Generation = binary.BigEndian.Uint32(b) + b = b[4:] + m.RemoteKey = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) + b = b[key.DiscoPublicRawLen:] + copy(m.Challenge[:], b[:BindUDPRelayChallengeLen]) + return nil +} // BindUDPRelayEndpoint is the first messaged transmitted from UDP relay client -// towards UDP relay server as part of the 3-way bind handshake. It is padded to -// match the length of BindUDPRelayEndpointChallenge. This message type is -// currently considered experimental and is not yet tied to a +// towards UDP relay server as part of the 3-way bind handshake. 
This message +// type is currently considered experimental and is not yet tied to a // tailcfg.CapabilityVersion. type BindUDPRelayEndpoint struct { + BindUDPRelayEndpointCommon } func (m *BindUDPRelayEndpoint) AppendMarshal(b []byte) []byte { - ret, _ := appendMsgHeader(b, TypeBindUDPRelayEndpoint, v0, bindUDPRelayEndpointLen) + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpoint, v0, bindUDPRelayEndpointCommonLen) + m.BindUDPRelayEndpointCommon.encode(d) return ret } func parseBindUDPRelayEndpoint(ver uint8, p []byte) (m *BindUDPRelayEndpoint, err error) { m = new(BindUDPRelayEndpoint) + err = m.BindUDPRelayEndpointCommon.decode(p) + if err != nil { + return nil, err + } return m, nil } -// BindUDPRelayEndpointChallengeLen is the length of a marshalled -// BindUDPRelayEndpointChallenge message, without the message header. -const BindUDPRelayEndpointChallengeLen = 32 - // BindUDPRelayEndpointChallenge is transmitted from UDP relay server towards // UDP relay client in response to a BindUDPRelayEndpoint message as part of the // 3-way bind handshake. This message type is currently considered experimental // and is not yet tied to a tailcfg.CapabilityVersion. 
type BindUDPRelayEndpointChallenge struct { - Challenge [BindUDPRelayEndpointChallengeLen]byte + BindUDPRelayEndpointCommon } func (m *BindUDPRelayEndpointChallenge) AppendMarshal(b []byte) []byte { - ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointChallenge, v0, BindUDPRelayEndpointChallengeLen) - copy(d, m.Challenge[:]) + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointChallenge, v0, bindUDPRelayEndpointCommonLen) + m.BindUDPRelayEndpointCommon.encode(d) return ret } func parseBindUDPRelayEndpointChallenge(ver uint8, p []byte) (m *BindUDPRelayEndpointChallenge, err error) { - if len(p) < BindUDPRelayEndpointChallengeLen { - return nil, errShort - } m = new(BindUDPRelayEndpointChallenge) - copy(m.Challenge[:], p[:]) + err = m.BindUDPRelayEndpointCommon.decode(p) + if err != nil { + return nil, err + } return m, nil } -// bindUDPRelayEndpointAnswerLen is the length of a marshalled -// BindUDPRelayEndpointAnswer message, without the message header. -const bindUDPRelayEndpointAnswerLen = BindUDPRelayEndpointChallengeLen - // BindUDPRelayEndpointAnswer is transmitted from UDP relay client to UDP relay // server in response to a BindUDPRelayEndpointChallenge message. This message // type is currently considered experimental and is not yet tied to a // tailcfg.CapabilityVersion. 
type BindUDPRelayEndpointAnswer struct { - Answer [bindUDPRelayEndpointAnswerLen]byte + BindUDPRelayEndpointCommon } func (m *BindUDPRelayEndpointAnswer) AppendMarshal(b []byte) []byte { - ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointAnswer, v0, bindUDPRelayEndpointAnswerLen) - copy(d, m.Answer[:]) + ret, d := appendMsgHeader(b, TypeBindUDPRelayEndpointAnswer, v0, bindUDPRelayEndpointCommonLen) + m.BindUDPRelayEndpointCommon.encode(d) return ret } func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpointAnswer, err error) { - if len(p) < bindUDPRelayEndpointAnswerLen { - return nil, errShort - } m = new(BindUDPRelayEndpointAnswer) - copy(m.Answer[:], p[:]) + err = m.BindUDPRelayEndpointCommon.decode(p) + if err != nil { + return nil, err + } return m, nil } diff --git a/disco/disco_test.go b/disco/disco_test.go index f2a29a744992f..9fb71ff83b73b 100644 --- a/disco/disco_test.go +++ b/disco/disco_test.go @@ -16,6 +16,15 @@ import ( ) func TestMarshalAndParse(t *testing.T) { + relayHandshakeCommon := BindUDPRelayEndpointCommon{ + VNI: 1, + Generation: 2, + RemoteKey: key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 30: 30, 31: 31})), + Challenge: [BindUDPRelayChallengeLen]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }, + } + tests := []struct { name string want string @@ -86,26 +95,24 @@ func TestMarshalAndParse(t *testing.T) { }, { name: "bind_udp_relay_endpoint", - m: &BindUDPRelayEndpoint{}, - want: "04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + m: &BindUDPRelayEndpoint{ + relayHandshakeCommon, + }, + want: "04 00 00 00 00 01 00 00 00 02 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 
1d 1e 1f", }, { name: "bind_udp_relay_endpoint_challenge", m: &BindUDPRelayEndpointChallenge{ - Challenge: [BindUDPRelayEndpointChallengeLen]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }, + relayHandshakeCommon, }, - want: "05 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", + want: "05 00 00 00 00 01 00 00 00 02 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", }, { name: "bind_udp_relay_endpoint_answer", m: &BindUDPRelayEndpointAnswer{ - Answer: [bindUDPRelayEndpointAnswerLen]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }, + relayHandshakeCommon, }, - want: "06 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", + want: "06 00 00 00 00 01 00 00 00 02 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f", }, { name: "call_me_maybe_via", diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 8b9e95fb1e728..e32f8917c520c 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -96,12 +96,13 @@ type serverEndpoint struct { // indexing of this array aligns with the following fields, e.g. // discoSharedSecrets[0] is the shared secret to use when sealing // Disco protocol messages for transmission towards discoPubKeys[0]. 
- discoPubKeys pairOfDiscoPubKeys - discoSharedSecrets [2]key.DiscoShared - handshakeState [2]disco.BindUDPRelayHandshakeState - addrPorts [2]netip.AddrPort - lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time - challenge [2][disco.BindUDPRelayEndpointChallengeLen]byte + discoPubKeys pairOfDiscoPubKeys + discoSharedSecrets [2]key.DiscoShared + handshakeGeneration [2]uint32 // or zero if a handshake has never started for that relay leg + handshakeAddrPorts [2]netip.AddrPort // or zero value if a handshake has never started for that relay leg + boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg + lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time + challenge [2][disco.BindUDPRelayChallengeLen]byte lamportID uint64 vni uint32 @@ -112,69 +113,77 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex if senderIndex != 0 && senderIndex != 1 { return } - handshakeState := e.handshakeState[senderIndex] - if handshakeState == disco.BindUDPRelayHandshakeStateAnswerReceived { - // this sender is already bound - return + + otherSender := 0 + if senderIndex == 0 { + otherSender = 1 } + + validateVNIAndRemoteKey := func(common disco.BindUDPRelayEndpointCommon) error { + if common.VNI != e.vni { + return errors.New("mismatching VNI") + } + if common.RemoteKey.Compare(e.discoPubKeys[otherSender]) != 0 { + return errors.New("mismatching RemoteKey") + } + return nil + } + switch discoMsg := discoMsg.(type) { case *disco.BindUDPRelayEndpoint: - switch handshakeState { - case disco.BindUDPRelayHandshakeStateInit: - // set sender addr - e.addrPorts[senderIndex] = from - fallthrough - case disco.BindUDPRelayHandshakeStateChallengeSent: - if from != e.addrPorts[senderIndex] { - // this is a later arriving bind from a different source, or - // a retransmit and the sender's source has changed, discard - return - } - m := new(disco.BindUDPRelayEndpointChallenge) - 
copy(m.Challenge[:], e.challenge[senderIndex][:]) - reply := make([]byte, packet.GeneveFixedHeaderLength, 512) - gh := packet.GeneveHeader{Control: true, VNI: e.vni, Protocol: packet.GeneveProtocolDisco} - err := gh.Encode(reply) - if err != nil { - return - } - reply = append(reply, disco.Magic...) - reply = serverDisco.AppendTo(reply) - box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil)) - reply = append(reply, box...) - uw.WriteMsgUDPAddrPort(reply, nil, from) - // set new state - e.handshakeState[senderIndex] = disco.BindUDPRelayHandshakeStateChallengeSent + err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon) + if err != nil { + // silently drop return - default: - // disco.BindUDPRelayEndpoint is unexpected in all other handshake states + } + if discoMsg.Generation == 0 { + // Generation must be nonzero, silently drop + return + } + if e.handshakeGeneration[senderIndex] == discoMsg.Generation { + // we've seen this generation before, silently drop + return + } + e.handshakeGeneration[senderIndex] = discoMsg.Generation + e.handshakeAddrPorts[senderIndex] = from + m := new(disco.BindUDPRelayEndpointChallenge) + m.VNI = e.vni + m.Generation = discoMsg.Generation + m.RemoteKey = e.discoPubKeys[otherSender] + rand.Read(e.challenge[senderIndex][:]) + copy(m.Challenge[:], e.challenge[senderIndex][:]) + reply := make([]byte, packet.GeneveFixedHeaderLength, 512) + gh := packet.GeneveHeader{Control: true, VNI: e.vni, Protocol: packet.GeneveProtocolDisco} + err = gh.Encode(reply) + if err != nil { return } + reply = append(reply, disco.Magic...) + reply = serverDisco.AppendTo(reply) + box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil)) + reply = append(reply, box...) 
+ uw.WriteMsgUDPAddrPort(reply, nil, from) + return case *disco.BindUDPRelayEndpointAnswer: - switch handshakeState { - case disco.BindUDPRelayHandshakeStateChallengeSent: - if from != e.addrPorts[senderIndex] { - // sender source has changed - return - } - if !bytes.Equal(discoMsg.Answer[:], e.challenge[senderIndex][:]) { - // bad answer - return - } - // sender is now bound - // TODO: Consider installing a fast path via netfilter or similar to - // relay (NAT) data packets for this serverEndpoint. - e.handshakeState[senderIndex] = disco.BindUDPRelayHandshakeStateAnswerReceived - // record last seen as bound time - e.lastSeen[senderIndex] = time.Now() + err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon) + if err != nil { + // silently drop return - default: - // disco.BindUDPRelayEndpointAnswer is unexpected in all other handshake - // states, or we've already handled it + } + generation := e.handshakeGeneration[senderIndex] + if generation == 0 || // we have no active handshake + generation != discoMsg.Generation || // mismatching generation for the active handshake + e.handshakeAddrPorts[senderIndex] != from || // mismatching source for the active handshake + !bytes.Equal(e.challenge[senderIndex][:], discoMsg.Challenge[:]) { // mismatching answer for the active handshake + // silently drop return } + // Handshake complete. Update the binding for this sender. 
+ e.boundAddrPorts[senderIndex] = from + e.lastSeen[senderIndex] = time.Now() // record last seen as bound time + return default: - // unexpected Disco message type + // unexpected message types, silently drop return } } @@ -225,12 +234,12 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade } var to netip.AddrPort switch { - case from == e.addrPorts[0]: + case from == e.boundAddrPorts[0]: e.lastSeen[0] = time.Now() - to = e.addrPorts[1] - case from == e.addrPorts[1]: + to = e.boundAddrPorts[1] + case from == e.boundAddrPorts[1]: e.lastSeen[1] = time.Now() - to = e.addrPorts[0] + to = e.boundAddrPorts[0] default: // unrecognized source return @@ -240,11 +249,6 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade return } - if e.isBound() { - // control packet, but serverEndpoint is already bound - return - } - if gh.Protocol != packet.GeneveProtocolDisco { // control packet, but not Disco return @@ -267,11 +271,11 @@ func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifet return false } -// isBound returns true if both clients have completed their 3-way handshake, +// isBound returns true if both clients have completed a 3-way handshake, // otherwise false. func (e *serverEndpoint) isBound() bool { - return e.handshakeState[0] == disco.BindUDPRelayHandshakeStateAnswerReceived && - e.handshakeState[1] == disco.BindUDPRelayHandshakeStateAnswerReceived + return e.boundAddrPorts[0].IsValid() && + e.boundAddrPorts[1].IsValid() } // NewServer constructs a [Server] listening on 0.0.0.0:'port'. 
IPv6 is not yet @@ -591,8 +595,6 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys[0]) e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys[1]) e.vni, s.vniPool = s.vniPool[0], s.vniPool[1:] - rand.Read(e.challenge[0][:]) - rand.Read(e.challenge[1][:]) s.byDisco[pair] = e s.byVNI[e.vni] = e diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index a4e5ca451af2e..3fcb9b8b198c2 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -19,23 +19,27 @@ import ( ) type testClient struct { - vni uint32 - local key.DiscoPrivate - server key.DiscoPublic - uc *net.UDPConn + vni uint32 + handshakeGeneration uint32 + local key.DiscoPrivate + remote key.DiscoPublic + server key.DiscoPublic + uc *net.UDPConn } -func newTestClient(t *testing.T, vni uint32, serverEndpoint netip.AddrPort, local key.DiscoPrivate, server key.DiscoPublic) *testClient { +func newTestClient(t *testing.T, vni uint32, serverEndpoint netip.AddrPort, local key.DiscoPrivate, remote, server key.DiscoPublic) *testClient { rAddr := &net.UDPAddr{IP: serverEndpoint.Addr().AsSlice(), Port: int(serverEndpoint.Port())} uc, err := net.DialUDP("udp4", nil, rAddr) if err != nil { t.Fatal(err) } return &testClient{ - vni: vni, - local: local, - server: server, - uc: uc, + vni: vni, + handshakeGeneration: 1, + local: local, + remote: remote, + server: server, + uc: uc, } } @@ -137,13 +141,35 @@ func (c *testClient) readControlDiscoMsg(t *testing.T) disco.Message { } func (c *testClient) handshake(t *testing.T) { - c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpoint{}) + generation := c.handshakeGeneration + c.handshakeGeneration++ + common := disco.BindUDPRelayEndpointCommon{ + VNI: c.vni, + Generation: generation, + RemoteKey: c.remote, + } + c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpoint{ + BindUDPRelayEndpointCommon: common, + }) msg := c.readControlDiscoMsg(t) challenge, ok := 
msg.(*disco.BindUDPRelayEndpointChallenge) if !ok { - t.Fatal("unexepcted disco message type") + t.Fatal("unexpected disco message type") + } + if challenge.Generation != common.Generation { + t.Fatalf("rx'd challenge.Generation (%d) != %d", challenge.Generation, common.Generation) + } + if challenge.VNI != common.VNI { + t.Fatalf("rx'd challenge.VNI (%d) != %d", challenge.VNI, common.VNI) + } + if challenge.RemoteKey != common.RemoteKey { + t.Fatalf("rx'd challenge.RemoteKey (%v) != %v", challenge.RemoteKey, common.RemoteKey) } - c.writeControlDiscoMsg(t, &disco.BindUDPRelayEndpointAnswer{Answer: challenge.Challenge}) + answer := &disco.BindUDPRelayEndpointAnswer{ + BindUDPRelayEndpointCommon: common, + } + answer.Challenge = challenge.Challenge + c.writeControlDiscoMsg(t, answer) } func (c *testClient) close() { @@ -179,9 +205,9 @@ func TestServer(t *testing.T) { if len(endpoint.AddrPorts) != 1 { t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) } - tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, endpoint.ServerDisco) + tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) defer tcA.close() - tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, endpoint.ServerDisco) + tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) defer tcB.close() tcA.handshake(t) @@ -209,4 +235,30 @@ func TestServer(t *testing.T) { if !bytes.Equal(txToA, rxFromB) { t.Fatal("unexpected msg B->A") } + + tcAOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) + tcAOnNewPort.handshakeGeneration = tcA.handshakeGeneration + 1 + defer tcAOnNewPort.close() + + // Handshake client A on a new source IP:port, verify we receive packets on the new binding + tcAOnNewPort.handshake(t) + txToAOnNewPort := []byte{7, 8, 9} + tcB.writeDataPkt(t, txToAOnNewPort) + rxFromB = 
tcAOnNewPort.readDataPkt(t) + if !bytes.Equal(txToAOnNewPort, rxFromB) { + t.Fatal("unexpected msg B->A") + } + + tcBOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) + tcBOnNewPort.handshakeGeneration = tcB.handshakeGeneration + 1 + defer tcBOnNewPort.close() + + // Handshake client B on a new source IP:port, verify we receive packets on the new binding + tcBOnNewPort.handshake(t) + txToBOnNewPort := []byte{7, 8, 9} + tcAOnNewPort.writeDataPkt(t, txToBOnNewPort) + rxFromA = tcBOnNewPort.readDataPkt(t) + if !bytes.Equal(txToBOnNewPort, rxFromA) { + t.Fatal("unexpected msg A->B") + } } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 7b378838a145c..6418a43641200 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -45,6 +45,7 @@ type relayManager struct { handshakeWorkByServerDiscoVNI map[serverDiscoVNI]*relayHandshakeWork handshakeWorkAwaitingPong map[*relayHandshakeWork]addrPortVNI addrPortVNIToHandshakeWork map[addrPortVNI]*relayHandshakeWork + handshakeGeneration uint32 // =================================================================== // The following chan fields serve event inputs to a single goroutine, @@ -590,7 +591,12 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay go r.sendCallMeMaybeVia(work.ep, work.se) } - go r.handshakeServerEndpoint(work) + r.handshakeGeneration++ + if r.handshakeGeneration == 0 { // generation must be nonzero + r.handshakeGeneration++ + } + + go r.handshakeServerEndpoint(work, r.handshakeGeneration) } // sendCallMeMaybeVia sends a [disco.CallMeMaybeVia] to ep over DERP. 
It must be @@ -616,7 +622,7 @@ func (r *relayManager) sendCallMeMaybeVia(ep *endpoint, se udprelay.ServerEndpoi ep.c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, epDisco.key, callMeMaybeVia, discoVerboseLog) } -func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { +func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generation uint32) { done := relayEndpointHandshakeWorkDoneEvent{work: work} r.ensureDiscoInfoFor(work) @@ -627,8 +633,21 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { work.cancel() }() + epDisco := work.ep.disco.Load() + if epDisco == nil { + return + } + + common := disco.BindUDPRelayEndpointCommon{ + VNI: work.se.VNI, + Generation: generation, + RemoteKey: epDisco.key, + } + sentBindAny := false - bind := &disco.BindUDPRelayEndpoint{} + bind := &disco.BindUDPRelayEndpoint{ + BindUDPRelayEndpointCommon: common, + } vni := virtualNetworkID{} vni.set(work.se.VNI) for _, addrPort := range work.se.AddrPorts { @@ -661,10 +680,6 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { if len(sentPingAt) == limitPings { return } - epDisco := work.ep.disco.Load() - if epDisco == nil { - return - } txid := stun.NewTxID() sentPingAt[txid] = time.Now() ping := &disco.Ping{ @@ -673,13 +688,24 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { } go func() { if withAnswer != nil { - answer := &disco.BindUDPRelayEndpointAnswer{Answer: *withAnswer} + answer := &disco.BindUDPRelayEndpointAnswer{BindUDPRelayEndpointCommon: common} + answer.Challenge = *withAnswer work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) } work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) }() } + validateVNIAndRemoteKey := func(common disco.BindUDPRelayEndpointCommon) error { + if common.VNI != work.se.VNI { + return errors.New("mismatching 
VNI") + } + if common.RemoteKey.Compare(epDisco.key) != 0 { + return errors.New("mismatching RemoteKey") + } + return nil + } + // This for{select{}} is responsible for handshaking and tx'ing ping/pong // when the handshake is complete. for { @@ -689,6 +715,10 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { case msgEvent := <-work.rxDiscoMsgCh: switch msg := msgEvent.msg.(type) { case *disco.BindUDPRelayEndpointChallenge: + err := validateVNIAndRemoteKey(msg.BindUDPRelayEndpointCommon) + if err != nil { + continue + } if handshakeState >= disco.BindUDPRelayHandshakeStateAnswerSent { continue } From 4a7b8afabfe71cde16445b416e7c93274ff657b8 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Fri, 20 Jun 2025 13:21:31 +0200 Subject: [PATCH 0033/1093] cmd/tailscale: add tlpub: prefix to lock log output Updates tailscale/corp#23258 Signed-off-by: Kristoffer Dalby --- cmd/tailscale/cli/network-lock.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index 871a931b54ba5..9ab2b11b0bf01 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -623,7 +623,7 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er printKey := func(key *tka.Key, prefix string) { fmt.Fprintf(&stanza, "%sType: %s\n", prefix, key.Kind.String()) if keyID, err := key.ID(); err == nil { - fmt.Fprintf(&stanza, "%sKeyID: %x\n", prefix, keyID) + fmt.Fprintf(&stanza, "%sKeyID: tlpub:%x\n", prefix, keyID) } else { // Older versions of the client shouldn't explode when they encounter an // unknown key type. 
@@ -645,10 +645,10 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er case tka.AUMAddKey.String(): printKey(aum.Key, "") case tka.AUMRemoveKey.String(): - fmt.Fprintf(&stanza, "KeyID: %x\n", aum.KeyID) + fmt.Fprintf(&stanza, "KeyID: tlpub:%x\n", aum.KeyID) case tka.AUMUpdateKey.String(): - fmt.Fprintf(&stanza, "KeyID: %x\n", aum.KeyID) + fmt.Fprintf(&stanza, "KeyID: tlpub:%x\n", aum.KeyID) if aum.Votes != nil { fmt.Fprintf(&stanza, "Votes: %d\n", aum.Votes) } From df786be14d40a9aadd82c5900bcf4d79b3e6af4f Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Fri, 27 Jun 2025 11:54:01 +0200 Subject: [PATCH 0034/1093] cmd/tailscale: use text format for TKA head Updates tailscale/corp#23258 Signed-off-by: Kristoffer Dalby --- cmd/tailscale/cli/network-lock.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index 9ab2b11b0bf01..d19909576c090 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -639,7 +639,11 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er return "", fmt.Errorf("decoding: %w", err) } - fmt.Fprintf(&stanza, "%supdate %x (%s)%s\n", terminalYellow, update.Hash, update.Change, terminalClear) + tkaHead, err := aum.Hash().MarshalText() + if err != nil { + return "", fmt.Errorf("decoding AUM hash: %w", err) + } + fmt.Fprintf(&stanza, "%supdate %s (%s)%s\n", terminalYellow, string(tkaHead), update.Change, terminalClear) switch update.Change { case tka.AUMAddKey.String(): From 53f67c43961a322048bc2c858181b331d2f35695 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 27 Jun 2025 10:03:56 -0400 Subject: [PATCH 0035/1093] util/eventbus: fix docstrings (#16401) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #15160 Signed-off-by: Claus Lensbøl --- util/eventbus/bus.go | 4 ++-- 1 file changed, 2 
insertions(+), 2 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 45d12da2f3736..e5bf7329a67ee 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -40,8 +40,8 @@ type Bus struct { clients set.Set[*Client] } -// New returns a new bus. Use [PublisherOf] to make event publishers, -// and [Bus.Queue] and [Subscribe] to make event subscribers. +// New returns a new bus. Use [Publish] to make event publishers, +// and [Subscribe] and [SubscribeFunc] to make event subscribers. func New() *Bus { ret := &Bus{ write: make(chan PublishedEvent), From f81baa2d56795267df835f770d0779d414aed283 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Fri, 27 Jun 2025 17:12:14 +0100 Subject: [PATCH 0036/1093] cmd/k8s-operator, k8s-operator: support Static Endpoints on ProxyGroups (#16115) updates: #14674 Signed-off-by: chaosinthecrd --- .../deploy/chart/templates/operator-rbac.yaml | 3 + .../crds/tailscale.com_proxyclasses.yaml | 45 + .../crds/tailscale.com_proxygroups.yaml | 5 + .../deploy/manifests/operator.yaml | 58 ++ cmd/k8s-operator/nodeport-service-ports.go | 203 +++++ .../nodeport-services-ports_test.go | 277 +++++++ cmd/k8s-operator/operator.go | 88 +- cmd/k8s-operator/proxyclass.go | 33 +- cmd/k8s-operator/proxyclass_test.go | 118 ++- cmd/k8s-operator/proxygroup.go | 389 ++++++++- cmd/k8s-operator/proxygroup_specs.go | 45 +- cmd/k8s-operator/proxygroup_test.go | 771 +++++++++++++++++- k8s-operator/api.md | 55 ++ .../apis/v1alpha1/types_proxyclass.go | 122 +++ .../apis/v1alpha1/types_proxygroup.go | 4 + .../apis/v1alpha1/zz_generated.deepcopy.go | 91 +++ 16 files changed, 2244 insertions(+), 63 deletions(-) create mode 100644 cmd/k8s-operator/nodeport-service-ports.go create mode 100644 cmd/k8s-operator/nodeport-services-ports_test.go diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 00d8318acdce4..5eb920a6f41c4 100644 --- 
a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -16,6 +16,9 @@ kind: ClusterRole metadata: name: tailscale-operator rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events", "services", "services/status"] verbs: ["create","delete","deletecollection","get","list","patch","update","watch"] diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index 1541234755029..fcf1b27aaf318 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -2203,6 +2203,51 @@ spec: won't make it *more* imbalanced. It's a required field. type: string + staticEndpoints: + description: |- + Configuration for 'static endpoints' on proxies in order to facilitate + direct connections from other devices on the tailnet. + See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. + type: object + required: + - nodePort + properties: + nodePort: + description: The configuration for static endpoints using NodePort Services. + type: object + required: + - ports + properties: + ports: + description: |- + The port ranges from which the operator will select NodePorts for the Services. + You must ensure that firewall rules allow UDP ingress traffic for these ports + to the node's external IPs. + The ports must be in the range of service node ports for the cluster (default `30000-32767`). + See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. + type: array + minItems: 1 + items: + type: object + required: + - port + properties: + endPort: + description: |- + endPort indicates that the range of ports from port to endPort if set, inclusive, + should be used. This field cannot be defined if the port field is not defined. 
+ The endPort must be either unset, or equal or greater than port. + type: integer + port: + description: port represents a port selected to be used. This is a required field. + type: integer + selector: + description: |- + A selector which will be used to select the node's that will have their `ExternalIP`'s advertised + by the ProxyGroup as Static Endpoints. + type: object + additionalProperties: + type: string tailscale: description: |- TailscaleConfig contains options to configure the tailscale-specific diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 4b9149e23e55b..f695e989d7b85 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -196,6 +196,11 @@ spec: If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the node. type: string + staticEndpoints: + description: StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. + type: array + items: + type: string tailnetIPs: description: |- TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 1d910cf92c6c6..fa18a5debeaa9 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2679,6 +2679,51 @@ spec: type: array type: object type: object + staticEndpoints: + description: |- + Configuration for 'static endpoints' on proxies in order to facilitate + direct connections from other devices on the tailnet. + See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. + properties: + nodePort: + description: The configuration for static endpoints using NodePort Services. 
+ properties: + ports: + description: |- + The port ranges from which the operator will select NodePorts for the Services. + You must ensure that firewall rules allow UDP ingress traffic for these ports + to the node's external IPs. + The ports must be in the range of service node ports for the cluster (default `30000-32767`). + See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. + items: + properties: + endPort: + description: |- + endPort indicates that the range of ports from port to endPort if set, inclusive, + should be used. This field cannot be defined if the port field is not defined. + The endPort must be either unset, or equal or greater than port. + type: integer + port: + description: port represents a port selected to be used. This is a required field. + type: integer + required: + - port + type: object + minItems: 1 + type: array + selector: + additionalProperties: + type: string + description: |- + A selector which will be used to select the node's that will have their `ExternalIP`'s advertised + by the ProxyGroup as Static Endpoints. + type: object + required: + - ports + type: object + required: + - nodePort + type: object tailscale: description: |- TailscaleConfig contains options to configure the tailscale-specific @@ -2976,6 +3021,11 @@ spec: If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the node. type: string + staticEndpoints: + description: StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. 
+ items: + type: string + type: array tailnetIPs: description: |- TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) @@ -4791,6 +4841,14 @@ kind: ClusterRole metadata: name: tailscale-operator rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch - apiGroups: - "" resources: diff --git a/cmd/k8s-operator/nodeport-service-ports.go b/cmd/k8s-operator/nodeport-service-ports.go new file mode 100644 index 0000000000000..a9504e3e94f88 --- /dev/null +++ b/cmd/k8s-operator/nodeport-service-ports.go @@ -0,0 +1,203 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "context" + "fmt" + "math/rand/v2" + "regexp" + "sort" + "strconv" + "strings" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + k8soperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" +) + +const ( + tailscaledPortMax = 65535 + tailscaledPortMin = 1024 + testSvcName = "test-node-port-range" + + invalidSvcNodePort = 777777 +) + +// getServicesNodePortRange is a hacky function that attempts to determine Service NodePort range by +// creating a deliberately invalid Service with a NodePort that is too large and parsing the returned +// validation error. Returns nil if unable to determine port range. 
+// https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport +func getServicesNodePortRange(ctx context.Context, c client.Client, tsNamespace string, logger *zap.SugaredLogger) *tsapi.PortRange { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: testSvcName, + Namespace: tsNamespace, + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + { + Name: testSvcName, + Port: 8080, + TargetPort: intstr.FromInt32(8080), + Protocol: corev1.ProtocolUDP, + NodePort: invalidSvcNodePort, + }, + }, + }, + } + + // NOTE(ChaosInTheCRD): ideally this would be a server side dry-run but could not get it working + err := c.Create(ctx, svc) + if err == nil { + return nil + } + + if validPorts := getServicesNodePortRangeFromErr(err.Error()); validPorts != "" { + pr, err := parseServicesNodePortRange(validPorts) + if err != nil { + logger.Debugf("failed to parse NodePort range set for Kubernetes Cluster: %w", err) + return nil + } + + return pr + } + + return nil +} + +func getServicesNodePortRangeFromErr(err string) string { + reg := regexp.MustCompile(`\d{1,5}-\d{1,5}`) + matches := reg.FindAllString(err, -1) + if len(matches) != 1 { + return "" + } + + return matches[0] +} + +// parseServicesNodePortRange converts the `ValidPorts` string field in the Kubernetes PortAllocator error and converts it to +// PortRange +func parseServicesNodePortRange(p string) (*tsapi.PortRange, error) { + parts := strings.Split(p, "-") + s, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return nil, fmt.Errorf("failed to parse string as uint16: %w", err) + } + + var e uint64 + switch len(parts) { + case 1: + e = uint64(s) + case 2: + e, err = strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return nil, fmt.Errorf("failed to parse string as uint16: %w", err) + } + default: + return nil, fmt.Errorf("failed to parse port range %q", p) + 
} + + portRange := &tsapi.PortRange{Port: uint16(s), EndPort: uint16(e)} + if !portRange.IsValid() { + return nil, fmt.Errorf("port range %q is not valid", portRange.String()) + } + + return portRange, nil +} + +// validateNodePortRanges checks that the port range specified is valid. It also ensures that the specified ranges +// lie within the NodePort Service port range specified for the Kubernetes API Server. +func validateNodePortRanges(ctx context.Context, c client.Client, kubeRange *tsapi.PortRange, pc *tsapi.ProxyClass) error { + if pc.Spec.StaticEndpoints == nil { + return nil + } + + portRanges := pc.Spec.StaticEndpoints.NodePort.Ports + + if kubeRange != nil { + for _, pr := range portRanges { + if !kubeRange.Contains(pr.Port) || (pr.EndPort != 0 && !kubeRange.Contains(pr.EndPort)) { + return fmt.Errorf("range %q is not within Cluster configured range %q", pr.String(), kubeRange.String()) + } + } + } + + for _, r := range portRanges { + if !r.IsValid() { + return fmt.Errorf("port range %q is invalid", r.String()) + } + } + + // TODO(ChaosInTheCRD): if a ProxyClass that made another invalid (due to port range clash) is deleted, + // the invalid ProxyClass doesn't get reconciled on, and therefore will not go valid. We should fix this. 
+ proxyClassRanges, err := getPortsForProxyClasses(ctx, c) + if err != nil { + return fmt.Errorf("failed to get port ranges for ProxyClasses: %w", err) + } + + for _, r := range portRanges { + for pcName, pcr := range proxyClassRanges { + if pcName == pc.Name { + continue + } + if pcr.ClashesWith(r) { + return fmt.Errorf("port ranges for ProxyClass %q clash with existing ProxyClass %q", pc.Name, pcName) + } + } + } + + if len(portRanges) == 1 { + return nil + } + + sort.Slice(portRanges, func(i, j int) bool { + return portRanges[i].Port < portRanges[j].Port + }) + + for i := 1; i < len(portRanges); i++ { + prev := portRanges[i-1] + curr := portRanges[i] + if curr.Port <= prev.Port || curr.Port <= prev.EndPort { + return fmt.Errorf("overlapping ranges: %q and %q", prev.String(), curr.String()) + } + } + + return nil +} + +// getPortsForProxyClasses gets the port ranges for all the other existing ProxyClasses +func getPortsForProxyClasses(ctx context.Context, c client.Client) (map[string]tsapi.PortRanges, error) { + pcs := new(tsapi.ProxyClassList) + + err := c.List(ctx, pcs) + if err != nil { + return nil, fmt.Errorf("failed to list ProxyClasses: %w", err) + } + + portRanges := make(map[string]tsapi.PortRanges) + for _, i := range pcs.Items { + if !k8soperator.ProxyClassIsReady(&i) { + continue + } + if se := i.Spec.StaticEndpoints; se != nil && se.NodePort != nil { + portRanges[i.Name] = se.NodePort.Ports + } + } + + return portRanges, nil +} + +func getRandomPort() uint16 { + return uint16(rand.IntN(tailscaledPortMax-tailscaledPortMin+1) + tailscaledPortMin) +} diff --git a/cmd/k8s-operator/nodeport-services-ports_test.go b/cmd/k8s-operator/nodeport-services-ports_test.go new file mode 100644 index 0000000000000..9418bb8446bd8 --- /dev/null +++ b/cmd/k8s-operator/nodeport-services-ports_test.go @@ -0,0 +1,277 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + 
"testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tstest" +) + +func TestGetServicesNodePortRangeFromErr(t *testing.T) { + tests := []struct { + name string + errStr string + want string + }{ + { + name: "valid_error_string", + errStr: "NodePort 777777 is not in the allowed range 30000-32767", + want: "30000-32767", + }, + { + name: "error_string_with_different_message", + errStr: "some other error without a port range", + want: "", + }, + { + name: "error_string_with_multiple_port_ranges", + errStr: "range 1000-2000 and another range 3000-4000", + want: "", + }, + { + name: "empty_error_string", + errStr: "", + want: "", + }, + { + name: "error_string_with_range_at_start", + errStr: "30000-32767 is the range", + want: "30000-32767", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := getServicesNodePortRangeFromErr(tt.errStr); got != tt.want { + t.Errorf("got %v, want %v", got, tt.want) + } + }) + } +} + +func TestParseServicesNodePortRange(t *testing.T) { + tests := []struct { + name string + p string + want *tsapi.PortRange + wantErr bool + }{ + { + name: "valid_range", + p: "30000-32767", + want: &tsapi.PortRange{Port: 30000, EndPort: 32767}, + wantErr: false, + }, + { + name: "single_port_range", + p: "30000", + want: &tsapi.PortRange{Port: 30000, EndPort: 30000}, + wantErr: false, + }, + { + name: "invalid_format_non_numeric_end", + p: "30000-abc", + want: nil, + wantErr: true, + }, + { + name: "invalid_format_non_numeric_start", + p: "abc-32767", + want: nil, + wantErr: true, + }, + { + name: "empty_string", + p: "", + want: nil, + wantErr: true, + }, + { + name: "too_many_parts", + p: "1-2-3", + want: nil, + wantErr: true, + }, + { + name: "port_too_large_start", + p: "65536-65537", + want: nil, + wantErr: true, + }, + { + name: "port_too_large_end", + p: "30000-65536", + want: nil, + 
wantErr: true, + }, + { + name: "inverted_range", + p: "32767-30000", + want: nil, + wantErr: true, // IsValid() will fail + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + portRange, err := parseServicesNodePortRange(tt.p) + if (err != nil) != tt.wantErr { + t.Errorf("error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr { + return + } + + if portRange == nil { + t.Fatalf("got nil port range, expected %v", tt.want) + } + + if portRange.Port != tt.want.Port || portRange.EndPort != tt.want.EndPort { + t.Errorf("got = %v, want %v", portRange, tt.want) + } + }) + } +} + +func TestValidateNodePortRanges(t *testing.T) { + tests := []struct { + name string + portRanges []tsapi.PortRange + wantErr bool + }{ + { + name: "valid_ranges_with_unknown_kube_range", + portRanges: []tsapi.PortRange{ + {Port: 30003, EndPort: 30005}, + {Port: 30006, EndPort: 30007}, + }, + wantErr: false, + }, + { + name: "overlapping_ranges", + portRanges: []tsapi.PortRange{ + {Port: 30000, EndPort: 30010}, + {Port: 30005, EndPort: 30015}, + }, + wantErr: true, + }, + { + name: "adjacent_ranges_no_overlap", + portRanges: []tsapi.PortRange{ + {Port: 30010, EndPort: 30020}, + {Port: 30021, EndPort: 30022}, + }, + wantErr: false, + }, + { + name: "identical_ranges_are_overlapping", + portRanges: []tsapi.PortRange{ + {Port: 30005, EndPort: 30010}, + {Port: 30005, EndPort: 30010}, + }, + wantErr: true, + }, + { + name: "range_clashes_with_existing_proxyclass", + portRanges: []tsapi.PortRange{ + {Port: 31005, EndPort: 32070}, + }, + wantErr: true, + }, + } + + // as part of this test, we want to create an adjacent ProxyClass in order to ensure that if it clashes with the one created in this test + // that we get an error + cl := tstest.NewClock(tstest.ClockOpts{}) + opc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "other-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: defaultProxyClassAnnotations, 
+ }, + StaticEndpoints: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 31000}, {Port: 32000}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + }, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + }, + } + + fc := fake.NewClientBuilder(). + WithObjects(opc). + WithStatusSubresource(opc). + WithScheme(tsapi.GlobalScheme). + Build() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: defaultProxyClassAnnotations, + }, + StaticEndpoints: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: tt.portRanges, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + }, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + }, + } + err := validateNodePortRanges(context.Background(), fc, &tsapi.PortRange{Port: 30000, EndPort: 32767}, pc) + if (err != nil) != tt.wantErr { + t.Errorf("unexpected error: %v", err) + } + }) + } +} + +func TestGetRandomPort(t *testing.T) { + for range 100 { + port := getRandomPort() + if port < tailscaledPortMin || port > tailscaledPortMax { + t.Errorf("generated port %d which is out of range [%d, %d]", port, tailscaledPortMin, tailscaledPortMax) + } + } +} diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index a08dd4da8c52f..cd1ae8158d01c 100644 --- a/cmd/k8s-operator/operator.go 
+++ b/cmd/k8s-operator/operator.go @@ -26,7 +26,9 @@ import ( networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" + klabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" toolscache "k8s.io/client-go/tools/cache" @@ -39,6 +41,7 @@ import ( kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "tailscale.com/client/local" "tailscale.com/client/tailscale" @@ -228,6 +231,17 @@ waitOnline: return s, tsc } +// predicate function for filtering to ensure we *don't* reconcile on tailscale managed Kubernetes Services +func serviceManagedResourceFilterPredicate() predicate.Predicate { + return predicate.NewPredicateFuncs(func(object client.Object) bool { + if svc, ok := object.(*corev1.Service); !ok { + return false + } else { + return !isManagedResource(svc) + } + }) +} + // runReconcilers starts the controller-runtime manager and registers the // ServiceReconciler. It blocks forever. func runReconcilers(opts reconcilerOpts) { @@ -374,7 +388,7 @@ func runReconcilers(opts reconcilerOpts) { ingressSvcFromEpsFilter := handler.EnqueueRequestsFromMapFunc(ingressSvcFromEps(mgr.GetClient(), opts.log.Named("service-pg-reconciler"))) err = builder. ControllerManagedBy(mgr). - For(&corev1.Service{}). + For(&corev1.Service{}, builder.WithPredicates(serviceManagedResourceFilterPredicate())). Named("service-pg-reconciler"). Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAServicesFromSecret(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). 
@@ -519,16 +533,19 @@ func runReconcilers(opts reconcilerOpts) { // ProxyClass reconciler gets triggered on ServiceMonitor CRD changes to ensure that any ProxyClasses, that // define that a ServiceMonitor should be created, were set to invalid because the CRD did not exist get // reconciled if the CRD is applied at a later point. + kPortRange := getServicesNodePortRange(context.Background(), mgr.GetClient(), opts.tailscaleNamespace, startlog) serviceMonitorFilter := handler.EnqueueRequestsFromMapFunc(proxyClassesWithServiceMonitor(mgr.GetClient(), opts.log)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyClass{}). Named("proxyclass-reconciler"). Watches(&apiextensionsv1.CustomResourceDefinition{}, serviceMonitorFilter). Complete(&ProxyClassReconciler{ - Client: mgr.GetClient(), - recorder: eventRecorder, - logger: opts.log.Named("proxyclass-reconciler"), - clock: tstime.DefaultClock{}, + Client: mgr.GetClient(), + nodePortRange: kPortRange, + recorder: eventRecorder, + tsNamespace: opts.tailscaleNamespace, + logger: opts.log.Named("proxyclass-reconciler"), + clock: tstime.DefaultClock{}, }) if err != nil { startlog.Fatal("could not create proxyclass reconciler: %v", err) @@ -587,9 +604,11 @@ func runReconcilers(opts reconcilerOpts) { // ProxyGroup reconciler. ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{}) proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) + nodeFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(nodeHandlerForProxyGroup(mgr.GetClient(), opts.defaultProxyClass, startlog)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyGroup{}). Named("proxygroup-reconciler"). + Watches(&corev1.Service{}, ownedByProxyGroupFilter). Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter). Watches(&corev1.ConfigMap{}, ownedByProxyGroupFilter). Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter). 
@@ -597,6 +616,7 @@ func runReconcilers(opts reconcilerOpts) { Watches(&rbacv1.Role{}, ownedByProxyGroupFilter). Watches(&rbacv1.RoleBinding{}, ownedByProxyGroupFilter). Watches(&tsapi.ProxyClass{}, proxyClassFilterForProxyGroup). + Watches(&corev1.Node{}, nodeFilterForProxyGroup). Complete(&ProxyGroupReconciler{ recorder: eventRecorder, Client: mgr.GetClient(), @@ -840,6 +860,64 @@ func proxyClassHandlerForConnector(cl client.Client, logger *zap.SugaredLogger) } } +// nodeHandlerForProxyGroup returns a handler that, for a given Node, returns a +// list of reconcile requests for ProxyGroups that should be reconciled for the +// Node event. ProxyGroups need to be reconciled for Node events if they are +// configured to expose tailscaled static endpoints to tailnet using NodePort +// Services. +func nodeHandlerForProxyGroup(cl client.Client, defaultProxyClass string, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + pgList := new(tsapi.ProxyGroupList) + if err := cl.List(ctx, pgList); err != nil { + logger.Debugf("error listing ProxyGroups for ProxyClass: %v", err) + return nil + } + + reqs := make([]reconcile.Request, 0) + for _, pg := range pgList.Items { + if pg.Spec.ProxyClass == "" && defaultProxyClass == "" { + continue + } + + pc := defaultProxyClass + if pc == "" { + pc = pg.Spec.ProxyClass + } + + proxyClass := &tsapi.ProxyClass{} + if err := cl.Get(ctx, types.NamespacedName{Name: pc}, proxyClass); err != nil { + logger.Debugf("error getting ProxyClass %q: %v", pg.Spec.ProxyClass, err) + return nil + } + + stat := proxyClass.Spec.StaticEndpoints + if stat == nil { + continue + } + + // If the selector is empty, all nodes match. 
+ // TODO(ChaosInTheCRD): think about how this must be handled if we want to limit the number of nodes used + if len(stat.NodePort.Selector) == 0 { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + continue + } + + selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: stat.NodePort.Selector, + }) + if err != nil { + logger.Debugf("error converting `spec.staticEndpoints.nodePort.selector` to Selector: %v", err) + return nil + } + + if selector.Matches(klabels.Set(o.GetLabels())) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + } + } + return reqs + } +} + // proxyClassHandlerForProxyGroup returns a handler that, for a given ProxyClass, // returns a list of reconcile requests for all Connectors that have // .spec.proxyClass set. diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go index 5ec9897d0a8b7..2d51b351d3907 100644 --- a/cmd/k8s-operator/proxyclass.go +++ b/cmd/k8s-operator/proxyclass.go @@ -44,22 +44,24 @@ const ( type ProxyClassReconciler struct { client.Client - recorder record.EventRecorder - logger *zap.SugaredLogger - clock tstime.Clock + recorder record.EventRecorder + logger *zap.SugaredLogger + clock tstime.Clock + tsNamespace string mu sync.Mutex // protects following // managedProxyClasses is a set of all ProxyClass resources that we're currently // managing. This is only used for metrics. managedProxyClasses set.Slice[types.UID] + // nodePortRange is the NodePort range set for the Kubernetes Cluster. This is used + // when validating port ranges configured by users for spec.StaticEndpoints + nodePortRange *tsapi.PortRange } -var ( - // gaugeProxyClassResources tracks the number of ProxyClass resources - // that we're currently managing. 
- gaugeProxyClassResources = clientmetric.NewGauge("k8s_proxyclass_resources") -) +// gaugeProxyClassResources tracks the number of ProxyClass resources +// that we're currently managing. +var gaugeProxyClassResources = clientmetric.NewGauge("k8s_proxyclass_resources") func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { logger := pcr.logger.With("ProxyClass", req.Name) @@ -96,7 +98,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re pcr.mu.Unlock() oldPCStatus := pc.Status.DeepCopy() - if errs := pcr.validate(ctx, pc); errs != nil { + if errs := pcr.validate(ctx, pc, logger); errs != nil { msg := fmt.Sprintf(messageProxyClassInvalid, errs.ToAggregate().Error()) pcr.recorder.Event(pc, corev1.EventTypeWarning, reasonProxyClassInvalid, msg) tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionFalse, reasonProxyClassInvalid, msg, pc.Generation, pcr.clock, logger) @@ -112,7 +114,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re return reconcile.Result{}, nil } -func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyClass) (violations field.ErrorList) { +func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyClass, logger *zap.SugaredLogger) (violations field.ErrorList) { if sts := pc.Spec.StatefulSet; sts != nil { if len(sts.Labels) > 0 { if errs := metavalidation.ValidateLabels(sts.Labels.Parse(), field.NewPath(".spec.statefulSet.labels")); errs != nil { @@ -183,6 +185,17 @@ func (pcr *ProxyClassReconciler) validate(ctx context.Context, pc *tsapi.ProxyCl violations = append(violations, errs...) 
} } + + if stat := pc.Spec.StaticEndpoints; stat != nil { + if err := validateNodePortRanges(ctx, pcr.Client, pcr.nodePortRange, pc); err != nil { + var prs tsapi.PortRanges = stat.NodePort.Ports + violations = append(violations, field.TypeInvalid(field.NewPath("spec", "staticEndpoints", "nodePort", "ports"), prs.String(), err.Error())) + } + + if len(stat.NodePort.Selector) < 1 { + logger.Debug("no Selectors specified on `spec.staticEndpoints.nodePort.selectors` field") + } + } // We do not validate embedded fields (security context, resource // requirements etc) as we inherit upstream validation for those fields. // Invalid values would get rejected by upstream validations at apply diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go index 48290eea782b5..ae0f63d99ea4d 100644 --- a/cmd/k8s-operator/proxyclass_test.go +++ b/cmd/k8s-operator/proxyclass_test.go @@ -131,9 +131,11 @@ func TestProxyClass(t *testing.T) { proxyClass.Spec.StatefulSet.Pod.TailscaleInitContainer.Image = pc.Spec.StatefulSet.Pod.TailscaleInitContainer.Image proxyClass.Spec.StatefulSet.Pod.TailscaleContainer.Env = []tsapi.Env{{Name: "TS_USERSPACE", Value: "true"}, {Name: "EXPERIMENTAL_TS_CONFIGFILE_PATH"}, {Name: "EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS"}} }) - expectedEvents := []string{"Warning CustomTSEnvVar ProxyClass overrides the default value for TS_USERSPACE env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future.", + expectedEvents := []string{ + "Warning CustomTSEnvVar ProxyClass overrides the default value for TS_USERSPACE env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future.", "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_TS_CONFIGFILE_PATH env var for tailscale container. 
Running with custom values for Tailscale env vars is not recommended and might break in the future.", - "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future."} + "Warning CustomTSEnvVar ProxyClass overrides the default value for EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS env var for tailscale container. Running with custom values for Tailscale env vars is not recommended and might break in the future.", + } expectReconciled(t, pcr, "", "test") expectEvents(t, fr, expectedEvents) @@ -176,6 +178,110 @@ func TestProxyClass(t *testing.T) { expectEqual(t, fc, pc) } +func TestValidateProxyClassStaticEndpoints(t *testing.T) { + for name, tc := range map[string]struct { + staticEndpointConfig *tsapi.StaticEndpointsConfig + valid bool + }{ + "no_static_endpoints": { + staticEndpointConfig: nil, + valid: true, + }, + "valid_specific_ports": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: true, + }, + "valid_port_ranges": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3000, EndPort: 3002}, + {Port: 3005, EndPort: 3007}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: true, + }, + "overlapping_port_ranges": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 1000, EndPort: 2000}, + {Port: 1500, EndPort: 1800}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: false, + }, + "clashing_port_and_range": { + staticEndpointConfig: 
&tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3005}, + {Port: 3001, EndPort: 3010}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: false, + }, + "malformed_port_range": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001, EndPort: 3000}, + }, + Selector: map[string]string{"kubernetes.io/hostname": "foobar"}, + }, + }, + valid: false, + }, + "empty_selector": { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{{Port: 3000}}, + Selector: map[string]string{}, + }, + }, + valid: true, + }, + } { + t.Run(name, func(t *testing.T) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + Build() + zl, _ := zap.NewDevelopment() + pcr := &ProxyClassReconciler{ + logger: zl.Sugar(), + Client: fc, + } + + pc := &tsapi.ProxyClass{ + Spec: tsapi.ProxyClassSpec{ + StaticEndpoints: tc.staticEndpointConfig, + }, + } + + logger := pcr.logger.With("ProxyClass", pc) + err := pcr.validate(context.Background(), pc, logger) + valid := err == nil + if valid != tc.valid { + t.Errorf("expected valid=%v, got valid=%v, err=%v", tc.valid, valid, err) + } + }) + } +} + func TestValidateProxyClass(t *testing.T) { for name, tc := range map[string]struct { pc *tsapi.ProxyClass @@ -219,8 +325,12 @@ func TestValidateProxyClass(t *testing.T) { }, } { t.Run(name, func(t *testing.T) { - pcr := &ProxyClassReconciler{} - err := pcr.validate(context.Background(), tc.pc) + zl, _ := zap.NewDevelopment() + pcr := &ProxyClassReconciler{ + logger: zl.Sugar(), + } + logger := pcr.logger.With("ProxyClass", tc.pc) + err := pcr.validate(context.Background(), tc.pc, logger) valid := err == nil if valid != tc.valid { t.Errorf("expected valid=%v, got valid=%v, err=%v", tc.valid, valid, err) diff --git a/cmd/k8s-operator/proxygroup.go 
b/cmd/k8s-operator/proxygroup.go index 0d5eff551e8de..328262031b85c 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -11,6 +11,7 @@ import ( "errors" "fmt" "net/http" + "net/netip" "slices" "strings" "sync" @@ -24,6 +25,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -48,7 +50,8 @@ const ( reasonProxyGroupInvalid = "ProxyGroupInvalid" // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c - optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" + optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" + staticEndpointsMaxAddrs = 2 ) var ( @@ -174,7 +177,8 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } } - if err = r.maybeProvision(ctx, pg, proxyClass); err != nil { + isProvisioned, err := r.maybeProvision(ctx, pg, proxyClass) + if err != nil { reason := reasonProxyGroupCreationFailed msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", err) if strings.Contains(err.Error(), optimisticLockErrorMsg) { @@ -185,9 +189,20 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } else { r.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg) } + return setStatusReady(pg, metav1.ConditionFalse, reason, msg) } + if !isProvisioned { + if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { + // An error encountered here should get returned by the Reconcile function. 
+ if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { + return reconcile.Result{}, errors.Join(err, updateErr) + } + } + return + } + desiredReplicas := int(pgReplicas(pg)) if len(pg.Status.Devices) < desiredReplicas { message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) @@ -230,15 +245,42 @@ func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc } } -func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) error { +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (isProvisioned bool, err error) { logger := r.logger(pg.Name) r.mu.Lock() r.ensureAddedToGaugeForProxyGroup(pg) r.mu.Unlock() - if err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass); err != nil { - return fmt.Errorf("error provisioning config Secrets: %w", err) + svcToNodePorts := make(map[string]uint16) + var tailscaledPort *uint16 + if proxyClass != nil && proxyClass.Spec.StaticEndpoints != nil { + svcToNodePorts, tailscaledPort, err = r.ensureNodePortServiceCreated(ctx, pg, proxyClass) + if err != nil { + wrappedErr := fmt.Errorf("error provisioning NodePort Services for static endpoints: %w", err) + var allocatePortErr *allocatePortsErr + if errors.As(err, &allocatePortErr) { + reason := reasonProxyGroupCreationFailed + msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", wrappedErr) + r.setStatusReady(pg, metav1.ConditionFalse, reason, msg, logger) + return false, nil + } + return false, wrappedErr + } } + + staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass, svcToNodePorts) + if err != nil { + wrappedErr := fmt.Errorf("error provisioning config Secrets: %w", err) + var selectorErr *FindStaticEndpointErr + if errors.As(err, &selectorErr) { + reason := reasonProxyGroupCreationFailed + msg := fmt.Sprintf("error provisioning ProxyGroup resources: 
%s", wrappedErr) + r.setStatusReady(pg, metav1.ConditionFalse, reason, msg, logger) + return false, nil + } + return false, wrappedErr + } + // State secrets are precreated so we can use the ProxyGroup CR as their owner ref. stateSecrets := pgStateSecrets(pg, r.tsNamespace) for _, sec := range stateSecrets { @@ -247,7 +289,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences }); err != nil { - return fmt.Errorf("error provisioning state Secrets: %w", err) + return false, fmt.Errorf("error provisioning state Secrets: %w", err) } } sa := pgServiceAccount(pg, r.tsNamespace) @@ -256,7 +298,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences }); err != nil { - return fmt.Errorf("error provisioning ServiceAccount: %w", err) + return false, fmt.Errorf("error provisioning ServiceAccount: %w", err) } role := pgRole(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { @@ -265,7 +307,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences r.Rules = role.Rules }); err != nil { - return fmt.Errorf("error provisioning Role: %w", err) + return false, fmt.Errorf("error provisioning Role: %w", err) } roleBinding := pgRoleBinding(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { @@ -275,7 +317,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.RoleRef = roleBinding.RoleRef r.Subjects = roleBinding.Subjects }); err != nil { - return fmt.Errorf("error provisioning RoleBinding: %w", err) + return false, fmt.Errorf("error provisioning RoleBinding: 
%w", err) } if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { cm, hp := pgEgressCM(pg, r.tsNamespace) @@ -284,7 +326,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences mak.Set(&existing.BinaryData, egressservices.KeyHEPPings, hp) }); err != nil { - return fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err) + return false, fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err) } } if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { @@ -293,12 +335,12 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.Labels = cm.ObjectMeta.Labels existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences }); err != nil { - return fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err) + return false, fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } - ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, proxyClass) + ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, tailscaledPort, proxyClass) if err != nil { - return fmt.Errorf("error generating StatefulSet spec: %w", err) + return false, fmt.Errorf("error generating StatefulSet spec: %w", err) } cfg := &tailscaleSTSConfig{ proxyType: string(pg.Spec.Type), @@ -306,7 +348,6 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro ss = applyProxyClassToStatefulSet(proxyClass, ss, cfg, logger) updateSS := func(s *appsv1.StatefulSet) { - s.Spec = ss.Spec s.ObjectMeta.Labels = ss.ObjectMeta.Labels @@ -314,7 +355,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences } if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, updateSS); err != nil { - return fmt.Errorf("error provisioning StatefulSet: %w", err) + return false, 
fmt.Errorf("error provisioning StatefulSet: %w", err) } mo := &metricsOpts{ tsNamespace: r.tsNamespace, @@ -323,26 +364,150 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro proxyType: "proxygroup", } if err := reconcileMetricsResources(ctx, logger, mo, proxyClass, r.Client); err != nil { - return fmt.Errorf("error reconciling metrics resources: %w", err) + return false, fmt.Errorf("error reconciling metrics resources: %w", err) } - if err := r.cleanupDanglingResources(ctx, pg); err != nil { - return fmt.Errorf("error cleaning up dangling resources: %w", err) + if err := r.cleanupDanglingResources(ctx, pg, proxyClass); err != nil { + return false, fmt.Errorf("error cleaning up dangling resources: %w", err) } - devices, err := r.getDeviceInfo(ctx, pg) + devices, err := r.getDeviceInfo(ctx, staticEndpoints, pg) if err != nil { - return fmt.Errorf("failed to get device info: %w", err) + return false, fmt.Errorf("failed to get device info: %w", err) } pg.Status.Devices = devices - return nil + return true, nil +} + +// getServicePortsForProxyGroups returns a map of ProxyGroup Service names to their NodePorts, +// and a set of all allocated NodePorts for quick occupancy checking. 
+func getServicePortsForProxyGroups(ctx context.Context, c client.Client, namespace string, portRanges tsapi.PortRanges) (map[string]uint16, set.Set[uint16], error) { + svcs := new(corev1.ServiceList) + matchingLabels := client.MatchingLabels(map[string]string{ + LabelParentType: "proxygroup", + }) + + err := c.List(ctx, svcs, matchingLabels, client.InNamespace(namespace)) + if err != nil { + return nil, nil, fmt.Errorf("failed to list ProxyGroup Services: %w", err) + } + + svcToNodePorts := map[string]uint16{} + usedPorts := set.Set[uint16]{} + for _, svc := range svcs.Items { + if len(svc.Spec.Ports) == 1 && svc.Spec.Ports[0].NodePort != 0 { + p := uint16(svc.Spec.Ports[0].NodePort) + if portRanges.Contains(p) { + svcToNodePorts[svc.Name] = p + usedPorts.Add(p) + } + } + } + + return svcToNodePorts, usedPorts, nil +} + +type allocatePortsErr struct { + msg string +} + +func (e *allocatePortsErr) Error() string { + return e.msg +} + +func (r *ProxyGroupReconciler) allocatePorts(ctx context.Context, pg *tsapi.ProxyGroup, proxyClassName string, portRanges tsapi.PortRanges) (map[string]uint16, error) { + replicaCount := int(pgReplicas(pg)) + svcToNodePorts, usedPorts, err := getServicePortsForProxyGroups(ctx, r.Client, r.tsNamespace, portRanges) + if err != nil { + return nil, &allocatePortsErr{msg: fmt.Sprintf("failed to find ports for existing ProxyGroup NodePort Services: %s", err.Error())} + } + + replicasAllocated := 0 + for i := range pgReplicas(pg) { + if _, ok := svcToNodePorts[pgNodePortServiceName(pg.Name, i)]; !ok { + svcToNodePorts[pgNodePortServiceName(pg.Name, i)] = 0 + } else { + replicasAllocated++ + } + } + + for replica, port := range svcToNodePorts { + if port == 0 { + for p := range portRanges.All() { + if !usedPorts.Contains(p) { + svcToNodePorts[replica] = p + usedPorts.Add(p) + replicasAllocated++ + break + } + } + } + } + + if replicasAllocated < replicaCount { + return nil, &allocatePortsErr{msg: fmt.Sprintf("not enough available ports to 
allocate all replicas (needed %d, got %d). Field 'spec.staticEndpoints.nodePort.ports' on ProxyClass %q must have bigger range allocated", replicaCount, usedPorts.Len(), proxyClassName)} + } + + return svcToNodePorts, nil +} + +func (r *ProxyGroupReconciler) ensureNodePortServiceCreated(ctx context.Context, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) (map[string]uint16, *uint16, error) { + // NOTE: (ChaosInTheCRD) we want the same TargetPort for every static endpoint NodePort Service for the ProxyGroup + tailscaledPort := getRandomPort() + svcs := []*corev1.Service{} + for i := range pgReplicas(pg) { + replicaName := pgNodePortServiceName(pg.Name, i) + + svc := &corev1.Service{} + err := r.Get(ctx, types.NamespacedName{Name: replicaName, Namespace: r.tsNamespace}, svc) + if err != nil && !apierrors.IsNotFound(err) { + return nil, nil, fmt.Errorf("error getting Kubernetes Service %q: %w", replicaName, err) + } + if apierrors.IsNotFound(err) { + svcs = append(svcs, pgNodePortService(pg, replicaName, r.tsNamespace)) + } else { + // NOTE: if we can we want to recover the random port used for tailscaled, + // as well as the NodePort previously used for that Service + if len(svc.Spec.Ports) == 1 { + if svc.Spec.Ports[0].Port != 0 { + tailscaledPort = uint16(svc.Spec.Ports[0].Port) + } + } + svcs = append(svcs, svc) + } + } + + svcToNodePorts, err := r.allocatePorts(ctx, pg, pc.Name, pc.Spec.StaticEndpoints.NodePort.Ports) + if err != nil { + return nil, nil, fmt.Errorf("failed to allocate NodePorts to ProxyGroup Services: %w", err) + } + + for _, svc := range svcs { + // NOTE: we know that every service is going to have 1 port here + svc.Spec.Ports[0].Port = int32(tailscaledPort) + svc.Spec.Ports[0].TargetPort = intstr.FromInt(int(tailscaledPort)) + svc.Spec.Ports[0].NodePort = int32(svcToNodePorts[svc.Name]) + + _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, svc, func(s *corev1.Service) { + s.ObjectMeta.Labels = svc.ObjectMeta.Labels + s.ObjectMeta.Annotations = 
svc.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = svc.ObjectMeta.OwnerReferences + s.Spec.Selector = svc.Spec.Selector + s.Spec.Ports = svc.Spec.Ports + }) + if err != nil { + return nil, nil, fmt.Errorf("error creating/updating Kubernetes NodePort Service %q: %w", svc.Name, err) + } + } + + return svcToNodePorts, ptr.To(tailscaledPort), nil } // cleanupDanglingResources ensures we don't leak config secrets, state secrets, and // tailnet devices when the number of replicas specified is reduced. -func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg *tsapi.ProxyGroup) error { +func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) error { logger := r.logger(pg.Name) metadata, err := r.getNodeMetadata(ctx, pg) if err != nil { @@ -371,6 +536,30 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg return fmt.Errorf("error deleting config Secret %s: %w", configSecret.Name, err) } } + // NOTE(ChaosInTheCRD): we shouldn't need to get the service first, checking for a not found error should be enough + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-nodeport", m.stateSecret.Name), + Namespace: m.stateSecret.Namespace, + }, + } + if err := r.Delete(ctx, svc); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting static endpoints Kubernetes Service %q: %w", svc.Name, err) + } + } + } + + // If the ProxyClass has its StaticEndpoints config removed, we want to remove all of the NodePort Services + if pc != nil && pc.Spec.StaticEndpoints == nil { + labels := map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentType: proxyTypeProxyGroup, + LabelParentName: pg.Name, + } + if err := r.DeleteAllOf(ctx, &corev1.Service{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + return fmt.Errorf("error deleting Kubernetes Services for static endpoints: 
%w", err) + } } return nil @@ -396,7 +585,8 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.Proxy mo := &metricsOpts{ proxyLabels: pgLabels(pg.Name, nil), tsNamespace: r.tsNamespace, - proxyType: "proxygroup"} + proxyType: "proxygroup", + } if err := maybeCleanupMetricsResources(ctx, mo, r.Client); err != nil { return false, fmt.Errorf("error cleaning up metrics resources: %w", err) } @@ -424,8 +614,9 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailc return nil } -func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (err error) { +func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass, svcToNodePorts map[string]uint16) (endpoints map[string][]netip.AddrPort, err error) { logger := r.logger(pg.Name) + endpoints = make(map[string][]netip.AddrPort, pgReplicas(pg)) for i := range pgReplicas(pg) { cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -441,7 +632,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p logger.Debugf("Secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) existingCfgSecret = cfgSecret.DeepCopy() } else if !apierrors.IsNotFound(err) { - return err + return nil, err } var authKey string @@ -453,19 +644,32 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } authKey, err = newAuthKey(ctx, r.tsClient, tags) if err != nil { - return err + return nil, err + } + } + + replicaName := pgNodePortServiceName(pg.Name, i) + if len(svcToNodePorts) > 0 { + port, ok := svcToNodePorts[replicaName] + if !ok { + return nil, fmt.Errorf("could not find configured NodePort for ProxyGroup replica %q", replicaName) + } + + endpoints[replicaName], err = r.findStaticEndpoints(ctx, existingCfgSecret, proxyClass, port, logger) + if err != nil { + return nil, 
fmt.Errorf("could not find static endpoints for replica %q: %w", replicaName, err) } } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret) + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret, endpoints[replicaName]) if err != nil { - return fmt.Errorf("error creating tailscaled config: %w", err) + return nil, fmt.Errorf("error creating tailscaled config: %w", err) } for cap, cfg := range configs { cfgJSON, err := json.Marshal(cfg) if err != nil { - return fmt.Errorf("error marshalling tailscaled config: %w", err) + return nil, fmt.Errorf("error marshalling tailscaled config: %w", err) } mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } @@ -474,18 +678,111 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p if !apiequality.Semantic.DeepEqual(existingCfgSecret, cfgSecret) { logger.Debugf("Updating the existing ProxyGroup config Secret %s", cfgSecret.Name) if err := r.Update(ctx, cfgSecret); err != nil { - return err + return nil, err } } } else { logger.Debugf("Creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) if err := r.Create(ctx, cfgSecret); err != nil { - return err + return nil, err } } } - return nil + return endpoints, nil +} + +type FindStaticEndpointErr struct { + msg string +} + +func (e *FindStaticEndpointErr) Error() string { + return e.msg +} + +// findStaticEndpoints returns up to two `netip.AddrPort` entries, derived from the ExternalIPs of Nodes that +// match the `proxyClass`'s selector within the StaticEndpoints configuration. The port is set to the replica's NodePort Service Port. 
+func (r *ProxyGroupReconciler) findStaticEndpoints(ctx context.Context, existingCfgSecret *corev1.Secret, proxyClass *tsapi.ProxyClass, port uint16, logger *zap.SugaredLogger) ([]netip.AddrPort, error) { + var currAddrs []netip.AddrPort + if existingCfgSecret != nil { + oldConfB := existingCfgSecret.Data[tsoperator.TailscaledConfigFileName(106)] + if len(oldConfB) > 0 { + var oldConf ipn.ConfigVAlpha + if err := json.Unmarshal(oldConfB, &oldConf); err == nil { + currAddrs = oldConf.StaticEndpoints + } else { + logger.Debugf("failed to unmarshal tailscaled config from secret %q: %v", existingCfgSecret.Name, err) + } + } else { + logger.Debugf("failed to get tailscaled config from secret %q: empty data", existingCfgSecret.Name) + } + } + + nodes := new(corev1.NodeList) + selectors := client.MatchingLabels(proxyClass.Spec.StaticEndpoints.NodePort.Selector) + + err := r.List(ctx, nodes, selectors) + if err != nil { + return nil, fmt.Errorf("failed to list nodes: %w", err) + } + + if len(nodes.Items) == 0 { + return nil, &FindStaticEndpointErr{msg: fmt.Sprintf("failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass %q", proxyClass.Name)} + } + + endpoints := []netip.AddrPort{} + + // NOTE(ChaosInTheCRD): Setting a hard limit of two static endpoints. + newAddrs := []netip.AddrPort{} + for _, n := range nodes.Items { + for _, a := range n.Status.Addresses { + if a.Type == corev1.NodeExternalIP { + addr := getStaticEndpointAddress(&a, port) + if addr == nil { + logger.Debugf("failed to parse %q address on node %q: %q", corev1.NodeExternalIP, n.Name, a.Address) + continue + } + + // we want to add the currently used IPs first before + // adding new ones. 
+ if currAddrs != nil && slices.Contains(currAddrs, *addr) { + endpoints = append(endpoints, *addr) + } else { + newAddrs = append(newAddrs, *addr) + } + } + + if len(endpoints) == 2 { + break + } + } + } + + // if the 2 endpoints limit hasn't been reached, we + // can start adding newIPs. + if len(endpoints) < 2 { + for _, a := range newAddrs { + endpoints = append(endpoints, a) + if len(endpoints) == 2 { + break + } + } + } + + if len(endpoints) == 0 { + return nil, &FindStaticEndpointErr{msg: fmt.Sprintf("failed to find any `status.addresses` of type %q on nodes using configured Selectors on `spec.staticEndpoints.nodePort.selectors` for ProxyClass %q", corev1.NodeExternalIP, proxyClass.Name)} + } + + return endpoints, nil +} + +func getStaticEndpointAddress(a *corev1.NodeAddress, port uint16) *netip.AddrPort { + addr, err := netip.ParseAddr(a.Address) + if err != nil { + return nil + } + + return ptr.To(netip.AddrPortFrom(addr, port)) } // ensureAddedToGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource is updated when the ProxyGroup @@ -514,7 +811,7 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) } -func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { +func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret, staticEndpoints []netip.AddrPort) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", @@ -531,6 +828,10 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32 conf.AcceptRoutes = "true" } + if len(staticEndpoints) > 0 { + conf.StaticEndpoints = staticEndpoints + } + deviceAuthed := false for _, d := range pg.Status.Devices { if d.Hostname == *conf.Hostname { @@ -624,7 +925,7 @@ func (r 
*ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr return metadata, nil } -func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, pg *tsapi.ProxyGroup) (devices []tsapi.TailnetDevice, _ error) { +func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoints map[string][]netip.AddrPort, pg *tsapi.ProxyGroup) (devices []tsapi.TailnetDevice, _ error) { metadata, err := r.getNodeMetadata(ctx, pg) if err != nil { return nil, err @@ -638,10 +939,21 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, pg *tsapi.Prox if !ok { continue } - devices = append(devices, tsapi.TailnetDevice{ + + dev := tsapi.TailnetDevice{ Hostname: device.Hostname, TailnetIPs: device.TailnetIPs, - }) + } + + if ep, ok := staticEndpoints[device.Hostname]; ok && len(ep) > 0 { + eps := make([]string, 0, len(ep)) + for _, e := range ep { + eps = append(eps, e.String()) + } + dev.StaticEndpoints = eps + } + + devices = append(devices, dev) } return devices, nil @@ -655,3 +967,8 @@ type nodeMetadata struct { tsID tailcfg.StableNodeID dnsName string } + +func (pr *ProxyGroupReconciler) setStatusReady(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason string, msg string, logger *zap.SugaredLogger) { + pr.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, msg, pg.Generation, pr.clock, logger) +} diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 1d12c39e0241e..20e797f0c07cd 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -9,6 +9,7 @@ import ( "fmt" "slices" "strconv" + "strings" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -23,12 +24,43 @@ import ( "tailscale.com/types/ptr" ) -// deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies have enough chance to terminate gracefully. 
-const deletionGracePeriodSeconds int64 = 360 +const ( + // deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies have enough chance to terminate gracefully. + deletionGracePeriodSeconds int64 = 360 + staticEndpointPortName = "static-endpoint-port" +) + +func pgNodePortServiceName(proxyGroupName string, replica int32) string { + return fmt.Sprintf("%s-%d-nodeport", proxyGroupName, replica) +} + +func pgNodePortService(pg *tsapi.ProxyGroup, name string, namespace string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Ports: []corev1.ServicePort{ + // NOTE(ChaosInTheCRD): we set the ports once we've iterated over every svc and found any old configuration we want to persist. + { + Name: staticEndpointPortName, + Protocol: corev1.ProtocolUDP, + }, + }, + Selector: map[string]string{ + appsv1.StatefulSetPodNameLabel: strings.TrimSuffix(name, "-nodeport"), + }, + }, + } +} // Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be // applied over the top after. 
-func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { +func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, port *uint16, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err) @@ -144,6 +176,13 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string }, } + if port != nil { + envs = append(envs, corev1.EnvVar{ + Name: "PORT", + Value: strconv.Itoa(int(*port)), + }) + } + if tsFirewallMode != "" { envs = append(envs, corev1.EnvVar{ Name: "TS_DEBUG_FIREWALL_MODE", diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index c556ae94a0de4..8ffce2c0c68ac 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -9,6 +9,8 @@ import ( "context" "encoding/json" "fmt" + "net/netip" + "slices" "testing" "time" @@ -18,6 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" @@ -32,14 +35,772 @@ import ( "tailscale.com/types/ptr" ) -const testProxyImage = "tailscale/tailscale:test" +const ( + testProxyImage = "tailscale/tailscale:test" + initialCfgHash = "6632726be70cf224049580deb4d317bba065915b5fd415461d60ed621c91b196" +) + +var ( + defaultProxyClassAnnotations = map[string]string{ + "some-annotation": "from-the-proxy-class", + } + + defaultReplicas = ptr.To(int32(2)) + defaultStaticEndpointConfig = &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 30001}, {Port: 30002}, + }, + Selector: 
map[string]string{ + "foo/bar": "baz", + }, + }, + } +) + +func TestProxyGroupWithStaticEndpoints(t *testing.T) { + type testNodeAddr struct { + ip string + addrType corev1.NodeAddressType + } -var defaultProxyClassAnnotations = map[string]string{ - "some-annotation": "from-the-proxy-class", + type testNode struct { + name string + addresses []testNodeAddr + labels map[string]string + } + + type reconcile struct { + staticEndpointConfig *tsapi.StaticEndpointsConfig + replicas *int32 + nodes []testNode + expectedIPs []netip.Addr + expectedEvents []string + expectedErr string + expectStatefulSet bool + } + + testCases := []struct { + name string + description string + reconciles []reconcile + }{ + { + // the reconciler should manage to create static endpoints when Nodes have IPv6 addresses. + name: "IPv6", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + {Port: 3009}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: ptr.To(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "2001:0db8::1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "2001:0db8::2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "2001:0db8::3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("2001:0db8::1"), netip.MustParseAddr("2001:0db8::2"), netip.MustParseAddr("2001:0db8::3")}, + expectedEvents: []string{}, + expectedErr: "", + expectStatefulSet: true, + }, + }, + }, + { + // declaring specific ports (with no `endPort`s) in the `spec.staticEndpoints.nodePort` should work. 
+ name: "SpecificPorts", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + {Port: 3009}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: ptr.To(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "192.168.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "192.168.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "192.168.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("192.168.0.1"), netip.MustParseAddr("192.168.0.2"), netip.MustParseAddr("192.168.0.3")}, + expectedEvents: []string{}, + expectedErr: "", + expectStatefulSet: true, + }, + }, + }, + { + // if too narrow a range of `spec.staticEndpoints.nodePort.Ports` on the proxyClass should result in no StatefulSet being created. 
+ name: "NotEnoughPorts", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: ptr.To(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "192.168.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "192.168.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "192.168.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning NodePort Services for static endpoints: failed to allocate NodePorts to ProxyGroup Services: not enough available ports to allocate all replicas (needed 4, got 3). Field 'spec.staticEndpoints.nodePort.ports' on ProxyClass \"default-pc\" must have bigger range allocated"}, + expectedErr: "", + expectStatefulSet: false, + }, + }, + }, + { + // when supplying a variety of ranges that are not clashing, the reconciler should manage to create a StatefulSet. 
+ name: "NonClashingRanges", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3000, EndPort: 3002}, + {Port: 3003, EndPort: 3005}, + {Port: 3006}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: ptr.To(int32(3)), + nodes: []testNode{ + {name: "node1", addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, + {name: "node2", addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, + {name: "node3", addresses: []testNodeAddr{{ip: "10.0.0.3", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2"), netip.MustParseAddr("10.0.0.3")}, + expectedEvents: []string{}, + expectedErr: "", + expectStatefulSet: true, + }, + }, + }, + { + // when there isn't a node that matches the selector, the ProxyGroup enters a failed state as there are no valid Static Endpoints. 
+ // while it does create an event on the resource, It does not return an error + name: "NoMatchingNodes", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3000, EndPort: 3005}, + }, + Selector: map[string]string{ + "zone": "us-west", + }, + }, + }, + replicas: defaultReplicas, + nodes: []testNode{ + {name: "node1", addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"zone": "eu-central"}}, + {name: "node2", addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeInternalIP}}, labels: map[string]string{"zone": "eu-central"}}, + }, + expectedIPs: []netip.Addr{}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, + expectedErr: "", + expectStatefulSet: false, + }, + }, + }, + { + // when all the nodes have only have addresses of type InternalIP populated in their status, the ProxyGroup enters a failed state as there are no valid Static Endpoints. 
+ // while it does create an event on the resource, It does not return an error + name: "AllInternalIPAddresses", + reconciles: []reconcile{ + { + staticEndpointConfig: &tsapi.StaticEndpointsConfig{ + NodePort: &tsapi.NodePortConfig{ + Ports: []tsapi.PortRange{ + {Port: 3001}, + {Port: 3005}, + {Port: 3007}, + {Port: 3009}, + }, + Selector: map[string]string{ + "foo/bar": "baz", + }, + }, + }, + replicas: ptr.To(int32(4)), + nodes: []testNode{ + { + name: "foobar", + addresses: []testNodeAddr{{ip: "192.168.0.1", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbaz", + addresses: []testNodeAddr{{ip: "192.168.0.2", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "foobarbazz", + addresses: []testNodeAddr{{ip: "192.168.0.3", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to find any `status.addresses` of type \"ExternalIP\" on nodes using configured Selectors on `spec.staticEndpoints.nodePort.selectors` for ProxyClass \"default-pc\""}, + expectedErr: "", + expectStatefulSet: false, + }, + }, + }, + { + // When the node's (and some of their addresses) change between reconciles, the reconciler should first pick addresses that + // have been used previously (provided that they are still populated on a node that matches the selector) + name: "NodeIPChangesAndPersists", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: 
[]testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.10", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectStatefulSet: true, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + }, + }, + }, + { + // given a new node being created with a new IP, and a node previously used for Static Endpoints being removed, the Static Endpoints should be updated + // correctly + name: "NodeIPChangesWithNewNode", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ 
+ { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.3", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.3")}, + expectStatefulSet: true, + }, + }, + }, + { + // when all the node IPs change, they should all update + name: "AllNodeIPsChange", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.100", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.200", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.100"), netip.MustParseAddr("10.0.0.200")}, + expectStatefulSet: true, + }, + }, + }, + { + // if there are less ExternalIPs after changes to the nodes between reconciles, the reconciler should complete without issues + name: "LessExternalIPsAfterChange", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + 
addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeInternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + expectStatefulSet: true, + }, + }, + }, + { + // if node address parsing fails (given an invalid address), the reconciler should continue without failure and find other + // valid addresses + name: "NodeAddressParsingFails", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "invalid-ip", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "invalid-ip", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", 
addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + }, + }, + { + // if the node's become unlabeled, the ProxyGroup should enter a ProxyGroupInvalid state, but the reconciler should not fail + name: "NodesBecomeUnlabeled", + reconciles: []reconcile{ + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node1", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + { + name: "node2", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{"foo/bar": "baz"}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectStatefulSet: true, + }, + { + staticEndpointConfig: defaultStaticEndpointConfig, + replicas: defaultReplicas, + nodes: []testNode{ + { + name: "node3", + addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{}, + }, + { + name: "node4", + addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, + labels: map[string]string{}, + }, + }, + expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, + expectStatefulSet: true, + }, + }, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + tsClient := &fakeTSClient{} + zl, _ := zap.NewDevelopment() + fr := record.NewFakeRecorder(10) + cl := 
tstest.NewClock(tstest.ClockOpts{}) + + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Annotations: defaultProxyClassAnnotations, + }, + }, + Status: tsapi.ProxyClassStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyClassReady), + Status: metav1.ConditionTrue, + Reason: reasonProxyClassValid, + Message: reasonProxyClassValid, + LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + }}, + }, + } + + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Finalizers: []string{"tailscale.com/finalizer"}, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeEgress, + ProxyClass: pc.Name, + }, + } + + fc := fake.NewClientBuilder(). + WithObjects(pc, pg). + WithStatusSubresource(pc, pg). + WithScheme(tsapi.GlobalScheme). + Build() + + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + defaultProxyClass: "default-pc", + + Client: fc, + tsClient: tsClient, + recorder: fr, + clock: cl, + } + + for i, r := range tt.reconciles { + createdNodes := []corev1.Node{} + t.Run(tt.name, func(t *testing.T) { + for _, n := range r.nodes { + no := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: n.name, + Labels: n.labels, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{}, + }, + } + for _, addr := range n.addresses { + no.Status.Addresses = append(no.Status.Addresses, corev1.NodeAddress{ + Type: addr.addrType, + Address: addr.ip, + }) + } + if err := fc.Create(context.Background(), no); err != nil { + t.Fatalf("failed to create node %q: %v", n.name, err) + } + createdNodes = append(createdNodes, *no) + t.Logf("created node %q with data", n.name) + } + + reconciler.l = zl.Sugar().With("TestName", tt.name).With("Reconcile", i) + pg.Spec.Replicas = r.replicas + 
pc.Spec.StaticEndpoints = r.staticEndpointConfig + + createOrUpdate(context.Background(), fc, "", pg, func(o *tsapi.ProxyGroup) { + o.Spec.Replicas = pg.Spec.Replicas + }) + + createOrUpdate(context.Background(), fc, "", pc, func(o *tsapi.ProxyClass) { + o.Spec.StaticEndpoints = pc.Spec.StaticEndpoints + }) + + if r.expectedErr != "" { + expectError(t, reconciler, "", pg.Name) + } else { + expectReconciled(t, reconciler, "", pg.Name) + } + expectEvents(t, fr, r.expectedEvents) + + sts := &appsv1.StatefulSet{} + err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts) + if r.expectStatefulSet { + if err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + for j := range 2 { + sec := &corev1.Secret{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: fmt.Sprintf("%s-%d-config", pg.Name, j)}, sec); err != nil { + t.Fatalf("failed to get state Secret for replica %d: %v", j, err) + } + + config := &ipn.ConfigVAlpha{} + foundConfig := false + for _, d := range sec.Data { + if err := json.Unmarshal(d, config); err == nil { + foundConfig = true + break + } + } + if !foundConfig { + t.Fatalf("could not unmarshal config from secret data for replica %d", j) + } + + if len(config.StaticEndpoints) > staticEndpointsMaxAddrs { + t.Fatalf("expected %d StaticEndpoints in config Secret, but got %d for replica %d. Found Static Endpoints: %v", staticEndpointsMaxAddrs, len(config.StaticEndpoints), j, config.StaticEndpoints) + } + + for _, e := range config.StaticEndpoints { + if !slices.Contains(r.expectedIPs, e.Addr()) { + t.Fatalf("found unexpected static endpoint IP %q for replica %d. 
Expected one of %v", e.Addr().String(), j, r.expectedIPs) + } + if c := r.staticEndpointConfig; c != nil && c.NodePort.Ports != nil { + var ports tsapi.PortRanges = c.NodePort.Ports + found := false + for port := range ports.All() { + if port == e.Port() { + found = true + break + } + } + + if !found { + t.Fatalf("found unexpected static endpoint port %d for replica %d. Expected one of %v .", e.Port(), j, ports.All()) + } + } else { + if e.Port() != 3001 && e.Port() != 3002 { + t.Fatalf("found unexpected static endpoint port %d for replica %d. Expected 3001 or 3002.", e.Port(), j) + } + } + } + } + + pgroup := &tsapi.ProxyGroup{} + err = fc.Get(context.Background(), client.ObjectKey{Name: pg.Name}, pgroup) + if err != nil { + t.Fatalf("failed to get ProxyGroup %q: %v", pg.Name, err) + } + + t.Logf("getting proxygroup after reconcile") + for _, d := range pgroup.Status.Devices { + t.Logf("found device %q", d.Hostname) + for _, e := range d.StaticEndpoints { + t.Logf("found static endpoint %q", e) + } + } + } else { + if err == nil { + t.Fatal("expected error when getting Statefulset") + } + } + }) + + // node cleanup between reconciles + // we created a new set of nodes for each + for _, n := range createdNodes { + err := fc.Delete(context.Background(), &n) + if err != nil && !apierrors.IsNotFound(err) { + t.Fatalf("failed to delete node: %v", err) + } + } + } + + t.Run("delete_and_cleanup", func(t *testing.T) { + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + defaultProxyClass: "default-pc", + + Client: fc, + tsClient: tsClient, + recorder: fr, + l: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), + clock: cl, + } + + if err := fc.Delete(context.Background(), pg); err != nil { + t.Fatalf("error deleting ProxyGroup: %v", err) + } + + expectReconciled(t, reconciler, "", pg.Name) + expectMissing[tsapi.ProxyGroup](t, fc, "", pg.Name) + 
+ if err := fc.Delete(context.Background(), pc); err != nil { + t.Fatalf("error deleting ProxyClass: %v", err) + } + expectMissing[tsapi.ProxyClass](t, fc, "", pc.Name) + }) + }) + } } func TestProxyGroup(t *testing.T) { - pc := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{ Name: "default-pc", @@ -598,7 +1359,7 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox role := pgRole(pg, tsNamespace) roleBinding := pgRoleBinding(pg, tsNamespace) serviceAccount := pgServiceAccount(pg, tsNamespace) - statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", proxyClass) + statefulSet, err := pgStatefulSet(pg, tsNamespace, testProxyImage, "auto", nil, proxyClass) if err != nil { t.Fatal(err) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 03bb8989b9782..aba5f9e2df4b2 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -425,6 +425,23 @@ _Appears in:_ | `ip` _string_ | IP is the ClusterIP of the Service fronting the deployed ts.net nameserver.
Currently you must manually update your cluster DNS config to add
this address as a stub nameserver for ts.net for cluster workloads to be
able to resolve MagicDNS names associated with egress or Ingress
proxies.
The IP address will change if you delete and recreate the DNSConfig. | | | +#### NodePortConfig + + + + + + + +_Appears in:_ +- [StaticEndpointsConfig](#staticendpointsconfig) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `ports` _[PortRange](#portrange) array_ | The port ranges from which the operator will select NodePorts for the Services.
You must ensure that firewall rules allow UDP ingress traffic for these ports
to the node's external IPs.
The ports must be in the range of service node ports for the cluster (default `30000-32767`).
See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. | | MinItems: 1
| +| `selector` _object (keys:string, values:string)_ | A selector which will be used to select the node's that will have their `ExternalIP`'s advertised
by the ProxyGroup as Static Endpoints. | | | + + #### Pod @@ -451,6 +468,26 @@ _Appears in:_ | `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#topologyspreadconstraint-v1-core) array_ | Proxy Pod's topology spread constraints.
By default Tailscale Kubernetes operator does not apply any topology spread constraints.
https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | | +#### PortRange + + + + + + + +_Appears in:_ +- [NodePortConfig](#nodeportconfig) +- [PortRanges](#portranges) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `port` _integer_ | port represents a port selected to be used. This is a required field. | | | +| `endPort` _integer_ | endPort indicates that the range of ports from port to endPort if set, inclusive,
should be used. This field cannot be defined if the port field is not defined.
The endPort must be either unset, or equal or greater than port. | | | + + + + #### ProxyClass @@ -518,6 +555,7 @@ _Appears in:_ | `metrics` _[Metrics](#metrics)_ | Configuration for proxy metrics. Metrics are currently not supported
for egress proxies and for Ingress proxies that have been configured
with tailscale.com/experimental-forward-cluster-traffic-via-ingress
annotation. Note that the metrics are currently considered unstable
and will likely change in breaking ways in the future - we only
recommend that you use those for debugging purposes. | | | | `tailscale` _[TailscaleConfig](#tailscaleconfig)_ | TailscaleConfig contains options to configure the tailscale-specific
parameters of proxies. | | | | `useLetsEncryptStagingEnvironment` _boolean_ | Set UseLetsEncryptStagingEnvironment to true to issue TLS
certificates for any HTTPS endpoints exposed to the tailnet from
LetsEncrypt's staging environment.
https://letsencrypt.org/docs/staging-environment/
This setting only affects Tailscale Ingress resources.
By default Ingress TLS certificates are issued from LetsEncrypt's
production environment.
Changing this setting true -> false, will result in any
existing certs being re-issued from the production environment.
Changing this setting false (default) -> true, when certs have already
been provisioned from production environment will NOT result in certs
being re-issued from the staging environment before they need to be
renewed. | | | +| `staticEndpoints` _[StaticEndpointsConfig](#staticendpointsconfig)_ | Configuration for 'static endpoints' on proxies in order to facilitate
direct connections from other devices on the tailnet.
See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. | | | #### ProxyClassStatus @@ -935,6 +973,22 @@ _Appears in:_ | `pod` _[Pod](#pod)_ | Configuration for the proxy Pod. | | | +#### StaticEndpointsConfig + + + + + + + +_Appears in:_ +- [ProxyClassSpec](#proxyclassspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `nodePort` _[NodePortConfig](#nodeportconfig)_ | The configuration for static endpoints using NodePort Services. | | | + + #### Storage @@ -1015,6 +1069,7 @@ _Appears in:_ | --- | --- | --- | --- | | `hostname` _string_ | Hostname is the fully qualified domain name of the device.
If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
node. | | | | `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
assigned to the device. | | | +| `staticEndpoints` _string array_ | StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. | | | #### TailscaleConfig diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 899abf096bb86..9221c60f3c870 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -6,6 +6,10 @@ package v1alpha1 import ( + "fmt" + "iter" + "strings" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -82,6 +86,124 @@ type ProxyClassSpec struct { // renewed. // +optional UseLetsEncryptStagingEnvironment bool `json:"useLetsEncryptStagingEnvironment,omitempty"` + // Configuration for 'static endpoints' on proxies in order to facilitate + // direct connections from other devices on the tailnet. + // See https://tailscale.com/kb/1445/kubernetes-operator-customization#static-endpoints. + // +optional + StaticEndpoints *StaticEndpointsConfig `json:"staticEndpoints,omitempty"` +} + +type StaticEndpointsConfig struct { + // The configuration for static endpoints using NodePort Services. + NodePort *NodePortConfig `json:"nodePort"` +} + +type NodePortConfig struct { + // The port ranges from which the operator will select NodePorts for the Services. + // You must ensure that firewall rules allow UDP ingress traffic for these ports + // to the node's external IPs. + // The ports must be in the range of service node ports for the cluster (default `30000-32767`). + // See https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport. + // +kubebuilder:validation:MinItems=1 + Ports []PortRange `json:"ports"` + // A selector which will be used to select the node's that will have their `ExternalIP`'s advertised + // by the ProxyGroup as Static Endpoints. 
+ Selector map[string]string `json:"selector,omitempty"` +} + +// PortRanges is a list of PortRange(s) +type PortRanges []PortRange + +func (prs PortRanges) String() string { + var prStrings []string + + for _, pr := range prs { + prStrings = append(prStrings, pr.String()) + } + + return strings.Join(prStrings, ", ") +} + +// All allows us to iterate over all the ports in the PortRanges +func (prs PortRanges) All() iter.Seq[uint16] { + return func(yield func(uint16) bool) { + for _, pr := range prs { + end := pr.EndPort + if end == 0 { + end = pr.Port + } + + for port := pr.Port; port <= end; port++ { + if !yield(port) { + return + } + } + } + } +} + +// Contains reports whether port is in any of the PortRanges. +func (prs PortRanges) Contains(port uint16) bool { + for _, r := range prs { + if r.Contains(port) { + return true + } + } + + return false +} + +// ClashesWith reports whether the supplied PortRange clashes with any of the PortRanges. +func (prs PortRanges) ClashesWith(pr PortRange) bool { + for p := range prs.All() { + if pr.Contains(p) { + return true + } + } + + return false +} + +type PortRange struct { + // port represents a port selected to be used. This is a required field. + Port uint16 `json:"port"` + + // endPort indicates that the range of ports from port to endPort if set, inclusive, + // should be used. This field cannot be defined if the port field is not defined. + // The endPort must be either unset, or equal or greater than port. + // +optional + EndPort uint16 `json:"endPort,omitempty"` +} + +// Contains reports whether port is in pr. +func (pr PortRange) Contains(port uint16) bool { + switch pr.EndPort { + case 0: + return port == pr.Port + default: + return port >= pr.Port && port <= pr.EndPort + } +} + +// String returns the PortRange in a string form. 
+func (pr PortRange) String() string { + if pr.EndPort == 0 { + return fmt.Sprintf("%d", pr.Port) + } + + return fmt.Sprintf("%d-%d", pr.Port, pr.EndPort) +} + +// IsValid reports whether the port range is valid. +func (pr PortRange) IsValid() bool { + if pr.Port == 0 { + return false + } + if pr.EndPort == 0 { + return true + } + + return pr.Port <= pr.EndPort } type TailscaleConfig struct { diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index ac87cc6caf892..17b13064bb4fc 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -111,6 +111,10 @@ type TailnetDevice struct { // assigned to the device. // +optional TailnetIPs []string `json:"tailnetIPs,omitempty"` + + // StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. + // +optional + StaticEndpoints []string `json:"staticEndpoints,omitempty"` } // +kubebuilder:validation:Type=string diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index e091272075ce2..ffc04d3b9dde3 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -407,6 +407,33 @@ func (in *NameserverStatus) DeepCopy() *NameserverStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePortConfig) DeepCopyInto(out *NodePortConfig) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]PortRange, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePortConfig. 
+func (in *NodePortConfig) DeepCopy() *NodePortConfig { + if in == nil { + return nil + } + out := new(NodePortConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Pod) DeepCopyInto(out *Pod) { *out = *in @@ -482,6 +509,40 @@ func (in *Pod) DeepCopy() *Pod { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortRange) DeepCopyInto(out *PortRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRange. +func (in *PortRange) DeepCopy() *PortRange { + if in == nil { + return nil + } + out := new(PortRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PortRanges) DeepCopyInto(out *PortRanges) { + { + in := &in + *out = make(PortRanges, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRanges. +func (in PortRanges) DeepCopy() PortRanges { + if in == nil { + return nil + } + out := new(PortRanges) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProxyClass) DeepCopyInto(out *ProxyClass) { *out = *in @@ -559,6 +620,11 @@ func (in *ProxyClassSpec) DeepCopyInto(out *ProxyClassSpec) { *out = new(TailscaleConfig) **out = **in } + if in.StaticEndpoints != nil { + in, out := &in.StaticEndpoints, &out.StaticEndpoints + *out = new(StaticEndpointsConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyClassSpec. 
@@ -1096,6 +1162,26 @@ func (in *StatefulSet) DeepCopy() *StatefulSet { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StaticEndpointsConfig) DeepCopyInto(out *StaticEndpointsConfig) { + *out = *in + if in.NodePort != nil { + in, out := &in.NodePort, &out.NodePort + *out = new(NodePortConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StaticEndpointsConfig. +func (in *StaticEndpointsConfig) DeepCopy() *StaticEndpointsConfig { + if in == nil { + return nil + } + out := new(StaticEndpointsConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Storage) DeepCopyInto(out *Storage) { *out = *in @@ -1163,6 +1249,11 @@ func (in *TailnetDevice) DeepCopyInto(out *TailnetDevice) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.StaticEndpoints != nil { + in, out := &in.StaticEndpoints, &out.StaticEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetDevice. From 711698f5a985a5c93649b31c9f49ed6d22a91c42 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Fri, 27 Jun 2025 18:10:04 +0100 Subject: [PATCH 0037/1093] cmd/{containerboot,k8s-operator}: use state Secret for checking device auth (#16328) Previously, the operator checked the ProxyGroup status fields for information on how many of the proxies had successfully authed. Use their state Secrets instead as a more reliable source of truth. containerboot has written device_fqdn and device_ips keys to the state Secret since inception, and pod_uid since 1.78.0, so there's no need to use the API for that data. Read it from the state Secret for consistency. 
However, to ensure we don't read data from a previous run of containerboot, make sure we reset containerboot's state keys on startup. One other knock-on effect of that is ProxyGroups can briefly be marked not Ready while a Pod is restarting. Introduce a new ProxyGroupAvailable condition to more accurately reflect when downstream controllers can implement flows that rely on a ProxyGroup having at least 1 proxy Pod running. Fixes #16327 Change-Id: I026c18e9d23e87109a471a87b8e4fb6271716a66 Signed-off-by: Tom Proctor --- cmd/containerboot/kube.go | 42 +++-- cmd/containerboot/kube_test.go | 80 +++++++++ cmd/containerboot/main.go | 19 +- cmd/containerboot/main_test.go | 132 +++++++------- cmd/k8s-operator/egress-services-readiness.go | 2 +- .../egress-services-readiness_test.go | 2 +- cmd/k8s-operator/egress-services.go | 2 +- cmd/k8s-operator/ingress-for-pg.go | 6 +- cmd/k8s-operator/ingress-for-pg_test.go | 10 +- cmd/k8s-operator/proxygroup.go | 167 +++++++++++------- cmd/k8s-operator/proxygroup_specs.go | 6 +- cmd/k8s-operator/proxygroup_test.go | 18 +- cmd/k8s-operator/sts.go | 36 ++-- cmd/k8s-operator/svc-for-pg.go | 2 +- cmd/k8s-operator/svc-for-pg_test.go | 4 +- cmd/k8s-operator/tsrecorder.go | 9 +- k8s-operator/apis/v1alpha1/types_connector.go | 11 +- k8s-operator/conditions.go | 10 +- kube/kubeclient/fake_client.go | 17 +- 19 files changed, 373 insertions(+), 202 deletions(-) diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 0a2dfa1bf342f..d4a974e6f3a24 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -18,12 +18,15 @@ import ( "time" "tailscale.com/ipn" + "tailscale.com/kube/egressservices" + "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" "tailscale.com/logtail/backoff" "tailscale.com/tailcfg" "tailscale.com/types/logger" + "tailscale.com/util/set" ) // kubeClient is a wrapper around Tailscale's internal kube client that knows how to 
talk to the kube API server. We use @@ -117,20 +120,39 @@ func (kc *kubeClient) deleteAuthKey(ctx context.Context) error { return nil } -// storeCapVerUID stores the current capability version of tailscale and, if provided, UID of the Pod in the tailscale -// state Secret. -// These two fields are used by the Kubernetes Operator to observe the current capability version of tailscaled running in this container. -func (kc *kubeClient) storeCapVerUID(ctx context.Context, podUID string) error { - capVerS := fmt.Sprintf("%d", tailcfg.CurrentCapabilityVersion) - d := map[string][]byte{ - kubetypes.KeyCapVer: []byte(capVerS), +// resetContainerbootState resets state from previous runs of containerboot to +// ensure the operator doesn't use stale state when a Pod is first recreated. +func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string) error { + existingSecret, err := kc.GetSecret(ctx, kc.stateSecret) + if err != nil { + return fmt.Errorf("failed to read state Secret %q to reset state: %w", kc.stateSecret, err) + } + + s := &kubeapi.Secret{ + Data: map[string][]byte{ + kubetypes.KeyCapVer: fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion), + }, } if podUID != "" { - d[kubetypes.KeyPodUID] = []byte(podUID) + s.Data[kubetypes.KeyPodUID] = []byte(podUID) } - s := &kubeapi.Secret{ - Data: d, + + toClear := set.SetOf([]string{ + kubetypes.KeyDeviceID, + kubetypes.KeyDeviceFQDN, + kubetypes.KeyDeviceIPs, + kubetypes.KeyHTTPSEndpoint, + egressservices.KeyEgressServices, + ingressservices.IngressConfigKey, + }) + for key := range existingSecret.Data { + if toClear.Contains(key) { + // It's fine to leave the key in place as a debugging breadcrumb, + // it should get a new value soon. 
+ s.Data[key] = nil + } } + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") } diff --git a/cmd/containerboot/kube_test.go b/cmd/containerboot/kube_test.go index 413971bc6df23..c33714ed12ace 100644 --- a/cmd/containerboot/kube_test.go +++ b/cmd/containerboot/kube_test.go @@ -8,13 +8,18 @@ package main import ( "context" "errors" + "fmt" "testing" "time" "github.com/google/go-cmp/cmp" "tailscale.com/ipn" + "tailscale.com/kube/egressservices" + "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" ) func TestSetupKube(t *testing.T) { @@ -238,3 +243,78 @@ func TestWaitForConsistentState(t *testing.T) { t.Fatalf("expected nil, got %v", err) } } + +func TestResetContainerbootState(t *testing.T) { + capver := fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion) + for name, tc := range map[string]struct { + podUID string + initial map[string][]byte + expected map[string][]byte + }{ + "empty_initial": { + podUID: "1234", + initial: map[string][]byte{}, + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: []byte("1234"), + }, + }, + "empty_initial_no_pod_uid": { + initial: map[string][]byte{}, + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + }, + }, + "only_relevant_keys_updated": { + podUID: "1234", + initial: map[string][]byte{ + kubetypes.KeyCapVer: []byte("1"), + kubetypes.KeyPodUID: []byte("5678"), + kubetypes.KeyDeviceID: []byte("device-id"), + kubetypes.KeyDeviceFQDN: []byte("device-fqdn"), + kubetypes.KeyDeviceIPs: []byte(`["192.0.2.1"]`), + kubetypes.KeyHTTPSEndpoint: []byte("https://example.com"), + egressservices.KeyEgressServices: []byte("egress-services"), + ingressservices.IngressConfigKey: []byte("ingress-config"), + "_current-profile": []byte("current-profile"), + "_machinekey": []byte("machine-key"), + "_profiles": []byte("profiles"), + "_serve_e0ce": []byte("serve-e0ce"), 
+ "profile-e0ce": []byte("profile-e0ce"), + }, + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: []byte("1234"), + // Cleared keys. + kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + // Tailscaled keys not included in patch. + }, + }, + } { + t.Run(name, func(t *testing.T) { + var actual map[string][]byte + kc := &kubeClient{stateSecret: "foo", Client: &kubeclient.FakeClient{ + GetSecretImpl: func(context.Context, string) (*kubeapi.Secret, error) { + return &kubeapi.Secret{ + Data: tc.initial, + }, nil + }, + StrategicMergePatchSecretImpl: func(ctx context.Context, name string, secret *kubeapi.Secret, _ string) error { + actual = secret.Data + return nil + }, + }} + if err := kc.resetContainerbootState(context.Background(), tc.podUID); err != nil { + t.Fatalf("resetContainerbootState() error = %v", err) + } + if diff := cmp.Diff(tc.expected, actual); diff != "" { + t.Errorf("resetContainerbootState() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 9543308975b79..52b30b8375a4c 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -188,6 +188,14 @@ func run() error { if err := cfg.setupKube(bootCtx, kc); err != nil { return fmt.Errorf("error setting up for running on Kubernetes: %w", err) } + // Clear out any state from previous runs of containerboot. Check + // hasKubeStateStore because although we know we're in kube, that + // doesn't guarantee the state store is properly configured. 
+ if hasKubeStateStore(cfg) { + if err := kc.resetContainerbootState(bootCtx, cfg.PodUID); err != nil { + return fmt.Errorf("error clearing previous state from Secret: %w", err) + } + } } client, daemonProcess, err := startTailscaled(bootCtx, cfg) @@ -367,11 +375,6 @@ authLoop: if err := client.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil { return fmt.Errorf("failed to unset serve config: %w", err) } - if hasKubeStateStore(cfg) { - if err := kc.storeHTTPSEndpoint(ctx, ""); err != nil { - return fmt.Errorf("failed to update HTTPS endpoint in tailscale state: %w", err) - } - } } if hasKubeStateStore(cfg) && isTwoStepConfigAuthOnce(cfg) { @@ -384,12 +387,6 @@ authLoop: } } - if hasKubeStateStore(cfg) { - if err := kc.storeCapVerUID(ctx, cfg.PodUID); err != nil { - return fmt.Errorf("storing capability version and UID: %w", err) - } - } - w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState) if err != nil { return fmt.Errorf("rewatching tailscaled for updates after auth: %w", err) diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index c7293c77a4afa..96feef682af5b 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -460,6 +460,7 @@ func TestContainerBoot(t *testing.T) { Env: map[string]string{ "KUBERNETES_SERVICE_HOST": env.kube.Host, "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + "POD_UID": "some-pod-uid", }, KubeSecret: map[string]string{ "authkey": "tskey-key", @@ -471,17 +472,20 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: "some-pod-uid", }, }, { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - 
"tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, + kubetypes.KeyPodUID: "some-pod-uid", }, }, }, @@ -554,7 +558,8 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { @@ -565,7 +570,8 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { @@ -574,10 +580,10 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false", }, WantKubeSecret: map[string]string{ - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, }, }, @@ -599,17 +605,18 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, }, { @@ -624,11 +631,11 @@ func TestContainerBoot(t *testing.T) { }, }, 
WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "new-name.test.ts.net", - "device_id": "newID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "new-name.test.ts.net", + "device_id": "newID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, }, }, @@ -912,18 +919,19 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { Notify: runningNotify, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "https_endpoint": "no-https", - "tailscale_capver": capver, + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + "https_endpoint": "no-https", + kubetypes.KeyCapVer: capver, }, }, }, @@ -947,7 +955,8 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, EndpointStatuses: map[string]int{ egressSvcTerminateURL(env.localAddrPort): 200, @@ -956,12 +965,12 @@ func TestContainerBoot(t *testing.T) { { Notify: runningNotify, WantKubeSecret: map[string]string{ - "egress-services": mustBase64(t, egressStatus), - "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", - "device_id": "myID", - "device_ips": `["100.64.0.1"]`, - "tailscale_capver": capver, + "egress-services": string(mustJSON(t, egressStatus)), + "authkey": "tskey-key", + "device_fqdn": "test-node.test.ts.net", + "device_id": "myID", + "device_ips": `["100.64.0.1"]`, + kubetypes.KeyCapVer: capver, }, EndpointStatuses: 
map[string]int{ egressSvcTerminateURL(env.localAddrPort): 200, @@ -1002,7 +1011,8 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", + "authkey": "tskey-key", + kubetypes.KeyCapVer: capver, }, }, { @@ -1016,10 +1026,11 @@ func TestContainerBoot(t *testing.T) { // Missing "_current-profile" key. }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "_machinekey": "foo", - "_profiles": "foo", - "profile-baff": "foo", + "authkey": "tskey-key", + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + kubetypes.KeyCapVer: capver, }, WantLog: "Waiting for tailscaled to finish writing state to Secret \"tailscale\"", }, @@ -1029,11 +1040,12 @@ func TestContainerBoot(t *testing.T) { "_current-profile": "foo", }, WantKubeSecret: map[string]string{ - "authkey": "tskey-key", - "_machinekey": "foo", - "_profiles": "foo", - "profile-baff": "foo", - "_current-profile": "foo", + "authkey": "tskey-key", + "_machinekey": "foo", + "_profiles": "foo", + "profile-baff": "foo", + "_current-profile": "foo", + kubetypes.KeyCapVer: capver, }, WantLog: "HTTP server at [::]:9002 closed", WantExitCode: ptr.To(0), @@ -1061,7 +1073,7 @@ func TestContainerBoot(t *testing.T) { fmt.Sprintf("TS_TEST_SOCKET=%s", env.lapi.Path), fmt.Sprintf("TS_SOCKET=%s", env.runningSockPath), fmt.Sprintf("TS_TEST_ONLY_ROOT=%s", env.d), - fmt.Sprint("TS_TEST_FAKE_NETFILTER=true"), + "TS_TEST_FAKE_NETFILTER=true", } for k, v := range tc.Env { cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) @@ -1489,10 +1501,7 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { } switch r.Header.Get("Content-Type") { case "application/json-patch+json": - req := []struct { - Op string `json:"op"` - Path string `json:"path"` - }{} + req := []kubeclient.JSONPatch{} if err := json.Unmarshal(bs, &req); err != nil { 
panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs))) } @@ -1503,23 +1512,20 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) } delete(k.secret, strings.TrimPrefix(op.Path, "/data/")) - case "replace": + case "add", "replace": path, ok := strings.CutPrefix(op.Path, "/data/") if !ok { panic(fmt.Sprintf("unsupported json-patch path %q", op.Path)) } - req := make([]kubeclient.JSONPatch, 0) - if err := json.Unmarshal(bs, &req); err != nil { - panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs))) + val, ok := op.Value.(string) + if !ok { + panic(fmt.Sprintf("unsupported json patch value %v: cannot be converted to string", op.Value)) } - - for _, patch := range req { - val, ok := patch.Value.(string) - if !ok { - panic(fmt.Sprintf("unsupported json patch value %v: cannot be converted to string", patch.Value)) - } - k.secret[path] = val + v, err := base64.StdEncoding.DecodeString(val) + if err != nil { + panic(fmt.Sprintf("json patch value %q is not base64 encoded: %v", val, err)) } + k.secret[path] = string(v) default: panic(fmt.Sprintf("unsupported json-patch op %q", op.Op)) } diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go index 5e95a52790395..ecf99b63cda44 100644 --- a/cmd/k8s-operator/egress-services-readiness.go +++ b/cmd/k8s-operator/egress-services-readiness.go @@ -102,7 +102,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re msg = err.Error() return res, err } - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { l.Infof("ProxyGroup for Service is not ready, waiting...") reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse diff --git a/cmd/k8s-operator/egress-services-readiness_test.go b/cmd/k8s-operator/egress-services-readiness_test.go index 
ce947329ddfb8..f80759aef927b 100644 --- a/cmd/k8s-operator/egress-services-readiness_test.go +++ b/cmd/k8s-operator/egress-services-readiness_test.go @@ -137,7 +137,7 @@ func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replic } func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) { - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l) } func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) { diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index 7103205ac2c3f..ca6562071eba7 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -531,7 +531,7 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) } diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index ea31dbd63befb..09417fd0c8878 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -182,7 +182,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin } return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) } - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { logger.Infof("ProxyGroup is not (yet) ready") return false, nil } @@ -666,7 +666,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, 
ing *networki } // Validate TLS configuration - if ing.Spec.TLS != nil && len(ing.Spec.TLS) > 0 && (len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) { + if len(ing.Spec.TLS) > 0 && (len(ing.Spec.TLS) > 1 || len(ing.Spec.TLS[0].Hosts) > 1) { errs = append(errs, fmt.Errorf("Ingress contains invalid TLS block %v: only a single TLS entry with a single host is allowed", ing.Spec.TLS)) } @@ -683,7 +683,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki } // Validate ProxyGroup readiness - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { errs = append(errs, fmt.Errorf("ProxyGroup %q is not ready", pg.Name)) } diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 05f4827927923..2308514f3af9c 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -305,7 +305,7 @@ func TestValidateIngress(t *testing.T) { Status: tsapi.ProxyGroupStatus{ Conditions: []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionTrue, ObservedGeneration: 1, }, @@ -399,7 +399,7 @@ func TestValidateIngress(t *testing.T) { Status: tsapi.ProxyGroupStatus{ Conditions: []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionFalse, ObservedGeneration: 1, }, @@ -755,7 +755,7 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, pgName string, expec Labels: pgSecretLabels(pgName, "config"), }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)), + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)), }, }) } @@ -794,13 +794,13 @@ func createPGResources(t *testing.T, fc client.Client, pgName string) { Labels: pgSecretLabels(pgName, "config"), }, Data: map[string][]byte{ - 
tsoperator.TailscaledConfigFileName(106): []byte("{}"), + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte("{}"), }, } mustCreate(t, fc, pgCfgSecret) pg.Status.Conditions = []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionTrue, ObservedGeneration: 1, }, diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 328262031b85c..bedf06ba0ac28 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -52,6 +52,17 @@ const ( // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" staticEndpointsMaxAddrs = 2 + + // The minimum tailcfg.CapabilityVersion that deployed clients are expected + // to support to be compatible with the current ProxyGroup controller. + // If the controller needs to depend on newer client behaviour, it should + // maintain backwards compatible logic for older capability versions for 3 + // stable releases, as per documentation on supported version drift: + // https://tailscale.com/kb/1236/kubernetes-operator#supported-versions + // + // tailcfg.CurrentCapabilityVersion was 106 when the ProxyGroup controller was + // first introduced. + pgMinCapabilityVersion = 106 ) var ( @@ -204,14 +215,27 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } desiredReplicas := int(pgReplicas(pg)) + + // Set ProxyGroupAvailable condition. 
+ status := metav1.ConditionFalse + reason := reasonProxyGroupCreating + message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) + if len(pg.Status.Devices) > 0 { + status = metav1.ConditionTrue + if len(pg.Status.Devices) == desiredReplicas { + reason = reasonProxyGroupReady + } + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, pg.Generation, r.clock, logger) + + // Set ProxyGroupReady condition. if len(pg.Status.Devices) < desiredReplicas { - message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) logger.Debug(message) return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) } if len(pg.Status.Devices) > desiredReplicas { - message := fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas) + message = fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas) logger.Debug(message) return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) } @@ -524,17 +548,13 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil { return err } - if err := r.Delete(ctx, m.stateSecret); err != nil { - if !apierrors.IsNotFound(err) { - return fmt.Errorf("error deleting state Secret %s: %w", m.stateSecret.Name, err) - } + if err := r.Delete(ctx, m.stateSecret); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("error deleting state Secret %q: %w", m.stateSecret.Name, err) } configSecret := m.stateSecret.DeepCopy() configSecret.Name += "-config" - if err := r.Delete(ctx, configSecret); err != nil { - if !apierrors.IsNotFound(err) { - return fmt.Errorf("error deleting config Secret %s: %w", configSecret.Name, err) - } + if err := r.Delete(ctx, configSecret); err != nil && !apierrors.IsNotFound(err) { + return 
fmt.Errorf("error deleting config Secret %q: %w", configSecret.Name, err) } // NOTE(ChaosInTheCRD): we shouldn't need to get the service first, checking for a not found error should be enough svc := &corev1.Service{ @@ -635,17 +655,38 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p return nil, err } - var authKey string + var authKey *string if existingCfgSecret == nil { logger.Debugf("Creating authkey for new ProxyGroup proxy") tags := pg.Spec.Tags.Stringify() if len(tags) == 0 { tags = r.defaultTags } - authKey, err = newAuthKey(ctx, r.tsClient, tags) + key, err := newAuthKey(ctx, r.tsClient, tags) if err != nil { return nil, err } + authKey = &key + } + + if authKey == nil { + // Get state Secret to check if it's already authed. + stateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pg.Name, i), + Namespace: r.tsNamespace, + }, + } + if err := r.Get(ctx, client.ObjectKeyFromObject(stateSecret), stateSecret); err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + + if shouldRetainAuthKey(stateSecret) && existingCfgSecret != nil { + authKey, err = authKeyFromSecret(existingCfgSecret) + if err != nil { + return nil, fmt.Errorf("error retrieving auth key from existing config Secret: %w", err) + } + } } replicaName := pgNodePortServiceName(pg.Name, i) @@ -661,7 +702,14 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, existingCfgSecret, endpoints[replicaName]) + // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we + // don't overwrite it if already set. 
+ existingAdvertiseServices, err := extractAdvertiseServicesConfig(existingCfgSecret) + if err != nil { + return nil, err + } + + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[replicaName], existingAdvertiseServices) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) } @@ -811,20 +859,22 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) } -func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32, authKey string, oldSecret *corev1.Secret, staticEndpoints []netip.AddrPort) (tailscaledConfigs, error) { +func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ - Version: "alpha0", - AcceptDNS: "false", - AcceptRoutes: "false", // AcceptRoutes defaults to true - Locked: "false", - Hostname: ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)), + Version: "alpha0", + AcceptDNS: "false", + AcceptRoutes: "false", // AcceptRoutes defaults to true + Locked: "false", + Hostname: ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)), + AdvertiseServices: oldAdvertiseServices, + AuthKey: authKey, } if pg.Spec.HostnamePrefix != "" { conf.Hostname = ptr.To(fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, idx)) } - if shouldAcceptRoutes(class) { + if shouldAcceptRoutes(pc) { conf.AcceptRoutes = "true" } @@ -832,51 +882,26 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32 conf.StaticEndpoints = staticEndpoints } - deviceAuthed := false - for _, d := range pg.Status.Devices { - if d.Hostname == *conf.Hostname { - deviceAuthed = true - break - } - } - - if authKey != "" { - conf.AuthKey = &authKey - } else if !deviceAuthed { - key, err := authKeyFromSecret(oldSecret) - if err != nil { - return nil, fmt.Errorf("error retrieving auth 
key from Secret: %w", err) - } - conf.AuthKey = key - } - capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) - - // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we - // don't overwrite it here. - if err := copyAdvertiseServicesConfig(conf, oldSecret, 106); err != nil { - return nil, err - } - capVerConfigs[106] = *conf - return capVerConfigs, nil + return map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha{ + pgMinCapabilityVersion: *conf, + }, nil } -func copyAdvertiseServicesConfig(conf *ipn.ConfigVAlpha, oldSecret *corev1.Secret, capVer tailcfg.CapabilityVersion) error { - if oldSecret == nil { - return nil +func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error) { + if cfgSecret == nil { + return nil, nil } - oldConfB := oldSecret.Data[tsoperator.TailscaledConfigFileName(capVer)] - if len(oldConfB) == 0 { - return nil + conf, err := latestConfigFromSecret(cfgSecret) + if err != nil { + return nil, err } - var oldConf ipn.ConfigVAlpha - if err := json.Unmarshal(oldConfB, &oldConf); err != nil { - return fmt.Errorf("error unmarshalling existing config: %w", err) + if conf == nil { + return nil, nil } - conf.AdvertiseServices = oldConf.AdvertiseServices - return nil + return conf.AdvertiseServices, nil } func (r *ProxyGroupReconciler) validate(_ *tsapi.ProxyGroup) error { @@ -914,7 +939,7 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr dnsName: prefs.Config.UserProfile.LoginName, } pod := &corev1.Pod{} - if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: secret.Name}, pod); err != nil && !apierrors.IsNotFound(err) { + if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: fmt.Sprintf("%s-%d", pg.Name, ordinal)}, pod); err != nil && !apierrors.IsNotFound(err) { return nil, err } else if err == nil { nm.podUID = string(pod.UID) @@ -932,17 +957,23 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoint } for 
_, m := range metadata { - device, ok, err := getDeviceInfo(ctx, r.tsClient, m.stateSecret) - if err != nil { - return nil, err - } - if !ok { + if !strings.EqualFold(string(m.stateSecret.Data[kubetypes.KeyPodUID]), m.podUID) { + // Current Pod has not yet written its UID to the state Secret, data may + // be stale. continue } - dev := tsapi.TailnetDevice{ - Hostname: device.Hostname, - TailnetIPs: device.TailnetIPs, + device := tsapi.TailnetDevice{} + if ipsB := m.stateSecret.Data[kubetypes.KeyDeviceIPs]; len(ipsB) > 0 { + ips := []string{} + if err := json.Unmarshal(ipsB, &ips); err != nil { + return nil, fmt.Errorf("failed to extract device IPs from state Secret %q: %w", m.stateSecret.Name, err) + } + device.TailnetIPs = ips + } + + if hostname, _, ok := strings.Cut(string(m.stateSecret.Data[kubetypes.KeyDeviceFQDN]), "."); ok { + device.Hostname = hostname } if ep, ok := staticEndpoints[device.Hostname]; ok && len(ep) > 0 { @@ -950,10 +981,10 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoint for _, e := range ep { eps = append(eps, e.String()) } - dev.StaticEndpoints = eps + device.StaticEndpoints = eps } - devices = append(devices, dev) + devices = append(devices, device) } return devices, nil diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 20e797f0c07cd..50d9c2d5fd8f9 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -351,7 +351,7 @@ func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.S for i := range pgReplicas(pg) { secrets = append(secrets, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%d", pg.Name, i), + Name: pgStateSecretName(pg.Name, i), Namespace: namespace, Labels: pgSecretLabels(pg.Name, "state"), OwnerReferences: pgOwnerReference(pg), @@ -422,6 +422,10 @@ func pgConfigSecretName(pgName string, i int32) string { return fmt.Sprintf("%s-%d-config", pgName, i) } +func 
pgStateSecretName(pgName string, i int32) string { + return fmt.Sprintf("%s-%d", pgName, i) +} + func pgEgressCMName(pg string) string { return fmt.Sprintf("%s-egress-config", pg) } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 8ffce2c0c68ac..87b04a434c102 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -877,6 +877,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) if expected := 1; reconciler.egressProxyGroups.Len() != expected { @@ -913,6 +914,7 @@ func TestProxyGroup(t *testing.T) { }, } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) }) @@ -924,12 +926,14 @@ func TestProxyGroup(t *testing.T) { }) expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) 
tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar()) pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{ Hostname: "hostname-nodeid-2", TailnetIPs: []string{"1.2.3.4", "::1"}, @@ -947,6 +951,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "1/1 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) }) @@ -1224,7 +1229,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { Namespace: tsNamespace, }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): existingConfigBytes, + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): existingConfigBytes, }, }) @@ -1261,7 +1266,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { ResourceVersion: "2", }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): expectedConfigBytes, + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): expectedConfigBytes, }, }) } @@ -1421,8 +1426,13 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG mustUpdate(t, fc, tsNamespace, fmt.Sprintf("test-%d", i), func(s *corev1.Secret) { s.Data = map[string][]byte{ - currentProfileKey: []byte(key), - key: bytes, + currentProfileKey: []byte(key), + key: bytes, + kubetypes.KeyDeviceIPs: []byte(`["1.2.3.4", "::1"]`), + kubetypes.KeyDeviceFQDN: []byte(fmt.Sprintf("hostname-nodeid-%d.tails-scales.ts.net", i)), + // TODO(tomhjp): We have two different mechanisms to retrieve device IDs. 
+ // Consolidate on this one. + kubetypes.KeyDeviceID: []byte(fmt.Sprintf("nodeid-%d", i)), } }) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 4c7c3ac6741a2..3e3d2d5903a7b 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -897,14 +897,6 @@ func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) { } } -func readAuthKey(secret *corev1.Secret, key string) (*string, error) { - origConf := &ipn.ConfigVAlpha{} - if err := json.Unmarshal([]byte(secret.Data[key]), origConf); err != nil { - return nil, fmt.Errorf("error unmarshaling previous tailscaled config in %q: %w", key, err) - } - return origConf.AuthKey, nil -} - // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy // state and auth key and returns tailscaled config files for currently supported proxy versions. func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { @@ -951,7 +943,10 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co return capVerConfigs, nil } -func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { +// latestConfigFromSecret returns the ipn.ConfigVAlpha with the highest capver +// as found in the Secret's key names, e.g. "cap-107.hujson" has capver 107. +// If no config is found, it returns nil. 
+func latestConfigFromSecret(s *corev1.Secret) (*ipn.ConfigVAlpha, error) { latest := tailcfg.CapabilityVersion(-1) latestStr := "" for k, data := range s.Data { @@ -968,12 +963,31 @@ func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { latest = v } } + + var conf *ipn.ConfigVAlpha + if latestStr != "" { + conf = &ipn.ConfigVAlpha{} + if err := json.Unmarshal([]byte(s.Data[latestStr]), conf); err != nil { + return nil, fmt.Errorf("error unmarshaling tailscaled config from Secret %q in field %q: %w", s.Name, latestStr, err) + } + } + + return conf, nil +} + +func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { + conf, err := latestConfigFromSecret(s) + if err != nil { + return nil, err + } + // Allow for configs that don't contain an auth key. Perhaps // users have some mechanisms to delete them. Auth key is // normally not needed after the initial login. - if latestStr != "" { - return readAuthKey(s, latestStr) + if conf != nil { + key = conf.AuthKey } + return key, nil } diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index c9b5b8ae69a18..9846513c78d74 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -164,7 +164,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin } return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) } - if !tsoperator.ProxyGroupIsReady(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { logger.Infof("ProxyGroup is not (yet) ready") return false, nil } diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index 5772cd5d64e7f..e08bfd80d318b 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -142,7 +142,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien Labels: pgSecretLabels("test-pg", "config"), }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(106): []byte(`{"Version":""}`), + 
tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte(`{"Version":""}`), }, } @@ -179,7 +179,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien // Set ProxyGroup status to ready pg.Status.Conditions = []metav1.Condition{ { - Type: string(tsapi.ProxyGroupReady), + Type: string(tsapi.ProxyGroupAvailable), Status: metav1.ConditionTrue, ObservedGeneration: 1, }, diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 081543cd384db..cbabc1d89e475 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -446,18 +446,15 @@ func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) return tsapi.RecorderTailnetDevice{}, false, err } - return getDeviceInfo(ctx, r.tsClient, secret) -} - -func getDeviceInfo(ctx context.Context, tsClient tsClient, secret *corev1.Secret) (d tsapi.RecorderTailnetDevice, ok bool, err error) { prefs, ok, err := getDevicePrefs(secret) if !ok || err != nil { return tsapi.RecorderTailnetDevice{}, false, err } // TODO(tomhjp): The profile info doesn't include addresses, which is why we - // need the API. Should we instead update the profile to include addresses? - device, err := tsClient.Device(ctx, string(prefs.Config.NodeID), nil) + // need the API. Should maybe update tsrecorder to write IPs to the state + // Secret like containerboot does. 
+ device, err := r.tsClient.Device(ctx, string(prefs.Config.NodeID), nil) if err != nil { return tsapi.RecorderTailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) } diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index b8b7a935e3a3f..88fd07346cd5b 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -205,11 +205,12 @@ type ConnectorStatus struct { type ConditionType string const ( - ConnectorReady ConditionType = `ConnectorReady` - ProxyClassReady ConditionType = `ProxyClassReady` - ProxyGroupReady ConditionType = `ProxyGroupReady` - ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service - RecorderReady ConditionType = `RecorderReady` + ConnectorReady ConditionType = `ConnectorReady` + ProxyClassReady ConditionType = `ProxyClassReady` + ProxyGroupReady ConditionType = `ProxyGroupReady` // All proxy Pods running. + ProxyGroupAvailable ConditionType = `ProxyGroupAvailable` // At least one proxy Pod running. + ProxyReady ConditionType = `TailscaleProxyReady` // a Tailscale-specific condition type for corev1.Service + RecorderReady ConditionType = `RecorderReady` // EgressSvcValid gets set on a user configured ExternalName Service that defines a tailnet target to be exposed // on a ProxyGroup. // Set to true if the user provided configuration is valid. 
diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index abe8f7f9cc6fa..1d30f352c3603 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -137,8 +137,16 @@ func ProxyClassIsReady(pc *tsapi.ProxyClass) bool { } func ProxyGroupIsReady(pg *tsapi.ProxyGroup) bool { + return proxyGroupCondition(pg, tsapi.ProxyGroupReady) +} + +func ProxyGroupAvailable(pg *tsapi.ProxyGroup) bool { + return proxyGroupCondition(pg, tsapi.ProxyGroupAvailable) +} + +func proxyGroupCondition(pg *tsapi.ProxyGroup, condType tsapi.ConditionType) bool { idx := xslices.IndexFunc(pg.Status.Conditions, func(cond metav1.Condition) bool { - return cond.Type == string(tsapi.ProxyGroupReady) + return cond.Type == string(condType) }) if idx == -1 { return false diff --git a/kube/kubeclient/fake_client.go b/kube/kubeclient/fake_client.go index c21dc2bf89e61..15ebb5f443f2a 100644 --- a/kube/kubeclient/fake_client.go +++ b/kube/kubeclient/fake_client.go @@ -13,12 +13,13 @@ import ( var _ Client = &FakeClient{} type FakeClient struct { - GetSecretImpl func(context.Context, string) (*kubeapi.Secret, error) - CheckSecretPermissionsImpl func(ctx context.Context, name string) (bool, bool, error) - CreateSecretImpl func(context.Context, *kubeapi.Secret) error - UpdateSecretImpl func(context.Context, *kubeapi.Secret) error - JSONPatchResourceImpl func(context.Context, string, string, []JSONPatch) error - ListSecretsImpl func(context.Context, map[string]string) (*kubeapi.SecretList, error) + GetSecretImpl func(context.Context, string) (*kubeapi.Secret, error) + CheckSecretPermissionsImpl func(ctx context.Context, name string) (bool, bool, error) + CreateSecretImpl func(context.Context, *kubeapi.Secret) error + UpdateSecretImpl func(context.Context, *kubeapi.Secret) error + JSONPatchResourceImpl func(context.Context, string, string, []JSONPatch) error + ListSecretsImpl func(context.Context, map[string]string) (*kubeapi.SecretList, error) + StrategicMergePatchSecretImpl 
func(context.Context, string, *kubeapi.Secret, string) error } func (fc *FakeClient) CheckSecretPermissions(ctx context.Context, name string) (bool, bool, error) { @@ -30,8 +31,8 @@ func (fc *FakeClient) GetSecret(ctx context.Context, name string) (*kubeapi.Secr func (fc *FakeClient) SetURL(_ string) {} func (fc *FakeClient) SetDialer(dialer func(ctx context.Context, network, addr string) (net.Conn, error)) { } -func (fc *FakeClient) StrategicMergePatchSecret(context.Context, string, *kubeapi.Secret, string) error { - return nil +func (fc *FakeClient) StrategicMergePatchSecret(ctx context.Context, name string, s *kubeapi.Secret, fieldManager string) error { + return fc.StrategicMergePatchSecretImpl(ctx, name, s, fieldManager) } func (fc *FakeClient) Event(context.Context, string, string, string) error { return nil From 0a64e86a0df89db063211a826ba4f62eb5ec959f Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 27 Jun 2025 13:56:55 -0700 Subject: [PATCH 0038/1093] wgengine/magicsock: move UDP relay path discovery to heartbeat() (#16407) This was previously hooked around direct UDP path discovery / CallMeMaybe transmission, and related conditions. Now it is subject to relay-specific considerations. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 80 +++++++++++++++++++++++------ wgengine/magicsock/endpoint_test.go | 42 +++++++++++++++ wgengine/magicsock/magicsock.go | 14 +++-- 3 files changed, 118 insertions(+), 18 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index fb5a28c2832fd..29ae025f4d159 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -75,11 +75,12 @@ type endpoint struct { // mu protects all following fields. mu sync.Mutex // Lock ordering: Conn.mu, then endpoint.mu - heartBeatTimer *time.Timer // nil when idle - lastSendExt mono.Time // last time there were outgoing packets sent to this peer from an external trigger (e.g. 
wireguard-go or disco pingCLI) - lastSendAny mono.Time // last time there were outgoing packets sent this peer from any trigger, internal or external to magicsock - lastFullPing mono.Time // last time we pinged all disco or wireguard only endpoints - derpAddr netip.AddrPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients) + heartBeatTimer *time.Timer // nil when idle + lastSendExt mono.Time // last time there were outgoing packets sent to this peer from an external trigger (e.g. wireguard-go or disco pingCLI) + lastSendAny mono.Time // last time there were outgoing packets sent this peer from any trigger, internal or external to magicsock + lastFullPing mono.Time // last time we pinged all disco or wireguard only endpoints + lastUDPRelayPathDiscovery mono.Time // last time we ran UDP relay path discovery + derpAddr netip.AddrPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients) bestAddr addrQuality // best non-DERP path; zero if none; mutate via setBestAddrLocked() bestAddrAt mono.Time // time best address re-confirmed @@ -851,6 +852,10 @@ func (de *endpoint) heartbeat() { de.sendDiscoPingsLocked(now, true) } + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } + de.heartBeatTimer = time.AfterFunc(heartbeatInterval, de.heartbeat) } @@ -861,6 +866,45 @@ func (de *endpoint) setHeartbeatDisabled(v bool) { de.heartbeatDisabled = v } +// discoverUDPRelayPathsLocked starts UDP relay path discovery. +func (de *endpoint) discoverUDPRelayPathsLocked(now mono.Time) { + // TODO(jwhited): return early if there are no relay servers set, otherwise + // we spin up and down relayManager.runLoop unnecessarily. + de.lastUDPRelayPathDiscovery = now + de.c.relayManager.allocateAndHandshakeAllServers(de) +} + +// wantUDPRelayPathDiscoveryLocked reports whether we should kick off UDP relay +// path discovery. 
+func (de *endpoint) wantUDPRelayPathDiscoveryLocked(now mono.Time) bool { + if runtime.GOOS == "js" { + return false + } + if !de.relayCapable { + return false + } + if de.bestAddr.isDirect() && now.Before(de.trustBestAddrUntil) { + return false + } + if !de.lastUDPRelayPathDiscovery.IsZero() && now.Sub(de.lastUDPRelayPathDiscovery) < discoverUDPRelayPathsInterval { + return false + } + // TODO(jwhited): consider applying 'goodEnoughLatency' suppression here, + // but not until we have a strategy for triggering CallMeMaybeVia regularly + // and/or enabling inbound packets to act as a UDP relay path discovery + // trigger, otherwise clients without relay servers may fall off a UDP + // relay path and never come back. They are dependent on the remote side + // regularly TX'ing CallMeMaybeVia, which currently only happens as part + // of full UDP relay path discovery. + if now.After(de.trustBestAddrUntil) { + return true + } + if !de.lastUDPRelayPathDiscovery.IsZero() && now.Sub(de.lastUDPRelayPathDiscovery) >= upgradeUDPRelayInterval { + return true + } + return false +} + // wantFullPingLocked reports whether we should ping to all our peers looking for // a better path. 
// @@ -869,7 +913,7 @@ func (de *endpoint) wantFullPingLocked(now mono.Time) bool { if runtime.GOOS == "js" { return false } - if !de.bestAddr.ap.IsValid() || de.lastFullPing.IsZero() { + if !de.bestAddr.isDirect() || de.lastFullPing.IsZero() { return true } if now.After(de.trustBestAddrUntil) { @@ -878,7 +922,7 @@ func (de *endpoint) wantFullPingLocked(now mono.Time) bool { if de.bestAddr.latency <= goodEnoughLatency { return false } - if now.Sub(de.lastFullPing) >= upgradeInterval { + if now.Sub(de.lastFullPing) >= upgradeUDPDirectInterval { return true } return false @@ -964,9 +1008,16 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { if startWGPing { de.sendWireGuardOnlyPingsLocked(now) } - } else if !udpAddr.ap.IsValid() || now.After(de.trustBestAddrUntil) { + } else if !udpAddr.isDirect() || now.After(de.trustBestAddrUntil) { de.sendDiscoPingsLocked(now, true) } + // TODO(jwhited): consider triggering UDP relay path discovery here under + // certain conditions. We currently only trigger it in heartbeat(), which + // is both good and bad. It's good because the first heartbeat() tick is 3s + // after the first packet, which gives us time to discover a UDP direct + // path and potentially avoid what would be wasted UDP relay path discovery + // work. It's bad because we might not discover a UDP direct path, and we + // incur a 3s delay before we try to discover a UDP relay path. de.noteTxActivityExtTriggerLocked(now) de.lastSendAny = now de.mu.Unlock() @@ -1275,13 +1326,6 @@ func (de *endpoint) sendDiscoPingsLocked(now mono.Time, sendCallMeMaybe bool) { // sent so our firewall ports are probably open and now // would be a good time for them to connect. go de.c.enqueueCallMeMaybe(derpAddr, de) - - // Schedule allocation of relay endpoints. We make no considerations for - // current relay endpoints or best UDP path state for now, keep it - // simple. 
- if de.relayCapable { - go de.c.relayManager.allocateAndHandshakeAllServers(de) - } } } @@ -1703,6 +1747,12 @@ type epAddr struct { vni virtualNetworkID // vni.isSet() indicates if this [epAddr] involves a Geneve header } +// isDirect returns true if e.ap is valid and not tailcfg.DerpMagicIPAddr, +// and a VNI is not set. +func (e epAddr) isDirect() bool { + return e.ap.IsValid() && e.ap.Addr() != tailcfg.DerpMagicIPAddr && !e.vni.isSet() +} + func (e epAddr) String() string { if !e.vni.isSet() { return e.ap.String() diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index b1e8cab91bcd1..3a1e55b8b9728 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "tailscale.com/tailcfg" "tailscale.com/types/key" ) @@ -323,3 +324,44 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { }) } } + +func Test_epAddr_isDirectUDP(t *testing.T) { + vni := virtualNetworkID{} + vni.set(7) + tests := []struct { + name string + ap netip.AddrPort + vni virtualNetworkID + want bool + }{ + { + name: "true", + ap: netip.MustParseAddrPort("192.0.2.1:7"), + vni: virtualNetworkID{}, + want: true, + }, + { + name: "false derp magic addr", + ap: netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, 0), + vni: virtualNetworkID{}, + want: false, + }, + { + name: "false vni set", + ap: netip.MustParseAddrPort("192.0.2.1:7"), + vni: vni, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := epAddr{ + ap: tt.ap, + vni: tt.vni, + } + if got := e.isDirect(); got != tt.want { + t.Errorf("isDirect() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index e76d0054f04c2..553543b0f496d 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3494,9 +3494,17 @@ const ( // keep NAT mappings alive. 
sessionActiveTimeout = 45 * time.Second - // upgradeInterval is how often we try to upgrade to a better path - // even if we have some non-DERP route that works. - upgradeInterval = 1 * time.Minute + // upgradeUDPDirectInterval is how often we try to upgrade to a better, + // direct UDP path even if we have some direct UDP path that works. + upgradeUDPDirectInterval = 1 * time.Minute + + // upgradeUDPRelayInterval is how often we try to discover UDP relay paths + // even if we have a UDP relay path that works. + upgradeUDPRelayInterval = 1 * time.Minute + + // discoverUDPRelayPathsInterval is the minimum time between UDP relay path + // discovery. + discoverUDPRelayPathsInterval = 30 * time.Second // heartbeatInterval is how often pings to the best UDP address // are sent. From 76b9afb54d7ddf662a6d5e47ab42021a2e6dba36 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 27 Jun 2025 15:14:18 -0700 Subject: [PATCH 0039/1093] ipn/store: make StateStore.All optional (#16409) This method is only needed to migrate between store.FileStore and tpm.tpmStore. We can make a runtime type assertion instead of implementing an unused method for every platform. 
Updates #15830 Signed-off-by: Andrew Lytvynov --- cmd/tsconnect/src/lib/js-state-store.ts | 3 --- cmd/tsconnect/src/types/wasm_js.d.ts | 1 - cmd/tsconnect/wasm/wasm_js.go | 24 -------------------- feature/tpm/tpm.go | 4 ++++ feature/tpm/tpm_test.go | 29 +++++++++++++++---------- ipn/store.go | 6 ----- ipn/store/awsstore/store_aws.go | 5 ----- ipn/store/kubestore/store_kube.go | 5 ----- ipn/store/mem/store_mem.go | 14 ------------ ipn/store/stores.go | 24 +++++++++++++++++++- 10 files changed, 45 insertions(+), 70 deletions(-) diff --git a/cmd/tsconnect/src/lib/js-state-store.ts b/cmd/tsconnect/src/lib/js-state-store.ts index 7f2fc8087e768..e57dfd98efabd 100644 --- a/cmd/tsconnect/src/lib/js-state-store.ts +++ b/cmd/tsconnect/src/lib/js-state-store.ts @@ -10,7 +10,4 @@ export const sessionStateStorage: IPNStateStorage = { getState(id) { return window.sessionStorage[`ipn-state-${id}`] || "" }, - all() { - return JSON.stringify(window.sessionStorage) - }, } diff --git a/cmd/tsconnect/src/types/wasm_js.d.ts b/cmd/tsconnect/src/types/wasm_js.d.ts index f47a972b03fba..492197ccb1a9b 100644 --- a/cmd/tsconnect/src/types/wasm_js.d.ts +++ b/cmd/tsconnect/src/types/wasm_js.d.ts @@ -44,7 +44,6 @@ declare global { interface IPNStateStorage { setState(id: string, value: string): void getState(id: string): string - all(): string } type IPNConfig = { diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index c5ff56120f492..779a87e49dec9 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -15,7 +15,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "iter" "log" "math/rand/v2" "net" @@ -580,29 +579,6 @@ func (s *jsStateStore) WriteState(id ipn.StateKey, bs []byte) error { return nil } -func (s *jsStateStore) All() iter.Seq2[ipn.StateKey, []byte] { - return func(yield func(ipn.StateKey, []byte) bool) { - jsValue := s.jsStateStorage.Call("all") - if jsValue.String() == "" { - return - } - buf, err := 
hex.DecodeString(jsValue.String()) - if err != nil { - return - } - var state map[string][]byte - if err := json.Unmarshal(buf, &state); err != nil { - return - } - - for k, v := range state { - if !yield(ipn.StateKey(k), v) { - break - } - } - } -} - func mapSlice[T any, M any](a []T, f func(T) M) []M { n := make([]M, len(a)) for i, e := range a { diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 64656d412a2e7..5ec084effa27d 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -217,6 +217,10 @@ func (s *tpmStore) All() iter.Seq2[ipn.StateKey, []byte] { } } +// Ensure tpmStore implements store.ExportableStore for migration to/from +// store.FileStore. +var _ store.ExportableStore = (*tpmStore)(nil) + // The nested levels of encoding and encryption are confusing, so here's what's // going on in plain English. // diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index b08681354a1e4..f4497f8c72732 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "fmt" + "iter" "maps" "os" "path/filepath" @@ -20,8 +21,8 @@ import ( "github.com/google/go-cmp/cmp" "tailscale.com/ipn" "tailscale.com/ipn/store" - "tailscale.com/ipn/store/mem" "tailscale.com/types/logger" + "tailscale.com/util/mak" ) func TestPropToString(t *testing.T) { @@ -251,7 +252,9 @@ func TestMigrateStateToTPM(t *testing.T) { if err != nil { t.Fatalf("migration failed: %v", err) } - gotContent := maps.Collect(s.All()) + gotContent := maps.Collect(s.(interface { + All() iter.Seq2[ipn.StateKey, []byte] + }).All()) if diff := cmp.Diff(content, gotContent); diff != "" { t.Errorf("unexpected content after migration, diff:\n%s", diff) } @@ -285,7 +288,7 @@ func tpmSupported() bool { type mockTPMSealProvider struct { path string - mem.Store + data map[ipn.StateKey][]byte } func newMockTPMSeal(logf logger.Logf, path string) (ipn.StateStore, error) { @@ -293,7 +296,7 @@ func newMockTPMSeal(logf logger.Logf, path string) 
(ipn.StateStore, error) { if !ok { return nil, fmt.Errorf("%q missing tpmseal: prefix", path) } - s := &mockTPMSealProvider{path: path, Store: mem.Store{}} + s := &mockTPMSealProvider{path: path} buf, err := os.ReadFile(path) if errors.Is(err, os.ErrNotExist) { return s, s.flushState() @@ -312,24 +315,28 @@ func newMockTPMSeal(logf logger.Logf, path string) (ipn.StateStore, error) { if data.Key == "" || data.Nonce == "" { return nil, fmt.Errorf("%q missing key or nonce", path) } - for k, v := range data.Data { - s.Store.WriteState(k, v) - } + s.data = data.Data return s, nil } +func (p *mockTPMSealProvider) ReadState(k ipn.StateKey) ([]byte, error) { + return p.data[k], nil +} + func (p *mockTPMSealProvider) WriteState(k ipn.StateKey, v []byte) error { - if err := p.Store.WriteState(k, v); err != nil { - return err - } + mak.Set(&p.data, k, v) return p.flushState() } +func (p *mockTPMSealProvider) All() iter.Seq2[ipn.StateKey, []byte] { + return maps.All(p.data) +} + func (p *mockTPMSealProvider) flushState() error { data := map[string]any{ "key": "foo", "nonce": "bar", - "data": maps.Collect(p.Store.All()), + "data": p.data, } buf, err := json.Marshal(data) if err != nil { diff --git a/ipn/store.go b/ipn/store.go index e176e48421216..550aa8cba819a 100644 --- a/ipn/store.go +++ b/ipn/store.go @@ -8,7 +8,6 @@ import ( "context" "errors" "fmt" - "iter" "net" "strconv" ) @@ -84,11 +83,6 @@ type StateStore interface { // instead, which only writes if the value is different from what's // already in the store. WriteState(id StateKey, bs []byte) error - // All returns an iterator over all StateStore keys. Using ReadState or - // WriteState is not safe while iterating and can lead to a deadlock. - // The order of keys in the iterator is not specified and may change - // between runs. 
- All() iter.Seq2[StateKey, []byte] } // WriteState is a wrapper around store.WriteState that only writes if diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 523d1657b109d..40bbbf0370822 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -10,7 +10,6 @@ import ( "context" "errors" "fmt" - "iter" "net/url" "regexp" "strings" @@ -254,7 +253,3 @@ func (s *awsStore) persistState() error { _, err = s.ssmClient.PutParameter(context.TODO(), in) return err } - -func (s *awsStore) All() iter.Seq2[ipn.StateKey, []byte] { - return s.memory.All() -} diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index f6bedbf0b8054..14025bbb4150a 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -7,7 +7,6 @@ package kubestore import ( "context" "fmt" - "iter" "log" "net" "os" @@ -429,7 +428,3 @@ func sanitizeKey[T ~string](k T) string { return '_' }, string(k)) } - -func (s *Store) All() iter.Seq2[ipn.StateKey, []byte] { - return s.memory.All() -} diff --git a/ipn/store/mem/store_mem.go b/ipn/store/mem/store_mem.go index 6c22aefd547f8..6f474ce993b43 100644 --- a/ipn/store/mem/store_mem.go +++ b/ipn/store/mem/store_mem.go @@ -7,7 +7,6 @@ package mem import ( "bytes" "encoding/json" - "iter" "sync" xmaps "golang.org/x/exp/maps" @@ -86,16 +85,3 @@ func (s *Store) ExportToJSON() ([]byte, error) { } return json.MarshalIndent(s.cache, "", " ") } - -func (s *Store) All() iter.Seq2[ipn.StateKey, []byte] { - return func(yield func(ipn.StateKey, []byte) bool) { - s.mu.Lock() - defer s.mu.Unlock() - - for k, v := range s.cache { - if !yield(k, v) { - break - } - } - } -} diff --git a/ipn/store/stores.go b/ipn/store/stores.go index 43c79639934b8..bf175da41d8aa 100644 --- a/ipn/store/stores.go +++ b/ipn/store/stores.go @@ -235,6 +235,23 @@ func (s *FileStore) All() iter.Seq2[ipn.StateKey, []byte] { } } +// Ensure FileStore implements ExportableStore for migration 
to/from +// tpm.tpmStore. +var _ ExportableStore = (*FileStore)(nil) + +// ExportableStore is an ipn.StateStore that can export all of its contents. +// This interface is optional to implement, and used for migrating the state +// between different store implementations. +type ExportableStore interface { + ipn.StateStore + + // All returns an iterator over all store keys. Using ReadState or + // WriteState is not safe while iterating and can lead to a deadlock. The + // order of keys in the iterator is not specified and may change between + // runs. + All() iter.Seq2[ipn.StateKey, []byte] +} + func maybeMigrateLocalStateFile(logf logger.Logf, path string) error { path, toTPM := strings.CutPrefix(path, TPMPrefix) @@ -297,10 +314,15 @@ func maybeMigrateLocalStateFile(logf logger.Logf, path string) error { } defer os.Remove(tmpPath) + fromExp, ok := from.(ExportableStore) + if !ok { + return fmt.Errorf("%T does not implement the exportableStore interface", from) + } + // Copy all the items. This is pretty inefficient, because both stores // write the file to disk for each WriteState, but that's ok for a one-time // migration. - for k, v := range from.All() { + for k, v := range fromExp.All() { if err := to.WriteState(k, v); err != nil { return err } From 544aee9d08e214c3fc2699916c2ed410b2fb79eb Mon Sep 17 00:00:00 2001 From: Simon Law Date: Fri, 27 Jun 2025 17:30:43 -0700 Subject: [PATCH 0040/1093] tsidp: update README to refer to community projects (#16411) We dropped the idea of the Experimental release stage in tailscale/tailscale-www#7697, in favour of Community Projects. 
Updates #cleanup Signed-off-by: Simon Law --- cmd/tsidp/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index fce844e0b309c..780d9ab95b037 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -1,6 +1,6 @@ # `tsidp` - Tailscale OpenID Connect (OIDC) Identity Provider -[![status: experimental](https://img.shields.io/badge/status-experimental-blue)](https://tailscale.com/kb/1167/release-stages/#experimental) +[![status: community project](https://img.shields.io/badge/status-community_project-blue)](https://tailscale.com/kb/1531/community-projects) `tsidp` is an OIDC Identity Provider (IdP) server that integrates with your Tailscale network. It allows you to use Tailscale identities for authentication in applications that support OpenID Connect, enabling single sign-on (SSO) capabilities within your tailnet. @@ -89,7 +89,7 @@ The `tsidp` server supports several command-line flags: ## Support -This is an [experimental](https://tailscale.com/kb/1167/release-stages#experimental), work in progress feature. For issues or questions, file issues on the [GitHub repository](https://github.com/tailscale/tailscale) +This is an experimental, work in progress, [community project](https://tailscale.com/kb/1531/community-projects). For issues or questions, file issues on the [GitHub repository](https://github.com/tailscale/tailscale). ## License From 3dc694b4f1983dfcb1731cdf3f29aa6e4f058505 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 27 Jun 2025 19:11:59 -0700 Subject: [PATCH 0041/1093] wgengine/magicsock: clear UDP relay bestAddr's on disco ping timeout (#16410) Otherwise we can end up mirroring packets to them forever. We may eventually want to relax this to direct paths as well, but start with UDP relay paths, which have a higher chance of becoming untrusted and never working again, to be conservative. 
Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 29ae025f4d159..9edc6403e6132 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1129,7 +1129,12 @@ func (de *endpoint) discoPingTimeout(txid stun.TxID) { if !ok { return } - if debugDisco() || !de.bestAddr.ap.IsValid() || mono.Now().After(de.trustBestAddrUntil) { + bestUntrusted := mono.Now().After(de.trustBestAddrUntil) + if sp.to == de.bestAddr.epAddr && sp.to.vni.isSet() && bestUntrusted { + // TODO(jwhited): consider applying this to direct UDP paths as well + de.clearBestAddrLocked() + } + if debugDisco() || !de.bestAddr.ap.IsValid() || bestUntrusted { de.c.dlogf("[v1] magicsock: disco: timeout waiting for pong %x from %v (%v, %v)", txid[:6], sp.to, de.publicKey.ShortString(), de.discoShort()) } de.removeSentDiscoPingLocked(txid, sp, discoPingTimedOut) From ee8c3560ef74613443859e1f78a0c9b9071bac76 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 23 Jun 2025 09:02:03 -0700 Subject: [PATCH 0042/1093] tailcfg: format integer IDs as decimal consistently The server-side code already does e.g. "nodeid:%d" instead of "%x" and as a result we have to second guess a lot of identifiers that could be hex or decimal. This stops the bleeding and means in a year and change we'll stop seeing the hex forms. 
Updates tailscale/corp#29827 Change-Id: Ie5785a07fc32631f7c949348d3453538ab170e6d Signed-off-by: Brad Fitzpatrick --- tailcfg/tailcfg.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 23f3cc49b152b..fb7d54c388619 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2265,10 +2265,10 @@ type Debug struct { Exit *int `json:",omitempty"` } -func (id ID) String() string { return fmt.Sprintf("id:%x", int64(id)) } -func (id UserID) String() string { return fmt.Sprintf("userid:%x", int64(id)) } -func (id LoginID) String() string { return fmt.Sprintf("loginid:%x", int64(id)) } -func (id NodeID) String() string { return fmt.Sprintf("nodeid:%x", int64(id)) } +func (id ID) String() string { return fmt.Sprintf("id:%d", int64(id)) } +func (id UserID) String() string { return fmt.Sprintf("userid:%d", int64(id)) } +func (id LoginID) String() string { return fmt.Sprintf("loginid:%d", int64(id)) } +func (id NodeID) String() string { return fmt.Sprintf("nodeid:%d", int64(id)) } // Equal reports whether n and n2 are equal. func (n *Node) Equal(n2 *Node) bool { From f85e4bcb3287a0adef9567ff79ba58d9cec4e1d2 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 27 Jun 2025 13:11:59 -0400 Subject: [PATCH 0043/1093] client/systray: replace counter metric with gauge Replace the existing systray_start counter metrics with a systray_running gauge metrics. This also adds an IncrementGauge method to local client to parallel IncrementCounter. The LocalAPI handler supports both, we've just never added a client method for gauges. 
Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/local/local.go | 17 +++++++++++++++++ client/systray/systray.go | 3 ++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/client/local/local.go b/client/local/local.go index 12bf2f7d6fef3..74c4f0b6f8a2c 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -398,6 +398,23 @@ func (lc *Client) IncrementCounter(ctx context.Context, name string, delta int) return err } +// IncrementGauge increments the value of a Tailscale daemon's gauge +// metric by the given delta. If the metric has yet to exist, a new gauge +// metric is created and initialized to delta. The delta value can be negative. +func (lc *Client) IncrementGauge(ctx context.Context, name string, delta int) error { + type metricUpdate struct { + Name string `json:"name"` + Type string `json:"type"` + Value int `json:"value"` // amount to increment by + } + _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]metricUpdate{{ + Name: name, + Type: "gauge", + Value: delta, + }})) + return err +} + // TailDaemonLogs returns a stream the Tailscale daemon's logs as they arrive. // Close the context to stop the stream. 
func (lc *Client) TailDaemonLogs(ctx context.Context) (io.Reader, error) { diff --git a/client/systray/systray.go b/client/systray/systray.go index 195a157fb1386..a87783c06ce5a 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -61,7 +61,8 @@ func (menu *Menu) Run() { case <-menu.bgCtx.Done(): } }() - go menu.lc.IncrementCounter(menu.bgCtx, "systray_start", 1) + go menu.lc.IncrementGauge(menu.bgCtx, "systray_running", 1) + defer menu.lc.IncrementGauge(menu.bgCtx, "systray_running", -1) systray.Run(menu.onReady, menu.onExit) } From 2fc247573bfaa7e1dac695e98b5a31c3a2f5217e Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 30 Jun 2025 12:08:35 +0100 Subject: [PATCH 0044/1093] cmd/k8s-operator: ProxyClass annotation for Services and Ingresses (#16363) * cmd/k8s-operator: ProxyClass annotation for Services and Ingresses Previously, the ProxyClass could only be configured for Services and Ingresses via a Label. This adds the ability to set it via an Annotation, but prioritizes the Label if both a Label and Annotation are set. Updates #14323 Signed-off-by: chaosinthecrd * Update cmd/k8s-operator/operator.go Co-authored-by: Tom Proctor Signed-off-by: Tom Meadows * Update cmd/k8s-operator/operator.go Signed-off-by: Tom Meadows * cmd/k8s-operator: ProxyClass annotation for Services and Ingresses Previously, the ProxyClass could only be configured for Services and Ingresses via a Label. This adds the ability to set it via an Annotation, but prioritizes the Label if both a Label and Annotation are set. 
Updates #14323 Signed-off-by: chaosinthecrd --------- Signed-off-by: chaosinthecrd Signed-off-by: Tom Meadows Co-authored-by: Tom Proctor --- cmd/k8s-operator/ingress.go | 1 + cmd/k8s-operator/ingress_test.go | 124 ++++++++++++++++-- cmd/k8s-operator/operator.go | 69 +++++++++- cmd/k8s-operator/operator_test.go | 202 ++++++++++++++++++++++++++++-- cmd/k8s-operator/sts.go | 18 ++- cmd/k8s-operator/svc.go | 12 +- 6 files changed, 398 insertions(+), 28 deletions(-) diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 6c50e10b2ba94..5058fd6dda8fc 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -34,6 +34,7 @@ const ( tailscaleIngressClassName = "tailscale" // ingressClass.metadata.name for tailscale IngressClass resource tailscaleIngressControllerName = "tailscale.com/ts-ingress" // ingressClass.spec.controllerName for tailscale IngressClass resource ingressClassDefaultAnnotation = "ingressclass.kubernetes.io/is-default-class" // we do not support this https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class + indexIngressProxyClass = ".metadata.annotations.ingress-proxy-class" ) type IngressReconciler struct { diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index aacf27d8e6600..e4396eb106a96 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -230,7 +230,8 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { Spec: tsapi.ProxyClassSpec{StatefulSet: &tsapi.StatefulSet{ Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"bar.io/foo": "some-val"}, - Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}}, + Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}, + }}, } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). @@ -285,7 +286,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { // 2. 
Ingress is updated to specify a ProxyClass, ProxyClass is not yet // ready, so proxy resource configuration does not change. mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { - mak.Set(&ing.ObjectMeta.Labels, LabelProxyClass, "custom-metadata") + mak.Set(&ing.ObjectMeta.Labels, LabelAnnotationProxyClass, "custom-metadata") }) expectReconciled(t, ingR, "default", "test") expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) @@ -299,7 +300,8 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, - }}} + }}, + } }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = pc.Name @@ -309,7 +311,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { // Ingress gets reconciled and the custom ProxyClass configuration is // removed from the proxy resources. mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { - delete(ing.ObjectMeta.Labels, LabelProxyClass) + delete(ing.ObjectMeta.Labels, LabelAnnotationProxyClass) }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = "" @@ -325,14 +327,15 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: 1, - }}}, + }}, + }, } crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} // Create fake client with ProxyClass, IngressClass, Ingress with metrics ProxyClass, and Service ing := ingress() ing.Labels = map[string]string{ - LabelProxyClass: "metrics", + LabelAnnotationProxyClass: "metrics", } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). @@ -421,6 +424,113 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { // ServiceMonitor gets garbage collected when the Service is deleted - we cannot test that here. 
} +func TestIngressProxyClassAnnotation(t *testing.T) { + cl := tstest.NewClock(tstest.ClockOpts{}) + zl := zap.Must(zap.NewDevelopment()) + + pcLEStaging, pcLEStagingFalse, _ := proxyClassesForLEStagingTest() + + testCases := []struct { + name string + proxyClassAnnotation string + proxyClassLabel string + proxyClassDefault string + expectedProxyClass string + expectEvents []string + }{ + { + name: "via_label", + proxyClassLabel: pcLEStaging.Name, + expectedProxyClass: pcLEStaging.Name, + }, + { + name: "via_annotation", + proxyClassAnnotation: pcLEStaging.Name, + expectedProxyClass: pcLEStaging.Name, + }, + { + name: "via_default", + proxyClassDefault: pcLEStaging.Name, + expectedProxyClass: pcLEStaging.Name, + }, + { + name: "via_label_override_annotation", + proxyClassLabel: pcLEStaging.Name, + proxyClassAnnotation: pcLEStagingFalse.Name, + expectedProxyClass: pcLEStaging.Name, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + builder := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme) + + builder = builder.WithObjects(pcLEStaging, pcLEStagingFalse). 
+ WithStatusSubresource(pcLEStaging, pcLEStagingFalse) + + fc := builder.Build() + + if tt.proxyClassAnnotation != "" || tt.proxyClassLabel != "" || tt.proxyClassDefault != "" { + name := tt.proxyClassDefault + if name == "" { + name = tt.proxyClassLabel + if name == "" { + name = tt.proxyClassAnnotation + } + } + setProxyClassReady(t, fc, cl, name) + } + + mustCreate(t, fc, ingressClass()) + mustCreate(t, fc, service()) + ing := ingress() + if tt.proxyClassLabel != "" { + ing.Labels = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassLabel, + } + } + if tt.proxyClassAnnotation != "" { + ing.Annotations = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassAnnotation, + } + } + mustCreate(t, fc, ing) + + ingR := &IngressReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: &fakeTSClient{}, + tsnetServer: &fakeTSNetServer{certDomains: []string{"test-host"}}, + defaultTags: []string{"tag:test"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale:test", + }, + logger: zl.Sugar(), + defaultProxyClass: tt.proxyClassDefault, + } + + expectReconciled(t, ingR, "default", "test") + + _, shortName := findGenName(t, fc, "default", "test", "ingress") + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: "operator-ns", Name: shortName}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + switch tt.expectedProxyClass { + case pcLEStaging.Name: + verifyEnvVar(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL", letsEncryptStagingEndpoint) + case pcLEStagingFalse.Name: + verifyEnvVarNotPresent(t, sts, "TS_DEBUG_ACME_DIRECTORY_URL") + default: + t.Fatalf("unexpected expected ProxyClass %q", tt.expectedProxyClass) + } + }) + } +} + func TestIngressLetsEncryptStaging(t *testing.T) { cl := tstest.NewClock(tstest.ClockOpts{}) zl := zap.Must(zap.NewDevelopment()) @@ -452,7 +562,7 @@ func TestIngressLetsEncryptStaging(t *testing.T) { ing := ingress() if 
tt.proxyClassPerResource != "" { ing.Labels = map[string]string{ - LabelProxyClass: tt.proxyClassPerResource, + LabelAnnotationProxyClass: tt.proxyClassPerResource, } } mustCreate(t, fc, ing) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index cd1ae8158d01c..b33dcd114d32b 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -54,6 +54,7 @@ import ( "tailscale.com/tsnet" "tailscale.com/tstime" "tailscale.com/types/logger" + "tailscale.com/util/set" "tailscale.com/version" ) @@ -307,6 +308,7 @@ func runReconcilers(opts reconcilerOpts) { proxyPriorityClassName: opts.proxyPriorityClassName, tsFirewallMode: opts.proxyFirewallMode, } + err = builder. ControllerManagedBy(mgr). Named("service-reconciler"). @@ -327,6 +329,10 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create service reconciler: %v", err) } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(corev1.Service), indexServiceProxyClass, indexProxyClass); err != nil { + startlog.Fatalf("failed setting up ProxyClass indexer for Services: %v", err) + } + ingressChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("ingress")) // If a ProxyClassChanges, enqueue all Ingresses labeled with that // ProxyClass's name. 
@@ -351,6 +357,10 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) } + if err := mgr.GetFieldIndexer().IndexField(context.Background(), new(networkingv1.Ingress), indexIngressProxyClass, indexProxyClass); err != nil { + startlog.Fatalf("failed setting up ProxyClass indexer for Ingresses: %v", err) + } + lc, err := opts.tsServer.LocalClient() if err != nil { startlog.Fatalf("could not get local client: %v", err) @@ -797,6 +807,16 @@ func managedResourceHandlerForType(typ string) handler.MapFunc { } } +// indexProxyClass is used to select ProxyClass-backed objects which are +// locally indexed in the cache for efficient listing without requiring labels. +func indexProxyClass(o client.Object) []string { + if !hasProxyClassAnnotation(o) { + return nil + } + + return []string{o.GetAnnotations()[LabelAnnotationProxyClass]} +} + // proxyClassHandlerForSvc returns a handler that, for a given ProxyClass, // returns a list of reconcile requests for all Services labeled with // tailscale.com/proxy-class: . 
@@ -804,16 +824,37 @@ func proxyClassHandlerForSvc(cl client.Client, logger *zap.SugaredLogger) handle return func(ctx context.Context, o client.Object) []reconcile.Request { svcList := new(corev1.ServiceList) labels := map[string]string{ - LabelProxyClass: o.GetName(), + LabelAnnotationProxyClass: o.GetName(), } + if err := cl.List(ctx, svcList, client.MatchingLabels(labels)); err != nil { logger.Debugf("error listing Services for ProxyClass: %v", err) return nil } + reqs := make([]reconcile.Request, 0) + seenSvcs := make(set.Set[string]) for _, svc := range svcList.Items { reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&svc)}) + seenSvcs.Add(fmt.Sprintf("%s/%s", svc.Namespace, svc.Name)) } + + svcAnnotationList := new(corev1.ServiceList) + if err := cl.List(ctx, svcAnnotationList, client.MatchingFields{indexServiceProxyClass: o.GetName()}); err != nil { + logger.Debugf("error listing Services for ProxyClass: %v", err) + return nil + } + + for _, svc := range svcAnnotationList.Items { + nsname := fmt.Sprintf("%s/%s", svc.Namespace, svc.Name) + if seenSvcs.Contains(nsname) { + continue + } + + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&svc)}) + seenSvcs.Add(nsname) + } + return reqs } } @@ -825,16 +866,36 @@ func proxyClassHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) ha return func(ctx context.Context, o client.Object) []reconcile.Request { ingList := new(networkingv1.IngressList) labels := map[string]string{ - LabelProxyClass: o.GetName(), + LabelAnnotationProxyClass: o.GetName(), } if err := cl.List(ctx, ingList, client.MatchingLabels(labels)); err != nil { logger.Debugf("error listing Ingresses for ProxyClass: %v", err) return nil } + reqs := make([]reconcile.Request, 0) + seenIngs := make(set.Set[string]) for _, ing := range ingList.Items { reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + seenIngs.Add(fmt.Sprintf("%s/%s", 
ing.Namespace, ing.Name)) + } + + ingAnnotationList := new(networkingv1.IngressList) + if err := cl.List(ctx, ingAnnotationList, client.MatchingFields{indexIngressProxyClass: o.GetName()}); err != nil { + logger.Debugf("error listing Ingreses for ProxyClass: %v", err) + return nil + } + + for _, ing := range ingAnnotationList.Items { + nsname := fmt.Sprintf("%s/%s", ing.Namespace, ing.Name) + if seenIngs.Contains(nsname) { + continue + } + + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&ing)}) + seenIngs.Add(nsname) } + return reqs } } @@ -1500,6 +1561,10 @@ func hasProxyGroupAnnotation(obj client.Object) bool { return obj.GetAnnotations()[AnnotationProxyGroup] != "" } +func hasProxyClassAnnotation(obj client.Object) bool { + return obj.GetAnnotations()[LabelAnnotationProxyClass] != "" +} + func id(ctx context.Context, lc *local.Client) (string, error) { st, err := lc.StatusWithoutPeers(ctx) if err != nil { diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index ff6ba4f952749..a9f08c18b4793 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -7,6 +7,7 @@ package main import ( "context" + "encoding/json" "fmt" "testing" "time" @@ -20,8 +21,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/k8s-operator/apis/v1alpha1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/net/dns/resolvconffile" @@ -1121,6 +1124,182 @@ func TestCustomPriorityClassName(t *testing.T) { expectEqual(t, fc, expectedSTS(t, fc, o), removeResourceReqs) } +func TestServiceProxyClassAnnotation(t *testing.T) { + cl := tstest.NewClock(tstest.ClockOpts{}) + zl := zap.Must(zap.NewDevelopment()) + + pcIfNotPresent := 
&tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "if-not-present", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleContainer: &v1alpha1.Container{ + ImagePullPolicy: corev1.PullIfNotPresent, + }, + }, + }, + }, + } + + pcAlways := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "always", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleContainer: &v1alpha1.Container{ + ImagePullPolicy: corev1.PullAlways, + }, + }, + }, + }, + } + + builder := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme) + builder = builder.WithObjects(pcIfNotPresent, pcAlways). + WithStatusSubresource(pcIfNotPresent, pcAlways) + fc := builder.Build() + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + // The apiserver is supposed to set the UID, but the fake client + // doesn't. So, set it explicitly because other code later depends + // on it being set. 
+ UID: types.UID("1234-UID"), + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.20.30.40", + Type: corev1.ServiceTypeLoadBalancer, + }, + } + + mustCreate(t, fc, svc) + + testCases := []struct { + name string + proxyClassAnnotation string + proxyClassLabel string + proxyClassDefault string + expectedProxyClass string + expectEvents []string + }{ + { + name: "via_label", + proxyClassLabel: pcIfNotPresent.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + { + name: "via_annotation", + proxyClassAnnotation: pcIfNotPresent.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + { + name: "via_default", + proxyClassDefault: pcIfNotPresent.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + { + name: "via_label_override_annotation", + proxyClassLabel: pcIfNotPresent.Name, + proxyClassAnnotation: pcAlways.Name, + expectedProxyClass: pcIfNotPresent.Name, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + ft := &fakeTSClient{} + + if tt.proxyClassAnnotation != "" || tt.proxyClassLabel != "" || tt.proxyClassDefault != "" { + name := tt.proxyClassDefault + if name == "" { + name = tt.proxyClassLabel + if name == "" { + name = tt.proxyClassAnnotation + } + } + setProxyClassReady(t, fc, cl, name) + } + + sr := &ServiceReconciler{ + Client: fc, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + defaultProxyClass: tt.proxyClassDefault, + logger: zl.Sugar(), + clock: cl, + isDefaultLoadBalancer: true, + } + + if tt.proxyClassLabel != "" { + svc.Labels = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassLabel, + } + } + if tt.proxyClassAnnotation != "" { + svc.Annotations = map[string]string{ + LabelAnnotationProxyClass: tt.proxyClassAnnotation, + } + } + + mustUpdate(t, fc, svc.Namespace, svc.Name, func(s *corev1.Service) { + s.Labels = svc.Labels + s.Annotations = svc.Annotations + }) + + 
expectReconciled(t, sr, "default", "test") + + list := &corev1.ServiceList{} + fc.List(context.Background(), list, client.InNamespace("default")) + + for _, i := range list.Items { + t.Logf("found service %s", i.Name) + } + + slist := &corev1.SecretList{} + fc.List(context.Background(), slist, client.InNamespace("operator-ns")) + for _, i := range slist.Items { + l, _ := json.Marshal(i.Labels) + t.Logf("found secret %q with labels %q ", i.Name, string(l)) + } + + _, shortName := findGenName(t, fc, "default", "test", "svc") + sts := &appsv1.StatefulSet{} + if err := fc.Get(context.Background(), client.ObjectKey{Namespace: "operator-ns", Name: shortName}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + switch tt.expectedProxyClass { + case pcIfNotPresent.Name: + for _, cont := range sts.Spec.Template.Spec.Containers { + if cont.Name == "tailscale" && cont.ImagePullPolicy != corev1.PullIfNotPresent { + t.Fatalf("ImagePullPolicy %q does not match ProxyClass %q with value %q", cont.ImagePullPolicy, pcIfNotPresent.Name, pcIfNotPresent.Spec.StatefulSet.Pod.TailscaleContainer.ImagePullPolicy) + } + } + case pcAlways.Name: + for _, cont := range sts.Spec.Template.Spec.Containers { + if cont.Name == "tailscale" && cont.ImagePullPolicy != corev1.PullAlways { + t.Fatalf("ImagePullPolicy %q does not match ProxyClass %q with value %q", cont.ImagePullPolicy, pcAlways.Name, pcAlways.Spec.StatefulSet.Pod.TailscaleContainer.ImagePullPolicy) + } + } + default: + t.Fatalf("unexpected expected ProxyClass %q", tt.expectedProxyClass) + } + }) + } +} + func TestProxyClassForService(t *testing.T) { // Setup pc := &tsapi.ProxyClass{ @@ -1132,7 +1311,9 @@ func TestProxyClassForService(t *testing.T) { StatefulSet: &tsapi.StatefulSet{ Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"bar.io/foo": "some-val"}, - Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}}}, + Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": 
"some-val"}}, + }, + }, } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). @@ -1194,7 +1375,7 @@ func TestProxyClassForService(t *testing.T) { // pointing at the 'custom-metadata' ProxyClass. The ProxyClass is not // yet ready, so no changes are actually applied to the proxy resources. mustUpdate(t, fc, "default", "test", func(svc *corev1.Service) { - mak.Set(&svc.Labels, LabelProxyClass, "custom-metadata") + mak.Set(&svc.Labels, LabelAnnotationProxyClass, "custom-metadata") }) expectReconciled(t, sr, "default", "test") expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) @@ -1209,7 +1390,8 @@ func TestProxyClassForService(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: pc.Generation, - }}} + }}, + } }) opts.proxyClass = pc.Name expectReconciled(t, sr, "default", "test") @@ -1220,7 +1402,7 @@ func TestProxyClassForService(t *testing.T) { // configuration from the ProxyClass is removed from the cluster // resources. 
mustUpdate(t, fc, "default", "test", func(svc *corev1.Service) { - delete(svc.Labels, LabelProxyClass) + delete(svc.Labels, LabelAnnotationProxyClass) }) opts.proxyClass = "" expectReconciled(t, sr, "default", "test") @@ -1439,7 +1621,8 @@ func Test_serviceHandlerForIngress(t *testing.T) { IngressClassName: ptr.To(tailscaleIngressClassName), Rules: []networkingv1.IngressRule{{IngressRuleValue: networkingv1.IngressRuleValue{HTTP: &networkingv1.HTTPIngressRuleValue{ Paths: []networkingv1.HTTPIngressPath{ - {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}}}, + {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}}, + }, }}}}, }, }) @@ -1466,7 +1649,8 @@ func Test_serviceHandlerForIngress(t *testing.T) { Spec: networkingv1.IngressSpec{ Rules: []networkingv1.IngressRule{{IngressRuleValue: networkingv1.IngressRuleValue{HTTP: &networkingv1.HTTPIngressRuleValue{ Paths: []networkingv1.HTTPIngressPath{ - {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "non-ts-backend"}}}}, + {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "non-ts-backend"}}}, + }, }}}}, }, }) @@ -1565,6 +1749,7 @@ func Test_clusterDomainFromResolverConf(t *testing.T) { }) } } + func Test_authKeyRemoval(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} @@ -1711,14 +1896,15 @@ func Test_metricsResourceCreation(t *testing.T) { Status: metav1.ConditionTrue, Type: string(tsapi.ProxyClassReady), ObservedGeneration: 1, - }}}, + }}, + }, } svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", UID: types.UID("1234-UID"), - Labels: map[string]string{LabelProxyClass: "metrics"}, + Labels: map[string]string{LabelAnnotationProxyClass: "metrics"}, }, Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 
3e3d2d5903a7b..a943ae97179a1 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -50,7 +50,7 @@ const ( // LabelProxyClass can be set by users on tailscale Ingresses and Services that define cluster ingress or // cluster egress, to specify that configuration in this ProxyClass should be applied to resources created for // the Ingress or Service. - LabelProxyClass = "tailscale.com/proxy-class" + LabelAnnotationProxyClass = "tailscale.com/proxy-class" FinalizerName = "tailscale.com/finalizer" @@ -1127,6 +1127,22 @@ func nameForService(svc *corev1.Service) string { return svc.Namespace + "-" + svc.Name } +// proxyClassForObject returns the proxy class for the given object. If the +// object does not have a proxy class label, it returns the default proxy class +func proxyClassForObject(o client.Object, proxyDefaultClass string) string { + proxyClass, exists := o.GetLabels()[LabelAnnotationProxyClass] + if exists { + return proxyClass + } + + proxyClass, exists = o.GetAnnotations()[LabelAnnotationProxyClass] + if exists { + return proxyClass + } + + return proxyDefaultClass +} + func isValidFirewallMode(m string) bool { return m == "auto" || m == "nftables" || m == "iptables" } diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index c880f59f5012a..f8c9af23990e9 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -41,6 +41,8 @@ const ( reasonProxyInvalid = "ProxyInvalid" reasonProxyFailed = "ProxyFailed" reasonProxyPending = "ProxyPending" + + indexServiceProxyClass = ".metadata.annotations.service-proxy-class" ) type ServiceReconciler struct { @@ -438,16 +440,6 @@ func tailnetTargetAnnotation(svc *corev1.Service) string { return svc.Annotations[annotationTailnetTargetIPOld] } -// proxyClassForObject returns the proxy class for the given object. 
If the -// object does not have a proxy class label, it returns the default proxy class -func proxyClassForObject(o client.Object, proxyDefaultClass string) string { - proxyClass, exists := o.GetLabels()[LabelProxyClass] - if !exists { - proxyClass = proxyDefaultClass - } - return proxyClass -} - func proxyClassIsReady(ctx context.Context, name string, cl client.Client) (bool, error) { proxyClass := new(tsapi.ProxyClass) if err := cl.Get(ctx, types.NamespacedName{Name: name}, proxyClass); err != nil { From 47e77565c63aa1af9d0de27a38281bc0fcc02250 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 30 Jun 2025 12:12:57 -0700 Subject: [PATCH 0045/1093] wgengine/magicsock: avoid handshaking relay endpoints that are trusted (#16412) Changes to our src/address family can trigger blackholes. This commit also adds a missing set of trustBestAddrUntil when setting a UDP relay path as bestAddr. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 52 +++++++--- wgengine/magicsock/magicsock.go | 6 +- wgengine/magicsock/relaymanager.go | 131 ++++++++++++++++-------- wgengine/magicsock/relaymanager_test.go | 4 +- 4 files changed, 130 insertions(+), 63 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 9edc6403e6132..af4666665b0fb 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -100,25 +100,33 @@ type endpoint struct { relayCapable bool // whether the node is capable of speaking via a [tailscale.com/net/udprelay.Server] } -// relayEndpointReady determines whether the given relay addr should be -// installed as de.bestAddr. It is only called by [relayManager] once it has -// determined addr is functional via [disco.Pong] reception. -func (de *endpoint) relayEndpointReady(addr epAddr, latency time.Duration) { +// udpRelayEndpointReady determines whether the given relay [addrQuality] should +// be installed as de.bestAddr. 
It is only called by [relayManager] once it has +// determined maybeBest is functional via [disco.Pong] reception. +func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { de.c.mu.Lock() defer de.c.mu.Unlock() de.mu.Lock() defer de.mu.Unlock() - maybeBetter := addrQuality{addr, latency, pingSizeToPktLen(0, addr)} - if !betterAddr(maybeBetter, de.bestAddr) { + if maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 { + // TODO(jwhited): add some observability for this case, e.g. did we + // flip transports during a de.bestAddr transition from untrusted to + // trusted? + // + // If these are equal we must set maybeBest as bestAddr, otherwise we + // could leave a stale bestAddr if it goes over a different + // address family or src. + } else if !betterAddr(maybeBest, de.bestAddr) { return } - // Promote maybeBetter to bestAddr. + // Promote maybeBest to bestAddr. // TODO(jwhited): collapse path change logging with endpoint.handlePongConnLocked() - de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBetter.epAddr, maybeBetter.wireMTU) - de.setBestAddrLocked(maybeBetter) - de.c.peerMap.setNodeKeyForEpAddr(addr, de.publicKey) + de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBest.epAddr, maybeBest.wireMTU) + de.setBestAddrLocked(maybeBest) + de.trustBestAddrUntil = mono.Now().Add(trustUDPAddrDuration) + de.c.peerMap.setNodeKeyForEpAddr(maybeBest.epAddr, de.publicKey) } func (de *endpoint) setBestAddrLocked(v addrQuality) { @@ -871,7 +879,9 @@ func (de *endpoint) discoverUDPRelayPathsLocked(now mono.Time) { // TODO(jwhited): return early if there are no relay servers set, otherwise // we spin up and down relayManager.runLoop unnecessarily. 
de.lastUDPRelayPathDiscovery = now - de.c.relayManager.allocateAndHandshakeAllServers(de) + lastBest := de.bestAddr + lastBestIsTrusted := mono.Now().Before(de.trustBestAddrUntil) + de.c.relayManager.startUDPRelayPathDiscoveryFor(de, lastBest, lastBestIsTrusted) } // wantUDPRelayPathDiscoveryLocked reports whether we should kick off UDP relay @@ -1714,7 +1724,16 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd // Promote this pong response to our current best address if it's lower latency. // TODO(bradfitz): decide how latency vs. preference order affects decision if !isDerp { - thisPong := addrQuality{sp.to, latency, tstun.WireMTU(pingSizeToPktLen(sp.size, sp.to))} + thisPong := addrQuality{ + epAddr: sp.to, + latency: latency, + wireMTU: pingSizeToPktLen(sp.size, sp.to), + } + // TODO(jwhited): consider checking de.trustBestAddrUntil as well. If + // de.bestAddr is untrusted we may want to clear it, otherwise we could + // get stuck with a forever untrusted bestAddr that blackholes, since + // we don't clear direct UDP paths on disco ping timeout (see + // discoPingTimeout). if betterAddr(thisPong, de.bestAddr) { if src.vni.isSet() { // This would be unexpected. Switching to a Geneve-encapsulated @@ -1765,14 +1784,17 @@ func (e epAddr) String() string { return fmt.Sprintf("%v:vni:%d", e.ap.String(), e.vni.get()) } -// addrQuality is an [epAddr] with an associated latency and path mtu. +// addrQuality is an [epAddr], an optional [key.DiscoPublic] if a relay server +// is associated, a round-trip latency measurement, and path mtu. 
type addrQuality struct { epAddr - latency time.Duration - wireMTU tstun.WireMTU + relayServerDisco key.DiscoPublic // only relevant if epAddr.vni.isSet(), otherwise zero value + latency time.Duration + wireMTU tstun.WireMTU } func (a addrQuality) String() string { + // TODO(jwhited): consider including relayServerDisco return fmt.Sprintf("%v@%v+%v", a.epAddr, a.latency, a.wireMTU) } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 553543b0f496d..0933c5be251a8 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2137,6 +2137,8 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake } ep.mu.Lock() relayCapable := ep.relayCapable + lastBest := ep.bestAddr + lastBestIsTrusted := mono.Now().Before(ep.trustBestAddrUntil) ep.mu.Unlock() if isVia && !relayCapable { c.logf("magicsock: disco: ignoring %s from %v; %v is not known to be relay capable", msgType, sender.ShortString(), sender.ShortString()) @@ -2156,7 +2158,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.discoShort, epDisco.short, via.ServerDisco.ShortString(), ep.publicKey.ShortString(), derpStr(src.String()), len(via.AddrPorts)) - c.relayManager.handleCallMeMaybeVia(ep, via) + c.relayManager.handleCallMeMaybeVia(ep, lastBest, lastBestIsTrusted, via) } else { c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got call-me-maybe, %d endpoints", c.discoShort, epDisco.short, @@ -2254,7 +2256,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // We have no [endpoint] in the [peerMap] for this relay [epAddr] // using it as a bestAddr. [relayManager] might be in the middle of // probing it or attempting to set it as best via - // [endpoint.relayEndpointReady()]. Make [relayManager] aware. + // [endpoint.udpRelayEndpointReady()]. Make [relayManager] aware. 
c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, dm, di, src) return } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 6418a43641200..1c173c01ac138 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -50,7 +50,7 @@ type relayManager struct { // =================================================================== // The following chan fields serve event inputs to a single goroutine, // runLoop(). - allocateHandshakeCh chan *endpoint + startDiscoveryCh chan endpointWithLastBest allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent cancelWorkCh chan *endpoint @@ -77,8 +77,8 @@ type serverDiscoVNI struct { // relayHandshakeWork serves to track in-progress relay handshake work for a // [udprelay.ServerEndpoint]. This structure is immutable once initialized. type relayHandshakeWork struct { - ep *endpoint - se udprelay.ServerEndpoint + wlb endpointWithLastBest + se udprelay.ServerEndpoint // handshakeServerEndpoint() always writes to doneCh (len 1) when it // returns. It may end up writing the same event afterward to @@ -97,7 +97,7 @@ type relayHandshakeWork struct { // [disco.CallMeMaybeVia] reception. This structure is immutable once // initialized. 
type newRelayServerEndpointEvent struct { - ep *endpoint + wlb endpointWithLastBest se udprelay.ServerEndpoint server netip.AddrPort // zero value if learned via [disco.CallMeMaybeVia] } @@ -142,9 +142,9 @@ func (r *relayManager) runLoop() { for { select { - case ep := <-r.allocateHandshakeCh: - if !r.hasActiveWorkForEndpointRunLoop(ep) { - r.allocateAllServersRunLoop(ep) + case startDiscovery := <-r.startDiscoveryCh: + if !r.hasActiveWorkForEndpointRunLoop(startDiscovery.ep) { + r.allocateAllServersRunLoop(startDiscovery) } if !r.hasActiveWorkRunLoop() { return @@ -153,7 +153,7 @@ func (r *relayManager) runLoop() { work, ok := r.allocWorkByEndpoint[done.work.ep] if ok && work == done.work { // Verify the work in the map is the same as the one that we're - // cleaning up. New events on r.allocateHandshakeCh can + // cleaning up. New events on r.startDiscoveryCh can // overwrite pre-existing keys. delete(r.allocWorkByEndpoint, done.work.ep) } @@ -237,7 +237,7 @@ func (r *relayManager) init() { r.handshakeWorkByServerDiscoVNI = make(map[serverDiscoVNI]*relayHandshakeWork) r.handshakeWorkAwaitingPong = make(map[*relayHandshakeWork]addrPortVNI) r.addrPortVNIToHandshakeWork = make(map[addrPortVNI]*relayHandshakeWork) - r.allocateHandshakeCh = make(chan *endpoint) + r.startDiscoveryCh = make(chan endpointWithLastBest) r.allocateWorkDoneCh = make(chan relayEndpointAllocWorkDoneEvent) r.handshakeWorkDoneCh = make(chan relayEndpointHandshakeWorkDoneEvent) r.cancelWorkCh = make(chan *endpoint) @@ -273,7 +273,7 @@ func (r *relayManager) ensureDiscoInfoFor(work *relayHandshakeWork) { di.di = &discoInfo{ discoKey: work.se.ServerDisco, discoShort: work.se.ServerDisco.ShortString(), - sharedKey: work.ep.c.discoPrivate.Shared(work.se.ServerDisco), + sharedKey: work.wlb.ep.c.discoPrivate.Shared(work.se.ServerDisco), } } } @@ -306,7 +306,7 @@ func (r *relayManager) discoInfo(serverDisco key.DiscoPublic) (_ *discoInfo, ok return nil, false } -func (r *relayManager) 
handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeVia) { +func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, lastBest addrQuality, lastBestIsTrusted bool, dm *disco.CallMeMaybeVia) { se := udprelay.ServerEndpoint{ ServerDisco: dm.ServerDisco, LamportID: dm.LamportID, @@ -316,7 +316,11 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, dm *disco.CallMeMaybeV se.BindLifetime.Duration = dm.BindLifetime se.SteadyStateLifetime.Duration = dm.SteadyStateLifetime relayManagerInputEvent(r, nil, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - ep: ep, + wlb: endpointWithLastBest{ + ep: ep, + lastBest: lastBest, + lastBestIsTrusted: lastBestIsTrusted, + }, se: se, }) } @@ -360,11 +364,19 @@ func relayManagerInputEvent[T any](r *relayManager, ctx context.Context, eventCh } } -// allocateAndHandshakeAllServers kicks off allocation and handshaking of relay -// endpoints for 'ep' on all known relay servers if there is no outstanding -// work. -func (r *relayManager) allocateAndHandshakeAllServers(ep *endpoint) { - relayManagerInputEvent(r, nil, &r.allocateHandshakeCh, ep) +// endpointWithLastBest represents an [*endpoint], its last bestAddr, and if +// the last bestAddr was trusted (see endpoint.trustBestAddrUntil) at the time +// of init. This structure is immutable once initialized. +type endpointWithLastBest struct { + ep *endpoint + lastBest addrQuality + lastBestIsTrusted bool +} + +// startUDPRelayPathDiscoveryFor starts UDP relay path discovery for ep on all +// known relay servers if ep has no in-progress work. +func (r *relayManager) startUDPRelayPathDiscoveryFor(ep *endpoint, lastBest addrQuality, lastBestIsTrusted bool) { + relayManagerInputEvent(r, nil, &r.startDiscoveryCh, endpointWithLastBest{ep, lastBest, lastBestIsTrusted}) } // stopWork stops all outstanding allocation & handshaking work for 'ep'. 
@@ -432,7 +444,7 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc r.addrPortVNIToHandshakeWork[apv] = work case *disco.Ping: // Always TX a pong. We might not have any associated work if ping - // reception raced with our call to [endpoint.relayEndpointReady()], so + // reception raced with our call to [endpoint.udpRelayEndpointReady()], so // err on the side of enabling the remote side to use this path. // // Conn.handlePingLocked() makes efforts to suppress duplicate pongs @@ -473,7 +485,7 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc } func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshakeWorkDoneEvent) { - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[done.work.ep] + byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[done.work.wlb.ep] if !ok { return } @@ -483,7 +495,7 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak } delete(byServerDisco, done.work.se.ServerDisco) if len(byServerDisco) == 0 { - delete(r.handshakeWorkByEndpointByServerDisco, done.work.ep) + delete(r.handshakeWorkByEndpointByServerDisco, done.work.wlb.ep) } delete(r.handshakeWorkByServerDiscoVNI, serverDiscoVNI{done.work.se.ServerDisco, done.work.se.VNI}) apv, ok := r.handshakeWorkAwaitingPong[work] @@ -499,10 +511,15 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak vni := virtualNetworkID{} vni.set(done.work.se.VNI) addr := epAddr{ap: done.pongReceivedFrom, vni: vni} - // ep.relayEndpointReady() must be called in a new goroutine to prevent + // ep.udpRelayEndpointReady() must be called in a new goroutine to prevent // deadlocks as it acquires [endpoint] & [Conn] mutexes. See [relayManager] // docs for details. 
- go done.work.ep.relayEndpointReady(addr, done.latency) + go done.work.wlb.ep.udpRelayEndpointReady(addrQuality{ + epAddr: addr, + relayServerDisco: done.work.se.ServerDisco, + latency: done.latency, + wireMTU: pingSizeToPktLen(0, addr), + }) } func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelayServerEndpointEvent) { @@ -525,7 +542,7 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay } // Check for duplicate work by [*endpoint] + server disco. - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.ep] + byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.wlb.ep] if ok { existingWork, ok := byServerDisco[newServerEndpoint.se.ServerDisco] if ok { @@ -569,10 +586,40 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay } } + if newServerEndpoint.server.IsValid() { + // Send a [disco.CallMeMaybeVia] to the remote peer if we allocated this + // endpoint, regardless of if we start a handshake below. + go r.sendCallMeMaybeVia(newServerEndpoint.wlb.ep, newServerEndpoint.se) + } + + lastBestMatchingServer := newServerEndpoint.se.ServerDisco.Compare(newServerEndpoint.wlb.lastBest.relayServerDisco) == 0 + if lastBestMatchingServer && newServerEndpoint.wlb.lastBestIsTrusted { + // This relay server endpoint is the same as [endpoint]'s bestAddr at + // the time UDP relay path discovery was started, and it was also a + // trusted path (see endpoint.trustBestAddrUntil), so return early. + // + // If we were to start a new handshake, there is a chance that we + // cause [endpoint] to blackhole some packets on its bestAddr if we end + // up shifting to a new address family or src, e.g. IPv4 to IPv6, due to + // the window of time between the handshake completing, and our call to + // udpRelayEndpointReady(). The relay server can only forward packets + // from us on a single [epAddr]. 
+ return + } + + // TODO(jwhited): if lastBest is untrusted, consider some strategies + // to reduce the chance we blackhole if it were to transition to + // trusted during/before the new handshake: + // 1. Start by attempting a handshake with only lastBest.epAddr. If + // that fails then try the remaining [epAddr]s. + // 2. Signal bestAddr trust transitions between [endpoint] and + // [relayManager] in order to prevent a handshake from starting + // and/or stop one that is running. + // We're ready to start a new handshake. ctx, cancel := context.WithCancel(context.Background()) work := &relayHandshakeWork{ - ep: newServerEndpoint.ep, + wlb: newServerEndpoint.wlb, se: newServerEndpoint.se, rxDiscoMsgCh: make(chan relayHandshakeDiscoMsgEvent), doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), @@ -581,16 +628,11 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay } if byServerDisco == nil { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) - r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.ep] = byServerDisco + r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.wlb.ep] = byServerDisco } byServerDisco[newServerEndpoint.se.ServerDisco] = work r.handshakeWorkByServerDiscoVNI[sdv] = work - if newServerEndpoint.server.IsValid() { - // Send CallMeMaybeVia to the remote peer if we allocated this endpoint. 
- go r.sendCallMeMaybeVia(work.ep, work.se) - } - r.handshakeGeneration++ if r.handshakeGeneration == 0 { // generation must be nonzero r.handshakeGeneration++ @@ -633,7 +675,8 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat work.cancel() }() - epDisco := work.ep.disco.Load() + ep := work.wlb.ep + epDisco := ep.disco.Load() if epDisco == nil { return } @@ -653,7 +696,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat for _, addrPort := range work.se.AddrPorts { if addrPort.IsValid() { sentBindAny = true - go work.ep.c.sendDiscoMessage(epAddr{ap: addrPort, vni: vni}, key.NodePublic{}, work.se.ServerDisco, bind, discoVerboseLog) + go ep.c.sendDiscoMessage(epAddr{ap: addrPort, vni: vni}, key.NodePublic{}, work.se.ServerDisco, bind, discoVerboseLog) } } if !sentBindAny { @@ -684,15 +727,15 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat sentPingAt[txid] = time.Now() ping := &disco.Ping{ TxID: txid, - NodeKey: work.ep.c.publicKeyAtomic.Load(), + NodeKey: ep.c.publicKeyAtomic.Load(), } go func() { if withAnswer != nil { answer := &disco.BindUDPRelayEndpointAnswer{BindUDPRelayEndpointCommon: common} answer.Challenge = *withAnswer - work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) + ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, work.se.ServerDisco, answer, discoVerboseLog) } - work.ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) + ep.c.sendDiscoMessage(epAddr{ap: to, vni: vni}, key.NodePublic{}, epDisco.key, ping, discoVerboseLog) }() } @@ -760,17 +803,17 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat } } -func (r *relayManager) allocateAllServersRunLoop(ep *endpoint) { +func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { if len(r.serversByAddrPort) == 0 { return } 
ctx, cancel := context.WithCancel(context.Background()) - started := &relayEndpointAllocWork{ep: ep, cancel: cancel, wg: &sync.WaitGroup{}} + started := &relayEndpointAllocWork{ep: wlb.ep, cancel: cancel, wg: &sync.WaitGroup{}} for k := range r.serversByAddrPort { started.wg.Add(1) - go r.allocateSingleServer(ctx, started.wg, k, ep) + go r.allocateSingleServer(ctx, started.wg, k, wlb) } - r.allocWorkByEndpoint[ep] = started + r.allocWorkByEndpoint[wlb.ep] = started go func() { started.wg.Wait() relayManagerInputEvent(r, ctx, &r.allocateWorkDoneCh, relayEndpointAllocWorkDoneEvent{work: started}) @@ -829,25 +872,25 @@ func doAllocate(ctx context.Context, server netip.AddrPort, discoKeys [2]key.Dis } } -func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, ep *endpoint) { +func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, wlb endpointWithLastBest) { // TODO(jwhited): introduce client metrics counters for notable failures defer wg.Done() - remoteDisco := ep.disco.Load() + remoteDisco := wlb.ep.disco.Load() if remoteDisco == nil { return } firstTry := true for { - se, err := doAllocate(ctx, server, [2]key.DiscoPublic{ep.c.discoPublic, remoteDisco.key}) + se, err := doAllocate(ctx, server, [2]key.DiscoPublic{wlb.ep.c.discoPublic, remoteDisco.key}) if err == nil { relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - ep: ep, + wlb: wlb, se: se, server: server, // we allocated this endpoint (vs CallMeMaybeVia reception), mark it as such }) return } - ep.c.logf("[v1] magicsock: relayManager: error allocating endpoint on %v for %v: %v", server, ep.discoShort(), err) + wlb.ep.c.logf("[v1] magicsock: relayManager: error allocating endpoint on %v for %v: %v", server, wlb.ep.discoShort(), err) var notReady errNotReady if firstTry && errors.As(err, ¬Ready) { select { diff --git a/wgengine/magicsock/relaymanager_test.go 
b/wgengine/magicsock/relaymanager_test.go index de282b4990637..8feff2f3d5ca8 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -14,7 +14,7 @@ import ( func TestRelayManagerInitAndIdle(t *testing.T) { rm := relayManager{} - rm.allocateAndHandshakeAllServers(&endpoint{}) + rm.startUDPRelayPathDiscoveryFor(&endpoint{}, addrQuality{}, false) <-rm.runLoopStoppedCh rm = relayManager{} @@ -22,7 +22,7 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, &disco.CallMeMaybeVia{ServerDisco: key.NewDisco().Public()}) + rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, addrQuality{}, false, &disco.CallMeMaybeVia{ServerDisco: key.NewDisco().Public()}) <-rm.runLoopStoppedCh rm = relayManager{} From 6a9bf9172b6fa6dc645b5ea960b98014f389533d Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 30 Jun 2025 13:43:16 -0500 Subject: [PATCH 0046/1093] ipn/ipnlocal: add verbose Taildrive logging on client side This allows logging the following Taildrive behavior from the client's perspective when --verbose=1: - Initialization of Taildrive remotes for every peer - Peer availability checks - All HTTP requests to peers (not just GET and PUT) Updates tailscale/corp#29702 Signed-off-by: Percy Wegmann --- ipn/ipnlocal/drive.go | 6 ++++++ ipn/ipnlocal/local.go | 35 +++++++++++++++++++++-------------- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 6a6f9bcd2b24a..8c2f339bb271a 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -306,10 +306,12 @@ func (b *LocalBackend) updateDrivePeersLocked(nm *netmap.NetworkMap) { } func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Remote { + b.logf("[v1] taildrive: setting up drive remotes from peers") driveRemotes := make([]*drive.Remote, 0, len(nm.Peers)) 
for _, p := range nm.Peers { peerID := p.ID() url := fmt.Sprintf("%s/%s", peerAPIBase(nm, p), taildrivePrefix[1:]) + b.logf("[v1] taildrive: appending remote for peer %d: %s", peerID, url) driveRemotes = append(driveRemotes, &drive.Remote{ Name: p.DisplayName(false), URL: url, @@ -320,6 +322,7 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem cn := b.currentNode() peer, ok := cn.NodeByID(peerID) if !ok { + b.logf("[v1] taildrive: Available(): peer %d not found", peerID) return false } @@ -332,14 +335,17 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // The netmap.Peers slice is not updated in all cases. // It should be fixed now that we use PeerByIDOk. if !peer.Online().Get() { + b.logf("[v1] taildrive: Available(): peer %d offline", peerID) return false } // Check that the peer is allowed to share with us. if cn.PeerHasCap(peer, tailcfg.PeerCapabilityTaildriveSharer) { + b.logf("[v1] taildrive: Available(): peer %d available", peerID) return true } + b.logf("[v1] taildrive: Available(): peer %d not allowed to share", peerID) return false }, }) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9cec088f1f28b..29d09400b0d8d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1459,7 +1459,7 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi } n, ok = cn.NodeByID(nid) if !ok { - return zero, u, false + return zero, u, false } up, ok := cn.UserByID(n.User()) if !ok { @@ -5960,6 +5960,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { // the number of bytesRead. type responseBodyWrapper struct { io.ReadCloser + logVerbose bool bytesRx int64 bytesTx int64 log logger.Logf @@ -5981,8 +5982,22 @@ func (rbw *responseBodyWrapper) logAccess(err string) { // Some operating systems create and copy lots of 0 length hidden files for // tracking various states. Omit these to keep logs from being too verbose. 
- if rbw.contentLength > 0 { - rbw.log("taildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", rbw.method, rbw.selfNodeKey, rbw.shareNodeKey, rbw.statusCode, rbw.fileExtension, rbw.contentType, roundTraffic(rbw.contentLength), roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) + if rbw.logVerbose || rbw.contentLength > 0 { + levelPrefix := "" + if rbw.logVerbose { + levelPrefix = "[v1] " + } + rbw.log( + "%staildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", + levelPrefix, + rbw.method, + rbw.selfNodeKey, + rbw.shareNodeKey, + rbw.statusCode, + rbw.fileExtension, + rbw.contentType, + roundTraffic(rbw.contentLength), + roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) } } @@ -6037,17 +6052,8 @@ func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err defer func() { contentType := "unknown" - switch req.Method { - case httpm.PUT: - if ct := req.Header.Get("Content-Type"); ct != "" { - contentType = ct - } - case httpm.GET: - if ct := resp.Header.Get("Content-Type"); ct != "" { - contentType = ct - } - default: - return + if ct := req.Header.Get("Content-Type"); ct != "" { + contentType = ct } dt.b.mu.Lock() @@ -6061,6 +6067,7 @@ func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err rbw := responseBodyWrapper{ log: dt.b.logf, + logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level method: req.Method, bytesTx: int64(bw.bytesRead), selfNodeKey: selfNodeKey, From 454d856be853c713e5e916f13f75cf183de2c94e Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 1 Jul 2025 09:03:54 -0500 Subject: [PATCH 0047/1093] drive,ipn/ipnlocal: calculate peer taildrive URLs on-demand Instead of calculating the PeerAPI URL at the time that we add the peer, we now calculate it on every access to the peer. 
This way, if we initially did not have a shared address family with the peer, but later do, this allows us to access the peer at that point. This follows the pattern from other places where we access the peer API, which also calculate the URL on an as-needed basis. Additionally, we now show peers as not Available when we can't get a peer API URL. Lastly, this moves some of the more frequent verbose Taildrive logging from [v1] to [v2] level. Updates #29702 Signed-off-by: Percy Wegmann --- drive/driveimpl/drive_test.go | 2 +- drive/driveimpl/local_impl.go | 2 +- drive/local.go | 2 +- ipn/ipnlocal/drive.go | 27 +++++++++++++++++++-------- 4 files changed, 22 insertions(+), 11 deletions(-) diff --git a/drive/driveimpl/drive_test.go b/drive/driveimpl/drive_test.go index e7dd832918cec..cff55fbb2c858 100644 --- a/drive/driveimpl/drive_test.go +++ b/drive/driveimpl/drive_test.go @@ -524,7 +524,7 @@ func (s *system) addRemote(name string) string { for name, r := range s.remotes { remotes = append(remotes, &drive.Remote{ Name: name, - URL: fmt.Sprintf("http://%s", r.l.Addr()), + URL: func() string { return fmt.Sprintf("http://%s", r.l.Addr()) }, }) } s.local.fs.SetRemotes( diff --git a/drive/driveimpl/local_impl.go b/drive/driveimpl/local_impl.go index 8cdf60179aa0b..871d033431038 100644 --- a/drive/driveimpl/local_impl.go +++ b/drive/driveimpl/local_impl.go @@ -81,7 +81,7 @@ func (s *FileSystemForLocal) SetRemotes(domain string, remotes []*drive.Remote, Name: remote.Name, Available: remote.Available, }, - BaseURL: func() (string, error) { return remote.URL, nil }, + BaseURL: func() (string, error) { return remote.URL(), nil }, Transport: transport, }) } diff --git a/drive/local.go b/drive/local.go index aff79a57bd9b2..052efb3f97ecf 100644 --- a/drive/local.go +++ b/drive/local.go @@ -17,7 +17,7 @@ import ( // Remote represents a remote Taildrive node. 
type Remote struct { Name string - URL string + URL func() string Available func() bool } diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 8c2f339bb271a..d77481903fc09 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -309,20 +309,26 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem b.logf("[v1] taildrive: setting up drive remotes from peers") driveRemotes := make([]*drive.Remote, 0, len(nm.Peers)) for _, p := range nm.Peers { - peerID := p.ID() - url := fmt.Sprintf("%s/%s", peerAPIBase(nm, p), taildrivePrefix[1:]) - b.logf("[v1] taildrive: appending remote for peer %d: %s", peerID, url) + peer := p + peerID := peer.ID() + peerKey := peer.Key().ShortString() + b.logf("[v1] taildrive: appending remote for peer %s", peerKey) driveRemotes = append(driveRemotes, &drive.Remote{ Name: p.DisplayName(false), - URL: url, + URL: func() string { + url := fmt.Sprintf("%s/%s", b.currentNode().PeerAPIBase(peer), taildrivePrefix[1:]) + b.logf("[v2] taildrive: url for peer %s: %s", peerKey, url) + return url + }, Available: func() bool { // Peers are available to Taildrive if: // - They are online + // - Their PeerAPI is reachable // - They are allowed to share at least one folder with us cn := b.currentNode() peer, ok := cn.NodeByID(peerID) if !ok { - b.logf("[v1] taildrive: Available(): peer %d not found", peerID) + b.logf("[v2] taildrive: Available(): peer %s not found", peerKey) return false } @@ -335,17 +341,22 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem // The netmap.Peers slice is not updated in all cases. // It should be fixed now that we use PeerByIDOk. 
if !peer.Online().Get() { - b.logf("[v1] taildrive: Available(): peer %d offline", peerID) + b.logf("[v2] taildrive: Available(): peer %s offline", peerKey) + return false + } + + if b.currentNode().PeerAPIBase(peer) == "" { + b.logf("[v2] taildrive: Available(): peer %s PeerAPI unreachable", peerKey) return false } // Check that the peer is allowed to share with us. if cn.PeerHasCap(peer, tailcfg.PeerCapabilityTaildriveSharer) { - b.logf("[v1] taildrive: Available(): peer %d available", peerID) + b.logf("[v2] taildrive: Available(): peer %s available", peerKey) return true } - b.logf("[v1] taildrive: Available(): peer %d not allowed to share", peerID) + b.logf("[v2] taildrive: Available(): peer %s not allowed to share", peerKey) return false }, }) From d15b2312c4fb7b8ea1f98c5c80f7f72aed784b5d Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Tue, 1 Jul 2025 09:28:48 -0700 Subject: [PATCH 0048/1093] tailcfg: add CapabilityOwner (#16426) We would like to start sending whether a node is a Tailnet owner in netmap responses so that clients can determine what information to display to a user who wants to request account deletion. 
Updates tailscale/corp#30016 Signed-off-by: kari-ts --- ipn/ipnlocal/local_test.go | 14 ++++++++++++++ tailcfg/tailcfg.go | 1 + 2 files changed, 15 insertions(+) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 16dbef62a4190..47e5fa37d11cc 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -826,10 +826,21 @@ func TestStatusPeerCapabilities(t *testing.T) { tailcfg.CapabilityAdmin: {`{"test": "true}`}, }), }).View(), + (&tailcfg.Node{ + ID: 3, + StableID: "baz", + Key: makeNodeKeyFromID(3), + Hostinfo: (&tailcfg.Hostinfo{}).View(), + Capabilities: []tailcfg.NodeCapability{tailcfg.CapabilityOwner}, + CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + tailcfg.CapabilityOwner: nil, + }), + }).View(), }, expectedPeerCapabilities: map[tailcfg.StableNodeID][]tailcfg.NodeCapability{ tailcfg.StableNodeID("foo"): {tailcfg.CapabilitySSH}, tailcfg.StableNodeID("bar"): {tailcfg.CapabilityAdmin}, + tailcfg.StableNodeID("baz"): {tailcfg.CapabilityOwner}, }, expectedPeerCapMap: map[tailcfg.StableNodeID]tailcfg.NodeCapMap{ tailcfg.StableNodeID("foo"): (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ @@ -838,6 +849,9 @@ func TestStatusPeerCapabilities(t *testing.T) { tailcfg.StableNodeID("bar"): (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ tailcfg.CapabilityAdmin: {`{"test": "true}`}, }), + tailcfg.StableNodeID("baz"): (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + tailcfg.CapabilityOwner: nil, + }), }, }, { diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index fb7d54c388619..4b1217d4e9596 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2367,6 +2367,7 @@ type NodeCapability string const ( CapabilityFileSharing NodeCapability = "https://tailscale.com/cap/file-sharing" CapabilityAdmin NodeCapability = "https://tailscale.com/cap/is-admin" + CapabilityOwner NodeCapability = "https://tailscale.com/cap/is-owner" 
CapabilitySSH NodeCapability = "https://tailscale.com/cap/ssh" // feature enabled/available CapabilitySSHRuleIn NodeCapability = "https://tailscale.com/cap/ssh-rule-in" // some SSH rule reach this node CapabilityDataPlaneAuditLogs NodeCapability = "https://tailscale.com/cap/data-plane-audit-logs" // feature enabled From d2edf7133a078880995deb184ae66211efb07b34 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 2 Jul 2025 09:23:54 -0700 Subject: [PATCH 0049/1093] wgengine/magicsock: remove references to rucPtr (#16441) It used to be a **RebindingUDPConn, now it's just a *RebindingUDPConn. Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 0933c5be251a8..89111b7a0485f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3197,9 +3197,9 @@ func (c *Conn) listenPacket(network string, port uint16) (nettype.PacketConn, er return nettype.MakePacketListenerWithNetIP(netns.Listener(c.logf, c.netMon)).ListenPacket(ctx, network, addr) } -// bindSocket initializes rucPtr if necessary and binds a UDP socket to it. +// bindSocket binds a UDP socket to ruc. // Network indicates the UDP socket type; it must be "udp4" or "udp6". -// If rucPtr had an existing UDP socket bound, it closes that socket. +// If ruc had an existing UDP socket bound, it closes that socket. // The caller is responsible for informing the portMapper of any changes. // If curPortFate is set to dropCurrentPort, no attempt is made to reuse // the current port. From 172e26b3e3cf70455161609379da1820f6065f77 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 2 Jul 2025 10:52:00 -0700 Subject: [PATCH 0050/1093] tailcfg: report StateEncrypted in Hostinfo (#16434) Report whether the client is configured with state encryption (which varies by platform and can be optional on some). 
Wire it up to `--encrypt-state` in tailscaled, which is set for Linux/Windows, and set defaults for other platforms. Macsys will also report this if full Keychain migration is done. Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/tpm.go | 2 ++ ipn/ipnlocal/local.go | 27 +++++++++++++++++++++++++++ ipn/store.go | 6 ++++++ tailcfg/tailcfg.go | 9 ++++++++- tailcfg/tailcfg_clone.go | 1 + tailcfg/tailcfg_test.go | 1 + tailcfg/tailcfg_view.go | 2 ++ 7 files changed, 47 insertions(+), 1 deletion(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 5ec084effa27d..9499ed02a8b2f 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -159,6 +159,8 @@ func newStore(logf logger.Logf, path string) (ipn.StateStore, error) { // tpmStore is an ipn.StateStore that stores the state in a secretbox-encrypted // file using a TPM-sealed symmetric key. type tpmStore struct { + ipn.EncryptedStateStore + logf logger.Logf path string key [32]byte diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 29d09400b0d8d..9c16d55af45f6 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2244,6 +2244,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { hostinfo.Userspace.Set(b.sys.IsNetstack()) hostinfo.UserspaceRouter.Set(b.sys.IsNetstackRouter()) hostinfo.AppConnector.Set(b.appConnector != nil) + hostinfo.StateEncrypted = b.stateEncrypted() b.logf.JSON(1, "Hostinfo", hostinfo) // TODO(apenwarr): avoid the need to reinit controlclient. 
@@ -7801,3 +7802,29 @@ func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcf var ( metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") ) + +func (b *LocalBackend) stateEncrypted() opt.Bool { + switch runtime.GOOS { + case "android", "ios": + return opt.NewBool(true) + case "darwin": + switch { + case version.IsMacAppStore(): + return opt.NewBool(true) + case version.IsMacSysExt(): + // MacSys still stores its state in plaintext on disk in addition to + // the Keychain. A future release will clean up the on-disk state + // files. + // TODO(#15830): always return true here once MacSys is fully migrated. + sp, _ := syspolicy.GetBoolean(syspolicy.EncryptState, false) + return opt.NewBool(sp) + default: + // Probably self-compiled tailscaled, we don't use the Keychain + // there. + return opt.NewBool(false) + } + default: + _, ok := b.store.(ipn.EncryptedStateStore) + return opt.NewBool(ok) + } +} diff --git a/ipn/store.go b/ipn/store.go index 550aa8cba819a..9da5288c0d371 100644 --- a/ipn/store.go +++ b/ipn/store.go @@ -113,3 +113,9 @@ func ReadStoreInt(store StateStore, id StateKey) (int64, error) { func PutStoreInt(store StateStore, id StateKey, val int64) error { return WriteState(store, id, fmt.Appendf(nil, "%d", val)) } + +// EncryptedStateStore is a marker interface implemented by StateStores that +// encrypt data at rest. +type EncryptedStateStore interface { + stateStoreIsEncrypted() +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 4b1217d4e9596..10b157ac15642 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -162,7 +162,8 @@ type CapabilityVersion int // - 115: 2025-03-07: Client understands DERPRegion.NoMeasureNoHome. // - 116: 2025-05-05: Client serves MagicDNS "AAAA" if NodeAttrMagicDNSPeerAAAA set on self node // - 117: 2025-05-28: Client understands DisplayMessages (structured health messages), but not necessarily PrimaryAction. 
-const CurrentCapabilityVersion CapabilityVersion = 117 +// - 118: 2025-07-01: Client sends Hostinfo.StateEncrypted to report whether the state file is encrypted at rest (#15830) +const CurrentCapabilityVersion CapabilityVersion = 118 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -878,6 +879,12 @@ type Hostinfo struct { Location *Location `json:",omitempty"` TPM *TPMInfo `json:",omitempty"` // TPM device metadata, if available + // StateEncrypted reports whether the node state is stored encrypted on + // disk. The actual mechanism is platform-specific: + // * Apple nodes use the Keychain + // * Linux and Windows nodes use the TPM + // * Android apps use EncryptedSharedPreferences + StateEncrypted opt.Bool `json:",omitempty"` // NOTE: any new fields containing pointers in this type // require changes to Hostinfo.Equal. diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 2c7941d51d7e3..412e1f38d18bc 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -188,6 +188,7 @@ var _HostinfoCloneNeedsRegeneration = Hostinfo(struct { ServicesHash string Location *Location TPM *TPMInfo + StateEncrypted opt.Bool }{}) // Clone makes a deep copy of NetInfo. 
diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 60e86794a195c..e8e86cdb139bd 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -69,6 +69,7 @@ func TestHostinfoEqual(t *testing.T) { "ServicesHash", "Location", "TPM", + "StateEncrypted", } if have := fieldsOf(reflect.TypeFor[Hostinfo]()); !reflect.DeepEqual(have, hiHandles) { t.Errorf("Hostinfo.Equal check might be out of sync\nfields: %q\nhandled: %q\n", diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index c76654887f8ab..7e82cd871c64a 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -303,6 +303,7 @@ func (v HostinfoView) ServicesHash() string { return v.ж.Serv func (v HostinfoView) Location() LocationView { return v.ж.Location.View() } func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.ж.TPM) } +func (v HostinfoView) StateEncrypted() opt.Bool { return v.ж.StateEncrypted } func (v HostinfoView) Equal(v2 HostinfoView) bool { return v.ж.Equal(v2.ж) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -346,6 +347,7 @@ var _HostinfoViewNeedsRegeneration = Hostinfo(struct { ServicesHash string Location *Location TPM *TPMInfo + StateEncrypted opt.Bool }{}) // View returns a read-only view of NetInfo. From f9e7131772ffc85016921fe099791ffb467cc681 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 2 Jul 2025 13:27:30 -0700 Subject: [PATCH 0051/1093] wgengine/magicsock: make lazyEndpoint load bearing for UDP relay (#16435) Cryptokey Routing identification is now required to set an [epAddr] into the peerMap for Geneve-encapsulated [epAddr]s. 
Updates tailscale/corp#27502 Updates tailscale/corp#29422 Updates tailscale/corp#30042 Signed-off-by: Jordan Whited --- go.mod | 2 +- go.sum | 4 ++-- wgengine/magicsock/endpoint.go | 1 - wgengine/magicsock/magicsock.go | 28 ++++++++++++++++++++++++---- 4 files changed, 27 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 0d031d0baa6c2..5bf04fedaba2e 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f + github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003 github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index 6f44cd86eb068..f9910bb59bb4d 100644 --- a/go.sum +++ b/go.sum @@ -975,8 +975,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f h1:vg3PmQdq1BbB2V81iC1VBICQtfwbVGZ/4A/p7QKXTK0= -github.com/tailscale/wireguard-go v0.0.0-20250530210235-65cd6eed7d7f/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003 h1:chIzUDKxR0nXQQra0j41aqiiFNICs0FIC5ZCwDO7z3k= +github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e 
h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index af4666665b0fb..0569341ff4ab3 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -126,7 +126,6 @@ func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBest.epAddr, maybeBest.wireMTU) de.setBestAddrLocked(maybeBest) de.trustBestAddrUntil = mono.Now().Add(trustUDPAddrDuration) - de.c.peerMap.setNodeKeyForEpAddr(maybeBest.epAddr, de.publicKey) } func (de *endpoint) setBestAddrLocked(v addrQuality) { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 89111b7a0485f..174345a84ac87 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1695,8 +1695,13 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach c.mu.Unlock() if !ok { if c.controlKnobs != nil && c.controlKnobs.DisableCryptorouting.Load() { + // Note: UDP relay is dependent on cryptorouting enablement. We + // only update Geneve-encapsulated [epAddr]s in the [peerMap] + // via [lazyEndpoint]. return nil, 0, false } + // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() + // for the same batch & [epAddr] src. return &lazyEndpoint{c: c, src: src}, size, true } cache.epAddr = src @@ -1704,6 +1709,8 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach cache.gen = de.numStopAndReset() ep = de } + // TODO(jwhited): consider the implications of not recording this receive + // activity due to an early [lazyEndpoint] return above. 
now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) ep.noteRecvActivity(src, now) @@ -3793,14 +3800,27 @@ func (le *lazyEndpoint) DstIP() netip.Addr { return netip.Addr{} } func (le *lazyEndpoint) SrcToString() string { return le.src.String() } func (le *lazyEndpoint) DstToString() string { return "dst" } func (le *lazyEndpoint) DstToBytes() []byte { return nil } -func (le *lazyEndpoint) GetPeerEndpoint(peerPublicKey [32]byte) conn.Endpoint { + +// FromPeer implements [conn.PeerAwareEndpoint]. We return a [*lazyEndpoint] in +// our [conn.ReceiveFunc]s when we are unable to identify the peer at WireGuard +// packet reception time, pre-decryption. If wireguard-go successfully decrypts +// the packet it calls us here, and we update our [peerMap] in order to +// associate le.src with peerPublicKey. +func (le *lazyEndpoint) FromPeer(peerPublicKey [32]byte) { pubKey := key.NodePublicFromRaw32(mem.B(peerPublicKey[:])) le.c.mu.Lock() defer le.c.mu.Unlock() ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) if !ok { - return nil + return } - le.c.logf("magicsock: lazyEndpoint.GetPeerEndpoint(%v) found: %v", pubKey.ShortString(), ep.nodeAddr) - return ep + // TODO(jwhited): Consider [lazyEndpoint] effectiveness as a means to make + // this the sole call site for setNodeKeyForEpAddr. If this is the sole + // call site, and we always update the mapping based on successful + // Cryptokey Routing identification events, then we can go ahead and make + // [epAddr]s singular per peer (like they are for Geneve-encapsulated ones + // already). 
+ // See http://go/corp/29422 & http://go/corp/30042 + le.c.peerMap.setNodeKeyForEpAddr(le.src, pubKey) + le.c.logf("magicsock: lazyEndpoint.FromPeer(%v) setting epAddr(%v) in peerMap for node(%v)", pubKey.ShortString(), le.src, ep.nodeAddr) } From eb03d42fe60acce0e7efacc3a026b26bfb56897c Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 2 Jul 2025 21:42:31 +0100 Subject: [PATCH 0052/1093] cmd/k8s-operator: Allow configuration of login server (#16432) This commit modifies the kubernetes operator to allow for customisation of the tailscale login url. This provides some data locality for people that want to configure it. This value is set in the `loginServer` helm value and is propagated down to all resources managed by the operator. The only exception to this is recorder nodes, where additional changes are required to support modifying the url. Updates https://github.com/tailscale/corp/issues/29847 Signed-off-by: David Bond --- cmd/k8s-operator/connector.go | 5 +++-- .../deploy/chart/templates/deployment.yaml | 2 ++ cmd/k8s-operator/deploy/chart/values.yaml | 3 +++ cmd/k8s-operator/deploy/manifests/operator.yaml | 2 ++ cmd/k8s-operator/ingress.go | 2 ++ cmd/k8s-operator/operator.go | 11 ++++++++--- cmd/k8s-operator/proxygroup.go | 10 ++++++++-- cmd/k8s-operator/sts.go | 9 +++++++++ cmd/k8s-operator/svc.go | 2 ++ cmd/k8s-operator/tsclient.go | 14 +++++++++++--- 10 files changed, 50 insertions(+), 10 deletions(-) diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index c243036cbabd9..8406a1156fc8f 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -7,6 +7,7 @@ package main import ( "context" + "errors" "fmt" "net/netip" "slices" @@ -14,8 +15,6 @@ import ( "sync" "time" - "errors" - "go.uber.org/zap" xslices "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" @@ -26,6 +25,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + 
tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -199,6 +199,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge }, ProxyClassName: proxyClass, proxyType: proxyTypeConnector, + LoginServer: a.ssr.loginServer, } if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 { diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 1b9b97186b6ca..8deba7dab0139 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -68,6 +68,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: OPERATOR_LOGIN_SERVER + value: {{ .Values.operatorConfig.loginServer }} - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index 2d1effc255dc5..af941425a5006 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -72,6 +72,9 @@ operatorConfig: # - name: EXTRA_VAR2 # value: "value2" + # URL of the control plane to be used by all resources managed by the operator. 
+ loginServer: "" + # In the case that you already have a tailscale ingressclass in your cluster (or vcluster), you can disable the creation here ingressClass: enabled: true diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index fa18a5debeaa9..4f1faf104cfc6 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -5124,6 +5124,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: OPERATOR_LOGIN_SERVER + value: null - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 5058fd6dda8fc..d6277093824fb 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/ipn" "tailscale.com/kube/kubetypes" "tailscale.com/types/opt" @@ -219,6 +220,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga ChildResourceLabels: crl, ProxyClassName: proxyClass, proxyType: proxyTypeIngressResource, + LoginServer: a.ssr.loginServer, } if val := ing.GetAnnotations()[AnnotationExperimentalForwardClusterTrafficViaL7IngresProxy]; val == "true" { diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index b33dcd114d32b..e5f7d932cc876 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -43,6 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/hostinfo" @@ -144,18 +145,20 @@ func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, tsClient) { hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator") kubeSecret = 
defaultEnv("OPERATOR_SECRET", "") operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator") + loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") ) startlog := zlog.Named("startup") if clientIDPath == "" || clientSecretPath == "" { startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") } - tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath) + tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath, loginServer) if err != nil { startlog.Fatalf("error creating Tailscale client: %v", err) } s := &tsnet.Server{ - Hostname: hostname, - Logf: zlog.Named("tailscaled").Debugf, + Hostname: hostname, + Logf: zlog.Named("tailscaled").Debugf, + ControlURL: loginServer, } if p := os.Getenv("TS_PORT"); p != "" { port, err := strconv.ParseUint(p, 10, 16) @@ -307,6 +310,7 @@ func runReconcilers(opts reconcilerOpts) { proxyImage: opts.proxyImage, proxyPriorityClassName: opts.proxyPriorityClassName, tsFirewallMode: opts.proxyFirewallMode, + loginServer: opts.tsServer.ControlURL, } err = builder. 
@@ -639,6 +643,7 @@ func runReconcilers(opts reconcilerOpts) { defaultTags: strings.Split(opts.proxyTags, ","), tsFirewallMode: opts.proxyFirewallMode, defaultProxyClass: opts.defaultProxyClass, + loginServer: opts.tsServer.ControlURL, }) if err != nil { startlog.Fatalf("could not create ProxyGroup reconciler: %v", err) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index bedf06ba0ac28..1b622c920d22d 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -29,6 +29,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" @@ -84,6 +85,7 @@ type ProxyGroupReconciler struct { defaultTags []string tsFirewallMode string defaultProxyClass string + loginServer string mu sync.Mutex // protects following egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge @@ -709,7 +711,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p return nil, err } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[replicaName], existingAdvertiseServices) + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[replicaName], existingAdvertiseServices, r.loginServer) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) } @@ -859,7 +861,7 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) } -func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string) (tailscaledConfigs, error) { +func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string, loginServer 
string) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", @@ -870,6 +872,10 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, a AuthKey: authKey, } + if loginServer != "" { + conf.ServerURL = &loginServer + } + if pg.Spec.HostnamePrefix != "" { conf.Hostname = ptr.To(fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, idx)) } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index a943ae97179a1..193acad87ff0e 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -27,6 +27,7 @@ import ( "k8s.io/apiserver/pkg/storage/names" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" + "tailscale.com/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" @@ -138,6 +139,9 @@ type tailscaleSTSConfig struct { ProxyClassName string // name of ProxyClass if one needs to be applied to the proxy ProxyClass *tsapi.ProxyClass // ProxyClass that needs to be applied to the proxy (if there is one) + + // LoginServer denotes the URL of the control plane that should be used by the proxy. 
+ LoginServer string } type connector struct { @@ -162,6 +166,7 @@ type tailscaleSTSReconciler struct { proxyImage string proxyPriorityClassName string tsFirewallMode string + loginServer string } func (sts tailscaleSTSReconciler) validate() error { @@ -910,6 +915,10 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, } + if stsC.LoginServer != "" { + conf.ServerURL = &stsC.LoginServer + } + if stsC.Connector != nil { routes, err := netutil.CalcAdvertiseRoutes(stsC.Connector.routes, stsC.Connector.isExitNode) if err != nil { diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index f8c9af23990e9..52c8bec7ff32a 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -23,6 +23,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -270,6 +271,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga Tags: tags, ChildResourceLabels: crl, ProxyClassName: proxyClass, + LoginServer: a.ssr.loginServer, } sts.proxyType = proxyTypeEgress if a.shouldExpose(svc) { diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index f49f84af96ed4..a94d55afed604 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -12,6 +12,7 @@ import ( "golang.org/x/oauth2/clientcredentials" "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" "tailscale.com/tailcfg" ) @@ -19,10 +20,9 @@ import ( // call should be performed on the default tailnet for the provided credentials. 
const ( defaultTailnet = "-" - defaultBaseURL = "https://api.tailscale.com" ) -func newTSClient(ctx context.Context, clientIDPath, clientSecretPath string) (tsClient, error) { +func newTSClient(ctx context.Context, clientIDPath, clientSecretPath, loginServer string) (tsClient, error) { clientID, err := os.ReadFile(clientIDPath) if err != nil { return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) @@ -31,14 +31,22 @@ func newTSClient(ctx context.Context, clientIDPath, clientSecretPath string) (ts if err != nil { return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err) } + const tokenURLPath = "/api/v2/oauth/token" + tokenURL := fmt.Sprintf("%s%s", ipn.DefaultControlURL, tokenURLPath) + if loginServer != "" { + tokenURL = fmt.Sprintf("%s%s", loginServer, tokenURLPath) + } credentials := clientcredentials.Config{ ClientID: string(clientID), ClientSecret: string(clientSecret), - TokenURL: "https://login.tailscale.com/api/v2/oauth/token", + TokenURL: tokenURL, } c := tailscale.NewClient(defaultTailnet, nil) c.UserAgent = "tailscale-k8s-operator" c.HTTPClient = credentials.Client(ctx) + if loginServer != "" { + c.BaseURL = loginServer + } return c, nil } From 77d19604f449ac65092e232c93d28f9e686df161 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 2 Jul 2025 14:32:21 -0700 Subject: [PATCH 0053/1093] derp/derphttp: fix DERP TLS client server name inclusion in URL form When dialed with just an URL and no node, the recent proxy fixes caused a regression where there was no TLS server name being included. 
Updates #16222 Updates #16223 Signed-off-by: James Tucker Co-Authored-by: Jordan Whited --- derp/derphttp/derphttp_client.go | 4 +++- derp/derphttp/derphttp_test.go | 36 ++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 7385f0ad1b46f..704b8175d07c6 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -648,12 +648,14 @@ func (c *Client) dialRegion(ctx context.Context, reg *tailcfg.DERPRegion) (net.C func (c *Client) tlsClient(nc net.Conn, node *tailcfg.DERPNode) *tls.Conn { tlsConf := tlsdial.Config(c.HealthTracker, c.TLSConfig) + // node is allowed to be nil here, tlsServerName falls back to using the URL + // if node is nil. + tlsConf.ServerName = c.tlsServerName(node) if node != nil { if node.InsecureForTests { tlsConf.InsecureSkipVerify = true tlsConf.VerifyConnection = nil } - tlsConf.ServerName = c.tlsServerName(node) if node.CertName != "" { if suf, ok := strings.CutPrefix(node.CertName, "sha256-raw:"); ok { tlsdial.SetConfigExpectedCertHash(tlsConf, suf) diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 7f0a7e3334abf..bb33e60232357 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -590,3 +590,39 @@ func TestManualDial(t *testing.T) { t.Fatalf("rc.Connect: %v", err) } } + +func TestURLDial(t *testing.T) { + if !*liveNetworkTest { + t.Skip("skipping live network test without --live-net-tests") + } + dm := &tailcfg.DERPMap{} + res, err := http.Get("https://controlplane.tailscale.com/derpmap/default") + if err != nil { + t.Fatalf("fetching DERPMap: %v", err) + } + defer res.Body.Close() + if err := json.NewDecoder(res.Body).Decode(dm); err != nil { + t.Fatalf("decoding DERPMap: %v", err) + } + + // find a valid target DERP host to test against + var hostname string + for _, reg := range dm.Regions { + for _, node := range reg.Nodes { + if 
!node.STUNOnly && node.CanPort80 && node.CertName == "" || node.CertName == node.HostName { + hostname = node.HostName + break + } + } + if hostname != "" { + break + } + } + netMon := netmon.NewStatic() + c, err := NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) + defer c.Close() + + if err := c.Connect(context.Background()); err != nil { + t.Fatalf("rc.Connect: %v", err) + } +} From 3a4b439c62ba30f882e50a08ae4b93f087501847 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 2 Jul 2025 20:38:39 -0700 Subject: [PATCH 0054/1093] feature/relayserver,net/udprelay: add IPv6 support (#16442) Updates tailscale/corp#27502 Updates tailscale/corp#30043 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 2 +- net/udprelay/server.go | 133 +++++++++++++++++-------- net/udprelay/server_test.go | 154 ++++++++++++++++------------- 3 files changed, 178 insertions(+), 111 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 4634f3ac27151..5a82a9d117bd7 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -137,7 +137,7 @@ func (e *extension) relayServerOrInit() (relayServer, error) { return nil, errors.New("TAILSCALE_USE_WIP_CODE envvar is not set") } var err error - e.server, _, err = udprelay.NewServer(e.logf, *e.port, nil) + e.server, err = udprelay.NewServer(e.logf, *e.port, nil) if err != nil { return nil, err } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index e32f8917c520c..d2661e59feba4 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -57,7 +57,10 @@ type Server struct { bindLifetime time.Duration steadyStateLifetime time.Duration bus *eventbus.Bus - uc *net.UDPConn + uc4 *net.UDPConn // always non-nil + uc4Port uint16 // always nonzero + uc6 *net.UDPConn // may be nil if IPv6 bind fails during initialization + uc6Port uint16 // may be zero if IPv6 bind fails during initialization closeOnce sync.Once wg 
sync.WaitGroup closeCh chan struct{} @@ -278,13 +281,11 @@ func (e *serverEndpoint) isBound() bool { e.boundAddrPorts[1].IsValid() } -// NewServer constructs a [Server] listening on 0.0.0.0:'port'. IPv6 is not yet -// supported. Port may be 0, and what ultimately gets bound is returned as -// 'boundPort'. If len(overrideAddrs) > 0 these will be used in place of dynamic -// discovery, which is useful to override in tests. -// -// TODO: IPv6 support -func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Server, boundPort uint16, err error) { +// NewServer constructs a [Server] listening on port. If port is zero, then +// port selection is left up to the host networking stack. If +// len(overrideAddrs) > 0 these will be used in place of dynamic discovery, +// which is useful to override in tests. +func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Server, err error) { s = &Server{ logf: logger.WithPrefix(logf, "relayserver"), disco: key.NewDisco(), @@ -306,30 +307,36 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve s.bus = bus netMon, err := netmon.New(s.bus, logf) if err != nil { - return nil, 0, err + return nil, err } s.netChecker = &netcheck.Client{ NetMon: netMon, Logf: logger.WithPrefix(logf, "relayserver: netcheck:"), SendPacket: func(b []byte, addrPort netip.AddrPort) (int, error) { - return s.uc.WriteToUDPAddrPort(b, addrPort) + if addrPort.Addr().Is4() { + return s.uc4.WriteToUDPAddrPort(b, addrPort) + } else if s.uc6 != nil { + return s.uc6.WriteToUDPAddrPort(b, addrPort) + } else { + return 0, errors.New("IPv6 socket is not bound") + } }, } - boundPort, err = s.listenOn(port) + err = s.listenOn(port) if err != nil { - return nil, 0, err + return nil, err } - s.wg.Add(1) - go s.packetReadLoop() - s.wg.Add(1) - go s.endpointGCLoop() if len(overrideAddrs) > 0 { addrPorts := make(set.Set[netip.AddrPort], len(overrideAddrs)) for _, addr := range overrideAddrs { if addr.IsValid() { - 
addrPorts.Add(netip.AddrPortFrom(addr, boundPort)) + if addr.Is4() { + addrPorts.Add(netip.AddrPortFrom(addr, s.uc4Port)) + } else if s.uc6 != nil { + addrPorts.Add(netip.AddrPortFrom(addr, s.uc6Port)) + } } } s.addrPorts = addrPorts.Slice() @@ -337,7 +344,17 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve s.wg.Add(1) go s.addrDiscoveryLoop() } - return s, boundPort, nil + + s.wg.Add(1) + go s.packetReadLoop(s.uc4) + if s.uc6 != nil { + s.wg.Add(1) + go s.packetReadLoop(s.uc6) + } + s.wg.Add(1) + go s.endpointGCLoop() + + return s, nil } func (s *Server) addrDiscoveryLoop() { @@ -351,14 +368,17 @@ func (s *Server) addrDiscoveryLoop() { addrPorts.Make() // get local addresses - localPort := s.uc.LocalAddr().(*net.UDPAddr).Port ips, _, err := netmon.LocalAddresses() if err != nil { return nil, err } for _, ip := range ips { if ip.IsValid() { - addrPorts.Add(netip.AddrPortFrom(ip, uint16(localPort))) + if ip.Is4() { + addrPorts.Add(netip.AddrPortFrom(ip, s.uc4Port)) + } else { + addrPorts.Add(netip.AddrPortFrom(ip, s.uc6Port)) + } } } @@ -413,24 +433,52 @@ func (s *Server) addrDiscoveryLoop() { } } -func (s *Server) listenOn(port int) (uint16, error) { - uc, err := net.ListenUDP("udp4", &net.UDPAddr{Port: port}) - if err != nil { - return 0, err - } - // TODO: set IP_PKTINFO sockopt - _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) - if err != nil { - s.uc.Close() - return 0, err - } - boundPort, err := strconv.ParseUint(boundPortStr, 10, 16) - if err != nil { - s.uc.Close() - return 0, err +// listenOn binds an IPv4 and IPv6 socket to port. We consider it successful if +// we manage to bind the IPv4 socket. +// +// The requested port may be zero, in which case port selection is left up to +// the host networking stack. We make no attempt to bind a consistent port +// across IPv4 and IPv6 if the requested port is zero. 
+// +// TODO: make these "re-bindable" in similar fashion to magicsock as a means to +// deal with EDR software closing them. http://go/corp/30118 +func (s *Server) listenOn(port int) error { + for _, network := range []string{"udp4", "udp6"} { + uc, err := net.ListenUDP(network, &net.UDPAddr{Port: port}) + if err != nil { + if network == "udp4" { + return err + } else { + s.logf("ignoring IPv6 bind failure: %v", err) + break + } + } + // TODO: set IP_PKTINFO sockopt + _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) + if err != nil { + uc.Close() + if s.uc4 != nil { + s.uc4.Close() + } + return err + } + portUint, err := strconv.ParseUint(boundPortStr, 10, 16) + if err != nil { + uc.Close() + if s.uc4 != nil { + s.uc4.Close() + } + return err + } + if network == "udp4" { + s.uc4 = uc + s.uc4Port = uint16(portUint) + } else { + s.uc6 = uc + s.uc6Port = uint16(portUint) + } } - s.uc = uc - return uint16(boundPort), nil + return nil } // Close closes the server. @@ -438,7 +486,10 @@ func (s *Server) Close() error { s.closeOnce.Do(func() { s.mu.Lock() defer s.mu.Unlock() - s.uc.Close() + s.uc4.Close() + if s.uc6 != nil { + s.uc6.Close() + } close(s.closeCh) s.wg.Wait() clear(s.byVNI) @@ -507,7 +558,7 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte, uw udpWriter) { e.handlePacket(from, gh, b, uw, s.discoPublic) } -func (s *Server) packetReadLoop() { +func (s *Server) packetReadLoop(uc *net.UDPConn) { defer func() { s.wg.Done() s.Close() @@ -515,11 +566,11 @@ func (s *Server) packetReadLoop() { b := make([]byte, 1<<16-1) for { // TODO: extract laddr from IP_PKTINFO for use in reply - n, from, err := s.uc.ReadFromUDPAddrPort(b) + n, from, err := uc.ReadFromUDPAddrPort(b) if err != nil { return } - s.handlePacket(from, b[:n], s.uc) + s.handlePacket(from, b[:n], uc) } } diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 3fcb9b8b198c2..8c0c5aff66027 100644 --- a/net/udprelay/server_test.go +++ 
b/net/udprelay/server_test.go @@ -29,7 +29,7 @@ type testClient struct { func newTestClient(t *testing.T, vni uint32, serverEndpoint netip.AddrPort, local key.DiscoPrivate, remote, server key.DiscoPublic) *testClient { rAddr := &net.UDPAddr{IP: serverEndpoint.Addr().AsSlice(), Port: int(serverEndpoint.Port())} - uc, err := net.DialUDP("udp4", nil, rAddr) + uc, err := net.DialUDP("udp", nil, rAddr) if err != nil { t.Fatal(err) } @@ -180,85 +180,101 @@ func TestServer(t *testing.T) { discoA := key.NewDisco() discoB := key.NewDisco() - ipv4LoopbackAddr := netip.MustParseAddr("127.0.0.1") - - server, _, err := NewServer(t.Logf, 0, []netip.Addr{ipv4LoopbackAddr}) - if err != nil { - t.Fatal(err) + cases := []struct { + name string + overrideAddrs []netip.Addr + }{ + { + name: "over ipv4", + overrideAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1")}, + }, + { + name: "over ipv6", + overrideAddrs: []netip.Addr{netip.MustParseAddr("::1")}, + }, } - defer server.Close() - endpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) - if err != nil { - t.Fatal(err) - } - dupEndpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) - if err != nil { - t.Fatal(err) - } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + server, err := NewServer(t.Logf, 0, tt.overrideAddrs) + if err != nil { + t.Fatal(err) + } + defer server.Close() - // We expect the same endpoint details pre-handshake. 
- if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { - t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) - } + endpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } + dupEndpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } - if len(endpoint.AddrPorts) != 1 { - t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) - } - tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) - defer tcA.close() - tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) - defer tcB.close() + // We expect the same endpoint details pre-handshake. + if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { + t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) + } - tcA.handshake(t) - tcB.handshake(t) + if len(endpoint.AddrPorts) != 1 { + t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) + } + tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) + defer tcA.close() + tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) + defer tcB.close() - dupEndpoint, err = server.AllocateEndpoint(discoA.Public(), discoB.Public()) - if err != nil { - t.Fatal(err) - } - // We expect the same endpoint details post-handshake. 
- if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { - t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) - } + tcA.handshake(t) + tcB.handshake(t) - txToB := []byte{1, 2, 3} - tcA.writeDataPkt(t, txToB) - rxFromA := tcB.readDataPkt(t) - if !bytes.Equal(txToB, rxFromA) { - t.Fatal("unexpected msg A->B") - } + dupEndpoint, err = server.AllocateEndpoint(discoA.Public(), discoB.Public()) + if err != nil { + t.Fatal(err) + } + // We expect the same endpoint details post-handshake. + if diff := cmp.Diff(dupEndpoint, endpoint, cmpopts.EquateComparable(netip.AddrPort{}, key.DiscoPublic{})); diff != "" { + t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) + } - txToA := []byte{4, 5, 6} - tcB.writeDataPkt(t, txToA) - rxFromB := tcA.readDataPkt(t) - if !bytes.Equal(txToA, rxFromB) { - t.Fatal("unexpected msg B->A") - } + txToB := []byte{1, 2, 3} + tcA.writeDataPkt(t, txToB) + rxFromA := tcB.readDataPkt(t) + if !bytes.Equal(txToB, rxFromA) { + t.Fatal("unexpected msg A->B") + } - tcAOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) - tcAOnNewPort.handshakeGeneration = tcA.handshakeGeneration + 1 - defer tcAOnNewPort.close() + txToA := []byte{4, 5, 6} + tcB.writeDataPkt(t, txToA) + rxFromB := tcA.readDataPkt(t) + if !bytes.Equal(txToA, rxFromB) { + t.Fatal("unexpected msg B->A") + } - // Handshake client A on a new source IP:port, verify we receive packets on the new binding - tcAOnNewPort.handshake(t) - txToAOnNewPort := []byte{7, 8, 9} - tcB.writeDataPkt(t, txToAOnNewPort) - rxFromB = tcAOnNewPort.readDataPkt(t) - if !bytes.Equal(txToAOnNewPort, rxFromB) { - t.Fatal("unexpected msg B->A") - } + tcAOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) + tcAOnNewPort.handshakeGeneration = tcA.handshakeGeneration + 1 + defer tcAOnNewPort.close() + + // Handshake client A on a new 
source IP:port, verify we receive packets on the new binding + tcAOnNewPort.handshake(t) + txToAOnNewPort := []byte{7, 8, 9} + tcB.writeDataPkt(t, txToAOnNewPort) + rxFromB = tcAOnNewPort.readDataPkt(t) + if !bytes.Equal(txToAOnNewPort, rxFromB) { + t.Fatal("unexpected msg B->A") + } - tcBOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) - tcBOnNewPort.handshakeGeneration = tcB.handshakeGeneration + 1 - defer tcBOnNewPort.close() + tcBOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) + tcBOnNewPort.handshakeGeneration = tcB.handshakeGeneration + 1 + defer tcBOnNewPort.close() - // Handshake client B on a new source IP:port, verify we receive packets on the new binding - tcBOnNewPort.handshake(t) - txToBOnNewPort := []byte{7, 8, 9} - tcAOnNewPort.writeDataPkt(t, txToBOnNewPort) - rxFromA = tcBOnNewPort.readDataPkt(t) - if !bytes.Equal(txToBOnNewPort, rxFromA) { - t.Fatal("unexpected msg A->B") + // Handshake client B on a new source IP:port, verify we receive packets on the new binding + tcBOnNewPort.handshake(t) + txToBOnNewPort := []byte{7, 8, 9} + tcAOnNewPort.writeDataPkt(t, txToBOnNewPort) + rxFromA = tcBOnNewPort.readDataPkt(t) + if !bytes.Equal(txToBOnNewPort, rxFromA) { + t.Fatal("unexpected msg A->B") + } + }) } } From 5dc11d50f787026055a0125f536e87287ce6899e Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 3 Jul 2025 15:53:35 +0100 Subject: [PATCH 0055/1093] cmd/k8s-operator: Set login server on tsrecorder nodes (#16443) This commit modifies the recorder node reconciler to include the environment variable added in https://github.com/tailscale/corp/pull/30058 which allows for configuration of the coordination server. 
Updates https://github.com/tailscale/corp/issues/29847 Signed-off-by: David Bond --- cmd/k8s-operator/operator.go | 10 +++++++--- cmd/k8s-operator/tsrecorder.go | 3 ++- cmd/k8s-operator/tsrecorder_specs.go | 10 +++++++--- cmd/k8s-operator/tsrecorder_specs_test.go | 4 ++-- cmd/k8s-operator/tsrecorder_test.go | 8 ++++++-- 5 files changed, 24 insertions(+), 11 deletions(-) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index e5f7d932cc876..276de411c45cb 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -82,6 +82,7 @@ func main() { tsFirewallMode = defaultEnv("PROXY_FIREWALL_MODE", "") defaultProxyClass = defaultEnv("PROXY_DEFAULT_CLASS", "") isDefaultLoadBalancer = defaultBool("OPERATOR_DEFAULT_LOAD_BALANCER", false) + loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") ) var opts []kzap.Opts @@ -115,7 +116,7 @@ func main() { hostinfo.SetApp(kubetypes.AppAPIServerProxy) } - s, tsc := initTSNet(zlog) + s, tsc := initTSNet(zlog, loginServer) defer s.Close() restConfig := config.GetConfigOrDie() apiproxy.MaybeLaunchAPIServerProxy(zlog, restConfig, s, mode) @@ -131,6 +132,7 @@ func main() { proxyTags: tags, proxyFirewallMode: tsFirewallMode, defaultProxyClass: defaultProxyClass, + loginServer: loginServer, } runReconcilers(rOpts) } @@ -138,14 +140,13 @@ func main() { // initTSNet initializes the tsnet.Server and logs in to Tailscale. It uses the // CLIENT_ID_FILE and CLIENT_SECRET_FILE environment variables to authenticate // with Tailscale. 
-func initTSNet(zlog *zap.SugaredLogger) (*tsnet.Server, tsClient) { +func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsClient) { var ( clientIDPath = defaultEnv("CLIENT_ID_FILE", "") clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator") kubeSecret = defaultEnv("OPERATOR_SECRET", "") operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator") - loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") ) startlog := zlog.Named("startup") if clientIDPath == "" || clientSecretPath == "" { @@ -610,6 +611,7 @@ func runReconcilers(opts reconcilerOpts) { l: opts.log.Named("recorder-reconciler"), clock: tstime.DefaultClock{}, tsClient: opts.tsClient, + loginServer: opts.loginServer, }) if err != nil { startlog.Fatalf("could not create Recorder reconciler: %v", err) @@ -693,6 +695,8 @@ type reconcilerOpts struct { // class for proxies that do not have a ProxyClass set. // this is defined by an operator env variable. defaultProxyClass string + // loginServer is the coordination server URL that should be used by managed resources. 
+ loginServer string } // enqueueAllIngressEgressProxySvcsinNS returns a reconcile request for each diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index cbabc1d89e475..ec95ecf40dab5 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -59,6 +59,7 @@ type RecorderReconciler struct { clock tstime.Clock tsNamespace string tsClient tsClient + loginServer string mu sync.Mutex // protects following recorders set.Slice[types.UID] // for recorders gauge @@ -202,7 +203,7 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco }); err != nil { return fmt.Errorf("error creating RoleBinding: %w", err) } - ss := tsrStatefulSet(tsr, r.tsNamespace) + ss := tsrStatefulSet(tsr, r.tsNamespace, r.loginServer) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index 7c6e80aed56fd..f5eedc2a1d1da 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -17,7 +17,7 @@ import ( "tailscale.com/version" ) -func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet { +func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) *appsv1.StatefulSet { return &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: tsr.Name, @@ -59,7 +59,7 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string) *appsv1.StatefulSet { ImagePullPolicy: tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy, Resources: tsr.Spec.StatefulSet.Pod.Container.Resources, SecurityContext: tsr.Spec.StatefulSet.Pod.Container.SecurityContext, - Env: env(tsr), + Env: env(tsr, loginServer), EnvFrom: func() []corev1.EnvFromSource { if tsr.Spec.Storage.S3 == nil || tsr.Spec.Storage.S3.Credentials.Secret.Name == "" { return nil @@ -201,7 
+201,7 @@ func tsrStateSecret(tsr *tsapi.Recorder, namespace string) *corev1.Secret { } } -func env(tsr *tsapi.Recorder) []corev1.EnvVar { +func env(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { envs := []corev1.EnvVar{ { Name: "TS_AUTHKEY", @@ -239,6 +239,10 @@ func env(tsr *tsapi.Recorder) []corev1.EnvVar { Name: "TSRECORDER_HOSTNAME", Value: "$(POD_NAME)", }, + { + Name: "TSRECORDER_LOGIN_SERVER", + Value: loginServer, + }, } for _, env := range tsr.Spec.StatefulSet.Pod.Container.Env { diff --git a/cmd/k8s-operator/tsrecorder_specs_test.go b/cmd/k8s-operator/tsrecorder_specs_test.go index 94a8a816c69f5..49332d09b6a08 100644 --- a/cmd/k8s-operator/tsrecorder_specs_test.go +++ b/cmd/k8s-operator/tsrecorder_specs_test.go @@ -90,7 +90,7 @@ func TestRecorderSpecs(t *testing.T) { }, } - ss := tsrStatefulSet(tsr, tsNamespace) + ss := tsrStatefulSet(tsr, tsNamespace, tsLoginServer) // StatefulSet-level. if diff := cmp.Diff(ss.Annotations, tsr.Spec.StatefulSet.Annotations); diff != "" { @@ -124,7 +124,7 @@ func TestRecorderSpecs(t *testing.T) { } // Container-level. 
- if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, env(tsr)); diff != "" { + if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, env(tsr, tsLoginServer)); diff != "" { t.Errorf("(-got +want):\n%s", diff) } if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Image, tsr.Spec.StatefulSet.Pod.Container.Image); diff != "" { diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index e6d56ef2f04c6..990bd68193e8b 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -25,7 +25,10 @@ import ( "tailscale.com/tstest" ) -const tsNamespace = "tailscale" +const ( + tsNamespace = "tailscale" + tsLoginServer = "example.tailscale.com" +) func TestRecorder(t *testing.T) { tsr := &tsapi.Recorder{ @@ -51,6 +54,7 @@ func TestRecorder(t *testing.T) { recorder: fr, l: zl.Sugar(), clock: cl, + loginServer: tsLoginServer, } t.Run("invalid_spec_gives_an_error_condition", func(t *testing.T) { @@ -234,7 +238,7 @@ func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recor role := tsrRole(tsr, tsNamespace) roleBinding := tsrRoleBinding(tsr, tsNamespace) serviceAccount := tsrServiceAccount(tsr, tsNamespace) - statefulSet := tsrStatefulSet(tsr, tsNamespace) + statefulSet := tsrStatefulSet(tsr, tsNamespace, tsLoginServer) if shouldExist { expectEqual(t, fc, auth) From 1a2185b1ee2d96ade04fb9f4e43eff5915b9b22a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 2 Jul 2025 19:06:54 -0500 Subject: [PATCH 0056/1093] ipn/ipnlocal: rename setAutoExitNodeIDLockedOnEntry to pickNewAutoExitNode; drop old function Currently, (*LocalBackend).pickNewAutoExitNode() is just a wrapper around setAutoExitNodeIDLockedOnEntry that sends a prefs-change notification at the end. It doesn't need to do that, since setPrefsLockedOnEntry already sends the notification (setAutoExitNodeIDLockedOnEntry calls it via editPrefsLockedOnEntry). 
This PR removes the old pickNewAutoExitNode function and renames setAutoExitNodeIDLockedOnEntry to pickNewAutoExitNode for clarity. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 33 ++++++++------------------------- 1 file changed, 8 insertions(+), 25 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9c16d55af45f6..bea5085b7761c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2001,20 +2001,6 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { return false } -// pickNewAutoExitNode picks a new automatic exit node if needed. -func (b *LocalBackend) pickNewAutoExitNode() { - unlock := b.lockAndGetUnlock() - defer unlock() - - newPrefs := b.setAutoExitNodeIDLockedOnEntry(unlock) - if !newPrefs.Valid() { - // Unchanged. - return - } - - b.send(ipn.Notify{Prefs: &newPrefs}) -} - // setExitNodeID updates prefs to reference an exit node by ID, rather // than by IP. It returns whether prefs was mutated. func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { @@ -5840,40 +5826,37 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) { } cc.SetNetInfo(ni) if refresh { - unlock := b.lockAndGetUnlock() - defer unlock() - b.setAutoExitNodeIDLockedOnEntry(unlock) + b.pickNewAutoExitNode() } } -func (b *LocalBackend) setAutoExitNodeIDLockedOnEntry(unlock unlockOnce) (newPrefs ipn.PrefsView) { - var zero ipn.PrefsView +// pickNewAutoExitNode picks a new automatic exit node if needed. 
+func (b *LocalBackend) pickNewAutoExitNode() { + unlock := b.lockAndGetUnlock() defer unlock() prefs := b.pm.CurrentPrefs() if !prefs.Valid() { b.logf("[unexpected]: received tailnet exit node ID pref change callback but current prefs are nil") - return zero + return } prefsClone := prefs.AsStruct() newSuggestion, err := b.suggestExitNodeLocked(nil) if err != nil { b.logf("setAutoExitNodeID: %v", err) - return zero + return } if prefsClone.ExitNodeID == newSuggestion.ID { - return zero + return } prefsClone.ExitNodeID = newSuggestion.ID - newPrefs, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ + _, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ Prefs: *prefsClone, ExitNodeIDSet: true, }, unlock) if err != nil { b.logf("setAutoExitNodeID: failed to apply exit node ID preference: %v", err) - return zero } - return newPrefs } // setNetMapLocked updates the LocalBackend state to reflect the newly From 56d772bd63e5caf711ec7ffe63967d05e33307df Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 2 Jul 2025 19:16:39 -0500 Subject: [PATCH 0057/1093] ipn/ipnlocal: simplify pickNewAutoExitNode (*profileManager).CurrentPrefs() is always valid. Additionally, there's no value in cloning and passing the full ipn.Prefs when editing preferences. Instead, ipn.MaskedPrefs should only have ExitNodeID set. 
Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bea5085b7761c..adc0af5cdac36 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5835,23 +5835,16 @@ func (b *LocalBackend) pickNewAutoExitNode() { unlock := b.lockAndGetUnlock() defer unlock() - prefs := b.pm.CurrentPrefs() - if !prefs.Valid() { - b.logf("[unexpected]: received tailnet exit node ID pref change callback but current prefs are nil") - return - } - prefsClone := prefs.AsStruct() newSuggestion, err := b.suggestExitNodeLocked(nil) if err != nil { b.logf("setAutoExitNodeID: %v", err) return } - if prefsClone.ExitNodeID == newSuggestion.ID { + if b.pm.CurrentPrefs().ExitNodeID() == newSuggestion.ID { return } - prefsClone.ExitNodeID = newSuggestion.ID _, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - Prefs: *prefsClone, + Prefs: ipn.Prefs{ExitNodeID: newSuggestion.ID}, ExitNodeIDSet: true, }, unlock) if err != nil { From 6ecc25b26a8edf191cfbebe2f16254468b1f1695 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 11:50:27 -0500 Subject: [PATCH 0058/1093] ipn/ipnlocal: skip TestUpdateNetmapDeltaAutoExitNode suggestExitNode never checks whether an exit node candidate is online. It also accepts a full netmap, which doesn't include changes from delta updates. The test can't work correctly until both issues are fixed. Previously, it passed only because the test itself is flawed. It doesn't succeed because the currently selected node goes offline and a new one is chosen. Instead, it succeeds because lastSuggestedExitNode is incorrect, and suggestExitNode picks the correct node the first time it runs, based on the DERP map and the netcheck report. The node in exitNodeIDWant just happens to be the optimal choice. 
Fixing SuggestExitNode requires refactoring its callers first, which in turn reveals the flawed test, as suggestExitNode ends up being called slightly earlier. In this PR, we update the test to correctly fail due to existing bugs in SuggestExitNode, and temporarily skip it until those issues are addressed in a future commit. Updates #16455 Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local_test.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 47e5fa37d11cc..06acd85ce6023 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1918,8 +1918,10 @@ func TestSetExitNodeIDPolicy(t *testing.T) { } func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { - peer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes()) - peer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes()) + t.Skip("TODO(tailscale/tailscale#16455): suggestExitNode does not check for online status of exit nodes") + + peer1 := makePeer(1, withCap(26), withSuggest(), withOnline(true), withExitRoutes()) + peer2 := makePeer(2, withCap(26), withSuggest(), withOnline(true), withExitRoutes()) derpMap := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ 1: { @@ -1958,8 +1960,10 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { }{ { // selected auto exit node goes offline - name: "exit-node-goes-offline", - lastSuggestedExitNode: peer1.StableID(), + name: "exit-node-goes-offline", + // PreferredDERP is 2, and it's also the region with the lowest latency. + // So, peer2 should be selected as the exit node. 
+ lastSuggestedExitNode: peer2.StableID(), netmap: &netmap.NetworkMap{ Peers: []tailcfg.NodeView{ peer1, @@ -1970,14 +1974,14 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { muts: []*tailcfg.PeerChange{ { NodeID: 1, - Online: ptr.To(false), + Online: ptr.To(true), }, { NodeID: 2, - Online: ptr.To(true), + Online: ptr.To(false), // the selected exit node goes offline }, }, - exitNodeIDWant: peer2.StableID(), + exitNodeIDWant: peer1.StableID(), report: report, }, { @@ -1994,7 +1998,7 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { muts: []*tailcfg.PeerChange{ { NodeID: 1, - Online: ptr.To(false), + Online: ptr.To(false), // a different exit node goes offline }, { NodeID: 2, From 009882298135672522e0fa9dac1b9fe32a71581a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 11:51:27 -0500 Subject: [PATCH 0059/1093] ipn/ipnlocal: update suggestExitNode to skip offline candidates and fix TestSetControlClientStatusAutoExitNode TestSetControlClientStatusAutoExitNode is broken similarly to TestUpdateNetmapDeltaAutoExitNode as suggestExitNode didn't previously check the online status of exit nodes, and similarly to the other test it succeeded because the test itself is also broken. However, it is easier to fix as it sends out a full netmap update rather than a delta peer update, so it doesn't depend on the same refactoring as TestUpdateNetmapDeltaAutoExitNode.
Updates #16455 Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 2 +- ipn/ipnlocal/local_test.go | 24 ++++++++++++++---------- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index adc0af5cdac36..8889fa90b634f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7433,7 +7433,7 @@ func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSug } candidates := make([]tailcfg.NodeView, 0, len(netMap.Peers)) for _, peer := range netMap.Peers { - if !peer.Valid() { + if !peer.Valid() || !peer.Online().Get() { continue } if allowList != nil && !allowList.Contains(peer.StableID()) { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 06acd85ce6023..ca968ccd76619 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2166,8 +2166,8 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { } func TestSetControlClientStatusAutoExitNode(t *testing.T) { - peer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes(), withNodeKey()) - peer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withNodeKey()) + peer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes(), withOnline(true), withNodeKey()) + peer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withOnline(true), withNodeKey()) derpMap := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ 1: { @@ -2210,22 +2210,25 @@ func TestSetControlClientStatusAutoExitNode(t *testing.T) { )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) b.currentNode().SetNetMap(nm) - b.lastSuggestedExitNode = peer1.StableID() + // Peer 2 should be the initial exit node, as it's better than peer 1 + // in terms of latency and DERP region. 
+ b.lastSuggestedExitNode = peer2.StableID() b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, report) b.SetPrefsForTest(b.pm.CurrentPrefs().AsStruct()) - firstExitNode := b.Prefs().ExitNodeID() - newPeer1 := makePeer(1, withCap(26), withSuggest(), withExitRoutes(), withOnline(false), withNodeKey()) + offlinePeer2 := makePeer(2, withCap(26), withSuggest(), withExitRoutes(), withOnline(false), withNodeKey()) updatedNetmap := &netmap.NetworkMap{ Peers: []tailcfg.NodeView{ - newPeer1, - peer2, + peer1, + offlinePeer2, }, DERPMap: derpMap, } b.SetControlClientStatus(b.cc, controlclient.Status{NetMap: updatedNetmap}) - lastExitNode := b.Prefs().ExitNodeID() - if firstExitNode == lastExitNode { - t.Errorf("did not switch exit nodes despite auto exit node going offline") + // But now that peer 2 is offline, we should switch to peer 1. + wantExitNode := peer1.StableID() + gotExitNode := b.Prefs().ExitNodeID() + if gotExitNode != wantExitNode { + t.Errorf("did not switch exit nodes despite auto exit node going offline: got %q; want %q", gotExitNode, wantExitNode) } } @@ -3289,6 +3292,7 @@ func makePeer(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { Key: makeNodeKeyFromID(id), StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), Name: fmt.Sprintf("peer%d", id), + Online: ptr.To(true), HomeDERP: int(id), } for _, opt := range opts { From a8055b5f40c625777e6e13dd504a110c223bc8fb Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 12:21:29 -0500 Subject: [PATCH 0060/1093] cmd/tailscale/cli,ipn,ipn/ipnlocal: add AutoExitNode preference for automatic exit node selection With this change, policy enforcement and exit node resolution can happen in separate steps, since enforcement no longer depends on resolving the suggested exit node. This keeps policy enforcement synchronous (e.g., when switching profiles), while allowing exit node resolution to be asynchronous on netmap updates, link changes, etc. 
Additionally, the new preference will be used to let GUIs and CLIs switch back to "auto" mode after a manual exit node override, which is necessary for tailscale/corp#29969. Updates tailscale/corp#29969 Updates #16459 Signed-off-by: Nick Khyl --- cmd/tailscale/cli/cli_test.go | 4 + ipn/ipn_clone.go | 1 + ipn/ipn_view.go | 2 + ipn/ipnlocal/local.go | 189 +++++++++--- ipn/ipnlocal/local_test.go | 527 ++++++++++++++++++++++++++++++++-- ipn/ipnlocal/state_test.go | 106 +++++-- ipn/prefs.go | 45 +++ ipn/prefs_test.go | 12 + 8 files changed, 793 insertions(+), 93 deletions(-) diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 9aa3693fd92c5..48121c7d912d9 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -971,6 +971,10 @@ func TestPrefFlagMapping(t *testing.T) { // Used internally by LocalBackend as part of exit node usage toggling. // No CLI flag for this. continue + case "AutoExitNode": + // TODO(nickkhyl): should be handled by tailscale {set,up} --exit-node. + // See tailscale/tailscale#16459. 
+ continue } t.Errorf("unexpected new ipn.Pref field %q is not handled by up.go (see addPrefFlagMapping and checkForAccidentalSettingReverts)", prefName) } diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 65438444e162f..3d67efc6fd33b 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -74,6 +74,7 @@ var _PrefsCloneNeedsRegeneration = Prefs(struct { RouteAll bool ExitNodeID tailcfg.StableNodeID ExitNodeIP netip.Addr + AutoExitNode ExitNodeExpression InternalExitNodePrior tailcfg.StableNodeID ExitNodeAllowLANAccess bool CorpDNS bool diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 871270b8564f1..1d31ced9d3847 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -135,6 +135,7 @@ func (v PrefsView) ControlURL() string { return v.ж.Co func (v PrefsView) RouteAll() bool { return v.ж.RouteAll } func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.ж.ExitNodeID } func (v PrefsView) ExitNodeIP() netip.Addr { return v.ж.ExitNodeIP } +func (v PrefsView) AutoExitNode() ExitNodeExpression { return v.ж.AutoExitNode } func (v PrefsView) InternalExitNodePrior() tailcfg.StableNodeID { return v.ж.InternalExitNodePrior } func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.ж.ExitNodeAllowLANAccess } func (v PrefsView) CorpDNS() bool { return v.ж.CorpDNS } @@ -179,6 +180,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct { RouteAll bool ExitNodeID tailcfg.StableNodeID ExitNodeIP netip.Addr + AutoExitNode ExitNodeExpression InternalExitNodePrior tailcfg.StableNodeID ExitNodeAllowLANAccess bool CorpDNS bool diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8889fa90b634f..21057c0e675db 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -912,13 +912,14 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { hadPAC := b.prevIfState.HasPAC() b.prevIfState = ifst b.pauseOrResumeControlClientLocked() - if delta.Major && shouldAutoExitNode() { + prefs := b.pm.CurrentPrefs() + if delta.Major && prefs.AutoExitNode().IsSet() 
{ b.refreshAutoExitNode = true } var needReconfig bool // If the network changed and we're using an exit node and allowing LAN access, we may need to reconfigure. - if delta.Major && b.pm.CurrentPrefs().ExitNodeID() != "" && b.pm.CurrentPrefs().ExitNodeAllowLANAccess() { + if delta.Major && prefs.ExitNodeID() != "" && prefs.ExitNodeAllowLANAccess() { b.logf("linkChange: in state %v; updating LAN routes", b.state) needReconfig = true } @@ -941,8 +942,8 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { // If the local network configuration has changed, our filter may // need updating to tweak default routes. - b.updateFilterLocked(b.pm.CurrentPrefs()) - updateExitNodeUsageWarning(b.pm.CurrentPrefs(), delta.New, b.health) + b.updateFilterLocked(prefs) + updateExitNodeUsageWarning(prefs, delta.New, b.health) cn := b.currentNode() nm := cn.NetMap() @@ -1623,17 +1624,17 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefsChanged = true } } - if shouldAutoExitNode() { + if applySysPolicy(prefs, b.overrideAlwaysOn) { + prefsChanged = true + } + if prefs.AutoExitNode.IsSet() { // Re-evaluate exit node suggestion in case circumstances have changed. _, err := b.suggestExitNodeLocked(curNetMap) if err != nil && !errors.Is(err, ErrNoPreferredDERP) { b.logf("SetControlClientStatus failed to select auto exit node: %v", err) } } - if applySysPolicy(prefs, b.lastSuggestedExitNode, b.overrideAlwaysOn) { - prefsChanged = true - } - if setExitNodeID(prefs, curNetMap) { + if setExitNodeID(prefs, b.lastSuggestedExitNode, curNetMap) { prefsChanged = true } @@ -1800,7 +1801,7 @@ var preferencePolicies = []preferencePolicyInfo{ // applySysPolicy overwrites configured preferences with policies that may be // configured by the system administrator in an OS-specific way. 
-func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID, overrideAlwaysOn bool) (anyChange bool) { +func applySysPolicy(prefs *ipn.Prefs, overrideAlwaysOn bool) (anyChange bool) { if controlURL, err := syspolicy.GetString(syspolicy.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true @@ -1839,21 +1840,51 @@ func applySysPolicy(prefs *ipn.Prefs, lastSuggestedExitNode tailcfg.StableNodeID if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) - if shouldAutoExitNode() && lastSuggestedExitNode != "" { - exitNodeID = lastSuggestedExitNode - } - // Note: when exitNodeIDStr == "auto" && lastSuggestedExitNode == "", - // then exitNodeID is now "auto" which will never match a peer's node ID. - // When there is no a peer matching the node ID, traffic will blackhole, - // preventing accidental non-exit-node usage when a policy is in effect that requires an exit node. - if prefs.ExitNodeID != exitNodeID || prefs.ExitNodeIP.IsValid() { + + // Try to parse the policy setting value as an "auto:"-prefixed [ipn.ExitNodeExpression], + // and update prefs if it differs from the current one. + // This includes cases where it was previously an expression but no longer is, + // or where it wasn't before but now is. + autoExitNode, useAutoExitNode := parseAutoExitNodeID(exitNodeID) + if prefs.AutoExitNode != autoExitNode { + prefs.AutoExitNode = autoExitNode + anyChange = true + } + // Additionally, if the specified exit node ID is an expression, + // meaning an exit node is required but we don't yet have a valid exit node ID, + // we should set exitNodeID to a value that is never a valid [tailcfg.StableNodeID], + // to install a blackhole route and prevent accidental non-exit-node usage + // until the expression is evaluated and an actual exit node is selected. 
+ // We use "auto:any" for this purpose, primarily for compatibility with + // older clients (in case a user downgrades to an earlier version) + // and GUIs/CLIs that have special handling for it. + if useAutoExitNode { + exitNodeID = unresolvedExitNodeID + } + + // If the current exit node ID doesn't match the one enforced by the policy setting, + // and the policy either requires a specific exit node ID, + // or requires an auto exit node ID and the current one isn't allowed, + // then update the exit node ID. + if prefs.ExitNodeID != exitNodeID { + if !useAutoExitNode || !isAllowedAutoExitNodeID(prefs.ExitNodeID) { + prefs.ExitNodeID = exitNodeID + anyChange = true + } + } + + // If the exit node IP is set, clear it. When ExitNodeIP is set in the prefs, + // it takes precedence over the ExitNodeID. + if prefs.ExitNodeIP.IsValid() { + prefs.ExitNodeIP = netip.Addr{} anyChange = true } - prefs.ExitNodeID = exitNodeID - prefs.ExitNodeIP = netip.Addr{} } else if exitNodeIPStr, _ := syspolicy.GetString(syspolicy.ExitNodeIP, ""); exitNodeIPStr != "" { - exitNodeIP, err := netip.ParseAddr(exitNodeIPStr) - if exitNodeIP.IsValid() && err == nil { + if prefs.AutoExitNode != "" { + prefs.AutoExitNode = "" // mutually exclusive with ExitNodeIP + anyChange = true + } + if exitNodeIP, err := netip.ParseAddr(exitNodeIPStr); err == nil { if prefs.ExitNodeID != "" || prefs.ExitNodeIP != exitNodeIP { anyChange = true } @@ -1901,7 +1932,7 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { func (b *LocalBackend) applySysPolicy() (_ ipn.PrefsView, anyChange bool) { unlock := b.lockAndGetUnlock() prefs := b.pm.CurrentPrefs().AsStruct() - if !applySysPolicy(prefs, b.lastSuggestedExitNode, b.overrideAlwaysOn) { + if !applySysPolicy(prefs, b.overrideAlwaysOn) { unlock.UnlockEarly() return prefs.View(), false } @@ -1957,8 +1988,8 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo // If auto exit nodes are enabled and our 
exit node went offline, // we need to schedule picking a new one. // TODO(nickkhyl): move the auto exit node logic to a feature package. - if shouldAutoExitNode() { - exitNodeID := b.pm.prefs.ExitNodeID() + if prefs := b.pm.CurrentPrefs(); prefs.AutoExitNode().IsSet() { + exitNodeID := prefs.ExitNodeID() for _, m := range muts { mo, ok := m.(netmap.NodeMutationOnline) if !ok || mo.Online { @@ -2001,9 +2032,27 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { return false } -// setExitNodeID updates prefs to reference an exit node by ID, rather +// setExitNodeID updates prefs to either use the suggestedExitNodeID if AutoExitNode is enabled, +// or resolve ExitNodeIP to an ID and use that. It returns whether prefs was mutated. +func setExitNodeID(prefs *ipn.Prefs, suggestedExitNodeID tailcfg.StableNodeID, nm *netmap.NetworkMap) (prefsChanged bool) { + if prefs.AutoExitNode.IsSet() { + newExitNodeID := cmp.Or(suggestedExitNodeID, unresolvedExitNodeID) + if prefs.ExitNodeID != newExitNodeID { + prefs.ExitNodeID = newExitNodeID + prefsChanged = true + } + if prefs.ExitNodeIP.IsValid() { + prefs.ExitNodeIP = netip.Addr{} + prefsChanged = true + } + return prefsChanged + } + return resolveExitNodeIP(prefs, nm) +} + +// resolveExitNodeIP updates prefs to reference an exit node by ID, rather // than by IP. It returns whether prefs was mutated. -func setExitNodeID(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { +func resolveExitNodeIP(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { if nm == nil { // No netmap, can't resolve anything. return false @@ -2265,8 +2314,8 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // And also apply syspolicy settings to the current profile. // This is important in two cases: when opts.UpdatePrefs is not nil, // and when Always Mode is enabled and we need to set WantRunning to true. 
- if newp := b.pm.CurrentPrefs().AsStruct(); applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) { - setExitNodeID(newp, cn.NetMap()) + if newp := b.pm.CurrentPrefs().AsStruct(); applySysPolicy(newp, b.overrideAlwaysOn) { + setExitNodeID(newp, b.lastSuggestedExitNode, cn.NetMap()) b.pm.setPrefsNoPermCheck(newp.View()) } prefs := b.pm.CurrentPrefs() @@ -4187,12 +4236,23 @@ func (b *LocalBackend) SetUseExitNodeEnabled(v bool) (ipn.PrefsView, error) { mp := &ipn.MaskedPrefs{} if v { mp.ExitNodeIDSet = true - mp.ExitNodeID = tailcfg.StableNodeID(p0.InternalExitNodePrior()) + mp.ExitNodeID = p0.InternalExitNodePrior() + if expr, ok := parseAutoExitNodeID(mp.ExitNodeID); ok { + mp.AutoExitNodeSet = true + mp.AutoExitNode = expr + mp.ExitNodeID = unresolvedExitNodeID + } } else { mp.ExitNodeIDSet = true mp.ExitNodeID = "" + mp.AutoExitNodeSet = true + mp.AutoExitNode = "" mp.InternalExitNodePriorSet = true - mp.InternalExitNodePrior = p0.ExitNodeID() + if p0.AutoExitNode().IsSet() { + mp.InternalExitNodePrior = tailcfg.StableNodeID(autoExitNodePrefix + p0.AutoExitNode()) + } else { + mp.InternalExitNodePrior = p0.ExitNodeID() + } } return b.editPrefsLockedOnEntry(mp, unlock) } @@ -4229,6 +4289,13 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip mp.InternalExitNodePriorSet = true } + // Disable automatic exit node selection if the user explicitly sets + // ExitNodeID or ExitNodeIP. + if mp.ExitNodeIDSet || mp.ExitNodeIPSet { + mp.AutoExitNodeSet = true + mp.AutoExitNode = "" + } + // Acquire the lock before checking the profile access to prevent // TOCTOU issues caused by the current profile changing between the // check and the actual edit. @@ -4428,9 +4495,14 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) // applySysPolicy returns whether it updated newp, // but everything in this function treats b.prefs as completely new // anyway, so its return value can be ignored here. 
- applySysPolicy(newp, b.lastSuggestedExitNode, b.overrideAlwaysOn) + applySysPolicy(newp, b.overrideAlwaysOn) + if newp.AutoExitNode.IsSet() { + if _, err := b.suggestExitNodeLocked(nil); err != nil { + b.logf("failed to select auto exit node: %v", err) + } + } // setExitNodeID does likewise. No-op if no exit node resolution is needed. - setExitNodeID(newp, netMap) + setExitNodeID(newp, b.lastSuggestedExitNode, netMap) // We do this to avoid holding the lock while doing everything else. @@ -7630,10 +7702,53 @@ func longLatDistance(fromLat, fromLong, toLat, toLong float64) float64 { return earthRadiusMeters * c } -// shouldAutoExitNode checks for the auto exit node MDM policy. -func shouldAutoExitNode() bool { - exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, "") - return exitNodeIDStr == "auto:any" +const ( + // autoExitNodePrefix is the prefix used in [syspolicy.ExitNodeID] values + // to indicate that the string following the prefix is an [ipn.ExitNodeExpression]. + autoExitNodePrefix = "auto:" + + // unresolvedExitNodeID is a special [tailcfg.StableNodeID] value + // used as an exit node ID to install a blackhole route, preventing + // accidental non-exit-node usage until the [ipn.ExitNodeExpression] + // is evaluated and an actual exit node is selected. + // + // We use "auto:any" for compatibility with older, pre-[ipn.ExitNodeExpression] + // clients that have been using "auto:any" for this purpose for a long time. + unresolvedExitNodeID tailcfg.StableNodeID = "auto:any" +) + +// isAutoExitNodeID reports whether the given [tailcfg.StableNodeID] is +// actually an "auto:"-prefixed [ipn.ExitNodeExpression]. +func isAutoExitNodeID(id tailcfg.StableNodeID) bool { + _, ok := parseAutoExitNodeID(id) + return ok +} + +// parseAutoExitNodeID attempts to parse the given [tailcfg.StableNodeID] +// as an [ExitNodeExpression]. 
+// +// It returns the parsed expression and true on success, +// or an empty string and false if the input does not appear to be +// an [ExitNodeExpression] (i.e., it doesn't start with "auto:"). +// +// It is mainly used to parse the [syspolicy.ExitNodeID] value +// when it is set to "auto:" (e.g., auto:any). +func parseAutoExitNodeID(id tailcfg.StableNodeID) (_ ipn.ExitNodeExpression, ok bool) { + if expr, ok := strings.CutPrefix(string(id), autoExitNodePrefix); ok && expr != "" { + return ipn.ExitNodeExpression(expr), true + } + return "", false +} + +func isAllowedAutoExitNodeID(exitNodeID tailcfg.StableNodeID) bool { + if exitNodeID == "" { + return false // an exit node is required + } + if nodes, _ := syspolicy.GetStringArray(syspolicy.AllowedSuggestedExitNodes, nil); nodes != nil { + return slices.Contains(nodes, string(exitNodeID)) + + } + return true // no policy configured; allow all exit nodes } // startAutoUpdate triggers an auto-update attempt. The actual update happens diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index ca968ccd76619..5c9c9f2fab4a9 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -24,6 +24,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" memro "go4.org/mem" "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" @@ -590,6 +591,391 @@ func TestSetUseExitNodeEnabled(t *testing.T) { } } +func makeExitNode(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { + return makePeer(id, append([]peerOptFunc{withCap(26), withSuggest(), withExitRoutes()}, opts...)...) 
+} + +func TestConfigureExitNode(t *testing.T) { + controlURL := "https://localhost:1/" + exitNode1 := makeExitNode(1, withName("node-1"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))) + exitNode2 := makeExitNode(2, withName("node-2"), withDERP(2), withAddresses(netip.MustParsePrefix("100.64.1.2/32"))) + selfNode := makeExitNode(3, withName("node-3"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.3/32"))) + clientNetmap := buildNetmapWithPeers(selfNode, exitNode1, exitNode2) + + report := &netcheck.Report{ + RegionLatency: map[int]time.Duration{ + 1: 5 * time.Millisecond, + 2: 10 * time.Millisecond, + }, + PreferredDERP: 1, + } + + tests := []struct { + name string + prefs ipn.Prefs + netMap *netmap.NetworkMap + report *netcheck.Report + changePrefs *ipn.MaskedPrefs + useExitNodeEnabled *bool + exitNodeIDPolicy *tailcfg.StableNodeID + exitNodeIPPolicy *netip.Addr + wantPrefs ipn.Prefs + }{ + { + name: "exit-node-id-via-prefs", // set exit node ID via prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ExitNodeID: exitNode1.StableID()}, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "exit-node-ip-via-prefs", // set exit node IP via prefs (should be resolved to an ID) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ExitNodeIP: exitNode1.Addresses().At(0).Addr()}, + ExitNodeIPSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "auto-exit-node-via-prefs/any", // set auto exit node via prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, 
+ wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + AutoExitNode: "any", + }, + }, + { + name: "auto-exit-node-via-prefs/set-exit-node-id-via-prefs", // setting exit node ID explicitly should disable auto exit node + prefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: "any", + ExitNodeID: exitNode1.StableID(), + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ExitNodeID: exitNode2.StableID()}, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + AutoExitNode: "", // should be unset + }, + }, + { + name: "auto-exit-node-via-prefs/any/no-report", // set auto exit node via prefs, but no report means we can't resolve the exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped + AutoExitNode: "any", + }, + }, + { + name: "auto-exit-node-via-prefs/any/no-netmap", // similarly, but without a netmap (no exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped + AutoExitNode: "any", + }, + }, + { + name: "auto-exit-node-via-prefs/foo", // set auto exit node via prefs with an unknown/unsupported expression + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "foo"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // 
unknown exit node expressions should work as "any" + AutoExitNode: "foo", + }, + }, + { + name: "auto-exit-node-via-prefs/off", // toggle the exit node off after it was set to "any" + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "any"}, + AutoExitNodeSet: true, + }, + useExitNodeEnabled: ptr.To(false), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: "", + AutoExitNode: "", + InternalExitNodePrior: "auto:any", + }, + }, + { + name: "auto-exit-node-via-prefs/on", // toggle the exit node on + prefs: ipn.Prefs{ + ControlURL: controlURL, + InternalExitNodePrior: "auto:any", + }, + netMap: clientNetmap, + report: report, + useExitNodeEnabled: ptr.To(true), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + AutoExitNode: "any", + InternalExitNodePrior: "auto:any", + }, + }, + { + name: "id-via-policy", // set exit node ID via syspolicy + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "id-via-policy/cannot-override-via-prefs/by-id", // syspolicy should take precedence over prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeID: exitNode2.StableID(), // this should be ignored + }, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "id-via-policy/cannot-override-via-prefs/by-ip", // syspolicy should take precedence over prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + 
ExitNodeIP: exitNode2.Addresses().At(0).Addr(), // this should be ignored + }, + ExitNodeIPSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "id-via-policy/cannot-override-via-prefs/by-auto-expr", // syspolicy should take precedence over prefs + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AutoExitNode: "any", // this should be ignored + }, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + }, + }, + { + name: "ip-via-policy", // set exit node IP via syspolicy (should be resolved to an ID) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + exitNodeIPPolicy: ptr.To(exitNode2.Addresses().At(0).Addr()), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + }, + }, + { + name: "auto-any-via-policy", // set auto exit node via syspolicy (an exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/no-report", // set auto exit node via syspolicy without a netcheck report (no exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: nil, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/no-netmap", // similarly, but without a netmap (no exit node should be selected) + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: nil, + report: report, + 
exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, + AutoExitNode: "any", + }, + }, + { + name: "auto-foo-via-policy", // set auto exit node via syspolicy with an unknown/unsupported expression + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:foo")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" + AutoExitNode: "foo", + }, + }, + { + name: "auto-any-via-policy/toggle-off", // cannot toggle off the exit node if it was set via syspolicy + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + useExitNodeEnabled: ptr.To(false), // should be ignored + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting + AutoExitNode: "any", + InternalExitNodePrior: "auto:any", + }, + }, + } + syspolicy.RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Configure policy settings, if any. + var settings []source.TestSetting[string] + if tt.exitNodeIDPolicy != nil { + settings = append(settings, source.TestSettingOf(syspolicy.ExitNodeID, string(*tt.exitNodeIDPolicy))) + } + if tt.exitNodeIPPolicy != nil { + settings = append(settings, source.TestSettingOf(syspolicy.ExitNodeIP, tt.exitNodeIPPolicy.String())) + } + if settings != nil { + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, source.NewTestStoreOf(t, settings...)) + } else { + // No syspolicy settings, so don't register a store. + // This allows the test to run in parallel with other tests. + t.Parallel() + } + + // Create a new LocalBackend with the given prefs. 
+ // Any syspolicy settings will be applied to the initial prefs. + lb := newTestLocalBackend(t) + lb.SetPrefsForTest(tt.prefs.Clone()) + // Then set the netcheck report and netmap, if any. + if tt.report != nil { + lb.MagicConn().SetLastNetcheckReportForTest(t.Context(), tt.report) + } + if tt.netMap != nil { + lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: tt.netMap}) + } + + // If we have a changePrefs, apply it. + if tt.changePrefs != nil { + lb.EditPrefs(tt.changePrefs) + } + + // If we need to flip exit node toggle on or off, do it. + if tt.useExitNodeEnabled != nil { + lb.SetUseExitNodeEnabled(*tt.useExitNodeEnabled) + } + + // Now check the prefs. + opts := []cmp.Option{ + cmpopts.EquateComparable(netip.Addr{}, netip.Prefix{}), + } + if diff := cmp.Diff(&tt.wantPrefs, lb.Prefs().AsStruct(), opts...); diff != "" { + t.Errorf("Prefs(+got -want): %v", diff) + } + }) + } +} + func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface @@ -1646,6 +2032,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { prefs *ipn.Prefs exitNodeIPWant string exitNodeIDWant string + autoExitNodeWant ipn.ExitNodeExpression prefsChanged bool nm *netmap.NetworkMap lastSuggestedExitNode tailcfg.StableNodeID @@ -1850,19 +2237,38 @@ func TestSetExitNodeIDPolicy(t *testing.T) { }, }, { - name: "ExitNodeID key is set to auto and last suggested exit node is populated", + name: "ExitNodeID key is set to auto:any and last suggested exit node is populated", exitNodeIDKey: true, exitNodeID: "auto:any", lastSuggestedExitNode: "123", exitNodeIDWant: "123", + autoExitNodeWant: "any", prefsChanged: true, }, { - name: "ExitNodeID key is set to auto and last suggested exit node is not populated", - exitNodeIDKey: true, - exitNodeID: "auto:any", - prefsChanged: true, - exitNodeIDWant: "auto:any", + name: "ExitNodeID key is set to auto:any and last suggested exit node is not populated", + exitNodeIDKey: true, + exitNodeID: "auto:any", + 
exitNodeIDWant: "auto:any", + autoExitNodeWant: "any", + prefsChanged: true, + }, + { + name: "ExitNodeID key is set to auto:foo and last suggested exit node is populated", + exitNodeIDKey: true, + exitNodeID: "auto:foo", + lastSuggestedExitNode: "123", + exitNodeIDWant: "123", + autoExitNodeWant: "foo", + prefsChanged: true, + }, + { + name: "ExitNodeID key is set to auto:foo and last suggested exit node is not populated", + exitNodeIDKey: true, + exitNodeID: "auto:foo", + exitNodeIDWant: "auto:any", // should be "auto:any" for compatibility with existing clients + autoExitNodeWant: "foo", + prefsChanged: true, }, } @@ -1893,7 +2299,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { b.pm = pm b.lastSuggestedExitNode = test.lastSuggestedExitNode prefs := b.pm.prefs.AsStruct() - if changed := applySysPolicy(prefs, test.lastSuggestedExitNode, false) || setExitNodeID(prefs, test.nm); changed != test.prefsChanged { + if changed := applySysPolicy(prefs, false) || setExitNodeID(prefs, test.lastSuggestedExitNode, test.nm); changed != test.prefsChanged { t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed) } @@ -1903,15 +2309,18 @@ func TestSetExitNodeIDPolicy(t *testing.T) { // preferences to change. 
b.SetPrefsForTest(pm.CurrentPrefs().AsStruct()) - if got := b.pm.prefs.ExitNodeID(); got != tailcfg.StableNodeID(test.exitNodeIDWant) { - t.Errorf("got %v want %v", got, test.exitNodeIDWant) + if got := b.Prefs().ExitNodeID(); got != tailcfg.StableNodeID(test.exitNodeIDWant) { + t.Errorf("ExitNodeID: got %q; want %q", got, test.exitNodeIDWant) } - if got := b.pm.prefs.ExitNodeIP(); test.exitNodeIPWant == "" { + if got := b.Prefs().ExitNodeIP(); test.exitNodeIPWant == "" { if got.String() != "invalid IP" { - t.Errorf("got %v want invalid IP", got) + t.Errorf("ExitNodeIP: got %v want invalid IP", got) } } else if got.String() != test.exitNodeIPWant { - t.Errorf("got %v want %v", got, test.exitNodeIPWant) + t.Errorf("ExitNodeIP: got %q; want %q", got, test.exitNodeIPWant) + } + if got := b.Prefs().AutoExitNode(); got != test.autoExitNodeWant { + t.Errorf("AutoExitNode: got %q; want %q", got, test.autoExitNodeWant) } }) } @@ -2459,7 +2868,7 @@ func TestApplySysPolicy(t *testing.T) { t.Run("unit", func(t *testing.T) { prefs := tt.prefs.Clone() - gotAnyChange := applySysPolicy(prefs, "", false) + gotAnyChange := applySysPolicy(prefs, false) if gotAnyChange && prefs.Equals(&tt.prefs) { t.Errorf("anyChange but prefs is unchanged: %v", prefs.Pretty()) @@ -2607,7 +3016,7 @@ func TestPreferencePolicyInfo(t *testing.T) { prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - gotAnyChange := applySysPolicy(prefs, "", false) + gotAnyChange := applySysPolicy(prefs, false) if gotAnyChange != tt.wantChange { t.Errorf("anyChange=%v, want %v", gotAnyChange, tt.wantChange) @@ -3288,12 +3697,14 @@ type peerOptFunc func(*tailcfg.Node) func makePeer(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { node := &tailcfg.Node{ - ID: id, - Key: makeNodeKeyFromID(id), - StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), - Name: fmt.Sprintf("peer%d", id), - Online: ptr.To(true), - HomeDERP: int(id), + ID: id, + Key: makeNodeKeyFromID(id), + DiscoKey: 
makeDiscoKeyFromID(id), + StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), + Name: fmt.Sprintf("peer%d", id), + Online: ptr.To(true), + MachineAuthorized: true, + HomeDERP: int(id), } for _, opt := range opts { opt(node) @@ -3363,6 +3774,12 @@ func withNodeKey() peerOptFunc { } } +func withAddresses(addresses ...netip.Prefix) peerOptFunc { + return func(n *tailcfg.Node) { + n.Addresses = append(n.Addresses, addresses...) + } +} + func deterministicRegionForTest(t testing.TB, want views.Slice[int], use int) selectRegionFunc { t.Helper() @@ -4065,9 +4482,9 @@ func TestShouldAutoExitNode(t *testing.T) { expectedBool: false, }, { - name: "auto prefix invalid suffix", + name: "auto prefix unknown suffix", exitNodeIDPolicyValue: "auto:foo", - expectedBool: false, + expectedBool: true, // "auto:{unknown}" is treated as "auto:any" }, } @@ -4075,12 +4492,7 @@ func TestShouldAutoExitNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, tt.exitNodeIDPolicyValue, - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) - - got := shouldAutoExitNode() + got := isAutoExitNodeID(tailcfg.StableNodeID(tt.exitNodeIDPolicyValue)) if got != tt.expectedBool { t.Fatalf("expected %v got %v for %v policy value", tt.expectedBool, got, tt.exitNodeIDPolicyValue) } @@ -4088,6 +4500,65 @@ func TestShouldAutoExitNode(t *testing.T) { } } +func TestParseAutoExitNodeID(t *testing.T) { + tests := []struct { + name string + exitNodeID string + wantOk bool + wantExpr ipn.ExitNodeExpression + }{ + { + name: "empty expr", + exitNodeID: "", + wantOk: false, + wantExpr: "", + }, + { + name: "no auto prefix", + exitNodeID: "foo", + wantOk: false, + wantExpr: "", + }, + { + name: "auto:any", + exitNodeID: "auto:any", + wantOk: true, + wantExpr: ipn.AnyExitNode, + }, + { + name: "auto:foo", + exitNodeID: "auto:foo", + wantOk: true, + wantExpr: 
"foo", + }, + { + name: "auto prefix but empty suffix", + exitNodeID: "auto:", + wantOk: false, + wantExpr: "", + }, + { + name: "auto prefix no colon", + exitNodeID: "auto", + wantOk: false, + wantExpr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotExpr, gotOk := parseAutoExitNodeID(tailcfg.StableNodeID(tt.exitNodeID)) + if gotOk != tt.wantOk || gotExpr != tt.wantExpr { + if tt.wantOk { + t.Fatalf("got %v (%q); want %v (%q)", gotOk, gotExpr, tt.wantOk, tt.wantExpr) + } else { + t.Fatalf("got %v (%q); want false", gotOk, gotExpr) + } + } + }) + } +} + func TestEnableAutoUpdates(t *testing.T) { lb := newTestLocalBackend(t) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index eb36643856f82..f0ac5f9442704 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -6,6 +6,7 @@ package ipnlocal import ( "context" "errors" + "fmt" "net/netip" "strings" "sync" @@ -1108,10 +1109,17 @@ func TestEngineReconfigOnStateChange(t *testing.T) { enableLogging := false connect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: true}, WantRunningSet: true} disconnect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: false}, WantRunningSet: true} - node1 := testNetmapForNode(1, "node-1", []netip.Prefix{netip.MustParsePrefix("100.64.1.1/32")}) - node2 := testNetmapForNode(2, "node-2", []netip.Prefix{netip.MustParsePrefix("100.64.1.2/32")}) - node3 := testNetmapForNode(3, "node-3", []netip.Prefix{netip.MustParsePrefix("100.64.1.3/32")}) - node3.Peers = []tailcfg.NodeView{node1.SelfNode, node2.SelfNode} + node1 := buildNetmapWithPeers( + makePeer(1, withName("node-1"), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))), + ) + node2 := buildNetmapWithPeers( + makePeer(2, withName("node-2"), withAddresses(netip.MustParsePrefix("100.64.1.2/32"))), + ) + node3 := buildNetmapWithPeers( + makePeer(3, withName("node-3"), withAddresses(netip.MustParsePrefix("100.64.1.3/32"))), + node1.SelfNode, + node2.SelfNode, + 
) routesWithQuad100 := func(extra ...netip.Prefix) []netip.Prefix { return append(extra, netip.MustParsePrefix("100.100.100.100/32")) } @@ -1380,33 +1388,75 @@ func TestEngineReconfigOnStateChange(t *testing.T) { } } -func testNetmapForNode(userID tailcfg.UserID, name string, addresses []netip.Prefix) *netmap.NetworkMap { +func buildNetmapWithPeers(self tailcfg.NodeView, peers ...tailcfg.NodeView) *netmap.NetworkMap { const ( - domain = "example.com" - magicDNSSuffix = ".test.ts.net" + firstAutoUserID = tailcfg.UserID(10000) + domain = "example.com" + magicDNSSuffix = ".test.ts.net" ) - user := &tailcfg.UserProfile{ - ID: userID, - DisplayName: name, - LoginName: strings.Join([]string{name, domain}, "@"), - } - self := &tailcfg.Node{ - ID: tailcfg.NodeID(1000 + userID), - StableID: tailcfg.StableNodeID("stable-" + name), - User: user.ID, - Name: name + magicDNSSuffix, - Addresses: addresses, - MachineAuthorized: true, - } - self.Key = makeNodeKeyFromID(self.ID) - self.DiscoKey = makeDiscoKeyFromID(self.ID) + + users := make(map[tailcfg.UserID]tailcfg.UserProfileView) + makeUserForNode := func(n *tailcfg.Node) { + var user *tailcfg.UserProfile + if n.User == 0 { + n.User = firstAutoUserID + tailcfg.UserID(n.ID) + user = &tailcfg.UserProfile{ + DisplayName: n.Name, + LoginName: n.Name, + } + } else if _, ok := users[n.User]; !ok { + user = &tailcfg.UserProfile{ + DisplayName: fmt.Sprintf("User %d", n.User), + LoginName: fmt.Sprintf("user-%d", n.User), + } + } + if user != nil { + user.ID = n.User + user.LoginName = strings.Join([]string{user.LoginName, domain}, "@") + users[n.User] = user.View() + } + } + + derpmap := &tailcfg.DERPMap{ + Regions: make(map[int]*tailcfg.DERPRegion), + } + makeDERPRegionForNode := func(n *tailcfg.Node) { + if n.HomeDERP == 0 { + return // no DERP region + } + if _, ok := derpmap.Regions[n.HomeDERP]; !ok { + r := &tailcfg.DERPRegion{ + RegionID: n.HomeDERP, + RegionName: fmt.Sprintf("Region %d", n.HomeDERP), + } + r.Nodes = 
append(r.Nodes, &tailcfg.DERPNode{ + Name: fmt.Sprintf("%da", n.HomeDERP), + RegionID: n.HomeDERP, + }) + derpmap.Regions[n.HomeDERP] = r + } + } + + updateNode := func(n tailcfg.NodeView) tailcfg.NodeView { + mut := n.AsStruct() + makeUserForNode(mut) + makeDERPRegionForNode(mut) + mut.Name = mut.Name + magicDNSSuffix + return mut.View() + } + + self = updateNode(self) + for i := range peers { + peers[i] = updateNode(peers[i]) + } + return &netmap.NetworkMap{ - SelfNode: self.View(), - Name: self.Name, - Domain: domain, - UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ - user.ID: user.View(), - }, + SelfNode: self, + Name: self.Name(), + Domain: domain, + Peers: peers, + UserProfiles: users, + DERPMap: derpmap, } } diff --git a/ipn/prefs.go b/ipn/prefs.go index 01275a7e25bdc..77cea0493af16 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -94,6 +94,25 @@ type Prefs struct { ExitNodeID tailcfg.StableNodeID ExitNodeIP netip.Addr + // AutoExitNode is an optional expression that specifies whether and how + // tailscaled should pick an exit node automatically. + // + // If specified, tailscaled will use an exit node based on the expression, + // and will re-evaluate the selection periodically as network conditions, + // available exit nodes, or policy settings change. A blackhole route will + // be installed to prevent traffic from escaping to the local network until + // an exit node is selected. It takes precedence over ExitNodeID and ExitNodeIP. + // + // If empty, tailscaled will not automatically select an exit node. + // + // If the specified expression is invalid or unsupported by the client, + // it falls back to the behavior of [AnyExitNode]. + // + // As of 2025-07-02, the only supported value is [AnyExitNode]. + // It's a string rather than a boolean to allow future extensibility + // (e.g., AutoExitNode = "mullvad" or AutoExitNode = "geo:us"). 
+ AutoExitNode ExitNodeExpression `json:",omitempty"` + // InternalExitNodePrior is the most recently used ExitNodeID in string form. It is set by // the backend on transition from exit node on to off and used by the // backend. @@ -325,6 +344,7 @@ type MaskedPrefs struct { RouteAllSet bool `json:",omitempty"` ExitNodeIDSet bool `json:",omitempty"` ExitNodeIPSet bool `json:",omitempty"` + AutoExitNodeSet bool `json:",omitempty"` InternalExitNodePriorSet bool `json:",omitempty"` // Internal; can't be set by LocalAPI clients ExitNodeAllowLANAccessSet bool `json:",omitempty"` CorpDNSSet bool `json:",omitempty"` @@ -533,6 +553,9 @@ func (p *Prefs) pretty(goos string) string { } else if !p.ExitNodeID.IsZero() { fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess) } + if p.AutoExitNode.IsSet() { + fmt.Fprintf(&sb, "auto=%v ", p.AutoExitNode) + } if len(p.AdvertiseRoutes) > 0 || goos == "linux" { fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes) } @@ -609,6 +632,7 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.RouteAll == p2.RouteAll && p.ExitNodeID == p2.ExitNodeID && p.ExitNodeIP == p2.ExitNodeIP && + p.AutoExitNode == p2.AutoExitNode && p.InternalExitNodePrior == p2.InternalExitNodePrior && p.ExitNodeAllowLANAccess == p2.ExitNodeAllowLANAccess && p.CorpDNS == p2.CorpDNS && @@ -804,6 +828,7 @@ func isRemoteIP(st *ipnstate.Status, ip netip.Addr) bool { func (p *Prefs) ClearExitNode() { p.ExitNodeID = "" p.ExitNodeIP = netip.Addr{} + p.AutoExitNode = "" } // ExitNodeLocalIPError is returned when the requested IP address for an exit @@ -1043,3 +1068,23 @@ func (p *LoginProfile) Equals(p2 *LoginProfile) bool { p.LocalUserID == p2.LocalUserID && p.ControlURL == p2.ControlURL } + +// ExitNodeExpression is a string that specifies how an exit node +// should be selected. An empty string means that no exit node +// should be selected. +// +// As of 2025-07-02, the only supported value is [AnyExitNode]. 
+type ExitNodeExpression string + +// AnyExitNode indicates that the exit node should be automatically +// selected from the pool of available exit nodes, excluding any +// disallowed by policy (e.g., [syspolicy.AllowedSuggestedExitNodes]). +// The exact implementation is subject to change, but exit nodes +// offering the best performance will be preferred. +const AnyExitNode ExitNodeExpression = "any" + +// IsSet reports whether the expression is non-empty and can be used +// to select an exit node. +func (e ExitNodeExpression) IsSet() bool { + return e != "" +} diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index d28d161db422e..268ea206c137f 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -40,6 +40,7 @@ func TestPrefsEqual(t *testing.T) { "RouteAll", "ExitNodeID", "ExitNodeIP", + "AutoExitNode", "InternalExitNodePrior", "ExitNodeAllowLANAccess", "CorpDNS", @@ -150,6 +151,17 @@ func TestPrefsEqual(t *testing.T) { true, }, + { + &Prefs{AutoExitNode: ""}, + &Prefs{AutoExitNode: "auto:any"}, + false, + }, + { + &Prefs{AutoExitNode: "auto:any"}, + &Prefs{AutoExitNode: "auto:any"}, + true, + }, + { &Prefs{}, &Prefs{ExitNodeAllowLANAccess: true}, From c46145b99e4157d89df807dc64133e31d855cf09 Mon Sep 17 00:00:00 2001 From: David Bond Date: Fri, 4 Jul 2025 12:19:23 +0100 Subject: [PATCH 0061/1093] cmd/k8s-operator: Move login server value to top-level (#16470) This commit modifies the operator helm chart values to bring the newly added `loginServer` field to the top level. We felt as though it was a bit confusing to be at the `operatorConfig` level as this value modifies the behaviour or the operator, api server & all resources that the operator manages. 
Updates https://github.com/tailscale/corp/issues/29847 Signed-off-by: David Bond --- cmd/k8s-operator/deploy/chart/templates/deployment.yaml | 2 +- cmd/k8s-operator/deploy/chart/values.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 8deba7dab0139..01a290c076368 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -69,7 +69,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: OPERATOR_LOGIN_SERVER - value: {{ .Values.operatorConfig.loginServer }} + value: {{ .Values.loginServer }} - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index af941425a5006..0ba8d045a858d 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -9,6 +9,9 @@ oauth: {} # clientId: "" # clientSecret: "" +# URL of the control plane to be used by all resources managed by the operator. +loginServer: "" + # Secret volume. # If set it defines the volume the oauth secrets will be mounted from. # The volume needs to contain two files named `client_id` and `client_secret`. @@ -72,9 +75,6 @@ operatorConfig: # - name: EXTRA_VAR2 # value: "value2" - # URL of the control plane to be used by all resources managed by the operator. 
- loginServer: "" - # In the case that you already have a tailscale ingressclass in your cluster (or vcluster), you can disable the creation here ingressClass: enabled: true From 639fed6856722bad94762b48546cd84331f12b97 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 4 Jul 2025 16:06:22 +0100 Subject: [PATCH 0062/1093] Dockerfile,build_docker.sh: add a note on how to build local images (#16471) Updates#cleanup Signed-off-by: Irbe Krumina --- Dockerfile | 9 +++++++++ build_docker.sh | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/Dockerfile b/Dockerfile index 015022e49fc28..fbc0d1194ffc3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,6 +7,15 @@ # Tailscale images are currently built using https://github.com/tailscale/mkctr, # and the build script can be found in ./build_docker.sh. # +# If you want to build local images for testing, you can use make. +# +# To build a Tailscale image and push to the local docker registry: +# +# $ REPO=local/tailscale TAGS=v0.0.1 PLATFORM=local make publishdevimage +# +# To build a Tailscale image and push to a remote docker registry: +# +# $ REPO=//tailscale TAGS=v0.0.1 make publishdevimage # # This Dockerfile includes all the tailscale binaries. # diff --git a/build_docker.sh b/build_docker.sh index bdc9dc08609fa..7840dc89775d3 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -6,6 +6,16 @@ # hash of this repository as produced by ./cmd/mkversion. # This is the image build mechanim used to build the official Tailscale # container images. +# +# If you want to build local images for testing, you can use make, which provides few convenience wrappers around this script. 
+# +# To build a Tailscale image and push to the local docker registry: + +# $ REPO=local/tailscale TAGS=v0.0.1 PLATFORM=local make publishdevimage +# +# To build a Tailscale image and push to a remote docker registry: +# +# $ REPO=//tailscale TAGS=v0.0.1 make publishdevimage set -eu From 92a114c66d296704d48045ee12c0fe28bb7f5b6c Mon Sep 17 00:00:00 2001 From: Dylan Bargatze Date: Fri, 4 Jul 2025 12:48:38 -0400 Subject: [PATCH 0063/1093] tailcfg, feature/relayserver, wgengine/magicsock: invert UDP relay server nodeAttrs (#16444) Inverts the nodeAttrs related to UDP relay client/server enablement to disablement, and fixes up the corresponding logic that uses them. Also updates the doc comments on both nodeAttrs. Fixes tailscale/corp#30024 Signed-off-by: Dylan Bargatze --- feature/relayserver/relayserver.go | 18 +++++++++--------- tailcfg/tailcfg.go | 21 ++++++++++++++------- wgengine/magicsock/magicsock.go | 2 +- wgengine/magicsock/magicsock_test.go | 3 --- 4 files changed, 24 insertions(+), 20 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 5a82a9d117bd7..f4a533193999e 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -50,11 +50,11 @@ func newExtension(logf logger.Logf, _ ipnext.SafeBackend) (ipnext.Extension, err type extension struct { logf logger.Logf - mu sync.Mutex // guards the following fields - shutdown bool - port *int // ipn.Prefs.RelayServerPort, nil if disabled - hasNodeAttrRelayServer bool // tailcfg.NodeAttrRelayServer - server relayServer // lazily initialized + mu sync.Mutex // guards the following fields + shutdown bool + port *int // ipn.Prefs.RelayServerPort, nil if disabled + hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer + server relayServer // lazily initialized } // relayServer is the interface of [udprelay.Server]. 
@@ -81,8 +81,8 @@ func (e *extension) Init(host ipnext.Host) error { func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { e.mu.Lock() defer e.mu.Unlock() - e.hasNodeAttrRelayServer = nodeView.HasCap(tailcfg.NodeAttrRelayServer) - if !e.hasNodeAttrRelayServer && e.server != nil { + e.hasNodeAttrDisableRelayServer = nodeView.HasCap(tailcfg.NodeAttrDisableRelayServer) + if e.hasNodeAttrDisableRelayServer && e.server != nil { e.server.Close() e.server = nil } @@ -130,8 +130,8 @@ func (e *extension) relayServerOrInit() (relayServer, error) { if e.port == nil { return nil, errors.New("relay server is not configured") } - if !e.hasNodeAttrRelayServer { - return nil, errors.New("no relay:server node attribute") + if e.hasNodeAttrDisableRelayServer { + return nil, errors.New("disable-relay-server node attribute is present") } if !envknob.UseWIPCode() { return nil, errors.New("TAILSCALE_USE_WIP_CODE envvar is not set") diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 10b157ac15642..d97f60a8acb84 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2602,13 +2602,20 @@ const ( // peer node list. NodeAttrNativeIPV4 NodeCapability = "native-ipv4" - // NodeAttrRelayServer permits the node to act as an underlay UDP relay - // server. There are no expected values for this key in NodeCapMap. - NodeAttrRelayServer NodeCapability = "relay:server" - - // NodeAttrRelayClient permits the node to act as an underlay UDP relay - // client. There are no expected values for this key in NodeCapMap. - NodeAttrRelayClient NodeCapability = "relay:client" + // NodeAttrDisableRelayServer prevents the node from acting as an underlay + // UDP relay server. There are no expected values for this key; the key + // only needs to be present in [NodeCapMap] to take effect. 
+ NodeAttrDisableRelayServer NodeCapability = "disable-relay-server" + + // NodeAttrDisableRelayClient prevents the node from allocating UDP relay + // server endpoints itself; the node may still bind into and relay traffic + // using endpoints allocated by its peers. This attribute can be added to + // the node dynamically; if added while the node is already running, the + // node will be unable to allocate UDP relay server endpoints after it next + // updates its network map. There are no expected values for this key in + // [NodeCapMap]; the key only needs to be present in [NodeCapMap] to take + // effect. + NodeAttrDisableRelayClient NodeCapability = "disable-relay-client" // NodeAttrMagicDNSPeerAAAA is a capability that tells the node's MagicDNS // server to answer AAAA queries about its peers. See tailscale/tailscale#1152. diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 174345a84ac87..5719b20f9ab6f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2703,7 +2703,7 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { peersChanged := c.updateNodes(update) relayClientEnabled := update.SelfNode.Valid() && - update.SelfNode.HasCap(tailcfg.NodeAttrRelayClient) && + !update.SelfNode.HasCap(tailcfg.NodeAttrDisableRelayClient) && envknob.UseWIPCode() c.mu.Lock() diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 8aa9a09d2c15a..c388e9ed15d00 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3408,9 +3408,6 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { } peerOnlyIPv4 := &tailcfg.Node{ Cap: math.MinInt32, - CapMap: map[tailcfg.NodeCapability][]tailcfg.RawMessage{ - tailcfg.NodeAttrRelayServer: nil, - }, Addresses: []netip.Prefix{ netip.MustParsePrefix("2.2.2.2/32"), }, From 079134d3c0f51ad27e502e70a172e10326c70d3d Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 7 Jul 2025 00:40:56 
+0100 Subject: [PATCH 0064/1093] cmd/k8s-operator: always set ProxyGroup status conditions (#16429) Refactors setting status into its own top-level function to make it easier to ensure we _always_ set the status if it's changed on every reconcile. Previously, it was possible to have stale status if some earlier part of the provision logic failed. Updates #16327 Change-Id: Idab0cfc15ae426cf6914a82f0d37a5cc7845236b Signed-off-by: Tom Proctor --- .../crds/tailscale.com_proxygroups.yaml | 5 +- .../deploy/manifests/operator.yaml | 5 +- cmd/k8s-operator/proxygroup.go | 300 +++++++++--------- cmd/k8s-operator/proxygroup_test.go | 66 ++-- k8s-operator/api.md | 2 +- .../apis/v1alpha1/types_proxygroup.go | 6 +- 6 files changed, 212 insertions(+), 172 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index f695e989d7b85..c426c8427a507 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -124,7 +124,10 @@ spec: conditions: description: |- List of status conditions to indicate the status of the ProxyGroup - resources. Known condition types are `ProxyGroupReady`. + resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. + `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled + and ready. `ProxyGroupAvailable` indicates that at least one proxy is + ready to serve traffic. type: array items: description: Condition contains details for one aspect of the current state of this API Resource. 
diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 4f1faf104cfc6..2888575692594 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2953,7 +2953,10 @@ spec: conditions: description: |- List of status conditions to indicate the status of the ProxyGroup - resources. Known condition types are `ProxyGroupReady`. + resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. + `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled + and ready. `ProxyGroupAvailable` indicates that at least one proxy is + ready to serve traffic. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 1b622c920d22d..c44de09a7fc45 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -13,6 +13,7 @@ import ( "net/http" "net/netip" "slices" + "sort" "strings" "sync" @@ -48,7 +49,6 @@ const ( reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed" reasonProxyGroupReady = "ProxyGroupReady" reasonProxyGroupCreating = "ProxyGroupCreating" - reasonProxyGroupInvalid = "ProxyGroupInvalid" // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" @@ -132,17 +132,15 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } oldPGStatus := pg.Status.DeepCopy() - setStatusReady := func(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, message, pg.Generation, r.clock, logger) - if !apiequality.Semantic.DeepEqual(oldPGStatus, 
&pg.Status) { - // An error encountered here should get returned by the Reconcile function. - if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { - err = errors.Join(err, updateErr) - } - } - return reconcile.Result{}, err - } + staticEndpoints, nrr, err := r.reconcilePG(ctx, pg, logger) + return reconcile.Result{}, errors.Join(err, r.maybeUpdateStatus(ctx, logger, pg, oldPGStatus, nrr, staticEndpoints)) +} +// reconcilePG handles all reconciliation of a ProxyGroup that is not marked +// for deletion. It is separated out from Reconcile to make a clear separation +// between reconciling the ProxyGroup, and posting the status of its created +// resources onto the ProxyGroup status field. +func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (map[string][]netip.AddrPort, *notReadyReason, error) { if !slices.Contains(pg.Finalizers, FinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -150,18 +148,11 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ // operation is underway. 
logger.Infof("ensuring ProxyGroup is set up") pg.Finalizers = append(pg.Finalizers, FinalizerName) - if err = r.Update(ctx, pg); err != nil { - err = fmt.Errorf("error adding finalizer: %w", err) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, reasonProxyGroupCreationFailed) + if err := r.Update(ctx, pg); err != nil { + return r.notReadyErrf(pg, "error adding finalizer: %w", err) } } - if err = r.validate(pg); err != nil { - message := fmt.Sprintf("ProxyGroup is invalid: %s", err) - r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupInvalid, message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupInvalid, message) - } - proxyClassName := r.defaultProxyClass if pg.Spec.ProxyClass != "" { proxyClassName = pg.Spec.ProxyClass @@ -172,78 +163,33 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ proxyClass = new(tsapi.ProxyClass) err := r.Get(ctx, types.NamespacedName{Name: proxyClassName}, proxyClass) if apierrors.IsNotFound(err) { - err = nil - message := fmt.Sprintf("the ProxyGroup's ProxyClass %s does not (yet) exist", proxyClassName) - logger.Info(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q does not (yet) exist", proxyClassName) + logger.Info(msg) + return r.notReady(reasonProxyGroupCreating, msg) } if err != nil { - err = fmt.Errorf("error getting ProxyGroup's ProxyClass %s: %s", proxyClassName, err) - r.recorder.Eventf(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreationFailed, err.Error()) + return r.notReadyErrf(pg, "error getting ProxyGroup's ProxyClass %q: %w", proxyClassName, err) } validateProxyClassForPG(logger, pg, proxyClass) if !tsoperator.ProxyClassIsReady(proxyClass) { - message := fmt.Sprintf("the ProxyGroup's ProxyClass %s is not yet in a ready state, 
waiting...", proxyClassName) - logger.Info(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) + msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q is not yet in a ready state, waiting...", proxyClassName) + logger.Info(msg) + return r.notReady(reasonProxyGroupCreating, msg) } } - isProvisioned, err := r.maybeProvision(ctx, pg, proxyClass) + staticEndpoints, nrr, err := r.maybeProvision(ctx, pg, proxyClass) if err != nil { - reason := reasonProxyGroupCreationFailed - msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", err) if strings.Contains(err.Error(), optimisticLockErrorMsg) { - reason = reasonProxyGroupCreating - msg = fmt.Sprintf("optimistic lock error, retrying: %s", err) - err = nil + msg := fmt.Sprintf("optimistic lock error, retrying: %s", nrr.message) logger.Info(msg) + return r.notReady(reasonProxyGroupCreating, msg) } else { - r.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg) - } - - return setStatusReady(pg, metav1.ConditionFalse, reason, msg) - } - - if !isProvisioned { - if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { - // An error encountered here should get returned by the Reconcile function. - if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { - return reconcile.Result{}, errors.Join(err, updateErr) - } + return nil, nrr, err } - return } - desiredReplicas := int(pgReplicas(pg)) - - // Set ProxyGroupAvailable condition. - status := metav1.ConditionFalse - reason := reasonProxyGroupCreating - message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(pg.Status.Devices), desiredReplicas) - if len(pg.Status.Devices) > 0 { - status = metav1.ConditionTrue - if len(pg.Status.Devices) == desiredReplicas { - reason = reasonProxyGroupReady - } - } - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, pg.Generation, r.clock, logger) - - // Set ProxyGroupReady condition. 
- if len(pg.Status.Devices) < desiredReplicas { - logger.Debug(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) - } - - if len(pg.Status.Devices) > desiredReplicas { - message = fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(pg.Status.Devices)-desiredReplicas) - logger.Debug(message) - return setStatusReady(pg, metav1.ConditionFalse, reasonProxyGroupCreating, message) - } - - logger.Info("ProxyGroup resources synced") - return setStatusReady(pg, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady) + return staticEndpoints, nrr, nil } // validateProxyClassForPG applies custom validation logic for ProxyClass applied to ProxyGroup. @@ -271,7 +217,7 @@ func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc } } -func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (isProvisioned bool, err error) { +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { logger := r.logger(pg.Name) r.mu.Lock() r.ensureAddedToGaugeForProxyGroup(pg) @@ -280,31 +226,30 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro svcToNodePorts := make(map[string]uint16) var tailscaledPort *uint16 if proxyClass != nil && proxyClass.Spec.StaticEndpoints != nil { + var err error svcToNodePorts, tailscaledPort, err = r.ensureNodePortServiceCreated(ctx, pg, proxyClass) if err != nil { - wrappedErr := fmt.Errorf("error provisioning NodePort Services for static endpoints: %w", err) var allocatePortErr *allocatePortsErr if errors.As(err, &allocatePortErr) { reason := reasonProxyGroupCreationFailed - msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", wrappedErr) - r.setStatusReady(pg, metav1.ConditionFalse, reason, msg, logger) - return false, nil + msg := fmt.Sprintf("error 
provisioning NodePort Services for static endpoints: %v", err) + r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) + return r.notReady(reason, msg) } - return false, wrappedErr + return r.notReadyErrf(pg, "error provisioning NodePort Services for static endpoints: %w", err) } } staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass, svcToNodePorts) if err != nil { - wrappedErr := fmt.Errorf("error provisioning config Secrets: %w", err) var selectorErr *FindStaticEndpointErr if errors.As(err, &selectorErr) { reason := reasonProxyGroupCreationFailed - msg := fmt.Sprintf("error provisioning ProxyGroup resources: %s", wrappedErr) - r.setStatusReady(pg, metav1.ConditionFalse, reason, msg, logger) - return false, nil + msg := fmt.Sprintf("error provisioning config Secrets: %v", err) + r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) + return r.notReady(reason, msg) } - return false, wrappedErr + return r.notReadyErrf(pg, "error provisioning config Secrets: %w", err) } // State secrets are precreated so we can use the ProxyGroup CR as their owner ref. 
@@ -315,7 +260,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences }); err != nil { - return false, fmt.Errorf("error provisioning state Secrets: %w", err) + return r.notReadyErrf(pg, "error provisioning state Secrets: %w", err) } } sa := pgServiceAccount(pg, r.tsNamespace) @@ -324,7 +269,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences }); err != nil { - return false, fmt.Errorf("error provisioning ServiceAccount: %w", err) + return r.notReadyErrf(pg, "error provisioning ServiceAccount: %w", err) } role := pgRole(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { @@ -333,7 +278,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences r.Rules = role.Rules }); err != nil { - return false, fmt.Errorf("error provisioning Role: %w", err) + return r.notReadyErrf(pg, "error provisioning Role: %w", err) } roleBinding := pgRoleBinding(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { @@ -343,7 +288,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.RoleRef = roleBinding.RoleRef r.Subjects = roleBinding.Subjects }); err != nil { - return false, fmt.Errorf("error provisioning RoleBinding: %w", err) + return r.notReadyErrf(pg, "error provisioning RoleBinding: %w", err) } if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { cm, hp := pgEgressCM(pg, r.tsNamespace) @@ -352,7 +297,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences 
mak.Set(&existing.BinaryData, egressservices.KeyHEPPings, hp) }); err != nil { - return false, fmt.Errorf("error provisioning egress ConfigMap %q: %w", cm.Name, err) + return r.notReadyErrf(pg, "error provisioning egress ConfigMap %q: %w", cm.Name, err) } } if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { @@ -361,28 +306,27 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.Labels = cm.ObjectMeta.Labels existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences }); err != nil { - return false, fmt.Errorf("error provisioning ingress ConfigMap %q: %w", cm.Name, err) + return r.notReadyErrf(pg, "error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, tailscaledPort, proxyClass) if err != nil { - return false, fmt.Errorf("error generating StatefulSet spec: %w", err) + return r.notReadyErrf(pg, "error generating StatefulSet spec: %w", err) } cfg := &tailscaleSTSConfig{ proxyType: string(pg.Spec.Type), } ss = applyProxyClassToStatefulSet(proxyClass, ss, cfg, logger) - updateSS := func(s *appsv1.StatefulSet) { + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { s.Spec = ss.Spec - s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences + }); err != nil { + return r.notReadyErrf(pg, "error provisioning StatefulSet: %w", err) } - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, updateSS); err != nil { - return false, fmt.Errorf("error provisioning StatefulSet: %w", err) - } + mo := &metricsOpts{ tsNamespace: r.tsNamespace, proxyStsName: pg.Name, @@ -390,21 +334,67 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro proxyType: "proxygroup", } if err := reconcileMetricsResources(ctx, logger, mo, proxyClass, r.Client); err != nil { - return false, 
fmt.Errorf("error reconciling metrics resources: %w", err) + return r.notReadyErrf(pg, "error reconciling metrics resources: %w", err) } if err := r.cleanupDanglingResources(ctx, pg, proxyClass); err != nil { - return false, fmt.Errorf("error cleaning up dangling resources: %w", err) + return r.notReadyErrf(pg, "error cleaning up dangling resources: %w", err) } - devices, err := r.getDeviceInfo(ctx, staticEndpoints, pg) + logger.Info("ProxyGroup resources synced") + + return staticEndpoints, nil, nil +} + +func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, oldPGStatus *tsapi.ProxyGroupStatus, nrr *notReadyReason, endpoints map[string][]netip.AddrPort) (err error) { + defer func() { + if !apiequality.Semantic.DeepEqual(*oldPGStatus, pg.Status) { + if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { + err = errors.Join(err, updateErr) + } + } + }() + + devices, err := r.getRunningProxies(ctx, pg, endpoints) if err != nil { - return false, fmt.Errorf("failed to get device info: %w", err) + return fmt.Errorf("failed to list running proxies: %w", err) } pg.Status.Devices = devices - return true, nil + desiredReplicas := int(pgReplicas(pg)) + + // Set ProxyGroupAvailable condition. + status := metav1.ConditionFalse + reason := reasonProxyGroupCreating + message := fmt.Sprintf("%d/%d ProxyGroup pods running", len(devices), desiredReplicas) + if len(devices) > 0 { + status = metav1.ConditionTrue + if len(devices) == desiredReplicas { + reason = reasonProxyGroupReady + } + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, 0, r.clock, logger) + + // Set ProxyGroupReady condition. + status = metav1.ConditionFalse + reason = reasonProxyGroupCreating + switch { + case nrr != nil: + // If we failed earlier, that reason takes precedence. 
+ reason = nrr.reason + message = nrr.message + case len(devices) < desiredReplicas: + case len(devices) > desiredReplicas: + message = fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(devices)-desiredReplicas) + default: + status = metav1.ConditionTrue + reason = reasonProxyGroupReady + message = reasonProxyGroupReady + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, message, pg.Generation, r.clock, logger) + + return nil } // getServicePortsForProxyGroups returns a map of ProxyGroup Service names to their NodePorts, @@ -484,15 +474,15 @@ func (r *ProxyGroupReconciler) ensureNodePortServiceCreated(ctx context.Context, tailscaledPort := getRandomPort() svcs := []*corev1.Service{} for i := range pgReplicas(pg) { - replicaName := pgNodePortServiceName(pg.Name, i) + nodePortSvcName := pgNodePortServiceName(pg.Name, i) svc := &corev1.Service{} - err := r.Get(ctx, types.NamespacedName{Name: replicaName, Namespace: r.tsNamespace}, svc) + err := r.Get(ctx, types.NamespacedName{Name: nodePortSvcName, Namespace: r.tsNamespace}, svc) if err != nil && !apierrors.IsNotFound(err) { - return nil, nil, fmt.Errorf("error getting Kubernetes Service %q: %w", replicaName, err) + return nil, nil, fmt.Errorf("error getting Kubernetes Service %q: %w", nodePortSvcName, err) } if apierrors.IsNotFound(err) { - svcs = append(svcs, pgNodePortService(pg, replicaName, r.tsNamespace)) + svcs = append(svcs, pgNodePortService(pg, nodePortSvcName, r.tsNamespace)) } else { // NOTE: if we can we want to recover the random port used for tailscaled, // as well as the NodePort previously used for that Service @@ -638,7 +628,7 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailc func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass, svcToNodePorts map[string]uint16) (endpoints map[string][]netip.AddrPort, err error) { logger := r.logger(pg.Name) - 
endpoints = make(map[string][]netip.AddrPort, pgReplicas(pg)) + endpoints = make(map[string][]netip.AddrPort, pgReplicas(pg)) // keyed by Service name. for i := range pgReplicas(pg) { cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -691,14 +681,15 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } - replicaName := pgNodePortServiceName(pg.Name, i) + nodePortSvcName := pgNodePortServiceName(pg.Name, i) if len(svcToNodePorts) > 0 { - port, ok := svcToNodePorts[replicaName] + replicaName := fmt.Sprintf("%s-%d", pg.Name, i) + port, ok := svcToNodePorts[nodePortSvcName] if !ok { return nil, fmt.Errorf("could not find configured NodePort for ProxyGroup replica %q", replicaName) } - endpoints[replicaName], err = r.findStaticEndpoints(ctx, existingCfgSecret, proxyClass, port, logger) + endpoints[nodePortSvcName], err = r.findStaticEndpoints(ctx, existingCfgSecret, proxyClass, port, logger) if err != nil { return nil, fmt.Errorf("could not find static endpoints for replica %q: %w", replicaName, err) } @@ -711,7 +702,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p return nil, err } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[replicaName], existingAdvertiseServices, r.loginServer) + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) } @@ -910,16 +901,14 @@ func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error) return conf.AdvertiseServices, nil } -func (r *ProxyGroupReconciler) validate(_ *tsapi.ProxyGroup) error { - return nil -} - // getNodeMetadata gets metadata for all the pods owned by this ProxyGroup by // querying their state Secrets. It may not return the same number of items as // specified in the ProxyGroup spec if e.g. 
it is getting scaled up or down, or // some pods have failed to write state. +// +// The returned metadata will contain an entry for each state Secret that exists. func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup) (metadata []nodeMetadata, _ error) { - // List all state secrets owned by this ProxyGroup. + // List all state Secrets owned by this ProxyGroup. secrets := &corev1.SecretList{} if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, "state"))); err != nil { return nil, fmt.Errorf("failed to list state Secrets: %w", err) @@ -930,20 +919,20 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr return nil, fmt.Errorf("unexpected secret %s was labelled as owned by the ProxyGroup %s: %w", secret.Name, pg.Name, err) } + nm := nodeMetadata{ + ordinal: ordinal, + stateSecret: &secret, + } + prefs, ok, err := getDevicePrefs(&secret) if err != nil { return nil, err } - if !ok { - continue + if ok { + nm.tsID = prefs.Config.NodeID + nm.dnsName = prefs.Config.UserProfile.LoginName } - nm := nodeMetadata{ - ordinal: ordinal, - stateSecret: &secret, - tsID: prefs.Config.NodeID, - dnsName: prefs.Config.UserProfile.LoginName, - } pod := &corev1.Pod{} if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: fmt.Sprintf("%s-%d", pg.Name, ordinal)}, pod); err != nil && !apierrors.IsNotFound(err) { return nil, err @@ -953,23 +942,36 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr metadata = append(metadata, nm) } + // Sort for predictable ordering and status. 
+ sort.Slice(metadata, func(i, j int) bool { + return metadata[i].ordinal < metadata[j].ordinal + }) + return metadata, nil } -func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoints map[string][]netip.AddrPort, pg *tsapi.ProxyGroup) (devices []tsapi.TailnetDevice, _ error) { +// getRunningProxies will return status for all proxy Pods whose state Secret +// has an up to date Pod UID and at least a hostname. +func (r *ProxyGroupReconciler) getRunningProxies(ctx context.Context, pg *tsapi.ProxyGroup, staticEndpoints map[string][]netip.AddrPort) (devices []tsapi.TailnetDevice, _ error) { metadata, err := r.getNodeMetadata(ctx, pg) if err != nil { return nil, err } - for _, m := range metadata { - if !strings.EqualFold(string(m.stateSecret.Data[kubetypes.KeyPodUID]), m.podUID) { + for i, m := range metadata { + if m.podUID == "" || !strings.EqualFold(string(m.stateSecret.Data[kubetypes.KeyPodUID]), m.podUID) { // Current Pod has not yet written its UID to the state Secret, data may // be stale. continue } device := tsapi.TailnetDevice{} + if hostname, _, ok := strings.Cut(string(m.stateSecret.Data[kubetypes.KeyDeviceFQDN]), "."); ok { + device.Hostname = hostname + } else { + continue + } + if ipsB := m.stateSecret.Data[kubetypes.KeyDeviceIPs]; len(ipsB) > 0 { ips := []string{} if err := json.Unmarshal(ipsB, &ips); err != nil { @@ -978,11 +980,10 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoint device.TailnetIPs = ips } - if hostname, _, ok := strings.Cut(string(m.stateSecret.Data[kubetypes.KeyDeviceFQDN]), "."); ok { - device.Hostname = hostname - } - - if ep, ok := staticEndpoints[device.Hostname]; ok && len(ep) > 0 { + // TODO(tomhjp): This is our input to the proxy, but we should instead + // read this back from the proxy's state in some way to more accurately + // reflect its status. 
+ if ep, ok := staticEndpoints[pgNodePortServiceName(pg.Name, int32(i))]; ok && len(ep) > 0 { eps := make([]string, 0, len(ep)) for _, e := range ep { eps = append(eps, e.String()) @@ -999,13 +1000,28 @@ func (r *ProxyGroupReconciler) getDeviceInfo(ctx context.Context, staticEndpoint type nodeMetadata struct { ordinal int stateSecret *corev1.Secret - // podUID is the UID of the current Pod or empty if the Pod does not exist. - podUID string - tsID tailcfg.StableNodeID - dnsName string + podUID string // or empty if the Pod no longer exists. + tsID tailcfg.StableNodeID + dnsName string +} + +func (r *ProxyGroupReconciler) notReady(reason, msg string) (map[string][]netip.AddrPort, *notReadyReason, error) { + return nil, ¬ReadyReason{ + reason: reason, + message: msg, + }, nil +} + +func (r *ProxyGroupReconciler) notReadyErrf(pg *tsapi.ProxyGroup, format string, a ...any) (map[string][]netip.AddrPort, *notReadyReason, error) { + err := fmt.Errorf(format, a...) + r.recorder.Event(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) + return nil, ¬ReadyReason{ + reason: reasonProxyGroupCreationFailed, + message: err.Error(), + }, err } -func (pr *ProxyGroupReconciler) setStatusReady(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason string, msg string, logger *zap.SugaredLogger) { - pr.recorder.Eventf(pg, corev1.EventTypeWarning, reason, msg) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, msg, pg.Generation, pr.clock, logger) +type notReadyReason struct { + reason string + message string } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 87b04a434c102..bd69b49a8978d 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -6,7 +6,6 @@ package main import ( - "context" "encoding/json" "fmt" "net/netip" @@ -22,6 +21,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors 
"k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" @@ -207,7 +207,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, expectedIPs: []netip.Addr{}, - expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning NodePort Services for static endpoints: failed to allocate NodePorts to ProxyGroup Services: not enough available ports to allocate all replicas (needed 4, got 3). Field 'spec.staticEndpoints.nodePort.ports' on ProxyClass \"default-pc\" must have bigger range allocated"}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning NodePort Services for static endpoints: failed to allocate NodePorts to ProxyGroup Services: not enough available ports to allocate all replicas (needed 4, got 3). Field 'spec.staticEndpoints.nodePort.ports' on ProxyClass \"default-pc\" must have bigger range allocated"}, expectedErr: "", expectStatefulSet: false, }, @@ -265,7 +265,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { {name: "node2", addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeInternalIP}}, labels: map[string]string{"zone": "eu-central"}}, }, expectedIPs: []netip.Addr{}, - expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning config Secrets: could not find static endpoints for replica \"test-0\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, 
expectedErr: "", expectStatefulSet: false, }, @@ -309,7 +309,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, expectedIPs: []netip.Addr{}, - expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to find any `status.addresses` of type \"ExternalIP\" on nodes using configured Selectors on `spec.staticEndpoints.nodePort.selectors` for ProxyClass \"default-pc\""}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning config Secrets: could not find static endpoints for replica \"test-0\": failed to find any `status.addresses` of type \"ExternalIP\" on nodes using configured Selectors on `spec.staticEndpoints.nodePort.selectors` for ProxyClass \"default-pc\""}, expectedErr: "", expectStatefulSet: false, }, @@ -576,7 +576,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, expectedIPs: []netip.Addr{netip.MustParseAddr("10.0.0.1"), netip.MustParseAddr("10.0.0.2")}, - expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning ProxyGroup resources: error provisioning config Secrets: could not find static endpoints for replica \"test-0-nodeport\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, + expectedEvents: []string{"Warning ProxyGroupCreationFailed error provisioning config Secrets: could not find static endpoints for replica \"test-0\": failed to match nodes to configured Selectors on `spec.staticEndpoints.nodePort.selectors` field for ProxyClass \"default-pc\""}, expectStatefulSet: true, }, }, @@ -659,7 +659,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { Address: addr.ip, }) } - if err := fc.Create(context.Background(), no); err != nil { + if err := fc.Create(t.Context(), no); err != nil { t.Fatalf("failed to create node %q: %v", n.name, err) } 
createdNodes = append(createdNodes, *no) @@ -670,11 +670,11 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { pg.Spec.Replicas = r.replicas pc.Spec.StaticEndpoints = r.staticEndpointConfig - createOrUpdate(context.Background(), fc, "", pg, func(o *tsapi.ProxyGroup) { + createOrUpdate(t.Context(), fc, "", pg, func(o *tsapi.ProxyGroup) { o.Spec.Replicas = pg.Spec.Replicas }) - createOrUpdate(context.Background(), fc, "", pc, func(o *tsapi.ProxyClass) { + createOrUpdate(t.Context(), fc, "", pc, func(o *tsapi.ProxyClass) { o.Spec.StaticEndpoints = pc.Spec.StaticEndpoints }) @@ -686,7 +686,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { expectEvents(t, fr, r.expectedEvents) sts := &appsv1.StatefulSet{} - err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts) + err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts) if r.expectStatefulSet { if err != nil { t.Fatalf("failed to get StatefulSet: %v", err) @@ -694,7 +694,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { for j := range 2 { sec := &corev1.Secret{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: fmt.Sprintf("%s-%d-config", pg.Name, j)}, sec); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: fmt.Sprintf("%s-%d-config", pg.Name, j)}, sec); err != nil { t.Fatalf("failed to get state Secret for replica %d: %v", j, err) } @@ -740,7 +740,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { } pgroup := &tsapi.ProxyGroup{} - err = fc.Get(context.Background(), client.ObjectKey{Name: pg.Name}, pgroup) + err = fc.Get(t.Context(), client.ObjectKey{Name: pg.Name}, pgroup) if err != nil { t.Fatalf("failed to get ProxyGroup %q: %v", pg.Name, err) } @@ -762,7 +762,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { // node cleanup between reconciles // we created a new set of nodes for each for _, n := range createdNodes { - 
err := fc.Delete(context.Background(), &n) + err := fc.Delete(t.Context(), &n) if err != nil && !apierrors.IsNotFound(err) { t.Fatalf("failed to delete node: %v", err) } @@ -784,14 +784,14 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { clock: cl, } - if err := fc.Delete(context.Background(), pg); err != nil { + if err := fc.Delete(t.Context(), pg); err != nil { t.Fatalf("error deleting ProxyGroup: %v", err) } expectReconciled(t, reconciler, "", pg.Name) expectMissing[tsapi.ProxyGroup](t, fc, "", pg.Name) - if err := fc.Delete(context.Background(), pc); err != nil { + if err := fc.Delete(t.Context(), pc); err != nil { t.Fatalf("error deleting ProxyClass: %v", err) } expectMissing[tsapi.ProxyClass](t, fc, "", pc.Name) @@ -855,7 +855,8 @@ func TestProxyGroup(t *testing.T) { t.Run("proxyclass_not_ready", func(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass default-pc is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass \"default-pc\" is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, false, pc) }) @@ -870,7 +871,7 @@ func TestProxyGroup(t *testing.T) { LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, }}, } - if err := fc.Status().Update(context.Background(), pc); err != nil { + if err := fc.Status().Update(t.Context(), pc); err != nil { t.Fatal(err) } @@ -978,7 +979,7 @@ func TestProxyGroup(t *testing.T) { }) t.Run("delete_and_cleanup", func(t *testing.T) { - if err := fc.Delete(context.Background(), pg); 
err != nil { + if err := fc.Delete(t.Context(), pg); err != nil { t.Fatal(err) } @@ -1049,7 +1050,7 @@ func TestProxyGroupTypes(t *testing.T) { verifyProxyGroupCounts(t, reconciler, 0, 1) sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupEgress) @@ -1059,7 +1060,7 @@ func TestProxyGroupTypes(t *testing.T) { // Verify that egress configuration has been set up. cm := &corev1.ConfigMap{} cmName := fmt.Sprintf("%s-egress-config", pg.Name) - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: cmName}, cm); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: cmName}, cm); err != nil { t.Fatalf("failed to get ConfigMap: %v", err) } @@ -1135,7 +1136,7 @@ func TestProxyGroupTypes(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } @@ -1155,7 +1156,7 @@ func TestProxyGroupTypes(t *testing.T) { Replicas: ptr.To[int32](0), }, } - if err := fc.Create(context.Background(), pg); err != nil { + if err := fc.Create(t.Context(), pg); err != nil { t.Fatal(err) } @@ -1163,7 +1164,7 @@ func TestProxyGroupTypes(t *testing.T) { verifyProxyGroupCounts(t, reconciler, 1, 2) sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: 
pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupIngress) @@ -1306,7 +1307,7 @@ func proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsap func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name string) *tsapi.ProxyClass { t.Helper() pc := &tsapi.ProxyClass{} - if err := fc.Get(context.Background(), client.ObjectKey{Name: name}, pc); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Name: name}, pc); err != nil { t.Fatal(err) } pc.Status = tsapi.ProxyClassStatus{ @@ -1319,7 +1320,7 @@ func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name s ObservedGeneration: pc.Generation, }}, } - if err := fc.Status().Update(context.Background(), pc); err != nil { + if err := fc.Status().Update(t.Context(), pc); err != nil { t.Fatal(err) } return pc @@ -1398,7 +1399,7 @@ func expectSecrets(t *testing.T, fc client.WithWatch, expected []string) { t.Helper() secrets := &corev1.SecretList{} - if err := fc.List(context.Background(), secrets); err != nil { + if err := fc.List(t.Context(), secrets); err != nil { t.Fatal(err) } @@ -1413,6 +1414,7 @@ func expectSecrets(t *testing.T, fc client.WithWatch, expected []string) { } func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyGroup) { + t.Helper() const key = "profile-abc" for i := range pgReplicas(pg) { bytes, err := json.Marshal(map[string]any{ @@ -1424,6 +1426,17 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG t.Fatal(err) } + podUID := fmt.Sprintf("pod-uid-%d", i) + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", pg.Name, i), + Namespace: "tailscale", + UID: types.UID(podUID), + }, + } + if _, err := createOrUpdate(t.Context(), fc, "tailscale", pod, nil); err != nil { + t.Fatalf("failed to create or update Pod %s: %v", pod.Name, err) + } mustUpdate(t, fc, tsNamespace, 
fmt.Sprintf("test-%d", i), func(s *corev1.Secret) { s.Data = map[string][]byte{ currentProfileKey: []byte(key), @@ -1433,6 +1446,7 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG // TODO(tomhjp): We have two different mechanisms to retrieve device IDs. // Consolidate on this one. kubetypes.KeyDeviceID: []byte(fmt.Sprintf("nodeid-%d", i)), + kubetypes.KeyPodUID: []byte(podUID), } }) } @@ -1512,7 +1526,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { // Verify that the StatefulSet created for ProxyGrup has // the expected setting for the staging endpoint. sts := &appsv1.StatefulSet{} - if err := fc.Get(context.Background(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { t.Fatalf("failed to get StatefulSet: %v", err) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index aba5f9e2df4b2..18bf1cb50400f 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -658,7 +658,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
resources. Known condition types are `ProxyGroupReady`. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`.
`ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled
and ready. `ProxyGroupAvailable` indicates that at least one proxy is
ready to serve traffic. | | | | `devices` _[TailnetDevice](#tailnetdevice) array_ | List of tailnet devices associated with the ProxyGroup StatefulSet. | | | diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index 17b13064bb4fc..5edb47f0da6c3 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -88,7 +88,11 @@ type ProxyGroupSpec struct { type ProxyGroupStatus struct { // List of status conditions to indicate the status of the ProxyGroup - // resources. Known condition types are `ProxyGroupReady`. + // resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. + // `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled + // and ready. `ProxyGroupAvailable` indicates that at least one proxy is + // ready to serve traffic. + // // +listType=map // +listMapKey=type // +optional From 4f3355e4997500cef05a7189e6a325c8a687730e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 6 Jul 2025 22:25:18 -0600 Subject: [PATCH 0065/1093] .github: Bump github/codeql-action from 3.29.0 to 3.29.1 (#16423) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.0 to 3.29.1. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/ce28f5bb42b7a9f2c824e633a3f6ee835bab6858...39edc492dbe16b1465b0cafca41432d857bdb31a) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2b471e943318f..610b93b610ea3 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/autobuild@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 From 84eac7b8de99e0d6bad73f2b7998ede7228f2a2a Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 7 Jul 2025 12:12:59 +0100 Subject: [PATCH 0066/1093] cmd/k8s-operator: Allow custom ingress class names (#16472) This commit modifies the k8s operator to allow for customisation of the ingress class name via a new `OPERATOR_INGRESS_CLASS_NAME` environment variable. For backwards compatibility, this defaults to `tailscale`. 
When using helm, a new `ingress.name` value is provided that will set this environment variable and modify the name of the deployed `IngressClass` resource. Fixes https://github.com/tailscale/tailscale/issues/16248 Signed-off-by: David Bond --- .../deploy/chart/templates/deployment.yaml | 2 + .../deploy/chart/templates/ingressclass.yaml | 2 +- cmd/k8s-operator/deploy/chart/values.yaml | 4 ++ .../deploy/manifests/operator.yaml | 2 + cmd/k8s-operator/ingress-for-pg.go | 21 ++++++----- cmd/k8s-operator/ingress-for-pg_test.go | 22 +++++++---- cmd/k8s-operator/ingress.go | 10 ++--- cmd/k8s-operator/ingress_test.go | 23 ++++++++---- cmd/k8s-operator/operator.go | 37 +++++++++++-------- cmd/k8s-operator/operator_test.go | 12 +++--- 10 files changed, 83 insertions(+), 52 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 01a290c076368..51d0a88c36671 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -70,6 +70,8 @@ spec: fieldPath: metadata.namespace - name: OPERATOR_LOGIN_SERVER value: {{ .Values.loginServer }} + - name: OPERATOR_INGRESS_CLASS_NAME + value: {{ .Values.ingressClass.name }} - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE diff --git a/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml b/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml index 208d58ee10f08..54851955db67a 100644 --- a/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/ingressclass.yaml @@ -2,7 +2,7 @@ apiVersion: networking.k8s.io/v1 kind: IngressClass metadata: - name: tailscale # class name currently can not be changed + name: {{ .Values.ingressClass.name }} annotations: {} # we do not support default IngressClass annotation https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class spec: 
controller: tailscale.com/ts-ingress # controller name currently can not be changed diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index 0ba8d045a858d..2926f6d0759f2 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -77,6 +77,10 @@ operatorConfig: # In the case that you already have a tailscale ingressclass in your cluster (or vcluster), you can disable the creation here ingressClass: + # Allows for customization of the ingress class name used by the operator to identify ingresses to reconcile. This does + # not allow multiple operator instances to manage different ingresses, but provides an onboarding route for users that + # may have previously set up ingress classes named "tailscale" prior to using the operator. + name: "tailscale" enabled: true # proxyConfig contains configuraton that will be applied to any ingress/egress diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 2888575692594..cdf301318f923 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -5129,6 +5129,8 @@ spec: fieldPath: metadata.namespace - name: OPERATOR_LOGIN_SERVER value: null + - name: OPERATOR_INGRESS_CLASS_NAME + value: tailscale - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 09417fd0c8878..79bad92be080e 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -68,14 +68,15 @@ var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGRes type HAIngressReconciler struct { client.Client - recorder record.EventRecorder - logger *zap.SugaredLogger - tsClient tsClient - tsnetServer tsnetServer - tsNamespace string - lc localClient - defaultTags []string - operatorID string // stableID of 
the operator's Tailscale device + recorder record.EventRecorder + logger *zap.SugaredLogger + tsClient tsClient + tsnetServer tsnetServer + tsNamespace string + lc localClient + defaultTags []string + operatorID string // stableID of the operator's Tailscale device + ingressClassName string mu sync.Mutex // protects following // managedIngresses is a set of all ingress resources that we're currently @@ -162,7 +163,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } - if err := validateIngressClass(ctx, r.Client); err != nil { + if err := validateIngressClass(ctx, r.Client, r.ingressClassName); err != nil { logger.Infof("error validating tailscale IngressClass: %v.", err) return false, nil } @@ -645,7 +646,7 @@ func (r *HAIngressReconciler) tailnetCertDomain(ctx context.Context) (string, er func (r *HAIngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { isTSIngress := ing != nil && ing.Spec.IngressClassName != nil && - *ing.Spec.IngressClassName == tailscaleIngressClassName + *ing.Spec.IngressClassName == r.ingressClassName pgAnnot := ing.Annotations[AnnotationProxyGroup] return isTSIngress && pgAnnot != "" } diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 2308514f3af9c..d29368caef59d 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -438,7 +438,12 @@ func TestValidateIngress(t *testing.T) { WithObjects(tt.ing). WithLists(&networkingv1.IngressList{Items: tt.existingIngs}). 
Build() + r := &HAIngressReconciler{Client: fc} + if tt.ing.Spec.IngressClassName != nil { + r.ingressClassName = *tt.ing.Spec.IngressClassName + } + err := r.validateIngress(context.Background(), tt.ing, tt.pg) if (err == nil && tt.wantErr != "") || (err != nil && err.Error() != tt.wantErr) { t.Errorf("validateIngress() error = %v, wantErr %v", err, tt.wantErr) @@ -841,14 +846,15 @@ func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeT } ingPGR := &HAIngressReconciler{ - Client: fc, - tsClient: ft, - defaultTags: []string{"tag:k8s"}, - tsNamespace: "operator-ns", - tsnetServer: fakeTsnetServer, - logger: zl.Sugar(), - recorder: record.NewFakeRecorder(10), - lc: lc, + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + tsNamespace: "operator-ns", + tsnetServer: fakeTsnetServer, + logger: zl.Sugar(), + recorder: record.NewFakeRecorder(10), + lc: lc, + ingressClassName: tsIngressClass.Name, } return ingPGR, fc, ft diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index d6277093824fb..d66cf9116f14a 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -32,7 +32,6 @@ import ( ) const ( - tailscaleIngressClassName = "tailscale" // ingressClass.metadata.name for tailscale IngressClass resource tailscaleIngressControllerName = "tailscale.com/ts-ingress" // ingressClass.spec.controllerName for tailscale IngressClass resource ingressClassDefaultAnnotation = "ingressclass.kubernetes.io/is-default-class" // we do not support this https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class indexIngressProxyClass = ".metadata.annotations.ingress-proxy-class" @@ -52,6 +51,7 @@ type IngressReconciler struct { managedIngresses set.Slice[types.UID] defaultProxyClass string + ingressClassName string } var ( @@ -132,7 +132,7 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare // This function adds a finalizer to ing, ensuring that we can 
handle orderly // deprovisioning later. func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, ing *networkingv1.Ingress) error { - if err := validateIngressClass(ctx, a.Client); err != nil { + if err := validateIngressClass(ctx, a.Client, a.ingressClassName); err != nil { logger.Warnf("error validating tailscale IngressClass: %v. In future this might be a terminal error.", err) } if !slices.Contains(ing.Finalizers, FinalizerName) { @@ -266,17 +266,17 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga func (a *IngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { return ing != nil && ing.Spec.IngressClassName != nil && - *ing.Spec.IngressClassName == tailscaleIngressClassName && + *ing.Spec.IngressClassName == a.ingressClassName && ing.Annotations[AnnotationProxyGroup] == "" } // validateIngressClass attempts to validate that 'tailscale' IngressClass // included in Tailscale installation manifests exists and has not been modified // to attempt to enable features that we do not support. 
-func validateIngressClass(ctx context.Context, cl client.Client) error { +func validateIngressClass(ctx context.Context, cl client.Client, ingressClassName string) error { ic := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{ - Name: tailscaleIngressClassName, + Name: ingressClassName, }, } if err := cl.Get(ctx, client.ObjectKeyFromObject(ic), ic); apierrors.IsNotFound(err) { diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index e4396eb106a96..fe4d90c785c47 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -36,7 +36,8 @@ func TestTailscaleIngress(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -120,7 +121,8 @@ func TestTailscaleIngressHostname(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -245,7 +247,8 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -350,7 +353,8 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -498,7 +502,8 @@ func TestIngressProxyClassAnnotation(t *testing.T) { mustCreate(t, fc, ing) ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: &fakeTSClient{}, @@ -568,7 +573,8 @@ func TestIngressLetsEncryptStaging(t *testing.T) { mustCreate(t, fc, ing) ingR := &IngressReconciler{ - Client: fc, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, 
tsClient: &fakeTSClient{}, @@ -675,8 +681,9 @@ func TestEmptyPath(t *testing.T) { t.Fatal(err) } ingR := &IngressReconciler{ - recorder: fr, - Client: fc, + recorder: fr, + Client: fc, + ingressClassName: "tailscale", ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 276de411c45cb..96b3b37ad0340 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -83,6 +83,7 @@ func main() { defaultProxyClass = defaultEnv("PROXY_DEFAULT_CLASS", "") isDefaultLoadBalancer = defaultBool("OPERATOR_DEFAULT_LOAD_BALANCER", false) loginServer = strings.TrimSuffix(defaultEnv("OPERATOR_LOGIN_SERVER", ""), "/") + ingressClassName = defaultEnv("OPERATOR_INGRESS_CLASS_NAME", "tailscale") ) var opts []kzap.Opts @@ -133,6 +134,7 @@ func main() { proxyFirewallMode: tsFirewallMode, defaultProxyClass: defaultProxyClass, loginServer: loginServer, + ingressClassName: ingressClassName, } runReconcilers(rOpts) } @@ -343,7 +345,7 @@ func runReconcilers(opts reconcilerOpts) { // ProxyClass's name. proxyClassFilterForIngress := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForIngress(mgr.GetClient(), startlog)) // Enque Ingress if a managed Service or backend Service associated with a tailscale Ingress changes. - svcHandlerForIngress := handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngress(mgr.GetClient(), startlog)) + svcHandlerForIngress := handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngress(mgr.GetClient(), startlog, opts.ingressClassName)) err = builder. ControllerManagedBy(mgr). For(&networkingv1.Ingress{}). 
@@ -358,6 +360,7 @@ func runReconcilers(opts reconcilerOpts) { Client: mgr.GetClient(), logger: opts.log.Named("ingress-reconciler"), defaultProxyClass: opts.defaultProxyClass, + ingressClassName: opts.ingressClassName, }) if err != nil { startlog.Fatalf("could not create ingress reconciler: %v", err) @@ -379,19 +382,20 @@ func runReconcilers(opts reconcilerOpts) { ControllerManagedBy(mgr). For(&networkingv1.Ingress{}). Named("ingress-pg-reconciler"). - Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). + Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog, opts.ingressClassName))). Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(HAIngressesFromSecret(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). Complete(&HAIngressReconciler{ - recorder: eventRecorder, - tsClient: opts.tsClient, - tsnetServer: opts.tsServer, - defaultTags: strings.Split(opts.proxyTags, ","), - Client: mgr.GetClient(), - logger: opts.log.Named("ingress-pg-reconciler"), - lc: lc, - operatorID: id, - tsNamespace: opts.tailscaleNamespace, + recorder: eventRecorder, + tsClient: opts.tsClient, + tsnetServer: opts.tsServer, + defaultTags: strings.Split(opts.proxyTags, ","), + Client: mgr.GetClient(), + logger: opts.log.Named("ingress-pg-reconciler"), + lc: lc, + operatorID: id, + tsNamespace: opts.tailscaleNamespace, + ingressClassName: opts.ingressClassName, }) if err != nil { startlog.Fatalf("could not create ingress-pg-reconciler: %v", err) @@ -697,6 +701,9 @@ type reconcilerOpts struct { defaultProxyClass string // loginServer is the coordination server URL that should be used by managed resources. loginServer string + // ingressClassName is the name of the ingress class used by reconcilers of Ingress resources. This defaults + // to "tailscale" but can be customised. 
+ ingressClassName string } // enqueueAllIngressEgressProxySvcsinNS returns a reconcile request for each @@ -1015,7 +1022,7 @@ func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) // The Services of interest are backend Services for tailscale Ingress and // managed Services for an StatefulSet for a proxy configured for tailscale // Ingress -func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { +func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger, ingressClassName string) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { if isManagedByType(o, "ingress") { ingName := parentFromObjectLabels(o) @@ -1028,7 +1035,7 @@ func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger) handl } reqs := make([]reconcile.Request, 0) for _, ing := range ingList.Items { - if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { + if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName { return nil } if hasProxyGroupAnnotation(&ing) { @@ -1533,7 +1540,7 @@ func indexPGIngresses(o client.Object) []string { // serviceHandlerForIngressPG returns a handler for Service events that ensures that if the Service // associated with an event is a backend Service for a tailscale Ingress with ProxyGroup annotation, // the associated Ingress gets reconciled. 
-func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { +func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger, ingressClassName string) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { ingList := networkingv1.IngressList{} if err := cl.List(ctx, &ingList, client.InNamespace(o.GetNamespace())); err != nil { @@ -1542,7 +1549,7 @@ func serviceHandlerForIngressPG(cl client.Client, logger *zap.SugaredLogger) han } reqs := make([]reconcile.Request, 0) for _, ing := range ingList.Items { - if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != tailscaleIngressClassName { + if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName { continue } if !hasProxyGroupAnnotation(&ing) { diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index a9f08c18b4793..1f700f13a4fc0 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1549,6 +1549,8 @@ func Test_isMagicDNSName(t *testing.T) { } func Test_serviceHandlerForIngress(t *testing.T) { + const tailscaleIngressClassName = "tailscale" + fc := fake.NewFakeClient() zl, err := zap.NewDevelopment() if err != nil { @@ -1578,7 +1580,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { } mustCreate(t, fc, svc1) wantReqs := []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "ns-1", Name: "ing-1"}}} - gotReqs := serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), svc1) + gotReqs := serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), svc1) if diff := cmp.Diff(gotReqs, wantReqs); diff != "" { t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) } @@ -1605,7 +1607,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { } mustCreate(t, fc, backendSvc) wantReqs = []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "ns-2", Name: "ing-2"}}} 
- gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), backendSvc) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), backendSvc) if diff := cmp.Diff(gotReqs, wantReqs); diff != "" { t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) } @@ -1634,7 +1636,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { } mustCreate(t, fc, backendSvc2) wantReqs = []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "ns-3", Name: "ing-3"}}} - gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), backendSvc2) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), backendSvc2) if diff := cmp.Diff(gotReqs, wantReqs); diff != "" { t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) } @@ -1661,7 +1663,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { }, } mustCreate(t, fc, nonTSBackend) - gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), nonTSBackend) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), nonTSBackend) if len(gotReqs) > 0 { t.Errorf("unexpected reconcile request for a Service that does not belong to a Tailscale Ingress: %#+v\n", gotReqs) } @@ -1675,7 +1677,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { }, } mustCreate(t, fc, someSvc) - gotReqs = serviceHandlerForIngress(fc, zl.Sugar())(context.Background(), someSvc) + gotReqs = serviceHandlerForIngress(fc, zl.Sugar(), tailscaleIngressClassName)(context.Background(), someSvc) if len(gotReqs) > 0 { t.Errorf("unexpected reconcile request for a Service that does not belong to any Ingress: %#+v\n", gotReqs) } From 540eb0563803e86fd08369d242e0aff4db5fee32 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 7 Jul 2025 08:45:13 -0700 Subject: [PATCH 0067/1093] wgengine/magicsock: make Conn.Send() lazyEndpoint aware (#16465) A lazyEndpoint may end up on this 
TX codepath when wireguard-go is deemed "under load" and ends up transmitting a cookie reply using the received conn.Endpoint. Updates tailscale/corp#20732 Updates tailscale/corp#30042 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 5719b20f9ab6f..8d3b2d082c633 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1363,12 +1363,18 @@ func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint, offset int) (err error) { metricSendDataNetworkDown.Add(n) return errNetworkDown } - if ep, ok := ep.(*endpoint); ok { + switch ep := ep.(type) { + case *endpoint: return ep.send(buffs, offset) + case *lazyEndpoint: + // A [*lazyEndpoint] may end up on this TX codepath when wireguard-go is + // deemed "under handshake load" and ends up transmitting a cookie reply + // using the received [conn.Endpoint] in [device.SendHandshakeCookie]. + if ep.src.ap.Addr().Is6() { + return c.pconn6.WriteBatchTo(buffs, ep.src, offset) + } + return c.pconn4.WriteBatchTo(buffs, ep.src, offset) } - // If it's not of type *endpoint, it's probably *lazyEndpoint, which means - // we don't actually know who the peer is and we're waiting for wireguard-go - // to switch the endpoint. See go/corp/20732. return nil } @@ -1702,6 +1708,11 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach } // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() // for the same batch & [epAddr] src. + // + // TODO(jwhited): implement [lazyEndpoint] integration to call + // [endpoint.noteRecvActivity], which triggers just-in-time + // wireguard-go configuration of the peer, prior to peer lookup + // within wireguard-go. 
return &lazyEndpoint{c: c, src: src}, size, true } cache.epAddr = src @@ -1709,8 +1720,6 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach } cache.gen = de.numStopAndReset() ep = de } - // TODO(jwhited): consider the implications of not recording this receive - // activity due to an early [lazyEndpoint] return above. now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) ep.noteRecvActivity(src, now) From 3b32cc758647bde17c9e3fef36086439ba1bb7e8 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 7 Jul 2025 09:38:10 -0700 Subject: [PATCH 0068/1093] wgengine/magicsock: simplify Geneve-encapsulated disco.Ping handling (#16448) Just make [relayManager] always handle it, there's no benefit to checking bestAddr's. Also, remove passing of disco.Pong to [relayManager] in endpoint.handlePongConnLocked(), which is redundant with the callsite in Conn.handleDiscoMessage(). Conn.handleDiscoMessage() already passes to [relayManager] if the txID is not known to any [*endpoint]. Updates tailscale/corp#27502 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 7 -- wgengine/magicsock/magicsock.go | 129 +++++++++++------------- wgengine/magicsock/relaymanager.go | 7 +- wgengine/magicsock/relaymanager_test.go | 2 +- 4 files changed, 61 insertions(+), 84 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 0569341ff4ab3..4780c7f37a781 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1656,13 +1656,6 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd de.mu.Lock() defer de.mu.Unlock() - if src.vni.isSet() && src != de.bestAddr.epAddr { - // "src" is not our bestAddr, but [relayManager] might be in the - // middle of probing it, awaiting pong reception. Make it aware.
- de.c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(de.c, m, di, src) - return false - } - isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr sp, ok := de.sentPing[m.TxID] diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 8d3b2d082c633..37de4668ab394 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2103,7 +2103,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) return } - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, challenge, di, src) + c.relayManager.handleGeneveEncapDiscoMsg(c, challenge, di, src) return } @@ -2125,7 +2125,10 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return true }) if !knownTxID && src.vni.isSet() { - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, dm, di, src) + // If it's an unknown TxID, and it's Geneve-encapsulated, then + // make [relayManager] aware. It might be in the middle of probing + // src. + c.relayManager.handleGeneveEncapDiscoMsg(c, dm, di, src) } case *disco.CallMeMaybe, *disco.CallMeMaybeVia: var via *disco.CallMeMaybeVia @@ -2233,6 +2236,35 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN di.lastPingTime = time.Now() isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr + if src.vni.isSet() { + if isDerp { + c.logf("[unexpected] got Geneve-encapsulated disco ping from %v/%v over DERP", src, derpNodeSrc) + return + } + + // [relayManager] is always responsible for handling (replying) to + // Geneve-encapsulated [disco.Ping] messages in the interest of + // simplicity. It might be in the middle of probing src, so it must be + // made aware. + c.relayManager.handleGeneveEncapDiscoMsg(c, dm, di, src) + return + } + + // This is a naked [disco.Ping] without a VNI. 
+ + // If we can figure out with certainty which node key this disco + // message is for, eagerly update our [epAddr]<>node and disco<>node + // mappings to make p2p path discovery faster in simple + // cases. Without this, disco would still work, but would be + // reliant on DERP call-me-maybe to establish the disco<>node + // mapping, and on subsequent disco handlePongConnLocked to establish + // the IP:port<>disco mapping. + if nk, ok := c.unambiguousNodeKeyOfPingLocked(dm, di.discoKey, derpNodeSrc); ok { + if !isDerp { + c.peerMap.setNodeKeyForEpAddr(src, nk) + } + } + // numNodes tracks how many nodes (node keys) are associated with the disco // key tied to this inbound ping. Multiple nodes may share the same disco // key in the case of node sharing and users switching accounts. @@ -2244,81 +2276,34 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // a dstKey if the dst ip:port is DERP. dstKey := derpNodeSrc - switch { - case src.vni.isSet(): - if isDerp { - c.logf("[unexpected] got Geneve-encapsulated disco ping from %v/%v over DERP", src, derpNodeSrc) - return - } - - var bestEpAddr epAddr - var discoKey key.DiscoPublic - ep, ok := c.peerMap.endpointForEpAddr(src) - if ok { - ep.mu.Lock() - bestEpAddr = ep.bestAddr.epAddr - ep.mu.Unlock() - disco := ep.disco.Load() - if disco != nil { - discoKey = disco.key + // Remember this route if not present. + var dup bool + if isDerp { + if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { + if ep.addCandidateEndpoint(src.ap, dm.TxID) { + return } - } - - if src == bestEpAddr && discoKey == di.discoKey { - // We have an associated endpoint with src as its bestAddr. Set - // numNodes so we TX a pong further down. numNodes = 1 - } else { - // We have no [endpoint] in the [peerMap] for this relay [epAddr] - // using it as a bestAddr. [relayManager] might be in the middle of - // probing it or attempting to set it as best via - // [endpoint.udpRelayEndpointReady()]. 
Make [relayManager] aware. - c.relayManager.handleGeneveEncapDiscoMsgNotBestAddr(c, dm, di, src) - return - } - default: // no VNI - // If we can figure out with certainty which node key this disco - // message is for, eagerly update our [epAddr]<>node and disco<>node - // mappings to make p2p path discovery faster in simple - // cases. Without this, disco would still work, but would be - // reliant on DERP call-me-maybe to establish the disco<>node - // mapping, and on subsequent disco handlePongConnLocked to establish - // the IP:port<>disco mapping. - if nk, ok := c.unambiguousNodeKeyOfPingLocked(dm, di.discoKey, derpNodeSrc); ok { - if !isDerp { - c.peerMap.setNodeKeyForEpAddr(src, nk) - } } - - // Remember this route if not present. - var dup bool - if isDerp { - if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - return - } - numNodes = 1 - } - } else { - c.peerMap.forEachEndpointWithDiscoKey(di.discoKey, func(ep *endpoint) (keepGoing bool) { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - dup = true - return false - } - numNodes++ - if numNodes == 1 && dstKey.IsZero() { - dstKey = ep.publicKey - } - return true - }) - if dup { - return + } else { + c.peerMap.forEachEndpointWithDiscoKey(di.discoKey, func(ep *endpoint) (keepGoing bool) { + if ep.addCandidateEndpoint(src.ap, dm.TxID) { + dup = true + return false } - if numNodes > 1 { - // Zero it out if it's ambiguous, so sendDiscoMessage logging - // isn't confusing. - dstKey = key.NodePublic{} + numNodes++ + if numNodes == 1 && dstKey.IsZero() { + dstKey = ep.publicKey } + return true + }) + if dup { + return + } + if numNodes > 1 { + // Zero it out if it's ambiguous, so sendDiscoMessage logging + // isn't confusing. 
+ dstKey = key.NodePublic{} } } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 1c173c01ac138..c8c9ed41b7b82 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -325,10 +325,9 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, lastBest addrQuality, }) } -// handleGeneveEncapDiscoMsgNotBestAddr handles reception of Geneve-encapsulated -// disco messages if they are not associated with any known -// [*endpoint.bestAddr]. -func (r *relayManager) handleGeneveEncapDiscoMsgNotBestAddr(conn *Conn, dm disco.Message, di *discoInfo, src epAddr) { +// handleGeneveEncapDiscoMsg handles reception of Geneve-encapsulated disco +// messages. +func (r *relayManager) handleGeneveEncapDiscoMsg(conn *Conn, dm disco.Message, di *discoInfo, src epAddr) { relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{conn: conn, msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 8feff2f3d5ca8..8f92360122d0e 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -26,7 +26,7 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleGeneveEncapDiscoMsgNotBestAddr(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) + rm.handleGeneveEncapDiscoMsg(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) <-rm.runLoopStoppedCh rm = relayManager{} From a84d58015ce875863a266dacdfb1ffd65de1615d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 7 Jul 2025 10:06:38 -0700 Subject: [PATCH 0069/1093] wgengine/magicsock: fix lazyEndpoint DstIP() vs SrcIP() (#16453) These were flipped. 
DstIP() and DstToBytes() are used internally by wireguard-go as part of a handshake DoS mitigation strategy. Updates tailscale/corp#20732 Updates tailscale/corp#30042 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 34 +++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 37de4668ab394..a7eab36786f12 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3774,12 +3774,12 @@ func (c *Conn) SetLastNetcheckReportForTest(ctx context.Context, report *netchec c.lastNetCheckReport.Store(report) } -// lazyEndpoint is a wireguard conn.Endpoint for when magicsock received a +// lazyEndpoint is a wireguard [conn.Endpoint] for when magicsock received a // non-disco (presumably WireGuard) packet from a UDP address from which we -// can't map to a Tailscale peer. But Wireguard most likely can, once it -// decrypts it. So we implement the conn.PeerAwareEndpoint interface +// can't map to a Tailscale peer. But WireGuard most likely can, once it +// decrypts it. So we implement the [conn.PeerAwareEndpoint] interface // from https://github.com/tailscale/wireguard-go/pull/27 to allow WireGuard -// to tell us who it is later and get the correct conn.Endpoint. +// to tell us who it is later and get the correct [conn.Endpoint].
type lazyEndpoint struct { c *Conn src epAddr @@ -3788,12 +3788,26 @@ type lazyEndpoint struct { var _ conn.PeerAwareEndpoint = (*lazyEndpoint)(nil) var _ conn.Endpoint = (*lazyEndpoint)(nil) -func (le *lazyEndpoint) ClearSrc() {} -func (le *lazyEndpoint) SrcIP() netip.Addr { return le.src.ap.Addr() } -func (le *lazyEndpoint) DstIP() netip.Addr { return netip.Addr{} } -func (le *lazyEndpoint) SrcToString() string { return le.src.String() } -func (le *lazyEndpoint) DstToString() string { return "dst" } -func (le *lazyEndpoint) DstToBytes() []byte { return nil } +func (le *lazyEndpoint) ClearSrc() {} +func (le *lazyEndpoint) SrcIP() netip.Addr { return netip.Addr{} } + +// DstIP returns the remote address of the peer. +// +// Note: DstIP is used internally by wireguard-go as part of handshake DoS +// mitigation. +func (le *lazyEndpoint) DstIP() netip.Addr { return le.src.ap.Addr() } + +func (le *lazyEndpoint) SrcToString() string { return "" } +func (le *lazyEndpoint) DstToString() string { return le.src.String() } + +// DstToBytes returns a binary representation of the remote address of the peer. +// +// Note: DstToBytes is used internally by wireguard-go as part of handshake DoS +// mitigation. +func (le *lazyEndpoint) DstToBytes() []byte { + b, _ := le.src.ap.MarshalBinary() + return b +} // FromPeer implements [conn.PeerAwareEndpoint]. We return a [*lazyEndpoint] in // our [conn.ReceiveFunc]s when we are unable to identify the peer at WireGuard From 04d24cdbd4b551d95f85ca3b9b36ef147503d2b7 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Mon, 7 Jul 2025 15:36:16 -0400 Subject: [PATCH 0070/1093] wgengine/netstack: correctly proxy half-closed TCP connections TCP connections are two unidirectional data streams, and if one of these streams closes, we should not assume the other half is closed as well. 
For example, if an HTTP client closes its write half of the connection early, it may still be expecting to receive data on its read half, so we should keep the server -> client half of the connection open, while terminating the client -> server half. Fixes tailscale/corp#29837. Signed-off-by: Naman Sood --- wgengine/netstack/netstack.go | 43 ++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index dab692ead4aa7..d97c669463d78 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -1435,6 +1435,13 @@ func (ns *Impl) acceptTCP(r *tcp.ForwarderRequest) { } } +// tcpCloser is an interface to abstract around various TCPConn types that +// allow closing of the read and write streams independently of each other. +type tcpCloser interface { + CloseRead() error + CloseWrite() error +} + func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet.TCPConn, clientRemoteIP netip.Addr, wq *waiter.Queue, dialAddr netip.AddrPort) (handled bool) { dialAddrStr := dialAddr.String() if debugNetstack() { @@ -1501,18 +1508,48 @@ func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet. } defer client.Close() + // As of 2025-07-03, backend is always either a net.TCPConn + // from stdDialer.DialContext (which has the requisite functions), + // or nil from hangDialer in tests (in which case we would have + // errored out by now), so this conversion should always succeed. 
+ backendTCPCloser, backendIsTCPCloser := backend.(tcpCloser) connClosed := make(chan error, 2) go func() { _, err := io.Copy(backend, client) + if err != nil { + err = fmt.Errorf("client -> backend: %w", err) + } connClosed <- err + err = nil + if backendIsTCPCloser { + err = backendTCPCloser.CloseWrite() + } + err = errors.Join(err, client.CloseRead()) + if err != nil { + ns.logf("client -> backend close connection: %v", err) + } }() go func() { _, err := io.Copy(client, backend) + if err != nil { + err = fmt.Errorf("backend -> client: %w", err) + } connClosed <- err + err = nil + if backendIsTCPCloser { + err = backendTCPCloser.CloseRead() + } + err = errors.Join(err, client.CloseWrite()) + if err != nil { + ns.logf("backend -> client close connection: %v", err) + } }() - err = <-connClosed - if err != nil { - ns.logf("proxy connection closed with error: %v", err) + // Wait for both ends of the connection to close. + for range 2 { + err = <-connClosed + if err != nil { + ns.logf("proxy connection closed with error: %v", err) + } } ns.logf("[v2] netstack: forwarder connection to %s closed", dialAddrStr) return From 3e01652e4dba619d475cc98e691c0e1d155969ae Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 14:25:33 -0500 Subject: [PATCH 0071/1093] ipn/ipnlocal: add (*LocalBackend).RefreshExitNode In this PR, we add (*LocalBackend).RefreshExitNode which determines which exit node to use based on the current prefs and netmap and switches to it if needed. It supports both scenarios when an exit node is specified by IP (rather than ID) and needs to be resolved once the netmap is ready as well as auto exit nodes. We then use it in (*LocalBackend).SetControlClientStatus when the netmap changes, and wherever (*LocalBackend).pickNewAutoExitNode was previously used. 
Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 77 +++++++++++++++++++++++++------------------ 1 file changed, 45 insertions(+), 32 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 21057c0e675db..a69b7dd5a1289 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1627,16 +1627,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control if applySysPolicy(prefs, b.overrideAlwaysOn) { prefsChanged = true } - if prefs.AutoExitNode.IsSet() { - // Re-evaluate exit node suggestion in case circumstances have changed. - _, err := b.suggestExitNodeLocked(curNetMap) - if err != nil && !errors.Is(err, ErrNoPreferredDERP) { - b.logf("SetControlClientStatus failed to select auto exit node: %v", err) - } - } - if setExitNodeID(prefs, b.lastSuggestedExitNode, curNetMap) { - prefsChanged = true - } // Until recently, we did not store the account's tailnet name. So check if this is the case, // and backfill it on incoming status update. @@ -1653,6 +1643,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } + + b.sendToLocked(ipn.Notify{Prefs: ptr.To(prefs.View())}, allClients) } // initTKALocked is dependent on CurrentProfile.ID, which is initialized @@ -1695,16 +1687,17 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.mu.Unlock() // Now complete the lock-free parts of what we started while locked. - if prefsChanged { - b.send(ipn.Notify{Prefs: ptr.To(prefs.View())}) - } - if st.NetMap != nil { + // Check and update the exit node if needed, now that we have a new netmap. + b.RefreshExitNode() + if envknob.NoLogsNoSupport() && st.NetMap.HasCap(tailcfg.CapabilityDataPlaneAuditLogs) { msg := "tailnet requires logging to be enabled. Remove --no-logs-no-support from tailscaled command line." 
b.health.SetLocalLogConfigHealth(errors.New(msg)) // Connecting to this tailnet without logging is forbidden; boot us outta here. b.mu.Lock() + // Get the current prefs again, since we unlocked above. + prefs := b.pm.CurrentPrefs().AsStruct() prefs.WantRunning = false p := prefs.View() if err := b.pm.SetPrefs(p, ipn.NetworkProfile{ @@ -1999,7 +1992,7 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo if !ok || n.StableID() != exitNodeID { continue } - b.goTracker.Go(b.pickNewAutoExitNode) + b.goTracker.Go(b.RefreshExitNode) break } } @@ -5898,30 +5891,50 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) { } cc.SetNetInfo(ni) if refresh { - b.pickNewAutoExitNode() + b.RefreshExitNode() } } -// pickNewAutoExitNode picks a new automatic exit node if needed. -func (b *LocalBackend) pickNewAutoExitNode() { - unlock := b.lockAndGetUnlock() - defer unlock() +// RefreshExitNode determines which exit node to use based on the current +// prefs and netmap and switches to it if needed. +func (b *LocalBackend) RefreshExitNode() { + if b.resolveExitNode() { + b.authReconfig() + } +} - newSuggestion, err := b.suggestExitNodeLocked(nil) - if err != nil { - b.logf("setAutoExitNodeID: %v", err) - return +// resolveExitNode determines which exit node to use based on the current +// prefs and netmap. It updates the exit node ID in the prefs if needed, +// sends a notification to clients, and returns true if the exit node has changed. +// +// It is the caller's responsibility to reconfigure routes and actually +// start using the selected exit node, if needed. +// +// b.mu must not be held. 
+func (b *LocalBackend) resolveExitNode() (changed bool) { + b.mu.Lock() + defer b.mu.Unlock() + + nm := b.currentNode().NetMap() + prefs := b.pm.CurrentPrefs().AsStruct() + if prefs.AutoExitNode.IsSet() { + _, err := b.suggestExitNodeLocked(nil) + if err != nil && !errors.Is(err, ErrNoPreferredDERP) { + b.logf("failed to select auto exit node: %v", err) + } } - if b.pm.CurrentPrefs().ExitNodeID() == newSuggestion.ID { - return + if !setExitNodeID(prefs, b.lastSuggestedExitNode, nm) { + return false // no changes } - _, err = b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - Prefs: ipn.Prefs{ExitNodeID: newSuggestion.ID}, - ExitNodeIDSet: true, - }, unlock) - if err != nil { - b.logf("setAutoExitNodeID: failed to apply exit node ID preference: %v", err) + + if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ + MagicDNSName: nm.MagicDNSSuffix(), + DomainName: nm.DomainName(), + }); err != nil { + b.logf("failed to save exit node changes: %v", err) } + b.sendToLocked(ipn.Notify{Prefs: ptr.To(prefs.View())}, allClients) + return true } // setNetMapLocked updates the LocalBackend state to reflect the newly From 4c1c0bac8dcaa717c9909d7b5c9c9991223e9f5f Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 14:32:28 -0500 Subject: [PATCH 0072/1093] ipn/ipnlocal: plumb nodeBackend into suggestExitNode to support delta updates, such as online status changes Now that (*LocalBackend).suggestExitNodeLocked is never called with a non-current netmap (the netMap parameter is always nil, indicating that the current netmap should be used), we can remove the unused parameter. Additionally, instead of suggestExitNodeLocked passing the most recent full netmap to suggestExitNode, we now pass the current nodeBackend so it can access peers with delta updates applied. Finally, with that fixed, we no longer need to skip TestUpdateNetmapDeltaAutoExitNode. 
Updates tailscale/corp#29969 Fixes #16455 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 41 +++++++++++++++----------------------- ipn/ipnlocal/local_test.go | 9 ++++++--- 2 files changed, 22 insertions(+), 28 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a69b7dd5a1289..5fbb0bd9849d9 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1947,10 +1947,7 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { if policy.HasChanged(syspolicy.AllowedSuggestedExitNodes) { b.refreshAllowedSuggestions() // Re-evaluate exit node suggestion now that the policy setting has changed. - b.mu.Lock() - _, err := b.suggestExitNodeLocked(nil) - b.mu.Unlock() - if err != nil && !errors.Is(err, ErrNoPreferredDERP) { + if _, err := b.SuggestExitNode(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { b.logf("failed to select auto exit node: %v", err) } // If [syspolicy.ExitNodeID] is set to `auto:any`, the suggested exit node ID @@ -4490,7 +4487,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) // anyway, so its return value can be ignored here. applySysPolicy(newp, b.overrideAlwaysOn) if newp.AutoExitNode.IsSet() { - if _, err := b.suggestExitNodeLocked(nil); err != nil { + if _, err := b.suggestExitNodeLocked(); err != nil { b.logf("failed to select auto exit node: %v", err) } } @@ -5918,7 +5915,7 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { nm := b.currentNode().NetMap() prefs := b.pm.CurrentPrefs().AsStruct() if prefs.AutoExitNode.IsSet() { - _, err := b.suggestExitNodeLocked(nil) + _, err := b.suggestExitNodeLocked() if err != nil && !errors.Is(err, ErrNoPreferredDERP) { b.logf("failed to select auto exit node: %v", err) } @@ -7445,19 +7442,12 @@ var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") // Peers are selected based on having a DERP home that is the lowest latency to this device. 
For peers // without a DERP home, we look for geographic proximity to this device's DERP home. // -// netMap is an optional netmap to use that overrides b.netMap (needed for SetControlClientStatus before b.netMap is updated). -// If netMap is nil, then b.netMap is used. -// // b.mu.lock() must be held. -func (b *LocalBackend) suggestExitNodeLocked(netMap *netmap.NetworkMap) (response apitype.ExitNodeSuggestionResponse, err error) { - // netMap is an optional netmap to use that overrides b.netMap (needed for SetControlClientStatus before b.netMap is updated). If netMap is nil, then b.netMap is used. - if netMap == nil { - netMap = b.NetMap() - } +func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggestionResponse, err error) { lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx) prevSuggestion := b.lastSuggestedExitNode - res, err := suggestExitNode(lastReport, netMap, prevSuggestion, randomRegion, randomNode, b.getAllowedSuggestions()) + res, err := suggestExitNode(lastReport, b.currentNode(), prevSuggestion, randomRegion, randomNode, b.getAllowedSuggestions()) if err != nil { return res, err } @@ -7468,7 +7458,7 @@ func (b *LocalBackend) suggestExitNodeLocked(netMap *netmap.NetworkMap) (respons func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionResponse, err error) { b.mu.Lock() defer b.mu.Unlock() - return b.suggestExitNodeLocked(nil) + return b.suggestExitNodeLocked() } // getAllowedSuggestions returns a set of exit nodes permitted by the most recent @@ -7512,22 +7502,23 @@ func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] { return s } -func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { +func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, 
selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { + netMap := nb.NetMap() if report == nil || report.PreferredDERP == 0 || netMap == nil || netMap.DERPMap == nil { return res, ErrNoPreferredDERP } - candidates := make([]tailcfg.NodeView, 0, len(netMap.Peers)) - for _, peer := range netMap.Peers { + // Use [nodeBackend.AppendMatchingPeers] instead of the netmap directly, + // since the netmap doesn't include delta updates (e.g., home DERP or Online + // status changes) from the control plane since the last full update. + candidates := nb.AppendMatchingPeers(nil, func(peer tailcfg.NodeView) bool { if !peer.Valid() || !peer.Online().Get() { - continue + return false } if allowList != nil && !allowList.Contains(peer.StableID()) { - continue - } - if peer.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) && tsaddr.ContainsExitRoutes(peer.AllowedIPs()) { - candidates = append(candidates, peer) + return false } - } + return peer.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) && tsaddr.ContainsExitRoutes(peer.AllowedIPs()) + }) if len(candidates) == 0 { return res, nil } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 5c9c9f2fab4a9..5c9adfb5fc386 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -57,6 +57,7 @@ import ( "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/set" @@ -2327,8 +2328,6 @@ func TestSetExitNodeIDPolicy(t *testing.T) { } func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { - t.Skip("TODO(tailscale/tailscale#16455): suggestExitNode does not check for online status of exit nodes") - peer1 := makePeer(1, withCap(26), withSuggest(), withOnline(true), withExitRoutes()) peer2 := makePeer(2, withCap(26), withSuggest(), withOnline(true), withExitRoutes()) derpMap := 
&tailcfg.DERPMap{ @@ -4278,7 +4277,11 @@ func TestSuggestExitNode(t *testing.T) { allowList = set.SetOf(tt.allowPolicy) } - got, err := suggestExitNode(tt.lastReport, tt.netMap, tt.lastSuggestion, selectRegion, selectNode, allowList) + nb := newNodeBackend(t.Context(), eventbus.New()) + defer nb.shutdown(errShutdown) + nb.SetNetMap(tt.netMap) + + got, err := suggestExitNode(tt.lastReport, nb, tt.lastSuggestion, selectRegion, selectNode, allowList) if got.Name != tt.wantName { t.Errorf("name=%v, want %v", got.Name, tt.wantName) } From 381fdcc3f17f406bb8c5a711b562a23aaef6c98f Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 20:32:30 -0500 Subject: [PATCH 0073/1093] ipn/ipnlocal,util/syspolicy/source: retain existing exit node when using auto exit node, if it's allowed by policy In this PR, we update setExitNodeID to retain the existing exit node if auto exit node is enabled, the current exit node is allowed by policy, and no suggested exit node is available yet. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 15 +++- ipn/ipnlocal/local_test.go | 110 ++++++++++++++++++++++++++-- util/syspolicy/source/test_store.go | 7 ++ 3 files changed, 125 insertions(+), 7 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5fbb0bd9849d9..6120c52c68a06 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2026,7 +2026,20 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { // or resolve ExitNodeIP to an ID and use that. It returns whether prefs was mutated. func setExitNodeID(prefs *ipn.Prefs, suggestedExitNodeID tailcfg.StableNodeID, nm *netmap.NetworkMap) (prefsChanged bool) { if prefs.AutoExitNode.IsSet() { - newExitNodeID := cmp.Or(suggestedExitNodeID, unresolvedExitNodeID) + var newExitNodeID tailcfg.StableNodeID + if !suggestedExitNodeID.IsZero() { + // If we have a suggested exit node, use it. 
+ newExitNodeID = suggestedExitNodeID + } else if isAllowedAutoExitNodeID(prefs.ExitNodeID) { + // If we don't have a suggested exit node, but the prefs already + // specify an allowed auto exit node ID, retain it. + newExitNodeID = prefs.ExitNodeID + } else { + // Otherwise, use [unresolvedExitNodeID] to install a blackhole route, + // preventing traffic from leaking to the local network until an actual + // exit node is selected. + newExitNodeID = unresolvedExitNodeID + } if prefs.ExitNodeID != newExitNodeID { prefs.ExitNodeID = newExitNodeID prefsChanged = true diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 5c9adfb5fc386..c9bad838e9cdb 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -620,6 +620,7 @@ func TestConfigureExitNode(t *testing.T) { useExitNodeEnabled *bool exitNodeIDPolicy *tailcfg.StableNodeID exitNodeIPPolicy *netip.Addr + exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes wantPrefs ipn.Prefs }{ { @@ -894,6 +895,91 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", }, }, + { + name: "auto-any-via-policy/no-netmap/with-existing", // set auto exit node via syspolicy without a netmap, but with a previously set exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // should be retained + }, + netMap: nil, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: nil, // not configured, so all exit node IDs are implicitly allowed + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/no-netmap/with-allowed-existing", // same, but now with a syspolicy setting that explicitly allows the existing exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // should be retained + }, + netMap: nil, + report: report, + exitNodeIDPolicy: 
ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: []tailcfg.StableNodeID{ + exitNode2.StableID(), // the current exit node ID is allowed + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/no-netmap/with-disallowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // not allowed by [syspolicy.AllowedSuggestedExitNodes] + }, + netMap: nil, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: []tailcfg.StableNodeID{ + exitNode1.StableID(), // a different exit node ID; the current one is not allowed + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: unresolvedExitNodeID, // we don't have a netmap yet, and the current exit node ID is not allowed; block traffic + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/with-netmap/with-allowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // not allowed by [syspolicy.AllowedSuggestedExitNodes] + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowedIDs: []tailcfg.StableNodeID{ + exitNode2.StableID(), // a different exit node ID; the current one is not allowed + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // we have a netmap; switch to the best allowed exit node + AutoExitNode: "any", + }, + }, + { + name: "auto-any-via-policy/with-netmap/switch-to-better", // if all exit nodes are allowed, switch to the best one once we have a netmap + prefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), + }, + netMap: clientNetmap, + report: 
report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // switch to the best exit node + AutoExitNode: "any", + }, + }, { name: "auto-foo-via-policy", // set auto exit node via syspolicy with an unknown/unsupported expression prefs: ipn.Prefs{ @@ -929,19 +1015,23 @@ func TestConfigureExitNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Configure policy settings, if any. - var settings []source.TestSetting[string] + store := source.NewTestStore(t) if tt.exitNodeIDPolicy != nil { - settings = append(settings, source.TestSettingOf(syspolicy.ExitNodeID, string(*tt.exitNodeIDPolicy))) + store.SetStrings(source.TestSettingOf(syspolicy.ExitNodeID, string(*tt.exitNodeIDPolicy))) } if tt.exitNodeIPPolicy != nil { - settings = append(settings, source.TestSettingOf(syspolicy.ExitNodeIP, tt.exitNodeIPPolicy.String())) + store.SetStrings(source.TestSettingOf(syspolicy.ExitNodeIP, tt.exitNodeIPPolicy.String())) } - if settings != nil { - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, source.NewTestStoreOf(t, settings...)) - } else { + if tt.exitNodeAllowedIDs != nil { + store.SetStringLists(source.TestSettingOf(syspolicy.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs))) + } + if store.IsEmpty() { // No syspolicy settings, so don't register a store. // This allows the test to run in parallel with other tests. t.Parallel() + } else { + // Register the store for syspolicy settings to make them available to the LocalBackend. + syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, store) } // Create a new LocalBackend with the given prefs. 
@@ -6127,3 +6217,11 @@ func TestDisplayMessageIPNBus(t *testing.T) { }) } } + +func toStrings[T ~string](in []T) []string { + out := make([]string, len(in)) + for i, v := range in { + out[i] = string(v) + } + return out +} diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go index 4b175611fef0d..efaf4cd5a7c0f 100644 --- a/util/syspolicy/source/test_store.go +++ b/util/syspolicy/source/test_store.go @@ -154,6 +154,13 @@ func (s *TestStore) RegisterChangeCallback(callback func()) (unregister func(), }, nil } +// IsEmpty reports whether the store does not contain any settings. +func (s *TestStore) IsEmpty() bool { + s.mu.RLock() + defer s.mu.RUnlock() + return len(s.mr) == 0 +} + // ReadString implements [Store]. func (s *TestStore) ReadString(key setting.Key) (string, error) { defer s.recordRead(key, setting.StringValue) From cb7b49941eae3a933c4c5b7dc56398bce24d7e08 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 3 Jul 2025 19:37:56 -0500 Subject: [PATCH 0074/1093] ipn/ipnlocal: add (*LocalBackend).reconcilePrefsLocked We have several places where we call applySysPolicy, suggestExitNodeLocked, and setExitNodeID. While there are cases where we want to resolve the exit node specifically, such as when network conditions change or a new netmap is received, we typically need to perform all three steps. For example, enforcing policy settings may enable auto exit nodes or set an ExitNodeIP, which in turn requires picking a suggested exit node or resolving the IP to an ID, respectively. In this PR, we introduce (*LocalBackend).resolveExitNodeInPrefsLocked and (*LocalBackend).reconcilePrefsLocked, with the latter calling both applySysPolicy and resolveExitNodeInPrefsLocked. Consolidating these steps into a single extensibility point would also make it easier to support future hooks registered by ipnext extensions. 
Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 113 ++++++++++++++++++++++++------------- ipn/ipnlocal/local_test.go | 2 +- 2 files changed, 76 insertions(+), 39 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 6120c52c68a06..0ee249dfb732d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1624,7 +1624,11 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefsChanged = true } } - if applySysPolicy(prefs, b.overrideAlwaysOn) { + // We primarily need this to apply syspolicy to the prefs if an implicit profile + // switch is about to happen. + // TODO(nickkhyl): remove this once we improve handling of implicit profile switching + // in tailscale/corp#28014 and we apply syspolicy when the switch actually happens. + if b.reconcilePrefsLocked(prefs) { prefsChanged = true } @@ -1911,21 +1915,21 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { if unregister, err = syspolicy.RegisterChangeCallback(b.sysPolicyChanged); err != nil { return nil, fmt.Errorf("syspolicy: LocalBacked failed to register policy change callback: %v", err) } - if prefs, anyChange := b.applySysPolicy(); anyChange { + if prefs, anyChange := b.reconcilePrefs(); anyChange { b.logf("syspolicy: changed initial profile prefs: %v", prefs.Pretty()) } b.refreshAllowedSuggestions() return unregister, nil } -// applySysPolicy overwrites the current profile's preferences with policies +// reconcilePrefs overwrites the current profile's preferences with policies // that may be configured by the system administrator in an OS-specific way. // // b.mu must not be held. 
-func (b *LocalBackend) applySysPolicy() (_ ipn.PrefsView, anyChange bool) { +func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { unlock := b.lockAndGetUnlock() prefs := b.pm.CurrentPrefs().AsStruct() - if !applySysPolicy(prefs, b.overrideAlwaysOn) { + if !b.reconcilePrefsLocked(prefs) { unlock.UnlockEarly() return prefs.View(), false } @@ -1954,7 +1958,7 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { // will be used when [applySysPolicy] updates the current profile's prefs. } - if prefs, anyChange := b.applySysPolicy(); anyChange { + if prefs, anyChange := b.reconcilePrefs(); anyChange { b.logf("syspolicy: changed profile prefs: %v", prefs.Pretty()) } } @@ -2302,26 +2306,32 @@ func (b *LocalBackend) Start(opts ipn.Options) error { b.setStateLocked(ipn.NoState) cn := b.currentNode() + + prefsChanged := false + newPrefs := b.pm.CurrentPrefs().AsStruct() if opts.UpdatePrefs != nil { - oldPrefs := b.pm.CurrentPrefs() - newPrefs := opts.UpdatePrefs.Clone() - newPrefs.Persist = oldPrefs.Persist().AsStruct() - pv := newPrefs.View() - if err := b.pm.SetPrefs(pv, cn.NetworkProfile()); err != nil { - b.logf("failed to save UpdatePrefs state: %v", err) + newPrefs = opts.UpdatePrefs.Clone() + prefsChanged = true + } + // Apply any syspolicy overrides, resolve exit node ID, etc. + // As of 2025-07-03, this is primarily needed in two cases: + // - when opts.UpdatePrefs is not nil + // - when Always Mode is enabled and we need to set WantRunning to true + if b.reconcilePrefsLocked(newPrefs) { + prefsChanged = true + } + if prefsChanged { + // Neither opts.UpdatePrefs nor prefs reconciliation + // is allowed to modify Persist; retain the old value. 
+ newPrefs.Persist = b.pm.CurrentPrefs().Persist().AsStruct() + if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil { + b.logf("failed to save updated and reconciled prefs: %v", err) } } + prefs := newPrefs.View() // Reset the always-on override whenever Start is called. b.resetAlwaysOnOverrideLocked() - // And also apply syspolicy settings to the current profile. - // This is important in two cases: when opts.UpdatePrefs is not nil, - // and when Always Mode is enabled and we need to set WantRunning to true. - if newp := b.pm.CurrentPrefs().AsStruct(); applySysPolicy(newp, b.overrideAlwaysOn) { - setExitNodeID(newp, b.lastSuggestedExitNode, cn.NetMap()) - b.pm.setPrefsNoPermCheck(newp.View()) - } - prefs := b.pm.CurrentPrefs() b.setAtomicValuesFromPrefsLocked(prefs) wantRunning := prefs.WantRunning() @@ -4495,17 +4505,11 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) if oldp.Valid() { newp.Persist = oldp.Persist().AsStruct() // caller isn't allowed to override this } - // applySysPolicy returns whether it updated newp, - // but everything in this function treats b.prefs as completely new + // Apply reconciliation to the prefs, such as policy overrides, + // exit node resolution, and so on. The call returns whether it updated + // newp, but everything in this function treats newp as completely new // anyway, so its return value can be ignored here. - applySysPolicy(newp, b.overrideAlwaysOn) - if newp.AutoExitNode.IsSet() { - if _, err := b.suggestExitNodeLocked(); err != nil { - b.logf("failed to select auto exit node: %v", err) - } - } - // setExitNodeID does likewise. No-op if no exit node resolution is needed. - setExitNodeID(newp, b.lastSuggestedExitNode, netMap) + b.reconcilePrefsLocked(newp) // We do this to avoid holding the lock while doing everything else. 
@@ -5927,14 +5931,8 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { nm := b.currentNode().NetMap() prefs := b.pm.CurrentPrefs().AsStruct() - if prefs.AutoExitNode.IsSet() { - _, err := b.suggestExitNodeLocked() - if err != nil && !errors.Is(err, ErrNoPreferredDERP) { - b.logf("failed to select auto exit node: %v", err) - } - } - if !setExitNodeID(prefs, b.lastSuggestedExitNode, nm) { - return false // no changes + if !b.resolveExitNodeInPrefsLocked(prefs) { + return } if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ @@ -5947,6 +5945,45 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { return true } +// reconcilePrefsLocked applies policy overrides, exit node resolution, +// and other post-processing to the prefs, and reports whether the prefs +// were modified as a result. +// +// It must not perform any reconfiguration, as the prefs are not yet effective. +// +// b.mu must be held. +func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { + if applySysPolicy(prefs, b.overrideAlwaysOn) { + changed = true + } + if b.resolveExitNodeInPrefsLocked(prefs) { + changed = true + } + if changed { + b.logf("prefs reconciled: %v", prefs.Pretty()) + } + return changed +} + +// resolveExitNodeInPrefsLocked determines which exit node to use +// based on the specified prefs and netmap. It updates the exit node ID +// in the prefs if needed, and returns true if the exit node has changed. +// +// b.mu must be held. 
+func (b *LocalBackend) resolveExitNodeInPrefsLocked(prefs *ipn.Prefs) (changed bool) { + if prefs.AutoExitNode.IsSet() { + _, err := b.suggestExitNodeLocked() + if err != nil && !errors.Is(err, ErrNoPreferredDERP) { + b.logf("failed to select auto exit node: %v", err) + } + } + if setExitNodeID(prefs, b.lastSuggestedExitNode, b.currentNode().NetMap()) { + b.logf("exit node resolved: %v", prefs.ExitNodeID) + return true + } + return false +} + // setNetMapLocked updates the LocalBackend state to reflect the newly // received nm. If nm is nil, it resets all configuration as though // Tailscale is turned off. diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index c9bad838e9cdb..3a2258cc6051f 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2390,7 +2390,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { b.pm = pm b.lastSuggestedExitNode = test.lastSuggestedExitNode prefs := b.pm.prefs.AsStruct() - if changed := applySysPolicy(prefs, false) || setExitNodeID(prefs, test.lastSuggestedExitNode, test.nm); changed != test.prefsChanged { + if changed := b.reconcilePrefsLocked(prefs); changed != test.prefsChanged { t.Errorf("wanted prefs changed %v, got prefs changed %v", test.prefsChanged, changed) } From a6f647812901a11572b9143607ec24445574fed7 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Jul 2025 11:50:59 -0500 Subject: [PATCH 0075/1093] util/syspolicy: add HasAnyOf to check if any specified policy settings are configured Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- util/syspolicy/syspolicy.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index afcc28ff1fd86..a84afa5dbb6db 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -56,6 +56,27 @@ func MustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicySc return reg } +// HasAnyOf returns whether at least one of the specified policy 
settings is configured, +// or an error if no keys are provided or the check fails. +func HasAnyOf(keys ...Key) (bool, error) { + if len(keys) == 0 { + return false, errors.New("at least one key must be specified") + } + policy, err := rsop.PolicyFor(setting.DefaultScope()) + if err != nil { + return false, err + } + effective := policy.Get() + for _, k := range keys { + _, err := effective.GetErr(k) + if errors.Is(err, setting.ErrNotConfigured) || errors.Is(err, setting.ErrNoSuchKey) { + continue + } + return err == nil, err // err may be nil or non-nil + } + return false, nil +} + // GetString returns a string policy setting with the specified key, // or defaultValue if it does not exist. func GetString(key Key, defaultValue string) (string, error) { From f1c7b463cd1cbc6de634a8b75a14cfeca498756f Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Jul 2025 17:04:07 -0500 Subject: [PATCH 0076/1093] ipn/{ipnauth,ipnlocal,localapi}: make EditPrefs return an error if changing exit node is restricted by policy We extract checkEditPrefsAccessLocked, adjustEditPrefsLocked, and onEditPrefsLocked from the EditPrefs execution path, defining when each step is performed and what behavior is allowed at each stage. Currently, this is primarily used to support Always On mode, to handle the Exit Node enablement toggle, and to report prefs edit metrics. We then use it to enforce Exit Node policy settings by preventing users from setting an exit node and making EditPrefs return an error when an exit node is restricted by policy. This enforcement is also extended to the Exit Node toggle. These changes prepare for supporting Exit Node overrides when permitted by policy and preventing logout while Always On mode is enabled. In the future, implementation of these methods can be delegated to ipnext extensions via the feature hooks. 
Updates tailscale/corp#29969 Updates tailscale/corp#26249 Signed-off-by: Nick Khyl --- ipn/ipnauth/self.go | 12 +++ ipn/ipnlocal/local.go | 168 ++++++++++++++++++++++++++---------- ipn/ipnlocal/local_test.go | 83 +++++++++++------- ipn/localapi/localapi.go | 2 +- util/syspolicy/syspolicy.go | 5 +- 5 files changed, 191 insertions(+), 79 deletions(-) diff --git a/ipn/ipnauth/self.go b/ipn/ipnauth/self.go index 9b430dc6d915e..adee0696458d6 100644 --- a/ipn/ipnauth/self.go +++ b/ipn/ipnauth/self.go @@ -13,6 +13,11 @@ import ( // has unlimited access. var Self Actor = unrestricted{} +// TODO is a caller identity used when the operation is performed on behalf of a user, +// rather than by tailscaled itself, but the surrounding function is not yet extended +// to accept an [Actor] parameter. It grants the same unrestricted access as [Self]. +var TODO Actor = unrestricted{} + // unrestricted is an [Actor] that has unlimited access to the currently running // tailscaled instance. It's typically used for operations performed by tailscaled // on its own, or upon a request from the control plane, rather on behalf of a user. @@ -49,3 +54,10 @@ func (unrestricted) IsLocalSystem() bool { return false } // Deprecated: this method exists for compatibility with the current (as of 2025-01-28) // permission model and will be removed as we progress on tailscale/corp#18342. func (unrestricted) IsLocalAdmin(operatorUID string) bool { return false } + +// IsTailscaled reports whether the given Actor represents Tailscaled itself, +// such as [Self] or a [TODO] placeholder actor. +func IsTailscaled(a Actor) bool { + _, ok := a.(unrestricted) + return ok +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 0ee249dfb732d..03a0709e2d95d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -178,6 +178,10 @@ var ( // It is used as a context cancellation cause for the old context // and can be returned when an operation is performed on it. 
errNodeContextChanged = errors.New("profile changed") + + // errManagedByPolicy indicates the operation is blocked + // because the target state is managed by a GP/MDM policy. + errManagedByPolicy = errors.New("managed by policy") ) // LocalBackend is the glue between the major pieces of the Tailscale @@ -3477,12 +3481,14 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) - _, err := b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - Prefs: *prefsClone, - AutoUpdateSet: ipn.AutoUpdatePrefsMask{ - ApplySet: true, - }, - }, unlock) + _, err := b.editPrefsLockedOnEntry( + ipnauth.Self, + &ipn.MaskedPrefs{ + Prefs: *prefsClone, + AutoUpdateSet: ipn.AutoUpdatePrefsMask{ + ApplySet: true, + }, + }, unlock) if err != nil { b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) return @@ -4224,7 +4230,7 @@ func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { // On success, it returns the resulting prefs (or current prefs, in the case of no change). // Setting the value to false when use of an exit node is already false is not an error, // nor is true when the exit node is already in use. 
-func (b *LocalBackend) SetUseExitNodeEnabled(v bool) (ipn.PrefsView, error) { +func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.PrefsView, error) { unlock := b.lockAndGetUnlock() defer unlock() @@ -4267,7 +4273,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(v bool) (ipn.PrefsView, error) { mp.InternalExitNodePrior = p0.ExitNodeID() } } - return b.editPrefsLockedOnEntry(mp, unlock) + return b.editPrefsLockedOnEntry(actor, mp, unlock) } // MaybeClearAppConnector clears the routes from any AppConnector if @@ -4296,30 +4302,83 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return ipn.PrefsView{}, errors.New("can't set Internal fields") } - // Zeroing the ExitNodeId via localAPI must also zero the prior exit node. - if mp.ExitNodeIDSet && mp.ExitNodeID == "" { + return b.editPrefsLockedOnEntry(actor, mp, b.lockAndGetUnlock()) +} + +// checkEditPrefsAccessLocked checks whether the current user has access +// to apply the prefs changes in mp. +// +// It returns an error if the user is not allowed, or nil otherwise. +// +// b.mu must be held. +func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) error { + var errs []error + + if mp.RunSSHSet && mp.RunSSH && !envknob.CanSSHD() { + errs = append(errs, errors.New("Tailscale SSH server administratively disabled")) + } + + // Check if the user is allowed to disconnect Tailscale. + if mp.WantRunningSet && !mp.WantRunning && b.pm.CurrentPrefs().WantRunning() { + if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, b.extHost.AuditLogger()); err != nil { + errs = append(errs, err) + } + } + + // Prevent users from changing exit node preferences + // when exit node usage is managed by policy. + if mp.ExitNodeIDSet || mp.ExitNodeIPSet || mp.AutoExitNodeSet { + // TODO(nickkhyl): Allow users to override ExitNode policy settings + // if the ExitNode.AllowUserOverride policy permits it. 
+ // (Policy setting name and details are TBD. See tailscale/corp#29969) + isManaged, err := syspolicy.HasAnyOf(syspolicy.ExitNodeID, syspolicy.ExitNodeIP) + if err != nil { + err = fmt.Errorf("policy check failed: %w", err) + } else if isManaged { + err = errManagedByPolicy + } + if err != nil { + errs = append(errs, fmt.Errorf("exit node cannot be changed: %w", err)) + } + } + + return multierr.New(errs...) +} + +// adjustEditPrefsLocked applies additional changes to mp if necessary, +// such as zeroing out mutually exclusive fields. +// +// It must not assume that the changes in mp will actually be applied. +// +// b.mu must be held. +func (b *LocalBackend) adjustEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs) { + // Zeroing the ExitNodeID via localAPI must also zero the prior exit node. + if mp.ExitNodeIDSet && mp.ExitNodeID == "" && !mp.InternalExitNodePriorSet { mp.InternalExitNodePrior = "" mp.InternalExitNodePriorSet = true } // Disable automatic exit node selection if the user explicitly sets // ExitNodeID or ExitNodeIP. - if mp.ExitNodeIDSet || mp.ExitNodeIPSet { + if (mp.ExitNodeIDSet || mp.ExitNodeIPSet) && !mp.AutoExitNodeSet { mp.AutoExitNodeSet = true mp.AutoExitNode = "" } +} - // Acquire the lock before checking the profile access to prevent - // TOCTOU issues caused by the current profile changing between the - // check and the actual edit. - unlock := b.lockAndGetUnlock() - defer unlock() - if mp.WantRunningSet && !mp.WantRunning && b.pm.CurrentPrefs().WantRunning() { - if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, b.extHost.AuditLogger()); err != nil { - b.logf("check profile access failed: %v", err) - return ipn.PrefsView{}, err - } - +// onEditPrefsLocked is called when prefs are edited (typically, via LocalAPI), +// just before the changes in newPrefs are set for the current profile. 
+// +// The changes in mp have been allowed, but the resulting [ipn.Prefs] +// have not yet been applied and may be subject to reconciliation +// by [LocalBackend.reconcilePrefsLocked], either before or after being set. +// +// This method handles preference edits, typically initiated by the user, +// as opposed to reconfiguring the backend when the final prefs are set. +// +// b.mu must be held; mp must not be mutated by this method. +func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, oldPrefs, newPrefs ipn.PrefsView) { + if mp.WantRunningSet && !mp.WantRunning && oldPrefs.WantRunning() { // If a user has enough rights to disconnect, such as when [syspolicy.AlwaysOn] // is disabled, or [syspolicy.AlwaysOnOverrideWithReason] is also set and the user // provides a reason for disconnecting, then we should not force the "always on" @@ -4331,7 +4390,18 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip } } - return b.editPrefsLockedOnEntry(mp, unlock) + // This is recorded here in the EditPrefs path, not the setPrefs path on purpose. + // recordForEdit records metrics related to edits and changes, not the final state. + // If, in the future, we want to record gauge-metrics related to the state of prefs, + // that should be done in the setPrefs path. 
+ e := prefsMetricsEditEvent{ + change: mp, + pNew: newPrefs, + pOld: oldPrefs, + node: b.currentNode(), + lastSuggestedExitNode: b.lastSuggestedExitNode, + } + e.record() } // startReconnectTimerLocked sets a timer to automatically set WantRunning to true @@ -4368,7 +4438,7 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } mp := &ipn.MaskedPrefs{WantRunningSet: true, Prefs: ipn.Prefs{WantRunning: true}} - if _, err := b.editPrefsLockedOnEntry(mp, unlock); err != nil { + if _, err := b.editPrefsLockedOnEntry(ipnauth.Self, mp, unlock); err != nil { b.logf("failed to automatically reconnect as %q after %v: %v", cp.Name(), d, err) } else { b.logf("automatically reconnected as %q after %v", cp.Name(), d) @@ -4399,9 +4469,19 @@ func (b *LocalBackend) stopReconnectTimerLocked() { // Warning: b.mu must be held on entry, but it unlocks it on the way out. // TODO(bradfitz): redo the locking on all these weird methods like this. -func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { +func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { defer unlock() // for error paths + // Check if the changes in mp are allowed. + if err := b.checkEditPrefsAccessLocked(actor, mp); err != nil { + b.logf("EditPrefs(%v): %v", mp.Pretty(), err) + return ipn.PrefsView{}, err + } + + // Apply additional changes to mp if necessary, + // such as clearing mutually exclusive fields. 
+ b.adjustEditPrefsLocked(actor, mp) + if mp.EggSet { mp.EggSet = false b.egg = true @@ -4416,29 +4496,18 @@ func (b *LocalBackend) editPrefsLockedOnEntry(mp *ipn.MaskedPrefs, unlock unlock b.logf("EditPrefs check error: %v", err) return ipn.PrefsView{}, err } - if p1.RunSSH && !envknob.CanSSHD() { - b.logf("EditPrefs requests SSH, but disabled by envknob; returning error") - return ipn.PrefsView{}, errors.New("Tailscale SSH server administratively disabled.") - } + if p1.View().Equals(p0) { return stripKeysFromPrefs(p0), nil } b.logf("EditPrefs: %v", mp.Pretty()) - newPrefs := b.setPrefsLockedOnEntry(p1, unlock) - // This is recorded here in the EditPrefs path, not the setPrefs path on purpose. - // recordForEdit records metrics related to edits and changes, not the final state. - // If, in the future, we want to record gauge-metrics related to the state of prefs, - // that should be done in the setPrefs path. - e := prefsMetricsEditEvent{ - change: mp, - pNew: p1.View(), - pOld: p0, - node: b.currentNode(), - lastSuggestedExitNode: b.lastSuggestedExitNode, - } - e.record() + // Perform any actions required when prefs are edited (typically by a user), + // before the modified prefs are actually set for the current profile. + b.onEditPrefsLocked(actor, mp, p0, p1.View()) + + newPrefs := b.setPrefsLockedOnEntry(p1, unlock) // Note: don't perform any actions for the new prefs here. Not // every prefs change goes through EditPrefs. Put your actions @@ -5829,11 +5898,16 @@ func (b *LocalBackend) Logout(ctx context.Context) error { // delete it later. profile := b.pm.CurrentProfile() - _, err := b.editPrefsLockedOnEntry(&ipn.MaskedPrefs{ - WantRunningSet: true, - LoggedOutSet: true, - Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, - }, unlock) + // TODO(nickkhyl): change [LocalBackend.Logout] to accept an [ipnauth.Actor]. + // This will allow enforcing Always On mode when a user tries to log out + // while logged in and connected. See tailscale/corp#26249. 
+ _, err := b.editPrefsLockedOnEntry( + ipnauth.TODO, + &ipn.MaskedPrefs{ + WantRunningSet: true, + LoggedOutSet: true, + Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, + }, unlock) if err != nil { return err } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 3a2258cc6051f..1e1b7663ab687 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -501,29 +501,30 @@ func TestLazyMachineKeyGeneration(t *testing.T) { func TestZeroExitNodeViaLocalAPI(t *testing.T) { lb := newTestLocalBackend(t) + user := &ipnauth.TestActor{} // Give it an initial exit node in use. - if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ + if _, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ ExitNodeIDSet: true, Prefs: ipn.Prefs{ ExitNodeID: "foo", }, - }); err != nil { + }, user); err != nil { t.Fatalf("enabling first exit node: %v", err) } // SetUseExitNodeEnabled(false) "remembers" the prior exit node. - if _, err := lb.SetUseExitNodeEnabled(false); err != nil { + if _, err := lb.SetUseExitNodeEnabled(user, false); err != nil { t.Fatal("expected failure") } // Zero the exit node - pv, err := lb.EditPrefs(&ipn.MaskedPrefs{ + pv, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ ExitNodeIDSet: true, Prefs: ipn.Prefs{ ExitNodeID: "", }, - }) + }, user) if err != nil { t.Fatalf("enabling first exit node: %v", err) @@ -539,29 +540,30 @@ func TestZeroExitNodeViaLocalAPI(t *testing.T) { func TestSetUseExitNodeEnabled(t *testing.T) { lb := newTestLocalBackend(t) + user := &ipnauth.TestActor{} // Can't turn it on if it never had an old value. - if _, err := lb.SetUseExitNodeEnabled(true); err == nil { + if _, err := lb.SetUseExitNodeEnabled(user, true); err == nil { t.Fatal("expected success") } // But we can turn it off when it's already off. - if _, err := lb.SetUseExitNodeEnabled(false); err != nil { + if _, err := lb.SetUseExitNodeEnabled(user, false); err != nil { t.Fatal("expected failure") } // Give it an initial exit node in use. 
- if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ + if _, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ ExitNodeIDSet: true, Prefs: ipn.Prefs{ ExitNodeID: "foo", }, - }); err != nil { + }, user); err != nil { t.Fatalf("enabling first exit node: %v", err) } // Now turn off that exit node. - if prefs, err := lb.SetUseExitNodeEnabled(false); err != nil { + if prefs, err := lb.SetUseExitNodeEnabled(user, false); err != nil { t.Fatal("expected failure") } else { if g, w := prefs.ExitNodeID(), tailcfg.StableNodeID(""); g != w { @@ -573,7 +575,7 @@ func TestSetUseExitNodeEnabled(t *testing.T) { } // And turn it back on. - if prefs, err := lb.SetUseExitNodeEnabled(true); err != nil { + if prefs, err := lb.SetUseExitNodeEnabled(user, true); err != nil { t.Fatal("expected failure") } else { if g, w := prefs.ExitNodeID(), tailcfg.StableNodeID("foo"); g != w { @@ -585,9 +587,9 @@ func TestSetUseExitNodeEnabled(t *testing.T) { } // Verify we block setting an Internal field. - if _, err := lb.EditPrefs(&ipn.MaskedPrefs{ + if _, err := lb.EditPrefsAs(&ipn.MaskedPrefs{ InternalExitNodePriorSet: true, - }); err == nil { + }, user); err == nil { t.Fatalf("unexpected success; want an error trying to set an internal field") } } @@ -612,16 +614,18 @@ func TestConfigureExitNode(t *testing.T) { } tests := []struct { - name string - prefs ipn.Prefs - netMap *netmap.NetworkMap - report *netcheck.Report - changePrefs *ipn.MaskedPrefs - useExitNodeEnabled *bool - exitNodeIDPolicy *tailcfg.StableNodeID - exitNodeIPPolicy *netip.Addr - exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes - wantPrefs ipn.Prefs + name string + prefs ipn.Prefs + netMap *netmap.NetworkMap + report *netcheck.Report + changePrefs *ipn.MaskedPrefs + useExitNodeEnabled *bool + exitNodeIDPolicy *tailcfg.StableNodeID + exitNodeIPPolicy *netip.Addr + exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes + wantChangePrefsErr error // if non-nil, the error 
we expect from [LocalBackend.EditPrefsAs] + wantPrefs ipn.Prefs + wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] }{ { name: "exit-node-id-via-prefs", // set exit node ID via prefs @@ -804,6 +808,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantChangePrefsErr: errManagedByPolicy, }, { name: "id-via-policy/cannot-override-via-prefs/by-ip", // syspolicy should take precedence over prefs @@ -822,6 +827,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantChangePrefsErr: errManagedByPolicy, }, { name: "id-via-policy/cannot-override-via-prefs/by-auto-expr", // syspolicy should take precedence over prefs @@ -840,6 +846,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantChangePrefsErr: errManagedByPolicy, }, { name: "ip-via-policy", // set exit node IP via syspolicy (should be resolved to an ID) @@ -999,15 +1006,16 @@ func TestConfigureExitNode(t *testing.T) { prefs: ipn.Prefs{ ControlURL: controlURL, }, - netMap: clientNetmap, - report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), - useExitNodeEnabled: ptr.To(false), // should be ignored + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + useExitNodeEnabled: ptr.To(false), // should fail with an error + wantExitNodeToggleErr: errManagedByPolicy, wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting AutoExitNode: "any", - InternalExitNodePrior: "auto:any", + InternalExitNodePrior: "", }, }, } @@ -1046,14 +1054,17 @@ func TestConfigureExitNode(t *testing.T) { lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: tt.netMap}) } + user := &ipnauth.TestActor{} // If we have a changePrefs, apply it. 
if tt.changePrefs != nil { - lb.EditPrefs(tt.changePrefs) + _, err := lb.EditPrefsAs(tt.changePrefs, user) + checkError(t, err, tt.wantChangePrefsErr, true) } // If we need to flip exit node toggle on or off, do it. if tt.useExitNodeEnabled != nil { - lb.SetUseExitNodeEnabled(*tt.useExitNodeEnabled) + _, err := lb.SetUseExitNodeEnabled(user, *tt.useExitNodeEnabled) + checkError(t, err, tt.wantExitNodeToggleErr, true) } // Now check the prefs. @@ -6218,6 +6229,18 @@ func TestDisplayMessageIPNBus(t *testing.T) { } } +func checkError(tb testing.TB, got, want error, fatal bool) { + tb.Helper() + f := tb.Errorf + if fatal { + f = tb.Fatalf + } + if (want == nil) != (got == nil) || + (want != nil && got != nil && want.Error() != got.Error() && !errors.Is(got, want)) { + f("gotErr: %v; wantErr: %v", got, want) + } +} + func toStrings[T ~string](in []T) []string { out := make([]string, len(in)) for i, v := range in { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index a90ae5d844b90..d4b4b443ef852 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1910,7 +1910,7 @@ func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Requ http.Error(w, "invalid 'enabled' parameter", http.StatusBadRequest) return } - prefs, err := h.b.SetUseExitNodeEnabled(v) + prefs, err := h.b.SetUseExitNodeEnabled(h.Actor, v) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index a84afa5dbb6db..6555a58ac4564 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -72,7 +72,10 @@ func HasAnyOf(keys ...Key) (bool, error) { if errors.Is(err, setting.ErrNotConfigured) || errors.Is(err, setting.ErrNoSuchKey) { continue } - return err == nil, err // err may be nil or non-nil + if err != nil { + return false, err + } + return true, nil } return false, nil } From ea4018b757fa6f925be59f9d95011c3a7de3ee10 Mon Sep 17 00:00:00 
2001 From: Nick Khyl Date: Mon, 7 Jul 2025 17:21:21 -0500 Subject: [PATCH 0077/1093] ipn/ipnlocal: fix missing defer in testExtension.Shutdown Updates #cleanup Signed-off-by: Nick Khyl --- ipn/ipnlocal/extension_host_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index f655c477fcb36..509833ff6de46 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -1230,7 +1230,7 @@ func (e *testExtension) InitCalled() bool { func (e *testExtension) Shutdown() (err error) { e.t.Helper() e.mu.Lock() - e.mu.Unlock() + defer e.mu.Unlock() if e.ShutdownHook != nil { err = e.ShutdownHook(e) } From 47f431b656d0c35aac6f97530a4daa2404bc12d6 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 7 Jul 2025 19:46:20 -0700 Subject: [PATCH 0078/1093] net/udprelay: fix relaying between mixed address family sockets (#16485) We can't relay a packet received over the IPv4 socket back out the same socket if destined to an IPv6 address, and vice versa. 
Updates tailscale/corp#30206 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 42 ++++++++++--------- net/udprelay/server_test.go | 81 +++++++++++++++++++++++++++---------- 2 files changed, 83 insertions(+), 40 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index d2661e59feba4..979ccf71765ed 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -112,7 +112,7 @@ type serverEndpoint struct { allocatedAt time.Time } -func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, uw udpWriter, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, conn *net.UDPConn, serverDisco key.DiscoPublic) { if senderIndex != 0 && senderIndex != 1 { return } @@ -165,7 +165,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex reply = serverDisco.AppendTo(reply) box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil)) reply = append(reply, box...) 
- uw.WriteMsgUDPAddrPort(reply, nil, from) + conn.WriteMsgUDPAddrPort(reply, nil, from) return case *disco.BindUDPRelayEndpointAnswer: err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon) @@ -191,7 +191,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex } } -func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, uw udpWriter, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, conn *net.UDPConn, serverDisco key.DiscoPublic) { senderRaw, isDiscoMsg := disco.Source(b) if !isDiscoMsg { // Not a Disco message @@ -222,14 +222,10 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []by return } - e.handleDiscoControlMsg(from, senderIndex, discoMsg, uw, serverDisco) + e.handleDiscoControlMsg(from, senderIndex, discoMsg, conn, serverDisco) } -type udpWriter interface { - WriteMsgUDPAddrPort(b []byte, oob []byte, addr netip.AddrPort) (n, oobn int, err error) -} - -func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeader, b []byte, uw udpWriter, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeader, b []byte, rxSocket, otherAFSocket *net.UDPConn, serverDisco key.DiscoPublic) { if !gh.Control { if !e.isBound() { // not a control packet, but serverEndpoint isn't bound @@ -247,8 +243,16 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade // unrecognized source return } - // relay packet - uw.WriteMsgUDPAddrPort(b, nil, to) + // Relay the packet towards the other party via the socket associated + // with the destination's address family. If source and destination + // address families are matching we tx on the same socket the packet + // was received (rxSocket), otherwise we use the "other" socket + // (otherAFSocket). [Server] makes no use of dual-stack sockets. 
+ if from.Addr().Is4() == to.Addr().Is4() { + rxSocket.WriteMsgUDPAddrPort(b, nil, to) + } else if otherAFSocket != nil { + otherAFSocket.WriteMsgUDPAddrPort(b, nil, to) + } return } @@ -258,7 +262,7 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade } msg := b[packet.GeneveFixedHeaderLength:] - e.handleSealedDiscoControlMsg(from, msg, uw, serverDisco) + e.handleSealedDiscoControlMsg(from, msg, rxSocket, serverDisco) } func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifetime time.Duration) bool { @@ -346,10 +350,10 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve } s.wg.Add(1) - go s.packetReadLoop(s.uc4) + go s.packetReadLoop(s.uc4, s.uc6) if s.uc6 != nil { s.wg.Add(1) - go s.packetReadLoop(s.uc6) + go s.packetReadLoop(s.uc6, s.uc4) } s.wg.Add(1) go s.endpointGCLoop() @@ -531,7 +535,7 @@ func (s *Server) endpointGCLoop() { } } -func (s *Server) handlePacket(from netip.AddrPort, b []byte, uw udpWriter) { +func (s *Server) handlePacket(from netip.AddrPort, b []byte, rxSocket, otherAFSocket *net.UDPConn) { if stun.Is(b) && b[1] == 0x01 { // A b[1] value of 0x01 (STUN method binding) is sufficiently // non-overlapping with the Geneve header where the LSB is always 0 @@ -555,10 +559,10 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte, uw udpWriter) { return } - e.handlePacket(from, gh, b, uw, s.discoPublic) + e.handlePacket(from, gh, b, rxSocket, otherAFSocket, s.discoPublic) } -func (s *Server) packetReadLoop(uc *net.UDPConn) { +func (s *Server) packetReadLoop(readFromSocket, otherSocket *net.UDPConn) { defer func() { s.wg.Done() s.Close() @@ -566,11 +570,11 @@ func (s *Server) packetReadLoop(uc *net.UDPConn) { b := make([]byte, 1<<16-1) for { // TODO: extract laddr from IP_PKTINFO for use in reply - n, from, err := uc.ReadFromUDPAddrPort(b) + n, from, err := readFromSocket.ReadFromUDPAddrPort(b) if err != nil { return } - s.handlePacket(from, b[:n], uc) + 
s.handlePacket(from, b[:n], readFromSocket, otherSocket) } } diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 8c0c5aff66027..de1c293644992 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -181,8 +181,9 @@ func TestServer(t *testing.T) { discoB := key.NewDisco() cases := []struct { - name string - overrideAddrs []netip.Addr + name string + overrideAddrs []netip.Addr + forceClientsMixedAF bool }{ { name: "over ipv4", @@ -192,6 +193,11 @@ func TestServer(t *testing.T) { name: "over ipv6", overrideAddrs: []netip.Addr{netip.MustParseAddr("::1")}, }, + { + name: "mixed address families", + overrideAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1"), netip.MustParseAddr("::1")}, + forceClientsMixedAF: true, + }, } for _, tt := range cases { @@ -216,16 +222,47 @@ func TestServer(t *testing.T) { t.Fatalf("wrong dupEndpoint (-got +want)\n%s", diff) } - if len(endpoint.AddrPorts) != 1 { + if len(endpoint.AddrPorts) < 1 { t.Fatalf("unexpected endpoint.AddrPorts: %v", endpoint.AddrPorts) } - tcA := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) + tcAServerEndpointAddr := endpoint.AddrPorts[0] + tcA := newTestClient(t, endpoint.VNI, tcAServerEndpointAddr, discoA, discoB.Public(), endpoint.ServerDisco) defer tcA.close() - tcB := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) + tcBServerEndpointAddr := tcAServerEndpointAddr + if tt.forceClientsMixedAF { + foundMixedAF := false + for _, addr := range endpoint.AddrPorts { + if addr.Addr().Is4() != tcBServerEndpointAddr.Addr().Is4() { + tcBServerEndpointAddr = addr + foundMixedAF = true + } + } + if !foundMixedAF { + t.Fatal("force clients to mixed address families is set, but relay server lacks address family diversity") + } + } + tcB := newTestClient(t, endpoint.VNI, tcBServerEndpointAddr, discoB, discoA.Public(), endpoint.ServerDisco) defer tcB.close() - 
tcA.handshake(t) - tcB.handshake(t) + for i := 0; i < 2; i++ { + // We handshake both clients twice to guarantee server-side + // packet reading goroutines, which are independent across + // address families, have seen an answer from both clients + // before proceeding. This is needed because the test assumes + // that B's handshake is complete (the first send is A->B below), + // but the server may not have handled B's handshake answer + // before it handles A's data pkt towards B. + // + // Data transmissions following "re-handshakes" orient so that + // the sender is the same as the party that performed the + // handshake, for the same reasons. + // + // [magicsock.relayManager] is not prone to this issue as both + // parties transmit data packets immediately following their + // handshake answer. + tcA.handshake(t) + tcB.handshake(t) + } dupEndpoint, err = server.AllocateEndpoint(discoA.Public(), discoB.Public()) if err != nil { @@ -250,30 +287,32 @@ func TestServer(t *testing.T) { t.Fatal("unexpected msg B->A") } - tcAOnNewPort := newTestClient(t, endpoint.VNI, endpoint.AddrPorts[0], discoA, discoB.Public(), endpoint.ServerDisco) + tcAOnNewPort := newTestClient(t, endpoint.VNI, tcAServerEndpointAddr, discoA, discoB.Public(), endpoint.ServerDisco) tcAOnNewPort.handshakeGeneration = tcA.handshakeGeneration + 1 defer tcAOnNewPort.close() - // Handshake client A on a new source IP:port, verify we receive packets on the new binding + // Handshake client A on a new source IP:port, verify we can send packets on the new binding tcAOnNewPort.handshake(t) - txToAOnNewPort := []byte{7, 8, 9} - tcB.writeDataPkt(t, txToAOnNewPort) - rxFromB = tcAOnNewPort.readDataPkt(t) - if !bytes.Equal(txToAOnNewPort, rxFromB) { - t.Fatal("unexpected msg B->A") + + fromAOnNewPort := []byte{7, 8, 9} + tcAOnNewPort.writeDataPkt(t, fromAOnNewPort) + rxFromA = tcB.readDataPkt(t) + if !bytes.Equal(fromAOnNewPort, rxFromA) { + t.Fatal("unexpected msg A->B") } - tcBOnNewPort := newTestClient(t, 
endpoint.VNI, endpoint.AddrPorts[0], discoB, discoA.Public(), endpoint.ServerDisco) + tcBOnNewPort := newTestClient(t, endpoint.VNI, tcBServerEndpointAddr, discoB, discoA.Public(), endpoint.ServerDisco) tcBOnNewPort.handshakeGeneration = tcB.handshakeGeneration + 1 defer tcBOnNewPort.close() - // Handshake client B on a new source IP:port, verify we receive packets on the new binding + // Handshake client B on a new source IP:port, verify we can send packets on the new binding tcBOnNewPort.handshake(t) - txToBOnNewPort := []byte{7, 8, 9} - tcAOnNewPort.writeDataPkt(t, txToBOnNewPort) - rxFromA = tcBOnNewPort.readDataPkt(t) - if !bytes.Equal(txToBOnNewPort, rxFromA) { - t.Fatal("unexpected msg A->B") + + fromBOnNewPort := []byte{7, 8, 9} + tcBOnNewPort.writeDataPkt(t, fromBOnNewPort) + rxFromB = tcAOnNewPort.readDataPkt(t) + if !bytes.Equal(fromBOnNewPort, rxFromB) { + t.Fatal("unexpected msg B->A") } }) } From 5b0074729d38f8cc301803da06086033f53b1b93 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 8 Jul 2025 09:45:18 -0700 Subject: [PATCH 0079/1093] go.mod,wgengine/magicsock: implement conn.InitiationAwareEndpoint (#16486) Since a [*lazyEndpoint] makes wireguard-go responsible for peer ID, but wireguard-go may not yet be configured for said peer, we need a JIT hook around initiation message reception to call what is usually called from an [*endpoint]. 
Updates tailscale/corp#30042 Signed-off-by: Jordan Whited --- go.mod | 2 +- go.sum | 4 ++-- wgengine/magicsock/magicsock.go | 34 ++++++++++++++++++++++++++++++--- 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 5bf04fedaba2e..e89a383a62726 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003 + github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8 github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index f9910bb59bb4d..062af66622b85 100644 --- a/go.sum +++ b/go.sum @@ -975,8 +975,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003 h1:chIzUDKxR0nXQQra0j41aqiiFNICs0FIC5ZCwDO7z3k= -github.com/tailscale/wireguard-go v0.0.0-20250701223756-24483d7a0003/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8 h1:Yjg/+1VVRcdY3DL9fs8g+QnZ1aizotU0pp0VSOSCuTQ= +github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet 
v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a7eab36786f12..fbfcf0b41565a 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3777,17 +3777,45 @@ func (c *Conn) SetLastNetcheckReportForTest(ctx context.Context, report *netchec // lazyEndpoint is a wireguard [conn.Endpoint] for when magicsock received a // non-disco (presumably WireGuard) packet from a UDP address from which we // can't map to a Tailscale peer. But WireGuard most likely can, once it -// decrypts it. So we implement the [conn.PeerAwareEndpoint] interface -// from https://github.com/tailscale/wireguard-go/pull/27 to allow WireGuard -// to tell us who it is later and get the correct [conn.Endpoint]. +// decrypts it. So we implement the [conn.InitiationAwareEndpoint] and +// [conn.PeerAwareEndpoint] interfaces, to allow WireGuard to tell us who it is +// later, just-in-time to configure the peer, and set the associated [epAddr] +// in the [peerMap]. Future receives on the associated [epAddr] will then be +// resolvable directly to an [*endpoint]. type lazyEndpoint struct { c *Conn src epAddr } +var _ conn.InitiationAwareEndpoint = (*lazyEndpoint)(nil) var _ conn.PeerAwareEndpoint = (*lazyEndpoint)(nil) var _ conn.Endpoint = (*lazyEndpoint)(nil) +// InitiationMessagePublicKey implements [conn.InitiationAwareEndpoint]. +// wireguard-go calls us here if we passed it a [*lazyEndpoint] for an +// initiation message, for which it might not have the relevant peer configured, +// enabling us to just-in-time configure it and note its activity via +// [*endpoint.noteRecvActivity], before it performs peer lookup and attempts +// decryption. 
+// +// Reception of all other WireGuard message types implies pre-existing knowledge +// of the peer by wireguard-go for it to do useful work. See +// [userspaceEngine.maybeReconfigWireguardLocked] & +// [userspaceEngine.noteRecvActivity] for more details around just-in-time +// wireguard-go peer (de)configuration. +func (le *lazyEndpoint) InitiationMessagePublicKey(peerPublicKey [32]byte) { + pubKey := key.NodePublicFromRaw32(mem.B(peerPublicKey[:])) + le.c.mu.Lock() + defer le.c.mu.Unlock() + ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) + if !ok { + return + } + now := mono.Now() + ep.lastRecvUDPAny.StoreAtomic(now) + ep.noteRecvActivity(le.src, now) +} + func (le *lazyEndpoint) ClearSrc() {} func (le *lazyEndpoint) SrcIP() netip.Addr { return netip.Addr{} } From 1fe82d6ef5f48a85ce7ba6ce388a6c29f112b2cb Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 8 Jul 2025 14:37:13 -0500 Subject: [PATCH 0080/1093] cmd/tailscale/cli,ipn/ipnlocal: restrict logout when AlwaysOn mode is enabled In this PR, we start passing a LocalAPI actor to (*LocalBackend).Logout to make it subject to the same access check as disconnects made via tailscale down or the GUI. We then update the CLI to allow `tailscale logout` to accept a reason, similar to `tailscale down`. 
Updates tailscale/corp#26249 Signed-off-by: Nick Khyl --- cmd/tailscale/cli/logout.go | 12 ++++++++++++ cmd/tsconnect/wasm/wasm_js.go | 3 ++- ipn/ipnlocal/local.go | 9 +++------ ipn/ipnlocal/state_test.go | 7 ++++--- ipn/localapi/localapi.go | 2 +- 5 files changed, 22 insertions(+), 11 deletions(-) diff --git a/cmd/tailscale/cli/logout.go b/cmd/tailscale/cli/logout.go index 0c2007a66ab1b..fbc39473026a1 100644 --- a/cmd/tailscale/cli/logout.go +++ b/cmd/tailscale/cli/logout.go @@ -5,12 +5,18 @@ package cli import ( "context" + "flag" "fmt" "strings" "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/tailscale/apitype" ) +var logoutArgs struct { + reason string +} + var logoutCmd = &ffcli.Command{ Name: "logout", ShortUsage: "tailscale logout", @@ -22,11 +28,17 @@ the current node key, forcing a future use of it to cause a reauthentication. `), Exec: runLogout, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("logout") + fs.StringVar(&logoutArgs.reason, "reason", "", "reason for the logout, if required by a policy") + return fs + })(), } func runLogout(ctx context.Context, args []string) error { if len(args) > 0 { return fmt.Errorf("too many non-flag arguments: %q", args) } + ctx = apitype.RequestReasonKey.WithValue(ctx, logoutArgs.reason) return localClient.Logout(ctx) } diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 779a87e49dec9..ebf7284aa0d43 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -27,6 +27,7 @@ import ( "golang.org/x/crypto/ssh" "tailscale.com/control/controlclient" "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnserver" "tailscale.com/ipn/store/mem" @@ -336,7 +337,7 @@ func (i *jsIPN) logout() { go func() { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - i.lb.Logout(ctx) + i.lb.Logout(ctx, ipnauth.Self) }() } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 
03a0709e2d95d..8fbce4631368a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1077,7 +1077,7 @@ func (b *LocalBackend) Shutdown() { ctx, cancel := context.WithTimeout(b.ctx, 5*time.Second) defer cancel() t0 := time.Now() - err := b.Logout(ctx) // best effort + err := b.Logout(ctx, ipnauth.Self) // best effort td := time.Since(t0).Round(time.Millisecond) if err != nil { b.logf("failed to log out ephemeral node on shutdown after %v: %v", td, err) @@ -5884,7 +5884,7 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool { // Logout logs out the current profile, if any, and waits for the logout to // complete. -func (b *LocalBackend) Logout(ctx context.Context) error { +func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { unlock := b.lockAndGetUnlock() defer unlock() @@ -5898,11 +5898,8 @@ func (b *LocalBackend) Logout(ctx context.Context) error { // delete it later. profile := b.pm.CurrentProfile() - // TODO(nickkhyl): change [LocalBackend.Logout] to accept an [ipnauth.Actor]. - // This will allow enforcing Always On mode when a user tries to log out - // while logged in and connected. See tailscale/corp#26249. 
_, err := b.editPrefsLockedOnEntry( - ipnauth.TODO, + actor, &ipn.MaskedPrefs{ WantRunningSet: true, LoggedOutSet: true, diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index f0ac5f9442704..c29589acc698c 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -21,6 +21,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/ipn" + "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store/mem" "tailscale.com/net/dns" @@ -607,7 +608,7 @@ func TestStateMachine(t *testing.T) { store.awaitWrite() t.Logf("\n\nLogout") notifies.expect(5) - b.Logout(context.Background()) + b.Logout(context.Background(), ipnauth.Self) { nn := notifies.drain(5) previousCC.assertCalls("pause", "Logout", "unpause", "Shutdown") @@ -637,7 +638,7 @@ func TestStateMachine(t *testing.T) { // A second logout should be a no-op as we are in the NeedsLogin state. t.Logf("\n\nLogout2") notifies.expect(0) - b.Logout(context.Background()) + b.Logout(context.Background(), ipnauth.Self) { notifies.drain(0) cc.assertCalls() @@ -650,7 +651,7 @@ func TestStateMachine(t *testing.T) { // AuthCantContinue state. 
t.Logf("\n\nLogout3") notifies.expect(3) - b.Logout(context.Background()) + b.Logout(context.Background(), ipnauth.Self) { notifies.drain(0) cc.assertCalls() diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index d4b4b443ef852..60ed89b3b2ad3 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1460,7 +1460,7 @@ func (h *Handler) serveLogout(w http.ResponseWriter, r *http.Request) { http.Error(w, "want POST", http.StatusBadRequest) return } - err := h.b.Logout(r.Context()) + err := h.b.Logout(r.Context(), h.Actor) if err == nil { w.WriteHeader(http.StatusNoContent) return From 9bf99741ddb42cf3a2dec644cdff0f8cf9b99265 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Jul 2025 19:02:10 -0500 Subject: [PATCH 0081/1093] ipn/ipnlocal: refactor resolveExitNodeInPrefsLocked, setExitNodeID and resolveExitNodeIP Now that resolveExitNodeInPrefsLocked is the only caller of setExitNodeID, and setExitNodeID is the only caller of resolveExitNodeIP, we can restructure the code with resolveExitNodeInPrefsLocked now calling both resolveAutoExitNodeLocked and resolveExitNodeIPLocked directly. This prepares for factoring out resolveAutoExitNodeLocked and related auto-exit-node logic into an ipnext extension in a future commit. While there, we also update exit node by IP lookup to use (*nodeBackend).NodeByAddr and (*nodeBackend).NodeByID instead of iterating over all peers in the most recent netmap. 
Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 106 ++++++++++++++++++++---------------------- 1 file changed, 51 insertions(+), 55 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8fbce4631368a..221edad92147c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2030,47 +2030,48 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { return false } -// setExitNodeID updates prefs to either use the suggestedExitNodeID if AutoExitNode is enabled, -// or resolve ExitNodeIP to an ID and use that. It returns whether prefs was mutated. -func setExitNodeID(prefs *ipn.Prefs, suggestedExitNodeID tailcfg.StableNodeID, nm *netmap.NetworkMap) (prefsChanged bool) { - if prefs.AutoExitNode.IsSet() { - var newExitNodeID tailcfg.StableNodeID - if !suggestedExitNodeID.IsZero() { - // If we have a suggested exit node, use it. - newExitNodeID = suggestedExitNodeID - } else if isAllowedAutoExitNodeID(prefs.ExitNodeID) { - // If we don't have a suggested exit node, but the prefs already - // specify an allowed auto exit node ID, retain it. - newExitNodeID = prefs.ExitNodeID - } else { - // Otherwise, use [unresolvedExitNodeID] to install a blackhole route, - // preventing traffic from leaking to the local network until an actual - // exit node is selected. - newExitNodeID = unresolvedExitNodeID - } - if prefs.ExitNodeID != newExitNodeID { - prefs.ExitNodeID = newExitNodeID - prefsChanged = true - } - if prefs.ExitNodeIP.IsValid() { - prefs.ExitNodeIP = netip.Addr{} - prefsChanged = true - } - return prefsChanged +// resolveAutoExitNodeLocked computes a suggested exit node and updates prefs +// to use it if AutoExitNode is enabled, and reports whether prefs was mutated. +// +// b.mu must be held. 
+func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged bool) { + if !prefs.AutoExitNode.IsSet() { + return false + } + if _, err := b.suggestExitNodeLocked(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { + b.logf("failed to select auto exit node: %v", err) // non-fatal, see below + } + var newExitNodeID tailcfg.StableNodeID + if !b.lastSuggestedExitNode.IsZero() { + // If we have a suggested exit node, use it. + newExitNodeID = b.lastSuggestedExitNode + } else if isAllowedAutoExitNodeID(prefs.ExitNodeID) { + // If we don't have a suggested exit node, but the prefs already + // specify an allowed auto exit node ID, retain it. + newExitNodeID = prefs.ExitNodeID + } else { + // Otherwise, use [unresolvedExitNodeID] to install a blackhole route, + // preventing traffic from leaking to the local network until an actual + // exit node is selected. + newExitNodeID = unresolvedExitNodeID + } + if prefs.ExitNodeID != newExitNodeID { + prefs.ExitNodeID = newExitNodeID + prefsChanged = true } - return resolveExitNodeIP(prefs, nm) + if prefs.ExitNodeIP.IsValid() { + prefs.ExitNodeIP = netip.Addr{} + prefsChanged = true + } + return prefsChanged } -// resolveExitNodeIP updates prefs to reference an exit node by ID, rather +// resolveExitNodeIPLocked updates prefs to reference an exit node by ID, rather // than by IP. It returns whether prefs was mutated. -func resolveExitNodeIP(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bool) { - if nm == nil { - // No netmap, can't resolve anything. - return false - } - - // If we have a desired IP on file, try to find the corresponding - // node. +// +// b.mu must be held. +func (b *LocalBackend) resolveExitNodeIPLocked(prefs *ipn.Prefs) (prefsChanged bool) { + // If we have a desired IP on file, try to find the corresponding node. 
if !prefs.ExitNodeIP.IsValid() { return false } @@ -2081,20 +2082,19 @@ func resolveExitNodeIP(prefs *ipn.Prefs, nm *netmap.NetworkMap) (prefsChanged bo prefsChanged = true } - oldExitNodeID := prefs.ExitNodeID - for _, peer := range nm.Peers { - for _, addr := range peer.Addresses().All() { - if !addr.IsSingleIP() || addr.Addr() != prefs.ExitNodeIP { - continue - } + cn := b.currentNode() + if nid, ok := cn.NodeByAddr(prefs.ExitNodeIP); ok { + if node, ok := cn.NodeByID(nid); ok { // Found the node being referenced, upgrade prefs to // reference it directly for next time. - prefs.ExitNodeID = peer.StableID() + prefs.ExitNodeID = node.StableID() prefs.ExitNodeIP = netip.Addr{} - return prefsChanged || oldExitNodeID != prefs.ExitNodeID + // Cleared ExitNodeIP, so prefs changed + // even if the ID stayed the same. + prefsChanged = true + } } - return prefsChanged } @@ -6042,17 +6042,13 @@ func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { // // b.mu must be held. 
func (b *LocalBackend) resolveExitNodeInPrefsLocked(prefs *ipn.Prefs) (changed bool) { - if prefs.AutoExitNode.IsSet() { - _, err := b.suggestExitNodeLocked() - if err != nil && !errors.Is(err, ErrNoPreferredDERP) { - b.logf("failed to select auto exit node: %v", err) - } + if b.resolveAutoExitNodeLocked(prefs) { + changed = true } - if setExitNodeID(prefs, b.lastSuggestedExitNode, b.currentNode().NetMap()) { - b.logf("exit node resolved: %v", prefs.ExitNodeID) - return true + if b.resolveExitNodeIPLocked(prefs) { + changed = true } - return false + return changed } // setNetMapLocked updates the LocalBackend state to reflect the newly From 2c630e126b84b537053947b579f0b44623deb496 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 7 Jul 2025 19:05:41 -0500 Subject: [PATCH 0082/1093] ipn/ipnlocal: make applySysPolicy a method on LocalBackend Now that applySysPolicy is only called by (*LocalBackend).reconcilePrefsLocked, we can make it a method to avoid passing state via parameters and to support future extensibility. Also factor out exit node-specific logic into applyExitNodeSysPolicyLocked. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 52 ++++++++++++++++++++++++-------------- ipn/ipnlocal/local_test.go | 6 +++-- 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 221edad92147c..9ed9522ab259c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1800,9 +1800,11 @@ var preferencePolicies = []preferencePolicyInfo{ }, } -// applySysPolicy overwrites configured preferences with policies that may be +// applySysPolicyLocked overwrites configured preferences with policies that may be // configured by the system administrator in an OS-specific way. -func applySysPolicy(prefs *ipn.Prefs, overrideAlwaysOn bool) (anyChange bool) { +// +// b.mu must be held. 
+func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { if controlURL, err := syspolicy.GetString(syspolicy.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true @@ -1839,6 +1841,34 @@ func applySysPolicy(prefs *ipn.Prefs, overrideAlwaysOn bool) (anyChange bool) { } } + if b.applyExitNodeSysPolicyLocked(prefs) { + anyChange = true + } + + if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { + prefs.WantRunning = true + anyChange = true + } + + for _, opt := range preferencePolicies { + if po, err := syspolicy.GetPreferenceOption(opt.key); err == nil { + curVal := opt.get(prefs.View()) + newVal := po.ShouldEnable(curVal) + if curVal != newVal { + opt.set(prefs, newVal) + anyChange = true + } + } + } + + return anyChange +} + +// applyExitNodeSysPolicyLocked applies the exit node policy settings to prefs +// and reports whether any change was made. +// +// b.mu must be held. +func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) @@ -1894,22 +1924,6 @@ func applySysPolicy(prefs *ipn.Prefs, overrideAlwaysOn bool) (anyChange bool) { } } - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn && !overrideAlwaysOn && !prefs.WantRunning { - prefs.WantRunning = true - anyChange = true - } - - for _, opt := range preferencePolicies { - if po, err := syspolicy.GetPreferenceOption(opt.key); err == nil { - curVal := opt.get(prefs.View()) - newVal := po.ShouldEnable(curVal) - if curVal != newVal { - opt.set(prefs, newVal) - anyChange = true - } - } - } - return anyChange } @@ -6024,7 +6038,7 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { // // b.mu must be held. 
func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { - if applySysPolicy(prefs, b.overrideAlwaysOn) { + if b.applySysPolicyLocked(prefs) { changed = true } if b.resolveExitNodeInPrefsLocked(prefs) { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 1e1b7663ab687..b8526a4fcc70e 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2968,7 +2968,8 @@ func TestApplySysPolicy(t *testing.T) { t.Run("unit", func(t *testing.T) { prefs := tt.prefs.Clone() - gotAnyChange := applySysPolicy(prefs, false) + lb := newTestLocalBackend(t) + gotAnyChange := lb.applySysPolicyLocked(prefs) if gotAnyChange && prefs.Equals(&tt.prefs) { t.Errorf("anyChange but prefs is unchanged: %v", prefs.Pretty()) @@ -3116,7 +3117,8 @@ func TestPreferencePolicyInfo(t *testing.T) { prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - gotAnyChange := applySysPolicy(prefs, false) + lb := newTestLocalBackend(t) + gotAnyChange := lb.applySysPolicyLocked(prefs) if gotAnyChange != tt.wantChange { t.Errorf("anyChange=%v, want %v", gotAnyChange, tt.wantChange) From 740b77df594f649830d151f64700caea5c341e60 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 8 Jul 2025 16:08:28 -0500 Subject: [PATCH 0083/1093] ipn/ipnlocal,util/syspolicy: add support for ExitNode.AllowOverride policy setting When the policy setting is enabled, it allows users to override the exit node enforced by the ExitNodeID or ExitNodeIP policy. It's primarily intended for use when ExitNodeID is set to auto:any, but it can also be used with specific exit nodes. It does not allow disabling exit node usage entirely. Once the exit node policy is overridden, it will not be enforced again until the policy changes, the user connects or disconnects Tailscale, switches profiles, or disables the override. 
Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 119 ++++++++- ipn/ipnlocal/local_test.go | 312 ++++++++++++++++++++++++ util/syspolicy/policy_keys.go | 10 + util/syspolicy/rsop/change_callbacks.go | 5 + 4 files changed, 434 insertions(+), 12 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9ed9522ab259c..c54cb32d3125c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -414,6 +414,19 @@ type LocalBackend struct { // reconnectTimer is used to schedule a reconnect by setting [ipn.Prefs.WantRunning] // to true after a delay, or nil if no reconnect is scheduled. reconnectTimer tstime.TimerController + + // overrideExitNodePolicy is whether the user has overridden the exit node policy + // by manually selecting an exit node, as allowed by [syspolicy.AllowExitNodeOverride]. + // + // If true, the [syspolicy.ExitNodeID] and [syspolicy.ExitNodeIP] policy settings are ignored, + // and the suggested exit node is not applied automatically. + // + // It is cleared when the user switches back to the state required by policy (typically, auto:any), + // or when switching profiles, connecting/disconnecting Tailscale, restarting the client, + // or on similar events. + // + // See tailscale/corp#29969. + overrideExitNodePolicy bool } // HealthTracker returns the health tracker for the backend. @@ -1841,7 +1854,8 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { } } - if b.applyExitNodeSysPolicyLocked(prefs) { + // Only apply the exit node policy if the user hasn't overridden it. + if !b.overrideExitNodePolicy && b.applyExitNodeSysPolicyLocked(prefs) { anyChange = true } @@ -1957,7 +1971,7 @@ func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { // sysPolicyChanged is a callback triggered by syspolicy when it detects // a change in one or more syspolicy settings. 
func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { - if policy.HasChanged(syspolicy.AlwaysOn) || policy.HasChanged(syspolicy.AlwaysOnOverrideWithReason) { + if policy.HasChangedAnyOf(syspolicy.AlwaysOn, syspolicy.AlwaysOnOverrideWithReason) { // If the AlwaysOn or the AlwaysOnOverrideWithReason policy has changed, // we should reset the overrideAlwaysOn flag, as the override might // no longer be valid. @@ -1966,6 +1980,14 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { b.mu.Unlock() } + if policy.HasChangedAnyOf(syspolicy.ExitNodeID, syspolicy.ExitNodeIP, syspolicy.AllowExitNodeOverride) { + // Reset the exit node override if a policy that enforces exit node usage + // or allows the user to override automatic exit node selection has changed. + b.mu.Lock() + b.overrideExitNodePolicy = false + b.mu.Unlock() + } + if policy.HasChanged(syspolicy.AllowedSuggestedExitNodes) { b.refreshAllowedSuggestions() // Re-evaluate exit node suggestion now that the policy setting has changed. @@ -4320,12 +4342,12 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip } // checkEditPrefsAccessLocked checks whether the current user has access -// to apply the prefs changes in mp. +// to apply the changes in mp to the given prefs. // // It returns an error if the user is not allowed, or nil otherwise. // // b.mu must be held. -func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) error { +func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn.PrefsView, mp *ipn.MaskedPrefs) error { var errs []error if mp.RunSSHSet && mp.RunSSH && !envknob.CanSSHD() { @@ -4342,14 +4364,18 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, mp *ipn.M // Prevent users from changing exit node preferences // when exit node usage is managed by policy. 
if mp.ExitNodeIDSet || mp.ExitNodeIPSet || mp.AutoExitNodeSet { - // TODO(nickkhyl): Allow users to override ExitNode policy settings - // if the ExitNode.AllowUserOverride policy permits it. - // (Policy setting name and details are TBD. See tailscale/corp#29969) isManaged, err := syspolicy.HasAnyOf(syspolicy.ExitNodeID, syspolicy.ExitNodeIP) if err != nil { err = fmt.Errorf("policy check failed: %w", err) } else if isManaged { - err = errManagedByPolicy + // Allow users to override ExitNode policy settings and select an exit node manually + // if permitted by [syspolicy.AllowExitNodeOverride]. + // + // Disabling exit node usage entirely is not allowed. + allowExitNodeOverride, _ := syspolicy.GetBoolean(syspolicy.AllowExitNodeOverride, false) + if !allowExitNodeOverride || b.changeDisablesExitNodeLocked(prefs, mp) { + err = errManagedByPolicy + } } if err != nil { errs = append(errs, fmt.Errorf("exit node cannot be changed: %w", err)) @@ -4359,19 +4385,70 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, mp *ipn.M return multierr.New(errs...) } +// changeDisablesExitNodeLocked reports whether applying the change +// to the given prefs would disable exit node usage. +// +// In other words, it returns true if prefs.ExitNodeID is non-empty +// initially, but would become empty after applying the given change. +// +// It applies the same adjustments and resolves the exit node in the prefs +// as done during actual edits. While not optimal performance-wise, +// changing the exit node via LocalAPI isn't a hot path, and reusing +// the same logic ensures consistency and simplifies maintenance. +// +// b.mu must be held. +func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change *ipn.MaskedPrefs) bool { + if !change.AutoExitNodeSet && !change.ExitNodeIDSet && !change.ExitNodeIPSet { + // The change does not affect exit node usage. + return false + } + + if prefs.ExitNodeID() == "" { + // Exit node usage is already disabled. 
+ // Note that we do not check for ExitNodeIP here. + // If ExitNodeIP hasn't been resolved to a node, + // it's not enabled yet. + return false + } + + // First, apply the adjustments to a copy of the changes, + // e.g., clear AutoExitNode if ExitNodeID is set. + tmpChange := ptr.To(*change) + tmpChange.Prefs = *change.Prefs.Clone() + b.adjustEditPrefsLocked(prefs, tmpChange) + + // Then apply the adjusted changes to a copy of the current prefs, + // and resolve the exit node in the prefs. + tmpPrefs := prefs.AsStruct() + tmpPrefs.ApplyEdits(tmpChange) + b.resolveExitNodeInPrefsLocked(tmpPrefs) + + // If ExitNodeID is empty after applying the changes, + // but wasn't empty before, then the change disables + // exit node usage. + return tmpPrefs.ExitNodeID == "" + +} + // adjustEditPrefsLocked applies additional changes to mp if necessary, // such as zeroing out mutually exclusive fields. // // It must not assume that the changes in mp will actually be applied. // // b.mu must be held. -func (b *LocalBackend) adjustEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs) { +func (b *LocalBackend) adjustEditPrefsLocked(prefs ipn.PrefsView, mp *ipn.MaskedPrefs) { // Zeroing the ExitNodeID via localAPI must also zero the prior exit node. if mp.ExitNodeIDSet && mp.ExitNodeID == "" && !mp.InternalExitNodePriorSet { mp.InternalExitNodePrior = "" mp.InternalExitNodePriorSet = true } + // Clear ExitNodeID if AutoExitNode is disabled and ExitNodeID is still unresolved. + if mp.AutoExitNodeSet && mp.AutoExitNode == "" && prefs.ExitNodeID() == unresolvedExitNodeID { + mp.ExitNodeIDSet = true + mp.ExitNodeID = "" + } + // Disable automatic exit node selection if the user explicitly sets // ExitNodeID or ExitNodeIP. 
if (mp.ExitNodeIDSet || mp.ExitNodeIPSet) && !mp.AutoExitNodeSet { @@ -4404,6 +4481,22 @@ func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, o } } + if oldPrefs.WantRunning() != newPrefs.WantRunning() { + // Connecting to or disconnecting from Tailscale clears the override, + // unless the user is also explicitly changing the exit node (see below). + b.overrideExitNodePolicy = false + } + if mp.AutoExitNodeSet || mp.ExitNodeIDSet || mp.ExitNodeIPSet { + if allowExitNodeOverride, _ := syspolicy.GetBoolean(syspolicy.AllowExitNodeOverride, false); allowExitNodeOverride { + // If applying exit node policy settings to the new prefs results in no change, + // the user is not overriding the policy. Otherwise, it is an override. + b.overrideExitNodePolicy = b.applyExitNodeSysPolicyLocked(newPrefs.AsStruct()) + } else { + // Overrides are not allowed; clear the override flag. + b.overrideExitNodePolicy = false + } + } + // This is recorded here in the EditPrefs path, not the setPrefs path on purpose. // recordForEdit records metrics related to edits and changes, not the final state. // If, in the future, we want to record gauge-metrics related to the state of prefs, @@ -4486,15 +4579,17 @@ func (b *LocalBackend) stopReconnectTimerLocked() { func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { defer unlock() // for error paths + p0 := b.pm.CurrentPrefs() + // Check if the changes in mp are allowed. - if err := b.checkEditPrefsAccessLocked(actor, mp); err != nil { + if err := b.checkEditPrefsAccessLocked(actor, p0, mp); err != nil { b.logf("EditPrefs(%v): %v", mp.Pretty(), err) return ipn.PrefsView{}, err } // Apply additional changes to mp if necessary, // such as clearing mutually exclusive fields. 
- b.adjustEditPrefsLocked(actor, mp) + b.adjustEditPrefsLocked(p0, mp) if mp.EggSet { mp.EggSet = false @@ -4502,7 +4597,6 @@ func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.Maske b.goTracker.Go(b.doSetHostinfoFilterServices) } - p0 := b.pm.CurrentPrefs() p1 := b.pm.CurrentPrefs().AsStruct() p1.ApplyEdits(mp) @@ -7231,6 +7325,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.serveConfig = ipn.ServeConfigView{} b.lastSuggestedExitNode = "" b.keyExpired = false + b.overrideExitNodePolicy = false b.resetAlwaysOnOverrideLocked() b.extHost.NotifyProfileChange(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index b8526a4fcc70e..8bc84b081c016 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -623,6 +623,7 @@ func TestConfigureExitNode(t *testing.T) { exitNodeIDPolicy *tailcfg.StableNodeID exitNodeIPPolicy *netip.Addr exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes + exitNodeAllowOverride bool // whether [syspolicy.AllowExitNodeOverride] should be set to true wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] wantPrefs ipn.Prefs wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] @@ -1018,6 +1019,108 @@ func TestConfigureExitNode(t *testing.T) { InternalExitNodePrior: "", }, }, + { + name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [syspolicy.AllowExitNodeOverride] + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowOverride: true, // allow changing the exit node + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeID: exitNode2.StableID(), // change the 
exit node ID + }, + ExitNodeIDSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode2.StableID(), // overridden by user + AutoExitNode: "", // cleared, as we are setting the exit node ID explicitly + }, + }, + { + name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [syspolicy.AllowExitNodeOverride] + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowOverride: true, // allow changing, but not disabling, the exit node + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + ExitNodeID: "", // clearing the exit node ID disables the exit node and should not be allowed + }, + ExitNodeIDSet: true, + }, + wantChangePrefsErr: errManagedByPolicy, // edit prefs should fail with an error + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting + AutoExitNode: "any", + InternalExitNodePrior: "", + }, + }, + { + name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [syspolicy.AllowExitNodeOverride] + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeAllowOverride: true, // allow changing, but not disabling, the exit node + useExitNodeEnabled: ptr.To(false), // should fail with an error + wantExitNodeToggleErr: errManagedByPolicy, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // still enforced by the policy setting + AutoExitNode: "any", + InternalExitNodePrior: "", + }, + }, + { + name: "auto-any-via-initial-prefs/no-netmap/clear-auto-exit-node", + prefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: ipn.AnyExitNode, + }, + netMap: nil, // no netmap; exit node cannot be resolved + report: report, 
+ changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AutoExitNode: "", // clear the auto exit node + }, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: "", // cleared + ExitNodeID: "", // has never been resolved, so it should be cleared as well + }, + }, + { + name: "auto-any-via-initial-prefs/with-netmap/clear-auto-exit-node", + prefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: ipn.AnyExitNode, + }, + netMap: clientNetmap, // has a netmap; exit node will be resolved + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + AutoExitNode: "", // clear the auto exit node + }, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: "", // cleared + ExitNodeID: exitNode1.StableID(), // a resolved exit node ID should be retained + }, + }, } syspolicy.RegisterWellKnownSettingsForTest(t) for _, tt := range tests { @@ -1033,6 +1136,9 @@ func TestConfigureExitNode(t *testing.T) { if tt.exitNodeAllowedIDs != nil { store.SetStringLists(source.TestSettingOf(syspolicy.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs))) } + if tt.exitNodeAllowOverride { + store.SetBooleans(source.TestSettingOf(syspolicy.AllowExitNodeOverride, true)) + } if store.IsEmpty() { // No syspolicy settings, so don't register a store. // This allows the test to run in parallel with other tests. 
@@ -1078,6 +1184,212 @@ func TestConfigureExitNode(t *testing.T) { } } +func TestPrefsChangeDisablesExitNode(t *testing.T) { + tests := []struct { + name string + netMap *netmap.NetworkMap + prefs ipn.Prefs + change ipn.MaskedPrefs + wantDisablesExitNode bool + }{ + { + name: "has-exit-node-id/no-change", + prefs: ipn.Prefs{ + ExitNodeID: "test-exit-node", + }, + change: ipn.MaskedPrefs{}, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-ip/no-change", + prefs: ipn.Prefs{ + ExitNodeIP: netip.MustParseAddr("100.100.1.1"), + }, + change: ipn.MaskedPrefs{}, + wantDisablesExitNode: false, + }, + { + name: "has-auto-exit-node/no-change", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{}, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-id/non-exit-node-change", + prefs: ipn.Prefs{ + ExitNodeID: "test-exit-node", + }, + change: ipn.MaskedPrefs{ + WantRunningSet: true, + HostnameSet: true, + ExitNodeAllowLANAccessSet: true, + Prefs: ipn.Prefs{ + WantRunning: true, + Hostname: "test-hostname", + ExitNodeAllowLANAccess: true, + }, + }, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-ip/non-exit-node-change", + prefs: ipn.Prefs{ + ExitNodeIP: netip.MustParseAddr("100.100.1.1"), + }, + change: ipn.MaskedPrefs{ + WantRunningSet: true, + RouteAllSet: true, + ShieldsUpSet: true, + Prefs: ipn.Prefs{ + WantRunning: false, + RouteAll: false, + ShieldsUp: true, + }, + }, + wantDisablesExitNode: false, + }, + { + name: "has-auto-exit-node/non-exit-node-change", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + CorpDNSSet: true, + RouteAllSet: true, + ExitNodeAllowLANAccessSet: true, + Prefs: ipn.Prefs{ + CorpDNS: true, + RouteAll: false, + ExitNodeAllowLANAccess: true, + }, + }, + wantDisablesExitNode: false, + }, + { + name: "has-exit-node-id/change-exit-node-id", + prefs: ipn.Prefs{ + ExitNodeID: "exit-node-1", + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + 
Prefs: ipn.Prefs{ + ExitNodeID: "exit-node-2", + }, + }, + wantDisablesExitNode: false, // changing the exit node ID does not disable it + }, + { + name: "has-exit-node-id/enable-auto-exit-node", + prefs: ipn.Prefs{ + ExitNodeID: "exit-node-1", + }, + change: ipn.MaskedPrefs{ + AutoExitNodeSet: true, + Prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + }, + wantDisablesExitNode: false, // changing the exit node ID does not disable it + }, + { + name: "has-exit-node-id/clear-exit-node-id", + prefs: ipn.Prefs{ + ExitNodeID: "exit-node-1", + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "", + }, + }, + wantDisablesExitNode: true, // clearing the exit node ID disables it + }, + { + name: "has-auto-exit-node/clear-exit-node-id", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "", + }, + }, + wantDisablesExitNode: true, // clearing the exit node ID disables auto exit node as well... + }, + { + name: "has-auto-exit-node/clear-exit-node-id/but-keep-auto-exit-node", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + ExitNodeIDSet: true, + AutoExitNodeSet: true, + Prefs: ipn.Prefs{ + ExitNodeID: "", + AutoExitNode: ipn.AnyExitNode, + }, + }, + wantDisablesExitNode: false, // ... 
unless we explicitly keep the auto exit node enabled + }, + { + name: "has-auto-exit-node/clear-exit-node-ip", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + ExitNodeIPSet: true, + Prefs: ipn.Prefs{ + ExitNodeIP: netip.Addr{}, + }, + }, + wantDisablesExitNode: false, // auto exit node is still enabled + }, + { + name: "has-auto-exit-node/clear-auto-exit-node", + prefs: ipn.Prefs{ + AutoExitNode: ipn.AnyExitNode, + }, + change: ipn.MaskedPrefs{ + AutoExitNodeSet: true, + Prefs: ipn.Prefs{ + AutoExitNode: "", + }, + }, + wantDisablesExitNode: true, // clearing the auto exit while the exit node ID is unresolved disables exit node usage + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lb := newTestLocalBackend(t) + if tt.netMap != nil { + lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: tt.netMap}) + } + // Set the initial prefs via SetPrefsForTest + // to apply necessary adjustments. + lb.SetPrefsForTest(tt.prefs.Clone()) + initialPrefs := lb.Prefs() + + // Check whether changeDisablesExitNodeLocked correctly identifies the change. + if got := lb.changeDisablesExitNodeLocked(initialPrefs, &tt.change); got != tt.wantDisablesExitNode { + t.Errorf("disablesExitNode: got %v; want %v", got, tt.wantDisablesExitNode) + } + + // Apply the change and check if it the actual behavior matches the expectation. 
+ gotPrefs, err := lb.EditPrefsAs(&tt.change, &ipnauth.TestActor{}) + if err != nil { + t.Fatalf("EditPrefsAs failed: %v", err) + } + gotDisabledExitNode := initialPrefs.ExitNodeID() != "" && gotPrefs.ExitNodeID() == "" + if gotDisabledExitNode != tt.wantDisablesExitNode { + t.Errorf("disabledExitNode: got %v; want %v", gotDisabledExitNode, tt.wantDisablesExitNode) + } + }) + } +} + func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index b19a3e7fec61f..cd5f8172c159a 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -54,6 +54,15 @@ const ( ExitNodeID Key = "ExitNodeID" ExitNodeIP Key = "ExitNodeIP" // default ""; if blank, no exit node is forced. Value is exit node IP. + // AllowExitNodeOverride is a boolean key that allows the user to override exit node policy settings + // and manually select an exit node. It does not allow disabling exit node usage entirely. + // It is typically used in conjunction with [ExitNodeID] set to "auto:any". + // + // Warning: This policy setting is experimental and may change, be renamed or removed in the future. + // It may also not be fully supported by all Tailscale clients until it is out of experimental status. + // See tailscale/corp#29969. + AllowExitNodeOverride Key = "ExitNode.AllowOverride" + // Keys with a string value that specifies an option: "always", "never", "user-decides". // The default is "user-decides" unless otherwise stated. Enforcement of // these policies is typically performed in ipnlocal.applySysPolicy(). 
GUIs @@ -173,6 +182,7 @@ const ( var implicitDefinitions = []*setting.Definition{ // Device policy settings (can only be configured on a per-device basis): setting.NewDefinition(AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), + setting.NewDefinition(AllowExitNodeOverride, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(AlwaysOn, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index b962f30c008c1..87b45b654709d 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -59,6 +59,11 @@ func (c PolicyChange) HasChanged(key setting.Key) bool { } } +// HasChangedAnyOf reports whether any of the specified policy settings has changed. +func (c PolicyChange) HasChangedAnyOf(keys ...setting.Key) bool { + return slices.ContainsFunc(keys, c.HasChanged) +} + // policyChangeCallbacks are the callbacks to invoke when the effective policy changes. // It is safe for concurrent use. type policyChangeCallbacks struct { From a60e0caf6a3bc4c2801f4ca6e1630fc9409d1125 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 8 Jul 2025 19:37:09 -0700 Subject: [PATCH 0084/1093] wgengine/magicsock: remove conn.InitiationAwareEndpoint TODO (#16498) It was implemented in 5b0074729d38f8cc301803da06086033f53b1b93. 
Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index fbfcf0b41565a..ab7c2102fe1ec 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1708,11 +1708,6 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach } // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() // for the same batch & [epAddr] src. - // - // TODO(jwhited): implement [lazyEndpoint] integration to call - // [endpoint.noteRecvActivity], which triggers just-in-time - // wireguard-go configuration of the peer, prior to peer lookup - // within wireguard-go. return &lazyEndpoint{c: c, src: src}, size, true } cache.epAddr = src From bad17a1bfaa0ac3e62e2ebc95fca7c5c5959055b Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 8 Jul 2025 22:14:18 -0700 Subject: [PATCH 0085/1093] cmd/tailscale: format empty cities and countries as hyphens (#16495) When running `tailscale exit-node list`, an empty city or country name should be displayed as a hyphen "-". However, this only happened when there was no location at all. If a node provides a Hostinfo.Location, then the list would display exactly what was provided. This patch changes the listing so that empty cities and countries will either render the provided name or "-". 
Fixes #16500 Signed-off-by: Simon Law --- cmd/tailscale/cli/exitnode.go | 22 +++++++++------------- cmd/tailscale/cli/exitnode_test.go | 28 ++++++++++++++++++++-------- 2 files changed, 29 insertions(+), 21 deletions(-) diff --git a/cmd/tailscale/cli/exitnode.go b/cmd/tailscale/cli/exitnode.go index ad7a8ccee5b42..b153f096d6869 100644 --- a/cmd/tailscale/cli/exitnode.go +++ b/cmd/tailscale/cli/exitnode.go @@ -131,7 +131,7 @@ func runExitNodeList(ctx context.Context, args []string) error { for _, country := range filteredPeers.Countries { for _, city := range country.Cities { for _, peer := range city.Peers { - fmt.Fprintf(w, "\n %s\t%s\t%s\t%s\t%s\t", peer.TailscaleIPs[0], strings.Trim(peer.DNSName, "."), country.Name, city.Name, peerStatus(peer)) + fmt.Fprintf(w, "\n %s\t%s\t%s\t%s\t%s\t", peer.TailscaleIPs[0], strings.Trim(peer.DNSName, "."), cmp.Or(country.Name, "-"), cmp.Or(city.Name, "-"), peerStatus(peer)) } } } @@ -202,23 +202,16 @@ type filteredCity struct { Peers []*ipnstate.PeerStatus } -const noLocationData = "-" - -var noLocation = &tailcfg.Location{ - Country: noLocationData, - CountryCode: noLocationData, - City: noLocationData, - CityCode: noLocationData, -} - // filterFormatAndSortExitNodes filters and sorts exit nodes into // alphabetical order, by country, city and then by priority if // present. +// // If an exit node has location data, and the country has more than // one city, an `Any` city is added to the country that contains the // highest priority exit node within that country. +// // For exit nodes without location data, their country fields are -// defined as '-' to indicate that the data is not available. +// defined as the empty string to indicate that the data is not available. func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string) filteredExitNodes { // first get peers into some fixed order, as code below doesn't break ties // and our input comes from a random range-over-map. 
@@ -229,7 +222,10 @@ func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string) countries := make(map[string]*filteredCountry) cities := make(map[string]*filteredCity) for _, ps := range peers { - loc := cmp.Or(ps.Location, noLocation) + loc := ps.Location + if loc == nil { + loc = &tailcfg.Location{} + } if filterBy != "" && !strings.EqualFold(loc.Country, filterBy) { continue @@ -259,7 +255,7 @@ func filterFormatAndSortExitNodes(peers []*ipnstate.PeerStatus, filterBy string) } for _, country := range filteredExitNodes.Countries { - if country.Name == noLocationData { + if country.Name == "" { // Countries without location data should not // be filtered further. continue diff --git a/cmd/tailscale/cli/exitnode_test.go b/cmd/tailscale/cli/exitnode_test.go index 9d569a45a4615..cc38fd3a4d39e 100644 --- a/cmd/tailscale/cli/exitnode_test.go +++ b/cmd/tailscale/cli/exitnode_test.go @@ -74,10 +74,10 @@ func TestFilterFormatAndSortExitNodes(t *testing.T) { want := filteredExitNodes{ Countries: []*filteredCountry{ { - Name: noLocationData, + Name: "", Cities: []*filteredCity{ { - Name: noLocationData, + Name: "", Peers: []*ipnstate.PeerStatus{ ps[5], }, @@ -273,14 +273,20 @@ func TestSortByCountryName(t *testing.T) { Name: "Zimbabwe", }, { - Name: noLocationData, + Name: "", }, } sortByCountryName(fc) - if fc[0].Name != noLocationData { - t.Fatalf("sortByCountryName did not order countries by alphabetical order, got %v, want %v", fc[0].Name, noLocationData) + want := []string{"", "Albania", "Sweden", "Zimbabwe"} + var got []string + for _, c := range fc { + got = append(got, c.Name) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("sortByCountryName did not order countries by alphabetical order (-want +got):\n%s", diff) } } @@ -296,13 +302,19 @@ func TestSortByCityName(t *testing.T) { Name: "Squamish", }, { - Name: noLocationData, + Name: "", }, } sortByCityName(fc) - if fc[0].Name != noLocationData { - t.Fatalf("sortByCityName did not 
order cities by alphabetical order, got %v, want %v", fc[0].Name, noLocationData) + want := []string{"", "Goteborg", "Kingston", "Squamish"} + var got []string + for _, c := range fc { + got = append(got, c.Name) + } + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("sortByCityName did not order countries by alphabetical order (-want +got):\n%s", diff) } } From 90bf0a97b3b1c042b3a6be48ec186732733f995b Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 9 Jul 2025 09:13:11 +0100 Subject: [PATCH 0086/1093] cmd/k8s-operator/deploy: clarify helm install notes (#16449) Based on feedback that it wasn't clear what the user is meant to do with the output of the last command, clarify that it's an optional command to explore what got created. Updates #13427 Change-Id: Iff64ec6d02dc04bf4bbebf415d7ed1a44e7dd658 Signed-off-by: Tom Proctor --- cmd/k8s-operator/deploy/chart/templates/NOTES.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/NOTES.txt b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt index 5678e597a6824..1bee6704616e6 100644 --- a/cmd/k8s-operator/deploy/chart/templates/NOTES.txt +++ b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt @@ -22,4 +22,6 @@ $ kubectl explain proxyclass $ kubectl explain recorder $ kubectl explain dnsconfig -$ kubectl --namespace={{ .Release.Namespace }} get pods +If you're interested to explore what resources were created: + +$ kubectl --namespace={{ .Release.Namespace }} get all -l app.kubernetes.io/managed-by=Helm From 4dfed6b14697d1a9ab217e01fff774a3b72391df Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 9 Jul 2025 09:21:56 +0100 Subject: [PATCH 0087/1093] cmd/{k8s-operator,k8s-proxy}: add kube-apiserver ProxyGroup type (#16266) Adds a new k8s-proxy command to convert operator's in-process proxy to a separately deployable type of ProxyGroup: kube-apiserver. 
k8s-proxy reads in a new config file written by the operator, modelled on tailscaled's conffile but with some modifications to ensure multiple versions of the config can co-exist within a file. This should make it much easier to support reading that config file from a Kube Secret with a stable file name. To avoid needing to give the operator ClusterRole{,Binding} permissions, the helm chart now optionally deploys a new static ServiceAccount for the API Server proxy to use if in auth mode. Proxies deployed by kube-apiserver ProxyGroups currently work the same as the operator's in-process proxy. They do not yet leverage Tailscale Services for presenting a single HA DNS name. Updates #13358 Change-Id: Ib6ead69b2173c5e1929f3c13fb48a9a5362195d8 Signed-off-by: Tom Proctor --- Makefile | 48 ++-- build_docker.sh | 18 ++ cmd/k8s-operator/depaware.txt | 3 +- .../chart/templates/apiserverproxy-rbac.yaml | 16 +- cmd/k8s-operator/deploy/chart/values.yaml | 16 ++ .../crds/tailscale.com_proxyclasses.yaml | 46 +++- .../crds/tailscale.com_proxygroups.yaml | 19 +- .../deploy/manifests/authproxy-rbac.yaml | 9 + .../deploy/manifests/operator.yaml | 65 ++++- cmd/k8s-operator/ingress-for-pg.go | 6 +- cmd/k8s-operator/ingress-for-pg_test.go | 49 ++++ cmd/k8s-operator/operator.go | 64 ++++- cmd/k8s-operator/proxy.go | 61 +++++ cmd/k8s-operator/proxygroup.go | 194 +++++++++++--- cmd/k8s-operator/proxygroup_specs.go | 162 +++++++++++- cmd/k8s-operator/proxygroup_test.go | 249 ++++++++++++++++-- cmd/k8s-operator/sts.go | 12 +- cmd/k8s-operator/svc-for-pg.go | 60 +---- cmd/k8s-operator/svc-for-pg_test.go | 2 - cmd/k8s-proxy/k8s-proxy.go | 197 ++++++++++++++ k8s-operator/api-proxy/env.go | 29 -- k8s-operator/api-proxy/proxy.go | 187 +++++-------- k8s-operator/api.md | 40 ++- .../apis/v1alpha1/types_proxyclass.go | 22 +- .../apis/v1alpha1/types_proxygroup.go | 33 ++- .../apis/v1alpha1/zz_generated.deepcopy.go | 25 ++ kube/k8s-proxy/conf/conf.go | 101 +++++++ kube/k8s-proxy/conf/conf_test.go | 
86 ++++++ kube/kubetypes/types.go | 18 +- kube/state/state.go | 97 +++++++ kube/state/state_test.go | 203 ++++++++++++++ 31 files changed, 1787 insertions(+), 350 deletions(-) create mode 100644 cmd/k8s-operator/proxy.go create mode 100644 cmd/k8s-proxy/k8s-proxy.go delete mode 100644 k8s-operator/api-proxy/env.go create mode 100644 kube/k8s-proxy/conf/conf.go create mode 100644 kube/k8s-proxy/conf/conf_test.go create mode 100644 kube/state/state.go create mode 100644 kube/state/state_test.go diff --git a/Makefile b/Makefile index 41c67c711791d..f5fc205891191 100644 --- a/Makefile +++ b/Makefile @@ -92,38 +92,38 @@ pushspk: spk ## Push and install synology package on ${SYNO_HOST} host scp tailscale.spk root@${SYNO_HOST}: ssh root@${SYNO_HOST} /usr/syno/bin/synopkg install tailscale.spk -publishdevimage: ## Build and publish tailscale image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1) +.PHONY: check-image-repo +check-image-repo: + @if [ -z "$(REPO)" ]; then \ + echo "REPO=... required; e.g. REPO=ghcr.io/$$USER/tailscale" >&2; \ + exit 1; \ + fi + @for repo in tailscale/tailscale ghcr.io/tailscale/tailscale \ + tailscale/k8s-operator ghcr.io/tailscale/k8s-operator \ + tailscale/k8s-nameserver ghcr.io/tailscale/k8s-nameserver \ + tailscale/tsidp ghcr.io/tailscale/tsidp \ + tailscale/k8s-proxy ghcr.io/tailscale/k8s-proxy; do \ + if [ "$(REPO)" = "$$repo" ]; then \ + echo "REPO=... 
must not be $$repo" >&2; \ + exit 1; \ + fi; \ + done + +publishdevimage: check-image-repo ## Build and publish tailscale image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=client ./build_docker.sh -publishdevoperator: ## Build and publish k8s-operator image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1) +publishdevoperator: check-image-repo ## Build and publish k8s-operator image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-operator ./build_docker.sh -publishdevnameserver: ## Build and publish k8s-nameserver image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/k8s-nameserver" || (echo "REPO=... must not be tailscale/k8s-nameserver" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/k8s-nameserver" || (echo "REPO=... 
must not be ghcr.io/tailscale/k8s-nameserver" && exit 1) +publishdevnameserver: check-image-repo ## Build and publish k8s-nameserver image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-nameserver ./build_docker.sh -publishdevtsidp: ## Build and publish tsidp image to location specified by ${REPO} - @test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1) - @test "${REPO}" != "tailscale/tsidp" || (echo "REPO=... must not be tailscale/tsidp" && exit 1) - @test "${REPO}" != "ghcr.io/tailscale/tsidp" || (echo "REPO=... must not be ghcr.io/tailscale/tsidp" && exit 1) +publishdevtsidp: check-image-repo ## Build and publish tsidp image to location specified by ${REPO} TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=tsidp ./build_docker.sh +publishdevproxy: check-image-repo ## Build and publish k8s-proxy image to location specified by ${REPO} + TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-proxy ./build_docker.sh + .PHONY: sshintegrationtest sshintegrationtest: ## Run the SSH integration tests in various Docker containers @GOOS=linux GOARCH=amd64 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \ diff --git a/build_docker.sh b/build_docker.sh index 7840dc89775d3..bdeaa8659b805 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -118,6 +118,24 @@ case "$TARGET" in --annotations="${ANNOTATIONS}" \ /usr/local/bin/tsidp ;; + k8s-proxy) + DEFAULT_REPOS="tailscale/k8s-proxy" + REPOS="${REPOS:-${DEFAULT_REPOS}}" + go run github.com/tailscale/mkctr \ + --gopaths="tailscale.com/cmd/k8s-proxy:/usr/local/bin/k8s-proxy" \ + --ldflags=" \ + -X 
tailscale.com/version.longStamp=${VERSION_LONG} \ + -X tailscale.com/version.shortStamp=${VERSION_SHORT} \ + -X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \ + --base="${BASE}" \ + --tags="${TAGS}" \ + --gotags="ts_kube,ts_package_container" \ + --repos="${REPOS}" \ + --push="${PUSH}" \ + --target="${PLATFORM}" \ + --annotations="${ANNOTATIONS}" \ + /usr/local/bin/k8s-proxy + ;; *) echo "unknown target: $TARGET" exit 1 diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 36c5184c3b44a..f810d1b4fd62a 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -200,7 +200,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp - github.com/tailscale/hujson from tailscale.com/ipn/conffile + github.com/tailscale/hujson from tailscale.com/ipn/conffile+ L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth @@ -822,6 +822,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/k8s-operator/sessionrecording/ws from tailscale.com/k8s-operator/sessionrecording tailscale.com/kube/egressservices from tailscale.com/cmd/k8s-operator tailscale.com/kube/ingressservices from tailscale.com/cmd/k8s-operator + tailscale.com/kube/k8s-proxy/conf from tailscale.com/cmd/k8s-operator tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore tailscale.com/kube/kubetypes from tailscale.com/cmd/k8s-operator+ diff --git a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml 
b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml index 072ecf6d22e2f..ad0a6fb66f51e 100644 --- a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml @@ -1,7 +1,16 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -{{ if eq .Values.apiServerProxyConfig.mode "true" }} +# If old setting used, enable both old (operator) and new (ProxyGroup) workflows. +# If new setting used, enable only new workflow. +{{ if or (eq .Values.apiServerProxyConfig.mode "true") + (eq .Values.apiServerProxyConfig.allowImpersonation "true") }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-apiserver-auth-proxy + namespace: {{ .Release.Namespace }} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -16,9 +25,14 @@ kind: ClusterRoleBinding metadata: name: tailscale-auth-proxy subjects: +{{- if eq .Values.apiServerProxyConfig.mode "true" }} - kind: ServiceAccount name: operator namespace: {{ .Release.Namespace }} +{{- end }} +- kind: ServiceAccount + name: kube-apiserver-auth-proxy + namespace: {{ .Release.Namespace }} roleRef: kind: ClusterRole name: tailscale-auth-proxy diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index 2926f6d0759f2..cdedb92e819e4 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -92,6 +92,13 @@ ingressClass: # If you need more configuration options, take a look at ProxyClass: # https://tailscale.com/kb/1445/kubernetes-operator-customization#cluster-resource-customization-using-proxyclass-custom-resource proxyConfig: + # Configure the proxy image to use instead of the default tailscale/tailscale:latest. + # Applying a ProxyClass with `spec.statefulSet.pod.tailscaleContainer.image` + # set will override any defaults here. 
+ # + # Note that ProxyGroups of type "kube-apiserver" use a different default image, + # tailscale/k8s-proxy:latest, and it is currently only possible to override + # that image via the same ProxyClass field. image: # Repository defaults to DockerHub, but images are also synced to ghcr.io/tailscale/tailscale. repository: tailscale/tailscale @@ -115,6 +122,15 @@ proxyConfig: # Kubernetes API server. # https://tailscale.com/kb/1437/kubernetes-operator-api-server-proxy apiServerProxyConfig: + # Set to "true" to create the ClusterRole permissions required for the API + # server proxy's auth mode. In auth mode, the API server proxy impersonates + # groups and users based on tailnet ACL grants. Required for ProxyGroups of + # type "kube-apiserver" running in auth mode. + allowImpersonation: "false" # "true", "false" + + # If true or noauth, the operator will run an in-process API server proxy. + # You can deploy a ProxyGroup of type "kube-apiserver" to run a high + # availability set of API server proxies instead. mode: "false" # "true", "false", "noauth" imagePullSecrets: [] diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index fcf1b27aaf318..c5dc9c3e96a83 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -1379,12 +1379,21 @@ spec: type: string image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. 
+ + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. + To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: @@ -1655,7 +1664,9 @@ spec: PodSecurityContext, the value specified in SecurityContext takes precedence. type: string tailscaleInitContainer: - description: Configuration for the proxy init container that enables forwarding. + description: |- + Configuration for the proxy init container that enables forwarding. + Not valid to apply to ProxyGroups of type "kube-apiserver". type: object properties: debug: @@ -1709,12 +1720,21 @@ spec: type: string image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. 
+ To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index c426c8427a507..06c8479252873 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -77,6 +77,22 @@ spec: must not start with a dash and must be between 1 and 62 characters long. type: string pattern: ^[a-z0-9][a-z0-9-]{0,61}$ + kubeAPIServer: + description: |- + KubeAPIServer contains configuration specific to the kube-apiserver + ProxyGroup type. This field is only used when Type is set to "kube-apiserver". + type: object + properties: + mode: + description: |- + Mode to run the API server proxy in. Supported modes are auth and noauth. + In auth mode, requests from the tailnet proxied over to the Kubernetes + API server are additionally impersonated using the sender's tailnet identity. + If not specified, defaults to auth mode. + type: string + enum: + - auth + - noauth proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that contains @@ -106,12 +122,13 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: description: |- - Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. 
Type is immutable once a ProxyGroup is created. type: string enum: - egress - ingress + - kube-apiserver x-kubernetes-validations: - rule: self == oldSelf message: ProxyGroup type is immutable diff --git a/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml b/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml index ddbdda32e476e..5818fa69fff7d 100644 --- a/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml @@ -1,6 +1,12 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-apiserver-auth-proxy + namespace: tailscale +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -18,6 +24,9 @@ subjects: - kind: ServiceAccount name: operator namespace: tailscale +- kind: ServiceAccount + name: kube-apiserver-auth-proxy + namespace: tailscale roleRef: kind: ClusterRole name: tailscale-auth-proxy diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index cdf301318f923..ff3705cb343ff 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -1852,12 +1852,21 @@ spec: type: array image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. 
+ To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. + + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: @@ -2129,7 +2138,9 @@ spec: type: object type: object tailscaleInitContainer: - description: Configuration for the proxy init container that enables forwarding. + description: |- + Configuration for the proxy init container that enables forwarding. + Not valid to apply to ProxyGroups of type "kube-apiserver". properties: debug: description: |- @@ -2182,12 +2193,21 @@ spec: type: array image: description: |- - Container image name. By default images are pulled from - docker.io/tailscale/tailscale, but the official images are also - available at ghcr.io/tailscale/tailscale. Specifying image name here - will override any proxy image values specified via the Kubernetes - operator's Helm chart values or PROXY_IMAGE env var in the operator - Deployment. + Container image name. By default images are pulled from docker.io/tailscale, + but the official images are also available at ghcr.io/tailscale. + + For all uses except on ProxyGroups of type "kube-apiserver", this image must + be either tailscale/tailscale, or an equivalent mirror of that image. + To apply to ProxyGroups of type "kube-apiserver", this image must be + tailscale/k8s-proxy or a mirror of that image. 
+ + For "tailscale/tailscale"-based proxies, specifying image name here will + override any proxy image values specified via the Kubernetes operator's + Helm chart values or PROXY_IMAGE env var in the operator Deployment. + For "tailscale/k8s-proxy"-based proxies, there is currently no way to + configure your own default, and this field is the only way to use a + custom image. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image type: string imagePullPolicy: @@ -2904,6 +2924,22 @@ spec: must not start with a dash and must be between 1 and 62 characters long. pattern: ^[a-z0-9][a-z0-9-]{0,61}$ type: string + kubeAPIServer: + description: |- + KubeAPIServer contains configuration specific to the kube-apiserver + ProxyGroup type. This field is only used when Type is set to "kube-apiserver". + properties: + mode: + description: |- + Mode to run the API server proxy in. Supported modes are auth and noauth. + In auth mode, requests from the tailnet proxied over to the Kubernetes + API server are additionally impersonated using the sender's tailnet identity. + If not specified, defaults to auth mode. + enum: + - auth + - noauth + type: string + type: object proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that contains @@ -2933,11 +2969,12 @@ spec: type: array type: description: |- - Type of the ProxyGroup proxies. Supported types are egress and ingress. + Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. Type is immutable once a ProxyGroup is created. 
enum: - egress - ingress + - kube-apiserver type: string x-kubernetes-validations: - message: ProxyGroup type is immutable diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 79bad92be080e..aaf22d471353f 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -239,7 +239,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // This checks and ensures that Tailscale Service's owner references are updated // for this Ingress and errors if that is not possible (i.e. because it // appears that the Tailscale Service has been created by a non-operator actor). - updatedAnnotations, err := r.ownerAnnotations(existingTSSvc) + updatedAnnotations, err := ownerAnnotations(r.operatorID, existingTSSvc) if err != nil { const instr = "To proceed, you can either manually delete the existing Tailscale Service or choose a different MagicDNS name at `.spec.tls.hosts[0] in the Ingress definition" msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. %s", hostname, err, instr) @@ -867,9 +867,9 @@ type OwnerRef struct { // nil, but does not contain an owner reference we return an error as this likely means // that the Service was created by something other than a Tailscale // Kubernetes operator.
-func (r *HAIngressReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) { +func ownerAnnotations(operatorID string, svc *tailscale.VIPService) (map[string]string, error) { ref := OwnerRef{ - OperatorID: r.operatorID, + OperatorID: operatorID, } if svc == nil { c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index d29368caef59d..5de86cdad573a 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -12,8 +12,10 @@ import ( "maps" "reflect" "slices" + "strings" "testing" + "github.com/google/go-cmp/cmp" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" @@ -650,6 +652,53 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { } } +func TestOwnerAnnotations(t *testing.T) { + singleSelfOwner := map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`, + } + + for name, tc := range map[string]struct { + svc *tailscale.VIPService + wantAnnotations map[string]string + wantErr string + }{ + "no_svc": { + svc: nil, + wantAnnotations: singleSelfOwner, + }, + "empty_svc": { + svc: &tailscale.VIPService{}, + wantErr: "likely a resource created by something other than the Tailscale Kubernetes operator", + }, + "already_owner": { + svc: &tailscale.VIPService{ + Annotations: singleSelfOwner, + }, + wantAnnotations: singleSelfOwner, + }, + "add_owner": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"}]}`, + }, + }, + wantAnnotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"},{"operatorID":"self-id"}]}`, + }, + }, + } { + t.Run(name, func(t *testing.T) { + got, err := ownerAnnotations("self-id", tc.svc) + if tc.wantErr != "" && !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("ownerAnnotations() error = %v, wantErr %v", err, 
tc.wantErr) + } + if diff := cmp.Diff(tc.wantAnnotations, got); diff != "" { + t.Errorf("ownerAnnotations() mismatch (-want +got):\n%s", diff) + } + }) + } +} + func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain string) error { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 96b3b37ad0340..870a6f8b7f37e 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -26,6 +26,7 @@ import ( networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" klabels "k8s.io/apimachinery/pkg/labels" @@ -77,6 +78,7 @@ func main() { tsNamespace = defaultEnv("OPERATOR_NAMESPACE", "") tslogging = defaultEnv("OPERATOR_LOGGING", "info") image = defaultEnv("PROXY_IMAGE", "tailscale/tailscale:latest") + k8sProxyImage = defaultEnv("K8S_PROXY_IMAGE", "tailscale/k8s-proxy:latest") priorityClassName = defaultEnv("PROXY_PRIORITY_CLASS_NAME", "") tags = defaultEnv("PROXY_TAGS", "tag:k8s") tsFirewallMode = defaultEnv("PROXY_FIREWALL_MODE", "") @@ -110,17 +112,27 @@ func main() { // The operator can run either as a plain operator or it can // additionally act as api-server proxy // https://tailscale.com/kb/1236/kubernetes-operator/?q=kubernetes#accessing-the-kubernetes-control-plane-using-an-api-server-proxy. 
- mode := apiproxy.ParseAPIProxyMode() - if mode == apiproxy.APIServerProxyModeDisabled { + mode := parseAPIProxyMode() + if mode == apiServerProxyModeDisabled { hostinfo.SetApp(kubetypes.AppOperator) } else { - hostinfo.SetApp(kubetypes.AppAPIServerProxy) + hostinfo.SetApp(kubetypes.AppInProcessAPIServerProxy) } s, tsc := initTSNet(zlog, loginServer) defer s.Close() restConfig := config.GetConfigOrDie() - apiproxy.MaybeLaunchAPIServerProxy(zlog, restConfig, s, mode) + if mode != apiServerProxyModeDisabled { + ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, mode == apiServerProxyModeEnabled) + if err != nil { + zlog.Fatalf("error creating API server proxy: %v", err) + } + go func() { + if err := ap.Run(context.Background()); err != nil { + zlog.Fatalf("error running API server proxy: %v", err) + } + }() + } rOpts := reconcilerOpts{ log: zlog, tsServer: s, @@ -128,6 +140,7 @@ func main() { tailscaleNamespace: tsNamespace, restConfig: restConfig, proxyImage: image, + k8sProxyImage: k8sProxyImage, proxyPriorityClassName: priorityClassName, proxyActAsDefaultLoadBalancer: isDefaultLoadBalancer, proxyTags: tags, @@ -415,7 +428,6 @@ func runReconcilers(opts reconcilerOpts) { Complete(&HAServiceReconciler{ recorder: eventRecorder, tsClient: opts.tsClient, - tsnetServer: opts.tsServer, defaultTags: strings.Split(opts.proxyTags, ","), Client: mgr.GetClient(), logger: opts.log.Named("service-pg-reconciler"), @@ -625,13 +637,14 @@ func runReconcilers(opts reconcilerOpts) { ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{}) proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) nodeFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(nodeHandlerForProxyGroup(mgr.GetClient(), opts.defaultProxyClass, startlog)) + saFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(serviceAccountHandlerForProxyGroup(mgr.GetClient(), 
startlog)) err = builder.ControllerManagedBy(mgr). For(&tsapi.ProxyGroup{}). Named("proxygroup-reconciler"). Watches(&corev1.Service{}, ownedByProxyGroupFilter). Watches(&appsv1.StatefulSet{}, ownedByProxyGroupFilter). Watches(&corev1.ConfigMap{}, ownedByProxyGroupFilter). - Watches(&corev1.ServiceAccount{}, ownedByProxyGroupFilter). + Watches(&corev1.ServiceAccount{}, saFilterForProxyGroup). Watches(&corev1.Secret{}, ownedByProxyGroupFilter). Watches(&rbacv1.Role{}, ownedByProxyGroupFilter). Watches(&rbacv1.RoleBinding{}, ownedByProxyGroupFilter). @@ -645,7 +658,8 @@ func runReconcilers(opts reconcilerOpts) { tsClient: opts.tsClient, tsNamespace: opts.tailscaleNamespace, - proxyImage: opts.proxyImage, + tsProxyImage: opts.proxyImage, + k8sProxyImage: opts.k8sProxyImage, defaultTags: strings.Split(opts.proxyTags, ","), tsFirewallMode: opts.proxyFirewallMode, defaultProxyClass: opts.defaultProxyClass, @@ -668,6 +682,7 @@ type reconcilerOpts struct { tailscaleNamespace string // namespace in which operator resources will be deployed restConfig *rest.Config // config for connecting to the kube API server proxyImage string // : + k8sProxyImage string // : // proxyPriorityClassName is PriorityClass to be set for proxy Pods. This // is a legacy mechanism for cluster resource configuration options - // going forward use ProxyClass. @@ -996,8 +1011,8 @@ func nodeHandlerForProxyGroup(cl client.Client, defaultProxyClass string, logger } // proxyClassHandlerForProxyGroup returns a handler that, for a given ProxyClass, -// returns a list of reconcile requests for all Connectors that have -// .spec.proxyClass set. +// returns a list of reconcile requests for all ProxyGroups that have +// .spec.proxyClass set to that ProxyClass.
func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { pgList := new(tsapi.ProxyGroupList) @@ -1016,6 +1031,37 @@ func proxyClassHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) } } +// serviceAccountHandlerForProxyGroup returns a handler that, for a given ServiceAccount, +// returns a list of reconcile requests for all ProxyGroups that use that ServiceAccount. +// For most ProxyGroups, this will be a dedicated ServiceAccount owned by a specific +// ProxyGroup. But for kube-apiserver ProxyGroups running in auth mode, they use a shared +// static ServiceAccount named "kube-apiserver-auth-proxy". +func serviceAccountHandlerForProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + pgList := new(tsapi.ProxyGroupList) + if err := cl.List(ctx, pgList); err != nil { + logger.Debugf("error listing ProxyGroups for ServiceAccount: %v", err) + return nil + } + reqs := make([]reconcile.Request, 0) + saName := o.GetName() + for _, pg := range pgList.Items { + if saName == authAPIServerProxySAName && isAuthAPIServerProxy(&pg) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + } + expectedOwner := pgOwnerReference(&pg)[0] + saOwnerRefs := o.GetOwnerReferences() + for _, ref := range saOwnerRefs { + if apiequality.Semantic.DeepEqual(ref, expectedOwner) { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&pg)}) + break + } + } + } + return reqs + } +} + // serviceHandlerForIngress returns a handler for Service events for ingress // reconciler that ensures that if the Service associated with an event is of // interest to the reconciler, the associated Ingress(es) gets be reconciled. 
diff --git a/cmd/k8s-operator/proxy.go b/cmd/k8s-operator/proxy.go new file mode 100644 index 0000000000000..09a7b8c6232d2 --- /dev/null +++ b/cmd/k8s-operator/proxy.go @@ -0,0 +1,61 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "fmt" + "log" + "os" +) + +type apiServerProxyMode int + +func (a apiServerProxyMode) String() string { + switch a { + case apiServerProxyModeDisabled: + return "disabled" + case apiServerProxyModeEnabled: + return "auth" + case apiServerProxyModeNoAuth: + return "noauth" + default: + return "unknown" + } +} + +const ( + apiServerProxyModeDisabled apiServerProxyMode = iota + apiServerProxyModeEnabled + apiServerProxyModeNoAuth +) + +func parseAPIProxyMode() apiServerProxyMode { + haveAuthProxyEnv := os.Getenv("AUTH_PROXY") != "" + haveAPIProxyEnv := os.Getenv("APISERVER_PROXY") != "" + switch { + case haveAPIProxyEnv && haveAuthProxyEnv: + log.Fatal("AUTH_PROXY (deprecated) and APISERVER_PROXY are mutually exclusive, please unset AUTH_PROXY") + case haveAuthProxyEnv: + var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated + if authProxyEnv { + return apiServerProxyModeEnabled + } + return apiServerProxyModeDisabled + case haveAPIProxyEnv: + var apiProxyEnv = defaultEnv("APISERVER_PROXY", "") // true, false or "noauth" + switch apiProxyEnv { + case "true": + return apiServerProxyModeEnabled + case "false", "": + return apiServerProxyModeDisabled + case "noauth": + return apiServerProxyModeNoAuth + default: + panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv)) + } + } + return apiServerProxyModeDisabled +} diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index c44de09a7fc45..3dfb004f1dd36 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -17,6 +17,7 @@ import ( "strings" "sync" + dockerref "github.com/distribution/reference" "go.uber.org/zap" xslices 
"golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" @@ -36,9 +37,11 @@ import ( tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" + "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/tstime" + "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/clientmetric" "tailscale.com/util/mak" @@ -48,7 +51,9 @@ import ( const ( reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed" reasonProxyGroupReady = "ProxyGroupReady" + reasonProxyGroupAvailable = "ProxyGroupAvailable" reasonProxyGroupCreating = "ProxyGroupCreating" + reasonProxyGroupInvalid = "ProxyGroupInvalid" // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" @@ -63,12 +68,14 @@ const ( // // tailcfg.CurrentCapabilityVersion was 106 when the ProxyGroup controller was // first introduced. - pgMinCapabilityVersion = 106 + pgMinCapabilityVersion = 106 + kubeAPIServerConfigFile = "config.hujson" ) var ( - gaugeEgressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount) - gaugeIngressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupIngressCount) + gaugeEgressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupEgressCount) + gaugeIngressProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupIngressCount) + gaugeAPIServerProxyGroupResources = clientmetric.NewGauge(kubetypes.MetricProxyGroupAPIServerCount) ) // ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition. @@ -81,15 +88,17 @@ type ProxyGroupReconciler struct { // User-specified defaults from the helm installation. 
tsNamespace string - proxyImage string + tsProxyImage string + k8sProxyImage string defaultTags []string tsFirewallMode string defaultProxyClass string loginServer string - mu sync.Mutex // protects following - egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge - ingressProxyGroups set.Slice[types.UID] // for ingress proxygroups gauge + mu sync.Mutex // protects following + egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge + ingressProxyGroups set.Slice[types.UID] // for ingress proxygroups gauge + apiServerProxyGroups set.Slice[types.UID] // for kube-apiserver proxygroups gauge } func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger { @@ -170,7 +179,6 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG if err != nil { return r.notReadyErrf(pg, "error getting ProxyGroup's ProxyClass %q: %w", proxyClassName, err) } - validateProxyClassForPG(logger, pg, proxyClass) if !tsoperator.ProxyClassIsReady(proxyClass) { msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q is not yet in a ready state, waiting...", proxyClassName) logger.Info(msg) @@ -178,6 +186,10 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG } } + if err := r.validate(ctx, pg, proxyClass, logger); err != nil { + return r.notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid ProxyGroup spec: %v", err)) + } + staticEndpoints, nrr, err := r.maybeProvision(ctx, pg, proxyClass) if err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { @@ -192,11 +204,7 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG return staticEndpoints, nrr, nil } -// validateProxyClassForPG applies custom validation logic for ProxyClass applied to ProxyGroup. 
-func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) { - if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { - return - } +func (r *ProxyGroupReconciler) validate(ctx context.Context, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, logger *zap.SugaredLogger) error { // Our custom logic for ensuring minimum downtime ProxyGroup update rollouts relies on the local health check // beig accessible on the replica Pod IP:9002. This address can also be modified by users, via // TS_LOCAL_ADDR_PORT env var. @@ -208,13 +216,70 @@ func validateProxyClassForPG(logger *zap.SugaredLogger, pg *tsapi.ProxyGroup, pc // shouldn't need to set their own). // // TODO(irbekrm): maybe disallow configuring this env var in future (in Tailscale 1.84 or later). - if hasLocalAddrPortSet(pc) { + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress && hasLocalAddrPortSet(pc) { msg := fmt.Sprintf("ProxyClass %s applied to an egress ProxyGroup has TS_LOCAL_ADDR_PORT env var set to a custom value."+ "This will disable the ProxyGroup graceful failover mechanism, so you might experience downtime when ProxyGroup pods are restarted."+ "In future we will remove the ability to set custom TS_LOCAL_ADDR_PORT for egress ProxyGroups."+ "Please raise an issue if you expect that this will cause issues for your workflow.", pc.Name) logger.Warn(msg) } + + // image is the value of pc.Spec.StatefulSet.Pod.TailscaleContainer.Image or "" + // imagePath is a slash-delimited path ending with the image name, e.g. + // "tailscale/tailscale" or maybe "k8s-proxy" if hosted at example.com/k8s-proxy. 
+ var image, imagePath string + if pc != nil && + pc.Spec.StatefulSet != nil && + pc.Spec.StatefulSet.Pod != nil && + pc.Spec.StatefulSet.Pod.TailscaleContainer != nil && + pc.Spec.StatefulSet.Pod.TailscaleContainer.Image != "" { + image, err := dockerref.ParseNormalizedNamed(pc.Spec.StatefulSet.Pod.TailscaleContainer.Image) + if err != nil { + // Shouldn't be possible as the ProxyClass won't be marked ready + // without successfully parsing the image. + return fmt.Errorf("error parsing %q as a container image reference: %w", pc.Spec.StatefulSet.Pod.TailscaleContainer.Image, err) + } + imagePath = dockerref.Path(image) + } + + var errs []error + if isAuthAPIServerProxy(pg) { + // Validate that the static ServiceAccount already exists. + sa := &corev1.ServiceAccount{} + if err := r.Get(ctx, types.NamespacedName{Namespace: r.tsNamespace, Name: authAPIServerProxySAName}, sa); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("error validating that ServiceAccount %q exists: %w", authAPIServerProxySAName, err) + } + + errs = append(errs, fmt.Errorf("the ServiceAccount %q used for the API server proxy in auth mode does not exist but "+ + "should have been created during operator installation; use apiServerProxyConfig.allowImpersonation=true "+ + "in the helm chart, or authproxy-rbac.yaml from the static manifests", authAPIServerProxySAName)) + } + } else { + // Validate that the ServiceAccount we create won't overwrite the static one. + // TODO(tomhjp): This doesn't cover other controllers that could create a + // ServiceAccount. Perhaps should have some guards to ensure that an update + // would never change the ownership of a resource we expect to already be owned. 
+ if pgServiceAccountName(pg) == authAPIServerProxySAName { + errs = append(errs, fmt.Errorf("the name of the ProxyGroup %q conflicts with the static ServiceAccount used for the API server proxy in auth mode", pg.Name)) + } + } + + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + if strings.HasSuffix(imagePath, "tailscale") { + errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies to use image %q but expected a %q image for ProxyGroup of type %q", pc.Name, image, "k8s-proxy", pg.Spec.Type)) + } + + if pc != nil && pc.Spec.StatefulSet != nil && pc.Spec.StatefulSet.Pod != nil && pc.Spec.StatefulSet.Pod.TailscaleInitContainer != nil { + errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies Tailscale init container config, but ProxyGroups of type %q do not use init containers", pc.Name, pg.Spec.Type)) + } + } else { + if strings.HasSuffix(imagePath, "k8s-proxy") { + errs = append(errs, fmt.Errorf("the configured ProxyClass %q specifies to use image %q but expected a %q image for ProxyGroup of type %q", pc.Name, image, "tailscale", pg.Spec.Type)) + } + } + + return errors.Join(errs...) 
} func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { @@ -263,14 +328,21 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return r.notReadyErrf(pg, "error provisioning state Secrets: %w", err) } } - sa := pgServiceAccount(pg, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) { - s.ObjectMeta.Labels = sa.ObjectMeta.Labels - s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations - s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences - }); err != nil { - return r.notReadyErrf(pg, "error provisioning ServiceAccount: %w", err) + + // auth mode kube-apiserver ProxyGroups use a statically created + // ServiceAccount to keep ClusterRole creation permissions limited to the + // helm chart installer. + if !isAuthAPIServerProxy(pg) { + sa := pgServiceAccount(pg, r.tsNamespace) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) { + s.ObjectMeta.Labels = sa.ObjectMeta.Labels + s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations + s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences + }); err != nil { + return r.notReadyErrf(pg, "error provisioning ServiceAccount: %w", err) + } } + role := pgRole(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { r.ObjectMeta.Labels = role.ObjectMeta.Labels @@ -280,6 +352,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro }); err != nil { return r.notReadyErrf(pg, "error provisioning Role: %w", err) } + roleBinding := pgRoleBinding(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels @@ -290,6 +363,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, 
pg *tsapi.Pro }); err != nil { return r.notReadyErrf(pg, "error provisioning RoleBinding: %w", err) } + if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { cm, hp := pgEgressCM(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) { @@ -300,6 +374,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return r.notReadyErrf(pg, "error provisioning egress ConfigMap %q: %w", cm.Name, err) } } + if pg.Spec.Type == tsapi.ProxyGroupTypeIngress { cm := pgIngressCM(pg, r.tsNamespace) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, cm, func(existing *corev1.ConfigMap) { @@ -309,7 +384,12 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return r.notReadyErrf(pg, "error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } - ss, err := pgStatefulSet(pg, r.tsNamespace, r.proxyImage, r.tsFirewallMode, tailscaledPort, proxyClass) + + defaultImage := r.tsProxyImage + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + defaultImage = r.k8sProxyImage + } + ss, err := pgStatefulSet(pg, r.tsNamespace, defaultImage, r.tsFirewallMode, tailscaledPort, proxyClass) if err != nil { return r.notReadyErrf(pg, "error generating StatefulSet spec: %w", err) } @@ -371,7 +451,7 @@ func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *za if len(devices) > 0 { status = metav1.ConditionTrue if len(devices) == desiredReplicas { - reason = reasonProxyGroupReady + reason = reasonProxyGroupAvailable } } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, 0, r.clock, logger) @@ -702,17 +782,57 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p return nil, err } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer) - if err != nil { - return nil, fmt.Errorf("error creating tailscaled 
config: %w", err) - } + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + hostname := pgHostname(pg, i) - for cap, cfg := range configs { - cfgJSON, err := json.Marshal(cfg) + if authKey == nil && existingCfgSecret != nil { + deviceAuthed := false + for _, d := range pg.Status.Devices { + if d.Hostname == hostname { + deviceAuthed = true + break + } + } + if !deviceAuthed { + existingCfg := conf.ConfigV1Alpha1{} + if err := json.Unmarshal(existingCfgSecret.Data[kubeAPIServerConfigFile], &existingCfg); err != nil { + return nil, fmt.Errorf("error unmarshalling existing config: %w", err) + } + if existingCfg.AuthKey != nil { + authKey = existingCfg.AuthKey + } + } + } + cfg := conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &conf.ConfigV1Alpha1{ + Hostname: &hostname, + State: ptr.To(fmt.Sprintf("kube:%s", pgPodName(pg.Name, i))), + App: ptr.To(kubetypes.AppProxyGroupKubeAPIServer), + AuthKey: authKey, + KubeAPIServer: &conf.KubeAPIServer{ + AuthMode: opt.NewBool(isAuthAPIServerProxy(pg)), + }, + }, + } + cfgB, err := json.Marshal(cfg) + if err != nil { + return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) + } + mak.Set(&cfgSecret.Data, kubeAPIServerConfigFile, cfgB) + } else { + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer) if err != nil { - return nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + return nil, fmt.Errorf("error creating tailscaled config: %w", err) + } + + for cap, cfg := range configs { + cfgJSON, err := json.Marshal(cfg) + if err != nil { + return nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + } + mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } - mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } if existingCfgSecret != nil { @@ -834,9 +954,12 @@ func (r *ProxyGroupReconciler) ensureAddedToGaugeForProxyGroup(pg *tsapi.ProxyGr 
r.egressProxyGroups.Add(pg.UID) case tsapi.ProxyGroupTypeIngress: r.ingressProxyGroups.Add(pg.UID) + case tsapi.ProxyGroupTypeKubernetesAPIServer: + r.apiServerProxyGroups.Add(pg.UID) } gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len())) gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) + gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len())) } // ensureRemovedFromGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource type is updated when the @@ -847,9 +970,12 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro r.egressProxyGroups.Remove(pg.UID) case tsapi.ProxyGroupTypeIngress: r.ingressProxyGroups.Remove(pg.UID) + case tsapi.ProxyGroupTypeKubernetesAPIServer: + r.apiServerProxyGroups.Remove(pg.UID) } gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len())) gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) + gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len())) } func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string, loginServer string) (tailscaledConfigs, error) { @@ -858,7 +984,7 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, a AcceptDNS: "false", AcceptRoutes: "false", // AcceptRoutes defaults to true Locked: "false", - Hostname: ptr.To(fmt.Sprintf("%s-%d", pg.Name, idx)), + Hostname: ptr.To(pgHostname(pg, idx)), AdvertiseServices: oldAdvertiseServices, AuthKey: authKey, } @@ -867,10 +993,6 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, a conf.ServerURL = &loginServer } - if pg.Spec.HostnamePrefix != "" { - conf.Hostname = ptr.To(fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, idx)) - } - if shouldAcceptRoutes(pc) { conf.AcceptRoutes = "true" } diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go 
index 50d9c2d5fd8f9..5d6d0b8ef9626 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -7,6 +7,7 @@ package main import ( "fmt" + "path/filepath" "slices" "strconv" "strings" @@ -28,6 +29,9 @@ const ( // deletionGracePeriodSeconds is set to 6 minutes to ensure that the pre-stop hook of these proxies have enough chance to terminate gracefully. deletionGracePeriodSeconds int64 = 360 staticEndpointPortName = "static-endpoint-port" + // authAPIServerProxySAName is the ServiceAccount deployed by the helm chart + // if apiServerProxy.authEnabled is true. + authAPIServerProxySAName = "kube-apiserver-auth-proxy" ) func pgNodePortServiceName(proxyGroupName string, replica int32) string { @@ -61,6 +65,9 @@ func pgNodePortService(pg *tsapi.ProxyGroup, name string, namespace string) *cor // Returns the base StatefulSet definition for a ProxyGroup. A ProxyClass may be // applied over the top after. func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, port *uint16, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { + if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { + return kubeAPIServerStatefulSet(pg, namespace, image) + } ss := new(appsv1.StatefulSet) if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err) @@ -167,6 +174,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Value: "$(POD_NAME)", }, { + // TODO(tomhjp): This is tsrecorder-specific and does nothing. Delete. Name: "TS_STATE", Value: "kube:$(POD_NAME)", }, @@ -264,9 +272,124 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string // gracefully. 
ss.Spec.Template.DeletionGracePeriodSeconds = ptr.To(deletionGracePeriodSeconds) } + return ss, nil } +func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string) (*appsv1.StatefulSet, error) { + sts := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + OwnerReferences: pgOwnerReference(pg), + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: ptr.To(pgReplicas(pg)), + Selector: &metav1.LabelSelector{ + MatchLabels: pgLabels(pg.Name, nil), + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: pg.Name, + Namespace: namespace, + Labels: pgLabels(pg.Name, nil), + DeletionGracePeriodSeconds: ptr.To[int64](10), + }, + Spec: corev1.PodSpec{ + ServiceAccountName: pgServiceAccountName(pg), + Containers: []corev1.Container{ + { + Name: mainContainerName, + Image: image, + Env: []corev1.EnvVar{ + { + // Used as default hostname and in Secret names. + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + // Used by kubeclient to post Events about the Pod's lifecycle. + Name: "POD_UID", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, + }, + }, + { + // Used in an interpolated env var if metrics enabled. + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, + { + // Included for completeness with POD_IP and easier backwards compatibility in future. 
+ Name: "POD_IPS", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIPs", + }, + }, + }, + { + Name: "TS_K8S_PROXY_CONFIG", + Value: filepath.Join("/etc/tsconfig/$(POD_NAME)/", kubeAPIServerConfigFile), + }, + }, + VolumeMounts: func() []corev1.VolumeMount { + var mounts []corev1.VolumeMount + + // TODO(tomhjp): Read config directly from the Secret instead. + for i := range pgReplicas(pg) { + mounts = append(mounts, corev1.VolumeMount{ + Name: fmt.Sprintf("k8s-proxy-config-%d", i), + ReadOnly: true, + MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", pg.Name, i), + }) + } + + return mounts + }(), + Ports: []corev1.ContainerPort{ + { + Name: "k8s-proxy", + ContainerPort: 443, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + }, + Volumes: func() []corev1.Volume { + var volumes []corev1.Volume + for i := range pgReplicas(pg) { + volumes = append(volumes, corev1.Volume{ + Name: fmt.Sprintf("k8s-proxy-config-%d", i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: pgConfigSecretName(pg.Name, i), + }, + }, + }) + } + + return volumes + }(), + }, + }, + }, + } + + return sts, nil +} + func pgServiceAccount(pg *tsapi.ProxyGroup, namespace string) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -305,8 +428,8 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { ResourceNames: func() (secrets []string) { for i := range pgReplicas(pg) { secrets = append(secrets, - pgConfigSecretName(pg.Name, i), // Config with auth key. - fmt.Sprintf("%s-%d", pg.Name, i), // State. + pgConfigSecretName(pg.Name, i), // Config with auth key. + pgPodName(pg.Name, i), // State. 
) } return secrets @@ -336,7 +459,7 @@ func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding { Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: pg.Name, + Name: pgServiceAccountName(pg), Namespace: namespace, }, }, @@ -347,6 +470,27 @@ func pgRoleBinding(pg *tsapi.ProxyGroup, namespace string) *rbacv1.RoleBinding { } } +// kube-apiserver proxies in auth mode use a static ServiceAccount. Everything +// else uses a per-ProxyGroup ServiceAccount. +func pgServiceAccountName(pg *tsapi.ProxyGroup) string { + if isAuthAPIServerProxy(pg) { + return authAPIServerProxySAName + } + + return pg.Name +} + +func isAuthAPIServerProxy(pg *tsapi.ProxyGroup) bool { + if pg.Spec.Type != tsapi.ProxyGroupTypeKubernetesAPIServer { + return false + } + + // The default is auth mode. + return pg.Spec.KubeAPIServer == nil || + pg.Spec.KubeAPIServer.Mode == nil || + *pg.Spec.KubeAPIServer.Mode == tsapi.APIServerProxyModeAuth +} + func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.Secret) { for i := range pgReplicas(pg) { secrets = append(secrets, &corev1.Secret{ @@ -418,6 +562,18 @@ func pgReplicas(pg *tsapi.ProxyGroup) int32 { return 2 } +func pgPodName(pgName string, i int32) string { + return fmt.Sprintf("%s-%d", pgName, i) +} + +func pgHostname(pg *tsapi.ProxyGroup, i int32) string { + if pg.Spec.HostnamePrefix != "" { + return fmt.Sprintf("%s-%d", pg.Spec.HostnamePrefix, i) + } + + return fmt.Sprintf("%s-%d", pg.Name, i) +} + func pgConfigSecretName(pgName string, i int32) string { return fmt.Sprintf("%s-%d-config", pgName, i) } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index bd69b49a8978d..c58e427aa06b6 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -629,7 +629,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, - proxyImage: testProxyImage, + tsProxyImage: 
testProxyImage, defaultTags: []string{"tag:test-tag"}, tsFirewallMode: "auto", defaultProxyClass: "default-pc", @@ -772,7 +772,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { t.Run("delete_and_cleanup", func(t *testing.T) { reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, - proxyImage: testProxyImage, + tsProxyImage: testProxyImage, defaultTags: []string{"tag:test-tag"}, tsFirewallMode: "auto", defaultProxyClass: "default-pc", @@ -832,7 +832,7 @@ func TestProxyGroup(t *testing.T) { cl := tstest.NewClock(tstest.ClockOpts{}) reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, - proxyImage: testProxyImage, + tsProxyImage: testProxyImage, defaultTags: []string{"tag:test-tag"}, tsFirewallMode: "auto", defaultProxyClass: "default-pc", @@ -915,7 +915,7 @@ func TestProxyGroup(t *testing.T) { }, } tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) }) @@ -934,7 +934,7 @@ func TestProxyGroup(t *testing.T) { addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar()) 
pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{ Hostname: "hostname-nodeid-2", TailnetIPs: []string{"1.2.3.4", "::1"}, @@ -952,7 +952,7 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) pg.Status.Devices = pg.Status.Devices[:1] // truncate to only the first device. - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupReady, "1/1 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "1/1 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) }) @@ -1025,12 +1025,12 @@ func TestProxyGroupTypes(t *testing.T) { zl, _ := zap.NewDevelopment() reconciler := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - proxyImage: testProxyImage, - Client: fc, - l: zl.Sugar(), - tsClient: &fakeTSClient{}, - clock: tstest.NewClock(tstest.ClockOpts{}), + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + l: zl.Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), } t.Run("egress_type", func(t *testing.T) { @@ -1047,7 +1047,7 @@ func TestProxyGroupTypes(t *testing.T) { mustCreate(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) - verifyProxyGroupCounts(t, reconciler, 0, 1) + verifyProxyGroupCounts(t, reconciler, 0, 1, 0) sts := &appsv1.StatefulSet{} if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { @@ -1161,7 +1161,7 @@ func TestProxyGroupTypes(t *testing.T) { } expectReconciled(t, reconciler, "", pg.Name) - verifyProxyGroupCounts(t, reconciler, 1, 2) + verifyProxyGroupCounts(t, reconciler, 1, 2, 0) sts := &appsv1.StatefulSet{} if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { @@ -1198,6 +1198,44 @@ func TestProxyGroupTypes(t *testing.T) { 
t.Errorf("unexpected volume mounts (-want +got):\n%s", diff) } }) + + t.Run("kubernetes_api_server_type", func(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-k8s-apiserver", + UID: "test-k8s-apiserver-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + Replicas: ptr.To[int32](2), + KubeAPIServer: &tsapi.KubeAPIServerConfig{ + Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), + }, + }, + } + if err := fc.Create(t.Context(), pg); err != nil { + t.Fatal(err) + } + + expectReconciled(t, reconciler, "", pg.Name) + verifyProxyGroupCounts(t, reconciler, 1, 2, 1) + + sts := &appsv1.StatefulSet{} + if err := fc.Get(t.Context(), client.ObjectKey{Namespace: tsNamespace, Name: pg.Name}, sts); err != nil { + t.Fatalf("failed to get StatefulSet: %v", err) + } + + // Verify the StatefulSet configuration for KubernetesAPIServer type. + if sts.Spec.Template.Spec.Containers[0].Name != mainContainerName { + t.Errorf("unexpected container name %s, want %s", sts.Spec.Template.Spec.Containers[0].Name, mainContainerName) + } + if sts.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort != 443 { + t.Errorf("unexpected container port %d, want 443", sts.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort) + } + if sts.Spec.Template.Spec.Containers[0].Ports[0].Name != "k8s-proxy" { + t.Errorf("unexpected port name %s, want k8s-proxy", sts.Spec.Template.Spec.Containers[0].Ports[0].Name) + } + }) } func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { @@ -1206,12 +1244,12 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { WithStatusSubresource(&tsapi.ProxyGroup{}). 
Build() reconciler := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - proxyImage: testProxyImage, - Client: fc, - l: zap.Must(zap.NewDevelopment()).Sugar(), - tsClient: &fakeTSClient{}, - clock: tstest.NewClock(tstest.ClockOpts{}), + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + l: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), } existingServices := []string{"svc1", "svc2"} @@ -1272,6 +1310,170 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { }) } +func TestValidateProxyGroup(t *testing.T) { + type testCase struct { + typ tsapi.ProxyGroupType + pgName string + image string + noauth bool + initContainer bool + staticSAExists bool + expectedErrs int + } + + for name, tc := range map[string]testCase{ + "default_ingress": { + typ: tsapi.ProxyGroupTypeIngress, + }, + "default_kube": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + }, + "default_kube_noauth": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + noauth: true, + // Does not require the static ServiceAccount to exist. 
+ }, + "kube_static_sa_missing": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: false, + expectedErrs: 1, + }, + "kube_noauth_would_overwrite_static_sa": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + noauth: true, + pgName: authAPIServerProxySAName, + expectedErrs: 1, + }, + "ingress_would_overwrite_static_sa": { + typ: tsapi.ProxyGroupTypeIngress, + staticSAExists: true, + pgName: authAPIServerProxySAName, + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_1": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "example.com/tailscale/tailscale", + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_2": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "example.com/tailscale", + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_3": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "example.com/tailscale/tailscale:latest", + expectedErrs: 1, + }, + "tailscale_image_for_kube_pg_4": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + image: "tailscale/tailscale", + expectedErrs: 1, + }, + "k8s_proxy_image_for_ingress_pg": { + typ: tsapi.ProxyGroupTypeIngress, + image: "example.com/k8s-proxy", + expectedErrs: 1, + }, + "init_container_for_kube_pg": { + typ: tsapi.ProxyGroupTypeKubernetesAPIServer, + staticSAExists: true, + initContainer: true, + expectedErrs: 1, + }, + "init_container_for_ingress_pg": { + typ: tsapi.ProxyGroupTypeIngress, + initContainer: true, + }, + "init_container_for_egress_pg": { + typ: tsapi.ProxyGroupTypeEgress, + initContainer: true, + }, + } { + t.Run(name, func(t *testing.T) { + pc := &tsapi.ProxyClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "some-pc", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{}, + }, + }, + } + if tc.image != "" { + pc.Spec.StatefulSet.Pod.TailscaleContainer = &tsapi.Container{ + Image: tc.image, + } 
+ } + if tc.initContainer { + pc.Spec.StatefulSet.Pod.TailscaleInitContainer = &tsapi.Container{} + } + pgName := "some-pg" + if tc.pgName != "" { + pgName = tc.pgName + } + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgName, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tc.typ, + }, + } + if tc.noauth { + pg.Spec.KubeAPIServer = &tsapi.KubeAPIServerConfig{ + Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), + } + } + + var objs []client.Object + if tc.staticSAExists { + objs = append(objs, &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: authAPIServerProxySAName, + Namespace: tsNamespace, + }, + }) + } + r := ProxyGroupReconciler{ + tsNamespace: tsNamespace, + Client: fake.NewClientBuilder(). + WithObjects(objs...). + Build(), + } + + logger, _ := zap.NewDevelopment() + err := r.validate(t.Context(), pg, pc, logger.Sugar()) + if tc.expectedErrs == 0 { + if err != nil { + t.Fatalf("expected no errors, got: %v", err) + } + // Test finished. + return + } + + if err == nil { + t.Fatalf("expected %d errors, got none", tc.expectedErrs) + } + + type unwrapper interface { + Unwrap() []error + } + errs := err.(unwrapper) + if len(errs.Unwrap()) != tc.expectedErrs { + t.Fatalf("expected %d errors, got %d: %v", tc.expectedErrs, len(errs.Unwrap()), err) + } + }) + } +} + func proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsapi.ProxyClass) { pcLEStaging := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{ @@ -1326,7 +1528,7 @@ func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name s return pc } -func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) { +func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress, wantAPIServer int) { t.Helper() if r.ingressProxyGroups.Len() != wantIngress { t.Errorf("expected %d ingress proxy groups, got %d", wantIngress, r.ingressProxyGroups.Len()) @@ -1334,6 +1536,9 @@ func 
verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, if r.egressProxyGroups.Len() != wantEgress { t.Errorf("expected %d egress proxy groups, got %d", wantEgress, r.egressProxyGroups.Len()) } + if r.apiServerProxyGroups.Len() != wantAPIServer { + t.Errorf("expected %d kube-apiserver proxy groups, got %d", wantAPIServer, r.apiServerProxyGroups.Len()) + } } func verifyEnvVar(t *testing.T, sts *appsv1.StatefulSet, name, expectedValue string) { @@ -1512,7 +1717,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { reconciler := &ProxyGroupReconciler{ tsNamespace: tsNamespace, - proxyImage: testProxyImage, + tsProxyImage: testProxyImage, defaultTags: []string{"tag:test"}, defaultProxyClass: tt.defaultProxyClass, Client: fc, diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 193acad87ff0e..fbb271800390a 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -102,6 +102,8 @@ const ( defaultLocalAddrPort = 9002 // metrics and health check port letsEncryptStagingEndpoint = "https://acme-staging-v02.api.letsencrypt.org/directory" + + mainContainerName = "tailscale" ) var ( @@ -761,7 +763,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, } if pc.Spec.UseLetsEncryptStagingEnvironment && (stsCfg.proxyType == proxyTypeIngressResource || stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress)) { for i, c := range ss.Spec.Template.Spec.Containers { - if c.Name == "tailscale" { + if isMainContainer(&c) { ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{ Name: "TS_DEBUG_ACME_DIRECTORY_URL", Value: letsEncryptStagingEndpoint, @@ -829,7 +831,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, return base } for i, c := range ss.Spec.Template.Spec.Containers { - if c.Name == "tailscale" { + if isMainContainer(&c) { ss.Spec.Template.Spec.Containers[i] = updateContainer(wantsPod.TailscaleContainer, 
ss.Spec.Template.Spec.Containers[i]) break } @@ -847,7 +849,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) { for i, c := range ss.Spec.Template.Spec.Containers { - if c.Name == "tailscale" { + if isMainContainer(&c) { if debug { ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, // Serve tailscaled's debug metrics on on @@ -902,6 +904,10 @@ func enableEndpoints(ss *appsv1.StatefulSet, metrics, debug bool) { } } +func isMainContainer(c *corev1.Container) bool { + return c.Name == mainContainerName +} + // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy // state and auth key and returns tailscaled config files for currently supported proxy versions. func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index 9846513c78d74..4247eaaa0040f 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -60,7 +60,6 @@ type HAServiceReconciler struct { recorder record.EventRecorder logger *zap.SugaredLogger tsClient tsClient - tsnetServer tsnetServer tsNamespace string lc localClient defaultTags []string @@ -221,7 +220,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // This checks and ensures that Tailscale Service's owner references are updated // for this Service and errors if that is not possible (i.e. because it // appears that the Tailscale Service has been created by a non-operator actor). 
- updatedAnnotations, err := r.ownerAnnotations(existingTSSvc) + updatedAnnotations, err := ownerAnnotations(r.operatorID, existingTSSvc) if err != nil { instr := fmt.Sprintf("To proceed, you can either manually delete the existing Tailscale Service or choose a different hostname with the '%s' annotaion", AnnotationHostname) msg := fmt.Sprintf("error ensuring ownership of Tailscale Service %s: %v. %s", hostname, err, instr) @@ -395,7 +394,7 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, serviceName := tailcfg.ServiceName("svc:" + hostname) // 1. Clean up the Tailscale Service. - svcChanged, err = r.cleanupTailscaleService(ctx, serviceName, logger) + svcChanged, err = cleanupTailscaleService(ctx, r.tsClient, serviceName, r.operatorID, logger) if err != nil { return false, fmt.Errorf("error deleting Tailscale Service: %w", err) } @@ -456,7 +455,7 @@ func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - svcsChanged, err = r.cleanupTailscaleService(ctx, tailcfg.ServiceName(tsSvcName), logger) + svcsChanged, err = cleanupTailscaleService(ctx, r.tsClient, tailcfg.ServiceName(tsSvcName), r.operatorID, logger) if err != nil { return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err) } @@ -529,8 +528,8 @@ func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, er // If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference. // If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing. // It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. 
-func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, err error) { - svc, err := r.tsClient.GetVIPService(ctx, name) +func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcfg.ServiceName, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) { + svc, err := tsClient.GetVIPService(ctx, name) if isErrorFeatureFlagNotEnabled(err) { msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) logger.Warn(msg) @@ -563,14 +562,14 @@ func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name // cluster before deleting the Ingress. Perhaps the comparison could be // 'if or.OperatorID == r.operatorID || or.ingressUID == r.ingressUID'. ix := slices.IndexFunc(o.OwnerRefs, func(or OwnerRef) bool { - return or.OperatorID == r.operatorID + return or.OperatorID == operatorID }) if ix == -1 { return false, nil } if len(o.OwnerRefs) == 1 { logger.Infof("Deleting Tailscale Service %q", name) - return false, r.tsClient.DeleteVIPService(ctx, name) + return false, tsClient.DeleteVIPService(ctx, name) } o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) logger.Infof("Updating Tailscale Service %q", name) @@ -579,7 +578,7 @@ func (r *HAServiceReconciler) cleanupTailscaleService(ctx context.Context, name return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) } svc.Annotations[ownerAnnotation] = string(json) - return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) + return true, tsClient.CreateOrUpdateVIPService(ctx, svc) } func (a *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceName, replicaName, pgName string, wantsCfg *ingressservices.Config, logger *zap.SugaredLogger) (bool, error) { @@ -742,49 +741,6 @@ func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName return count, nil } -// ownerAnnotations returns the updated 
annotations required to ensure this -// instance of the operator is included as an owner. If the Tailscale Service is not -// nil, but does not contain an owner we return an error as this likely means -// that the Tailscale Service was created by something other than a Tailscale -// Kubernetes operator. -func (r *HAServiceReconciler) ownerAnnotations(svc *tailscale.VIPService) (map[string]string, error) { - ref := OwnerRef{ - OperatorID: r.operatorID, - } - if svc == nil { - c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} - json, err := json.Marshal(c) - if err != nil { - return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service owner annotation contents: %w, please report this", err) - } - return map[string]string{ - ownerAnnotation: string(json), - }, nil - } - o, err := parseOwnerAnnotation(svc) - if err != nil { - return nil, err - } - if o == nil || len(o.OwnerRefs) == 0 { - return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) - } - if slices.Contains(o.OwnerRefs, ref) { // up to date - return svc.Annotations, nil - } - o.OwnerRefs = append(o.OwnerRefs, ref) - json, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshalling updated owner references: %w", err) - } - - newAnnots := make(map[string]string, len(svc.Annotations)+1) - for k, v := range svc.Annotations { - newAnnots[k] = v - } - newAnnots[ownerAnnotation] = string(json) - return newAnnots, nil -} - // dnsNameForService returns the DNS name for the given Tailscale Service name. 
func (r *HAServiceReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { s := svc.WithoutPrefix() diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index e08bfd80d318b..054c3ed49f5cb 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -187,7 +187,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien if err := fc.Status().Update(context.Background(), pg); err != nil { t.Fatal(err) } - fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} ft := &fakeTSClient{} zl, err := zap.NewDevelopment() @@ -210,7 +209,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien clock: cl, defaultTags: []string{"tag:k8s"}, tsNamespace: "operator-ns", - tsnetServer: fakeTsnetServer, logger: zl.Sugar(), recorder: record.NewFakeRecorder(10), lc: lc, diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go new file mode 100644 index 0000000000000..6e7eadb7303b5 --- /dev/null +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -0,0 +1,197 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// k8s-proxy proxies between tailnet and Kubernetes cluster traffic. +// Currently, it only supports proxying tailnet clients to the Kubernetes API +// server. 
+package main + +import ( + "context" + "errors" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "golang.org/x/sync/errgroup" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "tailscale.com/hostinfo" + "tailscale.com/ipn" + "tailscale.com/ipn/store" + apiproxy "tailscale.com/k8s-operator/api-proxy" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/state" + "tailscale.com/tsnet" +) + +func main() { + logger := zap.Must(zap.NewProduction()).Sugar() + defer logger.Sync() + if err := run(logger); err != nil { + logger.Fatal(err.Error()) + } +} + +func run(logger *zap.SugaredLogger) error { + var ( + configFile = os.Getenv("TS_K8S_PROXY_CONFIG") + podUID = os.Getenv("POD_UID") + ) + if configFile == "" { + return errors.New("TS_K8S_PROXY_CONFIG unset") + } + + // TODO(tomhjp): Support reloading config. + // TODO(tomhjp): Support reading config from a Secret. + cfg, err := conf.Load(configFile) + if err != nil { + return fmt.Errorf("error loading config file %q: %w", configFile, err) + } + + if cfg.Parsed.LogLevel != nil { + level, err := zapcore.ParseLevel(*cfg.Parsed.LogLevel) + if err != nil { + return fmt.Errorf("error parsing log level %q: %w", *cfg.Parsed.LogLevel, err) + } + logger = logger.WithOptions(zap.IncreaseLevel(level)) + } + + if cfg.Parsed.App != nil { + hostinfo.SetApp(*cfg.Parsed.App) + } + + st, err := getStateStore(cfg.Parsed.State, logger) + if err != nil { + return err + } + + // If Pod UID unset, assume we're running outside of a cluster/not managed + // by the operator, so no need to set additional state keys. 
+ if podUID != "" { + if err := state.SetInitialKeys(st, podUID); err != nil { + return fmt.Errorf("error setting initial state: %w", err) + } + } + + var authKey string + if cfg.Parsed.AuthKey != nil { + authKey = *cfg.Parsed.AuthKey + } + + ts := &tsnet.Server{ + Logf: logger.Named("tsnet").Debugf, + UserLogf: logger.Named("tsnet").Infof, + Store: st, + AuthKey: authKey, + } + if cfg.Parsed.Hostname != nil { + ts.Hostname = *cfg.Parsed.Hostname + } + + // ctx to live for the lifetime of the process. + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + // Make sure we crash loop if Up doesn't complete in reasonable time. + upCtx, upCancel := context.WithTimeout(ctx, time.Minute) + defer upCancel() + if _, err := ts.Up(upCtx); err != nil { + return fmt.Errorf("error starting tailscale server: %w", err) + } + defer ts.Close() + + group, groupCtx := errgroup.WithContext(ctx) + + // Setup for updating state keys. + if podUID != "" { + lc, err := ts.LocalClient() + if err != nil { + return fmt.Errorf("error getting local client: %w", err) + } + w, err := lc.WatchIPNBus(groupCtx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("error watching IPN bus: %w", err) + } + defer w.Close() + + group.Go(func() error { + if err := state.KeepKeysUpdated(st, w.Next); err != nil && err != groupCtx.Err() { + return fmt.Errorf("error keeping state keys updated: %w", err) + } + + return nil + }) + } + + // Setup for the API server proxy. 
+ restConfig, err := getRestConfig(logger) + if err != nil { + return fmt.Errorf("error getting rest config: %w", err) + } + authMode := true + if cfg.Parsed.KubeAPIServer != nil { + v, ok := cfg.Parsed.KubeAPIServer.AuthMode.Get() + if ok { + authMode = v + } + } + ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode) + if err != nil { + return fmt.Errorf("error creating api server proxy: %w", err) + } + + // TODO(tomhjp): Work out whether we should use TS_CERT_SHARE_MODE or not, + // and possibly issue certs upfront here before serving. + group.Go(func() error { + if err := ap.Run(groupCtx); err != nil { + return fmt.Errorf("error running API server proxy: %w", err) + } + + return nil + }) + + return group.Wait() +} + +func getStateStore(path *string, logger *zap.SugaredLogger) (ipn.StateStore, error) { + p := "mem:" + if path != nil { + p = *path + } else { + logger.Warn("No state Secret provided; using in-memory store, which will lose state on restart") + } + st, err := store.New(logger.Errorf, p) + if err != nil { + return nil, fmt.Errorf("error creating state store: %w", err) + } + + return st, nil +} + +func getRestConfig(logger *zap.SugaredLogger) (*rest.Config, error) { + restConfig, err := rest.InClusterConfig() + switch err { + case nil: + return restConfig, nil + case rest.ErrNotInCluster: + logger.Info("Not running in-cluster, falling back to kubeconfig") + default: + return nil, fmt.Errorf("error getting in-cluster config: %w", err) + } + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, nil) + restConfig, err = clientConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("error loading kubeconfig: %w", err) + } + + return restConfig, nil +} diff --git a/k8s-operator/api-proxy/env.go b/k8s-operator/api-proxy/env.go deleted file mode 100644 index c0640ab1e16bf..0000000000000 --- 
a/k8s-operator/api-proxy/env.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !plan9 - -package apiproxy - -import ( - "os" - - "tailscale.com/types/opt" -) - -func defaultBool(envName string, defVal bool) bool { - vs := os.Getenv(envName) - if vs == "" { - return defVal - } - v, _ := opt.Bool(vs).Get() - return v -} - -func defaultEnv(envName, defVal string) string { - v := os.Getenv(envName) - if v == "" { - return defVal - } - return v -} diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index 7c7260b94af39..c3c13e7846915 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -6,17 +6,17 @@ package apiproxy import ( + "context" "crypto/tls" + "errors" "fmt" - "log" "net/http" "net/http/httputil" "net/netip" "net/url" - "os" "strings" + "time" - "github.com/pkg/errors" "go.uber.org/zap" "k8s.io/client-go/rest" "k8s.io/client-go/transport" @@ -37,123 +37,52 @@ var ( whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) ) -type APIServerProxyMode int - -func (a APIServerProxyMode) String() string { - switch a { - case APIServerProxyModeDisabled: - return "disabled" - case APIServerProxyModeEnabled: - return "auth" - case APIServerProxyModeNoAuth: - return "noauth" - default: - return "unknown" - } -} - -const ( - APIServerProxyModeDisabled APIServerProxyMode = iota - APIServerProxyModeEnabled - APIServerProxyModeNoAuth -) - -func ParseAPIProxyMode() APIServerProxyMode { - haveAuthProxyEnv := os.Getenv("AUTH_PROXY") != "" - haveAPIProxyEnv := os.Getenv("APISERVER_PROXY") != "" - switch { - case haveAPIProxyEnv && haveAuthProxyEnv: - log.Fatal("AUTH_PROXY and APISERVER_PROXY are mutually exclusive") - case haveAuthProxyEnv: - var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated - if authProxyEnv { - return APIServerProxyModeEnabled - } - return APIServerProxyModeDisabled - case haveAPIProxyEnv: - var 
apiProxyEnv = defaultEnv("APISERVER_PROXY", "") // true, false or "noauth" - switch apiProxyEnv { - case "true": - return APIServerProxyModeEnabled - case "false", "": - return APIServerProxyModeDisabled - case "noauth": - return APIServerProxyModeNoAuth - default: - panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv)) - } - } - return APIServerProxyModeDisabled -} - -// maybeLaunchAPIServerProxy launches the auth proxy, which is a small HTTP server -// that authenticates requests using the Tailscale LocalAPI and then proxies -// them to the kube-apiserver. -func MaybeLaunchAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, s *tsnet.Server, mode APIServerProxyMode) { - if mode == APIServerProxyModeDisabled { - return - } - startlog := zlog.Named("launchAPIProxy") - if mode == APIServerProxyModeNoAuth { +// NewAPIServerProxy creates a new APIServerProxy that's ready to start once Run +// is called. No network traffic will flow until Run is called. +// +// authMode controls how the proxy behaves: +// - true: the proxy is started and requests are impersonated using the +// caller's Tailscale identity and the rules defined in the tailnet ACLs. +// - false: the proxy is started and requests are passed through to the +// Kubernetes API without any auth modifications. +func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, authMode bool) (*APIServerProxy, error) { + if !authMode { restConfig = rest.AnonymousClientConfig(restConfig) } cfg, err := restConfig.TransportConfig() if err != nil { - startlog.Fatalf("could not get rest.TransportConfig(): %v", err) + return nil, fmt.Errorf("could not get rest.TransportConfig(): %w", err) } - // Kubernetes uses SPDY for exec and port-forward, however SPDY is - // incompatible with HTTP/2; so disable HTTP/2 in the proxy. 
tr := http.DefaultTransport.(*http.Transport).Clone() tr.TLSClientConfig, err = transport.TLSConfigFor(cfg) if err != nil { - startlog.Fatalf("could not get transport.TLSConfigFor(): %v", err) + return nil, fmt.Errorf("could not get transport.TLSConfigFor(): %w", err) } tr.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper) rt, err := transport.HTTPWrappersForConfig(cfg, tr) if err != nil { - startlog.Fatalf("could not get rest.TransportConfig(): %v", err) + return nil, fmt.Errorf("could not get rest.TransportConfig(): %w", err) } - go runAPIServerProxy(s, rt, zlog.Named("apiserver-proxy"), mode, restConfig.Host) -} -// runAPIServerProxy runs an HTTP server that authenticates requests using the -// Tailscale LocalAPI and then proxies them to the Kubernetes API. -// It listens on :443 and uses the Tailscale HTTPS certificate. -// s will be started if it is not already running. -// rt is used to proxy requests to the Kubernetes API. -// -// mode controls how the proxy behaves: -// - apiserverProxyModeDisabled: the proxy is not started. -// - apiserverProxyModeEnabled: the proxy is started and requests are impersonated using the -// caller's identity from the Tailscale LocalAPI. -// - apiserverProxyModeNoAuth: the proxy is started and requests are not impersonated and -// are passed through to the Kubernetes API. -// -// It never returns. 
-func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredLogger, mode APIServerProxyMode, host string) { - if mode == APIServerProxyModeDisabled { - return - } - ln, err := ts.Listen("tcp", ":443") + u, err := url.Parse(restConfig.Host) if err != nil { - log.Fatalf("could not listen on :443: %v", err) + return nil, fmt.Errorf("failed to parse URL %w", err) } - u, err := url.Parse(host) - if err != nil { - log.Fatalf("runAPIServerProxy: failed to parse URL %v", err) + if u.Scheme == "" || u.Host == "" { + return nil, fmt.Errorf("the API server proxy requires host and scheme but got: %q", restConfig.Host) } lc, err := ts.LocalClient() if err != nil { - log.Fatalf("could not get local client: %v", err) + return nil, fmt.Errorf("could not get local client: %w", err) } - ap := &apiserverProxy{ - log: log, + ap := &APIServerProxy{ + log: zlog, lc: lc, - mode: mode, + authMode: authMode, upstreamURL: u, ts: ts, } @@ -164,41 +93,69 @@ func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredL Transport: rt, } + return ap, nil +} + +// Run starts the HTTP server that authenticates requests using the +// Tailscale LocalAPI and then proxies them to the Kubernetes API. +// It listens on :443 and uses the Tailscale HTTPS certificate. +// +// It return when ctx is cancelled or ServeTLS fails. +func (ap *APIServerProxy) Run(ctx context.Context) error { + ln, err := ap.ts.Listen("tcp", ":443") + if err != nil { + return fmt.Errorf("could not listen on :443: %v", err) + } + mux := http.NewServeMux() mux.HandleFunc("/", ap.serveDefault) mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecSPDY) mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecWS) - hs := &http.Server{ + ap.hs = &http.Server{ // Kubernetes uses SPDY for exec and port-forward, however SPDY is // incompatible with HTTP/2; so disable HTTP/2 in the proxy. 
TLSConfig: &tls.Config{ - GetCertificate: lc.GetCertificate, + GetCertificate: ap.lc.GetCertificate, NextProtos: []string{"http/1.1"}, }, TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), Handler: mux, } - log.Infof("API server proxy in %q mode is listening on %s", mode, ln.Addr()) - if err := hs.ServeTLS(ln, "", ""); err != nil { - log.Fatalf("runAPIServerProxy: failed to serve %v", err) + + errs := make(chan error) + go func() { + ap.log.Infof("API server proxy is listening on %s with auth mode: %v", ln.Addr(), ap.authMode) + if err := ap.hs.ServeTLS(ln, "", ""); err != nil && err != http.ErrServerClosed { + errs <- fmt.Errorf("failed to serve: %w", err) + } + }() + + select { + case <-ctx.Done(): + shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + return ap.hs.Shutdown(shutdownCtx) + case err := <-errs: + return err } } -// apiserverProxy is an [net/http.Handler] that authenticates requests using the Tailscale +// APIServerProxy is an [net/http.Handler] that authenticates requests using the Tailscale // LocalAPI and then proxies them to the Kubernetes API. -type apiserverProxy struct { +type APIServerProxy struct { log *zap.SugaredLogger lc *local.Client rp *httputil.ReverseProxy - mode APIServerProxyMode + authMode bool ts *tsnet.Server + hs *http.Server upstreamURL *url.URL } // serveDefault is the default handler for Kubernetes API server requests. -func (ap *apiserverProxy) serveDefault(w http.ResponseWriter, r *http.Request) { +func (ap *APIServerProxy) serveDefault(w http.ResponseWriter, r *http.Request) { who, err := ap.whoIs(r) if err != nil { ap.authError(w, err) @@ -210,17 +167,17 @@ func (ap *apiserverProxy) serveDefault(w http.ResponseWriter, r *http.Request) { // serveExecSPDY serves 'kubectl exec' requests for sessions streamed over SPDY, // optionally configuring the kubectl exec sessions to be recorded. 
-func (ap *apiserverProxy) serveExecSPDY(w http.ResponseWriter, r *http.Request) { +func (ap *APIServerProxy) serveExecSPDY(w http.ResponseWriter, r *http.Request) { ap.execForProto(w, r, ksr.SPDYProtocol) } // serveExecWS serves 'kubectl exec' requests for sessions streamed over WebSocket, // optionally configuring the kubectl exec sessions to be recorded. -func (ap *apiserverProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { +func (ap *APIServerProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { ap.execForProto(w, r, ksr.WSProtocol) } -func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, proto ksr.Protocol) { +func (ap *APIServerProxy) execForProto(w http.ResponseWriter, r *http.Request, proto ksr.Protocol) { const ( podNameKey = "pod" namespaceNameKey = "namespace" @@ -282,10 +239,10 @@ func (ap *apiserverProxy) execForProto(w http.ResponseWriter, r *http.Request, p ap.rp.ServeHTTP(h, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } -func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { - r.URL.Scheme = h.upstreamURL.Scheme - r.URL.Host = h.upstreamURL.Host - if h.mode == APIServerProxyModeNoAuth { +func (ap *APIServerProxy) addImpersonationHeadersAsRequired(r *http.Request) { + r.URL.Scheme = ap.upstreamURL.Scheme + r.URL.Host = ap.upstreamURL.Host + if !ap.authMode { // If we are not providing authentication, then we are just // proxying to the Kubernetes API, so we don't need to do // anything else. @@ -310,16 +267,16 @@ func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { } // Now add the impersonation headers that we want. 
- if err := addImpersonationHeaders(r, h.log); err != nil { - log.Print("failed to add impersonation headers: ", err.Error()) + if err := addImpersonationHeaders(r, ap.log); err != nil { + ap.log.Errorf("failed to add impersonation headers: %v", err) } } -func (ap *apiserverProxy) whoIs(r *http.Request) (*apitype.WhoIsResponse, error) { +func (ap *APIServerProxy) whoIs(r *http.Request) (*apitype.WhoIsResponse, error) { return ap.lc.WhoIs(r.Context(), r.RemoteAddr) } -func (ap *apiserverProxy) authError(w http.ResponseWriter, err error) { +func (ap *APIServerProxy) authError(w http.ResponseWriter, err error) { ap.log.Errorf("failed to authenticate caller: %v", err) http.Error(w, "failed to authenticate caller", http.StatusInternalServerError) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 18bf1cb50400f..c09152da6f6c1 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -21,6 +21,21 @@ +#### APIServerProxyMode + +_Underlying type:_ _string_ + + + +_Validation:_ +- Enum: [auth noauth] +- Type: string + +_Appears in:_ +- [KubeAPIServerConfig](#kubeapiserverconfig) + + + #### AppConnector @@ -142,7 +157,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `env` _[Env](#env) array_ | List of environment variables to set in the container.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
Note that environment variables provided here will take precedence
over Tailscale-specific environment variables set by the operator,
however running proxies with custom values for Tailscale environment
variables (i.e. TS_USERSPACE) is not recommended and might break in
the future. | | | -| `image` _string_ | Container image name. By default images are pulled from
docker.io/tailscale/tailscale, but the official images are also
available at ghcr.io/tailscale/tailscale. Specifying image name here
will override any proxy image values specified via the Kubernetes
operator's Helm chart values or PROXY_IMAGE env var in the operator
Deployment.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | | +| `image` _string_ | Container image name. By default images are pulled from docker.io/tailscale,
but the official images are also available at ghcr.io/tailscale.
For all uses except on ProxyGroups of type "kube-apiserver", this image must
be either tailscale/tailscale, or an equivalent mirror of that image.
To apply to ProxyGroups of type "kube-apiserver", this image must be
tailscale/k8s-proxy or a mirror of that image.
For "tailscale/tailscale"-based proxies, specifying image name here will
override any proxy image values specified via the Kubernetes operator's
Helm chart values or PROXY_IMAGE env var in the operator Deployment.
For "tailscale/k8s-proxy"-based proxies, there is currently no way to
configure your own default, and this field is the only way to use a
custom image.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | | | `imagePullPolicy` _[PullPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#pullpolicy-v1-core)_ | Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image | | Enum: [Always Never IfNotPresent]
| | `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#resourcerequirements-v1-core)_ | Container resource requirements.
By default Tailscale Kubernetes operator does not apply any resource
requirements. The amount of resources required will depend on the
amount of resources the operator needs to parse, usage patterns and
cluster size.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources | | | | `securityContext` _[SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#securitycontext-v1-core)_ | Container security context.
Security context specified here will override the security context set by the operator.
By default the operator sets the Tailscale container and the Tailscale init container to privileged
for proxies created for Tailscale ingress and egress Service, Connector and ProxyGroup.
You can reduce the permissions of the Tailscale container to cap NET_ADMIN by
installing a device plugin in your cluster and configuring the proxies' tun device to be created
by the device plugin, see https://github.com/tailscale/tailscale/issues/10814#issuecomment-2479977752
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context | | | @@ -313,6 +328,22 @@ _Appears in:_ +#### KubeAPIServerConfig + + + +KubeAPIServerConfig contains configuration specific to the kube-apiserver ProxyGroup type. + + + +_Appears in:_ +- [ProxyGroupSpec](#proxygroupspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `mode` _[APIServerProxyMode](#apiserverproxymode)_ | Mode to run the API server proxy in. Supported modes are auth and noauth.
In auth mode, requests from the tailnet proxied over to the Kubernetes
API server are additionally impersonated using the sender's tailnet identity.
If not specified, defaults to auth mode. | | Enum: [auth noauth]
Type: string
| + + #### LabelValue _Underlying type:_ _string_ @@ -459,7 +490,7 @@ _Appears in:_ | `annotations` _object (keys:string, values:string)_ | Annotations that will be added to the proxy Pod.
Any annotations specified here will be merged with the default
annotations applied to the Pod by the Tailscale Kubernetes operator.
Annotations must be valid Kubernetes annotations.
https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set | | | | `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#affinity-v1-core)_ | Proxy Pod's affinity rules.
By default, the Tailscale Kubernetes operator does not apply any affinity rules.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#affinity | | | | `tailscaleContainer` _[Container](#container)_ | Configuration for the proxy container running tailscale. | | | -| `tailscaleInitContainer` _[Container](#container)_ | Configuration for the proxy init container that enables forwarding. | | | +| `tailscaleInitContainer` _[Container](#container)_ | Configuration for the proxy init container that enables forwarding.
Not valid to apply to ProxyGroups of type "kube-apiserver". | | | | `securityContext` _[PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#podsecuritycontext-v1-core)_ | Proxy Pod's security context.
By default Tailscale Kubernetes operator does not apply any Pod
security context.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-2 | | | | `imagePullSecrets` _[LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#localobjectreference-v1-core) array_ | Proxy Pod's image pull Secrets.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec | | | | `nodeName` _string_ | Proxy Pod's node name.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | @@ -638,11 +669,12 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress and ingress.
Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress]
Type: string
| +| `type` _[ProxyGroupType](#proxygrouptype)_ | Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver.
Type is immutable once a ProxyGroup is created. | | Enum: [egress ingress kube-apiserver]
Type: string
| | `tags` _[Tags](#tags)_ | Tags that the Tailscale devices will be tagged with. Defaults to [tag:k8s].
If you specify custom tags here, make sure you also make the operator
an owner of these tags.
See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
Tags cannot be changed once a ProxyGroup device has been created.
Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
Type: string
| | `replicas` _integer_ | Replicas specifies how many replicas to create the StatefulSet with.
Defaults to 2. | | Minimum: 0
| | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
by the ProxyGroup. Each device will have the integer number from its
StatefulSet pod appended to this prefix to form the full hostname.
HostnamePrefix can contain lower case letters, numbers and dashes, it
must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
Type: string
| | `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains
configuration options that should be applied to the resources created
for this ProxyGroup. If unset, and there is no default ProxyClass
configured, the operator will create resources with the default
configuration. | | | +| `kubeAPIServer` _[KubeAPIServerConfig](#kubeapiserverconfig)_ | KubeAPIServer contains configuration specific to the kube-apiserver
ProxyGroup type. This field is only used when Type is set to "kube-apiserver". | | | #### ProxyGroupStatus @@ -669,7 +701,7 @@ _Underlying type:_ _string_ _Validation:_ -- Enum: [egress ingress] +- Enum: [egress ingress kube-apiserver] - Type: string _Appears in:_ diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 9221c60f3c870..6a4114bfa83da 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -264,6 +264,7 @@ type Pod struct { // +optional TailscaleContainer *Container `json:"tailscaleContainer,omitempty"` // Configuration for the proxy init container that enables forwarding. + // Not valid to apply to ProxyGroups of type "kube-apiserver". // +optional TailscaleInitContainer *Container `json:"tailscaleInitContainer,omitempty"` // Proxy Pod's security context. @@ -364,12 +365,21 @@ type Container struct { // the future. // +optional Env []Env `json:"env,omitempty"` - // Container image name. By default images are pulled from - // docker.io/tailscale/tailscale, but the official images are also - // available at ghcr.io/tailscale/tailscale. Specifying image name here - // will override any proxy image values specified via the Kubernetes - // operator's Helm chart values or PROXY_IMAGE env var in the operator - // Deployment. + // Container image name. By default images are pulled from docker.io/tailscale, + // but the official images are also available at ghcr.io/tailscale. + // + // For all uses except on ProxyGroups of type "kube-apiserver", this image must + // be either tailscale/tailscale, or an equivalent mirror of that image. + // To apply to ProxyGroups of type "kube-apiserver", this image must be + // tailscale/k8s-proxy or a mirror of that image. 
+ // + // For "tailscale/tailscale"-based proxies, specifying image name here will + // override any proxy image values specified via the Kubernetes operator's + // Helm chart values or PROXY_IMAGE env var in the operator Deployment. + // For "tailscale/k8s-proxy"-based proxies, there is currently no way to + // configure your own default, and this field is the only way to use a + // custom image. + // // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#image // +optional Image string `json:"image,omitempty"` diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index 5edb47f0da6c3..ad5b113612bbf 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -49,7 +49,7 @@ type ProxyGroupList struct { } type ProxyGroupSpec struct { - // Type of the ProxyGroup proxies. Supported types are egress and ingress. + // Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. // Type is immutable once a ProxyGroup is created. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ProxyGroup type is immutable" Type ProxyGroupType `json:"type"` @@ -84,6 +84,11 @@ type ProxyGroupSpec struct { // configuration. // +optional ProxyClass string `json:"proxyClass,omitempty"` + + // KubeAPIServer contains configuration specific to the kube-apiserver + // ProxyGroup type. This field is only used when Type is set to "kube-apiserver". 
+ // +optional + KubeAPIServer *KubeAPIServerConfig `json:"kubeAPIServer,omitempty"` } type ProxyGroupStatus struct { @@ -122,14 +127,34 @@ type TailnetDevice struct { } // +kubebuilder:validation:Type=string -// +kubebuilder:validation:Enum=egress;ingress +// +kubebuilder:validation:Enum=egress;ingress;kube-apiserver type ProxyGroupType string const ( - ProxyGroupTypeEgress ProxyGroupType = "egress" - ProxyGroupTypeIngress ProxyGroupType = "ingress" + ProxyGroupTypeEgress ProxyGroupType = "egress" + ProxyGroupTypeIngress ProxyGroupType = "ingress" + ProxyGroupTypeKubernetesAPIServer ProxyGroupType = "kube-apiserver" +) + +// +kubebuilder:validation:Type=string +// +kubebuilder:validation:Enum=auth;noauth +type APIServerProxyMode string + +const ( + APIServerProxyModeAuth APIServerProxyMode = "auth" + APIServerProxyModeNoAuth APIServerProxyMode = "noauth" ) // +kubebuilder:validation:Type=string // +kubebuilder:validation:Pattern=`^[a-z0-9][a-z0-9-]{0,61}$` type HostnamePrefix string + +// KubeAPIServerConfig contains configuration specific to the kube-apiserver ProxyGroup type. +type KubeAPIServerConfig struct { + // Mode to run the API server proxy in. Supported modes are auth and noauth. + // In auth mode, requests from the tailnet proxied over to the Kubernetes + // API server are additionally impersonated using the sender's tailnet identity. + // If not specified, defaults to auth mode. + // +optional + Mode *APIServerProxyMode `json:"mode,omitempty"` +} diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index ffc04d3b9dde3..32adbd6804ed0 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -316,6 +316,26 @@ func (in *Env) DeepCopy() *Env { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(APIServerProxyMode) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig. +func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in Labels) DeepCopyInto(out *Labels) { { @@ -731,6 +751,11 @@ func (in *ProxyGroupSpec) DeepCopyInto(out *ProxyGroupSpec) { *out = new(int32) **out = **in } + if in.KubeAPIServer != nil { + in, out := &in.KubeAPIServer, &out.KubeAPIServer + *out = new(KubeAPIServerConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupSpec. diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go new file mode 100644 index 0000000000000..6b0e853c5c21c --- /dev/null +++ b/kube/k8s-proxy/conf/conf.go @@ -0,0 +1,101 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package conf contains code to load, manipulate, and access config file +// settings for k8s-proxy. +package conf + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/tailscale/hujson" + "tailscale.com/types/opt" +) + +const v1Alpha1 = "v1alpha1" + +// Config describes a config file. +type Config struct { + Path string // disk path of HuJSON + Raw []byte // raw bytes from disk, in HuJSON form + Std []byte // standardized JSON form + Version string // "v1alpha1" + + // Parsed is the parsed config, converted from its on-disk version to the + // latest known format. 
+ Parsed ConfigV1Alpha1 +} + +// VersionedConfig allows specifying config at the root of the object, or in +// a versioned sub-object. +// e.g. {"version": "v1alpha1", "authKey": "abc123"} +// or {"version": "v1beta1", "a-beta-config": "a-beta-value", "v1alpha1": {"authKey": "abc123"}} +type VersionedConfig struct { + Version string `json:",omitempty"` // "v1alpha1" + + // Latest version of the config. + *ConfigV1Alpha1 + + // Backwards compatibility version(s) of the config. Fields and sub-fields + // from here should only be added to, never changed in place. + V1Alpha1 *ConfigV1Alpha1 `json:",omitempty"` + // V1Beta1 *ConfigV1Beta1 `json:",omitempty"` // Not yet used. +} + +type ConfigV1Alpha1 struct { + AuthKey *string `json:",omitempty"` // Tailscale auth key to use. + Hostname *string `json:",omitempty"` // Tailscale device hostname. + State *string `json:",omitempty"` // Path to the Tailscale state. + LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". + App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer + KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy. +} + +type KubeAPIServer struct { + AuthMode opt.Bool `json:",omitempty"` +} + +// Load reads and parses the config file at the provided path on disk. 
+func Load(path string) (c Config, err error) { + c.Path = path + + c.Raw, err = os.ReadFile(path) + if err != nil { + return c, fmt.Errorf("error reading config file %q: %w", path, err) + } + c.Std, err = hujson.Standardize(c.Raw) + if err != nil { + return c, fmt.Errorf("error parsing config file %q HuJSON/JSON: %w", path, err) + } + var ver VersionedConfig + if err := json.Unmarshal(c.Std, &ver); err != nil { + return c, fmt.Errorf("error parsing config file %q: %w", path, err) + } + rootV1Alpha1 := (ver.Version == v1Alpha1) + backCompatV1Alpha1 := (ver.V1Alpha1 != nil) + switch { + case ver.Version == "": + return c, fmt.Errorf("error parsing config file %q: no \"version\" field provided", path) + case rootV1Alpha1 && backCompatV1Alpha1: + // Exactly one of these should be set. + return c, fmt.Errorf("error parsing config file %q: both root and v1alpha1 config provided", path) + case rootV1Alpha1 != backCompatV1Alpha1: + c.Version = v1Alpha1 + switch { + case rootV1Alpha1 && ver.ConfigV1Alpha1 != nil: + c.Parsed = *ver.ConfigV1Alpha1 + case backCompatV1Alpha1: + c.Parsed = *ver.V1Alpha1 + default: + c.Parsed = ConfigV1Alpha1{} + } + default: + return c, fmt.Errorf("error parsing config file %q: unsupported \"version\" value %q; want \"%s\"", path, ver.Version, v1Alpha1) + } + + return c, nil +} diff --git a/kube/k8s-proxy/conf/conf_test.go b/kube/k8s-proxy/conf/conf_test.go new file mode 100644 index 0000000000000..a47391dc90ade --- /dev/null +++ b/kube/k8s-proxy/conf/conf_test.go @@ -0,0 +1,86 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package conf + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "tailscale.com/types/ptr" +) + +// Test that the config file can be at the root of the object, or in a versioned sub-object. 
+// or {"version": "v1beta1", "a-beta-config": "a-beta-value", "v1alpha1": {"authKey": "abc123"}} +func TestVersionedConfig(t *testing.T) { + testCases := map[string]struct { + inputConfig string + expectedConfig ConfigV1Alpha1 + expectedError string + }{ + "root_config_v1alpha1": { + inputConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")}, + }, + "backwards_compat_v1alpha1_config": { + // Client doesn't know about v1beta1, so it should read in v1alpha1. + inputConfig: `{"version": "v1beta1", "beta-key": "beta-value", "authKey": "def456", "v1alpha1": {"authKey": "abc123"}}`, + expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")}, + }, + "unknown_key_allowed": { + // Adding new keys to the config doesn't require a version bump. + inputConfig: `{"version": "v1alpha1", "unknown-key": "unknown-value", "authKey": "abc123"}`, + expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")}, + }, + "version_only_no_authkey": { + inputConfig: `{"version": "v1alpha1"}`, + expectedConfig: ConfigV1Alpha1{}, + }, + "both_config_v1alpha1": { + inputConfig: `{"version": "v1alpha1", "authKey": "abc123", "v1alpha1": {"authKey": "def456"}}`, + expectedError: "both root and v1alpha1 config provided", + }, + "empty_config": { + inputConfig: `{}`, + expectedError: `no "version" field provided`, + }, + "v1beta1_without_backwards_compat": { + inputConfig: `{"version": "v1beta1", "beta-key": "beta-value", "authKey": "def456"}`, + expectedError: `unsupported "version" value "v1beta1"; want "v1alpha1"`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "config.json") + if err := os.WriteFile(path, []byte(tc.inputConfig), 0644); err != nil { + t.Fatalf("failed to write config file: %v", err) + } + cfg, err := Load(path) + switch { + case tc.expectedError == "" && err != nil: + t.Fatalf("unexpected error: %v", err) + case tc.expectedError != "": + 
if err == nil { + t.Fatalf("expected error %q, got nil", tc.expectedError) + } else if !strings.Contains(err.Error(), tc.expectedError) { + t.Fatalf("expected error %q, got %q", tc.expectedError, err.Error()) + } + return + } + if cfg.Version != "v1alpha1" { + t.Fatalf("expected version %q, got %q", "v1alpha1", cfg.Version) + } + // Diff actual vs expected config. + if diff := cmp.Diff(cfg.Parsed, tc.expectedConfig); diff != "" { + t.Fatalf("Unexpected parsed config (-got +want):\n%s", diff) + } + }) + } +} diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 6f96875dddd0f..20b0050143c93 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -5,14 +5,15 @@ package kubetypes const ( // Hostinfo App values for the Tailscale Kubernetes Operator components. - AppOperator = "k8s-operator" - AppAPIServerProxy = "k8s-operator-proxy" - AppIngressProxy = "k8s-operator-ingress-proxy" - AppIngressResource = "k8s-operator-ingress-resource" - AppEgressProxy = "k8s-operator-egress-proxy" - AppConnector = "k8s-operator-connector-resource" - AppProxyGroupEgress = "k8s-operator-proxygroup-egress" - AppProxyGroupIngress = "k8s-operator-proxygroup-ingress" + AppOperator = "k8s-operator" + AppInProcessAPIServerProxy = "k8s-operator-proxy" + AppIngressProxy = "k8s-operator-ingress-proxy" + AppIngressResource = "k8s-operator-ingress-resource" + AppEgressProxy = "k8s-operator-egress-proxy" + AppConnector = "k8s-operator-connector-resource" + AppProxyGroupEgress = "k8s-operator-proxygroup-egress" + AppProxyGroupIngress = "k8s-operator-proxygroup-ingress" + AppProxyGroupKubeAPIServer = "k8s-operator-proxygroup-kube-apiserver" // Clientmetrics for Tailscale Kubernetes Operator components MetricIngressProxyCount = "k8s_ingress_proxies" // L3 @@ -29,6 +30,7 @@ const ( MetricEgressServiceCount = "k8s_egress_service_resources" MetricProxyGroupEgressCount = "k8s_proxygroup_egress_resources" MetricProxyGroupIngressCount = "k8s_proxygroup_ingress_resources" + 
MetricProxyGroupAPIServerCount = "k8s_proxygroup_kube_apiserver_resources" // Keys that containerboot writes to state file that can be used to determine its state. // fields set in Tailscale state Secret. These are mostly used by the Tailscale Kubernetes operator to determine diff --git a/kube/state/state.go b/kube/state/state.go new file mode 100644 index 0000000000000..4831a5f5b367a --- /dev/null +++ b/kube/state/state.go @@ -0,0 +1,97 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package state updates state keys for tailnet client devices managed by the +// operator. These keys are used to signal readiness, metadata, and current +// configuration state to the operator. Client packages deployed by the operator +// include containerboot, tsrecorder, and k8s-proxy, but currently containerboot +// has its own implementation to manage the same keys. +package state + +import ( + "encoding/json" + "fmt" + + "tailscale.com/ipn" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/util/deephash" +) + +const ( + keyPodUID = ipn.StateKey(kubetypes.KeyPodUID) + keyCapVer = ipn.StateKey(kubetypes.KeyCapVer) + keyDeviceID = ipn.StateKey(kubetypes.KeyDeviceID) + keyDeviceIPs = ipn.StateKey(kubetypes.KeyDeviceIPs) + keyDeviceFQDN = ipn.StateKey(kubetypes.KeyDeviceFQDN) +) + +// SetInitialKeys sets Pod UID and cap ver and clears tailnet device state +// keys to help stop the operator using stale tailnet device state. +func SetInitialKeys(store ipn.StateStore, podUID string) error { + // Clear device state keys first so the operator knows if the pod UID + // matches, the other values are definitely not stale. 
+ for _, key := range []ipn.StateKey{keyDeviceID, keyDeviceFQDN, keyDeviceIPs} { + if _, err := store.ReadState(key); err == nil { + if err := store.WriteState(key, nil); err != nil { + return fmt.Errorf("error writing %q to state store: %w", key, err) + } + } + } + + if err := store.WriteState(keyPodUID, []byte(podUID)); err != nil { + return fmt.Errorf("error writing pod UID to state store: %w", err) + } + if err := store.WriteState(keyCapVer, fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion)); err != nil { + return fmt.Errorf("error writing capability version to state store: %w", err) + } + + return nil +} + +// KeepKeysUpdated sets state store keys consistent with containerboot to +// signal proxy readiness to the operator. It runs until its context is +// cancelled or it hits an error. The passed in next function is expected to be +// from a local.IPNBusWatcher that is at least subscribed to +// ipn.NotifyInitialNetMap. +func KeepKeysUpdated(store ipn.StateStore, next func() (ipn.Notify, error)) error { + var currentDeviceID, currentDeviceIPs, currentDeviceFQDN deephash.Sum + + for { + n, err := next() // Blocks on a streaming LocalAPI HTTP call. 
+ if err != nil { + return err + } + if n.NetMap == nil { + continue + } + + if deviceID := n.NetMap.SelfNode.StableID(); deephash.Update(¤tDeviceID, &deviceID) { + if err := store.WriteState(keyDeviceID, []byte(deviceID)); err != nil { + return fmt.Errorf("failed to store device ID in state: %w", err) + } + } + + if fqdn := n.NetMap.SelfNode.Name(); deephash.Update(¤tDeviceFQDN, &fqdn) { + if err := store.WriteState(keyDeviceFQDN, []byte(fqdn)); err != nil { + return fmt.Errorf("failed to store device FQDN in state: %w", err) + } + } + + if addrs := n.NetMap.SelfNode.Addresses(); deephash.Update(¤tDeviceIPs, &addrs) { + var deviceIPs []string + for _, addr := range addrs.AsSlice() { + deviceIPs = append(deviceIPs, addr.Addr().String()) + } + deviceIPsValue, err := json.Marshal(deviceIPs) + if err != nil { + return err + } + if err := store.WriteState(keyDeviceIPs, deviceIPsValue); err != nil { + return fmt.Errorf("failed to store device IPs in state: %w", err) + } + } + } +} diff --git a/kube/state/state_test.go b/kube/state/state_test.go new file mode 100644 index 0000000000000..0375b1c01d91a --- /dev/null +++ b/kube/state/state_test.go @@ -0,0 +1,203 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package state + +import ( + "bytes" + "fmt" + "net/netip" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "tailscale.com/ipn" + "tailscale.com/ipn/store" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/types/netmap" +) + +func TestSetInitialStateKeys(t *testing.T) { + var ( + podUID = []byte("test-pod-uid") + expectedCapVer = fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion) + ) + for name, tc := range map[string]struct { + initial map[ipn.StateKey][]byte + expected map[ipn.StateKey][]byte + }{ + "empty_initial": { + initial: map[ipn.StateKey][]byte{}, + expected: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + }, + }, + 
"existing_pod_uid_and_capver": { + initial: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + }, + expected: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + }, + }, + "all_keys_preexisting": { + initial: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + keyDeviceID: []byte("existing-device-id"), + keyDeviceFQDN: []byte("existing-device-fqdn"), + keyDeviceIPs: []byte(`["1.2.3.4"]`), + }, + expected: map[ipn.StateKey][]byte{ + keyPodUID: podUID, + keyCapVer: expectedCapVer, + keyDeviceID: nil, + keyDeviceFQDN: nil, + keyDeviceIPs: nil, + }, + }, + } { + t.Run(name, func(t *testing.T) { + store, err := store.New(logger.Discard, "mem:") + if err != nil { + t.Fatalf("error creating in-memory store: %v", err) + } + + for key, value := range tc.initial { + if err := store.WriteState(key, value); err != nil { + t.Fatalf("error writing initial state key %q: %v", key, err) + } + } + + if err := SetInitialKeys(store, string(podUID)); err != nil { + t.Fatalf("setInitialStateKeys failed: %v", err) + } + + actual := make(map[ipn.StateKey][]byte) + for expectedKey, expectedValue := range tc.expected { + actualValue, err := store.ReadState(expectedKey) + if err != nil { + t.Errorf("error reading state key %q: %v", expectedKey, err) + continue + } + + actual[expectedKey] = actualValue + if !bytes.Equal(actualValue, expectedValue) { + t.Errorf("state key %q mismatch: expected %q, got %q", expectedKey, expectedValue, actualValue) + } + } + if diff := cmp.Diff(actual, tc.expected); diff != "" { + t.Errorf("state keys mismatch (-got +want):\n%s", diff) + } + }) + } +} + +func TestKeepStateKeysUpdated(t *testing.T) { + store, err := store.New(logger.Discard, "mem:") + if err != nil { + t.Fatalf("error creating in-memory store: %v", err) + } + + nextWaiting := make(chan struct{}) + go func() { + <-nextWaiting // Acknowledge the initial signal. 
+ }() + notifyCh := make(chan ipn.Notify) + next := func() (ipn.Notify, error) { + nextWaiting <- struct{}{} // Send signal to test that state is consistent. + return <-notifyCh, nil // Wait for test input. + } + + errs := make(chan error, 1) + go func() { + err := KeepKeysUpdated(store, next) + if err != nil { + errs <- fmt.Errorf("keepStateKeysUpdated returned with error: %w", err) + } + }() + + for _, tc := range []struct { + name string + notify ipn.Notify + expected map[ipn.StateKey][]byte + }{ + { + name: "initial_not_authed", + notify: ipn.Notify{}, + expected: map[ipn.StateKey][]byte{ + keyDeviceID: nil, + keyDeviceFQDN: nil, + keyDeviceIPs: nil, + }, + }, + { + name: "authed", + notify: ipn.Notify{ + NetMap: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: "TESTCTRL00000001", + Name: "test-node.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32"), netip.MustParsePrefix("fd7a:115c:a1e0:ab12:4843:cd96:0:1/128")}, + }).View(), + }, + }, + expected: map[ipn.StateKey][]byte{ + keyDeviceID: []byte("TESTCTRL00000001"), + keyDeviceFQDN: []byte("test-node.test.ts.net"), + keyDeviceIPs: []byte(`["100.64.0.1","fd7a:115c:a1e0:ab12:4843:cd96:0:1"]`), + }, + }, + { + name: "updated_fields", + notify: ipn.Notify{ + NetMap: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: "TESTCTRL00000001", + Name: "updated.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.250/32")}, + }).View(), + }, + }, + expected: map[ipn.StateKey][]byte{ + keyDeviceID: []byte("TESTCTRL00000001"), + keyDeviceFQDN: []byte("updated.test.ts.net"), + keyDeviceIPs: []byte(`["100.64.0.250"]`), + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + // Send test input. 
+ select { + case notifyCh <- tc.notify: + case <-errs: + t.Fatal("keepStateKeysUpdated returned before test input") + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for next() to be called again") + } + + // Wait for next() to be called again so we know the goroutine has + // processed the event. + select { + case <-nextWaiting: + case <-errs: + t.Fatal("keepStateKeysUpdated returned before test input") + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for next() to be called again") + } + + for key, value := range tc.expected { + got, _ := store.ReadState(key) + if !bytes.Equal(got, value) { + t.Errorf("state key %q mismatch: expected %q, got %q", key, value, got) + } + } + }) + } +} From 27fa2ad868f0e1bf48614dd97b7fde9cd00fa93d Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 9 Jul 2025 09:37:45 +0100 Subject: [PATCH 0088/1093] cmd/k8s-operator: don't require generation for Available condition (#16497) The observed generation was set to always 0 in #16429, but this had the knock-on effect of other controllers considering ProxyGroups never ready because the observed generation is never up to date in proxyGroupCondition. Make sure the ProxyGroupAvailable function does not requires the observed generation to be up to date, and add testing coverage to catch regressions. 
Updates #16327 Change-Id: I42f50ad47dd81cc2d3c3ce2cd7b252160bb58e40 Signed-off-by: Tom Proctor --- cmd/k8s-operator/proxygroup_test.go | 30 +++++++++++++++++++++++------ k8s-operator/conditions.go | 13 +++++++------ 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index c58e427aa06b6..6f143c0566dff 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/client/tailscale" "tailscale.com/ipn" + kube "tailscale.com/k8s-operator" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -815,6 +816,7 @@ func TestProxyGroup(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test", Finalizers: []string{"tailscale.com/finalizer"}, + Generation: 1, }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeEgress, @@ -856,9 +858,12 @@ func TestProxyGroup(t *testing.T) { expectReconciled(t, reconciler, "", pg.Name) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass \"default-pc\" is not yet in a ready state, waiting...", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass \"default-pc\" is not yet in a ready state, waiting...", 1, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, false, pc) + if kube.ProxyGroupAvailable(pg) { + t.Fatal("expected ProxyGroup to not be available") + } }) t.Run("observe_ProxyGroupCreating_status_reason", func(t *testing.T) { @@ -874,13 +879,19 @@ func TestProxyGroup(t *testing.T) { if err 
:= fc.Status().Update(t.Context(), pc); err != nil { t.Fatal(err) } - + pg.ObjectMeta.Generation = 2 + mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + p.ObjectMeta.Generation = pg.ObjectMeta.Generation + }) expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 2, cl, zl.Sugar()) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) + if kube.ProxyGroupAvailable(pg) { + t.Fatal("expected ProxyGroup to not be available") + } if expected := 1; reconciler.egressProxyGroups.Len() != expected { t.Fatalf("expected %d egress ProxyGroups, got %d", expected, reconciler.egressProxyGroups.Len()) } @@ -902,6 +913,10 @@ func TestProxyGroup(t *testing.T) { t.Run("simulate_successful_device_auth", func(t *testing.T) { addNodeIDToStateSecrets(t, fc, pg) + pg.ObjectMeta.Generation = 3 + mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + p.ObjectMeta.Generation = pg.ObjectMeta.Generation + }) expectReconciled(t, reconciler, "", pg.Name) pg.Status.Devices = []tsapi.TailnetDevice{ @@ -914,10 +929,13 @@ func TestProxyGroup(t *testing.T) { TailnetIPs: []string{"1.2.3.4", "::1"}, }, } - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 3, cl, zl.Sugar()) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "2/2 
ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) + if !kube.ProxyGroupAvailable(pg) { + t.Fatal("expected ProxyGroup to be available") + } }) t.Run("scale_up_to_3", func(t *testing.T) { @@ -926,14 +944,14 @@ func TestProxyGroup(t *testing.T) { p.Spec = pg.Spec }) expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 3, cl, zl.Sugar()) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupCreating, "2/3 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) addNodeIDToStateSecrets(t, fc, pg) expectReconciled(t, reconciler, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 0, cl, zl.Sugar()) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, reasonProxyGroupReady, 3, cl, zl.Sugar()) tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "3/3 ProxyGroup pods running", 0, cl, zl.Sugar()) pg.Status.Devices = append(pg.Status.Devices, tsapi.TailnetDevice{ Hostname: "hostname-nodeid-2", diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index 1d30f352c3603..f6858c0059162 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -137,22 +137,23 @@ func ProxyClassIsReady(pc *tsapi.ProxyClass) bool { } func ProxyGroupIsReady(pg *tsapi.ProxyGroup) bool { - return proxyGroupCondition(pg, tsapi.ProxyGroupReady) + cond := proxyGroupCondition(pg, tsapi.ProxyGroupReady) + 
return cond != nil && cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation } func ProxyGroupAvailable(pg *tsapi.ProxyGroup) bool { - return proxyGroupCondition(pg, tsapi.ProxyGroupAvailable) + cond := proxyGroupCondition(pg, tsapi.ProxyGroupAvailable) + return cond != nil && cond.Status == metav1.ConditionTrue } -func proxyGroupCondition(pg *tsapi.ProxyGroup, condType tsapi.ConditionType) bool { +func proxyGroupCondition(pg *tsapi.ProxyGroup, condType tsapi.ConditionType) *metav1.Condition { idx := xslices.IndexFunc(pg.Status.Conditions, func(cond metav1.Condition) bool { return cond.Type == string(condType) }) if idx == -1 { - return false + return nil } - cond := pg.Status.Conditions[idx] - return cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation + return &pg.Status.Conditions[idx] } func DNSCfgIsReady(cfg *tsapi.DNSConfig) bool { From 008a238acddcf1cb73c544eee41f689392b74494 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 9 Jul 2025 09:16:29 -0700 Subject: [PATCH 0089/1093] wgengine/magicsock: support self as candidate peer relay (#16499) Updates tailscale/corp#30247 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 51 ++++++------ wgengine/magicsock/magicsock_test.go | 114 ++++++++++++++++++--------- 2 files changed, 102 insertions(+), 63 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index ab7c2102fe1ec..1978867fa5e1c 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2618,8 +2618,8 @@ func (c *Conn) onFilterUpdate(f FilterUpdate) { c.updateRelayServersSet(f.Filter, self, peers) } -// updateRelayServersSet iterates all peers, evaluating filt for each one in -// order to determine which peers are relay server candidates. filt, self, and +// updateRelayServersSet iterates all peers and self, evaluating filt for each +// one in order to determine which are relay server candidates. 
filt, self, and // peers are passed as args (vs c.mu-guarded fields) to enable callers to // release c.mu before calling as this is O(m * n) (we iterate all cap rules 'm' // in filt for every peer 'n'). @@ -2631,8 +2631,9 @@ func (c *Conn) onFilterUpdate(f FilterUpdate) { // the computed result over the eventbus instead. func (c *Conn) updateRelayServersSet(filt *filter.Filter, self tailcfg.NodeView, peers views.Slice[tailcfg.NodeView]) { relayServers := make(set.Set[netip.AddrPort]) - for _, peer := range peers.All() { - peerAPI := peerAPIIfCandidateRelayServer(filt, self, peer) + nodes := append(peers.AsSlice(), self) + for _, maybeCandidate := range nodes { + peerAPI := peerAPIIfCandidateRelayServer(filt, self, maybeCandidate) if peerAPI.IsValid() { relayServers.Add(peerAPI) } @@ -2640,33 +2641,34 @@ func (c *Conn) updateRelayServersSet(filt *filter.Filter, self tailcfg.NodeView, c.relayManager.handleRelayServersSet(relayServers) } -// peerAPIIfCandidateRelayServer returns the peer API address of peer if it -// is considered to be a candidate relay server upon evaluation against filt and -// self, otherwise it returns a zero value. -func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, peer tailcfg.NodeView) netip.AddrPort { +// peerAPIIfCandidateRelayServer returns the peer API address of maybeCandidate +// if it is considered to be a candidate relay server upon evaluation against +// filt and self, otherwise it returns a zero value. self and maybeCandidate +// may be equal. 
+func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, maybeCandidate tailcfg.NodeView) netip.AddrPort { if filt == nil || !self.Valid() || - !peer.Valid() || - !capVerIsRelayServerCapable(peer.Cap()) || - !peer.Hostinfo().Valid() { + !maybeCandidate.Valid() || + !capVerIsRelayServerCapable(maybeCandidate.Cap()) || + !maybeCandidate.Hostinfo().Valid() { return netip.AddrPort{} } - for _, peerPrefix := range peer.Addresses().All() { - if !peerPrefix.IsSingleIP() { + for _, maybeCandidatePrefix := range maybeCandidate.Addresses().All() { + if !maybeCandidatePrefix.IsSingleIP() { continue } - peerAddr := peerPrefix.Addr() + maybeCandidateAddr := maybeCandidatePrefix.Addr() for _, selfPrefix := range self.Addresses().All() { if !selfPrefix.IsSingleIP() { continue } selfAddr := selfPrefix.Addr() - if selfAddr.BitLen() == peerAddr.BitLen() { // same address family - if filt.CapsWithValues(peerAddr, selfAddr).HasCapability(tailcfg.PeerCapabilityRelayTarget) { - for _, s := range peer.Hostinfo().Services().All() { - if peerAddr.Is4() && s.Proto == tailcfg.PeerAPI4 || - peerAddr.Is6() && s.Proto == tailcfg.PeerAPI6 { - return netip.AddrPortFrom(peerAddr, s.Port) + if selfAddr.BitLen() == maybeCandidateAddr.BitLen() { // same address family + if filt.CapsWithValues(maybeCandidateAddr, selfAddr).HasCapability(tailcfg.PeerCapabilityRelayTarget) { + for _, s := range maybeCandidate.Hostinfo().Services().All() { + if maybeCandidateAddr.Is4() && s.Proto == tailcfg.PeerAPI4 || + maybeCandidateAddr.Is6() && s.Proto == tailcfg.PeerAPI6 { + return netip.AddrPortFrom(maybeCandidateAddr, s.Port) } } return netip.AddrPort{} // no peerAPI @@ -2674,10 +2676,11 @@ func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, peer tailcfg.NodeV // [nodeBackend.peerCapsLocked] only returns/considers the // [tailcfg.PeerCapMap] between the passed src and the // _first_ host (/32 or /128) address for self. We are - // consistent with that behavior here. 
If self and peer - // host addresses are of the same address family they either - // have the capability or not. We do not check against - // additional host addresses of the same address family. + // consistent with that behavior here. If self and + // maybeCandidate host addresses are of the same address + // family they either have the capability or not. We do not + // check against additional host addresses of the same + // address family. return netip.AddrPort{} } } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index c388e9ed15d00..aea2de17dc223 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3385,16 +3385,7 @@ func Test_virtualNetworkID(t *testing.T) { } func Test_peerAPIIfCandidateRelayServer(t *testing.T) { - selfOnlyIPv4 := &tailcfg.Node{ - Cap: math.MinInt32, - Addresses: []netip.Prefix{ - netip.MustParsePrefix("1.1.1.1/32"), - }, - } - selfOnlyIPv6 := selfOnlyIPv4.Clone() - selfOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") - - peerHostinfo := &tailcfg.Hostinfo{ + hostInfo := &tailcfg.Hostinfo{ Services: []tailcfg.Service{ { Proto: tailcfg.PeerAPI4, @@ -3406,12 +3397,23 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, } + + selfOnlyIPv4 := &tailcfg.Node{ + Cap: math.MinInt32, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), + }, + Hostinfo: hostInfo.View(), + } + selfOnlyIPv6 := selfOnlyIPv4.Clone() + selfOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") + peerOnlyIPv4 := &tailcfg.Node{ Cap: math.MinInt32, Addresses: []netip.Prefix{ netip.MustParsePrefix("2.2.2.2/32"), }, - Hostinfo: peerHostinfo.View(), + Hostinfo: hostInfo.View(), } peerOnlyIPv6 := peerOnlyIPv4.Clone() @@ -3424,11 +3426,11 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { peerOnlyIPv4NilHostinfo.Hostinfo = tailcfg.HostinfoView{} tests := []struct { - name string - filt *filter.Filter - self tailcfg.NodeView - peer tailcfg.NodeView - 
want netip.AddrPort + name string + filt *filter.Filter + self tailcfg.NodeView + maybeCandidate tailcfg.NodeView + want netip.AddrPort }{ { name: "match v4", @@ -3443,9 +3445,26 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - peer: peerOnlyIPv4.View(), - want: netip.MustParseAddrPort("2.2.2.2:4"), + self: selfOnlyIPv4.View(), + maybeCandidate: peerOnlyIPv4.View(), + want: netip.MustParseAddrPort("2.2.2.2:4"), + }, + { + name: "match v4 self", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{selfOnlyIPv4.Addresses[0]}, + Caps: []filtertype.CapMatch{ + { + Dst: selfOnlyIPv4.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv4.View(), + maybeCandidate: selfOnlyIPv4.View(), + want: netip.AddrPortFrom(selfOnlyIPv4.Addresses[0].Addr(), 4), }, { name: "match v6", @@ -3460,9 +3479,26 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - peer: peerOnlyIPv6.View(), - want: netip.MustParseAddrPort("[::2]:6"), + self: selfOnlyIPv6.View(), + maybeCandidate: peerOnlyIPv6.View(), + want: netip.MustParseAddrPort("[::2]:6"), + }, + { + name: "match v6 self", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{selfOnlyIPv6.Addresses[0]}, + Caps: []filtertype.CapMatch{ + { + Dst: selfOnlyIPv6.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv6.View(), + maybeCandidate: selfOnlyIPv6.View(), + want: netip.AddrPortFrom(selfOnlyIPv6.Addresses[0].Addr(), 6), }, { name: "no match dst", @@ -3477,8 +3513,8 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - peer: peerOnlyIPv6.View(), + self: selfOnlyIPv6.View(), + maybeCandidate: peerOnlyIPv6.View(), }, { name: "no match peer cap", @@ -3493,8 +3529,8 @@ 
func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - peer: peerOnlyIPv6.View(), + self: selfOnlyIPv6.View(), + maybeCandidate: peerOnlyIPv6.View(), }, { name: "cap ver not relay capable", @@ -3509,14 +3545,14 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: peerOnlyIPv4.View(), - peer: peerOnlyIPv4ZeroCapVer.View(), + self: peerOnlyIPv4.View(), + maybeCandidate: peerOnlyIPv4ZeroCapVer.View(), }, { - name: "nil filt", - filt: nil, - self: selfOnlyIPv4.View(), - peer: peerOnlyIPv4.View(), + name: "nil filt", + filt: nil, + self: selfOnlyIPv4.View(), + maybeCandidate: peerOnlyIPv4.View(), }, { name: "nil self", @@ -3531,8 +3567,8 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: tailcfg.NodeView{}, - peer: peerOnlyIPv4.View(), + self: tailcfg.NodeView{}, + maybeCandidate: peerOnlyIPv4.View(), }, { name: "nil peer", @@ -3547,8 +3583,8 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - peer: tailcfg.NodeView{}, + self: selfOnlyIPv4.View(), + maybeCandidate: tailcfg.NodeView{}, }, { name: "nil peer hostinfo", @@ -3563,13 +3599,13 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - peer: peerOnlyIPv4NilHostinfo.View(), + self: selfOnlyIPv4.View(), + maybeCandidate: peerOnlyIPv4NilHostinfo.View(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := peerAPIIfCandidateRelayServer(tt.filt, tt.self, tt.peer); !reflect.DeepEqual(got, tt.want) { + if got := peerAPIIfCandidateRelayServer(tt.filt, tt.self, tt.maybeCandidate); !reflect.DeepEqual(got, tt.want) { t.Errorf("peerAPIIfCandidateRelayServer() = %v, want %v", got, tt.want) } }) From cc2f4ac921106ad46691b9271008f0bf43aeb970 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 9 Jul 
2025 11:59:57 -0500 Subject: [PATCH 0090/1093] ipn: move ParseAutoExitNodeID from ipn/ipnlocal to ipn So it can be used from the CLI without importing ipnlocal. While there, also remove isAutoExitNodeID, a wrapper around parseAutoExitNodeID that's no longer used. Updates tailscale/corp#29969 Updates #cleanup Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 33 ++---------- ipn/ipnlocal/local_test.go | 104 ------------------------------------- ipn/prefs.go | 22 ++++++++ ipn/prefs_test.go | 59 +++++++++++++++++++++ 4 files changed, 84 insertions(+), 134 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c54cb32d3125c..048bb1219c2f9 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1890,7 +1890,7 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange // and update prefs if it differs from the current one. // This includes cases where it was previously an expression but no longer is, // or where it wasn't before but now is. 
- autoExitNode, useAutoExitNode := parseAutoExitNodeID(exitNodeID) + autoExitNode, useAutoExitNode := ipn.ParseAutoExitNodeString(exitNodeID) if prefs.AutoExitNode != autoExitNode { prefs.AutoExitNode = autoExitNode anyChange = true @@ -4292,7 +4292,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P if v { mp.ExitNodeIDSet = true mp.ExitNodeID = p0.InternalExitNodePrior() - if expr, ok := parseAutoExitNodeID(mp.ExitNodeID); ok { + if expr, ok := ipn.ParseAutoExitNodeString(mp.ExitNodeID); ok { mp.AutoExitNodeSet = true mp.AutoExitNode = expr mp.ExitNodeID = unresolvedExitNodeID @@ -4304,7 +4304,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P mp.AutoExitNode = "" mp.InternalExitNodePriorSet = true if p0.AutoExitNode().IsSet() { - mp.InternalExitNodePrior = tailcfg.StableNodeID(autoExitNodePrefix + p0.AutoExitNode()) + mp.InternalExitNodePrior = tailcfg.StableNodeID(ipn.AutoExitNodePrefix + p0.AutoExitNode()) } else { mp.InternalExitNodePrior = p0.ExitNodeID() } @@ -7933,10 +7933,6 @@ func longLatDistance(fromLat, fromLong, toLat, toLong float64) float64 { } const ( - // autoExitNodePrefix is the prefix used in [syspolicy.ExitNodeID] values - // to indicate that the string following the prefix is an [ipn.ExitNodeExpression]. - autoExitNodePrefix = "auto:" - // unresolvedExitNodeID is a special [tailcfg.StableNodeID] value // used as an exit node ID to install a blackhole route, preventing // accidental non-exit-node usage until the [ipn.ExitNodeExpression] @@ -7947,29 +7943,6 @@ const ( unresolvedExitNodeID tailcfg.StableNodeID = "auto:any" ) -// isAutoExitNodeID reports whether the given [tailcfg.StableNodeID] is -// actually an "auto:"-prefixed [ipn.ExitNodeExpression]. -func isAutoExitNodeID(id tailcfg.StableNodeID) bool { - _, ok := parseAutoExitNodeID(id) - return ok -} - -// parseAutoExitNodeID attempts to parse the given [tailcfg.StableNodeID] -// as an [ExitNodeExpression]. 
-// -// It returns the parsed expression and true on success, -// or an empty string and false if the input does not appear to be -// an [ExitNodeExpression] (i.e., it doesn't start with "auto:"). -// -// It is mainly used to parse the [syspolicy.ExitNodeID] value -// when it is set to "auto:" (e.g., auto:any). -func parseAutoExitNodeID(id tailcfg.StableNodeID) (_ ipn.ExitNodeExpression, ok bool) { - if expr, ok := strings.CutPrefix(string(id), autoExitNodePrefix); ok && expr != "" { - return ipn.ExitNodeExpression(expr), true - } - return "", false -} - func isAllowedAutoExitNodeID(exitNodeID tailcfg.StableNodeID) bool { if exitNodeID == "" { return false // an exit node is required diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 8bc84b081c016..73bae7ede8720 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4873,110 +4873,6 @@ func TestMinLatencyDERPregion(t *testing.T) { } } -func TestShouldAutoExitNode(t *testing.T) { - tests := []struct { - name string - exitNodeIDPolicyValue string - expectedBool bool - }{ - { - name: "auto:any", - exitNodeIDPolicyValue: "auto:any", - expectedBool: true, - }, - { - name: "no auto prefix", - exitNodeIDPolicyValue: "foo", - expectedBool: false, - }, - { - name: "auto prefix but empty suffix", - exitNodeIDPolicyValue: "auto:", - expectedBool: false, - }, - { - name: "auto prefix no colon", - exitNodeIDPolicyValue: "auto", - expectedBool: false, - }, - { - name: "auto prefix unknown suffix", - exitNodeIDPolicyValue: "auto:foo", - expectedBool: true, // "auto:{unknown}" is treated as "auto:any" - }, - } - - syspolicy.RegisterWellKnownSettingsForTest(t) - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := isAutoExitNodeID(tailcfg.StableNodeID(tt.exitNodeIDPolicyValue)) - if got != tt.expectedBool { - t.Fatalf("expected %v got %v for %v policy value", tt.expectedBool, got, tt.exitNodeIDPolicyValue) - } - }) - } -} - -func TestParseAutoExitNodeID(t 
*testing.T) { - tests := []struct { - name string - exitNodeID string - wantOk bool - wantExpr ipn.ExitNodeExpression - }{ - { - name: "empty expr", - exitNodeID: "", - wantOk: false, - wantExpr: "", - }, - { - name: "no auto prefix", - exitNodeID: "foo", - wantOk: false, - wantExpr: "", - }, - { - name: "auto:any", - exitNodeID: "auto:any", - wantOk: true, - wantExpr: ipn.AnyExitNode, - }, - { - name: "auto:foo", - exitNodeID: "auto:foo", - wantOk: true, - wantExpr: "foo", - }, - { - name: "auto prefix but empty suffix", - exitNodeID: "auto:", - wantOk: false, - wantExpr: "", - }, - { - name: "auto prefix no colon", - exitNodeID: "auto", - wantOk: false, - wantExpr: "", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotExpr, gotOk := parseAutoExitNodeID(tailcfg.StableNodeID(tt.exitNodeID)) - if gotOk != tt.wantOk || gotExpr != tt.wantExpr { - if tt.wantOk { - t.Fatalf("got %v (%q); want %v (%q)", gotOk, gotExpr, tt.wantOk, tt.wantExpr) - } else { - t.Fatalf("got %v (%q); want false", gotOk, gotExpr) - } - } - }) - } -} - func TestEnableAutoUpdates(t *testing.T) { lb := newTestLocalBackend(t) diff --git a/ipn/prefs.go b/ipn/prefs.go index 77cea0493af16..71a80b1828760 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -1088,3 +1088,25 @@ const AnyExitNode ExitNodeExpression = "any" func (e ExitNodeExpression) IsSet() bool { return e != "" } + +const ( + // AutoExitNodePrefix is the prefix used in [syspolicy.ExitNodeID] values and CLI + // to indicate that the string following the prefix is an [ipn.ExitNodeExpression]. + AutoExitNodePrefix = "auto:" +) + +// ParseAutoExitNodeString attempts to parse the given string +// as an [ExitNodeExpression]. +// +// It returns the parsed expression and true on success, +// or an empty string and false if the input does not appear to be +// an [ExitNodeExpression] (i.e., it doesn't start with "auto:"). 
+// +// It is mainly used to parse the [syspolicy.ExitNodeID] value +// when it is set to "auto:" (e.g., auto:any). +func ParseAutoExitNodeString[T ~string](s T) (_ ExitNodeExpression, ok bool) { + if expr, ok := strings.CutPrefix(string(s), AutoExitNodePrefix); ok && expr != "" { + return ExitNodeExpression(expr), true + } + return "", false +} diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 268ea206c137f..43e360c6af0c2 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -1129,3 +1129,62 @@ func TestPrefsDowngrade(t *testing.T) { t.Fatal("AllowSingleHosts should be true") } } + +func TestParseAutoExitNodeString(t *testing.T) { + tests := []struct { + name string + exitNodeID string + wantOk bool + wantExpr ExitNodeExpression + }{ + { + name: "empty expr", + exitNodeID: "", + wantOk: false, + wantExpr: "", + }, + { + name: "no auto prefix", + exitNodeID: "foo", + wantOk: false, + wantExpr: "", + }, + { + name: "auto:any", + exitNodeID: "auto:any", + wantOk: true, + wantExpr: AnyExitNode, + }, + { + name: "auto:foo", + exitNodeID: "auto:foo", + wantOk: true, + wantExpr: "foo", + }, + { + name: "auto prefix but empty suffix", + exitNodeID: "auto:", + wantOk: false, + wantExpr: "", + }, + { + name: "auto prefix no colon", + exitNodeID: "auto", + wantOk: false, + wantExpr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotExpr, gotOk := ParseAutoExitNodeString(tt.exitNodeID) + if gotOk != tt.wantOk || gotExpr != tt.wantExpr { + if tt.wantOk { + t.Fatalf("got %v (%q); want %v (%q)", gotOk, gotExpr, tt.wantOk, tt.wantExpr) + } else { + t.Fatalf("got %v (%q); want false", gotOk, gotExpr) + } + } + }) + } +} From c5fdf9e1db149d5c205ec971ffe9ae6d487833a4 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 9 Jul 2025 12:07:44 -0500 Subject: [PATCH 0091/1093] cmd/tailscale/cli: add support for tailscale {up,set} --exit-node=auto:any If the specified exit node string starts with "auto:" (i.e., can be parsed as an 
ipn.ExitNodeExpression), we update ipn.Prefs.AutoExitNode instead of ipn.Prefs.ExitNodeID. Fixes #16459 Signed-off-by: Nick Khyl --- cmd/tailscale/cli/cli_test.go | 24 ++++++++++++++++++++++-- cmd/tailscale/cli/set.go | 7 +++++-- cmd/tailscale/cli/up.go | 9 +++++++-- 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 48121c7d912d9..5dd4fa2340360 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -972,8 +972,7 @@ func TestPrefFlagMapping(t *testing.T) { // No CLI flag for this. continue case "AutoExitNode": - // TODO(nickkhyl): should be handled by tailscale {set,up} --exit-node. - // See tailscale/tailscale#16459. + // Handled by tailscale {set,up} --exit-node=auto:any. continue } t.Errorf("unexpected new ipn.Pref field %q is not handled by up.go (see addPrefFlagMapping and checkForAccidentalSettingReverts)", prefName) @@ -1338,6 +1337,27 @@ func TestUpdatePrefs(t *testing.T) { } }, }, + { + name: "auto_exit_node", + flags: []string{"--exit-node=auto:any"}, + curPrefs: &ipn.Prefs{ + ControlURL: ipn.DefaultControlURL, + CorpDNS: true, // enabled by [ipn.NewPrefs] by default + NetfilterMode: preftype.NetfilterOn, // enabled by [ipn.NewPrefs] by default + }, + wantJustEditMP: &ipn.MaskedPrefs{ + WantRunningSet: true, // enabled by default for tailscale up + AutoExitNodeSet: true, + ExitNodeIDSet: true, // we want ExitNodeID cleared + ExitNodeIPSet: true, // same for ExitNodeIP + }, + env: upCheckEnv{backendState: "Running"}, + checkUpdatePrefsMutations: func(t *testing.T, newPrefs *ipn.Prefs) { + if newPrefs.AutoExitNode != ipn.AnyExitNode { + t.Errorf("AutoExitNode: got %q; want %q", newPrefs.AutoExitNode, ipn.AnyExitNode) + } + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 66e74d77ff5a5..f1b21995ec388 100644 --- a/cmd/tailscale/cli/set.go +++ 
b/cmd/tailscale/cli/set.go @@ -73,7 +73,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.StringVar(&setArgs.profileName, "nickname", "", "nickname for the current account") setf.BoolVar(&setArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes") setf.BoolVar(&setArgs.acceptDNS, "accept-dns", true, "accept DNS configuration from the admin panel") - setf.StringVar(&setArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP or base name) for internet traffic, or empty string to not use an exit node") + setf.StringVar(&setArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP, base name, or auto:any) for internet traffic, or empty string to not use an exit node") setf.BoolVar(&setArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node") setf.BoolVar(&setArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") setf.BoolVar(&setArgs.runSSH, "ssh", false, "run an SSH server, permitting access per tailnet admin's declared policy") @@ -173,7 +173,10 @@ func runSet(ctx context.Context, args []string) (retErr error) { } if setArgs.exitNodeIP != "" { - if err := maskedPrefs.Prefs.SetExitNodeIP(setArgs.exitNodeIP, st); err != nil { + if expr, useAutoExitNode := ipn.ParseAutoExitNodeString(setArgs.exitNodeIP); useAutoExitNode { + maskedPrefs.AutoExitNode = expr + maskedPrefs.AutoExitNodeSet = true + } else if err := maskedPrefs.Prefs.SetExitNodeIP(setArgs.exitNodeIP, st); err != nil { var e ipn.ExitNodeLocalIPError if errors.As(err, &e) { return fmt.Errorf("%w; did you mean --advertise-exit-node?", err) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 37cdab754db18..1863957d3f143 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -100,7 +100,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { 
upf.BoolVar(&upArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes") upf.BoolVar(&upArgs.acceptDNS, "accept-dns", true, "accept DNS configuration from the admin panel") upf.Var(notFalseVar{}, "host-routes", hidden+"install host routes to other Tailscale nodes (must be true as of Tailscale 1.67+)") - upf.StringVar(&upArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP or base name) for internet traffic, or empty string to not use an exit node") + upf.StringVar(&upArgs.exitNodeIP, "exit-node", "", "Tailscale exit node (IP, base name, or auto:any) for internet traffic, or empty string to not use an exit node") upf.BoolVar(&upArgs.exitNodeAllowLANAccess, "exit-node-allow-lan-access", false, "Allow direct access to the local network when routing traffic via an exit node") upf.BoolVar(&upArgs.shieldsUp, "shields-up", false, "don't allow incoming connections") upf.BoolVar(&upArgs.runSSH, "ssh", false, "run an SSH server, permitting access per tailnet admin's declared policy") @@ -278,7 +278,9 @@ func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goo prefs.NetfilterMode = preftype.NetfilterOff } if upArgs.exitNodeIP != "" { - if err := prefs.SetExitNodeIP(upArgs.exitNodeIP, st); err != nil { + if expr, useAutoExitNode := ipn.ParseAutoExitNodeString(upArgs.exitNodeIP); useAutoExitNode { + prefs.AutoExitNode = expr + } else if err := prefs.SetExitNodeIP(upArgs.exitNodeIP, st); err != nil { var e ipn.ExitNodeLocalIPError if errors.As(err, &e) { return nil, fmt.Errorf("%w; did you mean --advertise-exit-node?", err) @@ -408,6 +410,9 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus if env.upArgs.reset { visitFlags = env.flagSet.VisitAll } + if prefs.AutoExitNode.IsSet() { + justEditMP.AutoExitNodeSet = true + } visitFlags(func(f *flag.Flag) { updateMaskedPrefsFromUpOrSetFlag(justEditMP, f.Name) }) From 21a4058ec71878373d68ef6c983e81dda690e441 Mon Sep 
17 00:00:00 2001 From: Nick Khyl Date: Tue, 8 Jul 2025 18:35:32 -0500 Subject: [PATCH 0092/1093] ipn/ipnlocal: add test to verify handling of unknown auto exit node expressions We already check this for cases where ipn.Prefs.AutoExitNode is configured via syspolicy. Configuring it directly through EditPrefs should behave the same, so we add a test for that as well. Additionally, we clarify the implementation and future extensibility in (*LocalBackend).resolveAutoExitNodeLocked, where the AutoExitNode is actually enforced. Updates tailscale/corp#29969 Updates #cleanup Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 8 ++++++++ ipn/ipnlocal/local_test.go | 17 +++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 048bb1219c2f9..55730489e0d4a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2071,6 +2071,14 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { // // b.mu must be held. func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged bool) { + // As of 2025-07-08, the only supported auto exit node expression is [ipn.AnyExitNode]. + // + // However, to maintain forward compatibility with future auto exit node expressions, + // we treat any non-empty AutoExitNode as [ipn.AnyExitNode]. + // + // If and when we support additional auto exit node expressions, this method should be updated + // to handle them appropriately, while still falling back to [ipn.AnyExitNode] or a more appropriate + // default for unknown (or partially supported) expressions. 
if !prefs.AutoExitNode.IsSet() { return false } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 73bae7ede8720..e70e5ad2afe58 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1002,6 +1002,23 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "foo", }, }, + { + name: "auto-foo-via-edit-prefs", // set auto exit node via EditPrefs with an unknown/unsupported expression + prefs: ipn.Prefs{ + ControlURL: controlURL, + }, + netMap: clientNetmap, + report: report, + changePrefs: &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{AutoExitNode: "foo"}, + AutoExitNodeSet: true, + }, + wantPrefs: ipn.Prefs{ + ControlURL: controlURL, + ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" + AutoExitNode: "foo", + }, + }, { name: "auto-any-via-policy/toggle-off", // cannot toggle off the exit node if it was set via syspolicy prefs: ipn.Prefs{ From ff1803158a60c37128557f40c643f3839bc5609a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 9 Jul 2025 13:01:32 -0500 Subject: [PATCH 0093/1093] ipn/ipnlocal: change order of exit node refresh and netmap update so that clients receive the new netmap first If the GUI receives a new exit node ID before the new netmap, it may treat the node as offline or invalid if the previous netmap didn't include the peer at all, or if the peer was offline or not advertised as an exit node. This may result in briefly issuing and dismissing a warning, or a similar issue, which isn't ideal. In this PR, we change the operation order to send the new netmap to clients first before selecting the new exit node and notifying them of the Exit Node change. 
Updates tailscale/corp#30252 (an old issue discovered during testing this) Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 13 ++++++--- ipn/ipnlocal/local_test.go | 54 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 3 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 55730489e0d4a..48eceb36c1ab4 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1709,9 +1709,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // Now complete the lock-free parts of what we started while locked. if st.NetMap != nil { - // Check and update the exit node if needed, now that we have a new netmap. - b.RefreshExitNode() - if envknob.NoLogsNoSupport() && st.NetMap.HasCap(tailcfg.CapabilityDataPlaneAuditLogs) { msg := "tailnet requires logging to be enabled. Remove --no-logs-no-support from tailscaled command line." b.health.SetLocalLogConfigHealth(errors.New(msg)) @@ -1751,6 +1748,16 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.health.SetDERPMap(st.NetMap.DERPMap) b.send(ipn.Notify{NetMap: st.NetMap}) + + // Check and update the exit node if needed, now that we have a new netmap. + // + // This must happen after the netmap change is sent via [ipn.Notify], + // so the GUI can correctly display the exit node if it has changed + // since the last netmap was sent. + // + // Otherwise, it might briefly show the exit node as offline and display a warning, + // if the node wasn't online or wasn't advertising default routes in the previous netmap. 
+ b.RefreshExitNode() } if st.URL != "" { b.logf("Received auth URL: %.20v...", st.URL) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index e70e5ad2afe58..bb7f433c02cbe 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1407,6 +1407,60 @@ func TestPrefsChangeDisablesExitNode(t *testing.T) { } } +func TestExitNodeNotifyOrder(t *testing.T) { + const controlURL = "https://localhost:1/" + + report := &netcheck.Report{ + RegionLatency: map[int]time.Duration{ + 1: 5 * time.Millisecond, + 2: 10 * time.Millisecond, + }, + PreferredDERP: 1, + } + + exitNode1 := makeExitNode(1, withName("node-1"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))) + exitNode2 := makeExitNode(2, withName("node-2"), withDERP(2), withAddresses(netip.MustParsePrefix("100.64.1.2/32"))) + selfNode := makeExitNode(3, withName("node-3"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.3/32"))) + clientNetmap := buildNetmapWithPeers(selfNode, exitNode1, exitNode2) + + lb := newTestLocalBackend(t) + lb.sys.MagicSock.Get().SetLastNetcheckReportForTest(lb.ctx, report) + lb.SetPrefsForTest(&ipn.Prefs{ + ControlURL: controlURL, + AutoExitNode: ipn.AnyExitNode, + }) + + nw := newNotificationWatcher(t, lb, ipnauth.Self) + + // Updating the netmap should trigger both a netmap notification + // and an exit node ID notification (since an exit node is selected). + // The netmap notification should be sent first. 
+ nw.watch(0, []wantedNotification{ + wantNetmapNotify(clientNetmap), + wantExitNodeIDNotify(exitNode1.StableID()), + }) + lb.SetControlClientStatus(lb.cc, controlclient.Status{NetMap: clientNetmap}) + nw.check() +} + +func wantNetmapNotify(want *netmap.NetworkMap) wantedNotification { + return wantedNotification{ + name: "Netmap", + cond: func(t testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + return n.NetMap == want + }, + } +} + +func wantExitNodeIDNotify(want tailcfg.StableNodeID) wantedNotification { + return wantedNotification{ + name: fmt.Sprintf("ExitNodeID-%s", want), + cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + return n.Prefs != nil && n.Prefs.Valid() && n.Prefs.ExitNodeID() == want + }, + } +} + func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface From d40b25326ccdb111ce4e99893164dd6742328a52 Mon Sep 17 00:00:00 2001 From: Dylan Bargatze Date: Wed, 9 Jul 2025 18:06:58 -0400 Subject: [PATCH 0094/1093] tailcfg, wgengine/magicsock: disable all UDP relay usage if disable-relay-client is set (#16492) If the NodeAttrDisableRelayClient node attribute is set, ensures that a node cannot allocate endpoints on a UDP relay server itself, and cannot use newly-discovered paths (via disco/CallMeMaybeVia) that traverse a UDP relay server. Fixes tailscale/corp#30180 Signed-off-by: Dylan Bargatze --- tailcfg/tailcfg.go | 18 ++++++++++-------- wgengine/magicsock/magicsock.go | 10 +++++++++- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index d97f60a8acb84..6c88217de01f4 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2607,14 +2607,16 @@ const ( // only needs to be present in [NodeCapMap] to take effect. 
NodeAttrDisableRelayServer NodeCapability = "disable-relay-server" - // NodeAttrDisableRelayClient prevents the node from allocating UDP relay - // server endpoints itself; the node may still bind into and relay traffic - // using endpoints allocated by its peers. This attribute can be added to - // the node dynamically; if added while the node is already running, the - // node will be unable to allocate UDP relay server endpoints after it next - // updates its network map. There are no expected values for this key in - // [NodeCapMap]; the key only needs to be present in [NodeCapMap] to take - // effect. + // NodeAttrDisableRelayClient prevents the node from both allocating UDP + // relay server endpoints itself, and from using endpoints allocated by + // its peers. This attribute can be added to the node dynamically; if added + // while the node is already running, the node will be unable to allocate + // endpoints after it next updates its network map, and will be immediately + // unable to use new paths via a UDP relay server. Setting this attribute + // dynamically does not remove any existing paths, including paths that + // traverse a UDP relay server. There are no expected values for this key + // in [NodeCapMap]; the key only needs to be present in [NodeCapMap] to + // take effect. 
NodeAttrDisableRelayClient NodeCapability = "disable-relay-client" // NodeAttrMagicDNSPeerAAAA is a capability that tells the node's MagicDNS diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 1978867fa5e1c..582e74c8b3120 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -355,7 +355,7 @@ type Conn struct { self tailcfg.NodeView // from last onNodeViewsUpdate peers views.Slice[tailcfg.NodeView] // from last onNodeViewsUpdate, sorted by Node.ID; Note: [netmap.NodeMutation]'s rx'd in onNodeMutationsUpdate are never applied filt *filter.Filter // from last onFilterUpdate - relayClientEnabled bool // whether we can allocate UDP relay endpoints on UDP relay servers + relayClientEnabled bool // whether we can allocate UDP relay endpoints on UDP relay servers or receive CallMeMaybeVia messages from peers lastFlags debugFlags // at time of last onNodeViewsUpdate privateKey key.NodePrivate // WireGuard private key for this node everHadKey bool // whether we ever had a non-zero private key @@ -2149,6 +2149,14 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.logf("magicsock: disco: ignoring %s from %v; %v is unknown", msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } + // If the "disable-relay-client" node attr is set for this node, it + // can't be a UDP relay client, so drop any CallMeMaybeVia messages it + // receives. 
+ if isVia && !c.relayClientEnabled { + c.logf("magicsock: disco: ignoring %s from %v; disable-relay-client node attr is set", msgType, sender.ShortString()) + return + } + ep.mu.Lock() relayCapable := ep.relayCapable lastBest := ep.bestAddr From ae8641735df2844b4d5d0abcd25c074d297a013d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 9 Jul 2025 15:17:51 -0700 Subject: [PATCH 0095/1093] cmd/tailscale/cli,ipn/ipnstate,wgengine/magicsock: label peer-relay (#16510) Updates tailscale/corp#30033 Signed-off-by: Jordan Whited --- cmd/tailscale/cli/status.go | 4 +++- ipn/ipnstate/ipnstate.go | 10 +++++++--- wgengine/magicsock/endpoint.go | 9 +++++---- wgengine/magicsock/magicsock.go | 2 +- 4 files changed, 16 insertions(+), 9 deletions(-) diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index e4dccc247fd54..85679a7decbc1 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -183,10 +183,12 @@ func runStatus(ctx context.Context, args []string) error { } else if ps.ExitNodeOption { f("offers exit node; ") } - if relay != "" && ps.CurAddr == "" { + if relay != "" && ps.CurAddr == "" && ps.PeerRelay == "" { f("relay %q", relay) } else if ps.CurAddr != "" { f("direct %s", ps.CurAddr) + } else if ps.PeerRelay != "" { + f("peer-relay %s", ps.PeerRelay) } if !ps.Online { f("; offline") diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index 89c6d7e24dbc5..fdfd4e3346958 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -251,9 +251,10 @@ type PeerStatus struct { PrimaryRoutes *views.Slice[netip.Prefix] `json:",omitempty"` // Endpoints: - Addrs []string - CurAddr string // one of Addrs, or unique if roaming - Relay string // DERP region + Addrs []string + CurAddr string // one of Addrs, or unique if roaming + Relay string // DERP region + PeerRelay string // peer relay address (ip:port:vni) RxBytes int64 TxBytes int64 @@ -451,6 +452,9 @@ func (sb *StatusBuilder) AddPeer(peer key.NodePublic, st 
*PeerStatus) { if v := st.Relay; v != "" { e.Relay = v } + if v := st.PeerRelay; v != "" { + e.PeerRelay = v + } if v := st.UserID; v != 0 { e.UserID = v } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 4780c7f37a781..bfafec5ab6297 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1961,10 +1961,11 @@ func (de *endpoint) populatePeerStatus(ps *ipnstate.PeerStatus) { ps.Active = now.Sub(de.lastSendExt) < sessionActiveTimeout if udpAddr, derpAddr, _ := de.addrForSendLocked(now); udpAddr.ap.IsValid() && !derpAddr.IsValid() { - // TODO(jwhited): if udpAddr.vni.isSet() we are using a Tailscale client - // as a UDP relay; update PeerStatus and its interpretation by - // "tailscale status" to make this clear. - ps.CurAddr = udpAddr.String() + if udpAddr.vni.isSet() { + ps.PeerRelay = udpAddr.String() + } else { + ps.CurAddr = udpAddr.String() + } } } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 582e74c8b3120..8743fe9cc574d 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3437,7 +3437,7 @@ func (c *Conn) onNodeMutationsUpdate(update NodeMutationsUpdate) { } } -// UpdateStatus implements the interface nede by ipnstate.StatusBuilder. +// UpdateStatus implements the interface needed by ipnstate.StatusBuilder. // // This method adds in the magicsock-specific information only. Most // of the status is otherwise populated by LocalBackend. 
From 6a0fad1e1044f567cb4d00608d1b1f00cef954c1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 9 Jul 2025 20:02:00 -0700 Subject: [PATCH 0096/1093] wgengine/magicsock: don't peer relay if NodeAttrOnlyTCP443 is set (#16517) Updates tailscale/corp#30138 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 1 + 1 file changed, 1 insertion(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 8743fe9cc574d..18a6bbceb4788 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2704,6 +2704,7 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { relayClientEnabled := update.SelfNode.Valid() && !update.SelfNode.HasCap(tailcfg.NodeAttrDisableRelayClient) && + !update.SelfNode.HasCap(tailcfg.NodeAttrOnlyTCP443) && envknob.UseWIPCode() c.mu.Lock() From fbc4c34cf7377f4ddb1d95163085e2b27c845018 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 10 Jul 2025 03:04:29 -0400 Subject: [PATCH 0097/1093] ipn/localapi: do not break client on event marshalling errors (#16503) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Errors were marshalled without the correct newlines. Also, they could generally be marshalled with more data, so an intermediate was introduced to make them slightly nicer to look at. Updates #15160 Signed-off-by: Claus Lensbøl --- ipn/localapi/localapi.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 60ed89b3b2ad3..cd59c54e05489 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -919,6 +919,11 @@ func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { } } +// EventError provides the JSON encoding of internal errors from event processing.
+type EventError struct { + Error string +} + // serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams // events to the client. func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { @@ -971,7 +976,16 @@ func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { } if msg, err := json.Marshal(data); err != nil { - fmt.Fprintf(w, `{"Event":"[ERROR] failed to marshal JSON for %T"}\n`, event.Event) + data.Event = EventError{Error: fmt.Sprintf( + "failed to marshal JSON for %T", event.Event, + )} + if errMsg, err := json.Marshal(data); err != nil { + fmt.Fprintf(w, + `{"Count": %d, "Event":"[ERROR] failed to marshal JSON for %T\n"}`, + i, event.Event) + } else { + w.Write(errMsg) + } } else { w.Write(msg) } From cf0460b9da23f70fb8442baa0a1bca1df32ba2c1 Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 10 Jul 2025 14:33:13 +0100 Subject: [PATCH 0098/1093] cmd/k8s-operator: allow letsencrypt staging on k8s proxies (#16521) This commit modifies the operator to detect the usage of k8s-apiserver type proxy groups that wish to use the letsencrypt staging directory and apply the appropriate environment variable to the statefulset it produces. 
Updates #13358 Signed-off-by: David Bond --- cmd/k8s-operator/sts.go | 23 +++++++++++++++-------- cmd/k8s-operator/sts_test.go | 5 +++++ 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index fbb271800390a..df12554e0feca 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -761,14 +761,21 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, enableEndpoints(ss, metricsEnabled, debugEnabled) } } - if pc.Spec.UseLetsEncryptStagingEnvironment && (stsCfg.proxyType == proxyTypeIngressResource || stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress)) { - for i, c := range ss.Spec.Template.Spec.Containers { - if isMainContainer(&c) { - ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{ - Name: "TS_DEBUG_ACME_DIRECTORY_URL", - Value: letsEncryptStagingEndpoint, - }) - break + + if stsCfg != nil { + usesLetsEncrypt := stsCfg.proxyType == proxyTypeIngressResource || + stsCfg.proxyType == string(tsapi.ProxyGroupTypeIngress) || + stsCfg.proxyType == string(tsapi.ProxyGroupTypeKubernetesAPIServer) + + if pc.Spec.UseLetsEncryptStagingEnvironment && usesLetsEncrypt { + for i, c := range ss.Spec.Template.Spec.Containers { + if isMainContainer(&c) { + ss.Spec.Template.Spec.Containers[i].Env = append(ss.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{ + Name: "TS_DEBUG_ACME_DIRECTORY_URL", + Value: letsEncryptStagingEndpoint, + }) + break + } } } } diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index 35c512c8cd05b..afa791ccc7904 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -61,6 +61,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { // Setup proxyClassAllOpts := &tsapi.ProxyClass{ Spec: tsapi.ProxyClassSpec{ + UseLetsEncryptStagingEnvironment: true, StatefulSet: &tsapi.StatefulSet{ Labels: tsapi.Labels{"foo": "bar"}, Annotations: 
map[string]string{"foo.io/bar": "foo"}, @@ -292,6 +293,10 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff) } + + // 8. A Kubernetes API proxy with letsencrypt staging enabled + gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), &tailscaleSTSConfig{proxyType: string(tsapi.ProxyGroupTypeKubernetesAPIServer)}, zl.Sugar()) + verifyEnvVar(t, gotSS, "TS_DEBUG_ACME_DIRECTORY_URL", letsEncryptStagingEndpoint) } func Test_mergeStatefulSetLabelsOrAnnots(t *testing.T) { From 2b665c370c50a85f65edf4b9cb15c41bc45a8008 Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 10 Jul 2025 14:33:30 +0100 Subject: [PATCH 0099/1093] cmd/{k8s-operator,k8s-proxy}: allow setting login server url (#16504) This commit modifies the k8s proxy application configuration to include a new field named `ServerURL` which, when set, modifies the tailscale coordination server used by the proxy. This works in the same way as the operator and the proxies it deploys. If unset, the default coordination server is used. 
Updates https://github.com/tailscale/tailscale/issues/13358 Signed-off-by: David Bond --- cmd/k8s-operator/proxygroup.go | 5 +++++ cmd/k8s-proxy/k8s-proxy.go | 5 +++++ kube/k8s-proxy/conf/conf.go | 1 + 3 files changed, 11 insertions(+) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 3dfb004f1dd36..7b8a0754e6d0b 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -815,6 +815,11 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p }, }, } + + if r.loginServer != "" { + cfg.ServerURL = &r.loginServer + } + cfgB, err := json.Marshal(cfg) if err != nil { return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 6e7eadb7303b5..81a5a8483af26 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -91,6 +91,11 @@ func run(logger *zap.SugaredLogger) error { Store: st, AuthKey: authKey, } + + if cfg.Parsed.ServerURL != nil { + ts.ControlURL = *cfg.Parsed.ServerURL + } + if cfg.Parsed.Hostname != nil { ts.Hostname = *cfg.Parsed.Hostname } diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index 6b0e853c5c21c..2901e7b44852e 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -53,6 +53,7 @@ type ConfigV1Alpha1 struct { LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy. + ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. 
} type KubeAPIServer struct { From d0cafc0a6776397d9a346dde60962c679062a21c Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 10 Jul 2025 15:53:01 +0100 Subject: [PATCH 0100/1093] cmd/{k8s-operator,k8s-proxy}: apply accept-routes configuration to k8s-proxy (#16522) This commit modifies the k8s-operator and k8s-proxy to support passing down the accept-routes configuration from the proxy class as a configuration value read and used by the k8s-proxy when ran as a distinct container managed by the operator. Updates #13358 Signed-off-by: David Bond --- cmd/k8s-operator/proxygroup.go | 4 ++++ cmd/k8s-proxy/k8s-proxy.go | 19 +++++++++++++++---- kube/k8s-proxy/conf/conf.go | 1 + 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 7b8a0754e6d0b..66b6c96e3c25c 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -820,6 +820,10 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p cfg.ServerURL = &r.loginServer } + if proxyClass != nil && proxyClass.Spec.TailscaleConfig != nil { + cfg.AcceptRoutes = &proxyClass.Spec.TailscaleConfig.AcceptRoutes + } + cfgB, err := json.Marshal(cfg) if err != nil { return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 81a5a8483af26..7dcf6c2ab5809 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -114,12 +114,13 @@ func run(logger *zap.SugaredLogger) error { group, groupCtx := errgroup.WithContext(ctx) + lc, err := ts.LocalClient() + if err != nil { + return fmt.Errorf("error getting local client: %w", err) + } + // Setup for updating state keys. 
if podUID != "" { - lc, err := ts.LocalClient() - if err != nil { - return fmt.Errorf("error getting local client: %w", err) - } w, err := lc.WatchIPNBus(groupCtx, ipn.NotifyInitialNetMap) if err != nil { return fmt.Errorf("error watching IPN bus: %w", err) @@ -135,6 +136,16 @@ func run(logger *zap.SugaredLogger) error { }) } + if cfg.Parsed.AcceptRoutes != nil { + _, err = lc.EditPrefs(groupCtx, &ipn.MaskedPrefs{ + RouteAllSet: true, + Prefs: ipn.Prefs{RouteAll: *cfg.Parsed.AcceptRoutes}, + }) + if err != nil { + return fmt.Errorf("error editing prefs: %w", err) + } + } + // Setup for the API server proxy. restConfig, err := getRestConfig(logger) if err != nil { diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index 2901e7b44852e..fba4a39a420a1 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -54,6 +54,7 @@ type ConfigV1Alpha1 struct { App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy. ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. + AcceptRoutes *bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. 
} type KubeAPIServer struct { From f9bfd8118ae85b5782a29b442acb9ec3764caacb Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 10 Jul 2025 12:41:14 -0700 Subject: [PATCH 0101/1093] wgengine/magicsock: resolve epAddr collisions across peer relay conns (#16526) Updates tailscale/corp#30042 Updates tailscale/corp#29422 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 9 +++-- wgengine/magicsock/magicsock.go | 57 +++++++++++++++++++++++----- wgengine/magicsock/magicsock_test.go | 40 +++++++++++++++++++ 3 files changed, 94 insertions(+), 12 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index bfafec5ab6297..c4ca812969bf9 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -499,8 +499,9 @@ func (de *endpoint) initFakeUDPAddr() { } // noteRecvActivity records receive activity on de, and invokes -// Conn.noteRecvActivity no more than once every 10s. -func (de *endpoint) noteRecvActivity(src epAddr, now mono.Time) { +// Conn.noteRecvActivity no more than once every 10s, returning true if it +// was called, otherwise false. 
+func (de *endpoint) noteRecvActivity(src epAddr, now mono.Time) bool { if de.isWireguardOnly { de.mu.Lock() de.bestAddr.ap = src.ap @@ -524,10 +525,12 @@ func (de *endpoint) noteRecvActivity(src epAddr, now mono.Time) { de.lastRecvWG.StoreAtomic(now) if de.c.noteRecvActivity == nil { - return + return false } de.c.noteRecvActivity(de.publicKey) + return true } + return false } func (de *endpoint) discoShort() string { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 18a6bbceb4788..6ce91902d7425 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -27,6 +27,7 @@ import ( "time" "github.com/tailscale/wireguard-go/conn" + "github.com/tailscale/wireguard-go/device" "go4.org/mem" "golang.org/x/net/ipv6" @@ -1632,6 +1633,16 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu } } +// looksLikeInitiationMsg returns true if b looks like a WireGuard initiation +// message, otherwise it returns false. +func looksLikeInitiationMsg(b []byte) bool { + if len(b) == device.MessageInitiationSize && + binary.BigEndian.Uint32(b) == device.MessageInitiationType { + return true + } + return false +} + // receiveIP is the shared bits of ReceiveIPv4 and ReceiveIPv6. // // size is the length of 'b' to report up to wireguard-go (only relevant if @@ -1717,10 +1728,18 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach } now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) - ep.noteRecvActivity(src, now) + connNoted := ep.noteRecvActivity(src, now) if stats := c.stats.Load(); stats != nil { stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, len(b)) } + if src.vni.isSet() && (connNoted || looksLikeInitiationMsg(b)) { + // connNoted is periodic, but we also want to verify if the peer is who + // we believe for all initiation messages, otherwise we could get + // unlucky and fail to JIT configure the "correct" peer. 
+ // TODO(jwhited): relax this to include direct connections + // See http://go/corp/29422 & http://go/corp/30042 + return &lazyEndpoint{c: c, maybeEP: ep, src: src}, size, true + } return ep, size, true } @@ -3787,11 +3806,19 @@ func (c *Conn) SetLastNetcheckReportForTest(ctx context.Context, report *netchec // decrypts it. So we implement the [conn.InitiationAwareEndpoint] and // [conn.PeerAwareEndpoint] interfaces, to allow WireGuard to tell us who it is // later, just-in-time to configure the peer, and set the associated [epAddr] -// in the [peerMap]. Future receives on the associated [epAddr] will then be -// resolvable directly to an [*endpoint]. +// in the [peerMap]. Future receives on the associated [epAddr] will then +// resolve directly to an [*endpoint]. +// +// We also sometimes (see [Conn.receiveIP]) return a [*lazyEndpoint] to +// wireguard-go to verify an [epAddr] resolves to the [*endpoint] (maybeEP) we +// believe it to be, to resolve [epAddr] collisions across peers. [epAddr] +// collisions have a higher chance of occurrence for packets received over peer +// relays versus direct connections, as peer relay connections do not upsert +// into [peerMap] around disco packet reception, but direct connections do. type lazyEndpoint struct { - c *Conn - src epAddr + c *Conn + maybeEP *endpoint // or nil if unknown + src epAddr } var _ conn.InitiationAwareEndpoint = (*lazyEndpoint)(nil) @@ -3812,6 +3839,9 @@ var _ conn.Endpoint = (*lazyEndpoint)(nil) // wireguard-go peer (de)configuration. 
func (le *lazyEndpoint) InitiationMessagePublicKey(peerPublicKey [32]byte) { pubKey := key.NodePublicFromRaw32(mem.B(peerPublicKey[:])) + if le.maybeEP != nil && pubKey.Compare(le.maybeEP.publicKey) == 0 { + return + } le.c.mu.Lock() defer le.c.mu.Unlock() ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) @@ -3821,6 +3851,11 @@ func (le *lazyEndpoint) InitiationMessagePublicKey(peerPublicKey [32]byte) { now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) ep.noteRecvActivity(le.src, now) + // [ep.noteRecvActivity] may end up JIT configuring the peer, but we don't + // update [peerMap] as wireguard-go hasn't decrypted the initiation + // message yet. wireguard-go will call us below in [lazyEndpoint.FromPeer] + // if it successfully decrypts the message, at which point it's safe to + // insert le.src into the [peerMap] for ep. } func (le *lazyEndpoint) ClearSrc() {} @@ -3845,12 +3880,16 @@ func (le *lazyEndpoint) DstToBytes() []byte { } // FromPeer implements [conn.PeerAwareEndpoint]. We return a [*lazyEndpoint] in -// our [conn.ReceiveFunc]s when we are unable to identify the peer at WireGuard -// packet reception time, pre-decryption. If wireguard-go successfully decrypts -// the packet it calls us here, and we update our [peerMap] in order to -// associate le.src with peerPublicKey. +// [Conn.receiveIP] when we are unable to identify the peer at WireGuard +// packet reception time, pre-decryption, or we want wireguard-go to verify who +// we believe it to be (le.maybeEP). If wireguard-go successfully decrypts the +// packet it calls us here, and we update our [peerMap] to associate le.src with +// peerPublicKey. 
func (le *lazyEndpoint) FromPeer(peerPublicKey [32]byte) { pubKey := key.NodePublicFromRaw32(mem.B(peerPublicKey[:])) + if le.maybeEP != nil && pubKey.Compare(le.maybeEP.publicKey) == 0 { + return + } le.c.mu.Lock() defer le.c.mu.Unlock() ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index aea2de17dc223..0515162c72b9f 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3611,3 +3611,43 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }) } } + +func Test_looksLikeInitiationMsg(t *testing.T) { + initMsg := make([]byte, device.MessageInitiationSize) + binary.BigEndian.PutUint32(initMsg, device.MessageInitiationType) + initMsgSizeTransportType := make([]byte, device.MessageInitiationSize) + binary.BigEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) + tests := []struct { + name string + b []byte + want bool + }{ + { + name: "valid initiation", + b: initMsg, + want: true, + }, + { + name: "invalid message type field", + b: initMsgSizeTransportType, + want: false, + }, + { + name: "too small", + b: initMsg[:device.MessageInitiationSize-1], + want: false, + }, + { + name: "too big", + b: append(initMsg, 0), + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := looksLikeInitiationMsg(tt.b); got != tt.want { + t.Errorf("looksLikeInitiationMsg() = %v, want %v", got, tt.want) + } + }) + } +} From bebc796e6c124e090d01c4651fe79cac771a0b30 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 10 Jul 2025 12:45:05 -0700 Subject: [PATCH 0102/1093] ipn/ipnlocal: add traffic-steering nodecap (#16529) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To signal when a tailnet has the `traffic-steering` feature flag, Control will send a `traffic-steering` NodeCapability in netmap’s AllCaps. 
This patch adds `tailcfg.NodeAttrTrafficSteering` so that it can be used in the control plane. Future patches will implement the actual steering mechanisms. Updates tailscale/corp#29966 Signed-off-by: Simon Law --- tailcfg/tailcfg.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 6c88217de01f4..e55389f182f56 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2622,6 +2622,10 @@ const ( // NodeAttrMagicDNSPeerAAAA is a capability that tells the node's MagicDNS // server to answer AAAA queries about its peers. See tailscale/tailscale#1152. NodeAttrMagicDNSPeerAAAA NodeCapability = "magicdns-aaaa" + + // NodeAttrTrafficSteering configures the node to use the traffic + // steering subsystem for via routes. See tailscale/corp#29966. + NodeAttrTrafficSteering NodeCapability = "traffic-steering" ) // SetDNSRequest is a request to add a DNS record. From fbc6a9ec5a797d9a551e74a90bc96947825b7719 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 10 Jul 2025 11:14:08 -0700 Subject: [PATCH 0103/1093] all: detect JetKVM and specialize a handful of things for it Updates #16524 Change-Id: I183428de8c65d7155d82979d2d33f031c22e3331 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled.go | 15 +++++++-------- logpolicy/logpolicy.go | 3 +++ net/dns/manager_linux.go | 5 +++++ net/tstun/tun.go | 20 +++++++++++++++++++- net/tstun/tun_linux.go | 10 +++++++++- paths/paths.go | 32 ++++++++++++++++++++++++++++++++ paths/paths_unix.go | 3 +++ util/linuxfw/detector.go | 5 +++++ util/linuxfw/iptables_runner.go | 5 +++-- version/distro/distro.go | 3 +++ 10 files changed, 89 insertions(+), 12 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 3987b0c26927f..ab1590132ece6 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -257,14 +257,13 @@ func main() { // Only apply a default statepath when neither have been provided, so that a // user may specify 
only --statedir if they wish. if args.statepath == "" && args.statedir == "" { - if runtime.GOOS == "plan9" { - home, err := os.UserHomeDir() - if err != nil { - log.Fatalf("failed to get home directory: %v", err) - } - args.statedir = filepath.Join(home, "tailscale-state") - if err := os.MkdirAll(args.statedir, 0700); err != nil { - log.Fatalf("failed to create state directory: %v", err) + if paths.MakeAutomaticStateDir() { + d := paths.DefaultTailscaledStateDir() + if d != "" { + args.statedir = d + if err := os.MkdirAll(d, 0700); err != nil { + log.Fatalf("failed to create state directory: %v", err) + } } } else { args.statepath = paths.DefaultTailscaledStateFile() diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index b84528d7b76bd..f5c475712afe3 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -224,6 +224,9 @@ func LogsDir(logf logger.Logf) string { logf("logpolicy: using LocalAppData dir %v", dir) return dir case "linux": + if distro.Get() == distro.JetKVM { + return "/userdata/tailscale/var" + } // STATE_DIRECTORY is set by systemd 240+ but we support older // systems-d. For example, Ubuntu 18.04 (Bionic Beaver) is 237. systemdStateDir := os.Getenv("STATE_DIRECTORY") diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 6bd368f50e330..643cc280af1e3 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -22,6 +22,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/cmpver" + "tailscale.com/version/distro" ) type kv struct { @@ -38,6 +39,10 @@ var publishOnce sync.Once // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. 
func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { + if distro.Get() == distro.JetKVM { + return NewNoopManager() + } + env := newOSConfigEnv{ fs: directFS{}, dbusPing: dbusPing, diff --git a/net/tstun/tun.go b/net/tstun/tun.go index 88679daa24b6c..bfdaddf58b283 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -24,6 +24,9 @@ import ( // CrateTAP is the hook set by feature/tap. var CreateTAP feature.Hook[func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error)] +// modprobeTunHook is a Linux-specific hook to run "/sbin/modprobe tun". +var modprobeTunHook feature.Hook[func() error] + // New returns a tun.Device for the requested device name, along with // the OS-dependent name that was allocated to the device. func New(logf logger.Logf, tunName string) (tun.Device, string, error) { @@ -51,7 +54,22 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { if runtime.GOOS == "plan9" { cleanUpPlan9Interfaces() } - dev, err = tun.CreateTUN(tunName, int(DefaultTUNMTU())) + // Try to create the TUN device up to two times. If it fails + // the first time and we're on Linux, try a desperate + // "modprobe tun" to load the tun module and try again. + for try := range 2 { + dev, err = tun.CreateTUN(tunName, int(DefaultTUNMTU())) + if err == nil || !modprobeTunHook.IsSet() { + if try > 0 { + logf("created TUN device %q after doing `modprobe tun`", tunName) + } + break + } + if modprobeTunHook.Get()() != nil { + // modprobe failed; no point trying again. 
+ break + } + } } if err != nil { return nil, "", err diff --git a/net/tstun/tun_linux.go b/net/tstun/tun_linux.go index 9600ceb77328f..05cf58c17df8a 100644 --- a/net/tstun/tun_linux.go +++ b/net/tstun/tun_linux.go @@ -17,6 +17,14 @@ import ( func init() { tunDiagnoseFailure = diagnoseLinuxTUNFailure + modprobeTunHook.Set(func() error { + _, err := modprobeTun() + return err + }) +} + +func modprobeTun() ([]byte, error) { + return exec.Command("/sbin/modprobe", "tun").CombinedOutput() } func diagnoseLinuxTUNFailure(tunName string, logf logger.Logf, createErr error) { @@ -36,7 +44,7 @@ func diagnoseLinuxTUNFailure(tunName string, logf logger.Logf, createErr error) kernel := utsReleaseField(&un) logf("Linux kernel version: %s", kernel) - modprobeOut, err := exec.Command("/sbin/modprobe", "tun").CombinedOutput() + modprobeOut, err := modprobeTun() if err == nil { logf("'modprobe tun' successful") // Either tun is currently loaded, or it's statically diff --git a/paths/paths.go b/paths/paths.go index 28c3be02a9c86..6c9c3fa6c9dea 100644 --- a/paths/paths.go +++ b/paths/paths.go @@ -6,6 +6,7 @@ package paths import ( + "log" "os" "path/filepath" "runtime" @@ -70,6 +71,37 @@ func DefaultTailscaledStateFile() string { return "" } +// DefaultTailscaledStateDir returns the default state directory +// to use for tailscaled, for use when the user provided neither +// a state directory or state file path to use. +// +// It returns the empty string if there's no reasonable default. +func DefaultTailscaledStateDir() string { + if runtime.GOOS == "plan9" { + home, err := os.UserHomeDir() + if err != nil { + log.Fatalf("failed to get home directory: %v", err) + } + return filepath.Join(home, "tailscale-state") + } + return filepath.Dir(DefaultTailscaledStateFile()) +} + +// MakeAutomaticStateDir reports whether the platform +// automatically creates the state directory for tailscaled +// when it's absent. 
+func MakeAutomaticStateDir() bool { + switch runtime.GOOS { + case "plan9": + return true + case "linux": + if distro.Get() == distro.JetKVM { + return true + } + } + return false +} + // MkStateDir ensures that dirPath, the daemon's configuration directory // containing machine keys etc, both exists and has the correct permissions. // We want it to only be accessible to the user the daemon is running under. diff --git a/paths/paths_unix.go b/paths/paths_unix.go index 50a8b7ca502f7..d317921d59cd9 100644 --- a/paths/paths_unix.go +++ b/paths/paths_unix.go @@ -21,6 +21,9 @@ func init() { } func statePath() string { + if runtime.GOOS == "linux" && distro.Get() == distro.JetKVM { + return "/userdata/tailscale/var/tailscaled.state" + } switch runtime.GOOS { case "linux", "illumos", "solaris": return "/var/lib/tailscale/tailscaled.state" diff --git a/util/linuxfw/detector.go b/util/linuxfw/detector.go index f3ee4aa0b84f0..fffa523afdcf4 100644 --- a/util/linuxfw/detector.go +++ b/util/linuxfw/detector.go @@ -23,6 +23,11 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { hostinfo.SetFirewallMode("nft-gokrazy") return FirewallModeNfTables } + if distro.Get() == distro.JetKVM { + // JetKVM doesn't have iptables. + hostinfo.SetFirewallMode("nft-jetkvm") + return FirewallModeNfTables + } mode := envknob.String("TS_DEBUG_FIREWALL_MODE") // If the envknob isn't set, fall back to the pref suggested by c2n or diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index 9a6fc02248e62..78844065a4edd 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -688,8 +688,9 @@ func (i *iptablesRunner) DelMagicsockPortRule(port uint16, network string) error // IPTablesCleanUp removes all Tailscale added iptables rules. // Any errors that occur are logged to the provided logf. 
func IPTablesCleanUp(logf logger.Logf) { - if distro.Get() == distro.Gokrazy { - // Gokrazy uses nftables and doesn't have the "iptables" command. + switch distro.Get() { + case distro.Gokrazy, distro.JetKVM: + // These use nftables and don't have the "iptables" command. // Avoid log spam on cleanup. (#12277) return } diff --git a/version/distro/distro.go b/version/distro/distro.go index f7997e1d9f81b..dd5e0b21b2eb1 100644 --- a/version/distro/distro.go +++ b/version/distro/distro.go @@ -31,6 +31,7 @@ const ( Unraid = Distro("unraid") Alpine = Distro("alpine") UBNT = Distro("ubnt") // Ubiquiti Networks + JetKVM = Distro("jetkvm") ) var distro lazy.SyncValue[Distro] @@ -102,6 +103,8 @@ func linuxDistro() Distro { return Unraid case have("/etc/alpine-release"): return Alpine + case haveDir("/userdata/jetkvm") && haveDir("/sys/kernel/config/usb_gadget/jetkvm"): + return JetKVM } return "" } From fed72e2aa9d55620655ab1790036523cbae9956f Mon Sep 17 00:00:00 2001 From: Dylan Bargatze Date: Thu, 10 Jul 2025 18:22:25 -0400 Subject: [PATCH 0104/1093] cmd/tailscale, ipn/ipnstate, wgengine/magicsock: update ping output for peer relay (#16515) Updates the output for "tailscale ping" to indicate if a peer relay was traversed, just like the output for DERP or direct connections. 
Fixes tailscale/corp#30034 Signed-off-by: Dylan Bargatze --- cmd/tailscale/cli/ping.go | 4 +++- ipn/ipnstate/ipnstate.go | 12 ++++++++++-- tailcfg/tailcfg.go | 8 ++++++-- wgengine/magicsock/magicsock.go | 9 +++++---- 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/cmd/tailscale/cli/ping.go b/cmd/tailscale/cli/ping.go index 3a909f30dee86..d438cb2286d4c 100644 --- a/cmd/tailscale/cli/ping.go +++ b/cmd/tailscale/cli/ping.go @@ -152,7 +152,9 @@ func runPing(ctx context.Context, args []string) error { } latency := time.Duration(pr.LatencySeconds * float64(time.Second)).Round(time.Millisecond) via := pr.Endpoint - if pr.DERPRegionID != 0 { + if pr.PeerRelay != "" { + via = fmt.Sprintf("peer-relay(%s)", pr.PeerRelay) + } else if pr.DERPRegionID != 0 { via = fmt.Sprintf("DERP(%s)", pr.DERPRegionCode) } if via == "" { diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index fdfd4e3346958..e7ae2d62bd6b2 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -701,10 +701,17 @@ type PingResult struct { Err string LatencySeconds float64 - // Endpoint is the ip:port if direct UDP was used. - // It is not currently set for TSMP pings. + // Endpoint is a string of the form "{ip}:{port}" if direct UDP was used. It + // is not currently set for TSMP. Endpoint string + // PeerRelay is a string of the form "{ip}:{port}:vni:{vni}" if a peer + // relay was used. It is not currently set for TSMP. Note that this field + // is not omitted during JSON encoding if it contains a zero value. This is + // done for consistency with the Endpoint field; this structure is exposed + // externally via localAPI, so we want to maintain the existing convention. + PeerRelay string + // DERPRegionID is non-zero DERP region ID if DERP was used. // It is not currently set for TSMP pings. 
DERPRegionID int @@ -739,6 +746,7 @@ func (pr *PingResult) ToPingResponse(pingType tailcfg.PingType) *tailcfg.PingRes Err: pr.Err, LatencySeconds: pr.LatencySeconds, Endpoint: pr.Endpoint, + PeerRelay: pr.PeerRelay, DERPRegionID: pr.DERPRegionID, DERPRegionCode: pr.DERPRegionCode, PeerAPIPort: pr.PeerAPIPort, diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index e55389f182f56..ab8add5b83a3e 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1854,10 +1854,14 @@ type PingResponse struct { // omitted, Err should contain information as to the cause. LatencySeconds float64 `json:",omitempty"` - // Endpoint is the ip:port if direct UDP was used. - // It is not currently set for TSMP pings. + // Endpoint is a string of the form "{ip}:{port}" if direct UDP was used. It + // is not currently set for TSMP. Endpoint string `json:",omitempty"` + // PeerRelay is a string of the form "{ip}:{port}:vni:{vni}" if a peer + // relay was used. It is not currently set for TSMP. + PeerRelay string `json:",omitempty"` + // DERPRegionID is non-zero DERP region ID if DERP was used. // It is not currently set for TSMP pings. DERPRegionID int `json:",omitempty"` diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 6ce91902d7425..b5087b02e530e 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1106,10 +1106,11 @@ func (c *Conn) Ping(peer tailcfg.NodeView, res *ipnstate.PingResult, size int, c func (c *Conn) populateCLIPingResponseLocked(res *ipnstate.PingResult, latency time.Duration, ep epAddr) { res.LatencySeconds = latency.Seconds() if ep.ap.Addr() != tailcfg.DerpMagicIPAddr { - // TODO(jwhited): if ep.vni.isSet() we are using a Tailscale client - // as a UDP relay; update PingResult and its interpretation by - // "tailscale ping" to make this clear. 
- res.Endpoint = ep.String() + if ep.vni.isSet() { + res.PeerRelay = ep.String() + } else { + res.Endpoint = ep.String() + } return } regionID := int(ep.ap.Port()) From 5f678b9becfbd13b3a5ec57c48fc4bd78d8353db Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 9 Jul 2025 17:41:55 -0500 Subject: [PATCH 0105/1093] docs/windows/policy: add ExitNode.AllowOverride as an option to ExitNodeID policy In this PR, we make ExitNode.AllowOverride configurable as part of the Exit Node ADMX policy setting, similarly to Always On w/ "Disconnect with reason" option. Updates tailscale/corp#29969 Signed-off-by: Nick Khyl --- docs/windows/policy/en-US/tailscale.adml | 4 +++- docs/windows/policy/tailscale.admx | 12 ++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index c09d847bc7c0d..2e143d49c9c6c 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -23,6 +23,7 @@ Tailscale UI customization Settings + Allowed Allowed (with audit) Not Allowed Require using a specific Tailscale coordination server @@ -69,7 +70,7 @@ See https://tailscale.com/kb/1315/mdm-keys#set-an-auth-key for more details.]]>< Require using a specific Exit Node + User override:
Registration mode: diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index 0a8aa1a75eb50..0da8aef42ded6 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -115,6 +115,18 @@ + + + + + + + + + + + + From bd29a1c8c1000d620b26dcb31363c7b678463c2d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 10 Jul 2025 18:52:01 -0700 Subject: [PATCH 0106/1093] feature/relayserver,wgengine/magicsock: remove WIP gating of peer relay (#16533) Updates tailscale/corp#30051 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 4 ---- wgengine/magicsock/magicsock.go | 3 +-- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index f4a533193999e..d0ad27624f09f 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -14,7 +14,6 @@ import ( "sync" "time" - "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" @@ -133,9 +132,6 @@ func (e *extension) relayServerOrInit() (relayServer, error) { if e.hasNodeAttrDisableRelayServer { return nil, errors.New("disable-relay-server node attribute is present") } - if !envknob.UseWIPCode() { - return nil, errors.New("TAILSCALE_USE_WIP_CODE envvar is not set") - } var err error e.server, err = udprelay.NewServer(e.logf, *e.port, nil) if err != nil { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index b5087b02e530e..14feed32b5929 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2724,8 +2724,7 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { relayClientEnabled := update.SelfNode.Valid() && !update.SelfNode.HasCap(tailcfg.NodeAttrDisableRelayClient) && - !update.SelfNode.HasCap(tailcfg.NodeAttrOnlyTCP443) && - envknob.UseWIPCode() + !update.SelfNode.HasCap(tailcfg.NodeAttrOnlyTCP443) c.mu.Lock() relayClientChanged := 
c.relayClientEnabled != relayClientEnabled From c18ba4470b452112b83975f042705e950ef7d232 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 10 Jul 2025 22:15:55 -0700 Subject: [PATCH 0107/1093] ipn/ipnlocal: add traffic steering support to exit-node suggestions (#16527) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When `tailscale exit-node suggest` contacts the LocalAPI for a suggested exit node, the client consults its netmap for peers that contain the `suggest-exit-node` peercap. It currently uses a series of heuristics to determine the exit node to suggest. When the `traffic-steering` feature flag is enabled on its tailnet, the client will defer to Control’s priority scores for a particular peer. These scores, in `tailcfg.Hostinfo.Location.Priority`, were historically only used for Mullvad exit nodes, but they have now been extended to score any peer that could host a redundant resource. Client capability version 119 is the earliest client that understands these traffic steering scores. Control tells the client to switch to rely on these scores by adding `tailcfg.NodeAttrTrafficSteering` to its `AllCaps`. Updates tailscale/corp#29966 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 134 +++++++++++- ipn/ipnlocal/local_test.go | 417 +++++++++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 3 +- 3 files changed, 546 insertions(+), 8 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 48eceb36c1ab4..4ed012f2e46f8 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7675,13 +7675,10 @@ func allowedAutoRoute(ipp netip.Prefix) bool { var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") -// suggestExitNodeLocked computes a suggestion based on the current netmap and last netcheck report. If -// there are multiple equally good options, one is selected at random, so the result is not stable. 
To be -// eligible for consideration, the peer must have NodeAttrSuggestExitNode in its CapMap. -// -// Currently, peers with a DERP home are preferred over those without (typically this means Mullvad). -// Peers are selected based on having a DERP home that is the lowest latency to this device. For peers -// without a DERP home, we look for geographic proximity to this device's DERP home. +// suggestExitNodeLocked computes a suggestion based on the current netmap and +// other optional factors. If there are multiple equally good options, one may +// be selected at random, so the result is not stable. To be eligible for +// consideration, the peer must have NodeAttrSuggestExitNode in its CapMap. // // b.mu.lock() must be held. func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggestionResponse, err error) { @@ -7743,7 +7740,32 @@ func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] { return s } +// suggestExitNode returns a suggestion for reasonably good exit node based on +// the current netmap and the previous suggestion. func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { + switch { + case nb.SelfHasCap(tailcfg.NodeAttrTrafficSteering): + // The traffic-steering feature flag is enabled on this tailnet. + return suggestExitNodeUsingTrafficSteering(nb, prevSuggestion, allowList) + default: + return suggestExitNodeUsingDERP(report, nb, prevSuggestion, selectRegion, selectNode, allowList) + } +} + +// suggestExitNodeUsingDERP is the classic algorithm used to suggest exit nodes, +// before traffic steering was implemented. This handles the plain failover +// case, in addition to the optional Regional Routing. +// +// It computes a suggestion based on the current netmap and last netcheck +// report. 
If there are multiple equally good options, one is selected at +// random, so the result is not stable. To be eligible for consideration, the +// peer must have NodeAttrSuggestExitNode in its CapMap. +// +// Currently, peers with a DERP home are preferred over those without (typically +// this means Mullvad). Peers are selected based on having a DERP home that is +// the lowest latency to this device. For peers without a DERP home, we look for +// geographic proximity to this device's DERP home. +func suggestExitNodeUsingDERP(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { netMap := nb.NetMap() if report == nil || report.PreferredDERP == 0 || netMap == nil || netMap.DERPMap == nil { return res, ErrNoPreferredDERP @@ -7864,6 +7886,104 @@ func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion ta return res, nil } +var ErrNoNetMap = errors.New("no network map, try again later") + +// suggestExitNodeUsingTrafficSteering uses traffic steering priority scores to +// pick one of the best exit nodes. These priorities are provided by Control in +// the node’s [tailcfg.Location]. To be eligible for consideration, the node +// must have NodeAttrSuggestExitNode in its CapMap. 
+func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNodeID, allowed set.Set[tailcfg.StableNodeID]) (apitype.ExitNodeSuggestionResponse, error) { + nm := nb.NetMap() + if nm == nil { + return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap + } + + if !nb.SelfHasCap(tailcfg.NodeAttrTrafficSteering) { + panic("missing traffic-steering capability") + } + + peers := nm.Peers + nodes := make([]tailcfg.NodeView, 0, len(peers)) + + for _, p := range peers { + if !p.Valid() { + continue + } + if allowed != nil && !allowed.Contains(p.StableID()) { + continue + } + if !p.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) { + continue + } + if !tsaddr.ContainsExitRoutes(p.AllowedIPs()) { + continue + } + if p.StableID() == prev { + // Prevent flapping: since prev is a valid suggestion, + // force prev to be the only valid pick. + nodes = []tailcfg.NodeView{p} + break + } + nodes = append(nodes, p) + } + + var pick tailcfg.NodeView + + scores := make(map[tailcfg.NodeID]int, len(nodes)) + score := func(n tailcfg.NodeView) int { + id := n.ID() + s, ok := scores[id] + if !ok { + s = 0 // score of zero means incomparable + if hi := n.Hostinfo(); hi.Valid() { + if loc := hi.Location(); loc.Valid() { + s = loc.Priority() + } + } + scores[id] = s + } + return s + } + + if len(nodes) > 0 { + // Find the highest scoring exit nodes. + slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int { + return cmp.Compare(score(b), score(a)) // reverse sort + }) + + // Find the top exit nodes, which all have the same score. + topI := len(nodes) + ts := score(nodes[0]) + for i, n := range nodes[1:] { + if score(n) < ts { + // n is the first node with a lower score. + // Make nodes[:topI] to slice the top exit nodes. + topI = i + 1 + break + } + } + + // TODO(sfllaw): add a temperature knob so that this client has + // a chance of picking the next best option. 
+ randSeed := uint64(nm.SelfNode.ID()) + pick = nodes[rands.IntN(randSeed, topI)] + } + + if !pick.Valid() { + return apitype.ExitNodeSuggestionResponse{}, nil + } + res := apitype.ExitNodeSuggestionResponse{ + ID: pick.StableID(), + Name: pick.Name(), + } + if hi := pick.Hostinfo(); hi.Valid() { + if loc := hi.Location(); loc.Valid() { + res.Location = loc + } + } + return res, nil +} + // pickWeighted chooses the node with highest priority given a list of mullvad nodes. func pickWeighted(candidates []tailcfg.NodeView) []tailcfg.NodeView { maxWeight := 0 diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index bb7f433c02cbe..0b39c45c28f7d 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4229,6 +4229,23 @@ func withLocation(loc tailcfg.LocationView) peerOptFunc { } } +func withLocationPriority(pri int) peerOptFunc { + return func(n *tailcfg.Node) { + var hi *tailcfg.Hostinfo + if n.Hostinfo.Valid() { + hi = n.Hostinfo.AsStruct() + } else { + hi = new(tailcfg.Hostinfo) + } + if hi.Location == nil { + hi.Location = new(tailcfg.Location) + } + hi.Location.Priority = pri + + n.Hostinfo = hi.View() + } +} + func withExitRoutes() peerOptFunc { return func(n *tailcfg.Node) { n.AllowedIPs = append(n.AllowedIPs, tsaddr.ExitRoutes()...) 
@@ -4895,6 +4912,406 @@ func TestSuggestExitNodeLongLatDistance(t *testing.T) { } } +func TestSuggestExitNodeTrafficSteering(t *testing.T) { + city := &tailcfg.Location{ + Country: "Canada", + CountryCode: "CA", + City: "Montreal", + CityCode: "MTR", + Latitude: 45.5053, + Longitude: -73.5525, + } + noLatLng := &tailcfg.Location{ + Country: "Canada", + CountryCode: "CA", + City: "Montreal", + CityCode: "MTR", + } + + selfNode := tailcfg.Node{ + ID: 0, // randomness is seeded off NetMap.SelfNode.ID + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.64.1.1/32"), + netip.MustParsePrefix("fe70::1/128"), + }, + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrTrafficSteering: []tailcfg.RawMessage{}, + }, + } + + for _, tt := range []struct { + name string + + netMap *netmap.NetworkMap + lastExit tailcfg.StableNodeID + allowPolicy []tailcfg.StableNodeID + + wantID tailcfg.StableNodeID + wantName string + wantLoc *tailcfg.Location + wantPri int + + wantErr error + }{ + { + name: "no-netmap", + netMap: nil, + wantErr: ErrNoNetMap, + }, + { + name: "no-nodes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{}, + }, + wantID: "", + }, + { + name: "no-exit-nodes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1), + }, + }, + wantID: "", + }, + { + name: "exit-node-without-suggestion", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes()), + }, + }, + wantID: "", + }, + { + name: "suggested-exit-node-without-routes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withSuggest()), + }, + }, + wantID: "", + }, + { + name: "suggested-exit-node", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable1", + wantName: "peer1", + }, + { + name: 
"many-suggested-exit-nodes", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest()), + makePeer(3, + withExitRoutes(), + withSuggest()), + makePeer(4, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable3", + wantName: "peer3", + }, + { + name: "suggested-exit-node-was-last-suggested", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest()), + makePeer(3, + withExitRoutes(), + withSuggest()), + makePeer(4, + withExitRoutes(), + withSuggest()), + }, + }, + lastExit: "stable2", // overrides many-suggested-exit-nodes + wantID: "stable2", + wantName: "peer2", + }, + { + name: "suggested-exit-node-was-never-suggested", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest()), + makePeer(3, + withExitRoutes(), + withSuggest()), + makePeer(4, + withExitRoutes(), + withSuggest()), + }, + }, + lastExit: "stable10", + wantID: "stable3", // matches many-suggested-exit-nodes + wantName: "peer3", + }, + { + name: "exit-nodes-with-and-without-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + makePeer(2, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantPri: 1, + }, + { + name: "exit-nodes-without-and-with-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + }, + }, + wantID: "stable2", + wantName: "peer2", + wantPri: 1, 
+ }, + { + name: "exit-nodes-with-negative-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(-1)), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(-2)), + makePeer(3, + withExitRoutes(), + withSuggest(), + withLocationPriority(-3)), + makePeer(4, + withExitRoutes(), + withSuggest(), + withLocationPriority(-4)), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantPri: -1, + }, + { + name: "exit-nodes-no-priority-beats-negative-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(-1)), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(-2)), + makePeer(3, + withExitRoutes(), + withSuggest()), + }, + }, + wantID: "stable3", + wantName: "peer3", + }, + { + name: "exit-nodes-same-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + makePeer(2, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + makePeer(3, + withExitRoutes(), + withSuggest(), + withLocationPriority(1)), + makePeer(4, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + makePeer(5, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + makePeer(6, + withExitRoutes(), + withSuggest()), + makePeer(7, + withExitRoutes(), + withSuggest(), + withLocationPriority(2)), // top + }, + }, + wantID: "stable5", + wantName: "peer5", + wantPri: 2, + }, + { + name: "suggested-exit-node-with-city", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(city.View())), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantLoc: city, + }, + { + 
name: "suggested-exit-node-with-city-and-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(city.View()), + withLocationPriority(1)), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantLoc: city, + wantPri: 1, + }, + { + name: "suggested-exit-node-without-latlng", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(noLatLng.View())), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantLoc: noLatLng, + }, + { + name: "suggested-exit-node-without-latlng-with-priority", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest(), + withLocation(noLatLng.View()), + withLocationPriority(1)), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantLoc: noLatLng, + wantPri: 1, + }, + } { + t.Run(tt.name, func(t *testing.T) { + var allowList set.Set[tailcfg.StableNodeID] + if tt.allowPolicy != nil { + allowList = set.SetOf(tt.allowPolicy) + } + + // HACK: NetMap.AllCaps is populated by Control: + if tt.netMap != nil { + caps := maps.Keys(tt.netMap.SelfNode.CapMap().AsMap()) + tt.netMap.AllCaps = set.SetOf(slices.Collect(caps)) + } + + nb := newNodeBackend(t.Context(), eventbus.New()) + defer nb.shutdown(errShutdown) + nb.SetNetMap(tt.netMap) + + got, err := suggestExitNodeUsingTrafficSteering(nb, tt.lastExit, allowList) + if tt.wantErr == nil && err != nil { + t.Fatalf("err=%v, want nil", err) + } + if tt.wantErr != nil && !errors.Is(err, tt.wantErr) { + t.Fatalf("err=%v, want %v", err, tt.wantErr) + } + + if got.Name != tt.wantName { + t.Errorf("name=%q, want %q", got.Name, tt.wantName) + } + + if got.ID != tt.wantID { + t.Errorf("ID=%q, want %q", got.ID, tt.wantID) + } + + wantLoc := tt.wantLoc + if tt.wantPri != 0 { + if wantLoc == nil { + wantLoc = 
new(tailcfg.Location) + } + wantLoc.Priority = tt.wantPri + } + if diff := cmp.Diff(got.Location.AsStruct(), wantLoc); diff != "" { + t.Errorf("location mismatch (+want -got)\n%s", diff) + } + }) + } +} + func TestMinLatencyDERPregion(t *testing.T) { tests := []struct { name string diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index ab8add5b83a3e..53c4683c1b000 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -163,7 +163,8 @@ type CapabilityVersion int // - 116: 2025-05-05: Client serves MagicDNS "AAAA" if NodeAttrMagicDNSPeerAAAA set on self node // - 117: 2025-05-28: Client understands DisplayMessages (structured health messages), but not necessarily PrimaryAction. // - 118: 2025-07-01: Client sends Hostinfo.StateEncrypted to report whether the state file is encrypted at rest (#15830) -const CurrentCapabilityVersion CapabilityVersion = 118 +// - 119: 2025-07-10: Client uses Hostinfo.Location.Priority to prioritize one route over another. +const CurrentCapabilityVersion CapabilityVersion = 119 // ID is an integer ID for a user, node, or login allocated by the // control plane. 
From 04e8d21b0bcaab54f1906fb6a0ebc507ed7114ea Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 10 Jul 2025 22:21:08 -0700 Subject: [PATCH 0108/1093] go.mod: bump wg-go to fix keepalive detection (#16535) Updates tailscale/corp#30364 Signed-off-by: Jordan Whited --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e89a383a62726..f040d7799768d 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8 + github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9 github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index 062af66622b85..ea17b11821392 100644 --- a/go.sum +++ b/go.sum @@ -975,8 +975,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8 h1:Yjg/+1VVRcdY3DL9fs8g+QnZ1aizotU0pp0VSOSCuTQ= -github.com/tailscale/wireguard-go v0.0.0-20250707220504-1f398ae148a8/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9 h1:kSzi/ugdekAxhcVdCxH6er7OjoNc2oDRcimWJDvnRFM= +github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= 
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= From 30da2e1c3206b7e45b42fd3fddfe1d9081c6982d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 11 Jul 2025 08:51:02 -0700 Subject: [PATCH 0109/1093] cmd/tailscale/cli: add "configure jetkvm" subcommand To write the init script. And fix the JetKVM detection to work during early boot while the filesystem and modules are still being loaded; it wasn't being detected on early boot and then tailscaled was failing to start because it didn't know it was on JetKVM and didn't modprobe tun. Updates #16524 Change-Id: I0524ca3abd7ace68a69af96aab4175d32c07e116 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/configure-jetkvm.go | 81 +++++++++++++++++++++++++++ cmd/tailscale/cli/configure.go | 3 + version/distro/distro.go | 11 +++- 3 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 cmd/tailscale/cli/configure-jetkvm.go diff --git a/cmd/tailscale/cli/configure-jetkvm.go b/cmd/tailscale/cli/configure-jetkvm.go new file mode 100644 index 0000000000000..a8e0a7cb542ef --- /dev/null +++ b/cmd/tailscale/cli/configure-jetkvm.go @@ -0,0 +1,81 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && arm + +package cli + +import ( + "bytes" + "context" + "errors" + "flag" + "os" + "runtime" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/version/distro" +) + +func init() { + maybeJetKVMConfigureCmd = jetKVMConfigureCmd +} + +func jetKVMConfigureCmd() *ffcli.Command { + if runtime.GOOS != "linux" || distro.Get() != distro.JetKVM { + return nil + } + return &ffcli.Command{ + Name: "jetkvm", + Exec: runConfigureJetKVM, + ShortUsage: "tailscale configure jetkvm", + ShortHelp: 
"Configure JetKVM to run tailscaled at boot", + LongHelp: strings.TrimSpace(` +This command configures the JetKVM host to run tailscaled at boot. +`), + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("jetkvm") + return fs + })(), + } +} + +func runConfigureJetKVM(ctx context.Context, args []string) error { + if len(args) > 0 { + return errors.New("unknown arguments") + } + if runtime.GOOS != "linux" || distro.Get() != distro.JetKVM { + return errors.New("only implemented on JetKVM") + } + err := os.WriteFile("/etc/init.d/S22tailscale", bytes.TrimLeft([]byte(` +#!/bin/sh +# /etc/init.d/S22tailscale +# Start/stop tailscaled + +case "$1" in + start) + /userdata/tailscale/tailscaled > /dev/null 2>&1 & + ;; + stop) + killall tailscaled + ;; + *) + echo "Usage: $0 {start|stop}" + exit 1 + ;; +esac +`), "\n"), 0755) + if err != nil { + return err + } + + if err := os.Symlink("/userdata/tailscale/tailscale", "/bin/tailscale"); err != nil { + if !os.IsExist(err) { + return err + } + } + + printf("Done. Now restart your JetKVM.\n") + return nil +} diff --git a/cmd/tailscale/cli/configure.go b/cmd/tailscale/cli/configure.go index acb416755a586..da6278ce24330 100644 --- a/cmd/tailscale/cli/configure.go +++ b/cmd/tailscale/cli/configure.go @@ -10,6 +10,8 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" ) +var maybeJetKVMConfigureCmd func() *ffcli.Command // non-nil only on Linux/arm for JetKVM + func configureCmd() *ffcli.Command { return &ffcli.Command{ Name: "configure", @@ -29,6 +31,7 @@ services on the host to use Tailscale in more ways. 
synologyConfigureCertCmd(), ccall(maybeSysExtCmd), ccall(maybeVPNConfigCmd), + ccall(maybeJetKVMConfigureCmd), ), } } diff --git a/version/distro/distro.go b/version/distro/distro.go index dd5e0b21b2eb1..0e88bdd2fa297 100644 --- a/version/distro/distro.go +++ b/version/distro/distro.go @@ -9,6 +9,7 @@ import ( "os" "runtime" "strconv" + "strings" "tailscale.com/types/lazy" "tailscale.com/util/lineiter" @@ -103,12 +104,20 @@ func linuxDistro() Distro { return Unraid case have("/etc/alpine-release"): return Alpine - case haveDir("/userdata/jetkvm") && haveDir("/sys/kernel/config/usb_gadget/jetkvm"): + case runtime.GOARCH == "arm" && isDeviceModel("JetKVM"): return JetKVM } return "" } +func isDeviceModel(want string) bool { + if runtime.GOOS != "linux" { + return false + } + v, _ := os.ReadFile("/sys/firmware/devicetree/base/model") + return want == strings.Trim(string(v), "\x00\r\n\t ") +} + func freebsdDistro() Distro { switch { case have("/etc/pfSense-rc"): From 39bf84d1c70d1b31384acbf37dd9f8d36db47404 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 11 Jul 2025 16:01:15 -0700 Subject: [PATCH 0110/1093] cmd/tsidp: set hostinfo.App in tsnet mode (#16544) This makes it easier to track how widely tsidp is used in practice. 
Updates #cleanup Signed-off-by: Andrew Lytvynov --- cmd/tsidp/tsidp.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 43020eaf73e63..6a0c2d89e685e 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -39,6 +39,7 @@ import ( "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" + "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -121,6 +122,7 @@ func main() { } defer cleanup() } else { + hostinfo.SetApp("tsidp") ts := &tsnet.Server{ Hostname: *flagHostname, Dir: *flagDir, From 24062e33d13a4859b7d08f2bcfc518827517784e Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 11 Jul 2025 17:12:23 -0700 Subject: [PATCH 0111/1093] net/udprelay: fix peer relay server deadlock (#16542) Fixes tailscale/corp#30381 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 979ccf71765ed..e2652ae99637f 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -488,14 +488,17 @@ func (s *Server) listenOn(port int) error { // Close closes the server. func (s *Server) Close() error { s.closeOnce.Do(func() { - s.mu.Lock() - defer s.mu.Unlock() s.uc4.Close() if s.uc6 != nil { s.uc6.Close() } close(s.closeCh) s.wg.Wait() + // s.mu must not be held while s.wg.Wait'ing, otherwise we can + // deadlock. The goroutines we are waiting on to return can also + // acquire s.mu. 
+ s.mu.Lock() + defer s.mu.Unlock() clear(s.byVNI) clear(s.byDisco) s.vniPool = nil @@ -564,6 +567,12 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte, rxSocket, otherAFSo func (s *Server) packetReadLoop(readFromSocket, otherSocket *net.UDPConn) { defer func() { + // We intentionally close the [Server] if we encounter a socket read + // error below, at least until socket "re-binding" is implemented as + // part of http://go/corp/30118. + // + // Decrementing this [sync.WaitGroup] _before_ calling [Server.Close] is + // intentional as [Server.Close] waits on it. s.wg.Done() s.Close() }() From f23e4279c42aec766eb6a89562c1fed3a1b97e09 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Sun, 13 Jul 2025 05:47:56 -0700 Subject: [PATCH 0112/1093] types/lazy: add lazy.GMap: a map of lazily computed GValues (#16532) Fixes tailscale/corp#30360 Signed-off-by: Simon Law --- cmd/stund/depaware.txt | 2 +- types/lazy/map.go | 62 +++++++++++++++++++++++++++ types/lazy/map_test.go | 95 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 158 insertions(+), 1 deletion(-) create mode 100644 types/lazy/map.go create mode 100644 types/lazy/map_test.go diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index da768039431fe..81544b7505dc7 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -76,7 +76,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/tailcfg tailscale.com/util/lineiter from tailscale.com/version/distro - tailscale.com/util/mak from tailscale.com/syncs + tailscale.com/util/mak from tailscale.com/syncs+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/slicesx from tailscale.com/tailcfg diff --git a/types/lazy/map.go b/types/lazy/map.go new file mode 100644 index 0000000000000..75a1dd739d3bc --- /dev/null +++ 
b/types/lazy/map.go @@ -0,0 +1,62 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lazy + +import "tailscale.com/util/mak" + +// GMap is a map of lazily computed [GValue] pointers, keyed by a comparable +// type. +// +// Use either Get or GetErr, depending on whether your fill function returns an +// error. +// +// GMap is not safe for concurrent use. +type GMap[K comparable, V any] struct { + store map[K]*GValue[V] +} + +// Len returns the number of entries in the map. +func (s *GMap[K, V]) Len() int { + return len(s.store) +} + +// Set attempts to set the value of k to v, and reports whether it succeeded. +// Set only succeeds if k has never been called with Get/GetErr/Set before. +func (s *GMap[K, V]) Set(k K, v V) bool { + z, ok := s.store[k] + if !ok { + z = new(GValue[V]) + mak.Set(&s.store, k, z) + } + return z.Set(v) +} + +// MustSet sets the value of k to v, or panics if k already has a value. +func (s *GMap[K, V]) MustSet(k K, v V) { + if !s.Set(k, v) { + panic("Set after already filled") + } +} + +// Get returns the value for k, computing it with fill if it's not already +// present. +func (s *GMap[K, V]) Get(k K, fill func() V) V { + z, ok := s.store[k] + if !ok { + z = new(GValue[V]) + mak.Set(&s.store, k, z) + } + return z.Get(fill) +} + +// GetErr returns the value for k, computing it with fill if it's not already +// present. 
+func (s *GMap[K, V]) GetErr(k K, fill func() (V, error)) (V, error) { + z, ok := s.store[k] + if !ok { + z = new(GValue[V]) + mak.Set(&s.store, k, z) + } + return z.GetErr(fill) +} diff --git a/types/lazy/map_test.go b/types/lazy/map_test.go new file mode 100644 index 0000000000000..ec1152b0b802c --- /dev/null +++ b/types/lazy/map_test.go @@ -0,0 +1,95 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package lazy + +import ( + "errors" + "testing" +) + +func TestGMap(t *testing.T) { + var gm GMap[string, int] + n := int(testing.AllocsPerRun(1000, func() { + got := gm.Get("42", fortyTwo) + if got != 42 { + t.Fatalf("got %v; want 42", got) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } +} + +func TestGMapErr(t *testing.T) { + var gm GMap[string, int] + n := int(testing.AllocsPerRun(1000, func() { + got, err := gm.GetErr("42", func() (int, error) { + return 42, nil + }) + if got != 42 || err != nil { + t.Fatalf("got %v, %v; want 42, nil", got, err) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } + + var gmErr GMap[string, int] + wantErr := errors.New("test error") + n = int(testing.AllocsPerRun(1000, func() { + got, err := gmErr.GetErr("42", func() (int, error) { + return 0, wantErr + }) + if got != 0 || err != wantErr { + t.Fatalf("got %v, %v; want 0, %v", got, err, wantErr) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } +} + +func TestGMapSet(t *testing.T) { + var gm GMap[string, int] + if !gm.Set("42", 42) { + t.Fatalf("Set failed") + } + if gm.Set("42", 43) { + t.Fatalf("Set succeeded after first Set") + } + n := int(testing.AllocsPerRun(1000, func() { + got := gm.Get("42", fortyTwo) + if got != 42 { + t.Fatalf("got %v; want 42", got) + } + })) + if n != 0 { + t.Errorf("allocs = %v; want 0", n) + } +} + +func TestGMapMustSet(t *testing.T) { + var gm GMap[string, int] + gm.MustSet("42", 42) + defer func() { + if e := recover(); e == nil { + t.Errorf("unexpected success; 
want panic") + } + }() + gm.MustSet("42", 43) +} + +func TestGMapRecursivePanic(t *testing.T) { + defer func() { + if e := recover(); e != nil { + t.Logf("got panic, as expected") + } else { + t.Errorf("unexpected success; want panic") + } + }() + gm := GMap[string, int]{} + gm.Get("42", func() int { + return gm.Get("42", func() int { return 42 }) + }) +} From bcaea4f24597d840d8a0fd94cafbb2dc0ff7a774 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 14 Jul 2025 15:17:20 +0100 Subject: [PATCH 0113/1093] k8s-operator,sessionrecording: fixing race condition between resize (#16454) messages and cast headers when recording `kubectl attach` sessions Updates #16490 Signed-off-by: chaosinthecrd --- k8s-operator/api-proxy/proxy.go | 51 ++++--- k8s-operator/sessionrecording/fakes/fakes.go | 12 +- k8s-operator/sessionrecording/hijacker.go | 63 +++++--- .../sessionrecording/hijacker_test.go | 2 +- k8s-operator/sessionrecording/spdy/conn.go | 98 +++++++----- .../sessionrecording/spdy/conn_test.go | 98 ++++++------ .../sessionrecording/tsrecorder/tsrecorder.go | 1 + k8s-operator/sessionrecording/ws/conn.go | 115 ++++++++------ k8s-operator/sessionrecording/ws/conn_test.go | 144 ++++++++++-------- sessionrecording/header.go | 10 +- 10 files changed, 351 insertions(+), 243 deletions(-) diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index c3c13e7846915..d33c088de78db 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/transport" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" + "tailscale.com/k8s-operator/sessionrecording" ksr "tailscale.com/k8s-operator/sessionrecording" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" @@ -49,6 +50,7 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn if !authMode { restConfig = rest.AnonymousClientConfig(restConfig) } + cfg, err := restConfig.TransportConfig() if err != nil { 
return nil, fmt.Errorf("could not get rest.TransportConfig(): %w", err) @@ -111,6 +113,8 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { mux.HandleFunc("/", ap.serveDefault) mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecSPDY) mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecWS) + mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/attach", ap.serveAttachSPDY) + mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/attach", ap.serveAttachWS) ap.hs = &http.Server{ // Kubernetes uses SPDY for exec and port-forward, however SPDY is @@ -165,19 +169,31 @@ func (ap *APIServerProxy) serveDefault(w http.ResponseWriter, r *http.Request) { ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } -// serveExecSPDY serves 'kubectl exec' requests for sessions streamed over SPDY, +// serveExecSPDY serves '/exec' requests for sessions streamed over SPDY, // optionally configuring the kubectl exec sessions to be recorded. func (ap *APIServerProxy) serveExecSPDY(w http.ResponseWriter, r *http.Request) { - ap.execForProto(w, r, ksr.SPDYProtocol) + ap.sessionForProto(w, r, ksr.ExecSessionType, ksr.SPDYProtocol) } -// serveExecWS serves 'kubectl exec' requests for sessions streamed over WebSocket, +// serveExecWS serves '/exec' requests for sessions streamed over WebSocket, // optionally configuring the kubectl exec sessions to be recorded. func (ap *APIServerProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { - ap.execForProto(w, r, ksr.WSProtocol) + ap.sessionForProto(w, r, ksr.ExecSessionType, ksr.WSProtocol) +} + +// serveExecSPDY serves '/attach' requests for sessions streamed over SPDY, +// optionally configuring the kubectl exec sessions to be recorded. 
+func (ap *APIServerProxy) serveAttachSPDY(w http.ResponseWriter, r *http.Request) { + ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.SPDYProtocol) +} + +// serveExecWS serves '/attach' requests for sessions streamed over WebSocket, +// optionally configuring the kubectl exec sessions to be recorded. +func (ap *APIServerProxy) serveAttachWS(w http.ResponseWriter, r *http.Request) { + ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.WSProtocol) } -func (ap *APIServerProxy) execForProto(w http.ResponseWriter, r *http.Request, proto ksr.Protocol) { +func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request, sessionType sessionrecording.SessionType, proto ksr.Protocol) { const ( podNameKey = "pod" namespaceNameKey = "namespace" @@ -192,7 +208,7 @@ func (ap *APIServerProxy) execForProto(w http.ResponseWriter, r *http.Request, p counterNumRequestsProxied.Add(1) failOpen, addrs, err := determineRecorderConfig(who) if err != nil { - ap.log.Errorf("error trying to determine whether the 'kubectl exec' session needs to be recorded: %v", err) + ap.log.Errorf("error trying to determine whether the 'kubectl %s' session needs to be recorded: %v", sessionType, err) return } if failOpen && len(addrs) == 0 { // will not record @@ -201,7 +217,7 @@ func (ap *APIServerProxy) execForProto(w http.ResponseWriter, r *http.Request, p } ksr.CounterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded if !failOpen && len(addrs) == 0 { - msg := "forbidden: 'kubectl exec' session must be recorded, but no recorders are available." 
+ msg := fmt.Sprintf("forbidden: 'kubectl %s' session must be recorded, but no recorders are available.", sessionType) ap.log.Error(msg) http.Error(w, msg, http.StatusForbidden) return @@ -223,16 +239,17 @@ func (ap *APIServerProxy) execForProto(w http.ResponseWriter, r *http.Request, p } opts := ksr.HijackerOpts{ - Req: r, - W: w, - Proto: proto, - TS: ap.ts, - Who: who, - Addrs: addrs, - FailOpen: failOpen, - Pod: r.PathValue(podNameKey), - Namespace: r.PathValue(namespaceNameKey), - Log: ap.log, + Req: r, + W: w, + Proto: proto, + SessionType: sessionType, + TS: ap.ts, + Who: who, + Addrs: addrs, + FailOpen: failOpen, + Pod: r.PathValue(podNameKey), + Namespace: r.PathValue(namespaceNameKey), + Log: ap.log, } h := ksr.New(opts) diff --git a/k8s-operator/sessionrecording/fakes/fakes.go b/k8s-operator/sessionrecording/fakes/fakes.go index 9eb1047e4242f..94853df195f7c 100644 --- a/k8s-operator/sessionrecording/fakes/fakes.go +++ b/k8s-operator/sessionrecording/fakes/fakes.go @@ -10,13 +10,13 @@ package fakes import ( "bytes" "encoding/json" + "fmt" + "math/rand" "net" "sync" "testing" "time" - "math/rand" - "tailscale.com/sessionrecording" "tailscale.com/tstime" ) @@ -107,7 +107,13 @@ func CastLine(t *testing.T, p []byte, clock tstime.Clock) []byte { return append(j, '\n') } -func AsciinemaResizeMsg(t *testing.T, width, height int) []byte { +func AsciinemaCastResizeMsg(t *testing.T, width, height int) []byte { + msg := fmt.Sprintf(`[0,"r","%dx%d"]`, height, width) + + return append([]byte(msg), '\n') +} + +func AsciinemaCastHeaderMsg(t *testing.T, width, height int) []byte { t.Helper() ch := sessionrecording.CastHeader{ Width: width, diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index a9ed658964787..e8c534afc9319 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -4,7 +4,7 @@ //go:build !plan9 // Package sessionrecording contains functionality for recording 
Kubernetes API -// server proxy 'kubectl exec' sessions. +// server proxy 'kubectl exec/attach' sessions. package sessionrecording import ( @@ -35,14 +35,20 @@ import ( ) const ( - SPDYProtocol Protocol = "SPDY" - WSProtocol Protocol = "WebSocket" + SPDYProtocol Protocol = "SPDY" + WSProtocol Protocol = "WebSocket" + ExecSessionType SessionType = "exec" + AttachSessionType SessionType = "attach" ) // Protocol is the streaming protocol of the hijacked session. Supported // protocols are SPDY and WebSocket. type Protocol string +// SessionType is the type of session initiated with `kubectl` +// (`exec` or `attach`) +type SessionType string + var ( // CounterSessionRecordingsAttempted counts the number of session recording attempts. CounterSessionRecordingsAttempted = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_attempted") @@ -63,25 +69,27 @@ func New(opts HijackerOpts) *Hijacker { failOpen: opts.FailOpen, proto: opts.Proto, log: opts.Log, + sessionType: opts.SessionType, connectToRecorder: sessionrecording.ConnectToRecorder, } } type HijackerOpts struct { - TS *tsnet.Server - Req *http.Request - W http.ResponseWriter - Who *apitype.WhoIsResponse - Addrs []netip.AddrPort - Log *zap.SugaredLogger - Pod string - Namespace string - FailOpen bool - Proto Protocol + TS *tsnet.Server + Req *http.Request + W http.ResponseWriter + Who *apitype.WhoIsResponse + Addrs []netip.AddrPort + Log *zap.SugaredLogger + Pod string + Namespace string + FailOpen bool + Proto Protocol + SessionType SessionType } // Hijacker implements [net/http.Hijacker] interface. -// It must be configured with an http request for a 'kubectl exec' session that +// It must be configured with an http request for a 'kubectl exec/attach' session that // needs to be recorded. It knows how to hijack the connection and configure for // the session contents to be sent to a tsrecorder instance. 
type Hijacker struct { @@ -90,12 +98,13 @@ type Hijacker struct { req *http.Request who *apitype.WhoIsResponse log *zap.SugaredLogger - pod string // pod being exec-d - ns string // namespace of the pod being exec-d + pod string // pod being exec/attach-d + ns string // namespace of the pod being exec/attach-d addrs []netip.AddrPort // tsrecorder addresses failOpen bool // whether to fail open if recording fails connectToRecorder RecorderDialFn - proto Protocol // streaming protocol + proto Protocol // streaming protocol + sessionType SessionType // subcommand, e.g., "exec, attach" } // RecorderDialFn dials the specified netip.AddrPorts that should be tsrecorder @@ -105,7 +114,7 @@ type Hijacker struct { // after having been established, an error is sent down the channel. type RecorderDialFn func(context.Context, []netip.AddrPort, netx.DialFunc) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) -// Hijack hijacks a 'kubectl exec' session and configures for the session +// Hijack hijacks a 'kubectl exec/attach' session and configures for the session // contents to be sent to a recorder. 
func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { h.log.Infof("recorder addrs: %v, failOpen: %v", h.addrs, h.failOpen) @@ -114,7 +123,7 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { return nil, nil, fmt.Errorf("error hijacking connection: %w", err) } - conn, err := h.setUpRecording(context.Background(), reqConn) + conn, err := h.setUpRecording(h.req.Context(), reqConn) if err != nil { return nil, nil, fmt.Errorf("error setting up session recording: %w", err) } @@ -138,7 +147,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, err error errChan <-chan error ) - h.log.Infof("kubectl exec session will be recorded, recorders: %v, fail open policy: %t", h.addrs, h.failOpen) + h.log.Infof("kubectl %s session will be recorded, recorders: %v, fail open policy: %t", h.sessionType, h.addrs, h.failOpen) qp := h.req.URL.Query() container := strings.Join(qp[containerKey], "") var recorderAddr net.Addr @@ -161,7 +170,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, } return nil, errors.New(msg) } else { - h.log.Infof("exec session to container %q in Pod %q namespace %q will be recorded, the recording will be sent to a tsrecorder instance at %q", container, h.pod, h.ns, recorderAddr) + h.log.Infof("%s session to container %q in Pod %q namespace %q will be recorded, the recording will be sent to a tsrecorder instance at %q", h.sessionType, container, h.pod, h.ns, recorderAddr) } cl := tstime.DefaultClock{} @@ -190,9 +199,15 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, var lc net.Conn switch h.proto { case SPDYProtocol: - lc = spdy.New(conn, rec, ch, hasTerm, h.log) + lc, err = spdy.New(ctx, conn, rec, ch, hasTerm, h.log) + if err != nil { + return nil, fmt.Errorf("failed to initialize spdy connection: %w", err) + } case WSProtocol: - lc = ws.New(conn, rec, ch, hasTerm, h.log) + lc, err = ws.New(ctx, conn, rec, ch, hasTerm, h.log) + 
if err != nil { + return nil, fmt.Errorf("failed to initialize websocket connection: %w", err) + } default: return nil, fmt.Errorf("unknown protocol: %s", h.proto) } @@ -209,7 +224,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, h.log.Info("finished uploading the recording") return } - msg := fmt.Sprintf("connection to the session recorder errorred: %v;", err) + msg := fmt.Sprintf("connection to the session recorder errored: %v;", err) if h.failOpen { msg += msg + "; failure mode is 'fail open'; continuing session without recording." h.log.Info(msg) diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go index 880015b22c2d0..cac6f55c7c7d7 100644 --- a/k8s-operator/sessionrecording/hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -91,7 +91,7 @@ func Test_Hijacker(t *testing.T) { who: &apitype.WhoIsResponse{Node: &tailcfg.Node{}, UserProfile: &tailcfg.UserProfile{}}, log: zl.Sugar(), ts: &tsnet.Server{}, - req: &http.Request{URL: &url.URL{}}, + req: &http.Request{URL: &url.URL{RawQuery: "tty=true"}}, proto: tt.proto, } ctx := context.Background() diff --git a/k8s-operator/sessionrecording/spdy/conn.go b/k8s-operator/sessionrecording/spdy/conn.go index 455c2225ad921..9fefca11fc2b8 100644 --- a/k8s-operator/sessionrecording/spdy/conn.go +++ b/k8s-operator/sessionrecording/spdy/conn.go @@ -4,11 +4,12 @@ //go:build !plan9 // Package spdy contains functionality for parsing SPDY streaming sessions. This -// is used for 'kubectl exec' session recording. +// is used for 'kubectl exec/attach' session recording. package spdy import ( "bytes" + "context" "encoding/binary" "encoding/json" "fmt" @@ -24,29 +25,50 @@ import ( ) // New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. 
-// The connection must be a hijacked connection for a 'kubectl exec' session using SPDY. +// The connection must be a hijacked connection for a 'kubectl exec/attach' session using SPDY. // The hijacked connection is used to transmit SPDY streams between Kubernetes client ('kubectl') and the destination container. // Data read from the underlying network connection is data sent via one of the SPDY streams from the client to the container. // Data written to the underlying connection is data sent from the container to the client. // We parse the data and send everything for the stdout/stderr streams to the configured tsrecorder as an asciinema recording with the provided header. // https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#background-remotecommand-subprotocol -func New(nc net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) net.Conn { - return &conn{ - Conn: nc, - rec: rec, - ch: ch, - log: log, - hasTerm: hasTerm, - initialTermSizeSet: make(chan struct{}), +func New(ctx context.Context, nc net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) (net.Conn, error) { + lc := &conn{ + Conn: nc, + ctx: ctx, + rec: rec, + ch: ch, + log: log, + hasTerm: hasTerm, + initialCastHeaderSent: make(chan struct{}, 1), } + + // if there is no term, we don't need to wait for a resize message + if !hasTerm { + var err error + lc.writeCastHeaderOnce.Do(func() { + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + err = lc.rec.WriteCastHeader(ch) + close(lc.initialCastHeaderSent) + }) + if err != nil { + return nil, fmt.Errorf("error writing CastHeader: %w", err) + } + } + + return lc, nil } // conn is a wrapper around net.Conn. 
It reads the bytestream for a 'kubectl -// exec' session streamed using SPDY protocol, sends session recording data to +// exec/attach' session streamed using SPDY protocol, sends session recording data to // the configured recorder and forwards the raw bytes to the original // destination. type conn struct { net.Conn + ctx context.Context // rec knows how to send data written to it to a tsrecorder instance. rec *tsrecorder.Client @@ -63,7 +85,7 @@ type conn struct { // CastHeader must be sent before any payload. If the session has a // terminal attached, the CastHeader must have '.Width' and '.Height' // fields set for the tsrecorder UI to be able to play the recording. - // For 'kubectl exec' sessions, terminal width and height are sent as a + // For 'kubectl exec/attach' sessions, terminal width and height are sent as a // resize message on resize stream from the client when the session // starts as well as at any time the client detects a terminal change. // We can intercept the resize message on Read calls. As there is no @@ -79,15 +101,10 @@ type conn struct { // writeCastHeaderOnce is used to ensure CastHeader gets sent to tsrecorder once. writeCastHeaderOnce sync.Once hasTerm bool // whether the session had TTY attached - // initialTermSizeSet channel gets sent a value once, when the Read has - // received a resize message and set the initial terminal size. It must - // be set to a buffered channel to prevent Reads being blocked on the - // first stdout/stderr write reading from the channel. - initialTermSizeSet chan struct{} - // sendInitialTermSizeSetOnce is used to ensure that a value is sent to - // initialTermSizeSet channel only once, when the initial resize message - // is received. - sendinitialTermSizeSetOnce sync.Once + // initialCastHeaderSent is a channel to ensure that the cast + // header is the first thing that is streamed to the session recorder. + // Otherwise the stream will fail. 
+ initialCastHeaderSent chan struct{} zlibReqReader zlibReader // writeBuf is used to store data written to the connection that has not @@ -124,7 +141,7 @@ func (c *conn) Read(b []byte) (int, error) { } c.readBuf.Next(len(sf.Raw)) // advance buffer past the parsed frame - if !sf.Ctrl { // data frame + if !sf.Ctrl && c.hasTerm { // data frame switch sf.StreamID { case c.resizeStreamID.Load(): @@ -140,10 +157,19 @@ func (c *conn) Read(b []byte) (int, error) { // subsequent resize message, we need to send asciinema // resize message. var isInitialResize bool - c.sendinitialTermSizeSetOnce.Do(func() { + c.writeCastHeaderOnce.Do(func() { isInitialResize = true - close(c.initialTermSizeSet) // unblock sending of CastHeader + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + err = c.rec.WriteCastHeader(c.ch) + close(c.initialCastHeaderSent) }) + if err != nil { + return 0, fmt.Errorf("error writing CastHeader: %w", err) + } if !isInitialResize { if err := c.rec.WriteResize(c.ch.Height, c.ch.Width); err != nil { return 0, fmt.Errorf("error writing resize message: %w", err) @@ -190,24 +216,14 @@ func (c *conn) Write(b []byte) (int, error) { if !sf.Ctrl { switch sf.StreamID { case c.stdoutStreamID.Load(), c.stderrStreamID.Load(): - var err error - c.writeCastHeaderOnce.Do(func() { - // If this is a session with a terminal attached, - // we must wait for the terminal width and - // height to be parsed from a resize message - // before sending CastHeader, else tsrecorder - // will not be able to play this recording. 
- if c.hasTerm { - c.log.Debugf("write: waiting for the initial terminal size to be set before proceeding with sending the first payload") - <-c.initialTermSizeSet + // we must wait for confirmation that the initial cast header was sent before proceeding with any more writes + select { + case <-c.ctx.Done(): + return 0, c.ctx.Err() + case <-c.initialCastHeaderSent: + if err := c.rec.WriteOutput(sf.Payload); err != nil { + return 0, fmt.Errorf("error sending payload to session recorder: %w", err) } - err = c.rec.WriteCastHeader(c.ch) - }) - if err != nil { - return 0, fmt.Errorf("error writing CastHeader: %w", err) - } - if err := c.rec.WriteOutput(sf.Payload); err != nil { - return 0, fmt.Errorf("error sending payload to session recorder: %w", err) } } } diff --git a/k8s-operator/sessionrecording/spdy/conn_test.go b/k8s-operator/sessionrecording/spdy/conn_test.go index 3485d61c4f454..3c1cb8427d822 100644 --- a/k8s-operator/sessionrecording/spdy/conn_test.go +++ b/k8s-operator/sessionrecording/spdy/conn_test.go @@ -6,10 +6,12 @@ package spdy import ( + "context" "encoding/json" "fmt" "reflect" "testing" + "time" "go.uber.org/zap" "tailscale.com/k8s-operator/sessionrecording/fakes" @@ -29,15 +31,11 @@ func Test_Writes(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name string - inputs [][]byte - wantForwarded []byte - wantRecorded []byte - firstWrite bool - width int - height int - sendInitialResize bool - hasTerm bool + name string + inputs [][]byte + wantForwarded []byte + wantRecorded []byte + hasTerm bool }{ { name: "single_write_control_frame_with_payload", @@ -78,24 +76,17 @@ func Test_Writes(t *testing.T) { wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, { - name: "single_first_write_stdout_data_frame_with_payload_sess_has_terminal", - inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, - wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 
0x5}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), - width: 10, - height: 20, - hasTerm: true, - firstWrite: true, - sendInitialResize: true, + name: "single_first_write_stdout_data_frame_with_payload_sess_has_terminal", + inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, + wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), + hasTerm: true, }, { name: "single_first_write_stdout_data_frame_with_payload_sess_does_not_have_terminal", inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), - width: 10, - height: 20, - firstWrite: true, + wantRecorded: fakes.CastLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, } for _, tt := range tests { @@ -104,29 +95,25 @@ func Test_Writes(t *testing.T) { sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), - rec: rec, - ch: sessionrecording.CastHeader{ - Width: tt.width, - Height: tt.height, - }, - initialTermSizeSet: make(chan struct{}), - hasTerm: tt.hasTerm, - } - if !tt.firstWrite { - // this test case does not intend to test that cast header gets written once - c.writeCastHeaderOnce.Do(func() {}) - } - if tt.sendInitialResize { - close(c.initialTermSizeSet) + ctx: ctx, + Conn: tc, + log: zl.Sugar(), + rec: rec, + ch: sessionrecording.CastHeader{}, + initialCastHeaderSent: make(chan struct{}), + hasTerm: tt.hasTerm, } + c.writeCastHeaderOnce.Do(func() { + close(c.initialCastHeaderSent) + }) + 
c.stdoutStreamID.Store(stdoutStreamID) c.stderrStreamID.Store(stderrStreamID) for i, input := range tt.inputs { - c.hasTerm = tt.hasTerm if _, err := c.Write(input); err != nil { t.Errorf("[%d] spdyRemoteConnRecorder.Write() unexpected error %v", i, err) } @@ -171,11 +158,25 @@ func Test_Reads(t *testing.T) { wantResizeStreamID uint32 wantWidth int wantHeight int + wantRecorded []byte resizeStreamIDBeforeRead uint32 }{ { name: "resize_data_frame_single_read", inputs: [][]byte{append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...)}, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), + resizeStreamIDBeforeRead: 1, + wantWidth: 10, + wantHeight: 20, + }, + { + name: "resize_data_frame_many", + inputs: [][]byte{ + append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...), + append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...), + }, + wantRecorded: append(fakes.AsciinemaCastHeaderMsg(t, 10, 20), fakes.AsciinemaCastResizeMsg(t, 10, 20)...), + resizeStreamIDBeforeRead: 1, wantWidth: 10, wantHeight: 20, @@ -183,6 +184,7 @@ func Test_Reads(t *testing.T) { { name: "resize_data_frame_two_reads", inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg}, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), resizeStreamIDBeforeRead: 1, wantWidth: 10, wantHeight: 20, @@ -215,11 +217,15 @@ func Test_Reads(t *testing.T) { tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), - rec: rec, - initialTermSizeSet: make(chan struct{}), + ctx: ctx, + Conn: tc, + log: zl.Sugar(), + rec: rec, + initialCastHeaderSent: make(chan struct{}), + hasTerm: true, } c.resizeStreamID.Store(tt.resizeStreamIDBeforeRead) @@ -251,6 +257,12 @@ func Test_Reads(t *testing.T) { 
t.Errorf("want height: %v, got %v", tt.wantHeight, c.ch.Height) } } + + // Assert that the expected bytes have been forwarded to the session recorder. + gotRecorded := sr.Bytes() + if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) { + t.Errorf("expected bytes not recorded, wants\n%v\ngot\n%v", tt.wantRecorded, gotRecorded) + } }) } } diff --git a/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go index af5fcb8da641a..a5bdf7ddddeeb 100644 --- a/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go +++ b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go @@ -25,6 +25,7 @@ func New(conn io.WriteCloser, clock tstime.Clock, start time.Time, failOpen bool clock: clock, conn: conn, failOpen: failOpen, + logger: logger, } } diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go index 86029f67b1f13..0d8aefaace52e 100644 --- a/k8s-operator/sessionrecording/ws/conn.go +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -3,12 +3,13 @@ //go:build !plan9 -// package ws has functionality to parse 'kubectl exec' sessions streamed using +// package ws has functionality to parse 'kubectl exec/attach' sessions streamed using // WebSocket protocol. package ws import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -24,31 +25,53 @@ import ( ) // New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. -// The connection must be a hijacked connection for a 'kubectl exec' session using WebSocket protocol and a *.channel.k8s.io subprotocol. +// The connection must be a hijacked connection for a 'kubectl exec/attach' session using WebSocket protocol and a *.channel.k8s.io subprotocol. // The hijacked connection is used to transmit *.channel.k8s.io streams between Kubernetes client ('kubectl') and the destination proxy controlled by Kubernetes. 
// Data read from the underlying network connection is data sent via one of the streams from the client to the container. // Data written to the underlying connection is data sent from the container to the client. // We parse the data and send everything for the stdout/stderr streams to the configured tsrecorder as an asciinema recording with the provided header. // https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/4006-transition-spdy-to-websockets#proposal-new-remotecommand-sub-protocol-version---v5channelk8sio -func New(c net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) net.Conn { - return &conn{ - Conn: c, - rec: rec, - ch: ch, - hasTerm: hasTerm, - log: log, - initialTermSizeSet: make(chan struct{}, 1), +func New(ctx context.Context, c net.Conn, rec *tsrecorder.Client, ch sessionrecording.CastHeader, hasTerm bool, log *zap.SugaredLogger) (net.Conn, error) { + lc := &conn{ + Conn: c, + ctx: ctx, + rec: rec, + ch: ch, + hasTerm: hasTerm, + log: log, + initialCastHeaderSent: make(chan struct{}, 1), } + + // if there is no term, we don't need to wait for a resize message + if !hasTerm { + var err error + lc.writeCastHeaderOnce.Do(func() { + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. + err = lc.rec.WriteCastHeader(ch) + close(lc.initialCastHeaderSent) + }) + if err != nil { + return nil, fmt.Errorf("error writing CastHeader: %w", err) + } + } + + return lc, nil } // conn is a wrapper around net.Conn. It reads the bytestream -// for a 'kubectl exec' session, sends session recording data to the configured +// for a 'kubectl exec/attach' session, sends session recording data to the configured // recorder and forwards the raw bytes to the original destination. // A new conn is created per session. 
-// conn only knows to how to read a 'kubectl exec' session that is streamed using WebSocket protocol. +// conn only knows to how to read a 'kubectl exec/attach' session that is streamed using WebSocket protocol. // https://www.rfc-editor.org/rfc/rfc6455 type conn struct { net.Conn + + ctx context.Context // rec knows how to send data to a tsrecorder instance. rec *tsrecorder.Client @@ -56,7 +79,7 @@ type conn struct { // CastHeader must be sent before any payload. If the session has a // terminal attached, the CastHeader must have '.Width' and '.Height' // fields set for the tsrecorder UI to be able to play the recording. - // For 'kubectl exec' sessions, terminal width and height are sent as a + // For 'kubectl exec/attach' sessions, terminal width and height are sent as a // resize message on resize stream from the client when the session // starts as well as at any time the client detects a terminal change. // We can intercept the resize message on Read calls. As there is no @@ -72,15 +95,10 @@ type conn struct { // writeCastHeaderOnce is used to ensure CastHeader gets sent to tsrecorder once. writeCastHeaderOnce sync.Once hasTerm bool // whether the session has TTY attached - // initialTermSizeSet channel gets sent a value once, when the Read has - // received a resize message and set the initial terminal size. It must - // be set to a buffered channel to prevent Reads being blocked on the - // first stdout/stderr write reading from the channel. - initialTermSizeSet chan struct{} - // sendInitialTermSizeSetOnce is used to ensure that a value is sent to - // initialTermSizeSet channel only once, when the initial resize message - // is received. - sendInitialTermSizeSetOnce sync.Once + // initialCastHeaderSent is a boolean that is set to ensure that the cast + // header is the first thing that is streamed to the session recorder. + // Otherwise the stream will fail. 
+ initialCastHeaderSent chan struct{} log *zap.SugaredLogger @@ -171,9 +189,10 @@ func (c *conn) Read(b []byte) (int, error) { c.readBuf.Next(len(readMsg.raw)) if readMsg.isFinalized && !c.readMsgIsIncomplete() { + // we want to send stream resize messages for terminal sessions // Stream IDs for websocket streams are static. // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218 - if readMsg.streamID.Load() == remotecommand.StreamResize { + if readMsg.streamID.Load() == remotecommand.StreamResize && c.hasTerm { var msg tsrecorder.ResizeMsg if err = json.Unmarshal(readMsg.payload, &msg); err != nil { return 0, fmt.Errorf("error umarshalling resize message: %w", err) @@ -182,22 +201,29 @@ func (c *conn) Read(b []byte) (int, error) { c.ch.Width = msg.Width c.ch.Height = msg.Height - // If this is initial resize message, the width and - // height will be sent in the CastHeader. If this is a - // subsequent resize message, we need to send asciinema - // resize message. var isInitialResize bool - c.sendInitialTermSizeSetOnce.Do(func() { + c.writeCastHeaderOnce.Do(func() { isInitialResize = true - close(c.initialTermSizeSet) // unblock sending of CastHeader + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. 
+ err = c.rec.WriteCastHeader(c.ch) + close(c.initialCastHeaderSent) }) + if err != nil { + return 0, fmt.Errorf("error writing CastHeader: %w", err) + } + if !isInitialResize { - if err := c.rec.WriteResize(c.ch.Height, c.ch.Width); err != nil { + if err := c.rec.WriteResize(msg.Height, msg.Width); err != nil { return 0, fmt.Errorf("error writing resize message: %w", err) } } } } + c.currentReadMsg = readMsg return n, nil } @@ -244,39 +270,33 @@ func (c *conn) Write(b []byte) (int, error) { c.log.Errorf("write: parsing a message errored: %v", err) return 0, fmt.Errorf("write: error parsing message: %v", err) } + c.currentWriteMsg = writeMsg if !ok { // incomplete fragment return len(b), nil } + c.writeBuf.Next(len(writeMsg.raw)) // advance frame if len(writeMsg.payload) != 0 && writeMsg.isFinalized { if writeMsg.streamID.Load() == remotecommand.StreamStdOut || writeMsg.streamID.Load() == remotecommand.StreamStdErr { - var err error - c.writeCastHeaderOnce.Do(func() { - // If this is a session with a terminal attached, - // we must wait for the terminal width and - // height to be parsed from a resize message - // before sending CastHeader, else tsrecorder - // will not be able to play this recording. 
- if c.hasTerm { - c.log.Debug("waiting for terminal size to be set before starting to send recorded data") - <-c.initialTermSizeSet + // we must wait for confirmation that the initial cast header was sent before proceeding with any more writes + select { + case <-c.ctx.Done(): + return 0, c.ctx.Err() + case <-c.initialCastHeaderSent: + if err := c.rec.WriteOutput(writeMsg.payload); err != nil { + return 0, fmt.Errorf("error writing message to recorder: %w", err) } - err = c.rec.WriteCastHeader(c.ch) - }) - if err != nil { - return 0, fmt.Errorf("error writing CastHeader: %w", err) - } - if err := c.rec.WriteOutput(writeMsg.payload); err != nil { - return 0, fmt.Errorf("error writing message to recorder: %v", err) } } } + _, err = c.Conn.Write(c.currentWriteMsg.raw) if err != nil { c.log.Errorf("write: error writing to conn: %v", err) } + return len(b), nil } @@ -321,6 +341,7 @@ func (c *conn) writeMsgIsIncomplete() bool { func (c *conn) readMsgIsIncomplete() bool { return c.currentReadMsg != nil && !c.currentReadMsg.isFinalized } + func (c *conn) curReadMsgType() (messageType, error) { if c.currentReadMsg != nil { return c.currentReadMsg.typ, nil diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index 11174480ba605..f29154c622602 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -6,9 +6,11 @@ package ws import ( + "context" "fmt" "reflect" "testing" + "time" "go.uber.org/zap" "k8s.io/apimachinery/pkg/util/remotecommand" @@ -26,46 +28,69 @@ func Test_conn_Read(t *testing.T) { // Resize stream ID + {"width": 10, "height": 20} testResizeMsg := []byte{byte(remotecommand.StreamResize), 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d} lenResizeMsgPayload := byte(len(testResizeMsg)) - + cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name 
string - inputs [][]byte - wantWidth int - wantHeight int + name string + inputs [][]byte + wantCastHeaderWidth int + wantCastHeaderHeight int + wantRecorded []byte }{ { name: "single_read_control_message", inputs: [][]byte{{0x88, 0x0}}, }, { - name: "single_read_resize_message", - inputs: [][]byte{append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...)}, - wantWidth: 10, - wantHeight: 20, + name: "single_read_resize_message", + inputs: [][]byte{append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...)}, + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), }, { - name: "two_reads_resize_message", - inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}}, - wantWidth: 10, - wantHeight: 20, + name: "resize_data_frame_many", + inputs: [][]byte{ + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + }, + wantRecorded: append(fakes.AsciinemaCastHeaderMsg(t, 10, 20), fakes.AsciinemaCastResizeMsg(t, 10, 20)...), + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, }, { - name: "three_reads_resize_message_with_split_fragment", - inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, {0x22, 0x3a, 0x32, 0x30, 0x7d}}, - wantWidth: 10, - wantHeight: 20, + name: "two_reads_resize_message", + inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}}, + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), + }, + { + name: "three_reads_resize_message_with_split_fragment", + inputs: [][]byte{{0x2, 0x9, 
0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, {0x22, 0x3a, 0x32, 0x30, 0x7d}}, + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + l := zl.Sugar() tc := &fakes.TestConn{} + sr := &fakes.TestSessionRecorder{} + rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) tc.ResetReadBuf() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: zl.Sugar(), + ctx: ctx, + Conn: tc, + log: l, + hasTerm: true, + initialCastHeaderSent: make(chan struct{}), + rec: rec, } for i, input := range tt.inputs { - c.initialTermSizeSet = make(chan struct{}) if err := tc.WriteReadBufBytes(input); err != nil { t.Fatalf("writing bytes to test conn: %v", err) } @@ -75,14 +100,20 @@ func Test_conn_Read(t *testing.T) { return } } - if tt.wantHeight != 0 || tt.wantWidth != 0 { - if tt.wantWidth != c.ch.Width { - t.Errorf("wants width: %v, got %v", tt.wantWidth, c.ch.Width) + + if tt.wantCastHeaderHeight != 0 || tt.wantCastHeaderWidth != 0 { + if tt.wantCastHeaderWidth != c.ch.Width { + t.Errorf("wants width: %v, got %v", tt.wantCastHeaderWidth, c.ch.Width) } - if tt.wantHeight != c.ch.Height { - t.Errorf("want height: %v, got %v", tt.wantHeight, c.ch.Height) + if tt.wantCastHeaderHeight != c.ch.Height { + t.Errorf("want height: %v, got %v", tt.wantCastHeaderHeight, c.ch.Height) } } + + gotRecorded := sr.Bytes() + if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) { + t.Errorf("expected bytes not recorded, wants\n%v\ngot\n%v", string(tt.wantRecorded), string(gotRecorded)) + } }) } } @@ -94,15 +125,11 @@ func Test_conn_Write(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) tests := []struct { - name string - inputs [][]byte - wantForwarded []byte - wantRecorded []byte - firstWrite bool - width 
int - height int - hasTerm bool - sendInitialResize bool + name string + inputs [][]byte + wantForwarded []byte + wantRecorded []byte + hasTerm bool }{ { name: "single_write_control_frame", @@ -130,10 +157,7 @@ func Test_conn_Write(t *testing.T) { name: "single_write_stdout_data_message_with_cast_header", inputs: [][]byte{{0x82, 0x3, 0x1, 0x7, 0x8}}, wantForwarded: []byte{0x82, 0x3, 0x1, 0x7, 0x8}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x7, 0x8}, cl)...), - width: 10, - height: 20, - firstWrite: true, + wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8}, cl), }, { name: "two_writes_stdout_data_message", @@ -148,15 +172,11 @@ func Test_conn_Write(t *testing.T) { wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl), }, { - name: "three_writes_stdout_data_message_with_split_fragment_cast_header_with_terminal", - inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3}, {0x4, 0x5}}, - wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}, - wantRecorded: append(fakes.AsciinemaResizeMsg(t, 10, 20), fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), - height: 20, - width: 10, - hasTerm: true, - firstWrite: true, - sendInitialResize: true, + name: "three_writes_stdout_data_message_with_split_fragment_cast_header_with_terminal", + inputs: [][]byte{{0x2, 0x3, 0x1, 0x7, 0x8}, {0x80, 0x6, 0x1, 0x1, 0x2, 0x3}, {0x4, 0x5}}, + wantForwarded: []byte{0x2, 0x3, 0x1, 0x7, 0x8, 0x80, 0x6, 0x1, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: fakes.CastLine(t, []byte{0x7, 0x8, 0x1, 0x2, 0x3, 0x4, 0x5}, cl), + hasTerm: true, }, } for _, tt := range tests { @@ -164,24 +184,22 @@ func Test_conn_Write(t *testing.T) { tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() c := &conn{ - Conn: tc, - log: 
zl.Sugar(), - ch: sessionrecording.CastHeader{ - Width: tt.width, - Height: tt.height, - }, - rec: rec, - initialTermSizeSet: make(chan struct{}), - hasTerm: tt.hasTerm, - } - if !tt.firstWrite { - // This test case does not intend to test that cast header gets written once. - c.writeCastHeaderOnce.Do(func() {}) - } - if tt.sendInitialResize { - close(c.initialTermSizeSet) + Conn: tc, + ctx: ctx, + log: zl.Sugar(), + ch: sessionrecording.CastHeader{}, + rec: rec, + initialCastHeaderSent: make(chan struct{}), + hasTerm: tt.hasTerm, } + + c.writeCastHeaderOnce.Do(func() { + close(c.initialCastHeaderSent) + }) + for i, input := range tt.inputs { _, err := c.Write(input) if err != nil { diff --git a/sessionrecording/header.go b/sessionrecording/header.go index 4806f6585f976..545bf06bd5984 100644 --- a/sessionrecording/header.go +++ b/sessionrecording/header.go @@ -66,13 +66,15 @@ type CastHeader struct { Kubernetes *Kubernetes `json:"kubernetes,omitempty"` } -// Kubernetes contains 'kubectl exec' session specific information for +// Kubernetes contains 'kubectl exec/attach' session specific information for // tsrecorder. type Kubernetes struct { - // PodName is the name of the Pod being exec-ed. + // PodName is the name of the Pod the session was recorded for. PodName string - // Namespace is the namespace in which is the Pod that is being exec-ed. + // Namespace is the namespace in which the Pod the session was recorded for exists in. Namespace string - // Container is the container being exec-ed. + // Container is the container the session was recorded for. 
Container string + // SessionType is the type of session that was executed (e.g., exec, attach) + SessionType string } From fe46f33885f5abb797f7289fa00d5b49a59d8468 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 14 Jul 2025 15:39:39 +0100 Subject: [PATCH 0114/1093] cmd/{k8s-operator,k8s-proxy},kube/k8s-proxy: add static endpoints for kube-apiserver type ProxyGroups (#16523) Updates #13358 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/proxygroup.go | 4 ++ cmd/k8s-operator/proxygroup_specs.go | 83 ++++++++++++++++------------ cmd/k8s-proxy/k8s-proxy.go | 15 +++++ kube/k8s-proxy/conf/conf.go | 4 ++ 4 files changed, 70 insertions(+), 36 deletions(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 66b6c96e3c25c..1fdc076f94cad 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -824,6 +824,10 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p cfg.AcceptRoutes = &proxyClass.Spec.TailscaleConfig.AcceptRoutes } + if len(endpoints[nodePortSvcName]) > 0 { + cfg.StaticEndpoints = endpoints[nodePortSvcName] + } + cfgB, err := json.Marshal(cfg) if err != nil { return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 5d6d0b8ef9626..71398d0d54c2f 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -66,7 +66,7 @@ func pgNodePortService(pg *tsapi.ProxyGroup, name string, namespace string) *cor // applied over the top after. 
func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string, port *uint16, proxyClass *tsapi.ProxyClass) (*appsv1.StatefulSet, error) { if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { - return kubeAPIServerStatefulSet(pg, namespace, image) + return kubeAPIServerStatefulSet(pg, namespace, image, port) } ss := new(appsv1.StatefulSet) if err := yaml.Unmarshal(proxyYaml, &ss); err != nil { @@ -276,7 +276,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string return ss, nil } -func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string) (*appsv1.StatefulSet, error) { +func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, port *uint16) (*appsv1.StatefulSet, error) { sts := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: pg.Name, @@ -302,48 +302,59 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string) (*a { Name: mainContainerName, Image: image, - Env: []corev1.EnvVar{ - { - // Used as default hostname and in Secret names. - Name: "POD_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.name", + Env: func() []corev1.EnvVar { + envs := []corev1.EnvVar{ + { + // Used as default hostname and in Secret names. + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, }, }, - }, - { - // Used by kubeclient to post Events about the Pod's lifecycle. - Name: "POD_UID", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.uid", + { + // Used by kubeclient to post Events about the Pod's lifecycle. + Name: "POD_UID", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.uid", + }, }, }, - }, - { - // Used in an interpolated env var if metrics enabled. 
- Name: "POD_IP", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "status.podIP", + { + // Used in an interpolated env var if metrics enabled. + Name: "POD_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, }, }, - }, - { - // Included for completeness with POD_IP and easier backwards compatibility in future. - Name: "POD_IPS", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "status.podIPs", + { + // Included for completeness with POD_IP and easier backwards compatibility in future. + Name: "POD_IPS", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "status.podIPs", + }, }, }, - }, - { - Name: "TS_K8S_PROXY_CONFIG", - Value: filepath.Join("/etc/tsconfig/$(POD_NAME)/", kubeAPIServerConfigFile), - }, - }, + { + Name: "TS_K8S_PROXY_CONFIG", + Value: filepath.Join("/etc/tsconfig/$(POD_NAME)/", kubeAPIServerConfigFile), + }, + } + + if port != nil { + envs = append(envs, corev1.EnvVar{ + Name: "PORT", + Value: strconv.Itoa(int(*port)), + }) + } + + return envs + }(), VolumeMounts: func() []corev1.VolumeMount { var mounts []corev1.VolumeMount diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 7dcf6c2ab5809..b7f3d9535a071 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -14,6 +14,7 @@ import ( "fmt" "os" "os/signal" + "strings" "syscall" "time" @@ -63,6 +64,20 @@ func run(logger *zap.SugaredLogger) error { logger = logger.WithOptions(zap.IncreaseLevel(level)) } + // TODO:(ChaosInTheCRD) This is a temporary workaround until we can set static endpoints using prefs + if se := cfg.Parsed.StaticEndpoints; len(se) > 0 { + logger.Debugf("setting static endpoints '%v' via TS_DEBUG_PRETENDPOINT environment variable", cfg.Parsed.StaticEndpoints) + ses := make([]string, len(se)) + for i, e := range se { + ses[i] = e.String() + } + + err := 
os.Setenv("TS_DEBUG_PRETENDPOINT", strings.Join(ses, ",")) + if err != nil { + return err + } + } + if cfg.Parsed.App != nil { hostinfo.SetApp(*cfg.Parsed.App) } diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index fba4a39a420a1..8882360c5ea21 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -10,6 +10,7 @@ package conf import ( "encoding/json" "fmt" + "net/netip" "os" "github.com/tailscale/hujson" @@ -55,6 +56,9 @@ type ConfigV1Alpha1 struct { KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy. ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. AcceptRoutes *bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. + // StaticEndpoints are additional, user-defined endpoints that this node + // should advertise amongst its wireguard endpoints. + StaticEndpoints []netip.AddrPort `json:",omitempty"` } type KubeAPIServer struct { From fc5050048ee9c71dcdeb232d3a38f068072f489f Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 14 Jul 2025 10:42:56 -0700 Subject: [PATCH 0115/1093] wgengine/magicsock: don't acquire Conn.mu in udpRelayEndpointReady (#16557) udpRelayEndpointReady used to write into the peerMap, which required holding Conn.mu, but this changed in f9e7131. Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index c4ca812969bf9..d8d1e6ee338d3 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -104,8 +104,6 @@ type endpoint struct { // be installed as de.bestAddr. It is only called by [relayManager] once it has // determined maybeBest is functional via [disco.Pong] reception. 
func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { - de.c.mu.Lock() - defer de.c.mu.Unlock() de.mu.Lock() defer de.mu.Unlock() From f338c4074d4bb67acfbabdddf2974b23274236d9 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Mon, 14 Jul 2025 11:57:54 -1000 Subject: [PATCH 0116/1093] util/jsonutil: remove unused package (#16563) This package promises more performance, but was never used. The intent of the package is somewhat moot as "encoding/json" in Go 1.25 (while under GOEXPERIMENT=jsonv2) has been completely re-implemented using "encoding/json/v2" such that unmarshal is dramatically faster. Updates #cleanup Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- util/jsonutil/types.go | 16 ------ util/jsonutil/unmarshal.go | 89 --------------------------------- util/jsonutil/unmarshal_test.go | 64 ------------------------ 3 files changed, 169 deletions(-) delete mode 100644 util/jsonutil/types.go delete mode 100644 util/jsonutil/unmarshal.go delete mode 100644 util/jsonutil/unmarshal_test.go diff --git a/util/jsonutil/types.go b/util/jsonutil/types.go deleted file mode 100644 index 057473249f258..0000000000000 --- a/util/jsonutil/types.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package jsonutil - -// Bytes is a byte slice in a json-encoded struct. -// encoding/json assumes that []byte fields are hex-encoded. -// Bytes are not hex-encoded; they are treated the same as strings. -// This can avoid unnecessary allocations due to a round trip through strings. -type Bytes []byte - -func (b *Bytes) UnmarshalText(text []byte) error { - // Copy the contexts of text. - *b = append(*b, text...) 
- return nil -} diff --git a/util/jsonutil/unmarshal.go b/util/jsonutil/unmarshal.go deleted file mode 100644 index b1eb4ea873e67..0000000000000 --- a/util/jsonutil/unmarshal.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package jsonutil provides utilities to improve JSON performance. -// It includes an Unmarshal wrapper that amortizes allocated garbage over subsequent runs -// and a Bytes type to reduce allocations when unmarshalling a non-hex-encoded string into a []byte. -package jsonutil - -import ( - "bytes" - "encoding/json" - "sync" -) - -// decoder is a re-usable json decoder. -type decoder struct { - dec *json.Decoder - r *bytes.Reader -} - -var readerPool = sync.Pool{ - New: func() any { - return bytes.NewReader(nil) - }, -} - -var decoderPool = sync.Pool{ - New: func() any { - var d decoder - d.r = readerPool.Get().(*bytes.Reader) - d.dec = json.NewDecoder(d.r) - return &d - }, -} - -// Unmarshal is similar to encoding/json.Unmarshal. -// There are three major differences: -// -// On error, encoding/json.Unmarshal zeros v. -// This Unmarshal may leave partial data in v. -// Always check the error before using v! -// (Future improvements may remove this bug.) -// -// The errors they return don't always match perfectly. -// If you do error matching more precise than err != nil, -// don't use this Unmarshal. -// -// This Unmarshal allocates considerably less memory. -func Unmarshal(b []byte, v any) error { - d := decoderPool.Get().(*decoder) - d.r.Reset(b) - off := d.dec.InputOffset() - err := d.dec.Decode(v) - d.r.Reset(nil) // don't keep a reference to b - // In case of error, report the offset in this byte slice, - // instead of in the totality of all bytes this decoder has processed. - // It is not possible to make all errors match json.Unmarshal exactly, - // but we can at least try. 
- switch jsonerr := err.(type) { - case *json.SyntaxError: - jsonerr.Offset -= off - case *json.UnmarshalTypeError: - jsonerr.Offset -= off - case nil: - // json.Unmarshal fails if there's any extra junk in the input. - // json.Decoder does not; see https://github.com/golang/go/issues/36225. - // We need to check for anything left over in the buffer. - if d.dec.More() { - // TODO: Provide a better error message. - // Unfortunately, we can't set the msg field. - // The offset doesn't perfectly match json: - // Ours is at the end of the valid data, - // and theirs is at the beginning of the extra data after whitespace. - // Close enough, though. - err = &json.SyntaxError{Offset: d.dec.InputOffset() - off} - - // TODO: zero v. This is hard; see encoding/json.indirect. - } - } - if err == nil { - decoderPool.Put(d) - } else { - // There might be junk left in the decoder's buffer. - // There's no way to flush it, no Reset method. - // Abandoned the decoder but reuse the reader. - readerPool.Put(d.r) - } - return err -} diff --git a/util/jsonutil/unmarshal_test.go b/util/jsonutil/unmarshal_test.go deleted file mode 100644 index 32f8402f02e58..0000000000000 --- a/util/jsonutil/unmarshal_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package jsonutil - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestCompareToStd(t *testing.T) { - tests := []string{ - `{}`, - `{"a": 1}`, - `{]`, - `"abc"`, - `5`, - `{"a": 1} `, - `{"a": 1} {}`, - `{} bad data`, - `{"a": 1} "hello"`, - `[]`, - ` {"x": {"t": [3,4,5]}}`, - } - - for _, test := range tests { - b := []byte(test) - var ourV, stdV any - ourErr := Unmarshal(b, &ourV) - stdErr := json.Unmarshal(b, &stdV) - if (ourErr == nil) != (stdErr == nil) { - t.Errorf("Unmarshal(%q): our err = %#[2]v (%[2]T), std err = %#[3]v (%[3]T)", test, ourErr, stdErr) - } - // if !reflect.DeepEqual(ourErr, stdErr) { - // t.Logf("Unmarshal(%q): our err = 
%#[2]v (%[2]T), std err = %#[3]v (%[3]T)", test, ourErr, stdErr) - // } - if ourErr != nil { - // TODO: if we zero ourV on error, remove this continue. - continue - } - if !reflect.DeepEqual(ourV, stdV) { - t.Errorf("Unmarshal(%q): our val = %v, std val = %v", test, ourV, stdV) - } - } -} - -func BenchmarkUnmarshal(b *testing.B) { - var m any - j := []byte("5") - b.ReportAllocs() - for range b.N { - Unmarshal(j, &m) - } -} - -func BenchmarkStdUnmarshal(b *testing.B) { - var m any - j := []byte("5") - b.ReportAllocs() - for range b.N { - json.Unmarshal(j, &m) - } -} From b63f8a457dbb14700a7c6bdb96e4df95a5c258b3 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 14 Jul 2025 15:09:31 -0700 Subject: [PATCH 0117/1093] wgengine/magicsock: prioritize trusted peer relay paths over untrusted (#16559) A trusted peer relay path is always better than an untrusted direct or peer relay path. Updates tailscale/corp#30412 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 35 ++++++------ wgengine/magicsock/endpoint_test.go | 89 +++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+), 16 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index d8d1e6ee338d3..385c9245ec4e7 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -106,24 +106,27 @@ type endpoint struct { func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { de.mu.Lock() defer de.mu.Unlock() - - if maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 { - // TODO(jwhited): add some observability for this case, e.g. did we - // flip transports during a de.bestAddr transition from untrusted to - // trusted? 
+ now := mono.Now() + curBestAddrTrusted := now.Before(de.trustBestAddrUntil) + sameRelayServer := de.bestAddr.vni.isSet() && maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 + + if !curBestAddrTrusted || + sameRelayServer || + betterAddr(maybeBest, de.bestAddr) { + // We must set maybeBest as de.bestAddr if: + // 1. de.bestAddr is untrusted. betterAddr does not consider + // time-based trust. + // 2. maybeBest & de.bestAddr are on the same relay. If the maybeBest + // handshake happened to use a different source address/transport, + // the relay will drop packets from the 'old' de.bestAddr's. + // 3. maybeBest is a 'betterAddr'. // - // If these are equal we must set maybeBest as bestAddr, otherwise we - // could leave a stale bestAddr if it goes over a different - // address family or src. - } else if !betterAddr(maybeBest, de.bestAddr) { - return + // TODO(jwhited): add observability around !curBestAddrTrusted and sameRelayServer + // TODO(jwhited): collapse path change logging with endpoint.handlePongConnLocked() + de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBest.epAddr, maybeBest.wireMTU) + de.setBestAddrLocked(maybeBest) + de.trustBestAddrUntil = now.Add(trustUDPAddrDuration) } - - // Promote maybeBest to bestAddr. 
- // TODO(jwhited): collapse path change logging with endpoint.handlePongConnLocked() - de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v", de.publicKey.ShortString(), de.discoShort(), maybeBest.epAddr, maybeBest.wireMTU) - de.setBestAddrLocked(maybeBest) - de.trustBestAddrUntil = mono.Now().Add(trustUDPAddrDuration) } func (de *endpoint) setBestAddrLocked(v addrQuality) { diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index 3a1e55b8b9728..92f4ef1d3aac1 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -9,6 +9,7 @@ import ( "time" "tailscale.com/tailcfg" + "tailscale.com/tstime/mono" "tailscale.com/types/key" ) @@ -365,3 +366,91 @@ func Test_epAddr_isDirectUDP(t *testing.T) { }) } } + +func Test_endpoint_udpRelayEndpointReady(t *testing.T) { + directAddrQuality := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.1:7")}} + peerRelayAddrQuality := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.2:77")}, latency: time.Second} + peerRelayAddrQuality.vni.set(1) + peerRelayAddrQualityHigherLatencySameServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.3:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency * 10, + } + peerRelayAddrQualityHigherLatencyDiffServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.3:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency * 10, + relayServerDisco: key.NewDisco().Public(), + } + peerRelayAddrQualityLowerLatencyDiffServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.4:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency / 10, + relayServerDisco: key.NewDisco().Public(), + } + peerRelayAddrQualityEqualLatencyDiffServer := addrQuality{ + epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.4:77"), vni: peerRelayAddrQuality.vni}, + latency: peerRelayAddrQuality.latency, 
+ relayServerDisco: key.NewDisco().Public(), + } + tests := []struct { + name string + curBestAddr addrQuality + trustBestAddrUntil mono.Time + maybeBest addrQuality + wantBestAddr addrQuality + }{ + { + name: "bestAddr trusted direct", + curBestAddr: directAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQuality, + wantBestAddr: directAddrQuality, + }, + { + name: "bestAddr untrusted direct", + curBestAddr: directAddrQuality, + trustBestAddrUntil: mono.Now().Add(-1 * time.Hour), + maybeBest: peerRelayAddrQuality, + wantBestAddr: peerRelayAddrQuality, + }, + { + name: "maybeBest same relay server higher latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityHigherLatencySameServer, + wantBestAddr: peerRelayAddrQualityHigherLatencySameServer, + }, + { + name: "maybeBest diff relay server higher latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityHigherLatencyDiffServer, + wantBestAddr: peerRelayAddrQuality, + }, + { + name: "maybeBest diff relay server lower latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityLowerLatencyDiffServer, + wantBestAddr: peerRelayAddrQualityLowerLatencyDiffServer, + }, + { + name: "maybeBest diff relay server equal latency bestAddr trusted", + curBestAddr: peerRelayAddrQuality, + trustBestAddrUntil: mono.Now().Add(1 * time.Hour), + maybeBest: peerRelayAddrQualityEqualLatencyDiffServer, + wantBestAddr: peerRelayAddrQuality, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + de := &endpoint{ + c: &Conn{logf: func(msg string, args ...any) { return }}, + bestAddr: tt.curBestAddr, + trustBestAddrUntil: tt.trustBestAddrUntil, + } + de.udpRelayEndpointReady(tt.maybeBest) + if de.bestAddr != 
tt.wantBestAddr { + t.Errorf("de.bestAddr = %v, want %v", de.bestAddr, tt.wantBestAddr) + } + }) + } +} From bfb344905f5d12648031b0aaec27393ae4173e12 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Mon, 14 Jul 2025 18:51:55 -0700 Subject: [PATCH 0118/1093] ipn/ipnlocal: modernize nm.Peers with AppendMatchingPeers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thanks to @nickkhyl for pointing out that NetMap.Peers doesn’t get incremental updates since the last full NetMap update. Instead, he recommends using ipn/ipnlocal.nodeBackend.AppendMatchingPeers. Updates #cleanup Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4ed012f2e46f8..cd1654eb159b8 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7902,33 +7902,32 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod panic("missing traffic-steering capability") } - peers := nm.Peers - nodes := make([]tailcfg.NodeView, 0, len(peers)) - - for _, p := range peers { + var force tailcfg.NodeView + nodes := nb.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool { if !p.Valid() { - continue + return false } if allowed != nil && !allowed.Contains(p.StableID()) { - continue + return false } if !p.CapMap().Contains(tailcfg.NodeAttrSuggestExitNode) { - continue + return false } if !tsaddr.ContainsExitRoutes(p.AllowedIPs()) { - continue + return false } if p.StableID() == prev { // Prevent flapping: since prev is a valid suggestion, // force prev to be the only valid pick. 
- nodes = []tailcfg.NodeView{p} - break + force = p + return false } - nodes = append(nodes, p) + return true + }) + if force.Valid() { + nodes = append(nodes[:0], force) } - var pick tailcfg.NodeView - scores := make(map[tailcfg.NodeID]int, len(nodes)) score := func(n tailcfg.NodeView) int { id := n.ID() @@ -7945,7 +7944,11 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod return s } - if len(nodes) > 0 { + var pick tailcfg.NodeView + if len(nodes) == 1 { + pick = nodes[0] + } + if len(nodes) > 1 { // Find the highest scoring exit nodes. slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int { return cmp.Compare(score(b), score(a)) // reverse sort From 205f822372d203f32b3fb3c7562347770a927181 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Mon, 14 Jul 2025 19:01:02 -0700 Subject: [PATCH 0119/1093] ipn/ipnlocal: check if suggested exit node is online @nickkyl added an peer.Online check to suggestExitNodeUsingDERP, so it should also check when running suggestExitNodeUsingTrafficSteering. Updates tailscale/corp#29966 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index cd1654eb159b8..9b9bd82b5e9e5 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7907,6 +7907,9 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod if !p.Valid() { return false } + if !p.Online().Get() { + return false + } if allowed != nil && !allowed.Contains(p.StableID()) { return false } From 7a3221177e0e323d89b5e6389a4a4274065eb725 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 08:33:22 -0600 Subject: [PATCH 0120/1093] .github: Bump slackapi/slack-github-action from 2.1.0 to 2.1.1 (#16553) Bumps [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 2.1.0 to 2.1.1. 
- [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/b0fa283ad8fea605de13dc3f449259339835fc52...91efab103c0de0a537f72a35f6b8cda0ee76bf0a) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-version: 2.1.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/govulncheck.yml | 2 +- .github/workflows/installer.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 36ed1fe9bf603..c7560983abeb6 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -24,7 +24,7 @@ jobs: - name: Post to slack if: failure() && github.event_name == 'schedule' - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 + uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1 with: method: chat.postMessage token: ${{ secrets.GOVULNCHECK_BOT_TOKEN }} diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index 0ca16ae9fa6c1..6144864fd53b8 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -108,7 +108,7 @@ jobs: steps: - name: Notify Slack of failure on scheduled runs if: failure() && github.event_name == 'schedule' - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 + uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1 with: webhook: ${{ secrets.SLACK_WEBHOOK_URL }} webhook-type: incoming-webhook diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2e80b44dcc4d3..d5b09a9e6cc07 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml 
@@ -831,7 +831,7 @@ jobs: # By having the job always run, but skipping its only step as needed, we # let the CI output collapse nicely in PRs. if: failure() && github.event_name == 'push' - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 # v2.1.0 + uses: slackapi/slack-github-action@91efab103c0de0a537f72a35f6b8cda0ee76bf0a # v2.1.1 with: webhook: ${{ secrets.SLACK_WEBHOOK_URL }} webhook-type: incoming-webhook From e0fcd596bf50556243c488f916d5128dccba6638 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Mon, 14 Jul 2025 17:54:56 +0100 Subject: [PATCH 0121/1093] tailcfg: send health update if DisplayMessage URL changes Updates tailscale/corp#27759 Signed-off-by: James Sanderson --- health/health_test.go | 160 +++++++++++++++++++--------------------- tailcfg/tailcfg.go | 5 +- tailcfg/tailcfg_test.go | 113 ++++++++++++++++++++-------- 3 files changed, 162 insertions(+), 116 deletions(-) diff --git a/health/health_test.go b/health/health_test.go index 0f1140f621bb4..53f012ecffd55 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -555,98 +555,88 @@ func TestControlHealth(t *testing.T) { }) } -func TestControlHealthNotifiesOnSet(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() - - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test": {}, - }) - - if !gotNotified { - t.Errorf("watcher did not get called, want it to be called") - } -} - -func TestControlHealthNotifiesOnChange(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-1": {}, - }) - - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-2": {}, - 
}) - - if !gotNotified { - t.Errorf("watcher did not get called, want it to be called") - } -} - -func TestControlHealthNotifiesOnDetailsChange(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-1": { - Title: "Title", +func TestControlHealthNotifies(t *testing.T) { + type test struct { + name string + initialState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage + newState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage + wantNotify bool + } + tests := []test{ + { + name: "no-change", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }, + wantNotify: false, }, - }) - - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) - - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test-1": { - Title: "Updated title", + { + name: "on-set", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{}, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": {}, + }, + wantNotify: true, + }, + { + name: "details-change", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + Title: "Title", + }, + }, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + Title: "Updated title", + }, + }, + wantNotify: true, + }, + { + name: "action-changes", + initialState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "http://www.example.com/a/123456", + Label: "Sign in", + }, + }, + }, + newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test": { + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "http://www.example.com/a/abcdefg", + Label: "Sign in", + }, + }, + }, + wantNotify: true, }, - }) - - if 
!gotNotified { - t.Errorf("watcher did not get called, want it to be called") } -} - -func TestControlHealthNoNotifyOnUnchanged(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ht := Tracker{} + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() - // Set up an existing control health issue - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test": {}, - }) + if len(test.initialState) != 0 { + ht.SetControlHealth(test.initialState) + } - // Now register our watcher - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) + gotNotified := false + ht.registerSyncWatcher(func(_ Change) { + gotNotified = true + }) - // Send the same control health message again - should not notify - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "test": {}, - }) + ht.SetControlHealth(test.newState) - if gotNotified { - t.Errorf("watcher got called, want it to not be called") + if gotNotified != test.wantNotify { + t.Errorf("notified: got %v, want %v", gotNotified, test.wantNotify) + } + }) } } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 53c4683c1b000..0f13c725ebbfa 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2171,7 +2171,10 @@ func (m DisplayMessage) Equal(o DisplayMessage) bool { return m.Title == o.Title && m.Text == o.Text && m.Severity == o.Severity && - m.ImpactsConnectivity == o.ImpactsConnectivity + m.ImpactsConnectivity == o.ImpactsConnectivity && + (m.PrimaryAction == nil) == (o.PrimaryAction == nil) && + (m.PrimaryAction == nil || (m.PrimaryAction.URL == o.PrimaryAction.URL && + m.PrimaryAction.Label == o.PrimaryAction.Label)) } // DisplayMessageSeverity represents how serious a [DisplayMessage] is. 
Analogous diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index e8e86cdb139bd..833314df8fd6d 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -881,76 +881,129 @@ func TestCheckTag(t *testing.T) { } func TestDisplayMessageEqual(t *testing.T) { - base := DisplayMessage{ - Title: "title", - Text: "text", - Severity: SeverityHigh, - ImpactsConnectivity: false, - } - type test struct { name string - value DisplayMessage + value1 DisplayMessage + value2 DisplayMessage wantEqual bool } for _, test := range []test{ { name: "same", - value: DisplayMessage{ + value1: DisplayMessage{ + Title: "title", + Text: "text", + Severity: SeverityHigh, + ImpactsConnectivity: false, + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + value2: DisplayMessage{ Title: "title", Text: "text", Severity: SeverityHigh, ImpactsConnectivity: false, + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, }, wantEqual: true, }, { name: "different-title", - value: DisplayMessage{ - Title: "different title", - Text: "text", - Severity: SeverityHigh, - ImpactsConnectivity: false, + value1: DisplayMessage{ + Title: "title", + }, + value2: DisplayMessage{ + Title: "different title", }, wantEqual: false, }, { name: "different-text", - value: DisplayMessage{ - Title: "title", - Text: "different text", - Severity: SeverityHigh, - ImpactsConnectivity: false, + value1: DisplayMessage{ + Text: "some text", + }, + value2: DisplayMessage{ + Text: "different text", }, wantEqual: false, }, { name: "different-severity", - value: DisplayMessage{ - Title: "title", - Text: "text", - Severity: SeverityMedium, - ImpactsConnectivity: false, + value1: DisplayMessage{ + Severity: SeverityHigh, + }, + value2: DisplayMessage{ + Severity: SeverityMedium, }, wantEqual: false, }, { name: "different-impactsConnectivity", - value: DisplayMessage{ - Title: "title", - Text: "text", - Severity: SeverityHigh, + 
value1: DisplayMessage{ ImpactsConnectivity: true, }, + value2: DisplayMessage{ + ImpactsConnectivity: false, + }, + wantEqual: false, + }, + { + name: "different-primaryAction-nil-non-nil", + value1: DisplayMessage{}, + value2: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + wantEqual: false, + }, + { + name: "different-primaryAction-url", + value1: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + value2: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://zombo.com", + Label: "Open", + }, + }, + wantEqual: false, + }, + { + name: "different-primaryAction-label", + value1: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Open", + }, + }, + value2: DisplayMessage{ + PrimaryAction: &DisplayMessageAction{ + URL: "https://example.com", + Label: "Learn more", + }, + }, wantEqual: false, }, } { t.Run(test.name, func(t *testing.T) { - got := base.Equal(test.value) + got := test.value1.Equal(test.value2) if got != test.wantEqual { - t.Errorf("Equal: got %t, want %t", got, test.wantEqual) + value1 := must.Get(json.MarshalIndent(test.value1, "", " ")) + value2 := must.Get(json.MarshalIndent(test.value2, "", " ")) + t.Errorf("value1.Equal(value2): got %t, want %t\nvalue1:\n%s\nvalue2:\n%s", got, test.wantEqual, value1, value2) } }) } From ffe8cc9442335ffb76c0e7555c67493a1975181c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 09:54:00 -0600 Subject: [PATCH 0122/1093] .github: Bump github/codeql-action from 3.29.1 to 3.29.2 (#16480) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.1 to 3.29.2. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/39edc492dbe16b1465b0cafca41432d857bdb31a...181d5eefc20863364f96762470ba6f862bdef56b) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 610b93b610ea3..4e129b8471ea5 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@39edc492dbe16b1465b0cafca41432d857bdb31a # v3.29.1 + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 From d65c0fd2d04a49fb11964cf0457df499a0e6e366 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 15 Jul 2025 12:29:07 -0700 Subject: [PATCH 0123/1093] tailcfg,wgengine/magicsock: set peer relay CapVer (#16531) Updates tailscale/corp#27502 Updates tailscale/corp#30051 Signed-off-by: Jordan Whited --- tailcfg/tailcfg.go | 3 ++- wgengine/magicsock/debugknobs.go | 6 ------ wgengine/magicsock/debugknobs_stubs.go | 1 - wgengine/magicsock/magicsock.go | 19 ++++++++--------- wgengine/magicsock/magicsock_test.go | 28 ++++++++++++++++++++++++-- 5 files changed, 38 insertions(+), 19 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 0f13c725ebbfa..636e2434de276 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -164,7 +164,8 @@ type CapabilityVersion int // - 117: 2025-05-28: Client understands DisplayMessages (structured health messages), but not necessarily PrimaryAction. // - 118: 2025-07-01: Client sends Hostinfo.StateEncrypted to report whether the state file is encrypted at rest (#15830) // - 119: 2025-07-10: Client uses Hostinfo.Location.Priority to prioritize one route over another. -const CurrentCapabilityVersion CapabilityVersion = 119 +// - 120: 2025-07-15: Client understands peer relay disco messages, and implements peer client and relay server functions +const CurrentCapabilityVersion CapabilityVersion = 120 // ID is an integer ID for a user, node, or login allocated by the // control plane. 
diff --git a/wgengine/magicsock/debugknobs.go b/wgengine/magicsock/debugknobs.go index 0558953887ae0..f8fd9f0407d44 100644 --- a/wgengine/magicsock/debugknobs.go +++ b/wgengine/magicsock/debugknobs.go @@ -62,12 +62,6 @@ var ( // //lint:ignore U1000 used on Linux/Darwin only debugPMTUD = envknob.RegisterBool("TS_DEBUG_PMTUD") - // debugAssumeUDPRelayCapable forces magicsock to assume that all peers are - // UDP relay capable clients and servers. This will eventually be replaced - // by a [tailcfg.CapabilityVersion] comparison. It enables early testing of - // the UDP relay feature before we have established related - // [tailcfg.CapabilityVersion]'s. - debugAssumeUDPRelayCapable = envknob.RegisterBool("TS_DEBUG_ASSUME_UDP_RELAY_CAPABLE") // Hey you! Adding a new debugknob? Make sure to stub it out in the // debugknobs_stubs.go file too. ) diff --git a/wgengine/magicsock/debugknobs_stubs.go b/wgengine/magicsock/debugknobs_stubs.go index 3d23b1f8e8f01..336d7baa19645 100644 --- a/wgengine/magicsock/debugknobs_stubs.go +++ b/wgengine/magicsock/debugknobs_stubs.go @@ -31,4 +31,3 @@ func debugRingBufferMaxSizeBytes() int { return 0 } func inTest() bool { return false } func debugPeerMap() bool { return false } func pretendpoints() []netip.AddrPort { return []netip.AddrPort{} } -func debugAssumeUDPRelayCapable() bool { return false } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 14feed32b5929..a8b1c8f15032d 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -14,7 +14,6 @@ import ( "expvar" "fmt" "io" - "math" "net" "net/netip" "reflect" @@ -2616,14 +2615,10 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { }) } +// capVerIsRelayCapable returns true if version is relay client and server +// capable, otherwise it returns false. 
func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { - // TODO(jwhited): implement once capVer is bumped - return version == math.MinInt32 || debugAssumeUDPRelayCapable() -} - -func capVerIsRelayServerCapable(version tailcfg.CapabilityVersion) bool { - // TODO(jwhited): implement once capVer is bumped & update Test_peerAPIIfCandidateRelayServer - return version == math.MinInt32 || debugAssumeUDPRelayCapable() + return version >= 120 } // onFilterUpdate is called when a [FilterUpdate] is received over the @@ -2677,10 +2672,16 @@ func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, maybeCandidate tai if filt == nil || !self.Valid() || !maybeCandidate.Valid() || - !capVerIsRelayServerCapable(maybeCandidate.Cap()) || !maybeCandidate.Hostinfo().Valid() { return netip.AddrPort{} } + if maybeCandidate.ID() != self.ID() && !capVerIsRelayCapable(maybeCandidate.Cap()) { + // If maybeCandidate's [tailcfg.CapabilityVersion] is not relay-capable, + // we skip it. If maybeCandidate happens to be self, then this check is + // unnecessary as self is always capable from this point (the statically + // compiled [tailcfg.CurrentCapabilityVersion]) forward. + return netip.AddrPort{} + } for _, maybeCandidatePrefix := range maybeCandidate.Addresses().All() { if !maybeCandidatePrefix.IsSingleIP() { continue diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 0515162c72b9f..1d76e6c595a47 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3399,7 +3399,11 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { } selfOnlyIPv4 := &tailcfg.Node{ - Cap: math.MinInt32, + ID: 1, + // Intentionally set a value < 120 to verify the statically compiled + // [tailcfg.CurrentCapabilityVersion] is used when self is + // maybeCandidate. 
+ Cap: 119, Addresses: []netip.Prefix{ netip.MustParsePrefix("1.1.1.1/32"), }, @@ -3409,13 +3413,17 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { selfOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") peerOnlyIPv4 := &tailcfg.Node{ - Cap: math.MinInt32, + ID: 2, + Cap: 120, Addresses: []netip.Prefix{ netip.MustParsePrefix("2.2.2.2/32"), }, Hostinfo: hostInfo.View(), } + peerOnlyIPv4NotCapable := peerOnlyIPv4.Clone() + peerOnlyIPv4NotCapable.Cap = 119 + peerOnlyIPv6 := peerOnlyIPv4.Clone() peerOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::2/128") @@ -3500,6 +3508,22 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { maybeCandidate: selfOnlyIPv6.View(), want: netip.AddrPortFrom(selfOnlyIPv6.Addresses[0].Addr(), 6), }, + { + name: "peer incapable", + filt: filter.New([]filtertype.Match{ + { + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Caps: []filtertype.CapMatch{ + { + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfOnlyIPv4.View(), + maybeCandidate: peerOnlyIPv4NotCapable.View(), + }, { name: "no match dst", filt: filter.New([]filtertype.Match{ From cb7a0b1dca91cef710f61cd4f3694bafa27bb7a0 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 15 Jul 2025 15:23:47 -0700 Subject: [PATCH 0124/1093] net/udprelay: log socket read errors (#16573) Socket read errors currently close the server, so we need to understand when and why they occur. 
Updates tailscale/corp#27502 Updates tailscale/corp#30118 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index e2652ae99637f..7651bf295a233 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -581,6 +581,7 @@ func (s *Server) packetReadLoop(readFromSocket, otherSocket *net.UDPConn) { // TODO: extract laddr from IP_PKTINFO for use in reply n, from, err := readFromSocket.ReadFromUDPAddrPort(b) if err != nil { + s.logf("error reading from socket(%v): %v", readFromSocket.LocalAddr(), err) return } s.handlePacket(from, b[:n], readFromSocket, otherSocket) From 67514f5eb2f9737e7d819f43f007be970e17f293 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 16 Jul 2025 08:08:59 -0700 Subject: [PATCH 0125/1093] ssh/tailssh: fix path of "true" on Darwin (#16569) This is a follow-up to #15351, which fixed the test for Linux but not for Darwin, which stores its "true" executable in /usr/bin instead of /bin. Try both paths when not running on Windows. In addition, disable CGo in the integration test build, which was causing the linker to fail. These tests do not need CGo, and it appears we had some version skew with the base image on the runners. In addition, in error cases the recover step of the permissions check was spuriously panicking and masking the "real" failure reason. Don't do that check when a command was not produced. Updates #15350 Change-Id: Icd91517f45c90f7554310ebf1c888cdfd109f43a Signed-off-by: M. J. 
Fromberger --- Makefile | 4 ++-- ssh/tailssh/incubator.go | 25 ++++++++++++++----------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index f5fc205891191..55e55f209575c 100644 --- a/Makefile +++ b/Makefile @@ -126,8 +126,8 @@ publishdevproxy: check-image-repo ## Build and publish k8s-proxy image to locati .PHONY: sshintegrationtest sshintegrationtest: ## Run the SSH integration tests in various Docker containers - @GOOS=linux GOARCH=amd64 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \ - GOOS=linux GOARCH=amd64 ./tool/go build -o ssh/tailssh/testcontainers/tailscaled ./cmd/tailscaled && \ + @GOOS=linux GOARCH=amd64 CGO_ENABLED=0 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \ + GOOS=linux GOARCH=amd64 CGO_ENABLED=0 ./tool/go build -o ssh/tailssh/testcontainers/tailscaled ./cmd/tailscaled && \ echo "Testing on ubuntu:focal" && docker build --build-arg="BASE=ubuntu:focal" -t ssh-ubuntu-focal ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:jammy" && docker build --build-arg="BASE=ubuntu:jammy" -t ssh-ubuntu-jammy ssh/tailssh/testcontainers && \ echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \ diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index 9e1a9ea94e424..dd280143e36e3 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -51,6 +51,7 @@ const ( darwin = "darwin" freebsd = "freebsd" openbsd = "openbsd" + windows = "windows" ) func init() { @@ -80,20 +81,22 @@ func tryExecInDir(ctx context.Context, dir string) error { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() + run := func(path string) error { + cmd := exec.CommandContext(ctx, path) + cmd.Dir = dir + return cmd.Run() + } + // Assume that the following executables exist, are executable, and // immediately return. 
- var name string - switch runtime.GOOS { - case "windows": + if runtime.GOOS == windows { windir := os.Getenv("windir") - name = filepath.Join(windir, "system32", "doskey.exe") - default: - name = "/bin/true" + return run(filepath.Join(windir, "system32", "doskey.exe")) } - - cmd := exec.CommandContext(ctx, name) - cmd.Dir = dir - return cmd.Run() + if err := run("/bin/true"); !errors.Is(err, exec.ErrNotFound) { // including nil + return err + } + return run("/usr/bin/true") } // newIncubatorCommand returns a new exec.Cmd configured with @@ -107,7 +110,7 @@ func tryExecInDir(ctx context.Context, dir string) error { // The returned Cmd.Env is guaranteed to be nil; the caller populates it. func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err error) { defer func() { - if cmd.Env != nil { + if cmd != nil && cmd.Env != nil { panic("internal error") } }() From 3c6d17e6f114a2dc166e62b84789154b176e07c6 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 16 Jul 2025 10:03:05 -0700 Subject: [PATCH 0126/1093] cmd/tailscale/cli,ipn/ipnlocal,wgengine/magicsock: implement tailscale debug peer-relay-servers (#16577) Updates tailscale/corp#30036 Signed-off-by: Jordan Whited --- cmd/tailscale/cli/debug.go | 20 ++++++++++++++++++++ ipn/ipnlocal/local.go | 4 ++++ ipn/localapi/localapi.go | 6 ++++++ wgengine/magicsock/magicsock.go | 5 +++++ wgengine/magicsock/relaymanager.go | 21 +++++++++++++++++++++ wgengine/magicsock/relaymanager_test.go | 15 +++++++++++++++ 6 files changed, 71 insertions(+) diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index ec8a0700dec19..8473c4a1707fa 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -356,6 +356,12 @@ func debugCmd() *ffcli.Command { ShortHelp: "Print Go's runtime/debug.BuildInfo", Exec: runGoBuildInfo, }, + { + Name: "peer-relay-servers", + ShortUsage: "tailscale debug peer-relay-servers", + ShortHelp: "Print the current set of candidate peer relay servers", + 
Exec: runPeerRelayServers, + }, }...), } } @@ -1327,3 +1333,17 @@ func runDebugResolve(ctx context.Context, args []string) error { } return nil } + +func runPeerRelayServers(ctx context.Context, args []string) error { + if len(args) > 0 { + return errors.New("unexpected arguments") + } + v, err := localClient.DebugResultJSON(ctx, "peer-relay-servers") + if err != nil { + return err + } + e := json.NewEncoder(os.Stdout) + e.SetIndent("", " ") + e.Encode(v) + return nil +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9b9bd82b5e9e5..62ab6d9047338 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6956,6 +6956,10 @@ func (b *LocalBackend) DebugReSTUN() error { return nil } +func (b *LocalBackend) DebugPeerRelayServers() set.Set[netip.AddrPort] { + return b.MagicConn().PeerRelays() +} + // ControlKnobs returns the node's control knobs. func (b *LocalBackend) ControlKnobs() *controlknobs.Knobs { return b.sys.ControlKnobs() diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index cd59c54e05489..fb024039ba52a 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -696,6 +696,12 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { break } h.b.DebugForcePreferDERP(n) + case "peer-relay-servers": + servers := h.b.DebugPeerRelayServers() + err = json.NewEncoder(w).Encode(servers) + if err == nil { + return + } case "": err = fmt.Errorf("missing parameter 'action'") default: diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a8b1c8f15032d..24a4fc073f1f1 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3907,3 +3907,8 @@ func (le *lazyEndpoint) FromPeer(peerPublicKey [32]byte) { le.c.peerMap.setNodeKeyForEpAddr(le.src, pubKey) le.c.logf("magicsock: lazyEndpoint.FromPeer(%v) setting epAddr(%v) in peerMap for node(%v)", pubKey.ShortString(), le.src, ep.nodeAddr) } + +// PeerRelays returns the current set of candidate peer 
relays. +func (c *Conn) PeerRelays() set.Set[netip.AddrPort] { + return c.relayManager.getServers() +} diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index c8c9ed41b7b82..d7acf80b51a58 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -57,6 +57,7 @@ type relayManager struct { newServerEndpointCh chan newRelayServerEndpointEvent rxHandshakeDiscoMsgCh chan relayHandshakeDiscoMsgEvent serversCh chan set.Set[netip.AddrPort] + getServersCh chan chan set.Set[netip.AddrPort] discoInfoMu sync.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo @@ -185,10 +186,29 @@ func (r *relayManager) runLoop() { if !r.hasActiveWorkRunLoop() { return } + case getServersCh := <-r.getServersCh: + r.handleGetServersRunLoop(getServersCh) + if !r.hasActiveWorkRunLoop() { + return + } } } } +func (r *relayManager) handleGetServersRunLoop(getServersCh chan set.Set[netip.AddrPort]) { + servers := make(set.Set[netip.AddrPort], len(r.serversByAddrPort)) + for server := range r.serversByAddrPort { + servers.Add(server) + } + getServersCh <- servers +} + +func (r *relayManager) getServers() set.Set[netip.AddrPort] { + ch := make(chan set.Set[netip.AddrPort]) + relayManagerInputEvent(r, nil, &r.getServersCh, ch) + return <-ch +} + func (r *relayManager) handleServersUpdateRunLoop(update set.Set[netip.AddrPort]) { for k, v := range r.serversByAddrPort { if !update.Contains(k) { @@ -244,6 +264,7 @@ func (r *relayManager) init() { r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) r.rxHandshakeDiscoMsgCh = make(chan relayHandshakeDiscoMsgEvent) r.serversCh = make(chan set.Set[netip.AddrPort]) + r.getServersCh = make(chan chan set.Set[netip.AddrPort]) r.runLoopStoppedCh = make(chan struct{}, 1) r.runLoopStoppedCh <- struct{}{} }) diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 8f92360122d0e..01f9258ad7521 
100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -32,4 +32,19 @@ func TestRelayManagerInitAndIdle(t *testing.T) { rm = relayManager{} rm.handleRelayServersSet(make(set.Set[netip.AddrPort])) <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.getServers() + <-rm.runLoopStoppedCh +} + +func TestRelayManagerGetServers(t *testing.T) { + rm := relayManager{} + servers := make(set.Set[netip.AddrPort], 1) + servers.Add(netip.MustParseAddrPort("192.0.2.1:7")) + rm.handleRelayServersSet(servers) + got := rm.getServers() + if !servers.Equal(got) { + t.Errorf("got %v != want %v", got, servers) + } } From 097c2bcf6700e5dc074187bbe0c05ae4cd8b3c26 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 16 Jul 2025 11:04:32 -0700 Subject: [PATCH 0127/1093] go.mod: bump wireguard-go (#16578) So that conn.PeerAwareEndpoint is always evaluated per-packet, rather than at least once per packet batch. Updates tailscale/corp#30042 Signed-off-by: Jordan Whited --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f040d7799768d..3d7514158f069 100644 --- a/go.mod +++ b/go.mod @@ -90,7 +90,7 @@ require ( github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 - github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9 + github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e github.com/tc-hib/winres v0.2.1 github.com/tcnksm/go-httpstat v0.2.0 diff --git a/go.sum b/go.sum index ea17b11821392..995b930100ff9 100644 --- a/go.sum +++ b/go.sum @@ -975,8 +975,8 @@ github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:U github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod 
h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y= -github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9 h1:kSzi/ugdekAxhcVdCxH6er7OjoNc2oDRcimWJDvnRFM= -github.com/tailscale/wireguard-go v0.0.0-20250711050509-4064566ecaf9/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da h1:jVRUZPRs9sqyKlYHHzHjAqKN+6e/Vog6NpHYeNPJqOw= +github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA= From 17c5116d469f79d5fba20e50fc414932f3ce681d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 16 Jul 2025 11:19:21 -0700 Subject: [PATCH 0128/1093] ipn/ipnlocal: sort tailscale debug peer-relay-servers slice (#16579) Updates tailscale/corp#30036 Signed-off-by: Jordan Whited --- ipn/localapi/localapi.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index fb024039ba52a..d7c64b917ead4 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -697,7 +697,10 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { } h.b.DebugForcePreferDERP(n) case "peer-relay-servers": - servers := h.b.DebugPeerRelayServers() + servers := h.b.DebugPeerRelayServers().Slice() + slices.SortFunc(servers, func(a, b netip.AddrPort) int { + return a.Compare(b) + }) err = json.NewEncoder(w).Encode(servers) if err == nil { return From 
e84e58c56733072b15fb92c10e4ff702d8fa84d4 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 16 Jul 2025 11:50:13 -0700 Subject: [PATCH 0129/1093] ipn/ipnlocal: use rendezvous hashing to traffic-steer exit nodes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With auto exit nodes enabled, the client picks exit nodes from the ones advertised in the network map. Usually, it picks the one with the highest priority score, but when the top spot is tied, it used to pick randomly. Then, once it made a selection, it would strongly prefer to stick with that exit node. It wouldn’t even consider another exit node unless the client was shutdown or the exit node went offline. This is to prevent flapping, where a client constantly chooses a different random exit node. The major problem with this algorithm is that new exit nodes don’t get selected as often as they should. In fact, they wouldn’t even move over if a higher scoring exit node appeared. Let’s say that you have an exit node and it’s overloaded. So you spin up a new exit node, right beside your existing one, in the hopes that the traffic will be split across them. But since the client had this strong affinity, they stick with the exit node they know and love. Using rendezvous hashing, we can have different clients spread their selections equally across their top scoring exit nodes. When an exit node shuts down, its clients will spread themselves evenly to their other equal options. When an exit node starts, a proportional number of clients will migrate to their new best option. Read more: https://en.wikipedia.org/wiki/Rendezvous_hashing The trade-off is that starting up a new exit node may cause some clients to move over, interrupting their existing network connections. So this change is only enabled for tailnets with `traffic-steering` enabled. 
It wouldn’t even consider another exit node unless the client was shut down or the exit node went offline.
-func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNodeID, allowed set.Set[tailcfg.StableNodeID]) (apitype.ExitNodeSuggestionResponse, error) { +func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcfg.StableNodeID]) (apitype.ExitNodeSuggestionResponse, error) { nm := nb.NetMap() if nm == nil { return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap } + self := nb.Self() + if !self.Valid() { + return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap + } + if !nb.SelfHasCap(tailcfg.NodeAttrTrafficSteering) { panic("missing traffic-steering capability") } @@ -7923,12 +7929,6 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod if !tsaddr.ContainsExitRoutes(p.AllowedIPs()) { return false } - if p.StableID() == prev { - // Prevent flapping: since prev is a valid suggestion, - // force prev to be the only valid pick. - force = p - return false - } return true }) if force.Valid() { @@ -7950,6 +7950,7 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod } return s } + rdvHash := makeRendezvousHasher(self.ID()) var pick tailcfg.NodeView if len(nodes) == 1 { @@ -7958,25 +7959,18 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, prev tailcfg.StableNod if len(nodes) > 1 { // Find the highest scoring exit nodes. slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int { - return cmp.Compare(score(b), score(a)) // reverse sort - }) - - // Find the top exit nodes, which all have the same score. - topI := len(nodes) - ts := score(nodes[0]) - for i, n := range nodes[1:] { - if score(n) < ts { - // n is the first node with a lower score. - // Make nodes[:topI] to slice the top exit nodes. - topI = i + 1 - break + c := cmp.Compare(score(b), score(a)) // Highest score first. + if c == 0 { + // Rendezvous hashing for reliably picking the + // same node from a list: tailscale/tailscale#16551. 
+ return cmp.Compare(rdvHash(b.ID()), rdvHash(a.ID())) } - } + return c + }) // TODO(sfllaw): add a temperature knob so that this client has // a chance of picking the next best option. - randSeed := uint64(nm.SelfNode.ID()) - pick = nodes[rands.IntN(randSeed, topI)] + pick = nodes[0] } if !pick.Valid() { @@ -8077,6 +8071,19 @@ func longLatDistance(fromLat, fromLong, toLat, toLong float64) float64 { return earthRadiusMeters * c } +// makeRendezvousHasher returns a function that hashes a node ID to a uint64. +// https://en.wikipedia.org/wiki/Rendezvous_hashing +func makeRendezvousHasher(seed tailcfg.NodeID) func(tailcfg.NodeID) uint64 { + en := binary.BigEndian + return func(n tailcfg.NodeID) uint64 { + var b [16]byte + en.PutUint64(b[:], uint64(seed)) + en.PutUint64(b[8:], uint64(n)) + v := sha256.Sum256(b[:]) + return en.Uint64(v[:]) + } +} + const ( // unresolvedExitNodeID is a special [tailcfg.StableNodeID] value // used as an exit node ID to install a blackhole route, preventing diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 0b39c45c28f7d..13681fc0430ea 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5012,7 +5012,7 @@ func TestSuggestExitNodeTrafficSteering(t *testing.T) { wantName: "peer1", }, { - name: "many-suggested-exit-nodes", + name: "suggest-exit-node-stable-pick", netMap: &netmap.NetworkMap{ SelfNode: selfNode.View(), Peers: []tailcfg.NodeView{ @@ -5030,55 +5030,10 @@ func TestSuggestExitNodeTrafficSteering(t *testing.T) { withSuggest()), }, }, + // Change this, if the hashing function changes. 
wantID: "stable3", wantName: "peer3", }, - { - name: "suggested-exit-node-was-last-suggested", - netMap: &netmap.NetworkMap{ - SelfNode: selfNode.View(), - Peers: []tailcfg.NodeView{ - makePeer(1, - withExitRoutes(), - withSuggest()), - makePeer(2, - withExitRoutes(), - withSuggest()), - makePeer(3, - withExitRoutes(), - withSuggest()), - makePeer(4, - withExitRoutes(), - withSuggest()), - }, - }, - lastExit: "stable2", // overrides many-suggested-exit-nodes - wantID: "stable2", - wantName: "peer2", - }, - { - name: "suggested-exit-node-was-never-suggested", - netMap: &netmap.NetworkMap{ - SelfNode: selfNode.View(), - Peers: []tailcfg.NodeView{ - makePeer(1, - withExitRoutes(), - withSuggest()), - makePeer(2, - withExitRoutes(), - withSuggest()), - makePeer(3, - withExitRoutes(), - withSuggest()), - makePeer(4, - withExitRoutes(), - withSuggest()), - }, - }, - lastExit: "stable10", - wantID: "stable3", // matches many-suggested-exit-nodes - wantName: "peer3", - }, { name: "exit-nodes-with-and-without-priority", netMap: &netmap.NetworkMap{ @@ -5282,7 +5237,7 @@ func TestSuggestExitNodeTrafficSteering(t *testing.T) { defer nb.shutdown(errShutdown) nb.SetNetMap(tt.netMap) - got, err := suggestExitNodeUsingTrafficSteering(nb, tt.lastExit, allowList) + got, err := suggestExitNodeUsingTrafficSteering(nb, allowList) if tt.wantErr == nil && err != nil { t.Fatalf("err=%v, want nil", err) } From 36aeacb297ae97f5b21358cfe6ddc814d3920d59 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 16 Jul 2025 14:34:05 -0700 Subject: [PATCH 0130/1093] wgengine/magicsock: add peer relay metrics (#16582) Updates tailscale/corp#30040 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 21 +++- wgengine/magicsock/magicsock.go | 194 ++++++++++++++++++++++---------- 2 files changed, 151 insertions(+), 64 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 385c9245ec4e7..48d5ef5a11338 100644 --- a/wgengine/magicsock/endpoint.go +++ 
b/wgengine/magicsock/endpoint.go @@ -1064,11 +1064,21 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { switch { case udpAddr.ap.Addr().Is4(): - de.c.metrics.outboundPacketsIPv4Total.Add(int64(len(buffs))) - de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes)) + if udpAddr.vni.isSet() { + de.c.metrics.outboundPacketsPeerRelayIPv4Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesPeerRelayIPv4Total.Add(int64(txBytes)) + } else { + de.c.metrics.outboundPacketsIPv4Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes)) + } case udpAddr.ap.Addr().Is6(): - de.c.metrics.outboundPacketsIPv6Total.Add(int64(len(buffs))) - de.c.metrics.outboundBytesIPv6Total.Add(int64(txBytes)) + if udpAddr.vni.isSet() { + de.c.metrics.outboundPacketsPeerRelayIPv6Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesPeerRelayIPv6Total.Add(int64(txBytes)) + } else { + de.c.metrics.outboundPacketsIPv6Total.Add(int64(len(buffs))) + de.c.metrics.outboundBytesIPv6Total.Add(int64(txBytes)) + } } // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends. 
@@ -1082,7 +1092,8 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { for _, buff := range buffs { buff = buff[offset:] const isDisco = false - ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff, isDisco) + const isGeneveEncap = false + ok, _ := de.c.sendAddr(derpAddr, de.publicKey, buff, isDisco, isGeneveEncap) txBytes += len(buff) if !ok { allOk = false diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 24a4fc073f1f1..ad07003f72fbb 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -87,9 +87,11 @@ const ( type Path string const ( - PathDirectIPv4 Path = "direct_ipv4" - PathDirectIPv6 Path = "direct_ipv6" - PathDERP Path = "derp" + PathDirectIPv4 Path = "direct_ipv4" + PathDirectIPv6 Path = "direct_ipv6" + PathDERP Path = "derp" + PathPeerRelayIPv4 Path = "peer_relay_ipv4" + PathPeerRelayIPv6 Path = "peer_relay_ipv6" ) type pathLabel struct { @@ -97,6 +99,8 @@ type pathLabel struct { // - direct_ipv4 // - direct_ipv6 // - derp + // - peer_relay_ipv4 + // - peer_relay_ipv6 Path Path } @@ -108,27 +112,35 @@ type pathLabel struct { type metrics struct { // inboundPacketsTotal is the total number of inbound packets received, // labeled by the path the packet took. - inboundPacketsIPv4Total expvar.Int - inboundPacketsIPv6Total expvar.Int - inboundPacketsDERPTotal expvar.Int + inboundPacketsIPv4Total expvar.Int + inboundPacketsIPv6Total expvar.Int + inboundPacketsDERPTotal expvar.Int + inboundPacketsPeerRelayIPv4Total expvar.Int + inboundPacketsPeerRelayIPv6Total expvar.Int // inboundBytesTotal is the total number of inbound bytes received, // labeled by the path the packet took. 
- inboundBytesIPv4Total expvar.Int - inboundBytesIPv6Total expvar.Int - inboundBytesDERPTotal expvar.Int + inboundBytesIPv4Total expvar.Int + inboundBytesIPv6Total expvar.Int + inboundBytesDERPTotal expvar.Int + inboundBytesPeerRelayIPv4Total expvar.Int + inboundBytesPeerRelayIPv6Total expvar.Int // outboundPacketsTotal is the total number of outbound packets sent, // labeled by the path the packet took. - outboundPacketsIPv4Total expvar.Int - outboundPacketsIPv6Total expvar.Int - outboundPacketsDERPTotal expvar.Int + outboundPacketsIPv4Total expvar.Int + outboundPacketsIPv6Total expvar.Int + outboundPacketsDERPTotal expvar.Int + outboundPacketsPeerRelayIPv4Total expvar.Int + outboundPacketsPeerRelayIPv6Total expvar.Int // outboundBytesTotal is the total number of outbound bytes sent, // labeled by the path the packet took. - outboundBytesIPv4Total expvar.Int - outboundBytesIPv6Total expvar.Int - outboundBytesDERPTotal expvar.Int + outboundBytesIPv4Total expvar.Int + outboundBytesIPv6Total expvar.Int + outboundBytesDERPTotal expvar.Int + outboundBytesPeerRelayIPv4Total expvar.Int + outboundBytesPeerRelayIPv6Total expvar.Int // outboundPacketsDroppedErrors is the total number of outbound packets // dropped due to errors. 
@@ -723,6 +735,8 @@ func registerMetrics(reg *usermetric.Registry) *metrics { pathDirectV4 := pathLabel{Path: PathDirectIPv4} pathDirectV6 := pathLabel{Path: PathDirectIPv6} pathDERP := pathLabel{Path: PathDERP} + pathPeerRelayV4 := pathLabel{Path: PathPeerRelayIPv4} + pathPeerRelayV6 := pathLabel{Path: PathPeerRelayIPv6} inboundPacketsTotal := usermetric.NewMultiLabelMapWithRegistry[pathLabel]( reg, "tailscaled_inbound_packets_total", @@ -755,25 +769,37 @@ func registerMetrics(reg *usermetric.Registry) *metrics { metricRecvDataPacketsIPv4.Register(&m.inboundPacketsIPv4Total) metricRecvDataPacketsIPv6.Register(&m.inboundPacketsIPv6Total) metricRecvDataPacketsDERP.Register(&m.inboundPacketsDERPTotal) + metricRecvDataPacketsPeerRelayIPv4.Register(&m.inboundPacketsPeerRelayIPv4Total) + metricRecvDataPacketsPeerRelayIPv6.Register(&m.inboundPacketsPeerRelayIPv6Total) metricSendUDP.Register(&m.outboundPacketsIPv4Total) metricSendUDP.Register(&m.outboundPacketsIPv6Total) metricSendDERP.Register(&m.outboundPacketsDERPTotal) + metricSendPeerRelay.Register(&m.outboundPacketsPeerRelayIPv4Total) + metricSendPeerRelay.Register(&m.outboundPacketsPeerRelayIPv6Total) inboundPacketsTotal.Set(pathDirectV4, &m.inboundPacketsIPv4Total) inboundPacketsTotal.Set(pathDirectV6, &m.inboundPacketsIPv6Total) inboundPacketsTotal.Set(pathDERP, &m.inboundPacketsDERPTotal) + inboundPacketsTotal.Set(pathPeerRelayV4, &m.inboundPacketsPeerRelayIPv4Total) + inboundPacketsTotal.Set(pathPeerRelayV6, &m.inboundPacketsPeerRelayIPv6Total) inboundBytesTotal.Set(pathDirectV4, &m.inboundBytesIPv4Total) inboundBytesTotal.Set(pathDirectV6, &m.inboundBytesIPv6Total) inboundBytesTotal.Set(pathDERP, &m.inboundBytesDERPTotal) + inboundBytesTotal.Set(pathPeerRelayV4, &m.inboundBytesPeerRelayIPv4Total) + inboundBytesTotal.Set(pathPeerRelayV6, &m.inboundBytesPeerRelayIPv6Total) outboundPacketsTotal.Set(pathDirectV4, &m.outboundPacketsIPv4Total) outboundPacketsTotal.Set(pathDirectV6, &m.outboundPacketsIPv6Total) 
outboundPacketsTotal.Set(pathDERP, &m.outboundPacketsDERPTotal) + outboundPacketsTotal.Set(pathPeerRelayV4, &m.outboundPacketsPeerRelayIPv4Total) + outboundPacketsTotal.Set(pathPeerRelayV6, &m.outboundPacketsPeerRelayIPv6Total) outboundBytesTotal.Set(pathDirectV4, &m.outboundBytesIPv4Total) outboundBytesTotal.Set(pathDirectV6, &m.outboundBytesIPv6Total) outboundBytesTotal.Set(pathDERP, &m.outboundBytesDERPTotal) + outboundBytesTotal.Set(pathPeerRelayV4, &m.outboundBytesPeerRelayIPv4Total) + outboundBytesTotal.Set(pathPeerRelayV6, &m.outboundBytesPeerRelayIPv6Total) outboundPacketsDroppedErrors.Set(usermetric.DropLabels{Reason: usermetric.ReasonError}, &m.outboundPacketsDroppedErrors) @@ -786,8 +812,11 @@ func deregisterMetrics(m *metrics) { metricRecvDataPacketsIPv4.UnregisterAll() metricRecvDataPacketsIPv6.UnregisterAll() metricRecvDataPacketsDERP.UnregisterAll() + metricRecvDataPacketsPeerRelayIPv4.UnregisterAll() + metricRecvDataPacketsPeerRelayIPv6.UnregisterAll() metricSendUDP.UnregisterAll() metricSendDERP.UnregisterAll() + metricSendPeerRelay.UnregisterAll() } // InstallCaptureHook installs a callback which is called to @@ -1415,23 +1444,37 @@ func (c *Conn) sendUDPBatch(addr epAddr, buffs [][]byte, offset int) (sent bool, // sendUDP sends UDP packet b to ipp. // See sendAddr's docs on the return value meanings. 
-func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool) (sent bool, err error) { +func (c *Conn) sendUDP(ipp netip.AddrPort, b []byte, isDisco bool, isGeneveEncap bool) (sent bool, err error) { if runtime.GOOS == "js" { return false, errNoUDP } sent, err = c.sendUDPStd(ipp, b) if err != nil { - metricSendUDPError.Add(1) + if isGeneveEncap { + metricSendPeerRelayError.Add(1) + } else { + metricSendUDPError.Add(1) + } c.maybeRebindOnError(err) } else { if sent && !isDisco { switch { case ipp.Addr().Is4(): - c.metrics.outboundPacketsIPv4Total.Add(1) - c.metrics.outboundBytesIPv4Total.Add(int64(len(b))) + if isGeneveEncap { + c.metrics.outboundPacketsPeerRelayIPv4Total.Add(1) + c.metrics.outboundBytesPeerRelayIPv4Total.Add(int64(len(b))) + } else { + c.metrics.outboundPacketsIPv4Total.Add(1) + c.metrics.outboundBytesIPv4Total.Add(int64(len(b))) + } case ipp.Addr().Is6(): - c.metrics.outboundPacketsIPv6Total.Add(1) - c.metrics.outboundBytesIPv6Total.Add(int64(len(b))) + if isGeneveEncap { + c.metrics.outboundPacketsPeerRelayIPv6Total.Add(1) + c.metrics.outboundBytesPeerRelayIPv6Total.Add(int64(len(b))) + } else { + c.metrics.outboundPacketsIPv6Total.Add(1) + c.metrics.outboundBytesIPv6Total.Add(int64(len(b))) + } } } } @@ -1506,9 +1549,9 @@ func (c *Conn) sendUDPStd(addr netip.AddrPort, b []byte) (sent bool, err error) // An example of when they might be different: sending to an // IPv6 address when the local machine doesn't have IPv6 support // returns (false, nil); it's not an error, but nothing was sent. 
-func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, isDisco bool) (sent bool, err error) { +func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, isDisco bool, isGeneveEncap bool) (sent bool, err error) { if addr.Addr() != tailcfg.DerpMagicIPAddr { - return c.sendUDP(addr, b, isDisco) + return c.sendUDP(addr, b, isDisco, isGeneveEncap) } regionID := int(addr.Port()) @@ -1562,7 +1605,9 @@ func (c *Conn) putReceiveBatch(batch *receiveBatch) { func (c *Conn) receiveIPv4() conn.ReceiveFunc { return c.mkReceiveFunc(&c.pconn4, c.health.ReceiveFuncStats(health.ReceiveIPv4), &c.metrics.inboundPacketsIPv4Total, + &c.metrics.inboundPacketsPeerRelayIPv4Total, &c.metrics.inboundBytesIPv4Total, + &c.metrics.inboundBytesPeerRelayIPv4Total, ) } @@ -1570,13 +1615,15 @@ func (c *Conn) receiveIPv4() conn.ReceiveFunc { func (c *Conn) receiveIPv6() conn.ReceiveFunc { return c.mkReceiveFunc(&c.pconn6, c.health.ReceiveFuncStats(health.ReceiveIPv6), &c.metrics.inboundPacketsIPv6Total, + &c.metrics.inboundPacketsPeerRelayIPv6Total, &c.metrics.inboundBytesIPv6Total, + &c.metrics.inboundBytesPeerRelayIPv6Total, ) } // mkReceiveFunc creates a ReceiveFunc reading from ruc. // The provided healthItem and metrics are updated if non-nil. -func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, packetMetric, bytesMetric *expvar.Int) conn.ReceiveFunc { +func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFuncStats, directPacketMetric, peerRelayPacketMetric, directBytesMetric, peerRelayBytesMetric *expvar.Int) conn.ReceiveFunc { // epCache caches an epAddr->endpoint for hot flows. 
var epCache epAddrEndpointCache @@ -1612,12 +1659,21 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu continue } ipp := msg.Addr.(*net.UDPAddr).AddrPort() - if ep, size, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok { - if packetMetric != nil { - packetMetric.Add(1) - } - if bytesMetric != nil { - bytesMetric.Add(int64(msg.N)) + if ep, size, isGeneveEncap, ok := c.receiveIP(msg.Buffers[0][:msg.N], ipp, &epCache); ok { + if isGeneveEncap { + if peerRelayPacketMetric != nil { + peerRelayPacketMetric.Add(1) + } + if peerRelayBytesMetric != nil { + peerRelayBytesMetric.Add(int64(msg.N)) + } + } else { + if directPacketMetric != nil { + directPacketMetric.Add(1) + } + if directBytesMetric != nil { + directBytesMetric.Add(int64(msg.N)) + } } eps[i] = ep sizes[i] = size @@ -1646,11 +1702,14 @@ func looksLikeInitiationMsg(b []byte) bool { // receiveIP is the shared bits of ReceiveIPv4 and ReceiveIPv6. // // size is the length of 'b' to report up to wireguard-go (only relevant if -// 'ok' is true) +// 'ok' is true). +// +// isGeneveEncap is whether 'b' is encapsulated by a Geneve header (only +// relevant if 'ok' is true). // // ok is whether this read should be reported up to wireguard-go (our // caller). -func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCache) (_ conn.Endpoint, size int, ok bool) { +func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCache) (_ conn.Endpoint, size int, isGeneveEncap bool, ok bool) { var ep *endpoint size = len(b) @@ -1663,7 +1722,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // Decode only returns an error when 'b' is too short, and // 'isGeneveEncap' indicates it's a sufficient length. 
c.logf("[unexpected] geneve header decoding error: %v", err) - return nil, 0, false + return nil, 0, false, false } src.vni.set(geneve.VNI) } @@ -1678,10 +1737,10 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // [disco.MessageType], but we assert it should be handshake-related. shouldByRelayHandshakeMsg := geneve.Control == true c.handleDiscoMessage(b, src, shouldByRelayHandshakeMsg, key.NodePublic{}, discoRXPathUDP) - return nil, 0, false + return nil, 0, false, false case packetLooksLikeSTUNBinding: c.netChecker.ReceiveSTUNPacket(b, ipp) - return nil, 0, false + return nil, 0, false, false default: // Fall through for all other packet types as they are assumed to // be potentially WireGuard. @@ -1691,7 +1750,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // If we have no private key, we're logged out or // stopped. Don't try to pass these wireguard packets // up to wireguard-go; it'll just complain (issue 1167). - return nil, 0, false + return nil, 0, false, false } if src.vni.isSet() { @@ -1715,11 +1774,11 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // Note: UDP relay is dependent on cryptorouting enablement. We // only update Geneve-encapsulated [epAddr]s in the [peerMap] // via [lazyEndpoint]. - return nil, 0, false + return nil, 0, false, false } // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() // for the same batch & [epAddr] src. - return &lazyEndpoint{c: c, src: src}, size, true + return &lazyEndpoint{c: c, src: src}, size, isGeneveEncap, true } cache.epAddr = src cache.de = de @@ -1738,9 +1797,9 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // unlucky and fail to JIT configure the "correct" peer. 
// TODO(jwhited): relax this to include direct connections // See http://go/corp/29422 & http://go/corp/30042 - return &lazyEndpoint{c: c, maybeEP: ep, src: src}, size, true + return &lazyEndpoint{c: c, maybeEP: ep, src: src}, size, isGeneveEncap, true } - return ep, size, true + return ep, size, isGeneveEncap, true } // discoLogLevel controls the verbosity of discovery log messages. @@ -1861,7 +1920,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. box := di.sharedKey.Seal(m.AppendMarshal(nil)) pkt = append(pkt, box...) const isDisco = true - sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco) + sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco, dst.vni.isSet()) if sent { if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco()) { node := "?" @@ -2149,13 +2208,15 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake isVia := false msgType := "CallMeMaybe" cmm, ok := dm.(*disco.CallMeMaybe) - if !ok { + if ok { + metricRecvDiscoCallMeMaybe.Add(1) + } else { + metricRecvDiscoCallMeMaybeVia.Add(1) via = dm.(*disco.CallMeMaybeVia) msgType = "CallMeMaybeVia" isVia = true } - metricRecvDiscoCallMeMaybe.Add(1) if !isDERP || derpNodeSrc.IsZero() { // CallMeMaybe{Via} messages should only come via DERP. 
c.logf("[unexpected] %s packets should only come via DERP", msgType) @@ -2164,7 +2225,11 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake nodeKey := derpNodeSrc ep, ok := c.peerMap.endpointForNodeKey(nodeKey) if !ok { - metricRecvDiscoCallMeMaybeBadNode.Add(1) + if isVia { + metricRecvDiscoCallMeMaybeViaBadNode.Add(1) + } else { + metricRecvDiscoCallMeMaybeBadNode.Add(1) + } c.logf("magicsock: disco: ignoring %s from %v; %v is unknown", msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } @@ -2190,7 +2255,11 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return } if epDisco.key != di.discoKey { - metricRecvDiscoCallMeMaybeBadDisco.Add(1) + if isVia { + metricRecvDiscoCallMeMaybeViaBadDisco.Add(1) + } else { + metricRecvDiscoCallMeMaybeBadDisco.Add(1) + } c.logf("[unexpected] %s from peer via DERP whose netmap discokey != disco source", msgType) return } @@ -3695,15 +3764,19 @@ var ( metricSendDERPErrorQueue = clientmetric.NewCounter("magicsock_send_derp_error_queue") metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") metricSendUDPError = clientmetric.NewCounter("magicsock_send_udp_error") + metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") + metricSendPeerRelayError = clientmetric.NewCounter("magicsock_send_peer_relay_error") metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") metricSendDERPError = clientmetric.NewCounter("magicsock_send_derp_error") // Data packets (non-disco) - metricSendData = clientmetric.NewCounter("magicsock_send_data") - metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down") - metricRecvDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_derp") - metricRecvDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv4") - metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6") + 
metricSendData = clientmetric.NewCounter("magicsock_send_data") + metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down") + metricRecvDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_derp") + metricRecvDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv4") + metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6") + metricRecvDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv4") + metricRecvDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv6") // Disco packets metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") @@ -3719,15 +3792,18 @@ var ( metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") metricRecvDiscoBadParse = clientmetric.NewCounter("magicsock_disco_recv_bad_parse") - metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") - metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") - metricRecvDiscoPing = clientmetric.NewCounter("magicsock_disco_recv_ping") - metricRecvDiscoPong = clientmetric.NewCounter("magicsock_disco_recv_pong") - metricRecvDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe") - metricRecvDiscoCallMeMaybeBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_node") - metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") - metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") - metricRecvDiscoDERPPeerGoneUnknown = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") + metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") + metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") + metricRecvDiscoPing = clientmetric.NewCounter("magicsock_disco_recv_ping") + 
If --bg is explicitly set to false, an error message will be sent out.
The restriction on proxy target being localhost or 127.0.0.1 also applies to services. When removing proxies, TCP proxies can be removed with type and port flag and off argument. Web proxies can be removed with type, port, setPath flag and off argument. To align with normal serve, when setPath is not set, all handler under the hostport will be removed. When flags are not set but off argument was passed by user, it will be a noop. Removing all config for a service will be available later with a new subcommand clear. Updates tailscale/corp#22954 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: fix ai comments and fix a test Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: Add a test for addServiceToPrefs Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: fix comment Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * add dnsName in error message Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * change the cli input flag variable type Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * replace FindServiceConfig with map lookup Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * some code simplification and add asServiceName This commit cotains code simplification for IsServingHTTPS, SetWebHandler, SetTCPForwarding Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * replace IsServiceName with tailcfg.AsServiceName Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * replace all assemble of host name for service with strings.Join Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: adjust parameter order and update output message This commit updates the parameter order for IsTCPForwardingOnPort and SetWebHandler. 
Also updated the message msgServiceIPNotAssigned to msgServiceWaitingApproval to adapt to latest terminologies around services. Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: flip bool condition This commit fixes a previous bug added that throws error when serve funnel without service. It should've been the opposite, which throws error when serve funnel with service. Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: change parameter of IsTCPForwardingOnPort This commit changes the dnsName string parameter for IsTCPForwardingOnPort to svcName tailcfg.ServiceName. This change is made to reduce ambiguity when a single service might have different dnsNames Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * ipn/ipnlocal: replace the key to webHandler for services This commit changes the way we get the webhandler for vipServices. It used to use the host name from request to find the webHandler, now everything targeting the vipService IP have the same set of handlers. This commit also stores service:port instead of FQDN:port as the key in serviceConfig for Web map. Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: Updated use of service name. This commit removes serviceName.IsEmpty and use direct comparison to instead. In legacy code, when an empty service name needs to be passed, a new constant noService is passed. Removed redundant code for checking service name validity and string method for serviceNameFlag. Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: Update bgBoolFlag This commit update field name, set and string method of bgBoolFlag to make code cleaner. 
Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: remove isDefaultService output from srvTypeAndPortFromFlags This commit removes the isDefaultService output as it's no longer needed. Also deleted redundant code. Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: remove unnecessary variable declaration in messageForPort Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * replace bool output for AsServiceName with err Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: Replace DNSName with NoService if DNSName is only used to identify a service This commit moves the noService constant to tailcfg, and updates AsServiceName to return tailcfg.NoService if the input is not a valid service name. This commit also removes using the local DNSName as the svcName parameter. When a function is only using DNSName to identify if it's working with a service, the input is replaced with svcName and the caller is expected to pass tailcfg.NoService if it's a local serve. This commit also replaces some uses of Sprintf with net.JoinHostPort for ipn.HostPort creation. Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: Remove the returned error for AsServiceName Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * apply suggested code and comment Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * replace local dnsName in test with tailcfg.NoService Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * cmd/tailscale/cli: move noService back and use elsewhere The constant serves the purpose of providing readability when passed as a function parameter. It's more meaningful compared to a bare empty string. It can just be an empty string in other places.
Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * ipn: Make WebHandlerExists and RemoveTCPForwarding accept svcName This commit replaces two functions' string input with svcName input since they only use the dnsName to identify service. Also did some minor cleanups Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --------- Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_legacy.go | 42 +- cmd/tailscale/cli/serve_legacy_test.go | 16 + cmd/tailscale/cli/serve_v2.go | 395 +++++++++-- cmd/tailscale/cli/serve_v2_test.go | 925 ++++++++++++++++++++++++- cmd/tailscale/cli/status.go | 2 +- ipn/ipnlocal/serve.go | 4 +- ipn/serve.go | 208 ++++-- ipn/serve_test.go | 115 +++ tailcfg/tailcfg.go | 10 + 9 files changed, 1573 insertions(+), 144 deletions(-) diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 96629b5ad45ef..7c79f7f7bc972 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -141,6 +141,8 @@ type localServeClient interface { QueryFeature(ctx context.Context, feature string) (*tailcfg.QueryFeatureResponse, error) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, error) IncrementCounter(ctx context.Context, name string, delta int) error + GetPrefs(ctx context.Context) (*ipn.Prefs, error) + EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) } // serveEnv is the environment the serve command runs within. 
All I/O should be @@ -154,14 +156,16 @@ type serveEnv struct { json bool // output JSON (status only for now) // v2 specific flags - bg bool // background mode - setPath string // serve path - https uint // HTTP port - http uint // HTTP port - tcp uint // TCP port - tlsTerminatedTCP uint // a TLS terminated TCP port - subcmd serveMode // subcommand - yes bool // update without prompt + bg bgBoolFlag // background mode + setPath string // serve path + https uint // HTTP port + http uint // HTTP port + tcp uint // TCP port + tlsTerminatedTCP uint // a TLS terminated TCP port + subcmd serveMode // subcommand + yes bool // update without prompt + service tailcfg.ServiceName // service name + tun bool // redirect traffic to OS for service lc localServeClient // localClient interface, specific to serve @@ -354,7 +358,7 @@ func (e *serveEnv) handleWebServe(ctx context.Context, srvPort uint16, useTLS bo if err != nil { return err } - if sc.IsTCPForwardingOnPort(srvPort) { + if sc.IsTCPForwardingOnPort(srvPort, noService) { fmt.Fprintf(Stderr, "error: cannot serve web; already serving TCP\n") return errHelp } @@ -411,11 +415,11 @@ func (e *serveEnv) handleWebServeRemove(ctx context.Context, srvPort uint16, mou if err != nil { return err } - if sc.IsTCPForwardingOnPort(srvPort) { + if sc.IsTCPForwardingOnPort(srvPort, noService) { return errors.New("cannot remove web handler; currently serving TCP") } hp := ipn.HostPort(net.JoinHostPort(dnsName, strconv.Itoa(int(srvPort)))) - if !sc.WebHandlerExists(hp, mount) { + if !sc.WebHandlerExists(noService, hp, mount) { return errors.New("error: handler does not exist") } sc.RemoveWebHandler(dnsName, srvPort, []string{mount}, false) @@ -550,15 +554,15 @@ func (e *serveEnv) handleTCPServe(ctx context.Context, srcType string, srcPort u fwdAddr := "127.0.0.1:" + dstPortStr - if sc.IsServingWeb(srcPort) { - return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) - } - dnsName, err := e.getSelfDNSName(ctx) if err != nil 
{ return err } + if sc.IsServingWeb(srcPort, noService) { + return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) + } + sc.SetTCPForwarding(srcPort, fwdAddr, terminateTLS, dnsName) if !reflect.DeepEqual(cursc, sc) { @@ -581,11 +585,11 @@ func (e *serveEnv) handleTCPServeRemove(ctx context.Context, src uint16) error { if sc == nil { sc = new(ipn.ServeConfig) } - if sc.IsServingWeb(src) { + if sc.IsServingWeb(src, noService) { return fmt.Errorf("unable to remove; serving web, not TCP forwarding on serve port %d", src) } - if ph := sc.GetTCPPortHandler(src); ph != nil { - sc.RemoveTCPForwarding(src) + if ph := sc.GetTCPPortHandler(src, noService); ph != nil { + sc.RemoveTCPForwarding(noService, src) return e.lc.SetServeConfig(ctx, sc) } return errors.New("error: serve config does not exist") @@ -682,7 +686,7 @@ func (e *serveEnv) printWebStatusTree(sc *ipn.ServeConfig, hp ipn.HostPort) erro } scheme := "https" - if sc.IsServingHTTP(port) { + if sc.IsServingHTTP(port, noService) { scheme = "http" } diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index df68b5edd32a1..6b053fbd774ba 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -859,6 +859,7 @@ type fakeLocalServeClient struct { config *ipn.ServeConfig setCount int // counts calls to SetServeConfig queryFeatureResponse *mockQueryFeatureResponse // mock response to QueryFeature calls + prefs *ipn.Prefs // fake preferences, used to test GetPrefs and SetPrefs } // fakeStatus is a fake ipnstate.Status value for tests. @@ -891,6 +892,21 @@ func (lc *fakeLocalServeClient) SetServeConfig(ctx context.Context, config *ipn. 
return nil } +func (lc *fakeLocalServeClient) GetPrefs(ctx context.Context) (*ipn.Prefs, error) { + if lc.prefs == nil { + lc.prefs = ipn.NewPrefs() + } + return lc.prefs, nil +} + +func (lc *fakeLocalServeClient) EditPrefs(ctx context.Context, prefs *ipn.MaskedPrefs) (*ipn.Prefs, error) { + if lc.prefs == nil { + lc.prefs = ipn.NewPrefs() + } + lc.prefs.ApplyEdits(prefs) + return lc.prefs, nil +} + type mockQueryFeatureResponse struct { resp *tailcfg.QueryFeatureResponse err error diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index bb51fb7d0e131..15de0609c72ad 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -18,6 +18,7 @@ import ( "os/signal" "path" "path/filepath" + "slices" "sort" "strconv" "strings" @@ -41,6 +42,55 @@ type commandInfo struct { LongHelp string } +type serviceNameFlag struct { + Value *tailcfg.ServiceName +} + +func (s *serviceNameFlag) Set(sv string) error { + if sv == "" { + s.Value = new(tailcfg.ServiceName) + return nil + } + v := tailcfg.ServiceName(sv) + if err := v.Validate(); err != nil { + return fmt.Errorf("invalid service name: %q", sv) + } + *s.Value = v + return nil +} + +// String returns the string representation of service name. +func (s *serviceNameFlag) String() string { + return s.Value.String() +} + +type bgBoolFlag struct { + Value bool + IsSet bool // tracks if the flag was set by the user +} + +// Set sets the boolean flag and whether it's explicitly set by user based on the string value. +func (b *bgBoolFlag) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + b.Value = v + b.IsSet = true + return nil +} + +// This is a hack to make the flag package recognize that this is a boolean flag. +func (b *bgBoolFlag) IsBoolFlag() bool { return true } + +// String returns the string representation of the boolean flag. 
+func (b *bgBoolFlag) String() string { + if !b.IsSet { + return "default" + } + return strconv.FormatBool(b.Value) +} + var serveHelpCommon = strings.TrimSpace(` can be a file, directory, text, or most commonly the location to a service running on the local machine. The location to the location service can be expressed as a port number (e.g., 3000), @@ -73,8 +123,11 @@ const ( serveTypeHTTP serveTypeTCP serveTypeTLSTerminatedTCP + serveTypeTUN ) +const noService tailcfg.ServiceName = "" + var infoMap = map[serveMode]commandInfo{ serve: { Name: "serve", @@ -120,7 +173,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { Exec: e.runServeCombined(subcmd), FlagSet: e.newFlags("serve-set", func(fs *flag.FlagSet) { - fs.BoolVar(&e.bg, "bg", false, "Run the command as a background process (default false)") + fs.Var(&e.bg, "bg", "Run the command as a background process (default false, when --service is set defaults to true).") fs.StringVar(&e.setPath, "set-path", "", "Appends the specified path to the base URL for accessing the underlying service") fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)") if subcmd == serve { @@ -128,7 +181,9 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "Serve for a service with distinct virtual IP instead on node itself.") fs.BoolVar(&e.yes, "yes", false, "Update without interactive prompts (default false)") + fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. 
Refer to docs for more information.") }), UsageFunc: usageFuncNoDefaultValues, Subcommands: []*ffcli.Command{ @@ -162,9 +217,16 @@ func (e *serveEnv) validateArgs(subcmd serveMode, args []string) error { fmt.Fprint(e.stderr(), "\nPlease see https://tailscale.com/kb/1242/tailscale-serve for more information.\n") return errHelpFunc(subcmd) } + if len(args) == 0 && e.tun { + return nil + } if len(args) == 0 { return flag.ErrHelp } + if e.tun && len(args) > 1 { + fmt.Fprintln(e.stderr(), "Error: invalid argument format") + return errHelpFunc(subcmd) + } if len(args) > 2 { fmt.Fprintf(e.stderr(), "Error: invalid number of arguments (%d)\n", len(args)) return errHelpFunc(subcmd) @@ -206,7 +268,16 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { ctx, cancel := signal.NotifyContext(ctx, os.Interrupt) defer cancel() + forService := e.service != "" + if !e.bg.IsSet { + e.bg.Value = forService + } + funnel := subcmd == funnel + if forService && funnel { + return errors.New("Error: --service flag is not supported with funnel") + } + if funnel { // verify node has funnel capabilities if err := e.verifyFunnelEnabled(ctx, 443); err != nil { @@ -214,6 +285,10 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } } + if forService && !e.bg.Value { + return errors.New("Error: --service flag is only compatible with background mode") + } + mount, err := cleanURLPath(e.setPath) if err != nil { return fmt.Errorf("failed to clean the mount point: %w", err) @@ -246,7 +321,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { // foreground or background. parentSC := sc - turnOff := "off" == args[len(args)-1] + turnOff := len(args) > 0 && "off" == args[len(args)-1] if !turnOff && srvType == serveTypeHTTPS { // Running serve with https requires that the tailnet has enabled // https cert provisioning. 
Send users through an interactive flow @@ -263,10 +338,19 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } var watcher *tailscale.IPNBusWatcher - wantFg := !e.bg && !turnOff + svcName := noService + + if forService { + svcName = e.service + dnsName = e.service.String() + } + if !forService && srvType == serveTypeTUN { + return errors.New("tun mode is only supported for services") + } + wantFg := !e.bg.Value && !turnOff if wantFg { // validate the config before creating a WatchIPNBus session - if err := e.validateConfig(parentSC, srvPort, srvType); err != nil { + if err := e.validateConfig(parentSC, srvPort, srvType, svcName); err != nil { return err } @@ -292,12 +376,20 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { var msg string if turnOff { - err = e.unsetServe(sc, dnsName, srvType, srvPort, mount) + // only unset serve when trying to unset with type and port flags. + err = e.unsetServe(sc, st, dnsName, srvType, srvPort, mount) } else { - if err := e.validateConfig(parentSC, srvPort, srvType); err != nil { + if err := e.validateConfig(parentSC, srvPort, srvType, svcName); err != nil { return err } - err = e.setServe(sc, st, dnsName, srvType, srvPort, mount, args[0], funnel) + if forService { + e.addServiceToPrefs(ctx, svcName.String()) + } + target := "" + if len(args) > 0 { + target = args[0] + } + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -332,22 +424,66 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } } -const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" - -func (e *serveEnv) validateConfig(sc *ipn.ServeConfig, port uint16, wantServe serveType) error { - sc, isFg := sc.FindConfig(port) - if sc == nil { - return nil +func (e *serveEnv) addServiceToPrefs(ctx context.Context, serviceName string) error { + 
prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return fmt.Errorf("error getting prefs: %w", err) } - if isFg { - return errors.New("foreground already exists under this port") + advertisedServices := prefs.AdvertiseServices + if slices.Contains(advertisedServices, serviceName) { + return nil // already advertised } - if !e.bg { - return fmt.Errorf(backgroundExistsMsg, infoMap[e.subcmd].Name, wantServe.String(), port) + advertisedServices = append(advertisedServices, serviceName) + _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: advertisedServices, + }, + }) + return err +} + +const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" + +// validateConfig checks if the serve config is valid to serve the type wanted on the port. +// dnsName is a FQDN or a serviceName (with `svc:` prefix). +func (e *serveEnv) validateConfig(sc *ipn.ServeConfig, port uint16, wantServe serveType, svcName tailcfg.ServiceName) error { + var tcpHandlerForPort *ipn.TCPPortHandler + if svcName != noService { + svc := sc.Services[svcName] + if svc == nil { + return nil + } + if wantServe == serveTypeTUN && (svc.TCP != nil || svc.Web != nil) { + return errors.New("service already has a TCP or Web handler, cannot serve in TUN mode") + } + if svc.Tun && wantServe != serveTypeTUN { + return errors.New("service is already being served in TUN mode") + } + if svc.TCP[port] == nil { + return nil + } + tcpHandlerForPort = svc.TCP[port] + } else { + sc, isFg := sc.FindConfig(port) + if sc == nil { + return nil + } + if isFg { + return errors.New("foreground already exists under this port") + } + if !e.bg.Value { + return fmt.Errorf(backgroundExistsMsg, infoMap[e.subcmd].Name, wantServe.String(), port) + } + tcpHandlerForPort = sc.TCP[port] } - existingServe := serveFromPortHandler(sc.TCP[port]) + existingServe := 
serveFromPortHandler(tcpHandlerForPort) if wantServe != existingServe { - return fmt.Errorf("want %q but port is already serving %q", wantServe, existingServe) + target := svcName + if target == noService { + target = "machine" + } + return fmt.Errorf("want to serve %q but port is already serving %q for %q", wantServe, existingServe, target) } return nil } @@ -367,7 +503,7 @@ func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType { } } -func (e *serveEnv) setServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool) error { +func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool) error { // update serve config based on the type switch srvType { case serveTypeHTTPS, serveTypeHTTP: @@ -380,45 +516,61 @@ func (e *serveEnv) setServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName st if e.setPath != "" { return fmt.Errorf("cannot mount a path for TCP serve") } - err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target) if err != nil { return fmt.Errorf("failed to apply TCP serve: %w", err) } + case serveTypeTUN: + // Caller checks that TUN mode is only supported for services. + svcName := tailcfg.ServiceName(dnsName) + if _, ok := sc.Services[svcName]; !ok { + mak.Set(&sc.Services, svcName, new(ipn.ServiceConfig)) + } + sc.Services[svcName].Tun = true default: return fmt.Errorf("invalid type %q", srvType) } // update the serve config based on if funnel is enabled - e.applyFunnel(sc, dnsName, srvPort, allowFunnel) - + // Since funnel is not supported for services, we only apply it for node's serve. 
+ if svcName := tailcfg.AsServiceName(dnsName); svcName == noService { + e.applyFunnel(sc, dnsName, srvPort, allowFunnel) + } return nil } var ( - msgFunnelAvailable = "Available on the internet:" - msgServeAvailable = "Available within your tailnet:" - msgRunningInBackground = "%s started and running in the background." - msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" - msgToExit = "Press Ctrl+C to exit." + msgFunnelAvailable = "Available on the internet:" + msgServeAvailable = "Available within your tailnet:" + msgServiceWaitingApproval = "This machine is configured as a service proxy for %s, but approval from an admin is required. Once approved, it will be available in your Tailnet as:" + msgRunningInBackground = "%s started and running in the background." + msgRunningTunService = "IPv4 and IPv6 traffic to %s is being routed to your operating system." + msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" + msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off" + msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off" + msgDisableService = "To remove config for the service, run: tailscale serve clear --service=%s" + msgToExit = "Press Ctrl+C to exit." ) // messageForPort returns a message for the given port based on the // serve config and status. 
func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16) string { var output strings.Builder - - hp := ipn.HostPort(net.JoinHostPort(dnsName, strconv.Itoa(int(srvPort)))) - - if sc.AllowFunnel[hp] == true { - output.WriteString(msgFunnelAvailable) - } else { - output.WriteString(msgServeAvailable) + svcName := tailcfg.AsServiceName(dnsName) + forService := svcName != noService + var webConfig *ipn.WebServerConfig + var tcpHandler *ipn.TCPPortHandler + ips := st.TailscaleIPs + host := dnsName + displayedHost := dnsName + if forService { + displayedHost = strings.Join([]string{svcName.WithoutPrefix(), st.CurrentTailnet.MagicDNSSuffix}, ".") + host = svcName.WithoutPrefix() } - output.WriteString("\n\n") + hp := ipn.HostPort(net.JoinHostPort(host, strconv.Itoa(int(srvPort)))) scheme := "https" - if sc.IsServingHTTP(srvPort) { + if sc.IsServingHTTP(srvPort, svcName) { scheme = "http" } @@ -439,37 +591,68 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN } return "", "" } + if forService { + serviceIPMaps, err := tailcfg.UnmarshalNodeCapJSON[tailcfg.ServiceIPMappings](st.Self.CapMap, tailcfg.NodeAttrServiceHost) + if err != nil || len(serviceIPMaps) == 0 || serviceIPMaps[0][svcName] == nil { + // The capmap does not contain IPs for this service yet. Usually this means + // the service hasn't been added to prefs and sent to control yet. 
+ output.WriteString(fmt.Sprintf(msgServiceWaitingApproval, svcName.String())) + ips = nil + } else { + output.WriteString(msgServeAvailable) + ips = serviceIPMaps[0][svcName] + } + output.WriteString("\n\n") + svc := sc.Services[svcName] + if srvType == serveTypeTUN && svc.Tun { + output.WriteString(fmt.Sprintf(msgRunningTunService, displayedHost)) + output.WriteString("\n") + output.WriteString(fmt.Sprintf(msgDisableServiceTun, dnsName)) + output.WriteString("\n") + output.WriteString(fmt.Sprintf(msgDisableService, dnsName)) + return output.String() + } + if svc != nil { + webConfig = svc.Web[hp] + tcpHandler = svc.TCP[srvPort] + } + } else { + if sc.AllowFunnel[hp] == true { + output.WriteString(msgFunnelAvailable) + } else { + output.WriteString(msgServeAvailable) + } + output.WriteString("\n\n") + webConfig = sc.Web[hp] + tcpHandler = sc.TCP[srvPort] + } - if sc.Web[hp] != nil { - mounts := slicesx.MapKeys(sc.Web[hp].Handlers) + if webConfig != nil { + mounts := slicesx.MapKeys(webConfig.Handlers) sort.Slice(mounts, func(i, j int) bool { return len(mounts[i]) < len(mounts[j]) }) - for _, m := range mounts { - h := sc.Web[hp].Handlers[m] - t, d := srvTypeAndDesc(h) - output.WriteString(fmt.Sprintf("%s://%s%s%s\n", scheme, dnsName, portPart, m)) + t, d := srvTypeAndDesc(webConfig.Handlers[m]) + output.WriteString(fmt.Sprintf("%s://%s%s%s\n", scheme, displayedHost, portPart, m)) output.WriteString(fmt.Sprintf("%s %-5s %s\n\n", "|--", t, d)) } - } else if sc.TCP[srvPort] != nil { - h := sc.TCP[srvPort] + } else if tcpHandler != nil { tlsStatus := "TLS over TCP" - if h.TerminateTLS != "" { + if tcpHandler.TerminateTLS != "" { tlsStatus = "TLS terminated" } - output.WriteString(fmt.Sprintf("%s://%s%s\n", scheme, dnsName, portPart)) - output.WriteString(fmt.Sprintf("|-- tcp://%s (%s)\n", hp, tlsStatus)) - for _, a := range st.TailscaleIPs { + output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", displayedHost, srvPort, tlsStatus)) + for _, a := range ips { ipp := 
net.JoinHostPort(a.String(), strconv.Itoa(int(srvPort))) output.WriteString(fmt.Sprintf("|-- tcp://%s\n", ipp)) } - output.WriteString(fmt.Sprintf("|--> tcp://%s\n", h.TCPForward)) + output.WriteString(fmt.Sprintf("|--> tcp://%s\n\n", tcpHandler.TCPForward)) } - if !e.bg { + if !forService && !e.bg.Value { output.WriteString(msgToExit) return output.String() } @@ -479,14 +662,19 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN output.WriteString(fmt.Sprintf(msgRunningInBackground, subCmdUpper)) output.WriteString("\n") - output.WriteString(fmt.Sprintf(msgDisableProxy, subCmd, srvType.String(), srvPort)) + if forService { + output.WriteString(fmt.Sprintf(msgDisableServiceProxy, dnsName, srvType.String(), srvPort)) + output.WriteString("\n") + output.WriteString(fmt.Sprintf(msgDisableService, dnsName)) + } else { + output.WriteString(fmt.Sprintf(msgDisableProxy, subCmd, srvType.String(), srvPort)) + } return output.String() } func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string) error { h := new(ipn.HTTPHandler) - switch { case strings.HasPrefix(target, "text:"): text := strings.TrimPrefix(target, "text:") @@ -522,7 +710,8 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui } // TODO: validation needs to check nested foreground configs - if sc.IsTCPForwardingOnPort(srvPort) { + svcName := tailcfg.AsServiceName(dnsName) + if sc.IsTCPForwardingOnPort(srvPort, svcName) { return errors.New("cannot serve web; already serving TCP") } @@ -553,8 +742,9 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType se } // TODO: needs to account for multiple configs from foreground mode - if sc.IsServingWeb(srcPort) { - return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) + svcName := tailcfg.AsServiceName(dnsName) + if sc.IsServingWeb(srcPort, svcName) { + return fmt.Errorf("cannot serve TCP; already 
serving web on %d for %s", srcPort, dnsName) } sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, dnsName) @@ -578,18 +768,24 @@ func (e *serveEnv) applyFunnel(sc *ipn.ServeConfig, dnsName string, srvPort uint } // unsetServe removes the serve config for the given serve port. -func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string) error { +// dnsName is a FQDN or a serviceName (with `svc:` prefix). +func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16, mount string) error { switch srvType { case serveTypeHTTPS, serveTypeHTTP: - err := e.removeWebServe(sc, dnsName, srvPort, mount) + err := e.removeWebServe(sc, st, dnsName, srvPort, mount) if err != nil { return fmt.Errorf("failed to remove web serve: %w", err) } case serveTypeTCP, serveTypeTLSTerminatedTCP: - err := e.removeTCPServe(sc, srvPort) + err := e.removeTCPServe(sc, dnsName, srvPort) if err != nil { return fmt.Errorf("failed to remove TCP serve: %w", err) } + case serveTypeTUN: + err := e.removeTunServe(sc, dnsName) + if err != nil { + return fmt.Errorf("failed to remove TUN serve: %w", err) + } default: return fmt.Errorf("invalid type %q", srvType) } @@ -620,11 +816,16 @@ func srvTypeAndPortFromFlags(e *serveEnv) (srvType serveType, srvPort uint16, er } } + if e.tun { + srcTypeCount++ + srvType = serveTypeTUN + } + if srcTypeCount > 1 { return 0, 0, fmt.Errorf("cannot serve multiple types for a single mount point") - } else if srcTypeCount == 0 { - srvType = serveTypeHTTPS - srvPort = 443 + } + if srcTypeCount == 0 { + return serveTypeHTTPS, 443, nil } return srvType, srvPort, nil @@ -728,32 +929,48 @@ func isLegacyInvocation(subcmd serveMode, args []string) (string, bool) { // and removes funnel if no remaining mounts exist for the serve port. // The srvPort argument is the serving port and the mount argument is // the mount point or registered path to remove. 
-func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, mount string) error { - if sc.IsTCPForwardingOnPort(srvPort) { - return errors.New("cannot remove web handler; currently serving TCP") +func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvPort uint16, mount string) error { + if sc == nil { + return nil } portStr := strconv.Itoa(int(srvPort)) - hp := ipn.HostPort(net.JoinHostPort(dnsName, portStr)) + hostName := dnsName + webServeMap := sc.Web + svcName := tailcfg.AsServiceName(dnsName) + forService := svcName != noService + if forService { + svc := sc.Services[svcName] + if svc == nil { + return errors.New("service does not exist") + } + hostName = svcName.WithoutPrefix() + webServeMap = svc.Web + } + + hp := ipn.HostPort(net.JoinHostPort(hostName, portStr)) + if sc.IsTCPForwardingOnPort(srvPort, svcName) { + return errors.New("cannot remove web handler; currently serving TCP") + } var targetExists bool var mounts []string // mount is deduced from e.setPath but it is ambiguous as // to whether the user explicitly passed "/" or it was defaulted to. 
if e.setPath == "" { - targetExists = sc.Web[hp] != nil && len(sc.Web[hp].Handlers) > 0 + targetExists = webServeMap[hp] != nil && len(webServeMap[hp].Handlers) > 0 if targetExists { - for mount := range sc.Web[hp].Handlers { + for mount := range webServeMap[hp].Handlers { mounts = append(mounts, mount) } } } else { - targetExists = sc.WebHandlerExists(hp, mount) + targetExists = sc.WebHandlerExists(svcName, hp, mount) mounts = []string{mount} } if !targetExists { - return errors.New("error: handler does not exist") + return errors.New("handler does not exist") } if len(mounts) > 1 { @@ -763,23 +980,47 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort u } } - sc.RemoveWebHandler(dnsName, srvPort, mounts, true) + if forService { + sc.RemoveServiceWebHandler(st, svcName, srvPort, mounts) + } else { + sc.RemoveWebHandler(dnsName, srvPort, mounts, true) + } return nil } // removeTCPServe removes the TCP forwarding configuration for the -// given srvPort, or serving port. -func (e *serveEnv) removeTCPServe(sc *ipn.ServeConfig, src uint16) error { +// given srvPort, or serving port for the given dnsName. 
+func (e *serveEnv) removeTCPServe(sc *ipn.ServeConfig, dnsName string, src uint16) error { if sc == nil { return nil } - if sc.GetTCPPortHandler(src) == nil { - return errors.New("error: serve config does not exist") + svcName := tailcfg.AsServiceName(dnsName) + if sc.GetTCPPortHandler(src, svcName) == nil { + return errors.New("serve config does not exist") } - if sc.IsServingWeb(src) { + if sc.IsServingWeb(src, svcName) { return fmt.Errorf("unable to remove; serving web, not TCP forwarding on serve port %d", src) } - sc.RemoveTCPForwarding(src) + sc.RemoveTCPForwarding(svcName, src) + return nil +} + +func (e *serveEnv) removeTunServe(sc *ipn.ServeConfig, dnsName string) error { + if sc == nil { + return nil + } + svcName := tailcfg.ServiceName(dnsName) + svc, ok := sc.Services[svcName] + if !ok || svc == nil { + return errors.New("service does not exist") + } + if !svc.Tun { + return errors.New("service is not being served in TUN mode") + } + delete(sc.Services, svcName) + if len(sc.Services) == 0 { + sc.Services = nil // clean up empty map + } return nil } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 5768127ad0421..b3e7ea773c698 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -8,9 +8,11 @@ import ( "context" "encoding/json" "fmt" + "net/netip" "os" "path/filepath" "reflect" + "slices" "strconv" "strings" "testing" @@ -19,6 +21,7 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" ) func TestServeDevConfigMutations(t *testing.T) { @@ -874,9 +877,10 @@ func TestValidateConfig(t *testing.T) { name string desc string cfg *ipn.ServeConfig + svc tailcfg.ServiceName servePort uint16 serveType serveType - bg bool + bg bgBoolFlag wantErr bool }{ { @@ -894,7 +898,7 @@ func TestValidateConfig(t *testing.T) { 443: {HTTPS: true}, }, }, - bg: true, + bg: bgBoolFlag{true, false}, servePort: 10000, serveType: 
serveTypeHTTPS, }, @@ -906,7 +910,7 @@ func TestValidateConfig(t *testing.T) { 443: {TCPForward: "http://localhost:4545"}, }, }, - bg: true, + bg: bgBoolFlag{true, false}, servePort: 443, serveType: serveTypeTCP, }, @@ -918,7 +922,7 @@ func TestValidateConfig(t *testing.T) { 443: {HTTPS: true}, }, }, - bg: true, + bg: bgBoolFlag{true, false}, servePort: 443, serveType: serveTypeHTTP, wantErr: true, @@ -957,12 +961,90 @@ func TestValidateConfig(t *testing.T) { serveType: serveTypeTCP, wantErr: true, }, + { + name: "new_service_tcp", + desc: "no error when adding a new service port", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + }, + }, + }, + svc: "svc:foo", + servePort: 8080, + serveType: serveTypeTCP, + }, + { + name: "override_service_tcp", + desc: "no error when overwriting a previous service port", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {TCPForward: "http://localhost:4545"}, + }, + }, + }, + }, + svc: "svc:foo", + servePort: 443, + serveType: serveTypeTCP, + }, + { + name: "override_service_tcp", + desc: "error when overwriting a previous service port with a different serve type", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {HTTPS: true}, + }, + }, + }, + }, + svc: "svc:foo", + servePort: 443, + serveType: serveTypeHTTP, + wantErr: true, + }, + { + name: "override_service_tcp", + desc: "error when setting previous tcp service to tun mode", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {TCPForward: "http://localhost:4545"}, + }, + }, + }, + }, + svc: "svc:foo", + serveType: serveTypeTUN, + wantErr: true, + }, + { + name: "override_service_tun", + desc: "error when 
setting previous tun service to tcp forwarder", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + Tun: true, + }, + }, + }, + svc: "svc:foo", + serveType: serveTypeTCP, + servePort: 443, + wantErr: true, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { se := serveEnv{bg: tc.bg} - err := se.validateConfig(tc.cfg, tc.servePort, tc.serveType) + err := se.validateConfig(tc.cfg, tc.servePort, tc.serveType, tc.svc) if err == nil && tc.wantErr { t.Fatal("expected an error but got nil") } @@ -1017,6 +1099,13 @@ func TestSrcTypeFromFlags(t *testing.T) { expectedPort: 443, expectedErr: false, }, + { + name: "defaults to https, port 443 for service", + env: &serveEnv{service: "svc:foo"}, + expectedType: serveTypeHTTPS, + expectedPort: 443, + expectedErr: false, + }, { name: "multiple types set", env: &serveEnv{http: 80, https: 443}, @@ -1075,12 +1164,70 @@ func TestCleanURLPath(t *testing.T) { } } +func TestAddServiceToPrefs(t *testing.T) { + tests := []struct { + name string + dnsName string + startServices []string + expected []string + }{ + { + name: "add service to empty prefs", + dnsName: "svc:foo", + expected: []string{"svc:foo"}, + }, + { + name: "add service to existing prefs", + dnsName: "svc:bar", + startServices: []string{"svc:foo"}, + expected: []string{"svc:foo", "svc:bar"}, + }, + { + name: "add existing service to prefs", + dnsName: "svc:foo", + startServices: []string{"svc:foo"}, + expected: []string{"svc:foo"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lc := &fakeLocalServeClient{} + ctx := t.Context() + lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: tt.startServices, + }, + }) + e := &serveEnv{lc: lc, bg: bgBoolFlag{true, false}} + err := e.addServiceToPrefs(ctx, tt.dnsName) + if err != nil { + t.Fatalf("addServiceToPrefs(%q) returned unexpected error: %v", tt.dnsName, err) + } + if 
!slices.Equal(lc.prefs.AdvertiseServices, tt.expected) { + t.Errorf("addServiceToPrefs(%q) = %v, want %v", tt.dnsName, lc.prefs.AdvertiseServices, tt.expected) + } + }) + } + +} + func TestMessageForPort(t *testing.T) { + svcIPMap := tailcfg.ServiceIPMappings{ + "svc:foo": []netip.Addr{ + netip.MustParseAddr("100.101.101.101"), + netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:cd96:6565:6565"), + }, + } + svcIPMapJSON, _ := json.Marshal(svcIPMap) + svcIPMapJSONRawMSG := tailcfg.RawMessage(svcIPMapJSON) + tests := []struct { name string subcmd serveMode serveConfig *ipn.ServeConfig status *ipnstate.Status + prefs *ipn.Prefs dnsName string srvType serveType srvPort uint16 @@ -1147,10 +1294,206 @@ func TestMessageForPort(t *testing.T) { fmt.Sprintf(msgDisableProxy, "serve", "http", 80), }, "\n"), }, + { + name: "serve service http", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{ + AdvertiseServices: []string{"svc:foo"}, + }, + dnsName: "svc:foo", + srvType: serveTypeHTTP, + srvPort: 80, + expected: strings.Join([]string{ + msgServeAvailable, + "", + "http://foo.test.ts.net/", + "|-- proxy http://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:foo", "http", 80), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, + { + name: "serve service no capmap", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: 
map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{ + AdvertiseServices: []string{"svc:bar"}, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + expected: strings.Join([]string{ + fmt.Sprintf(msgServiceWaitingApproval, "svc:bar"), + "", + "http://bar.test.ts.net/", + "|-- proxy http://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:bar", "http", 80), + fmt.Sprintf(msgDisableService, "svc:bar"), + }, "\n"), + }, + { + name: "serve service https non-default port", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 2200: {HTTPS: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo:2200": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{AdvertiseServices: []string{"svc:foo"}}, + dnsName: "svc:foo", + srvType: serveTypeHTTPS, + srvPort: 2200, + expected: strings.Join([]string{ + msgServeAvailable, + "", + "https://foo.test.ts.net:2200/", + "|-- proxy http://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + 
fmt.Sprintf(msgDisableServiceProxy, "svc:foo", "https", 2200), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, + { + name: "serve service TCPForward", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 2200: {TCPForward: "localhost:3000"}, + }, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{AdvertiseServices: []string{"svc:foo"}}, + dnsName: "svc:foo", + srvType: serveTypeTCP, + srvPort: 2200, + expected: strings.Join([]string{ + msgServeAvailable, + "", + "|-- tcp://foo.test.ts.net:2200 (TLS over TCP)", + "|-- tcp://100.101.101.101:2200", + "|-- tcp://[fd7a:115c:a1e0:ab12:4843:cd96:6565:6565]:2200", + "|--> tcp://localhost:3000", + "", + fmt.Sprintf(msgRunningInBackground, "Serve"), + fmt.Sprintf(msgDisableServiceProxy, "svc:foo", "tcp", 2200), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, + { + name: "serve service Tun", + subcmd: serve, + serveConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + Tun: true, + }, + }, + }, + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + Self: &ipnstate.PeerStatus{ + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{svcIPMapJSONRawMSG}, + }, + }, + }, + prefs: &ipn.Prefs{AdvertiseServices: []string{"svc:foo"}}, + dnsName: "svc:foo", + srvType: serveTypeTUN, + expected: strings.Join([]string{ + msgServeAvailable, + "", + fmt.Sprintf(msgRunningTunService, "foo.test.ts.net"), + fmt.Sprintf(msgDisableServiceTun, "svc:foo"), + fmt.Sprintf(msgDisableService, "svc:foo"), + }, "\n"), + }, } for _, tt := range tests { - e := &serveEnv{bg: 
true, subcmd: tt.subcmd} + e := &serveEnv{bg: bgBoolFlag{true, false}, subcmd: tt.subcmd} t.Run(tt.name, func(t *testing.T) { actual := e.messageForPort(tt.serveConfig, tt.status, tt.dnsName, tt.srvType, tt.srvPort) @@ -1277,6 +1620,576 @@ func TestIsLegacyInvocation(t *testing.T) { } } +func TestSetServe(t *testing.T) { + e := &serveEnv{} + tests := []struct { + name string + desc string + cfg *ipn.ServeConfig + st *ipnstate.Status + dnsName string + srvType serveType + srvPort uint16 + mountPath string + target string + allowFunnel bool + expected *ipn.ServeConfig + expectErr bool + }{ + { + name: "add new handler", + desc: "add a new http handler to empty config", + cfg: &ipn.ServeConfig{}, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3000", + expected: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + { + name: "update http handler", + desc: "update an existing http handler on the same port to same type", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + { + name: "update TCP handler", + desc: "update an existing TCP handler on the same port to a http handler", + cfg: &ipn.ServeConfig{ + TCP: 
map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "http://localhost:3000"}}, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expectErr: true, + }, + { + name: "add new service handler", + desc: "add a new service TCP handler to empty config", + cfg: &ipn.ServeConfig{}, + + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + target: "3000", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3000"}}, + }, + }, + }, + }, + { + name: "update service handler", + desc: "update an existing service TCP handler on the same port to same type", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3000"}}, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + target: "3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3001"}}, + }, + }, + }, + }, + { + name: "update service handler", + desc: "update an existing service TCP handler on the same port to a http handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {TCPForward: "127.0.0.1:3000"}}, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expectErr: true, + }, + { + name: "add new service handler", + desc: "add a new service HTTP handler to empty config", + cfg: &ipn.ServeConfig{}, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3000", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { 
+ TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "update existing service handler", + desc: "update an existing service HTTP handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "add new service handler", + desc: "add a new service HTTP handler to existing service config", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 88, + mountPath: "/", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + 88: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + 
"bar:88": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "add new service mount", + desc: "add a new service mount to existing service config", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mountPath: "/added", + target: "http://localhost:3001", + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + "/added": {Proxy: "http://localhost:3001"}, + }, + }, + }, + }, + }, + }, + }, + { + name: "add new service handler", + desc: "add a new service handler in tun mode to empty config", + cfg: &ipn.ServeConfig{}, + dnsName: "svc:bar", + srvType: serveTypeTUN, + expected: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + Tun: true, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel) + if err != nil && !tt.expectErr { + t.Fatalf("got error: %v; did not expect error.", err) + } + if err == nil && tt.expectErr { + t.Fatalf("got no error; expected error.") + } + if !tt.expectErr && !reflect.DeepEqual(tt.cfg, tt.expected) { + svcName := tailcfg.ServiceName(tt.dnsName) + t.Fatalf("got: %v; expected: %v", tt.cfg.Services[svcName], tt.expected.Services[svcName]) + } + }) + } +} + +func TestUnsetServe(t *testing.T) { + tests := 
[]struct { + name string + desc string + cfg *ipn.ServeConfig + st *ipnstate.Status + dnsName string + srvType serveType + srvPort uint16 + mount string + setServeEnv bool + serveEnv *serveEnv // if set, use this instead of the default serveEnv + expected *ipn.ServeConfig + expectErr bool + }{ + { + name: "unset http handler", + desc: "remove an existing http handler", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/", + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset service handler", + desc: "remove an existing service TCP handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/", + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset service handler tun", + desc: "remove an existing service handler in tun mode", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + Tun: true, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeTUN, + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset service 
handler tcp", + desc: "remove an existing service TCP handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {TCPForward: "11.11.11.11:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + expected: &ipn.ServeConfig{}, + expectErr: false, + }, + { + name: "unset http handler not found", + desc: "try to remove a non-existing http handler", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "bar.test.ts.net", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/abc", + expected: &ipn.ServeConfig{}, + expectErr: true, + }, + { + name: "unset service handler not found", + desc: "try to remove a non-existing service TCP handler", + + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeHTTP, + srvPort: 80, + mount: "/abc", + setServeEnv: true, + serveEnv: &serveEnv{setPath: "/abc"}, + expected: &ipn.ServeConfig{}, + expectErr: true, + }, + { + name: "unset service doesn't exist", + desc: "try to remove a non-existing service's handler", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ 
+ "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {TCPForward: "11.11.11.11:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:foo", + srvType: serveTypeTCP, + srvPort: 80, + expectErr: true, + }, + { + name: "unset tcp while port is in use", + desc: "try to remove a TCP handler while the port is used for web", + cfg: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "foo.test.ts.net", + srvType: serveTypeTCP, + srvPort: 80, + mount: "/", + expectErr: true, + }, + { + name: "unset service tcp while port is in use", + desc: "try to remove a service TCP handler while the port is used for web", + cfg: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:bar": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "bar:80": { + Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://localhost:3000"}, + }, + }, + }, + }, + }, + }, + st: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + dnsName: "svc:bar", + srvType: serveTypeTCP, + srvPort: 80, + mount: "/", + expectErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &serveEnv{} + if tt.setServeEnv { + e = tt.serveEnv + } + err := e.unsetServe(tt.cfg, tt.st, tt.dnsName, tt.srvType, tt.srvPort, tt.mount) + if err != nil && !tt.expectErr { + t.Fatalf("got error: %v; did not expect error.", err) + } + if err == nil && tt.expectErr { + t.Fatalf("got no error; expected error.") + } + if !tt.expectErr && !reflect.DeepEqual(tt.cfg, 
tt.expected) { + t.Fatalf("got: %v; expected: %v", tt.cfg, tt.expected) + } + }) + } +} + // exactErrMsg returns an error checker that wants exactly the provided want error. // If optName is non-empty, it's used in the error message. func exactErrMsg(want error) func(error) string { diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 85679a7decbc1..39e6f9fbdfd8a 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -262,7 +262,7 @@ func printFunnelStatus(ctx context.Context) { } sni, portStr, _ := net.SplitHostPort(string(hp)) p, _ := strconv.ParseUint(portStr, 10, 16) - isTCP := sc.IsTCPForwardingOnPort(uint16(p)) + isTCP := sc.IsTCPForwardingOnPort(uint16(p), noService) url := "https://" if isTCP { url = "tcp://" diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 44d63fe54a902..28262251c6880 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -1007,8 +1007,6 @@ func allNumeric(s string) bool { } func (b *LocalBackend) webServerConfig(hostname string, forVIPService tailcfg.ServiceName, port uint16) (c ipn.WebServerConfigView, ok bool) { - key := ipn.HostPort(fmt.Sprintf("%s:%v", hostname, port)) - b.mu.Lock() defer b.mu.Unlock() @@ -1016,8 +1014,10 @@ func (b *LocalBackend) webServerConfig(hostname string, forVIPService tailcfg.Se return c, false } if forVIPService != "" { + key := ipn.HostPort(net.JoinHostPort(forVIPService.WithoutPrefix(), fmt.Sprintf("%d", port))) return b.serveConfig.FindServiceWeb(forVIPService, key) } + key := ipn.HostPort(net.JoinHostPort(hostname, fmt.Sprintf("%d", port))) return b.serveConfig.FindWeb(key) } diff --git a/ipn/serve.go b/ipn/serve.go index ac92287bdc08f..fae0ad5d6568a 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -166,26 +166,44 @@ type HTTPHandler struct { // WebHandlerExists reports whether if the ServeConfig Web handler exists for // the given host:port and mount point. 
-func (sc *ServeConfig) WebHandlerExists(hp HostPort, mount string) bool { - h := sc.GetWebHandler(hp, mount) +func (sc *ServeConfig) WebHandlerExists(svcName tailcfg.ServiceName, hp HostPort, mount string) bool { + h := sc.GetWebHandler(svcName, hp, mount) return h != nil } // GetWebHandler returns the HTTPHandler for the given host:port and mount point. // Returns nil if the handler does not exist. -func (sc *ServeConfig) GetWebHandler(hp HostPort, mount string) *HTTPHandler { - if sc == nil || sc.Web[hp] == nil { +func (sc *ServeConfig) GetWebHandler(svcName tailcfg.ServiceName, hp HostPort, mount string) *HTTPHandler { + if sc == nil { + return nil + } + if svcName != "" { + if svc, ok := sc.Services[svcName]; ok && svc.Web != nil { + if webCfg, ok := svc.Web[hp]; ok { + return webCfg.Handlers[mount] + } + } + return nil + } + if sc.Web[hp] == nil { return nil } return sc.Web[hp].Handlers[mount] } -// GetTCPPortHandler returns the TCPPortHandler for the given port. -// If the port is not configured, nil is returned. -func (sc *ServeConfig) GetTCPPortHandler(port uint16) *TCPPortHandler { +// GetTCPPortHandler returns the TCPPortHandler for the given port. If the port +// is not configured, nil is returned. Parameter svcName can be tailcfg.NoService +// for local serve or a service name for a service hosted on node. +func (sc *ServeConfig) GetTCPPortHandler(port uint16, svcName tailcfg.ServiceName) *TCPPortHandler { if sc == nil { return nil } + if svcName != "" { + if svc, ok := sc.Services[svcName]; ok && svc != nil { + return svc.TCP[port] + } + return nil + } return sc.TCP[port] } @@ -227,34 +245,78 @@ func (sc *ServeConfig) IsTCPForwardingAny() bool { return false } -// IsTCPForwardingOnPort reports whether if ServeConfig is currently forwarding -// in TCPForward mode on the given port. This is exclusive of Web/HTTPS serving. 
-func (sc *ServeConfig) IsTCPForwardingOnPort(port uint16) bool { - if sc == nil || sc.TCP[port] == nil { +// IsTCPForwardingOnPort reports whether ServeConfig is currently forwarding +// in TCPForward mode on the given port for local or a service. svcName will +// either be noService (empty string) for local serve or a serviceName for service +// hosted on node. Notice TCPForwarding is exclusive with Web/HTTPS serving. +func (sc *ServeConfig) IsTCPForwardingOnPort(port uint16, svcName tailcfg.ServiceName) bool { + if sc == nil { + return false + } + + if svcName != "" { + svc, ok := sc.Services[svcName] + if !ok || svc == nil { + return false + } + if svc.TCP[port] == nil { + return false + } + } else if sc.TCP[port] == nil { return false } - return !sc.IsServingWeb(port) + return !sc.IsServingWeb(port, svcName) } -// IsServingWeb reports whether if ServeConfig is currently serving Web -// (HTTP/HTTPS) on the given port. This is exclusive of TCPForwarding. -func (sc *ServeConfig) IsServingWeb(port uint16) bool { - return sc.IsServingHTTP(port) || sc.IsServingHTTPS(port) +// IsServingWeb reports whether ServeConfig is currently serving Web (HTTP/HTTPS) +// on the given port for local or a service. svcName will be either tailcfg.NoService, +// or a serviceName for service hosted on node. This is exclusive with TCPForwarding. +func (sc *ServeConfig) IsServingWeb(port uint16, svcName tailcfg.ServiceName) bool { + return sc.IsServingHTTP(port, svcName) || sc.IsServingHTTPS(port, svcName) } -// IsServingHTTPS reports whether if ServeConfig is currently serving HTTPS on -// the given port. This is exclusive of HTTP and TCPForwarding. -func (sc *ServeConfig) IsServingHTTPS(port uint16) bool { - if sc == nil || sc.TCP[port] == nil { +// IsServingHTTPS reports whether ServeConfig is currently serving HTTPS on +// the given port for local or a service. svcName will be either tailcfg.NoService +// for local serve, or a serviceName for service hosted on node. 
This is exclusive +// with HTTP and TCPForwarding. +func (sc *ServeConfig) IsServingHTTPS(port uint16, svcName tailcfg.ServiceName) bool { + if sc == nil { + return false + } + var tcpHandlers map[uint16]*TCPPortHandler + if svcName != "" { + if svc := sc.Services[svcName]; svc != nil { + tcpHandlers = svc.TCP + } + } else { + tcpHandlers = sc.TCP + } + + th := tcpHandlers[port] + if th == nil { return false } - return sc.TCP[port].HTTPS + return th.HTTPS } -// IsServingHTTP reports whether if ServeConfig is currently serving HTTP on the -// given port. This is exclusive of HTTPS and TCPForwarding. -func (sc *ServeConfig) IsServingHTTP(port uint16) bool { - if sc == nil || sc.TCP[port] == nil { +// IsServingHTTP reports whether ServeConfig is currently serving HTTP on the +// given port for local or a service. svcName will be either tailcfg.NoService for +// local serve, or a serviceName for service hosted on node. This is exclusive +// with HTTPS and TCPForwarding. +func (sc *ServeConfig) IsServingHTTP(port uint16, svcName tailcfg.ServiceName) bool { + if sc == nil { + return false + } + if svcName != "" { + if svc := sc.Services[svcName]; svc != nil { + if svc.TCP[port] != nil { + return svc.TCP[port].HTTP + } + } + return false + } + + if sc.TCP[port] == nil { return false } return sc.TCP[port].HTTP @@ -280,21 +342,37 @@ func (sc *ServeConfig) FindConfig(port uint16) (*ServeConfig, bool) { // SetWebHandler sets the given HTTPHandler at the specified host, port, // and mount in the serve config. sc.TCP is also updated to reflect web -// serving usage of the given port. +// serving usage of the given port. The st argument is needed when setting +// a web handler for a service, otherwise it can be nil. 
func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool) { if sc == nil { sc = new(ServeConfig) } - mak.Set(&sc.TCP, port, &TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS}) - hp := HostPort(net.JoinHostPort(host, strconv.Itoa(int(port)))) - if _, ok := sc.Web[hp]; !ok { - mak.Set(&sc.Web, hp, new(WebServerConfig)) + tcpMap := &sc.TCP + webServerMap := &sc.Web + hostName := host + if svcName := tailcfg.AsServiceName(host); svcName != "" { + hostName = svcName.WithoutPrefix() + svc, ok := sc.Services[svcName] + if !ok { + svc = new(ServiceConfig) + mak.Set(&sc.Services, svcName, svc) + } + tcpMap = &svc.TCP + webServerMap = &svc.Web } - mak.Set(&sc.Web[hp].Handlers, mount, handler) + mak.Set(tcpMap, port, &TCPPortHandler{HTTPS: useTLS, HTTP: !useTLS}) + hp := HostPort(net.JoinHostPort(hostName, strconv.Itoa(int(port)))) + webCfg, ok := (*webServerMap)[hp] + if !ok { + webCfg = new(WebServerConfig) + mak.Set(webServerMap, hp, webCfg) + } + mak.Set(&webCfg.Handlers, mount, handler) // TODO(tylersmalley): handle multiple web handlers from foreground mode - for k, v := range sc.Web[hp].Handlers { + for k, v := range webCfg.Handlers { if v == handler { continue } @@ -305,7 +383,7 @@ func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uin m1 := strings.TrimSuffix(mount, "/") m2 := strings.TrimSuffix(k, "/") if m1 == m2 { - delete(sc.Web[hp].Handlers, k) + delete(webCfg.Handlers, k) } } } @@ -318,9 +396,19 @@ func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTL if sc == nil { sc = new(ServeConfig) } - mak.Set(&sc.TCP, port, &TCPPortHandler{TCPForward: fwdAddr}) + tcpPortHandler := &sc.TCP + if svcName := tailcfg.AsServiceName(host); svcName != "" { + svcConfig, ok := sc.Services[svcName] + if !ok { + svcConfig = new(ServiceConfig) + mak.Set(&sc.Services, svcName, svcConfig) + } + tcpPortHandler = &svcConfig.TCP + } + mak.Set(tcpPortHandler, port, 
+// for the provided service and port in the serve config.
-func (sc *ServeConfig) RemoveTCPForwarding(port uint16) { +func (sc *ServeConfig) RemoveTCPForwarding(svcName tailcfg.ServiceName, port uint16) { + if svcName != "" { + if svc := sc.Services[svcName]; svc != nil { + delete(svc.TCP, port) + if len(svc.TCP) == 0 { + svc.TCP = nil + } + if len(svc.Web) == 0 && len(svc.TCP) == 0 { + delete(sc.Services, svcName) + } + if len(sc.Services) == 0 { + sc.Services = nil + } + } + return + } delete(sc.TCP, port) if len(sc.TCP) == 0 { sc.TCP = nil diff --git a/ipn/serve_test.go b/ipn/serve_test.go index ba0a26f8c0698..7028c1e17cd71 100644 --- a/ipn/serve_test.go +++ b/ipn/serve_test.go @@ -128,6 +128,121 @@ func TestHasPathHandler(t *testing.T) { } } +func TestIsTCPForwardingOnPort(t *testing.T) { + tests := []struct { + name string + cfg ServeConfig + svcName tailcfg.ServiceName + port uint16 + want bool + }{ + { + name: "empty-config", + cfg: ServeConfig{}, + svcName: "", + port: 80, + want: false, + }, + { + name: "node-tcp-config-match", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + svcName: "", + port: 80, + want: true, + }, + { + name: "node-tcp-config-no-match", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + svcName: "", + port: 443, + want: false, + }, + { + name: "node-tcp-config-no-match-with-service", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + svcName: "svc:bar", + port: 80, + want: false, + }, + { + name: "node-web-config-no-match", + cfg: ServeConfig{ + TCP: map[uint16]*TCPPortHandler{80: {HTTPS: true}}, + Web: map[HostPort]*WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*HTTPHandler{ + "/": {Text: "Hello, world!"}, + }, + }, + }, + }, + svcName: "", + port: 80, + want: false, + }, + { + name: "service-tcp-config-match", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + TCP: 
map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + }, + }, + svcName: "svc:foo", + port: 80, + want: true, + }, + { + name: "service-tcp-config-no-match", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*TCPPortHandler{80: {TCPForward: "10.0.0.123:3000"}}, + }, + }, + }, + svcName: "svc:bar", + port: 80, + want: false, + }, + { + name: "service-web-config-no-match", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*TCPPortHandler{80: {HTTPS: true}}, + Web: map[HostPort]*WebServerConfig{ + "foo.test.ts.net:80": { + Handlers: map[string]*HTTPHandler{ + "/": {Text: "Hello, world!"}, + }, + }, + }, + }, + }, + }, + svcName: "svc:foo", + port: 80, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.cfg.IsTCPForwardingOnPort(tt.port, tt.svcName) + if tt.want != got { + t.Errorf("IsTCPForwardingOnPort() = %v, want %v", got, tt.want) + } + }) + } +} + func TestExpandProxyTargetDev(t *testing.T) { tests := []struct { name string diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 636e2434de276..398a2c8a2b93a 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -927,6 +927,16 @@ func (t *TPMInfo) Present() bool { return t != nil } // This is not related to the older [Service] used in [Hostinfo.Services]. type ServiceName string +// AsServiceName reports whether the given string is a valid service name. +// If so returns the name as a [tailcfg.ServiceName], otherwise returns "". +func AsServiceName(s string) ServiceName { + svcName := ServiceName(s) + if err := svcName.Validate(); err != nil { + return "" + } + return svcName +} + // Validate validates if the service name is formatted correctly. // We only allow valid DNS labels, since the expectation is that these will be // used as parts of domain names. All errors are [vizerror.Error]. 
From 93511be04483dfd9ab6fa3164b70dcae5ec366f9 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 17 Jul 2025 01:30:08 -0700 Subject: [PATCH 0132/1093] types/geo: add geo.Point and its associated units (#16583) Package geo provides functionality to represent and process geographical locations on a sphere. The main type, geo.Point, represents a pair of latitude and longitude coordinates. Updates tailscale/corp#29968 Signed-off-by: Simon Law --- types/geo/doc.go | 6 + types/geo/point.go | 279 +++++++++++++++++++ types/geo/point_test.go | 541 +++++++++++++++++++++++++++++++++++++ types/geo/quantize.go | 106 ++++++++ types/geo/quantize_test.go | 130 +++++++++ types/geo/units.go | 191 +++++++++++++ types/geo/units_test.go | 395 +++++++++++++++++++++++++++ 7 files changed, 1648 insertions(+) create mode 100644 types/geo/doc.go create mode 100644 types/geo/point.go create mode 100644 types/geo/point_test.go create mode 100644 types/geo/quantize.go create mode 100644 types/geo/quantize_test.go create mode 100644 types/geo/units.go create mode 100644 types/geo/units_test.go diff --git a/types/geo/doc.go b/types/geo/doc.go new file mode 100644 index 0000000000000..749c6308093f6 --- /dev/null +++ b/types/geo/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package geo provides functionality to represent and process geographical +// locations on a spherical Earth. +package geo diff --git a/types/geo/point.go b/types/geo/point.go new file mode 100644 index 0000000000000..d7160ac593338 --- /dev/null +++ b/types/geo/point.go @@ -0,0 +1,279 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "strconv" +) + +// ErrBadPoint indicates that the point is malformed. +var ErrBadPoint = errors.New("not a valid point") + +// Point represents a pair of latitude and longitude coordinates. 
+// LatLngFloat64 reports the latitude and longitude in float64.
If err is nil, then lat +// and lng will never both be 0.0 to disambiguate between an empty struct and +// Null Island (0° 0°). +func (p Point) LatLngFloat64() (lat, lng float64, err error) { + dlat, dlng, err := p.LatLng() + if err != nil { + return 0.0, 0.0, err + } + if dlat == 0.0 && dlng == 0.0 { + // dlng must survive conversion to float32. + dlng = math.SmallestNonzeroFloat32 + } + return float64(dlat), float64(dlng), err +} + +// SphericalAngleTo returns the angular distance from p to q, calculated on a +// spherical Earth. +func (p Point) SphericalAngleTo(q Point) (Radians, error) { + pLat, pLng, pErr := p.LatLng() + qLat, qLng, qErr := q.LatLng() + switch { + case pErr != nil && qErr != nil: + return 0.0, fmt.Errorf("spherical distance from %v to %v: %w", p, q, errors.Join(pErr, qErr)) + case pErr != nil: + return 0.0, fmt.Errorf("spherical distance from %v: %w", p, pErr) + case qErr != nil: + return 0.0, fmt.Errorf("spherical distance to %v: %w", q, qErr) + } + // The spherical law of cosines is accurate enough for close points when + // using float64. + // + // The haversine formula is an alternative, but it is poorly behaved + // when points are on opposite sides of the sphere. + rLat, rLng := float64(pLat.Radians()), float64(pLng.Radians()) + sLat, sLng := float64(qLat.Radians()), float64(qLng.Radians()) + cosA := math.Sin(rLat)*math.Sin(sLat) + + math.Cos(rLat)*math.Cos(sLat)*math.Cos(rLng-sLng) + return Radians(math.Acos(cosA)), nil +} + +// DistanceTo reports the great-circle distance between p and q, in meters. +func (p Point) DistanceTo(q Point) (Distance, error) { + r, err := p.SphericalAngleTo(q) + if err != nil { + return 0, err + } + return DistanceOnEarth(r.Turns()), nil +} + +// String returns a space-separated pair of latitude and longitude, in decimal +// degrees. Positive latitudes are in the northern hemisphere, and positive +// longitudes are east of the prime meridian. If p was not initialized, this +// will return "nowhere". 
+// If p is not valid, the output will be an 8-byte zero value.
+// MarshalUint64 produces the same output as MarshalBinary, encoded in a uint64.
+func (p Point) EqualApprox(q Point, tol float64) bool { + if tol == 0 { + return p == q + } + + if p.IsZero() && q.IsZero() { + return true + } else if p.IsZero() || q.IsZero() { + return false + } + + plat, plng, err := p.LatLng() + if err != nil { + panic(err) + } + qlat, qlng, err := q.LatLng() + if err != nil { + panic(err) + } + + if tol < 0 { + tol = 1e-5 + } + + dlat := float64(plat) - float64(qlat) + dlng := float64(plng) - float64(qlng) + return ((dlat < 0 && -dlat < tol) || (dlat >= 0 && dlat < tol)) && + ((dlng < 0 && -dlng < tol) || (dlng >= 0 && dlng < tol)) +} diff --git a/types/geo/point_test.go b/types/geo/point_test.go new file mode 100644 index 0000000000000..308c1a1834377 --- /dev/null +++ b/types/geo/point_test.go @@ -0,0 +1,541 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo_test + +import ( + "fmt" + "math" + "testing" + "testing/quick" + + "tailscale.com/types/geo" +) + +func TestPointZero(t *testing.T) { + var zero geo.Point + + if got := zero.IsZero(); !got { + t.Errorf("IsZero() got %t", got) + } + + if got := zero.Valid(); got { + t.Errorf("Valid() got %t", got) + } + + wantErr := geo.ErrBadPoint.Error() + if _, _, err := zero.LatLng(); err.Error() != wantErr { + t.Errorf("LatLng() err %q, want %q", err, wantErr) + } + + wantStr := "nowhere" + if got := zero.String(); got != wantStr { + t.Errorf("String() got %q, want %q", got, wantStr) + } + + wantB := []byte{0, 0, 0, 0, 0, 0, 0, 0} + if b, err := zero.MarshalBinary(); err != nil { + t.Errorf("MarshalBinary() err %q, want nil", err) + } else if string(b) != string(wantB) { + t.Errorf("MarshalBinary got %q, want %q", b, wantB) + } + + wantI := uint64(0x00000000) + if i, err := zero.MarshalUint64(); err != nil { + t.Errorf("MarshalUint64() err %q, want nil", err) + } else if i != wantI { + t.Errorf("MarshalUint64 got %v, want %v", i, wantI) + } +} + +func TestPoint(t *testing.T) { + for _, tt := range []struct { + name string + lat 
geo.Degrees + lng geo.Degrees + wantLat geo.Degrees + wantLng geo.Degrees + wantString string + wantText string + }{ + { + name: "null-island", + lat: +0.0, + lng: +0.0, + wantLat: +0.0, + wantLng: +0.0, + wantString: "+0° +0°", + wantText: "POINT (0 0)", + }, + { + name: "north-pole", + lat: +90.0, + lng: +0.0, + wantLat: +90.0, + wantLng: +0.0, + wantString: "+90° +0°", + wantText: "POINT (0 90)", + }, + { + name: "south-pole", + lat: -90.0, + lng: +0.0, + wantLat: -90.0, + wantLng: +0.0, + wantString: "-90° +0°", + wantText: "POINT (0 -90)", + }, + { + name: "north-pole-weird-longitude", + lat: +90.0, + lng: +1.0, + wantLat: +90.0, + wantLng: +0.0, + wantString: "+90° +0°", + wantText: "POINT (0 90)", + }, + { + name: "south-pole-weird-longitude", + lat: -90.0, + lng: +1.0, + wantLat: -90.0, + wantLng: +0.0, + wantString: "-90° +0°", + wantText: "POINT (0 -90)", + }, + { + name: "almost-north", + lat: +89.0, + lng: +0.0, + wantLat: +89.0, + wantLng: +0.0, + wantString: "+89° +0°", + wantText: "POINT (0 89)", + }, + { + name: "past-north", + lat: +91.0, + lng: +0.0, + wantLat: +89.0, + wantLng: +180.0, + wantString: "+89° +180°", + wantText: "POINT (180 89)", + }, + { + name: "almost-south", + lat: -89.0, + lng: +0.0, + wantLat: -89.0, + wantLng: +0.0, + wantString: "-89° +0°", + wantText: "POINT (0 -89)", + }, + { + name: "past-south", + lat: -91.0, + lng: +0.0, + wantLat: -89.0, + wantLng: +180.0, + wantString: "-89° +180°", + wantText: "POINT (180 -89)", + }, + { + name: "antimeridian-north", + lat: +180.0, + lng: +0.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "antimeridian-south", + lat: -180.0, + lng: +0.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "almost-antimeridian-north", + lat: +179.0, + lng: +0.0, + wantLat: +1.0, + wantLng: +180.0, + wantString: "+1° +180°", + wantText: "POINT (180 1)", + }, + { + name: 
"past-antimeridian-north", + lat: +181.0, + lng: +0.0, + wantLat: -1.0, + wantLng: +180.0, + wantString: "-1° +180°", + wantText: "POINT (180 -1)", + }, + { + name: "almost-antimeridian-south", + lat: -179.0, + lng: +0.0, + wantLat: -1.0, + wantLng: +180.0, + wantString: "-1° +180°", + wantText: "POINT (180 -1)", + }, + { + name: "past-antimeridian-south", + lat: -181.0, + lng: +0.0, + wantLat: +1.0, + wantLng: +180.0, + wantString: "+1° +180°", + wantText: "POINT (180 1)", + }, + { + name: "circumnavigate-north", + lat: +360.0, + lng: +1.0, + wantLat: +0.0, + wantLng: +1.0, + wantString: "+0° +1°", + wantText: "POINT (1 0)", + }, + { + name: "circumnavigate-south", + lat: -360.0, + lng: +1.0, + wantLat: +0.0, + wantLng: +1.0, + wantString: "+0° +1°", + wantText: "POINT (1 0)", + }, + { + name: "almost-circumnavigate-north", + lat: +359.0, + lng: +1.0, + wantLat: -1.0, + wantLng: +1.0, + wantString: "-1° +1°", + wantText: "POINT (1 -1)", + }, + { + name: "past-circumnavigate-north", + lat: +361.0, + lng: +1.0, + wantLat: +1.0, + wantLng: +1.0, + wantString: "+1° +1°", + wantText: "POINT (1 1)", + }, + { + name: "almost-circumnavigate-south", + lat: -359.0, + lng: +1.0, + wantLat: +1.0, + wantLng: +1.0, + wantString: "+1° +1°", + wantText: "POINT (1 1)", + }, + { + name: "past-circumnavigate-south", + lat: -361.0, + lng: +1.0, + wantLat: -1.0, + wantLng: +1.0, + wantString: "-1° +1°", + wantText: "POINT (1 -1)", + }, + { + name: "antimeridian-east", + lat: +0.0, + lng: +180.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "antimeridian-west", + lat: +0.0, + lng: -180.0, + wantLat: +0.0, + wantLng: +180.0, + wantString: "+0° +180°", + wantText: "POINT (180 0)", + }, + { + name: "almost-antimeridian-east", + lat: +0.0, + lng: +179.0, + wantLat: +0.0, + wantLng: +179.0, + wantString: "+0° +179°", + wantText: "POINT (179 0)", + }, + { + name: "past-antimeridian-east", + lat: +0.0, + lng: +181.0, + wantLat: 
+0.0, + wantLng: -179.0, + wantString: "+0° -179°", + wantText: "POINT (-179 0)", + }, + { + name: "almost-antimeridian-west", + lat: +0.0, + lng: -179.0, + wantLat: +0.0, + wantLng: -179.0, + wantString: "+0° -179°", + wantText: "POINT (-179 0)", + }, + { + name: "past-antimeridian-west", + lat: +0.0, + lng: -181.0, + wantLat: +0.0, + wantLng: +179.0, + wantString: "+0° +179°", + wantText: "POINT (179 0)", + }, + { + name: "montreal", + lat: +45.508888, + lng: -73.561668, + wantLat: +45.508888, + wantLng: -73.561668, + wantString: "+45.508888° -73.561668°", + wantText: "POINT (-73.561668 45.508888)", + }, + { + name: "canada", + lat: 57.550480044655636, + lng: -98.41680517868062, + wantLat: 57.550480044655636, + wantLng: -98.41680517868062, + wantString: "+57.550480044655636° -98.41680517868062°", + wantText: "POINT (-98.41680517868062 57.550480044655636)", + }, + } { + t.Run(tt.name, func(t *testing.T) { + p := geo.MakePoint(tt.lat, tt.lng) + + lat, lng, err := p.LatLng() + if !approx(lat, tt.wantLat) { + t.Errorf("MakePoint: lat %v, want %v", lat, tt.wantLat) + } + if !approx(lng, tt.wantLng) { + t.Errorf("MakePoint: lng %v, want %v", lng, tt.wantLng) + } + if err != nil { + t.Fatalf("LatLng: err %q, expected nil", err) + } + + if got := p.String(); got != tt.wantString { + t.Errorf("String: got %q, wantString %q", got, tt.wantString) + } + + txt, err := p.MarshalText() + if err != nil { + t.Errorf("Text: err %q, expected nil", err) + } else if string(txt) != tt.wantText { + t.Errorf("Text: got %q, wantText %q", txt, tt.wantText) + } + + b, err := p.MarshalBinary() + if err != nil { + t.Fatalf("MarshalBinary: err %q, expected nil", err) + } + + var q geo.Point + if err := q.UnmarshalBinary(b); err != nil { + t.Fatalf("UnmarshalBinary: err %q, expected nil", err) + } + if !q.EqualApprox(p, -1) { + t.Errorf("UnmarshalBinary: roundtrip failed: %#v != %#v", q, p) + } + + i, err := p.MarshalUint64() + if err != nil { + t.Fatalf("MarshalUint64: err %q, expected nil", 
t.Fatalf("UnmarshalUint64: err %q, expected nil", err)
volumetric mean radius (km) + const kmToRad = 1 / earthRadius + for _, tt := range []struct { + name string + x geo.Point + y geo.Point + want geo.Radians + wantErr string + }{ + { + name: "same-point-null-island", + x: geo.MakePoint(0, 0), + y: geo.MakePoint(0, 0), + want: 0.0 * geo.Radian, + }, + { + name: "same-point-north-pole", + x: geo.MakePoint(+90, 0), + y: geo.MakePoint(+90, +90), + want: 0.0 * geo.Radian, + }, + { + name: "same-point-south-pole", + x: geo.MakePoint(-90, 0), + y: geo.MakePoint(-90, -90), + want: 0.0 * geo.Radian, + }, + { + name: "north-pole-to-south-pole", + x: geo.MakePoint(+90, 0), + y: geo.MakePoint(-90, -90), + want: math.Pi * geo.Radian, + }, + { + name: "toronto-to-montreal", + x: geo.MakePoint(+43.6532, -79.3832), + y: geo.MakePoint(+45.5019, -73.5674), + want: 504.26 * kmToRad * geo.Radian, + }, + { + name: "sydney-to-san-francisco", + x: geo.MakePoint(-33.8727, +151.2057), + y: geo.MakePoint(+37.7749, -122.4194), + want: 11948.18 * kmToRad * geo.Radian, + }, + { + name: "new-york-to-paris", + x: geo.MakePoint(+40.7128, -74.0060), + y: geo.MakePoint(+48.8575, +2.3514), + want: 5837.15 * kmToRad * geo.Radian, + }, + { + name: "seattle-to-tokyo", + x: geo.MakePoint(+47.6061, -122.3328), + y: geo.MakePoint(+35.6764, +139.6500), + want: 7700.00 * kmToRad * geo.Radian, + }, + } { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.x.SphericalAngleTo(tt.y) + if tt.wantErr == "" && err != nil { + t.Fatalf("err %q, expected nil", err) + } + if tt.wantErr != "" && (err == nil || err.Error() != tt.wantErr) { + t.Fatalf("err %q, expected %q", err, tt.wantErr) + } + if tt.wantErr != "" { + return + } + + if !approx(got, tt.want) { + t.Errorf("x to y: got %v, want %v", got, tt.want) + } + + // Distance should be commutative + got, err = tt.y.SphericalAngleTo(tt.x) + if err != nil { + t.Fatalf("err %q, expected nil", err) + } + if !approx(got, tt.want) { + t.Errorf("y to x: got %v, want %v", got, tt.want) + } + t.Logf("x to y: %v km", 
got/kmToRad) + }) + } +} + +func approx[T ~float64](x, y T) bool { + return math.Abs(float64(x)-float64(y)) <= 1e-5 +} diff --git a/types/geo/quantize.go b/types/geo/quantize.go new file mode 100644 index 0000000000000..18ec11f9f119c --- /dev/null +++ b/types/geo/quantize.go @@ -0,0 +1,106 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo + +import ( + "math" + "sync" +) + +// MinSeparation is the minimum separation between two points after quantizing. +// [Point.Quantize] guarantees that two points will either be snapped to exactly +// the same point, which conflates multiple positions together, or that the two +// points will be far enough apart that successfully performing most reverse +// lookups would be highly improbable. +const MinSeparation = 50_000 * Meter + +// Latitude +var ( + // numSepsEquatorToPole is the number of separations between a point on + // the equator to a point on a pole, that satisfies [minPointSep]. In + // other words, the number of separations between 0° and +90° degrees + // latitude. + numSepsEquatorToPole = int(math.Floor(float64( + earthPolarCircumference / MinSeparation / 4))) + + // latSep is the number of degrees between two adjacent latitudinal + // points. In other words, the next point going straight north of + // 0° would be latSep°. + latSep = Degrees(90.0 / float64(numSepsEquatorToPole)) +) + +// snapToLat returns the number of the nearest latitudinal separation to +// lat. A positive result is north of the equator, a negative result is south, +// and zero is the equator itself. For example, a result of -1 would mean a +// point that is [latSep] south of the equator. +func snapToLat(lat Degrees) int { + return int(math.Round(float64(lat / latSep))) +} + +// lngSep is a lookup table for the number of degrees between two adjacent +// longitudinal separations. where the index corresponds to the absolute value +// of the latitude separation. 
The first value corresponds to the equator and +// the last value corresponds to the separation before the pole. There is no +// value for the pole itself, because longitude has no meaning there. +// +// [lngSep] is calculated on init, which is so quick and will be used so often +// that the startup cost is negligible. +var lngSep = sync.OnceValue(func() []Degrees { + lut := make([]Degrees, numSepsEquatorToPole) + + // i ranges from the equator to a pole + for i := range len(lut) { + // lat ranges from [0°, 90°], because the southern hemisphere is + // a reflection of the northern one. + lat := Degrees(i) * latSep + ratio := math.Cos(float64(lat.Radians())) + circ := Distance(ratio) * earthEquatorialCircumference + num := int(math.Floor(float64(circ / MinSeparation))) + // We define lut[0] as 0°, lut[len(lut)] to be the north pole, + // which means -lut[len(lut)] is the south pole. + lut[i] = Degrees(360.0 / float64(num)) + } + return lut +}) + +// snapToLatLng returns the number of the nearest latitudinal separation to lat, +// and the nearest longitudinal separation to lng. +func snapToLatLng(lat, lng Degrees) (Degrees, Degrees) { + latN := snapToLat(lat) + + // absolute index into lngSep + n := latN + if n < 0 { + n = -latN + } + + lngSep := lngSep() + if n < len(lngSep) { + sep := lngSep[n] + lngN := int(math.Round(float64(lng / sep))) + return Degrees(latN) * latSep, Degrees(lngN) * sep + } + if latN < 0 { // south pole + return -90 * Degree, 0 * Degree + } else { // north pole + return +90 * Degree, 0 * Degree + } +} + +// Quantize returns a new [Point] after throwing away enough location data in p +// so that it would be difficult to distinguish a node among all the other nodes +// in its general vicinity. One caveat is that if there’s only one point in an +// obscure location, someone could triangulate the node using additional data. +// +// This method is stable: given the same p, it will always return the same +// result. 
It is equivalent to snapping to points on Earth that are at least +// [MinSeparation] apart. +func (p Point) Quantize() Point { + if p.IsZero() { + return p + } + + lat, lng := snapToLatLng(p.lat, p.lng180-180) + return MakePoint(lat, lng) +} diff --git a/types/geo/quantize_test.go b/types/geo/quantize_test.go new file mode 100644 index 0000000000000..3c707e303c250 --- /dev/null +++ b/types/geo/quantize_test.go @@ -0,0 +1,130 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo_test + +import ( + "testing" + "testing/quick" + + "tailscale.com/types/geo" +) + +func TestPointAnonymize(t *testing.T) { + t.Run("nowhere", func(t *testing.T) { + var zero geo.Point + p := zero.Quantize() + want := zero.Valid() + if got := p.Valid(); got != want { + t.Fatalf("zero.Valid %t, want %t", got, want) + } + }) + + t.Run("separation", func(t *testing.T) { + // Walk from the south pole to the north pole and check that each + // point on the latitude is approximately MinSeparation apart. 
+ const southPole = -90 * geo.Degree + const northPole = 90 * geo.Degree + const dateLine = 180 * geo.Degree + + llat := southPole + for lat := llat; lat <= northPole; lat += 0x1p-4 { + last := geo.MakePoint(llat, 0) + cur := geo.MakePoint(lat, 0) + anon := cur.Quantize() + switch l, g, err := anon.LatLng(); { + case err != nil: + t.Fatal(err) + case lat == southPole: + // initialize llng, to the first snapped longitude + llat = l + goto Lng + case g != 0: + t.Fatalf("%v is west or east of %v", anon, last) + case l < llat: + t.Fatalf("%v is south of %v", anon, last) + case l == llat: + continue + case l > llat: + switch dist, err := last.DistanceTo(anon); { + case err != nil: + t.Fatal(err) + case dist == 0.0: + continue + case dist < geo.MinSeparation: + t.Logf("lat=%v last=%v cur=%v anon=%v", lat, last, cur, anon) + t.Fatalf("%v is too close to %v", anon, last) + default: + llat = l + } + } + + Lng: + llng := dateLine + for lng := llng; lng <= dateLine && lng >= -dateLine; lng -= 0x1p-3 { + last := geo.MakePoint(llat, llng) + cur := geo.MakePoint(lat, lng) + anon := cur.Quantize() + switch l, g, err := anon.LatLng(); { + case err != nil: + t.Fatal(err) + case lng == dateLine: + // initialize llng, to the first snapped longitude + llng = g + continue + case l != llat: + t.Fatalf("%v is north or south of %v", anon, last) + case g != llng: + const tolerance = geo.MinSeparation * 0x1p-9 + switch dist, err := last.DistanceTo(anon); { + case err != nil: + t.Fatal(err) + case dist < tolerance: + continue + case dist < (geo.MinSeparation - tolerance): + t.Logf("lat=%v lng=%v last=%v cur=%v anon=%v", lat, lng, last, cur, anon) + t.Fatalf("%v is too close to %v: %v", anon, last, dist) + default: + llng = g + } + + } + } + } + if llat == southPole { + t.Fatal("llat never incremented") + } + }) + + t.Run("quick-check", func(t *testing.T) { + f := func(lat, lng geo.Degrees) bool { + p := geo.MakePoint(lat, lng) + q := p.Quantize() + t.Logf("quantize %v = %v", p, q) + + lat, 
lng, err := q.LatLng() + if err != nil { + t.Errorf("err %v, want nil", err) + return !t.Failed() + } + + if lat < -90*geo.Degree || lat > 90*geo.Degree { + t.Errorf("lat outside [-90°, +90°]: %v", lat) + } + if lng < -180*geo.Degree || lng > 180*geo.Degree { + t.Errorf("lng outside [-180°, +180°], %v", lng) + } + + if dist, err := p.DistanceTo(q); err != nil { + t.Error(err) + } else if dist > (geo.MinSeparation * 2) { + t.Errorf("moved too far: %v", dist) + } + + return !t.Failed() + } + if err := quick.Check(f, nil); err != nil { + t.Fatal(err) + } + }) +} diff --git a/types/geo/units.go b/types/geo/units.go new file mode 100644 index 0000000000000..76a4c02f79f34 --- /dev/null +++ b/types/geo/units.go @@ -0,0 +1,191 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo + +import ( + "math" + "strconv" + "strings" + "unicode" +) + +const ( + Degree Degrees = 1 + Radian Radians = 1 + Turn Turns = 1 + Meter Distance = 1 +) + +// Degrees represents a latitude or longitude, in decimal degrees. +type Degrees float64 + +// ParseDegrees parses s as decimal degrees. +func ParseDegrees(s string) (Degrees, error) { + s = strings.TrimSuffix(s, "°") + f, err := strconv.ParseFloat(s, 64) + return Degrees(f), err +} + +// MustParseDegrees parses s as decimal degrees, but panics on error. +func MustParseDegrees(s string) Degrees { + d, err := ParseDegrees(s) + if err != nil { + panic(err) + } + return d +} + +// String implements the [Stringer] interface. The output is formatted in +// decimal degrees, prefixed by either the appropriate + or - sign, and suffixed +// by a ° degree symbol. +func (d Degrees) String() string { + b, _ := d.AppendText(nil) + b = append(b, []byte("°")...) + return string(b) +} + +// AppendText implements [encoding.TextAppender]. The output is formatted in +// decimal degrees, prefixed by either the appropriate + or - sign. 
+func (d Degrees) AppendText(b []byte) ([]byte, error) { + b = d.AppendZeroPaddedText(b, 0) + return b, nil +} + +// AppendZeroPaddedText appends d formatted as decimal degrees to b. The number of +// integer digits will be zero-padded to nint. +func (d Degrees) AppendZeroPaddedText(b []byte, nint int) []byte { + n := float64(d) + + if math.IsInf(n, 0) || math.IsNaN(n) { + return strconv.AppendFloat(b, n, 'f', -1, 64) + } + + sign := byte('+') + if math.Signbit(n) { + sign = '-' + n = -n + } + b = append(b, sign) + + pad := nint - 1 + for nn := n / 10; nn >= 1 && pad > 0; nn /= 10 { + pad-- + } + for range pad { + b = append(b, '0') + } + return strconv.AppendFloat(b, n, 'f', -1, 64) +} + +// Radians converts d into radians. +func (d Degrees) Radians() Radians { + return Radians(d * math.Pi / 180.0) +} + +// Turns converts d into a number of turns. +func (d Degrees) Turns() Turns { + return Turns(d / 360.0) +} + +// Radians represents a latitude or longitude, in radians. +type Radians float64 + +// ParseRadians parses s as radians. +func ParseRadians(s string) (Radians, error) { + s = strings.TrimSuffix(s, "rad") + s = strings.TrimRightFunc(s, unicode.IsSpace) + f, err := strconv.ParseFloat(s, 64) + return Radians(f), err +} + +// MustParseRadians parses s as radians, but panics on error. +func MustParseRadians(s string) Radians { + r, err := ParseRadians(s) + if err != nil { + panic(err) + } + return r +} + +// String implements the [Stringer] interface. +func (r Radians) String() string { + return strconv.FormatFloat(float64(r), 'f', -1, 64) + " rad" +} + +// Degrees converts r into decimal degrees. +func (r Radians) Degrees() Degrees { + return Degrees(r * 180.0 / math.Pi) +} + +// Turns converts r into a number of turns. +func (r Radians) Turns() Turns { + return Turns(r / 2 / math.Pi) +} + +// Turns represents a number of complete revolutions around a sphere. +type Turns float64 + +// String implements the [Stringer] interface. 
+func (o Turns) String() string { + return strconv.FormatFloat(float64(o), 'f', -1, 64) +} + +// Degrees converts t into decimal degrees. +func (o Turns) Degrees() Degrees { + return Degrees(o * 360.0) +} + +// Radians converts t into radians. +func (o Turns) Radians() Radians { + return Radians(o * 2 * math.Pi) +} + +// Distance represents a great-circle distance in meters. +type Distance float64 + +// ParseDistance parses s as distance in meters. +func ParseDistance(s string) (Distance, error) { + s = strings.TrimSuffix(s, "m") + s = strings.TrimRightFunc(s, unicode.IsSpace) + f, err := strconv.ParseFloat(s, 64) + return Distance(f), err +} + +// MustParseDistance parses s as distance in meters, but panics on error. +func MustParseDistance(s string) Distance { + d, err := ParseDistance(s) + if err != nil { + panic(err) + } + return d +} + +// String implements the [Stringer] interface. +func (d Distance) String() string { + return strconv.FormatFloat(float64(d), 'f', -1, 64) + "m" +} + +// DistanceOnEarth converts t turns into the great-circle distance, in meters. +func DistanceOnEarth(t Turns) Distance { + return Distance(t) * EarthMeanCircumference +} + +// Earth Fact Sheet +// https://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html +const ( + // EarthMeanRadius is the volumetric mean radius of the Earth. + EarthMeanRadius = 6_371_000 * Meter + // EarthMeanCircumference is the volumetric mean circumference of the Earth. + EarthMeanCircumference = 2 * math.Pi * EarthMeanRadius + + // earthEquatorialRadius is the equatorial radius of the Earth. + earthEquatorialRadius = 6_378_137 * Meter + // earthEquatorialCircumference is the equatorial circumference of the Earth. + earthEquatorialCircumference = 2 * math.Pi * earthEquatorialRadius + + // earthPolarRadius is the polar radius of the Earth. + earthPolarRadius = 6_356_752 * Meter + // earthPolarCircumference is the polar circumference of the Earth. 
+ earthPolarCircumference = 2 * math.Pi * earthPolarRadius +) diff --git a/types/geo/units_test.go b/types/geo/units_test.go new file mode 100644 index 0000000000000..b6f724ce0d9b3 --- /dev/null +++ b/types/geo/units_test.go @@ -0,0 +1,395 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package geo_test + +import ( + "math" + "strings" + "testing" + + "tailscale.com/types/geo" +) + +func TestDegrees(t *testing.T) { + for _, tt := range []struct { + name string + degs geo.Degrees + wantStr string + wantText string + wantPad string + wantRads geo.Radians + wantTurns geo.Turns + }{ + { + name: "zero", + degs: 0.0 * geo.Degree, + wantStr: "+0°", + wantText: "+0", + wantPad: "+000", + wantRads: 0.0 * geo.Radian, + wantTurns: 0 * geo.Turn, + }, + { + name: "quarter-turn", + degs: 90.0 * geo.Degree, + wantStr: "+90°", + wantText: "+90", + wantPad: "+090", + wantRads: 0.5 * math.Pi * geo.Radian, + wantTurns: 0.25 * geo.Turn, + }, + { + name: "half-turn", + degs: 180.0 * geo.Degree, + wantStr: "+180°", + wantText: "+180", + wantPad: "+180", + wantRads: 1.0 * math.Pi * geo.Radian, + wantTurns: 0.5 * geo.Turn, + }, + { + name: "full-turn", + degs: 360.0 * geo.Degree, + wantStr: "+360°", + wantText: "+360", + wantPad: "+360", + wantRads: 2.0 * math.Pi * geo.Radian, + wantTurns: 1.0 * geo.Turn, + }, + { + name: "negative-zero", + degs: geo.MustParseDegrees("-0.0"), + wantStr: "-0°", + wantText: "-0", + wantPad: "-000", + wantRads: 0 * geo.Radian * -1, + wantTurns: 0 * geo.Turn * -1, + }, + { + name: "small-degree", + degs: -1.2003 * geo.Degree, + wantStr: "-1.2003°", + wantText: "-1.2003", + wantPad: "-001.2003", + wantRads: -0.020949187011687936 * geo.Radian, + wantTurns: -0.0033341666666666663 * geo.Turn, + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.degs.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + d, err := geo.ParseDegrees(tt.wantStr) + if err != nil { + 
t.Fatalf("ParseDegrees err %q, want nil", err.Error()) + } + if d != tt.degs { + t.Errorf("ParseDegrees got %q, want %q", d, tt.degs) + } + + b, err := tt.degs.AppendText(nil) + if err != nil { + t.Fatalf("AppendText err %q, want nil", err.Error()) + } + if string(b) != tt.wantText { + t.Errorf("AppendText got %q, want %q", b, tt.wantText) + } + + b = tt.degs.AppendZeroPaddedText(nil, 3) + if string(b) != tt.wantPad { + t.Errorf("AppendZeroPaddedText got %q, want %q", b, tt.wantPad) + } + + r := tt.degs.Radians() + if r != tt.wantRads { + t.Errorf("Radian got %v, want %v", r, tt.wantRads) + } + if d := r.Degrees(); d != tt.degs { // Roundtrip + t.Errorf("Degrees got %v, want %v", d, tt.degs) + } + + o := tt.degs.Turns() + if o != tt.wantTurns { + t.Errorf("Turns got %v, want %v", o, tt.wantTurns) + } + }) + } +} + +func TestRadians(t *testing.T) { + for _, tt := range []struct { + name string + rads geo.Radians + wantStr string + wantText string + wantDegs geo.Degrees + wantTurns geo.Turns + }{ + { + name: "zero", + rads: 0.0 * geo.Radian, + wantStr: "0 rad", + wantDegs: 0.0 * geo.Degree, + wantTurns: 0 * geo.Turn, + }, + { + name: "quarter-turn", + rads: 0.5 * math.Pi * geo.Radian, + wantStr: "1.5707963267948966 rad", + wantDegs: 90.0 * geo.Degree, + wantTurns: 0.25 * geo.Turn, + }, + { + name: "half-turn", + rads: 1.0 * math.Pi * geo.Radian, + wantStr: "3.141592653589793 rad", + wantDegs: 180.0 * geo.Degree, + wantTurns: 0.5 * geo.Turn, + }, + { + name: "full-turn", + rads: 2.0 * math.Pi * geo.Radian, + wantStr: "6.283185307179586 rad", + wantDegs: 360.0 * geo.Degree, + wantTurns: 1.0 * geo.Turn, + }, + { + name: "negative-zero", + rads: geo.MustParseRadians("-0"), + wantStr: "-0 rad", + wantDegs: 0 * geo.Degree * -1, + wantTurns: 0 * geo.Turn * -1, + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.rads.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + r, err := geo.ParseRadians(tt.wantStr) + if err != nil { 
+ t.Fatalf("ParseDegrees err %q, want nil", err.Error()) + } + if r != tt.rads { + t.Errorf("ParseDegrees got %q, want %q", r, tt.rads) + } + + d := tt.rads.Degrees() + if d != tt.wantDegs { + t.Errorf("Degrees got %v, want %v", d, tt.wantDegs) + } + if r := d.Radians(); r != tt.rads { // Roundtrip + t.Errorf("Radians got %v, want %v", r, tt.rads) + } + + o := tt.rads.Turns() + if o != tt.wantTurns { + t.Errorf("Turns got %v, want %v", o, tt.wantTurns) + } + }) + } +} + +func TestTurns(t *testing.T) { + for _, tt := range []struct { + name string + turns geo.Turns + wantStr string + wantText string + wantDegs geo.Degrees + wantRads geo.Radians + }{ + { + name: "zero", + turns: 0.0, + wantStr: "0", + wantDegs: 0.0 * geo.Degree, + wantRads: 0 * geo.Radian, + }, + { + name: "quarter-turn", + turns: 0.25, + wantStr: "0.25", + wantDegs: 90.0 * geo.Degree, + wantRads: 0.5 * math.Pi * geo.Radian, + }, + { + name: "half-turn", + turns: 0.5, + wantStr: "0.5", + wantDegs: 180.0 * geo.Degree, + wantRads: 1.0 * math.Pi * geo.Radian, + }, + { + name: "full-turn", + turns: 1.0, + wantStr: "1", + wantDegs: 360.0 * geo.Degree, + wantRads: 2.0 * math.Pi * geo.Radian, + }, + { + name: "negative-zero", + turns: geo.Turns(math.Copysign(0, -1)), + wantStr: "-0", + wantDegs: 0 * geo.Degree * -1, + wantRads: 0 * geo.Radian * -1, + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.turns.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + d := tt.turns.Degrees() + if d != tt.wantDegs { + t.Errorf("Degrees got %v, want %v", d, tt.wantDegs) + } + if o := d.Turns(); o != tt.turns { // Roundtrip + t.Errorf("Turns got %v, want %v", o, tt.turns) + } + + r := tt.turns.Radians() + if r != tt.wantRads { + t.Errorf("Turns got %v, want %v", r, tt.wantRads) + } + }) + } +} + +func TestDistance(t *testing.T) { + for _, tt := range []struct { + name string + dist geo.Distance + wantStr string + }{ + { + name: "zero", + dist: 0.0 * geo.Meter, + wantStr: 
"0m", + }, + { + name: "random", + dist: 4 * geo.Meter, + wantStr: "4m", + }, + { + name: "light-second", + dist: 299_792_458 * geo.Meter, + wantStr: "299792458m", + }, + { + name: "planck-length", + dist: 1.61625518e-35 * geo.Meter, + wantStr: "0.0000000000000000000000000000000000161625518m", + }, + { + name: "negative-zero", + dist: geo.Distance(math.Copysign(0, -1)), + wantStr: "-0m", + }, + } { + t.Run(tt.name, func(t *testing.T) { + if got := tt.dist.String(); got != tt.wantStr { + t.Errorf("String got %q, want %q", got, tt.wantStr) + } + + r, err := geo.ParseDistance(tt.wantStr) + if err != nil { + t.Fatalf("ParseDegrees err %q, want nil", err.Error()) + } + if r != tt.dist { + t.Errorf("ParseDegrees got %q, want %q", r, tt.dist) + } + }) + } +} + +func TestDistanceOnEarth(t *testing.T) { + for _, tt := range []struct { + name string + here geo.Point + there geo.Point + want geo.Distance + wantErr string + }{ + { + name: "no-points", + here: geo.Point{}, + there: geo.Point{}, + wantErr: "not a valid point", + }, + { + name: "not-here", + here: geo.Point{}, + there: geo.MakePoint(0, 0), + wantErr: "not a valid point", + }, + { + name: "not-there", + here: geo.MakePoint(0, 0), + there: geo.Point{}, + wantErr: "not a valid point", + }, + { + name: "null-island", + here: geo.MakePoint(0, 0), + there: geo.MakePoint(0, 0), + want: 0 * geo.Meter, + }, + { + name: "equator-to-south-pole", + here: geo.MakePoint(0, 0), + there: geo.MakePoint(-90, 0), + want: geo.EarthMeanCircumference / 4, + }, + { + name: "north-pole-to-south-pole", + here: geo.MakePoint(+90, 0), + there: geo.MakePoint(-90, 0), + want: geo.EarthMeanCircumference / 2, + }, + { + name: "meridian-to-antimeridian", + here: geo.MakePoint(0, 0), + there: geo.MakePoint(0, -180), + want: geo.EarthMeanCircumference / 2, + }, + { + name: "positive-to-negative-antimeridian", + here: geo.MakePoint(0, 180), + there: geo.MakePoint(0, -180), + want: 0 * geo.Meter, + }, + { + name: "toronto-to-montreal", + here: 
geo.MakePoint(+43.70011, -79.41630), + there: geo.MakePoint(+45.50884, -73.58781), + want: 503_200 * geo.Meter, + }, + { + name: "montreal-to-san-francisco", + here: geo.MakePoint(+45.50884, -73.58781), + there: geo.MakePoint(+37.77493, -122.41942), + want: 4_082_600 * geo.Meter, + }, + } { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.here.DistanceTo(tt.there) + if tt.wantErr == "" && err != nil { + t.Fatalf("err %q, want nil", err) + } + if tt.wantErr != "" && !strings.Contains(err.Error(), tt.wantErr) { + t.Fatalf("err %q, want %q", err, tt.wantErr) + } + + approx := func(x, y geo.Distance) bool { + return math.Abs(float64(x)-float64(y)) <= 10 + } + if !approx(got, tt.want) { + t.Fatalf("got %v, want %v", got, tt.want) + } + }) + } +} From d334d9ba07fa8ae8abb5d39fa5a3e7a277f2dc32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 18 Jul 2025 10:55:17 -0400 Subject: [PATCH 0133/1093] client/local,cmd/tailscale/cli,ipn/localapi: expose eventbus graph (#16597) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make it possible to dump the eventbus graph as JSON or DOT to both debug and document what is communicated via the bus. 
Updates #15160 Signed-off-by: Claus Lensbøl --- client/local/local.go | 6 ++++ cmd/tailscale/cli/debug.go | 56 ++++++++++++++++++++++++++++++++++++++ ipn/localapi/localapi.go | 50 ++++++++++++++++++++++++++++++++++ util/eventbus/debug.go | 13 +++++++++ 4 files changed, 125 insertions(+) diff --git a/client/local/local.go b/client/local/local.go index 74c4f0b6f8a2c..55d14f95eee5a 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -432,6 +432,12 @@ func (lc *Client) TailDaemonLogs(ctx context.Context) (io.Reader, error) { return res.Body, nil } +// EventBusGraph returns a graph of active publishers and subscribers in the eventbus +// as a [eventbus.DebugTopics] +func (lc *Client) EventBusGraph(ctx context.Context) ([]byte, error) { + return lc.get200(ctx, "/localapi/v0/debug-bus-graph") +} + // StreamBusEvents returns an iterator of Tailscale bus events as they arrive. // Each pair is a valid event and a nil error, or a zero event a non-nil error. // In case of error, the iterator ends after the pair reporting the error. 
diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 8473c4a1707fa..fb062fd17c7aa 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -6,6 +6,7 @@ package cli import ( "bufio" "bytes" + "cmp" "context" "encoding/binary" "encoding/json" @@ -108,6 +109,17 @@ func debugCmd() *ffcli.Command { Exec: runDaemonBusEvents, ShortHelp: "Watch events on the tailscaled bus", }, + { + Name: "daemon-bus-graph", + ShortUsage: "tailscale debug daemon-bus-graph", + Exec: runDaemonBusGraph, + ShortHelp: "Print graph for the tailscaled bus", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("debug-bus-graph") + fs.StringVar(&daemonBusGraphArgs.format, "format", "json", "output format [json/dot]") + return fs + })(), + }, { Name: "metrics", ShortUsage: "tailscale debug metrics", @@ -807,6 +819,50 @@ func runDaemonBusEvents(ctx context.Context, args []string) error { return nil } +var daemonBusGraphArgs struct { + format string +} + +func runDaemonBusGraph(ctx context.Context, args []string) error { + graph, err := localClient.EventBusGraph(ctx) + if err != nil { + return err + } + if format := daemonBusGraphArgs.format; format != "json" && format != "dot" { + return fmt.Errorf("unrecognized output format %q", format) + } + if daemonBusGraphArgs.format == "dot" { + var topics eventbus.DebugTopics + if err := json.Unmarshal(graph, &topics); err != nil { + return fmt.Errorf("unable to parse json: %w", err) + } + fmt.Print(generateDOTGraph(topics.Topics)) + } else { + fmt.Print(string(graph)) + } + return nil +} + +// generateDOTGraph generates the DOT graph format based on the events +func generateDOTGraph(topics []eventbus.DebugTopic) string { + var sb strings.Builder + sb.WriteString("digraph event_bus {\n") + + for _, topic := range topics { + // If no subscribers, still ensure the topic is drawn + if len(topic.Subscribers) == 0 { + topic.Subscribers = append(topic.Subscribers, "no-subscribers") + } + for _, subscriber := range 
topic.Subscribers { + fmt.Fprintf(&sb, "\t%q -> %q [label=%q];\n", + topic.Publisher, subscriber, cmp.Or(topic.Name, "???")) + } + } + + sb.WriteString("}\n") + return sb.String() +} + var metricsArgs struct { watch bool } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index d7c64b917ead4..2409aa1ae3a36 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -93,6 +93,7 @@ var handler = map[string]LocalAPIHandler{ "component-debug-logging": (*Handler).serveComponentDebugLogging, "debug": (*Handler).serveDebug, "debug-bus-events": (*Handler).serveDebugBusEvents, + "debug-bus-graph": (*Handler).serveEventBusGraph, "debug-derp-region": (*Handler).serveDebugDERPRegion, "debug-dial-types": (*Handler).serveDebugDialTypes, "debug-log": (*Handler).serveDebugLog, @@ -1004,6 +1005,55 @@ func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { } } +// serveEventBusGraph taps into the event bus and dumps out the active graph of +// publishers and subscribers. It does not represent anything about the messages +// exchanged. 
+func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusPreconditionFailed) + return + } + + debugger := bus.Debugger() + clients := debugger.Clients() + + graph := map[string]eventbus.DebugTopic{} + + for _, client := range clients { + for _, pub := range debugger.PublishTypes(client) { + topic, ok := graph[pub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: pub.Name()} + } + topic.Publisher = client.Name() + graph[pub.Name()] = topic + } + for _, sub := range debugger.SubscribeTypes(client) { + topic, ok := graph[sub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: sub.Name()} + } + topic.Subscribers = append(topic.Subscribers, client.Name()) + graph[sub.Name()] = topic + } + } + + // The top level map is not really needed for the client, convert to a list. + topics := eventbus.DebugTopics{} + for _, v := range graph { + topics.Topics = append(topics.Topics, v) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(topics) +} + func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite { http.Error(w, "debug access denied", http.StatusForbidden) diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index b6264f82fd0eb..a055f078fc4f2 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -195,3 +195,16 @@ type DebugEvent struct { To []string Event any } + +// DebugTopics provides the JSON encoding as a wrapper for a collection of [DebugTopic]. +type DebugTopics struct { + Topics []DebugTopic +} + +// DebugTopic provides the JSON encoding of publishers and subscribers for a +// given topic. 
+type DebugTopic struct { + Name string + Publisher string + Subscribers []string +} From 871f73d9924bc046a90d62fdbc0f74b783cc4630 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Fri, 18 Jul 2025 10:55:43 -0400 Subject: [PATCH 0134/1093] Kevin/add drain sub command for serve services (#16502) * cmd/tailscale/cli: add drain subCommand for serve This commit adds the drain subcommand for serving services. After we merge advertise and serve service as one step, we now need a way to unadvertise service and this is it. Updates tailscale/corp#22954 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * move runServeDrain and some update regarding pr comments Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * some code structure change Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --------- Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_v2.go | 48 ++++++++++++++++++++++++++++++ cmd/tailscale/cli/serve_v2_test.go | 48 ++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 15de0609c72ad..6fa1a1b08c66c 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -203,6 +203,16 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { Exec: e.runServeReset, FlagSet: e.newFlags("serve-reset", nil), }, + { + Name: "drain", + ShortUsage: fmt.Sprintf("tailscale %s drain ", info.Name), + ShortHelp: "Drain a service from the current node", + LongHelp: "Make the current node no longer accept new connections for the specified service.\n" + + "Existing connections will continue to work until they are closed, but no new connections will be accepted.\n" + + "Use this command to gracefully remove a service from the current node without disrupting existing connections.\n" + + 
" should be a service name (e.g., svc:my-service).", + Exec: e.runServeDrain, + }, }, } } @@ -443,6 +453,44 @@ func (e *serveEnv) addServiceToPrefs(ctx context.Context, serviceName string) er return err } +func (e *serveEnv) removeServiceFromPrefs(ctx context.Context, serviceName tailcfg.ServiceName) error { + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return fmt.Errorf("error getting prefs: %w", err) + } + if len(prefs.AdvertiseServices) == 0 { + return nil // nothing to remove + } + initialLen := len(prefs.AdvertiseServices) + prefs.AdvertiseServices = slices.DeleteFunc(prefs.AdvertiseServices, func(s string) bool { return s == serviceName.String() }) + if initialLen == len(prefs.AdvertiseServices) { + return nil // serviceName not advertised + } + _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: prefs.AdvertiseServices, + }, + }) + return err +} + +func (e *serveEnv) runServeDrain(ctx context.Context, args []string) error { + if len(args) == 0 { + return errHelp + } + if len(args) != 1 { + fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") + return errHelp + } + svc := args[0] + svcName := tailcfg.ServiceName(svc) + if err := svcName.Validate(); err != nil { + return fmt.Errorf("invalid service name: %s", err) + } + return e.removeServiceFromPrefs(ctx, svcName) +} + const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" // validateConfig checks if the serve config is valid to serve the type wanted on the port. 
diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index b3e7ea773c698..2ba0b3f8434c8 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -1212,6 +1212,54 @@ func TestAddServiceToPrefs(t *testing.T) { } +func TestRemoveServiceFromPrefs(t *testing.T) { + tests := []struct { + name string + svcName tailcfg.ServiceName + startServices []string + expected []string + }{ + { + name: "remove service from empty prefs", + svcName: "svc:foo", + expected: []string{}, + }, + { + name: "remove existing service from prefs", + svcName: "svc:foo", + startServices: []string{"svc:foo"}, + expected: []string{}, + }, + { + name: "remove service not in prefs", + svcName: "svc:bar", + startServices: []string{"svc:foo"}, + expected: []string{"svc:foo"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lc := &fakeLocalServeClient{} + ctx := t.Context() + lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: tt.startServices, + }, + }) + e := &serveEnv{lc: lc, bg: bgBoolFlag{true, false}} + err := e.removeServiceFromPrefs(ctx, tt.svcName) + if err != nil { + t.Fatalf("removeServiceFromPrefs(%q) returned unexpected error: %v", tt.svcName, err) + } + if !slices.Equal(lc.prefs.AdvertiseServices, tt.expected) { + t.Errorf("removeServiceFromPrefs(%q) = %v, want %v", tt.svcName, lc.prefs.AdvertiseServices, tt.expected) + } + }) + } +} + func TestMessageForPort(t *testing.T) { svcIPMap := tailcfg.ServiceIPMappings{ "svc:foo": []netip.Addr{ From d1ceb62e2726ce0408a8376e22a27656dbb77d7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 17 Jul 2025 09:13:19 -0400 Subject: [PATCH 0135/1093] client/systray: look for ubuntu gnome MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ubuntu gnome has a different name on at least 25.04. 
Updates #1708 Signed-off-by: Claus Lensbøl --- client/systray/systray.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index a87783c06ce5a..76c93ae18e781 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -128,7 +128,7 @@ func init() { desktop := strings.ToLower(os.Getenv("XDG_CURRENT_DESKTOP")) switch desktop { - case "gnome": + case "gnome", "ubuntu:gnome": // GNOME expands submenus downward in the main menu, rather than flyouts to the side. // Either as a result of that or another limitation, there seems to be a maximum depth of submenus. // Mullvad countries that have a city submenu are not being rendered, and so can't be selected. From 6c206fab58fc556b253e78547cc0073ef0c53975 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 18 Jul 2025 10:17:40 -0700 Subject: [PATCH 0136/1093] feature/tpm: try opening /dev/tpmrm0 before /tmp/tpm0 on Linux (#16600) The tpmrm0 is a kernel-managed version of tpm0 that multiplexes multiple concurrent connections. The basic tpm0 can only be accessed by one application at a time, which can be pretty unreliable. 
Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/tpm_linux.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/feature/tpm/tpm_linux.go b/feature/tpm/tpm_linux.go index f2d0f1402c16e..6c8131e8d8a28 100644 --- a/feature/tpm/tpm_linux.go +++ b/feature/tpm/tpm_linux.go @@ -9,5 +9,9 @@ import ( ) func open() (transport.TPMCloser, error) { + tpm, err := linuxtpm.Open("/dev/tpmrm0") + if err == nil { + return tpm, nil + } return linuxtpm.Open("/dev/tpm0") } From e01618a7c4eb5113f17f644b9b2ed8204c23a99b Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Fri, 18 Jul 2025 13:46:03 -0400 Subject: [PATCH 0137/1093] cmd/tailscale/cli: Add clear subcommand for serve services (#16509) * cmd/tailscale/cli: add clear subcommand for serve services This commit adds a clear subcommand for serve command, to remove all config for a passed service. This is a short cut for user to remove services after they drain a service. As an indipendent command it would avoid accidently remove a service on typo. 
Updates tailscale/corp#22954 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * update regarding comments Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * log when clearing a non-existing service but not error Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --------- Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_v2.go | 36 ++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 6fa1a1b08c66c..8832a232d0f4e 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -213,6 +213,13 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { " should be a service name (e.g., svc:my-service).", Exec: e.runServeDrain, }, + { + Name: "clear", + ShortUsage: fmt.Sprintf("tailscale %s clear ", info.Name), + ShortHelp: "Remove all config for a service", + LongHelp: "Remove all handlers configured for the specified service.", + Exec: e.runServeClear, + }, }, } } @@ -486,11 +493,38 @@ func (e *serveEnv) runServeDrain(ctx context.Context, args []string) error { svc := args[0] svcName := tailcfg.ServiceName(svc) if err := svcName.Validate(); err != nil { - return fmt.Errorf("invalid service name: %s", err) + return fmt.Errorf("invalid service name: %w", err) } return e.removeServiceFromPrefs(ctx, svcName) } +func (e *serveEnv) runServeClear(ctx context.Context, args []string) error { + if len(args) == 0 { + return errHelp + } + if len(args) != 1 { + fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") + return errHelp + } + svc := tailcfg.ServiceName(args[0]) + if err := svc.Validate(); err != nil { + return fmt.Errorf("invalid service name: %w", err) + } + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("error getting serve config: %w", err) + } + if _, ok 
:= sc.Services[svc]; !ok { + log.Printf("service %s not found in serve config, nothing to clear", svc) + return nil + } + delete(sc.Services, svc) + if err := e.removeServiceFromPrefs(ctx, svc); err != nil { + return fmt.Errorf("error removing service %s from prefs: %w", svc, err) + } + return e.lc.SetServeConfig(ctx, sc) +} + const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" // validateConfig checks if the serve config is valid to serve the type wanted on the port. From 5adde9e3f3f87cd9ce47832244aad49bcfb96bd8 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Fri, 18 Jul 2025 15:06:09 -0400 Subject: [PATCH 0138/1093] cmd/tailscale/cli: remove advertise command (#16592) This commit removes the advertise command for service. The advertising is now embedded into serve command and unadvertising is moved to drain subcommand Fixes tailscale/corp#22954 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/advertise.go | 76 ---------------------------------- cmd/tailscale/cli/cli.go | 1 - cmd/tailscale/cli/cli_test.go | 2 +- 3 files changed, 1 insertion(+), 78 deletions(-) delete mode 100644 cmd/tailscale/cli/advertise.go diff --git a/cmd/tailscale/cli/advertise.go b/cmd/tailscale/cli/advertise.go deleted file mode 100644 index 83d1a35aa8a14..0000000000000 --- a/cmd/tailscale/cli/advertise.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cli - -import ( - "context" - "flag" - "fmt" - "strings" - - "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/envknob" - "tailscale.com/ipn" - "tailscale.com/tailcfg" -) - -var advertiseArgs struct { - services string // comma-separated list of services to advertise -} - -// TODO(naman): This flag may move to set.go or serve_v2.go after the WIPCode -// envknob is not needed. 
-func advertiseCmd() *ffcli.Command { - if !envknob.UseWIPCode() { - return nil - } - return &ffcli.Command{ - Name: "advertise", - ShortUsage: "tailscale advertise --services=", - ShortHelp: "Advertise this node as a destination for a service", - Exec: runAdvertise, - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("advertise") - fs.StringVar(&advertiseArgs.services, "services", "", "comma-separated services to advertise; each must start with \"svc:\" (e.g. \"svc:idp,svc:nas,svc:database\")") - return fs - })(), - } -} - -func runAdvertise(ctx context.Context, args []string) error { - if len(args) > 0 { - return flag.ErrHelp - } - - services, err := parseServiceNames(advertiseArgs.services) - if err != nil { - return err - } - - _, err = localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ - AdvertiseServicesSet: true, - Prefs: ipn.Prefs{ - AdvertiseServices: services, - }, - }) - return err -} - -// parseServiceNames takes a comma-separated list of service names -// (eg. "svc:hello,svc:webserver,svc:catphotos"), splits them into -// a list and validates each service name. If valid, it returns -// the service names in a slice of strings. -func parseServiceNames(servicesArg string) ([]string, error) { - var services []string - if servicesArg != "" { - services = strings.Split(servicesArg, ",") - for _, svc := range services { - err := tailcfg.ServiceName(svc).Validate() - if err != nil { - return nil, fmt.Errorf("service %q: %s", svc, err) - } - } - } - return services, nil -} diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index d7e8e5ca22dce..bdfc7af423bf4 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -260,7 +260,6 @@ change in the future. 
debugCmd(), driveCmd, idTokenCmd, - advertiseCmd(), configureHostCmd(), ), FlagSet: rootfs, diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 5dd4fa2340360..2e1bec8c9bcb0 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -964,7 +964,7 @@ func TestPrefFlagMapping(t *testing.T) { // flag for this. continue case "AdvertiseServices": - // Handled by the tailscale advertise subcommand, we don't want a + // Handled by the tailscale serve subcommand, we don't want a // CLI flag for this. continue case "InternalExitNodePrior": From f421907c38df057e1b293613644532f31e77b24b Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 21 Jul 2025 11:03:21 +0100 Subject: [PATCH 0139/1093] all-kube: create Tailscale Service for HA kube-apiserver ProxyGroup (#16572) Adds a new reconciler for ProxyGroups of type kube-apiserver that will provision a Tailscale Service for each replica to advertise. Adds two new condition types to the ProxyGroup, TailscaleServiceValid and TailscaleServiceConfigured, to post updates on the state of that reconciler in a way that's consistent with the service-pg reconciler. The created Tailscale Service name is configurable via a new ProxyGroup field spec.kubeAPISserver.ServiceName, which expects a string of the form "svc:". Lots of supporting changes were needed to implement this in a way that's consistent with other operator workflows, including: * Pulled containerboot's ensureServicesUnadvertised and certManager into kube/ libraries to be shared with k8s-proxy. Use those in k8s-proxy to aid Service cert sharing between replicas and graceful Service shutdown. * For certManager, add an initial wait to the cert loop to wait until the domain appears in the devices's netmap to avoid a guaranteed error on the first issue attempt when it's quick to start. 
* Made several methods in ingress-for-pg.go and svc-for-pg.go into functions to share with the new reconciler * Added a Resource struct to the owner refs stored in Tailscale Service annotations to be able to distinguish between Ingress- and ProxyGroup- based Services that need cleaning up in the Tailscale API. * Added a ListVIPServices method to the internal tailscale client to aid cleaning up orphaned Services * Support for reading config from a kube Secret, and partial support for config reloading, to prevent us having to force Pod restarts when config changes. * Fixed up the zap logger so it's possible to set debug log level. Updates #13358 Change-Id: Ia9607441157dd91fb9b6ecbc318eecbef446e116 Signed-off-by: Tom Proctor --- cmd/containerboot/main.go | 3 +- cmd/containerboot/serve.go | 10 +- cmd/k8s-operator/api-server-proxy-pg.go | 479 ++++++++++++++++++ cmd/k8s-operator/api-server-proxy-pg_test.go | 384 ++++++++++++++ .../{proxy.go => api-server-proxy.go} | 0 .../crds/tailscale.com_proxygroups.yaml | 56 +- .../deploy/manifests/operator.yaml | 56 +- cmd/k8s-operator/egress-eps_test.go | 3 +- cmd/k8s-operator/ingress-for-pg.go | 77 +-- cmd/k8s-operator/ingress-for-pg_test.go | 32 +- cmd/k8s-operator/operator.go | 70 ++- cmd/k8s-operator/proxygroup.go | 131 +++-- cmd/k8s-operator/proxygroup_specs.go | 41 +- cmd/k8s-operator/proxygroup_test.go | 162 +++++- cmd/k8s-operator/svc-for-pg.go | 18 +- cmd/k8s-operator/svc-for-pg_test.go | 13 +- cmd/k8s-operator/testutils_test.go | 8 +- cmd/k8s-operator/tsclient.go | 2 + cmd/k8s-proxy/internal/config/config.go | 264 ++++++++++ cmd/k8s-proxy/internal/config/config_test.go | 245 +++++++++ cmd/k8s-proxy/k8s-proxy.go | 268 ++++++++-- internal/client/tailscale/vip_service.go | 28 + ipn/store/kubestore/store_kube.go | 6 +- ipn/store/kubestore/store_kube_test.go | 7 +- k8s-operator/api-proxy/proxy.go | 65 ++- k8s-operator/api.md | 25 +- k8s-operator/apis/v1alpha1/types_connector.go | 3 + .../apis/v1alpha1/types_proxygroup.go | 54 
+- k8s-operator/conditions.go | 10 + {cmd/containerboot => kube/certs}/certs.go | 107 ++-- .../certs}/certs_test.go | 35 +- kube/k8s-proxy/conf/conf.go | 54 +- kube/k8s-proxy/conf/conf_test.go | 9 +- kube/kubetypes/types.go | 6 + kube/localclient/fake-client.go | 35 ++ kube/localclient/local-client.go | 49 ++ .../services}/services.go | 20 +- kube/state/state.go | 16 +- kube/state/state_test.go | 97 ++-- 39 files changed, 2551 insertions(+), 397 deletions(-) create mode 100644 cmd/k8s-operator/api-server-proxy-pg.go create mode 100644 cmd/k8s-operator/api-server-proxy-pg_test.go rename cmd/k8s-operator/{proxy.go => api-server-proxy.go} (100%) create mode 100644 cmd/k8s-proxy/internal/config/config.go create mode 100644 cmd/k8s-proxy/internal/config/config_test.go rename {cmd/containerboot => kube/certs}/certs.go (60%) rename {cmd/containerboot => kube/certs}/certs_test.go (89%) create mode 100644 kube/localclient/fake-client.go create mode 100644 kube/localclient/local-client.go rename {cmd/containerboot => kube/services}/services.go (74%) diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 52b30b8375a4c..49c8a473a596d 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -122,6 +122,7 @@ import ( "tailscale.com/ipn" kubeutils "tailscale.com/k8s-operator" "tailscale.com/kube/kubetypes" + "tailscale.com/kube/services" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/types/ptr" @@ -210,7 +211,7 @@ func run() error { ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) defer cancel() - if err := ensureServicesNotAdvertised(ctx, client); err != nil { + if err := services.EnsureServicesNotAdvertised(ctx, client, log.Printf); err != nil { log.Printf("Error ensuring services are not advertised: %v", err) } diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 37fd497779c75..5fa8e580d5828 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -19,7 
+19,9 @@ import ( "github.com/fsnotify/fsnotify" "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/kube/certs" "tailscale.com/kube/kubetypes" + klc "tailscale.com/kube/localclient" "tailscale.com/types/netmap" ) @@ -52,11 +54,9 @@ func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDom var certDomain string var prevServeConfig *ipn.ServeConfig - var cm certManager + var cm *certs.CertManager if cfg.CertShareMode == "rw" { - cm = certManager{ - lc: lc, - } + cm = certs.NewCertManager(klc.New(lc), log.Printf) } for { select { @@ -93,7 +93,7 @@ func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDom if cfg.CertShareMode != "rw" { continue } - if err := cm.ensureCertLoops(ctx, sc); err != nil { + if err := cm.EnsureCertLoops(ctx, sc); err != nil { log.Fatalf("serve proxy: error ensuring cert loops: %v", err) } } diff --git a/cmd/k8s-operator/api-server-proxy-pg.go b/cmd/k8s-operator/api-server-proxy-pg.go new file mode 100644 index 0000000000000..252859eb37197 --- /dev/null +++ b/cmd/k8s-operator/api-server-proxy-pg.go @@ -0,0 +1,479 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "maps" + "slices" + "strings" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/internal/client/tailscale" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/tstime" +) + +const ( + proxyPGFinalizerName = 
"tailscale.com/kube-apiserver-finalizer" + + // Reasons for KubeAPIServerProxyValid condition. + reasonKubeAPIServerProxyInvalid = "KubeAPIServerProxyInvalid" + reasonKubeAPIServerProxyValid = "KubeAPIServerProxyValid" + + // Reasons for KubeAPIServerProxyConfigured condition. + reasonKubeAPIServerProxyConfigured = "KubeAPIServerProxyConfigured" + reasonKubeAPIServerProxyNoBackends = "KubeAPIServerProxyNoBackends" +) + +// KubeAPIServerTSServiceReconciler reconciles the Tailscale Services required for an +// HA deployment of the API Server Proxy. +type KubeAPIServerTSServiceReconciler struct { + client.Client + recorder record.EventRecorder + logger *zap.SugaredLogger + tsClient tsClient + tsNamespace string + lc localClient + defaultTags []string + operatorID string // stableID of the operator's Tailscale device + + clock tstime.Clock +} + +// Reconcile is the entry point for the controller. +func (r *KubeAPIServerTSServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + logger := r.logger.With("ProxyGroup", req.Name) + logger.Debugf("starting reconcile") + defer logger.Debugf("reconcile finished") + + pg := new(tsapi.ProxyGroup) + err = r.Get(ctx, req.NamespacedName, pg) + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. 
+ logger.Debugf("ProxyGroup not found, assuming it was deleted") + return res, nil + } else if err != nil { + return res, fmt.Errorf("failed to get ProxyGroup: %w", err) + } + + serviceName := serviceNameForAPIServerProxy(pg) + logger = logger.With("Tailscale Service", serviceName) + + if markedForDeletion(pg) { + logger.Debugf("ProxyGroup is being deleted, ensuring any created resources are cleaned up") + if err = r.maybeCleanup(ctx, serviceName, pg, logger); err != nil && strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + return res, nil + } + + return res, err + } + + err = r.maybeProvision(ctx, serviceName, pg, logger) + if err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} + +// maybeProvision ensures that a Tailscale Service for this ProxyGroup exists +// and is up to date. +// +// Returns true if the operation resulted in a Tailscale Service update. +func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (err error) { + var dnsName string + oldPGStatus := pg.Status.DeepCopy() + defer func() { + podsAdvertising, podsErr := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName) + if podsErr != nil { + err = errors.Join(err, fmt.Errorf("failed to get number of advertised Pods: %w", podsErr)) + // Continue, updating the status with the best available information. 
+ } + + // Update the ProxyGroup status with the Tailscale Service information + // Update the condition based on how many pods are advertising the service + conditionStatus := metav1.ConditionFalse + conditionReason := reasonKubeAPIServerProxyNoBackends + conditionMessage := fmt.Sprintf("%d/%d proxy backends ready and advertising", podsAdvertising, pgReplicas(pg)) + + pg.Status.URL = "" + if podsAdvertising > 0 { + // At least one pod is advertising the service, consider it configured + conditionStatus = metav1.ConditionTrue + conditionReason = reasonKubeAPIServerProxyConfigured + if dnsName != "" { + pg.Status.URL = "https://" + dnsName + } + } + + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, conditionStatus, conditionReason, conditionMessage, pg.Generation, r.clock, logger) + + if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) { + // An error encountered here should get returned by the Reconcile function. + err = errors.Join(err, r.Client.Status().Update(ctx, pg)) + } + }() + + if !tsoperator.ProxyGroupAvailable(pg) { + return nil + } + + if !slices.Contains(pg.Finalizers, proxyPGFinalizerName) { + // This log line is printed exactly once during initial provisioning, + // because once the finalizer is in place this block gets skipped. So, + // this is a nice place to tell the operator that the high level, + // multi-reconcile operation is underway. + logger.Info("provisioning Tailscale Service for ProxyGroup") + pg.Finalizers = append(pg.Finalizers, proxyPGFinalizerName) + if err := r.Update(ctx, pg); err != nil { + return fmt.Errorf("failed to add finalizer: %w", err) + } + } + + // 1. Check there isn't a Tailscale Service with the same hostname + // already created and not owned by this ProxyGroup. 
+ existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + if isErrorFeatureFlagNotEnabled(err) { + logger.Warn(msgFeatureFlagNotEnabled) + r.recorder.Event(pg, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, msgFeatureFlagNotEnabled, pg.Generation, r.clock, logger) + return nil + } + if err != nil && !isErrorTailscaleServiceNotFound(err) { + return fmt.Errorf("error getting Tailscale Service %q: %w", serviceName, err) + } + + updatedAnnotations, err := exclusiveOwnerAnnotations(pg, r.operatorID, existingTSSvc) + if err != nil { + const instr = "To proceed, you can either manually delete the existing Tailscale Service or choose a different Service name in the ProxyGroup's spec.kubeAPIServer.serviceName field" + msg := fmt.Sprintf("error ensuring exclusive ownership of Tailscale Service %s: %v. %s", serviceName, err, instr) + logger.Warn(msg) + r.recorder.Event(pg, corev1.EventTypeWarning, "InvalidTailscaleService", msg) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, msg, pg.Generation, r.clock, logger) + return nil + } + + // After getting this far, we know the Tailscale Service is valid. + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, reasonKubeAPIServerProxyValid, pg.Generation, r.clock, logger) + + // Service tags are limited to matching the ProxyGroup's tags until we have + // support for querying peer caps for a Service-bound request. 
+ serviceTags := r.defaultTags + if len(pg.Spec.Tags) > 0 { + serviceTags = pg.Spec.Tags.Stringify() + } + + tsSvc := &tailscale.VIPService{ + Name: serviceName, + Tags: serviceTags, + Ports: []string{"tcp:443"}, + Comment: managedTSServiceComment, + Annotations: updatedAnnotations, + } + if existingTSSvc != nil { + tsSvc.Addrs = existingTSSvc.Addrs + } + + // 2. Ensure the Tailscale Service exists and is up to date. + if existingTSSvc == nil || + !slices.Equal(tsSvc.Tags, existingTSSvc.Tags) || + !ownersAreSetAndEqual(tsSvc, existingTSSvc) || + !slices.Equal(tsSvc.Ports, existingTSSvc.Ports) { + logger.Infof("Ensuring Tailscale Service exists and is up to date") + if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + return fmt.Errorf("error creating Tailscale Service: %w", err) + } + } + + // 3. Ensure that TLS Secret and RBAC exists. + tcd, err := tailnetCertDomain(ctx, r.lc) + if err != nil { + return fmt.Errorf("error determining DNS name base: %w", err) + } + dnsName = serviceName.WithoutPrefix() + "." + tcd + if err = r.ensureCertResources(ctx, pg, dnsName); err != nil { + return fmt.Errorf("error ensuring cert resources: %w", err) + } + + // 4. Configure the Pods to advertise the Tailscale Service. + if err = r.maybeAdvertiseServices(ctx, pg, serviceName, logger); err != nil { + return fmt.Errorf("error updating advertised Tailscale Services: %w", err) + } + + // 5. Clean up any stale Tailscale Services from previous resource versions. + if err = r.maybeDeleteStaleServices(ctx, pg, logger); err != nil { + return fmt.Errorf("failed to delete stale Tailscale Services: %w", err) + } + + return nil +} + +// maybeCleanup ensures that any resources, such as a Tailscale Service created for this Service, are cleaned up when the +// Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only +// deleted if it does not contain any other owner references. 
If it does, the cleanup only removes the owner reference +// corresponding to this Service. +func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (err error) { + ix := slices.Index(pg.Finalizers, proxyPGFinalizerName) + if ix < 0 { + logger.Debugf("no finalizer, nothing to do") + return nil + } + logger.Infof("Ensuring that Service %q is cleaned up", serviceName) + + defer func() { + if err == nil { + err = r.deleteFinalizer(ctx, pg, logger) + } + }() + + if _, err = cleanupTailscaleService(ctx, r.tsClient, serviceName, r.operatorID, logger); err != nil { + return fmt.Errorf("error deleting Tailscale Service: %w", err) + } + + if err = cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg.Name, serviceName); err != nil { + return fmt.Errorf("failed to clean up cert resources: %w", err) + } + + return nil +} + +// maybeDeleteStaleServices deletes Services that have previously been created for +// this ProxyGroup but are no longer needed. 
+func (r *KubeAPIServerTSServiceReconciler) maybeDeleteStaleServices(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) error { + serviceName := serviceNameForAPIServerProxy(pg) + + svcs, err := r.tsClient.ListVIPServices(ctx) + if err != nil { + return fmt.Errorf("error listing Tailscale Services: %w", err) + } + + for _, svc := range svcs.VIPServices { + if svc.Name == serviceName { + continue + } + + owners, err := parseOwnerAnnotation(&svc) + if err != nil { + logger.Warnf("error parsing owner annotation for Tailscale Service %s: %v", svc.Name, err) + continue + } + if owners == nil || len(owners.OwnerRefs) != 1 || owners.OwnerRefs[0].OperatorID != r.operatorID { + continue + } + + owner := owners.OwnerRefs[0] + if owner.Resource == nil || owner.Resource.Kind != "ProxyGroup" || owner.Resource.UID != string(pg.UID) { + continue + } + + logger.Infof("Deleting Tailscale Service %s", svc.Name) + if err := r.tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { + return fmt.Errorf("error deleting Tailscale Service %s: %w", svc.Name, err) + } + + if err = cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg.Name, svc.Name); err != nil { + return fmt.Errorf("failed to clean up cert resources: %w", err) + } + } + + return nil +} + +func (r *KubeAPIServerTSServiceReconciler) deleteFinalizer(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) error { + pg.Finalizers = slices.DeleteFunc(pg.Finalizers, func(f string) bool { + return f == proxyPGFinalizerName + }) + logger.Debugf("ensure %q finalizer is removed", proxyPGFinalizerName) + + if err := r.Update(ctx, pg); err != nil { + return fmt.Errorf("failed to remove finalizer %q: %w", proxyPGFinalizerName, err) + } + return nil +} + +func (r *KubeAPIServerTSServiceReconciler) ensureCertResources(ctx context.Context, pg *tsapi.ProxyGroup, domain string) error { + secret := certSecret(pg.Name, r.tsNamespace, domain, pg) + if _, err := 
createOrUpdate(ctx, r.Client, r.tsNamespace, secret, func(s *corev1.Secret) { + s.Labels = secret.Labels + }); err != nil { + return fmt.Errorf("failed to create or update Secret %s: %w", secret.Name, err) + } + role := certSecretRole(pg.Name, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { + r.Labels = role.Labels + r.Rules = role.Rules + }); err != nil { + return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err) + } + rolebinding := certSecretRoleBinding(pg, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rolebinding, func(rb *rbacv1.RoleBinding) { + rb.Labels = rolebinding.Labels + rb.Subjects = rolebinding.Subjects + rb.RoleRef = rolebinding.RoleRef + }); err != nil { + return fmt.Errorf("failed to create or update RoleBinding %s: %w", rolebinding.Name, err) + } + return nil +} + +func (r *KubeAPIServerTSServiceReconciler) maybeAdvertiseServices(ctx context.Context, pg *tsapi.ProxyGroup, serviceName tailcfg.ServiceName, logger *zap.SugaredLogger) error { + // Get all config Secrets for this ProxyGroup + cfgSecrets := &corev1.SecretList{} + if err := r.List(ctx, cfgSecrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig))); err != nil { + return fmt.Errorf("failed to list config Secrets: %w", err) + } + + // Only advertise a Tailscale Service once the TLS certs required for + // serving it are available. + shouldBeAdvertised, err := hasCerts(ctx, r.Client, r.lc, r.tsNamespace, serviceName) + if err != nil { + return fmt.Errorf("error checking TLS credentials provisioned for Tailscale Service %q: %w", serviceName, err) + } + var advertiseServices []string + if shouldBeAdvertised { + advertiseServices = []string{serviceName.String()} + } + + for _, s := range cfgSecrets.Items { + if len(s.Data[kubetypes.KubeAPIServerConfigFile]) == 0 { + continue + } + + // Parse the existing config. 
+ cfg, err := conf.Load(s.Data[kubetypes.KubeAPIServerConfigFile]) + if err != nil { + return fmt.Errorf("error loading config from Secret %q: %w", s.Name, err) + } + + if cfg.Parsed.APIServerProxy == nil { + return fmt.Errorf("config Secret %q does not contain APIServerProxy config", s.Name) + } + + existingCfgSecret := s.DeepCopy() + + var updated bool + if cfg.Parsed.APIServerProxy.ServiceName == nil || *cfg.Parsed.APIServerProxy.ServiceName != serviceName { + cfg.Parsed.APIServerProxy.ServiceName = &serviceName + updated = true + } + + // Update the services to advertise if required. + if !slices.Equal(cfg.Parsed.AdvertiseServices, advertiseServices) { + cfg.Parsed.AdvertiseServices = advertiseServices + updated = true + } + + if !updated { + continue + } + + // Update the config Secret. + cfgB, err := json.Marshal(conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &cfg.Parsed, + }) + if err != nil { + return err + } + + s.Data[kubetypes.KubeAPIServerConfigFile] = cfgB + if !apiequality.Semantic.DeepEqual(existingCfgSecret, s) { + logger.Debugf("Updating the Tailscale Services in ProxyGroup config Secret %s", s.Name) + if err := r.Update(ctx, &s); err != nil { + return err + } + } + } + + return nil +} + +func serviceNameForAPIServerProxy(pg *tsapi.ProxyGroup) tailcfg.ServiceName { + if pg.Spec.KubeAPIServer != nil && pg.Spec.KubeAPIServer.Hostname != "" { + return tailcfg.ServiceName("svc:" + pg.Spec.KubeAPIServer.Hostname) + } + + return tailcfg.ServiceName("svc:" + pg.Name) +} + +// exclusiveOwnerAnnotations returns the updated annotations required to ensure this +// instance of the operator is the exclusive owner. If the Tailscale Service is not +// nil, but does not contain an owner reference we return an error as this likely means +// that the Service was created by something other than a Tailscale Kubernetes operator. 
+// We also error if it is already owned by another operator instance, as we do not +// want to load balance a kube-apiserver ProxyGroup across multiple clusters. +func exclusiveOwnerAnnotations(pg *tsapi.ProxyGroup, operatorID string, svc *tailscale.VIPService) (map[string]string, error) { + ref := OwnerRef{ + OperatorID: operatorID, + Resource: &Resource{ + Kind: "ProxyGroup", + Name: pg.Name, + UID: string(pg.UID), + }, + } + if svc == nil { + c := ownerAnnotationValue{OwnerRefs: []OwnerRef{ref}} + json, err := json.Marshal(c) + if err != nil { + return nil, fmt.Errorf("[unexpected] unable to marshal Tailscale Service's owner annotation contents: %w, please report this", err) + } + return map[string]string{ + ownerAnnotation: string(json), + }, nil + } + o, err := parseOwnerAnnotation(svc) + if err != nil { + return nil, err + } + if o == nil || len(o.OwnerRefs) == 0 { + return nil, fmt.Errorf("Tailscale Service %s exists, but does not contain owner annotation with owner references; not proceeding as this is likely a resource created by something other than the Tailscale Kubernetes operator", svc.Name) + } + if len(o.OwnerRefs) > 1 || o.OwnerRefs[0].OperatorID != operatorID { + return nil, fmt.Errorf("Tailscale Service %s is already owned by other operator(s) and cannot be shared across multiple clusters; configure a difference Service name to continue", svc.Name) + } + if o.OwnerRefs[0].Resource == nil { + return nil, fmt.Errorf("Tailscale Service %s exists, but does not reference an owning resource; not proceeding as this is likely a Service already owned by an Ingress", svc.Name) + } + if o.OwnerRefs[0].Resource.Kind != "ProxyGroup" || o.OwnerRefs[0].Resource.UID != string(pg.UID) { + return nil, fmt.Errorf("Tailscale Service %s is already owned by another resource: %#v; configure a difference Service name to continue", svc.Name, o.OwnerRefs[0].Resource) + } + if o.OwnerRefs[0].Resource.Name != pg.Name { + // ProxyGroup name can be updated in place. 
+ o.OwnerRefs[0].Resource.Name = pg.Name + } + + oBytes, err := json.Marshal(o) + if err != nil { + return nil, err + } + + newAnnots := make(map[string]string, len(svc.Annotations)+1) + maps.Copy(newAnnots, svc.Annotations) + newAnnots[ownerAnnotation] = string(oBytes) + + return newAnnots, nil +} diff --git a/cmd/k8s-operator/api-server-proxy-pg_test.go b/cmd/k8s-operator/api-server-proxy-pg_test.go new file mode 100644 index 0000000000000..dfef63f22ff04 --- /dev/null +++ b/cmd/k8s-operator/api-server-proxy-pg_test.go @@ -0,0 +1,384 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "encoding/json" + "reflect" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn/ipnstate" + tsoperator "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" + "tailscale.com/tstest" + "tailscale.com/types/opt" + "tailscale.com/types/ptr" +) + +func TestAPIServerProxyReconciler(t *testing.T) { + const ( + pgName = "test-pg" + pgUID = "test-pg-uid" + ns = "operator-ns" + defaultDomain = "test-pg.ts.net" + ) + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgName, + Generation: 1, + UID: pgUID, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + }, + Status: tsapi.ProxyGroupStatus{ + Conditions: []metav1.Condition{ + { + Type: string(tsapi.ProxyGroupAvailable), + Status: metav1.ConditionTrue, + ObservedGeneration: 1, + }, + }, + }, + } + initialCfg := &conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("test-key"), + 
APIServerProxy: &conf.APIServerProxyConfig{ + Enabled: opt.NewBool(true), + }, + }, + } + expectedCfg := *initialCfg + initialCfgB, err := json.Marshal(initialCfg) + if err != nil { + t.Fatalf("marshaling initial config: %v", err) + } + pgCfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pgName, 0), + Namespace: ns, + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig), + }, + Data: map[string][]byte{ + // Existing config should be preserved. + kubetypes.KubeAPIServerConfigFile: initialCfgB, + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg, pgCfgSecret). + WithStatusSubresource(pg). + Build() + expectCfg := func(c *conf.VersionedConfig) { + t.Helper() + cBytes, err := json.Marshal(c) + if err != nil { + t.Fatalf("marshaling expected config: %v", err) + } + pgCfgSecret.Data[kubetypes.KubeAPIServerConfigFile] = cBytes + expectEqual(t, fc, pgCfgSecret) + } + + ft := &fakeTSClient{} + ingressTSSvc := &tailscale.VIPService{ + Name: "svc:some-ingress-hostname", + Comment: managedTSServiceComment, + Annotations: map[string]string{ + // No resource field. + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`, + }, + Ports: []string{"tcp:443"}, + Tags: []string{"tag:k8s"}, + Addrs: []string{"5.6.7.8"}, + } + ft.CreateOrUpdateVIPService(t.Context(), ingressTSSvc) + + lc := &fakeLocalClient{ + status: &ipnstate.Status{ + CurrentTailnet: &ipnstate.TailnetStatus{ + MagicDNSSuffix: "ts.net", + }, + }, + } + + r := &KubeAPIServerTSServiceReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + tsNamespace: ns, + logger: zap.Must(zap.NewDevelopment()).Sugar(), + recorder: record.NewFakeRecorder(10), + lc: lc, + clock: tstest.NewClock(tstest.ClockOpts{}), + operatorID: "self-id", + } + + // Create a Tailscale Service that will conflict with the initial config. 
+ if err := ft.CreateOrUpdateVIPService(t.Context(), &tailscale.VIPService{ + Name: "svc:" + pgName, + }); err != nil { + t.Fatalf("creating initial Tailscale Service: %v", err) + } + expectReconciled(t, r, "", pgName) + pg.ObjectMeta.Finalizers = []string{proxyPGFinalizerName} + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, "", 1, r.clock, r.logger) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + expectMissing[corev1.Secret](t, fc, ns, defaultDomain) + expectMissing[rbacv1.Role](t, fc, ns, defaultDomain) + expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain) + expectEqual(t, fc, pgCfgSecret) // Unchanged. + + // Delete Tailscale Service; should see Service created and valid condition updated to true. + if err := ft.DeleteVIPService(t.Context(), "svc:"+pgName); err != nil { + t.Fatalf("deleting initial Tailscale Service: %v", err) + } + expectReconciled(t, r, "", pgName) + + tsSvc, err := ft.GetVIPService(t.Context(), "svc:"+pgName) + if err != nil { + t.Fatalf("getting Tailscale Service: %v", err) + } + if tsSvc == nil { + t.Fatalf("expected Tailscale Service to be created, but got nil") + } + expectedTSSvc := &tailscale.VIPService{ + Name: "svc:" + pgName, + Comment: managedTSServiceComment, + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"test-pg","uid":"test-pg-uid"}}]}`, + }, + Ports: []string{"tcp:443"}, + Tags: []string{"tag:k8s"}, + Addrs: []string{"5.6.7.8"}, + } + if !reflect.DeepEqual(tsSvc, expectedTSSvc) { + t.Fatalf("expected Tailscale Service to be %+v, got %+v", expectedTSSvc, tsSvc) + } + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, 
r.clock, r.logger) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + expectedCfg.APIServerProxy.ServiceName = ptr.To(tailcfg.ServiceName("svc:" + pgName)) + expectCfg(&expectedCfg) + + expectEqual(t, fc, certSecret(pgName, ns, defaultDomain, pg)) + expectEqual(t, fc, certSecretRole(pgName, ns, defaultDomain)) + expectEqual(t, fc, certSecretRoleBinding(pg, ns, defaultDomain)) + + // Simulate certs being issued; should observe AdvertiseServices config change. + if err := populateTLSSecret(t.Context(), fc, pgName, defaultDomain); err != nil { + t.Fatalf("populating TLS Secret: %v", err) + } + expectReconciled(t, r, "", pgName) + + expectedCfg.AdvertiseServices = []string{"svc:" + pgName} + expectCfg(&expectedCfg) + + expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Unchanged status. + + // Simulate Pod prefs updated with advertised services; should see Configured condition updated to true. + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-0", + Namespace: ns, + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeState), + }, + Data: map[string][]byte{ + "_current-profile": []byte("profile-foo"), + "profile-foo": []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`), + }, + }) + expectReconciled(t, r, "", pgName) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.logger) + pg.Status.URL = "https://" + defaultDomain + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Rename the Tailscale Service - old one + cert resources should be cleaned up. 
+ updatedServiceName := tailcfg.ServiceName("svc:test-pg-renamed") + updatedDomain := "test-pg-renamed.ts.net" + pg.Spec.KubeAPIServer = &tsapi.KubeAPIServerConfig{ + Hostname: updatedServiceName.WithoutPrefix(), + } + mustUpdate(t, fc, "", pgName, func(p *tsapi.ProxyGroup) { + p.Spec.KubeAPIServer = pg.Spec.KubeAPIServer + }) + expectReconciled(t, r, "", pgName) + _, err = ft.GetVIPService(t.Context(), "svc:"+pgName) + if !isErrorTailscaleServiceNotFound(err) { + t.Fatalf("Expected 404, got: %v", err) + } + tsSvc, err = ft.GetVIPService(t.Context(), updatedServiceName) + if err != nil { + t.Fatalf("Expected renamed svc, got error: %v", err) + } + expectedTSSvc.Name = updatedServiceName + if !reflect.DeepEqual(tsSvc, expectedTSSvc) { + t.Fatalf("expected Tailscale Service to be %+v, got %+v", expectedTSSvc, tsSvc) + } + // Check cfg and status reset until TLS certs are available again. + expectedCfg.APIServerProxy.ServiceName = ptr.To(updatedServiceName) + expectedCfg.AdvertiseServices = nil + expectCfg(&expectedCfg) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) + pg.Status.URL = "" + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + expectEqual(t, fc, certSecret(pgName, ns, updatedDomain, pg)) + expectEqual(t, fc, certSecretRole(pgName, ns, updatedDomain)) + expectEqual(t, fc, certSecretRoleBinding(pg, ns, updatedDomain)) + expectMissing[corev1.Secret](t, fc, ns, defaultDomain) + expectMissing[rbacv1.Role](t, fc, ns, defaultDomain) + expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain) + + // Check we get the new hostname in the status once ready. 
+ if err := populateTLSSecret(t.Context(), fc, pgName, updatedDomain); err != nil { + t.Fatalf("populating TLS Secret: %v", err) + } + mustUpdate(t, fc, "operator-ns", "test-pg-0", func(s *corev1.Secret) { + s.Data["profile-foo"] = []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`) + }) + expectReconciled(t, r, "", pgName) + expectedCfg.AdvertiseServices = []string{updatedServiceName.String()} + expectCfg(&expectedCfg) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.logger) + pg.Status.URL = "https://" + updatedDomain + + // Delete the ProxyGroup and verify Tailscale Service and cert resources are cleaned up. + if err := fc.Delete(t.Context(), pg); err != nil { + t.Fatalf("deleting ProxyGroup: %v", err) + } + expectReconciled(t, r, "", pgName) + expectMissing[corev1.Secret](t, fc, ns, updatedDomain) + expectMissing[rbacv1.Role](t, fc, ns, updatedDomain) + expectMissing[rbacv1.RoleBinding](t, fc, ns, updatedDomain) + _, err = ft.GetVIPService(t.Context(), updatedServiceName) + if !isErrorTailscaleServiceNotFound(err) { + t.Fatalf("Expected 404, got: %v", err) + } + + // Ingress Tailscale Service should not be affected. 
+ svc, err := ft.GetVIPService(t.Context(), ingressTSSvc.Name) + if err != nil { + t.Fatalf("getting ingress Tailscale Service: %v", err) + } + if !reflect.DeepEqual(svc, ingressTSSvc) { + t.Fatalf("expected ingress Tailscale Service to be unmodified %+v, got %+v", ingressTSSvc, svc) + } +} + +func TestExclusiveOwnerAnnotations(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pg1", + UID: "pg1-uid", + }, + } + const ( + selfOperatorID = "self-id" + pg1Owner = `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"pg1","uid":"pg1-uid"}}]}` + ) + + for name, tc := range map[string]struct { + svc *tailscale.VIPService + wantErr string + }{ + "no_svc": { + svc: nil, + }, + "empty_svc": { + svc: &tailscale.VIPService{}, + wantErr: "likely a resource created by something other than the Tailscale Kubernetes operator", + }, + "already_owner": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: pg1Owner, + }, + }, + }, + "already_owner_name_updated": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"old-pg1-name","uid":"pg1-uid"}}]}`, + }, + }, + }, + "preserves_existing_annotations": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + "existing": "annotation", + ownerAnnotation: pg1Owner, + }, + }, + }, + "owned_by_another_operator": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"}]}`, + }, + }, + wantErr: "already owned by other operator(s)", + }, + "owned_by_an_ingress": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id"}]}`, // Ingress doesn't set Resource field (yet). 
+ }, + }, + wantErr: "does not reference an owning resource", + }, + "owned_by_another_pg": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"pg2","uid":"pg2-uid"}}]}`, + }, + }, + wantErr: "already owned by another resource", + }, + } { + t.Run(name, func(t *testing.T) { + got, err := exclusiveOwnerAnnotations(pg, "self-id", tc.svc) + if tc.wantErr != "" { + if !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("exclusiveOwnerAnnotations() error = %v, wantErr %v", err, tc.wantErr) + } + } else if diff := cmp.Diff(pg1Owner, got[ownerAnnotation]); diff != "" { + t.Errorf("exclusiveOwnerAnnotations() mismatch (-want +got):\n%s", diff) + } + if tc.svc == nil { + return // Don't check annotations being preserved. + } + for k, v := range tc.svc.Annotations { + if k == ownerAnnotation { + continue + } + if got[k] != v { + t.Errorf("exclusiveOwnerAnnotations() did not preserve annotation %q: got %q, want %q", k, got[k], v) + } + } + }) + } +} + +func omitPGStatusConditionMessages(p *tsapi.ProxyGroup) { + for i := range p.Status.Conditions { + // Don't bother validating the message. + p.Status.Conditions[i].Message = "" + } +} diff --git a/cmd/k8s-operator/proxy.go b/cmd/k8s-operator/api-server-proxy.go similarity index 100% rename from cmd/k8s-operator/proxy.go rename to cmd/k8s-operator/api-server-proxy.go diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 06c8479252873..98ca1c378ab8d 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -20,6 +20,10 @@ spec: jsonPath: .status.conditions[?(@.type == "ProxyGroupReady")].reason name: Status type: string + - description: URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if any. 
Only applies to ProxyGroups of type kube-apiserver. + jsonPath: .status.url + name: URL + type: string - description: ProxyGroup type. jsonPath: .spec.type name: Type @@ -32,15 +36,22 @@ spec: openAPIV3Schema: description: |- ProxyGroup defines a set of Tailscale devices that will act as proxies. - Currently only egress ProxyGroups are supported. + Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver + proxies. In addition to running a highly available set of proxies, ingress + and egress ProxyGroups also allow for serving many annotated Services from a + single set of proxies to minimise resource consumption. + + For ingress and egress, use the tailscale.com/proxy-group annotation on a + Service to specify that the proxy should be implemented by a ProxyGroup + instead of a single dedicated proxy. - Use the tailscale.com/proxy-group annotation on a Service to specify that - the egress proxy should be implemented by a ProxyGroup instead of a single - dedicated proxy. In addition to running a highly available set of proxies, - ProxyGroup also allows for serving many annotated Services from a single - set of proxies to minimise resource consumption. + More info: + * https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + * https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress - More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + For kube-apiserver, the ProxyGroup is a standalone resource. Use the + spec.kubeAPIServer field to configure options specific to the kube-apiserver + ProxyGroup type. type: object required: - spec @@ -83,6 +94,14 @@ spec: ProxyGroup type. This field is only used when Type is set to "kube-apiserver". type: object properties: + hostname: + description: |- + Hostname is the hostname with which to expose the Kubernetes API server + proxies. Must be a valid DNS label no longer than 63 characters. If not + specified, the name of the ProxyGroup is used as the hostname. 
Must be + unique across the whole tailnet. + type: string + pattern: ^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$ mode: description: |- Mode to run the API server proxy in. Supported modes are auth and noauth. @@ -141,10 +160,20 @@ spec: conditions: description: |- List of status conditions to indicate the status of the ProxyGroup - resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. - `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled - and ready. `ProxyGroupAvailable` indicates that at least one proxy is - ready to serve traffic. + resources. Known condition types include `ProxyGroupReady` and + `ProxyGroupAvailable`. + + * `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and + all expected conditions are true. + * `ProxyGroupAvailable` indicates that at least one proxy is ready to + serve traffic. + + For ProxyGroups of type kube-apiserver, there are two additional conditions: + + * `KubeAPIServerProxyConfigured` indicates that at least one API server + proxy is configured and ready to serve traffic. + * `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is + valid. type: array items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -231,6 +260,11 @@ spec: x-kubernetes-list-map-keys: - hostname x-kubernetes-list-type: map + url: + description: |- + URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if + any. Only applies to ProxyGroups of type kube-apiserver. 
+ type: string served: true storage: true subresources: diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index ff3705cb343ff..ac8143e98c22b 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -2873,6 +2873,10 @@ spec: jsonPath: .status.conditions[?(@.type == "ProxyGroupReady")].reason name: Status type: string + - description: URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if any. Only applies to ProxyGroups of type kube-apiserver. + jsonPath: .status.url + name: URL + type: string - description: ProxyGroup type. jsonPath: .spec.type name: Type @@ -2885,15 +2889,22 @@ spec: openAPIV3Schema: description: |- ProxyGroup defines a set of Tailscale devices that will act as proxies. - Currently only egress ProxyGroups are supported. + Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver + proxies. In addition to running a highly available set of proxies, ingress + and egress ProxyGroups also allow for serving many annotated Services from a + single set of proxies to minimise resource consumption. + + For ingress and egress, use the tailscale.com/proxy-group annotation on a + Service to specify that the proxy should be implemented by a ProxyGroup + instead of a single dedicated proxy. - Use the tailscale.com/proxy-group annotation on a Service to specify that - the egress proxy should be implemented by a ProxyGroup instead of a single - dedicated proxy. In addition to running a highly available set of proxies, - ProxyGroup also allows for serving many annotated Services from a single - set of proxies to minimise resource consumption. 
+ More info: + * https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + * https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress - More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress + For kube-apiserver, the ProxyGroup is a standalone resource. Use the + spec.kubeAPIServer field to configure options specific to the kube-apiserver + ProxyGroup type. properties: apiVersion: description: |- @@ -2929,6 +2940,14 @@ spec: KubeAPIServer contains configuration specific to the kube-apiserver ProxyGroup type. This field is only used when Type is set to "kube-apiserver". properties: + hostname: + description: |- + Hostname is the hostname with which to expose the Kubernetes API server + proxies. Must be a valid DNS label no longer than 63 characters. If not + specified, the name of the ProxyGroup is used as the hostname. Must be + unique across the whole tailnet. + pattern: ^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$ + type: string mode: description: |- Mode to run the API server proxy in. Supported modes are auth and noauth. @@ -2990,10 +3009,20 @@ spec: conditions: description: |- List of status conditions to indicate the status of the ProxyGroup - resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. - `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled - and ready. `ProxyGroupAvailable` indicates that at least one proxy is - ready to serve traffic. + resources. Known condition types include `ProxyGroupReady` and + `ProxyGroupAvailable`. + + * `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and + all expected conditions are true. + * `ProxyGroupAvailable` indicates that at least one proxy is ready to + serve traffic. + + For ProxyGroups of type kube-apiserver, there are two additional conditions: + + * `KubeAPIServerProxyConfigured` indicates that at least one API server + proxy is configured and ready to serve traffic. 
+ * `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is + valid. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -3080,6 +3109,11 @@ spec: x-kubernetes-list-map-keys: - hostname x-kubernetes-list-type: map + url: + description: |- + URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if + any. Only applies to ProxyGroups of type kube-apiserver. + type: string type: object required: - spec diff --git a/cmd/k8s-operator/egress-eps_test.go b/cmd/k8s-operator/egress-eps_test.go index bd81071cb5e4f..bd80112aeb8a2 100644 --- a/cmd/k8s-operator/egress-eps_test.go +++ b/cmd/k8s-operator/egress-eps_test.go @@ -20,6 +20,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/egressservices" + "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/util/mak" ) @@ -200,7 +201,7 @@ func podAndSecretForProxyGroup(pg string) (*corev1.Pod, *corev1.Secret) { ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-0", pg), Namespace: "operator-ns", - Labels: pgSecretLabels(pg, "state"), + Labels: pgSecretLabels(pg, kubetypes.LabelSecretTypeState), }, } return p, s diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index aaf22d471353f..3afeb528f7f8f 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -248,7 +248,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, nil } // 3. Ensure that TLS Secret and RBAC exists - tcd, err := r.tailnetCertDomain(ctx) + tcd, err := tailnetCertDomain(ctx, r.lc) if err != nil { return false, fmt.Errorf("error determining DNS name base: %w", err) } @@ -358,7 +358,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin } // 6. Update Ingress status if ProxyGroup Pods are ready. 
- count, err := r.numberPodsAdvertising(ctx, pg.Name, serviceName) + count, err := numberPodsAdvertising(ctx, r.Client, r.tsNamespace, pg.Name, serviceName) if err != nil { return false, fmt.Errorf("failed to check if any Pods are configured: %w", err) } @@ -370,7 +370,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin ing.Status.LoadBalancer.Ingress = nil default: var ports []networkingv1.IngressPortStatus - hasCerts, err := r.hasCerts(ctx, serviceName) + hasCerts, err := hasCerts(ctx, r.Client, r.lc, r.tsNamespace, serviceName) if err != nil { return false, fmt.Errorf("error checking TLS credentials provisioned for Ingress: %w", err) } @@ -481,7 +481,7 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG delete(cfg.Services, tsSvcName) serveConfigChanged = true } - if err := r.cleanupCertResources(ctx, proxyGroupName, tsSvcName); err != nil { + if err := cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, proxyGroupName, tsSvcName); err != nil { return false, fmt.Errorf("failed to clean up cert resources: %w", err) } } @@ -557,7 +557,7 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } // 3. Clean up any cluster resources - if err := r.cleanupCertResources(ctx, pg, serviceName); err != nil { + if err := cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg, serviceName); err != nil { return false, fmt.Errorf("failed to clean up cert resources: %w", err) } @@ -634,8 +634,8 @@ type localClient interface { } // tailnetCertDomain returns the base domain (TCD) of the current tailnet. 
-func (r *HAIngressReconciler) tailnetCertDomain(ctx context.Context) (string, error) { - st, err := r.lc.StatusWithoutPeers(ctx) +func tailnetCertDomain(ctx context.Context, lc localClient) (string, error) { + st, err := lc.StatusWithoutPeers(ctx) if err != nil { return "", fmt.Errorf("error getting tailscale status: %w", err) } @@ -761,7 +761,7 @@ const ( func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, mode serviceAdvertisementMode, logger *zap.SugaredLogger) (err error) { // Get all config Secrets for this ProxyGroup. secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil { + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig))); err != nil { return fmt.Errorf("failed to list config Secrets: %w", err) } @@ -773,7 +773,7 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con // The only exception is Ingresses with an HTTP endpoint enabled - if an // Ingress has an HTTP endpoint enabled, it will be advertised even if the // TLS cert is not yet provisioned. - hasCert, err := a.hasCerts(ctx, serviceName) + hasCert, err := hasCerts(ctx, a.Client, a.lc, a.tsNamespace, serviceName) if err != nil { return fmt.Errorf("error checking TLS credentials provisioned for service %q: %w", serviceName, err) } @@ -822,10 +822,10 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return nil } -func (a *HAIngressReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { +func numberPodsAdvertising(ctx context.Context, cl client.Client, tsNamespace, pgName string, serviceName tailcfg.ServiceName) (int, error) { // Get all state Secrets for this ProxyGroup. 
secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "state"))); err != nil { + if err := cl.List(ctx, secrets, client.InNamespace(tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeState))); err != nil { return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err) } @@ -859,7 +859,14 @@ type ownerAnnotationValue struct { // Kubernetes operator instance. type OwnerRef struct { // OperatorID is the stable ID of the operator's Tailscale device. - OperatorID string `json:"operatorID,omitempty"` + OperatorID string `json:"operatorID,omitempty"` + Resource *Resource `json:"resource,omitempty"` // optional, used to identify the ProxyGroup that owns this Tailscale Service. +} + +type Resource struct { + Kind string `json:"kind,omitempty"` // "ProxyGroup" + Name string `json:"name,omitempty"` // Name of the ProxyGroup that owns this Tailscale Service. Informational only. + UID string `json:"uid,omitempty"` // UID of the ProxyGroup that owns this Tailscale Service. 
} // ownerAnnotations returns the updated annotations required to ensure this @@ -891,6 +898,9 @@ func ownerAnnotations(operatorID string, svc *tailscale.VIPService) (map[string] if slices.Contains(o.OwnerRefs, ref) { // up to date return svc.Annotations, nil } + if o.OwnerRefs[0].Resource != nil { + return nil, fmt.Errorf("Tailscale Service %s is owned by another resource: %#v; cannot be reused for an Ingress", svc.Name, o.OwnerRefs[0].Resource) + } o.OwnerRefs = append(o.OwnerRefs, ref) json, err := json.Marshal(o) if err != nil { @@ -949,7 +959,7 @@ func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi }); err != nil { return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err) } - rolebinding := certSecretRoleBinding(pg.Name, r.tsNamespace, domain) + rolebinding := certSecretRoleBinding(pg, r.tsNamespace, domain) if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rolebinding, func(rb *rbacv1.RoleBinding) { // Labels and subjects might have changed if the Ingress has been updated to use a // different ProxyGroup. @@ -963,19 +973,19 @@ func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi // cleanupCertResources ensures that the TLS Secret and associated RBAC // resources that allow proxies to read/write to the Secret are deleted. 
-func (r *HAIngressReconciler) cleanupCertResources(ctx context.Context, pgName string, name tailcfg.ServiceName) error { - domainName, err := r.dnsNameForService(ctx, tailcfg.ServiceName(name)) +func cleanupCertResources(ctx context.Context, cl client.Client, lc localClient, tsNamespace, pgName string, serviceName tailcfg.ServiceName) error { + domainName, err := dnsNameForService(ctx, lc, serviceName) if err != nil { - return fmt.Errorf("error getting DNS name for Tailscale Service %s: %w", name, err) + return fmt.Errorf("error getting DNS name for Tailscale Service %s: %w", serviceName, err) } labels := certResourceLabels(pgName, domainName) - if err := r.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + if err := cl.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, client.InNamespace(tsNamespace), client.MatchingLabels(labels)); err != nil { return fmt.Errorf("error deleting RoleBinding for domain name %s: %w", domainName, err) } - if err := r.DeleteAllOf(ctx, &rbacv1.Role{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + if err := cl.DeleteAllOf(ctx, &rbacv1.Role{}, client.InNamespace(tsNamespace), client.MatchingLabels(labels)); err != nil { return fmt.Errorf("error deleting Role for domain name %s: %w", domainName, err) } - if err := r.DeleteAllOf(ctx, &corev1.Secret{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + if err := cl.DeleteAllOf(ctx, &corev1.Secret{}, client.InNamespace(tsNamespace), client.MatchingLabels(labels)); err != nil { return fmt.Errorf("error deleting Secret for domain name %s: %w", domainName, err) } return nil @@ -1018,17 +1028,17 @@ func certSecretRole(pgName, namespace, domain string) *rbacv1.Role { // certSecretRoleBinding creates a RoleBinding for Role that will allow proxies // to manage the TLS Secret for the given domain. Domain must be a valid // Kubernetes resource name. 
-func certSecretRoleBinding(pgName, namespace, domain string) *rbacv1.RoleBinding { +func certSecretRoleBinding(pg *tsapi.ProxyGroup, namespace, domain string) *rbacv1.RoleBinding { return &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: domain, Namespace: namespace, - Labels: certResourceLabels(pgName, domain), + Labels: certResourceLabels(pg.Name, domain), }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: pgName, + Name: pgServiceAccountName(pg), Namespace: namespace, }, }, @@ -1041,14 +1051,17 @@ func certSecretRoleBinding(pgName, namespace, domain string) *rbacv1.RoleBinding // certSecret creates a Secret that will store the TLS certificate and private // key for the given domain. Domain must be a valid Kubernetes resource name. -func certSecret(pgName, namespace, domain string, ing *networkingv1.Ingress) *corev1.Secret { +func certSecret(pgName, namespace, domain string, parent client.Object) *corev1.Secret { labels := certResourceLabels(pgName, domain) - labels[kubetypes.LabelSecretType] = "certs" + labels[kubetypes.LabelSecretType] = kubetypes.LabelSecretTypeCerts // Labels that let us identify the Ingress resource lets us reconcile // the Ingress when the TLS Secret is updated (for example, when TLS // certs have been provisioned). - labels[LabelParentName] = ing.Name - labels[LabelParentNamespace] = ing.Namespace + labels[LabelParentType] = strings.ToLower(parent.GetObjectKind().GroupVersionKind().Kind) + labels[LabelParentName] = parent.GetName() + if ns := parent.GetNamespace(); ns != "" { + labels[LabelParentNamespace] = ns + } return &corev1.Secret{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", @@ -1076,9 +1089,9 @@ func certResourceLabels(pgName, domain string) map[string]string { } // dnsNameForService returns the DNS name for the given Tailscale Service's name. 
-func (r *HAIngressReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { +func dnsNameForService(ctx context.Context, lc localClient, svc tailcfg.ServiceName) (string, error) { s := svc.WithoutPrefix() - tcd, err := r.tailnetCertDomain(ctx) + tcd, err := tailnetCertDomain(ctx, lc) if err != nil { return "", fmt.Errorf("error determining DNS name base: %w", err) } @@ -1086,14 +1099,14 @@ func (r *HAIngressReconciler) dnsNameForService(ctx context.Context, svc tailcfg } // hasCerts checks if the TLS Secret for the given service has non-zero cert and key data. -func (r *HAIngressReconciler) hasCerts(ctx context.Context, svc tailcfg.ServiceName) (bool, error) { - domain, err := r.dnsNameForService(ctx, svc) +func hasCerts(ctx context.Context, cl client.Client, lc localClient, ns string, svc tailcfg.ServiceName) (bool, error) { + domain, err := dnsNameForService(ctx, lc, svc) if err != nil { return false, fmt.Errorf("failed to get DNS name for service: %w", err) } secret := &corev1.Secret{} - err = r.Get(ctx, client.ObjectKey{ - Namespace: r.tsNamespace, + err = cl.Get(ctx, client.ObjectKey{ + Namespace: ns, Name: domain, }, secret) if err != nil { diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 5de86cdad573a..77e5ecb37a677 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -75,8 +75,13 @@ func TestIngressPGReconciler(t *testing.T) { // Verify that Role and RoleBinding have been created for the first Ingress. // Do not verify the cert Secret as that was already verified implicitly above. 
+ pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg", + }, + } expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-svc.ts.net")) - expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-svc.ts.net")) mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test" @@ -137,7 +142,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify that Role and RoleBinding have been created for the second Ingress. // Do not verify the cert Secret as that was already verified implicitly above. expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-other-svc.ts.net")) - expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-other-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-other-svc.ts.net")) // Verify first Ingress is still working verifyServeConfig(t, fc, "svc:my-svc", false) @@ -186,7 +191,12 @@ func TestIngressPGReconciler(t *testing.T) { }) expectReconciled(t, ingPGR, "default", "test-ingress") expectEqual(t, fc, certSecretRole("test-pg-second", "operator-ns", "my-svc.ts.net")) - expectEqual(t, fc, certSecretRoleBinding("test-pg-second", "operator-ns", "my-svc.ts.net")) + pg = &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-second", + }, + } + expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-svc.ts.net")) // Delete the first Ingress and verify cleanup if err := fc.Delete(context.Background(), ing); err != nil { @@ -515,7 +525,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-pg-0", Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", "state"), + Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeState), }, Data: map[string][]byte{ "_current-profile": []byte("profile-foo"), @@ -686,6 +696,14 @@ func 
TestOwnerAnnotations(t *testing.T) { ownerAnnotation: `{"ownerRefs":[{"operatorID":"operator-2"},{"operatorID":"self-id"}]}`, }, }, + "owned_by_proxygroup": { + svc: &tailscale.VIPService{ + Annotations: map[string]string{ + ownerAnnotation: `{"ownerRefs":[{"operatorID":"self-id","resource":{"kind":"ProxyGroup","name":"test-pg","uid":"1234-UID"}}]}`, + }, + }, + wantErr: "owned by another resource", + }, } { t.Run(name, func(t *testing.T) { got, err := ownerAnnotations("self-id", tc.svc) @@ -708,7 +726,7 @@ func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain stri kubetypes.LabelManaged: "true", labelProxyGroup: pgName, labelDomain: domain, - kubetypes.LabelSecretType: "certs", + kubetypes.LabelSecretType: kubetypes.LabelSecretTypeCerts, }, }, Type: corev1.SecretTypeTLS, @@ -806,7 +824,7 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, pgName string, expec ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName(pgName, 0), Namespace: "operator-ns", - Labels: pgSecretLabels(pgName, "config"), + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig), }, Data: map[string][]byte{ tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)), @@ -845,7 +863,7 @@ func createPGResources(t *testing.T, fc client.Client, pgName string) { ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName(pgName, 0), Namespace: "operator-ns", - Labels: pgSecretLabels(pgName, "config"), + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig), }, Data: map[string][]byte{ tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): []byte("{}"), diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 870a6f8b7f37e..94a0a6a781c4d 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -123,7 +123,7 @@ func main() { defer s.Close() restConfig := config.GetConfigOrDie() if mode != apiServerProxyModeDisabled { - ap, err := 
apiproxy.NewAPIServerProxy(zlog, restConfig, s, mode == apiServerProxyModeEnabled) + ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, mode == apiServerProxyModeEnabled, true) if err != nil { zlog.Fatalf("error creating API server proxy: %v", err) } @@ -633,6 +633,32 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not create Recorder reconciler: %v", err) } + // kube-apiserver's Tailscale Service reconciler. + err = builder. + ControllerManagedBy(mgr). + For(&tsapi.ProxyGroup{}, builder.WithPredicates( + predicate.NewPredicateFuncs(func(obj client.Object) bool { + pg, ok := obj.(*tsapi.ProxyGroup) + return ok && pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer + }), + )). + Named("kube-apiserver-ts-service-reconciler"). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(kubeAPIServerPGsFromSecret(mgr.GetClient(), startlog))). + Complete(&KubeAPIServerTSServiceReconciler{ + Client: mgr.GetClient(), + recorder: eventRecorder, + logger: opts.log.Named("kube-apiserver-ts-service-reconciler"), + tsClient: opts.tsClient, + tsNamespace: opts.tailscaleNamespace, + lc: lc, + defaultTags: strings.Split(opts.proxyTags, ","), + operatorID: id, + clock: tstime.DefaultClock{}, + }) + if err != nil { + startlog.Fatalf("could not create Kubernetes API server Tailscale Service reconciler: %v", err) + } + // ProxyGroup reconciler. 
ownedByProxyGroupFilter := handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &tsapi.ProxyGroup{}) proxyClassFilterForProxyGroup := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForProxyGroup(mgr.GetClient(), startlog)) @@ -1214,7 +1240,7 @@ func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { if parentType := o.GetLabels()[LabelParentType]; parentType != "proxygroup" { return nil } - if secretType := o.GetLabels()[kubetypes.LabelSecretType]; secretType != "state" { + if secretType := o.GetLabels()[kubetypes.LabelSecretType]; secretType != kubetypes.LabelSecretTypeState { return nil } pg, ok := o.GetLabels()[LabelParentName] @@ -1304,7 +1330,7 @@ func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile. func isTLSSecret(secret *corev1.Secret) bool { return secret.Type == corev1.SecretTypeTLS && secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && - secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "certs" && + secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == kubetypes.LabelSecretTypeCerts && secret.ObjectMeta.Labels[labelDomain] != "" && secret.ObjectMeta.Labels[labelProxyGroup] != "" } @@ -1312,7 +1338,7 @@ func isTLSSecret(secret *corev1.Secret) bool { func isPGStateSecret(secret *corev1.Secret) bool { return secret.ObjectMeta.Labels[kubetypes.LabelManaged] == "true" && secret.ObjectMeta.Labels[LabelParentType] == "proxygroup" && - secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == "state" + secret.ObjectMeta.Labels[kubetypes.LabelSecretType] == kubetypes.LabelSecretTypeState } // HAIngressesFromSecret returns a handler that returns reconcile requests for @@ -1394,6 +1420,42 @@ func HAServicesFromSecret(cl client.Client, logger *zap.SugaredLogger) handler.M } } +// kubeAPIServerPGsFromSecret finds ProxyGroups of type "kube-apiserver" that +// need to be reconciled after a ProxyGroup-owned Secret is updated. 
+func kubeAPIServerPGsFromSecret(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + secret, ok := o.(*corev1.Secret) + if !ok { + logger.Infof("[unexpected] Secret handler triggered for an object that is not a Secret") + return nil + } + if secret.ObjectMeta.Labels[kubetypes.LabelManaged] != "true" || + secret.ObjectMeta.Labels[LabelParentType] != "proxygroup" { + return nil + } + + var pg tsapi.ProxyGroup + if err := cl.Get(ctx, types.NamespacedName{Name: secret.ObjectMeta.Labels[LabelParentName]}, &pg); err != nil { + logger.Infof("error getting ProxyGroup %s: %v", secret.ObjectMeta.Labels[LabelParentName], err) + return nil + } + + if pg.Spec.Type != tsapi.ProxyGroupTypeKubernetesAPIServer { + return nil + } + + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: secret.ObjectMeta.Labels[LabelParentNamespace], + Name: secret.ObjectMeta.Labels[LabelParentName], + }, + }, + } + + } +} + // egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all // user-created ExternalName Services that should be exposed on this ProxyGroup. func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 1fdc076f94cad..d62cb0f117a1d 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -68,8 +68,7 @@ const ( // // tailcfg.CurrentCapabilityVersion was 106 when the ProxyGroup controller was // first introduced. 
- pgMinCapabilityVersion = 106 - kubeAPIServerConfigFile = "config.hujson" + pgMinCapabilityVersion = 106 ) var ( @@ -127,6 +126,10 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } if done, err := r.maybeCleanup(ctx, pg); err != nil { + if strings.Contains(err.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error, retrying: %s", err) + return reconcile.Result{}, nil + } return reconcile.Result{}, err } else if !done { logger.Debugf("ProxyGroup resource cleanup not yet finished, will retry...") @@ -158,7 +161,7 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG logger.Infof("ensuring ProxyGroup is set up") pg.Finalizers = append(pg.Finalizers, FinalizerName) if err := r.Update(ctx, pg); err != nil { - return r.notReadyErrf(pg, "error adding finalizer: %w", err) + return r.notReadyErrf(pg, logger, "error adding finalizer: %w", err) } } @@ -174,31 +177,25 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG if apierrors.IsNotFound(err) { msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q does not (yet) exist", proxyClassName) logger.Info(msg) - return r.notReady(reasonProxyGroupCreating, msg) + return notReady(reasonProxyGroupCreating, msg) } if err != nil { - return r.notReadyErrf(pg, "error getting ProxyGroup's ProxyClass %q: %w", proxyClassName, err) + return r.notReadyErrf(pg, logger, "error getting ProxyGroup's ProxyClass %q: %w", proxyClassName, err) } if !tsoperator.ProxyClassIsReady(proxyClass) { msg := fmt.Sprintf("the ProxyGroup's ProxyClass %q is not yet in a ready state, waiting...", proxyClassName) logger.Info(msg) - return r.notReady(reasonProxyGroupCreating, msg) + return notReady(reasonProxyGroupCreating, msg) } } if err := r.validate(ctx, pg, proxyClass, logger); err != nil { - return r.notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid ProxyGroup spec: %v", err)) + return notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid 
ProxyGroup spec: %v", err)) } staticEndpoints, nrr, err := r.maybeProvision(ctx, pg, proxyClass) if err != nil { - if strings.Contains(err.Error(), optimisticLockErrorMsg) { - msg := fmt.Sprintf("optimistic lock error, retrying: %s", nrr.message) - logger.Info(msg) - return r.notReady(reasonProxyGroupCreating, msg) - } else { - return nil, nrr, err - } + return nil, nrr, err } return staticEndpoints, nrr, nil @@ -299,9 +296,9 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro reason := reasonProxyGroupCreationFailed msg := fmt.Sprintf("error provisioning NodePort Services for static endpoints: %v", err) r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) - return r.notReady(reason, msg) + return notReady(reason, msg) } - return r.notReadyErrf(pg, "error provisioning NodePort Services for static endpoints: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning NodePort Services for static endpoints: %w", err) } } @@ -312,9 +309,9 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro reason := reasonProxyGroupCreationFailed msg := fmt.Sprintf("error provisioning config Secrets: %v", err) r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) - return r.notReady(reason, msg) + return notReady(reason, msg) } - return r.notReadyErrf(pg, "error provisioning config Secrets: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning config Secrets: %w", err) } // State secrets are precreated so we can use the ProxyGroup CR as their owner ref. 
@@ -325,7 +322,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sec.ObjectMeta.OwnerReferences }); err != nil { - return r.notReadyErrf(pg, "error provisioning state Secrets: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning state Secrets: %w", err) } } @@ -339,7 +336,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = sa.ObjectMeta.OwnerReferences }); err != nil { - return r.notReadyErrf(pg, "error provisioning ServiceAccount: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning ServiceAccount: %w", err) } } @@ -350,7 +347,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.ObjectMeta.OwnerReferences = role.ObjectMeta.OwnerReferences r.Rules = role.Rules }); err != nil { - return r.notReadyErrf(pg, "error provisioning Role: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning Role: %w", err) } roleBinding := pgRoleBinding(pg, r.tsNamespace) @@ -361,7 +358,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro r.RoleRef = roleBinding.RoleRef r.Subjects = roleBinding.Subjects }); err != nil { - return r.notReadyErrf(pg, "error provisioning RoleBinding: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning RoleBinding: %w", err) } if pg.Spec.Type == tsapi.ProxyGroupTypeEgress { @@ -371,7 +368,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences mak.Set(&existing.BinaryData, egressservices.KeyHEPPings, hp) }); err != nil { - return r.notReadyErrf(pg, "error provisioning egress ConfigMap %q: %w", cm.Name, err) + return r.notReadyErrf(pg, logger, "error provisioning egress ConfigMap %q: %w", cm.Name, err) } } @@ 
-381,7 +378,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro existing.ObjectMeta.Labels = cm.ObjectMeta.Labels existing.ObjectMeta.OwnerReferences = cm.ObjectMeta.OwnerReferences }); err != nil { - return r.notReadyErrf(pg, "error provisioning ingress ConfigMap %q: %w", cm.Name, err) + return r.notReadyErrf(pg, logger, "error provisioning ingress ConfigMap %q: %w", cm.Name, err) } } @@ -391,7 +388,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro } ss, err := pgStatefulSet(pg, r.tsNamespace, defaultImage, r.tsFirewallMode, tailscaledPort, proxyClass) if err != nil { - return r.notReadyErrf(pg, "error generating StatefulSet spec: %w", err) + return r.notReadyErrf(pg, logger, "error generating StatefulSet spec: %w", err) } cfg := &tailscaleSTSConfig{ proxyType: string(pg.Spec.Type), @@ -404,7 +401,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations s.ObjectMeta.OwnerReferences = ss.ObjectMeta.OwnerReferences }); err != nil { - return r.notReadyErrf(pg, "error provisioning StatefulSet: %w", err) + return r.notReadyErrf(pg, logger, "error provisioning StatefulSet: %w", err) } mo := &metricsOpts{ @@ -414,11 +411,11 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro proxyType: "proxygroup", } if err := reconcileMetricsResources(ctx, logger, mo, proxyClass, r.Client); err != nil { - return r.notReadyErrf(pg, "error reconciling metrics resources: %w", err) + return r.notReadyErrf(pg, logger, "error reconciling metrics resources: %w", err) } if err := r.cleanupDanglingResources(ctx, pg, proxyClass); err != nil { - return r.notReadyErrf(pg, "error cleaning up dangling resources: %w", err) + return r.notReadyErrf(pg, logger, "error cleaning up dangling resources: %w", err) } logger.Info("ProxyGroup resources synced") @@ -430,6 +427,10 @@ func (r *ProxyGroupReconciler) 
maybeUpdateStatus(ctx context.Context, logger *za defer func() { if !apiequality.Semantic.DeepEqual(*oldPGStatus, pg.Status) { if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil { + if strings.Contains(updateErr.Error(), optimisticLockErrorMsg) { + logger.Infof("optimistic lock error updating status, retrying: %s", updateErr) + updateErr = nil + } err = errors.Join(err, updateErr) } } @@ -457,6 +458,7 @@ func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *za tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, status, reason, message, 0, r.clock, logger) // Set ProxyGroupReady condition. + tsSvcValid, tsSvcSet := tsoperator.KubeAPIServerProxyValid(pg) status = metav1.ConditionFalse reason = reasonProxyGroupCreating switch { @@ -464,9 +466,15 @@ func (r *ProxyGroupReconciler) maybeUpdateStatus(ctx context.Context, logger *za // If we failed earlier, that reason takes precedence. reason = nrr.reason message = nrr.message + case pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer && tsSvcSet && !tsSvcValid: + reason = reasonProxyGroupInvalid + message = "waiting for config in spec.kubeAPIServer to be marked valid" case len(devices) < desiredReplicas: case len(devices) > desiredReplicas: message = fmt.Sprintf("waiting for %d ProxyGroup pods to shut down", len(devices)-desiredReplicas) + case pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer && !tsoperator.KubeAPIServerProxyConfigured(pg): + reason = reasonProxyGroupCreating + message = "waiting for proxies to start advertising the kube-apiserver proxy's hostname" default: status = metav1.ConditionTrue reason = reasonProxyGroupReady @@ -714,7 +722,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName(pg.Name, i), Namespace: r.tsNamespace, - Labels: pgSecretLabels(pg.Name, "config"), + Labels: pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig), OwnerReferences: 
pgOwnerReference(pg), }, } @@ -775,13 +783,6 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } - // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we - // don't overwrite it if already set. - existingAdvertiseServices, err := extractAdvertiseServicesConfig(existingCfgSecret) - if err != nil { - return nil, err - } - if pg.Spec.Type == tsapi.ProxyGroupTypeKubernetesAPIServer { hostname := pgHostname(pg, i) @@ -795,7 +796,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } if !deviceAuthed { existingCfg := conf.ConfigV1Alpha1{} - if err := json.Unmarshal(existingCfgSecret.Data[kubeAPIServerConfigFile], &existingCfg); err != nil { + if err := json.Unmarshal(existingCfgSecret.Data[kubetypes.KubeAPIServerConfigFile], &existingCfg); err != nil { return nil, fmt.Errorf("error unmarshalling existing config: %w", err) } if existingCfg.AuthKey != nil { @@ -803,19 +804,42 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } } + cfg := conf.VersionedConfig{ Version: "v1alpha1", ConfigV1Alpha1: &conf.ConfigV1Alpha1{ - Hostname: &hostname, + AuthKey: authKey, State: ptr.To(fmt.Sprintf("kube:%s", pgPodName(pg.Name, i))), App: ptr.To(kubetypes.AppProxyGroupKubeAPIServer), - AuthKey: authKey, - KubeAPIServer: &conf.KubeAPIServer{ + LogLevel: ptr.To(logger.Level().String()), + + // Reloadable fields. + Hostname: &hostname, + APIServerProxy: &conf.APIServerProxyConfig{ + Enabled: opt.NewBool(true), AuthMode: opt.NewBool(isAuthAPIServerProxy(pg)), + // The first replica is elected as the cert issuer, same + // as containerboot does for ingress-pg-reconciler. + IssueCerts: opt.NewBool(i == 0), }, }, } + // Copy over config that the apiserver-proxy-service-reconciler sets. 
+ if existingCfgSecret != nil { + if k8sProxyCfg, ok := cfgSecret.Data[kubetypes.KubeAPIServerConfigFile]; ok { + k8sCfg := &conf.ConfigV1Alpha1{} + if err := json.Unmarshal(k8sProxyCfg, k8sCfg); err != nil { + return nil, fmt.Errorf("failed to unmarshal kube-apiserver config: %w", err) + } + + cfg.AdvertiseServices = k8sCfg.AdvertiseServices + if k8sCfg.APIServerProxy != nil { + cfg.APIServerProxy.ServiceName = k8sCfg.APIServerProxy.ServiceName + } + } + } + if r.loginServer != "" { cfg.ServerURL = &r.loginServer } @@ -832,8 +856,15 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p if err != nil { return nil, fmt.Errorf("error marshalling k8s-proxy config: %w", err) } - mak.Set(&cfgSecret.Data, kubeAPIServerConfigFile, cfgB) + mak.Set(&cfgSecret.Data, kubetypes.KubeAPIServerConfigFile, cfgB) } else { + // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we + // don't overwrite it if already set. + existingAdvertiseServices, err := extractAdvertiseServicesConfig(existingCfgSecret) + if err != nil { + return nil, err + } + configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) @@ -1024,16 +1055,16 @@ func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error) return nil, nil } - conf, err := latestConfigFromSecret(cfgSecret) + cfg, err := latestConfigFromSecret(cfgSecret) if err != nil { return nil, err } - if conf == nil { + if cfg == nil { return nil, nil } - return conf.AdvertiseServices, nil + return cfg.AdvertiseServices, nil } // getNodeMetadata gets metadata for all the pods owned by this ProxyGroup by @@ -1045,7 +1076,7 @@ func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error) func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup) (metadata []nodeMetadata, _ error) 
{ // List all state Secrets owned by this ProxyGroup. secrets := &corev1.SecretList{} - if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, "state"))); err != nil { + if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeState))); err != nil { return nil, fmt.Errorf("failed to list state Secrets: %w", err) } for _, secret := range secrets.Items { @@ -1140,15 +1171,21 @@ type nodeMetadata struct { dnsName string } -func (r *ProxyGroupReconciler) notReady(reason, msg string) (map[string][]netip.AddrPort, *notReadyReason, error) { +func notReady(reason, msg string) (map[string][]netip.AddrPort, *notReadyReason, error) { return nil, ¬ReadyReason{ reason: reason, message: msg, }, nil } -func (r *ProxyGroupReconciler) notReadyErrf(pg *tsapi.ProxyGroup, format string, a ...any) (map[string][]netip.AddrPort, *notReadyReason, error) { +func (r *ProxyGroupReconciler) notReadyErrf(pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, format string, a ...any) (map[string][]netip.AddrPort, *notReadyReason, error) { err := fmt.Errorf(format, a...) 
+ if strings.Contains(err.Error(), optimisticLockErrorMsg) { + msg := fmt.Sprintf("optimistic lock error, retrying: %s", err.Error()) + logger.Info(msg) + return notReady(reasonProxyGroupCreating, msg) + } + r.recorder.Event(pg, corev1.EventTypeWarning, reasonProxyGroupCreationFailed, err.Error()) return nil, ¬ReadyReason{ reason: reasonProxyGroupCreationFailed, diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 71398d0d54c2f..e185499f0e19d 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -7,7 +7,6 @@ package main import ( "fmt" - "path/filepath" "slices" "strconv" "strings" @@ -16,6 +15,7 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" @@ -341,8 +341,11 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, por }, }, { - Name: "TS_K8S_PROXY_CONFIG", - Value: filepath.Join("/etc/tsconfig/$(POD_NAME)/", kubeAPIServerConfigFile), + Name: "TS_K8S_PROXY_CONFIG", + Value: "kube:" + types.NamespacedName{ + Namespace: namespace, + Name: "$(POD_NAME)-config", + }.String(), }, } @@ -355,20 +358,6 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, por return envs }(), - VolumeMounts: func() []corev1.VolumeMount { - var mounts []corev1.VolumeMount - - // TODO(tomhjp): Read config directly from the Secret instead. 
- for i := range pgReplicas(pg) { - mounts = append(mounts, corev1.VolumeMount{ - Name: fmt.Sprintf("k8s-proxy-config-%d", i), - ReadOnly: true, - MountPath: fmt.Sprintf("/etc/tsconfig/%s-%d", pg.Name, i), - }) - } - - return mounts - }(), Ports: []corev1.ContainerPort{ { Name: "k8s-proxy", @@ -378,21 +367,6 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, por }, }, }, - Volumes: func() []corev1.Volume { - var volumes []corev1.Volume - for i := range pgReplicas(pg) { - volumes = append(volumes, corev1.Volume{ - Name: fmt.Sprintf("k8s-proxy-config-%d", i), - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: pgConfigSecretName(pg.Name, i), - }, - }, - }) - } - - return volumes - }(), }, }, }, @@ -426,6 +400,7 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { Resources: []string{"secrets"}, Verbs: []string{ "list", + "watch", // For k8s-proxy. }, }, { @@ -508,7 +483,7 @@ func pgStateSecrets(pg *tsapi.ProxyGroup, namespace string) (secrets []*corev1.S ObjectMeta: metav1.ObjectMeta{ Name: pgStateSecretName(pg.Name, i), Namespace: namespace, - Labels: pgSecretLabels(pg.Name, "state"), + Labels: pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeState), OwnerReferences: pgOwnerReference(pg), }, }) diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 6f143c0566dff..ef6babc5679cc 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -31,8 +31,11 @@ import ( kube "tailscale.com/k8s-operator" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" + "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/types/opt" "tailscale.com/types/ptr" ) @@ -1256,6 +1259,163 @@ func TestProxyGroupTypes(t *testing.T) { }) } +func TestKubeAPIServerStatusConditionFlow(t *testing.T) { + pg := &tsapi.ProxyGroup{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-k8s-apiserver", + UID: "test-k8s-apiserver-uid", + Generation: 1, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + Replicas: ptr.To[int32](1), + KubeAPIServer: &tsapi.KubeAPIServerConfig{ + Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), + }, + }, + } + stateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pg.Name, 0), + Namespace: tsNamespace, + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg, stateSecret). + WithStatusSubresource(pg). + Build() + r := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + l: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + } + + expectReconciled(t, r, "", pg.Name) + pg.ObjectMeta.Finalizers = append(pg.ObjectMeta.Finalizers, FinalizerName) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "", 0, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Set kube-apiserver valid. + mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.l) + }) + expectReconciled(t, r, "", pg.Name) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Set available. 
+ addNodeIDToStateSecrets(t, fc, pg) + expectReconciled(t, r, "", pg.Name) + pg.Status.Devices = []tsapi.TailnetDevice{ + { + Hostname: "hostname-nodeid-0", + TailnetIPs: []string{"1.2.3.4", "::1"}, + }, + } + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "", 0, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) + + // Set kube-apiserver configured. + mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { + tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.l) + }) + expectReconciled(t, r, "", pg.Name) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, "", 1, r.clock, r.l) + expectEqual(t, fc, pg, omitPGStatusConditionMessages) +} + +func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithStatusSubresource(&tsapi.ProxyGroup{}). 
+ Build() + + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + l: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + } + + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-k8s-apiserver", + UID: "test-k8s-apiserver-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeKubernetesAPIServer, + Replicas: ptr.To[int32](1), + KubeAPIServer: &tsapi.KubeAPIServerConfig{ + Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), // Avoid needing to pre-create the static ServiceAccount. + }, + }, + } + if err := fc.Create(t.Context(), pg); err != nil { + t.Fatal(err) + } + expectReconciled(t, reconciler, "", pg.Name) + + cfg := conf.VersionedConfig{ + Version: "v1alpha1", + ConfigV1Alpha1: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("secret-authkey"), + State: ptr.To(fmt.Sprintf("kube:%s", pgPodName(pg.Name, 0))), + App: ptr.To(kubetypes.AppProxyGroupKubeAPIServer), + LogLevel: ptr.To("debug"), + + Hostname: ptr.To("test-k8s-apiserver-0"), + APIServerProxy: &conf.APIServerProxyConfig{ + Enabled: opt.NewBool(true), + AuthMode: opt.NewBool(false), + IssueCerts: opt.NewBool(true), + }, + }, + } + cfgB, err := json.Marshal(cfg) + if err != nil { + t.Fatalf("failed to marshal config: %v", err) + } + + cfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pg.Name, 0), + Namespace: tsNamespace, + Labels: pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig), + OwnerReferences: pgOwnerReference(pg), + }, + Data: map[string][]byte{ + kubetypes.KubeAPIServerConfigFile: cfgB, + }, + } + expectEqual(t, fc, cfgSecret) + + // Now simulate the kube-apiserver services reconciler updating config, + // then check the proxygroup reconciler doesn't overwrite it. 
+ cfg.APIServerProxy.ServiceName = ptr.To(tailcfg.ServiceName("svc:some-svc-name")) + cfg.AdvertiseServices = []string{"svc:should-not-be-overwritten"} + cfgB, err = json.Marshal(cfg) + if err != nil { + t.Fatalf("failed to marshal config: %v", err) + } + mustUpdate(t, fc, tsNamespace, cfgSecret.Name, func(s *corev1.Secret) { + s.Data[kubetypes.KubeAPIServerConfigFile] = cfgB + }) + expectReconciled(t, reconciler, "", pg.Name) + + cfgSecret.Data[kubetypes.KubeAPIServerConfigFile] = cfgB + expectEqual(t, fc, cfgSecret) +} + func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). @@ -1660,7 +1820,7 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG if _, err := createOrUpdate(t.Context(), fc, "tailscale", pod, nil); err != nil { t.Fatalf("failed to create or update Pod %s: %v", pod.Name, err) } - mustUpdate(t, fc, tsNamespace, fmt.Sprintf("test-%d", i), func(s *corev1.Secret) { + mustUpdate(t, fc, tsNamespace, pgStateSecretName(pg.Name, i), func(s *corev1.Secret) { s.Data = map[string][]byte{ currentProfileKey: []byte(key), key: bytes, diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index 4247eaaa0040f..62cc36bd4a82b 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -41,7 +41,7 @@ import ( ) const ( - finalizerName = "tailscale.com/service-pg-finalizer" + svcPGFinalizerName = "tailscale.com/service-pg-finalizer" reasonIngressSvcInvalid = "IngressSvcInvalid" reasonIngressSvcValid = "IngressSvcValid" @@ -174,13 +174,13 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin return false, nil } - if !slices.Contains(svc.Finalizers, finalizerName) { + if !slices.Contains(svc.Finalizers, svcPGFinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. 
So, // this is a nice place to tell the operator that the high level, // multi-reconcile operation is underway. logger.Infof("exposing Service over tailscale") - svc.Finalizers = append(svc.Finalizers, finalizerName) + svc.Finalizers = append(svc.Finalizers, svcPGFinalizerName) if err := r.Update(ctx, svc); err != nil { return false, fmt.Errorf("failed to add finalizer: %w", err) } @@ -378,7 +378,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // corresponding to this Service. func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcChanged bool, err error) { logger.Debugf("Ensuring any resources for Service are cleaned up") - ix := slices.Index(svc.Finalizers, finalizerName) + ix := slices.Index(svc.Finalizers, svcPGFinalizerName) if ix < 0 { logger.Debugf("no finalizer, nothing to do") return false, nil @@ -485,12 +485,12 @@ func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG func (r *HAServiceReconciler) deleteFinalizer(ctx context.Context, svc *corev1.Service, logger *zap.SugaredLogger) error { svc.Finalizers = slices.DeleteFunc(svc.Finalizers, func(f string) bool { - return f == finalizerName + return f == svcPGFinalizerName }) - logger.Debugf("ensure %q finalizer is removed", finalizerName) + logger.Debugf("ensure %q finalizer is removed", svcPGFinalizerName) if err := r.Update(ctx, svc); err != nil { - return fmt.Errorf("failed to remove finalizer %q: %w", finalizerName, err) + return fmt.Errorf("failed to remove finalizer %q: %w", svcPGFinalizerName, err) } r.mu.Lock() defer r.mu.Unlock() @@ -653,7 +653,7 @@ func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con // Get all config Secrets for this ProxyGroup. 
// Get all Pods secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil { + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig))); err != nil { return fmt.Errorf("failed to list config Secrets: %w", err) } @@ -720,7 +720,7 @@ func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { // Get all state Secrets for this ProxyGroup. secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "state"))); err != nil { + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeState))); err != nil { return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err) } diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index 054c3ed49f5cb..baaa07727df06 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -26,6 +26,7 @@ import ( tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/ingressservices" + "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/types/ptr" "tailscale.com/util/mak" @@ -139,7 +140,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName("test-pg", 0), Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", "config"), + Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeConfig), }, Data: map[string][]byte{ tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): 
[]byte(`{"Version":""}`), @@ -298,12 +299,12 @@ func TestServicePGReconciler_MultiCluster(t *testing.T) { t.Fatalf("getting Tailscale Service: %v", err) } - if len(tsSvcs) != 1 { - t.Fatalf("unexpected number of Tailscale Services (%d)", len(tsSvcs)) + if len(tsSvcs.VIPServices) != 1 { + t.Fatalf("unexpected number of Tailscale Services (%d)", len(tsSvcs.VIPServices)) } - for name := range tsSvcs { - t.Logf("found Tailscale Service with name %q", name.String()) + for _, svc := range tsSvcs.VIPServices { + t.Logf("found Tailscale Service with name %q", svc.Name) } } } @@ -336,7 +337,7 @@ func TestIgnoreRegularService(t *testing.T) { tsSvcs, err := ft.ListVIPServices(context.Background()) if err == nil { - if len(tsSvcs) > 0 { + if len(tsSvcs.VIPServices) > 0 { t.Fatal("unexpected Tailscale Services found") } } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 56542700d951c..6ae32d6fbac13 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -891,13 +891,17 @@ func (c *fakeTSClient) GetVIPService(ctx context.Context, name tailcfg.ServiceNa return svc, nil } -func (c *fakeTSClient) ListVIPServices(ctx context.Context) (map[tailcfg.ServiceName]*tailscale.VIPService, error) { +func (c *fakeTSClient) ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) { c.Lock() defer c.Unlock() if c.vipServices == nil { return nil, &tailscale.ErrResponse{Status: http.StatusNotFound} } - return c.vipServices, nil + result := &tailscale.VIPServiceList{} + for _, svc := range c.vipServices { + result.VIPServices = append(result.VIPServices, *svc) + } + return result, nil } func (c *fakeTSClient) CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error { diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index a94d55afed604..50620c26ddf27 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -56,6 +56,8 @@ type tsClient 
interface { DeleteDevice(ctx context.Context, nodeStableID string) error // GetVIPService is a method for getting a Tailscale Service. VIPService is the original name for Tailscale Service. GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*tailscale.VIPService, error) + // ListVIPServices is a method for listing all Tailscale Services. VIPService is the original name for Tailscale Service. + ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) // CreateOrUpdateVIPService is a method for creating or updating a Tailscale Service. CreateOrUpdateVIPService(ctx context.Context, svc *tailscale.VIPService) error // DeleteVIPService is a method for deleting a Tailscale Service. diff --git a/cmd/k8s-proxy/internal/config/config.go b/cmd/k8s-proxy/internal/config/config.go new file mode 100644 index 0000000000000..4013047e76f0c --- /dev/null +++ b/cmd/k8s-proxy/internal/config/config.go @@ -0,0 +1,264 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package config provides watchers for the various supported ways to load a +// config file for k8s-proxy; currently file or Kubernetes Secret. +package config + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/fsnotify/fsnotify" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/types/ptr" + "tailscale.com/util/testenv" +) + +type configLoader struct { + logger *zap.SugaredLogger + client clientcorev1.CoreV1Interface + + cfgChan chan<- *conf.Config + previous []byte + + once sync.Once // For use in tests. To close cfgIgnored. + cfgIgnored chan struct{} // For use in tests. 
+} + +func NewConfigLoader(logger *zap.SugaredLogger, client clientcorev1.CoreV1Interface, cfgChan chan<- *conf.Config) *configLoader { + return &configLoader{ + logger: logger, + client: client, + cfgChan: cfgChan, + } +} + +func (l *configLoader) WatchConfig(ctx context.Context, path string) error { + secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:") + if isKubeSecret { + secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator)) + if !ok { + return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format <namespace>/<name>", path) + } + if err := l.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) { + return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err) + } + + return nil + } + + if err := l.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) { + return fmt.Errorf("error watching config file %q: %w", path, err) + } + + return nil +} + +func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error { + if bytes.Equal(raw, l.previous) { + if l.cfgIgnored != nil && testenv.InTest() { + l.once.Do(func() { + close(l.cfgIgnored) + }) + } + return nil + } + + cfg, err := conf.Load(raw) + if err != nil { + return fmt.Errorf("error loading config: %w", err) + } + + select { + case <-ctx.Done(): + return ctx.Err() + case l.cfgChan <- &cfg: + } + + l.previous = raw + return nil +} + +func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) error { + var ( + tickChan <-chan time.Time + eventChan <-chan fsnotify.Event + errChan <-chan error + ) + + if w, err := fsnotify.NewWatcher(); err != nil { + // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor.
+ // See https://github.com/tailscale/tailscale/issues/15081 + l.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err) + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + tickChan = ticker.C + } else { + dir := filepath.Dir(path) + file := filepath.Base(path) + l.logger.Infof("Watching directory %q for changes to config file %q", dir, file) + defer w.Close() + if err := w.Add(dir); err != nil { + return fmt.Errorf("failed to add fsnotify watch: %w", err) + } + eventChan = w.Events + errChan = w.Errors + } + + // Read the initial config file, but after the watcher is already set up to + // avoid an unlucky race condition if the config file is edited in between. + b, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("error reading config file %q: %w", path, err) + } + if err := l.reloadConfig(ctx, b); err != nil { + return fmt.Errorf("error loading initial config file %q: %w", path, err) + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case err, ok := <-errChan: + if !ok { + // Watcher was closed. + return nil + } + return fmt.Errorf("watcher error: %w", err) + case <-tickChan: + case ev, ok := <-eventChan: + if !ok { + // Watcher was closed. + return nil + } + if ev.Name != path || ev.Op&fsnotify.Write == 0 { + // Ignore irrelevant events. + continue + } + } + b, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("error reading config file: %w", err) + } + // Writers such as os.WriteFile may truncate the file before writing + // new contents, so it's possible to read an empty file if we read before + // the write has completed. 
+ if len(b) == 0 { + continue + } + if err := l.reloadConfig(ctx, b); err != nil { + return fmt.Errorf("error reloading config file %q: %v", path, err) + } + } +} + +func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error { + secrets := l.client.Secrets(secretNamespace) + w, err := secrets.Watch(ctx, metav1.ListOptions{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + // Re-watch regularly to avoid relying on long-lived connections. + // See https://github.com/kubernetes-client/javascript/issues/596#issuecomment-786419380 + TimeoutSeconds: ptr.To(int64(600)), + FieldSelector: fmt.Sprintf("metadata.name=%s", secretName), + Watch: true, + }) + if err != nil { + return fmt.Errorf("failed to watch config Secret %q: %w", secretName, err) + } + defer func() { + // May not be the original watcher by the time we exit. + if w != nil { + w.Stop() + } + }() + + // Get the initial config Secret now we've got the watcher set up. + secret, err := secrets.Get(ctx, secretName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get config Secret %q: %w", secretName, err) + } + + if err := l.configFromSecret(ctx, secret); err != nil { + return fmt.Errorf("error loading initial config: %w", err) + } + + l.logger.Infof("Watching config Secret %q for changes", secretName) + for { + var secret *corev1.Secret + select { + case <-ctx.Done(): + return ctx.Err() + case ev, ok := <-w.ResultChan(): + if !ok { + w.Stop() + w, err = secrets.Watch(ctx, metav1.ListOptions{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + TimeoutSeconds: ptr.To(int64(600)), + FieldSelector: fmt.Sprintf("metadata.name=%s", secretName), + Watch: true, + }) + if err != nil { + return fmt.Errorf("failed to re-watch config Secret %q: %w", secretName, err) + } + continue + } + + switch ev.Type { + case watch.Added, watch.Modified: + // New config available to load. 
+ var ok bool + secret, ok = ev.Object.(*corev1.Secret) + if !ok { + return fmt.Errorf("unexpected object type %T in watch event for config Secret %q", ev.Object, secretName) + } + if secret == nil || secret.Data == nil { + continue + } + if err := l.configFromSecret(ctx, secret); err != nil { + return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err) + } + case watch.Error: + return fmt.Errorf("error watching config Secret %q: %v", secretName, ev.Object) + default: + // Ignore, no action required. + continue + } + } + } +} + +func (l *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error { + b := s.Data[kubetypes.KubeAPIServerConfigFile] + if len(b) == 0 { + return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile) + } + + if err := l.reloadConfig(ctx, b); err != nil { + return err + } + + return nil +} diff --git a/cmd/k8s-proxy/internal/config/config_test.go b/cmd/k8s-proxy/internal/config/config_test.go new file mode 100644 index 0000000000000..1603dbe1f398f --- /dev/null +++ b/cmd/k8s-proxy/internal/config/config_test.go @@ -0,0 +1,245 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package config + +import ( + "context" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" + "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" + "tailscale.com/types/ptr" +) + +func TestWatchConfig(t *testing.T) { + type phase struct { + config string + cancel bool + expectedConf *conf.ConfigV1Alpha1 + expectedErr string + } + + // Same set of behaviour tests for each config source. 
+ for _, env := range []string{"file", "kube"} { + t.Run(env, func(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + name string + initialConfig string + phases []phase + }{ + { + name: "no_config", + phases: []phase{{ + expectedErr: "error loading initial config", + }}, + }, + { + name: "valid_config", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{{ + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("abc123"), + }, + }}, + }, + { + name: "can_cancel", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{ + { + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("abc123"), + }, + }, + { + cancel: true, + }, + }, + }, + { + name: "can_reload", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{ + { + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("abc123"), + }, + }, + { + config: `{"version": "v1alpha1", "authKey": "def456"}`, + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("def456"), + }, + }, + }, + }, + { + name: "ignores_events_with_no_changes", + initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, + phases: []phase{ + { + expectedConf: &conf.ConfigV1Alpha1{ + AuthKey: ptr.To("abc123"), + }, + }, + { + config: `{"version": "v1alpha1", "authKey": "abc123"}`, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + root := t.TempDir() + cl := fake.NewClientset() + + var cfgPath string + var writeFile func(*testing.T, string) + if env == "file" { + cfgPath = filepath.Join(root, kubetypes.KubeAPIServerConfigFile) + writeFile = func(t *testing.T, content string) { + if err := os.WriteFile(cfgPath, []byte(content), 0o644); err != nil { + t.Fatalf("error writing config file %q: %v", cfgPath, err) + } + } + } else { + cfgPath = "kube:default/config-secret" + writeFile = func(t *testing.T, content string) { + s := secretFrom(content) + mustCreateOrUpdate(t, cl, s) + } + } + configChan := make(chan 
*conf.Config) + l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) + l.cfgIgnored = make(chan struct{}) + errs := make(chan error) + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + writeFile(t, tc.initialConfig) + go func() { + errs <- l.WatchConfig(ctx, cfgPath) + }() + + for i, p := range tc.phases { + if p.config != "" { + writeFile(t, p.config) + } + if p.cancel { + cancel() + } + + select { + case cfg := <-configChan: + if diff := cmp.Diff(*p.expectedConf, cfg.Parsed); diff != "" { + t.Errorf("unexpected config (-want +got):\n%s", diff) + } + case err := <-errs: + if p.cancel { + if err != nil { + t.Fatalf("unexpected error after cancel: %v", err) + } + } else if p.expectedErr == "" { + t.Fatalf("unexpected error: %v", err) + } else if !strings.Contains(err.Error(), p.expectedErr) { + t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error()) + } + case <-l.cfgIgnored: + if p.expectedConf != nil { + t.Fatalf("expected config to be reloaded, but got ignored signal") + } + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for expected event in phase: %d", i) + } + } + }) + } + }) + } +} + +func TestWatchConfigSecret_Rewatches(t *testing.T) { + cl := fake.NewClientset() + var watchCount int + var watcher *watch.RaceFreeFakeWatcher + expected := []string{ + `{"version": "v1alpha1", "authKey": "abc123"}`, + `{"version": "v1alpha1", "authKey": "def456"}`, + `{"version": "v1alpha1", "authKey": "ghi789"}`, + } + cl.PrependWatchReactor("secrets", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { + watcher = watch.NewRaceFreeFake() + watcher.Add(secretFrom(expected[watchCount])) + if action.GetVerb() == "watch" && action.GetResource().Resource == "secrets" { + watchCount++ + } + return true, watcher, nil + }) + + configChan := make(chan *conf.Config) + l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) + + 
mustCreateOrUpdate(t, cl, secretFrom(expected[0])) + + errs := make(chan error) + go func() { + errs <- l.watchConfigSecretChanges(t.Context(), "default", "config-secret") + }() + + for i := range 2 { + select { + case cfg := <-configChan: + if exp := expected[i]; cfg.Parsed.AuthKey == nil || !strings.Contains(exp, *cfg.Parsed.AuthKey) { + t.Fatalf("expected config to have authKey %q, got: %v", exp, cfg.Parsed.AuthKey) + } + if i == 0 { + watcher.Stop() + } + case err := <-errs: + t.Fatalf("unexpected error: %v", err) + case <-l.cfgIgnored: + t.Fatalf("expected config to be reloaded, but got ignored signal") + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for expected event") + } + } + + if watchCount != 2 { + t.Fatalf("expected 2 watch API calls, got %d", watchCount) + } +} + +func secretFrom(content string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "config-secret", + }, + Data: map[string][]byte{ + kubetypes.KubeAPIServerConfigFile: []byte(content), + }, + } +} + +func mustCreateOrUpdate(t *testing.T, cl *fake.Clientset, s *corev1.Secret) { + t.Helper() + if _, err := cl.CoreV1().Secrets("default").Create(t.Context(), s, metav1.CreateOptions{}); err != nil { + if _, updateErr := cl.CoreV1().Secrets("default").Update(t.Context(), s, metav1.UpdateOptions{}); updateErr != nil { + t.Fatalf("error writing config Secret %q: %v", s.Name, updateErr) + } + } +} diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index b7f3d9535a071..eea1f15f7fdd8 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -14,6 +14,7 @@ import ( "fmt" "os" "os/signal" + "reflect" "strings" "syscall" "time" @@ -21,20 +22,37 @@ import ( "go.uber.org/zap" "go.uber.org/zap/zapcore" "golang.org/x/sync/errgroup" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "k8s.io/utils/strings/slices" + "tailscale.com/client/local" + 
"tailscale.com/cmd/k8s-proxy/internal/config" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store" apiproxy "tailscale.com/k8s-operator/api-proxy" + "tailscale.com/kube/certs" "tailscale.com/kube/k8s-proxy/conf" + klc "tailscale.com/kube/localclient" + "tailscale.com/kube/services" "tailscale.com/kube/state" + "tailscale.com/tailcfg" "tailscale.com/tsnet" ) func main() { - logger := zap.Must(zap.NewProduction()).Sugar() + encoderCfg := zap.NewProductionEncoderConfig() + encoderCfg.EncodeTime = zapcore.RFC3339TimeEncoder + logger := zap.Must(zap.Config{ + Level: zap.NewAtomicLevelAt(zap.DebugLevel), + Encoding: "json", + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + EncoderConfig: encoderCfg, + }.Build()).Sugar() defer logger.Sync() + if err := run(logger); err != nil { logger.Fatal(err.Error()) } @@ -42,18 +60,58 @@ func main() { func run(logger *zap.SugaredLogger) error { var ( - configFile = os.Getenv("TS_K8S_PROXY_CONFIG") + configPath = os.Getenv("TS_K8S_PROXY_CONFIG") podUID = os.Getenv("POD_UID") ) - if configFile == "" { + if configPath == "" { return errors.New("TS_K8S_PROXY_CONFIG unset") } - // TODO(tomhjp): Support reloading config. - // TODO(tomhjp): Support reading config from a Secret. - cfg, err := conf.Load(configFile) + // serveCtx to live for the lifetime of the process, only gets cancelled + // once the Tailscale Service has been drained + serveCtx, serveCancel := context.WithCancel(context.Background()) + defer serveCancel() + + // ctx to cancel to start the shutdown process. 
+ ctx, cancel := context.WithCancel(serveCtx) + defer cancel() + + sigsChan := make(chan os.Signal, 1) + signal.Notify(sigsChan, syscall.SIGINT, syscall.SIGTERM) + go func() { + select { + case <-ctx.Done(): + case s := <-sigsChan: + logger.Infof("Received shutdown signal %s, exiting", s) + cancel() + } + }() + + var group *errgroup.Group + group, ctx = errgroup.WithContext(ctx) + + restConfig, err := getRestConfig(logger) + if err != nil { + return fmt.Errorf("error getting rest config: %w", err) + } + clientset, err := kubernetes.NewForConfig(restConfig) if err != nil { - return fmt.Errorf("error loading config file %q: %w", configFile, err) + return fmt.Errorf("error creating Kubernetes clientset: %w", err) + } + + // Load and watch config. + cfgChan := make(chan *conf.Config) + cfgLoader := config.NewConfigLoader(logger, clientset.CoreV1(), cfgChan) + group.Go(func() error { + return cfgLoader.WatchConfig(ctx, configPath) + }) + + // Get initial config. + var cfg *conf.Config + select { + case <-ctx.Done(): + return group.Wait() + case cfg = <-cfgChan: } if cfg.Parsed.LogLevel != nil { @@ -82,6 +140,14 @@ func run(logger *zap.SugaredLogger) error { hostinfo.SetApp(*cfg.Parsed.App) } + // TODO(tomhjp): Pass this setting directly into the store instead of using + // environment variables. + if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.IssueCerts.EqualBool(true) { + os.Setenv("TS_CERT_SHARE_MODE", "rw") + } else { + os.Setenv("TS_CERT_SHARE_MODE", "ro") + } + st, err := getStateStore(cfg.Parsed.State, logger) if err != nil { return err @@ -115,10 +181,6 @@ func run(logger *zap.SugaredLogger) error { ts.Hostname = *cfg.Parsed.Hostname } - // ctx to live for the lifetime of the process. - ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) - defer cancel() - // Make sure we crash loop if Up doesn't complete in reasonable time. 
upCtx, upCancel := context.WithTimeout(ctx, time.Minute) defer upCancel() @@ -126,9 +188,6 @@ func run(logger *zap.SugaredLogger) error { return fmt.Errorf("error starting tailscale server: %w", err) } defer ts.Close() - - group, groupCtx := errgroup.WithContext(ctx) - lc, err := ts.LocalClient() if err != nil { return fmt.Errorf("error getting local client: %w", err) @@ -136,23 +195,13 @@ func run(logger *zap.SugaredLogger) error { // Setup for updating state keys. if podUID != "" { - w, err := lc.WatchIPNBus(groupCtx, ipn.NotifyInitialNetMap) - if err != nil { - return fmt.Errorf("error watching IPN bus: %w", err) - } - defer w.Close() - group.Go(func() error { - if err := state.KeepKeysUpdated(st, w.Next); err != nil && err != groupCtx.Err() { - return fmt.Errorf("error keeping state keys updated: %w", err) - } - - return nil + return state.KeepKeysUpdated(ctx, st, klc.New(lc)) }) } if cfg.Parsed.AcceptRoutes != nil { - _, err = lc.EditPrefs(groupCtx, &ipn.MaskedPrefs{ + _, err = lc.EditPrefs(ctx, &ipn.MaskedPrefs{ RouteAllSet: true, Prefs: ipn.Prefs{RouteAll: *cfg.Parsed.AcceptRoutes}, }) @@ -161,34 +210,97 @@ func run(logger *zap.SugaredLogger) error { } } - // Setup for the API server proxy. - restConfig, err := getRestConfig(logger) - if err != nil { - return fmt.Errorf("error getting rest config: %w", err) + // TODO(tomhjp): There seems to be a bug that on restart the device does + // not get reassigned it's already working Service IPs unless we clear and + // reset the serve config. 
+ if err := lc.SetServeConfig(ctx, &ipn.ServeConfig{}); err != nil { + return fmt.Errorf("error clearing existing ServeConfig: %w", err) } - authMode := true - if cfg.Parsed.KubeAPIServer != nil { - v, ok := cfg.Parsed.KubeAPIServer.AuthMode.Get() - if ok { - authMode = v + + var cm *certs.CertManager + if shouldIssueCerts(cfg) { + logger.Infof("Will issue TLS certs for Tailscale Service") + cm = certs.NewCertManager(klc.New(lc), logger.Infof) + } + if err := setServeConfig(ctx, lc, cm, apiServerProxyService(cfg)); err != nil { + return err + } + + if cfg.Parsed.AdvertiseServices != nil { + if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: cfg.Parsed.AdvertiseServices, + }, + }); err != nil { + return fmt.Errorf("error setting prefs AdvertiseServices: %w", err) } } - ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode) + + // Setup for the API server proxy. + authMode := true + if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.AuthMode.EqualBool(false) { + authMode = false + } + ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode, false) if err != nil { return fmt.Errorf("error creating api server proxy: %w", err) } - // TODO(tomhjp): Work out whether we should use TS_CERT_SHARE_MODE or not, - // and possibly issue certs upfront here before serving. group.Go(func() error { - if err := ap.Run(groupCtx); err != nil { + if err := ap.Run(serveCtx); err != nil { return fmt.Errorf("error running API server proxy: %w", err) } return nil }) - return group.Wait() + for { + select { + case <-ctx.Done(): + // Context cancelled, exit. 
+ logger.Info("Context cancelled, exiting") + shutdownCtx, shutdownCancel := context.WithTimeout(serveCtx, 20*time.Second) + unadvertiseErr := services.EnsureServicesNotAdvertised(shutdownCtx, lc, logger.Infof) + shutdownCancel() + serveCancel() + return errors.Join(unadvertiseErr, group.Wait()) + case cfg = <-cfgChan: + // Handle config reload. + // TODO(tomhjp): Make auth mode reloadable. + var prefs ipn.MaskedPrefs + cfgLogger := logger + currentPrefs, err := lc.GetPrefs(ctx) + if err != nil { + return fmt.Errorf("error getting current prefs: %w", err) + } + if !slices.Equal(currentPrefs.AdvertiseServices, cfg.Parsed.AdvertiseServices) { + cfgLogger = cfgLogger.With("AdvertiseServices", fmt.Sprintf("%v -> %v", currentPrefs.AdvertiseServices, cfg.Parsed.AdvertiseServices)) + prefs.AdvertiseServicesSet = true + prefs.Prefs.AdvertiseServices = cfg.Parsed.AdvertiseServices + } + if cfg.Parsed.Hostname != nil && *cfg.Parsed.Hostname != currentPrefs.Hostname { + cfgLogger = cfgLogger.With("Hostname", fmt.Sprintf("%s -> %s", currentPrefs.Hostname, *cfg.Parsed.Hostname)) + prefs.HostnameSet = true + prefs.Hostname = *cfg.Parsed.Hostname + } + if cfg.Parsed.AcceptRoutes != nil && *cfg.Parsed.AcceptRoutes != currentPrefs.RouteAll { + cfgLogger = cfgLogger.With("AcceptRoutes", fmt.Sprintf("%v -> %v", currentPrefs.RouteAll, *cfg.Parsed.AcceptRoutes)) + prefs.RouteAllSet = true + prefs.Prefs.RouteAll = *cfg.Parsed.AcceptRoutes + } + if !prefs.IsEmpty() { + if _, err := lc.EditPrefs(ctx, &prefs); err != nil { + return fmt.Errorf("error editing prefs: %w", err) + } + } + if err := setServeConfig(ctx, lc, cm, apiServerProxyService(cfg)); err != nil { + return fmt.Errorf("error setting serve config: %w", err) + } + + cfgLogger.Infof("Config reloaded") + } + } } func getStateStore(path *string, logger *zap.SugaredLogger) (ipn.StateStore, error) { @@ -226,3 +338,79 @@ func getRestConfig(logger *zap.SugaredLogger) (*rest.Config, error) { return restConfig, nil } + +func 
apiServerProxyService(cfg *conf.Config) tailcfg.ServiceName { + if cfg.Parsed.APIServerProxy != nil && + cfg.Parsed.APIServerProxy.Enabled.EqualBool(true) && + cfg.Parsed.APIServerProxy.ServiceName != nil && + *cfg.Parsed.APIServerProxy.ServiceName != "" { + return tailcfg.ServiceName(*cfg.Parsed.APIServerProxy.ServiceName) + } + + return "" +} + +func shouldIssueCerts(cfg *conf.Config) bool { + return cfg.Parsed.APIServerProxy != nil && + cfg.Parsed.APIServerProxy.IssueCerts.EqualBool(true) +} + +// setServeConfig sets up serve config such that it's serving for the passed in +// Tailscale Service, and does nothing if it's already up to date. +func setServeConfig(ctx context.Context, lc *local.Client, cm *certs.CertManager, name tailcfg.ServiceName) error { + existingServeConfig, err := lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("error getting existing serve config: %w", err) + } + + // Ensure serve config is cleared if no Tailscale Service. + if name == "" { + if reflect.DeepEqual(*existingServeConfig, ipn.ServeConfig{}) { + // Already up to date. + return nil + } + + if cm != nil { + cm.EnsureCertLoops(ctx, &ipn.ServeConfig{}) + } + return lc.SetServeConfig(ctx, &ipn.ServeConfig{}) + } + + status, err := lc.StatusWithoutPeers(ctx) + if err != nil { + return fmt.Errorf("error getting local client status: %w", err) + } + serviceHostPort := ipn.HostPort(fmt.Sprintf("%s.%s:443", name.WithoutPrefix(), status.CurrentTailnet.MagicDNSSuffix)) + + serveConfig := ipn.ServeConfig{ + // Configure for the Service hostname. 
+ Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + name: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + HTTPS: true, + }, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + serviceHostPort: { + Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: fmt.Sprintf("http://%s:80", strings.TrimSuffix(status.Self.DNSName, ".")), + }, + }, + }, + }, + }, + }, + } + + if reflect.DeepEqual(*existingServeConfig, serveConfig) { + // Already up to date. + return nil + } + + if cm != nil { + cm.EnsureCertLoops(ctx, &serveConfig) + } + return lc.SetServeConfig(ctx, &serveConfig) +} diff --git a/internal/client/tailscale/vip_service.go b/internal/client/tailscale/vip_service.go index 64fcfdf5e86d6..48c59ce4569da 100644 --- a/internal/client/tailscale/vip_service.go +++ b/internal/client/tailscale/vip_service.go @@ -36,6 +36,11 @@ type VIPService struct { Tags []string `json:"tags,omitempty"` } +// VIPServiceList represents the JSON response to the list VIP Services API. +type VIPServiceList struct { + VIPServices []VIPService `json:"vipServices"` +} + // GetVIPService retrieves a VIPService by its name. It returns 404 if the VIPService is not found. func (client *Client) GetVIPService(ctx context.Context, name tailcfg.ServiceName) (*VIPService, error) { path := client.BuildTailnetURL("vip-services", name.String()) @@ -59,6 +64,29 @@ func (client *Client) GetVIPService(ctx context.Context, name tailcfg.ServiceNam return svc, nil } +// ListVIPServices retrieves all existing Services and returns them as a list. 
+func (client *Client) ListVIPServices(ctx context.Context) (*VIPServiceList, error) { + path := client.BuildTailnetURL("vip-services") + req, err := http.NewRequestWithContext(ctx, httpm.GET, path, nil) + if err != nil { + return nil, fmt.Errorf("error creating new HTTP request: %w", err) + } + b, resp, err := SendRequest(client, req) + if err != nil { + return nil, fmt.Errorf("error making Tailscale API request: %w", err) + } + // If status code was not successful, return the error. + // TODO: Change the check for the StatusCode to include other 2XX success codes. + if resp.StatusCode != http.StatusOK { + return nil, HandleErrorResponse(b, resp) + } + result := &VIPServiceList{} + if err := json.Unmarshal(b, result); err != nil { + return nil, err + } + return result, nil +} + // CreateOrUpdateVIPService creates or updates a VIPService by its name. Caller must ensure that, if the // VIPService already exists, the VIPService is fetched first to ensure that any auto-allocated IP addresses are not // lost during the update. If the VIPService was created without any IP addresses explicitly set (so that they were diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 14025bbb4150a..a9ad514e755b2 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -394,8 +394,8 @@ func (s *Store) canPatchSecret(secret string) bool { // certSecretSelector returns a label selector that can be used to list all // Secrets that aren't Tailscale state Secrets and contain TLS certificates for // HTTPS endpoints that this node serves. -// Currently (3/2025) this only applies to the Kubernetes Operator's ingress -// ProxyGroup. +// Currently (7/2025) this only applies to the Kubernetes Operator's ProxyGroup +// when spec.Type is "ingress" or "kube-apiserver".
func (s *Store) certSecretSelector() map[string]string { if s.podName == "" { return map[string]string{} @@ -406,7 +406,7 @@ func (s *Store) certSecretSelector() map[string]string { } pgName := s.podName[:p] return map[string]string{ - kubetypes.LabelSecretType: "certs", + kubetypes.LabelSecretType: kubetypes.LabelSecretTypeCerts, kubetypes.LabelManaged: "true", "tailscale.com/proxy-group": pgName, } diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 0d709264e5c08..9a49f30288840 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -17,6 +17,7 @@ import ( "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" ) func TestWriteState(t *testing.T) { @@ -516,7 +517,7 @@ func TestNewWithClient(t *testing.T) { ) certSecretsLabels := map[string]string{ - "tailscale.com/secret-type": "certs", + "tailscale.com/secret-type": kubetypes.LabelSecretTypeCerts, "tailscale.com/managed": "true", "tailscale.com/proxy-group": "ingress-proxies", } @@ -582,7 +583,7 @@ func TestNewWithClient(t *testing.T) { makeSecret("app2.tailnetxyz.ts.net", certSecretsLabels, "2"), makeSecret("some-other-secret", nil, "3"), makeSecret("app3.other-proxies.ts.net", map[string]string{ - "tailscale.com/secret-type": "certs", + "tailscale.com/secret-type": kubetypes.LabelSecretTypeCerts, "tailscale.com/managed": "true", "tailscale.com/proxy-group": "some-other-proxygroup", }, "4"), @@ -606,7 +607,7 @@ func TestNewWithClient(t *testing.T) { makeSecret("app2.tailnetxyz.ts.net", certSecretsLabels, "2"), makeSecret("some-other-secret", nil, "3"), makeSecret("app3.other-proxies.ts.net", map[string]string{ - "tailscale.com/secret-type": "certs", + "tailscale.com/secret-type": kubetypes.LabelSecretTypeCerts, "tailscale.com/managed": "true", "tailscale.com/proxy-group": "some-other-proxygroup", }, "4"), diff --git a/k8s-operator/api-proxy/proxy.go 
b/k8s-operator/api-proxy/proxy.go index d33c088de78db..e079e984ff5a1 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -10,6 +10,7 @@ import ( "crypto/tls" "errors" "fmt" + "net" "net/http" "net/http/httputil" "net/netip" @@ -46,7 +47,7 @@ var ( // caller's Tailscale identity and the rules defined in the tailnet ACLs. // - false: the proxy is started and requests are passed through to the // Kubernetes API without any auth modifications. -func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, authMode bool) (*APIServerProxy, error) { +func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, authMode bool, https bool) (*APIServerProxy, error) { if !authMode { restConfig = rest.AnonymousClientConfig(restConfig) } @@ -85,6 +86,7 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn log: zlog, lc: lc, authMode: authMode, + https: https, upstreamURL: u, ts: ts, } @@ -104,11 +106,6 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn // // It return when ctx is cancelled or ServeTLS fails. 
func (ap *APIServerProxy) Run(ctx context.Context) error { - ln, err := ap.ts.Listen("tcp", ":443") - if err != nil { - return fmt.Errorf("could not listen on :443: %v", err) - } - mux := http.NewServeMux() mux.HandleFunc("/", ap.serveDefault) mux.HandleFunc("POST /api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExecSPDY) @@ -117,32 +114,61 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/attach", ap.serveAttachWS) ap.hs = &http.Server{ + Handler: mux, + ErrorLog: zap.NewStdLog(ap.log.Desugar()), + } + + mode := "noauth" + if ap.authMode { + mode = "auth" + } + var tsLn net.Listener + var serve func(ln net.Listener) error + if ap.https { + var err error + tsLn, err = ap.ts.Listen("tcp", ":443") + if err != nil { + return fmt.Errorf("could not listen on :443: %w", err) + } + serve = func(ln net.Listener) error { + return ap.hs.ServeTLS(ln, "", "") + } + // Kubernetes uses SPDY for exec and port-forward, however SPDY is // incompatible with HTTP/2; so disable HTTP/2 in the proxy. 
- TLSConfig: &tls.Config{ + ap.hs.TLSConfig = &tls.Config{ GetCertificate: ap.lc.GetCertificate, NextProtos: []string{"http/1.1"}, - }, - TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: mux, + } + ap.hs.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) + } else { + var err error + tsLn, err = ap.ts.Listen("tcp", ":80") + if err != nil { + return fmt.Errorf("could not listen on :80: %w", err) + } + serve = ap.hs.Serve } errs := make(chan error) go func() { - ap.log.Infof("API server proxy is listening on %s with auth mode: %v", ln.Addr(), ap.authMode) - if err := ap.hs.ServeTLS(ln, "", ""); err != nil && err != http.ErrServerClosed { - errs <- fmt.Errorf("failed to serve: %w", err) + ap.log.Infof("API server proxy in %s mode is listening on tailnet addresses %s", mode, tsLn.Addr()) + if err := serve(tsLn); err != nil && err != http.ErrServerClosed { + errs <- fmt.Errorf("error serving: %w", err) } }() select { case <-ctx.Done(): - shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - return ap.hs.Shutdown(shutdownCtx) case err := <-errs: + ap.hs.Close() return err } + + // Graceful shutdown with a timeout of 10s. + shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + return ap.hs.Shutdown(shutdownCtx) } // APIServerProxy is an [net/http.Handler] that authenticates requests using the Tailscale @@ -152,7 +178,8 @@ type APIServerProxy struct { lc *local.Client rp *httputil.ReverseProxy - authMode bool + authMode bool // Whether to run with impersonation using caller's tailnet identity. + https bool // Whether to serve on https for the device hostname; true for k8s-operator, false for k8s-proxy. 
ts *tsnet.Server hs *http.Server upstreamURL *url.URL @@ -181,13 +208,13 @@ func (ap *APIServerProxy) serveExecWS(w http.ResponseWriter, r *http.Request) { ap.sessionForProto(w, r, ksr.ExecSessionType, ksr.WSProtocol) } -// serveExecSPDY serves '/attach' requests for sessions streamed over SPDY, +// serveAttachSPDY serves '/attach' requests for sessions streamed over SPDY, // optionally configuring the kubectl exec sessions to be recorded. func (ap *APIServerProxy) serveAttachSPDY(w http.ResponseWriter, r *http.Request) { ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.SPDYProtocol) } -// serveExecWS serves '/attach' requests for sessions streamed over WebSocket, +// serveAttachWS serves '/attach' requests for sessions streamed over WebSocket, // optionally configuring the kubectl exec sessions to be recorded. func (ap *APIServerProxy) serveAttachWS(w http.ResponseWriter, r *http.Request) { ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.WSProtocol) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index c09152da6f6c1..cd36798d69f8b 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -342,6 +342,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `mode` _[APIServerProxyMode](#apiserverproxymode)_ | Mode to run the API server proxy in. Supported modes are auth and noauth.
In auth mode, requests from the tailnet proxied over to the Kubernetes
API server are additionally impersonated using the sender's tailnet identity.
If not specified, defaults to auth mode. | | Enum: [auth noauth]
Type: string
| +| `hostname` _string_ | Hostname is the hostname with which to expose the Kubernetes API server
proxies. Must be a valid DNS label no longer than 63 characters. If not
specified, the name of the ProxyGroup is used as the hostname. Must be
unique across the whole tailnet. | | Pattern: `^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$`
Type: string
| #### LabelValue @@ -610,15 +611,22 @@ _Appears in:_ ProxyGroup defines a set of Tailscale devices that will act as proxies. -Currently only egress ProxyGroups are supported. +Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver +proxies. In addition to running a highly available set of proxies, ingress +and egress ProxyGroups also allow for serving many annotated Services from a +single set of proxies to minimise resource consumption. -Use the tailscale.com/proxy-group annotation on a Service to specify that -the egress proxy should be implemented by a ProxyGroup instead of a single -dedicated proxy. In addition to running a highly available set of proxies, -ProxyGroup also allows for serving many annotated Services from a single -set of proxies to minimise resource consumption. +For ingress and egress, use the tailscale.com/proxy-group annotation on a +Service to specify that the proxy should be implemented by a ProxyGroup +instead of a single dedicated proxy. -More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +More info: +* https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +* https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress + +For kube-apiserver, the ProxyGroup is a standalone resource. Use the +spec.kubeAPIServer field to configure options specific to the kube-apiserver +ProxyGroup type. @@ -690,8 +698,9 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`.
`ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled
and ready. `ProxyGroupAvailable` indicates that at least one proxy is
ready to serve traffic. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | List of status conditions to indicate the status of the ProxyGroup
resources. Known condition types include `ProxyGroupReady` and
`ProxyGroupAvailable`.
* `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and
all expected conditions are true.
* `ProxyGroupAvailable` indicates that at least one proxy is ready to
serve traffic.
For ProxyGroups of type kube-apiserver, there are two additional conditions:
* `KubeAPIServerProxyConfigured` indicates that at least one API server
proxy is configured and ready to serve traffic.
* `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is
valid. | | | | `devices` _[TailnetDevice](#tailnetdevice) array_ | List of tailnet devices associated with the ProxyGroup StatefulSet. | | | +| `url` _string_ | URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if
any. Only applies to ProxyGroups of type kube-apiserver. | | | #### ProxyGroupType diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index 88fd07346cd5b..ce6a1411b9ea8 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -226,4 +226,7 @@ const ( IngressSvcValid ConditionType = `TailscaleIngressSvcValid` IngressSvcConfigured ConditionType = `TailscaleIngressSvcConfigured` + + KubeAPIServerProxyValid ConditionType = `KubeAPIServerProxyValid` // The kubeAPIServer config for the ProxyGroup is valid. + KubeAPIServerProxyConfigured ConditionType = `KubeAPIServerProxyConfigured` // At least one of the ProxyGroup's Pods is advertising the kube-apiserver proxy's hostname. ) diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index ad5b113612bbf..28fd9e00973c5 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -13,19 +13,27 @@ import ( // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Cluster,shortName=pg // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyGroupReady")].reason`,description="Status of the deployed ProxyGroup resources." +// +kubebuilder:printcolumn:name="URL",type="string",JSONPath=`.status.url`,description="URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if any. Only applies to ProxyGroups of type kube-apiserver." // +kubebuilder:printcolumn:name="Type",type="string",JSONPath=`.spec.type`,description="ProxyGroup type." // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // ProxyGroup defines a set of Tailscale devices that will act as proxies. -// Currently only egress ProxyGroups are supported. +// Depending on spec.Type, it can be a group of egress, ingress, or kube-apiserver +// proxies. 
In addition to running a highly available set of proxies, ingress +// and egress ProxyGroups also allow for serving many annotated Services from a +// single set of proxies to minimise resource consumption. // -// Use the tailscale.com/proxy-group annotation on a Service to specify that -// the egress proxy should be implemented by a ProxyGroup instead of a single -// dedicated proxy. In addition to running a highly available set of proxies, -// ProxyGroup also allows for serving many annotated Services from a single -// set of proxies to minimise resource consumption. +// For ingress and egress, use the tailscale.com/proxy-group annotation on a +// Service to specify that the proxy should be implemented by a ProxyGroup +// instead of a single dedicated proxy. // -// More info: https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +// More info: +// * https://tailscale.com/kb/1438/kubernetes-operator-cluster-egress +// * https://tailscale.com/kb/1439/kubernetes-operator-cluster-ingress +// +// For kube-apiserver, the ProxyGroup is a standalone resource. Use the +// spec.kubeAPIServer field to configure options specific to the kube-apiserver +// ProxyGroup type. type ProxyGroup struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -93,10 +101,20 @@ type ProxyGroupSpec struct { type ProxyGroupStatus struct { // List of status conditions to indicate the status of the ProxyGroup - // resources. Known condition types are `ProxyGroupReady`, `ProxyGroupAvailable`. - // `ProxyGroupReady` indicates all ProxyGroup resources are fully reconciled - // and ready. `ProxyGroupAvailable` indicates that at least one proxy is - // ready to serve traffic. + // resources. Known condition types include `ProxyGroupReady` and + // `ProxyGroupAvailable`. + // + // * `ProxyGroupReady` indicates all ProxyGroup resources are reconciled and + // all expected conditions are true. 
+ // * `ProxyGroupAvailable` indicates that at least one proxy is ready to + // serve traffic. + // + // For ProxyGroups of type kube-apiserver, there are two additional conditions: + // + // * `KubeAPIServerProxyConfigured` indicates that at least one API server + // proxy is configured and ready to serve traffic. + // * `KubeAPIServerProxyValid` indicates that spec.kubeAPIServer config is + // valid. // // +listType=map // +listMapKey=type @@ -108,6 +126,11 @@ type ProxyGroupStatus struct { // +listMapKey=hostname // +optional Devices []TailnetDevice `json:"devices,omitempty"` + + // URL of the kube-apiserver proxy advertised by the ProxyGroup devices, if + // any. Only applies to ProxyGroups of type kube-apiserver. + // +optional + URL string `json:"url,omitempty"` } type TailnetDevice struct { @@ -157,4 +180,13 @@ type KubeAPIServerConfig struct { // If not specified, defaults to auth mode. // +optional Mode *APIServerProxyMode `json:"mode,omitempty"` + + // Hostname is the hostname with which to expose the Kubernetes API server + // proxies. Must be a valid DNS label no longer than 63 characters. If not + // specified, the name of the ProxyGroup is used as the hostname. Must be + // unique across the whole tailnet. 
+ // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern=`^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$` + // +optional + Hostname string `json:"hostname,omitempty"` } diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index f6858c0059162..ae465a728f0ff 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -146,6 +146,16 @@ func ProxyGroupAvailable(pg *tsapi.ProxyGroup) bool { return cond != nil && cond.Status == metav1.ConditionTrue } +func KubeAPIServerProxyValid(pg *tsapi.ProxyGroup) (valid bool, set bool) { + cond := proxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid) + return cond != nil && cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation, cond != nil +} + +func KubeAPIServerProxyConfigured(pg *tsapi.ProxyGroup) bool { + cond := proxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured) + return cond != nil && cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == pg.Generation +} + func proxyGroupCondition(pg *tsapi.ProxyGroup, condType tsapi.ConditionType) *metav1.Condition { idx := xslices.IndexFunc(pg.Status.Conditions, func(cond metav1.Condition) bool { return cond.Type == string(condType) diff --git a/cmd/containerboot/certs.go b/kube/certs/certs.go similarity index 60% rename from cmd/containerboot/certs.go rename to kube/certs/certs.go index 504ef7988072b..8e2e5fb43a8ac 100644 --- a/cmd/containerboot/certs.go +++ b/kube/certs/certs.go @@ -1,29 +1,32 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux - -package main +// Package certs implements logic to help multiple Kubernetes replicas share TLS +// certs for a common Tailscale Service. 
+package certs import ( "context" "fmt" - "log" "net" + "slices" "sync" "time" "tailscale.com/ipn" + "tailscale.com/kube/localclient" + "tailscale.com/types/logger" "tailscale.com/util/goroutines" "tailscale.com/util/mak" ) -// certManager is responsible for issuing certificates for known domains and for +// CertManager is responsible for issuing certificates for known domains and for // maintaining a loop that re-attempts issuance daily. // Currently cert manager logic is only run on ingress ProxyGroup replicas that are responsible for managing certs for // HA Ingress HTTPS endpoints ('write' replicas). -type certManager struct { - lc localClient +type CertManager struct { + lc localclient.LocalClient + logf logger.Logf tracker goroutines.Tracker // tracks running goroutines mu sync.Mutex // guards the following // certLoops contains a map of DNS names, for which we currently need to @@ -32,11 +35,18 @@ type certManager struct { certLoops map[string]context.CancelFunc } -// ensureCertLoops ensures that, for all currently managed Service HTTPS +func NewCertManager(lc localclient.LocalClient, logf logger.Logf) *CertManager { + return &CertManager{ + lc: lc, + logf: logf, + } +} + +// EnsureCertLoops ensures that, for all currently managed Service HTTPS // endpoints, there is a cert loop responsible for issuing and ensuring the // renewal of the TLS certs. // ServeConfig must not be nil. -func (cm *certManager) ensureCertLoops(ctx context.Context, sc *ipn.ServeConfig) error { +func (cm *CertManager) EnsureCertLoops(ctx context.Context, sc *ipn.ServeConfig) error { if sc == nil { return fmt.Errorf("[unexpected] ensureCertLoops called with nil ServeConfig") } @@ -87,12 +97,18 @@ func (cm *certManager) ensureCertLoops(ctx context.Context, sc *ipn.ServeConfig) // renewed at that point. Renewal here is needed to prevent the shared certs from expiry in edge cases where the 'write' // replica does not get any HTTPS requests. 
// https://letsencrypt.org/docs/integration-guide/#retrying-failures -func (cm *certManager) runCertLoop(ctx context.Context, domain string) { +func (cm *CertManager) runCertLoop(ctx context.Context, domain string) { const ( normalInterval = 24 * time.Hour // regular renewal check initialRetry = 1 * time.Minute // initial backoff after a failure maxRetryInterval = 24 * time.Hour // max backoff period ) + + if err := cm.waitForCertDomain(ctx, domain); err != nil { + // Best-effort, log and continue with the issuing loop. + cm.logf("error waiting for cert domain %s: %v", domain, err) + } + timer := time.NewTimer(0) // fire off timer immediately defer timer.Stop() retryCount := 0 @@ -101,38 +117,31 @@ func (cm *certManager) runCertLoop(ctx context.Context, domain string) { case <-ctx.Done(): return case <-timer.C: - // We call the certificate endpoint, but don't do anything - // with the returned certs here. - // The call to the certificate endpoint will ensure that - // certs are issued/renewed as needed and stored in the - // relevant state store. For example, for HA Ingress - // 'write' replica, the cert and key will be stored in a - // Kubernetes Secret named after the domain for which we - // are issuing. - // Note that renewals triggered by the call to the - // certificates endpoint here and by renewal check - // triggered during a call to node's HTTPS endpoint - // share the same state/renewal lock mechanism, so we - // should not run into redundant issuances during - // concurrent renewal checks. - // TODO(irbekrm): maybe it is worth adding a new - // issuance endpoint that explicitly only triggers - // issuance and stores certs in the relevant store, but - // does not return certs to the caller? + // We call the certificate endpoint, but don't do anything with the + // returned certs here. The call to the certificate endpoint will + // ensure that certs are issued/renewed as needed and stored in the + // relevant state store. 
For example, for HA Ingress 'write' replica, + // the cert and key will be stored in a Kubernetes Secret named after + // the domain for which we are issuing. + // + // Note that renewals triggered by the call to the certificates + // endpoint here and by renewal check triggered during a call to + // node's HTTPS endpoint share the same state/renewal lock mechanism, + // so we should not run into redundant issuances during concurrent + // renewal checks. - // An issuance holds a shared lock, so we need to avoid - // a situation where other services cannot issue certs - // because a single one is holding the lock. + // An issuance holds a shared lock, so we need to avoid a situation + // where other services cannot issue certs because a single one is + // holding the lock. ctxT, cancel := context.WithTimeout(ctx, time.Second*300) - defer cancel() _, _, err := cm.lc.CertPair(ctxT, domain) + cancel() if err != nil { - log.Printf("error refreshing certificate for %s: %v", domain, err) + cm.logf("error refreshing certificate for %s: %v", domain, err) } var nextInterval time.Duration - // TODO(irbekrm): distinguish between LE rate limit - // errors and other error types like transient network - // errors. + // TODO(irbekrm): distinguish between LE rate limit errors and other + // error types like transient network errors. if err == nil { retryCount = 0 nextInterval = normalInterval @@ -147,10 +156,34 @@ func (cm *certManager) runCertLoop(ctx context.Context, domain string) { backoff = maxRetryInterval } nextInterval = backoff - log.Printf("Error refreshing certificate for %s (retry %d): %v. Will retry in %v\n", + cm.logf("Error refreshing certificate for %s (retry %d): %v. Will retry in %v\n", domain, retryCount, err, nextInterval) } timer.Reset(nextInterval) } } } + +// waitForCertDomain ensures the requested domain is in the list of allowed +// domains before issuing the cert for the first time. 
+func (cm *CertManager) waitForCertDomain(ctx context.Context, domain string) error { + w, err := cm.lc.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("error watching IPN bus: %w", err) + } + defer w.Close() + + for { + n, err := w.Next() + if err != nil { + return err + } + if n.NetMap == nil { + continue + } + + if slices.Contains(n.NetMap.DNS.CertDomains, domain) { + return nil + } + } +} diff --git a/cmd/containerboot/certs_test.go b/kube/certs/certs_test.go similarity index 89% rename from cmd/containerboot/certs_test.go rename to kube/certs/certs_test.go index 577311ea36a64..8434f21ae6976 100644 --- a/cmd/containerboot/certs_test.go +++ b/kube/certs/certs_test.go @@ -1,17 +1,18 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux - -package main +package certs import ( "context" + "log" "testing" "time" "tailscale.com/ipn" + "tailscale.com/kube/localclient" "tailscale.com/tailcfg" + "tailscale.com/types/netmap" ) // TestEnsureCertLoops tests that the certManager correctly starts and stops @@ -161,8 +162,28 @@ func TestEnsureCertLoops(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cm := &certManager{ - lc: &fakeLocalClient{}, + notifyChan := make(chan ipn.Notify) + go func() { + for { + notifyChan <- ipn.Notify{ + NetMap: &netmap.NetworkMap{ + DNS: tailcfg.DNSConfig{ + CertDomains: []string{ + "my-app.tailnetxyz.ts.net", + "my-other-app.tailnetxyz.ts.net", + }, + }, + }, + } + } + }() + cm := &CertManager{ + lc: &localclient.FakeLocalClient{ + FakeIPNBusWatcher: localclient.FakeIPNBusWatcher{ + NotifyChan: notifyChan, + }, + }, + logf: log.Printf, certLoops: make(map[string]context.CancelFunc), } @@ -179,7 +200,7 @@ func TestEnsureCertLoops(t *testing.T) { } })() - err := cm.ensureCertLoops(ctx, tt.initialConfig) + err := cm.EnsureCertLoops(ctx, tt.initialConfig) if (err != nil) != tt.wantErr { t.Fatalf("ensureCertLoops() error = %v", 
err) } @@ -189,7 +210,7 @@ func TestEnsureCertLoops(t *testing.T) { } if tt.updatedConfig != nil { - if err := cm.ensureCertLoops(ctx, tt.updatedConfig); err != nil { + if err := cm.EnsureCertLoops(ctx, tt.updatedConfig); err != nil { t.Fatalf("ensureCertLoops() error on update = %v", err) } diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index 8882360c5ea21..a32e0c03ef2bc 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -9,11 +9,12 @@ package conf import ( "encoding/json" + "errors" "fmt" "net/netip" - "os" "github.com/tailscale/hujson" + "tailscale.com/tailcfg" "tailscale.com/types/opt" ) @@ -21,12 +22,11 @@ const v1Alpha1 = "v1alpha1" // Config describes a config file. type Config struct { - Path string // disk path of HuJSON - Raw []byte // raw bytes from disk, in HuJSON form + Raw []byte // raw bytes, in HuJSON form Std []byte // standardized JSON form Version string // "v1alpha1" - // Parsed is the parsed config, converted from its on-disk version to the + // Parsed is the parsed config, converted from its raw bytes version to the // latest known format. Parsed ConfigV1Alpha1 } @@ -48,47 +48,49 @@ type VersionedConfig struct { } type ConfigV1Alpha1 struct { - AuthKey *string `json:",omitempty"` // Tailscale auth key to use. - Hostname *string `json:",omitempty"` // Tailscale device hostname. - State *string `json:",omitempty"` // Path to the Tailscale state. - LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". - App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer - KubeAPIServer *KubeAPIServer `json:",omitempty"` // Config specific to the API Server proxy. - ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. - AcceptRoutes *bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. + AuthKey *string `json:",omitempty"` // Tailscale auth key to use. 
+ State *string `json:",omitempty"` // Path to the Tailscale state. + LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". + App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer + ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. // StaticEndpoints are additional, user-defined endpoints that this node // should advertise amongst its wireguard endpoints. StaticEndpoints []netip.AddrPort `json:",omitempty"` + + // TODO(tomhjp): The remaining fields should all be reloadable during + // runtime, but currently missing most of the APIServerProxy fields. + Hostname *string `json:",omitempty"` // Tailscale device hostname. + AcceptRoutes *bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. + AdvertiseServices []string `json:",omitempty"` // Tailscale Services to advertise. + APIServerProxy *APIServerProxyConfig `json:",omitempty"` // Config specific to the API Server proxy. } -type KubeAPIServer struct { - AuthMode opt.Bool `json:",omitempty"` +type APIServerProxyConfig struct { + Enabled opt.Bool `json:",omitempty"` // Whether to enable the API Server proxy. + AuthMode opt.Bool `json:",omitempty"` // Run in auth or noauth mode. + ServiceName *tailcfg.ServiceName `json:",omitempty"` // Name of the Tailscale Service to advertise. + IssueCerts opt.Bool `json:",omitempty"` // Whether this replica should issue TLS certs for the Tailscale Service. } // Load reads and parses the config file at the provided path on disk. 
-func Load(path string) (c Config, err error) { - c.Path = path - - c.Raw, err = os.ReadFile(path) - if err != nil { - return c, fmt.Errorf("error reading config file %q: %w", path, err) - } +func Load(raw []byte) (c Config, err error) { + c.Raw = raw c.Std, err = hujson.Standardize(c.Raw) if err != nil { - return c, fmt.Errorf("error parsing config file %q HuJSON/JSON: %w", path, err) + return c, fmt.Errorf("error parsing config as HuJSON/JSON: %w", err) } var ver VersionedConfig if err := json.Unmarshal(c.Std, &ver); err != nil { - return c, fmt.Errorf("error parsing config file %q: %w", path, err) + return c, fmt.Errorf("error parsing config: %w", err) } rootV1Alpha1 := (ver.Version == v1Alpha1) backCompatV1Alpha1 := (ver.V1Alpha1 != nil) switch { case ver.Version == "": - return c, fmt.Errorf("error parsing config file %q: no \"version\" field provided", path) + return c, errors.New("error parsing config: no \"version\" field provided") case rootV1Alpha1 && backCompatV1Alpha1: // Exactly one of these should be set. 
- return c, fmt.Errorf("error parsing config file %q: both root and v1alpha1 config provided", path) + return c, errors.New("error parsing config: both root and v1alpha1 config provided") case rootV1Alpha1 != backCompatV1Alpha1: c.Version = v1Alpha1 switch { @@ -100,7 +102,7 @@ func Load(path string) (c Config, err error) { c.Parsed = ConfigV1Alpha1{} } default: - return c, fmt.Errorf("error parsing config file %q: unsupported \"version\" value %q; want \"%s\"", path, ver.Version, v1Alpha1) + return c, fmt.Errorf("error parsing config: unsupported \"version\" value %q; want \"%s\"", ver.Version, v1Alpha1) } return c, nil diff --git a/kube/k8s-proxy/conf/conf_test.go b/kube/k8s-proxy/conf/conf_test.go index a47391dc90ade..3082be1ba9dcd 100644 --- a/kube/k8s-proxy/conf/conf_test.go +++ b/kube/k8s-proxy/conf/conf_test.go @@ -6,8 +6,6 @@ package conf import ( - "os" - "path/filepath" "strings" "testing" @@ -57,12 +55,7 @@ func TestVersionedConfig(t *testing.T) { for name, tc := range testCases { t.Run(name, func(t *testing.T) { - dir := t.TempDir() - path := filepath.Join(dir, "config.json") - if err := os.WriteFile(path, []byte(tc.inputConfig), 0644); err != nil { - t.Fatalf("failed to write config file: %v", err) - } - cfg, err := Load(path) + cfg, err := Load([]byte(tc.inputConfig)) switch { case tc.expectedError == "" && err != nil: t.Fatalf("unexpected error: %v", err) diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 20b0050143c93..5e7d4cd1f1fd1 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -54,4 +54,10 @@ const ( LabelManaged = "tailscale.com/managed" LabelSecretType = "tailscale.com/secret-type" // "config", "state" "certs" + + LabelSecretTypeConfig = "config" + LabelSecretTypeState = "state" + LabelSecretTypeCerts = "certs" + + KubeAPIServerConfigFile = "config.hujson" ) diff --git a/kube/localclient/fake-client.go b/kube/localclient/fake-client.go new file mode 100644 index 0000000000000..7f0a08316634e --- /dev/null 
+++ b/kube/localclient/fake-client.go @@ -0,0 +1,35 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package localclient + +import ( + "context" + "fmt" + + "tailscale.com/ipn" +) + +type FakeLocalClient struct { + FakeIPNBusWatcher +} + +func (f *FakeLocalClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { + return &f.FakeIPNBusWatcher, nil +} + +func (f *FakeLocalClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { + return nil, nil, fmt.Errorf("CertPair not implemented") +} + +type FakeIPNBusWatcher struct { + NotifyChan chan ipn.Notify +} + +func (f *FakeIPNBusWatcher) Close() error { + return nil +} + +func (f *FakeIPNBusWatcher) Next() (ipn.Notify, error) { + return <-f.NotifyChan, nil +} diff --git a/kube/localclient/local-client.go b/kube/localclient/local-client.go new file mode 100644 index 0000000000000..5d541e3655ddb --- /dev/null +++ b/kube/localclient/local-client.go @@ -0,0 +1,49 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package localclient provides an interface for all the local.Client methods +// kube needs to use, so that we can easily mock it in tests. +package localclient + +import ( + "context" + "io" + + "tailscale.com/client/local" + "tailscale.com/ipn" +) + +// LocalClient is roughly a subset of the local.Client struct's methods, used +// for easier testing. +type LocalClient interface { + WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) + CertIssuer +} + +// IPNBusWatcher is local.IPNBusWatcher's methods restated in an interface to +// allow for easier mocking in tests. +type IPNBusWatcher interface { + io.Closer + Next() (ipn.Notify, error) +} + +type CertIssuer interface { + CertPair(context.Context, string) ([]byte, []byte, error) +} + +// New returns a LocalClient that wraps the provided local.Client. 
+func New(lc *local.Client) LocalClient { + return &localClient{lc: lc} +} + +type localClient struct { + lc *local.Client +} + +func (l *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { + return l.lc.WatchIPNBus(ctx, mask) +} + +func (l *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { + return l.lc.CertPair(ctx, domain) +} diff --git a/cmd/containerboot/services.go b/kube/services/services.go similarity index 74% rename from cmd/containerboot/services.go rename to kube/services/services.go index 6079128c02b19..a9e50975ca9f1 100644 --- a/cmd/containerboot/services.go +++ b/kube/services/services.go @@ -1,25 +1,25 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux - -package main +// Package services manages graceful shutdown of Tailscale Services advertised +// by Kubernetes clients. +package services import ( "context" "fmt" - "log" "time" "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/types/logger" ) -// ensureServicesNotAdvertised is a function that gets called on containerboot -// termination and ensures that any currently advertised VIPServices get -// unadvertised to give clients time to switch to another node before this one -// is shut down. -func ensureServicesNotAdvertised(ctx context.Context, lc *local.Client) error { +// EnsureServicesNotAdvertised is a function that gets called on containerboot +// or k8s-proxy termination and ensures that any currently advertised Services +// get unadvertised to give clients time to switch to another node before this +// one is shut down. 
+func EnsureServicesNotAdvertised(ctx context.Context, lc *local.Client, logf logger.Logf) error { prefs, err := lc.GetPrefs(ctx) if err != nil { return fmt.Errorf("error getting prefs: %w", err) @@ -28,7 +28,7 @@ func ensureServicesNotAdvertised(ctx context.Context, lc *local.Client) error { return nil } - log.Printf("unadvertising services: %v", prefs.AdvertiseServices) + logf("unadvertising services: %v", prefs.AdvertiseServices) if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ AdvertiseServicesSet: true, Prefs: ipn.Prefs{ diff --git a/kube/state/state.go b/kube/state/state.go index 4831a5f5b367a..2605f0952f708 100644 --- a/kube/state/state.go +++ b/kube/state/state.go @@ -11,11 +11,13 @@ package state import ( + "context" "encoding/json" "fmt" "tailscale.com/ipn" "tailscale.com/kube/kubetypes" + klc "tailscale.com/kube/localclient" "tailscale.com/tailcfg" "tailscale.com/util/deephash" ) @@ -56,12 +58,20 @@ func SetInitialKeys(store ipn.StateStore, podUID string) error { // cancelled or it hits an error. The passed in next function is expected to be // from a local.IPNBusWatcher that is at least subscribed to // ipn.NotifyInitialNetMap. -func KeepKeysUpdated(store ipn.StateStore, next func() (ipn.Notify, error)) error { - var currentDeviceID, currentDeviceIPs, currentDeviceFQDN deephash.Sum +func KeepKeysUpdated(ctx context.Context, store ipn.StateStore, lc klc.LocalClient) error { + w, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("error watching IPN bus: %w", err) + } + defer w.Close() + var currentDeviceID, currentDeviceIPs, currentDeviceFQDN deephash.Sum for { - n, err := next() // Blocks on a streaming LocalAPI HTTP call. + n, err := w.Next() // Blocks on a streaming LocalAPI HTTP call. 
if err != nil { + if err == ctx.Err() { + return nil + } return err } if n.NetMap == nil { diff --git a/kube/state/state_test.go b/kube/state/state_test.go index 0375b1c01d91a..8701aa1b7fa65 100644 --- a/kube/state/state_test.go +++ b/kube/state/state_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "tailscale.com/ipn" "tailscale.com/ipn/store" + klc "tailscale.com/kube/localclient" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/types/netmap" @@ -100,24 +101,20 @@ func TestSetInitialStateKeys(t *testing.T) { } func TestKeepStateKeysUpdated(t *testing.T) { - store, err := store.New(logger.Discard, "mem:") - if err != nil { - t.Fatalf("error creating in-memory store: %v", err) + store := fakeStore{ + writeChan: make(chan string), } - nextWaiting := make(chan struct{}) - go func() { - <-nextWaiting // Acknowledge the initial signal. - }() - notifyCh := make(chan ipn.Notify) - next := func() (ipn.Notify, error) { - nextWaiting <- struct{}{} // Send signal to test that state is consistent. - return <-notifyCh, nil // Wait for test input. 
+ errs := make(chan error) + notifyChan := make(chan ipn.Notify) + lc := &klc.FakeLocalClient{ + FakeIPNBusWatcher: klc.FakeIPNBusWatcher{ + NotifyChan: notifyChan, + }, } - errs := make(chan error, 1) go func() { - err := KeepKeysUpdated(store, next) + err := KeepKeysUpdated(t.Context(), store, lc) if err != nil { errs <- fmt.Errorf("keepStateKeysUpdated returned with error: %w", err) } @@ -126,16 +123,12 @@ func TestKeepStateKeysUpdated(t *testing.T) { for _, tc := range []struct { name string notify ipn.Notify - expected map[ipn.StateKey][]byte + expected []string }{ { - name: "initial_not_authed", - notify: ipn.Notify{}, - expected: map[ipn.StateKey][]byte{ - keyDeviceID: nil, - keyDeviceFQDN: nil, - keyDeviceIPs: nil, - }, + name: "initial_not_authed", + notify: ipn.Notify{}, + expected: nil, }, { name: "authed", @@ -148,10 +141,10 @@ func TestKeepStateKeysUpdated(t *testing.T) { }).View(), }, }, - expected: map[ipn.StateKey][]byte{ - keyDeviceID: []byte("TESTCTRL00000001"), - keyDeviceFQDN: []byte("test-node.test.ts.net"), - keyDeviceIPs: []byte(`["100.64.0.1","fd7a:115c:a1e0:ab12:4843:cd96:0:1"]`), + expected: []string{ + fmt.Sprintf("%s=%s", keyDeviceID, "TESTCTRL00000001"), + fmt.Sprintf("%s=%s", keyDeviceFQDN, "test-node.test.ts.net"), + fmt.Sprintf("%s=%s", keyDeviceIPs, `["100.64.0.1","fd7a:115c:a1e0:ab12:4843:cd96:0:1"]`), }, }, { @@ -165,39 +158,39 @@ func TestKeepStateKeysUpdated(t *testing.T) { }).View(), }, }, - expected: map[ipn.StateKey][]byte{ - keyDeviceID: []byte("TESTCTRL00000001"), - keyDeviceFQDN: []byte("updated.test.ts.net"), - keyDeviceIPs: []byte(`["100.64.0.250"]`), + expected: []string{ + fmt.Sprintf("%s=%s", keyDeviceFQDN, "updated.test.ts.net"), + fmt.Sprintf("%s=%s", keyDeviceIPs, `["100.64.0.250"]`), }, }, } { t.Run(tc.name, func(t *testing.T) { - // Send test input. 
- select { - case notifyCh <- tc.notify: - case <-errs: - t.Fatal("keepStateKeysUpdated returned before test input") - case <-time.After(5 * time.Second): - t.Fatal("timed out waiting for next() to be called again") - } - - // Wait for next() to be called again so we know the goroutine has - // processed the event. - select { - case <-nextWaiting: - case <-errs: - t.Fatal("keepStateKeysUpdated returned before test input") - case <-time.After(5 * time.Second): - t.Fatal("timed out waiting for next() to be called again") - } - - for key, value := range tc.expected { - got, _ := store.ReadState(key) - if !bytes.Equal(got, value) { - t.Errorf("state key %q mismatch: expected %q, got %q", key, value, got) + notifyChan <- tc.notify + for _, expected := range tc.expected { + select { + case got := <-store.writeChan: + if got != expected { + t.Errorf("expected %q, got %q", expected, got) + } + case err := <-errs: + t.Fatalf("unexpected error: %v", err) + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for expected write %q", expected) } } }) } } + +type fakeStore struct { + writeChan chan string +} + +func (f fakeStore) ReadState(key ipn.StateKey) ([]byte, error) { + return nil, fmt.Errorf("ReadState not implemented") +} + +func (f fakeStore) WriteState(key ipn.StateKey, value []byte) error { + f.writeChan <- fmt.Sprintf("%s=%s", key, value) + return nil +} From d6d29abbb6878fc777a9a21dd631ec3a8455e4ec Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Mon, 14 Jul 2025 15:23:45 -0500 Subject: [PATCH 0140/1093] tstest/integration/testcontrol: include peer CapMaps in MapResponses Fixes #16560 Signed-off-by: Raj Singh --- tstest/integration/capmap_test.go | 147 ++++++++++++++++++ tstest/integration/testcontrol/testcontrol.go | 4 + 2 files changed, 151 insertions(+) create mode 100644 tstest/integration/capmap_test.go diff --git a/tstest/integration/capmap_test.go b/tstest/integration/capmap_test.go new file mode 100644 index 0000000000000..0ee05be2f57d7 --- 
/dev/null +++ b/tstest/integration/capmap_test.go @@ -0,0 +1,147 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package integration + +import ( + "errors" + "testing" + "time" + + "tailscale.com/tailcfg" + "tailscale.com/tstest" +) + +// TestPeerCapMap tests that the node capability map (CapMap) is included in peer information. +func TestPeerCapMap(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + + // Spin up two nodes. + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + n1.AwaitListening() + n1.MustUp() + n1.AwaitRunning() + + n2 := NewTestNode(t, env) + d2 := n2.StartDaemon() + n2.AwaitListening() + n2.MustUp() + n2.AwaitRunning() + + n1.AwaitIP4() + n2.AwaitIP4() + + // Get the nodes from the control server. + nodes := env.Control.AllNodes() + if len(nodes) != 2 { + t.Fatalf("expected 2 nodes, got %d nodes", len(nodes)) + } + + // Figure out which node is which by comparing keys. + st1 := n1.MustStatus() + var tn1, tn2 *tailcfg.Node + for _, n := range nodes { + if n.Key == st1.Self.PublicKey { + tn1 = n + } else { + tn2 = n + } + } + + // Set CapMap on both nodes. + caps := make(tailcfg.NodeCapMap) + caps["example:custom"] = []tailcfg.RawMessage{`"value"`} + caps["example:enabled"] = []tailcfg.RawMessage{`true`} + + env.Control.SetNodeCapMap(tn1.Key, caps) + env.Control.SetNodeCapMap(tn2.Key, caps) + + // Check that nodes see each other's CapMap. + if err := tstest.WaitFor(10*time.Second, func() error { + st1 := n1.MustStatus() + st2 := n2.MustStatus() + + if len(st1.Peer) == 0 || len(st2.Peer) == 0 { + return errors.New("no peers") + } + + // Check n1 sees n2's CapMap. + p1 := st1.Peer[st1.Peers()[0]] + if p1.CapMap == nil { + return errors.New("peer CapMap is nil") + } + if p1.CapMap["example:custom"] == nil || p1.CapMap["example:enabled"] == nil { + return errors.New("peer CapMap missing entries") + } + + // Check n2 sees n1's CapMap. 
+ p2 := st2.Peer[st2.Peers()[0]] + if p2.CapMap == nil { + return errors.New("peer CapMap is nil") + } + if p2.CapMap["example:custom"] == nil || p2.CapMap["example:enabled"] == nil { + return errors.New("peer CapMap missing entries") + } + + return nil + }); err != nil { + t.Fatal(err) + } + + d1.MustCleanShutdown(t) + d2.MustCleanShutdown(t) +} + +// TestSetNodeCapMap tests that SetNodeCapMap updates are propagated to peers. +func TestSetNodeCapMap(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + n1.AwaitListening() + n1.MustUp() + n1.AwaitRunning() + + nodes := env.Control.AllNodes() + if len(nodes) != 1 { + t.Fatalf("expected 1 node, got %d nodes", len(nodes)) + } + node1 := nodes[0] + + // Set initial CapMap. + caps := make(tailcfg.NodeCapMap) + caps["test:state"] = []tailcfg.RawMessage{`"initial"`} + env.Control.SetNodeCapMap(node1.Key, caps) + + // Start second node and verify it sees the first node's CapMap. 
+ n2 := NewTestNode(t, env) + d2 := n2.StartDaemon() + n2.AwaitListening() + n2.MustUp() + n2.AwaitRunning() + + if err := tstest.WaitFor(5*time.Second, func() error { + st := n2.MustStatus() + if len(st.Peer) == 0 { + return errors.New("no peers") + } + p := st.Peer[st.Peers()[0]] + if p.CapMap == nil || p.CapMap["test:state"] == nil { + return errors.New("peer CapMap not set") + } + if string(p.CapMap["test:state"][0]) != `"initial"` { + return errors.New("wrong CapMap value") + } + return nil + }); err != nil { + t.Fatal(err) + } + + d1.MustCleanShutdown(t) + d2.MustCleanShutdown(t) +} diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 71205f897aad8..739795bb3d245 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -1000,7 +1000,11 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, s.mu.Lock() peerAddress := s.masquerades[p.Key][node.Key] routes := s.nodeSubnetRoutes[p.Key] + peerCapMap := maps.Clone(s.nodeCapMaps[p.Key]) s.mu.Unlock() + if peerCapMap != nil { + p.CapMap = peerCapMap + } if peerAddress.IsValid() { if peerAddress.Is6() { p.Addresses[1] = netip.PrefixFrom(peerAddress, peerAddress.BitLen()) From 5d4e67fd937bef4f3ad5ec8e93174a5b6bd7dceb Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 21 Jul 2025 08:36:43 -0700 Subject: [PATCH 0141/1093] net/dns/recursive: set EDNS on queries Updates tailscale/corp#30631 Change-Id: Ib88ea1bb51dd917c04f8d41bcaa6d59b9abd4f73 Signed-off-by: Brad Fitzpatrick --- net/dns/recursive/recursive.go | 1 + 1 file changed, 1 insertion(+) diff --git a/net/dns/recursive/recursive.go b/net/dns/recursive/recursive.go index eb23004d88190..fd865e37ab737 100644 --- a/net/dns/recursive/recursive.go +++ b/net/dns/recursive/recursive.go @@ -547,6 +547,7 @@ func (r *Resolver) queryNameserverProto( // Prepare a message asking for an appropriately-typed record // for the name 
we're querying. m := new(dns.Msg) + m.SetEdns0(1232, false /* no DNSSEC */) m.SetQuestion(name.WithTrailingDot(), uint16(qtype)) // Allow mocking out the network components with our exchange hook. From 1677fb190519710d66354600f659b50af77d7759 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 21 Jul 2025 10:02:37 -0700 Subject: [PATCH 0142/1093] wgengine/magicsock,all: allocate peer relay over disco instead of PeerAPI (#16603) Updates tailscale/corp#30583 Updates tailscale/corp#30534 Updates tailscale/corp#30557 Signed-off-by: Dylan Bargatze Signed-off-by: Jordan Whited Co-authored-by: Dylan Bargatze --- disco/disco.go | 261 +++++++++---- disco/disco_test.go | 41 +- feature/relayserver/relayserver.go | 151 +++---- feature/relayserver/relayserver_test.go | 10 + ipn/ipnlocal/local.go | 2 +- ipn/localapi/localapi.go | 2 +- net/udprelay/endpoint/endpoint.go | 9 + net/udprelay/server.go | 45 +-- tailcfg/tailcfg.go | 3 +- types/key/disco.go | 38 ++ types/key/disco_test.go | 18 + wgengine/magicsock/endpoint.go | 17 +- wgengine/magicsock/magicsock.go | 355 +++++++++++++---- wgengine/magicsock/magicsock_test.go | 335 ++++++++-------- wgengine/magicsock/relaymanager.go | 498 ++++++++++++++---------- wgengine/magicsock/relaymanager_test.go | 42 +- 16 files changed, 1187 insertions(+), 640 deletions(-) diff --git a/disco/disco.go b/disco/disco.go index d4623c119dbcb..1689d2a93da77 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -42,13 +42,15 @@ const NonceLen = 24 type MessageType byte const ( - TypePing = MessageType(0x01) - TypePong = MessageType(0x02) - TypeCallMeMaybe = MessageType(0x03) - TypeBindUDPRelayEndpoint = MessageType(0x04) - TypeBindUDPRelayEndpointChallenge = MessageType(0x05) - TypeBindUDPRelayEndpointAnswer = MessageType(0x06) - TypeCallMeMaybeVia = MessageType(0x07) + TypePing = MessageType(0x01) + TypePong = MessageType(0x02) + TypeCallMeMaybe = MessageType(0x03) + TypeBindUDPRelayEndpoint = MessageType(0x04) + TypeBindUDPRelayEndpointChallenge = 
MessageType(0x05) + TypeBindUDPRelayEndpointAnswer = MessageType(0x06) + TypeCallMeMaybeVia = MessageType(0x07) + TypeAllocateUDPRelayEndpointRequest = MessageType(0x08) + TypeAllocateUDPRelayEndpointResponse = MessageType(0x09) ) const v0 = byte(0) @@ -97,6 +99,10 @@ func Parse(p []byte) (Message, error) { return parseBindUDPRelayEndpointAnswer(ver, p) case TypeCallMeMaybeVia: return parseCallMeMaybeVia(ver, p) + case TypeAllocateUDPRelayEndpointRequest: + return parseAllocateUDPRelayEndpointRequest(ver, p) + case TypeAllocateUDPRelayEndpointResponse: + return parseAllocateUDPRelayEndpointResponse(ver, p) default: return nil, fmt.Errorf("unknown message type 0x%02x", byte(t)) } @@ -381,9 +387,7 @@ func (m *BindUDPRelayEndpointCommon) decode(b []byte) error { } // BindUDPRelayEndpoint is the first messaged transmitted from UDP relay client -// towards UDP relay server as part of the 3-way bind handshake. This message -// type is currently considered experimental and is not yet tied to a -// tailcfg.CapabilityVersion. +// towards UDP relay server as part of the 3-way bind handshake. type BindUDPRelayEndpoint struct { BindUDPRelayEndpointCommon } @@ -405,8 +409,7 @@ func parseBindUDPRelayEndpoint(ver uint8, p []byte) (m *BindUDPRelayEndpoint, er // BindUDPRelayEndpointChallenge is transmitted from UDP relay server towards // UDP relay client in response to a BindUDPRelayEndpoint message as part of the -// 3-way bind handshake. This message type is currently considered experimental -// and is not yet tied to a tailcfg.CapabilityVersion. +// 3-way bind handshake. type BindUDPRelayEndpointChallenge struct { BindUDPRelayEndpointCommon } @@ -427,9 +430,7 @@ func parseBindUDPRelayEndpointChallenge(ver uint8, p []byte) (m *BindUDPRelayEnd } // BindUDPRelayEndpointAnswer is transmitted from UDP relay client to UDP relay -// server in response to a BindUDPRelayEndpointChallenge message. 
This message -// type is currently considered experimental and is not yet tied to a -// tailcfg.CapabilityVersion. +// server in response to a BindUDPRelayEndpointChallenge message. type BindUDPRelayEndpointAnswer struct { BindUDPRelayEndpointCommon } @@ -449,6 +450,168 @@ func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpoi return m, nil } +// AllocateUDPRelayEndpointRequest is a message sent only over DERP to request +// allocation of a relay endpoint on a [tailscale.com/net/udprelay.Server] +type AllocateUDPRelayEndpointRequest struct { + // ClientDisco are the Disco public keys of the clients that should be + // permitted to handshake with the endpoint. + ClientDisco [2]key.DiscoPublic + // Generation represents the allocation request generation. The server must + // echo it back in the [AllocateUDPRelayEndpointResponse] to enable request + // and response alignment client-side. + Generation uint32 +} + +// allocateUDPRelayEndpointRequestLen is the length of a marshaled +// [AllocateUDPRelayEndpointRequest] message without the message header. 
+const allocateUDPRelayEndpointRequestLen = key.DiscoPublicRawLen*2 + // ClientDisco + 4 // Generation + +func (m *AllocateUDPRelayEndpointRequest) AppendMarshal(b []byte) []byte { + ret, p := appendMsgHeader(b, TypeAllocateUDPRelayEndpointRequest, v0, allocateUDPRelayEndpointRequestLen) + for i := 0; i < len(m.ClientDisco); i++ { + disco := m.ClientDisco[i].AppendTo(nil) + copy(p, disco) + p = p[key.DiscoPublicRawLen:] + } + binary.BigEndian.PutUint32(p, m.Generation) + return ret +} + +func parseAllocateUDPRelayEndpointRequest(ver uint8, p []byte) (m *AllocateUDPRelayEndpointRequest, err error) { + m = new(AllocateUDPRelayEndpointRequest) + if ver != 0 { + return + } + if len(p) < allocateUDPRelayEndpointRequestLen { + return m, errShort + } + for i := 0; i < len(m.ClientDisco); i++ { + m.ClientDisco[i] = key.DiscoPublicFromRaw32(mem.B(p[:key.DiscoPublicRawLen])) + p = p[key.DiscoPublicRawLen:] + } + m.Generation = binary.BigEndian.Uint32(p) + return m, nil +} + +// AllocateUDPRelayEndpointResponse is a message sent only over DERP in response +// to a [AllocateUDPRelayEndpointRequest]. +type AllocateUDPRelayEndpointResponse struct { + // Generation represents the allocation request generation. The server must + // echo back the [AllocateUDPRelayEndpointRequest.Generation] here to enable + // request and response alignment client-side. 
+ Generation uint32 + UDPRelayEndpoint +} + +func (m *AllocateUDPRelayEndpointResponse) AppendMarshal(b []byte) []byte { + endpointsLen := epLength * len(m.AddrPorts) + generationLen := 4 + ret, d := appendMsgHeader(b, TypeAllocateUDPRelayEndpointResponse, v0, generationLen+udpRelayEndpointLenMinusAddrPorts+endpointsLen) + binary.BigEndian.PutUint32(d, m.Generation) + m.encode(d[4:]) + return ret +} + +func parseAllocateUDPRelayEndpointResponse(ver uint8, p []byte) (m *AllocateUDPRelayEndpointResponse, err error) { + m = new(AllocateUDPRelayEndpointResponse) + if ver != 0 { + return m, nil + } + if len(p) < 4 { + return m, errShort + } + m.Generation = binary.BigEndian.Uint32(p) + err = m.decode(p[4:]) + return m, err +} + +const udpRelayEndpointLenMinusAddrPorts = key.DiscoPublicRawLen + // ServerDisco + (key.DiscoPublicRawLen * 2) + // ClientDisco + 8 + // LamportID + 4 + // VNI + 8 + // BindLifetime + 8 // SteadyStateLifetime + +// UDPRelayEndpoint is a mirror of [tailscale.com/net/udprelay/endpoint.ServerEndpoint], +// refer to it for field documentation. [UDPRelayEndpoint] is carried in both +// [CallMeMaybeVia] and [AllocateUDPRelayEndpointResponse] messages. 
+type UDPRelayEndpoint struct { + // ServerDisco is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.ServerDisco] + ServerDisco key.DiscoPublic + // ClientDisco is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.ClientDisco] + ClientDisco [2]key.DiscoPublic + // LamportID is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.LamportID] + LamportID uint64 + // VNI is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.VNI] + VNI uint32 + // BindLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.BindLifetime] + BindLifetime time.Duration + // SteadyStateLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.SteadyStateLifetime] + SteadyStateLifetime time.Duration + // AddrPorts is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.AddrPorts] + AddrPorts []netip.AddrPort +} + +// encode encodes m in b. b must be at least [udpRelayEndpointLenMinusAddrPorts] +// + [epLength] * len(m.AddrPorts) bytes long. +func (m *UDPRelayEndpoint) encode(b []byte) { + disco := m.ServerDisco.AppendTo(nil) + copy(b, disco) + b = b[key.DiscoPublicRawLen:] + for i := 0; i < len(m.ClientDisco); i++ { + disco = m.ClientDisco[i].AppendTo(nil) + copy(b, disco) + b = b[key.DiscoPublicRawLen:] + } + binary.BigEndian.PutUint64(b[:8], m.LamportID) + b = b[8:] + binary.BigEndian.PutUint32(b[:4], m.VNI) + b = b[4:] + binary.BigEndian.PutUint64(b[:8], uint64(m.BindLifetime)) + b = b[8:] + binary.BigEndian.PutUint64(b[:8], uint64(m.SteadyStateLifetime)) + b = b[8:] + for _, ipp := range m.AddrPorts { + a := ipp.Addr().As16() + copy(b, a[:]) + binary.BigEndian.PutUint16(b[16:18], ipp.Port()) + b = b[epLength:] + } +} + +// decode decodes m from b. 
+func (m *UDPRelayEndpoint) decode(b []byte) error { + if len(b) < udpRelayEndpointLenMinusAddrPorts+epLength || + (len(b)-udpRelayEndpointLenMinusAddrPorts)%epLength != 0 { + return errShort + } + m.ServerDisco = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) + b = b[key.DiscoPublicRawLen:] + for i := 0; i < len(m.ClientDisco); i++ { + m.ClientDisco[i] = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) + b = b[key.DiscoPublicRawLen:] + } + m.LamportID = binary.BigEndian.Uint64(b[:8]) + b = b[8:] + m.VNI = binary.BigEndian.Uint32(b[:4]) + b = b[4:] + m.BindLifetime = time.Duration(binary.BigEndian.Uint64(b[:8])) + b = b[8:] + m.SteadyStateLifetime = time.Duration(binary.BigEndian.Uint64(b[:8])) + b = b[8:] + m.AddrPorts = make([]netip.AddrPort, 0, len(b)-udpRelayEndpointLenMinusAddrPorts/epLength) + for len(b) > 0 { + var a [16]byte + copy(a[:], b) + m.AddrPorts = append(m.AddrPorts, netip.AddrPortFrom( + netip.AddrFrom16(a).Unmap(), + binary.BigEndian.Uint16(b[16:18]))) + b = b[epLength:] + } + return nil +} + // CallMeMaybeVia is a message sent only over DERP to request that the recipient // try to open up a magicsock path back to the sender. The 'Via' in // CallMeMaybeVia highlights that candidate paths are served through an @@ -464,78 +627,22 @@ func parseBindUDPRelayEndpointAnswer(ver uint8, p []byte) (m *BindUDPRelayEndpoi // The recipient may choose to not open a path back if it's already happy with // its path. Direct connections, e.g. [CallMeMaybe]-signaled, take priority over // CallMeMaybeVia paths. -// -// This message type is currently considered experimental and is not yet tied to -// a [tailscale.com/tailcfg.CapabilityVersion]. 
type CallMeMaybeVia struct { - // ServerDisco is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.ServerDisco] - ServerDisco key.DiscoPublic - // LamportID is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.LamportID] - LamportID uint64 - // VNI is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.VNI] - VNI uint32 - // BindLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.BindLifetime] - BindLifetime time.Duration - // SteadyStateLifetime is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.SteadyStateLifetime] - SteadyStateLifetime time.Duration - // AddrPorts is [tailscale.com/net/udprelay/endpoint.ServerEndpoint.AddrPorts] - AddrPorts []netip.AddrPort + UDPRelayEndpoint } -const cmmvDataLenMinusEndpoints = key.DiscoPublicRawLen + // ServerDisco - 8 + // LamportID - 4 + // VNI - 8 + // BindLifetime - 8 // SteadyStateLifetime - func (m *CallMeMaybeVia) AppendMarshal(b []byte) []byte { endpointsLen := epLength * len(m.AddrPorts) - ret, p := appendMsgHeader(b, TypeCallMeMaybeVia, v0, cmmvDataLenMinusEndpoints+endpointsLen) - disco := m.ServerDisco.AppendTo(nil) - copy(p, disco) - p = p[key.DiscoPublicRawLen:] - binary.BigEndian.PutUint64(p[:8], m.LamportID) - p = p[8:] - binary.BigEndian.PutUint32(p[:4], m.VNI) - p = p[4:] - binary.BigEndian.PutUint64(p[:8], uint64(m.BindLifetime)) - p = p[8:] - binary.BigEndian.PutUint64(p[:8], uint64(m.SteadyStateLifetime)) - p = p[8:] - for _, ipp := range m.AddrPorts { - a := ipp.Addr().As16() - copy(p, a[:]) - binary.BigEndian.PutUint16(p[16:18], ipp.Port()) - p = p[epLength:] - } + ret, p := appendMsgHeader(b, TypeCallMeMaybeVia, v0, udpRelayEndpointLenMinusAddrPorts+endpointsLen) + m.encode(p) return ret } func parseCallMeMaybeVia(ver uint8, p []byte) (m *CallMeMaybeVia, err error) { m = new(CallMeMaybeVia) - if len(p) < cmmvDataLenMinusEndpoints+epLength || - (len(p)-cmmvDataLenMinusEndpoints)%epLength != 0 || - ver != 0 { + if ver != 0 { return m, nil } - m.ServerDisco = 
key.DiscoPublicFromRaw32(mem.B(p[:key.DiscoPublicRawLen])) - p = p[key.DiscoPublicRawLen:] - m.LamportID = binary.BigEndian.Uint64(p[:8]) - p = p[8:] - m.VNI = binary.BigEndian.Uint32(p[:4]) - p = p[4:] - m.BindLifetime = time.Duration(binary.BigEndian.Uint64(p[:8])) - p = p[8:] - m.SteadyStateLifetime = time.Duration(binary.BigEndian.Uint64(p[:8])) - p = p[8:] - m.AddrPorts = make([]netip.AddrPort, 0, len(p)-cmmvDataLenMinusEndpoints/epLength) - for len(p) > 0 { - var a [16]byte - copy(a[:], p) - m.AddrPorts = append(m.AddrPorts, netip.AddrPortFrom( - netip.AddrFrom16(a).Unmap(), - binary.BigEndian.Uint16(p[16:18]))) - p = p[epLength:] - } - return m, nil + err = m.decode(p) + return m, err } diff --git a/disco/disco_test.go b/disco/disco_test.go index 9fb71ff83b73b..71b68338a8c90 100644 --- a/disco/disco_test.go +++ b/disco/disco_test.go @@ -25,6 +25,19 @@ func TestMarshalAndParse(t *testing.T) { }, } + udpRelayEndpoint := UDPRelayEndpoint{ + ServerDisco: key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 30: 30, 31: 31})), + ClientDisco: [2]key.DiscoPublic{key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 3: 3, 30: 30, 31: 31})), key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 4: 4, 30: 30, 31: 31}))}, + LamportID: 123, + VNI: 456, + BindLifetime: time.Second, + SteadyStateLifetime: time.Minute, + AddrPorts: []netip.AddrPort{ + netip.MustParseAddrPort("1.2.3.4:567"), + netip.MustParseAddrPort("[2001::3456]:789"), + }, + } + tests := []struct { name string want string @@ -117,17 +130,25 @@ func TestMarshalAndParse(t *testing.T) { { name: "call_me_maybe_via", m: &CallMeMaybeVia{ - ServerDisco: key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 30: 30, 31: 31})), - LamportID: 123, - VNI: 456, - BindLifetime: time.Second, - SteadyStateLifetime: time.Minute, - AddrPorts: []netip.AddrPort{ - netip.MustParseAddrPort("1.2.3.4:567"), - netip.MustParseAddrPort("[2001::3456]:789"), - }, + UDPRelayEndpoint: udpRelayEndpoint, + }, + want: "07 00 00 01 02 00 00 00 00 00 00 00 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 00 00 00 00 7b 00 00 01 c8 00 00 00 00 3b 9a ca 00 00 00 00 0d f8 47 58 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", + }, + { + name: "allocate_udp_relay_endpoint_request", + m: &AllocateUDPRelayEndpointRequest{ + ClientDisco: [2]key.DiscoPublic{key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 3: 3, 30: 30, 31: 31})), key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 2: 2, 4: 4, 30: 30, 31: 31}))}, + Generation: 1, + }, + want: "08 00 00 01 02 03 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 01", + }, + { + name: "allocate_udp_relay_endpoint_response", + m: &AllocateUDPRelayEndpointResponse{ + Generation: 1, + UDPRelayEndpoint: udpRelayEndpoint, }, - want: "07 00 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 00 00 00 00 7b 00 00 01 c8 00 00 00 00 3b 9a ca 00 00 00 00 0d f8 47 58 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", + want: "09 00 00 00 00 01 00 01 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 03 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 01 02 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 1e 1f 00 00 00 00 00 00 00 7b 00 00 01 c8 00 00 00 00 3b 9a ca 00 00 00 00 0d f8 47 58 00 00 00 00 00 00 00 00 00 00 00 ff ff 01 02 03 04 02 37 20 01 00 00 00 00 00 00 00 00 00 00 00 00 34 56 03 15", }, } for _, tt := range tests { diff --git 
a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index d0ad27624f09f..f4077b5f9da0b 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -6,25 +6,21 @@ package relayserver import ( - "encoding/json" "errors" - "fmt" - "io" - "net/http" "sync" - "time" + "tailscale.com/disco" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" - "tailscale.com/ipn/ipnlocal" "tailscale.com/net/udprelay" "tailscale.com/net/udprelay/endpoint" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" - "tailscale.com/util/httpm" + "tailscale.com/util/eventbus" + "tailscale.com/wgengine/magicsock" ) // featureName is the name of the feature implemented by this package. @@ -34,26 +30,34 @@ const featureName = "relayserver" func init() { feature.Register(featureName) ipnext.RegisterExtension(featureName, newExtension) - ipnlocal.RegisterPeerAPIHandler("/v0/relay/endpoint", handlePeerAPIRelayAllocateEndpoint) } // newExtension is an [ipnext.NewExtensionFn] that creates a new relay server // extension. It is registered with [ipnext.RegisterExtension] if the package is // imported. -func newExtension(logf logger.Logf, _ ipnext.SafeBackend) (ipnext.Extension, error) { - return &extension{logf: logger.WithPrefix(logf, featureName+": ")}, nil +func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + return &extension{ + logf: logger.WithPrefix(logf, featureName+": "), + bus: sb.Sys().Bus.Get(), + }, nil } // extension is an [ipnext.Extension] managing the relay server on platforms // that import this package. 
type extension struct { logf logger.Logf + bus *eventbus.Bus - mu sync.Mutex // guards the following fields + mu sync.Mutex // guards the following fields + eventClient *eventbus.Client // closed to stop consumeEventbusTopics + reqSub *eventbus.Subscriber[magicsock.UDPRelayAllocReq] // receives endpoint alloc requests from magicsock + respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] // publishes endpoint alloc responses to magicsock shutdown bool - port *int // ipn.Prefs.RelayServerPort, nil if disabled - hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer - server relayServer // lazily initialized + port *int // ipn.Prefs.RelayServerPort, nil if disabled + busDoneCh chan struct{} // non-nil if port is non-nil, closed when consumeEventbusTopics returns + hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer + server relayServer // lazily initialized + } // relayServer is the interface of [udprelay.Server]. @@ -77,6 +81,18 @@ func (e *extension) Init(host ipnext.Host) error { return nil } +// initBusConnection initializes the [*eventbus.Client], [*eventbus.Subscriber], +// [*eventbus.Publisher], and [chan struct{}] used to publish/receive endpoint +// allocation messages to/from the [*eventbus.Bus]. It also starts +// consumeEventbusTopics in a separate goroutine. 
+func (e *extension) initBusConnection() { + e.eventClient = e.bus.Client("relayserver.extension") + e.reqSub = eventbus.Subscribe[magicsock.UDPRelayAllocReq](e.eventClient) + e.respPub = eventbus.Publish[magicsock.UDPRelayAllocResp](e.eventClient) + e.busDoneCh = make(chan struct{}) + go e.consumeEventbusTopics() +} + func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { e.mu.Lock() defer e.mu.Unlock() @@ -98,11 +114,57 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV e.server.Close() e.server = nil } + if e.port != nil { + e.eventClient.Close() + <-e.busDoneCh + } e.port = nil if ok { e.port = ptr.To(newPort) + e.initBusConnection() + } + } +} + +func (e *extension) consumeEventbusTopics() { + defer close(e.busDoneCh) + + for { + select { + case <-e.reqSub.Done(): + // If reqSub is done, the eventClient has been closed, which is a + // signal to return. + return + case req := <-e.reqSub.Events(): + rs, err := e.relayServerOrInit() + if err != nil { + e.logf("error initializing server: %v", err) + continue + } + se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) + if err != nil { + e.logf("error allocating endpoint: %v", err) + continue + } + e.respPub.Publish(magicsock.UDPRelayAllocResp{ + ReqRxFromNodeKey: req.RxFromNodeKey, + ReqRxFromDiscoKey: req.RxFromDiscoKey, + Message: &disco.AllocateUDPRelayEndpointResponse{ + Generation: req.Message.Generation, + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, + }, + }) } } + } // Shutdown implements [ipnlocal.Extension]. 
@@ -114,6 +176,10 @@ func (e *extension) Shutdown() error { e.server.Close() e.server = nil } + if e.port != nil { + e.eventClient.Close() + <-e.busDoneCh + } return nil } @@ -139,60 +205,3 @@ func (e *extension) relayServerOrInit() (relayServer, error) { } return e.server, nil } - -func handlePeerAPIRelayAllocateEndpoint(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { - e, ok := ipnlocal.GetExt[*extension](h.LocalBackend()) - if !ok { - http.Error(w, "relay failed to initialize", http.StatusServiceUnavailable) - return - } - - httpErrAndLog := func(message string, code int) { - http.Error(w, message, code) - h.Logf("relayserver: request from %v returned code %d: %s", h.RemoteAddr(), code, message) - } - - if !h.PeerCaps().HasCapability(tailcfg.PeerCapabilityRelay) { - httpErrAndLog("relay not permitted", http.StatusForbidden) - return - } - - if r.Method != httpm.POST { - httpErrAndLog("only POST method is allowed", http.StatusMethodNotAllowed) - return - } - - var allocateEndpointReq struct { - DiscoKeys []key.DiscoPublic - } - err := json.NewDecoder(io.LimitReader(r.Body, 512)).Decode(&allocateEndpointReq) - if err != nil { - httpErrAndLog(err.Error(), http.StatusBadRequest) - return - } - if len(allocateEndpointReq.DiscoKeys) != 2 { - httpErrAndLog("2 disco public keys must be supplied", http.StatusBadRequest) - return - } - - rs, err := e.relayServerOrInit() - if err != nil { - httpErrAndLog(err.Error(), http.StatusServiceUnavailable) - return - } - ep, err := rs.AllocateEndpoint(allocateEndpointReq.DiscoKeys[0], allocateEndpointReq.DiscoKeys[1]) - if err != nil { - var notReady udprelay.ErrServerNotReady - if errors.As(err, ¬Ready) { - w.Header().Set("Retry-After", fmt.Sprintf("%d", notReady.RetryAfter.Round(time.Second)/time.Second)) - httpErrAndLog(err.Error(), http.StatusServiceUnavailable) - return - } - httpErrAndLog(err.Error(), http.StatusInternalServerError) - return - } - err = json.NewEncoder(w).Encode(&ep) - if err != nil { - 
httpErrAndLog(err.Error(), http.StatusInternalServerError) - } -} diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index cc7f05f67fbdd..84158188e90fb 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -9,6 +9,7 @@ import ( "tailscale.com/ipn" "tailscale.com/net/udprelay/endpoint" + "tailscale.com/tsd" "tailscale.com/types/key" "tailscale.com/types/ptr" ) @@ -108,9 +109,18 @@ func Test_extension_profileStateChanged(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + sys := tsd.NewSystem() + bus := sys.Bus.Get() e := &extension{ port: tt.fields.port, server: tt.fields.server, + bus: bus, + } + if e.port != nil { + // Entering profileStateChanged with a non-nil port requires + // bus init, which is called in profileStateChanged when + // transitioning port from nil to non-nil. + e.initBusConnection() } e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) if tt.wantNilServer != (e.server == nil) { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d3754e5409c1a..8665a88c4f867 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6957,7 +6957,7 @@ func (b *LocalBackend) DebugReSTUN() error { return nil } -func (b *LocalBackend) DebugPeerRelayServers() set.Set[netip.AddrPort] { +func (b *LocalBackend) DebugPeerRelayServers() set.Set[netip.Addr] { return b.MagicConn().PeerRelays() } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 2409aa1ae3a36..0acc5a65fca8a 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -699,7 +699,7 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { h.b.DebugForcePreferDERP(n) case "peer-relay-servers": servers := h.b.DebugPeerRelayServers().Slice() - slices.SortFunc(servers, func(a, b netip.AddrPort) int { + slices.SortFunc(servers, func(a, b netip.Addr) int { return a.Compare(b) }) err = 
json.NewEncoder(w).Encode(servers) diff --git a/net/udprelay/endpoint/endpoint.go b/net/udprelay/endpoint/endpoint.go index 2672a856b797b..0d2a14e965a4a 100644 --- a/net/udprelay/endpoint/endpoint.go +++ b/net/udprelay/endpoint/endpoint.go @@ -7,11 +7,16 @@ package endpoint import ( "net/netip" + "time" "tailscale.com/tstime" "tailscale.com/types/key" ) +// ServerRetryAfter is the default +// [tailscale.com/net/udprelay.ErrServerNotReady.RetryAfter] value. +const ServerRetryAfter = time.Second * 3 + // ServerEndpoint contains details for an endpoint served by a // [tailscale.com/net/udprelay.Server]. type ServerEndpoint struct { @@ -21,6 +26,10 @@ type ServerEndpoint struct { // unique ServerEndpoint allocation. ServerDisco key.DiscoPublic + // ClientDisco are the Disco public keys of the relay participants permitted + // to handshake with this endpoint. + ClientDisco [2]key.DiscoPublic + // LamportID is unique and monotonically non-decreasing across // ServerEndpoint allocations for the lifetime of Server. It enables clients // to dedup and resolve allocation event order. Clients may race to allocate diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 7651bf295a233..c34a4b5f6835c 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -73,23 +73,7 @@ type Server struct { lamportID uint64 vniPool []uint32 // the pool of available VNIs byVNI map[uint32]*serverEndpoint - byDisco map[pairOfDiscoPubKeys]*serverEndpoint -} - -// pairOfDiscoPubKeys is a pair of key.DiscoPublic. It must be constructed via -// newPairOfDiscoPubKeys to ensure lexicographical ordering. 
-type pairOfDiscoPubKeys [2]key.DiscoPublic - -func (p pairOfDiscoPubKeys) String() string { - return fmt.Sprintf("%s <=> %s", p[0].ShortString(), p[1].ShortString()) -} - -func newPairOfDiscoPubKeys(discoA, discoB key.DiscoPublic) pairOfDiscoPubKeys { - pair := pairOfDiscoPubKeys([2]key.DiscoPublic{discoA, discoB}) - slices.SortFunc(pair[:], func(a, b key.DiscoPublic) int { - return a.Compare(b) - }) - return pair + byDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint } // serverEndpoint contains Server-internal [endpoint.ServerEndpoint] state. @@ -99,7 +83,7 @@ type serverEndpoint struct { // indexing of this array aligns with the following fields, e.g. // discoSharedSecrets[0] is the shared secret to use when sealing // Disco protocol messages for transmission towards discoPubKeys[0]. - discoPubKeys pairOfDiscoPubKeys + discoPubKeys key.SortedPairOfDiscoPublic discoSharedSecrets [2]key.DiscoShared handshakeGeneration [2]uint32 // or zero if a handshake has never started for that relay leg handshakeAddrPorts [2]netip.AddrPort // or zero value if a handshake has never started for that relay leg @@ -126,7 +110,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex if common.VNI != e.vni { return errors.New("mismatching VNI") } - if common.RemoteKey.Compare(e.discoPubKeys[otherSender]) != 0 { + if common.RemoteKey.Compare(e.discoPubKeys.Get()[otherSender]) != 0 { return errors.New("mismatching RemoteKey") } return nil @@ -152,7 +136,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex m := new(disco.BindUDPRelayEndpointChallenge) m.VNI = e.vni m.Generation = discoMsg.Generation - m.RemoteKey = e.discoPubKeys[otherSender] + m.RemoteKey = e.discoPubKeys.Get()[otherSender] rand.Read(e.challenge[senderIndex][:]) copy(m.Challenge[:], e.challenge[senderIndex][:]) reply := make([]byte, packet.GeneveFixedHeaderLength, 512) @@ -200,9 +184,9 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from 
netip.AddrPort, b []by sender := key.DiscoPublicFromRaw32(mem.B(senderRaw)) senderIndex := -1 switch { - case sender.Compare(e.discoPubKeys[0]) == 0: + case sender.Compare(e.discoPubKeys.Get()[0]) == 0: senderIndex = 0 - case sender.Compare(e.discoPubKeys[1]) == 0: + case sender.Compare(e.discoPubKeys.Get()[1]) == 0: senderIndex = 1 default: // unknown Disco public key @@ -291,12 +275,12 @@ func (e *serverEndpoint) isBound() bool { // which is useful to override in tests. func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Server, err error) { s = &Server{ - logf: logger.WithPrefix(logf, "relayserver"), + logf: logf, disco: key.NewDisco(), bindLifetime: defaultBindLifetime, steadyStateLifetime: defaultSteadyStateLifetime, closeCh: make(chan struct{}), - byDisco: make(map[pairOfDiscoPubKeys]*serverEndpoint), + byDisco: make(map[key.SortedPairOfDiscoPublic]*serverEndpoint), byVNI: make(map[uint32]*serverEndpoint), } s.discoPublic = s.disco.Public() @@ -315,7 +299,7 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve } s.netChecker = &netcheck.Client{ NetMon: netMon, - Logf: logger.WithPrefix(logf, "relayserver: netcheck:"), + Logf: logger.WithPrefix(logf, "netcheck: "), SendPacket: func(b []byte, addrPort netip.AddrPort) (int, error) { if addrPort.Addr().Is4() { return s.uc4.WriteToUDPAddrPort(b, addrPort) @@ -615,7 +599,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv if len(s.addrPorts) == 0 { if !s.addrDiscoveryOnce { - return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: 3 * time.Second} + return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: endpoint.ServerRetryAfter} } return endpoint.ServerEndpoint{}, errors.New("server addrPorts are not yet known") } @@ -624,7 +608,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv return endpoint.ServerEndpoint{}, fmt.Errorf("client disco equals server disco: %s", 
s.discoPublic.ShortString()) } - pair := newPairOfDiscoPubKeys(discoA, discoB) + pair := key.NewSortedPairOfDiscoPublic(discoA, discoB) e, ok := s.byDisco[pair] if ok { // Return the existing allocation. Clients can resolve duplicate @@ -639,6 +623,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv // behaviors and endpoint state (bound or not). We might want to // consider storing them (maybe interning) in the [*serverEndpoint] // at allocation time. + ClientDisco: pair.Get(), AddrPorts: slices.Clone(s.addrPorts), VNI: e.vni, LamportID: e.lamportID, @@ -657,15 +642,17 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv lamportID: s.lamportID, allocatedAt: time.Now(), } - e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys[0]) - e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys[1]) + e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys.Get()[0]) + e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys.Get()[1]) e.vni, s.vniPool = s.vniPool[0], s.vniPool[1:] s.byDisco[pair] = e s.byVNI[e.vni] = e + s.logf("allocated endpoint vni=%d lamportID=%d disco[0]=%v disco[1]=%v", e.vni, e.lamportID, pair.Get()[0].ShortString(), pair.Get()[1].ShortString()) return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, + ClientDisco: pair.Get(), AddrPorts: slices.Clone(s.addrPorts), VNI: e.vni, LamportID: e.lamportID, diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 398a2c8a2b93a..550914b96e31a 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -165,7 +165,8 @@ type CapabilityVersion int // - 118: 2025-07-01: Client sends Hostinfo.StateEncrypted to report whether the state file is encrypted at rest (#15830) // - 119: 2025-07-10: Client uses Hostinfo.Location.Priority to prioritize one route over another. 
// - 120: 2025-07-15: Client understands peer relay disco messages, and implements peer client and relay server functions -const CurrentCapabilityVersion CapabilityVersion = 120 +// - 121: 2025-07-19: Client understands peer relay endpoint alloc with [disco.AllocateUDPRelayEndpointRequest] & [disco.AllocateUDPRelayEndpointResponse] +const CurrentCapabilityVersion CapabilityVersion = 121 // ID is an integer ID for a user, node, or login allocated by the // control plane. diff --git a/types/key/disco.go b/types/key/disco.go index 1013ce5bf89af..ce5f9b36fd9a1 100644 --- a/types/key/disco.go +++ b/types/key/disco.go @@ -73,6 +73,44 @@ func (k DiscoPrivate) Shared(p DiscoPublic) DiscoShared { return ret } +// SortedPairOfDiscoPublic is a lexicographically sorted container of two +// [DiscoPublic] keys. +type SortedPairOfDiscoPublic struct { + k [2]DiscoPublic +} + +// Get returns the underlying keys. +func (s SortedPairOfDiscoPublic) Get() [2]DiscoPublic { + return s.k +} + +// NewSortedPairOfDiscoPublic returns a SortedPairOfDiscoPublic from a and b. +func NewSortedPairOfDiscoPublic(a, b DiscoPublic) SortedPairOfDiscoPublic { + s := SortedPairOfDiscoPublic{} + if a.Compare(b) < 0 { + s.k[0] = a + s.k[1] = b + } else { + s.k[0] = b + s.k[1] = a + } + return s +} + +func (s SortedPairOfDiscoPublic) String() string { + return fmt.Sprintf("%s <=> %s", s.k[0].ShortString(), s.k[1].ShortString()) +} + +// Equal returns true if s and b are equal, otherwise it returns false. +func (s SortedPairOfDiscoPublic) Equal(b SortedPairOfDiscoPublic) bool { + for i := range s.k { + if s.k[i].Compare(b.k[i]) != 0 { + return false + } + } + return true +} + // DiscoPublic is the public portion of a DiscoPrivate. 
type DiscoPublic struct { k [32]byte diff --git a/types/key/disco_test.go b/types/key/disco_test.go index c62c13cbf8970..131fe350f508a 100644 --- a/types/key/disco_test.go +++ b/types/key/disco_test.go @@ -81,3 +81,21 @@ func TestDiscoShared(t *testing.T) { t.Error("k1.Shared(k2) != k2.Shared(k1)") } } + +func TestSortedPairOfDiscoPublic(t *testing.T) { + pubA := DiscoPublic{} + pubA.k[0] = 0x01 + pubB := DiscoPublic{} + pubB.k[0] = 0x02 + sortedInput := NewSortedPairOfDiscoPublic(pubA, pubB) + unsortedInput := NewSortedPairOfDiscoPublic(pubB, pubA) + if sortedInput.Get() != unsortedInput.Get() { + t.Fatal("sortedInput.Get() != unsortedInput.Get()") + } + if unsortedInput.Get()[0] != pubA { + t.Fatal("unsortedInput.Get()[0] != pubA") + } + if unsortedInput.Get()[1] != pubB { + t.Fatal("unsortedInput.Get()[1] != pubB") + } +} diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 48d5ef5a11338..6381b021088b6 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -879,8 +879,14 @@ func (de *endpoint) setHeartbeatDisabled(v bool) { // discoverUDPRelayPathsLocked starts UDP relay path discovery. func (de *endpoint) discoverUDPRelayPathsLocked(now mono.Time) { - // TODO(jwhited): return early if there are no relay servers set, otherwise - // we spin up and down relayManager.runLoop unnecessarily. + if !de.c.hasPeerRelayServers.Load() { + // Changes in this value between its access and the logic following + // are fine, we will eventually do the "right" thing during future path + // discovery. The worst case is we suppress path discovery for the + // current cycle, or we unnecessarily call into [relayManager] and do + // some wasted work. 
+ return + } de.lastUDPRelayPathDiscovery = now lastBest := de.bestAddr lastBestIsTrusted := mono.Now().Before(de.trustBestAddrUntil) @@ -2035,8 +2041,15 @@ func (de *endpoint) numStopAndReset() int64 { return atomic.LoadInt64(&de.numStopAndResetAtomic) } +// setDERPHome sets the provided regionID as home for de. Calls to setDERPHome +// must never run concurrent to [Conn.updateRelayServersSet], otherwise +// [candidatePeerRelay] DERP home changes may be missed from the perspective of +// [relayManager]. func (de *endpoint) setDERPHome(regionID uint16) { de.mu.Lock() defer de.mu.Unlock() de.derpAddr = netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(regionID)) + if de.c.hasPeerRelayServers.Load() { + de.c.relayManager.handleDERPHomeChange(de.publicKey, regionID) + } } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index ad07003f72fbb..ee0ee40ca1d13 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -175,13 +175,15 @@ type Conn struct { // These [eventbus.Subscriber] fields are solely accessed by // consumeEventbusTopics once initialized. 
- pmSub *eventbus.Subscriber[portmapper.Mapping] - filterSub *eventbus.Subscriber[FilterUpdate] - nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] - nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] - syncSub *eventbus.Subscriber[syncPoint] - syncPub *eventbus.Publisher[syncPoint] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + pmSub *eventbus.Subscriber[portmapper.Mapping] + filterSub *eventbus.Subscriber[FilterUpdate] + nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] + nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] + syncSub *eventbus.Subscriber[syncPoint] + syncPub *eventbus.Publisher[syncPoint] + allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] + allocRelayEndpointSub *eventbus.Subscriber[UDPRelayAllocResp] + subsDoneCh chan struct{} // closed when consumeEventbusTopics returns // pconn4 and pconn6 are the underlying UDP sockets used to // send/receive packets for wireguard and other magicsock @@ -271,6 +273,14 @@ type Conn struct { // captureHook, if non-nil, is the pcap logging callback when capturing. captureHook syncs.AtomicValue[packet.CaptureCallback] + // hasPeerRelayServers is whether [relayManager] is configured with at least + // one peer relay server via [relayManager.handleRelayServersSet]. It is + // only accessed by [Conn.updateRelayServersSet], [endpoint.setDERPHome], + // and [endpoint.discoverUDPRelayPathsLocked]. It exists to suppress + // calls into [relayManager] leading to wasted work involving channel + // operations and goroutine creation. + hasPeerRelayServers atomic.Bool + // discoPrivate is the private naclbox key used for active // discovery traffic. It is always present, and immutable. discoPrivate key.DiscoPrivate @@ -567,6 +577,36 @@ func (s syncPoint) Signal() { close(s) } +// UDPRelayAllocReq represents a [*disco.AllocateUDPRelayEndpointRequest] +// reception event. This is signaled over an [eventbus.Bus] from +// [magicsock.Conn] towards [relayserver.extension]. 
+type UDPRelayAllocReq struct { + // RxFromNodeKey is the unauthenticated (DERP server claimed src) node key + // of the transmitting party, noted at disco message reception time over + // DERP. This node key is unambiguously-aligned with RxFromDiscoKey being + // that the disco message is received over DERP. + RxFromNodeKey key.NodePublic + // RxFromDiscoKey is the disco key of the transmitting party, noted and + // authenticated at reception time. + RxFromDiscoKey key.DiscoPublic + // Message is the disco message. + Message *disco.AllocateUDPRelayEndpointRequest +} + +// UDPRelayAllocResp represents a [*disco.AllocateUDPRelayEndpointResponse] +// that is yet to be transmitted over DERP (or delivered locally if +// ReqRxFromNodeKey is self). This is signaled over an [eventbus.Bus] from +// [relayserver.extension] towards [magicsock.Conn]. +type UDPRelayAllocResp struct { + // ReqRxFromNodeKey is copied from [UDPRelayAllocReq.RxFromNodeKey]. It + // enables peer lookup leading up to transmission over DERP. + ReqRxFromNodeKey key.NodePublic + // ReqRxFromDiscoKey is copied from [UDPRelayAllocReq.RxFromDiscoKey]. + ReqRxFromDiscoKey key.DiscoPublic + // Message is the disco message. + Message *disco.AllocateUDPRelayEndpointResponse +} + // newConn is the error-free, network-listening-side-effect-free based // of NewConn. Mostly for tests. 
func newConn(logf logger.Logf) *Conn { @@ -625,10 +665,40 @@ func (c *Conn) consumeEventbusTopics() { case syncPoint := <-c.syncSub.Events(): c.dlogf("magicsock: received sync point after reconfig") syncPoint.Signal() + case allocResp := <-c.allocRelayEndpointSub.Events(): + c.onUDPRelayAllocResp(allocResp) } } } +func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { + c.mu.Lock() + defer c.mu.Unlock() + ep, ok := c.peerMap.endpointForNodeKey(allocResp.ReqRxFromNodeKey) + if !ok { + // If it's not a peer, it might be for self (we can peer relay through + // ourselves), in which case we want to hand it down to [relayManager] + // now versus taking a network round-trip through DERP. + selfNodeKey := c.publicKeyAtomic.Load() + if selfNodeKey.Compare(allocResp.ReqRxFromNodeKey) == 0 && + allocResp.ReqRxFromDiscoKey.Compare(c.discoPublic) == 0 { + c.relayManager.handleRxDiscoMsg(c, allocResp.Message, selfNodeKey, allocResp.ReqRxFromDiscoKey, epAddr{}) + } + return + } + disco := ep.disco.Load() + if disco == nil { + return + } + if disco.key.Compare(allocResp.ReqRxFromDiscoKey) != 0 { + return + } + ep.mu.Lock() + defer ep.mu.Unlock() + derpAddr := ep.derpAddr + go c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, disco.key, allocResp.Message, discoVerboseLog) +} + // Synchronize waits for all [eventbus] events published // prior to this call to be processed by the receiver. 
func (c *Conn) Synchronize() { @@ -670,6 +740,8 @@ func NewConn(opts Options) (*Conn, error) { c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) c.syncPub = eventbus.Publish[syncPoint](c.eventClient) + c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](c.eventClient) + c.allocRelayEndpointSub = eventbus.Subscribe[UDPRelayAllocResp](c.eventClient) c.subsDoneCh = make(chan struct{}) go c.consumeEventbusTopics() } @@ -1847,6 +1919,24 @@ func (v *virtualNetworkID) get() uint32 { return v._vni & vniGetMask } +// sendDiscoAllocateUDPRelayEndpointRequest is primarily an alias for +// sendDiscoMessage, but it will alternatively send m over the eventbus if dst +// is a DERP IP:port, and dstKey is self. This saves a round-trip through DERP +// when we are attempting to allocate on a self (in-process) peer relay server. +func (c *Conn) sendDiscoAllocateUDPRelayEndpointRequest(dst epAddr, dstKey key.NodePublic, dstDisco key.DiscoPublic, allocReq *disco.AllocateUDPRelayEndpointRequest, logLevel discoLogLevel) (sent bool, err error) { + isDERP := dst.ap.Addr() == tailcfg.DerpMagicIPAddr + selfNodeKey := c.publicKeyAtomic.Load() + if isDERP && dstKey.Compare(selfNodeKey) == 0 { + c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{ + RxFromNodeKey: selfNodeKey, + RxFromDiscoKey: c.discoPublic, + Message: allocReq, + }) + return true, nil + } + return c.sendDiscoMessage(dst, dstKey, dstDisco, allocReq, logLevel) +} + // sendDiscoMessage sends discovery message m to dstDisco at dst. // // If dst.ap is a DERP IP:port, then dstKey must be non-zero. 
@@ -2176,7 +2266,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake c.logf("[unexpected] %T packets should not come from a relay server with Geneve control bit set", dm) return } - c.relayManager.handleGeneveEncapDiscoMsg(c, challenge, di, src) + c.relayManager.handleRxDiscoMsg(c, challenge, key.NodePublic{}, di.discoKey, src) return } @@ -2201,7 +2291,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake // If it's an unknown TxID, and it's Geneve-encapsulated, then // make [relayManager] aware. It might be in the middle of probing // src. - c.relayManager.handleGeneveEncapDiscoMsg(c, dm, di, src) + c.relayManager.handleRxDiscoMsg(c, dm, key.NodePublic{}, di.discoKey, src) } case *disco.CallMeMaybe, *disco.CallMeMaybeVia: var via *disco.CallMeMaybeVia @@ -2276,7 +2366,95 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake len(cmm.MyNumber)) go ep.handleCallMeMaybe(cmm) } + case *disco.AllocateUDPRelayEndpointRequest, *disco.AllocateUDPRelayEndpointResponse: + var resp *disco.AllocateUDPRelayEndpointResponse + isResp := false + msgType := "AllocateUDPRelayEndpointRequest" + req, ok := dm.(*disco.AllocateUDPRelayEndpointRequest) + if ok { + metricRecvDiscoAllocUDPRelayEndpointRequest.Add(1) + } else { + metricRecvDiscoAllocUDPRelayEndpointResponse.Add(1) + resp = dm.(*disco.AllocateUDPRelayEndpointResponse) + msgType = "AllocateUDPRelayEndpointResponse" + isResp = true + } + + if !isDERP { + // These messages should only come via DERP. 
+ c.logf("[unexpected] %s packets should only come via DERP", msgType) + return + } + nodeKey := derpNodeSrc + ep, ok := c.peerMap.endpointForNodeKey(nodeKey) + if !ok { + c.logf("magicsock: disco: ignoring %s from %v; %v is unknown", msgType, sender.ShortString(), derpNodeSrc.ShortString()) + return + } + epDisco := ep.disco.Load() + if epDisco == nil { + return + } + if epDisco.key != di.discoKey { + if isResp { + metricRecvDiscoAllocUDPRelayEndpointResponseBadDisco.Add(1) + } else { + metricRecvDiscoAllocUDPRelayEndpointRequestBadDisco.Add(1) + } + c.logf("[unexpected] %s from peer via DERP whose netmap discokey != disco source", msgType) + return + } + if isResp { + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s, %d endpoints", + c.discoShort, epDisco.short, + ep.publicKey.ShortString(), derpStr(src.String()), + msgType, + len(resp.AddrPorts)) + c.relayManager.handleRxDiscoMsg(c, resp, nodeKey, di.discoKey, src) + return + } else if sender.Compare(req.ClientDisco[0]) != 0 && sender.Compare(req.ClientDisco[1]) != 0 { + // An allocation request must contain the sender's disco key in + // ClientDisco. One of the relay participants must be the sender. + c.logf("magicsock: disco: %s from %v; %v does not contain sender's disco key", + msgType, sender.ShortString(), derpNodeSrc.ShortString()) + return + } else { + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s, for %d<->%d", + c.discoShort, epDisco.short, + ep.publicKey.ShortString(), derpStr(src.String()), + msgType, + req.ClientDisco[0], req.ClientDisco[1]) + } + + if c.filt == nil { + return + } + // Binary search of peers is O(log n) while c.mu is held. + // TODO: We might be able to use ep.nodeAddr instead of all addresses, + // or we might be able to release c.mu before doing this work. Keep it + // simple and slow for now. c.peers.AsSlice is a copy. We may need to + // write our own binary search for a [views.Slice]. 
+ peerI, ok := slices.BinarySearchFunc(c.peers.AsSlice(), ep.nodeID, func(peer tailcfg.NodeView, target tailcfg.NodeID) int { + if peer.ID() < target { + return -1 + } else if peer.ID() > target { + return 1 + } + return 0 + }) + if !ok { + // unexpected + return + } + if !nodeHasCap(c.filt, c.peers.At(peerI), c.self, tailcfg.PeerCapabilityRelay) { + return + } + c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{ + RxFromDiscoKey: sender, + RxFromNodeKey: nodeKey, + Message: req, + }) } return } @@ -2337,7 +2515,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // Geneve-encapsulated [disco.Ping] messages in the interest of // simplicity. It might be in the middle of probing src, so it must be // made aware. - c.relayManager.handleGeneveEncapDiscoMsg(c, dm, di, src) + c.relayManager.handleRxDiscoMsg(c, dm, key.NodePublic{}, di.discoKey, src) return } @@ -2687,7 +2865,7 @@ func (c *Conn) SetProbeUDPLifetime(v bool) { // capVerIsRelayCapable returns true if version is relay client and server // capable, otherwise it returns false. func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { - return version >= 120 + return version >= 121 } // onFilterUpdate is called when a [FilterUpdate] is received over the @@ -2715,6 +2893,11 @@ func (c *Conn) onFilterUpdate(f FilterUpdate) { // peers are passed as args (vs c.mu-guarded fields) to enable callers to // release c.mu before calling as this is O(m * n) (we iterate all cap rules 'm' // in filt for every peer 'n'). +// +// Calls to updateRelayServersSet must never run concurrent to +// [endpoint.setDERPHome], otherwise [candidatePeerRelay] DERP home changes may +// be missed from the perspective of [relayManager]. +// // TODO: Optimize this so that it's not O(m * n). This might involve: // 1. Changes to [filter.Filter], e.g. 
adding a CapsWithValues() to check for // a given capability instead of building and returning a map of all of @@ -2722,69 +2905,75 @@ func (c *Conn) onFilterUpdate(f FilterUpdate) { // 2. Moving this work upstream into [nodeBackend] or similar, and publishing // the computed result over the eventbus instead. func (c *Conn) updateRelayServersSet(filt *filter.Filter, self tailcfg.NodeView, peers views.Slice[tailcfg.NodeView]) { - relayServers := make(set.Set[netip.AddrPort]) + relayServers := make(set.Set[candidatePeerRelay]) nodes := append(peers.AsSlice(), self) for _, maybeCandidate := range nodes { - peerAPI := peerAPIIfCandidateRelayServer(filt, self, maybeCandidate) - if peerAPI.IsValid() { - relayServers.Add(peerAPI) + if maybeCandidate.ID() != self.ID() && !capVerIsRelayCapable(maybeCandidate.Cap()) { + // If maybeCandidate's [tailcfg.CapabilityVersion] is not relay-capable, + // we skip it. If maybeCandidate happens to be self, then this check is + // unnecessary as self is always capable from this point (the statically + // compiled [tailcfg.CurrentCapabilityVersion]) forward. + continue + } + if !nodeHasCap(filt, maybeCandidate, self, tailcfg.PeerCapabilityRelayTarget) { + continue } + relayServers.Add(candidatePeerRelay{ + nodeKey: maybeCandidate.Key(), + discoKey: maybeCandidate.DiscoKey(), + derpHomeRegionID: uint16(maybeCandidate.HomeDERP()), + }) } c.relayManager.handleRelayServersSet(relayServers) + if len(relayServers) > 0 { + c.hasPeerRelayServers.Store(true) + } else { + c.hasPeerRelayServers.Store(false) + } } -// peerAPIIfCandidateRelayServer returns the peer API address of maybeCandidate -// if it is considered to be a candidate relay server upon evaluation against -// filt and self, otherwise it returns a zero value. self and maybeCandidate -// may be equal. 
-func peerAPIIfCandidateRelayServer(filt *filter.Filter, self, maybeCandidate tailcfg.NodeView) netip.AddrPort { +// nodeHasCap returns true if src has cap on dst, otherwise it returns false. +func nodeHasCap(filt *filter.Filter, src, dst tailcfg.NodeView, cap tailcfg.PeerCapability) bool { if filt == nil || - !self.Valid() || - !maybeCandidate.Valid() || - !maybeCandidate.Hostinfo().Valid() { - return netip.AddrPort{} - } - if maybeCandidate.ID() != self.ID() && !capVerIsRelayCapable(maybeCandidate.Cap()) { - // If maybeCandidate's [tailcfg.CapabilityVersion] is not relay-capable, - // we skip it. If maybeCandidate happens to be self, then this check is - // unnecessary as self is always capable from this point (the statically - // compiled [tailcfg.CurrentCapabilityVersion]) forward. - return netip.AddrPort{} - } - for _, maybeCandidatePrefix := range maybeCandidate.Addresses().All() { - if !maybeCandidatePrefix.IsSingleIP() { + !src.Valid() || + !dst.Valid() { + return false + } + for _, srcPrefix := range src.Addresses().All() { + if !srcPrefix.IsSingleIP() { continue } - maybeCandidateAddr := maybeCandidatePrefix.Addr() - for _, selfPrefix := range self.Addresses().All() { - if !selfPrefix.IsSingleIP() { + srcAddr := srcPrefix.Addr() + for _, dstPrefix := range dst.Addresses().All() { + if !dstPrefix.IsSingleIP() { continue } - selfAddr := selfPrefix.Addr() - if selfAddr.BitLen() == maybeCandidateAddr.BitLen() { // same address family - if filt.CapsWithValues(maybeCandidateAddr, selfAddr).HasCapability(tailcfg.PeerCapabilityRelayTarget) { - for _, s := range maybeCandidate.Hostinfo().Services().All() { - if maybeCandidateAddr.Is4() && s.Proto == tailcfg.PeerAPI4 || - maybeCandidateAddr.Is6() && s.Proto == tailcfg.PeerAPI6 { - return netip.AddrPortFrom(maybeCandidateAddr, s.Port) - } - } - return netip.AddrPort{} // no peerAPI - } else { - // [nodeBackend.peerCapsLocked] only returns/considers the - // [tailcfg.PeerCapMap] between the passed src and the - // 
_first_ host (/32 or /128) address for self. We are - // consistent with that behavior here. If self and - // maybeCandidate host addresses are of the same address - // family they either have the capability or not. We do not - // check against additional host addresses of the same - // address family. - return netip.AddrPort{} - } + dstAddr := dstPrefix.Addr() + if dstAddr.BitLen() == srcAddr.BitLen() { // same address family + // [nodeBackend.peerCapsLocked] only returns/considers the + // [tailcfg.PeerCapMap] between the passed src and the _first_ + // host (/32 or /128) address for self. We are consistent with + // that behavior here. If src and dst host addresses are of the + // same address family they either have the capability or not. + // We do not check against additional host addresses of the same + // address family. + return filt.CapsWithValues(srcAddr, dstAddr).HasCapability(cap) } } } - return netip.AddrPort{} + return false +} + +// candidatePeerRelay represents the identifiers and DERP home region ID for a +// peer relay server. 
+type candidatePeerRelay struct { + nodeKey key.NodePublic + discoKey key.DiscoPublic + derpHomeRegionID uint16 +} + +func (c *candidatePeerRelay) isValid() bool { + return !c.nodeKey.IsZero() && !c.discoKey.IsZero() } // onNodeViewsUpdate is called when a [NodeViewsUpdate] is received over the @@ -3792,18 +3981,22 @@ var ( metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") metricRecvDiscoBadParse = clientmetric.NewCounter("magicsock_disco_recv_bad_parse") - metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") - metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") - metricRecvDiscoPing = clientmetric.NewCounter("magicsock_disco_recv_ping") - metricRecvDiscoPong = clientmetric.NewCounter("magicsock_disco_recv_pong") - metricRecvDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe") - metricRecvDiscoCallMeMaybeVia = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia") - metricRecvDiscoCallMeMaybeBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_node") - metricRecvDiscoCallMeMaybeViaBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_node") - metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") - metricRecvDiscoCallMeMaybeViaBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_disco") - metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") - metricRecvDiscoDERPPeerGoneUnknown = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") + metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") + metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") + metricRecvDiscoPing = clientmetric.NewCounter("magicsock_disco_recv_ping") + metricRecvDiscoPong = clientmetric.NewCounter("magicsock_disco_recv_pong") + metricRecvDiscoCallMeMaybe = 
clientmetric.NewCounter("magicsock_disco_recv_callmemaybe") + metricRecvDiscoCallMeMaybeVia = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia") + metricRecvDiscoCallMeMaybeBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_node") + metricRecvDiscoCallMeMaybeViaBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_node") + metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") + metricRecvDiscoCallMeMaybeViaBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_disco") + metricRecvDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request") + metricRecvDiscoAllocUDPRelayEndpointRequestBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request_bad_disco") + metricRecvDiscoAllocUDPRelayEndpointResponseBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response_bad_disco") + metricRecvDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response") + metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") + metricRecvDiscoDERPPeerGoneUnknown = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") // metricDERPHomeChange is how many times our DERP home region DI has // changed from non-zero to a different non-zero. metricDERPHomeChange = clientmetric.NewCounter("derp_home_change") @@ -3985,6 +4178,22 @@ func (le *lazyEndpoint) FromPeer(peerPublicKey [32]byte) { } // PeerRelays returns the current set of candidate peer relays. 
-func (c *Conn) PeerRelays() set.Set[netip.AddrPort] { - return c.relayManager.getServers() +func (c *Conn) PeerRelays() set.Set[netip.Addr] { + candidatePeerRelays := c.relayManager.getServers() + servers := make(set.Set[netip.Addr], len(candidatePeerRelays)) + c.mu.Lock() + defer c.mu.Unlock() + for relay := range candidatePeerRelays { + pi, ok := c.peerMap.byNodeKey[relay.nodeKey] + if !ok { + if c.self.Key().Compare(relay.nodeKey) == 0 { + if c.self.Addresses().Len() > 0 { + servers.Add(c.self.Addresses().At(0).Addr()) + } + } + continue + } + servers.Add(pi.ep.nodeAddr) + } + return servers } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 1d76e6c595a47..8a09df27d2ce7 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -19,7 +19,6 @@ import ( "net/http/httptest" "net/netip" "os" - "reflect" "runtime" "strconv" "strings" @@ -64,6 +63,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/types/ptr" + "tailscale.com/types/views" "tailscale.com/util/cibuild" "tailscale.com/util/eventbus" "tailscale.com/util/must" @@ -3384,61 +3384,72 @@ func Test_virtualNetworkID(t *testing.T) { } } -func Test_peerAPIIfCandidateRelayServer(t *testing.T) { - hostInfo := &tailcfg.Hostinfo{ - Services: []tailcfg.Service{ - { - Proto: tailcfg.PeerAPI4, - Port: 4, - }, - { - Proto: tailcfg.PeerAPI6, - Port: 6, - }, +func Test_looksLikeInitiationMsg(t *testing.T) { + initMsg := make([]byte, device.MessageInitiationSize) + binary.BigEndian.PutUint32(initMsg, device.MessageInitiationType) + initMsgSizeTransportType := make([]byte, device.MessageInitiationSize) + binary.BigEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) + tests := []struct { + name string + b []byte + want bool + }{ + { + name: "valid initiation", + b: initMsg, + want: true, + }, + { + name: "invalid message type field", + b: initMsgSizeTransportType, + want: false, + }, + { + 
name: "too small", + b: initMsg[:device.MessageInitiationSize-1], + want: false, + }, + { + name: "too big", + b: append(initMsg, 0), + want: false, }, } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := looksLikeInitiationMsg(tt.b); got != tt.want { + t.Errorf("looksLikeInitiationMsg() = %v, want %v", got, tt.want) + } + }) + } +} - selfOnlyIPv4 := &tailcfg.Node{ +func Test_nodeHasCap(t *testing.T) { + nodeAOnlyIPv4 := &tailcfg.Node{ ID: 1, - // Intentionally set a value < 120 to verify the statically compiled - // [tailcfg.CurrentCapabilityVersion] is used when self is - // maybeCandidate. - Cap: 119, Addresses: []netip.Prefix{ netip.MustParsePrefix("1.1.1.1/32"), }, - Hostinfo: hostInfo.View(), } - selfOnlyIPv6 := selfOnlyIPv4.Clone() - selfOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") + nodeBOnlyIPv6 := nodeAOnlyIPv4.Clone() + nodeBOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::1/128") - peerOnlyIPv4 := &tailcfg.Node{ - ID: 2, - Cap: 120, + nodeCOnlyIPv4 := &tailcfg.Node{ + ID: 2, Addresses: []netip.Prefix{ netip.MustParsePrefix("2.2.2.2/32"), }, - Hostinfo: hostInfo.View(), } - - peerOnlyIPv4NotCapable := peerOnlyIPv4.Clone() - peerOnlyIPv4NotCapable.Cap = 119 - - peerOnlyIPv6 := peerOnlyIPv4.Clone() - peerOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::2/128") - - peerOnlyIPv4ZeroCapVer := peerOnlyIPv4.Clone() - peerOnlyIPv4ZeroCapVer.Cap = 0 - - peerOnlyIPv4NilHostinfo := peerOnlyIPv4.Clone() - peerOnlyIPv4NilHostinfo.Hostinfo = tailcfg.HostinfoView{} + nodeDOnlyIPv6 := nodeCOnlyIPv4.Clone() + nodeDOnlyIPv6.Addresses[0] = netip.MustParsePrefix("::2/128") tests := []struct { - name string - filt *filter.Filter - self tailcfg.NodeView - maybeCandidate tailcfg.NodeView - want netip.AddrPort + name string + filt *filter.Filter + src tailcfg.NodeView + dst tailcfg.NodeView + cap tailcfg.PeerCapability + want bool }{ { name: "match v4", @@ -3453,26 +3464,10 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, 
}, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - maybeCandidate: peerOnlyIPv4.View(), - want: netip.MustParseAddrPort("2.2.2.2:4"), - }, - { - name: "match v4 self", - filt: filter.New([]filtertype.Match{ - { - Srcs: []netip.Prefix{selfOnlyIPv4.Addresses[0]}, - Caps: []filtertype.CapMatch{ - { - Dst: selfOnlyIPv4.Addresses[0], - Cap: tailcfg.PeerCapabilityRelayTarget, - }, - }, - }, - }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - maybeCandidate: selfOnlyIPv4.View(), - want: netip.AddrPortFrom(selfOnlyIPv4.Addresses[0].Addr(), 4), + src: nodeCOnlyIPv4.View(), + dst: nodeAOnlyIPv4.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: true, }, { name: "match v6", @@ -3487,77 +3482,67 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - maybeCandidate: peerOnlyIPv6.View(), - want: netip.MustParseAddrPort("[::2]:6"), - }, - { - name: "match v6 self", - filt: filter.New([]filtertype.Match{ - { - Srcs: []netip.Prefix{selfOnlyIPv6.Addresses[0]}, - Caps: []filtertype.CapMatch{ - { - Dst: selfOnlyIPv6.Addresses[0], - Cap: tailcfg.PeerCapabilityRelayTarget, - }, - }, - }, - }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - maybeCandidate: selfOnlyIPv6.View(), - want: netip.AddrPortFrom(selfOnlyIPv6.Addresses[0].Addr(), 6), + src: nodeDOnlyIPv6.View(), + dst: nodeBOnlyIPv6.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: true, }, { - name: "peer incapable", + name: "no match CapMatch Dst", filt: filter.New([]filtertype.Match{ { - Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("1.1.1.1/32"), + Dst: netip.MustParsePrefix("::3/128"), Cap: tailcfg.PeerCapabilityRelayTarget, }, }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - maybeCandidate: peerOnlyIPv4NotCapable.View(), + src: nodeDOnlyIPv6.View(), + dst: 
nodeBOnlyIPv6.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, }, { - name: "no match dst", + name: "no match peer cap", filt: filter.New([]filtertype.Match{ { Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("::3/128"), - Cap: tailcfg.PeerCapabilityRelayTarget, + Dst: netip.MustParsePrefix("::1/128"), + Cap: tailcfg.PeerCapabilityIngress, }, }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - maybeCandidate: peerOnlyIPv6.View(), + src: nodeDOnlyIPv6.View(), + dst: nodeBOnlyIPv6.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, }, { - name: "no match peer cap", + name: "nil src", filt: filter.New([]filtertype.Match{ { - Srcs: []netip.Prefix{netip.MustParsePrefix("::2/128")}, + Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("::1/128"), - Cap: tailcfg.PeerCapabilityIngress, + Dst: netip.MustParsePrefix("1.1.1.1/32"), + Cap: tailcfg.PeerCapabilityRelayTarget, }, }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv6.View(), - maybeCandidate: peerOnlyIPv6.View(), + src: tailcfg.NodeView{}, + dst: nodeAOnlyIPv4.View(), + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, }, { - name: "cap ver not relay capable", + name: "nil dst", filt: filter.New([]filtertype.Match{ { Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, @@ -3569,108 +3554,136 @@ func Test_peerAPIIfCandidateRelayServer(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: peerOnlyIPv4.View(), - maybeCandidate: peerOnlyIPv4ZeroCapVer.View(), + src: nodeCOnlyIPv4.View(), + dst: tailcfg.NodeView{}, + cap: tailcfg.PeerCapabilityRelayTarget, + want: false, }, - { - name: "nil filt", - filt: nil, - self: selfOnlyIPv4.View(), - maybeCandidate: peerOnlyIPv4.View(), + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := nodeHasCap(tt.filt, tt.src, tt.dst, tt.cap); got != tt.want { + 
t.Errorf("nodeHasCap() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestConn_updateRelayServersSet(t *testing.T) { + peerNodeCandidateRelay := &tailcfg.Node{ + Cap: 121, + ID: 1, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), }, + HomeDERP: 1, + Key: key.NewNode().Public(), + DiscoKey: key.NewDisco().Public(), + } + + peerNodeNotCandidateRelayCapVer := &tailcfg.Node{ + Cap: 120, // intentionally lower to fail capVer check + ID: 1, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("1.1.1.1/32"), + }, + HomeDERP: 1, + Key: key.NewNode().Public(), + DiscoKey: key.NewDisco().Public(), + } + + selfNode := &tailcfg.Node{ + Cap: 120, // intentionally lower than capVerIsRelayCapable to verify self check + ID: 2, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("2.2.2.2/32"), + }, + HomeDERP: 2, + Key: key.NewNode().Public(), + DiscoKey: key.NewDisco().Public(), + } + + tests := []struct { + name string + filt *filter.Filter + self tailcfg.NodeView + peers views.Slice[tailcfg.NodeView] + wantRelayServers set.Set[candidatePeerRelay] + }{ { - name: "nil self", + name: "candidate relay server", filt: filter.New([]filtertype.Match{ { - Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Srcs: peerNodeCandidateRelay.Addresses, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("1.1.1.1/32"), + Dst: selfNode.Addresses[0], Cap: tailcfg.PeerCapabilityRelayTarget, }, }, }, }, nil, nil, nil, nil, nil), - self: tailcfg.NodeView{}, - maybeCandidate: peerOnlyIPv4.View(), + self: selfNode.View(), + peers: views.SliceOf([]tailcfg.NodeView{peerNodeCandidateRelay.View()}), + wantRelayServers: set.SetOf([]candidatePeerRelay{ + { + nodeKey: peerNodeCandidateRelay.Key, + discoKey: peerNodeCandidateRelay.DiscoKey, + derpHomeRegionID: 1, + }, + }), }, { - name: "nil peer", + name: "self candidate relay server", filt: filter.New([]filtertype.Match{ { - Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Srcs: selfNode.Addresses, 
Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("1.1.1.1/32"), + Dst: selfNode.Addresses[0], Cap: tailcfg.PeerCapabilityRelayTarget, }, }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - maybeCandidate: tailcfg.NodeView{}, + self: selfNode.View(), + peers: views.SliceOf([]tailcfg.NodeView{selfNode.View()}), + wantRelayServers: set.SetOf([]candidatePeerRelay{ + { + nodeKey: selfNode.Key, + discoKey: selfNode.DiscoKey, + derpHomeRegionID: 2, + }, + }), }, { - name: "nil peer hostinfo", + name: "no candidate relay server", filt: filter.New([]filtertype.Match{ { - Srcs: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + Srcs: peerNodeNotCandidateRelayCapVer.Addresses, Caps: []filtertype.CapMatch{ { - Dst: netip.MustParsePrefix("1.1.1.1/32"), + Dst: selfNode.Addresses[0], Cap: tailcfg.PeerCapabilityRelayTarget, }, }, }, }, nil, nil, nil, nil, nil), - self: selfOnlyIPv4.View(), - maybeCandidate: peerOnlyIPv4NilHostinfo.View(), + self: selfNode.View(), + peers: views.SliceOf([]tailcfg.NodeView{peerNodeNotCandidateRelayCapVer.View()}), + wantRelayServers: make(set.Set[candidatePeerRelay]), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := peerAPIIfCandidateRelayServer(tt.filt, tt.self, tt.maybeCandidate); !reflect.DeepEqual(got, tt.want) { - t.Errorf("peerAPIIfCandidateRelayServer() = %v, want %v", got, tt.want) + c := &Conn{} + c.updateRelayServersSet(tt.filt, tt.self, tt.peers) + got := c.relayManager.getServers() + if !got.Equal(tt.wantRelayServers) { + t.Fatalf("got: %v != want: %v", got, tt.wantRelayServers) } - }) - } -} - -func Test_looksLikeInitiationMsg(t *testing.T) { - initMsg := make([]byte, device.MessageInitiationSize) - binary.BigEndian.PutUint32(initMsg, device.MessageInitiationType) - initMsgSizeTransportType := make([]byte, device.MessageInitiationSize) - binary.BigEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) - tests := []struct { - name string - b []byte - want bool - 
}{ - { - name: "valid initiation", - b: initMsg, - want: true, - }, - { - name: "invalid message type field", - b: initMsgSizeTransportType, - want: false, - }, - { - name: "too small", - b: initMsg[:device.MessageInitiationSize-1], - want: false, - }, - { - name: "too big", - b: append(initMsg, 0), - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := looksLikeInitiationMsg(tt.b); got != tt.want { - t.Errorf("looksLikeInitiationMsg() = %v, want %v", got, tt.want) + if len(tt.wantRelayServers) > 0 != c.hasPeerRelayServers.Load() { + t.Fatalf("c.hasPeerRelayServers: %v != wantRelayServers: %v", c.hasPeerRelayServers.Load(), tt.wantRelayServers) } }) } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index d7acf80b51a58..ad8c5fc763adb 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -4,23 +4,18 @@ package magicsock import ( - "bytes" "context" - "encoding/json" "errors" - "fmt" - "io" - "net/http" "net/netip" - "strconv" "sync" "time" "tailscale.com/disco" "tailscale.com/net/stun" udprelay "tailscale.com/net/udprelay/endpoint" + "tailscale.com/tailcfg" + "tailscale.com/tstime" "tailscale.com/types/key" - "tailscale.com/util/httpm" "tailscale.com/util/set" ) @@ -38,26 +33,28 @@ type relayManager struct { // =================================================================== // The following fields are owned by a single goroutine, runLoop(). 
- serversByAddrPort map[netip.AddrPort]key.DiscoPublic - serversByDisco map[key.DiscoPublic]netip.AddrPort - allocWorkByEndpoint map[*endpoint]*relayEndpointAllocWork - handshakeWorkByEndpointByServerDisco map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork - handshakeWorkByServerDiscoVNI map[serverDiscoVNI]*relayHandshakeWork - handshakeWorkAwaitingPong map[*relayHandshakeWork]addrPortVNI - addrPortVNIToHandshakeWork map[addrPortVNI]*relayHandshakeWork - handshakeGeneration uint32 + serversByNodeKey map[key.NodePublic]candidatePeerRelay + allocWorkByCandidatePeerRelayByEndpoint map[*endpoint]map[candidatePeerRelay]*relayEndpointAllocWork + allocWorkByDiscoKeysByServerNodeKey map[key.NodePublic]map[key.SortedPairOfDiscoPublic]*relayEndpointAllocWork + handshakeWorkByServerDiscoByEndpoint map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork + handshakeWorkByServerDiscoVNI map[serverDiscoVNI]*relayHandshakeWork + handshakeWorkAwaitingPong map[*relayHandshakeWork]addrPortVNI + addrPortVNIToHandshakeWork map[addrPortVNI]*relayHandshakeWork + handshakeGeneration uint32 + allocGeneration uint32 // =================================================================== // The following chan fields serve event inputs to a single goroutine, // runLoop(). 
- startDiscoveryCh chan endpointWithLastBest - allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent - handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent - cancelWorkCh chan *endpoint - newServerEndpointCh chan newRelayServerEndpointEvent - rxHandshakeDiscoMsgCh chan relayHandshakeDiscoMsgEvent - serversCh chan set.Set[netip.AddrPort] - getServersCh chan chan set.Set[netip.AddrPort] + startDiscoveryCh chan endpointWithLastBest + allocateWorkDoneCh chan relayEndpointAllocWorkDoneEvent + handshakeWorkDoneCh chan relayEndpointHandshakeWorkDoneEvent + cancelWorkCh chan *endpoint + newServerEndpointCh chan newRelayServerEndpointEvent + rxDiscoMsgCh chan relayDiscoMsgEvent + serversCh chan set.Set[candidatePeerRelay] + getServersCh chan chan set.Set[candidatePeerRelay] + derpHomeChangeCh chan derpHomeChangeEvent discoInfoMu sync.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo @@ -86,7 +83,7 @@ type relayHandshakeWork struct { // relayManager.handshakeWorkDoneCh if runLoop() can receive it. runLoop() // must select{} read on doneCh to prevent deadlock when attempting to write // to rxDiscoMsgCh. - rxDiscoMsgCh chan relayHandshakeDiscoMsgEvent + rxDiscoMsgCh chan relayDiscoMsgEvent doneCh chan relayEndpointHandshakeWorkDoneEvent ctx context.Context @@ -100,14 +97,15 @@ type relayHandshakeWork struct { type newRelayServerEndpointEvent struct { wlb endpointWithLastBest se udprelay.ServerEndpoint - server netip.AddrPort // zero value if learned via [disco.CallMeMaybeVia] + server candidatePeerRelay // zero value if learned via [disco.CallMeMaybeVia] } // relayEndpointAllocWorkDoneEvent indicates relay server endpoint allocation // work for an [*endpoint] has completed. This structure is immutable once // initialized. 
type relayEndpointAllocWorkDoneEvent struct { - work *relayEndpointAllocWork + work *relayEndpointAllocWork + allocated udprelay.ServerEndpoint // !allocated.ServerDisco.IsZero() if allocation succeeded } // relayEndpointHandshakeWorkDoneEvent indicates relay server endpoint handshake @@ -122,18 +120,42 @@ type relayEndpointHandshakeWorkDoneEvent struct { // hasActiveWorkRunLoop returns true if there is outstanding allocation or // handshaking work for any endpoint, otherwise it returns false. func (r *relayManager) hasActiveWorkRunLoop() bool { - return len(r.allocWorkByEndpoint) > 0 || len(r.handshakeWorkByEndpointByServerDisco) > 0 + return len(r.allocWorkByCandidatePeerRelayByEndpoint) > 0 || len(r.handshakeWorkByServerDiscoByEndpoint) > 0 } // hasActiveWorkForEndpointRunLoop returns true if there is outstanding // allocation or handshaking work for the provided endpoint, otherwise it // returns false. func (r *relayManager) hasActiveWorkForEndpointRunLoop(ep *endpoint) bool { - _, handshakeWork := r.handshakeWorkByEndpointByServerDisco[ep] - _, allocWork := r.allocWorkByEndpoint[ep] + _, handshakeWork := r.handshakeWorkByServerDiscoByEndpoint[ep] + _, allocWork := r.allocWorkByCandidatePeerRelayByEndpoint[ep] return handshakeWork || allocWork } +// derpHomeChangeEvent represents a change in the DERP home region for the +// node identified by nodeKey. This structure is immutable once initialized. +type derpHomeChangeEvent struct { + nodeKey key.NodePublic + regionID uint16 +} + +// handleDERPHomeChange handles a DERP home change event for nodeKey and +// regionID. 
+func (r *relayManager) handleDERPHomeChange(nodeKey key.NodePublic, regionID uint16) { + relayManagerInputEvent(r, nil, &r.derpHomeChangeCh, derpHomeChangeEvent{ + nodeKey: nodeKey, + regionID: regionID, + }) +} + +func (r *relayManager) handleDERPHomeChangeRunLoop(event derpHomeChangeEvent) { + c, ok := r.serversByNodeKey[event.nodeKey] + if ok { + c.derpHomeRegionID = event.regionID + r.serversByNodeKey[event.nodeKey] = c + } +} + // runLoop is a form of event loop. It ensures exclusive access to most of // [relayManager] state. func (r *relayManager) runLoop() { @@ -151,13 +173,7 @@ func (r *relayManager) runLoop() { return } case done := <-r.allocateWorkDoneCh: - work, ok := r.allocWorkByEndpoint[done.work.ep] - if ok && work == done.work { - // Verify the work in the map is the same as the one that we're - // cleaning up. New events on r.startDiscoveryCh can - // overwrite pre-existing keys. - delete(r.allocWorkByEndpoint, done.work.ep) - } + r.handleAllocWorkDoneRunLoop(done) if !r.hasActiveWorkRunLoop() { return } @@ -176,8 +192,8 @@ func (r *relayManager) runLoop() { if !r.hasActiveWorkRunLoop() { return } - case discoMsgEvent := <-r.rxHandshakeDiscoMsgCh: - r.handleRxHandshakeDiscoMsgRunLoop(discoMsgEvent) + case discoMsgEvent := <-r.rxDiscoMsgCh: + r.handleRxDiscoMsgRunLoop(discoMsgEvent) if !r.hasActiveWorkRunLoop() { return } @@ -191,69 +207,77 @@ func (r *relayManager) runLoop() { if !r.hasActiveWorkRunLoop() { return } + case derpHomeChange := <-r.derpHomeChangeCh: + r.handleDERPHomeChangeRunLoop(derpHomeChange) + if !r.hasActiveWorkRunLoop() { + return + } } } } -func (r *relayManager) handleGetServersRunLoop(getServersCh chan set.Set[netip.AddrPort]) { - servers := make(set.Set[netip.AddrPort], len(r.serversByAddrPort)) - for server := range r.serversByAddrPort { - servers.Add(server) +func (r *relayManager) handleGetServersRunLoop(getServersCh chan set.Set[candidatePeerRelay]) { + servers := make(set.Set[candidatePeerRelay], 
len(r.serversByNodeKey)) + for _, v := range r.serversByNodeKey { + servers.Add(v) } getServersCh <- servers } -func (r *relayManager) getServers() set.Set[netip.AddrPort] { - ch := make(chan set.Set[netip.AddrPort]) +func (r *relayManager) getServers() set.Set[candidatePeerRelay] { + ch := make(chan set.Set[candidatePeerRelay]) relayManagerInputEvent(r, nil, &r.getServersCh, ch) return <-ch } -func (r *relayManager) handleServersUpdateRunLoop(update set.Set[netip.AddrPort]) { - for k, v := range r.serversByAddrPort { - if !update.Contains(k) { - delete(r.serversByAddrPort, k) - delete(r.serversByDisco, v) +func (r *relayManager) handleServersUpdateRunLoop(update set.Set[candidatePeerRelay]) { + for _, v := range r.serversByNodeKey { + if !update.Contains(v) { + delete(r.serversByNodeKey, v.nodeKey) } } for _, v := range update.Slice() { - _, ok := r.serversByAddrPort[v] - if ok { - // don't zero known disco keys - continue - } - r.serversByAddrPort[v] = key.DiscoPublic{} + r.serversByNodeKey[v.nodeKey] = v } } -type relayHandshakeDiscoMsgEvent struct { - conn *Conn // for access to [Conn] if there is no associated [relayHandshakeWork] - msg disco.Message - disco key.DiscoPublic - from netip.AddrPort - vni uint32 - at time.Time +type relayDiscoMsgEvent struct { + conn *Conn // for access to [Conn] if there is no associated [relayHandshakeWork] + msg disco.Message + relayServerNodeKey key.NodePublic // nonzero if msg is a [*disco.AllocateUDPRelayEndpointResponse] + disco key.DiscoPublic + from netip.AddrPort + vni uint32 + at time.Time } // relayEndpointAllocWork serves to track in-progress relay endpoint allocation // for an [*endpoint]. This structure is immutable once initialized. 
type relayEndpointAllocWork struct { - // ep is the [*endpoint] associated with the work - ep *endpoint - // cancel() will signal all associated goroutines to return + wlb endpointWithLastBest + discoKeys key.SortedPairOfDiscoPublic + candidatePeerRelay candidatePeerRelay + + // allocateServerEndpoint() always writes to doneCh (len 1) when it + // returns. It may end up writing the same event afterward to + // [relayManager.allocateWorkDoneCh] if runLoop() can receive it. runLoop() + // must select{} read on doneCh to prevent deadlock when attempting to write + // to rxDiscoMsgCh. + rxDiscoMsgCh chan *disco.AllocateUDPRelayEndpointResponse + doneCh chan relayEndpointAllocWorkDoneEvent + + ctx context.Context cancel context.CancelFunc - // wg.Wait() will return once all associated goroutines have returned - wg *sync.WaitGroup } // init initializes [relayManager] if it is not already initialized. func (r *relayManager) init() { r.initOnce.Do(func() { r.discoInfoByServerDisco = make(map[key.DiscoPublic]*relayHandshakeDiscoInfo) - r.serversByDisco = make(map[key.DiscoPublic]netip.AddrPort) - r.serversByAddrPort = make(map[netip.AddrPort]key.DiscoPublic) - r.allocWorkByEndpoint = make(map[*endpoint]*relayEndpointAllocWork) - r.handshakeWorkByEndpointByServerDisco = make(map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork) + r.serversByNodeKey = make(map[key.NodePublic]candidatePeerRelay) + r.allocWorkByCandidatePeerRelayByEndpoint = make(map[*endpoint]map[candidatePeerRelay]*relayEndpointAllocWork) + r.allocWorkByDiscoKeysByServerNodeKey = make(map[key.NodePublic]map[key.SortedPairOfDiscoPublic]*relayEndpointAllocWork) + r.handshakeWorkByServerDiscoByEndpoint = make(map[*endpoint]map[key.DiscoPublic]*relayHandshakeWork) r.handshakeWorkByServerDiscoVNI = make(map[serverDiscoVNI]*relayHandshakeWork) r.handshakeWorkAwaitingPong = make(map[*relayHandshakeWork]addrPortVNI) r.addrPortVNIToHandshakeWork = make(map[addrPortVNI]*relayHandshakeWork) @@ -262,9 +286,10 @@ func (r 
*relayManager) init() { r.handshakeWorkDoneCh = make(chan relayEndpointHandshakeWorkDoneEvent) r.cancelWorkCh = make(chan *endpoint) r.newServerEndpointCh = make(chan newRelayServerEndpointEvent) - r.rxHandshakeDiscoMsgCh = make(chan relayHandshakeDiscoMsgEvent) - r.serversCh = make(chan set.Set[netip.AddrPort]) - r.getServersCh = make(chan chan set.Set[netip.AddrPort]) + r.rxDiscoMsgCh = make(chan relayDiscoMsgEvent) + r.serversCh = make(chan set.Set[candidatePeerRelay]) + r.getServersCh = make(chan chan set.Set[candidatePeerRelay]) + r.derpHomeChangeCh = make(chan derpHomeChangeEvent) r.runLoopStoppedCh = make(chan struct{}, 1) r.runLoopStoppedCh <- struct{}{} }) @@ -330,6 +355,7 @@ func (r *relayManager) discoInfo(serverDisco key.DiscoPublic) (_ *discoInfo, ok func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, lastBest addrQuality, lastBestIsTrusted bool, dm *disco.CallMeMaybeVia) { se := udprelay.ServerEndpoint{ ServerDisco: dm.ServerDisco, + ClientDisco: dm.ClientDisco, LamportID: dm.LamportID, AddrPorts: dm.AddrPorts, VNI: dm.VNI, @@ -346,14 +372,25 @@ func (r *relayManager) handleCallMeMaybeVia(ep *endpoint, lastBest addrQuality, }) } -// handleGeneveEncapDiscoMsg handles reception of Geneve-encapsulated disco -// messages. -func (r *relayManager) handleGeneveEncapDiscoMsg(conn *Conn, dm disco.Message, di *discoInfo, src epAddr) { - relayManagerInputEvent(r, nil, &r.rxHandshakeDiscoMsgCh, relayHandshakeDiscoMsgEvent{conn: conn, msg: dm, disco: di.discoKey, from: src.ap, vni: src.vni.get(), at: time.Now()}) +// handleRxDiscoMsg handles reception of disco messages that [relayManager] +// may be interested in. This includes all Geneve-encapsulated disco messages +// and [*disco.AllocateUDPRelayEndpointResponse]. If dm is a +// [*disco.AllocateUDPRelayEndpointResponse] then relayServerNodeKey must be +// nonzero. 
+func (r *relayManager) handleRxDiscoMsg(conn *Conn, dm disco.Message, relayServerNodeKey key.NodePublic, discoKey key.DiscoPublic, src epAddr) { + relayManagerInputEvent(r, nil, &r.rxDiscoMsgCh, relayDiscoMsgEvent{ + conn: conn, + msg: dm, + relayServerNodeKey: relayServerNodeKey, + disco: discoKey, + from: src.ap, + vni: src.vni.get(), + at: time.Now(), + }) } // handleRelayServersSet handles an update of the complete relay server set. -func (r *relayManager) handleRelayServersSet(servers set.Set[netip.AddrPort]) { +func (r *relayManager) handleRelayServersSet(servers set.Set[candidatePeerRelay]) { relayManagerInputEvent(r, nil, &r.serversCh, servers) } @@ -396,7 +433,11 @@ type endpointWithLastBest struct { // startUDPRelayPathDiscoveryFor starts UDP relay path discovery for ep on all // known relay servers if ep has no in-progress work. func (r *relayManager) startUDPRelayPathDiscoveryFor(ep *endpoint, lastBest addrQuality, lastBestIsTrusted bool) { - relayManagerInputEvent(r, nil, &r.startDiscoveryCh, endpointWithLastBest{ep, lastBest, lastBestIsTrusted}) + relayManagerInputEvent(r, nil, &r.startDiscoveryCh, endpointWithLastBest{ + ep: ep, + lastBest: lastBest, + lastBestIsTrusted: lastBestIsTrusted, + }) } // stopWork stops all outstanding allocation & handshaking work for 'ep'. @@ -407,13 +448,15 @@ func (r *relayManager) stopWork(ep *endpoint) { // stopWorkRunLoop cancels & clears outstanding allocation and handshaking // work for 'ep'. 
func (r *relayManager) stopWorkRunLoop(ep *endpoint) { - allocWork, ok := r.allocWorkByEndpoint[ep] + byDiscoKeys, ok := r.allocWorkByCandidatePeerRelayByEndpoint[ep] if ok { - allocWork.cancel() - allocWork.wg.Wait() - delete(r.allocWorkByEndpoint, ep) + for _, work := range byDiscoKeys { + work.cancel() + done := <-work.doneCh + r.handleAllocWorkDoneRunLoop(done) + } } - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[ep] + byServerDisco, ok := r.handshakeWorkByServerDiscoByEndpoint[ep] if ok { for _, handshakeWork := range byServerDisco { handshakeWork.cancel() @@ -430,13 +473,33 @@ type addrPortVNI struct { vni uint32 } -func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDiscoMsgEvent) { +func (r *relayManager) handleRxDiscoMsgRunLoop(event relayDiscoMsgEvent) { var ( work *relayHandshakeWork ok bool ) apv := addrPortVNI{event.from, event.vni} switch msg := event.msg.(type) { + case *disco.AllocateUDPRelayEndpointResponse: + sorted := key.NewSortedPairOfDiscoPublic(msg.ClientDisco[0], msg.ClientDisco[1]) + byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[event.relayServerNodeKey] + if !ok { + // No outstanding work tied to this relay sever, discard. + return + } + allocWork, ok := byDiscoKeys[sorted] + if !ok { + // No outstanding work tied to these disco keys, discard. 
+ return + } + select { + case done := <-allocWork.doneCh: + // allocateServerEndpoint returned, clean up its state + r.handleAllocWorkDoneRunLoop(done) + return + case allocWork.rxDiscoMsgCh <- msg: + return + } case *disco.BindUDPRelayEndpointChallenge: work, ok = r.handshakeWorkByServerDiscoVNI[serverDiscoVNI{event.disco, event.vni}] if !ok { @@ -504,8 +567,39 @@ func (r *relayManager) handleRxHandshakeDiscoMsgRunLoop(event relayHandshakeDisc } } +func (r *relayManager) handleAllocWorkDoneRunLoop(done relayEndpointAllocWorkDoneEvent) { + byCandidatePeerRelay, ok := r.allocWorkByCandidatePeerRelayByEndpoint[done.work.wlb.ep] + if !ok { + return + } + work, ok := byCandidatePeerRelay[done.work.candidatePeerRelay] + if !ok || work != done.work { + return + } + delete(byCandidatePeerRelay, done.work.candidatePeerRelay) + if len(byCandidatePeerRelay) == 0 { + delete(r.allocWorkByCandidatePeerRelayByEndpoint, done.work.wlb.ep) + } + byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[done.work.candidatePeerRelay.nodeKey] + if !ok { + // unexpected + return + } + delete(byDiscoKeys, done.work.discoKeys) + if len(byDiscoKeys) == 0 { + delete(r.allocWorkByDiscoKeysByServerNodeKey, done.work.candidatePeerRelay.nodeKey) + } + if !done.allocated.ServerDisco.IsZero() { + r.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ + wlb: done.work.wlb, + se: done.allocated, + server: done.work.candidatePeerRelay, + }) + } +} + func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshakeWorkDoneEvent) { - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[done.work.wlb.ep] + byServerDisco, ok := r.handshakeWorkByServerDiscoByEndpoint[done.work.wlb.ep] if !ok { return } @@ -515,7 +609,7 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak } delete(byServerDisco, done.work.se.ServerDisco) if len(byServerDisco) == 0 { - delete(r.handshakeWorkByEndpointByServerDisco, done.work.wlb.ep) + 
delete(r.handshakeWorkByServerDiscoByEndpoint, done.work.wlb.ep) } delete(r.handshakeWorkByServerDiscoVNI, serverDiscoVNI{done.work.se.ServerDisco, done.work.se.VNI}) apv, ok := r.handshakeWorkAwaitingPong[work] @@ -562,7 +656,7 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay } // Check for duplicate work by [*endpoint] + server disco. - byServerDisco, ok := r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.wlb.ep] + byServerDisco, ok := r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] if ok { existingWork, ok := byServerDisco[newServerEndpoint.se.ServerDisco] if ok { @@ -580,33 +674,9 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay // We're now reasonably sure we're dealing with the latest // [udprelay.ServerEndpoint] from a server event order perspective - // (LamportID). Update server disco key tracking if appropriate. - if newServerEndpoint.server.IsValid() { - serverDisco, ok := r.serversByAddrPort[newServerEndpoint.server] - if !ok { - // Allocation raced with an update to our known servers set. This - // server is no longer known. Return early. - return - } - if serverDisco.Compare(newServerEndpoint.se.ServerDisco) != 0 { - // The server's disco key has either changed, or simply become - // known for the first time. In the former case we end up detaching - // any in-progress handshake work from a "known" relay server. - // Practically speaking we expect the detached work to fail - // if the server key did in fact change (server restart) while we - // were attempting to handshake with it. It is possible, though - // unlikely, for a server addr:port to effectively move between - // nodes. Either way, there is no harm in detaching existing work, - // and we explicitly let that happen for the rare case the detached - // handshake would complete and remain functional. 
- delete(r.serversByDisco, serverDisco) - delete(r.serversByAddrPort, newServerEndpoint.server) - r.serversByDisco[serverDisco] = newServerEndpoint.server - r.serversByAddrPort[newServerEndpoint.server] = serverDisco - } - } + // (LamportID). - if newServerEndpoint.server.IsValid() { + if newServerEndpoint.server.isValid() { // Send a [disco.CallMeMaybeVia] to the remote peer if we allocated this // endpoint, regardless of if we start a handshake below. go r.sendCallMeMaybeVia(newServerEndpoint.wlb.ep, newServerEndpoint.se) @@ -641,14 +711,14 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay work := &relayHandshakeWork{ wlb: newServerEndpoint.wlb, se: newServerEndpoint.se, - rxDiscoMsgCh: make(chan relayHandshakeDiscoMsgEvent), + rxDiscoMsgCh: make(chan relayDiscoMsgEvent), doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), ctx: ctx, cancel: cancel, } if byServerDisco == nil { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) - r.handshakeWorkByEndpointByServerDisco[newServerEndpoint.wlb.ep] = byServerDisco + r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] = byServerDisco } byServerDisco[newServerEndpoint.se.ServerDisco] = work r.handshakeWorkByServerDiscoVNI[sdv] = work @@ -674,12 +744,15 @@ func (r *relayManager) sendCallMeMaybeVia(ep *endpoint, se udprelay.ServerEndpoi return } callMeMaybeVia := &disco.CallMeMaybeVia{ - ServerDisco: se.ServerDisco, - LamportID: se.LamportID, - VNI: se.VNI, - BindLifetime: se.BindLifetime.Duration, - SteadyStateLifetime: se.SteadyStateLifetime.Duration, - AddrPorts: se.AddrPorts, + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, } ep.c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, epDisco.key, callMeMaybeVia, discoVerboseLog) } 
@@ -800,7 +873,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat // one. // // We don't need to TX a pong, that was already handled for us - // in handleRxHandshakeDiscoMsgRunLoop(). + // in handleRxDiscoMsgRunLoop(). txPing(msgEvent.from, nil) case *disco.Pong: at, ok := sentPingAt[msg.TxID] @@ -823,104 +896,113 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat } } -func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { - if len(r.serversByAddrPort) == 0 { - return - } - ctx, cancel := context.WithCancel(context.Background()) - started := &relayEndpointAllocWork{ep: wlb.ep, cancel: cancel, wg: &sync.WaitGroup{}} - for k := range r.serversByAddrPort { - started.wg.Add(1) - go r.allocateSingleServer(ctx, started.wg, k, wlb) - } - r.allocWorkByEndpoint[wlb.ep] = started - go func() { - started.wg.Wait() - relayManagerInputEvent(r, ctx, &r.allocateWorkDoneCh, relayEndpointAllocWorkDoneEvent{work: started}) - // cleanup context cancellation must come after the - // relayManagerInputEvent call, otherwise it returns early without - // writing the event to runLoop(). 
- started.cancel() - }() -} +const allocateUDPRelayEndpointRequestTimeout = time.Second * 10 -type errNotReady struct{ retryAfter time.Duration } +func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork, generation uint32) { + done := relayEndpointAllocWorkDoneEvent{work: work} -func (e errNotReady) Error() string { - return fmt.Sprintf("server not ready, retry after %v", e.retryAfter) -} + defer func() { + work.doneCh <- done + relayManagerInputEvent(r, work.ctx, &r.allocateWorkDoneCh, done) + work.cancel() + }() -const reqTimeout = time.Second * 10 + dm := &disco.AllocateUDPRelayEndpointRequest{ + ClientDisco: work.discoKeys.Get(), + Generation: generation, + } + + sendAllocReq := func() { + work.wlb.ep.c.sendDiscoAllocateUDPRelayEndpointRequest( + epAddr{ + ap: netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, work.candidatePeerRelay.derpHomeRegionID), + }, + work.candidatePeerRelay.nodeKey, + work.candidatePeerRelay.discoKey, + dm, + discoVerboseLog, + ) + } + go sendAllocReq() + + returnAfterTimer := time.NewTimer(allocateUDPRelayEndpointRequestTimeout) + defer returnAfterTimer.Stop() + // While connections to DERP are over TCP, they can be lossy on the DERP + // server when data moves between the two independent streams. Also, the + // peer relay server may not be "ready" (see [tailscale.com/net/udprelay.ErrServerNotReady]). + // So, start a timer to retry once if needed. 
+ retryAfterTimer := time.NewTimer(udprelay.ServerRetryAfter) + defer retryAfterTimer.Stop() -func doAllocate(ctx context.Context, server netip.AddrPort, discoKeys [2]key.DiscoPublic) (udprelay.ServerEndpoint, error) { - var reqBody bytes.Buffer - type allocateRelayEndpointReq struct { - DiscoKeys []key.DiscoPublic - } - a := &allocateRelayEndpointReq{ - DiscoKeys: []key.DiscoPublic{discoKeys[0], discoKeys[1]}, - } - err := json.NewEncoder(&reqBody).Encode(a) - if err != nil { - return udprelay.ServerEndpoint{}, err - } - reqCtx, cancel := context.WithTimeout(ctx, reqTimeout) - defer cancel() - req, err := http.NewRequestWithContext(reqCtx, httpm.POST, "http://"+server.String()+"/v0/relay/endpoint", &reqBody) - if err != nil { - return udprelay.ServerEndpoint{}, err - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return udprelay.ServerEndpoint{}, err - } - defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: - var se udprelay.ServerEndpoint - err = json.NewDecoder(io.LimitReader(resp.Body, 4096)).Decode(&se) - return se, err - case http.StatusServiceUnavailable: - raHeader := resp.Header.Get("Retry-After") - raSeconds, err := strconv.ParseUint(raHeader, 10, 32) - if err == nil { - return udprelay.ServerEndpoint{}, errNotReady{retryAfter: time.Second * time.Duration(raSeconds)} + for { + select { + case <-work.ctx.Done(): + return + case <-returnAfterTimer.C: + return + case <-retryAfterTimer.C: + go sendAllocReq() + case resp := <-work.rxDiscoMsgCh: + if resp.Generation != generation || + !work.discoKeys.Equal(key.NewSortedPairOfDiscoPublic(resp.ClientDisco[0], resp.ClientDisco[1])) { + continue + } + done.allocated = udprelay.ServerEndpoint{ + ServerDisco: resp.ServerDisco, + ClientDisco: resp.ClientDisco, + LamportID: resp.LamportID, + AddrPorts: resp.AddrPorts, + VNI: resp.VNI, + BindLifetime: tstime.GoDuration{Duration: resp.BindLifetime}, + SteadyStateLifetime: tstime.GoDuration{Duration: resp.SteadyStateLifetime}, + } + 
return } - fallthrough - default: - return udprelay.ServerEndpoint{}, fmt.Errorf("non-200 status: %d", resp.StatusCode) } } -func (r *relayManager) allocateSingleServer(ctx context.Context, wg *sync.WaitGroup, server netip.AddrPort, wlb endpointWithLastBest) { - // TODO(jwhited): introduce client metrics counters for notable failures - defer wg.Done() +func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { + if len(r.serversByNodeKey) == 0 { + return + } remoteDisco := wlb.ep.disco.Load() if remoteDisco == nil { return } - firstTry := true - for { - se, err := doAllocate(ctx, server, [2]key.DiscoPublic{wlb.ep.c.discoPublic, remoteDisco.key}) - if err == nil { - relayManagerInputEvent(r, ctx, &r.newServerEndpointCh, newRelayServerEndpointEvent{ - wlb: wlb, - se: se, - server: server, // we allocated this endpoint (vs CallMeMaybeVia reception), mark it as such - }) - return - } - wlb.ep.c.logf("[v1] magicsock: relayManager: error allocating endpoint on %v for %v: %v", server, wlb.ep.discoShort(), err) - var notReady errNotReady - if firstTry && errors.As(err, ¬Ready) { - select { - case <-ctx.Done(): - return - case <-time.After(min(notReady.retryAfter, reqTimeout)): - firstTry = false + discoKeys := key.NewSortedPairOfDiscoPublic(wlb.ep.c.discoPublic, remoteDisco.key) + for _, v := range r.serversByNodeKey { + byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[v.nodeKey] + if !ok { + byDiscoKeys = make(map[key.SortedPairOfDiscoPublic]*relayEndpointAllocWork) + r.allocWorkByDiscoKeysByServerNodeKey[v.nodeKey] = byDiscoKeys + } else { + _, ok = byDiscoKeys[discoKeys] + if ok { + // If there is an existing key, a disco key collision may have + // occurred across peers ([*endpoint]). Do not overwrite the + // existing work, let it finish. 
+ wlb.ep.c.logf("[unexpected] magicsock: relayManager: suspected disco key collision on server %v for keys: %v", v.nodeKey.ShortString(), discoKeys) continue } } - return + ctx, cancel := context.WithCancel(context.Background()) + started := &relayEndpointAllocWork{ + wlb: wlb, + discoKeys: discoKeys, + candidatePeerRelay: v, + rxDiscoMsgCh: make(chan *disco.AllocateUDPRelayEndpointResponse), + doneCh: make(chan relayEndpointAllocWorkDoneEvent, 1), + ctx: ctx, + cancel: cancel, + } + byDiscoKeys[discoKeys] = started + byCandidatePeerRelay, ok := r.allocWorkByCandidatePeerRelayByEndpoint[wlb.ep] + if !ok { + byCandidatePeerRelay = make(map[candidatePeerRelay]*relayEndpointAllocWork) + r.allocWorkByCandidatePeerRelayByEndpoint[wlb.ep] = byCandidatePeerRelay + } + byCandidatePeerRelay[v] = started + r.allocGeneration++ + go r.allocateServerEndpoint(started, r.allocGeneration) } } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 01f9258ad7521..e4891f5678a24 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -4,7 +4,6 @@ package magicsock import ( - "net/netip" "testing" "tailscale.com/disco" @@ -22,26 +21,57 @@ func TestRelayManagerInitAndIdle(t *testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, addrQuality{}, false, &disco.CallMeMaybeVia{ServerDisco: key.NewDisco().Public()}) + rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, addrQuality{}, false, &disco.CallMeMaybeVia{UDPRelayEndpoint: disco.UDPRelayEndpoint{ServerDisco: key.NewDisco().Public()}}) <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleGeneveEncapDiscoMsg(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, &discoInfo{}, epAddr{}) + rm.handleRxDiscoMsg(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, key.NodePublic{}, key.DiscoPublic{}, 
epAddr{}) <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleRelayServersSet(make(set.Set[netip.AddrPort])) + rm.handleRelayServersSet(make(set.Set[candidatePeerRelay])) <-rm.runLoopStoppedCh rm = relayManager{} rm.getServers() <-rm.runLoopStoppedCh + + rm = relayManager{} + rm.handleDERPHomeChange(key.NodePublic{}, 1) + <-rm.runLoopStoppedCh +} + +func TestRelayManagerHandleDERPHomeChange(t *testing.T) { + rm := relayManager{} + servers := make(set.Set[candidatePeerRelay], 1) + c := candidatePeerRelay{ + nodeKey: key.NewNode().Public(), + discoKey: key.NewDisco().Public(), + derpHomeRegionID: 1, + } + servers.Add(c) + rm.handleRelayServersSet(servers) + want := c + want.derpHomeRegionID = 2 + rm.handleDERPHomeChange(c.nodeKey, 2) + got := rm.getServers() + if len(got) != 1 { + t.Fatalf("got %d servers, want 1", len(got)) + } + _, ok := got[want] + if !ok { + t.Fatal("DERP home change failed to propagate") + } } func TestRelayManagerGetServers(t *testing.T) { rm := relayManager{} - servers := make(set.Set[netip.AddrPort], 1) - servers.Add(netip.MustParseAddrPort("192.0.2.1:7")) + servers := make(set.Set[candidatePeerRelay], 1) + c := candidatePeerRelay{ + nodeKey: key.NewNode().Public(), + discoKey: key.NewDisco().Public(), + } + servers.Add(c) rm.handleRelayServersSet(servers) got := rm.getServers() if !servers.Equal(got) { From 0d03a3746a0229fe749b94b1d60491de64b135cd Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 21 Jul 2025 10:35:53 -0700 Subject: [PATCH 0143/1093] feature/tpm: log errors on the initial info fetch (#16574) This function is behind a sync.Once so we should only see errors at startup. In particular the error from `open` is useful to diagnose why TPM might not be accessible. 
Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/tpm.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 9499ed02a8b2f..0260cca586e13 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -44,8 +44,10 @@ func init() { func info() *tailcfg.TPMInfo { tpm, err := open() if err != nil { + log.Printf("TPM: error opening: %v", err) return nil } + log.Printf("TPM: successfully opened") defer tpm.Close() info := new(tailcfg.TPMInfo) @@ -74,10 +76,12 @@ func info() *tailcfg.TPMInfo { PropertyCount: 1, }.Execute(tpm) if err != nil { + log.Printf("TPM: GetCapability %v: %v", cap.prop, err) continue } props, err := resp.CapabilityData.Data.TPMProperties() if err != nil { + log.Printf("TPM: GetCapability %v: %v", cap.prop, err) continue } if len(props.TPMProperty) == 0 { From c989824aac0df05b00275ae8911b7bbf26797d9d Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 21 Jul 2025 19:06:36 +0100 Subject: [PATCH 0144/1093] cmd/k8s-operator: Allow specifying cluster ips for nameservers (#16477) This commit modifies the kubernetes operator's `DNSConfig` resource with the addition of a new field at `nameserver.service.clusterIP`. This field allows users to specify a static in-cluster IP address of the nameserver when deployed. 
Fixes #14305 Signed-off-by: David Bond --- .../deploy/crds/tailscale.com_dnsconfigs.yaml | 9 +- .../deploy/manifests/operator.yaml | 9 +- cmd/k8s-operator/nameserver.go | 9 +- cmd/k8s-operator/nameserver_test.go | 177 +++++++++++------- k8s-operator/api.md | 19 +- .../apis/v1alpha1/types_tsdnsconfig.go | 11 +- .../apis/v1alpha1/zz_generated.deepcopy.go | 20 ++ 7 files changed, 179 insertions(+), 75 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index 268d978c15f37..bffad47f97191 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -101,6 +101,13 @@ spec: tag: description: Tag defaults to unstable. type: string + service: + description: Service configuration. + type: object + properties: + clusterIP: + description: ClusterIP sets the static IP of the service used by the nameserver. + type: string status: description: |- Status describes the status of the DNSConfig. This is set @@ -172,7 +179,7 @@ spec: ip: description: |- IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. - Currently you must manually update your cluster DNS config to add + Currently, you must manually update your cluster DNS config to add this address as a stub nameserver for ts.net for cluster workloads to be able to resolve MagicDNS names associated with egress or Ingress proxies. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index ac8143e98c22b..175f2a7fbe9ba 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -389,6 +389,13 @@ spec: description: Tag defaults to unstable. type: string type: object + service: + description: Service configuration. + properties: + clusterIP: + description: ClusterIP sets the static IP of the service used by the nameserver. 
+ type: string + type: object type: object required: - nameserver @@ -462,7 +469,7 @@ spec: ip: description: |- IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. - Currently you must manually update your cluster DNS config to add + Currently, you must manually update your cluster DNS config to add this address as a stub nameserver for ts.net for cluster workloads to be able to resolve MagicDNS names associated with egress or Ingress proxies. diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 20d66f7d0766a..983a28c918276 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -7,14 +7,13 @@ package main import ( "context" + _ "embed" "errors" "fmt" "slices" "strings" "sync" - _ "embed" - "go.uber.org/zap" xslices "golang.org/x/exp/slices" appsv1 "k8s.io/api/apps/v1" @@ -183,6 +182,10 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Tag != "" { dCfg.imageTag = tsDNSCfg.Spec.Nameserver.Image.Tag } + if tsDNSCfg.Spec.Nameserver.Service != nil { + dCfg.clusterIP = tsDNSCfg.Spec.Nameserver.Service.ClusterIP + } + for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} { if err := deployable.updateObj(ctx, dCfg, a.Client); err != nil { return fmt.Errorf("error reconciling %s: %w", deployable.kind, err) @@ -213,6 +216,7 @@ type deployConfig struct { labels map[string]string ownerRefs []metav1.OwnerReference namespace string + clusterIP string } var ( @@ -267,6 +271,7 @@ var ( svc.ObjectMeta.Labels = cfg.labels svc.ObjectMeta.OwnerReferences = cfg.ownerRefs svc.ObjectMeta.Namespace = cfg.namespace + svc.Spec.ClusterIP = cfg.clusterIP _, err := createOrUpdate[corev1.Service](ctx, kubeClient, cfg.namespace, svc, func(*corev1.Service) {}) return err }, diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go 
index cec95b84ee719..55a998ac31979 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -26,7 +26,7 @@ import ( ) func TestNameserverReconciler(t *testing.T) { - dnsCfg := &tsapi.DNSConfig{ + dnsConfig := &tsapi.DNSConfig{ TypeMeta: metav1.TypeMeta{Kind: "DNSConfig", APIVersion: "tailscale.com/v1alpha1"}, ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -37,91 +37,130 @@ func TestNameserverReconciler(t *testing.T) { Repo: "test", Tag: "v0.0.1", }, + Service: &tsapi.NameserverService{ + ClusterIP: "5.4.3.2", + }, }, }, } fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). - WithObjects(dnsCfg). - WithStatusSubresource(dnsCfg). + WithObjects(dnsConfig). + WithStatusSubresource(dnsConfig). Build() - zl, err := zap.NewDevelopment() + + logger, err := zap.NewDevelopment() if err != nil { t.Fatal(err) } - cl := tstest.NewClock(tstest.ClockOpts{}) - nr := &NameserverReconciler{ + + clock := tstest.NewClock(tstest.ClockOpts{}) + reconciler := &NameserverReconciler{ Client: fc, - clock: cl, - logger: zl.Sugar(), - tsNamespace: "tailscale", + clock: clock, + logger: logger.Sugar(), + tsNamespace: tsNamespace, } - expectReconciled(t, nr, "", "test") - // Verify that nameserver Deployment has been created and has the expected fields. 
- wantsDeploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: "tailscale"}, TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.Identifier()}} - if err := yaml.Unmarshal(deployYaml, wantsDeploy); err != nil { - t.Fatalf("unmarshalling yaml: %v", err) - } - dnsCfgOwnerRef := metav1.NewControllerRef(dnsCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig")) - wantsDeploy.OwnerReferences = []metav1.OwnerReference{*dnsCfgOwnerRef} - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.1" - wantsDeploy.Namespace = "tailscale" - labels := nameserverResourceLabels("test", "tailscale") - wantsDeploy.ObjectMeta.Labels = labels - expectEqual(t, fc, wantsDeploy) - - // Verify that DNSConfig advertizes the nameserver's Service IP address, - // has the ready status condition and tailscale finalizer. - mustUpdate(t, fc, "tailscale", "nameserver", func(svc *corev1.Service) { - svc.Spec.ClusterIP = "1.2.3.4" + expectReconciled(t, reconciler, "", "test") + + ownerReference := metav1.NewControllerRef(dnsConfig, tsapi.SchemeGroupVersion.WithKind("DNSConfig")) + nameserverLabels := nameserverResourceLabels(dnsConfig.Name, tsNamespace) + + wantsDeploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: tsNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: appsv1.SchemeGroupVersion.Identifier()}} + t.Run("deployment has expected fields", func(t *testing.T) { + if err = yaml.Unmarshal(deployYaml, wantsDeploy); err != nil { + t.Fatalf("unmarshalling yaml: %v", err) + } + wantsDeploy.OwnerReferences = []metav1.OwnerReference{*ownerReference} + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.1" + wantsDeploy.Namespace = tsNamespace + wantsDeploy.ObjectMeta.Labels = nameserverLabels + expectEqual(t, fc, wantsDeploy) }) - expectReconciled(t, nr, "", "test") - dnsCfg.Status.Nameserver = &tsapi.NameserverStatus{ - IP: "1.2.3.4", - } - dnsCfg.Finalizers = 
[]string{FinalizerName} - dnsCfg.Status.Conditions = append(dnsCfg.Status.Conditions, metav1.Condition{ - Type: string(tsapi.NameserverReady), - Status: metav1.ConditionTrue, - Reason: reasonNameserverCreated, - Message: reasonNameserverCreated, - LastTransitionTime: metav1.Time{Time: cl.Now().Truncate(time.Second)}, + + wantsSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "nameserver", Namespace: tsNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: corev1.SchemeGroupVersion.Identifier()}} + t.Run("service has expected fields", func(t *testing.T) { + if err = yaml.Unmarshal(svcYaml, wantsSvc); err != nil { + t.Fatalf("unmarshalling yaml: %v", err) + } + wantsSvc.Spec.ClusterIP = dnsConfig.Spec.Nameserver.Service.ClusterIP + wantsSvc.OwnerReferences = []metav1.OwnerReference{*ownerReference} + wantsSvc.Namespace = tsNamespace + wantsSvc.ObjectMeta.Labels = nameserverLabels + expectEqual(t, fc, wantsSvc) }) - expectEqual(t, fc, dnsCfg) - // // Verify that nameserver image gets updated to match DNSConfig spec. - mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { - dnsCfg.Spec.Nameserver.Image.Tag = "v0.0.2" + t.Run("dns config status is set", func(t *testing.T) { + // Verify that DNSConfig advertizes the nameserver's Service IP address, + // has the ready status condition and tailscale finalizer. 
+ mustUpdate(t, fc, "tailscale", "nameserver", func(svc *corev1.Service) { + svc.Spec.ClusterIP = "1.2.3.4" + }) + expectReconciled(t, reconciler, "", "test") + + dnsConfig.Finalizers = []string{FinalizerName} + dnsConfig.Status.Nameserver = &tsapi.NameserverStatus{ + IP: "1.2.3.4", + } + dnsConfig.Status.Conditions = append(dnsConfig.Status.Conditions, metav1.Condition{ + Type: string(tsapi.NameserverReady), + Status: metav1.ConditionTrue, + Reason: reasonNameserverCreated, + Message: reasonNameserverCreated, + LastTransitionTime: metav1.Time{Time: clock.Now().Truncate(time.Second)}, + }) + + expectEqual(t, fc, dnsConfig) }) - expectReconciled(t, nr, "", "test") - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.2" - expectEqual(t, fc, wantsDeploy) - - // Verify that when another actor sets ConfigMap data, it does not get - // overwritten by nameserver reconciler. - dnsRecords := &operatorutils.Records{Version: "v1alpha1", IP4: map[string][]string{"foo.ts.net": {"1.2.3.4"}}} - bs, err := json.Marshal(dnsRecords) - if err != nil { - t.Fatalf("error marshalling ConfigMap contents: %v", err) - } - mustUpdate(t, fc, "tailscale", "dnsrecords", func(cm *corev1.ConfigMap) { - mak.Set(&cm.Data, "records.json", string(bs)) + + t.Run("nameserver image can be updated", func(t *testing.T) { + // Verify that nameserver image gets updated to match DNSConfig spec. + mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { + dnsCfg.Spec.Nameserver.Image.Tag = "v0.0.2" + }) + expectReconciled(t, reconciler, "", "test") + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.2" + expectEqual(t, fc, wantsDeploy) + }) + + t.Run("reconciler does not overwrite custom configuration", func(t *testing.T) { + // Verify that when another actor sets ConfigMap data, it does not get + // overwritten by nameserver reconciler. 
+ dnsRecords := &operatorutils.Records{Version: "v1alpha1", IP4: map[string][]string{"foo.ts.net": {"1.2.3.4"}}} + bs, err := json.Marshal(dnsRecords) + if err != nil { + t.Fatalf("error marshalling ConfigMap contents: %v", err) + } + + mustUpdate(t, fc, "tailscale", "dnsrecords", func(cm *corev1.ConfigMap) { + mak.Set(&cm.Data, "records.json", string(bs)) + }) + + expectReconciled(t, reconciler, "", "test") + + wantCm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dnsrecords", + Namespace: "tailscale", + Labels: nameserverLabels, + OwnerReferences: []metav1.OwnerReference{*ownerReference}, + }, + TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"}, + Data: map[string]string{"records.json": string(bs)}, + } + + expectEqual(t, fc, wantCm) }) - expectReconciled(t, nr, "", "test") - wantCm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "dnsrecords", - Namespace: "tailscale", Labels: labels, OwnerReferences: []metav1.OwnerReference{*dnsCfgOwnerRef}}, - TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"}, - Data: map[string]string{"records.json": string(bs)}, - } - expectEqual(t, fc, wantCm) - // Verify that if dnsconfig.spec.nameserver.image.{repo,tag} are unset, - // the nameserver image defaults to tailscale/k8s-nameserver:unstable. - mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { - dnsCfg.Spec.Nameserver.Image = nil + t.Run("uses default nameserver image", func(t *testing.T) { + // Verify that if dnsconfig.spec.nameserver.image.{repo,tag} are unset, + // the nameserver image defaults to tailscale/k8s-nameserver:unstable. 
+ mustUpdate(t, fc, "", "test", func(dnsCfg *tsapi.DNSConfig) { + dnsCfg.Spec.Nameserver.Image = nil + }) + expectReconciled(t, reconciler, "", "test") + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:unstable" + expectEqual(t, fc, wantsDeploy) }) - expectReconciled(t, nr, "", "test") - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:unstable" - expectEqual(t, fc, wantsDeploy) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index cd36798d69f8b..564c87f503a22 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -422,6 +422,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | | +| `service` _[NameserverService](#nameserverservice)_ | Service configuration. | | | #### NameserverImage @@ -441,6 +442,22 @@ _Appears in:_ | `tag` _string_ | Tag defaults to unstable. | | | +#### NameserverService + + + + + + + +_Appears in:_ +- [Nameserver](#nameserver) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `clusterIP` _string_ | ClusterIP sets the static IP of the service used by the nameserver. | | | + + #### NameserverStatus @@ -454,7 +471,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `ip` _string_ | IP is the ClusterIP of the Service fronting the deployed ts.net nameserver.
Currently you must manually update your cluster DNS config to add
this address as a stub nameserver for ts.net for cluster workloads to be
able to resolve MagicDNS names associated with egress or Ingress
proxies.
The IP address will change if you delete and recreate the DNSConfig. | | | +| `ip` _string_ | IP is the ClusterIP of the Service fronting the deployed ts.net nameserver.
Currently, you must manually update your cluster DNS config to add
this address as a stub nameserver for ts.net for cluster workloads to be
able to resolve MagicDNS names associated with egress or Ingress
proxies.
The IP address will change if you delete and recreate the DNSConfig. | | | #### NodePortConfig diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 0178d60eab606..0e26ee6476d7a 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -82,6 +82,9 @@ type Nameserver struct { // Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. // +optional Image *NameserverImage `json:"image,omitempty"` + // Service configuration. + // +optional + Service *NameserverService `json:"service,omitempty"` } type NameserverImage struct { @@ -93,6 +96,12 @@ type NameserverImage struct { Tag string `json:"tag,omitempty"` } +type NameserverService struct { + // ClusterIP sets the static IP of the service used by the nameserver. + // +optional + ClusterIP string `json:"clusterIP,omitempty"` +} + type DNSConfigStatus struct { // +listType=map // +listMapKey=type @@ -105,7 +114,7 @@ type DNSConfigStatus struct { type NameserverStatus struct { // IP is the ClusterIP of the Service fronting the deployed ts.net nameserver. - // Currently you must manually update your cluster DNS config to add + // Currently, you must manually update your cluster DNS config to add // this address as a stub nameserver for ts.net for cluster workloads to be // able to resolve MagicDNS names associated with egress or Ingress // proxies. 
diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 32adbd6804ed0..6586c13546f4f 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -385,6 +385,11 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) { *out = new(NameserverImage) **out = **in } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(NameserverService) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Nameserver. @@ -412,6 +417,21 @@ func (in *NameserverImage) DeepCopy() *NameserverImage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameserverService) DeepCopyInto(out *NameserverService) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameserverService. +func (in *NameserverService) DeepCopy() *NameserverService { + if in == nil { + return nil + } + out := new(NameserverService) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NameserverStatus) DeepCopyInto(out *NameserverStatus) { *out = *in From 8453170aa120227dfec3c3141f081d9495a0a7c1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 21 Jul 2025 12:36:16 -0700 Subject: [PATCH 0145/1093] feature/relayserver: fix consumeEventbusTopics deadlock (#16618) consumeEventbusTopics now owns server and related eventbus machinery. 
Updates tailscale/corp#30651 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 121 +++++++++------------ feature/relayserver/relayserver_test.go | 134 +++++++++++++++--------- 2 files changed, 136 insertions(+), 119 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index f4077b5f9da0b..b90a6234508f2 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -6,7 +6,6 @@ package relayserver import ( - "errors" "sync" "tailscale.com/disco" @@ -48,16 +47,12 @@ type extension struct { logf logger.Logf bus *eventbus.Bus - mu sync.Mutex // guards the following fields - eventClient *eventbus.Client // closed to stop consumeEventbusTopics - reqSub *eventbus.Subscriber[magicsock.UDPRelayAllocReq] // receives endpoint alloc requests from magicsock - respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] // publishes endpoint alloc responses to magicsock + mu sync.Mutex // guards the following fields shutdown bool port *int // ipn.Prefs.RelayServerPort, nil if disabled - busDoneCh chan struct{} // non-nil if port is non-nil, closed when consumeEventbusTopics returns + disconnectFromBusCh chan struct{} // non-nil if consumeEventbusTopics is running, closed to signal it to return + busDoneCh chan struct{} // non-nil if consumeEventbusTopics is running, closed when it returns hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer - server relayServer // lazily initialized - } // relayServer is the interface of [udprelay.Server]. @@ -81,26 +76,27 @@ func (e *extension) Init(host ipnext.Host) error { return nil } -// initBusConnection initializes the [*eventbus.Client], [*eventbus.Subscriber], -// [*eventbus.Publisher], and [chan struct{}] used to publish/receive endpoint -// allocation messages to/from the [*eventbus.Bus]. It also starts -// consumeEventbusTopics in a separate goroutine. 
-func (e *extension) initBusConnection() { - e.eventClient = e.bus.Client("relayserver.extension") - e.reqSub = eventbus.Subscribe[magicsock.UDPRelayAllocReq](e.eventClient) - e.respPub = eventbus.Publish[magicsock.UDPRelayAllocResp](e.eventClient) +// handleBusLifetimeLocked handles the lifetime of consumeEventbusTopics. +func (e *extension) handleBusLifetimeLocked() { + busShouldBeRunning := !e.shutdown && e.port != nil && !e.hasNodeAttrDisableRelayServer + if !busShouldBeRunning { + e.disconnectFromBusLocked() + return + } + if e.busDoneCh != nil { + return // already running + } + port := *e.port + e.disconnectFromBusCh = make(chan struct{}) e.busDoneCh = make(chan struct{}) - go e.consumeEventbusTopics() + go e.consumeEventbusTopics(port) } func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { e.mu.Lock() defer e.mu.Unlock() e.hasNodeAttrDisableRelayServer = nodeView.HasCap(tailcfg.NodeAttrDisableRelayServer) - if e.hasNodeAttrDisableRelayServer && e.server != nil { - e.server.Close() - e.server = nil - } + e.handleBusLifetimeLocked() } func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { @@ -110,43 +106,52 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV enableOrDisableServer := ok != (e.port != nil) portChanged := ok && e.port != nil && newPort != *e.port if enableOrDisableServer || portChanged || !sameNode { - if e.server != nil { - e.server.Close() - e.server = nil - } - if e.port != nil { - e.eventClient.Close() - <-e.busDoneCh - } + e.disconnectFromBusLocked() e.port = nil if ok { e.port = ptr.To(newPort) - e.initBusConnection() } } + e.handleBusLifetimeLocked() } -func (e *extension) consumeEventbusTopics() { +func (e *extension) consumeEventbusTopics(port int) { defer close(e.busDoneCh) + eventClient := e.bus.Client("relayserver.extension") + reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](eventClient) + respPub := 
eventbus.Publish[magicsock.UDPRelayAllocResp](eventClient) + defer eventClient.Close() + + var rs relayServer // lazily initialized + defer func() { + if rs != nil { + rs.Close() + } + }() for { select { - case <-e.reqSub.Done(): + case <-e.disconnectFromBusCh: + return + case <-reqSub.Done(): // If reqSub is done, the eventClient has been closed, which is a // signal to return. return - case req := <-e.reqSub.Events(): - rs, err := e.relayServerOrInit() - if err != nil { - e.logf("error initializing server: %v", err) - continue + case req := <-reqSub.Events(): + if rs == nil { + var err error + rs, err = udprelay.NewServer(e.logf, port, nil) + if err != nil { + e.logf("error initializing server: %v", err) + continue + } } se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) if err != nil { e.logf("error allocating endpoint: %v", err) continue } - e.respPub.Publish(magicsock.UDPRelayAllocResp{ + respPub.Publish(magicsock.UDPRelayAllocResp{ ReqRxFromNodeKey: req.RxFromNodeKey, ReqRxFromDiscoKey: req.RxFromDiscoKey, Message: &disco.AllocateUDPRelayEndpointResponse{ @@ -164,44 +169,22 @@ func (e *extension) consumeEventbusTopics() { }) } } +} +func (e *extension) disconnectFromBusLocked() { + if e.busDoneCh != nil { + close(e.disconnectFromBusCh) + <-e.busDoneCh + e.busDoneCh = nil + e.disconnectFromBusCh = nil + } } // Shutdown implements [ipnlocal.Extension]. 
func (e *extension) Shutdown() error { e.mu.Lock() defer e.mu.Unlock() + e.disconnectFromBusLocked() e.shutdown = true - if e.server != nil { - e.server.Close() - e.server = nil - } - if e.port != nil { - e.eventClient.Close() - <-e.busDoneCh - } return nil } - -func (e *extension) relayServerOrInit() (relayServer, error) { - e.mu.Lock() - defer e.mu.Unlock() - if e.shutdown { - return nil, errors.New("relay server is shutdown") - } - if e.server != nil { - return e.server, nil - } - if e.port == nil { - return nil, errors.New("relay server is not configured") - } - if e.hasNodeAttrDisableRelayServer { - return nil, errors.New("disable-relay-server node attribute is present") - } - var err error - e.server, err = udprelay.NewServer(e.logf, *e.port, nil) - if err != nil { - return nil, err - } - return e.server, nil -} diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 84158188e90fb..d3fc36a83674a 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -4,107 +4,91 @@ package relayserver import ( - "errors" "testing" "tailscale.com/ipn" - "tailscale.com/net/udprelay/endpoint" "tailscale.com/tsd" - "tailscale.com/types/key" "tailscale.com/types/ptr" + "tailscale.com/util/eventbus" ) -type fakeRelayServer struct{} - -func (f *fakeRelayServer) Close() error { return nil } - -func (f *fakeRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (endpoint.ServerEndpoint, error) { - return endpoint.ServerEndpoint{}, errors.New("fake relay server") -} - func Test_extension_profileStateChanged(t *testing.T) { prefsWithPortOne := ipn.Prefs{RelayServerPort: ptr.To(1)} prefsWithNilPort := ipn.Prefs{RelayServerPort: nil} type fields struct { - server relayServer - port *int + port *int } type args struct { prefs ipn.PrefsView sameNode bool } tests := []struct { - name string - fields fields - args args - wantPort *int - wantNilServer bool + name string + fields fields + args args + 
wantPort *int + wantBusRunning bool }{ { - name: "no changes non-nil server", + name: "no changes non-nil port", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(1), + port: ptr.To(1), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), - wantNilServer: false, + wantPort: ptr.To(1), + wantBusRunning: true, }, { name: "prefs port nil", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(1), + port: ptr.To(1), }, args: args{ prefs: prefsWithNilPort.View(), sameNode: true, }, - wantPort: nil, - wantNilServer: true, + wantPort: nil, + wantBusRunning: false, }, { name: "prefs port changed", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(2), + port: ptr.To(2), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), - wantNilServer: true, + wantPort: ptr.To(1), + wantBusRunning: true, }, { name: "sameNode false", fields: fields{ - server: &fakeRelayServer{}, - port: ptr.To(1), + port: ptr.To(1), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), - wantNilServer: true, + wantPort: ptr.To(1), + wantBusRunning: true, }, { name: "prefs port non-nil extension port nil", fields: fields{ - server: nil, - port: nil, + port: nil, }, args: args{ prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), - wantNilServer: true, + wantPort: ptr.To(1), + wantBusRunning: true, }, } for _, tt := range tests { @@ -112,19 +96,13 @@ func Test_extension_profileStateChanged(t *testing.T) { sys := tsd.NewSystem() bus := sys.Bus.Get() e := &extension{ - port: tt.fields.port, - server: tt.fields.server, - bus: bus, - } - if e.port != nil { - // Entering profileStateChanged with a non-nil port requires - // bus init, which is called in profileStateChanged when - // transitioning port from nil to non-nil. 
- e.initBusConnection() + port: tt.fields.port, + bus: bus, } + defer e.disconnectFromBusLocked() e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) - if tt.wantNilServer != (e.server == nil) { - t.Errorf("wantNilServer: %v != (e.server == nil): %v", tt.wantNilServer, e.server == nil) + if tt.wantBusRunning != (e.busDoneCh != nil) { + t.Errorf("wantBusRunning: %v != (e.busDoneCh != nil): %v", tt.wantBusRunning, e.busDoneCh != nil) } if (tt.wantPort == nil) != (e.port == nil) { t.Errorf("(tt.wantPort == nil): %v != (e.port == nil): %v", tt.wantPort == nil, e.port == nil) @@ -134,3 +112,59 @@ func Test_extension_profileStateChanged(t *testing.T) { }) } } + +func Test_extension_handleBusLifetimeLocked(t *testing.T) { + tests := []struct { + name string + shutdown bool + port *int + busDoneCh chan struct{} + hasNodeAttrDisableRelayServer bool + wantBusRunning bool + }{ + { + name: "want running", + shutdown: false, + port: ptr.To(1), + hasNodeAttrDisableRelayServer: false, + wantBusRunning: true, + }, + { + name: "shutdown true", + shutdown: true, + port: ptr.To(1), + hasNodeAttrDisableRelayServer: false, + wantBusRunning: false, + }, + { + name: "port nil", + shutdown: false, + port: nil, + hasNodeAttrDisableRelayServer: false, + wantBusRunning: false, + }, + { + name: "hasNodeAttrDisableRelayServer true", + shutdown: false, + port: nil, + hasNodeAttrDisableRelayServer: true, + wantBusRunning: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &extension{ + bus: eventbus.New(), + shutdown: tt.shutdown, + port: tt.port, + busDoneCh: tt.busDoneCh, + hasNodeAttrDisableRelayServer: tt.hasNodeAttrDisableRelayServer, + } + e.handleBusLifetimeLocked() + defer e.disconnectFromBusLocked() + if tt.wantBusRunning != (e.busDoneCh != nil) { + t.Errorf("wantBusRunning: %v != (e.busDoneCh != nil): %v", tt.wantBusRunning, e.busDoneCh != nil) + } + }) + } +} From 6f7e78b10ffac8f1dcd79aebe12b38ee96e76ce7 Mon Sep 17 
00:00:00 2001 From: Tom Proctor Date: Tue, 22 Jul 2025 10:07:09 +0100 Subject: [PATCH 0146/1093] cmd/tailscale/cli: make configure kubeconfig accept Tailscale Services (#16601) The Kubernetes API server proxy is getting the ability to serve on a Tailscale Service instead of individual node names. Update the configure kubeconfig sub-command to accept arguments that look like a Tailscale Service. Note, we can't know for sure whether a peer is advertising a Tailscale Service, we can only guess based on the ExtraRecords in the netmap and that IP showing up in a peer's AllowedIPs. Also adds an --http flag to allow targeting individual proxies that can be adverting on http for their node name, and makes the command a bit more forgiving on the range of inputs it accepts and how eager it is to print the help text when the input is obviously wrong. Updates #13358 Change-Id: Ica0509c6b2c707252a43d7c18b530ec1acf7508f Signed-off-by: Tom Proctor --- cmd/tailscale/cli/configure-kube.go | 151 +++++++++++++++++++++-- cmd/tailscale/cli/configure-kube_test.go | 56 ++++++++- 2 files changed, 194 insertions(+), 13 deletions(-) diff --git a/cmd/tailscale/cli/configure-kube.go b/cmd/tailscale/cli/configure-kube.go index 6bc4e202efd4e..e74e8877996fe 100644 --- a/cmd/tailscale/cli/configure-kube.go +++ b/cmd/tailscale/cli/configure-kube.go @@ -9,17 +9,29 @@ import ( "errors" "flag" "fmt" + "net/netip" + "net/url" "os" "path/filepath" "slices" "strings" + "time" "github.com/peterbourgon/ff/v3/ffcli" "k8s.io/client-go/util/homedir" "sigs.k8s.io/yaml" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" + "tailscale.com/types/netmap" + "tailscale.com/util/dnsname" "tailscale.com/version" ) +var configureKubeconfigArgs struct { + http bool // Use HTTP instead of HTTPS (default) for the auth proxy. 
+} + func configureKubeconfigCmd() *ffcli.Command { return &ffcli.Command{ Name: "kubeconfig", @@ -34,6 +46,7 @@ See: https://tailscale.com/s/k8s-auth-proxy `), FlagSet: (func() *flag.FlagSet { fs := newFlagSet("kubeconfig") + fs.BoolVar(&configureKubeconfigArgs.http, "http", false, "Use HTTP instead of HTTPS to connect to the auth proxy. Ignored if you include a scheme in the hostname argument.") return fs })(), Exec: runConfigureKubeconfig, @@ -70,10 +83,13 @@ func kubeconfigPath() (string, error) { } func runConfigureKubeconfig(ctx context.Context, args []string) error { - if len(args) != 1 { - return errors.New("unknown arguments") + if len(args) != 1 || args[0] == "" { + return flag.ErrHelp + } + hostOrFQDNOrIP, http, err := getInputs(args[0], configureKubeconfigArgs.http) + if err != nil { + return fmt.Errorf("error parsing inputs: %w", err) } - hostOrFQDN := args[0] st, err := localClient.Status(ctx) if err != nil { @@ -82,22 +98,45 @@ func runConfigureKubeconfig(ctx context.Context, args []string) error { if st.BackendState != "Running" { return errors.New("Tailscale is not running") } - targetFQDN, ok := nodeDNSNameFromArg(st, hostOrFQDN) - if !ok { - return fmt.Errorf("no peer found with hostname %q", hostOrFQDN) + nm, err := getNetMap(ctx) + if err != nil { + return err + } + + targetFQDN, err := nodeOrServiceDNSNameFromArg(st, nm, hostOrFQDNOrIP) + if err != nil { + return err } targetFQDN = strings.TrimSuffix(targetFQDN, ".") var kubeconfig string if kubeconfig, err = kubeconfigPath(); err != nil { return err } - if err = setKubeconfigForPeer(targetFQDN, kubeconfig); err != nil { + scheme := "https://" + if http { + scheme = "http://" + } + if err = setKubeconfigForPeer(scheme, targetFQDN, kubeconfig); err != nil { return err } - printf("kubeconfig configured for %q\n", hostOrFQDN) + printf("kubeconfig configured for %q at URL %q\n", targetFQDN, scheme+targetFQDN) return nil } +func getInputs(arg string, httpArg bool) (string, bool, error) { + u, err 
:= url.Parse(arg) + if err != nil { + return "", false, err + } + + switch u.Scheme { + case "http", "https": + return u.Host, u.Scheme == "http", nil + default: + return arg, httpArg, nil + } +} + // appendOrSetNamed finds a map with a "name" key matching name in dst, and // replaces it with val. If no such map is found, val is appended to dst. func appendOrSetNamed(dst []any, name string, val map[string]any) []any { @@ -116,7 +155,7 @@ func appendOrSetNamed(dst []any, name string, val map[string]any) []any { var errInvalidKubeconfig = errors.New("invalid kubeconfig") -func updateKubeconfig(cfgYaml []byte, fqdn string) ([]byte, error) { +func updateKubeconfig(cfgYaml []byte, scheme, fqdn string) ([]byte, error) { var cfg map[string]any if len(cfgYaml) > 0 { if err := yaml.Unmarshal(cfgYaml, &cfg); err != nil { @@ -139,7 +178,7 @@ func updateKubeconfig(cfgYaml []byte, fqdn string) ([]byte, error) { cfg["clusters"] = appendOrSetNamed(clusters, fqdn, map[string]any{ "name": fqdn, "cluster": map[string]string{ - "server": "https://" + fqdn, + "server": scheme + fqdn, }, }) @@ -172,7 +211,7 @@ func updateKubeconfig(cfgYaml []byte, fqdn string) ([]byte, error) { return yaml.Marshal(cfg) } -func setKubeconfigForPeer(fqdn, filePath string) error { +func setKubeconfigForPeer(scheme, fqdn, filePath string) error { dir := filepath.Dir(filePath) if _, err := os.Stat(dir); err != nil { if !os.IsNotExist(err) { @@ -191,9 +230,97 @@ func setKubeconfigForPeer(fqdn, filePath string) error { if err != nil && !os.IsNotExist(err) { return fmt.Errorf("reading kubeconfig: %w", err) } - b, err = updateKubeconfig(b, fqdn) + b, err = updateKubeconfig(b, scheme, fqdn) if err != nil { return err } return os.WriteFile(filePath, b, 0600) } + +// nodeOrServiceDNSNameFromArg returns the PeerStatus.DNSName value from a peer +// in st that matches the input arg which can be a base name, full DNS name, or +// an IP. 
If none is found, it looks for a Tailscale Service +func nodeOrServiceDNSNameFromArg(st *ipnstate.Status, nm *netmap.NetworkMap, arg string) (string, error) { + // First check for a node DNS name. + if dnsName, ok := nodeDNSNameFromArg(st, arg); ok { + return dnsName, nil + } + + // If not found, check for a Tailscale Service DNS name. + rec, ok := serviceDNSRecordFromNetMap(nm, st.CurrentTailnet.MagicDNSSuffix, arg) + if !ok { + return "", fmt.Errorf("no peer found for %q", arg) + } + + // Validate we can see a peer advertising the Tailscale Service. + ip, err := netip.ParseAddr(rec.Value) + if err != nil { + return "", fmt.Errorf("error parsing ExtraRecord IP address %q: %w", rec.Value, err) + } + ipPrefix := netip.PrefixFrom(ip, ip.BitLen()) + for _, ps := range st.Peer { + for _, allowedIP := range ps.AllowedIPs.All() { + if allowedIP == ipPrefix { + return rec.Name, nil + } + } + } + + return "", fmt.Errorf("%q is in MagicDNS, but is not currently reachable on any known peer", arg) +} + +func getNetMap(ctx context.Context) (*netmap.NetworkMap, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + watcher, err := localClient.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return nil, err + } + defer watcher.Close() + + n, err := watcher.Next() + if err != nil { + return nil, err + } + + return n.NetMap, nil +} + +func serviceDNSRecordFromNetMap(nm *netmap.NetworkMap, tcd, arg string) (rec tailcfg.DNSRecord, ok bool) { + argIP, _ := netip.ParseAddr(arg) + argFQDN, err := dnsname.ToFQDN(arg) + argFQDNValid := err == nil + if !argIP.IsValid() && !argFQDNValid { + return rec, false + } + + for _, rec := range nm.DNS.ExtraRecords { + if argIP.IsValid() { + recIP, _ := netip.ParseAddr(rec.Value) + if recIP == argIP { + return rec, true + } + continue + } + + if !argFQDNValid { + continue + } + + recFirstLabel := dnsname.FirstLabel(rec.Name) + if strings.EqualFold(arg, recFirstLabel) { + return rec, true + } + + recFQDN, 
err := dnsname.ToFQDN(rec.Name) + if err != nil { + continue + } + if strings.EqualFold(argFQDN.WithTrailingDot(), recFQDN.WithTrailingDot()) { + return rec, true + } + } + + return tailcfg.DNSRecord{}, false +} diff --git a/cmd/tailscale/cli/configure-kube_test.go b/cmd/tailscale/cli/configure-kube_test.go index d71a9b627e7f0..0c8b6b2b6cc0e 100644 --- a/cmd/tailscale/cli/configure-kube_test.go +++ b/cmd/tailscale/cli/configure-kube_test.go @@ -6,6 +6,7 @@ package cli import ( "bytes" + "fmt" "strings" "testing" @@ -16,6 +17,7 @@ func TestKubeconfig(t *testing.T) { const fqdn = "foo.tail-scale.ts.net" tests := []struct { name string + http bool in string want string wantErr error @@ -48,6 +50,27 @@ contexts: current-context: foo.tail-scale.ts.net kind: Config users: +- name: tailscale-auth + user: + token: unused`, + }, + { + name: "empty_http", + http: true, + in: "", + want: `apiVersion: v1 +clusters: +- cluster: + server: http://foo.tail-scale.ts.net + name: foo.tail-scale.ts.net +contexts: +- context: + cluster: foo.tail-scale.ts.net + user: tailscale-auth + name: foo.tail-scale.ts.net +current-context: foo.tail-scale.ts.net +kind: Config +users: - name: tailscale-auth user: token: unused`, @@ -202,7 +225,11 @@ users: } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := updateKubeconfig([]byte(tt.in), fqdn) + scheme := "https://" + if tt.http { + scheme = "http://" + } + got, err := updateKubeconfig([]byte(tt.in), scheme, fqdn) if err != nil { if err != tt.wantErr { t.Fatalf("updateKubeconfig() error = %v, wantErr %v", err, tt.wantErr) @@ -219,3 +246,30 @@ users: }) } } + +func TestGetInputs(t *testing.T) { + for _, arg := range []string{ + "foo.tail-scale.ts.net", + "foo", + "127.0.0.1", + } { + for _, prefix := range []string{"", "https://", "http://"} { + for _, httpFlag := range []bool{false, true} { + expectedHost := arg + expectedHTTP := (httpFlag && !strings.HasPrefix(prefix, "https://")) || strings.HasPrefix(prefix, "http://") 
+ t.Run(fmt.Sprintf("%s%s_http=%v", prefix, arg, httpFlag), func(t *testing.T) { + host, http, err := getInputs(prefix+arg, httpFlag) + if err != nil { + t.Fatal(err) + } + if host != expectedHost { + t.Errorf("host = %v, want %v", host, expectedHost) + } + if http != expectedHTTP { + t.Errorf("http = %v, want %v", http, expectedHTTP) + } + }) + } + } + } +} From 22a8e0ac50ee2211e013fae2f2dbd8a9622657d8 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 22 Jul 2025 14:46:38 +0100 Subject: [PATCH 0147/1093] cmd/{k8s-operator,k8s-proxy},kube: use consistent type for auth mode config (#16626) Updates k8s-proxy's config so its auth mode config matches that we set in kube-apiserver ProxyGroups for consistency. Updates #13358 Change-Id: I95e29cec6ded2dc7c6d2d03f968a25c822bc0e01 Signed-off-by: Tom Proctor --- cmd/k8s-operator/api-server-proxy.go | 36 +++++-------------- cmd/k8s-operator/operator.go | 6 ++-- cmd/k8s-operator/proxygroup.go | 8 +++-- cmd/k8s-operator/proxygroup_test.go | 2 +- cmd/k8s-proxy/k8s-proxy.go | 9 ++--- k8s-operator/api-proxy/proxy.go | 8 ++--- k8s-operator/sessionrecording/hijacker.go | 2 +- kube/k8s-proxy/conf/conf.go | 9 ++--- kube/kubetypes/types.go | 23 ++++++++++++- kube/kubetypes/types_test.go | 42 +++++++++++++++++++++++ 10 files changed, 98 insertions(+), 47 deletions(-) create mode 100644 kube/kubetypes/types_test.go diff --git a/cmd/k8s-operator/api-server-proxy.go b/cmd/k8s-operator/api-server-proxy.go index 09a7b8c6232d2..70333d2c48d41 100644 --- a/cmd/k8s-operator/api-server-proxy.go +++ b/cmd/k8s-operator/api-server-proxy.go @@ -9,30 +9,12 @@ import ( "fmt" "log" "os" -) - -type apiServerProxyMode int - -func (a apiServerProxyMode) String() string { - switch a { - case apiServerProxyModeDisabled: - return "disabled" - case apiServerProxyModeEnabled: - return "auth" - case apiServerProxyModeNoAuth: - return "noauth" - default: - return "unknown" - } -} -const ( - apiServerProxyModeDisabled apiServerProxyMode = iota - 
apiServerProxyModeEnabled - apiServerProxyModeNoAuth + "tailscale.com/kube/kubetypes" + "tailscale.com/types/ptr" ) -func parseAPIProxyMode() apiServerProxyMode { +func parseAPIProxyMode() *kubetypes.APIServerProxyMode { haveAuthProxyEnv := os.Getenv("AUTH_PROXY") != "" haveAPIProxyEnv := os.Getenv("APISERVER_PROXY") != "" switch { @@ -41,21 +23,21 @@ func parseAPIProxyMode() apiServerProxyMode { case haveAuthProxyEnv: var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated if authProxyEnv { - return apiServerProxyModeEnabled + return ptr.To(kubetypes.APIServerProxyModeAuth) } - return apiServerProxyModeDisabled + return nil case haveAPIProxyEnv: var apiProxyEnv = defaultEnv("APISERVER_PROXY", "") // true, false or "noauth" switch apiProxyEnv { case "true": - return apiServerProxyModeEnabled + return ptr.To(kubetypes.APIServerProxyModeAuth) case "false", "": - return apiServerProxyModeDisabled + return nil case "noauth": - return apiServerProxyModeNoAuth + return ptr.To(kubetypes.APIServerProxyModeNoAuth) default: panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv)) } } - return apiServerProxyModeDisabled + return nil } diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 94a0a6a781c4d..76d2df51d47d2 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -113,7 +113,7 @@ func main() { // additionally act as api-server proxy // https://tailscale.com/kb/1236/kubernetes-operator/?q=kubernetes#accessing-the-kubernetes-control-plane-using-an-api-server-proxy. 
mode := parseAPIProxyMode() - if mode == apiServerProxyModeDisabled { + if mode == nil { hostinfo.SetApp(kubetypes.AppOperator) } else { hostinfo.SetApp(kubetypes.AppInProcessAPIServerProxy) @@ -122,8 +122,8 @@ func main() { s, tsc := initTSNet(zlog, loginServer) defer s.Close() restConfig := config.GetConfigOrDie() - if mode != apiServerProxyModeDisabled { - ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, mode == apiServerProxyModeEnabled, true) + if mode != nil { + ap, err := apiproxy.NewAPIServerProxy(zlog, restConfig, s, *mode, true) if err != nil { zlog.Fatalf("error creating API server proxy: %v", err) } diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index d62cb0f117a1d..f9c12797d523d 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -805,6 +805,10 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } } + mode := kubetypes.APIServerProxyModeAuth + if !isAuthAPIServerProxy(pg) { + mode = kubetypes.APIServerProxyModeNoAuth + } cfg := conf.VersionedConfig{ Version: "v1alpha1", ConfigV1Alpha1: &conf.ConfigV1Alpha1{ @@ -816,8 +820,8 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p // Reloadable fields. Hostname: &hostname, APIServerProxy: &conf.APIServerProxyConfig{ - Enabled: opt.NewBool(true), - AuthMode: opt.NewBool(isAuthAPIServerProxy(pg)), + Enabled: opt.NewBool(true), + Mode: &mode, // The first replica is elected as the cert issuer, same // as containerboot does for ingress-pg-reconciler. 
IssueCerts: opt.NewBool(i == 0), diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index ef6babc5679cc..0dc791b0412de 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -1376,7 +1376,7 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { Hostname: ptr.To("test-k8s-apiserver-0"), APIServerProxy: &conf.APIServerProxyConfig{ Enabled: opt.NewBool(true), - AuthMode: opt.NewBool(false), + Mode: ptr.To(kubetypes.APIServerProxyModeNoAuth), IssueCerts: opt.NewBool(true), }, }, diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index eea1f15f7fdd8..b56ceaab0d5ca 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -34,6 +34,7 @@ import ( apiproxy "tailscale.com/k8s-operator/api-proxy" "tailscale.com/kube/certs" "tailscale.com/kube/k8s-proxy/conf" + "tailscale.com/kube/kubetypes" klc "tailscale.com/kube/localclient" "tailscale.com/kube/services" "tailscale.com/kube/state" @@ -238,11 +239,11 @@ func run(logger *zap.SugaredLogger) error { } // Setup for the API server proxy. 
- authMode := true - if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.AuthMode.EqualBool(false) { - authMode = false + mode := kubetypes.APIServerProxyModeAuth + if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.Mode != nil { + mode = *cfg.Parsed.APIServerProxy.Mode } - ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode, false) + ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, mode, false) if err != nil { return fmt.Errorf("error creating api server proxy: %w", err) } diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index e079e984ff5a1..c648e1622537d 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -47,8 +47,8 @@ var ( // caller's Tailscale identity and the rules defined in the tailnet ACLs. // - false: the proxy is started and requests are passed through to the // Kubernetes API without any auth modifications. 
-func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, authMode bool, https bool) (*APIServerProxy, error) { - if !authMode { +func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsnet.Server, mode kubetypes.APIServerProxyMode, https bool) (*APIServerProxy, error) { + if mode == kubetypes.APIServerProxyModeNoAuth { restConfig = rest.AnonymousClientConfig(restConfig) } @@ -85,7 +85,7 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn ap := &APIServerProxy{ log: zlog, lc: lc, - authMode: authMode, + authMode: mode == kubetypes.APIServerProxyModeAuth, https: https, upstreamURL: u, ts: ts, @@ -278,7 +278,7 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request Namespace: r.PathValue(namespaceNameKey), Log: ap.log, } - h := ksr.New(opts) + h := ksr.NewHijacker(opts) ap.rp.ServeHTTP(h, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index e8c534afc9319..675a9b1ddacc6 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -57,7 +57,7 @@ var ( counterSessionRecordingsUploaded = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_uploaded") ) -func New(opts HijackerOpts) *Hijacker { +func NewHijacker(opts HijackerOpts) *Hijacker { return &Hijacker{ ts: opts.TS, req: opts.Req, diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index a32e0c03ef2bc..fdb6301ac5a1d 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -14,6 +14,7 @@ import ( "net/netip" "github.com/tailscale/hujson" + "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/types/opt" ) @@ -66,10 +67,10 @@ type ConfigV1Alpha1 struct { } type APIServerProxyConfig struct { - Enabled opt.Bool `json:",omitempty"` // Whether to enable the API Server proxy. 
- AuthMode opt.Bool `json:",omitempty"` // Run in auth or noauth mode. - ServiceName *tailcfg.ServiceName `json:",omitempty"` // Name of the Tailscale Service to advertise. - IssueCerts opt.Bool `json:",omitempty"` // Whether this replica should issue TLS certs for the Tailscale Service. + Enabled opt.Bool `json:",omitempty"` // Whether to enable the API Server proxy. + Mode *kubetypes.APIServerProxyMode `json:",omitempty"` // "auth" or "noauth" mode. + ServiceName *tailcfg.ServiceName `json:",omitempty"` // Name of the Tailscale Service to advertise. + IssueCerts opt.Bool `json:",omitempty"` // Whether this replica should issue TLS certs for the Tailscale Service. } // Load reads and parses the config file at the provided path on disk. diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 5e7d4cd1f1fd1..44b01fe1ad1f5 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -3,6 +3,8 @@ package kubetypes +import "fmt" + const ( // Hostinfo App values for the Tailscale Kubernetes Operator components. AppOperator = "k8s-operator" @@ -59,5 +61,24 @@ const ( LabelSecretTypeState = "state" LabelSecretTypeCerts = "certs" - KubeAPIServerConfigFile = "config.hujson" + KubeAPIServerConfigFile = "config.hujson" + APIServerProxyModeAuth APIServerProxyMode = "auth" + APIServerProxyModeNoAuth APIServerProxyMode = "noauth" ) + +// APIServerProxyMode specifies whether the API server proxy will add +// impersonation headers to requests based on the caller's Tailscale identity. +// May be "auth" or "noauth". 
+type APIServerProxyMode string + +func (a *APIServerProxyMode) UnmarshalJSON(data []byte) error { + switch string(data) { + case `"auth"`: + *a = APIServerProxyModeAuth + case `"noauth"`: + *a = APIServerProxyModeNoAuth + default: + return fmt.Errorf("unknown APIServerProxyMode %q", data) + } + return nil +} diff --git a/kube/kubetypes/types_test.go b/kube/kubetypes/types_test.go new file mode 100644 index 0000000000000..ea1846b3253e8 --- /dev/null +++ b/kube/kubetypes/types_test.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package kubetypes + +import ( + "encoding/json" + "testing" +) + +func TestUnmarshalAPIServerProxyMode(t *testing.T) { + tests := []struct { + data string + expected APIServerProxyMode + }{ + {data: `{"mode":"auth"}`, expected: APIServerProxyModeAuth}, + {data: `{"mode":"noauth"}`, expected: APIServerProxyModeNoAuth}, + {data: `{"mode":""}`, expected: ""}, + {data: `{"mode":"Auth"}`, expected: ""}, + {data: `{"mode":"unknown"}`, expected: ""}, + } + + for _, tc := range tests { + var s struct { + Mode *APIServerProxyMode `json:",omitempty"` + } + err := json.Unmarshal([]byte(tc.data), &s) + if tc.expected == "" { + if err == nil { + t.Errorf("expected error for %q, got none", tc.data) + } + continue + } + if err != nil { + t.Errorf("unexpected error for %q: %v", tc.data, err) + continue + } + if *s.Mode != tc.expected { + t.Errorf("for %q expected %q, got %q", tc.data, tc.expected, *s.Mode) + } + } +} From 44947054967e3eda476c92206e0a14fd1ffc4ec0 Mon Sep 17 00:00:00 2001 From: David Bond Date: Tue, 22 Jul 2025 17:07:51 +0100 Subject: [PATCH 0148/1093] cmd/{k8s-proxy,containerboot,k8s-operator},kube: add health check and metrics endpoints for k8s-proxy (#16540) * Modifies the k8s-proxy to expose health check and metrics endpoints on the Pod's IP. * Moves cmd/containerboot/healthz.go and cmd/containerboot/metrics.go to /kube to be shared with /k8s-proxy. 
Updates #13358 Signed-off-by: David Bond --- cmd/containerboot/healthz.go | 57 ------------- cmd/containerboot/main.go | 16 ++-- cmd/k8s-operator/proxygroup.go | 8 +- cmd/k8s-operator/proxygroup_test.go | 2 + cmd/k8s-proxy/k8s-proxy.go | 67 +++++++++++++-- kube/health/healthz.go | 84 +++++++++++++++++++ kube/k8s-proxy/conf/conf.go | 36 ++++++-- .../containerboot => kube/metrics}/metrics.go | 8 +- 8 files changed, 196 insertions(+), 82 deletions(-) delete mode 100644 cmd/containerboot/healthz.go create mode 100644 kube/health/healthz.go rename {cmd/containerboot => kube/metrics}/metrics.go (90%) diff --git a/cmd/containerboot/healthz.go b/cmd/containerboot/healthz.go deleted file mode 100644 index d6a64a37c4ac5..0000000000000 --- a/cmd/containerboot/healthz.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux - -package main - -import ( - "fmt" - "log" - "net/http" - "sync" - - "tailscale.com/kube/kubetypes" -) - -// healthz is a simple health check server, if enabled it returns 200 OK if -// this tailscale node currently has at least one tailnet IP address else -// returns 503. -type healthz struct { - sync.Mutex - hasAddrs bool - podIPv4 string -} - -func (h *healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.Lock() - defer h.Unlock() - - if h.hasAddrs { - w.Header().Add(kubetypes.PodIPv4Header, h.podIPv4) - if _, err := w.Write([]byte("ok")); err != nil { - http.Error(w, fmt.Sprintf("error writing status: %v", err), http.StatusInternalServerError) - } - } else { - http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable) - } -} - -func (h *healthz) update(healthy bool) { - h.Lock() - defer h.Unlock() - - if h.hasAddrs != healthy { - log.Println("Setting healthy", healthy) - } - h.hasAddrs = healthy -} - -// registerHealthHandlers registers a simple health handler at /healthz. 
-// A containerized tailscale instance is considered healthy if -// it has at least one tailnet IP address. -func registerHealthHandlers(mux *http.ServeMux, podIPv4 string) *healthz { - h := &healthz{podIPv4: podIPv4} - mux.Handle("GET /healthz", h) - return h -} diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 49c8a473a596d..f056d26f3c2c0 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -121,7 +121,9 @@ import ( "tailscale.com/client/tailscale" "tailscale.com/ipn" kubeutils "tailscale.com/k8s-operator" + healthz "tailscale.com/kube/health" "tailscale.com/kube/kubetypes" + "tailscale.com/kube/metrics" "tailscale.com/kube/services" "tailscale.com/tailcfg" "tailscale.com/types/logger" @@ -232,13 +234,13 @@ func run() error { } defer killTailscaled() - var healthCheck *healthz + var healthCheck *healthz.Healthz ep := &egressProxy{} if cfg.HealthCheckAddrPort != "" { mux := http.NewServeMux() log.Printf("Running healthcheck endpoint at %s/healthz", cfg.HealthCheckAddrPort) - healthCheck = registerHealthHandlers(mux, cfg.PodIPv4) + healthCheck = healthz.RegisterHealthHandlers(mux, cfg.PodIPv4, log.Printf) close := runHTTPServer(mux, cfg.HealthCheckAddrPort) defer close() @@ -249,12 +251,12 @@ func run() error { if cfg.localMetricsEnabled() { log.Printf("Running metrics endpoint at %s/metrics", cfg.LocalAddrPort) - registerMetricsHandlers(mux, client, cfg.DebugAddrPort) + metrics.RegisterMetricsHandlers(mux, client, cfg.DebugAddrPort) } if cfg.localHealthEnabled() { log.Printf("Running healthcheck endpoint at %s/healthz", cfg.LocalAddrPort) - healthCheck = registerHealthHandlers(mux, cfg.PodIPv4) + healthCheck = healthz.RegisterHealthHandlers(mux, cfg.PodIPv4, log.Printf) } if cfg.egressSvcsTerminateEPEnabled() { @@ -438,8 +440,8 @@ authLoop: ) // egressSvcsErrorChan will get an error sent to it if this containerboot instance is configured to expose 1+ // egress services in HA mode and errored. 
- var egressSvcsErrorChan = make(chan error) - var ingressSvcsErrorChan = make(chan error) + egressSvcsErrorChan := make(chan error) + ingressSvcsErrorChan := make(chan error) defer t.Stop() // resetTimer resets timer for when to next attempt to resolve the DNS // name for the proxy configured with TS_EXPERIMENTAL_DEST_DNS_NAME. The @@ -644,7 +646,7 @@ runLoop: } if healthCheck != nil { - healthCheck.update(len(addrs) != 0) + healthCheck.Update(len(addrs) != 0) } if cfg.ServeConfigPath != "" { diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index f9c12797d523d..debeb5c6b3442 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -826,6 +826,8 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p // as containerboot does for ingress-pg-reconciler. IssueCerts: opt.NewBool(i == 0), }, + LocalPort: ptr.To(uint16(9002)), + HealthCheckEnabled: opt.NewBool(true), }, } @@ -849,7 +851,11 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } if proxyClass != nil && proxyClass.Spec.TailscaleConfig != nil { - cfg.AcceptRoutes = &proxyClass.Spec.TailscaleConfig.AcceptRoutes + cfg.AcceptRoutes = opt.NewBool(proxyClass.Spec.TailscaleConfig.AcceptRoutes) + } + + if proxyClass != nil && proxyClass.Spec.Metrics != nil { + cfg.MetricsEnabled = opt.NewBool(proxyClass.Spec.Metrics.Enable) } if len(endpoints[nodePortSvcName]) > 0 { diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 0dc791b0412de..d763cf92276ec 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -1379,6 +1379,8 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { Mode: ptr.To(kubetypes.APIServerProxyModeNoAuth), IssueCerts: opt.NewBool(true), }, + LocalPort: ptr.To(uint16(9002)), + HealthCheckEnabled: opt.NewBool(true), }, } cfgB, err := json.Marshal(cfg) diff --git a/cmd/k8s-proxy/k8s-proxy.go 
b/cmd/k8s-proxy/k8s-proxy.go index b56ceaab0d5ca..448bbe3971c0d 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -12,9 +12,12 @@ import ( "context" "errors" "fmt" + "net" + "net/http" "os" "os/signal" "reflect" + "strconv" "strings" "syscall" "time" @@ -33,9 +36,11 @@ import ( "tailscale.com/ipn/store" apiproxy "tailscale.com/k8s-operator/api-proxy" "tailscale.com/kube/certs" + healthz "tailscale.com/kube/health" "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" klc "tailscale.com/kube/localclient" + "tailscale.com/kube/metrics" "tailscale.com/kube/services" "tailscale.com/kube/state" "tailscale.com/tailcfg" @@ -63,6 +68,7 @@ func run(logger *zap.SugaredLogger) error { var ( configPath = os.Getenv("TS_K8S_PROXY_CONFIG") podUID = os.Getenv("POD_UID") + podIP = os.Getenv("POD_IP") ) if configPath == "" { return errors.New("TS_K8S_PROXY_CONFIG unset") @@ -201,10 +207,57 @@ func run(logger *zap.SugaredLogger) error { }) } - if cfg.Parsed.AcceptRoutes != nil { + if cfg.Parsed.HealthCheckEnabled.EqualBool(true) || cfg.Parsed.MetricsEnabled.EqualBool(true) { + addr := podIP + if addr == "" { + addr = cfg.GetLocalAddr() + } + + addrPort := getLocalAddrPort(addr, cfg.GetLocalPort()) + mux := http.NewServeMux() + localSrv := &http.Server{Addr: addrPort, Handler: mux} + + if cfg.Parsed.MetricsEnabled.EqualBool(true) { + logger.Infof("Running metrics endpoint at %s/metrics", addrPort) + metrics.RegisterMetricsHandlers(mux, lc, "") + } + + if cfg.Parsed.HealthCheckEnabled.EqualBool(true) { + ipV4, _ := ts.TailscaleIPs() + hz := healthz.RegisterHealthHandlers(mux, ipV4.String(), logger.Infof) + group.Go(func() error { + err := hz.MonitorHealth(ctx, lc) + if err == nil || errors.Is(err, context.Canceled) { + return nil + } + return err + }) + } + + group.Go(func() error { + errChan := make(chan error) + go func() { + if err := localSrv.ListenAndServe(); err != nil { + errChan <- err + } + close(errChan) + }() + + select { + case 
<-ctx.Done(): + sCtx, scancel := context.WithTimeout(serveCtx, 10*time.Second) + defer scancel() + return localSrv.Shutdown(sCtx) + case err := <-errChan: + return err + } + }) + } + + if v, ok := cfg.Parsed.AcceptRoutes.Get(); ok { _, err = lc.EditPrefs(ctx, &ipn.MaskedPrefs{ RouteAllSet: true, - Prefs: ipn.Prefs{RouteAll: *cfg.Parsed.AcceptRoutes}, + Prefs: ipn.Prefs{RouteAll: v}, }) if err != nil { return fmt.Errorf("error editing prefs: %w", err) @@ -285,10 +338,10 @@ func run(logger *zap.SugaredLogger) error { prefs.HostnameSet = true prefs.Hostname = *cfg.Parsed.Hostname } - if cfg.Parsed.AcceptRoutes != nil && *cfg.Parsed.AcceptRoutes != currentPrefs.RouteAll { - cfgLogger = cfgLogger.With("AcceptRoutes", fmt.Sprintf("%v -> %v", currentPrefs.RouteAll, *cfg.Parsed.AcceptRoutes)) + if v, ok := cfg.Parsed.AcceptRoutes.Get(); ok && v != currentPrefs.RouteAll { + cfgLogger = cfgLogger.With("AcceptRoutes", fmt.Sprintf("%v -> %v", currentPrefs.RouteAll, v)) prefs.RouteAllSet = true - prefs.Prefs.RouteAll = *cfg.Parsed.AcceptRoutes + prefs.Prefs.RouteAll = v } if !prefs.IsEmpty() { if _, err := lc.EditPrefs(ctx, &prefs); err != nil { @@ -304,6 +357,10 @@ func run(logger *zap.SugaredLogger) error { } } +func getLocalAddrPort(addr string, port uint16) string { + return net.JoinHostPort(addr, strconv.FormatUint(uint64(port), 10)) +} + func getStateStore(path *string, logger *zap.SugaredLogger) (ipn.StateStore, error) { p := "mem:" if path != nil { diff --git a/kube/health/healthz.go b/kube/health/healthz.go new file mode 100644 index 0000000000000..c8cfcc7ec01b4 --- /dev/null +++ b/kube/health/healthz.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package health contains shared types and underlying methods for serving +// a `/healthz` endpoint for containerboot and k8s-proxy. 
+package health + +import ( + "context" + "fmt" + "net/http" + "sync" + + "tailscale.com/client/local" + "tailscale.com/ipn" + "tailscale.com/kube/kubetypes" + "tailscale.com/types/logger" +) + +// Healthz is a simple health check server, if enabled it returns 200 OK if +// this tailscale node currently has at least one tailnet IP address else +// returns 503. +type Healthz struct { + sync.Mutex + hasAddrs bool + podIPv4 string + logger logger.Logf +} + +func (h *Healthz) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.Lock() + defer h.Unlock() + + if h.hasAddrs { + w.Header().Add(kubetypes.PodIPv4Header, h.podIPv4) + if _, err := w.Write([]byte("ok")); err != nil { + http.Error(w, fmt.Sprintf("error writing status: %v", err), http.StatusInternalServerError) + } + } else { + http.Error(w, "node currently has no tailscale IPs", http.StatusServiceUnavailable) + } +} + +func (h *Healthz) Update(healthy bool) { + h.Lock() + defer h.Unlock() + + if h.hasAddrs != healthy { + h.logger("Setting healthy %v", healthy) + } + h.hasAddrs = healthy +} + +func (h *Healthz) MonitorHealth(ctx context.Context, lc *local.Client) error { + w, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialNetMap) + if err != nil { + return fmt.Errorf("failed to watch IPN bus: %w", err) + } + + for { + n, err := w.Next() + if err != nil { + return err + } + + if n.NetMap != nil { + h.Update(n.NetMap.SelfNode.Addresses().Len() != 0) + } + } +} + +// RegisterHealthHandlers registers a simple health handler at /healthz. +// A containerized tailscale instance is considered healthy if +// it has at least one tailnet IP address. 
+func RegisterHealthHandlers(mux *http.ServeMux, podIPv4 string, logger logger.Logf) *Healthz { + h := &Healthz{ + podIPv4: podIPv4, + logger: logger, + } + mux.Handle("GET /healthz", h) + return h +} diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index fdb6301ac5a1d..5294952438896 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -49,21 +49,23 @@ type VersionedConfig struct { } type ConfigV1Alpha1 struct { - AuthKey *string `json:",omitempty"` // Tailscale auth key to use. - State *string `json:",omitempty"` // Path to the Tailscale state. - LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". - App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer - ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. - // StaticEndpoints are additional, user-defined endpoints that this node - // should advertise amongst its wireguard endpoints. - StaticEndpoints []netip.AddrPort `json:",omitempty"` + AuthKey *string `json:",omitempty"` // Tailscale auth key to use. + State *string `json:",omitempty"` // Path to the Tailscale state. + LogLevel *string `json:",omitempty"` // "debug", "info". Defaults to "info". + App *string `json:",omitempty"` // e.g. kubetypes.AppProxyGroupKubeAPIServer + ServerURL *string `json:",omitempty"` // URL of the Tailscale coordination server. + LocalAddr *string `json:",omitempty"` // The address to use for serving HTTP health checks and metrics (defaults to all interfaces). + LocalPort *uint16 `json:",omitempty"` // The port to use for serving HTTP health checks and metrics (defaults to 9002). + MetricsEnabled opt.Bool `json:",omitempty"` // Serve metrics on :/metrics. + HealthCheckEnabled opt.Bool `json:",omitempty"` // Serve health check on :/metrics. // TODO(tomhjp): The remaining fields should all be reloadable during // runtime, but currently missing most of the APIServerProxy fields. 
Hostname *string `json:",omitempty"` // Tailscale device hostname. - AcceptRoutes *bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. + AcceptRoutes opt.Bool `json:",omitempty"` // Accepts routes advertised by other Tailscale nodes. AdvertiseServices []string `json:",omitempty"` // Tailscale Services to advertise. APIServerProxy *APIServerProxyConfig `json:",omitempty"` // Config specific to the API Server proxy. + StaticEndpoints []netip.AddrPort `json:",omitempty"` // StaticEndpoints are additional, user-defined endpoints that this node should advertise amongst its wireguard endpoints. } type APIServerProxyConfig struct { @@ -108,3 +110,19 @@ func Load(raw []byte) (c Config, err error) { return c, nil } + +func (c *Config) GetLocalAddr() string { + if c.Parsed.LocalAddr == nil { + return "[::]" + } + + return *c.Parsed.LocalAddr +} + +func (c *Config) GetLocalPort() uint16 { + if c.Parsed.LocalPort == nil { + return uint16(9002) + } + + return *c.Parsed.LocalPort +} diff --git a/cmd/containerboot/metrics.go b/kube/metrics/metrics.go similarity index 90% rename from cmd/containerboot/metrics.go rename to kube/metrics/metrics.go index bbd050de6df26..0db683008f91e 100644 --- a/cmd/containerboot/metrics.go +++ b/kube/metrics/metrics.go @@ -1,9 +1,11 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux +//go:build !plan9 -package main +// Package metrics contains shared types and underlying methods for serving +// localapi metrics. This is primarily consumed by containerboot and k8s-proxy. +package metrics import ( "fmt" @@ -68,7 +70,7 @@ func (m *metrics) handleDebug(w http.ResponseWriter, r *http.Request) { // In 1.78.x and 1.80.x, it also proxies debug paths to tailscaled's debug // endpoint if configured to ease migration for a breaking change serving user // metrics instead of debug metrics on the "metrics" port. 
-func registerMetricsHandlers(mux *http.ServeMux, lc *local.Client, debugAddrPort string) { +func RegisterMetricsHandlers(mux *http.ServeMux, lc *local.Client, debugAddrPort string) { m := &metrics{ lc: lc, debugEndpoint: debugAddrPort, From 0de5e7b94f0bb89bcaed108f656d3ed50da85d02 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 22 Jul 2025 09:22:17 -1000 Subject: [PATCH 0149/1093] util/set: add IntSet (#16602) IntSet is a set optimized for integers. Updates tailscale/corp#29809 Signed-off-by: Joe Tsai --- util/set/intset.go | 172 +++++++++++++++++++++++++++++++++++++++ util/set/intset_test.go | 174 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 346 insertions(+) create mode 100644 util/set/intset.go create mode 100644 util/set/intset_test.go diff --git a/util/set/intset.go b/util/set/intset.go new file mode 100644 index 0000000000000..b747d3bffa9fd --- /dev/null +++ b/util/set/intset.go @@ -0,0 +1,172 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package set + +import ( + "iter" + "maps" + "math/bits" + "math/rand/v2" + + "golang.org/x/exp/constraints" + "tailscale.com/util/mak" +) + +// IntSet is a set optimized for integer values close to zero +// or set of integers that are close in value. +type IntSet[T constraints.Integer] struct { + // bits is a [bitSet] for numbers less than [bits.UintSize]. + bits bitSet + + // extra is a mapping of [bitSet] for numbers not in bits, + // where the key is a number modulo [bits.UintSize]. + extra map[uint64]bitSet + + // extraLen is the count of numbers in extra since len(extra) + // does not reflect that each bitSet may have multiple numbers. + extraLen int +} + +// Values returns an iterator over the elements of the set. +// The iterator will yield the elements in no particular order. 
+func (s IntSet[T]) Values() iter.Seq[T] { + return func(yield func(T) bool) { + if s.bits != 0 { + for i := range s.bits.values() { + if !yield(decodeZigZag[T](i)) { + return + } + } + } + if s.extra != nil { + for hi, bs := range s.extra { + for lo := range bs.values() { + if !yield(decodeZigZag[T](hi*bits.UintSize + lo)) { + return + } + } + } + } + } +} + +// Contains reports whether e is in the set. +func (s IntSet[T]) Contains(e T) bool { + if v := encodeZigZag(e); v < bits.UintSize { + return s.bits.contains(v) + } else { + hi, lo := v/uint64(bits.UintSize), v%uint64(bits.UintSize) + return s.extra[hi].contains(lo) + } +} + +// Add adds e to the set. +// +// When storing a IntSet in a map as a value type, +// it is important to re-assign the map entry after calling Add or Delete, +// as the IntSet's representation may change. +func (s *IntSet[T]) Add(e T) { + if v := encodeZigZag(e); v < bits.UintSize { + s.bits.add(v) + } else { + hi, lo := v/uint64(bits.UintSize), v%uint64(bits.UintSize) + if bs := s.extra[hi]; !bs.contains(lo) { + bs.add(lo) + mak.Set(&s.extra, hi, bs) + s.extra[hi] = bs + s.extraLen++ + } + } +} + +// AddSeq adds the values from seq to the set. +func (s *IntSet[T]) AddSeq(seq iter.Seq[T]) { + for e := range seq { + s.Add(e) + } +} + +// Len reports the number of elements in the set. +func (s IntSet[T]) Len() int { + return s.bits.len() + s.extraLen +} + +// Delete removes e from the set. +// +// When storing a IntSet in a map as a value type, +// it is important to re-assign the map entry after calling Add or Delete, +// as the IntSet's representation may change. +func (s *IntSet[T]) Delete(e T) { + if v := encodeZigZag(e); v < bits.UintSize { + s.bits.delete(v) + } else { + hi, lo := v/uint64(bits.UintSize), v%uint64(bits.UintSize) + if bs := s.extra[hi]; bs.contains(lo) { + bs.delete(lo) + mak.Set(&s.extra, hi, bs) + s.extra[hi] = bs + s.extraLen-- + } + } +} + +// Clone returns a copy of s that doesn't alias the original. 
+func (s IntSet[T]) Clone() IntSet[T] { + return IntSet[T]{ + bits: s.bits, + extra: maps.Clone(s.extra), + extraLen: s.extraLen, + } +} + +type bitSet uint + +func (s bitSet) values() iter.Seq[uint64] { + return func(yield func(uint64) bool) { + // Hyrum-proofing: randomly iterate in forwards or reverse. + if rand.Uint64()%2 == 0 { + for i := 0; i < bits.UintSize; i++ { + if s.contains(uint64(i)) && !yield(uint64(i)) { + return + } + } + } else { + for i := bits.UintSize; i >= 0; i-- { + if s.contains(uint64(i)) && !yield(uint64(i)) { + return + } + } + } + } +} +func (s bitSet) len() int { return bits.OnesCount(uint(s)) } +func (s bitSet) contains(i uint64) bool { return s&(1< 0 } +func (s *bitSet) add(i uint64) { *s |= 1 << i } +func (s *bitSet) delete(i uint64) { *s &= ^(1 << i) } + +// encodeZigZag encodes an integer as an unsigned integer ensuring that +// negative integers near zero still have a near zero positive value. +// For unsigned integers, it returns the value verbatim. +func encodeZigZag[T constraints.Integer](v T) uint64 { + var zero T + if ^zero >= 0 { // must be constraints.Unsigned + return uint64(v) + } else { // must be constraints.Signed + // See [google.golang.org/protobuf/encoding/protowire.EncodeZigZag] + return uint64(int64(v)<<1) ^ uint64(int64(v)>>63) + } +} + +// decodeZigZag decodes an unsigned integer as an integer ensuring that +// negative integers near zero still have a near zero positive value. +// For unsigned integers, it returns the value verbatim. 
+func decodeZigZag[T constraints.Integer](v uint64) T { + var zero T + if ^zero >= 0 { // must be constraints.Unsigned + return T(v) + } else { // must be constraints.Signed + // See [google.golang.org/protobuf/encoding/protowire.DecodeZigZag] + return T(int64(v>>1) ^ int64(v)<<63>>63) + } +} diff --git a/util/set/intset_test.go b/util/set/intset_test.go new file mode 100644 index 0000000000000..9523fe88db127 --- /dev/null +++ b/util/set/intset_test.go @@ -0,0 +1,174 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package set + +import ( + "maps" + "math" + "slices" + "testing" + + "golang.org/x/exp/constraints" +) + +func TestIntSet(t *testing.T) { + t.Run("Int64", func(t *testing.T) { + ss := make(Set[int64]) + var si IntSet[int64] + intValues(t, ss, si) + deleteInt(t, ss, &si, -5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + addInt(t, ss, &si, 2) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, -3) + addInt(t, ss, &si, -3) + addInt(t, ss, &si, -3) + addInt(t, ss, &si, math.MinInt64) + addInt(t, ss, &si, 8) + intValues(t, ss, si) + addInt(t, ss, &si, 77) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + intValues(t, ss, si) + addInt(t, ss, &si, -5) + addInt(t, ss, &si, 7) + addInt(t, ss, &si, -83) + addInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + deleteInt(t, ss, &si, -5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + deleteInt(t, ss, &si, math.MinInt64) + deleteInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + }) + + t.Run("Uint64", func(t *testing.T) { + ss := make(Set[uint64]) + var si IntSet[uint64] + intValues(t, ss, si) + deleteInt(t, ss, &si, 5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + addInt(t, ss, &si, 2) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, 75) + addInt(t, ss, &si, 3) + addInt(t, ss, &si, 3) + addInt(t, ss, &si, 8) + intValues(t, ss, 
si) + addInt(t, ss, &si, 77) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + addInt(t, ss, &si, 76) + intValues(t, ss, si) + addInt(t, ss, &si, 5) + addInt(t, ss, &si, 7) + addInt(t, ss, &si, 83) + addInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + deleteInt(t, ss, &si, 5) + deleteInt(t, ss, &si, 2) + deleteInt(t, ss, &si, 75) + intValues(t, ss, si) + deleteInt(t, ss, &si, math.MaxInt64) + intValues(t, ss, si) + }) +} + +func intValues[T constraints.Integer](t testing.TB, ss Set[T], si IntSet[T]) { + got := slices.Collect(maps.Keys(ss)) + slices.Sort(got) + want := slices.Collect(si.Values()) + slices.Sort(want) + if !slices.Equal(got, want) { + t.Fatalf("Values mismatch:\n\tgot %v\n\twant %v", got, want) + } + if got, want := si.Len(), ss.Len(); got != want { + t.Fatalf("Len() = %v, want %v", got, want) + } +} + +func addInt[T constraints.Integer](t testing.TB, ss Set[T], si *IntSet[T], v T) { + t.Helper() + if got, want := si.Contains(v), ss.Contains(v); got != want { + t.Fatalf("Contains(%v) = %v, want %v", v, got, want) + } + ss.Add(v) + si.Add(v) + if !si.Contains(v) { + t.Fatalf("Contains(%v) = false, want true", v) + } + if got, want := si.Len(), ss.Len(); got != want { + t.Fatalf("Len() = %v, want %v", got, want) + } +} + +func deleteInt[T constraints.Integer](t testing.TB, ss Set[T], si *IntSet[T], v T) { + t.Helper() + if got, want := si.Contains(v), ss.Contains(v); got != want { + t.Fatalf("Contains(%v) = %v, want %v", v, got, want) + } + ss.Delete(v) + si.Delete(v) + if si.Contains(v) { + t.Fatalf("Contains(%v) = true, want false", v) + } + if got, want := si.Len(), ss.Len(); got != want { + t.Fatalf("Len() = %v, want %v", got, want) + } +} + +func TestZigZag(t *testing.T) { + t.Run("Int64", func(t *testing.T) { + for _, tt := range []struct { + decoded int64 + encoded uint64 + }{ + {math.MinInt64, math.MaxUint64}, + {-2, 3}, + {-1, 1}, + {0, 0}, + {1, 2}, + {2, 4}, + {math.MaxInt64, math.MaxUint64 - 1}, + } { + encoded := 
encodeZigZag(tt.decoded) + if encoded != tt.encoded { + t.Errorf("encodeZigZag(%v) = %v, want %v", tt.decoded, encoded, tt.encoded) + } + decoded := decodeZigZag[int64](tt.encoded) + if decoded != tt.decoded { + t.Errorf("decodeZigZag(%v) = %v, want %v", tt.encoded, decoded, tt.decoded) + } + } + }) + t.Run("Uint64", func(t *testing.T) { + for _, tt := range []struct { + decoded uint64 + encoded uint64 + }{ + {0, 0}, + {1, 1}, + {2, 2}, + {math.MaxInt64, math.MaxInt64}, + {math.MaxUint64, math.MaxUint64}, + } { + encoded := encodeZigZag(tt.decoded) + if encoded != tt.encoded { + t.Errorf("encodeZigZag(%v) = %v, want %v", tt.decoded, encoded, tt.encoded) + } + decoded := decodeZigZag[uint64](tt.encoded) + if decoded != tt.decoded { + t.Errorf("decodeZigZag(%v) = %v, want %v", tt.encoded, decoded, tt.decoded) + } + } + }) +} From 19faaff95c6f32a2fae26f003b467fb623962d09 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Tue, 22 Jul 2025 16:23:51 -0400 Subject: [PATCH 0150/1093] cmd/tailscale/cli: revert key for web config for services to FQDN (#16627) This commit reverts the key of Web field in ipn.ServiceConfig to use FQDN instead of service name for the host part of HostPort. This change is because k8s operator already build base on the assumption of the part being FQDN. We don't want to break the code with dependency. 
Fixes tailscale/corp#30695 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_legacy.go | 2 +- cmd/tailscale/cli/serve_legacy_test.go | 1 + cmd/tailscale/cli/serve_v2.go | 40 ++++++++++++++------------ cmd/tailscale/cli/serve_v2_test.go | 37 ++++++++++++------------ cmd/tsidp/tsidp.go | 2 +- ipn/ipnlocal/serve.go | 4 ++- ipn/serve.go | 10 +++---- 7 files changed, 51 insertions(+), 45 deletions(-) diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 7c79f7f7bc972..1a05d0543f58e 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -363,7 +363,7 @@ func (e *serveEnv) handleWebServe(ctx context.Context, srvPort uint16, useTLS bo return errHelp } - sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS) + sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS, noService.String()) if !reflect.DeepEqual(cursc, sc) { if err := e.lc.SetServeConfig(ctx, sc); err != nil { diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index 6b053fbd774ba..1ea76e72ca818 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -876,6 +876,7 @@ var fakeStatus = &ipnstate.Status{ tailcfg.CapabilityFunnelPorts + "?ports=443,8443": nil, }, }, + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, } func (lc *fakeLocalServeClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 8832a232d0f4e..056bfabb0a202 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -331,6 +331,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { return fmt.Errorf("getting client status: %w", err) } dnsName := strings.TrimSuffix(st.Self.DNSName, ".") + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix // set parent serve config to always be persisted // 
at the top level, but a nested config might be @@ -394,7 +395,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { var msg string if turnOff { // only unset serve when trying to unset with type and port flags. - err = e.unsetServe(sc, st, dnsName, srvType, srvPort, mount) + err = e.unsetServe(sc, dnsName, srvType, srvPort, mount, magicDNSSuffix) } else { if err := e.validateConfig(parentSC, srvPort, srvType, svcName); err != nil { return err @@ -406,7 +407,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { if len(args) > 0 { target = args[0] } - err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel) + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -585,12 +586,12 @@ func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType { } } -func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool) error { +func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string) error { // update serve config based on the type switch srvType { case serveTypeHTTPS, serveTypeHTTP: useTLS := srvType == serveTypeHTTPS - err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target) + err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target, mds) if err != nil { return fmt.Errorf("failed apply web serve: %w", err) } @@ -643,11 +644,10 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN var webConfig *ipn.WebServerConfig var tcpHandler *ipn.TCPPortHandler ips := st.TailscaleIPs + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix host := dnsName - displayedHost := dnsName if forService { - displayedHost = strings.Join([]string{svcName.WithoutPrefix(), st.CurrentTailnet.MagicDNSSuffix}, ".") - 
host = svcName.WithoutPrefix() + host = strings.Join([]string{svcName.WithoutPrefix(), magicDNSSuffix}, ".") } hp := ipn.HostPort(net.JoinHostPort(host, strconv.Itoa(int(srvPort)))) @@ -687,7 +687,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN output.WriteString("\n\n") svc := sc.Services[svcName] if srvType == serveTypeTUN && svc.Tun { - output.WriteString(fmt.Sprintf(msgRunningTunService, displayedHost)) + output.WriteString(fmt.Sprintf(msgRunningTunService, host)) output.WriteString("\n") output.WriteString(fmt.Sprintf(msgDisableServiceTun, dnsName)) output.WriteString("\n") @@ -716,7 +716,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN }) for _, m := range mounts { t, d := srvTypeAndDesc(webConfig.Handlers[m]) - output.WriteString(fmt.Sprintf("%s://%s%s%s\n", scheme, displayedHost, portPart, m)) + output.WriteString(fmt.Sprintf("%s://%s%s%s\n", scheme, host, portPart, m)) output.WriteString(fmt.Sprintf("%s %-5s %s\n\n", "|--", t, d)) } } else if tcpHandler != nil { @@ -726,7 +726,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN tlsStatus = "TLS terminated" } - output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", displayedHost, srvPort, tlsStatus)) + output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", host, srvPort, tlsStatus)) for _, a := range ips { ipp := net.JoinHostPort(a.String(), strconv.Itoa(int(srvPort))) output.WriteString(fmt.Sprintf("|-- tcp://%s\n", ipp)) @@ -755,7 +755,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN return output.String() } -func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string) error { +func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string, mds string) error { h := new(ipn.HTTPHandler) switch { case strings.HasPrefix(target, "text:"): @@ 
-797,7 +797,7 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui return errors.New("cannot serve web; already serving TCP") } - sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS) + sc.SetWebHandler(h, dnsName, srvPort, mount, useTLS, mds) return nil } @@ -850,11 +850,12 @@ func (e *serveEnv) applyFunnel(sc *ipn.ServeConfig, dnsName string, srvPort uint } // unsetServe removes the serve config for the given serve port. -// dnsName is a FQDN or a serviceName (with `svc:` prefix). -func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvType serveType, srvPort uint16, mount string) error { +// dnsName is a FQDN or a serviceName (with `svc:` prefix). mds +// is the Magic DNS suffix, which is used to recreate serve's host. +func (e *serveEnv) unsetServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, mds string) error { switch srvType { case serveTypeHTTPS, serveTypeHTTP: - err := e.removeWebServe(sc, st, dnsName, srvPort, mount) + err := e.removeWebServe(sc, dnsName, srvPort, mount, mds) if err != nil { return fmt.Errorf("failed to remove web serve: %w", err) } @@ -1010,8 +1011,9 @@ func isLegacyInvocation(subcmd serveMode, args []string) (string, bool) { // removeWebServe removes a web handler from the serve config // and removes funnel if no remaining mounts exist for the serve port. // The srvPort argument is the serving port and the mount argument is -// the mount point or registered path to remove. -func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsName string, srvPort uint16, mount string) error { +// the mount point or registered path to remove. mds is the Magic DNS suffix, +// which is used to recreate serve's host. 
+func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, mount string, mds string) error { if sc == nil { return nil } @@ -1026,7 +1028,7 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN if svc == nil { return errors.New("service does not exist") } - hostName = svcName.WithoutPrefix() + hostName = strings.Join([]string{svcName.WithoutPrefix(), mds}, ".") webServeMap = svc.Web } @@ -1063,7 +1065,7 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN } if forService { - sc.RemoveServiceWebHandler(st, svcName, srvPort, mounts) + sc.RemoveServiceWebHandler(svcName, hostName, srvPort, mounts) } else { sc.RemoveWebHandler(dnsName, srvPort, mounts, true) } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 2ba0b3f8434c8..95bf5b1012f8c 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -1299,7 +1299,7 @@ func TestMessageForPort(t *testing.T) { "foo.test.ts.net:443": true, }, }, - status: &ipnstate.Status{}, + status: &ipnstate.Status{CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}}, dnsName: "foo.test.ts.net", srvType: serveTypeHTTPS, srvPort: 443, @@ -1328,7 +1328,7 @@ func TestMessageForPort(t *testing.T) { }, }, }, - status: &ipnstate.Status{}, + status: &ipnstate.Status{CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}}, dnsName: "foo.test.ts.net", srvType: serveTypeHTTP, srvPort: 80, @@ -1352,7 +1352,7 @@ func TestMessageForPort(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "foo:80": { + "foo.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1396,7 +1396,7 @@ func TestMessageForPort(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: 
"http://localhost:3000"}, }, @@ -1440,7 +1440,7 @@ func TestMessageForPort(t *testing.T) { 2200: {HTTPS: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "foo:2200": { + "foo.test.ts.net:2200": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1670,6 +1670,7 @@ func TestIsLegacyInvocation(t *testing.T) { func TestSetServe(t *testing.T) { e := &serveEnv{} + magicDNSSuffix := "test.ts.net" tests := []struct { name string desc string @@ -1816,7 +1817,7 @@ func TestSetServe(t *testing.T) { "svc:bar": { TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1834,7 +1835,7 @@ func TestSetServe(t *testing.T) { "svc:bar": { TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1853,7 +1854,7 @@ func TestSetServe(t *testing.T) { "svc:bar": { TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3001"}, }, @@ -1871,7 +1872,7 @@ func TestSetServe(t *testing.T) { "svc:bar": { TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1893,12 +1894,12 @@ func TestSetServe(t *testing.T) { 88: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, }, - "bar:88": { + "bar.test.ts.net:88": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3001"}, 
}, @@ -1916,7 +1917,7 @@ func TestSetServe(t *testing.T) { "svc:bar": { TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -1937,7 +1938,7 @@ func TestSetServe(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, "/added": {Proxy: "http://localhost:3001"}, @@ -1965,7 +1966,7 @@ func TestSetServe(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel) + err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix) if err != nil && !tt.expectErr { t.Fatalf("got error: %v; did not expect error.", err) } @@ -2030,7 +2031,7 @@ func TestUnsetServe(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -2124,7 +2125,7 @@ func TestUnsetServe(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -2199,7 +2200,7 @@ func TestUnsetServe(t *testing.T) { 80: {HTTP: true}, }, Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "bar:80": { + "bar.test.ts.net:80": { Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://localhost:3000"}, }, @@ -2224,7 +2225,7 @@ func TestUnsetServe(t *testing.T) { if tt.setServeEnv { e = tt.serveEnv } - err := e.unsetServe(tt.cfg, tt.st, tt.dnsName, tt.srvType, tt.srvPort, tt.mount) + err := e.unsetServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mount, 
tt.st.CurrentTailnet.MagicDNSSuffix) if err != nil && !tt.expectErr { t.Fatalf("got error: %v; did not expect error.", err) } diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 6a0c2d89e685e..8df68cd744148 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -270,7 +270,7 @@ func serveOnLocalTailscaled(ctx context.Context, lc *local.Client, st *ipnstate. foregroundSc.SetFunnel(serverURL, dstPort, shouldFunnel) foregroundSc.SetWebHandler(&ipn.HTTPHandler{ Proxy: fmt.Sprintf("https://%s", net.JoinHostPort(serverURL, strconv.Itoa(int(dstPort)))), - }, serverURL, uint16(*flagPort), "/", true) + }, serverURL, uint16(*flagPort), "/", true, st.CurrentTailnet.MagicDNSSuffix) err = lc.SetServeConfig(ctx, sc) if err != nil { return nil, watcherChan, fmt.Errorf("could not set serve config: %v", err) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 28262251c6880..36738b88119f5 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -1014,7 +1014,9 @@ func (b *LocalBackend) webServerConfig(hostname string, forVIPService tailcfg.Se return c, false } if forVIPService != "" { - key := ipn.HostPort(net.JoinHostPort(forVIPService.WithoutPrefix(), fmt.Sprintf("%d", port))) + magicDNSSuffix := b.currentNode().NetMap().MagicDNSSuffix() + fqdn := strings.Join([]string{forVIPService.WithoutPrefix(), magicDNSSuffix}, ".") + key := ipn.HostPort(net.JoinHostPort(fqdn, fmt.Sprintf("%d", port))) return b.serveConfig.FindServiceWeb(forVIPService, key) } key := ipn.HostPort(net.JoinHostPort(hostname, fmt.Sprintf("%d", port))) diff --git a/ipn/serve.go b/ipn/serve.go index fae0ad5d6568a..a0f1334d7d150 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -343,8 +343,9 @@ func (sc *ServeConfig) FindConfig(port uint16) (*ServeConfig, bool) { // SetWebHandler sets the given HTTPHandler at the specified host, port, // and mount in the serve config. sc.TCP is also updated to reflect web // serving usage of the given port. 
The st argument is needed when setting -// a web handler for a service, otherwise it can be nil. -func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool) { +// a web handler for a service, otherwise it can be nil. mds is the Magic DNS +// suffix, which is used to recreate serve's host. +func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uint16, mount string, useTLS bool, mds string) { if sc == nil { sc = new(ServeConfig) } @@ -353,7 +354,7 @@ func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uin webServerMap := &sc.Web hostName := host if svcName := tailcfg.AsServiceName(host); svcName != "" { - hostName = svcName.WithoutPrefix() + hostName = strings.Join([]string{svcName.WithoutPrefix(), mds}, ".") svc, ok := sc.Services[svcName] if !ok { svc = new(ServiceConfig) @@ -464,8 +465,7 @@ func (sc *ServeConfig) RemoveWebHandler(host string, port uint16, mounts []strin // RemoveServiceWebHandler deletes the web handlers at all of the given mount points // for the provided host and port in the serve config for the given service. -func (sc *ServeConfig) RemoveServiceWebHandler(st *ipnstate.Status, svcName tailcfg.ServiceName, port uint16, mounts []string) { - hostName := svcName.WithoutPrefix() +func (sc *ServeConfig) RemoveServiceWebHandler(svcName tailcfg.ServiceName, hostName string, port uint16, mounts []string) { hp := HostPort(net.JoinHostPort(hostName, strconv.Itoa(int(port)))) svc, ok := sc.Services[svcName] From 729d6532ff356101b3e34c28b3c5ce9a186af44e Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 22 Jul 2025 13:54:28 -0700 Subject: [PATCH 0151/1093] tailcfg: add Hostinfo.ExitNodeID to report the selected exit node (#16625) When a client selects a particular exit node, Control may use that as a signal for deciding other routes. This patch causes the client to report whenever the current exit node changes, through tailcfg.Hostinfo.ExitNodeID. 
It relies on a properly set ipn.Prefs.ExitNodeID, which should already be resolved by `tailscale set`. Updates tailscale/corp#30536 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 9 +++++ ipn/ipnlocal/local_test.go | 67 +++++++++++++++++++++++++++++--------- tailcfg/tailcfg.go | 4 ++- tailcfg/tailcfg_clone.go | 1 + tailcfg/tailcfg_test.go | 16 +++++++++ tailcfg/tailcfg_view.go | 2 ++ 6 files changed, 83 insertions(+), 16 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8665a88c4f867..ce0f4f6873d74 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5612,6 +5612,11 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // WireIngress. hi.WireIngress = b.shouldWireInactiveIngressLocked() hi.AppConnector.Set(prefs.AppConnector().Advertise) + + // The [tailcfg.Hostinfo.ExitNodeID] field tells control which exit node + // was selected, if any. Since [LocalBackend.resolveExitNodeIPLocked] + // has already run, there is no need to consult [ipn.Prefs.ExitNodeIP]. + hi.ExitNodeID = prefs.ExitNodeID() } // enterState transitions the backend into newState, updating internal @@ -6136,6 +6141,10 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { }); err != nil { b.logf("failed to save exit node changes: %v", err) } + + // Send the resolved exit node to Control via Hostinfo. 
+ b.hostinfo.ExitNodeID = prefs.ExitNodeID + b.sendToLocked(ipn.Notify{Prefs: ptr.To(prefs.View())}, allClients) return true } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 13681fc0430ea..da6fc8b4a5725 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -614,19 +614,20 @@ func TestConfigureExitNode(t *testing.T) { } tests := []struct { - name string - prefs ipn.Prefs - netMap *netmap.NetworkMap - report *netcheck.Report - changePrefs *ipn.MaskedPrefs - useExitNodeEnabled *bool - exitNodeIDPolicy *tailcfg.StableNodeID - exitNodeIPPolicy *netip.Addr - exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes - exitNodeAllowOverride bool // whether [syspolicy.AllowExitNodeOverride] should be set to true - wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] - wantPrefs ipn.Prefs - wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] + name string + prefs ipn.Prefs + netMap *netmap.NetworkMap + report *netcheck.Report + changePrefs *ipn.MaskedPrefs + useExitNodeEnabled *bool + exitNodeIDPolicy *tailcfg.StableNodeID + exitNodeIPPolicy *netip.Addr + exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes + exitNodeAllowOverride bool // whether [syspolicy.AllowExitNodeOverride] should be set to true + wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] + wantPrefs ipn.Prefs + wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] + wantHostinfoExitNodeID *tailcfg.StableNodeID }{ { name: "exit-node-id-via-prefs", // set exit node ID via prefs @@ -643,6 +644,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "exit-node-ip-via-prefs", // set exit node IP 
via prefs (should be resolved to an ID) @@ -659,6 +661,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-exit-node-via-prefs/any", // set auto exit node via prefs @@ -676,6 +679,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-exit-node-via-prefs/set-exit-node-id-via-prefs", // setting exit node ID explicitly should disable auto exit node @@ -695,6 +699,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "", // should be unset }, + wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), }, { name: "auto-exit-node-via-prefs/any/no-report", // set auto exit node via prefs, but no report means we can't resolve the exit node ID @@ -711,6 +716,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), }, { name: "auto-exit-node-via-prefs/any/no-netmap", // similarly, but without a netmap (no exit node should be selected) @@ -727,6 +733,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), }, { name: "auto-exit-node-via-prefs/foo", // set auto exit node via prefs with an unknown/unsupported expression @@ -744,6 +751,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-exit-node-via-prefs/off", // toggle the exit node off after it was set to "any" @@ -763,6 +771,7 @@ func TestConfigureExitNode(t 
*testing.T) { AutoExitNode: "", InternalExitNodePrior: "auto:any", }, + wantHostinfoExitNodeID: ptr.To(tailcfg.StableNodeID("")), }, { name: "auto-exit-node-via-prefs/on", // toggle the exit node on @@ -779,6 +788,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "auto:any", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "id-via-policy", // set exit node ID via syspolicy @@ -791,6 +801,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "id-via-policy/cannot-override-via-prefs/by-id", // syspolicy should take precedence over prefs @@ -809,7 +820,8 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantChangePrefsErr: errManagedByPolicy, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantChangePrefsErr: errManagedByPolicy, }, { name: "id-via-policy/cannot-override-via-prefs/by-ip", // syspolicy should take precedence over prefs @@ -828,7 +840,8 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantChangePrefsErr: errManagedByPolicy, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantChangePrefsErr: errManagedByPolicy, }, { name: "id-via-policy/cannot-override-via-prefs/by-auto-expr", // syspolicy should take precedence over prefs @@ -860,6 +873,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode2.StableID(), }, + wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), }, { name: "auto-any-via-policy", // set auto exit node via syspolicy (an exit node should be selected) @@ -874,6 +888,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-any-via-policy/no-report", // set auto exit 
node via syspolicy without a netcheck report (no exit node should be selected) @@ -888,6 +903,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), }, { name: "auto-any-via-policy/no-netmap", // similarly, but without a netmap (no exit node should be selected) @@ -902,6 +918,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), }, { name: "auto-any-via-policy/no-netmap/with-existing", // set auto exit node via syspolicy without a netmap, but with a previously set exit node ID @@ -918,6 +935,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), }, { name: "auto-any-via-policy/no-netmap/with-allowed-existing", // same, but now with a syspolicy setting that explicitly allows the existing exit node ID @@ -936,6 +954,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), }, { name: "auto-any-via-policy/no-netmap/with-disallowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID @@ -954,6 +973,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // we don't have a netmap yet, and the current exit node ID is not allowed; block traffic AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), }, { name: "auto-any-via-policy/with-netmap/with-allowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID @@ -972,6 +992,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), // we have a netmap; switch to the best allowed exit node AutoExitNode: "any", }, + wantHostinfoExitNodeID: 
ptr.To(exitNode2.StableID()), }, { name: "auto-any-via-policy/with-netmap/switch-to-better", // if all exit nodes are allowed, switch to the best one once we have a netmap @@ -987,6 +1008,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // switch to the best exit node AutoExitNode: "any", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-foo-via-policy", // set auto exit node via syspolicy with an unknown/unsupported expression @@ -1001,6 +1023,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-foo-via-edit-prefs", // set auto exit node via EditPrefs with an unknown/unsupported expression @@ -1018,6 +1041,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-any-via-policy/toggle-off", // cannot toggle off the exit node if it was set via syspolicy @@ -1035,6 +1059,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [syspolicy.AllowExitNodeOverride] @@ -1056,6 +1081,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), // overridden by user AutoExitNode: "", // cleared, as we are setting the exit node ID explicitly }, + wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), }, { name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [syspolicy.AllowExitNodeOverride] @@ -1079,6 +1105,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, + 
wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [syspolicy.AllowExitNodeOverride] @@ -1097,6 +1124,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, { name: "auto-any-via-initial-prefs/no-netmap/clear-auto-exit-node", @@ -1117,6 +1145,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", // cleared ExitNodeID: "", // has never been resolved, so it should be cleared as well }, + wantHostinfoExitNodeID: ptr.To(tailcfg.StableNodeID("")), }, { name: "auto-any-via-initial-prefs/with-netmap/clear-auto-exit-node", @@ -1137,6 +1166,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", // cleared ExitNodeID: exitNode1.StableID(), // a resolved exit node ID should be retained }, + wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), }, } syspolicy.RegisterWellKnownSettingsForTest(t) @@ -1197,6 +1227,13 @@ func TestConfigureExitNode(t *testing.T) { if diff := cmp.Diff(&tt.wantPrefs, lb.Prefs().AsStruct(), opts...); diff != "" { t.Errorf("Prefs(+got -want): %v", diff) } + + // And check Hostinfo. + if tt.wantHostinfoExitNodeID != nil { + if got := lb.hostinfo.ExitNodeID; got != *tt.wantHostinfoExitNodeID { + t.Errorf("Hostinfo.ExitNodeID got %v, want %v", got, *tt.wantHostinfoExitNodeID) + } + } }) } } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 550914b96e31a..307b39f93903c 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -166,7 +166,8 @@ type CapabilityVersion int // - 119: 2025-07-10: Client uses Hostinfo.Location.Priority to prioritize one route over another. 
// - 120: 2025-07-15: Client understands peer relay disco messages, and implements peer client and relay server functions // - 121: 2025-07-19: Client understands peer relay endpoint alloc with [disco.AllocateUDPRelayEndpointRequest] & [disco.AllocateUDPRelayEndpointResponse] -const CurrentCapabilityVersion CapabilityVersion = 121 +// - 122: 2025-07-21: Client sends Hostinfo.ExitNodeID to report which exit node it has selected, if any. +const CurrentCapabilityVersion CapabilityVersion = 122 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -875,6 +876,7 @@ type Hostinfo struct { UserspaceRouter opt.Bool `json:",omitempty"` // if the client's subnet router is running in userspace (netstack) mode AppConnector opt.Bool `json:",omitempty"` // if the client is running the app-connector service ServicesHash string `json:",omitempty"` // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n + ExitNodeID StableNodeID `json:",omitzero"` // the client’s selected exit node, empty when unselected. // Location represents geographical location data about a // Tailscale host. 
Location is optional and only set if diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 412e1f38d18bc..95f8905b84e69 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -186,6 +186,7 @@ var _HostinfoCloneNeedsRegeneration = Hostinfo(struct { UserspaceRouter opt.Bool AppConnector opt.Bool ServicesHash string + ExitNodeID StableNodeID Location *Location TPM *TPMInfo StateEncrypted opt.Bool diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 833314df8fd6d..addd2330ba239 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -67,6 +67,7 @@ func TestHostinfoEqual(t *testing.T) { "UserspaceRouter", "AppConnector", "ServicesHash", + "ExitNodeID", "Location", "TPM", "StateEncrypted", @@ -273,6 +274,21 @@ func TestHostinfoEqual(t *testing.T) { &Hostinfo{IngressEnabled: true}, false, }, + { + &Hostinfo{ExitNodeID: "stable-exit"}, + &Hostinfo{ExitNodeID: "stable-exit"}, + true, + }, + { + &Hostinfo{ExitNodeID: ""}, + &Hostinfo{}, + true, + }, + { + &Hostinfo{ExitNodeID: ""}, + &Hostinfo{ExitNodeID: "stable-exit"}, + false, + }, } for i, tt := range tests { got := tt.a.Equal(tt.b) diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 7e82cd871c64a..c407800210a5e 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -300,6 +300,7 @@ func (v HostinfoView) Userspace() opt.Bool { return v.ж.User func (v HostinfoView) UserspaceRouter() opt.Bool { return v.ж.UserspaceRouter } func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector } func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash } +func (v HostinfoView) ExitNodeID() StableNodeID { return v.ж.ExitNodeID } func (v HostinfoView) Location() LocationView { return v.ж.Location.View() } func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.ж.TPM) } @@ -345,6 +346,7 @@ var _HostinfoViewNeedsRegeneration = Hostinfo(struct { UserspaceRouter opt.Bool AppConnector 
opt.Bool ServicesHash string + ExitNodeID StableNodeID Location *Location TPM *TPMInfo StateEncrypted opt.Bool From 1ae6a97a7313b3412dc89618efffad3181a07997 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Tue, 22 Jul 2025 21:13:25 -0400 Subject: [PATCH 0152/1093] cmd/tailscale/cli: add advertise command to advertise a node as service proxy to tailnet (#16620) This commit adds an advertise subcommand for tailscale serve, that would declare the node as a service proxy for a service. This command only adds the service to the node's list of advertised services, but doesn't modify the list of services currently advertised. Fixes tailscale/corp#28016 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_v2.go | 33 ++++++++++++++++++++++++++---- cmd/tailscale/cli/serve_v2_test.go | 14 ++++++------- 2 files changed, 36 insertions(+), 11 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 056bfabb0a202..91a23697035a8 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -220,6 +220,16 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { LongHelp: "Remove all handlers configured for the specified service.", Exec: e.runServeClear, }, + { + Name: "advertise", + ShortUsage: fmt.Sprintf("tailscale %s advertise ", info.Name), + ShortHelp: "Advertise this node as a service proxy to the tailnet", + LongHelp: "Advertise this node as a service proxy to the tailnet. This command is used\n" + + "to make the current node be considered as a service host for a service. This is\n" + + "useful to bring a service back after it has been drained. (i.e. after running \n" + + "`tailscale serve drain `). 
This is not needed if you are using `tailscale serve` to initialize a service.", + Exec: e.runServeAdvertise, + }, }, } } @@ -401,7 +411,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { return err } if forService { - e.addServiceToPrefs(ctx, svcName.String()) + e.addServiceToPrefs(ctx, svcName) } target := "" if len(args) > 0 { @@ -442,16 +452,16 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } } -func (e *serveEnv) addServiceToPrefs(ctx context.Context, serviceName string) error { +func (e *serveEnv) addServiceToPrefs(ctx context.Context, serviceName tailcfg.ServiceName) error { prefs, err := e.lc.GetPrefs(ctx) if err != nil { return fmt.Errorf("error getting prefs: %w", err) } advertisedServices := prefs.AdvertiseServices - if slices.Contains(advertisedServices, serviceName) { + if slices.Contains(advertisedServices, serviceName.String()) { return nil // already advertised } - advertisedServices = append(advertisedServices, serviceName) + advertisedServices = append(advertisedServices, serviceName.String()) _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ AdvertiseServicesSet: true, Prefs: ipn.Prefs{ @@ -526,6 +536,21 @@ func (e *serveEnv) runServeClear(ctx context.Context, args []string) error { return e.lc.SetServeConfig(ctx, sc) } +func (e *serveEnv) runServeAdvertise(ctx context.Context, args []string) error { + if len(args) == 0 { + return fmt.Errorf("error: missing service name argument") + } + if len(args) != 1 { + fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") + return errHelp + } + svc := tailcfg.ServiceName(args[0]) + if err := svc.Validate(); err != nil { + return fmt.Errorf("invalid service name: %w", err) + } + return e.addServiceToPrefs(ctx, svc) +} + const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" // validateConfig checks if the serve config is valid to serve the type wanted on the port. 
diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 95bf5b1012f8c..1deeaf3eaa9b5 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -1167,24 +1167,24 @@ func TestCleanURLPath(t *testing.T) { func TestAddServiceToPrefs(t *testing.T) { tests := []struct { name string - dnsName string + svcName tailcfg.ServiceName startServices []string expected []string }{ { name: "add service to empty prefs", - dnsName: "svc:foo", + svcName: "svc:foo", expected: []string{"svc:foo"}, }, { name: "add service to existing prefs", - dnsName: "svc:bar", + svcName: "svc:bar", startServices: []string{"svc:foo"}, expected: []string{"svc:foo", "svc:bar"}, }, { name: "add existing service to prefs", - dnsName: "svc:foo", + svcName: "svc:foo", startServices: []string{"svc:foo"}, expected: []string{"svc:foo"}, }, @@ -1200,12 +1200,12 @@ func TestAddServiceToPrefs(t *testing.T) { }, }) e := &serveEnv{lc: lc, bg: bgBoolFlag{true, false}} - err := e.addServiceToPrefs(ctx, tt.dnsName) + err := e.addServiceToPrefs(ctx, tt.svcName) if err != nil { - t.Fatalf("addServiceToPrefs(%q) returned unexpected error: %v", tt.dnsName, err) + t.Fatalf("addServiceToPrefs(%q) returned unexpected error: %v", tt.svcName, err) } if !slices.Equal(lc.prefs.AdvertiseServices, tt.expected) { - t.Errorf("addServiceToPrefs(%q) = %v, want %v", tt.dnsName, lc.prefs.AdvertiseServices, tt.expected) + t.Errorf("addServiceToPrefs(%q) = %v, want %v", tt.svcName, lc.prefs.AdvertiseServices, tt.expected) } }) } From f1f334b23d4891d5195442b4581e72febff17de4 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 23 Jul 2025 11:25:05 -0400 Subject: [PATCH 0153/1093] flake.lock/go.mod.sri: update flake version info (#16631) Update nixpkgs-unstable to include newer golang to satisfy go.mod requirement of 1.24.4 Update vendor hash to current. 
Updates #15015 Signed-off-by: Mike O'Driscoll --- .github/workflows/update-flake.yml | 2 +- flake.lock | 6 +++--- flake.nix | 3 ++- go.mod.sri | 2 +- shell.nix | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 61a09cea1c990..1968c68302d37 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -8,7 +8,7 @@ on: - main paths: - go.mod - - .github/workflows/update-flakes.yml + - .github/workflows/update-flake.yml workflow_dispatch: concurrency: diff --git a/flake.lock b/flake.lock index 05b0f303e6433..87f234e3ecab1 100644 --- a/flake.lock +++ b/flake.lock @@ -36,11 +36,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1743938762, - "narHash": "sha256-UgFYn8sGv9B8PoFpUfCa43CjMZBl1x/ShQhRDHBFQdI=", + "lastModified": 1753151930, + "narHash": "sha256-XSQy6wRKHhRe//iVY5lS/ZpI/Jn6crWI8fQzl647wCg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "74a40410369a1c35ee09b8a1abee6f4acbedc059", + "rev": "83e677f31c84212343f4cc553bab85c2efcad60a", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 2f920bfd40ba5..17d263a8dd3c9 100644 --- a/flake.nix +++ b/flake.nix @@ -130,4 +130,5 @@ in flake-utils.lib.eachDefaultSystem (system: flakeForSystem nixpkgs system); } -# nix-direnv cache busting line: sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= +# nix-direnv cache busting line: sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= + diff --git a/go.mod.sri b/go.mod.sri index 6c8357e0468ba..845086191699a 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= \ No newline at end of file +sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= diff --git a/shell.nix b/shell.nix index bb8eacb67ee18..2eb5b441a2d87 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-av4kr09rjNRmag94ziNjJuI/cg8b8lAD3Tk24t/ezH4= +# 
nix-direnv cache busting line: sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= From 1ef8fbf4705637ee73c46300566e3df56c4885e4 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 23 Jul 2025 11:50:42 -0700 Subject: [PATCH 0154/1093] ipn/ipnlocal: send Hostinfo after resolveExitNode for "auto:any" (#16632) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In #16625, I introduced a mechanism for sending the selected exit node to Control via tailcfg.Hostinfo.ExitNodeID as part of the MapRequest. @nickkhyl pointed out that LocalBackend.doSetHostinfoFilterServices needs to be triggered in order to actually send this update. This patch adds that command. It also prevents the client from sending "auto:any" in that field, because that’s not a real exit node ID. This patch also fills in some missing checks in TestConfigureExitNode. Updates tailscale/corp#30536 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 40 +++++++++++++++++----- ipn/ipnlocal/local_test.go | 69 +++++++++++++++++++------------------- 2 files changed, 66 insertions(+), 43 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ce0f4f6873d74..7154b942c1690 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5614,9 +5614,22 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.AppConnector.Set(prefs.AppConnector().Advertise) // The [tailcfg.Hostinfo.ExitNodeID] field tells control which exit node - // was selected, if any. Since [LocalBackend.resolveExitNodeIPLocked] - // has already run, there is no need to consult [ipn.Prefs.ExitNodeIP]. - hi.ExitNodeID = prefs.ExitNodeID() + // was selected, if any. 
+ // + // If auto exit node is enabled (via [ipn.Prefs.AutoExitNode] or + // [syspolicy.ExitNodeID]), or an exit node is specified by ExitNodeIP + // instead of ExitNodeID , and we don't yet have enough info to resolve + // it (usually due to missing netmap or net report), then ExitNodeID in + // the prefs may be invalid (typically, [unresolvedExitNodeID]) until + // the netmap is available. + // + // In this case, we shouldn't update the Hostinfo with the bogus + // ExitNodeID here; [LocalBackend.ResolveExitNode] will be called once + // the netmap and/or net report have been received to both pick the exit + // node and notify control of the change. + if sid := prefs.ExitNodeID(); sid != unresolvedExitNodeID { + hi.ExitNodeID = prefs.ExitNodeID() + } } // enterState transitions the backend into newState, updating internal @@ -6117,9 +6130,10 @@ func (b *LocalBackend) RefreshExitNode() { } } -// resolveExitNode determines which exit node to use based on the current -// prefs and netmap. It updates the exit node ID in the prefs if needed, -// sends a notification to clients, and returns true if the exit node has changed. +// resolveExitNode determines which exit node to use based on the current prefs +// and netmap. It updates the exit node ID in the prefs if needed, updates the +// exit node ID in the hostinfo if needed, sends a notification to clients, and +// returns true if the exit node has changed. // // It is the caller's responsibility to reconfigure routes and actually // start using the selected exit node, if needed. @@ -6142,8 +6156,18 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { b.logf("failed to save exit node changes: %v", err) } - // Send the resolved exit node to Control via Hostinfo. - b.hostinfo.ExitNodeID = prefs.ExitNodeID + // Send the resolved exit node to control via [tailcfg.Hostinfo]. 
+ // [LocalBackend.applyPrefsToHostinfoLocked] usually sets the Hostinfo, + // but it deferred until this point because there was a bogus ExitNodeID + // in the prefs. + // + // TODO(sfllaw): Mutating b.hostinfo here is undesirable, mutating + // in-place doubly so. + sid := prefs.ExitNodeID + if sid != unresolvedExitNodeID && b.hostinfo.ExitNodeID != sid { + b.hostinfo.ExitNodeID = sid + b.goTracker.Go(b.doSetHostinfoFilterServices) + } b.sendToLocked(ipn.Notify{Prefs: ptr.To(prefs.View())}, allClients) return true diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index da6fc8b4a5725..dd2837022f064 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -627,7 +627,7 @@ func TestConfigureExitNode(t *testing.T) { wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] wantPrefs ipn.Prefs wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] - wantHostinfoExitNodeID *tailcfg.StableNodeID + wantHostinfoExitNodeID tailcfg.StableNodeID }{ { name: "exit-node-id-via-prefs", // set exit node ID via prefs @@ -644,7 +644,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "exit-node-ip-via-prefs", // set exit node IP via prefs (should be resolved to an ID) @@ -661,7 +661,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-exit-node-via-prefs/any", // set auto exit node via prefs @@ -679,7 +679,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { 
name: "auto-exit-node-via-prefs/set-exit-node-id-via-prefs", // setting exit node ID explicitly should disable auto exit node @@ -699,7 +699,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "", // should be unset }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-exit-node-via-prefs/any/no-report", // set auto exit node via prefs, but no report means we can't resolve the exit node ID @@ -716,7 +716,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), + wantHostinfoExitNodeID: "", }, { name: "auto-exit-node-via-prefs/any/no-netmap", // similarly, but without a netmap (no exit node should be selected) @@ -733,7 +733,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // cannot resolve; traffic will be dropped AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), + wantHostinfoExitNodeID: "", }, { name: "auto-exit-node-via-prefs/foo", // set auto exit node via prefs with an unknown/unsupported expression @@ -751,7 +751,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-exit-node-via-prefs/off", // toggle the exit node off after it was set to "any" @@ -771,7 +771,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", InternalExitNodePrior: "auto:any", }, - wantHostinfoExitNodeID: ptr.To(tailcfg.StableNodeID("")), + wantHostinfoExitNodeID: "", }, { name: "auto-exit-node-via-prefs/on", // toggle the exit node on @@ -788,7 +788,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", 
InternalExitNodePrior: "auto:any", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "id-via-policy", // set exit node ID via syspolicy @@ -801,7 +801,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "id-via-policy/cannot-override-via-prefs/by-id", // syspolicy should take precedence over prefs @@ -820,7 +820,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), wantChangePrefsErr: errManagedByPolicy, }, { @@ -840,7 +840,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), wantChangePrefsErr: errManagedByPolicy, }, { @@ -860,7 +860,8 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), }, - wantChangePrefsErr: errManagedByPolicy, + wantHostinfoExitNodeID: exitNode1.StableID(), + wantChangePrefsErr: errManagedByPolicy, }, { name: "ip-via-policy", // set exit node IP via syspolicy (should be resolved to an ID) @@ -873,7 +874,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, ExitNodeID: exitNode2.StableID(), }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-any-via-policy", // set auto exit node via syspolicy (an exit node should be selected) @@ -888,7 +889,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { 
name: "auto-any-via-policy/no-report", // set auto exit node via syspolicy without a netcheck report (no exit node should be selected) @@ -903,7 +904,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), + wantHostinfoExitNodeID: "", }, { name: "auto-any-via-policy/no-netmap", // similarly, but without a netmap (no exit node should be selected) @@ -918,7 +919,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), + wantHostinfoExitNodeID: "", }, { name: "auto-any-via-policy/no-netmap/with-existing", // set auto exit node via syspolicy without a netmap, but with a previously set exit node ID @@ -935,7 +936,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-any-via-policy/no-netmap/with-allowed-existing", // same, but now with a syspolicy setting that explicitly allows the existing exit node ID @@ -954,7 +955,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-any-via-policy/no-netmap/with-disallowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID @@ -973,7 +974,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: unresolvedExitNodeID, // we don't have a netmap yet, and the current exit node ID is not allowed; block traffic AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(unresolvedExitNodeID), + wantHostinfoExitNodeID: "", }, { name: "auto-any-via-policy/with-netmap/with-allowed-existing", // same, but now with a syspolicy setting that does not 
allow the existing exit node ID @@ -992,7 +993,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), // we have a netmap; switch to the best allowed exit node AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-any-via-policy/with-netmap/switch-to-better", // if all exit nodes are allowed, switch to the best one once we have a netmap @@ -1008,7 +1009,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // switch to the best exit node AutoExitNode: "any", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-foo-via-policy", // set auto exit node via syspolicy with an unknown/unsupported expression @@ -1023,7 +1024,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-foo-via-edit-prefs", // set auto exit node via EditPrefs with an unknown/unsupported expression @@ -1041,7 +1042,7 @@ func TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" AutoExitNode: "foo", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-any-via-policy/toggle-off", // cannot toggle off the exit node if it was set via syspolicy @@ -1059,7 +1060,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [syspolicy.AllowExitNodeOverride] @@ -1081,7 +1082,7 @@ func 
TestConfigureExitNode(t *testing.T) { ExitNodeID: exitNode2.StableID(), // overridden by user AutoExitNode: "", // cleared, as we are setting the exit node ID explicitly }, - wantHostinfoExitNodeID: ptr.To(exitNode2.StableID()), + wantHostinfoExitNodeID: exitNode2.StableID(), }, { name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [syspolicy.AllowExitNodeOverride] @@ -1105,7 +1106,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [syspolicy.AllowExitNodeOverride] @@ -1124,7 +1125,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "any", InternalExitNodePrior: "", }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, { name: "auto-any-via-initial-prefs/no-netmap/clear-auto-exit-node", @@ -1145,7 +1146,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", // cleared ExitNodeID: "", // has never been resolved, so it should be cleared as well }, - wantHostinfoExitNodeID: ptr.To(tailcfg.StableNodeID("")), + wantHostinfoExitNodeID: "", }, { name: "auto-any-via-initial-prefs/with-netmap/clear-auto-exit-node", @@ -1166,7 +1167,7 @@ func TestConfigureExitNode(t *testing.T) { AutoExitNode: "", // cleared ExitNodeID: exitNode1.StableID(), // a resolved exit node ID should be retained }, - wantHostinfoExitNodeID: ptr.To(exitNode1.StableID()), + wantHostinfoExitNodeID: exitNode1.StableID(), }, } syspolicy.RegisterWellKnownSettingsForTest(t) @@ -1229,10 +1230,8 @@ func TestConfigureExitNode(t *testing.T) { } // And check Hostinfo. 
- if tt.wantHostinfoExitNodeID != nil { - if got := lb.hostinfo.ExitNodeID; got != *tt.wantHostinfoExitNodeID { - t.Errorf("Hostinfo.ExitNodeID got %v, want %v", got, *tt.wantHostinfoExitNodeID) - } + if got := lb.hostinfo.ExitNodeID; got != tt.wantHostinfoExitNodeID { + t.Errorf("Hostinfo.ExitNodeID got %s, want %s", got, tt.wantHostinfoExitNodeID) } }) } From 179745b83ed6d687bdc9d501ccdbfdec1cb3f9d7 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 23 Jul 2025 12:30:04 -0700 Subject: [PATCH 0155/1093] wgengine/magicsock: update discoInfo docs (#16638) discoInfo is also used for holding peer relay server disco keys. Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index ee0ee40ca1d13..fb7f5edcbd14f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3907,14 +3907,18 @@ type epAddrEndpointCache struct { } // discoInfo is the info and state for the DiscoKey -// in the Conn.discoInfo map key. +// in the [Conn.discoInfo] and [relayManager.discoInfoByServerDisco] map keys. +// +// When the disco protocol is used to handshake with a peer relay server, the +// corresponding discoInfo is held in [relayManager.discoInfoByServerDisco] +// instead of [Conn.discoInfo]. // // Note that a DiscoKey does not necessarily map to exactly one // node. In the case of shared nodes and users switching accounts, two // nodes in the NetMap may legitimately have the same DiscoKey. As // such, no fields in here should be considered node-specific. type discoInfo struct { - // discoKey is the same as the Conn.discoInfo map key, + // discoKey is the same as the corresponding map key, // just so you can pass around a *discoInfo alone. // Not modified once initialized. 
discoKey key.DiscoPublic @@ -3925,11 +3929,13 @@ type discoInfo struct { // sharedKey is the precomputed key for communication with the // peer that has the DiscoKey used to look up this *discoInfo in - // Conn.discoInfo. + // the corresponding map. // Not modified once initialized. sharedKey key.DiscoShared - // Mutable fields follow, owned by Conn.mu: + // Mutable fields follow, owned by [Conn.mu]. These are irrelevant when + // discoInfo is a peer relay server disco key in the + // [relayManager.discoInfoByServerDisco] map: // lastPingFrom is the src of a ping for discoKey. lastPingFrom epAddr From c87f44b687e4b549d30fe420d45bfeebf47e5cd1 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 22 Jul 2025 18:57:24 -0500 Subject: [PATCH 0156/1093] cmd/tailscale/cli: use DNS name instead of Location to hide Mullvad exit nodes from status output Previously, we used a non-nil Location as an indicator that a peer is a Mullvad exit node. However, this is not, or no longer, reliable, since regular exit nodes may also have a non-nil Location, such as when traffic steering is enabled for a tailnet. In this PR, we update the plaintext `tailscale status` output to omit only Mullvad exit nodes, rather than all exit nodes with a non-nil Location. The JSON output remains unchanged and continues to include all peers. Updates tailscale/corp#30614 Signed-off-by: Nick Khyl --- cmd/tailscale/cli/status.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 39e6f9fbdfd8a..726606109aa15 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -70,6 +70,8 @@ var statusArgs struct { peers bool // in CLI mode, show status of peer machines } +const mullvadTCD = "mullvad.ts.net." 
+ func runStatus(ctx context.Context, args []string) error { if len(args) > 0 { return errors.New("unexpected non-flag arguments to 'tailscale status'") @@ -212,9 +214,8 @@ func runStatus(ctx context.Context, args []string) error { if ps.ShareeNode { continue } - if ps.Location != nil && ps.ExitNodeOption && !ps.ExitNode { - // Location based exit nodes are only shown with the - // `exit-node list` command. + if ps.ExitNodeOption && !ps.ExitNode && strings.HasSuffix(ps.DNSName, mullvadTCD) { + // Mullvad exit nodes are only shown with the `exit-node list` command. locBasedExitNode = true continue } From 2a5d9c726993318000eb4f42a8f35e6fcc6e2798 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Thu, 24 Jul 2025 12:20:28 -0600 Subject: [PATCH 0157/1093] VERSION.txt: this is v1.87.0 Signed-off-by: Aaron Klotz --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index f288d11142d11..f6342716723fc 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.85.0 +1.87.0 From c5724425480a4bbd21442ed0138eaa374d7ba02a Mon Sep 17 00:00:00 2001 From: Danni Popova Date: Fri, 25 Jul 2025 10:21:41 +0100 Subject: [PATCH 0158/1093] cmd/tailscale: allow SSH to IPs or DNS names without MagicDNS (#16591) fixes #16381 Signed-off-by: Danni Popova --- cmd/tailscale/cli/ssh.go | 63 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/ssh.go b/cmd/tailscale/cli/ssh.go index ba70e97e9f925..9275c9a1c2814 100644 --- a/cmd/tailscale/cli/ssh.go +++ b/cmd/tailscale/cli/ssh.go @@ -70,12 +70,28 @@ func runSSH(ctx context.Context, args []string) error { return err } + prefs, err := localClient.GetPrefs(ctx) + if err != nil { + return err + } + // hostForSSH is the hostname we'll tell OpenSSH we're // connecting to, so we have to maintain fewer entries in the // known_hosts files. 
hostForSSH := host - if v, ok := nodeDNSNameFromArg(st, host); ok { - hostForSSH = v + ps, ok := peerStatusFromArg(st, host) + if ok { + hostForSSH = ps.DNSName + + // If MagicDNS isn't enabled on the client, + // we will use the first IPv4 we know about + // or fallback to the first IPv6 address + if !prefs.CorpDNS { + ipHost, found := ipFromPeerStatus(ps) + if found { + hostForSSH = ipHost + } + } } ssh, err := findSSH() @@ -169,11 +185,40 @@ func genKnownHosts(st *ipnstate.Status) []byte { continue } fmt.Fprintf(&buf, "%s %s\n", ps.DNSName, hostKey) + for _, ip := range ps.TailscaleIPs { + fmt.Fprintf(&buf, "%s %s\n", ip.String(), hostKey) + } } } return buf.Bytes() } +// peerStatusFromArg returns the PeerStatus that matches +// the input arg which can be a base name, full DNS name, or an IP. +func peerStatusFromArg(st *ipnstate.Status, arg string) (*ipnstate.PeerStatus, bool) { + if arg == "" { + return nil, false + } + argIP, _ := netip.ParseAddr(arg) + for _, ps := range st.Peer { + if argIP.IsValid() { + for _, ip := range ps.TailscaleIPs { + if ip == argIP { + return ps, true + } + } + continue + } + if strings.EqualFold(strings.TrimSuffix(arg, "."), strings.TrimSuffix(ps.DNSName, ".")) { + return ps, true + } + if base, _, ok := strings.Cut(ps.DNSName, "."); ok && strings.EqualFold(base, arg) { + return ps, true + } + } + return nil, false +} + // nodeDNSNameFromArg returns the PeerStatus.DNSName value from a peer // in st that matches the input arg which can be a base name, full // DNS name, or an IP. 
@@ -202,6 +247,20 @@ func nodeDNSNameFromArg(st *ipnstate.Status, arg string) (dnsName string, ok boo return "", false } +func ipFromPeerStatus(ps *ipnstate.PeerStatus) (string, bool) { + if len(ps.TailscaleIPs) < 1 { + return "", false + } + + // Look for a IPv4 address or default to the first IP of the list + for _, ip := range ps.TailscaleIPs { + if ip.Is4() { + return ip.String(), true + } + } + return ps.TailscaleIPs[0].String(), true +} + // getSSHClientEnvVar returns the "SSH_CLIENT" environment variable // for the current process group, if any. var getSSHClientEnvVar = func() string { From bfebf870ae3ecc5dba74cb900f4a8994a2cfd8cc Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Fri, 25 Jul 2025 10:41:02 -0600 Subject: [PATCH 0159/1093] cmd/tailscaled: update installSystemDaemonWindows to set the correct system service depndencies Fixes #16658 Signed-off-by: Aaron Klotz --- cmd/tailscaled/install_windows.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cmd/tailscaled/install_windows.go b/cmd/tailscaled/install_windows.go index c667539b04d4f..3e5036fba6bc8 100644 --- a/cmd/tailscaled/install_windows.go +++ b/cmd/tailscaled/install_windows.go @@ -25,6 +25,15 @@ func init() { uninstallSystemDaemon = uninstallSystemDaemonWindows } +// serviceDependencies lists all system services that tailscaled depends on. +// This list must be kept in sync with the TailscaledDependencies preprocessor +// variable in the installer. 
+var serviceDependencies = []string{ + "iphlpsvc", + "netprofm", + "WinHttpAutoProxySvc", +} + func installSystemDaemonWindows(args []string) (err error) { m, err := mgr.Connect() if err != nil { @@ -48,6 +57,7 @@ func installSystemDaemonWindows(args []string) (err error) { ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, StartType: mgr.StartAutomatic, ErrorControl: mgr.ErrorNormal, + Dependencies: serviceDependencies, DisplayName: serviceName, Description: "Connects this computer to others on the Tailscale network.", } From e300a00058b77691a0b8a3354fb8244af6eef59e Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Fri, 25 Jul 2025 19:45:37 -0500 Subject: [PATCH 0160/1093] cmd/k8s-operator: Enhance DNS record handling for ProxyGroup egress services (#16181) This update introduces support for DNS records associated with ProxyGroup egress services, ensuring that the ClusterIP Service IP is used instead of Pod IPs. Fixes #15945 Signed-off-by: Raj Singh --- cmd/k8s-operator/dnsrecords.go | 280 ++++++++++++++++++---------- cmd/k8s-operator/dnsrecords_test.go | 128 ++++++++++++- 2 files changed, 310 insertions(+), 98 deletions(-) diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go index f91dd49ec255e..54c1584c6731e 100644 --- a/cmd/k8s-operator/dnsrecords.go +++ b/cmd/k8s-operator/dnsrecords.go @@ -31,6 +31,10 @@ import ( const ( dnsRecordsRecocilerFinalizer = "tailscale.com/dns-records-reconciler" annotationTSMagicDNSName = "tailscale.com/magic-dnsname" + + // Service types for consistent string usage + serviceTypeIngress = "ingress" + serviceTypeSvc = "svc" ) // dnsRecordsReconciler knows how to update dnsrecords ConfigMap with DNS @@ -51,7 +55,7 @@ type dnsRecordsReconciler struct { isDefaultLoadBalancer bool // true if operator is the default ingress controller in this cluster } -// Reconcile takes a reconcile.Request for a headless Service fronting a +// Reconcile takes a reconcile.Request for a Service fronting a // tailscale proxy and updates 
DNS Records in dnsrecords ConfigMap for the // in-cluster ts.net nameserver if required. func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { @@ -59,8 +63,8 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. logger.Debugf("starting reconcile") defer logger.Debugf("reconcile finished") - headlessSvc := new(corev1.Service) - err = dnsRR.Client.Get(ctx, req.NamespacedName, headlessSvc) + proxySvc := new(corev1.Service) + err = dnsRR.Client.Get(ctx, req.NamespacedName, proxySvc) if apierrors.IsNotFound(err) { logger.Debugf("Service not found") return reconcile.Result{}, nil @@ -68,14 +72,14 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get Service: %w", err) } - if !(isManagedByType(headlessSvc, "svc") || isManagedByType(headlessSvc, "ingress")) { - logger.Debugf("Service is not a headless Service for a tailscale ingress or egress proxy; do nothing") + if !(isManagedByType(proxySvc, serviceTypeSvc) || isManagedByType(proxySvc, serviceTypeIngress)) { + logger.Debugf("Service is not a proxy Service for a tailscale ingress or egress proxy; do nothing") return reconcile.Result{}, nil } - if !headlessSvc.DeletionTimestamp.IsZero() { + if !proxySvc.DeletionTimestamp.IsZero() { logger.Debug("Service is being deleted, clean up resources") - return reconcile.Result{}, dnsRR.maybeCleanup(ctx, headlessSvc, logger) + return reconcile.Result{}, dnsRR.maybeCleanup(ctx, proxySvc, logger) } // Check that there is a ts.net nameserver deployed to the cluster by @@ -99,7 +103,7 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. 
return reconcile.Result{}, nil } - if err := dnsRR.maybeProvision(ctx, headlessSvc, logger); err != nil { + if err := dnsRR.maybeProvision(ctx, proxySvc, logger); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) } else { @@ -111,37 +115,33 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. } // maybeProvision ensures that dnsrecords ConfigMap contains a record for the -// proxy associated with the headless Service. +// proxy associated with the Service. // The record is only provisioned if the proxy is for a tailscale Ingress or // egress configured via tailscale.com/tailnet-fqdn annotation. // // For Ingress, the record is a mapping between the MagicDNSName of the Ingress, retrieved from // ingress.status.loadBalancer.ingress.hostname field and the proxy Pod IP addresses -// retrieved from the EndpoinSlice associated with this headless Service, i.e +// retrieved from the EndpointSlice associated with this Service, i.e // Records{IP4: : <[IPs of the ingress proxy Pods]>} // // For egress, the record is a mapping between tailscale.com/tailnet-fqdn // annotation and the proxy Pod IP addresses, retrieved from the EndpointSlice -// associated with this headless Service, i.e +// associated with this Service, i.e // Records{IP4: {: <[IPs of the egress proxy Pods]>} // +// For ProxyGroup egress, the record is a mapping between tailscale.com/magic-dnsname +// annotation and the ClusterIP Service IP (which provides portmapping), i.e +// Records{IP4: {: <[ClusterIP Service IP]>} +// // If records need to be created for this proxy, maybeProvision will also: -// - update the headless Service with a tailscale.com/magic-dnsname annotation -// - update the headless Service with a finalizer -func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) error { - if headlessSvc == nil { - 
logger.Info("[unexpected] maybeProvision called with a nil Service") - return nil - } - isEgressFQDNSvc, err := dnsRR.isSvcForFQDNEgressProxy(ctx, headlessSvc) - if err != nil { - return fmt.Errorf("error checking whether the Service is for an egress proxy: %w", err) - } - if !(isEgressFQDNSvc || isManagedByType(headlessSvc, "ingress")) { +// - update the Service with a tailscale.com/magic-dnsname annotation +// - update the Service with a finalizer +func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { + if !dnsRR.isInterestingService(ctx, proxySvc) { logger.Debug("Service is not fronting a proxy that we create DNS records for; do nothing") return nil } - fqdn, err := dnsRR.fqdnForDNSRecord(ctx, headlessSvc, logger) + fqdn, err := dnsRR.fqdnForDNSRecord(ctx, proxySvc, logger) if err != nil { return fmt.Errorf("error determining DNS name for record: %w", err) } @@ -150,18 +150,18 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessS return nil // a new reconcile will be triggered once it's added } - oldHeadlessSvc := headlessSvc.DeepCopy() - // Ensure that headless Service is annotated with a finalizer to help + oldProxySvc := proxySvc.DeepCopy() + // Ensure that proxy Service is annotated with a finalizer to help // with records cleanup when proxy resources are deleted. - if !slices.Contains(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) { - headlessSvc.Finalizers = append(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) + if !slices.Contains(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) { + proxySvc.Finalizers = append(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) } - // Ensure that headless Service is annotated with the current MagicDNS + // Ensure that proxy Service is annotated with the current MagicDNS // name to help with records cleanup when proxy resources are deleted or // MagicDNS name changes. 
- oldFqdn := headlessSvc.Annotations[annotationTSMagicDNSName] + oldFqdn := proxySvc.Annotations[annotationTSMagicDNSName] if oldFqdn != "" && oldFqdn != fqdn { // i.e user has changed the value of tailscale.com/tailnet-fqdn annotation - logger.Debugf("MagicDNS name has changed, remvoving record for %s", oldFqdn) + logger.Debugf("MagicDNS name has changed, removing record for %s", oldFqdn) updateFunc := func(rec *operatorutils.Records) { delete(rec.IP4, oldFqdn) } @@ -169,57 +169,26 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessS return fmt.Errorf("error removing record for %s: %w", oldFqdn, err) } } - mak.Set(&headlessSvc.Annotations, annotationTSMagicDNSName, fqdn) - if !apiequality.Semantic.DeepEqual(oldHeadlessSvc, headlessSvc) { + mak.Set(&proxySvc.Annotations, annotationTSMagicDNSName, fqdn) + if !apiequality.Semantic.DeepEqual(oldProxySvc, proxySvc) { logger.Infof("provisioning DNS record for MagicDNS name: %s", fqdn) // this will be printed exactly once - if err := dnsRR.Update(ctx, headlessSvc); err != nil { - return fmt.Errorf("error updating proxy headless Service metadata: %w", err) + if err := dnsRR.Update(ctx, proxySvc); err != nil { + return fmt.Errorf("error updating proxy Service metadata: %w", err) } } - // Get the Pod IP addresses for the proxy from the EndpointSlices for - // the headless Service. The Service can have multiple EndpointSlices - // associated with it, for example in dual-stack clusters. 
- labels := map[string]string{discoveryv1.LabelServiceName: headlessSvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership - var eps = new(discoveryv1.EndpointSliceList) - if err := dnsRR.List(ctx, eps, client.InNamespace(dnsRR.tsNamespace), client.MatchingLabels(labels)); err != nil { - return fmt.Errorf("error listing EndpointSlices for the proxy's headless Service: %w", err) - } - if len(eps.Items) == 0 { - logger.Debugf("proxy's headless Service EndpointSlice does not yet exist. We will reconcile again once it's created") - return nil - } - // Each EndpointSlice for a Service can have a list of endpoints that each - // can have multiple addresses - these are the IP addresses of any Pods - // selected by that Service. Pick all the IPv4 addresses. - // It is also possible that multiple EndpointSlices have overlapping addresses. - // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#duplicate-endpoints - ips := make(set.Set[string], 0) - for _, slice := range eps.Items { - if slice.AddressType != discoveryv1.AddressTypeIPv4 { - logger.Infof("EndpointSlice is for AddressType %s, currently only IPv4 address type is supported", slice.AddressType) - continue - } - for _, ep := range slice.Endpoints { - if !epIsReady(&ep) { - logger.Debugf("Endpoint with addresses %v appears not ready to receive traffic %v", ep.Addresses, ep.Conditions.String()) - continue - } - for _, ip := range ep.Addresses { - if !net.IsIPv4String(ip) { - logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. Currently only IPv4 is supported", ip) - } else { - ips.Add(ip) - } - } - } + // Get the IP addresses for the DNS record + ips, err := dnsRR.getTargetIPs(ctx, proxySvc, logger) + if err != nil { + return fmt.Errorf("error getting target IPs: %w", err) } - if ips.Len() == 0 { - logger.Debugf("EndpointSlice for the Service contains no IPv4 addresses. 
We will reconcile again once they are created.") + if len(ips) == 0 { + logger.Debugf("No target IP addresses available yet. We will reconcile again once they are available.") return nil } + updateFunc := func(rec *operatorutils.Records) { - mak.Set(&rec.IP4, fqdn, ips.Slice()) + mak.Set(&rec.IP4, fqdn, ips) } if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS records: %w", err) @@ -243,8 +212,8 @@ func epIsReady(ep *discoveryv1.Endpoint) bool { // has been removed from the Service. If the record is not found in the // ConfigMap, the ConfigMap does not exist, or the Service does not have // tailscale.com/magic-dnsname annotation, just remove the finalizer. -func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) error { - ix := slices.Index(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) +func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { + ix := slices.Index(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) if ix == -1 { logger.Debugf("no finalizer, nothing to do") return nil @@ -252,24 +221,24 @@ func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, headlessSvc *co cm := &corev1.ConfigMap{} err := h.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: h.tsNamespace}, cm) if apierrors.IsNotFound(err) { - logger.Debug("'dsnrecords' ConfigMap not found") - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + logger.Debug("'dnsrecords' ConfigMap not found") + return h.removeProxySvcFinalizer(ctx, proxySvc) } if err != nil { return fmt.Errorf("error retrieving 'dnsrecords' ConfigMap: %w", err) } if cm.Data == nil { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return h.removeProxySvcFinalizer(ctx, proxySvc) } _, ok := cm.Data[operatorutils.DNSRecordsCMKey] if 
!ok { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return h.removeProxySvcFinalizer(ctx, proxySvc) } - fqdn, _ := headlessSvc.GetAnnotations()[annotationTSMagicDNSName] + fqdn, _ := proxySvc.GetAnnotations()[annotationTSMagicDNSName] if fqdn == "" { - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return h.removeProxySvcFinalizer(ctx, proxySvc) } logger.Infof("removing DNS record for MagicDNS name %s", fqdn) updateFunc := func(rec *operatorutils.Records) { @@ -278,27 +247,28 @@ func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, headlessSvc *co if err = h.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS config: %w", err) } - return h.removeHeadlessSvcFinalizer(ctx, headlessSvc) + return h.removeProxySvcFinalizer(ctx, proxySvc) } -func (dnsRR *dnsRecordsReconciler) removeHeadlessSvcFinalizer(ctx context.Context, headlessSvc *corev1.Service) error { - idx := slices.Index(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) +func (dnsRR *dnsRecordsReconciler) removeProxySvcFinalizer(ctx context.Context, proxySvc *corev1.Service) error { + idx := slices.Index(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) if idx == -1 { return nil } - headlessSvc.Finalizers = append(headlessSvc.Finalizers[:idx], headlessSvc.Finalizers[idx+1:]...) - return dnsRR.Update(ctx, headlessSvc) + proxySvc.Finalizers = slices.Delete(proxySvc.Finalizers, idx, idx+1) + return dnsRR.Update(ctx, proxySvc) } -// fqdnForDNSRecord returns MagicDNS name associated with a given headless Service. -// If the headless Service is for a tailscale Ingress proxy, returns ingress.status.loadBalancer.ingress.hostname. -// If the headless Service is for an tailscale egress proxy configured via tailscale.com/tailnet-fqdn annotation, returns the annotation value. 
-// This function is not expected to be called with headless Services for other +// fqdnForDNSRecord returns MagicDNS name associated with a given proxy Service. +// If the proxy Service is for a tailscale Ingress proxy, returns ingress.status.loadBalancer.ingress.hostname. +// If the proxy Service is for an tailscale egress proxy configured via tailscale.com/tailnet-fqdn annotation, returns the annotation value. +// For ProxyGroup egress Services, returns the tailnet-fqdn annotation from the parent Service. +// This function is not expected to be called with proxy Services for other // proxy types, or any other Services, but it just returns an empty string if // that happens. -func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) (string, error) { - parentName := parentFromObjectLabels(headlessSvc) - if isManagedByType(headlessSvc, "ingress") { +func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) (string, error) { + parentName := parentFromObjectLabels(proxySvc) + if isManagedByType(proxySvc, serviceTypeIngress) { ing := new(networkingv1.Ingress) if err := dnsRR.Get(ctx, parentName, ing); err != nil { return "", err @@ -308,10 +278,10 @@ func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, headles } return ing.Status.LoadBalancer.Ingress[0].Hostname, nil } - if isManagedByType(headlessSvc, "svc") { + if isManagedByType(proxySvc, serviceTypeSvc) { svc := new(corev1.Service) if err := dnsRR.Get(ctx, parentName, svc); apierrors.IsNotFound(err) { - logger.Info("[unexpected] parent Service for egress proxy %s not found", headlessSvc.Name) + logger.Infof("[unexpected] parent Service for egress proxy %s not found", proxySvc.Name) return "", nil } else if err != nil { return "", err @@ -328,7 +298,7 @@ func (dnsRR *dnsRecordsReconciler) updateDNSConfig(ctx context.Context, update f cm := 
&corev1.ConfigMap{} err := dnsRR.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: dnsRR.tsNamespace}, cm) if apierrors.IsNotFound(err) { - dnsRR.logger.Info("[unexpected] dnsrecords ConfigMap not found in cluster. Not updating DNS records. Please open an isue and attach operator logs.") + dnsRR.logger.Info("[unexpected] dnsrecords ConfigMap not found in cluster. Not updating DNS records. Please open an issue and attach operator logs.") return nil } if err != nil { @@ -366,3 +336,119 @@ func (dnsRR *dnsRecordsReconciler) isSvcForFQDNEgressProxy(ctx context.Context, annots := parentSvc.Annotations return annots != nil && annots[AnnotationTailnetTargetFQDN] != "", nil } + +// isProxyGroupEgressService reports whether the Service is a ClusterIP Service +// created for ProxyGroup egress. For ProxyGroup egress, there are no headless +// services. Instead, the DNS reconciler processes the ClusterIP Service +// directly, which has portmapping and should use its own IP for DNS records. +func (dnsRR *dnsRecordsReconciler) isProxyGroupEgressService(svc *corev1.Service) bool { + return svc.GetLabels()[labelProxyGroup] != "" && + svc.GetLabels()[labelSvcType] == typeEgress && + svc.Spec.Type == corev1.ServiceTypeClusterIP && + isManagedByType(svc, serviceTypeSvc) +} + +// isInterestingService reports whether the Service is one that we should create +// DNS records for. +func (dnsRR *dnsRecordsReconciler) isInterestingService(ctx context.Context, svc *corev1.Service) bool { + if isManagedByType(svc, serviceTypeIngress) { + return true + } + + isEgressFQDNSvc, err := dnsRR.isSvcForFQDNEgressProxy(ctx, svc) + if err != nil { + return false + } + if isEgressFQDNSvc { + return true + } + + if dnsRR.isProxyGroupEgressService(svc) { + return dnsRR.parentSvcTargetsFQDN(ctx, svc) + } + + return false +} + +// parentSvcTargetsFQDN reports whether the parent Service of a ProxyGroup +// egress Service has an FQDN target (not an IP target). 
+func (dnsRR *dnsRecordsReconciler) parentSvcTargetsFQDN(ctx context.Context, svc *corev1.Service) bool { + + parentName := parentFromObjectLabels(svc) + parentSvc := new(corev1.Service) + if err := dnsRR.Get(ctx, parentName, parentSvc); err != nil { + return false + } + + return parentSvc.Annotations[AnnotationTailnetTargetFQDN] != "" +} + +// getTargetIPs returns the IP addresses that should be used for DNS records +// for the given proxy Service. +func (dnsRR *dnsRecordsReconciler) getTargetIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { + if dnsRR.isProxyGroupEgressService(proxySvc) { + return dnsRR.getClusterIPServiceIPs(proxySvc, logger) + } + return dnsRR.getPodIPs(ctx, proxySvc, logger) +} + +// getClusterIPServiceIPs returns the ClusterIP of a ProxyGroup egress Service. +func (dnsRR *dnsRecordsReconciler) getClusterIPServiceIPs(proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { + if proxySvc.Spec.ClusterIP == "" || proxySvc.Spec.ClusterIP == "None" { + logger.Debugf("ProxyGroup egress ClusterIP Service does not have a ClusterIP yet.") + return nil, nil + } + // Validate that ClusterIP is a valid IPv4 address + if !net.IsIPv4String(proxySvc.Spec.ClusterIP) { + logger.Debugf("ClusterIP %s is not a valid IPv4 address", proxySvc.Spec.ClusterIP) + return nil, fmt.Errorf("ClusterIP %s is not a valid IPv4 address", proxySvc.Spec.ClusterIP) + } + logger.Debugf("Using ClusterIP Service IP %s for ProxyGroup egress DNS record", proxySvc.Spec.ClusterIP) + return []string{proxySvc.Spec.ClusterIP}, nil +} + +// getPodIPs returns Pod IP addresses from EndpointSlices for non-ProxyGroup Services. +func (dnsRR *dnsRecordsReconciler) getPodIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { + // Get the Pod IP addresses for the proxy from the EndpointSlices for + // the headless Service. 
The Service can have multiple EndpointSlices + // associated with it, for example in dual-stack clusters. + labels := map[string]string{discoveryv1.LabelServiceName: proxySvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership + var eps = new(discoveryv1.EndpointSliceList) + if err := dnsRR.List(ctx, eps, client.InNamespace(dnsRR.tsNamespace), client.MatchingLabels(labels)); err != nil { + return nil, fmt.Errorf("error listing EndpointSlices for the proxy's Service: %w", err) + } + if len(eps.Items) == 0 { + logger.Debugf("proxy's Service EndpointSlice does not yet exist.") + return nil, nil + } + // Each EndpointSlice for a Service can have a list of endpoints that each + // can have multiple addresses - these are the IP addresses of any Pods + // selected by that Service. Pick all the IPv4 addresses. + // It is also possible that multiple EndpointSlices have overlapping addresses. + // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#duplicate-endpoints + ips := make(set.Set[string], 0) + for _, slice := range eps.Items { + if slice.AddressType != discoveryv1.AddressTypeIPv4 { + logger.Infof("EndpointSlice is for AddressType %s, currently only IPv4 address type is supported", slice.AddressType) + continue + } + for _, ep := range slice.Endpoints { + if !epIsReady(&ep) { + logger.Debugf("Endpoint with addresses %v appears not ready to receive traffic %v", ep.Addresses, ep.Conditions.String()) + continue + } + for _, ip := range ep.Addresses { + if !net.IsIPv4String(ip) { + logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. 
Currently only IPv4 is supported", ip) + } else { + ips.Add(ip) + } + } + } + } + if ips.Len() == 0 { + logger.Debugf("EndpointSlice for the Service contains no IPv4 addresses.") + return nil, nil + } + return ips.Slice(), nil +} diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 4e73e6c9e33ba..51dfb90497ff7 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ b/cmd/k8s-operator/dnsrecords_test.go @@ -18,6 +18,7 @@ import ( networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" operatorutils "tailscale.com/k8s-operator" @@ -66,7 +67,7 @@ func TestDNSRecordsReconciler(t *testing.T) { } cl := tstest.NewClock(tstest.ClockOpts{}) // Set the ready condition of the DNSConfig - mustUpdateStatus[tsapi.DNSConfig](t, fc, "", "test", func(c *tsapi.DNSConfig) { + mustUpdateStatus(t, fc, "", "test", func(c *tsapi.DNSConfig) { operatorutils.SetDNSConfigCondition(c, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated, 0, cl, zl.Sugar()) }) dnsRR := &dnsRecordsReconciler{ @@ -156,6 +157,131 @@ func TestDNSRecordsReconciler(t *testing.T) { expectReconciled(t, dnsRR, "tailscale", "ts-ingress") wantHosts["another.ingress.ts.net"] = []string{"1.2.3.4"} expectHostsRecords(t, fc, wantHosts) + + // 8. 
DNS record is created for ProxyGroup egress using ClusterIP Service IP instead of Pod IPs + t.Log("test case 8: ProxyGroup egress") + + // Create the parent ExternalName service with tailnet-fqdn annotation + parentEgressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "external-service", + Namespace: "default", + Annotations: map[string]string{ + AnnotationTailnetTargetFQDN: "external-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "unused", + }, + } + mustCreate(t, fc, parentEgressSvc) + + proxyGroupEgressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ts-proxygroup-egress-abcd1", + Namespace: "tailscale", + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: "external-service", + LabelParentNamespace: "default", + LabelParentType: "svc", + labelProxyGroup: "test-proxy-group", + labelSvcType: typeEgress, + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: "10.0.100.50", // This IP should be used in DNS, not Pod IPs + Ports: []corev1.ServicePort{{ + Port: 443, + TargetPort: intstr.FromInt(10443), // Port mapping + }}, + }, + } + + // Create EndpointSlice with Pod IPs (these should NOT be used in DNS records) + proxyGroupEps := &discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ts-proxygroup-egress-abcd1-ipv4", + Namespace: "tailscale", + Labels: map[string]string{ + discoveryv1.LabelServiceName: "ts-proxygroup-egress-abcd1", + kubetypes.LabelManaged: "true", + LabelParentName: "external-service", + LabelParentNamespace: "default", + LabelParentType: "svc", + labelProxyGroup: "test-proxy-group", + labelSvcType: typeEgress, + }, + }, + AddressType: discoveryv1.AddressTypeIPv4, + Endpoints: []discoveryv1.Endpoint{{ + Addresses: []string{"10.1.0.100", "10.1.0.101", "10.1.0.102"}, // Pod IPs that should NOT be used + Conditions: discoveryv1.EndpointConditions{ + Ready: ptr.To(true), + 
Serving: ptr.To(true), + Terminating: ptr.To(false), + }, + }}, + Ports: []discoveryv1.EndpointPort{{ + Port: ptr.To(int32(10443)), + }}, + } + + mustCreate(t, fc, proxyGroupEgressSvc) + mustCreate(t, fc, proxyGroupEps) + expectReconciled(t, dnsRR, "tailscale", "ts-proxygroup-egress-abcd1") + + // Verify DNS record uses ClusterIP Service IP, not Pod IPs + wantHosts["external-service.example.ts.net"] = []string{"10.0.100.50"} + expectHostsRecords(t, fc, wantHosts) + + // 9. ProxyGroup egress DNS record updates when ClusterIP changes + t.Log("test case 9: ProxyGroup egress ClusterIP change") + mustUpdate(t, fc, "tailscale", "ts-proxygroup-egress-abcd1", func(svc *corev1.Service) { + svc.Spec.ClusterIP = "10.0.100.51" + }) + expectReconciled(t, dnsRR, "tailscale", "ts-proxygroup-egress-abcd1") + wantHosts["external-service.example.ts.net"] = []string{"10.0.100.51"} + expectHostsRecords(t, fc, wantHosts) + + // 10. Test ProxyGroup service deletion and DNS cleanup + t.Log("test case 10: ProxyGroup egress service deletion") + mustDeleteAll(t, fc, proxyGroupEgressSvc) + expectReconciled(t, dnsRR, "tailscale", "ts-proxygroup-egress-abcd1") + delete(wantHosts, "external-service.example.ts.net") + expectHostsRecords(t, fc, wantHosts) +} + +func TestDNSRecordsReconcilerErrorCases(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + dnsRR := &dnsRecordsReconciler{ + logger: zl.Sugar(), + } + + testSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: corev1.ServiceSpec{Type: corev1.ServiceTypeClusterIP}, + } + + // Test invalid IP format + testSvc.Spec.ClusterIP = "invalid-ip" + _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + if err == nil { + t.Error("expected error for invalid IP format") + } + + // Test valid IP + testSvc.Spec.ClusterIP = "10.0.100.50" + _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + if err != nil { + t.Errorf("unexpected error for valid IP: %v", err) + } } func 
headlessSvcForParent(o client.Object, typ string) *corev1.Service { From 5154bbb0b3f556b7cc1c7ba2f92eea92b4d3bfb9 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 28 Jul 2025 11:15:14 +0100 Subject: [PATCH 0161/1093] k8s-operator: adding session type to cast header (#16660) Updates #16490 Signed-off-by: chaosinthecrd --- k8s-operator/sessionrecording/hijacker.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 675a9b1ddacc6..0df72b6c3aaaa 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -184,9 +184,10 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, SrcNode: strings.TrimSuffix(h.who.Node.Name, "."), SrcNodeID: h.who.Node.StableID, Kubernetes: &sessionrecording.Kubernetes{ - PodName: h.pod, - Namespace: h.ns, - Container: container, + PodName: h.pod, + Namespace: h.ns, + Container: container, + SessionType: string(h.sessionType), }, } if !h.who.Node.IsTagged() { From 57318695656ec693f75fc858ea853ee4c4591f57 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Mon, 28 Jul 2025 11:38:34 +0100 Subject: [PATCH 0162/1093] health: add an ETag to UnhealthyState for change detection Updates tailscale/corp#30596 Signed-off-by: James Sanderson --- control/controlclient/map_test.go | 4 +- health/health_test.go | 176 +++++++++++++++++++++++++++++- health/state.go | 38 ++++++- ipn/ipnlocal/local_test.go | 4 +- 4 files changed, 215 insertions(+), 7 deletions(-) diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 7e42f6f6a8b25..ff5df8207ba8f 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -1361,7 +1361,7 @@ func TestNetmapHealthIntegration(t *testing.T) { } } - if d := cmp.Diff(want, got); d != "" { + if d := cmp.Diff(want, got, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); d != "" { 
t.Fatalf("CurrentStatus().Warnings[\"control-health*\"] different than expected (-want +got)\n%s", d) } } @@ -1414,7 +1414,7 @@ func TestNetmapDisplayMessageIntegration(t *testing.T) { }, } - if diff := cmp.Diff(want, state.Warnings); diff != "" { + if diff := cmp.Diff(want, state.Warnings, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); diff != "" { t.Errorf("unexpected message contents (-want +got):\n%s", diff) } } diff --git a/health/health_test.go b/health/health_test.go index 53f012ecffd55..d66cea06c0f0b 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -13,8 +13,10 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/tstime" "tailscale.com/types/opt" "tailscale.com/util/usermetric" "tailscale.com/version" @@ -517,7 +519,7 @@ func TestControlHealth(t *testing.T) { delete(gotWarns, k) } } - if diff := cmp.Diff(wantWarns, gotWarns); diff != "" { + if diff := cmp.Diff(wantWarns, gotWarns, cmpopts.IgnoreFields(UnhealthyState{}, "ETag")); diff != "" { t.Fatalf(`CurrentState().Warnings["control-health-*"] wrong (-want +got):\n%s`, diff) } }) @@ -664,3 +666,175 @@ func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { t.Error("watcher got called, want it to not be called") } } + +// TestCurrentStateETagControlHealth tests that the ETag on an [UnhealthyState] +// created from Control health & returned by [Tracker.CurrentState] is different +// when the details of the [tailcfg.DisplayMessage] are different. 
+func TestCurrentStateETagControlHealth(t *testing.T) { + ht := Tracker{} + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + msg := tailcfg.DisplayMessage{ + Title: "Test Warning", + Text: "This is a test warning.", + Severity: tailcfg.SeverityHigh, + ImpactsConnectivity: true, + PrimaryAction: &tailcfg.DisplayMessageAction{ + URL: "https://example.com/", + Label: "open", + }, + } + + type test struct { + name string + change func(tailcfg.DisplayMessage) tailcfg.DisplayMessage + wantChangedETag bool + } + tests := []test{ + { + name: "same_value", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { return m }, + wantChangedETag: false, + }, + { + name: "different_severity", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.Severity = tailcfg.SeverityLow + return m + }, + wantChangedETag: true, + }, + { + name: "different_title", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.Title = "Different Title" + return m + }, + wantChangedETag: true, + }, + { + name: "different_text", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.Text = "This is a different text." 
+ return m + }, + wantChangedETag: true, + }, + { + name: "different_impacts_connectivity", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.ImpactsConnectivity = false + return m + }, + wantChangedETag: true, + }, + { + name: "different_primary_action_label", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.PrimaryAction.Label = "new_label" + return m + }, + wantChangedETag: true, + }, + { + name: "different_primary_action_url", + change: func(m tailcfg.DisplayMessage) tailcfg.DisplayMessage { + m.PrimaryAction.URL = "https://new.example.com/" + return m + }, + wantChangedETag: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message": msg, + }) + state := ht.CurrentState().Warnings["control-health.test-message"] + + newMsg := test.change(msg) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message": newMsg, + }) + newState := ht.CurrentState().Warnings["control-health.test-message"] + + if (state.ETag != newState.ETag) != test.wantChangedETag { + if test.wantChangedETag { + t.Errorf("got unchanged ETag, want changed (ETag was %q)", newState.ETag) + } else { + t.Errorf("got changed ETag, want unchanged") + } + } + }) + } +} + +// TestCurrentStateETagWarnable tests that the ETag on an [UnhealthyState] +// created from a Warnable & returned by [Tracker.CurrentState] is different +// when the details of the Warnable are different. 
+func TestCurrentStateETagWarnable(t *testing.T) { + newTracker := func(clock tstime.Clock) *Tracker { + ht := &Tracker{ + testClock: clock, + } + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + return ht + } + + t.Run("new_args", func(t *testing.T) { + ht := newTracker(nil) + + ht.SetUnhealthy(testWarnable, Args{ArgError: "initial value"}) + state := ht.CurrentState().Warnings[testWarnable.Code] + + ht.SetUnhealthy(testWarnable, Args{ArgError: "new value"}) + newState := ht.CurrentState().Warnings[testWarnable.Code] + + if state.ETag == newState.ETag { + t.Errorf("got unchanged ETag, want changed (ETag was %q)", newState.ETag) + } + }) + + t.Run("new_broken_since", func(t *testing.T) { + clock1 := tstest.NewClock(tstest.ClockOpts{ + Start: time.Unix(123, 0), + }) + ht1 := newTracker(clock1) + + ht1.SetUnhealthy(testWarnable, Args{}) + state := ht1.CurrentState().Warnings[testWarnable.Code] + + // Use a second tracker to get a different broken since time + clock2 := tstest.NewClock(tstest.ClockOpts{ + Start: time.Unix(456, 0), + }) + ht2 := newTracker(clock2) + + ht2.SetUnhealthy(testWarnable, Args{}) + newState := ht2.CurrentState().Warnings[testWarnable.Code] + + if state.ETag == newState.ETag { + t.Errorf("got unchanged ETag, want changed (ETag was %q)", newState.ETag) + } + }) + + t.Run("no_change", func(t *testing.T) { + clock := tstest.NewClock(tstest.ClockOpts{}) + ht1 := newTracker(clock) + + ht1.SetUnhealthy(testWarnable, Args{}) + state := ht1.CurrentState().Warnings[testWarnable.Code] + + // Using a second tracker because SetUnhealthy with no changes is a no-op + ht2 := newTracker(clock) + ht2.SetUnhealthy(testWarnable, Args{}) + newState := ht2.CurrentState().Warnings[testWarnable.Code] + + if state.ETag != newState.ETag { + t.Errorf("got changed ETag, want unchanged") + } + }) +} diff --git a/health/state.go b/health/state.go index b5e6a8a3894d8..116518629f27e 100644 --- a/health/state.go +++ b/health/state.go @@ -4,6 +4,9 @@ package 
health import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" "time" "tailscale.com/tailcfg" @@ -35,6 +38,36 @@ type UnhealthyState struct { DependsOn []WarnableCode `json:",omitempty"` ImpactsConnectivity bool `json:",omitempty"` PrimaryAction *UnhealthyStateAction `json:",omitempty"` + + // ETag identifies a specific version of an UnhealthyState. If the contents + // of the other fields of two UnhealthyStates are the same, the ETags will + // be the same. If the contents differ, the ETags will also differ. The + // implementation is not defined and the value is opaque: it might be a + // hash, it might be a simple counter. Implementations should not rely on + // any specific implementation detail or format of the ETag string other + // than string (in)equality. + ETag string `json:",omitzero"` +} + +// hash computes a deep hash of UnhealthyState which will be stable across +// different runs of the same binary. +func (u UnhealthyState) hash() []byte { + hasher := sha256.New() + enc := json.NewEncoder(hasher) + + // hash.Hash.Write never returns an error, so this will only fail if u is + // not marshalable, in which case we have much bigger problems. + _ = enc.Encode(u) + return hasher.Sum(nil) +} + +// withETag returns a copy of UnhealthyState with an ETag set. The ETag will be +// the same for all UnhealthyState instances that are equal. If calculating the +// ETag errors, it returns a copy of the UnhealthyState with an empty ETag. +func (u UnhealthyState) withETag() UnhealthyState { + u.ETag = "" + u.ETag = hex.EncodeToString(u.hash()) + return u } // UnhealthyStateAction represents an action (URL and link) to be presented to @@ -107,7 +140,8 @@ func (t *Tracker) CurrentState() *State { // that are unhealthy. 
continue } - wm[w.Code] = *w.unhealthyState(ws) + state := w.unhealthyState(ws) + wm[w.Code] = state.withETag() } for id, msg := range t.lastNotifiedControlMessages { @@ -127,7 +161,7 @@ func (t *Tracker) CurrentState() *State { } } - wm[state.WarnableCode] = state + wm[state.WarnableCode] = state.withETag() } return &State{ diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index dd2837022f064..37b81c84b80f9 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -6807,7 +6807,7 @@ func TestDisplayMessagesURLFilter(t *testing.T) { Severity: health.SeverityHigh, } - if diff := cmp.Diff(want, got); diff != "" { + if diff := cmp.Diff(want, got, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); diff != "" { t.Errorf("Unexpected message content (-want/+got):\n%s", diff) } } @@ -6879,7 +6879,7 @@ func TestDisplayMessageIPNBus(t *testing.T) { } got, ok := n.Health.Warnings[wantID] if ok { - if diff := cmp.Diff(tt.wantWarning, got); diff != "" { + if diff := cmp.Diff(tt.wantWarning, got, cmpopts.IgnoreFields(health.UnhealthyState{}, "ETag")); diff != "" { t.Errorf("unexpected warning details (-want/+got):\n%s", diff) return true // we failed the test so tell the watcher we've seen what we need to to stop it waiting } From 02084629e208db6e7601a00777619697b49ac770 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 28 Jul 2025 12:03:08 +0100 Subject: [PATCH 0163/1093] k8s-operator: handle multiple WebSocket frames per read (#16678) When kubectl starts an interactive attach session, it sends 2 resize messages in quick succession. It seems that particularly in HTTP mode, we often receive both of these WebSocket frames from the underlying connection in a single read. However, our parser currently assumes 0-1 frames per read, and leaves the second frame in the read buffer until the next read from the underlying connection. 
It doesn't take long after that before we end up failing to skip a control message as we normally should, and then we parse a control message as though it will have a stream ID (part of the Kubernetes protocol) and error out. Instead, we should keep parsing frames from the read buffer for as long as we're able to parse complete frames, so this commit refactors the messages parsing logic into a loop based on the contents of the read buffer being non-empty. See k/k staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go for full details of the resize messages. There are at least a couple more multiple-frame read edge cases we should handle, but this commit is very conservatively fixing a single observed issue to make it a low-risk candidate for cherry picking. Updates #13358 Change-Id: Iafb91ad1cbeed9c5231a1525d4563164fc1f002f Signed-off-by: Tom Proctor --- k8s-operator/api-proxy/proxy.go | 6 +- k8s-operator/sessionrecording/hijacker.go | 1 - k8s-operator/sessionrecording/ws/conn.go | 97 ++++++++++--------- k8s-operator/sessionrecording/ws/conn_test.go | 32 +++++- k8s-operator/sessionrecording/ws/message.go | 4 +- 5 files changed, 86 insertions(+), 54 deletions(-) diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index c648e1622537d..ff0373270b2c0 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -114,8 +114,9 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { mux.HandleFunc("GET /api/v1/namespaces/{namespace}/pods/{pod}/attach", ap.serveAttachWS) ap.hs = &http.Server{ - Handler: mux, - ErrorLog: zap.NewStdLog(ap.log.Desugar()), + Handler: mux, + ErrorLog: zap.NewStdLog(ap.log.Desugar()), + TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), } mode := "noauth" @@ -140,7 +141,6 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { GetCertificate: ap.lc.GetCertificate, NextProtos: []string{"http/1.1"}, } - ap.hs.TLSNextProto = make(map[string]func(*http.Server,
*tls.Conn, http.Handler)) } else { var err error tsLn, err = ap.ts.Listen("tcp", ":80") diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 0df72b6c3aaaa..789a9fdb9f6a3 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -237,7 +237,6 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, if err := lc.Close(); err != nil { h.log.Infof("error closing recorder connections: %v", err) } - return }() return lc, nil } diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go index 0d8aefaace52e..a34379658caa2 100644 --- a/k8s-operator/sessionrecording/ws/conn.go +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -148,6 +148,8 @@ func (c *conn) Read(b []byte) (int, error) { return 0, nil } + // TODO(tomhjp): If we get multiple frames in a single Read with different + // types, we may parse the second frame with the wrong type. typ := messageType(opcode(b)) if (typ == noOpcode && c.readMsgIsIncomplete()) || c.readBufHasIncompleteFragment() { // subsequent fragment if typ, err = c.curReadMsgType(); err != nil { @@ -157,6 +159,8 @@ func (c *conn) Read(b []byte) (int, error) { // A control message can not be fragmented and we are not interested in // these messages. Just return. + // TODO(tomhjp): If we get multiple frames in a single Read, we may skip + // some non-control messages. if isControlMessage(typ) { return n, nil } @@ -169,62 +173,65 @@ func (c *conn) Read(b []byte) (int, error) { return n, nil } - readMsg := &message{typ: typ} // start a new message... - // ... or pick up an already started one if the previous fragment was not final. 
- if c.readMsgIsIncomplete() || c.readBufHasIncompleteFragment() { - readMsg = c.currentReadMsg - } - if _, err := c.readBuf.Write(b[:n]); err != nil { return 0, fmt.Errorf("[unexpected] error writing message contents to read buffer: %w", err) } - ok, err := readMsg.Parse(c.readBuf.Bytes(), c.log) - if err != nil { - return 0, fmt.Errorf("error parsing message: %v", err) - } - if !ok { // incomplete fragment - return n, nil - } - c.readBuf.Next(len(readMsg.raw)) - - if readMsg.isFinalized && !c.readMsgIsIncomplete() { - // we want to send stream resize messages for terminal sessions - // Stream IDs for websocket streams are static. - // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218 - if readMsg.streamID.Load() == remotecommand.StreamResize && c.hasTerm { - var msg tsrecorder.ResizeMsg - if err = json.Unmarshal(readMsg.payload, &msg); err != nil { - return 0, fmt.Errorf("error umarshalling resize message: %w", err) - } + for c.readBuf.Len() != 0 { + readMsg := &message{typ: typ} // start a new message... + // ... or pick up an already started one if the previous fragment was not final. + if c.readMsgIsIncomplete() { + readMsg = c.currentReadMsg + } - c.ch.Width = msg.Width - c.ch.Height = msg.Height - - var isInitialResize bool - c.writeCastHeaderOnce.Do(func() { - isInitialResize = true - // If this is a session with a terminal attached, - // we must wait for the terminal width and - // height to be parsed from a resize message - // before sending CastHeader, else tsrecorder - // will not be able to play this recording. 
- err = c.rec.WriteCastHeader(c.ch) - close(c.initialCastHeaderSent) - }) - if err != nil { - return 0, fmt.Errorf("error writing CastHeader: %w", err) - } + ok, err := readMsg.Parse(c.readBuf.Bytes(), c.log) + if err != nil { + return 0, fmt.Errorf("error parsing message: %v", err) + } + if !ok { // incomplete fragment + return n, nil + } + c.readBuf.Next(len(readMsg.raw)) + + if readMsg.isFinalized && !c.readMsgIsIncomplete() { + // we want to send stream resize messages for terminal sessions + // Stream IDs for websocket streams are static. + // https://github.com/kubernetes/client-go/blob/v0.30.0-rc.1/tools/remotecommand/websocket.go#L218 + if readMsg.streamID.Load() == remotecommand.StreamResize && c.hasTerm { + var msg tsrecorder.ResizeMsg + if err = json.Unmarshal(readMsg.payload, &msg); err != nil { + return 0, fmt.Errorf("error umarshalling resize message: %w", err) + } + + c.ch.Width = msg.Width + c.ch.Height = msg.Height + + var isInitialResize bool + c.writeCastHeaderOnce.Do(func() { + isInitialResize = true + // If this is a session with a terminal attached, + // we must wait for the terminal width and + // height to be parsed from a resize message + // before sending CastHeader, else tsrecorder + // will not be able to play this recording. 
+ err = c.rec.WriteCastHeader(c.ch) + close(c.initialCastHeaderSent) + }) + if err != nil { + return 0, fmt.Errorf("error writing CastHeader: %w", err) + } - if !isInitialResize { - if err := c.rec.WriteResize(msg.Height, msg.Width); err != nil { - return 0, fmt.Errorf("error writing resize message: %w", err) + if !isInitialResize { + if err := c.rec.WriteResize(msg.Height, msg.Width); err != nil { + return 0, fmt.Errorf("error writing resize message: %w", err) + } } } } + + c.currentReadMsg = readMsg } - c.currentReadMsg = readMsg return n, nil } diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index f29154c622602..5e23845a70d17 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -58,15 +58,39 @@ func Test_conn_Read(t *testing.T) { wantCastHeaderHeight: 20, }, { - name: "two_reads_resize_message", - inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}}, + name: "resize_data_frame_two_in_one_read", + inputs: [][]byte{ + fmt.Appendf(nil, "%s%s", + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + append([]byte{0x82, lenResizeMsgPayload}, testResizeMsg...), + ), + }, + wantRecorded: append(fakes.AsciinemaCastHeaderMsg(t, 10, 20), fakes.AsciinemaCastResizeMsg(t, 10, 20)...), + wantCastHeaderWidth: 10, + wantCastHeaderHeight: 20, + }, + { + name: "two_reads_resize_message", + inputs: [][]byte{ + // op, len, stream ID, `{"width` + {0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, + // op, len, stream ID, `:10,"height":20}` + {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3a, 0x32, 0x30, 0x7d}, + }, wantCastHeaderWidth: 10, wantCastHeaderHeight: 20, wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), }, { - name: 
"three_reads_resize_message_with_split_fragment", - inputs: [][]byte{{0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, {0x80, 0x11, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, {0x22, 0x3a, 0x32, 0x30, 0x7d}}, + name: "three_reads_resize_message_with_split_fragment", + inputs: [][]byte{ + // op, len, stream ID, `{"width"` + {0x2, 0x9, 0x4, 0x7b, 0x22, 0x77, 0x69, 0x64, 0x74, 0x68, 0x22}, + // op, len, stream ID, `:10,"height` + {0x00, 0x0c, 0x4, 0x3a, 0x31, 0x30, 0x2c, 0x22, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74}, + // op, len, stream ID, `":20}` + {0x80, 0x06, 0x4, 0x22, 0x3a, 0x32, 0x30, 0x7d}, + }, wantCastHeaderWidth: 10, wantCastHeaderHeight: 20, wantRecorded: fakes.AsciinemaCastHeaderMsg(t, 10, 20), diff --git a/k8s-operator/sessionrecording/ws/message.go b/k8s-operator/sessionrecording/ws/message.go index 713febec76ae8..35667ae21a5d0 100644 --- a/k8s-operator/sessionrecording/ws/message.go +++ b/k8s-operator/sessionrecording/ws/message.go @@ -7,10 +7,10 @@ package ws import ( "encoding/binary" + "errors" "fmt" "sync/atomic" - "github.com/pkg/errors" "go.uber.org/zap" "golang.org/x/net/websocket" @@ -139,6 +139,8 @@ func (msg *message) Parse(b []byte, log *zap.SugaredLogger) (bool, error) { return false, errors.New("[unexpected] received a message fragment with no stream ID") } + // Stream ID will be one of the constants from: + // https://github.com/kubernetes/kubernetes/blob/f9ed14bf9b1119a2e091f4b487a3b54930661034/staging/src/k8s.io/apimachinery/pkg/util/remotecommand/constants.go#L57-L64 streamID := uint32(msgPayload[0]) if !isInitialFragment && msg.streamID.Load() != streamID { return false, fmt.Errorf("[unexpected] received message fragments with mismatched streamIDs %d and %d", msg.streamID.Load(), streamID) From 61d42eb300799f4617327f5e5145b69ac795fecf Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 28 Jul 2025 13:33:46 +0100 Subject: [PATCH 0164/1093] k8s-operator: fix test flake (#16680) This occasionally 
panics waiting on a nil ctx, but was missed in the previous PR because it's quite a rare flake as it needs to progress to a specific point in the parser. Updates #16678 Change-Id: Ifd36dfc915b153aede36b8ee39eff83750031f95 Signed-off-by: Tom Proctor --- k8s-operator/sessionrecording/ws/conn_test.go | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index 5e23845a70d17..f2fd4ea55f554 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -9,6 +9,7 @@ import ( "context" "fmt" "reflect" + "runtime/debug" "testing" "time" @@ -284,19 +285,28 @@ func Test_conn_WriteRand(t *testing.T) { sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) for i := range 100 { - tc := &fakes.TestConn{} - c := &conn{ - Conn: tc, - log: zl.Sugar(), - rec: rec, - } - bb := fakes.RandomBytes(t) - for j, input := range bb { - f := func() { - c.Write(input) + t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) { + tc := &fakes.TestConn{} + c := &conn{ + Conn: tc, + log: zl.Sugar(), + rec: rec, + + ctx: context.Background(), // ctx must be non-nil. + initialCastHeaderSent: make(chan struct{}), } - testPanic(t, f, fmt.Sprintf("[%d %d] Write: panic parsing input of length %d first bytes %b current write message %+#v", i, j, len(input), firstBytes(input), c.currentWriteMsg)) - } + // Never block for random data. 
+ c.writeCastHeaderOnce.Do(func() { + close(c.initialCastHeaderSent) + }) + bb := fakes.RandomBytes(t) + for j, input := range bb { + f := func() { + c.Write(input) + } + testPanic(t, f, fmt.Sprintf("[%d %d] Write: panic parsing input of length %d first bytes %b current write message %+#v", i, j, len(input), firstBytes(input), c.currentWriteMsg)) + } + }) } } @@ -304,7 +314,7 @@ func testPanic(t *testing.T, f func(), msg string) { t.Helper() defer func() { if r := recover(); r != nil { - t.Fatal(msg, r) + t.Fatal(msg, r, string(debug.Stack())) } }() f() From 4a435aedcb357877b84f776c99fba4517796b01d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 08:41:59 -0600 Subject: [PATCH 0165/1093] .github: Bump github/codeql-action from 3.29.2 to 3.29.3 (#16615) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.2 to 3.29.3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/181d5eefc20863364f96762470ba6f862bdef56b...d6bbdef45e766d081b84a2def353b0055f728d3e) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4e129b8471ea5..e5616d83a4510 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/autobuild@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 + uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 From c962fefa3ed2a1b7234ccbf1a1f9a8bd1c6ef9a0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 08:51:58 -0600 Subject: [PATCH 0166/1093] build(deps): bump form-data from 4.0.0 to 4.0.4 in /client/web (#16623) Bumps [form-data](https://github.com/form-data/form-data) from 4.0.0 to 4.0.4. - [Release notes](https://github.com/form-data/form-data/releases) - [Changelog](https://github.com/form-data/form-data/blob/master/CHANGELOG.md) - [Commits](https://github.com/form-data/form-data/compare/v4.0.0...v4.0.4) --- updated-dependencies: - dependency-name: form-data dependency-version: 4.0.4 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- client/web/yarn.lock | 105 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 102 insertions(+), 3 deletions(-) diff --git a/client/web/yarn.lock b/client/web/yarn.lock index a9b2ae8767b99..455f8dde024d8 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -2450,6 +2450,14 @@ cac@^6.7.14: resolved "https://registry.yarnpkg.com/cac/-/cac-6.7.14.tgz#804e1e6f506ee363cb0e3ccbb09cad5dd9870959" integrity sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ== +call-bind-apply-helpers@^1.0.1, call-bind-apply-helpers@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" + integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + call-bind@^1.0.0, call-bind@^1.0.2, call-bind@^1.0.4, call-bind@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.5.tgz#6fa2b7845ce0ea49bf4d8b9ef64727a2c2e2e513" @@ -2767,6 +2775,15 @@ dot-case@^3.0.4: no-case "^3.0.4" tslib "^2.0.3" +dunder-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a" + integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A== + dependencies: + call-bind-apply-helpers "^1.0.1" + es-errors "^1.3.0" + gopd "^1.2.0" + electron-to-chromium@^1.4.535: version "1.4.596" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.596.tgz#6752d1aa795d942d49dfc5d3764d6ea283fab1d7" @@ -2834,6 +2851,16 @@ es-abstract@^1.22.1: unbox-primitive "^1.0.2" which-typed-array "^1.1.13" +es-define-property@^1.0.1: + version "1.0.1" + 
resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa" + integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g== + +es-errors@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== + es-iterator-helpers@^1.0.12, es-iterator-helpers@^1.0.15: version "1.0.15" resolved "https://registry.yarnpkg.com/es-iterator-helpers/-/es-iterator-helpers-1.0.15.tgz#bd81d275ac766431d19305923707c3efd9f1ae40" @@ -2854,6 +2881,13 @@ es-iterator-helpers@^1.0.12, es-iterator-helpers@^1.0.15: iterator.prototype "^1.1.2" safe-array-concat "^1.0.1" +es-object-atoms@^1.0.0, es-object-atoms@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1" + integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA== + dependencies: + es-errors "^1.3.0" + es-set-tostringtag@^2.0.1: version "2.0.2" resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.2.tgz#11f7cc9f63376930a5f20be4915834f4bc74f9c9" @@ -2863,6 +2897,16 @@ es-set-tostringtag@^2.0.1: has-tostringtag "^1.0.0" hasown "^2.0.0" +es-set-tostringtag@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d" + integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA== + dependencies: + es-errors "^1.3.0" + get-intrinsic "^1.2.6" + has-tostringtag "^1.0.2" + hasown "^2.0.2" + es-shim-unscopables@^1.0.0: version "1.0.2" resolved 
"https://registry.yarnpkg.com/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz#1f6942e71ecc7835ed1c8a83006d8771a63a3763" @@ -3270,12 +3314,14 @@ for-each@^0.3.3: is-callable "^1.1.3" form-data@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.0.tgz#93919daeaf361ee529584b9b31664dc12c9fa452" - integrity sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww== + version "4.0.4" + resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.4.tgz#784cdcce0669a9d68e94d11ac4eea98088edd2c4" + integrity sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow== dependencies: asynckit "^0.4.0" combined-stream "^1.0.8" + es-set-tostringtag "^2.1.0" + hasown "^2.0.2" mime-types "^2.1.12" fraction.js@^4.2.0: @@ -3333,11 +3379,35 @@ get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3, get-intrinsic@ has-symbols "^1.0.3" hasown "^2.0.0" +get-intrinsic@^1.2.6: + version "1.3.0" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01" + integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ== + dependencies: + call-bind-apply-helpers "^1.0.2" + es-define-property "^1.0.1" + es-errors "^1.3.0" + es-object-atoms "^1.1.1" + function-bind "^1.1.2" + get-proto "^1.0.1" + gopd "^1.2.0" + has-symbols "^1.1.0" + hasown "^2.0.2" + math-intrinsics "^1.1.0" + get-nonce@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/get-nonce/-/get-nonce-1.0.1.tgz#fdf3f0278073820d2ce9426c18f07481b1e0cdf3" integrity sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q== +get-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1" + integrity 
sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g== + dependencies: + dunder-proto "^1.0.1" + es-object-atoms "^1.0.0" + get-stream@^8.0.1: version "8.0.1" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-8.0.1.tgz#def9dfd71742cd7754a7761ed43749a27d02eca2" @@ -3437,6 +3507,11 @@ gopd@^1.0.1: dependencies: get-intrinsic "^1.1.3" +gopd@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" + integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg== + graphemer@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6" @@ -3474,6 +3549,11 @@ has-symbols@^1.0.2, has-symbols@^1.0.3: resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== +has-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338" + integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ== + has-tostringtag@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.0.tgz#7e133818a7d394734f941e73c3d3f9291e658b25" @@ -3481,6 +3561,13 @@ has-tostringtag@^1.0.0: dependencies: has-symbols "^1.0.2" +has-tostringtag@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== + dependencies: + has-symbols "^1.0.3" + hasown@^2.0.0: version "2.0.0" resolved 
"https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" @@ -3488,6 +3575,13 @@ hasown@^2.0.0: dependencies: function-bind "^1.1.2" +hasown@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== + dependencies: + function-bind "^1.1.2" + html-encoding-sniffer@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz#696df529a7cfd82446369dc5193e590a3735b448" @@ -3992,6 +4086,11 @@ magic-string@^0.30.5: dependencies: "@jridgewell/sourcemap-codec" "^1.4.15" +math-intrinsics@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9" + integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g== + merge-stream@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" From 5ce3845a021b8384814f8279546af80e9fddbf39 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 28 Jul 2025 09:01:41 -0700 Subject: [PATCH 0167/1093] net/portmapper: avert a panic when a mapping is not available (#16686) Ideally when we attempt to create a new port mapping, we should not return without error when no mapping is available. We already log these cases as unexpected, so this change is just to avoid panicking dispatch on the invalid result in those cases. We still separately need to fix the underlying control flow. Updates #16662 Change-Id: I51e8a116b922b49eda45e31cd27f6b89dd51abc8 Signed-off-by: M. J.
Fromberger --- net/portmapper/portmapper.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 1c6c7634bf34a..c82fbf9da112a 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -507,6 +507,13 @@ func (c *Client) createMapping() { c.logf("createOrGetMapping: %v", err) } return + } else if mapping == nil { + return + + // TODO(creachadair): This was already logged in createOrGetMapping. + // It really should not happen at all, but we will need to untangle + // the control flow to eliminate that possibility. Meanwhile, this + // mitigates a panic downstream, cf. #16662. } if c.updates != nil { c.updates.Publish(Mapping{ From a9f3fd1c67ca427aceee708f319a0a12df6a5de8 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 28 Jul 2025 09:26:24 -0700 Subject: [PATCH 0168/1093] wgengine/magicsock: fix magicsock deadlock around Conn.NoteRecvActivity (#16687) Updates #16651 Updates tailscale/corp#30836 Signed-off-by: Jordan Whited --- tailcfg/tailcfg.go | 3 ++- wgengine/magicsock/magicsock.go | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 307b39f93903c..5e3c4e5720a92 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -167,7 +167,8 @@ type CapabilityVersion int // - 120: 2025-07-15: Client understands peer relay disco messages, and implements peer client and relay server functions // - 121: 2025-07-19: Client understands peer relay endpoint alloc with [disco.AllocateUDPRelayEndpointRequest] & [disco.AllocateUDPRelayEndpointResponse] // - 122: 2025-07-21: Client sends Hostinfo.ExitNodeID to report which exit node it has selected, if any. 
-const CurrentCapabilityVersion CapabilityVersion = 122 +// - 123: 2025-07-28: fix deadlock regression from cryptokey routing change (issue #16651) +const CurrentCapabilityVersion CapabilityVersion = 123 // ID is an integer ID for a user, node, or login allocated by the // control plane. diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index fb7f5edcbd14f..d2835aed34547 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -4119,8 +4119,11 @@ func (le *lazyEndpoint) InitiationMessagePublicKey(peerPublicKey [32]byte) { return } le.c.mu.Lock() - defer le.c.mu.Unlock() ep, ok := le.c.peerMap.endpointForNodeKey(pubKey) + // [Conn.mu] must not be held while [Conn.noteRecvActivity] is called, which + // [endpoint.noteRecvActivity] can end up calling. See + // [Options.NoteRecvActivity] docs. + le.c.mu.Unlock() if !ok { return } From 4df02bbb486d07b0ad23f59c4cb3675ab691e79b Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 28 Jul 2025 12:23:40 -0500 Subject: [PATCH 0169/1093] util/syspolicy/setting: use a custom marshaler for time.Duration jsonv2 now returns an error when you marshal or unmarshal a time.Duration without an explicit format flag. This is an intentional, temporary choice until the default [time.Duration] representation is decided (see golang/go#71631). setting.Snapshot can hold time.Duration values inside a map[string]any, so the jsonv2 update breaks marshaling. In this PR, we start using a custom marshaler until that decision is made or golang/go#71664 lets us specify the format explicitly. This fixes `tailscale syspolicy list` failing when KeyExpirationNotice or any other time.Duration policy setting is configured. 
Fixes #16683 Signed-off-by: Nick Khyl --- util/syspolicy/setting/snapshot.go | 21 ++++++++++++++++++++- util/syspolicy/setting/snapshot_test.go | 12 ++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 087325a04c6f1..3a40785dce9de 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -9,6 +9,7 @@ import ( "maps" "slices" "strings" + "time" jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" @@ -152,6 +153,24 @@ var ( _ jsonv2.UnmarshalerFrom = (*Snapshot)(nil) ) +// As of 2025-07-28, jsonv2 no longer has a default representation for [time.Duration], +// so we need to provide a custom marshaler. +// +// This is temporary until the decision on the default representation is made +// (see https://github.com/golang/go/issues/71631#issuecomment-2981670799). +// +// In the future, we might either use the default representation (if compatible with +// [time.Duration.String]) or specify something like json.WithFormat[time.Duration]("units") +// when golang/go#71664 is implemented. +// +// TODO(nickkhyl): revisit this when the decision on the default [time.Duration] +// representation is made in golang/go#71631 and/or golang/go#71664 is implemented. +var formatDurationAsUnits = jsonv2.JoinOptions( + jsonv2.WithMarshalers(jsonv2.MarshalToFunc(func(e *jsontext.Encoder, t time.Duration) error { + return e.WriteToken(jsontext.String(t.String())) + })), +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (s *Snapshot) MarshalJSONTo(out *jsontext.Encoder) error { data := &snapshotJSON{} @@ -159,7 +178,7 @@ func (s *Snapshot) MarshalJSONTo(out *jsontext.Encoder) error { data.Summary = s.summary data.Settings = s.m } - return jsonv2.MarshalEncode(out, data) + return jsonv2.MarshalEncode(out, data, formatDurationAsUnits) } // UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go index d41b362f06976..19f014acaa831 100644 --- a/util/syspolicy/setting/snapshot_test.go +++ b/util/syspolicy/setting/snapshot_test.go @@ -491,6 +491,18 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { snapshot: NewSnapshot(map[Key]RawItem{"ListPolicy": RawItemOf([]string{"Value1", "Value2"})}), wantJSON: `{"Settings": {"ListPolicy": {"Value": ["Value1", "Value2"]}}}`, }, + { + name: "Duration/Zero", + snapshot: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf(time.Duration(0))}), + wantJSON: `{"Settings": {"DurationPolicy": {"Value": "0s"}}}`, + wantBack: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf("0s")}), + }, + { + name: "Duration/NonZero", + snapshot: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf(2 * time.Hour)}), + wantJSON: `{"Settings": {"DurationPolicy": {"Value": "2h0m0s"}}}`, + wantBack: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf("2h0m0s")}), + }, { name: "Empty/With-Summary", snapshot: NewSnapshot( From e5e4386f334c8eb222bffc94c0de011a37a8bc29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 23:36:36 -0600 Subject: [PATCH 0170/1093] build(deps): bump @babel/runtime from 7.23.4 to 7.26.10 in /client/web (#15299) Bumps [@babel/runtime](https://github.com/babel/babel/tree/HEAD/packages/babel-runtime) from 7.23.4 to 7.26.10. - [Release notes](https://github.com/babel/babel/releases) - [Changelog](https://github.com/babel/babel/blob/main/CHANGELOG.md) - [Commits](https://github.com/babel/babel/commits/v7.26.10/packages/babel-runtime) --- updated-dependencies: - dependency-name: "@babel/runtime" dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- client/web/yarn.lock | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/client/web/yarn.lock b/client/web/yarn.lock index 455f8dde024d8..7c9d9222ec727 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -1087,11 +1087,9 @@ integrity sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA== "@babel/runtime@^7.12.5", "@babel/runtime@^7.13.10", "@babel/runtime@^7.16.3", "@babel/runtime@^7.23.2", "@babel/runtime@^7.8.4": - version "7.23.4" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.23.4.tgz#36fa1d2b36db873d25ec631dcc4923fdc1cf2e2e" - integrity sha512-2Yv65nlWnWlSpe3fXEyX5i7fx5kIKo4Qbcj+hMO0odwaneFjfXw5fdum+4yL20O0QiaHpia0cYQ9xpNMqrBwHg== - dependencies: - regenerator-runtime "^0.14.0" + version "7.28.2" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.28.2.tgz#2ae5a9d51cc583bd1f5673b3bb70d6d819682473" + integrity sha512-KHp2IflsnGywDjBWDkR9iEqiWSpc8GIi0lgTT3mOElT0PP1tG26P4tmFI2YvAdzgq9RGyoHZQEIEdZy6Ec5xCA== "@babel/template@^7.22.15": version "7.22.15" @@ -4642,11 +4640,6 @@ regenerate@^1.4.2: resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== -regenerator-runtime@^0.14.0: - version "0.14.0" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz#5e19d68eb12d486f797e15a3c6a918f7cec5eb45" - integrity sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA== - regenerator-transform@^0.15.2: version "0.15.2" resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz#5bbae58b522098ebdf09bca2f83838929001c7a4" From b34cdc971007edb6968b793ea01f87196d8f9439 Mon 
Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 29 Jul 2025 09:04:08 -0700 Subject: [PATCH 0171/1093] ipn,net,tsnet,wgengine: make an eventbus mandatory where it is used (#16594) In the components where an event bus is already plumbed through, remove the exceptions that allow it to be omitted, and update all the tests that relied on those workarounds execute properly. This change applies only to the places where we're already using the bus; it does not enforce the existence of a bus in other components (yet), Updates #15160 Change-Id: Iebb92243caba82b5eb420c49fc3e089a77454f65 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/dnsconfig_test.go | 2 +- ipn/ipnlocal/local.go | 11 +-- ipn/ipnlocal/local_test.go | 6 ++ ipn/ipnlocal/network-lock_test.go | 26 ++++--- ipn/ipnlocal/peerapi_test.go | 102 +++++++++++++-------------- net/portmapper/igd_test.go | 11 ++- net/portmapper/portmapper.go | 38 +++++----- net/udprelay/server.go | 3 + tsnet/tsnet.go | 5 +- wgengine/magicsock/magicsock.go | 41 ++++++----- wgengine/magicsock/magicsock_test.go | 13 ++-- 11 files changed, 134 insertions(+), 124 deletions(-) diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index c0f5b25f38b11..71f1751488788 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -377,7 +377,7 @@ func peersMap(s []tailcfg.NodeView) map[tailcfg.NodeID]tailcfg.NodeView { } func TestAllowExitNodeDNSProxyToServeName(t *testing.T) { - b := &LocalBackend{} + b := newTestLocalBackend(t) if b.allowExitNodeDNSProxyToServeName("google.com") { t.Fatal("unexpected true on backend with nil NetMap") } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7154b942c1690..bf13b2ac1039c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -99,7 +99,6 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/deephash" "tailscale.com/util/dnsname" - "tailscale.com/util/eventbus" "tailscale.com/util/goroutines" 
"tailscale.com/util/httpm" "tailscale.com/util/mak" @@ -618,15 +617,7 @@ func (b *LocalBackend) currentNode() *nodeBackend { if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { return v } - // Auto-init [nodeBackend] in tests for LocalBackend created without the - // NewLocalBackend() constructor. Same reasoning for checking b.sys. - var bus *eventbus.Bus - if b.sys == nil { - bus = eventbus.New() - } else { - bus = b.sys.Bus.Get() - } - v := newNodeBackend(cmp.Or(b.ctx, context.Background()), bus) + v := newNodeBackend(cmp.Or(b.ctx, context.Background()), b.sys.Bus.Get()) if b.currentNodeAtomic.CompareAndSwap(nil, v) { v.ready() } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 37b81c84b80f9..30833e748ea1b 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -463,6 +463,7 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { var logf logger.Logf = logger.Discard if _, ok := sys.StateStore.GetOK(); !ok { sys.Set(new(mem.Store)) + t.Log("Added memory store for testing") } if _, ok := sys.Engine.GetOK(); !ok { eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) @@ -471,6 +472,11 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { } t.Cleanup(eng.Close) sys.Set(eng) + t.Log("Added fake userspace engine for testing") + } + if _, ok := sys.Dialer.GetOK(); !ok { + sys.Set(tsdial.NewDialer(netmon.NewStatic())) + t.Log("Added static dialer for testing") } lb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) if err != nil { diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 838f16cb9001f..443539aecc2cb 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/tka" + "tailscale.com/tsd" "tailscale.com/types/key" 
"tailscale.com/types/netmap" "tailscale.com/types/persist" @@ -935,18 +936,21 @@ func TestTKAForceDisable(t *testing.T) { defer ts.Close() cc := fakeControlClient(t, client) - b := LocalBackend{ - varRoot: temp, - cc: cc, - ccAuto: cc, - logf: t.Logf, - tka: &tkaState{ - authority: authority, - storage: chonk, - }, - pm: pm, - store: pm.Store(), + sys := tsd.NewSystem() + sys.Set(pm.Store()) + + b := newTestLocalBackendWithSys(t, sys) + b.SetVarRoot(temp) + b.SetControlClientGetterForTesting(func(controlclient.Options) (controlclient.Client, error) { + return cc, nil + }) + b.mu.Lock() + b.tka = &tkaState{ + authority: authority, + storage: chonk, } + b.pm = pm + b.mu.Unlock() if err := b.NetworkLockForceLocalDisable(); err != nil { t.Fatalf("NetworkLockForceLocalDisable() failed: %v", err) diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index d8655afa08aa8..5654cf27799e2 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -21,10 +21,10 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" + "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/util/eventbus" "tailscale.com/util/must" "tailscale.com/util/usermetric" "tailscale.com/wgengine" @@ -156,10 +156,9 @@ func TestHandlePeerAPI(t *testing.T) { selfNode.CapMap = tailcfg.NodeCapMap{tailcfg.CapabilityDebug: nil} } var e peerAPITestEnv - lb := &LocalBackend{ - logf: e.logBuf.Logf, - clock: &tstest.Clock{}, - } + lb := newTestLocalBackend(t) + lb.logf = e.logBuf.Logf + lb.clock = &tstest.Clock{} lb.currentNode().SetNetMap(&netmap.NetworkMap{SelfNode: selfNode.View()}) e.ph = &peerAPIHandler{ isSelf: tt.isSelf, @@ -195,20 +194,20 @@ func TestPeerAPIReplyToDNSQueries(t *testing.T) { h.isSelf = false h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystem() + t.Cleanup(sys.Bus.Get().Close) ht := 
new(health.Tracker) - reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - }, - } + reg := new(usermetric.Registry) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) + sys.Set(pm.Store()) + sys.Set(eng) + + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + + h.ps = &peerAPIServer{b: b} if h.ps.b.OfferingExitNode() { t.Fatal("unexpectedly offering exit node") } @@ -250,12 +249,12 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystem() + t.Cleanup(sys.Bus.Get().Close) ht := new(health.Tracker) reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { @@ -263,16 +262,14 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { } else { a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) } - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - // configure as an app connector just to enable the API. - appConnector: a, - }, - } + sys.Set(pm.Store()) + sys.Set(eng) + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + b.appConnector = a // configure as an app connector just to enable the API. 
+ + h.ps = &peerAPIServer{b: b} h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.CNAMEResource( dnsmessage.ResourceHeader{ @@ -326,27 +323,29 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystem() + t.Cleanup(sys.Bus.Get().Close) + rc := &appctest.RouteCollector{} ht := new(health.Tracker) - reg := new(usermetric.Registry) - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) + + reg := new(usermetric.Registry) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) var a *appc.AppConnector if shouldStore { a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) } else { a = appc.NewAppConnector(t.Logf, rc, nil, nil) } - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - appConnector: a, - }, - } + sys.Set(pm.Store()) + sys.Set(eng) + + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + b.appConnector = a + + h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"example.com"}) h.ps.b.appConnector.Wait(ctx) @@ -393,12 +392,13 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - bus := eventbus.New() - defer bus.Close() + sys := tsd.NewSystem() + t.Cleanup(sys.Bus.Get().Close) + ht := new(health.Tracker) reg := new(usermetric.Registry) rc := &appctest.RouteCollector{} - eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, bus) + eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { @@ -406,14 +406,14 @@ func 
TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { } else { a = appc.NewAppConnector(t.Logf, rc, nil, nil) } - h.ps = &peerAPIServer{ - b: &LocalBackend{ - e: eng, - pm: pm, - store: pm.Store(), - appConnector: a, - }, - } + sys.Set(pm.Store()) + sys.Set(eng) + + b := newTestLocalBackendWithSys(t, sys) + b.pm = pm + b.appConnector = a + + h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"www.example.com"}) h.ps.b.appConnector.Wait(ctx) diff --git a/net/portmapper/igd_test.go b/net/portmapper/igd_test.go index 3ef7989a3a241..cca87e0b8238e 100644 --- a/net/portmapper/igd_test.go +++ b/net/portmapper/igd_test.go @@ -263,16 +263,21 @@ func (d *TestIGD) handlePCPQuery(pkt []byte, src netip.AddrPort) { } // newTestClient configures a new test client connected to igd for mapping updates. -// If bus != nil, update events are published to it. -// A cleanup for the resulting client is added to t. +// If bus == nil, a new empty event bus is constructed that is cleaned up when t exits. +// A cleanup for the resulting client is also added to t. func newTestClient(t *testing.T, igd *TestIGD, bus *eventbus.Bus) *Client { + if bus == nil { + bus = eventbus.New() + t.Log("Created empty event bus for test client") + t.Cleanup(bus.Close) + } var c *Client c = NewClient(Config{ Logf: tstest.WhileTestRunningLogger(t), NetMon: netmon.NewStatic(), ControlKnobs: new(controlknobs.Knobs), EventBus: bus, - OnChange: func() { + OnChange: func() { // TODO(creachadair): Remove. t.Logf("port map changed") t.Logf("have mapping: %v", c.HaveMapping()) }, diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index c82fbf9da112a..30535157cc892 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -85,7 +85,7 @@ const trustServiceStillAvailableDuration = 10 * time.Minute // Client is a port mapping client. type Client struct { - // The following two fields must either both be nil, or both non-nil. 
+ // The following two fields must both be non-nil. // Both are immutable after construction. pubClient *eventbus.Client updates *eventbus.Publisher[Mapping] @@ -238,8 +238,11 @@ type Config struct { // NewClient constructs a new portmapping [Client] from c. It will panic if any // required parameters are omitted. func NewClient(c Config) *Client { - if c.NetMon == nil { - panic("nil netMon") + switch { + case c.NetMon == nil: + panic("nil NetMon") + case c.EventBus == nil: + panic("nil EventBus") } ret := &Client{ logf: c.Logf, @@ -248,10 +251,8 @@ func NewClient(c Config) *Client { onChange: c.OnChange, controlKnobs: c.ControlKnobs, } - if c.EventBus != nil { - ret.pubClient = c.EventBus.Client("portmapper") - ret.updates = eventbus.Publish[Mapping](ret.pubClient) - } + ret.pubClient = c.EventBus.Client("portmapper") + ret.updates = eventbus.Publish[Mapping](ret.pubClient) if ret.logf == nil { ret.logf = logger.Discard } @@ -286,10 +287,9 @@ func (c *Client) Close() error { } c.closed = true c.invalidateMappingsLocked(true) - if c.updates != nil { - c.updates.Close() - c.pubClient.Close() - } + c.updates.Close() + c.pubClient.Close() + // TODO: close some future ever-listening UDP socket(s), // waiting for multicast announcements from router. return nil @@ -515,14 +515,14 @@ func (c *Client) createMapping() { // the control flow to eliminate that possibility. Meanwhile, this // mitigates a panic downstream, cf. #16662. } - if c.updates != nil { - c.updates.Publish(Mapping{ - External: mapping.External(), - Type: mapping.MappingType(), - GoodUntil: mapping.GoodUntil(), - }) - } - if c.onChange != nil && c.pubClient == nil { + c.updates.Publish(Mapping{ + External: mapping.External(), + Type: mapping.MappingType(), + GoodUntil: mapping.GoodUntil(), + }) + // TODO(creachadair): Remove this entirely once there are no longer any + // places where the callback is set. 
+ if c.onChange != nil { go c.onChange() } } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index c34a4b5f6835c..aece3bc59b0fe 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -291,6 +291,9 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve s.vniPool = append(s.vniPool, uint32(i)) } + // TODO(creachadair): Find a way to plumb this in during initialization. + // As-written, messages published here will not be seen by other components + // in a running client. bus := eventbus.New() s.bus = bus netMon, err := netmon.New(s.bus, logf) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 65367f235482f..d81dec7d62ad5 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -435,10 +435,7 @@ func (s *Server) Close() error { ln.closeLocked() } wg.Wait() - - if bus := s.sys.Bus.Get(); bus != nil { - bus.Close() - } + s.sys.Bus.Get().Close() s.closed = true return nil } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index d2835aed34547..6495b13b57db5 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -715,8 +715,11 @@ func (c *Conn) Synchronize() { // As the set of possible endpoints for a Conn changes, the // callback opts.EndpointsFunc is called. func NewConn(opts Options) (*Conn, error) { - if opts.NetMon == nil { + switch { + case opts.NetMon == nil: return nil, errors.New("magicsock.Options.NetMon must be non-nil") + case opts.EventBus == nil: + return nil, errors.New("magicsock.Options.EventBus must be non-nil") } c := newConn(opts.logf()) @@ -729,22 +732,20 @@ func NewConn(opts Options) (*Conn, error) { c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity - if c.eventBus != nil { - c.eventClient = c.eventBus.Client("magicsock.Conn") - - // Subscribe calls must return before NewConn otherwise published - // events can be missed. 
- c.pmSub = eventbus.Subscribe[portmapper.Mapping](c.eventClient) - c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) - c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) - c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) - c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) - c.syncPub = eventbus.Publish[syncPoint](c.eventClient) - c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](c.eventClient) - c.allocRelayEndpointSub = eventbus.Subscribe[UDPRelayAllocResp](c.eventClient) - c.subsDoneCh = make(chan struct{}) - go c.consumeEventbusTopics() - } + c.eventClient = c.eventBus.Client("magicsock.Conn") + + // Subscribe calls must return before NewConn otherwise published + // events can be missed. + c.pmSub = eventbus.Subscribe[portmapper.Mapping](c.eventClient) + c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) + c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) + c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) + c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) + c.syncPub = eventbus.Publish[syncPoint](c.eventClient) + c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](c.eventClient) + c.allocRelayEndpointSub = eventbus.Subscribe[UDPRelayAllocResp](c.eventClient) + c.subsDoneCh = make(chan struct{}) + go c.consumeEventbusTopics() // Don't log the same log messages possibly every few seconds in our // portmapper. @@ -3327,10 +3328,8 @@ func (c *Conn) Close() error { // deadlock with c.Close(). // 2. Conn.consumeEventbusTopics event handlers may not guard against // undesirable post/in-progress Conn.Close() behaviors. 
- if c.eventClient != nil { - c.eventClient.Close() - <-c.subsDoneCh - } + c.eventClient.Close() + <-c.subsDoneCh c.mu.Lock() defer c.mu.Unlock() diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 8a09df27d2ce7..480faa694c70d 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -179,7 +179,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen t.Helper() bus := eventbus.New() - defer bus.Close() + t.Cleanup(bus.Close) netMon, err := netmon.New(bus, logf) if err != nil { @@ -191,6 +191,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary conn, err := NewConn(Options{ NetMon: netMon, + EventBus: bus, Metrics: ®, Logf: logf, HealthTracker: ht, @@ -406,7 +407,7 @@ func TestNewConn(t *testing.T) { } bus := eventbus.New() - defer bus.Close() + t.Cleanup(bus.Close) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... netmon: ")) if err != nil { @@ -424,6 +425,7 @@ func TestNewConn(t *testing.T) { EndpointsFunc: epFunc, Logf: t.Logf, NetMon: netMon, + EventBus: bus, Metrics: new(usermetric.Registry), }) if err != nil { @@ -542,7 +544,7 @@ func TestDeviceStartStop(t *testing.T) { tstest.ResourceCheck(t) bus := eventbus.New() - defer bus.Close() + t.Cleanup(bus.Close) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... netmon: ")) if err != nil { @@ -554,6 +556,7 @@ func TestDeviceStartStop(t *testing.T) { EndpointsFunc: func(eps []tailcfg.Endpoint) {}, Logf: t.Logf, NetMon: netMon, + EventBus: bus, Metrics: new(usermetric.Registry), }) if err != nil { @@ -1349,7 +1352,7 @@ func newTestConn(t testing.TB) *Conn { port := pickPort(t) bus := eventbus.New() - defer bus.Close() + t.Cleanup(bus.Close) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... 
netmon: ")) if err != nil { @@ -1359,6 +1362,7 @@ func newTestConn(t testing.TB) *Conn { conn, err := NewConn(Options{ NetMon: netMon, + EventBus: bus, HealthTracker: new(health.Tracker), Metrics: new(usermetric.Registry), DisablePortMapper: true, @@ -3147,6 +3151,7 @@ func TestNetworkDownSendErrors(t *testing.T) { Logf: t.Logf, NetMon: netMon, Metrics: reg, + EventBus: bus, })) defer conn.Close() From e37432afb7acb012576b8df483d31492317b790b Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Tue, 29 Jul 2025 13:59:09 -0400 Subject: [PATCH 0172/1093] cmd/tailscale/cli: update message for disable service (#16705) This commit updates the message recommending the clear command after running serve for a service. Instead of a flag, we pass the service name as a parameter. Fixes tailscale/corp#30846 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- cmd/tailscale/cli/serve_v2.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 91a23697035a8..acefd881f01b0 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -656,7 +656,7 @@ var ( msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off" msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off" - msgDisableService = "To remove config for the service, run: tailscale serve clear --service=%s" + msgDisableService = "To remove config for the service, run: tailscale serve clear %s" msgToExit = "Press Ctrl+C to exit."
) From 3d1e4f147afb7359061dde08c270a37032fe5aef Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 29 Jul 2025 14:58:47 -0700 Subject: [PATCH 0173/1093] tstest/natlab: fix conn.Close race with conn.ReadFromUDPAddrPort (#16710) If a conn.Close call raced conn.ReadFromUDPAddrPort before it could "register" itself as an active read, the conn.ReadFromUDPAddrPort would never return. This commit replaces all the activeRead and breakActiveReads machinery with a channel. These constructs were only depended upon by SetReadDeadline, and SetReadDeadline was unused. Updates #16707 Signed-off-by: Jordan Whited --- tstest/natlab/natlab.go | 112 +++++++++------------------------------- 1 file changed, 23 insertions(+), 89 deletions(-) diff --git a/tstest/natlab/natlab.go b/tstest/natlab/natlab.go index 92a4ccb68e25a..ffa02eee46e06 100644 --- a/tstest/natlab/natlab.go +++ b/tstest/natlab/natlab.go @@ -684,10 +684,11 @@ func (m *Machine) ListenPacket(ctx context.Context, network, address string) (ne ipp := netip.AddrPortFrom(ip, port) c := &conn{ - m: m, - fam: fam, - ipp: ipp, - in: make(chan *Packet, 100), // arbitrary + m: m, + fam: fam, + ipp: ipp, + closedCh: make(chan struct{}), + in: make(chan *Packet, 100), // arbitrary } switch c.fam { case 0: @@ -716,70 +717,28 @@ type conn struct { fam uint8 // 0, 4, or 6 ipp netip.AddrPort - mu sync.Mutex - closed bool - readDeadline time.Time - activeReads map[*activeRead]bool - in chan *Packet -} + closeOnce sync.Once + closedCh chan struct{} // closed by Close -type activeRead struct { - cancel context.CancelFunc -} - -// canRead reports whether we can do a read. 
-func (c *conn) canRead() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.closed { - return net.ErrClosed - } - if !c.readDeadline.IsZero() && c.readDeadline.Before(time.Now()) { - return errors.New("read deadline exceeded") - } - return nil -} - -func (c *conn) registerActiveRead(ar *activeRead, active bool) { - c.mu.Lock() - defer c.mu.Unlock() - if c.activeReads == nil { - c.activeReads = make(map[*activeRead]bool) - } - if active { - c.activeReads[ar] = true - } else { - delete(c.activeReads, ar) - } + in chan *Packet } func (c *conn) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.closed { - return nil - } - c.closed = true - switch c.fam { - case 0: - c.m.unregisterConn4(c) - c.m.unregisterConn6(c) - case 4: - c.m.unregisterConn4(c) - case 6: - c.m.unregisterConn6(c) - } - c.breakActiveReadsLocked() + c.closeOnce.Do(func() { + switch c.fam { + case 0: + c.m.unregisterConn4(c) + c.m.unregisterConn6(c) + case 4: + c.m.unregisterConn4(c) + case 6: + c.m.unregisterConn6(c) + } + close(c.closedCh) + }) return nil } -func (c *conn) breakActiveReadsLocked() { - for ar := range c.activeReads { - ar.cancel() - } - c.activeReads = nil -} - func (c *conn) LocalAddr() net.Addr { return &net.UDPAddr{ IP: c.ipp.Addr().AsSlice(), @@ -809,25 +768,13 @@ func (c *conn) ReadFrom(p []byte) (n int, addr net.Addr, err error) { } func (c *conn) ReadFromUDPAddrPort(p []byte) (n int, addr netip.AddrPort, err error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - ar := &activeRead{cancel: cancel} - - if err := c.canRead(); err != nil { - return 0, netip.AddrPort{}, err - } - - c.registerActiveRead(ar, true) - defer c.registerActiveRead(ar, false) - select { + case <-c.closedCh: + return 0, netip.AddrPort{}, net.ErrClosed case pkt := <-c.in: n = copy(p, pkt.Payload) pkt.Trace("PacketConn.ReadFrom") return n, pkt.Src, nil - case <-ctx.Done(): - return 0, netip.AddrPort{}, context.DeadlineExceeded } } @@ -857,18 +804,5 @@ func (c *conn) 
SetWriteDeadline(t time.Time) error { panic("SetWriteDeadline unsupported; TODO when needed") } func (c *conn) SetReadDeadline(t time.Time) error { - c.mu.Lock() - defer c.mu.Unlock() - - now := time.Now() - if t.After(now) { - panic("SetReadDeadline in the future not yet supported; TODO?") - } - - if !t.IsZero() && t.Before(now) { - c.breakActiveReadsLocked() - } - c.readDeadline = t - - return nil + panic("SetReadDeadline unsupported; TODO when needed") } From aa6a2d1e56a58c9e800b81701fa4636f85c9982a Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 29 Jul 2025 09:11:36 -0500 Subject: [PATCH 0174/1093] drive/driveimpl: use sudo or su to run file server Some systems have `sudo`, some have `su`. This tries both, increasing the chance that we can run the file server as an unprivileged user. Updates #14629 Signed-off-by: Percy Wegmann --- drive/driveimpl/remote_impl.go | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/drive/driveimpl/remote_impl.go b/drive/driveimpl/remote_impl.go index 7fd5d3325beb0..2ff98075e3012 100644 --- a/drive/driveimpl/remote_impl.go +++ b/drive/driveimpl/remote_impl.go @@ -333,8 +333,14 @@ func (s *userServer) run() error { args = append(args, s.Name, s.Path) } var cmd *exec.Cmd - if su := s.canSU(); su != "" { - s.logf("starting taildrive file server as user %q", s.username) + + if s.canSudo() { + s.logf("starting taildrive file server with sudo as user %q", s.username) + allArgs := []string{"-n", "-u", s.username, s.executable} + allArgs = append(allArgs, args...) + cmd = exec.Command("sudo", allArgs...) + } else if su := s.canSU(); su != "" { + s.logf("starting taildrive file server with su as user %q", s.username) // Quote and escape arguments. Use single quotes to prevent shell substitutions. 
for i, arg := range args { args[i] = "'" + strings.ReplaceAll(arg, "'", "'\"'\"'") + "'" @@ -343,7 +349,7 @@ func (s *userServer) run() error { allArgs := []string{s.username, "-c", cmdString} cmd = exec.Command(su, allArgs...) } else { - // If we were root, we should have been able to sudo as a specific + // If we were root, we should have been able to sudo or su as a specific // user, but let's check just to make sure, since we never want to // access shared folders as root. err := s.assertNotRoot() @@ -409,6 +415,18 @@ var writeMethods = map[string]bool{ "DELETE": true, } +// canSudo checks whether we can sudo -u the configured executable as the +// configured user by attempting to call the executable with the '-h' flag to +// print help. +func (s *userServer) canSudo() bool { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + if err := exec.CommandContext(ctx, "sudo", "-n", "-u", s.username, s.executable, "-h").Run(); err != nil { + return false + } + return true + } + // canSU checks whether the current process can run su with the right username. // If su can be run, this returns the path to the su command. // If not, this returns the empty string "". From eed3e5dc611f17de9ca435523bb21ff312f21389 Mon Sep 17 00:00:00 2001 From: Tom Proctor <tomhjp@users.noreply.github.com> Date: Wed, 30 Jul 2025 13:39:59 +0100 Subject: [PATCH 0175/1093] ipn/store/kubestore,kube: fix cert error in admin UI (#16717) Also adds a test to kube/kubeclient to defend against the error type returned by the client changing in future.
Fixes tailscale/corp#30855 Change-Id: Id11d4295003e66ad5c29a687f1239333c21226a4 Signed-off-by: Tom Proctor --- ipn/store/kubestore/store_kube.go | 18 ++++++ ipn/store/kubestore/store_kube_test.go | 7 +++ kube/kubeclient/client_test.go | 76 ++++++++++++++++++++++++++ 3 files changed, 101 insertions(+) diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index a9ad514e755b2..5b25471c75638 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -9,6 +9,7 @@ import ( "fmt" "log" "net" + "net/http" "os" "strings" "time" @@ -203,6 +204,23 @@ func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) { // that wraps ipn.ErrStateNotExist here. return nil, nil, ipn.ErrStateNotExist } + st, ok := err.(*kubeapi.Status) + if ok && st.Code == http.StatusForbidden && (s.certShareMode == "ro" || s.certShareMode == "rw") { + // In cert share mode, we read from a dedicated Secret per domain. + // To get here, we already had a cache miss from our in-memory + // store. For write replicas, that means it wasn't available on + // start and it wasn't written since. For read replicas, that means + // it wasn't available on start and it hasn't been reloaded in the + // background. So getting a "forbidden" error is an expected + // "not found" case where we've been asked for a cert we don't + // expect to issue, and so the forbidden error reflects that the + // operator didn't assign permission for a Secret for that domain. + // + // This code path gets triggered by the admin UI's machine page, + // which queries for the node's own TLS cert existing via the + // "tls-cert-status" c2n API. 
+ return nil, nil, ipn.ErrStateNotExist + } return nil, nil, fmt.Errorf("getting TLS Secret %q: %w", domain, err) } cert = secret.Data[keyTLSCert] diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 9a49f30288840..8c8e5e87075f0 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -426,6 +426,13 @@ func TestReadTLSCertAndKey(t *testing.T) { secretGetErr: &kubeapi.Status{Code: 404}, wantErr: ipn.ErrStateNotExist, }, + { + name: "cert_share_ro_mode_forbidden", + certShareMode: "ro", + domain: testDomain, + secretGetErr: &kubeapi.Status{Code: 403}, + wantErr: ipn.ErrStateNotExist, + }, { name: "cert_share_ro_mode_empty_cert_in_secret", certShareMode: "ro", diff --git a/kube/kubeclient/client_test.go b/kube/kubeclient/client_test.go index 31878befe4106..8599e7e3c19e2 100644 --- a/kube/kubeclient/client_test.go +++ b/kube/kubeclient/client_test.go @@ -7,6 +7,9 @@ import ( "context" "encoding/json" "net/http" + "net/http/httptest" + "os" + "path/filepath" "testing" "github.com/google/go-cmp/cmp" @@ -104,6 +107,48 @@ func Test_client_Event(t *testing.T) { } } +// TestReturnsKubeStatusError ensures HTTP error codes from the Kubernetes API +// server can always be extracted by casting the error to the *kubeapi.Status +// type, as lots of calling code relies on this cast succeeding. Note that +// transport errors are not expected or required to be of type *kubeapi.Status. 
+func TestReturnsKubeStatusError(t *testing.T) { + cl := clientForKubeHandler(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusForbidden) + _ = json.NewEncoder(w).Encode(kubeapi.Status{Code: http.StatusForbidden, Message: "test error"}) + })) + + _, err := cl.GetSecret(t.Context(), "test-secret") + if err == nil { + t.Fatal("expected error, got nil") + } + if st, ok := err.(*kubeapi.Status); !ok || st.Code != http.StatusForbidden { + t.Fatalf("expected kubeapi.Status with code %d, got %T: %v", http.StatusForbidden, err, err) + } +} + +// clientForKubeHandler creates a client using the externally accessible package +// API to ensure it's testing behaviour as close to prod as possible. The passed +// in handler mocks the Kubernetes API server's responses to any HTTP requests +// made by the client. +func clientForKubeHandler(t *testing.T, handler http.Handler) Client { + t.Helper() + tmpDir := t.TempDir() + rootPathForTests = tmpDir + saDir := filepath.Join(tmpDir, "var", "run", "secrets", "kubernetes.io", "serviceaccount") + _ = os.MkdirAll(saDir, 0755) + _ = os.WriteFile(filepath.Join(saDir, "token"), []byte("test-token"), 0600) + _ = os.WriteFile(filepath.Join(saDir, "namespace"), []byte("test-namespace"), 0600) + _ = os.WriteFile(filepath.Join(saDir, "ca.crt"), []byte(ca), 0644) + cl, err := New("test-client") + if err != nil { + t.Fatalf("New() error = %v", err) + } + srv := httptest.NewServer(handler) + t.Cleanup(srv.Close) + cl.SetURL(srv.URL) + return cl +} + // args is a set of values for testing a single call to client.kubeAPIRequest. type args struct { // wantsMethod is the expected value of 'method' arg. 
@@ -149,3 +194,34 @@ func fakeKubeAPIRequest(t *testing.T, argSets []args) kubeAPIRequestFunc { } return f } + +const ca = `-----BEGIN CERTIFICATE----- +MIIFEDCCA3igAwIBAgIRANf5NdPojIfj70wMfJVYUg8wDQYJKoZIhvcNAQELBQAw +gZ8xHjAcBgNVBAoTFW1rY2VydCBkZXZlbG9wbWVudCBDQTE6MDgGA1UECwwxZnJv +bWJlcmdlckBzdGFyZHVzdC5sb2NhbCAoTWljaGFlbCBKLiBGcm9tYmVyZ2VyKTFB +MD8GA1UEAww4bWtjZXJ0IGZyb21iZXJnZXJAc3RhcmR1c3QubG9jYWwgKE1pY2hh +ZWwgSi4gRnJvbWJlcmdlcikwHhcNMjMwMjA3MjAzNDE4WhcNMzMwMjA3MjAzNDE4 +WjCBnzEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMTowOAYDVQQLDDFm +cm9tYmVyZ2VyQHN0YXJkdXN0LmxvY2FsIChNaWNoYWVsIEouIEZyb21iZXJnZXIp +MUEwPwYDVQQDDDhta2NlcnQgZnJvbWJlcmdlckBzdGFyZHVzdC5sb2NhbCAoTWlj +aGFlbCBKLiBGcm9tYmVyZ2VyKTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoC +ggGBAL5uXNnrZ6dgjcvK0Hc7ZNUIRYEWst9qbO0P9H7le08pJ6d9T2BUWruZtVjk +Q12msv5/bVWHhVk8dZclI9FLXuMsIrocH8bsoP4wruPMyRyp6EedSKODN51fFSRv +/jHbS5vzUVAWTYy9qYmd6qL0uhsHCZCCT6gfigamHPUFKM3sHDn5ZHWvySMwcyGl +AicmPAIkBWqiCZAkB5+WM7+oyRLjmrIalfWIZYxW/rojGLwTfneHv6J5WjVQnpJB +ayWCzCzaiXukK9MeBWeTOe8UfVN0Engd74/rjLWvjbfC+uZSr6RVkZvs2jANLwPF +zgzBPHgRPfAhszU1NNAMjnNQ47+OMOTKRt7e6jYzhO5fyO1qVAAvGBqcfpj+JfDk +cccaUMhUvdiGrhGf1V1tN/PislxvALirzcFipjD01isBKwn0fxRugzvJNrjEo8RA +RvbcdeKcwex7M0o/Cd0+G2B13gZNOFvR33PmG7iTpp7IUrUKfQg28I83Sp8tMY3s +ljJSawIDAQABo0UwQzAOBgNVHQ8BAf8EBAMCAgQwEgYDVR0TAQH/BAgwBgEB/wIB +ADAdBgNVHQ4EFgQU18qto0Fa56kCi/HwfQuC9ECX7cAwDQYJKoZIhvcNAQELBQAD +ggGBAAzs96LwZVOsRSlBdQqMo8oMAvs7HgnYbXt8SqaACLX3+kJ3cV/vrCE3iJrW +ma4CiQbxS/HqsiZjota5m4lYeEevRnUDpXhp+7ugZTiz33Flm1RU99c9UYfQ+919 +ANPAKeqNpoPco/HF5Bz0ocepjcfKQrVZZNTj6noLs8o12FHBLO5976AcF9mqlNfh +8/F0gDJXq6+x7VT5y8u0rY004XKPRe3CklRt8kpeMiP6mhRyyUehOaHeIbNx8ubi +Pi44ByN/ueAnuRhF9zYtyZVZZOaSLysJge01tuPXF8rBXGruoJIv35xTTBa9BzaP +YDOGbGn1ZnajdNagHqCba8vjTLDSpqMvgRj3TFrGHdETA2LDQat38uVxX8gxm68K +va5Tyv7n+6BQ5YTpJjTPnmSJKaXZrrhdLPvG0OU2TxeEsvbcm5LFQofirOOw86Se +vzF2cQ94mmHRZiEk0Av3NO0jF93ELDrBCuiccVyEKq6TknuvPQlutCXKDOYSEb8I +MHctBg== +-----END CERTIFICATE-----` From 
1cc842b389a9f928cea2fb01fdd0a2c486ff5939 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 30 Jul 2025 13:08:53 -0700 Subject: [PATCH 0176/1093] util/set: add more functionality to IntSet (#16640) --- util/set/intset.go | 26 ++++++++++++++++++++++++++ util/set/intset_test.go | 6 ++++++ 2 files changed, 32 insertions(+) diff --git a/util/set/intset.go b/util/set/intset.go index b747d3bffa9fd..d325246914488 100644 --- a/util/set/intset.go +++ b/util/set/intset.go @@ -28,6 +28,15 @@ type IntSet[T constraints.Integer] struct { extraLen int } +// IntsOf constructs an [IntSet] with the provided elements. +func IntsOf[T constraints.Integer](slice ...T) IntSet[T] { + var s IntSet[T] + for _, e := range slice { + s.Add(e) + } + return s +} + // Values returns an iterator over the elements of the set. // The iterator will yield the elements in no particular order. func (s IntSet[T]) Values() iter.Seq[T] { @@ -111,6 +120,23 @@ func (s *IntSet[T]) Delete(e T) { } } +// DeleteSeq deletes the values in seq from the set. +func (s *IntSet[T]) DeleteSeq(seq iter.Seq[T]) { + for e := range seq { + s.Delete(e) + } +} + +// Equal reports whether s is equal to other. +func (s IntSet[T]) Equal(other IntSet[T]) bool { + for hi, bits := range s.extra { + if other.extra[hi] != bits { + return false + } + } + return s.extraLen == other.extraLen && s.bits == other.bits +} + // Clone returns a copy of s that doesn't alias the original. 
func (s IntSet[T]) Clone() IntSet[T] { return IntSet[T]{ diff --git a/util/set/intset_test.go b/util/set/intset_test.go index 9523fe88db127..d838215c97848 100644 --- a/util/set/intset_test.go +++ b/util/set/intset_test.go @@ -47,6 +47,9 @@ func TestIntSet(t *testing.T) { deleteInt(t, ss, &si, math.MinInt64) deleteInt(t, ss, &si, math.MaxInt64) intValues(t, ss, si) + if !si.Equal(IntsOf(ss.Slice()...)) { + t.Errorf("{%v}.Equal({%v}) = false, want true", si, ss) + } }) t.Run("Uint64", func(t *testing.T) { @@ -80,6 +83,9 @@ func TestIntSet(t *testing.T) { intValues(t, ss, si) deleteInt(t, ss, &si, math.MaxInt64) intValues(t, ss, si) + if !si.Equal(IntsOf(ss.Slice()...)) { + t.Errorf("{%v}.Equal({%v}) = false, want true", si, ss) + } }) } From 47b5f10165ad7ff48b65417786b0ea961a481d01 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Thu, 31 Jul 2025 12:13:36 -0400 Subject: [PATCH 0177/1093] cmd/tsidp,tsnet: update tsidp oidc-key store path (#16735) The tsidp oidc-key.json ended up in the root directory or home dir of the user process running it. Update this to store it in a known location respecting the TS_STATE_DIR and flagDir options. Fixes #16734 Signed-off-by: Mike O'Driscoll --- cmd/tsidp/tsidp.go | 26 +++++++++++++++++++++++--- tsnet/tsnet.go | 6 ++++++ 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 8df68cd744148..e68e55ca914fb 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -29,6 +29,7 @@ import ( "net/url" "os" "os/signal" + "path/filepath" "strconv" "strings" "sync" @@ -60,6 +61,9 @@ type ctxConn struct{} // accessing the IDP over Funnel are persisted. const funnelClientsFile = "oidc-funnel-clients.json" +// oidcKeyFile is where the OIDC private key is persisted. 
+const oidcKeyFile = "oidc-key.json" + var ( flagVerbose = flag.Bool("verbose", false, "be verbose") flagPort = flag.Int("port", 443, "port to listen on") @@ -80,12 +84,14 @@ func main() { var ( lc *local.Client st *ipnstate.Status + rootPath string err error watcherChan chan error cleanup func() lns []net.Listener ) + if *flagUseLocalTailscaled { lc = &local.Client{} st, err = lc.StatusWithoutPeers(ctx) @@ -110,6 +116,15 @@ func main() { log.Fatalf("failed to listen on any of %v", st.TailscaleIPs) } + if flagDir == nil || *flagDir == "" { + // use user config directory as storage for tsidp oidc key + configDir, err := os.UserConfigDir() + if err != nil { + log.Fatalf("getting user config directory: %v", err) + } + rootPath = filepath.Join(configDir, "tsidp") + } + // tailscaled needs to be setting an HTTP header for funneled requests // that older versions don't provide. // TODO(naman): is this the correct check? @@ -127,6 +142,8 @@ func main() { Hostname: *flagHostname, Dir: *flagDir, } + rootPath = ts.GetRootPath() + log.Printf("tsidp root path: %s", rootPath) if *flagVerbose { ts.Logf = log.Printf } @@ -157,7 +174,9 @@ func main() { lc: lc, funnel: *flagFunnel, localTSMode: *flagUseLocalTailscaled, + rootPath: rootPath, } + if *flagPort != 443 { srv.serverURL = fmt.Sprintf("https://%s:%d", strings.TrimSuffix(st.Self.DNSName, "."), *flagPort) } else { @@ -285,6 +304,7 @@ type idpServer struct { serverURL string // "https://foo.bar.ts.net" funnel bool localTSMode bool + rootPath string // root path, used for storing state files lazyMux lazy.SyncValue[*http.ServeMux] lazySigningKey lazy.SyncValue[*signingKey] @@ -819,8 +839,9 @@ func (s *idpServer) oidcSigner() (jose.Signer, error) { func (s *idpServer) oidcPrivateKey() (*signingKey, error) { return s.lazySigningKey.GetErr(func() (*signingKey, error) { + keyPath := filepath.Join(s.rootPath, oidcKeyFile) var sk signingKey - b, err := os.ReadFile("oidc-key.json") + b, err := os.ReadFile(keyPath) if err == nil { if 
err := sk.UnmarshalJSON(b); err == nil { return &sk, nil @@ -835,7 +856,7 @@ func (s *idpServer) oidcPrivateKey() (*signingKey, error) { if err != nil { log.Fatalf("Error marshaling key: %v", err) } - if err := os.WriteFile("oidc-key.json", b, 0600); err != nil { + if err := os.WriteFile(keyPath, b, 0600); err != nil { log.Fatalf("Error writing key: %v", err) } return &sk, nil @@ -869,7 +890,6 @@ func (s *idpServer) serveJWKS(w http.ResponseWriter, r *http.Request) { }); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } - return } // openIDProviderMetadata is a partial representation of diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index d81dec7d62ad5..2715917a2f1e9 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -1268,6 +1268,12 @@ func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, erro return ln, nil } +// GetRootPath returns the root path of the tsnet server. +// This is where the state file and other data is stored. +func (s *Server) GetRootPath() string { + return s.rootPath +} + // CapturePcap can be called by the application code compiled with tsnet to save a pcap // of packets which the netstack within tsnet sees. This is expected to be useful during // debugging, probably not useful for production. 
From 23a0398136d9d894eaf332c8ed8743dc9ecf4611 Mon Sep 17 00:00:00 2001 From: jishudashu <979260390@qq.com> Date: Fri, 1 Aug 2025 02:36:51 +0800 Subject: [PATCH 0178/1093] ipn/ipnlocal, net/dns: use slices.Equal to simplify code (#16641) Signed-off-by: jishudashu <979260390@qq.com> --- ipn/ipnlocal/local.go | 14 +------------- net/dns/config.go | 15 ++------------- 2 files changed, 3 insertions(+), 26 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bf13b2ac1039c..5fb3d5771b4cb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2166,7 +2166,7 @@ func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) { es := b.parseWgStatusLocked(s) cc := b.cc b.engineStatus = es - needUpdateEndpoints := !endpointsEqual(s.LocalAddrs, b.endpoints) + needUpdateEndpoints := !slices.Equal(s.LocalAddrs, b.endpoints) if needUpdateEndpoints { b.endpoints = append([]tailcfg.Endpoint{}, s.LocalAddrs...) } @@ -2192,18 +2192,6 @@ func (b *LocalBackend) broadcastStatusChanged() { b.statusLock.Unlock() } -func endpointsEqual(x, y []tailcfg.Endpoint) bool { - if len(x) != len(y) { - return false - } - for i := range x { - if x[i] != y[i] { - return false - } - } - return true -} - // SetNotifyCallback sets the function to call when the backend has something to // notify the frontend about. Only one callback can be set at a time, so calling // this function will replace the previous callback. 
diff --git a/net/dns/config.go b/net/dns/config.go index b2f4e6dbd9dc2..b2c7c428593ff 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -8,6 +8,7 @@ import ( "bufio" "fmt" "net/netip" + "slices" "sort" "tailscale.com/control/controlknobs" @@ -181,19 +182,7 @@ func sameResolverNames(a, b []*dnstype.Resolver) bool { if a[i].Addr != b[i].Addr { return false } - if !sameIPs(a[i].BootstrapResolution, b[i].BootstrapResolution) { - return false - } - } - return true -} - -func sameIPs(a, b []netip.Addr) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { + if !slices.Equal(a[i].BootstrapResolution, b[i].BootstrapResolution) { return false } } From f2fd7a051437ed19a8a77a7c71e4acdc86ad84c9 Mon Sep 17 00:00:00 2001 From: Lee Briggs Date: Thu, 31 Jul 2025 23:35:48 -0700 Subject: [PATCH 0179/1093] cmd/k8s-operator,k8s-operator: allow setting a `priorityClassName` (#16685) * cmd/k8s-operator,k8s-operator: allow setting a `priorityClassName` Fixes #16682 Signed-off-by: Lee Briggs * Update k8s-operator/apis/v1alpha1/types_proxyclass.go Co-authored-by: Tom Proctor Signed-off-by: Lee Briggs * run make kube-generate-all Change-Id: I5f8f16694fdc181b048217b9f05ec2ee2aa04def Signed-off-by: Tom Proctor --------- Signed-off-by: Lee Briggs Signed-off-by: Lee Briggs Signed-off-by: Tom Proctor Co-authored-by: Tom Proctor --- .../deploy/crds/tailscale.com_proxyclasses.yaml | 6 ++++++ cmd/k8s-operator/deploy/manifests/operator.yaml | 6 ++++++ cmd/k8s-operator/sts.go | 1 + cmd/k8s-operator/sts_test.go | 3 +++ k8s-operator/api.md | 1 + k8s-operator/apis/v1alpha1/types_proxyclass.go | 5 +++++ 6 files changed, 22 insertions(+) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index c5dc9c3e96a83..cb9e0b991a4eb 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -1093,6 
+1093,12 @@ spec: type: object additionalProperties: type: string + priorityClassName: + description: |- + PriorityClassName for the proxy Pod. + By default Tailscale Kubernetes operator does not apply any priority class. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + type: string securityContext: description: |- Proxy Pod's security context. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 175f2a7fbe9ba..5e0cca9b59339 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -1574,6 +1574,12 @@ spec: selector. https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling type: object + priorityClassName: + description: |- + PriorityClassName for the proxy Pod. + By default Tailscale Kubernetes operator does not apply any priority class. + https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + type: string securityContext: description: |- Proxy Pod's security context. diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index df12554e0feca..911d0283242d7 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -809,6 +809,7 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, ss.Spec.Template.Spec.NodeSelector = wantsPod.NodeSelector ss.Spec.Template.Spec.Affinity = wantsPod.Affinity ss.Spec.Template.Spec.Tolerations = wantsPod.Tolerations + ss.Spec.Template.Spec.PriorityClassName = wantsPod.PriorityClassName ss.Spec.Template.Spec.TopologySpreadConstraints = wantsPod.TopologySpreadConstraints // Update containers. 
diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index afa791ccc7904..e2cb2962fde48 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -76,6 +76,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"}, Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}}, Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}}, + PriorityClassName: "high-priority", TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ { WhenUnsatisfiable: "DoNotSchedule", @@ -198,6 +199,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.Containers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.InitContainers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" wantSS.Spec.Template.Spec.InitContainers[0].ImagePullPolicy = "IfNotPresent" + wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName gotSS := applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { @@ -236,6 +238,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.Containers[0].Env = append(wantSS.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{{Name: "foo", Value: "bar"}, {Name: "TS_USERSPACE", Value: "true"}, {Name: "bar"}}...) 
wantSS.Spec.Template.Spec.Containers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.Containers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" + wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with all options to a StatefulSet for a userspace proxy (-got +want):\n%s", diff) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 564c87f503a22..93a024b31d3c9 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -515,6 +515,7 @@ _Appears in:_ | `nodeSelector` _object (keys:string, values:string)_ | Proxy Pod's node selector.
By default Tailscale Kubernetes operator does not apply any node
selector.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Proxy Pod's tolerations.
By default Tailscale Kubernetes operator does not apply any
tolerations.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#topologyspreadconstraint-v1-core) array_ | Proxy Pod's topology spread constraints.
By default Tailscale Kubernetes operator does not apply any topology spread constraints.
https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | | +| `priorityClassName` _string_ | PriorityClassName for the proxy Pod.
By default Tailscale Kubernetes operator does not apply any priority class.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | #### PortRange diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 6a4114bfa83da..ea4e6a27c49de 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -298,6 +298,11 @@ type Pod struct { // https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ // +optional TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + // PriorityClassName for the proxy Pod. + // By default Tailscale Kubernetes operator does not apply any priority class. + // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling + // +optional + PriorityClassName string `json:"priorityClassName,omitempty"` } // +kubebuilder:validation:XValidation:rule="!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)",message="ServiceMonitor can only be enabled if metrics are enabled" From 5865d0a61a493ecbb15d33a9b84263952a81d7b0 Mon Sep 17 00:00:00 2001 From: mzbenami Date: Fri, 1 Aug 2025 13:30:42 -0400 Subject: [PATCH 0180/1093] Makefile: 'generate' target (#16746) Signed-off-by: Michael Ben-Ami --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index 55e55f209575c..9fffdc48a74a9 100644 --- a/Makefile +++ b/Makefile @@ -133,6 +133,10 @@ sshintegrationtest: ## Run the SSH integration tests in various Docker container echo "Testing on ubuntu:noble" && docker build --build-arg="BASE=ubuntu:noble" -t ssh-ubuntu-noble ssh/tailssh/testcontainers && \ echo "Testing on alpine:latest" && docker build --build-arg="BASE=alpine:latest" -t ssh-alpine-latest ssh/tailssh/testcontainers +.PHONY: generate +generate: ## Generate code + ./tool/go generate ./... + help: ## Show this help @echo "\nSpecify a command. 
The choices are:\n" @grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}' From d897d809d649b312a3f87d01d9f9426d518cdced Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Fri, 1 Aug 2025 15:10:00 -0700 Subject: [PATCH 0181/1093] feature/taildrop: do not use m.opts.Dir for Android (#16316) In Android, we are prompting the user to select a Taildrop directory when they first receive a Taildrop: we block writes on Taildrop dir selection. This means that we cannot use Dir inside managerOptions, since the http request would not get the new Taildrop extension. This PR removes, in the Android case, the reliance on m.opts.Dir, and instead has FileOps hold the correct directory. This expands FileOps to be the Taildrop interface for all file system operations. Updates tailscale/corp#29211 Signed-off-by: kari-ts restore tstest --- feature/taildrop/delete.go | 52 +++--- feature/taildrop/delete_test.go | 34 ++-- feature/taildrop/ext.go | 67 +++----- feature/taildrop/fileops.go | 41 +++++ feature/taildrop/fileops_fs.go | 221 ++++++++++++++++++++++++ feature/taildrop/paths.go | 2 +- feature/taildrop/peerapi_test.go | 41 +++-- feature/taildrop/resume.go | 28 ++-- feature/taildrop/resume_test.go | 9 +- feature/taildrop/retrieve.go | 116 +++++++------ feature/taildrop/send.go | 270 ++++-------------------------- feature/taildrop/send_test.go | 131 ++++----------- feature/taildrop/taildrop.go | 83 +++------ feature/taildrop/taildrop_test.go | 56 +++---- 14 files changed, 555 insertions(+), 596 deletions(-) create mode 100644 feature/taildrop/fileops.go create mode 100644 feature/taildrop/fileops_fs.go diff --git a/feature/taildrop/delete.go b/feature/taildrop/delete.go index e9c8d7f1c90fa..0b7259879f941 100644 --- a/feature/taildrop/delete.go +++ b/feature/taildrop/delete.go @@ -6,9 +6,7 @@ package taildrop import ( "container/list" "context" - "io/fs" "os" - 
"path/filepath" "strings" "sync" "time" @@ -28,7 +26,6 @@ const deleteDelay = time.Hour type fileDeleter struct { logf logger.Logf clock tstime.DefaultClock - dir string event func(string) // called for certain events; for testing only mu sync.Mutex @@ -39,6 +36,7 @@ type fileDeleter struct { group syncs.WaitGroup shutdownCtx context.Context shutdown context.CancelFunc + fs FileOps // must be used for all filesystem operations } // deleteFile is a specific file to delete after deleteDelay. @@ -50,15 +48,14 @@ type deleteFile struct { func (d *fileDeleter) Init(m *manager, eventHook func(string)) { d.logf = m.opts.Logf d.clock = m.opts.Clock - d.dir = m.opts.Dir d.event = eventHook + d.fs = m.opts.fileOps d.byName = make(map[string]*list.Element) d.emptySignal = make(chan struct{}) d.shutdownCtx, d.shutdown = context.WithCancel(context.Background()) // From a cold-start, load the list of partial and deleted files. - // // Only run this if we have ever received at least one file // to avoid ever touching the taildrop directory on systems (e.g., MacOS) // that pop up a security dialog window upon first access. @@ -71,38 +68,45 @@ func (d *fileDeleter) Init(m *manager, eventHook func(string)) { d.group.Go(func() { d.event("start full-scan") defer d.event("end full-scan") - rangeDir(d.dir, func(de fs.DirEntry) bool { + + if d.fs == nil { + d.logf("deleter: nil FileOps") + } + + files, err := d.fs.ListFiles() + if err != nil { + d.logf("deleter: ListDir error: %v", err) + return + } + for _, filename := range files { switch { case d.shutdownCtx.Err() != nil: - return false // terminate early - case !de.Type().IsRegular(): - return true - case strings.HasSuffix(de.Name(), partialSuffix): + return // terminate early + case strings.HasSuffix(filename, partialSuffix): // Only enqueue the file for deletion if there is no active put. 
- nameID := strings.TrimSuffix(de.Name(), partialSuffix) + nameID := strings.TrimSuffix(filename, partialSuffix) if i := strings.LastIndexByte(nameID, '.'); i > 0 { key := incomingFileKey{clientID(nameID[i+len("."):]), nameID[:i]} m.incomingFiles.LoadFunc(key, func(_ *incomingFile, loaded bool) { if !loaded { - d.Insert(de.Name()) + d.Insert(filename) } }) } else { - d.Insert(de.Name()) + d.Insert(filename) } - case strings.HasSuffix(de.Name(), deletedSuffix): + case strings.HasSuffix(filename, deletedSuffix): // Best-effort immediate deletion of deleted files. - name := strings.TrimSuffix(de.Name(), deletedSuffix) - if os.Remove(filepath.Join(d.dir, name)) == nil { - if os.Remove(filepath.Join(d.dir, de.Name())) == nil { - break + name := strings.TrimSuffix(filename, deletedSuffix) + if d.fs.Remove(name) == nil { + if d.fs.Remove(filename) == nil { + continue } } - // Otherwise, enqueue the file for later deletion. - d.Insert(de.Name()) + // Otherwise enqueue for later deletion. + d.Insert(filename) } - return true - }) + } }) } @@ -149,13 +153,13 @@ func (d *fileDeleter) waitAndDelete(wait time.Duration) { // Delete the expired file. 
if name, ok := strings.CutSuffix(file.name, deletedSuffix); ok { - if err := os.Remove(filepath.Join(d.dir, name)); err != nil && !os.IsNotExist(err) { + if err := d.fs.Remove(name); err != nil && !os.IsNotExist(err) { d.logf("could not delete: %v", redactError(err)) failed = append(failed, elem) continue } } - if err := os.Remove(filepath.Join(d.dir, file.name)); err != nil && !os.IsNotExist(err) { + if err := d.fs.Remove(file.name); err != nil && !os.IsNotExist(err) { d.logf("could not delete: %v", redactError(err)) failed = append(failed, elem) continue diff --git a/feature/taildrop/delete_test.go b/feature/taildrop/delete_test.go index 7a58de55c2492..36950f58288cb 100644 --- a/feature/taildrop/delete_test.go +++ b/feature/taildrop/delete_test.go @@ -5,7 +5,6 @@ package taildrop import ( "os" - "path/filepath" "slices" "testing" "time" @@ -20,11 +19,20 @@ import ( func TestDeleter(t *testing.T) { dir := t.TempDir() - must.Do(touchFile(filepath.Join(dir, "foo.partial"))) - must.Do(touchFile(filepath.Join(dir, "bar.partial"))) - must.Do(touchFile(filepath.Join(dir, "fizz"))) - must.Do(touchFile(filepath.Join(dir, "fizz.deleted"))) - must.Do(touchFile(filepath.Join(dir, "buzz.deleted"))) // lacks a matching "buzz" file + var m manager + var fd fileDeleter + m.opts.Logf = t.Logf + m.opts.Clock = tstime.DefaultClock{Clock: tstest.NewClock(tstest.ClockOpts{ + Start: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + })} + m.opts.State = must.Get(mem.New(nil, "")) + m.opts.fileOps, _ = newFileOps(dir) + + must.Do(m.touchFile("foo.partial")) + must.Do(m.touchFile("bar.partial")) + must.Do(m.touchFile("fizz")) + must.Do(m.touchFile("fizz.deleted")) + must.Do(m.touchFile("buzz.deleted")) // lacks a matching "buzz" file checkDirectory := func(want ...string) { t.Helper() @@ -69,12 +77,10 @@ func TestDeleter(t *testing.T) { } eventHook := func(event string) { eventsChan <- event } - var m manager - var fd fileDeleter m.opts.Logf = t.Logf m.opts.Clock = 
tstime.DefaultClock{Clock: clock} - m.opts.Dir = dir m.opts.State = must.Get(mem.New(nil, "")) + m.opts.fileOps, _ = newFileOps(dir) must.Do(m.opts.State.WriteState(ipn.TaildropReceivedKey, []byte{1})) fd.Init(&m, eventHook) defer fd.Shutdown() @@ -100,17 +106,17 @@ func TestDeleter(t *testing.T) { checkEvents("end waitAndDelete") checkDirectory() - must.Do(touchFile(filepath.Join(dir, "one.partial"))) + must.Do(m.touchFile("one.partial")) insert("one.partial") checkEvents("start waitAndDelete") advance(deleteDelay / 4) - must.Do(touchFile(filepath.Join(dir, "two.partial"))) + must.Do(m.touchFile("two.partial")) insert("two.partial") advance(deleteDelay / 4) - must.Do(touchFile(filepath.Join(dir, "three.partial"))) + must.Do(m.touchFile("three.partial")) insert("three.partial") advance(deleteDelay / 4) - must.Do(touchFile(filepath.Join(dir, "four.partial"))) + must.Do(m.touchFile("four.partial")) insert("four.partial") advance(deleteDelay / 4) @@ -145,8 +151,8 @@ func TestDeleterInitWithoutTaildrop(t *testing.T) { var m manager var fd fileDeleter m.opts.Logf = t.Logf - m.opts.Dir = t.TempDir() m.opts.State = must.Get(mem.New(nil, "")) + m.opts.fileOps, _ = newFileOps(t.TempDir()) fd.Init(&m, func(event string) { t.Errorf("unexpected event: %v", event) }) fd.Shutdown() } diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index c11fe3af427a1..f8f45b53fae26 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -10,7 +10,6 @@ import ( "fmt" "io" "maps" - "os" "path/filepath" "runtime" "slices" @@ -75,7 +74,7 @@ type Extension struct { // FileOps abstracts platform-specific file operations needed for file transfers. // This is currently being used for Android to use the Storage Access Framework. 
- FileOps FileOps + fileOps FileOps nodeBackendForTest ipnext.NodeBackend // if non-nil, pretend we're this node state for tests @@ -89,30 +88,6 @@ type Extension struct { outgoingFiles map[string]*ipn.OutgoingFile } -// safDirectoryPrefix is used to determine if the directory is managed via SAF. -const SafDirectoryPrefix = "content://" - -// PutMode controls how Manager.PutFile writes files to storage. -// -// PutModeDirect – write files directly to a filesystem path (default). -// PutModeAndroidSAF – use Android’s Storage Access Framework (SAF), where -// the OS manages the underlying directory permissions. -type PutMode int - -const ( - PutModeDirect PutMode = iota - PutModeAndroidSAF -) - -// FileOps defines platform-specific file operations. -type FileOps interface { - OpenFileWriter(filename string) (io.WriteCloser, string, error) - - // RenamePartialFile finalizes a partial file. - // It returns the new SAF URI as a string and an error. - RenamePartialFile(partialUri, targetDirUri, targetName string) (string, error) -} - func (e *Extension) Name() string { return "taildrop" } @@ -176,23 +151,34 @@ func (e *Extension) onChangeProfile(profile ipn.LoginProfileView, _ ipn.PrefsVie return } - // If we have a netmap, create a taildrop manager. - fileRoot, isDirectFileMode := e.fileRoot(uid, activeLogin) - if fileRoot == "" { - e.logf("no Taildrop directory configured") - } - mode := PutModeDirect - if e.directFileRoot != "" && strings.HasPrefix(e.directFileRoot, SafDirectoryPrefix) { - mode = PutModeAndroidSAF + // Use the provided [FileOps] implementation (typically for SAF access on Android), + // or create an [fsFileOps] instance rooted at fileRoot. + // + // A non-nil [FileOps] also implies that we are in DirectFileMode. 
+ fops := e.fileOps + isDirectFileMode := fops != nil + if fops == nil { + var fileRoot string + if fileRoot, isDirectFileMode = e.fileRoot(uid, activeLogin); fileRoot == "" { + e.logf("no Taildrop directory configured") + e.setMgrLocked(nil) + return + } + + var err error + if fops, err = newFileOps(fileRoot); err != nil { + e.logf("taildrop: cannot create FileOps: %v", err) + e.setMgrLocked(nil) + return + } } + e.setMgrLocked(managerOptions{ Logf: e.logf, Clock: tstime.DefaultClock{Clock: e.sb.Clock()}, State: e.stateStore, - Dir: fileRoot, DirectFileMode: isDirectFileMode, - FileOps: e.FileOps, - Mode: mode, + fileOps: fops, SendFileNotify: e.sendFileNotify, }.New()) } @@ -221,12 +207,7 @@ func (e *Extension) fileRoot(uid tailcfg.UserID, activeLogin string) (root strin baseDir := fmt.Sprintf("%s-uid-%d", strings.ReplaceAll(activeLogin, "@", "-"), uid) - dir := filepath.Join(varRoot, "files", baseDir) - if err := os.MkdirAll(dir, 0700); err != nil { - e.logf("Taildrop disabled; error making directory: %v", err) - return "", false - } - return dir, false + return filepath.Join(varRoot, "files", baseDir), false } // hasCapFileSharing reports whether the current node has the file sharing diff --git a/feature/taildrop/fileops.go b/feature/taildrop/fileops.go new file mode 100644 index 0000000000000..14f76067a8094 --- /dev/null +++ b/feature/taildrop/fileops.go @@ -0,0 +1,41 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package taildrop + +import ( + "io" + "io/fs" + "os" +) + +// FileOps abstracts over both local‐FS paths and Android SAF URIs. +type FileOps interface { + // OpenWriter creates or truncates a file named relative to the receiver's root, + // seeking to the specified offset. If the file does not exist, it is created with mode perm + // on platforms that support it. + // + // It returns an [io.WriteCloser] and the file's absolute path, or an error. + // This call may block. 
Callers should avoid holding locks when calling OpenWriter.
+	OpenWriter(name string, offset int64, perm os.FileMode) (wc io.WriteCloser, path string, err error)
+
+	// Remove deletes a file or directory relative to the receiver's root.
+	// It returns an error matching [fs.ErrNotExist] if the file or directory does not exist.
+	Remove(name string) error
+
+	// Rename atomically renames oldPath to a new file named newName,
+	// returning the full new path or an error.
+	Rename(oldPath, newName string) (newPath string, err error)
+
+	// ListFiles returns just the basenames of all regular files
+	// in the root directory.
+	ListFiles() ([]string, error)
+
+	// Stat returns the [fs.FileInfo] for the given name, or an error.
+	Stat(name string) (fs.FileInfo, error)
+
+	// OpenReader opens the named file relative to the receiver's root for reading.
+	OpenReader(name string) (io.ReadCloser, error)
+}
+
+var newFileOps func(dir string) (FileOps, error)
diff --git a/feature/taildrop/fileops_fs.go b/feature/taildrop/fileops_fs.go
new file mode 100644
index 0000000000000..4fecbe4af6bbb
--- /dev/null
+++ b/feature/taildrop/fileops_fs.go
@@ -0,0 +1,221 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+//go:build !android
+
+package taildrop
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+var renameMu sync.Mutex
+
+// fsFileOps implements FileOps using the local filesystem rooted at a directory.
+// It is used on non-Android platforms.
+type fsFileOps struct{ rootDir string } + +func init() { + newFileOps = func(dir string) (FileOps, error) { + if dir == "" { + return nil, errors.New("rootDir cannot be empty") + } + if err := os.MkdirAll(dir, 0o700); err != nil { + return nil, fmt.Errorf("mkdir %q: %w", dir, err) + } + return fsFileOps{rootDir: dir}, nil + } +} + +func (f fsFileOps) OpenWriter(name string, offset int64, perm os.FileMode) (io.WriteCloser, string, error) { + path, err := joinDir(f.rootDir, name) + if err != nil { + return nil, "", err + } + if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil { + return nil, "", err + } + fi, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, perm) + if err != nil { + return nil, "", err + } + if offset != 0 { + curr, err := fi.Seek(0, io.SeekEnd) + if err != nil { + fi.Close() + return nil, "", err + } + if offset < 0 || offset > curr { + fi.Close() + return nil, "", fmt.Errorf("offset %d out of range", offset) + } + if _, err := fi.Seek(offset, io.SeekStart); err != nil { + fi.Close() + return nil, "", err + } + if err := fi.Truncate(offset); err != nil { + fi.Close() + return nil, "", err + } + } + return fi, path, nil +} + +func (f fsFileOps) Remove(name string) error { + path, err := joinDir(f.rootDir, name) + if err != nil { + return err + } + return os.Remove(path) +} + +// Rename moves the partial file into its final name. +// newName must be a base name (not absolute or containing path separators). +// It will retry up to 10 times, de-dup same-checksum files, etc. 
+func (f fsFileOps) Rename(oldPath, newName string) (newPath string, err error) { + var dst string + if filepath.IsAbs(newName) || strings.ContainsRune(newName, os.PathSeparator) { + return "", fmt.Errorf("invalid newName %q: must not be an absolute path or contain path separators", newName) + } + + dst = filepath.Join(f.rootDir, newName) + + if err := os.MkdirAll(filepath.Dir(dst), 0o700); err != nil { + return "", err + } + + st, err := os.Stat(oldPath) + if err != nil { + return "", err + } + wantSize := st.Size() + + const maxRetries = 10 + for i := 0; i < maxRetries; i++ { + renameMu.Lock() + fi, statErr := os.Stat(dst) + // Atomically rename the partial file as the destination file if it doesn't exist. + // Otherwise, it returns the length of the current destination file. + // The operation is atomic. + if os.IsNotExist(statErr) { + err = os.Rename(oldPath, dst) + renameMu.Unlock() + if err != nil { + return "", err + } + return dst, nil + } + if statErr != nil { + renameMu.Unlock() + return "", statErr + } + gotSize := fi.Size() + renameMu.Unlock() + + // Avoid the final rename if a destination file has the same contents. + // + // Note: this is best effort and copying files from iOS from the Media Library + // results in processing on the iOS side which means the size and shas of the + // same file can be different. + if gotSize == wantSize { + sumP, err := sha256File(oldPath) + if err != nil { + return "", err + } + sumD, err := sha256File(dst) + if err != nil { + return "", err + } + if bytes.Equal(sumP[:], sumD[:]) { + if err := os.Remove(oldPath); err != nil { + return "", err + } + return dst, nil + } + } + + // Choose a new destination filename and try again. + dst = filepath.Join(filepath.Dir(dst), nextFilename(filepath.Base(dst))) + } + + return "", fmt.Errorf("too many retries trying to rename %q to %q", oldPath, newName) +} + +// sha256File computes the SHA‑256 of a file. 
+func sha256File(path string) (sum [sha256.Size]byte, _ error) { + f, err := os.Open(path) + if err != nil { + return sum, err + } + defer f.Close() + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return sum, err + } + copy(sum[:], h.Sum(nil)) + return sum, nil +} + +func (f fsFileOps) ListFiles() ([]string, error) { + entries, err := os.ReadDir(f.rootDir) + if err != nil { + return nil, err + } + var names []string + for _, e := range entries { + if e.Type().IsRegular() { + names = append(names, e.Name()) + } + } + return names, nil +} + +func (f fsFileOps) Stat(name string) (fs.FileInfo, error) { + path, err := joinDir(f.rootDir, name) + if err != nil { + return nil, err + } + return os.Stat(path) +} + +func (f fsFileOps) OpenReader(name string) (io.ReadCloser, error) { + path, err := joinDir(f.rootDir, name) + if err != nil { + return nil, err + } + return os.Open(path) +} + +// joinDir is like [filepath.Join] but returns an error if baseName is too long, +// is a relative path instead of a basename, or is otherwise invalid or unsafe for incoming files. +func joinDir(dir, baseName string) (string, error) { + if !utf8.ValidString(baseName) || + strings.TrimSpace(baseName) != baseName || + len(baseName) > 255 { + return "", ErrInvalidFileName + } + // TODO: validate unicode normalization form too? Varies by platform. + clean := path.Clean(baseName) + if clean != baseName || clean == "." || clean == ".." 
{ + return "", ErrInvalidFileName + } + for _, r := range baseName { + if !validFilenameRune(r) { + return "", ErrInvalidFileName + } + } + if !filepath.IsLocal(baseName) { + return "", ErrInvalidFileName + } + return filepath.Join(dir, baseName), nil +} diff --git a/feature/taildrop/paths.go b/feature/taildrop/paths.go index 22d01160cff8e..79dc37d8f0699 100644 --- a/feature/taildrop/paths.go +++ b/feature/taildrop/paths.go @@ -21,7 +21,7 @@ func (e *Extension) SetDirectFileRoot(root string) { // SetFileOps sets the platform specific file operations. This is used // to call Android's Storage Access Framework APIs. func (e *Extension) SetFileOps(fileOps FileOps) { - e.FileOps = fileOps + e.fileOps = fileOps } func (e *Extension) setPlatformDefaultDirectFileRoot() { diff --git a/feature/taildrop/peerapi_test.go b/feature/taildrop/peerapi_test.go index 1a003b6eddca7..6339973544453 100644 --- a/feature/taildrop/peerapi_test.go +++ b/feature/taildrop/peerapi_test.go @@ -24,6 +24,7 @@ import ( "tailscale.com/tstest" "tailscale.com/tstime" "tailscale.com/types/logger" + "tailscale.com/util/must" ) // peerAPIHandler serves the PeerAPI for a source specific client. 
@@ -93,7 +94,16 @@ func bodyContains(sub string) check {
 
 func fileHasSize(name string, size int) check {
	return func(t *testing.T, e *peerAPITestEnv) {
-		root := e.taildrop.Dir()
+		fsImpl, ok := e.taildrop.opts.fileOps.(fsFileOps)
+		if !ok {
+			t.Skip("fileHasSize only supported on fsFileOps backend")
+			return
+		}
+		root := fsImpl.rootDir
+		if root == "" {
+			t.Errorf("no rootdir; can't check whether %q has size %v", name, size)
+			return
+		}
 		if root == "" {
 			t.Errorf("no rootdir; can't check whether %q has size %v", name, size)
 			return
@@ -109,12 +119,12 @@ func fileHasSize(name string, size int) check {
 
 func fileHasContents(name string, want string) check {
 	return func(t *testing.T, e *peerAPITestEnv) {
-		root := e.taildrop.Dir()
-		if root == "" {
-			t.Errorf("no rootdir; can't check contents of %q", name)
+		fsImpl, ok := e.taildrop.opts.fileOps.(fsFileOps)
+		if !ok {
+			t.Skip("fileHasContents only supported on fsFileOps backend")
 			return
 		}
-		path := filepath.Join(root, name)
+		path := filepath.Join(fsImpl.rootDir, name)
 		got, err := os.ReadFile(path)
 		if err != nil {
 			t.Errorf("fileHasContents: %v", err)
@@ -172,9 +182,10 @@ func TestHandlePeerAPI(t *testing.T) {
 			reqs: []*http.Request{httptest.NewRequest("PUT", "/v0/put/foo", nil)},
 			checks: checks(
 				httpStatus(http.StatusForbidden),
-				bodyContains("Taildrop disabled; no storage directory"),
+				bodyContains("Taildrop disabled"),
 			),
 		},
+
 		{
 			name:     "bad_method",
 			isSelf:   true,
@@ -471,14 +482,18 @@ func TestHandlePeerAPI(t *testing.T) {
 				selfNode.CapMap = tailcfg.NodeCapMap{tailcfg.CapabilityDebug: nil}
 			}
 			var rootDir string
+			var fo FileOps
 			if !tt.omitRoot {
-				rootDir = t.TempDir()
+				var err error
+				if fo, err = newFileOps(t.TempDir()); err != nil {
+					t.Fatalf("newFileOps: %v", err)
+				}
 			}
 			var e peerAPITestEnv
 			e.taildrop = managerOptions{
-				Logf: e.logBuf.Logf,
-				Dir:  rootDir,
+				Logf:    e.logBuf.Logf,
+				fileOps: fo,
			}.New()
 
 			ext := &fakeExtension{
@@ -490,9 +505,7 @@ func TestHandlePeerAPI(t *testing.T) {
 			e.ph = &peerAPIHandler{
 				isSelf:   tt.isSelf,
 				selfNode: selfNode.View(),
-				peerNode: (&tailcfg.Node{
-					ComputedName: "some-peer-name",
-				}).View(),
+				peerNode: (&tailcfg.Node{ComputedName: "some-peer-name"}).View(),
 			}
 			for _, req := range tt.reqs {
 				e.rr = httptest.NewRecorder()
@@ -526,8 +539,8 @@ func TestHandlePeerAPI(t *testing.T) {
 func TestFileDeleteRace(t *testing.T) {
 	dir := t.TempDir()
 	taildropMgr := managerOptions{
-		Logf: t.Logf,
-		Dir:  dir,
+		Logf:    t.Logf,
+		fileOps: must.Get(newFileOps(dir)),
 	}.New()
 
 	ph := &peerAPIHandler{
diff --git a/feature/taildrop/resume.go b/feature/taildrop/resume.go
index 211a1ff6b68dd..20ef527a6da55 100644
--- a/feature/taildrop/resume.go
+++ b/feature/taildrop/resume.go
@@ -9,7 +9,6 @@ import (
 	"encoding/hex"
 	"fmt"
 	"io"
-	"io/fs"
 	"os"
 	"strings"
 )
@@ -51,19 +50,21 @@ func (cs *checksum) UnmarshalText(b []byte) error {
 
 // PartialFiles returns a list of partial files in [Handler.Dir]
 // that were sent (or is actively being sent) by the provided id.
-func (m *manager) PartialFiles(id clientID) (ret []string, err error) {
-	if m == nil || m.opts.Dir == "" {
+func (m *manager) PartialFiles(id clientID) ([]string, error) {
+	if m == nil || m.opts.fileOps == nil {
 		return nil, ErrNoTaildrop
 	}
-	suffix := id.partialSuffix()
-	if err := rangeDir(m.opts.Dir, func(de fs.DirEntry) bool {
-		if name := de.Name(); strings.HasSuffix(name, suffix) {
-			ret = append(ret, name)
+	suffix := id.partialSuffix()
+	files, err := m.opts.fileOps.ListFiles()
+	if err != nil {
+		return nil, redactError(err)
+	}
+	var ret []string
+	for _, filename := range files {
+		if strings.HasSuffix(filename, suffix) {
+			ret = append(ret, filename)
 		}
-		return true
-	}); err != nil {
-		return ret, redactError(err)
 	}
 	return ret, nil
 }
@@ -73,17 +74,13 @@ func (m *manager) PartialFiles(id clientID) (ret []string, err error) {
 
 // It returns (BlockChecksum{}, io.EOF) when the stream is complete.
 // It is the caller's responsibility to call close.
func (m *manager) HashPartialFile(id clientID, baseName string) (next func() (blockChecksum, error), close func() error, err error) { - if m == nil || m.opts.Dir == "" { + if m == nil || m.opts.fileOps == nil { return nil, nil, ErrNoTaildrop } noopNext := func() (blockChecksum, error) { return blockChecksum{}, io.EOF } noopClose := func() error { return nil } - dstFile, err := joinDir(m.opts.Dir, baseName) - if err != nil { - return nil, nil, err - } - f, err := os.Open(dstFile + id.partialSuffix()) + f, err := m.opts.fileOps.OpenReader(baseName + id.partialSuffix()) if err != nil { if os.IsNotExist(err) { return noopNext, noopClose, nil diff --git a/feature/taildrop/resume_test.go b/feature/taildrop/resume_test.go index dac3c657bfb58..4e59d401dcc53 100644 --- a/feature/taildrop/resume_test.go +++ b/feature/taildrop/resume_test.go @@ -8,6 +8,7 @@ import ( "io" "math/rand" "os" + "path/filepath" "testing" "testing/iotest" @@ -19,7 +20,9 @@ func TestResume(t *testing.T) { defer func() { blockSize = oldBlockSize }() blockSize = 256 - m := managerOptions{Logf: t.Logf, Dir: t.TempDir()}.New() + dir := t.TempDir() + + m := managerOptions{Logf: t.Logf, fileOps: must.Get(newFileOps(dir))}.New() defer m.Shutdown() rn := rand.New(rand.NewSource(0)) @@ -37,7 +40,7 @@ func TestResume(t *testing.T) { must.Do(close()) // Windows wants the file handle to be closed to rename it. 
must.Get(m.PutFile("", "foo", r, offset, -1)) - got := must.Get(os.ReadFile(must.Get(joinDir(m.opts.Dir, "foo")))) + got := must.Get(os.ReadFile(filepath.Join(dir, "foo"))) if !bytes.Equal(got, want) { t.Errorf("content mismatches") } @@ -66,7 +69,7 @@ func TestResume(t *testing.T) { t.Fatalf("too many iterations to complete the test") } } - got := must.Get(os.ReadFile(must.Get(joinDir(m.opts.Dir, "bar")))) + got := must.Get(os.ReadFile(filepath.Join(dir, "bar"))) if !bytes.Equal(got, want) { t.Errorf("content mismatches") } diff --git a/feature/taildrop/retrieve.go b/feature/taildrop/retrieve.go index 6fb97519363bc..b048a1b3b5f9d 100644 --- a/feature/taildrop/retrieve.go +++ b/feature/taildrop/retrieve.go @@ -9,19 +9,19 @@ import ( "io" "io/fs" "os" - "path/filepath" "runtime" "sort" "time" "tailscale.com/client/tailscale/apitype" "tailscale.com/logtail/backoff" + "tailscale.com/util/set" ) // HasFilesWaiting reports whether any files are buffered in [Handler.Dir]. // This always returns false when [Handler.DirectFileMode] is false. -func (m *manager) HasFilesWaiting() (has bool) { - if m == nil || m.opts.Dir == "" || m.opts.DirectFileMode { +func (m *manager) HasFilesWaiting() bool { + if m == nil || m.opts.fileOps == nil || m.opts.DirectFileMode { return false } @@ -30,63 +30,66 @@ func (m *manager) HasFilesWaiting() (has bool) { // has-files-or-not values as the macOS/iOS client might // in the future use+delete the files directly. So only // keep this negative cache. - totalReceived := m.totalReceived.Load() - if totalReceived == m.emptySince.Load() { + total := m.totalReceived.Load() + if total == m.emptySince.Load() { return false } - // Check whether there is at least one one waiting file. 
- err := rangeDir(m.opts.Dir, func(de fs.DirEntry) bool { - name := de.Name() - if isPartialOrDeleted(name) || !de.Type().IsRegular() { - return true + files, err := m.opts.fileOps.ListFiles() + if err != nil { + return false + } + + // Build a set of filenames present in Dir + fileSet := set.Of(files...) + + for _, filename := range files { + if isPartialOrDeleted(filename) { + continue } - _, err := os.Stat(filepath.Join(m.opts.Dir, name+deletedSuffix)) - if os.IsNotExist(err) { - has = true - return false + if fileSet.Contains(filename + deletedSuffix) { + continue // already handled } + // Found at least one downloadable file return true - }) - - // If there are no more waiting files, record totalReceived as emptySince - // so that we can short-circuit the expensive directory traversal - // if no files have been received after the start of this call. - if err == nil && !has { - m.emptySince.Store(totalReceived) } - return has + + // No waiting files → update negative‑result cache + m.emptySince.Store(total) + return false } // WaitingFiles returns the list of files that have been sent by a // peer that are waiting in [Handler.Dir]. // This always returns nil when [Handler.DirectFileMode] is false. 
-func (m *manager) WaitingFiles() (ret []apitype.WaitingFile, err error) { - if m == nil || m.opts.Dir == "" { +func (m *manager) WaitingFiles() ([]apitype.WaitingFile, error) { + if m == nil || m.opts.fileOps == nil { return nil, ErrNoTaildrop } if m.opts.DirectFileMode { return nil, nil } - if err := rangeDir(m.opts.Dir, func(de fs.DirEntry) bool { - name := de.Name() - if isPartialOrDeleted(name) || !de.Type().IsRegular() { - return true + names, err := m.opts.fileOps.ListFiles() + if err != nil { + return nil, redactError(err) + } + var ret []apitype.WaitingFile + for _, name := range names { + if isPartialOrDeleted(name) { + continue } - _, err := os.Stat(filepath.Join(m.opts.Dir, name+deletedSuffix)) - if os.IsNotExist(err) { - fi, err := de.Info() - if err != nil { - return true - } - ret = append(ret, apitype.WaitingFile{ - Name: filepath.Base(name), - Size: fi.Size(), - }) + // A corresponding .deleted marker means the file was already handled. + if _, err := m.opts.fileOps.Stat(name + deletedSuffix); err == nil { + continue } - return true - }); err != nil { - return nil, redactError(err) + fi, err := m.opts.fileOps.Stat(name) + if err != nil { + continue + } + ret = append(ret, apitype.WaitingFile{ + Name: name, + Size: fi.Size(), + }) } sort.Slice(ret, func(i, j int) bool { return ret[i].Name < ret[j].Name }) return ret, nil @@ -95,21 +98,18 @@ func (m *manager) WaitingFiles() (ret []apitype.WaitingFile, err error) { // DeleteFile deletes a file of the given baseName from [Handler.Dir]. // This method is only allowed when [Handler.DirectFileMode] is false. 
 func (m *manager) DeleteFile(baseName string) error {
-	if m == nil || m.opts.Dir == "" {
+	if m == nil || m.opts.fileOps == nil {
 		return ErrNoTaildrop
 	}
 	if m.opts.DirectFileMode {
 		return errors.New("deletes not allowed in direct mode")
 	}
-	path, err := joinDir(m.opts.Dir, baseName)
-	if err != nil {
-		return err
-	}
+
 	var bo *backoff.Backoff
 	logf := m.opts.Logf
 	t0 := m.opts.Clock.Now()
 	for {
-		err := os.Remove(path)
+		err := m.opts.fileOps.Remove(baseName)
 		if err != nil && !os.IsNotExist(err) {
 			err = redactError(err)
 			// Put a retry loop around deletes on Windows.
@@ -129,7 +129,7 @@ func (m *manager) DeleteFile(baseName string) error {
 			bo.BackOff(context.Background(), err)
 			continue
 		}
-		if err := touchFile(path + deletedSuffix); err != nil {
+		if err := m.touchFile(baseName + deletedSuffix); err != nil {
 			logf("peerapi: failed to leave deleted marker: %v", err)
 		}
 		m.deleter.Insert(baseName + deletedSuffix)
@@ -141,35 +141,31 @@ func (m *manager) DeleteFile(baseName string) error {
 	}
 }
 
-func touchFile(path string) error {
-	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
+func (m *manager) touchFile(name string) error {
+	wc, _, err := m.opts.fileOps.OpenWriter(name, /* offset= */ 0, 0666)
 	if err != nil {
 		return redactError(err)
 	}
-	return f.Close()
+	return wc.Close()
 }
 
 // OpenFile opens a file of the given baseName from [Handler.Dir].
 // This method is only allowed when [Handler.DirectFileMode] is false.
func (m *manager) OpenFile(baseName string) (rc io.ReadCloser, size int64, err error) { - if m == nil || m.opts.Dir == "" { + if m == nil || m.opts.fileOps == nil { return nil, 0, ErrNoTaildrop } if m.opts.DirectFileMode { return nil, 0, errors.New("opens not allowed in direct mode") } - path, err := joinDir(m.opts.Dir, baseName) - if err != nil { - return nil, 0, err - } - if _, err := os.Stat(path + deletedSuffix); err == nil { - return nil, 0, redactError(&fs.PathError{Op: "open", Path: path, Err: fs.ErrNotExist}) + if _, err := m.opts.fileOps.Stat(baseName + deletedSuffix); err == nil { + return nil, 0, redactError(&fs.PathError{Op: "open", Path: baseName, Err: fs.ErrNotExist}) } - f, err := os.Open(path) + f, err := m.opts.fileOps.OpenReader(baseName) if err != nil { return nil, 0, redactError(err) } - fi, err := f.Stat() + fi, err := m.opts.fileOps.Stat(baseName) if err != nil { f.Close() return nil, 0, redactError(err) diff --git a/feature/taildrop/send.go b/feature/taildrop/send.go index 59a1701da6f0d..32ba5f6f0d644 100644 --- a/feature/taildrop/send.go +++ b/feature/taildrop/send.go @@ -4,11 +4,8 @@ package taildrop import ( - "crypto/sha256" "fmt" "io" - "os" - "path/filepath" "sync" "time" @@ -73,9 +70,10 @@ func (f *incomingFile) Write(p []byte) (n int, err error) { // specific partial file. This allows the client to determine whether to resume // a partial file. While resuming, PutFile may be called again with a non-zero // offset to specify where to resume receiving data at. 
-func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, length int64) (int64, error) { +func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, length int64) (fileLength int64, err error) { + switch { - case m == nil || m.opts.Dir == "": + case m == nil || m.opts.fileOps == nil: return 0, ErrNoTaildrop case !envknob.CanTaildrop(): return 0, ErrNoTaildrop @@ -83,47 +81,47 @@ func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, len return 0, ErrNotAccessible } - //Compute dstPath & avoid mid‑upload deletion - var dstPath string - if m.opts.Mode == PutModeDirect { - var err error - dstPath, err = joinDir(m.opts.Dir, baseName) + if err := validateBaseName(baseName); err != nil { + return 0, err + } + + // and make sure we don't delete it while uploading: + m.deleter.Remove(baseName) + + // Create (if not already) the partial file with read-write permissions. + partialName := baseName + id.partialSuffix() + wc, partialPath, err := m.opts.fileOps.OpenWriter(partialName, offset, 0o666) + if err != nil { + return 0, m.redactAndLogError("Create", err) + } + defer func() { + wc.Close() if err != nil { - return 0, err + m.deleter.Insert(partialName) // mark partial file for eventual deletion } - } else { - // In SAF mode, we simply use the baseName as the destination "path" - // (the actual directory is managed by SAF). - dstPath = baseName - } - m.deleter.Remove(filepath.Base(dstPath)) // avoid deleting the partial file while receiving + }() // Check whether there is an in-progress transfer for the file. 
-	partialFileKey := incomingFileKey{id, baseName}
-	inFile, loaded := m.incomingFiles.LoadOrInit(partialFileKey, func() *incomingFile {
-		return &incomingFile{
+	inFileKey := incomingFileKey{id, baseName}
+	inFile, loaded := m.incomingFiles.LoadOrInit(inFileKey, func() *incomingFile {
+		inFile := &incomingFile{
 			clock:          m.opts.Clock,
 			started:        m.opts.Clock.Now(),
 			size:           length,
 			sendFileNotify: m.opts.SendFileNotify,
 		}
+		if m.opts.DirectFileMode {
+			inFile.partialPath = partialPath
+		}
+		return inFile
 	})
 	if loaded {
 		return 0, ErrFileExists
 	}
-	defer m.incomingFiles.Delete(partialFileKey)
-
-	// Open writer & populate inFile paths
-	wc, partialPath, err := m.openWriterAndPaths(id, m.opts.Mode, inFile, baseName, dstPath, offset)
-	if err != nil {
-		return 0, m.redactAndLogError("Create", err)
-	}
-	defer func() {
-		wc.Close()
-		if err != nil {
-			m.deleter.Insert(filepath.Base(partialPath)) // mark partial file for eventual deletion
-		}
-	}()
+	defer m.incomingFiles.Delete(inFileKey)
+
+	// Attach the writer only after confirming no transfer was already in flight.
+	inFile.w = wc
 
 	// Record that we have started to receive at least one file.
// This is used by the deleter upon a cold-start to scan the directory @@ -148,220 +146,26 @@ func (m *manager) PutFile(id clientID, baseName string, r io.Reader, offset, len return 0, m.redactAndLogError("Close", err) } - fileLength := offset + copyLength + fileLength = offset + copyLength inFile.mu.Lock() inFile.done = true inFile.mu.Unlock() - // Finalize rename - switch m.opts.Mode { - case PutModeDirect: - var finalDst string - finalDst, err = m.finalizeDirect(inFile, partialPath, dstPath, fileLength) - if err != nil { - return 0, m.redactAndLogError("Rename", err) - } - inFile.finalPath = finalDst - - case PutModeAndroidSAF: - if err = m.finalizeSAF(partialPath, baseName); err != nil { - return 0, m.redactAndLogError("Rename", err) - } + // 6) Finalize (rename/move) the partial into place via FileOps.Rename + finalPath, err := m.opts.fileOps.Rename(partialPath, baseName) + if err != nil { + return 0, m.redactAndLogError("Rename", err) } + inFile.finalPath = finalPath m.totalReceived.Add(1) m.opts.SendFileNotify() return fileLength, nil } -// openWriterAndPaths opens the correct writer, seeks/truncates if needed, -// and sets inFile.partialPath & inFile.finalPath for later cleanup/rename. -// The caller is responsible for closing the file on completion. 
-func (m *manager) openWriterAndPaths( - id clientID, - mode PutMode, - inFile *incomingFile, - baseName string, - dstPath string, - offset int64, -) (wc io.WriteCloser, partialPath string, err error) { - switch mode { - - case PutModeDirect: - partialPath = dstPath + id.partialSuffix() - f, err := os.OpenFile(partialPath, os.O_CREATE|os.O_RDWR, 0o666) - if err != nil { - return nil, "", m.redactAndLogError("Create", err) - } - if offset != 0 { - curr, err := f.Seek(0, io.SeekEnd) - if err != nil { - f.Close() - return nil, "", m.redactAndLogError("Seek", err) - } - if offset < 0 || offset > curr { - f.Close() - return nil, "", m.redactAndLogError("Seek", fmt.Errorf("offset %d out of range", offset)) - } - if _, err := f.Seek(offset, io.SeekStart); err != nil { - f.Close() - return nil, "", m.redactAndLogError("Seek", err) - } - if err := f.Truncate(offset); err != nil { - f.Close() - return nil, "", m.redactAndLogError("Truncate", err) - } - } - inFile.w = f - wc = f - inFile.partialPath = partialPath - inFile.finalPath = dstPath - return wc, partialPath, nil - - case PutModeAndroidSAF: - if m.opts.FileOps == nil { - return nil, "", m.redactAndLogError("Create (SAF)", fmt.Errorf("missing FileOps")) - } - writer, uri, err := m.opts.FileOps.OpenFileWriter(baseName) - if err != nil { - return nil, "", m.redactAndLogError("Create (SAF)", fmt.Errorf("failed to open file for writing via SAF")) - } - if writer == nil || uri == "" { - return nil, "", fmt.Errorf("invalid SAF writer or URI") - } - // SAF mode does not support resuming, so enforce offset == 0. 
- if offset != 0 { - writer.Close() - return nil, "", m.redactAndLogError("Seek", fmt.Errorf("resuming is not supported in SAF mode")) - } - inFile.w = writer - wc = writer - partialPath = uri - inFile.partialPath = uri - inFile.finalPath = baseName - return wc, partialPath, nil - - default: - return nil, "", fmt.Errorf("unsupported PutMode: %v", mode) - } -} - -// finalizeDirect atomically renames or dedups the partial file, retrying -// under new names up to 10 times. It returns the final path that succeeded. -func (m *manager) finalizeDirect( - inFile *incomingFile, - partialPath string, - initialDst string, - fileLength int64, -) (string, error) { - var ( - once sync.Once - cachedSum [sha256.Size]byte - cacheErr error - computeSum = func() ([sha256.Size]byte, error) { - once.Do(func() { cachedSum, cacheErr = sha256File(partialPath) }) - return cachedSum, cacheErr - } - ) - - dstPath := initialDst - const maxRetries = 10 - for i := 0; i < maxRetries; i++ { - // Atomically rename the partial file as the destination file if it doesn't exist. - // Otherwise, it returns the length of the current destination file. - // The operation is atomic. - lengthOnDisk, err := func() (int64, error) { - m.renameMu.Lock() - defer m.renameMu.Unlock() - fi, statErr := os.Stat(dstPath) - if os.IsNotExist(statErr) { - // dst missing → rename partial into place - return -1, os.Rename(partialPath, dstPath) - } - if statErr != nil { - return -1, statErr - } - return fi.Size(), nil - }() - if err != nil { - return "", err - } - if lengthOnDisk < 0 { - // successfully moved - inFile.finalPath = dstPath - return dstPath, nil - } - - // Avoid the final rename if a destination file has the same contents. - // - // Note: this is best effort and copying files from iOS from the Media Library - // results in processing on the iOS side which means the size and shas of the - // same file can be different. 
- if lengthOnDisk == fileLength { - partSum, err := computeSum() - if err != nil { - return "", err - } - dstSum, err := sha256File(dstPath) - if err != nil { - return "", err - } - if partSum == dstSum { - // same content → drop the partial - if err := os.Remove(partialPath); err != nil { - return "", err - } - inFile.finalPath = dstPath - return dstPath, nil - } - } - - // Choose a new destination filename and try again. - dstPath = nextFilename(dstPath) - } - - return "", fmt.Errorf("too many retries trying to rename a partial file %q", initialDst) -} - -// finalizeSAF retries RenamePartialFile up to 10 times, generating a new -// name on each failure until the SAF URI changes. -func (m *manager) finalizeSAF( - partialPath, finalName string, -) error { - if m.opts.FileOps == nil { - return fmt.Errorf("missing FileOps for SAF finalize") - } - const maxTries = 10 - name := finalName - for i := 0; i < maxTries; i++ { - newURI, err := m.opts.FileOps.RenamePartialFile(partialPath, m.opts.Dir, name) - if err != nil { - return err - } - if newURI != "" && newURI != name { - return nil - } - name = nextFilename(name) - } - return fmt.Errorf("failed to finalize SAF file after %d retries", maxTries) -} - func (m *manager) redactAndLogError(stage string, err error) error { err = redactError(err) m.opts.Logf("put %s error: %v", stage, err) return err } - -func sha256File(file string) (out [sha256.Size]byte, err error) { - h := sha256.New() - f, err := os.Open(file) - if err != nil { - return out, err - } - defer f.Close() - if _, err := io.Copy(h, f); err != nil { - return out, err - } - return [sha256.Size]byte(h.Sum(nil)), nil -} diff --git a/feature/taildrop/send_test.go b/feature/taildrop/send_test.go index 8edb704172fc5..9ffa5fccc0a36 100644 --- a/feature/taildrop/send_test.go +++ b/feature/taildrop/send_test.go @@ -4,123 +4,64 @@ package taildrop import ( - "bytes" - "fmt" - "io" "os" "path/filepath" + "strings" "testing" "tailscale.com/tstime" + 
"tailscale.com/util/must" ) -// nopWriteCloser is a no-op io.WriteCloser wrapping a bytes.Buffer. -type nopWriteCloser struct{ *bytes.Buffer } - -func (nwc nopWriteCloser) Close() error { return nil } - -// mockFileOps implements just enough of the FileOps interface for SAF tests. -type mockFileOps struct { - writes *bytes.Buffer - renameOK bool -} - -func (m *mockFileOps) OpenFileWriter(name string) (io.WriteCloser, string, error) { - m.writes = new(bytes.Buffer) - return nopWriteCloser{m.writes}, "uri://" + name + ".partial", nil -} - -func (m *mockFileOps) RenamePartialFile(partialPath, dir, finalName string) (string, error) { - if !m.renameOK { - m.renameOK = true - return "uri://" + finalName, nil - } - return "", io.ErrUnexpectedEOF -} - func TestPutFile(t *testing.T) { const content = "hello, world" tests := []struct { - name string - mode PutMode - setup func(t *testing.T) (*manager, string, *mockFileOps) - wantFile string + name string + directFileMode bool }{ - { - name: "PutModeDirect", - mode: PutModeDirect, - setup: func(t *testing.T) (*manager, string, *mockFileOps) { - dir := t.TempDir() - opts := managerOptions{ - Logf: t.Logf, - Clock: tstime.DefaultClock{}, - State: nil, - Dir: dir, - Mode: PutModeDirect, - DirectFileMode: true, - SendFileNotify: func() {}, - } - mgr := opts.New() - return mgr, dir, nil - }, - wantFile: "file.txt", - }, - { - name: "PutModeAndroidSAF", - mode: PutModeAndroidSAF, - setup: func(t *testing.T) (*manager, string, *mockFileOps) { - // SAF still needs a non-empty Dir to pass the guard. 
- dir := t.TempDir() - mops := &mockFileOps{} - opts := managerOptions{ - Logf: t.Logf, - Clock: tstime.DefaultClock{}, - State: nil, - Dir: dir, - Mode: PutModeAndroidSAF, - FileOps: mops, - DirectFileMode: true, - SendFileNotify: func() {}, - } - mgr := opts.New() - return mgr, dir, mops - }, - wantFile: "file.txt", - }, + {"DirectFileMode", true}, + {"NonDirectFileMode", false}, } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - mgr, dir, mops := tc.setup(t) - id := clientID(fmt.Sprint(0)) - reader := bytes.NewReader([]byte(content)) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + mgr := managerOptions{ + Logf: t.Logf, + Clock: tstime.DefaultClock{}, + State: nil, + fileOps: must.Get(newFileOps(dir)), + DirectFileMode: tt.directFileMode, + SendFileNotify: func() {}, + }.New() - n, err := mgr.PutFile(id, "file.txt", reader, 0, int64(len(content))) + id := clientID("0") + n, err := mgr.PutFile(id, "file.txt", strings.NewReader(content), 0, int64(len(content))) if err != nil { - t.Fatalf("PutFile(%s) error: %v", tc.name, err) + t.Fatalf("PutFile error: %v", err) } if n != int64(len(content)) { t.Errorf("wrote %d bytes; want %d", n, len(content)) } - switch tc.mode { - case PutModeDirect: - path := filepath.Join(dir, tc.wantFile) - data, err := os.ReadFile(path) - if err != nil { - t.Fatalf("ReadFile error: %v", err) - } - if got := string(data); got != content { - t.Errorf("file contents = %q; want %q", got, content) - } + path := filepath.Join(dir, "file.txt") - case PutModeAndroidSAF: - if mops.writes == nil { - t.Fatal("SAF writer was never created") - } - if got := mops.writes.String(); got != content { - t.Errorf("SAF writes = %q; want %q", got, content) + got, err := os.ReadFile(path) + if err != nil { + t.Fatalf("ReadFile %q: %v", path, err) + } + if string(got) != content { + t.Errorf("file contents = %q; want %q", string(got), content) + } + + entries, err := os.ReadDir(dir) + if err != nil { + 
t.Fatal(err) + } + for _, entry := range entries { + if strings.Contains(entry.Name(), ".partial") { + t.Errorf("unexpected partial file left behind: %s", entry.Name()) } } }) diff --git a/feature/taildrop/taildrop.go b/feature/taildrop/taildrop.go index 2dfa415bbf0cc..6c3deaed1b538 100644 --- a/feature/taildrop/taildrop.go +++ b/feature/taildrop/taildrop.go @@ -12,8 +12,6 @@ package taildrop import ( "errors" "hash/adler32" - "io" - "io/fs" "os" "path" "path/filepath" @@ -21,7 +19,6 @@ import ( "sort" "strconv" "strings" - "sync" "sync/atomic" "unicode" "unicode/utf8" @@ -72,11 +69,6 @@ type managerOptions struct { Clock tstime.DefaultClock // may be nil State ipn.StateStore // may be nil - // Dir is the directory to store received files. - // This main either be the final location for the files - // or just a temporary staging directory (see DirectFileMode). - Dir string - // DirectFileMode reports whether we are writing files // directly to a download directory, rather than writing them to // a temporary staging directory. @@ -91,9 +83,10 @@ type managerOptions struct { // copy them out, and then delete them. DirectFileMode bool - FileOps FileOps - - Mode PutMode + // FileOps abstracts platform-specific file operations needed for file transfers. + // Android's implementation uses the Storage Access Framework, and other platforms + // use fsFileOps. + fileOps FileOps // SendFileNotify is called periodically while a file is actively // receiving the contents for the file. There is a final call @@ -111,9 +104,6 @@ type manager struct { // deleter managers asynchronous deletion of files. deleter fileDeleter - // renameMu is used to protect os.Rename calls so that they are atomic. - renameMu sync.Mutex - // totalReceived counts the cumulative total of received files. totalReceived atomic.Int64 // emptySince specifies that there were no waiting files @@ -137,11 +127,6 @@ func (opts managerOptions) New() *manager { return m } -// Dir returns the directory. 
-func (m *manager) Dir() string { - return m.opts.Dir -} - // Shutdown shuts down the Manager. // It blocks until all spawned goroutines have stopped running. func (m *manager) Shutdown() { @@ -172,57 +157,29 @@ func isPartialOrDeleted(s string) bool { return strings.HasSuffix(s, deletedSuffix) || strings.HasSuffix(s, partialSuffix) } -func joinDir(dir, baseName string) (fullPath string, err error) { - if !utf8.ValidString(baseName) { - return "", ErrInvalidFileName - } - if strings.TrimSpace(baseName) != baseName { - return "", ErrInvalidFileName - } - if len(baseName) > 255 { - return "", ErrInvalidFileName +func validateBaseName(name string) error { + if !utf8.ValidString(name) || + strings.TrimSpace(name) != name || + len(name) > 255 { + return ErrInvalidFileName } // TODO: validate unicode normalization form too? Varies by platform. - clean := path.Clean(baseName) - if clean != baseName || - clean == "." || clean == ".." || - isPartialOrDeleted(clean) { - return "", ErrInvalidFileName + clean := path.Clean(name) + if clean != name || clean == "." || clean == ".." { + return ErrInvalidFileName } - for _, r := range baseName { + if isPartialOrDeleted(name) { + return ErrInvalidFileName + } + for _, r := range name { if !validFilenameRune(r) { - return "", ErrInvalidFileName + return ErrInvalidFileName } } - if !filepath.IsLocal(baseName) { - return "", ErrInvalidFileName - } - return filepath.Join(dir, baseName), nil -} - -// rangeDir iterates over the contents of a directory, calling fn for each entry. -// It continues iterating while fn returns true. -// It reports the number of entries seen. 
-func rangeDir(dir string, fn func(fs.DirEntry) bool) error { - f, err := os.Open(dir) - if err != nil { - return err - } - defer f.Close() - for { - des, err := f.ReadDir(10) - for _, de := range des { - if !fn(de) { - return nil - } - } - if err != nil { - if err == io.EOF { - return nil - } - return err - } + if !filepath.IsLocal(name) { + return ErrInvalidFileName } + return nil } // IncomingFiles returns a list of active incoming files. diff --git a/feature/taildrop/taildrop_test.go b/feature/taildrop/taildrop_test.go index da0bd2f430579..0d77273f0aab0 100644 --- a/feature/taildrop/taildrop_test.go +++ b/feature/taildrop/taildrop_test.go @@ -4,40 +4,10 @@ package taildrop import ( - "path/filepath" "strings" "testing" ) -func TestJoinDir(t *testing.T) { - dir := t.TempDir() - tests := []struct { - in string - want string // just relative to m.Dir - wantOk bool - }{ - {"", "", false}, - {"foo", "foo", true}, - {"./foo", "", false}, - {"../foo", "", false}, - {"foo/bar", "", false}, - {"😋", "😋", true}, - {"\xde\xad\xbe\xef", "", false}, - {"foo.partial", "", false}, - {"foo.deleted", "", false}, - {strings.Repeat("a", 1024), "", false}, - {"foo:bar", "", false}, - } - for _, tt := range tests { - got, gotErr := joinDir(dir, tt.in) - got, _ = filepath.Rel(dir, got) - gotOk := gotErr == nil - if got != tt.want || gotOk != tt.wantOk { - t.Errorf("joinDir(%q) = (%v, %v), want (%v, %v)", tt.in, got, gotOk, tt.want, tt.wantOk) - } - } -} - func TestNextFilename(t *testing.T) { tests := []struct { in string @@ -67,3 +37,29 @@ func TestNextFilename(t *testing.T) { } } } + +func TestValidateBaseName(t *testing.T) { + tests := []struct { + in string + wantOk bool + }{ + {"", false}, + {"foo", true}, + {"./foo", false}, + {"../foo", false}, + {"foo/bar", false}, + {"😋", true}, + {"\xde\xad\xbe\xef", false}, + {"foo.partial", false}, + {"foo.deleted", false}, + {strings.Repeat("a", 1024), false}, + {"foo:bar", false}, + } + for _, tt := range tests { + err := 
validateBaseName(tt.in) + gotOk := err == nil + if gotOk != tt.wantOk { + t.Errorf("validateBaseName(%q) = %v, wantOk = %v", tt.in, err, tt.wantOk) + } + } +} From 0f15e4419683f9a5c0f4048ba9216759de553b20 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 1 Aug 2025 15:17:57 -0700 Subject: [PATCH 0182/1093] Makefile: sort make commands and fix printing newlines Updates #cleanup Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 9fffdc48a74a9..78812d57c0990 100644 --- a/Makefile +++ b/Makefile @@ -138,8 +138,10 @@ generate: ## Generate code ./tool/go generate ./... help: ## Show this help - @echo "\nSpecify a command. The choices are:\n" - @grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}' + @echo "" + @echo "Specify a command. The choices are:" + @echo "" + @grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}' @echo "" .PHONY: help From 834630fedfe4e8ac3e675477c806608f2606e664 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 1 Aug 2025 14:16:00 -0700 Subject: [PATCH 0183/1093] cmd/tailscale: add systray subcommand on Linux builds This will start including the sytray app in unstable builds for Linux, unless the `ts_omit_systray` build flag is specified. If we decide not to include it in the v1.88 release, we can pull it back out or restrict it to unstable builds. 
Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- build_dist.sh | 2 +- cmd/tailscale/cli/cli.go | 1 + cmd/tailscale/cli/systray.go | 24 ++++++++++++++++++++++++ cmd/tailscale/cli/systray_omit.go | 31 +++++++++++++++++++++++++++++++ cmd/tailscale/depaware.txt | 27 ++++++++++++++++++++++++--- 5 files changed, 81 insertions(+), 4 deletions(-) create mode 100644 cmd/tailscale/cli/systray.go create mode 100644 cmd/tailscale/cli/systray_omit.go diff --git a/build_dist.sh b/build_dist.sh index fed37c2646175..0fc123ade3824 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_taildrop,ts_omit_tpm" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index bdfc7af423bf4..72924350ca7eb 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -261,6 +261,7 @@ change in the future. 
driveCmd, idTokenCmd, configureHostCmd(), + systrayCmd, ), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { diff --git a/cmd/tailscale/cli/systray.go b/cmd/tailscale/cli/systray.go new file mode 100644 index 0000000000000..184c85360f97a --- /dev/null +++ b/cmd/tailscale/cli/systray.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_systray + +package cli + +import ( + "context" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/systray" +) + +var systrayCmd = &ffcli.Command{ + Name: "systray", + ShortUsage: "tailscale systray", + ShortHelp: "Run a systray application to manage Tailscale", + Exec: func(_ context.Context, _ []string) error { + // TODO(will): pass localClient to menu to use the global --socket flag + new(systray.Menu).Run() + return nil + }, +} diff --git a/cmd/tailscale/cli/systray_omit.go b/cmd/tailscale/cli/systray_omit.go new file mode 100644 index 0000000000000..8d93fd84b52a9 --- /dev/null +++ b/cmd/tailscale/cli/systray_omit.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux || ts_omit_systray + +package cli + +import ( + "context" + "fmt" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" +) + +// TODO(will): update URL to KB article when available +var systrayHelp = strings.TrimSpace(` +The Tailscale systray app is not included in this client build. 
+To run it manually, see https://github.com/tailscale/tailscale/tree/main/cmd/systray +`) + +var systrayCmd = &ffcli.Command{ + Name: "systray", + ShortUsage: "tailscale systray", + ShortHelp: "Not available in this client build", + LongHelp: hidden + systrayHelp, + Exec: func(_ context.Context, _ []string) error { + fmt.Println(systrayHelp) + return nil + }, +} diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index e44e20e8c92b2..020479ebb1867 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -2,9 +2,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 + L fyne.io/systray from tailscale.com/client/systray + L fyne.io/systray/internal/generated/menu from fyne.io/systray + L fyne.io/systray/internal/generated/notifier from fyne.io/systray + L github.com/Kodeworks/golang-image-ico from tailscale.com/client/systray W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + L github.com/atotto/clipboard from tailscale.com/client/systray github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -12,6 +17,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+ W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode + L github.com/fogleman/gg from tailscale.com/client/systray github.com/fxamacker/cbor/v2 from tailscale.com/tka 
github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ @@ -19,6 +25,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + L 💣 github.com/godbus/dbus/v5 from fyne.io/systray+ + L github.com/godbus/dbus/v5/introspect from fyne.io/systray+ + L github.com/godbus/dbus/v5/prop from fyne.io/systray + L github.com/golang/freetype/raster from github.com/fogleman/gg+ + L github.com/golang/freetype/truetype from github.com/fogleman/gg github.com/golang/groupcache/lru from tailscale.com/net/dnscache L github.com/google/nftables from tailscale.com/util/linuxfw L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt @@ -59,7 +70,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/web-client-prebuilt from tailscale.com/client/web - github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli + github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ @@ -73,6 +84,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/local from tailscale.com/client/tailscale+ + L tailscale.com/client/systray from tailscale.com/cmd/tailscale/cli 
tailscale.com/client/tailscale from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli @@ -178,6 +190,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/net/dnscache+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ + L tailscale.com/util/stringsx from tailscale.com/client/systray tailscale.com/util/syspolicy from tailscale.com/ipn tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ @@ -213,6 +226,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/internal/metrics+ + L golang.org/x/image/draw from github.com/fogleman/gg + L golang.org/x/image/font from github.com/fogleman/gg+ + L golang.org/x/image/font/basicfont from github.com/fogleman/gg + L golang.org/x/image/math/f64 from github.com/fogleman/gg+ + L golang.org/x/image/math/fixed from github.com/fogleman/gg+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http+ @@ -339,7 +357,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep html/template from tailscale.com/util/eventbus image from github.com/skip2/go-qrcode+ image/color from github.com/skip2/go-qrcode+ - image/png from github.com/skip2/go-qrcode + L image/draw from github.com/Kodeworks/golang-image-ico+ + L image/internal/imageutil from image/draw+ + L image/jpeg from github.com/fogleman/gg + image/png from 
github.com/skip2/go-qrcode+ internal/abi from crypto/x509/internal/macos+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -409,7 +430,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net/url from crypto/x509+ os from crypto/internal/sysrand+ os/exec from github.com/coreos/go-iptables/iptables+ - os/signal from tailscale.com/cmd/tailscale/cli + os/signal from tailscale.com/cmd/tailscale/cli+ os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ From b0018f1e7df47099ee22f3c5fcc91a4112e10523 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 4 Aug 2025 14:21:32 -0700 Subject: [PATCH 0184/1093] wgengine/magicsock: fix looksLikeInitiationMsg endianness (#16771) WireGuard message type is little-endian encoded. Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 7 ++----- wgengine/magicsock/magicsock_test.go | 16 ++++++++++++---- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 6495b13b57db5..c99d1b68f83a6 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1765,11 +1765,8 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu // looksLikeInitiationMsg returns true if b looks like a WireGuard initiation // message, otherwise it returns false. func looksLikeInitiationMsg(b []byte) bool { - if len(b) == device.MessageInitiationSize && - binary.BigEndian.Uint32(b) == device.MessageInitiationType { - return true - } - return false + return len(b) == device.MessageInitiationSize && + binary.LittleEndian.Uint32(b) == device.MessageInitiationType } // receiveIP is the shared bits of ReceiveIPv4 and ReceiveIPv6. 
diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 480faa694c70d..0d1ac9dfda0f9 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -9,6 +9,7 @@ import ( crand "crypto/rand" "crypto/tls" "encoding/binary" + "encoding/hex" "errors" "fmt" "io" @@ -3390,10 +3391,17 @@ func Test_virtualNetworkID(t *testing.T) { } func Test_looksLikeInitiationMsg(t *testing.T) { - initMsg := make([]byte, device.MessageInitiationSize) - binary.BigEndian.PutUint32(initMsg, device.MessageInitiationType) - initMsgSizeTransportType := make([]byte, device.MessageInitiationSize) - binary.BigEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) + // initMsg was captured as the first packet from a WireGuard "session" + initMsg, err := hex.DecodeString("01000000d9205f67915a500e377b409e0c3d97ca91e68654b95952de965e75df491000cce00632678cd9e8c8525556aa8daf24e6cfc44c48812bb560ff3c1c5dee061b3f833dfaa48acf13b64bd1e0027aa4d977a3721b82fd6072338702fc3193651404980ad46dae2869ba6416cc0eb38621a4140b5b918eb6402b697202adb3002a6d00000000000000000000000000000000") + if err != nil { + t.Fatal(err) + } + if len(initMsg) != device.MessageInitiationSize { + t.Fatalf("initMsg is not %d bytes long", device.MessageInitiationSize) + } + initMsgSizeTransportType := make([]byte, len(initMsg)) + copy(initMsgSizeTransportType, initMsg) + binary.LittleEndian.PutUint32(initMsgSizeTransportType, device.MessageTransportType) tests := []struct { name string b []byte From 5bb42e3018a0543467a332322f438cda98530c3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Tue, 5 Aug 2025 08:31:51 -0400 Subject: [PATCH 0185/1093] wgengine/router: rely on events for deleted IP rules (#16744) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds the eventbus to the router subsystem. The event is currently only used on linux. Also includes facilities to inject events into the bus. 
Updates #15160 Signed-off-by: Claus Lensbøl --- cmd/tailscaled/tailscaled.go | 2 +- net/netmon/netmon.go | 43 --------------------- net/netmon/netmon_linux.go | 11 ++---- util/eventbus/client.go | 2 +- util/eventbus/eventbustest/eventbustest.go | 38 +++++++++++++++++- wgengine/router/router.go | 7 +++- wgengine/router/router_android.go | 3 +- wgengine/router/router_darwin.go | 3 +- wgengine/router/router_default.go | 3 +- wgengine/router/router_freebsd.go | 3 +- wgengine/router/router_linux.go | 45 ++++++++++++++++++---- wgengine/router/router_linux_test.go | 45 +++++++++++++++------- wgengine/router/router_openbsd.go | 3 +- wgengine/router/router_plan9.go | 3 +- wgengine/router/router_windows.go | 3 +- 15 files changed, 132 insertions(+), 82 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index ab1590132ece6..06d366aa6e68e 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -800,7 +800,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo sys.NetMon.Get().SetTailscaleInterfaceName(devName) } - r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker()) + r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker(), sys.Bus.Get()) if err != nil { dev.Close() return false, fmt.Errorf("creating router: %w", err) diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index 3f825bc9797fe..b97b184d476f4 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -66,7 +66,6 @@ type Monitor struct { mu sync.Mutex // guards all following fields cbs set.HandleSet[ChangeFunc] - ruleDelCB set.HandleSet[RuleDeleteCallback] ifState *State gwValid bool // whether gw and gwSelfIP are valid gw netip.Addr // our gateway's IP @@ -224,29 +223,6 @@ func (m *Monitor) RegisterChangeCallback(callback ChangeFunc) (unregister func() } } -// RuleDeleteCallback is a callback when a Linux IP policy routing -// rule is deleted. 
The table is the table number (52, 253, 354) and -// priority is the priority order number (for Tailscale rules -// currently: 5210, 5230, 5250, 5270) -type RuleDeleteCallback func(table uint8, priority uint32) - -// RegisterRuleDeleteCallback adds callback to the set of parties to be -// notified (in their own goroutine) when a Linux ip rule is deleted. -// To remove this callback, call unregister (or close the monitor). -func (m *Monitor) RegisterRuleDeleteCallback(callback RuleDeleteCallback) (unregister func()) { - if m.static { - return func() {} - } - m.mu.Lock() - defer m.mu.Unlock() - handle := m.ruleDelCB.Add(callback) - return func() { - m.mu.Lock() - defer m.mu.Unlock() - delete(m.ruleDelCB, handle) - } -} - // Start starts the monitor. // A monitor can only be started & closed once. func (m *Monitor) Start() { @@ -359,10 +335,6 @@ func (m *Monitor) pump() { time.Sleep(time.Second) continue } - if rdm, ok := msg.(ipRuleDeletedMessage); ok { - m.notifyRuleDeleted(rdm) - continue - } if msg.ignore() { continue } @@ -370,14 +342,6 @@ func (m *Monitor) pump() { } } -func (m *Monitor) notifyRuleDeleted(rdm ipRuleDeletedMessage) { - m.mu.Lock() - defer m.mu.Unlock() - for _, cb := range m.ruleDelCB { - go cb(rdm.table, rdm.priority) - } -} - // isInterestingInterface reports whether the provided interface should be // considered when checking for network state changes. // The ips parameter should be the IPs of the provided interface. 
@@ -624,10 +588,3 @@ func (m *Monitor) checkWallTimeAdvanceLocked() bool { func (m *Monitor) resetTimeJumpedLocked() { m.timeJumped = false } - -type ipRuleDeletedMessage struct { - table uint8 - priority uint32 -} - -func (ipRuleDeletedMessage) ignore() bool { return true } diff --git a/net/netmon/netmon_linux.go b/net/netmon/netmon_linux.go index 659fcc74bb0e6..a1077c2578b14 100644 --- a/net/netmon/netmon_linux.go +++ b/net/netmon/netmon_linux.go @@ -241,18 +241,15 @@ func (c *nlConn) Receive() (message, error) { // On `ip -4 rule del pref 5210 table main`, logs: // monitor: ip rule deleted: {Family:2 DstLength:0 SrcLength:0 Tos:0 Table:254 Protocol:0 Scope:0 Type:1 Flags:0 Attributes:{Dst: Src: Gateway: OutIface:0 Priority:5210 Table:254 Mark:4294967295 Expires: Metrics: Multipath:[]}} } - c.rulesDeleted.Publish(RuleDeleted{ + rd := RuleDeleted{ Table: rmsg.Table, Priority: rmsg.Attributes.Priority, - }) - rdm := ipRuleDeletedMessage{ - table: rmsg.Table, - priority: rmsg.Attributes.Priority, } + c.rulesDeleted.Publish(rd) if debugNetlinkMessages() { - c.logf("%+v", rdm) + c.logf("%+v", rd) } - return rdm, nil + return ignoreMessage{}, nil case unix.RTM_NEWLINK, unix.RTM_DELLINK: // This is an unhandled message, but don't print an error. // See https://github.com/tailscale/tailscale/issues/6806 diff --git a/util/eventbus/client.go b/util/eventbus/client.go index f4261b13c9f45..a6266a4d8f823 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -119,7 +119,7 @@ func Subscribe[T any](c *Client) *Subscriber[T] { return s } -// Publisher returns a publisher for event type T using the given +// Publish returns a publisher for event type T using the given // client. 
func Publish[T any](c *Client) *Publisher[T] { p := newPublisher[T](c) diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index 75d430d53683e..98536ae0affc8 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -21,7 +21,7 @@ func NewBus(t *testing.T) *eventbus.Bus { return bus } -// NewTestWatcher constructs a [Watcher] that can be used to check the stream of +// NewWatcher constructs a [Watcher] that can be used to check the stream of // events generated by code under test. After construction the caller may use // [Expect] and [ExpectExactly], to verify that the desired events were captured. func NewWatcher(t *testing.T, bus *eventbus.Bus) *Watcher { @@ -201,3 +201,39 @@ func eventFilter(f any) filter { return fixup(fv.Call([]reflect.Value{args[0].Elem()})) }).Interface().(filter) } + +// Injector holds a map with [eventbus.Publisher], tied to an [eventbus.Client] +// for testing purposes. +type Injector struct { + client *eventbus.Client + publishers map[reflect.Type]any + // The value for a key is an *eventbus.Publisher[T] for the corresponding type. +} + +// NewInjector constructs an [Injector] that can be used to inject events into +// the the stream of events used by code under test. After construction the +// caller may use [Inject] to insert events into the bus. +func NewInjector(t *testing.T, b *eventbus.Bus) *Injector { + inj := &Injector{ + client: b.Client(t.Name()), + publishers: make(map[reflect.Type]any), + } + t.Cleanup(inj.client.Close) + + return inj +} + +// Inject inserts events of T onto an [eventbus.Bus]. If an [eventbus.Publisher] +// for the type does not exist, it will be initialized lazily. Calling inject is +// synchronous, and the event will as such have been published to the eventbus +// by the time the function returns. 
+func Inject[T any](inj *Injector, event T) { + eventType := reflect.TypeFor[T]() + + pub, ok := inj.publishers[eventType] + if !ok { + pub = eventbus.Publish[T](inj.client) + inj.publishers[eventType] = pub + } + pub.(*eventbus.Publisher[T]).Publish(event) +} diff --git a/wgengine/router/router.go b/wgengine/router/router.go index 42300897830d9..25d1c08a29f4d 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -14,6 +14,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/types/logger" "tailscale.com/types/preftype" + "tailscale.com/util/eventbus" ) // Router is responsible for managing the system network stack. @@ -45,9 +46,11 @@ type Router interface { // // If netMon is nil, it's not used. It's currently (2021-07-20) only // used on Linux in some situations. -func New(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func New(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, + health *health.Tracker, bus *eventbus.Bus, +) (Router, error) { logf = logger.WithPrefix(logf, "router: ") - return newUserspaceRouter(logf, tundev, netMon, health) + return newUserspaceRouter(logf, tundev, netMon, health, bus) } // CleanUp restores the system network configuration to its original state diff --git a/wgengine/router/router_android.go b/wgengine/router/router_android.go index deeccda4a7028..de680606f19cf 100644 --- a/wgengine/router/router_android.go +++ b/wgengine/router/router_android.go @@ -10,9 +10,10 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, _ *eventbus.Bus) (Router, error) { // Note, this codepath is _not_ used when building the android app // from 
github.com/tailscale/tailscale-android. The android app // constructs its own wgengine with a custom router implementation diff --git a/wgengine/router/router_darwin.go b/wgengine/router/router_darwin.go index 73e394b0465b3..ebb2615a0ed1f 100644 --- a/wgengine/router/router_darwin.go +++ b/wgengine/router/router_darwin.go @@ -8,9 +8,10 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { return newUserspaceBSDRouter(logf, tundev, netMon, health) } diff --git a/wgengine/router/router_default.go b/wgengine/router/router_default.go index 8dcbd36d0a7a2..190575973a4ee 100644 --- a/wgengine/router/router_default.go +++ b/wgengine/router/router_default.go @@ -13,9 +13,10 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, _ *eventbus.Bus) (Router, error) { return nil, fmt.Errorf("unsupported OS %q", runtime.GOOS) } diff --git a/wgengine/router/router_freebsd.go b/wgengine/router/router_freebsd.go index 40523b4fd43ec..ce4753d7dc611 100644 --- a/wgengine/router/router_freebsd.go +++ b/wgengine/router/router_freebsd.go @@ -8,6 +8,7 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) // For now this router only supports the userspace WireGuard implementations. 
@@ -15,7 +16,7 @@ import ( // Work is currently underway for an in-kernel FreeBSD implementation of wireguard // https://svnweb.freebsd.org/base?view=revision&revision=357986 -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { return newUserspaceBSDRouter(logf, tundev, netMon, health) } diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index adc54c88dad1c..2382e87cd5185 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -29,6 +29,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/opt" "tailscale.com/types/preftype" + "tailscale.com/util/eventbus" "tailscale.com/util/linuxfw" "tailscale.com/util/multierr" "tailscale.com/version/distro" @@ -48,6 +49,9 @@ type linuxRouter struct { tunname string netMon *netmon.Monitor health *health.Tracker + eventClient *eventbus.Client + ruleDeletedSub *eventbus.Subscriber[netmon.RuleDeleted] + rulesAddedPub *eventbus.Publisher[AddIPRules] unregNetMon func() addrs map[netip.Prefix]bool routes map[netip.Prefix]bool @@ -77,7 +81,7 @@ type linuxRouter struct { magicsockPortV6 uint16 } -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { tunname, err := tunDev.Name() if err != nil { return nil, err @@ -87,15 +91,16 @@ func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Moni ambientCapNetAdmin: useAmbientCaps(), } - return newUserspaceRouterAdvanced(logf, tunname, netMon, cmd, health) + return newUserspaceRouterAdvanced(logf, tunname, netMon, cmd, health, bus) } -func newUserspaceRouterAdvanced(logf 
logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health *health.Tracker) (Router, error) { +func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health *health.Tracker, bus *eventbus.Bus) (Router, error) { r := &linuxRouter{ logf: logf, tunname: tunname, netfilterMode: netfilterOff, netMon: netMon, + eventClient: bus.Client("router-linux"), health: health, cmd: cmd, @@ -103,6 +108,10 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon ipRuleFixLimiter: rate.NewLimiter(rate.Every(5*time.Second), 10), ipPolicyPrefBase: 5200, } + r.ruleDeletedSub = eventbus.Subscribe[netmon.RuleDeleted](r.eventClient) + r.rulesAddedPub = eventbus.Publish[AddIPRules](r.eventClient) + go r.consumeEventbusTopics() + if r.useIPCommand() { r.ipRuleAvailable = (cmd.run("ip", "rule") == nil) } else { @@ -145,6 +154,24 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon return r, nil } +// consumeEventbusTopics consumes events from all [linuxRouter]-relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [netmon.RuleDeleted] subscriber is closed, which is interpreted to be the +// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either +// all open or all closed). +func (r *linuxRouter) consumeEventbusTopics() { + for { + select { + case <-r.ruleDeletedSub.Done(): + return + case rulesDeleted := <-r.ruleDeletedSub.Events(): + r.onIPRuleDeleted(rulesDeleted.Table, rulesDeleted.Priority) + } + } +} + // ipCmdSupportsFwmask returns true if the system 'ip' binary supports using a // fwmark stanza with a mask specified. To our knowledge, everything except busybox // pre-1.33 supports this.
@@ -276,6 +303,10 @@ func (r *linuxRouter) fwmaskWorks() bool { return v } +// AddIPRules is used as an event signal to signify that rules have been added. +// It is added to aid testing, but could be extended if there's a reason for it. +type AddIPRules struct{} + // onIPRuleDeleted is the callback from the network monitor for when an IP // policy rule is deleted. See Issue 1591. // @@ -303,6 +334,9 @@ func (r *linuxRouter) onIPRuleDeleted(table uint8, priority uint32) { r.ruleRestorePending.Swap(false) return } + + r.rulesAddedPub.Publish(AddIPRules{}) + time.AfterFunc(rr.Delay()+250*time.Millisecond, func() { if r.ruleRestorePending.Swap(false) && !r.closed.Load() { r.logf("somebody (likely systemd-networkd) deleted ip rules; restoring Tailscale's") @@ -312,9 +346,6 @@ func (r *linuxRouter) onIPRuleDeleted(table uint8, priority uint32) { } func (r *linuxRouter) Up() error { - if r.unregNetMon == nil && r.netMon != nil { - r.unregNetMon = r.netMon.RegisterRuleDeleteCallback(r.onIPRuleDeleted) - } if err := r.setNetfilterMode(netfilterOff); err != nil { return fmt.Errorf("setting netfilter mode: %w", err) } @@ -333,6 +364,7 @@ func (r *linuxRouter) Close() error { if r.unregNetMon != nil { r.unregNetMon() } + r.eventClient.Close() if err := r.downInterface(); err != nil { return err } @@ -1276,7 +1308,6 @@ func (r *linuxRouter) justAddIPRules() error { } var errAcc error for _, family := range r.addrFamilies() { - for _, ru := range ipRules() { // Note: r is a value type here; safe to mutate it. 
ru.Family = family.netlinkInt() diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index a289fb0ac4aae..b6a5a1ac04753 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/linuxfw" "tailscale.com/version/distro" ) @@ -375,7 +376,7 @@ ip route add throw 192.168.0.0/24 table 52` + basic, fake := NewFakeOS(t) ht := new(health.Tracker) - router, err := newUserspaceRouterAdvanced(t.Logf, "tailscale0", mon, fake, ht) + router, err := newUserspaceRouterAdvanced(t.Logf, "tailscale0", mon, fake, ht, bus) router.(*linuxRouter).nfr = fake.nfr if err != nil { t.Fatalf("failed to create router: %v", err) @@ -414,7 +415,7 @@ type fakeIPTablesRunner struct { t *testing.T ipt4 map[string][]string ipt6 map[string][]string - //we always assume ipv6 and ipv6 nat are enabled when testing + // we always assume ipv6 and ipv6 nat are enabled when testing } func newIPTablesRunner(t *testing.T) linuxfw.NetfilterRunner { @@ -541,6 +542,7 @@ func (n *fakeIPTablesRunner) EnsureSNATForDst(src, dst netip.Addr) error { func (n *fakeIPTablesRunner) DNATNonTailscaleTraffic(exemptInterface string, dst netip.Addr) error { return errors.New("not implemented") } + func (n *fakeIPTablesRunner) EnsurePortMapRuleForSvc(svc, tun string, targetIP netip.Addr, pm linuxfw.PortMap) error { return errors.New("not implemented") } @@ -781,8 +783,8 @@ type fakeOS struct { ips []string routes []string rules []string - //This test tests on the router level, so we will not bother - //with using iptables or nftables, chose the simpler one. + // This test tests on the router level, so we will not bother + // with using iptables or nftables, chose the simpler one. 
nfr linuxfw.NetfilterRunner } @@ -974,7 +976,7 @@ func (lt *linuxTest) Close() error { return nil } -func newLinuxRootTest(t *testing.T) *linuxTest { +func newLinuxRootTest(t *testing.T) (*linuxTest, *eventbus.Bus) { if os.Getuid() != 0 { t.Skip("test requires root") } @@ -984,8 +986,7 @@ func newLinuxRootTest(t *testing.T) *linuxTest { logf := lt.logOutput.Logf - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) mon, err := netmon.New(bus, logger.Discard) if err != nil { @@ -995,7 +996,7 @@ func newLinuxRootTest(t *testing.T) *linuxTest { mon.Start() lt.mon = mon - r, err := newUserspaceRouter(logf, lt.tun, mon, nil) + r, err := newUserspaceRouter(logf, lt.tun, mon, nil, bus) if err != nil { lt.Close() t.Fatal(err) @@ -1006,11 +1007,31 @@ func newLinuxRootTest(t *testing.T) *linuxTest { t.Fatal(err) } lt.r = lr - return lt + return lt, bus +} + +func TestRuleDeletedEvent(t *testing.T) { + fake := NewFakeOS(t) + lt, bus := newLinuxRootTest(t) + lt.r.nfr = fake.nfr + defer lt.Close() + event := netmon.RuleDeleted{ + Table: 52, + Priority: 5210, + } + tw := eventbustest.NewWatcher(t, bus) + + t.Logf("Value before: %t", lt.r.ruleRestorePending.Load()) + if lt.r.ruleRestorePending.Load() { + t.Errorf("rule deletion already ongoing") + } + injector := eventbustest.NewInjector(t, bus) + eventbustest.Inject(injector, event) + eventbustest.Expect(tw, eventbustest.Type[AddIPRules]()) } func TestDelRouteIdempotent(t *testing.T) { - lt := newLinuxRootTest(t) + lt, _ := newLinuxRootTest(t) defer lt.Close() for _, s := range []string{ @@ -1036,7 +1057,7 @@ func TestDelRouteIdempotent(t *testing.T) { } func TestAddRemoveRules(t *testing.T) { - lt := newLinuxRootTest(t) + lt, _ := newLinuxRootTest(t) defer lt.Close() r := lt.r @@ -1054,14 +1075,12 @@ func TestAddRemoveRules(t *testing.T) { t.Logf("Rule: %+v", r) } } - } step("init_del_and_add", r.addIPRules) step("dup_add", r.justAddIPRules) step("del", r.delIPRules) step("dup_del", r.delIPRules) - } func 
TestDebugListLinks(t *testing.T) { diff --git a/wgengine/router/router_openbsd.go b/wgengine/router/router_openbsd.go index 6fdd47ac94c0e..f91878b4c993d 100644 --- a/wgengine/router/router_openbsd.go +++ b/wgengine/router/router_openbsd.go @@ -15,6 +15,7 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/set" ) @@ -31,7 +32,7 @@ type openbsdRouter struct { routes set.Set[netip.Prefix] } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { tunname, err := tundev.Name() if err != nil { return nil, err diff --git a/wgengine/router/router_plan9.go b/wgengine/router/router_plan9.go index 7ed7686d9e33f..fd6850ade3762 100644 --- a/wgengine/router/router_plan9.go +++ b/wgengine/router/router_plan9.go @@ -15,9 +15,10 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { r := &plan9Router{ logf: logf, tundev: tundev, diff --git a/wgengine/router/router_windows.go b/wgengine/router/router_windows.go index 64163660d7640..32d05110dca45 100644 --- a/wgengine/router/router_windows.go +++ b/wgengine/router/router_windows.go @@ -27,6 +27,7 @@ import ( "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) type winRouter struct { @@ -38,7 +39,7 @@ type winRouter struct { firewall *firewallTweaker } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon 
*netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { nativeTun := tundev.(*tun.NativeTun) luid := winipcfg.LUID(nativeTun.LUID()) guid, err := luid.GUID() From 9f29c428f44f38d452f1e4090cb016f5e238baf6 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 4 Aug 2025 17:09:32 -0700 Subject: [PATCH 0186/1093] client/systray: allow specifying tailscaled socket Pass a local.Client to systray.Run, so we can use the existing global localClient in the cmd/tailscale CLI. Add socket flag to cmd/systray. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 9 +++++++-- cmd/systray/systray.go | 10 +++++++++- cmd/tailscale/cli/systray.go | 3 +-- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index 76c93ae18e781..5cd5e602f5790 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -48,7 +48,12 @@ var ( ) // Run starts the systray menu and blocks until the menu exits. -func (menu *Menu) Run() { +// If client is nil, a default local.Client is used. 
+func (menu *Menu) Run(client *local.Client) { + if client == nil { + client = &local.Client{} + } + menu.lc = client menu.updateState() // exit cleanly on SIGINT and SIGTERM @@ -71,7 +76,7 @@ func (menu *Menu) Run() { type Menu struct { mu sync.Mutex // protects the entire Menu - lc local.Client + lc *local.Client status *ipnstate.Status curProfile ipn.LoginProfile allProfiles []ipn.LoginProfile diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index 0185a1bc2dc5e..d35595e258e0f 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -7,9 +7,17 @@ package main import ( + "flag" + + "tailscale.com/client/local" "tailscale.com/client/systray" + "tailscale.com/paths" ) +var socket = flag.String("socket", paths.DefaultTailscaledSocket(), "path to tailscaled socket") + func main() { - new(systray.Menu).Run() + flag.Parse() + lc := &local.Client{Socket: *socket} + new(systray.Menu).Run(lc) } diff --git a/cmd/tailscale/cli/systray.go b/cmd/tailscale/cli/systray.go index 184c85360f97a..05d688faafe4d 100644 --- a/cmd/tailscale/cli/systray.go +++ b/cmd/tailscale/cli/systray.go @@ -17,8 +17,7 @@ var systrayCmd = &ffcli.Command{ ShortUsage: "tailscale systray", ShortHelp: "Run a systray application to manage Tailscale", Exec: func(_ context.Context, _ []string) error { - // TODO(will): pass localClient to menu to use the global --socket flag - new(systray.Menu).Run() + new(systray.Menu).Run(&localClient) return nil }, } From ad273d75b7a14680f230b9c6b61bac0c72adca0c Mon Sep 17 00:00:00 2001 From: Erisa A Date: Tue, 5 Aug 2025 18:24:32 +0100 Subject: [PATCH 0187/1093] scripts/installer.sh: add bazzite handling (#16779) Fixes #14540 Signed-off-by: Erisa A --- scripts/installer.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index f81ae529298aa..d259cfda5cd8e 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -280,6 +280,14 @@ main() { echo "services.tailscale.enable = true;" exit 1 ;; + 
bazzite) + echo "Bazzite comes with Tailscale installed by default." + echo "Please enable Tailscale by running the following commands as root:" + echo + echo "ujust enable-tailscale" + echo "tailscale up" + exit 1 + ;; void) OS="$ID" VERSION="" # rolling release From f80ea9203055a4853bc156909de6869a3c6d4347 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 5 Aug 2025 11:49:33 -0700 Subject: [PATCH 0188/1093] .github/workflows: enforce github action version pinning (#16768) Use https://github.com/stacklok/frizbee via the new `go tool` support from Go 1.24. Updates https://github.com/tailscale/corp/issues/31017 Signed-off-by: Andrew Lytvynov --- .github/workflows/pin-github-actions.yml | 29 ++++++++ .github/workflows/test.yml | 2 +- Makefile | 4 + cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 5 +- cmd/stund/depaware.txt | 1 + go.mod | 31 +++++--- go.sum | 95 +++++++++++++++--------- 8 files changed, 120 insertions(+), 48 deletions(-) create mode 100644 .github/workflows/pin-github-actions.yml diff --git a/.github/workflows/pin-github-actions.yml b/.github/workflows/pin-github-actions.yml new file mode 100644 index 0000000000000..cb66739931bf1 --- /dev/null +++ b/.github/workflows/pin-github-actions.yml @@ -0,0 +1,29 @@ +# Pin images used in github actions to a hash instead of a version tag. 
+name: pin-github-actions +on: + pull_request: + branches: + - main + paths: + - ".github/workflows/**" + + workflow_dispatch: + +permissions: + contents: read + pull-requests: read + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + run: + name: pin-github-actions + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: pin + run: make pin-github-actions + - name: check for changed workflow files + run: git diff --no-ext-diff --exit-code .github/workflows || (echo "Some github actions versions need pinning, run make pin-github-actions."; exit 1) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index d5b09a9e6cc07..c2f539662cd37 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -57,7 +57,7 @@ jobs: # See if the cache entry already exists to avoid downloading it # and doing the cache write again. - id: check-cache - uses: actions/cache/restore@v4 + uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4 with: path: gomodcache # relative to workspace; see env note at top of file key: ${{ steps.hash.outputs.key }} diff --git a/Makefile b/Makefile index 78812d57c0990..0a7fc28dde8a3 100644 --- a/Makefile +++ b/Makefile @@ -137,6 +137,10 @@ sshintegrationtest: ## Run the SSH integration tests in various Docker container generate: ## Generate code ./tool/go generate ./... +.PHONY: pin-github-actions +pin-github-actions: + ./tool/go tool github.com/stacklok/frizbee actions .github/workflows + help: ## Show this help @echo "" @echo "Specify a command. 
The choices are:" diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 7adbf397f2f4f..20b6bfb6e1e17 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -77,6 +77,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa 💣 google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+ google.golang.org/protobuf/internal/order from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+ + 💣 google.golang.org/protobuf/internal/protolazy from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext 💣 google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index f810d1b4fd62a..2dbf49d07c53b 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -157,7 +157,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd - github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe+ github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag @@ -176,6 +176,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/opencontainers/go-digest 
from github.com/distribution/reference github.com/pkg/errors from github.com/evanphx/json-patch/v5+ D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack + github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp + github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header from github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil 💣 github.com/prometheus/client_golang/prometheus from github.com/prometheus/client_golang/prometheus/collectors+ github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus+ @@ -252,6 +254,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+ google.golang.org/protobuf/internal/order from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+ + 💣 google.golang.org/protobuf/internal/protolazy from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext 💣 google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 81544b7505dc7..d389d59a39949 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -38,6 +38,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar 💣 google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+ google.golang.org/protobuf/internal/order from 
google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+ + 💣 google.golang.org/protobuf/internal/protolazy from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext 💣 google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl diff --git a/go.mod b/go.mod index 3d7514158f069..92de032ffcb00 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/golangci/golangci-lint v1.57.1 github.com/google/go-cmp v0.6.0 - github.com/google/go-containerregistry v0.20.2 + github.com/google/go-containerregistry v0.20.3 github.com/google/go-tpm v0.9.4 github.com/google/gopacket v1.1.19 github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 @@ -72,7 +72,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/prometheus-community/pro-bing v0.4.0 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/common v0.55.0 github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff github.com/safchain/ethtool v0.3.0 @@ -96,7 +96,7 @@ require ( github.com/tcnksm/go-httpstat v0.2.0 github.com/toqueteos/webbrowser v1.2.0 github.com/u-root/u-root v0.14.0 - github.com/vishvananda/netns v0.0.4 + github.com/vishvananda/netns v0.0.5 go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba @@ -142,7 +142,9 @@ require ( github.com/catenacyber/perfsprint v0.7.1 // indirect github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/ckaznocha/intrange v0.1.0 // indirect + github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/cyphar/filepath-securejoin v0.3.6 // indirect + 
github.com/deckarep/golang-set/v2 v2.8.0 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -154,6 +156,8 @@ require ( github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/golangci/plugin-module-register v0.1.1 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/go-github/v66 v66.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-metrics v0.5.4 // indirect @@ -161,9 +165,14 @@ require ( github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/jjti/go-spancheck v0.5.3 // indirect github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/macabu/inamedparam v0.1.3 // indirect + github.com/moby/buildkit v0.20.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + github.com/puzpuzpuz/xsync v1.5.2 // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/stacklok/frizbee v0.1.7 // indirect github.com/xen0n/gosmopolitan v1.2.2 // indirect github.com/ykadowak/zerologlint v0.1.5 // indirect go-simpler.org/musttag v0.9.0 // indirect @@ -231,9 +240,9 @@ require ( github.com/daixiang0/gci v0.12.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect - github.com/docker/cli v27.4.1+incompatible // indirect + github.com/docker/cli v27.5.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.4.1+incompatible // indirect + github.com/docker/docker v27.5.1+incompatible // indirect 
github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -246,7 +255,7 @@ require ( github.com/fzipp/gocyclo v0.6.0 // indirect github.com/go-critic/go-critic v0.11.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.6.1 // indirect + github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-git/go-git/v5 v5.13.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -261,7 +270,7 @@ require ( github.com/go-toolsmith/typep v1.1.0 // indirect github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect @@ -364,9 +373,9 @@ require ( github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/cobra v1.9.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/spf13/viper v1.16.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect @@ -398,7 +407,7 @@ require ( golang.org/x/image v0.27.0 // indirect golang.org/x/text v0.25.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect @@ -414,3 +423,5 @@ require ( sigs.k8s.io/json 
v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect ) + +tool github.com/stacklok/frizbee diff --git a/go.sum b/go.sum index 995b930100ff9..7db41f5662289 100644 --- a/go.sum +++ b/go.sum @@ -235,11 +235,13 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= +github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creachadair/mds v0.17.1 h1:lXQbTGKmb3nE3aK6OEp29L1gCx6B5ynzlQ6c1KOBurc= github.com/creachadair/mds v0.17.1/go.mod h1:4b//mUiL8YldH6TImXjmW45myzTLNS1LLjOmrk888eg= github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc= @@ -259,6 +261,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dblohm7/wingoes 
v0.0.0-20240119213807-a09d6be7affa h1:h8TfIT1xc8FWbwwpmHn1J5i43Y0uZP97GqasGCzSRJk= github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ= +github.com/deckarep/golang-set/v2 v2.8.0 h1:swm0rlPCmdWn9mESxKOjWk8hXSqoxOp+ZlfuyaAdFlQ= +github.com/deckarep/golang-set/v2 v2.8.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= @@ -267,12 +271,12 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI= -github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY= +github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= -github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= +github.com/docker/docker v27.5.1+incompatible/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -334,8 +338,8 @@ github.com/go-critic/go-critic v0.11.2 h1:81xH/2muBphEgPtcwH1p6QD+KzXl2tMSi3hXjB github.com/go-critic/go-critic v0.11.2/go.mod h1:OePaicfjsf+KPy33yq4gzv6CO7TEQ9Rom6ns1KsJnl8= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= -github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= @@ -411,8 +415,8 @@ github.com/goccy/go-yaml v1.12.0 h1:/1WHjnMsI1dlIBQutrvSMGZRQufVO3asrHfTwfACoPM= github.com/goccy/go-yaml v1.12.0/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod 
h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -486,8 +490,12 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= -github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= +github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= +github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= +github.com/google/go-github/v66 v66.0.0 h1:ADJsaXj9UotwdgK8/iFZtv7MLc8E8WBl62WLd/D/9+M= +github.com/google/go-github/v66 v66.0.0/go.mod h1:+4SO9Zkuyf8ytMj0csN1NR/5OTR+MfqPp8P8dVlcvY4= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I= github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= github.com/google/go-tpm-tools v0.3.13-0.20230620182252-4639ecce2aba h1:qJEJcuLzH5KDR0gKc0zcktin6KSAwL7+jWKBYceddTc= @@ -547,8 +555,10 @@ github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod 
github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= @@ -670,6 +680,8 @@ github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs= github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= github.com/ldez/gomoddirectives v0.2.3 
h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= @@ -733,10 +745,12 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/buildkit v0.20.2 h1:qIeR47eQ1tzI1rwz0on3Xx2enRw/1CKjFhoONVcTlMA= +github.com/moby/buildkit v0.20.2/go.mod h1:DhaF82FjwOElTftl0JUAJpH/SUIUx4UvcFncLeOtlDI= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -802,6 +816,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= 
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -818,8 +834,8 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -843,6 +859,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff h1:X1Tly81aZ22DA1fxBdfvR3iw8+yFoUBUHMEd+AX/ZXI= github.com/prometheus/prometheus 
v0.49.2-0.20240125131847-c3b8ef1694ff/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= +github.com/puzpuzpuz/xsync v1.5.2 h1:yRAP4wqSOZG+/4pxJ08fPTwrfL0IzE/LKQ/cw509qGY= +github.com/puzpuzpuz/xsync v1.5.2/go.mod h1:K98BYhX3k1dQ2M63t1YNVDanbwUPmBCAhNmVrrxfiGg= github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= @@ -912,16 +930,19 @@ github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNo github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= 
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stacklok/frizbee v0.1.7 h1:IgrZy8dqKy+vBxNWrZTbDoctnV0doQKrFC6bNbWP5ho= +github.com/stacklok/frizbee v0.1.7/go.mod h1:eqMjHEgRYDSlpYpir3wXO6jyGpxr1dnFTvrTdrTIF7E= github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1021,8 +1042,8 @@ github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7Km github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= @@ -1067,14 +1088,14 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEj go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= @@ -1435,10 +1456,10 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw= -google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw= +google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1451,8 +1472,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod 
h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1465,8 +1486,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1476,6 +1497,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/h2non/gock.v1 v1.1.2 
h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY= +gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= From 57d653014b1a8bbb4dc7c96c98781df506210351 Mon Sep 17 00:00:00 2001 From: TheBigBear <471105+TheBigBear@users.noreply.github.com> Date: Wed, 6 Aug 2025 03:38:50 +0200 Subject: [PATCH 0189/1093] scripts/installer.sh: add FreeBSD 15 (#16741) * Update installer.sh add FreeBSD ver 15 this should fix the issue on https://github.com/tailscale/tailscale/issues/16740 Signed-off-by: TheBigBear <471105+TheBigBear@users.noreply.github.com> * scripts/installer.sh: small indentation change Signed-off-by: Erisa A Fixes #16740 --------- Signed-off-by: TheBigBear <471105+TheBigBear@users.noreply.github.com> Signed-off-by: Erisa A Co-authored-by: Erisa A --- scripts/installer.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index d259cfda5cd8e..4d968cd2b7285 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -408,7 +408,8 @@ main() { freebsd) if [ "$VERSION" != "12" ] && \ [ "$VERSION" != "13" ] && \ - [ "$VERSION" != "14" ] + [ "$VERSION" != "14" ] && \ + [ "$VERSION" != "15" ] then OS_UNSUPPORTED=1 fi From 908f20e0a506f9fe0c3f6479bc6b7c017cab27a1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 6 Aug 2025 09:35:25 -0700 Subject: [PATCH 0190/1093] wgengine/magicsock: add receiveIP() unit tests (#16781) One of these tests highlighted a Geneve encap bug, which is also fixed in this commit. looksLikeInitMsg was passed a packet post Geneve header stripping with slice offsets that had not been updated to account for the stripping. 
Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 6 +- wgengine/magicsock/magicsock_test.go | 314 +++++++++++++++++++++++++++ 2 files changed, 319 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index c99d1b68f83a6..04d4bbbdee3a1 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1823,6 +1823,9 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach return nil, 0, false, false } + // geneveInclusivePacketLen holds the packet length prior to any potential + // Geneve header stripping. + geneveInclusivePacketLen := len(b) if src.vni.isSet() { // Strip away the Geneve header before returning the packet to // wireguard-go. @@ -1831,6 +1834,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // to support returning start offset in order to get rid of this memmove perf // penalty. size = copy(b, b[packet.GeneveFixedHeaderLength:]) + b = b[:size] } if cache.epAddr == src && cache.de != nil && cache.gen == cache.de.numStopAndReset() { @@ -1859,7 +1863,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach ep.lastRecvUDPAny.StoreAtomic(now) connNoted := ep.noteRecvActivity(src, now) if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, len(b)) + stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) } if src.vni.isSet() && (connNoted || looksLikeInitiationMsg(b)) { // connNoted is periodic, but we also want to verify if the peer is who diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 0d1ac9dfda0f9..685fff4daa8f7 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -20,6 +20,7 @@ import ( "net/http/httptest" "net/netip" "os" + "reflect" "runtime" "strconv" "strings" @@ -66,6 +67,7 @@ import ( 
"tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/cibuild" + "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/must" "tailscale.com/util/racebuild" @@ -3701,3 +3703,315 @@ func TestConn_updateRelayServersSet(t *testing.T) { }) } } + +func TestConn_receiveIP(t *testing.T) { + looksLikeNakedDisco := make([]byte, 0, len(disco.Magic)+key.DiscoPublicRawLen) + looksLikeNakedDisco = append(looksLikeNakedDisco, disco.Magic...) + looksLikeNakedDisco = looksLikeNakedDisco[:cap(looksLikeNakedDisco)] + + looksLikeGeneveDisco := make([]byte, packet.GeneveFixedHeaderLength+len(looksLikeNakedDisco)) + gh := packet.GeneveHeader{ + Protocol: packet.GeneveProtocolDisco, + } + err := gh.Encode(looksLikeGeneveDisco) + if err != nil { + t.Fatal(err) + } + copy(looksLikeGeneveDisco[packet.GeneveFixedHeaderLength:], looksLikeNakedDisco) + + looksLikeSTUNBinding := stun.Response(stun.NewTxID(), netip.MustParseAddrPort("127.0.0.1:7777")) + + findMetricByName := func(name string) *clientmetric.Metric { + for _, metric := range clientmetric.Metrics() { + if metric.Name() == name { + return metric + } + } + t.Fatalf("failed to find metric with name: %v", name) + return nil + } + + looksLikeNakedWireGuardInit := make([]byte, device.MessageInitiationSize) + binary.LittleEndian.PutUint32(looksLikeNakedWireGuardInit, device.MessageInitiationType) + + looksLikeGeneveWireGuardInit := make([]byte, packet.GeneveFixedHeaderLength+device.MessageInitiationSize) + gh = packet.GeneveHeader{ + Protocol: packet.GeneveProtocolWireGuard, + VNI: 1, + } + vni := virtualNetworkID{} + vni.set(gh.VNI) + err = gh.Encode(looksLikeGeneveWireGuardInit) + if err != nil { + t.Fatal(err) + } + copy(looksLikeGeneveWireGuardInit[packet.GeneveFixedHeaderLength:], looksLikeNakedWireGuardInit) + + newPeerMapInsertableEndpoint := func(lastRecvWG mono.Time) *endpoint { + ep := &endpoint{ + nodeID: 1, + publicKey: key.NewNode().Public(), + lastRecvWG: lastRecvWG, + } + 
ep.disco.Store(&endpointDisco{ + key: key.NewDisco().Public(), + }) + return ep + } + + tests := []struct { + name string + // A copy of b is used as input, tests may re-use the same value. + b []byte + ipp netip.AddrPort + // cache must be non-nil, and must not be reused across tests. If + // cache.de is non-nil after receiveIP(), then we verify it is equal to + // wantEndpointType. + cache *epAddrEndpointCache + // If true, wantEndpointType is inserted into the [peerMap]. + insertWantEndpointTypeInPeerMap bool + // If insertWantEndpointTypeInPeerMap is true, use this [epAddr] for it + // in the [peerMap.setNodeKeyForEpAddr] call. + peerMapEpAddr epAddr + // If [*endpoint] then we expect 'got' to be the same [*endpoint]. If + // [*lazyEndpoint] and [*lazyEndpoint.maybeEP] is non-nil, we expect + // got.maybeEP to also be non-nil. Must not be reused across tests. + wantEndpointType wgconn.Endpoint + wantSize int + wantIsGeneveEncap bool + wantOk bool + wantMetricInc *clientmetric.Metric + wantNoteRecvActivityCalled bool + }{ + { + name: "naked disco", + b: looksLikeNakedDisco, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: nil, + wantSize: 0, + wantIsGeneveEncap: false, + wantOk: false, + wantMetricInc: metricRecvDiscoBadPeer, + wantNoteRecvActivityCalled: false, + }, + { + name: "geneve encap disco", + b: looksLikeGeneveDisco, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: nil, + wantSize: 0, + wantIsGeneveEncap: false, + wantOk: false, + wantMetricInc: metricRecvDiscoBadPeer, + wantNoteRecvActivityCalled: false, + }, + { + name: "STUN binding", + b: looksLikeSTUNBinding, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: nil, + wantSize: 0, + wantIsGeneveEncap: false, + wantOk: false, + wantMetricInc: findMetricByName("netcheck_stun_recv_ipv4"), + wantNoteRecvActivityCalled: false, + }, + { + name: 
"naked WireGuard init lazyEndpoint empty peerMap", + b: looksLikeNakedWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: &lazyEndpoint{}, + wantSize: len(looksLikeNakedWireGuardInit), + wantIsGeneveEncap: false, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: false, + }, + { + name: "naked WireGuard init endpoint matching peerMap entry", + b: looksLikeNakedWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + insertWantEndpointTypeInPeerMap: true, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777")}, + wantEndpointType: newPeerMapInsertableEndpoint(0), + wantSize: len(looksLikeNakedWireGuardInit), + wantIsGeneveEncap: false, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: true, + }, + { + name: "geneve WireGuard init lazyEndpoint empty peerMap", + b: looksLikeGeneveWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + wantEndpointType: &lazyEndpoint{}, + wantSize: len(looksLikeGeneveWireGuardInit) - packet.GeneveFixedHeaderLength, + wantIsGeneveEncap: true, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: false, + }, + { + name: "geneve WireGuard init lazyEndpoint matching peerMap activity noted", + b: looksLikeGeneveWireGuardInit, + ipp: netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + insertWantEndpointTypeInPeerMap: true, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: vni}, + wantEndpointType: &lazyEndpoint{ + maybeEP: newPeerMapInsertableEndpoint(0), + }, + wantSize: len(looksLikeGeneveWireGuardInit) - packet.GeneveFixedHeaderLength, + wantIsGeneveEncap: true, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: true, + }, + { + name: "geneve WireGuard init lazyEndpoint matching peerMap no activity noted", + b: looksLikeGeneveWireGuardInit, + ipp: 
netip.MustParseAddrPort("127.0.0.1:7777"), + cache: &epAddrEndpointCache{}, + insertWantEndpointTypeInPeerMap: true, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: vni}, + wantEndpointType: &lazyEndpoint{ + maybeEP: newPeerMapInsertableEndpoint(mono.Now().Add(time.Hour * 24)), + }, + wantSize: len(looksLikeGeneveWireGuardInit) - packet.GeneveFixedHeaderLength, + wantIsGeneveEncap: true, + wantOk: true, + wantMetricInc: nil, + wantNoteRecvActivityCalled: false, + }, + // TODO(jwhited): verify cache.de is used when conditions permit + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + noteRecvActivityCalled := false + metricBefore := int64(0) + if tt.wantMetricInc != nil { + metricBefore = tt.wantMetricInc.Value() + } + + // Init Conn. + c := &Conn{ + privateKey: key.NewNode(), + netChecker: &netcheck.Client{}, + peerMap: newPeerMap(), + } + c.havePrivateKey.Store(true) + c.noteRecvActivity = func(public key.NodePublic) { + noteRecvActivityCalled = true + } + c.SetStatistics(connstats.NewStatistics(0, 0, nil)) + + if tt.insertWantEndpointTypeInPeerMap { + var insertEPIntoPeerMap *endpoint + switch ep := tt.wantEndpointType.(type) { + case *endpoint: + insertEPIntoPeerMap = ep + case *lazyEndpoint: + insertEPIntoPeerMap = ep.maybeEP + default: + t.Fatal("unexpected tt.wantEndpointType concrete type") + } + insertEPIntoPeerMap.c = c + c.peerMap.upsertEndpoint(insertEPIntoPeerMap, key.DiscoPublic{}) + c.peerMap.setNodeKeyForEpAddr(tt.peerMapEpAddr, insertEPIntoPeerMap.publicKey) + } + + // Allow the same input packet to be used across tests, receiveIP() + // may mutate. 
+ inputPacket := make([]byte, len(tt.b)) + copy(inputPacket, tt.b) + + got, gotSize, gotIsGeneveEncap, gotOk := c.receiveIP(inputPacket, tt.ipp, tt.cache) + if (tt.wantEndpointType == nil) != (got == nil) { + t.Errorf("receiveIP() (tt.wantEndpointType == nil): %v != (got == nil): %v", tt.wantEndpointType == nil, got == nil) + } + if tt.wantEndpointType != nil && reflect.TypeOf(got).String() != reflect.TypeOf(tt.wantEndpointType).String() { + t.Errorf("receiveIP() got = %v, want %v", reflect.TypeOf(got).String(), reflect.TypeOf(tt.wantEndpointType).String()) + } else { + switch ep := tt.wantEndpointType.(type) { + case *endpoint: + if ep != got.(*endpoint) { + t.Errorf("receiveIP() want [*endpoint]: %p != got [*endpoint]: %p", ep, got) + } + case *lazyEndpoint: + if ep.maybeEP != nil && ep.maybeEP != got.(*lazyEndpoint).maybeEP { + t.Errorf("receiveIP() want [*lazyEndpoint.maybeEP]: %p != got [*lazyEndpoint.maybeEP] %p", ep, got) + } + } + } + + if gotSize != tt.wantSize { + t.Errorf("receiveIP() gotSize = %v, want %v", gotSize, tt.wantSize) + } + if gotIsGeneveEncap != tt.wantIsGeneveEncap { + t.Errorf("receiveIP() gotIsGeneveEncap = %v, want %v", gotIsGeneveEncap, tt.wantIsGeneveEncap) + } + if gotOk != tt.wantOk { + t.Errorf("receiveIP() gotOk = %v, want %v", gotOk, tt.wantOk) + } + if tt.wantMetricInc != nil && tt.wantMetricInc.Value() != metricBefore+1 { + t.Errorf("receiveIP() metric %v not incremented", tt.wantMetricInc.Name()) + } + if tt.wantNoteRecvActivityCalled != noteRecvActivityCalled { + t.Errorf("receiveIP() noteRecvActivityCalled = %v, want %v", noteRecvActivityCalled, tt.wantNoteRecvActivityCalled) + } + + if tt.cache.de != nil { + switch ep := got.(type) { + case *endpoint: + if tt.cache.de != ep { + t.Errorf("receiveIP() cache populated with [*endpoint] %p, want %p", tt.cache.de, ep) + } + case *lazyEndpoint: + if tt.cache.de != ep.maybeEP { + t.Errorf("receiveIP() cache populated with [*endpoint] %p, want (lazyEndpoint.maybeEP) %p", tt.cache.de, 
ep.maybeEP) + } + default: + t.Fatal("receiveIP() unexpected [conn.Endpoint] type") + } + } + + // Verify physical rx stats + stats := c.stats.Load() + _, gotPhy := stats.TestExtract() + wantNonzeroRxStats := false + switch ep := tt.wantEndpointType.(type) { + case *lazyEndpoint: + if ep.maybeEP != nil { + wantNonzeroRxStats = true + } + case *endpoint: + wantNonzeroRxStats = true + } + if tt.wantOk && wantNonzeroRxStats { + wantRxBytes := uint64(tt.wantSize) + if tt.wantIsGeneveEncap { + wantRxBytes += packet.GeneveFixedHeaderLength + } + wantPhy := map[netlogtype.Connection]netlogtype.Counts{ + {Dst: tt.ipp}: { + RxPackets: 1, + RxBytes: wantRxBytes, + }, + } + if !reflect.DeepEqual(gotPhy, wantPhy) { + t.Errorf("receiveIP() got physical conn stats = %v, want %v", gotPhy, wantPhy) + } + } else { + if len(gotPhy) != 0 { + t.Errorf("receiveIP() unexpected nonzero physical count stats: %+v", gotPhy) + } + } + }) + } +} From 02967ffcf258ec0face9955d1d420ed22681b306 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 6 Aug 2025 14:41:05 -0700 Subject: [PATCH 0191/1093] wgengine/magicsock: add lazyEndpoint.InitiationMessagePublicKey tests (#16790) Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock_test.go | 81 ++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 685fff4daa8f7..c570862013257 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -4015,3 +4015,84 @@ func TestConn_receiveIP(t *testing.T) { }) } } + +func Test_lazyEndpoint_InitiationMessagePublicKey(t *testing.T) { + tests := []struct { + name string + callWithPeerMapKey bool + maybeEPMatchingKey bool + wantNoteRecvActivityCalled bool + }{ + { + name: "noteRecvActivity called", + callWithPeerMapKey: true, + maybeEPMatchingKey: false, + wantNoteRecvActivityCalled: true, + }, + { + name: "maybeEP early return", + 
callWithPeerMapKey: true, + maybeEPMatchingKey: true, + wantNoteRecvActivityCalled: false, + }, + { + name: "not in peerMap early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: false, + wantNoteRecvActivityCalled: false, + }, + { + name: "not in peerMap maybeEP early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: true, + wantNoteRecvActivityCalled: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ep := &endpoint{ + nodeID: 1, + publicKey: key.NewNode().Public(), + } + ep.disco.Store(&endpointDisco{ + key: key.NewDisco().Public(), + }) + + var noteRecvActivityCalledFor key.NodePublic + conn := newConn(t.Logf) + conn.noteRecvActivity = func(public key.NodePublic) { + // wireguard-go will call into ParseEndpoint if the "real" + // noteRecvActivity ends up JIT configuring the peer. Mimic that + // to ensure there are no deadlocks around conn.mu. + // See tailscale/tailscale#16651 & http://go/corp#30836 + _, err := conn.ParseEndpoint(ep.publicKey.UntypedHexString()) + if err != nil { + t.Fatalf("ParseEndpoint() err: %v", err) + } + noteRecvActivityCalledFor = public + } + ep.c = conn + + var pubKey [32]byte + if tt.callWithPeerMapKey { + copy(pubKey[:], ep.publicKey.AppendTo(nil)) + } + conn.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) + + le := &lazyEndpoint{ + c: conn, + } + if tt.maybeEPMatchingKey { + le.maybeEP = ep + } + le.InitiationMessagePublicKey(pubKey) + want := key.NodePublic{} + if tt.wantNoteRecvActivityCalled { + want = ep.publicKey + } + if noteRecvActivityCalledFor.Compare(want) != 0 { + t.Fatalf("noteRecvActivityCalledFor = %v, want %v", noteRecvActivityCalledFor, want) + } + }) + } +} From 0374e6d9060a7eb2c233825e5bbc344375e7f8a3 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 6 Aug 2025 14:55:34 -0700 Subject: [PATCH 0192/1093] wgengine/magicsock: add lazyEndpoint.FromPeer tests (#16791) Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- 
wgengine/magicsock/magicsock_test.go | 72 ++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index c570862013257..e12f15b22a5e4 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -4096,3 +4096,75 @@ func Test_lazyEndpoint_InitiationMessagePublicKey(t *testing.T) { }) } } + +func Test_lazyEndpoint_FromPeer(t *testing.T) { + tests := []struct { + name string + callWithPeerMapKey bool + maybeEPMatchingKey bool + wantEpAddrInPeerMap bool + }{ + { + name: "epAddr in peerMap", + callWithPeerMapKey: true, + maybeEPMatchingKey: false, + wantEpAddrInPeerMap: true, + }, + { + name: "maybeEP early return", + callWithPeerMapKey: true, + maybeEPMatchingKey: true, + wantEpAddrInPeerMap: false, + }, + { + name: "not in peerMap early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: false, + wantEpAddrInPeerMap: false, + }, + { + name: "not in peerMap maybeEP early return", + callWithPeerMapKey: false, + maybeEPMatchingKey: true, + wantEpAddrInPeerMap: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ep := &endpoint{ + nodeID: 1, + publicKey: key.NewNode().Public(), + } + ep.disco.Store(&endpointDisco{ + key: key.NewDisco().Public(), + }) + conn := newConn(t.Logf) + ep.c = conn + + var pubKey [32]byte + if tt.callWithPeerMapKey { + copy(pubKey[:], ep.publicKey.AppendTo(nil)) + } + conn.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) + + le := &lazyEndpoint{ + c: conn, + src: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777")}, + } + if tt.maybeEPMatchingKey { + le.maybeEP = ep + } + le.FromPeer(pubKey) + if tt.wantEpAddrInPeerMap { + gotEP, ok := conn.peerMap.endpointForEpAddr(le.src) + if !ok { + t.Errorf("lazyEndpoint epAddr not found in peerMap") + } else if gotEP != ep { + t.Errorf("gotEP: %p != ep: %p", gotEP, ep) + } + } else if len(conn.peerMap.byEpAddr) != 0 { + t.Errorf("unexpected 
epAddr in peerMap") + } + }) + } +} From 4666d4ca2af5885329a6546d14c890d08e65c82e Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 6 Aug 2025 14:57:55 -0700 Subject: [PATCH 0193/1093] wgengine/magicsock: fix missing Conn.hasPeerRelayServers.Store() call (#16792) This commit also extends the updateRelayServersSet unit tests to cover onNodeViewsUpdate. Fixes tailscale/corp#31080 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 9 ++- wgengine/magicsock/magicsock_test.go | 89 +++++++++++++++++++++++----- 2 files changed, 78 insertions(+), 20 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 04d4bbbdee3a1..a4ba090ef8cc4 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -274,11 +274,9 @@ type Conn struct { captureHook syncs.AtomicValue[packet.CaptureCallback] // hasPeerRelayServers is whether [relayManager] is configured with at least - // one peer relay server via [relayManager.handleRelayServersSet]. It is - // only accessed by [Conn.updateRelayServersSet], [endpoint.setDERPHome], - // and [endpoint.discoverUDPRelayPathsLocked]. It exists to suppress - // calls into [relayManager] leading to wasted work involving channel - // operations and goroutine creation. + // one peer relay server via [relayManager.handleRelayServersSet]. It exists + // to suppress calls into [relayManager] leading to wasted work involving + // channel operations and goroutine creation. 
hasPeerRelayServers atomic.Bool // discoPrivate is the private naclbox key used for active @@ -2998,6 +2996,7 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { if peersChanged || relayClientChanged { if !relayClientEnabled { c.relayManager.handleRelayServersSet(nil) + c.hasPeerRelayServers.Store(false) } else { c.updateRelayServersSet(filt, self, peers) } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index e12f15b22a5e4..9399dab322152 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -65,7 +65,6 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/types/ptr" - "tailscale.com/types/views" "tailscale.com/util/cibuild" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -3584,7 +3583,7 @@ func Test_nodeHasCap(t *testing.T) { } } -func TestConn_updateRelayServersSet(t *testing.T) { +func TestConn_onNodeViewsUpdate_updateRelayServersSet(t *testing.T) { peerNodeCandidateRelay := &tailcfg.Node{ Cap: 121, ID: 1, @@ -3618,12 +3617,21 @@ func TestConn_updateRelayServersSet(t *testing.T) { DiscoKey: key.NewDisco().Public(), } + selfNodeNodeAttrDisableRelayClient := selfNode.Clone() + selfNodeNodeAttrDisableRelayClient.CapMap = make(tailcfg.NodeCapMap) + selfNodeNodeAttrDisableRelayClient.CapMap[tailcfg.NodeAttrDisableRelayClient] = nil + + selfNodeNodeAttrOnlyTCP443 := selfNode.Clone() + selfNodeNodeAttrOnlyTCP443.CapMap = make(tailcfg.NodeCapMap) + selfNodeNodeAttrOnlyTCP443.CapMap[tailcfg.NodeAttrOnlyTCP443] = nil + tests := []struct { - name string - filt *filter.Filter - self tailcfg.NodeView - peers views.Slice[tailcfg.NodeView] - wantRelayServers set.Set[candidatePeerRelay] + name string + filt *filter.Filter + self tailcfg.NodeView + peers []tailcfg.NodeView + wantRelayServers set.Set[candidatePeerRelay] + wantRelayClientEnabled bool }{ { name: "candidate relay server", @@ -3639,7 +3647,7 @@ func 
TestConn_updateRelayServersSet(t *testing.T) { }, }, nil, nil, nil, nil, nil), self: selfNode.View(), - peers: views.SliceOf([]tailcfg.NodeView{peerNodeCandidateRelay.View()}), + peers: []tailcfg.NodeView{peerNodeCandidateRelay.View()}, wantRelayServers: set.SetOf([]candidatePeerRelay{ { nodeKey: peerNodeCandidateRelay.Key, @@ -3647,6 +3655,43 @@ func TestConn_updateRelayServersSet(t *testing.T) { derpHomeRegionID: 1, }, }), + wantRelayClientEnabled: true, + }, + { + name: "no candidate relay server because self has tailcfg.NodeAttrDisableRelayClient", + filt: filter.New([]filtertype.Match{ + { + Srcs: peerNodeCandidateRelay.Addresses, + Caps: []filtertype.CapMatch{ + { + Dst: selfNodeNodeAttrDisableRelayClient.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfNodeNodeAttrDisableRelayClient.View(), + peers: []tailcfg.NodeView{peerNodeCandidateRelay.View()}, + wantRelayServers: make(set.Set[candidatePeerRelay]), + wantRelayClientEnabled: false, + }, + { + name: "no candidate relay server because self has tailcfg.NodeAttrOnlyTCP443", + filt: filter.New([]filtertype.Match{ + { + Srcs: peerNodeCandidateRelay.Addresses, + Caps: []filtertype.CapMatch{ + { + Dst: selfNodeNodeAttrOnlyTCP443.Addresses[0], + Cap: tailcfg.PeerCapabilityRelayTarget, + }, + }, + }, + }, nil, nil, nil, nil, nil), + self: selfNodeNodeAttrOnlyTCP443.View(), + peers: []tailcfg.NodeView{peerNodeCandidateRelay.View()}, + wantRelayServers: make(set.Set[candidatePeerRelay]), + wantRelayClientEnabled: false, }, { name: "self candidate relay server", @@ -3662,7 +3707,7 @@ func TestConn_updateRelayServersSet(t *testing.T) { }, }, nil, nil, nil, nil, nil), self: selfNode.View(), - peers: views.SliceOf([]tailcfg.NodeView{selfNode.View()}), + peers: []tailcfg.NodeView{selfNode.View()}, wantRelayServers: set.SetOf([]candidatePeerRelay{ { nodeKey: selfNode.Key, @@ -3670,6 +3715,7 @@ func TestConn_updateRelayServersSet(t *testing.T) { 
derpHomeRegionID: 2, }, }), + wantRelayClientEnabled: true, }, { name: "no candidate relay server", @@ -3684,21 +3730,34 @@ func TestConn_updateRelayServersSet(t *testing.T) { }, }, }, nil, nil, nil, nil, nil), - self: selfNode.View(), - peers: views.SliceOf([]tailcfg.NodeView{peerNodeNotCandidateRelayCapVer.View()}), - wantRelayServers: make(set.Set[candidatePeerRelay]), + self: selfNode.View(), + peers: []tailcfg.NodeView{peerNodeNotCandidateRelayCapVer.View()}, + wantRelayServers: make(set.Set[candidatePeerRelay]), + wantRelayClientEnabled: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := &Conn{} - c.updateRelayServersSet(tt.filt, tt.self, tt.peers) + c := newConn(t.Logf) + c.filt = tt.filt + if len(tt.wantRelayServers) == 0 { + // So we can verify it gets flipped back. + c.hasPeerRelayServers.Store(true) + } + + c.onNodeViewsUpdate(NodeViewsUpdate{ + SelfNode: tt.self, + Peers: tt.peers, + }) got := c.relayManager.getServers() if !got.Equal(tt.wantRelayServers) { t.Fatalf("got: %v != want: %v", got, tt.wantRelayServers) } if len(tt.wantRelayServers) > 0 != c.hasPeerRelayServers.Load() { - t.Fatalf("c.hasPeerRelayServers: %v != wantRelayServers: %v", c.hasPeerRelayServers.Load(), tt.wantRelayServers) + t.Fatalf("c.hasPeerRelayServers: %v != len(tt.wantRelayServers) > 0: %v", c.hasPeerRelayServers.Load(), len(tt.wantRelayServers) > 0) + } + if c.relayClientEnabled != tt.wantRelayClientEnabled { + t.Fatalf("c.relayClientEnabled: %v != wantRelayClientEnabled: %v", c.relayClientEnabled, tt.wantRelayClientEnabled) } }) } From 89954fbceb78a2ecff529166da66ebee614e4253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 7 Aug 2025 11:51:15 -0400 Subject: [PATCH 0194/1093] client/systray: add startup script generator for systemd (#16801) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #1708 Signed-off-by: Claus Lensbøl --- client/systray/startup-creator.go | 76 
++++++++++++++++++++++++ client/systray/tailscale-systray.service | 10 ++++ cmd/tailscale/cli/systray.go | 30 +++++++++- 3 files changed, 113 insertions(+), 3 deletions(-) create mode 100644 client/systray/startup-creator.go create mode 100644 client/systray/tailscale-systray.service diff --git a/client/systray/startup-creator.go b/client/systray/startup-creator.go new file mode 100644 index 0000000000000..cb354856d7f97 --- /dev/null +++ b/client/systray/startup-creator.go @@ -0,0 +1,76 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build cgo || !darwin + +// Package systray provides a minimal Tailscale systray application. +package systray + +import ( + "bufio" + "bytes" + _ "embed" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" +) + +//go:embed tailscale-systray.service +var embedSystemd string + +func InstallStartupScript(initSystem string) error { + switch initSystem { + case "systemd": + return installSystemd() + default: + return fmt.Errorf("unsupported init system '%s'", initSystem) + } +} + +func installSystemd() error { + // Find the path to tailscale, just in case it's not where the example file + // has it placed, and replace that before writing the file. 
+ tailscaleBin, err := exec.LookPath("tailscale") + if err != nil { + return fmt.Errorf("failed to find tailscale binary %w", err) + } + + var output bytes.Buffer + scanner := bufio.NewScanner(strings.NewReader(embedSystemd)) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "ExecStart=") { + line = fmt.Sprintf("ExecStart=%s systray", tailscaleBin) + } + output.WriteString(line + "\n") + } + + configDir, err := os.UserConfigDir() + if err != nil { + homeDir, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("unable to locate user home: %w", err) + } + configDir = filepath.Join(homeDir, ".config") + } + + systemdDir := filepath.Join(configDir, "systemd", "user") + if err := os.MkdirAll(systemdDir, 0o755); err != nil { + return fmt.Errorf("failed creating systemd uuser dir: %w", err) + } + + serviceFile := filepath.Join(systemdDir, "tailscale-systray.service") + + if err := os.WriteFile(serviceFile, output.Bytes(), 0o755); err != nil { + return fmt.Errorf("failed writing systemd user service: %w", err) + } + + fmt.Printf("Successfully installed systemd service to: %s\n", serviceFile) + fmt.Println("To enable and start the service, run:") + fmt.Println(" systemctl --user daemon-reload") + fmt.Println(" systemctl --user enable --now tailscale-systray") + + return nil +} diff --git a/client/systray/tailscale-systray.service b/client/systray/tailscale-systray.service new file mode 100644 index 0000000000000..a4d987563ec0a --- /dev/null +++ b/client/systray/tailscale-systray.service @@ -0,0 +1,10 @@ +[Unit] +Description=Tailscale System Tray +After=systemd.service + +[Service] +Type=simple +ExecStart=/usr/bin/tailscale systray + +[Install] +WantedBy=default.target diff --git a/cmd/tailscale/cli/systray.go b/cmd/tailscale/cli/systray.go index 05d688faafe4d..c0296ae2644df 100644 --- a/cmd/tailscale/cli/systray.go +++ b/cmd/tailscale/cli/systray.go @@ -7,17 +7,41 @@ package cli import ( "context" + "flag" + "fmt" 
"github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/systray" ) +var systrayArgs struct { + initSystem string + installStartup bool +} + var systrayCmd = &ffcli.Command{ Name: "systray", ShortUsage: "tailscale systray", ShortHelp: "Run a systray application to manage Tailscale", - Exec: func(_ context.Context, _ []string) error { - new(systray.Menu).Run(&localClient) + LongHelp: `Run a systray application to manage Tailscale. +To have the application run on startup, use the --enable-startup flag.`, + Exec: runSystray, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("systray") + fs.StringVar(&systrayArgs.initSystem, "enable-startup", "", + "Install startup script for init system. Currently supported systems are [systemd].") + return fs + })(), +} + +func runSystray(ctx context.Context, _ []string) error { + if systrayArgs.initSystem != "" { + if err := systray.InstallStartupScript(systrayArgs.initSystem); err != nil { + fmt.Printf("%s\n\n", err.Error()) + return flag.ErrHelp + } return nil - }, + } + new(systray.Menu).Run(&localClient) + return nil } From d4060f1a394e95a20797e0824fea6c0f9a0d7e42 Mon Sep 17 00:00:00 2001 From: Erisa A Date: Thu, 7 Aug 2025 19:27:56 +0100 Subject: [PATCH 0195/1093] CODE_OF_CONDUCT.md: update Code of Conduct (#16806) Updates #cleanup Signed-off-by: Erisa A --- CODE_OF_CONDUCT.md | 142 +++++++++++++++++++++++++-------------------- 1 file changed, 78 insertions(+), 64 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index be5564ef4a3de..3d33bba98c863 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -2,71 +2,72 @@ ## Our Pledge -We as members, contributors, and leaders pledge to make participation -in our community a harassment-free experience for everyone, regardless -of age, body size, visible or invisible disability, ethnicity, sex -characteristics, gender identity and expression, level of experience, -education, socio-economic status, nationality, personal appearance, -race, religion, or sexual 
identity and orientation. - -We pledge to act and interact in ways that contribute to an open, -welcoming, diverse, inclusive, and healthy community. +We are committed to creating an open, welcoming, diverse, inclusive, +healthy and respectful community. ## Our Standards -Examples of behavior that contributes to a positive environment for -our community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback +Examples of behavior that contributes to a positive environment for our +community include: +* Demonstrating empathy and kindness toward other people. +* Being respectful of differing opinions, viewpoints, and experiences. +* Giving and gracefully accepting constructive feedback. * Accepting responsibility and apologizing to those affected by our - mistakes, and learning from the experience + mistakes, and learning from the experience. * Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: + overall community. +Examples of unacceptable behavior include without limitation: * The use of sexualized language or imagery, and sexual attention or - advances of any kind + advances of any kind. +* The use of violent, intimidating or bullying language or imagery. * Trolling, insulting or derogatory comments, and personal or - political attacks -* Public or private harassment + political attacks. +* Public or private harassment. * Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in - a professional setting + address, without their explicit permission. +* Spamming community channels and members, such as sending repeat messages, + low-effort content, or automated messages. 
+* Phishing or any similar activity; +* Distributing or promoting malware; +* Other conduct which could reasonably be considered inappropriate in a + professional setting. +* Please also see the Tailscale Acceptable Use Policy, available at + [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). -## Enforcement Responsibilities +Please also see the Tailscale Acceptable Use Policy, available at +[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). -Community leaders are responsible for clarifying and enforcing our -standards of acceptable behavior and will take appropriate and fair -corrective action in response to any behavior that they deem -inappropriate, threatening, offensive, or harmful. +# Reporting Incidents -Community leaders have the right and responsibility to remove, edit, -or reject comments, commits, code, wiki edits, issues, and other -contributions that are not aligned to this Code of Conduct, and will -communicate reasons for moderation decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also -applies when an individual is officially representing the community in -public spaces. Examples of representing our community include using an -official e-mail address, posting via an official social media account, -or acting as an appointed representative at an online or offline -event. +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported to Tailscale directly via info@tailscale.com, or to +the community leaders or moderators via DM or similar. +All complaints will be reviewed and investigated promptly and fairly. +We will respect the privacy and safety of the reporter of any issues. + +Please note that this community is not moderated by staff 24/7, and we +do not have, and do not undertake, any obligation to prescreen, monitor, +edit, or remove any content or data, or to actively seek facts or +circumstances indicating illegal activity. 
While we strive to keep the +community safe and welcoming, moderation may not be immediate at all hours. +If you encounter any issues, report them using the appropriate channels. ## Enforcement -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported to the community leaders responsible for enforcement -at [info@tailscale.com](mailto:info@tailscale.com). All complaints -will be reviewed and investigated promptly and fairly. +Community leaders and moderators are responsible for clarifying and +enforcing our standards of acceptable behavior and will take appropriate +and fair corrective action in response to any behavior that they deem +inappropriate, threatening, offensive, or harmful. -All community leaders are obligated to respect the privacy and -security of the reporter of any incident. +Community leaders and moderators have the right and responsibility to remove, +edit, or reject comments, commits, code, wiki edits, issues, and other +contributions that are not aligned to this Community Code of Conduct. +Tailscale retains full discretion to take action (or not) in response +to a violation of these guidelines with or without notice or liability +to you. We will interpret our policies and resolve disputes in favor of +protecting users, customers, the public, our community and our company, +as a whole. ## Enforcement Guidelines @@ -76,48 +77,61 @@ this Code of Conduct: ### 1. Correction -**Community Impact**: Use of inappropriate language or other behavior +Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. -**Consequence**: A private, written warning from community leaders, +Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. 
Warning -**Community Impact**: A violation through a single incident or series +Community Impact: A violation through a single incident or series of actions. -**Consequence**: A warning with consequences for continued +Consequence: A warning with consequences for continued behavior. No interaction with the people involved, including -unsolicited interaction with those enforcing the Code of Conduct, for -a specified period of time. This includes avoiding interactions in +unsolicited interaction with those enforcing this Community Code of Conduct, +for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban -**Community Impact**: A serious violation of community standards, +Community Impact: A serious violation of community standards, including sustained inappropriate behavior. -**Consequence**: A temporary ban from any sort of interaction or +Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, -including unsolicited interaction with those enforcing the Code of -Conduct, is allowed during this period. Violating these terms may lead -to a permanent ban. +including unsolicited interaction with those enforcing the Code of Conduct, +is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban -**Community Impact**: Demonstrating a pattern of violation of -community standards, including sustained inappropriate behavior, -harassment of an individual, or aggression toward or disparagement of +Community Impact: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of +an individual, or aggression toward or disparagement of classes of individuals. 
-**Consequence**: A permanent ban from any sort of public interaction +Consequence: A permanent ban from any sort of public interaction within the community. +## Acceptable Use Policy + +Violation of this Community Code of Conduct may also violate the +Tailscale Acceptable Use Policy, which may result in suspension or +termination of your Tailscale account. For more information, please +see the Tailscale Acceptable Use Policy, available at +[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). + +## Privacy + +Please see the Tailscale [Privacy Policy](http://tailscale.com/privacy-policy) +for more information about how Tailscale collects, uses, discloses and protects +information. + ## Attribution This Code of Conduct is adapted from the [Contributor From 3fe022877afd3ccfdbbd10a3b8a94dbac4f930bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 7 Aug 2025 16:02:47 -0400 Subject: [PATCH 0196/1093] client/systray: temporarily replace systray module (#16807) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are waiting for a PR to be reviewed upstream. https://github.com/fyne-io/systray/pull/100 Updates #1708 Signed-off-by: Claus Lensbøl --- client/systray/logo.go | 2 +- client/systray/systray.go | 2 +- cmd/tailscale/depaware.txt | 12 ++++++------ go.mod | 2 +- go.sum | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/client/systray/logo.go b/client/systray/logo.go index 3467d1b741f93..d9b0932bc522b 100644 --- a/client/systray/logo.go +++ b/client/systray/logo.go @@ -15,9 +15,9 @@ import ( "sync" "time" - "fyne.io/systray" ico "github.com/Kodeworks/golang-image-ico" "github.com/fogleman/gg" + "github.com/tailscale/systray" ) // tsLogo represents the Tailscale logo displayed as the systray icon. 
diff --git a/client/systray/systray.go b/client/systray/systray.go index 5cd5e602f5790..d5a19f91c3c54 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -24,10 +24,10 @@ import ( "syscall" "time" - "fyne.io/systray" ico "github.com/Kodeworks/golang-image-ico" "github.com/atotto/clipboard" dbus "github.com/godbus/dbus/v5" + "github.com/tailscale/systray" "github.com/toqueteos/webbrowser" "tailscale.com/client/local" "tailscale.com/ipn" diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 020479ebb1867..8e28e29332278 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -2,9 +2,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 - L fyne.io/systray from tailscale.com/client/systray - L fyne.io/systray/internal/generated/menu from fyne.io/systray - L fyne.io/systray/internal/generated/notifier from fyne.io/systray L github.com/Kodeworks/golang-image-ico from tailscale.com/client/systray W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate @@ -25,9 +22,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - L 💣 github.com/godbus/dbus/v5 from fyne.io/systray+ - L github.com/godbus/dbus/v5/introspect from fyne.io/systray+ - L github.com/godbus/dbus/v5/prop from fyne.io/systray + L 💣 github.com/godbus/dbus/v5 from github.com/godbus/dbus/v5/introspect+ + L github.com/godbus/dbus/v5/introspect from github.com/godbus/dbus/v5/prop+ + 
L github.com/godbus/dbus/v5/prop from github.com/tailscale/systray L github.com/golang/freetype/raster from github.com/fogleman/gg+ L github.com/golang/freetype/truetype from github.com/fogleman/gg github.com/golang/groupcache/lru from tailscale.com/net/dnscache @@ -69,6 +66,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink + L github.com/tailscale/systray from tailscale.com/client/systray + L github.com/tailscale/systray/internal/generated/menu from github.com/tailscale/systray + L github.com/tailscale/systray/internal/generated/notifier from github.com/tailscale/systray github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ L github.com/vishvananda/netns from github.com/tailscale/netlink+ diff --git a/go.mod b/go.mod index 92de032ffcb00..09dcd575e2e8f 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,6 @@ go 1.24.4 require ( filippo.io/mkcert v1.4.4 - fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9 github.com/akutz/memconn v0.1.0 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa @@ -88,6 +87,7 @@ require ( github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb + github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78 github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da diff --git a/go.sum b/go.sum index 7db41f5662289..23ca2dc9b1f45 100644 --- a/go.sum +++ b/go.sum @@ 
-43,8 +43,6 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= -fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a h1:I8mEKo5sawHu8CqYf3FSjIl9b3puXasFVn2D/hrCneY= -fyne.io/systray v1.11.1-0.20250317195939-bcf6eed85e7a/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= @@ -992,6 +990,8 @@ github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+y github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb h1:Rtklwm6HUlCtf/MR2MB9iY4FoA16acWWlC5pLrTVa90= github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb/go.mod h1:R8iCVJnbOB05pGexHK/bKHneIRHpZ3jLl7wMQ0OM/jw= +github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78 h1:H7/LOg6wgQ116wFRVa8tz9KTB8pc6jeNtqS9tyKgeVw= +github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78/go.mod h1:1NbyArqaFj+AzkSWl0odw7flO9DsHIYWC4lMkwCKVAo= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= From b5283ab13a356f83f2f4e0506bfda5c6654e8b69 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 7 Aug 2025 14:41:55 -0700 Subject: [PATCH 
0197/1093] go.toolchain.rev: bump to 1.24.6 (#16811) Updates https://github.com/tailscale/corp/issues/31103 Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.toolchain.rev | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 09dcd575e2e8f..28b2a764fb7b7 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.24.4 +go 1.24.6 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index 33aa564236c3e..116d2fa6e8a1f 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -1cd3bf1a6eaf559aa8c00e749289559c884cef09 +cc1987b0b2df322aeb66514b3fbd584ba1201ef6 From 5297dc3baf386084e9b3791415aca12a261d2d69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 8 Aug 2025 16:12:11 -0400 Subject: [PATCH 0198/1093] cmd/tailscale/cli: move systray configuration to tailscale configure (#16817) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #1708 Signed-off-by: Claus Lensbøl --- cmd/tailscale/cli/configure.go | 1 + cmd/tailscale/cli/configure_linux-all.go | 8 ++++ cmd/tailscale/cli/configure_linux.go | 51 ++++++++++++++++++++++++ cmd/tailscale/cli/systray.go | 25 +----------- 4 files changed, 62 insertions(+), 23 deletions(-) create mode 100644 cmd/tailscale/cli/configure_linux-all.go create mode 100644 cmd/tailscale/cli/configure_linux.go diff --git a/cmd/tailscale/cli/configure.go b/cmd/tailscale/cli/configure.go index da6278ce24330..0354a19446a8f 100644 --- a/cmd/tailscale/cli/configure.go +++ b/cmd/tailscale/cli/configure.go @@ -32,6 +32,7 @@ services on the host to use Tailscale in more ways. 
ccall(maybeSysExtCmd), ccall(maybeVPNConfigCmd), ccall(maybeJetKVMConfigureCmd), + ccall(maybeSystrayCmd), ), } } diff --git a/cmd/tailscale/cli/configure_linux-all.go b/cmd/tailscale/cli/configure_linux-all.go new file mode 100644 index 0000000000000..e645e9654dfe5 --- /dev/null +++ b/cmd/tailscale/cli/configure_linux-all.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import "github.com/peterbourgon/ff/v3/ffcli" + +var maybeSystrayCmd func() *ffcli.Command // non-nil only on Linux, see configure_linux.go diff --git a/cmd/tailscale/cli/configure_linux.go b/cmd/tailscale/cli/configure_linux.go new file mode 100644 index 0000000000000..4bbde872140ca --- /dev/null +++ b/cmd/tailscale/cli/configure_linux.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_systray + +package cli + +import ( + "context" + "flag" + "fmt" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/systray" +) + +func init() { + maybeSystrayCmd = systrayConfigCmd +} + +var systrayArgs struct { + initSystem string + installStartup bool +} + +func systrayConfigCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "systray", + ShortUsage: "tailscale configure systray [options]", + ShortHelp: "[ALPHA] Manage the systray client for Linux", + LongHelp: "[ALPHA] The systray set of commands provides a way to configure the systray application on Linux.", + Exec: configureSystray, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("systray") + fs.StringVar(&systrayArgs.initSystem, "enable-startup", "", + "Install startup script for init system. 
Currently supported systems are [systemd].") + return fs + })(), + } +} + +func configureSystray(_ context.Context, _ []string) error { + if systrayArgs.initSystem != "" { + if err := systray.InstallStartupScript(systrayArgs.initSystem); err != nil { + fmt.Printf("%s\n\n", err.Error()) + return flag.ErrHelp + } + return nil + } + return flag.ErrHelp +} diff --git a/cmd/tailscale/cli/systray.go b/cmd/tailscale/cli/systray.go index c0296ae2644df..827e8a9a40a30 100644 --- a/cmd/tailscale/cli/systray.go +++ b/cmd/tailscale/cli/systray.go @@ -7,41 +7,20 @@ package cli import ( "context" - "flag" - "fmt" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/systray" ) -var systrayArgs struct { - initSystem string - installStartup bool -} - var systrayCmd = &ffcli.Command{ Name: "systray", ShortUsage: "tailscale systray", ShortHelp: "Run a systray application to manage Tailscale", - LongHelp: `Run a systray application to manage Tailscale. -To have the application run on startup, use the --enable-startup flag.`, - Exec: runSystray, - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("systray") - fs.StringVar(&systrayArgs.initSystem, "enable-startup", "", - "Install startup script for init system. 
Currently supported systems are [systemd].") - return fs - })(), + LongHelp: "Run a systray application to manage Tailscale.", + Exec: runSystray, } func runSystray(ctx context.Context, _ []string) error { - if systrayArgs.initSystem != "" { - if err := systray.InstallStartupScript(systrayArgs.initSystem); err != nil { - fmt.Printf("%s\n\n", err.Error()) - return flag.ErrHelp - } - return nil - } new(systray.Menu).Run(&localClient) return nil } From 796eb2120449bd84bb50a6b72fc5ae142c1c7c46 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 8 Aug 2025 15:10:06 -0700 Subject: [PATCH 0199/1093] go.toolchain.rev: bump tsgo toolchain Updates tailscale/go#129 Change-Id: I94debd1d0b7080c5b012f200ad98d22c3048f350 Signed-off-by: Brad Fitzpatrick --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index 116d2fa6e8a1f..fa951ac1b04ea 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -cc1987b0b2df322aeb66514b3fbd584ba1201ef6 +606f294beebf9df5754804710cd5e16d30532692 From 71d51eb8db62e4e5b2a2afbce0262bad0746ef2c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 10 Aug 2025 21:56:17 -0600 Subject: [PATCH 0200/1093] .github: bump github/codeql-action from 3.29.3 to 3.29.5 (#16765) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.3 to 3.29.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/d6bbdef45e766d081b84a2def353b0055f728d3e...51f77329afa6477de8c49fc9c7046c15b9a4e79d) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.5 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index e5616d83a4510..90a20e2f03f7d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3 + uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 From d122f0350e8efc4ee80b295829d447ff9d5ddb08 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 11 Aug 2025 09:04:03 -0700 Subject: [PATCH 0201/1093] control/controlknobs,tailcfg,wgengine/magicsock: deprecate NodeAttrDisableMagicSockCryptoRouting (#16818) Peer Relay is dependent on crypto routing, therefore crypto routing is now mandatory. 
Updates tailscale/corp#20732 Updates tailscale/corp#31083 Signed-off-by: Jordan Whited --- control/controlknobs/controlknobs.go | 6 ------ tailcfg/tailcfg.go | 6 +++++- wgengine/magicsock/magicsock.go | 6 ------ 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index a86f0af53a829..2578744cade65 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -98,10 +98,6 @@ type Knobs struct { // allows us to disable the new behavior remotely if needed. DisableLocalDNSOverrideViaNRPT atomic.Bool - // DisableCryptorouting indicates that the node should not use the - // magicsock crypto routing feature. - DisableCryptorouting atomic.Bool - // DisableCaptivePortalDetection is whether the node should not perform captive portal detection // automatically when the network state changes. DisableCaptivePortalDetection atomic.Bool @@ -137,7 +133,6 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { userDialUseRoutes = has(tailcfg.NodeAttrUserDialUseRoutes) disableSplitDNSWhenNoCustomResolvers = has(tailcfg.NodeAttrDisableSplitDNSWhenNoCustomResolvers) disableLocalDNSOverrideViaNRPT = has(tailcfg.NodeAttrDisableLocalDNSOverrideViaNRPT) - disableCryptorouting = has(tailcfg.NodeAttrDisableMagicSockCryptoRouting) disableCaptivePortalDetection = has(tailcfg.NodeAttrDisableCaptivePortalDetection) disableSkipStatusQueue = has(tailcfg.NodeAttrDisableSkipStatusQueue) ) @@ -165,7 +160,6 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { k.UserDialUseRoutes.Store(userDialUseRoutes) k.DisableSplitDNSWhenNoCustomResolvers.Store(disableSplitDNSWhenNoCustomResolvers) k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT) - k.DisableCryptorouting.Store(disableCryptorouting) k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection) k.DisableSkipStatusQueue.Store(disableSkipStatusQueue) } diff --git 
a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 5e3c4e5720a92..9f4734f1fc7d0 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -168,7 +168,8 @@ type CapabilityVersion int // - 121: 2025-07-19: Client understands peer relay endpoint alloc with [disco.AllocateUDPRelayEndpointRequest] & [disco.AllocateUDPRelayEndpointResponse] // - 122: 2025-07-21: Client sends Hostinfo.ExitNodeID to report which exit node it has selected, if any. // - 123: 2025-07-28: fix deadlock regression from cryptokey routing change (issue #16651) -const CurrentCapabilityVersion CapabilityVersion = 123 +// - 124: 2025-08-08: removed NodeAttrDisableMagicSockCryptoRouting support, crypto routing is now mandatory +const CurrentCapabilityVersion CapabilityVersion = 124 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2590,6 +2591,9 @@ const ( // NodeAttrDisableMagicSockCryptoRouting disables the use of the // magicsock cryptorouting hook. See tailscale/corp#20732. + // + // Deprecated: NodeAttrDisableMagicSockCryptoRouting is deprecated as of + // CapabilityVersion 124, CryptoRouting is now mandatory. See tailscale/corp#31083. NodeAttrDisableMagicSockCryptoRouting NodeCapability = "disable-magicsock-crypto-routing" // NodeAttrDisableCaptivePortalDetection instructs the client to not perform captive portal detection diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a4ba090ef8cc4..8dce6be36d815 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1842,12 +1842,6 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach de, ok := c.peerMap.endpointForEpAddr(src) c.mu.Unlock() if !ok { - if c.controlKnobs != nil && c.controlKnobs.DisableCryptorouting.Load() { - // Note: UDP relay is dependent on cryptorouting enablement. We - // only update Geneve-encapsulated [epAddr]s in the [peerMap] - // via [lazyEndpoint]. 
- return nil, 0, false, false - } // TODO(jwhited): reuse [lazyEndpoint] across calls to receiveIP() // for the same batch & [epAddr] src. return &lazyEndpoint{c: c, src: src}, size, isGeneveEncap, true From 03c4b2a0d0d23a183b216f065092e691e1844d4b Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Mon, 11 Aug 2025 12:57:15 -0400 Subject: [PATCH 0202/1093] derp/derphttp: test improvements (#16723) Update some logging to help future failures. Improve test shutdown concurrency issues. Fixes #16722 Signed-off-by: Mike O'Driscoll --- derp/derphttp/derphttp_test.go | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index bb33e60232357..6e8e0bd21c9e9 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -286,7 +286,6 @@ func TestBreakWatcherConnRecv(t *testing.T) { defer func() { retryInterval = origRetryInterval }() var wg sync.WaitGroup - defer wg.Wait() // Make the watcher server serverPrivateKey1 := key.NewNode() _, s1 := newTestServer(t, serverPrivateKey1) @@ -298,14 +297,15 @@ func TestBreakWatcherConnRecv(t *testing.T) { defer s2.Close() // Make the watcher (but it is not connected yet) - watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2) - defer watcher1.Close() + watcher := newWatcherClient(t, serverPrivateKey1, serverURL2) + defer watcher.Close() ctx, cancel := context.WithCancel(context.Background()) - defer cancel() watcherChan := make(chan int, 1) + defer close(watcherChan) errChan := make(chan error, 1) + defer close(errChan) // Start the watcher thread (which connects to the watched server) wg.Add(1) // To avoid using t.Logf after the test ends. 
See https://golang.org/issue/40343 @@ -323,7 +323,7 @@ func TestBreakWatcherConnRecv(t *testing.T) { errChan <- err } - watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) + watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) }() timer := time.NewTimer(5 * time.Second) @@ -335,7 +335,7 @@ func TestBreakWatcherConnRecv(t *testing.T) { select { case peers := <-watcherChan: if peers != 1 { - t.Fatal("wrong number of peers added during watcher connection") + t.Fatalf("wrong number of peers added during watcher connection: have %d, want 1", peers) } case err := <-errChan: if !strings.Contains(err.Error(), "use of closed network connection") { @@ -344,12 +344,13 @@ func TestBreakWatcherConnRecv(t *testing.T) { case <-timer.C: t.Fatalf("watcher did not process the peer update") } - watcher1.breakConnection(watcher1.client) - // re-establish connection by sending a packet - watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) - timer.Reset(5 * time.Second) + watcher.breakConnection(watcher.client) + // re-establish connection by sending a packet + watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) } + cancel() // Cancel the context to stop the watcher loop. 
+ wg.Wait() } // Test that a watcher connection successfully reconnects and processes peer @@ -364,7 +365,6 @@ func TestBreakWatcherConn(t *testing.T) { defer func() { retryInterval = origRetryInterval }() var wg sync.WaitGroup - defer wg.Wait() // Make the watcher server serverPrivateKey1 := key.NewNode() _, s1 := newTestServer(t, serverPrivateKey1) @@ -380,7 +380,6 @@ func TestBreakWatcherConn(t *testing.T) { defer watcher1.Close() ctx, cancel := context.WithCancel(context.Background()) - defer cancel() watcherChan := make(chan int, 1) breakerChan := make(chan bool, 1) @@ -396,8 +395,12 @@ func TestBreakWatcherConn(t *testing.T) { peers++ // Signal that the watcher has run watcherChan <- peers + select { + case <-ctx.Done(): + return // Wait for breaker to run - <-breakerChan + case <-breakerChan: + } } remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } notifyError := func(err error) { @@ -416,7 +419,7 @@ func TestBreakWatcherConn(t *testing.T) { select { case peers := <-watcherChan: if peers != 1 { - t.Fatal("wrong number of peers added during watcher connection") + t.Fatalf("wrong number of peers added during watcher connection have %d, want 1", peers) } case err := <-errorChan: if !strings.Contains(err.Error(), "use of closed network connection") { @@ -433,6 +436,9 @@ func TestBreakWatcherConn(t *testing.T) { timer.Reset(5 * time.Second) } + watcher1.Close() + cancel() + wg.Wait() } func noopAdd(derp.PeerPresentMessage) {} From 36397f17946e0b20e1c1a79370666a44bcc5c634 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 11 Aug 2025 13:29:57 -0700 Subject: [PATCH 0203/1093] wgengine/magicsock: add clientmetrics for TX direction Peer Relay disco messages (#16831) Updates tailscale/corp#30527 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 39 +++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/wgengine/magicsock/magicsock.go 
b/wgengine/magicsock/magicsock.go index 8dce6be36d815..9dc201cdcd10a 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2025,6 +2025,16 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. metricSentDiscoPong.Add(1) case *disco.CallMeMaybe: metricSentDiscoCallMeMaybe.Add(1) + case *disco.CallMeMaybeVia: + metricSentDiscoCallMeMaybeVia.Add(1) + case *disco.BindUDPRelayEndpoint: + metricSentDiscoBindUDPRelayEndpoint.Add(1) + case *disco.BindUDPRelayEndpointAnswer: + metricSentDiscoBindUDPRelayEndpointAnswer.Add(1) + case *disco.AllocateUDPRelayEndpointRequest: + metricSentDiscoAllocUDPRelayEndpointRequest.Add(1) + case *disco.AllocateUDPRelayEndpointResponse: + metricSentDiscoAllocUDPRelayEndpointResponse.Add(1) } } else if err == nil { // Can't send. (e.g. no IPv6 locally) @@ -3967,18 +3977,23 @@ var ( metricRecvDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv6") // Disco packets - metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") - metricSendDiscoDERP = clientmetric.NewCounter("magicsock_disco_send_derp") - metricSentDiscoUDP = clientmetric.NewCounter("magicsock_disco_sent_udp") - metricSentDiscoDERP = clientmetric.NewCounter("magicsock_disco_sent_derp") - metricSentDiscoPing = clientmetric.NewCounter("magicsock_disco_sent_ping") - metricSentDiscoPong = clientmetric.NewCounter("magicsock_disco_sent_pong") - metricSentDiscoPeerMTUProbes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probes") - metricSentDiscoPeerMTUProbeBytes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probe_bytes") - metricSentDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_sent_callmemaybe") - metricRecvDiscoBadPeer = clientmetric.NewCounter("magicsock_disco_recv_bad_peer") - metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") - metricRecvDiscoBadParse = 
clientmetric.NewCounter("magicsock_disco_recv_bad_parse") + metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") + metricSendDiscoDERP = clientmetric.NewCounter("magicsock_disco_send_derp") + metricSentDiscoUDP = clientmetric.NewCounter("magicsock_disco_sent_udp") + metricSentDiscoDERP = clientmetric.NewCounter("magicsock_disco_sent_derp") + metricSentDiscoPing = clientmetric.NewCounter("magicsock_disco_sent_ping") + metricSentDiscoPong = clientmetric.NewCounter("magicsock_disco_sent_pong") + metricSentDiscoPeerMTUProbes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probes") + metricSentDiscoPeerMTUProbeBytes = clientmetric.NewCounter("magicsock_disco_sent_peer_mtu_probe_bytes") + metricSentDiscoCallMeMaybe = clientmetric.NewCounter("magicsock_disco_sent_callmemaybe") + metricSentDiscoCallMeMaybeVia = clientmetric.NewCounter("magicsock_disco_sent_callmemaybevia") + metricSentDiscoBindUDPRelayEndpoint = clientmetric.NewCounter("magicsock_disco_sent_bind_udp_relay_endpoint") + metricSentDiscoBindUDPRelayEndpointAnswer = clientmetric.NewCounter("magicsock_disco_sent_bind_udp_relay_endpoint_answer") + metricSentDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_sent_alloc_udp_relay_endpoint_request") + metricSentDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_sent_alloc_udp_relay_endpoint_response") + metricRecvDiscoBadPeer = clientmetric.NewCounter("magicsock_disco_recv_bad_peer") + metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") + metricRecvDiscoBadParse = clientmetric.NewCounter("magicsock_disco_recv_bad_parse") metricRecvDiscoUDP = clientmetric.NewCounter("magicsock_disco_recv_udp") metricRecvDiscoDERP = clientmetric.NewCounter("magicsock_disco_recv_derp") From 4fa27db8dd3ed05c0f3c704d2d97b449236b90d8 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 11 Aug 2025 14:48:19 -0700 Subject: [PATCH 0204/1093] wgengine/magicsock: add clientmetrics 
for locally delivered Peer Relay alloc disco (#16833) Expected when Peer Relay'ing via self. These disco messages never get sealed, and never leave the process. Updates tailscale/corp#30527 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 9dc201cdcd10a..a7b6d1178e6ea 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -681,6 +681,7 @@ func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { if selfNodeKey.Compare(allocResp.ReqRxFromNodeKey) == 0 && allocResp.ReqRxFromDiscoKey.Compare(c.discoPublic) == 0 { c.relayManager.handleRxDiscoMsg(c, allocResp.Message, selfNodeKey, allocResp.ReqRxFromDiscoKey, epAddr{}) + metricLocalDiscoAllocUDPRelayEndpointResponse.Add(1) } return } @@ -1926,6 +1927,7 @@ func (c *Conn) sendDiscoAllocateUDPRelayEndpointRequest(dst epAddr, dstKey key.N RxFromDiscoKey: c.discoPublic, Message: allocReq, }) + metricLocalDiscoAllocUDPRelayEndpointRequest.Add(1) return true, nil } return c.sendDiscoMessage(dst, dstKey, dstDisco, allocReq, logLevel) @@ -3990,6 +3992,7 @@ var ( metricSentDiscoBindUDPRelayEndpoint = clientmetric.NewCounter("magicsock_disco_sent_bind_udp_relay_endpoint") metricSentDiscoBindUDPRelayEndpointAnswer = clientmetric.NewCounter("magicsock_disco_sent_bind_udp_relay_endpoint_answer") metricSentDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_sent_alloc_udp_relay_endpoint_request") + metricLocalDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_local_alloc_udp_relay_endpoint_request") metricSentDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_sent_alloc_udp_relay_endpoint_response") metricRecvDiscoBadPeer = clientmetric.NewCounter("magicsock_disco_recv_bad_peer") metricRecvDiscoBadKey = clientmetric.NewCounter("magicsock_disco_recv_bad_key") @@ -4009,6 +4012,7 @@ 
var ( metricRecvDiscoAllocUDPRelayEndpointRequestBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request_bad_disco") metricRecvDiscoAllocUDPRelayEndpointResponseBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response_bad_disco") metricRecvDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response") + metricLocalDiscoAllocUDPRelayEndpointResponse = clientmetric.NewCounter("magicsock_disco_local_alloc_udp_relay_endpoint_response") metricRecvDiscoDERPPeerNotHere = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_not_here") metricRecvDiscoDERPPeerGoneUnknown = clientmetric.NewCounter("magicsock_disco_recv_derp_peer_gone_unknown") // metricDERPHomeChange is how many times our DERP home region DI has From cde65dba16a24d799fafc804595b209b17481ebb Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 11 Aug 2025 14:53:25 -0700 Subject: [PATCH 0205/1093] wgengine/magicsock: add clientmetric for Peer Relay challenge reception (#16834) Updates tailscale/corp#30527 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a7b6d1178e6ea..0fac793ef446f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2273,6 +2273,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return } c.relayManager.handleRxDiscoMsg(c, challenge, key.NodePublic{}, di.discoKey, src) + metricRecvDiscoBindUDPRelayEndpointChallenge.Add(1) return } @@ -4008,6 +4009,7 @@ var ( metricRecvDiscoCallMeMaybeViaBadNode = clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_node") metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco") metricRecvDiscoCallMeMaybeViaBadDisco = 
clientmetric.NewCounter("magicsock_disco_recv_callmemaybevia_bad_disco") + metricRecvDiscoBindUDPRelayEndpointChallenge = clientmetric.NewCounter("magicsock_disco_recv_bind_udp_relay_endpoint_challenge") metricRecvDiscoAllocUDPRelayEndpointRequest = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request") metricRecvDiscoAllocUDPRelayEndpointRequestBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_request_bad_disco") metricRecvDiscoAllocUDPRelayEndpointResponseBadDisco = clientmetric.NewCounter("magicsock_disco_recv_alloc_udp_relay_endpoint_response_bad_disco") From ee0c7b05a5b6deabd9492276db608952cff11b57 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 12 Aug 2025 10:19:33 -0700 Subject: [PATCH 0206/1093] cmd/tailscale: fix a panic in netcheck portmapper construction (#16843) This affects the 1.87.33 unstable release. Updates #16842 Updates #15160 Change-Id: Ie6d1b2c094d1a6059fbd1023760567900f06e0ad Signed-off-by: M. J. Fromberger --- cmd/tailscale/cli/netcheck.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 3cf05a3b7987f..0bdab59cb8beb 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -59,8 +59,9 @@ func runNetcheck(ctx context.Context, args []string) error { // Ensure that we close the portmapper after running a netcheck; this // will release any port mappings created. pm := portmapper.NewClient(portmapper.Config{ - Logf: logf, - NetMon: netMon, + Logf: logf, + NetMon: netMon, + EventBus: bus, }) defer pm.Close() From d07166b87daeeee314115c4f05610c87a00827cb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 20:36:47 -0600 Subject: [PATCH 0207/1093] .github: Bump actions/cache from 4.2.3 to 4.2.4 (#16829) Bumps [actions/cache](https://github.com/actions/cache) from 4.2.3 to 4.2.4. 
- [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/5a3ec84eff668545956fd18022155c47e93e2684...0400d5f644dc74513175e3cd8d07132dd4860809) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 4.2.4 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yml | 46 +++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c2f539662cd37..7ccb3986968fc 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -57,7 +57,7 @@ jobs: # See if the cache entry already exists to avoid downloading it # and doing the cache write again. - id: check-cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4 with: path: gomodcache # relative to workspace; see env note at top of file key: ${{ steps.hash.outputs.key }} @@ -69,7 +69,7 @@ jobs: run: go mod download - name: Cache Go modules if: steps.check-cache.outputs.cache-hit != 'true' - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache # relative to workspace; see env note at top of file key: ${{ steps.hash.outputs.key }} @@ -92,7 +92,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -130,13 +130,13 @@ jobs: with: path: src - name: Restore Go 
module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -241,7 +241,7 @@ jobs: - name: Restore Go module cache if: matrix.key != 'win-tool-go' - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -249,7 +249,7 @@ jobs: - name: Restore Cache if: matrix.key != 'win-tool-go' - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: | ~/.cache/go-build @@ -298,7 +298,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -321,7 +321,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -377,7 +377,7 @@ jobs: with: path: src - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: # Note: unlike 
the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -393,7 +393,7 @@ jobs: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -429,7 +429,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -467,7 +467,7 @@ jobs: with: path: src - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -483,7 +483,7 @@ jobs: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -518,7 +518,7 @@ jobs: # some Android breakages early. 
# TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482 - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -539,7 +539,7 @@ jobs: with: path: src - name: Restore Cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -555,7 +555,7 @@ jobs: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} ${{ github.job }}-${{ runner.os }}-go-2- - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -588,7 +588,7 @@ jobs: - name: Set GOMODCACHE env run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -671,7 +671,7 @@ jobs: - name: Set GOMODCACHE env run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -689,7 +689,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 
+ uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -712,7 +712,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -734,7 +734,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -788,7 +788,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} From f22c7657e54cf4b3a10a2bc635f6a68f89123bfb Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 13 Aug 2025 11:19:29 -0700 Subject: [PATCH 0208/1093] cmd/tailscale: add --json-docs flag (#16851) This prints all command and flag docs as JSON. To be used for generating the contents of https://tailscale.com/kb/1080/cli. 
Updates https://github.com/tailscale/tailscale-www/issues/4722 Signed-off-by: Andrew Lytvynov --- cmd/tailscale/cli/cli.go | 59 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 72924350ca7eb..208ee93fd9388 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -7,6 +7,7 @@ package cli import ( "context" + "encoding/json" "errors" "flag" "fmt" @@ -217,8 +218,10 @@ func newRootCmd() *ffcli.Command { return nil }) rootfs.Lookup("socket").DefValue = localClient.Socket + jsonDocs := rootfs.Bool("json-docs", false, hidden+"print JSON-encoded docs for all subcommands and flags") - rootCmd := &ffcli.Command{ + var rootCmd *ffcli.Command + rootCmd = &ffcli.Command{ Name: "tailscale", ShortUsage: "tailscale [flags] [command flags]", ShortHelp: "The easiest, most secure way to use WireGuard.", @@ -265,6 +268,9 @@ change in the future. ), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { + if *jsonDocs { + return printJSONDocs(rootCmd) + } if len(args) > 0 { return fmt.Errorf("tailscale: unknown subcommand: %s", args[0]) } @@ -472,3 +478,54 @@ func colorableOutput() (w io.Writer, ok bool) { } return colorable.NewColorableStdout(), true } + +type commandDoc struct { + Name string + Desc string + Subcommands []commandDoc `json:",omitempty"` + Flags []flagDoc `json:",omitempty"` +} + +type flagDoc struct { + Name string + Desc string +} + +func printJSONDocs(root *ffcli.Command) error { + docs := jsonDocsWalk(root) + return json.NewEncoder(os.Stdout).Encode(docs) +} + +func jsonDocsWalk(cmd *ffcli.Command) *commandDoc { + res := &commandDoc{ + Name: cmd.Name, + } + if cmd.LongHelp != "" { + res.Desc = cmd.LongHelp + } else if cmd.ShortHelp != "" { + res.Desc = cmd.ShortHelp + } else { + res.Desc = cmd.ShortUsage + } + if strings.HasPrefix(res.Desc, hidden) { + return nil + } + if cmd.FlagSet != nil { + cmd.FlagSet.VisitAll(func(f 
*flag.Flag) { + if strings.HasPrefix(f.Usage, hidden) { + return + } + res.Flags = append(res.Flags, flagDoc{ + Name: f.Name, + Desc: f.Usage, + }) + }) + } + for _, sub := range cmd.Subcommands { + subj := jsonDocsWalk(sub) + if subj != nil { + res.Subcommands = append(res.Subcommands, *subj) + } + } + return res +} From 16bc0a5558aac7617cb94b497db71242a2452db3 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 13 Aug 2025 13:13:11 -0700 Subject: [PATCH 0209/1093] net/{batching,packet},wgengine/magicsock: export batchingConn (#16848) For eventual use by net/udprelay.Server. Updates tailscale/corp#31164 Signed-off-by: Jordan Whited --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + net/batching/conn.go | 48 ++++++++++++ net/batching/conn_default.go | 21 +++++ .../batching/conn_linux.go | 77 +++++++++++-------- .../batching/conn_linux_test.go | 32 +++++--- net/packet/geneve.go | 50 +++++++++--- net/packet/geneve_test.go | 56 +++++++++++++- net/udprelay/server.go | 5 +- net/udprelay/server_test.go | 10 ++- tsnet/depaware.txt | 1 + wgengine/magicsock/batching_conn.go | 23 ------ wgengine/magicsock/batching_conn_default.go | 14 ---- wgengine/magicsock/debughttp.go | 4 +- wgengine/magicsock/endpoint.go | 36 ++++----- wgengine/magicsock/endpoint_test.go | 13 ++-- wgengine/magicsock/magicsock.go | 56 ++++---------- wgengine/magicsock/magicsock_default.go | 4 - wgengine/magicsock/magicsock_linux.go | 8 -- wgengine/magicsock/magicsock_test.go | 71 +++-------------- wgengine/magicsock/peermap.go | 4 +- wgengine/magicsock/peermap_test.go | 5 +- wgengine/magicsock/rebinding_conn.go | 40 +++++----- wgengine/magicsock/relaymanager.go | 15 ++-- 25 files changed, 328 insertions(+), 268 deletions(-) create mode 100644 net/batching/conn.go create mode 100644 net/batching/conn_default.go rename wgengine/magicsock/batching_conn_linux.go => net/batching/conn_linux.go (88%) rename wgengine/magicsock/batching_conn_linux_test.go => 
net/batching/conn_linux_test.go (89%) delete mode 100644 wgengine/magicsock/batching_conn.go delete mode 100644 wgengine/magicsock/batching_conn_default.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2dbf49d07c53b..1ecef4953ea9f 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -838,6 +838,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 7c4885a4be4c4..07f5958ca37f6 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -311,6 +311,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index b28460352bb9f..5e558a0cd3fc9 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -268,6 +268,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from 
tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ diff --git a/net/batching/conn.go b/net/batching/conn.go new file mode 100644 index 0000000000000..2c6100258cb04 --- /dev/null +++ b/net/batching/conn.go @@ -0,0 +1,48 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package batching implements a socket optimized for increased throughput. +package batching + +import ( + "net/netip" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" + "tailscale.com/net/packet" + "tailscale.com/types/nettype" +) + +var ( + // This acts as a compile-time check for our usage of ipv6.Message in + // [Conn] for both IPv6 and IPv4 operations. + _ ipv6.Message = ipv4.Message{} +) + +// Conn is a nettype.PacketConn that provides batched i/o using +// platform-specific optimizations, e.g. {recv,send}mmsg & UDP GSO/GRO. +// +// Conn originated from (and is still used by) magicsock where its API was +// strongly influenced by [wireguard-go/conn.Bind] constraints, namely +// wireguard-go's ownership of packet memory. +type Conn interface { + nettype.PacketConn + // ReadBatch reads messages from [Conn] into msgs. It returns the number of + // messages the caller should evaluate for nonzero len, as a zero len + // message may fall on either side of a nonzero. + // + // Each [ipv6.Message.OOB] must be sized to at least MinControlMessageSize(). + // len(msgs) must be at least MinReadBatchMsgsLen(). + ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) + // WriteBatchTo writes buffs to addr. + // + // If geneve.VNI.IsSet(), then geneve is encoded into the space preceding + // offset, and offset must equal [packet.GeneveFixedHeaderLength]. If + // !geneve.VNI.IsSet() then the space preceding offset is ignored. 
+ // + // len(buffs) must be <= batchSize supplied in TryUpgradeToConn(). + // + // WriteBatchTo may return a [neterror.ErrUDPGSODisabled] error if UDP GSO + // was disabled as a result of a send error. + WriteBatchTo(buffs [][]byte, addr netip.AddrPort, geneve packet.GeneveHeader, offset int) error +} diff --git a/net/batching/conn_default.go b/net/batching/conn_default.go new file mode 100644 index 0000000000000..ed5c494f3fb3a --- /dev/null +++ b/net/batching/conn_default.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package batching + +import ( + "tailscale.com/types/nettype" +) + +// TryUpgradeToConn is no-op on all platforms except linux. +func TryUpgradeToConn(pconn nettype.PacketConn, _ string, _ int) nettype.PacketConn { + return pconn +} + +var controlMessageSize = 0 + +func MinControlMessageSize() int { + return controlMessageSize +} diff --git a/wgengine/magicsock/batching_conn_linux.go b/net/batching/conn_linux.go similarity index 88% rename from wgengine/magicsock/batching_conn_linux.go rename to net/batching/conn_linux.go index a0607c624445c..0416c2729977b 100644 --- a/wgengine/magicsock/batching_conn_linux.go +++ b/net/batching/conn_linux.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package magicsock +package batching import ( "encoding/binary" @@ -43,10 +43,15 @@ type xnetBatchWriter interface { WriteBatch([]ipv6.Message, int) (int, error) } +var ( + // [linuxBatchingConn] implements [Conn]. + _ Conn = &linuxBatchingConn{} +) + // linuxBatchingConn is a UDP socket that provides batched i/o. It implements -// batchingConn. +// [Conn]. type linuxBatchingConn struct { - pc nettype.PacketConn + pc *net.UDPConn xpc xnetBatchReaderWriter rxOffload bool // supports UDP GRO or similar txOffload atomic.Bool // supports UDP GSO or similar @@ -98,9 +103,8 @@ const ( // // All msgs have their Addr field set to addr. 
// -// All msgs[i].Buffers[0] are preceded by a Geneve header with vni.get() if -// vni.isSet(). -func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, vni virtualNetworkID, buffs [][]byte, msgs []ipv6.Message, offset int) int { +// All msgs[i].Buffers[0] are preceded by a Geneve header (geneve) if geneve.VNI.IsSet(). +func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, geneve packet.GeneveHeader, buffs [][]byte, msgs []ipv6.Message, offset int) int { var ( base = -1 // index of msg we are currently coalescing into gsoSize int // segmentation size of msgs[base] @@ -111,15 +115,10 @@ func (c *linuxBatchingConn) coalesceMessages(addr *net.UDPAddr, vni virtualNetwo if addr.IP.To4() == nil { maxPayloadLen = maxIPv6PayloadLen } - vniIsSet := vni.isSet() - var gh packet.GeneveHeader - if vniIsSet { - gh.Protocol = packet.GeneveProtocolWireGuard - gh.VNI = vni.get() - } + vniIsSet := geneve.VNI.IsSet() for i, buff := range buffs { if vniIsSet { - gh.Encode(buffs[i]) + geneve.Encode(buff) } else { buff = buff[offset:] } @@ -179,37 +178,34 @@ func (c *linuxBatchingConn) putSendBatch(batch *sendBatch) { c.sendBatchPool.Put(batch) } -func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error { +func (c *linuxBatchingConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, geneve packet.GeneveHeader, offset int) error { batch := c.getSendBatch() defer c.putSendBatch(batch) - if addr.ap.Addr().Is6() { - as16 := addr.ap.Addr().As16() + if addr.Addr().Is6() { + as16 := addr.Addr().As16() copy(batch.ua.IP, as16[:]) batch.ua.IP = batch.ua.IP[:16] } else { - as4 := addr.ap.Addr().As4() + as4 := addr.Addr().As4() copy(batch.ua.IP, as4[:]) batch.ua.IP = batch.ua.IP[:4] } - batch.ua.Port = int(addr.ap.Port()) + batch.ua.Port = int(addr.Port()) var ( n int retried bool ) retry: if c.txOffload.Load() { - n = c.coalesceMessages(batch.ua, addr.vni, buffs, batch.msgs, offset) + n = c.coalesceMessages(batch.ua, geneve, buffs, batch.msgs, 
offset) } else { - vniIsSet := addr.vni.isSet() - var gh packet.GeneveHeader + vniIsSet := geneve.VNI.IsSet() if vniIsSet { - gh.Protocol = packet.GeneveProtocolWireGuard - gh.VNI = addr.vni.get() offset -= packet.GeneveFixedHeaderLength } for i := range buffs { if vniIsSet { - gh.Encode(buffs[i]) + geneve.Encode(buffs[i]) } batch.msgs[i].Buffers[0] = buffs[i][offset:] batch.msgs[i].Addr = batch.ua @@ -231,11 +227,7 @@ retry: } func (c *linuxBatchingConn) SyscallConn() (syscall.RawConn, error) { - sc, ok := c.pc.(syscall.Conn) - if !ok { - return nil, errUnsupportedConnType - } - return sc.SyscallConn() + return c.pc.SyscallConn() } func (c *linuxBatchingConn) writeBatch(msgs []ipv6.Message) error { @@ -391,9 +383,10 @@ func setGSOSizeInControl(control *[]byte, gsoSize uint16) { *control = (*control)[:unix.CmsgSpace(2)] } -// tryUpgradeToBatchingConn probes the capabilities of the OS and pconn, and -// upgrades pconn to a *linuxBatchingConn if appropriate. -func tryUpgradeToBatchingConn(pconn nettype.PacketConn, network string, batchSize int) nettype.PacketConn { +// TryUpgradeToConn probes the capabilities of the OS and pconn, and upgrades +// pconn to a [Conn] if appropriate. A batch size of MinReadBatchMsgsLen() is +// suggested for the best performance. +func TryUpgradeToConn(pconn nettype.PacketConn, network string, batchSize int) nettype.PacketConn { if runtime.GOOS != "linux" { // Exclude Android. 
return pconn @@ -415,7 +408,7 @@ func tryUpgradeToBatchingConn(pconn nettype.PacketConn, network string, batchSiz return pconn } b := &linuxBatchingConn{ - pc: pconn, + pc: uc, getGSOSizeFromControl: getGSOSizeFromControl, setGSOSizeInControl: setGSOSizeInControl, sendBatchPool: sync.Pool{ @@ -449,3 +442,21 @@ func tryUpgradeToBatchingConn(pconn nettype.PacketConn, network string, batchSiz b.txOffload.Store(txOffload) return b } + +var controlMessageSize = -1 // bomb if used for allocation before init + +func init() { + // controlMessageSize is set to hold a UDP_GRO or UDP_SEGMENT control + // message. These contain a single uint16 of data. + controlMessageSize = unix.CmsgSpace(2) +} + +// MinControlMessageSize returns the minimum control message size required to +// support read batching via [Conn.ReadBatch]. +func MinControlMessageSize() int { + return controlMessageSize +} + +func MinReadBatchMsgsLen() int { + return 128 +} diff --git a/wgengine/magicsock/batching_conn_linux_test.go b/net/batching/conn_linux_test.go similarity index 89% rename from wgengine/magicsock/batching_conn_linux_test.go rename to net/batching/conn_linux_test.go index 7e0ab8fc485eb..e33ad6d7aad75 100644 --- a/wgengine/magicsock/batching_conn_linux_test.go +++ b/net/batching/conn_linux_test.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package magicsock +package batching import ( "encoding/binary" "net" "testing" + "github.com/tailscale/wireguard-go/conn" "golang.org/x/net/ipv6" "tailscale.com/net/packet" ) @@ -159,13 +160,15 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { return make([]byte, len+packet.GeneveFixedHeaderLength, cap+packet.GeneveFixedHeaderLength) } - vni1 := virtualNetworkID{} - vni1.set(1) + geneve := packet.GeneveHeader{ + Protocol: packet.GeneveProtocolWireGuard, + } + geneve.VNI.Set(1) cases := []struct { name string buffs [][]byte - vni virtualNetworkID + geneve packet.GeneveHeader wantLens 
[]int wantGSO []int }{ @@ -182,7 +185,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { buffs: [][]byte{ withGeneveSpace(1, 1), }, - vni: vni1, + geneve: geneve, wantLens: []int{1 + packet.GeneveFixedHeaderLength}, wantGSO: []int{0}, }, @@ -201,7 +204,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(1, 2+packet.GeneveFixedHeaderLength), withGeneveSpace(1, 1), }, - vni: vni1, + geneve: geneve, wantLens: []int{2 + (2 * packet.GeneveFixedHeaderLength)}, wantGSO: []int{1 + packet.GeneveFixedHeaderLength}, }, @@ -220,7 +223,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(2, 3+packet.GeneveFixedHeaderLength), withGeneveSpace(1, 1), }, - vni: vni1, + geneve: geneve, wantLens: []int{3 + (2 * packet.GeneveFixedHeaderLength)}, wantGSO: []int{2 + packet.GeneveFixedHeaderLength}, }, @@ -241,7 +244,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(1, 1), withGeneveSpace(2, 2), }, - vni: vni1, + geneve: geneve, wantLens: []int{3 + (2 * packet.GeneveFixedHeaderLength), 2 + packet.GeneveFixedHeaderLength}, wantGSO: []int{2 + packet.GeneveFixedHeaderLength, 0}, }, @@ -262,7 +265,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { withGeneveSpace(2, 2), withGeneveSpace(2, 2), }, - vni: vni1, + geneve: geneve, wantLens: []int{4 + (2 * packet.GeneveFixedHeaderLength), 2 + packet.GeneveFixedHeaderLength}, wantGSO: []int{2 + packet.GeneveFixedHeaderLength, 0}, }, @@ -279,7 +282,7 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { msgs[i].Buffers = make([][]byte, 1) msgs[i].OOB = make([]byte, 0, 2) } - got := c.coalesceMessages(addr, tt.vni, tt.buffs, msgs, packet.GeneveFixedHeaderLength) + got := c.coalesceMessages(addr, tt.geneve, tt.buffs, msgs, packet.GeneveFixedHeaderLength) if got != len(tt.wantLens) { t.Fatalf("got len %d want: %d", got, len(tt.wantLens)) } @@ -302,3 +305,12 @@ func Test_linuxBatchingConn_coalesceMessages(t *testing.T) { }) 
} } + +func TestMinReadBatchMsgsLen(t *testing.T) { + // So long as magicsock uses [Conn], and [wireguard-go/conn.Bind] API is + // shaped for wireguard-go to control packet memory, these values should be + // aligned. + if MinReadBatchMsgsLen() != conn.IdealBatchSize { + t.Fatalf("MinReadBatchMsgsLen():%d != conn.IdealBatchSize(): %d", MinReadBatchMsgsLen(), conn.IdealBatchSize) + } +} diff --git a/net/packet/geneve.go b/net/packet/geneve.go index 29970a8fd6bfb..71b365ae89414 100644 --- a/net/packet/geneve.go +++ b/net/packet/geneve.go @@ -24,6 +24,33 @@ const ( GeneveProtocolWireGuard uint16 = 0x7A12 ) +// VirtualNetworkID is a Geneve header (RFC8926) 3-byte virtual network +// identifier. Its methods are NOT thread-safe. +type VirtualNetworkID struct { + _vni uint32 +} + +const ( + vniSetMask uint32 = 0xFF000000 + vniGetMask uint32 = ^vniSetMask +) + +// IsSet returns true if Set() had been called previously, otherwise false. +func (v *VirtualNetworkID) IsSet() bool { + return v._vni&vniSetMask != 0 +} + +// Set sets the provided VNI. If VNI exceeds the 3-byte storage it will be +// clamped. +func (v *VirtualNetworkID) Set(vni uint32) { + v._vni = vni | vniSetMask +} + +// Get returns the VNI value. +func (v *VirtualNetworkID) Get() uint32 { + return v._vni & vniGetMask +} + // GeneveHeader represents the fixed size Geneve header from RFC8926. // TLVs/options are not implemented/supported. // @@ -51,7 +78,7 @@ type GeneveHeader struct { // decisions or MAY be used as a mechanism to distinguish between // overlapping address spaces contained in the encapsulated packet when load // balancing across CPUs. - VNI uint32 + VNI VirtualNetworkID // O (1 bit): Control packet. This packet contains a control message. // Control messages are sent between tunnel endpoints. Tunnel endpoints MUST @@ -65,12 +92,18 @@ type GeneveHeader struct { Control bool } -// Encode encodes GeneveHeader into b. If len(b) < GeneveFixedHeaderLength an -// io.ErrShortBuffer error is returned. 
+var ErrGeneveVNIUnset = errors.New("VNI is unset") + +// Encode encodes GeneveHeader into b. If len(b) < [GeneveFixedHeaderLength] an +// [io.ErrShortBuffer] error is returned. If !h.VNI.IsSet() then an +// [ErrGeneveVNIUnset] error is returned. func (h *GeneveHeader) Encode(b []byte) error { if len(b) < GeneveFixedHeaderLength { return io.ErrShortBuffer } + if !h.VNI.IsSet() { + return ErrGeneveVNIUnset + } if h.Version > 3 { return errors.New("version must be <= 3") } @@ -81,15 +114,12 @@ func (h *GeneveHeader) Encode(b []byte) error { b[1] |= 0x80 } binary.BigEndian.PutUint16(b[2:], h.Protocol) - if h.VNI > 1<<24-1 { - return errors.New("VNI must be <= 2^24-1") - } - binary.BigEndian.PutUint32(b[4:], h.VNI<<8) + binary.BigEndian.PutUint32(b[4:], h.VNI.Get()<<8) return nil } -// Decode decodes GeneveHeader from b. If len(b) < GeneveFixedHeaderLength an -// io.ErrShortBuffer error is returned. +// Decode decodes GeneveHeader from b. If len(b) < [GeneveFixedHeaderLength] an +// [io.ErrShortBuffer] error is returned. 
func (h *GeneveHeader) Decode(b []byte) error { if len(b) < GeneveFixedHeaderLength { return io.ErrShortBuffer @@ -99,6 +129,6 @@ func (h *GeneveHeader) Decode(b []byte) error { h.Control = true } h.Protocol = binary.BigEndian.Uint16(b[2:]) - h.VNI = binary.BigEndian.Uint32(b[4:]) >> 8 + h.VNI.Set(binary.BigEndian.Uint32(b[4:]) >> 8) return nil } diff --git a/net/packet/geneve_test.go b/net/packet/geneve_test.go index 029638638aa96..be9784998adf2 100644 --- a/net/packet/geneve_test.go +++ b/net/packet/geneve_test.go @@ -4,18 +4,21 @@ package packet import ( + "math" "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/types/ptr" ) func TestGeneveHeader(t *testing.T) { in := GeneveHeader{ Version: 3, Protocol: GeneveProtocolDisco, - VNI: 1<<24 - 1, Control: true, } + in.VNI.Set(1<<24 - 1) b := make([]byte, GeneveFixedHeaderLength) err := in.Encode(b) if err != nil { @@ -26,7 +29,56 @@ func TestGeneveHeader(t *testing.T) { if err != nil { t.Fatal(err) } - if diff := cmp.Diff(out, in); diff != "" { + if diff := cmp.Diff(out, in, cmpopts.EquateComparable(VirtualNetworkID{})); diff != "" { t.Fatalf("wrong results (-got +want)\n%s", diff) } } + +func TestVirtualNetworkID(t *testing.T) { + tests := []struct { + name string + set *uint32 + want uint32 + }{ + { + "don't Set", + nil, + 0, + }, + { + "Set 0", + ptr.To(uint32(0)), + 0, + }, + { + "Set 1", + ptr.To(uint32(1)), + 1, + }, + { + "Set math.MaxUint32", + ptr.To(uint32(math.MaxUint32)), + 1<<24 - 1, + }, + { + "Set max 3-byte value", + ptr.To(uint32(1<<24 - 1)), + 1<<24 - 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := VirtualNetworkID{} + if tt.set != nil { + v.Set(*tt.set) + } + if v.IsSet() != (tt.set != nil) { + t.Fatalf("IsSet: %v != wantIsSet: %v", v.IsSet(), tt.set != nil) + } + if v.Get() != tt.want { + t.Fatalf("Get(): %v != want: %v", v.Get(), tt.want) + } + }) + } +} diff --git a/net/udprelay/server.go 
b/net/udprelay/server.go index aece3bc59b0fe..e138c33f23f32 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -140,7 +140,8 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex rand.Read(e.challenge[senderIndex][:]) copy(m.Challenge[:], e.challenge[senderIndex][:]) reply := make([]byte, packet.GeneveFixedHeaderLength, 512) - gh := packet.GeneveHeader{Control: true, VNI: e.vni, Protocol: packet.GeneveProtocolDisco} + gh := packet.GeneveHeader{Control: true, Protocol: packet.GeneveProtocolDisco} + gh.VNI.Set(e.vni) err = gh.Encode(reply) if err != nil { return @@ -543,7 +544,7 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte, rxSocket, otherAFSo // it simple (and slow) for now. s.mu.Lock() defer s.mu.Unlock() - e, ok := s.byVNI[gh.VNI] + e, ok := s.byVNI[gh.VNI.Get()] if !ok { // unknown VNI return diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index de1c293644992..8fc4a4f78cb47 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -62,7 +62,8 @@ func (c *testClient) read(t *testing.T) []byte { func (c *testClient) writeDataPkt(t *testing.T, b []byte) { pkt := make([]byte, packet.GeneveFixedHeaderLength, packet.GeneveFixedHeaderLength+len(b)) - gh := packet.GeneveHeader{Control: false, VNI: c.vni, Protocol: packet.GeneveProtocolWireGuard} + gh := packet.GeneveHeader{Control: false, Protocol: packet.GeneveProtocolWireGuard} + gh.VNI.Set(c.vni) err := gh.Encode(pkt) if err != nil { t.Fatal(err) @@ -84,7 +85,7 @@ func (c *testClient) readDataPkt(t *testing.T) []byte { if gh.Control { t.Fatal("unexpected control") } - if gh.VNI != c.vni { + if gh.VNI.Get() != c.vni { t.Fatal("unexpected vni") } return b[packet.GeneveFixedHeaderLength:] @@ -92,7 +93,8 @@ func (c *testClient) readDataPkt(t *testing.T) []byte { func (c *testClient) writeControlDiscoMsg(t *testing.T, msg disco.Message) { pkt := make([]byte, packet.GeneveFixedHeaderLength, 512) - gh := 
packet.GeneveHeader{Control: true, VNI: c.vni, Protocol: packet.GeneveProtocolDisco} + gh := packet.GeneveHeader{Control: true, Protocol: packet.GeneveProtocolDisco} + gh.VNI.Set(c.vni) err := gh.Encode(pkt) if err != nil { t.Fatal(err) @@ -117,7 +119,7 @@ func (c *testClient) readControlDiscoMsg(t *testing.T) disco.Message { if !gh.Control { t.Fatal("unexpected non-control") } - if gh.VNI != c.vni { + if gh.VNI.Get() != c.vni { t.Fatal("unexpected vni") } b = b[packet.GeneveFixedHeaderLength:] diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index da3175b8c42d2..9ad340c908876 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -264,6 +264,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ diff --git a/wgengine/magicsock/batching_conn.go b/wgengine/magicsock/batching_conn.go deleted file mode 100644 index b769907dbe88f..0000000000000 --- a/wgengine/magicsock/batching_conn.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package magicsock - -import ( - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" - "tailscale.com/types/nettype" -) - -var ( - // This acts as a compile-time check for our usage of ipv6.Message in - // batchingConn for both IPv6 and IPv4 operations. - _ ipv6.Message = ipv4.Message{} -) - -// batchingConn is a nettype.PacketConn that provides batched i/o. 
-type batchingConn interface { - nettype.PacketConn - ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) - WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error -} diff --git a/wgengine/magicsock/batching_conn_default.go b/wgengine/magicsock/batching_conn_default.go deleted file mode 100644 index 519cf8082d5ac..0000000000000 --- a/wgengine/magicsock/batching_conn_default.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux - -package magicsock - -import ( - "tailscale.com/types/nettype" -) - -func tryUpgradeToBatchingConn(pconn nettype.PacketConn, _ string, _ int) nettype.PacketConn { - return pconn -} diff --git a/wgengine/magicsock/debughttp.go b/wgengine/magicsock/debughttp.go index cfdf8c1e12d78..a0159d21e592f 100644 --- a/wgengine/magicsock/debughttp.go +++ b/wgengine/magicsock/debughttp.go @@ -152,7 +152,7 @@ func printEndpointHTML(w io.Writer, ep *endpoint) { io.WriteString(w, "

Endpoints:

    ") for _, ipp := range eps { s := ep.endpointState[ipp] - if ipp == ep.bestAddr.ap && !ep.bestAddr.vni.isSet() { + if ipp == ep.bestAddr.ap && !ep.bestAddr.vni.IsSet() { fmt.Fprintf(w, "
  • %s: (best)
      ", ipp) } else { fmt.Fprintf(w, "
    • %s: ...
        ", ipp) @@ -208,7 +208,7 @@ func epAddrLess(a, b epAddr) bool { return v < 0 } if a.ap.Port() == b.ap.Port() { - return a.vni.get() < b.vni.get() + return a.vni.Get() < b.vni.Get() } return a.ap.Port() < b.ap.Port() } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 6381b021088b6..951e59011d32a 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -108,7 +108,7 @@ func (de *endpoint) udpRelayEndpointReady(maybeBest addrQuality) { defer de.mu.Unlock() now := mono.Now() curBestAddrTrusted := now.Before(de.trustBestAddrUntil) - sameRelayServer := de.bestAddr.vni.isSet() && maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 + sameRelayServer := de.bestAddr.vni.IsSet() && maybeBest.relayServerDisco.Compare(de.bestAddr.relayServerDisco) == 0 if !curBestAddrTrusted || sameRelayServer || @@ -1070,7 +1070,7 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { switch { case udpAddr.ap.Addr().Is4(): - if udpAddr.vni.isSet() { + if udpAddr.vni.IsSet() { de.c.metrics.outboundPacketsPeerRelayIPv4Total.Add(int64(len(buffs))) de.c.metrics.outboundBytesPeerRelayIPv4Total.Add(int64(txBytes)) } else { @@ -1078,7 +1078,7 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { de.c.metrics.outboundBytesIPv4Total.Add(int64(txBytes)) } case udpAddr.ap.Addr().Is6(): - if udpAddr.vni.isSet() { + if udpAddr.vni.IsSet() { de.c.metrics.outboundPacketsPeerRelayIPv6Total.Add(int64(len(buffs))) de.c.metrics.outboundBytesPeerRelayIPv6Total.Add(int64(txBytes)) } else { @@ -1160,7 +1160,7 @@ func (de *endpoint) discoPingTimeout(txid stun.TxID) { return } bestUntrusted := mono.Now().After(de.trustBestAddrUntil) - if sp.to == de.bestAddr.epAddr && sp.to.vni.isSet() && bestUntrusted { + if sp.to == de.bestAddr.epAddr && sp.to.vni.IsSet() && bestUntrusted { // TODO(jwhited): consider applying this to direct UDP paths as well de.clearBestAddrLocked() } @@ -1274,7 +1274,7 @@ func (de *endpoint) 
startDiscoPingLocked(ep epAddr, now mono.Time, purpose disco return } if purpose != pingCLI && - !ep.vni.isSet() { // de.endpointState is only relevant for direct/non-vni epAddr's + !ep.vni.IsSet() { // de.endpointState is only relevant for direct/non-vni epAddr's st, ok := de.endpointState[ep.ap] if !ok { // Shouldn't happen. But don't ping an endpoint that's @@ -1610,7 +1610,7 @@ func (de *endpoint) noteBadEndpoint(udpAddr epAddr) { de.clearBestAddrLocked() - if !udpAddr.vni.isSet() { + if !udpAddr.vni.IsSet() { if st, ok := de.endpointState[udpAddr.ap]; ok { st.clear() } @@ -1644,7 +1644,7 @@ func pingSizeToPktLen(size int, udpAddr epAddr) tstun.WireMTU { headerLen = ipv6.HeaderLen } headerLen += 8 // UDP header length - if udpAddr.vni.isSet() { + if udpAddr.vni.IsSet() { headerLen += packet.GeneveFixedHeaderLength } return tstun.WireMTU(size + headerLen) @@ -1699,7 +1699,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd now := mono.Now() latency := now.Sub(sp.at) - if !isDerp && !src.vni.isSet() { + if !isDerp && !src.vni.IsSet() { // Note: we check vni.isSet() as relay [epAddr]'s are not stored in // endpointState, they are either de.bestAddr or not. st, ok := de.endpointState[sp.to.ap] @@ -1748,7 +1748,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd // we don't clear direct UDP paths on disco ping timeout (see // discoPingTimeout). if betterAddr(thisPong, de.bestAddr) { - if src.vni.isSet() { + if src.vni.IsSet() { // This would be unexpected. Switching to a Geneve-encapsulated // path should only happen in de.relayEndpointReady(). de.c.logf("[unexpected] switching to Geneve-encapsulated path %v from %v", thisPong, de.bestAddr) @@ -1778,23 +1778,23 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd } // epAddr is a [netip.AddrPort] with an optional Geneve header (RFC8926) -// [virtualNetworkID]. +// [packet.VirtualNetworkID]. 
type epAddr struct { - ap netip.AddrPort // if ap == tailcfg.DerpMagicIPAddr then vni is never set - vni virtualNetworkID // vni.isSet() indicates if this [epAddr] involves a Geneve header + ap netip.AddrPort // if ap == tailcfg.DerpMagicIPAddr then vni is never set + vni packet.VirtualNetworkID // vni.IsSet() indicates if this [epAddr] involves a Geneve header } // isDirect returns true if e.ap is valid and not tailcfg.DerpMagicIPAddr, // and a VNI is not set. func (e epAddr) isDirect() bool { - return e.ap.IsValid() && e.ap.Addr() != tailcfg.DerpMagicIPAddr && !e.vni.isSet() + return e.ap.IsValid() && e.ap.Addr() != tailcfg.DerpMagicIPAddr && !e.vni.IsSet() } func (e epAddr) String() string { - if !e.vni.isSet() { + if !e.vni.IsSet() { return e.ap.String() } - return fmt.Sprintf("%v:vni:%d", e.ap.String(), e.vni.get()) + return fmt.Sprintf("%v:vni:%d", e.ap.String(), e.vni.Get()) } // addrQuality is an [epAddr], an optional [key.DiscoPublic] if a relay server @@ -1833,10 +1833,10 @@ func betterAddr(a, b addrQuality) bool { // Geneve-encapsulated paths (UDP relay servers) are lower preference in // relation to non. 
- if !a.vni.isSet() && b.vni.isSet() { + if !a.vni.IsSet() && b.vni.IsSet() { return true } - if a.vni.isSet() && !b.vni.isSet() { + if a.vni.IsSet() && !b.vni.IsSet() { return false } @@ -1982,7 +1982,7 @@ func (de *endpoint) populatePeerStatus(ps *ipnstate.PeerStatus) { ps.Active = now.Sub(de.lastSendExt) < sessionActiveTimeout if udpAddr, derpAddr, _ := de.addrForSendLocked(now); udpAddr.ap.IsValid() && !derpAddr.IsValid() { - if udpAddr.vni.isSet() { + if udpAddr.vni.IsSet() { ps.PeerRelay = udpAddr.String() } else { ps.CurAddr = udpAddr.String() diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index 92f4ef1d3aac1..666d862310c44 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "tailscale.com/net/packet" "tailscale.com/tailcfg" "tailscale.com/tstime/mono" "tailscale.com/types/key" @@ -327,24 +328,24 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { } func Test_epAddr_isDirectUDP(t *testing.T) { - vni := virtualNetworkID{} - vni.set(7) + vni := packet.VirtualNetworkID{} + vni.Set(7) tests := []struct { name string ap netip.AddrPort - vni virtualNetworkID + vni packet.VirtualNetworkID want bool }{ { name: "true", ap: netip.MustParseAddrPort("192.0.2.1:7"), - vni: virtualNetworkID{}, + vni: packet.VirtualNetworkID{}, want: true, }, { name: "false derp magic addr", ap: netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, 0), - vni: virtualNetworkID{}, + vni: packet.VirtualNetworkID{}, want: false, }, { @@ -370,7 +371,7 @@ func Test_epAddr_isDirectUDP(t *testing.T) { func Test_endpoint_udpRelayEndpointReady(t *testing.T) { directAddrQuality := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.1:7")}} peerRelayAddrQuality := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.2:77")}, latency: time.Second} - peerRelayAddrQuality.vni.set(1) + peerRelayAddrQuality.vni.Set(1) 
peerRelayAddrQualityHigherLatencySameServer := addrQuality{ epAddr: epAddr{ap: netip.MustParseAddrPort("192.0.2.3:77"), vni: peerRelayAddrQuality.vni}, latency: peerRelayAddrQuality.latency * 10, diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 0fac793ef446f..a99a0a8e34285 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -36,6 +36,7 @@ import ( "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" + "tailscale.com/net/batching" "tailscale.com/net/connstats" "tailscale.com/net/netcheck" "tailscale.com/net/neterror" @@ -626,7 +627,7 @@ func newConn(logf logger.Logf) *Conn { msgs := make([]ipv6.Message, c.bind.BatchSize()) for i := range msgs { msgs[i].Buffers = make([][]byte, 1) - msgs[i].OOB = make([]byte, controlMessageSize) + msgs[i].OOB = make([]byte, batching.MinControlMessageSize()) } batch := &receiveBatch{ msgs: msgs, @@ -1206,7 +1207,7 @@ func (c *Conn) Ping(peer tailcfg.NodeView, res *ipnstate.PingResult, size int, c func (c *Conn) populateCLIPingResponseLocked(res *ipnstate.PingResult, latency time.Duration, ep epAddr) { res.LatencySeconds = latency.Seconds() if ep.ap.Addr() != tailcfg.DerpMagicIPAddr { - if ep.vni.isSet() { + if ep.vni.IsSet() { res.PeerRelay = ep.String() } else { res.Endpoint = ep.String() @@ -1473,9 +1474,9 @@ func (c *Conn) Send(buffs [][]byte, ep conn.Endpoint, offset int) (err error) { // deemed "under handshake load" and ends up transmitting a cookie reply // using the received [conn.Endpoint] in [device.SendHandshakeCookie]. 
if ep.src.ap.Addr().Is6() { - return c.pconn6.WriteBatchTo(buffs, ep.src, offset) + return c.pconn6.WriteWireGuardBatchTo(buffs, ep.src, offset) } - return c.pconn4.WriteBatchTo(buffs, ep.src, offset) + return c.pconn4.WriteWireGuardBatchTo(buffs, ep.src, offset) } return nil } @@ -1498,9 +1499,9 @@ func (c *Conn) sendUDPBatch(addr epAddr, buffs [][]byte, offset int) (sent bool, panic("bogus sendUDPBatch addr type") } if isIPv6 { - err = c.pconn6.WriteBatchTo(buffs, addr, offset) + err = c.pconn6.WriteWireGuardBatchTo(buffs, addr, offset) } else { - err = c.pconn4.WriteBatchTo(buffs, addr, offset) + err = c.pconn4.WriteWireGuardBatchTo(buffs, addr, offset) } if err != nil { var errGSO neterror.ErrUDPGSODisabled @@ -1793,7 +1794,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach c.logf("[unexpected] geneve header decoding error: %v", err) return nil, 0, false, false } - src.vni.set(geneve.VNI) + src.vni = geneve.VNI } switch pt { case packetLooksLikeDisco: @@ -1825,7 +1826,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach // geneveInclusivePacketLen holds the packet length prior to any potential // Geneve header stripping. geneveInclusivePacketLen := len(b) - if src.vni.isSet() { + if src.vni.IsSet() { // Strip away the Geneve header before returning the packet to // wireguard-go. // @@ -1858,7 +1859,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach if stats := c.stats.Load(); stats != nil { stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) } - if src.vni.isSet() && (connNoted || looksLikeInitiationMsg(b)) { + if src.vni.IsSet() && (connNoted || looksLikeInitiationMsg(b)) { // connNoted is periodic, but we also want to verify if the peer is who // we believe for all initiation messages, otherwise we could get // unlucky and fail to JIT configure the "correct" peer. @@ -1887,33 +1888,6 @@ const ( // speeds. 
var debugIPv4DiscoPingPenalty = envknob.RegisterDuration("TS_DISCO_PONG_IPV4_DELAY") -// virtualNetworkID is a Geneve header (RFC8926) 3-byte virtual network -// identifier. Its field must only ever be accessed via its methods. -type virtualNetworkID struct { - _vni uint32 -} - -const ( - vniSetMask uint32 = 0xFF000000 - vniGetMask uint32 = ^vniSetMask -) - -// isSet returns true if set() had been called previously, otherwise false. -func (v *virtualNetworkID) isSet() bool { - return v._vni&vniSetMask != 0 -} - -// set sets the provided VNI. If VNI exceeds the 3-byte storage it will be -// clamped. -func (v *virtualNetworkID) set(vni uint32) { - v._vni = vni | vniSetMask -} - -// get returns the VNI value. -func (v *virtualNetworkID) get() uint32 { - return v._vni & vniGetMask -} - // sendDiscoAllocateUDPRelayEndpointRequest is primarily an alias for // sendDiscoMessage, but it will alternatively send m over the eventbus if dst // is a DERP IP:port, and dstKey is self. This saves a round-trip through DERP @@ -1981,11 +1955,11 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. c.mu.Unlock() pkt := make([]byte, 0, 512) // TODO: size it correctly? pool? if it matters. - if dst.vni.isSet() { + if dst.vni.IsSet() { gh := packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: dst.vni.get(), + VNI: dst.vni, Control: isRelayHandshakeMsg, } pkt = append(pkt, make([]byte, packet.GeneveFixedHeaderLength)...) @@ -2006,7 +1980,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. box := di.sharedKey.Seal(m.AppendMarshal(nil)) pkt = append(pkt, box...) const isDisco = true - sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco, dst.vni.isSet()) + sent, err = c.sendAddr(dst.ap, dstKey, pkt, isDisco, dst.vni.IsSet()) if sent { if logLevel == discoLog || (logLevel == discoVerboseLog && debugDisco()) { node := "?" 
@@ -2294,7 +2268,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake } return true }) - if !knownTxID && src.vni.isSet() { + if !knownTxID && src.vni.IsSet() { // If it's an unknown TxID, and it's Geneve-encapsulated, then // make [relayManager] aware. It might be in the middle of probing // src. @@ -2512,7 +2486,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN di.lastPingTime = time.Now() isDerp := src.ap.Addr() == tailcfg.DerpMagicIPAddr - if src.vni.isSet() { + if src.vni.IsSet() { if isDerp { c.logf("[unexpected] got Geneve-encapsulated disco ping from %v/%v over DERP", src, derpNodeSrc) return diff --git a/wgengine/magicsock/magicsock_default.go b/wgengine/magicsock/magicsock_default.go index 7614c64c92559..4922f2c096bc4 100644 --- a/wgengine/magicsock/magicsock_default.go +++ b/wgengine/magicsock/magicsock_default.go @@ -21,7 +21,3 @@ func (c *Conn) listenRawDisco(family string) (io.Closer, error) { func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { portableTrySetSocketBuffer(pconn, logf) } - -const ( - controlMessageSize = 0 -) diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index 07038002912f7..3369bcb89eca3 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -516,11 +516,3 @@ func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { } } } - -var controlMessageSize = -1 // bomb if used for allocation before init - -func init() { - // controlMessageSize is set to hold a UDP_GRO or UDP_SEGMENT control - // message. These contain a single uint16 of data. 
- controlMessageSize = unix.CmsgSpace(2) -} diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 9399dab322152..5e348b02b7a24 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -13,7 +13,6 @@ import ( "errors" "fmt" "io" - "math" "math/rand" "net" "net/http" @@ -1787,7 +1786,7 @@ func TestBetterAddr(t *testing.T) { } avl := func(ipps string, vni uint32, d time.Duration) addrQuality { q := al(ipps, d) - q.vni.set(vni) + q.vni.Set(vni) return q } zero := addrQuality{} @@ -3178,9 +3177,9 @@ func Test_packetLooksLike(t *testing.T) { gh := packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err := gh.Encode(geneveEncapDisco) if err != nil { t.Fatal(err) @@ -3200,9 +3199,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolWireGuard, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapWireGuard) if err != nil { t.Fatal(err) @@ -3213,9 +3212,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 1, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapDiscoNonZeroGeneveVersion) if err != nil { t.Fatal(err) @@ -3226,9 +3225,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapDiscoNonZeroGeneveReservedBits) if err != nil { t.Fatal(err) @@ -3240,9 +3239,9 @@ func Test_packetLooksLike(t *testing.T) { gh = packet.GeneveHeader{ Version: 0, Protocol: packet.GeneveProtocolDisco, - VNI: 1, Control: true, } + gh.VNI.Set(1) err = gh.Encode(geneveEncapDiscoNonZeroGeneveVNILSB) if err != nil { t.Fatal(err) @@ -3342,55 +3341,6 @@ func Test_packetLooksLike(t *testing.T) { } } -func Test_virtualNetworkID(t *testing.T) { - tests := []struct { - 
name string - set *uint32 - want uint32 - }{ - { - "don't set", - nil, - 0, - }, - { - "set 0", - ptr.To(uint32(0)), - 0, - }, - { - "set 1", - ptr.To(uint32(1)), - 1, - }, - { - "set math.MaxUint32", - ptr.To(uint32(math.MaxUint32)), - 1<<24 - 1, - }, - { - "set max 3-byte value", - ptr.To(uint32(1<<24 - 1)), - 1<<24 - 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - v := virtualNetworkID{} - if tt.set != nil { - v.set(*tt.set) - } - if v.isSet() != (tt.set != nil) { - t.Fatalf("isSet: %v != wantIsSet: %v", v.isSet(), tt.set != nil) - } - if v.get() != tt.want { - t.Fatalf("get(): %v != want: %v", v.get(), tt.want) - } - }) - } -} - func Test_looksLikeInitiationMsg(t *testing.T) { // initMsg was captured as the first packet from a WireGuard "session" initMsg, err := hex.DecodeString("01000000d9205f67915a500e377b409e0c3d97ca91e68654b95952de965e75df491000cce00632678cd9e8c8525556aa8daf24e6cfc44c48812bb560ff3c1c5dee061b3f833dfaa48acf13b64bd1e0027aa4d977a3721b82fd6072338702fc3193651404980ad46dae2869ba6416cc0eb38621a4140b5b918eb6402b697202adb3002a6d00000000000000000000000000000000") @@ -3772,6 +3722,7 @@ func TestConn_receiveIP(t *testing.T) { gh := packet.GeneveHeader{ Protocol: packet.GeneveProtocolDisco, } + gh.VNI.Set(1) err := gh.Encode(looksLikeGeneveDisco) if err != nil { t.Fatal(err) @@ -3796,10 +3747,8 @@ func TestConn_receiveIP(t *testing.T) { looksLikeGeneveWireGuardInit := make([]byte, packet.GeneveFixedHeaderLength+device.MessageInitiationSize) gh = packet.GeneveHeader{ Protocol: packet.GeneveProtocolWireGuard, - VNI: 1, } - vni := virtualNetworkID{} - vni.set(gh.VNI) + gh.VNI.Set(1) err = gh.Encode(looksLikeGeneveWireGuardInit) if err != nil { t.Fatal(err) @@ -3922,7 +3871,7 @@ func TestConn_receiveIP(t *testing.T) { ipp: netip.MustParseAddrPort("127.0.0.1:7777"), cache: &epAddrEndpointCache{}, insertWantEndpointTypeInPeerMap: true, - peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: vni}, + 
peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: gh.VNI}, wantEndpointType: &lazyEndpoint{ maybeEP: newPeerMapInsertableEndpoint(0), }, @@ -3938,7 +3887,7 @@ func TestConn_receiveIP(t *testing.T) { ipp: netip.MustParseAddrPort("127.0.0.1:7777"), cache: &epAddrEndpointCache{}, insertWantEndpointTypeInPeerMap: true, - peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: vni}, + peerMapEpAddr: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:7777"), vni: gh.VNI}, wantEndpointType: &lazyEndpoint{ maybeEP: newPeerMapInsertableEndpoint(mono.Now().Add(time.Hour * 24)), }, diff --git a/wgengine/magicsock/peermap.go b/wgengine/magicsock/peermap.go index 838905396002d..136353563e2bd 100644 --- a/wgengine/magicsock/peermap.go +++ b/wgengine/magicsock/peermap.go @@ -184,12 +184,12 @@ func (m *peerMap) setNodeKeyForEpAddr(addr epAddr, nk key.NodePublic) { if pi := m.byEpAddr[addr]; pi != nil { delete(pi.epAddrs, addr) delete(m.byEpAddr, addr) - if addr.vni.isSet() { + if addr.vni.IsSet() { delete(m.relayEpAddrByNodeKey, pi.ep.publicKey) } } if pi, ok := m.byNodeKey[nk]; ok { - if addr.vni.isSet() { + if addr.vni.IsSet() { relay, ok := m.relayEpAddrByNodeKey[nk] if ok { delete(pi.epAddrs, relay) diff --git a/wgengine/magicsock/peermap_test.go b/wgengine/magicsock/peermap_test.go index 52504272ff8e2..171e22a6d5795 100644 --- a/wgengine/magicsock/peermap_test.go +++ b/wgengine/magicsock/peermap_test.go @@ -7,6 +7,7 @@ import ( "net/netip" "testing" + "tailscale.com/net/packet" "tailscale.com/types/key" ) @@ -20,8 +21,8 @@ func Test_peerMap_oneRelayEpAddrPerNK(t *testing.T) { ed := &endpointDisco{key: key.NewDisco().Public()} ep.disco.Store(ed) pm.upsertEndpoint(ep, key.DiscoPublic{}) - vni := virtualNetworkID{} - vni.set(1) + vni := packet.VirtualNetworkID{} + vni.Set(1) relayEpAddrA := epAddr{ap: netip.MustParseAddrPort("127.0.0.1:1"), vni: vni} relayEpAddrB := epAddr{ap: netip.MustParseAddrPort("127.0.0.1:2"), vni: vni} 
pm.setNodeKeyForEpAddr(relayEpAddrA, nk) diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index 8b9ad4bb0bead..2798abbf20ed8 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ b/wgengine/magicsock/rebinding_conn.go @@ -13,6 +13,7 @@ import ( "syscall" "golang.org/x/net/ipv6" + "tailscale.com/net/batching" "tailscale.com/net/netaddr" "tailscale.com/net/packet" "tailscale.com/types/nettype" @@ -42,7 +43,7 @@ type RebindingUDPConn struct { // disrupting surrounding code that assumes nettype.PacketConn is a // *net.UDPConn. func (c *RebindingUDPConn) setConnLocked(p nettype.PacketConn, network string, batchSize int) { - upc := tryUpgradeToBatchingConn(p, network, batchSize) + upc := batching.TryUpgradeToConn(p, network, batchSize) c.pconn = upc c.pconnAtomic.Store(&upc) c.port = uint16(c.localAddrLocked().Port) @@ -72,25 +73,27 @@ func (c *RebindingUDPConn) ReadFromUDPAddrPort(b []byte) (int, netip.AddrPort, e return c.readFromWithInitPconn(*c.pconnAtomic.Load(), b) } -// WriteBatchTo writes buffs to addr. -func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) error { +// WriteWireGuardBatchTo writes buffs to addr. It serves primarily as an alias +// for [batching.Conn.WriteBatchTo], with fallback to single packet operations +// if c.pconn is not a [batching.Conn]. +// +// WriteWireGuardBatchTo assumes buffs are WireGuard packets, which is notable +// for Geneve encapsulation: Geneve protocol is set to [packet.GeneveProtocolWireGuard], +// and the control bit is left unset. 
+func (c *RebindingUDPConn) WriteWireGuardBatchTo(buffs [][]byte, addr epAddr, offset int) error { if offset != packet.GeneveFixedHeaderLength { - return fmt.Errorf("RebindingUDPConn.WriteBatchTo: [unexpected] offset (%d) != Geneve header length (%d)", offset, packet.GeneveFixedHeaderLength) + return fmt.Errorf("RebindingUDPConn.WriteWireGuardBatchTo: [unexpected] offset (%d) != Geneve header length (%d)", offset, packet.GeneveFixedHeaderLength) + } + gh := packet.GeneveHeader{ + Protocol: packet.GeneveProtocolWireGuard, + VNI: addr.vni, } for { pconn := *c.pconnAtomic.Load() - b, ok := pconn.(batchingConn) + b, ok := pconn.(batching.Conn) if !ok { - vniIsSet := addr.vni.isSet() - var gh packet.GeneveHeader - if vniIsSet { - gh = packet.GeneveHeader{ - Protocol: packet.GeneveProtocolWireGuard, - VNI: addr.vni.get(), - } - } for _, buf := range buffs { - if vniIsSet { + if gh.VNI.IsSet() { gh.Encode(buf) } else { buf = buf[offset:] @@ -102,7 +105,7 @@ func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) } return nil } - err := b.WriteBatchTo(buffs, addr, offset) + err := b.WriteBatchTo(buffs, addr.ap, gh, offset) if err != nil { if pconn != c.currentConn() { continue @@ -113,13 +116,12 @@ func (c *RebindingUDPConn) WriteBatchTo(buffs [][]byte, addr epAddr, offset int) } } -// ReadBatch reads messages from c into msgs. It returns the number of messages -// the caller should evaluate for nonzero len, as a zero len message may fall -// on either side of a nonzero. +// ReadBatch is an alias for [batching.Conn.ReadBatch] with fallback to single +// packet operations if c.pconn is not a [batching.Conn]. 
func (c *RebindingUDPConn) ReadBatch(msgs []ipv6.Message, flags int) (int, error) { for { pconn := *c.pconnAtomic.Load() - b, ok := pconn.(batchingConn) + b, ok := pconn.(batching.Conn) if !ok { n, ap, err := c.readFromWithInitPconn(pconn, msgs[0].Buffers[0]) if err == nil { diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index ad8c5fc763adb..8a1a4fcf57fe8 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -11,6 +11,7 @@ import ( "time" "tailscale.com/disco" + "tailscale.com/net/packet" "tailscale.com/net/stun" udprelay "tailscale.com/net/udprelay/endpoint" "tailscale.com/tailcfg" @@ -384,7 +385,7 @@ func (r *relayManager) handleRxDiscoMsg(conn *Conn, dm disco.Message, relayServe relayServerNodeKey: relayServerNodeKey, disco: discoKey, from: src.ap, - vni: src.vni.get(), + vni: src.vni.Get(), at: time.Now(), }) } @@ -535,8 +536,8 @@ func (r *relayManager) handleRxDiscoMsgRunLoop(event relayDiscoMsgEvent) { // socket on Linux. We make no such efforts here as the raw socket BPF // program does not support Geneve-encapsulated disco, and is also // disabled by default. - vni := virtualNetworkID{} - vni.set(event.vni) + vni := packet.VirtualNetworkID{} + vni.Set(event.vni) go event.conn.sendDiscoMessage(epAddr{ap: event.from, vni: vni}, key.NodePublic{}, event.disco, &disco.Pong{ TxID: msg.TxID, Src: event.from, @@ -622,8 +623,8 @@ func (r *relayManager) handleHandshakeWorkDoneRunLoop(done relayEndpointHandshak return } // This relay endpoint is functional. - vni := virtualNetworkID{} - vni.set(done.work.se.VNI) + vni := packet.VirtualNetworkID{} + vni.Set(done.work.se.VNI) addr := epAddr{ap: done.pongReceivedFrom, vni: vni} // ep.udpRelayEndpointReady() must be called in a new goroutine to prevent // deadlocks as it acquires [endpoint] & [Conn] mutexes. 
See [relayManager] @@ -784,8 +785,8 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat bind := &disco.BindUDPRelayEndpoint{ BindUDPRelayEndpointCommon: common, } - vni := virtualNetworkID{} - vni.set(work.se.VNI) + vni := packet.VirtualNetworkID{} + vni.Set(work.se.VNI) for _, addrPort := range work.se.AddrPorts { if addrPort.IsValid() { sentBindAny = true From 0f7facfeee1bde318f214b9882349f80fa02d582 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 13 Aug 2025 13:49:27 -0700 Subject: [PATCH 0210/1093] control/controlclient: fix data race on tkaHead (#16855) Grab a copy under mutex in sendMapRequest. Updates #cleanup Signed-off-by: Andrew Lytvynov --- control/controlclient/direct.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 4c9b04ce9b114..78a86e935551d 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -856,6 +856,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap hi := c.hostInfoLocked() backendLogID := hi.BackendLogID connectionHandleForTest := c.connectionHandleForTest + tkaHead := c.tkaHead var epStrs []string var eps []netip.AddrPort var epTypes []tailcfg.EndpointType @@ -906,7 +907,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap Hostinfo: hi, DebugFlags: c.debugFlags, OmitPeers: nu == nil, - TKAHead: c.tkaHead, + TKAHead: tkaHead, ConnectionHandleForTest: connectionHandleForTest, } var extraDebugFlags []string From e4d2822afcf4e0a2a3ffd6cf54ac256d8291c10f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 13 Aug 2025 17:19:32 -0700 Subject: [PATCH 0211/1093] go.toolchain.rev: bump Go for data race in Go http client Updates golang/go#73522 Updates tailscale/go#131 Updates tailscale/corp#31133 Change-Id: Ibb7a98944ef287d455ce4f5d202b2e2bd6d8742b Signed-off-by: Brad Fitzpatrick --- go.toolchain.rev | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index fa951ac1b04ea..6e3bd7ff9e3b6 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -606f294beebf9df5754804710cd5e16d30532692 +54f31cd8fc7b3d7d87c1ea455c8bb4b33372f706 From 5402620db804a8f88cc5eb249b3fc3802310012d Mon Sep 17 00:00:00 2001 From: Will Hannah Date: Thu, 14 Aug 2025 07:19:20 -0700 Subject: [PATCH 0212/1093] net/tshttpproxy: add macOS support for system proxy (#16826) Adds a setter for proxyFunc to allow macOS to pull defined system proxies. Disallows overriding if proxyFunc is set via config. Updates tailscale/corp#30668 Signed-off-by: Will Hannah --- net/tshttpproxy/tshttpproxy.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/net/tshttpproxy/tshttpproxy.go b/net/tshttpproxy/tshttpproxy.go index 2ca440b57be74..ab2fd39e37858 100644 --- a/net/tshttpproxy/tshttpproxy.go +++ b/net/tshttpproxy/tshttpproxy.go @@ -38,6 +38,23 @@ var ( proxyFunc func(*url.URL) (*url.URL, error) ) +// SetProxyFunc can be used by clients to set a platform-specific function for proxy resolution. +// If config is set when this function is called, an error will be returned. +// The provided function should return a proxy URL for the given request URL, +// nil if no proxy is enabled for the request URL, or an error if proxy settings cannot be resolved. 
+func SetProxyFunc(fn func(*url.URL) (*url.URL, error)) error { + mu.Lock() + defer mu.Unlock() + + // Allow override only if config is not set + if config != nil { + return fmt.Errorf("tshttpproxy: SetProxyFunc can only be called when config is not set") + } + + proxyFunc = fn + return nil +} + func getProxyFunc() func(*url.URL) (*url.URL, error) { // Create config/proxyFunc if it's not created mu.Lock() From 819db6759cd5087c47ba83598be5f67c936ab156 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Thu, 14 Aug 2025 14:02:19 +0100 Subject: [PATCH 0213/1093] tka: block key addition when the max number of keys is reached Updates #16607 Signed-off-by: Anton Tolchanov --- tka/builder.go | 5 +++++ tka/builder_test.go | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/tka/builder.go b/tka/builder.go index c14ba2330ae0d..ec38bb6fa15f7 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -67,6 +67,11 @@ func (b *UpdateBuilder) AddKey(key Key) error { if _, err := b.state.GetKey(keyID); err == nil { return fmt.Errorf("cannot add key %v: already exists", key) } + + if len(b.state.Keys) >= maxKeys { + return fmt.Errorf("cannot add key %v: maximum number of keys reached", key) + } + return b.mkUpdate(AUM{MessageKind: AUMAddKey, Key: &key}) } diff --git a/tka/builder_test.go b/tka/builder_test.go index 3dbd4347abf06..52907186b6d30 100644 --- a/tka/builder_test.go +++ b/tka/builder_test.go @@ -58,6 +58,50 @@ func TestAuthorityBuilderAddKey(t *testing.T) { t.Errorf("could not read new key: %v", err) } } +func TestAuthorityBuilderMaxKey(t *testing.T) { + pub, priv := testingKey25519(t, 1) + key := Key{Kind: Key25519, Public: pub, Votes: 2} + + storage := &Mem{} + a, _, err := Create(storage, State{ + Keys: []Key{key}, + DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, + }, signer25519(priv)) + if err != nil { + t.Fatalf("Create() failed: %v", err) + } + + for i := 0; i <= maxKeys; i++ { + pub2, _ := testingKey25519(t, 
int64(2+i)) + key2 := Key{Kind: Key25519, Public: pub2, Votes: 1} + + b := a.NewUpdater(signer25519(priv)) + err := b.AddKey(key2) + if i < maxKeys-1 { + if err != nil { + t.Fatalf("AddKey(%v) failed: %v", key2, err) + } + } else { + // Too many keys. + if err == nil { + t.Fatalf("AddKey(%v) succeeded unexpectedly", key2) + } + continue + } + + updates, err := b.Finalize(storage) + if err != nil { + t.Fatalf("Finalize() failed: %v", err) + } + + if err := a.Inform(storage, updates); err != nil { + t.Fatalf("could not apply generated updates: %v", err) + } + if _, err := a.state.GetKey(key2.MustID()); err != nil { + t.Errorf("could not read new key: %v", err) + } + } +} func TestAuthorityBuilderRemoveKey(t *testing.T) { pub, priv := testingKey25519(t, 1) From c083a9b05330372aa0435f4c89fb1784c826f9bb Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 14 Aug 2025 10:48:06 -0700 Subject: [PATCH 0214/1093] net/batching: fix compile-time assert (#16864) Updates #cleanup Signed-off-by: Jordan Whited --- net/batching/conn_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batching/conn_linux.go b/net/batching/conn_linux.go index 0416c2729977b..09a80ed9f5e34 100644 --- a/net/batching/conn_linux.go +++ b/net/batching/conn_linux.go @@ -45,7 +45,7 @@ type xnetBatchWriter interface { var ( // [linuxBatchingConn] implements [Conn]. - _ Conn = &linuxBatchingConn{} + _ Conn = (*linuxBatchingConn)(nil) ) // linuxBatchingConn is a UDP socket that provides batched i/o. It implements From fbb91758ac41d279bf67103d204690ba8520afa2 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 14 Aug 2025 13:46:48 -0700 Subject: [PATCH 0215/1093] cmd/viewer, types/views: implement support for json/v2 (#16852) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for having every viewer type implement jsonv2.MarshalerTo and jsonv2.UnmarshalerFrom. 
This provides a significant boost in performance as the json package no longer needs to validate the entirety of the JSON value outputted by MarshalJSON, nor does it need to identify the boundaries of a JSON value in order to call UnmarshalJSON. For deeply nested and recursive MarshalJSON or UnmarshalJSON calls, this can improve runtime from O(N²) to O(N). This still references "github.com/go-json-experiment/json" instead of the experimental "encoding/json/v2" package now available in Go 1.25 under goexperiment.jsonv2 so that code still builds without the experiment tag. Of note, the "github.com/go-json-experiment/json" package aliases the standard library under the right build conditions. Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- cmd/cloner/cloner.go | 12 +- cmd/stund/depaware.txt | 2 +- cmd/viewer/tests/tests_view.go | 288 +++++++++- cmd/viewer/viewer.go | 40 +- cmd/viewer/viewer_test.go | 12 +- drive/drive_view.go | 30 +- go.mod | 2 +- go.sum | 4 +- ipn/ipn_view.go | 186 ++++++- tailcfg/tailcfg_view.go | 524 ++++++++++++++++-- types/dnstype/dnstype_view.go | 30 +- types/persist/persist_view.go | 30 +- .../prefs/prefs_example/prefs_example_view.go | 82 ++- types/prefs/prefs_view_test.go | 135 ++++- types/views/views.go | 166 ++++-- types/views/views_test.go | 92 ++- util/codegen/codegen.go | 29 +- 17 files changed, 1463 insertions(+), 201 deletions(-) diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index a1ffc30feafb2..15a808141e626 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -136,13 +136,13 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("if src.%s[i] == nil { dst.%s[i] = nil } else {", fname, fname) if codegen.ContainsPointers(ptr.Elem()) { if _, isIface := ptr.Elem().Underlying().(*types.Interface); isIface { - it.Import("tailscale.com/types/ptr") + it.Import("", "tailscale.com/types/ptr") writef("\tdst.%s[i] = ptr.To((*src.%s[i]).Clone())", fname, fname) } else { 
writef("\tdst.%s[i] = src.%s[i].Clone()", fname, fname) } } else { - it.Import("tailscale.com/types/ptr") + it.Import("", "tailscale.com/types/ptr") writef("\tdst.%s[i] = ptr.To(*src.%s[i])", fname, fname) } writef("}") @@ -165,7 +165,7 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("dst.%s = src.%s.Clone()", fname, fname) continue } - it.Import("tailscale.com/types/ptr") + it.Import("", "tailscale.com/types/ptr") writef("if dst.%s != nil {", fname) if _, isIface := base.Underlying().(*types.Interface); isIface && hasPtrs { writef("\tdst.%s = ptr.To((*src.%s).Clone())", fname, fname) @@ -197,13 +197,13 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\t\tif v == nil { dst.%s[k] = nil } else {", fname) if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) { if _, isIface := base.(*types.Interface); isIface { - it.Import("tailscale.com/types/ptr") + it.Import("", "tailscale.com/types/ptr") writef("\t\t\tdst.%s[k] = ptr.To((*v).Clone())", fname) } else { writef("\t\t\tdst.%s[k] = v.Clone()", fname) } } else { - it.Import("tailscale.com/types/ptr") + it.Import("", "tailscale.com/types/ptr") writef("\t\t\tdst.%s[k] = ptr.To(*v)", fname) } writef("}") @@ -224,7 +224,7 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\t}") writef("}") } else { - it.Import("maps") + it.Import("", "maps") writef("\tdst.%s = maps.Clone(src.%s)", fname, fname) } case *types.Interface: diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index d389d59a39949..8e4db75aebb8e 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -2,7 +2,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus - github.com/go-json-experiment/json from tailscale.com/types/opt + 
github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index f1d8f424ff01b..bc95fea015656 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -6,10 +6,12 @@ package tests import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "golang.org/x/exp/constraints" "tailscale.com/types/views" ) @@ -44,8 +46,17 @@ func (v StructWithPtrsView) AsStruct() *StructWithPtrs { return v.ж.Clone() } -func (v StructWithPtrsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithPtrsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithPtrsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithPtrsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -54,7 +65,20 @@ func (v *StructWithPtrsView) UnmarshalJSON(b []byte) error { return nil } var x StructWithPtrs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *StructWithPtrsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithPtrs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -108,8 +132,17 @@ func (v StructWithoutPtrsView) AsStruct() *StructWithoutPtrs { return v.ж.Clone() } -func (v StructWithoutPtrsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithoutPtrsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithoutPtrsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithoutPtrsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -118,7 +151,20 @@ func (v *StructWithoutPtrsView) UnmarshalJSON(b []byte) error { return nil } var x StructWithoutPtrs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithoutPtrsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithoutPtrs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -162,8 +208,17 @@ func (v MapView) AsStruct() *Map { return v.ж.Clone() } -func (v MapView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v MapView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v MapView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *MapView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -172,7 +227,20 @@ func (v *MapView) UnmarshalJSON(b []byte) error { return nil } var x Map - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *MapView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Map + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -268,8 +336,17 @@ func (v StructWithSlicesView) AsStruct() *StructWithSlices { return v.ж.Clone() } -func (v StructWithSlicesView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithSlicesView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithSlicesView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithSlicesView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -278,7 +355,20 @@ func (v *StructWithSlicesView) UnmarshalJSON(b []byte) error { return nil } var x StructWithSlices - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *StructWithSlicesView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithSlices + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -342,8 +432,17 @@ func (v StructWithEmbeddedView) AsStruct() *StructWithEmbedded { return v.ж.Clone() } -func (v StructWithEmbeddedView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithEmbeddedView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithEmbeddedView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithEmbeddedView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -352,7 +451,20 @@ func (v *StructWithEmbeddedView) UnmarshalJSON(b []byte) error { return nil } var x StructWithEmbedded - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithEmbeddedView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithEmbedded + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -398,8 +510,17 @@ func (v GenericIntStructView[T]) AsStruct() *GenericIntStruct[T] { return v.ж.Clone() } -func (v GenericIntStructView[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v GenericIntStructView[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v GenericIntStructView[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *GenericIntStructView[T]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -408,7 +529,20 @@ func (v *GenericIntStructView[T]) UnmarshalJSON(b []byte) error { return nil } var x GenericIntStruct[T] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *GenericIntStructView[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x GenericIntStruct[T] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -470,8 +604,17 @@ func (v GenericNoPtrsStructView[T]) AsStruct() *GenericNoPtrsStruct[T] { return v.ж.Clone() } -func (v GenericNoPtrsStructView[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v GenericNoPtrsStructView[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v GenericNoPtrsStructView[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *GenericNoPtrsStructView[T]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -480,7 +623,20 @@ func (v *GenericNoPtrsStructView[T]) UnmarshalJSON(b []byte) error { return nil } var x GenericNoPtrsStruct[T] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *GenericNoPtrsStructView[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x GenericNoPtrsStruct[T] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -542,8 +698,17 @@ func (v GenericCloneableStructView[T, V]) AsStruct() *GenericCloneableStruct[T, return v.ж.Clone() } -func (v GenericCloneableStructView[T, V]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v GenericCloneableStructView[T, V]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v GenericCloneableStructView[T, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *GenericCloneableStructView[T, V]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -552,7 +717,20 @@ func (v *GenericCloneableStructView[T, V]) UnmarshalJSON(b []byte) error { return nil } var x GenericCloneableStruct[T, V] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *GenericCloneableStructView[T, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x GenericCloneableStruct[T, V] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -617,8 +795,17 @@ func (v StructWithContainersView) AsStruct() *StructWithContainers { return v.ж.Clone() } -func (v StructWithContainersView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. 
+func (v StructWithContainersView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithContainersView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *StructWithContainersView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -627,7 +814,20 @@ func (v *StructWithContainersView) UnmarshalJSON(b []byte) error { return nil } var x StructWithContainers - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithContainersView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithContainers + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -689,8 +889,17 @@ func (v StructWithTypeAliasFieldsView) AsStruct() *StructWithTypeAliasFields { return v.ж.Clone() } -func (v StructWithTypeAliasFieldsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithTypeAliasFieldsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithTypeAliasFieldsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *StructWithTypeAliasFieldsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -699,7 +908,20 @@ func (v *StructWithTypeAliasFieldsView) UnmarshalJSON(b []byte) error { return nil } var x StructWithTypeAliasFields - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithTypeAliasFieldsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithTypeAliasFields + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -787,10 +1009,17 @@ func (v GenericTypeAliasStructView[T, T2, V2]) AsStruct() *GenericTypeAliasStruc return v.ж.Clone() } +// MarshalJSON implements [jsonv1.Marshaler]. func (v GenericTypeAliasStructView[T, T2, V2]) MarshalJSON() ([]byte, error) { - return json.Marshal(v.ж) + return jsonv1.Marshal(v.ж) } +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v GenericTypeAliasStructView[T, T2, V2]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *GenericTypeAliasStructView[T, T2, V2]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -799,7 +1028,20 @@ func (v *GenericTypeAliasStructView[T, T2, V2]) UnmarshalJSON(b []byte) error { return nil } var x GenericTypeAliasStruct[T, T2, V2] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *GenericTypeAliasStructView[T, T2, V2]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x GenericTypeAliasStruct[T, T2, V2] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index 2d30cc2eb1f2d..a9617ac1064e6 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -49,8 +49,17 @@ func (v {{.ViewName}}{{.TypeParamNames}}) AsStruct() *{{.StructName}}{{.TypePara return v.ж.Clone() } -func (v {{.ViewName}}{{.TypeParamNames}}) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v {{.ViewName}}{{.TypeParamNames}}) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v {{.ViewName}}{{.TypeParamNames}}) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -59,10 +68,23 @@ func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSON(b []byte) error { return nil } var x {{.StructName}}{{.TypeParamNames}} - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x {{.StructName}}{{.TypeParamNames}} + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } - v.ж=&x + v.ж = &x return nil } @@ -125,8 +147,10 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if !ok || codegen.IsViewType(t) { return } - it.Import("encoding/json") - it.Import("errors") + it.Import("jsonv1", "encoding/json") + it.Import("jsonv2", "github.com/go-json-experiment/json") + it.Import("", "github.com/go-json-experiment/json/jsontext") + it.Import("", "errors") args := struct { StructName string @@ -182,11 +206,11 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * switch elem.String() { case "byte": args.FieldType = it.QualifiedName(fieldType) - it.Import("tailscale.com/types/views") + it.Import("", "tailscale.com/types/views") writeTemplate("byteSliceField") default: args.FieldType = it.QualifiedName(elem) - it.Import("tailscale.com/types/views") + it.Import("", "tailscale.com/types/views") shallow, deep, base := requiresCloning(elem) if deep { switch elem.Underlying().(type) { @@ -252,7 +276,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * writeTemplate("unsupportedField") continue } - it.Import("tailscale.com/types/views") + it.Import("", "tailscale.com/types/views") args.MapKeyType = it.QualifiedName(key) mElem := m.Elem() var template string diff --git a/cmd/viewer/viewer_test.go b/cmd/viewer/viewer_test.go index cd5f3d95f9c93..d12d496551327 100644 --- a/cmd/viewer/viewer_test.go +++ b/cmd/viewer/viewer_test.go @@ -20,19 +20,19 @@ func TestViewerImports(t *testing.T) { name string content string typeNames []string - wantImports []string + wantImports [][2]string }{ { name: "Map", content: `type Test struct { Map map[string]int }`, typeNames: []string{"Test"}, - 
wantImports: []string{"tailscale.com/types/views"}, + wantImports: [][2]string{{"", "tailscale.com/types/views"}}, }, { name: "Slice", content: `type Test struct { Slice []int }`, typeNames: []string{"Test"}, - wantImports: []string{"tailscale.com/types/views"}, + wantImports: [][2]string{{"", "tailscale.com/types/views"}}, }, } for _, tt := range tests { @@ -68,9 +68,9 @@ func TestViewerImports(t *testing.T) { genView(&output, tracker, namedType, pkg) } - for _, pkgName := range tt.wantImports { - if !tracker.Has(pkgName) { - t.Errorf("missing import %q", pkgName) + for _, pkg := range tt.wantImports { + if !tracker.Has(pkg[0], pkg[1]) { + t.Errorf("missing import %q", pkg) } } }) diff --git a/drive/drive_view.go b/drive/drive_view.go index 0f6686f24da68..6338705a6f469 100644 --- a/drive/drive_view.go +++ b/drive/drive_view.go @@ -6,9 +6,11 @@ package drive import ( - "encoding/json" + jsonv1 "encoding/json" "errors" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/views" ) @@ -42,8 +44,17 @@ func (v ShareView) AsStruct() *Share { return v.ж.Clone() } -func (v ShareView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ShareView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ShareView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ShareView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -52,7 +63,20 @@ func (v *ShareView) UnmarshalJSON(b []byte) error { return nil } var x Share - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *ShareView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Share + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/go.mod b/go.mod index 28b2a764fb7b7..fba5a4f54d3a1 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/frankban/quicktest v1.14.6 github.com/fxamacker/cbor/v2 v2.7.0 github.com/gaissmai/bart v0.18.0 - github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 + github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced github.com/go-logr/zapr v1.3.0 github.com/go-ole/go-ole v1.3.0 github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 diff --git a/go.sum b/go.sum index 23ca2dc9b1f45..df5d27313731e 100644 --- a/go.sum +++ b/go.sum @@ -345,8 +345,8 @@ github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0q github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY= -github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= +github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I= +github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod 
h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 1d31ced9d3847..0f0f652d11922 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -6,10 +6,12 @@ package ipn import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/drive" "tailscale.com/tailcfg" "tailscale.com/types/opt" @@ -48,8 +50,17 @@ func (v LoginProfileView) AsStruct() *LoginProfile { return v.ж.Clone() } -func (v LoginProfileView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v LoginProfileView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v LoginProfileView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *LoginProfileView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -58,7 +69,20 @@ func (v *LoginProfileView) UnmarshalJSON(b []byte) error { return nil } var x LoginProfile - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *LoginProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x LoginProfile + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -114,8 +138,17 @@ func (v PrefsView) AsStruct() *Prefs { return v.ж.Clone() } -func (v PrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. 
+func (v PrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v PrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *PrefsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -124,7 +157,20 @@ func (v *PrefsView) UnmarshalJSON(b []byte) error { return nil } var x Prefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Prefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -239,8 +285,17 @@ func (v ServeConfigView) AsStruct() *ServeConfig { return v.ж.Clone() } -func (v ServeConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ServeConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ServeConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ServeConfigView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -249,7 +304,20 @@ func (v *ServeConfigView) UnmarshalJSON(b []byte) error { return nil } var x ServeConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *ServeConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x ServeConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -323,8 +391,17 @@ func (v ServiceConfigView) AsStruct() *ServiceConfig { return v.ж.Clone() } -func (v ServiceConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ServiceConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ServiceConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ServiceConfigView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -333,7 +410,20 @@ func (v *ServiceConfigView) UnmarshalJSON(b []byte) error { return nil } var x ServiceConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ServiceConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x ServiceConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -388,8 +478,17 @@ func (v TCPPortHandlerView) AsStruct() *TCPPortHandler { return v.ж.Clone() } -func (v TCPPortHandlerView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TCPPortHandlerView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v TCPPortHandlerView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TCPPortHandlerView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -398,7 +497,20 @@ func (v *TCPPortHandlerView) UnmarshalJSON(b []byte) error { return nil } var x TCPPortHandler - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TCPPortHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TCPPortHandler + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -446,8 +558,17 @@ func (v HTTPHandlerView) AsStruct() *HTTPHandler { return v.ж.Clone() } -func (v HTTPHandlerView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v HTTPHandlerView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v HTTPHandlerView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *HTTPHandlerView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -456,7 +577,20 @@ func (v *HTTPHandlerView) UnmarshalJSON(b []byte) error { return nil } var x HTTPHandler - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *HTTPHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x HTTPHandler + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -502,8 +636,17 @@ func (v WebServerConfigView) AsStruct() *WebServerConfig { return v.ж.Clone() } -func (v WebServerConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v WebServerConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v WebServerConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *WebServerConfigView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -512,7 +655,20 @@ func (v *WebServerConfigView) UnmarshalJSON(b []byte) error { return nil } var x WebServerConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *WebServerConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x WebServerConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index c407800210a5e..8dc4f1ca80e49 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -6,11 +6,13 @@ package tailcfg import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" "time" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/opt" @@ -49,8 +51,17 @@ func (v UserView) AsStruct() *User { return v.ж.Clone() } -func (v UserView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v UserView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v UserView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *UserView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -59,7 +70,20 @@ func (v *UserView) UnmarshalJSON(b []byte) error { return nil } var x User - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *UserView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x User + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -107,8 +131,17 @@ func (v NodeView) AsStruct() *Node { return v.ж.Clone() } -func (v NodeView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v NodeView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v NodeView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *NodeView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -117,7 +150,20 @@ func (v *NodeView) UnmarshalJSON(b []byte) error { return nil } var x Node - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *NodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Node + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -246,8 +292,17 @@ func (v HostinfoView) AsStruct() *Hostinfo { return v.ж.Clone() } -func (v HostinfoView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v HostinfoView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v HostinfoView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *HostinfoView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -256,7 +311,20 @@ func (v *HostinfoView) UnmarshalJSON(b []byte) error { return nil } var x Hostinfo - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *HostinfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Hostinfo + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -380,8 +448,17 @@ func (v NetInfoView) AsStruct() *NetInfo { return v.ж.Clone() } -func (v NetInfoView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v NetInfoView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v NetInfoView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *NetInfoView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -390,7 +467,20 @@ func (v *NetInfoView) UnmarshalJSON(b []byte) error { return nil } var x NetInfo - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *NetInfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x NetInfo + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -460,8 +550,17 @@ func (v LoginView) AsStruct() *Login { return v.ж.Clone() } -func (v LoginView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v LoginView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v LoginView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *LoginView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -470,7 +569,20 @@ func (v *LoginView) UnmarshalJSON(b []byte) error { return nil } var x Login - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *LoginView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Login + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -521,8 +633,17 @@ func (v DNSConfigView) AsStruct() *DNSConfig { return v.ж.Clone() } -func (v DNSConfigView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DNSConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DNSConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *DNSConfigView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -531,7 +652,20 @@ func (v *DNSConfigView) UnmarshalJSON(b []byte) error { return nil } var x DNSConfig - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DNSConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x DNSConfig + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -602,8 +736,17 @@ func (v RegisterResponseView) AsStruct() *RegisterResponse { return v.ж.Clone() } -func (v RegisterResponseView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v RegisterResponseView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v RegisterResponseView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *RegisterResponseView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -612,7 +755,20 @@ func (v *RegisterResponseView) UnmarshalJSON(b []byte) error { return nil } var x RegisterResponse - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *RegisterResponseView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x RegisterResponse + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -668,8 +824,17 @@ func (v RegisterResponseAuthView) AsStruct() *RegisterResponseAuth { return v.ж.Clone() } -func (v RegisterResponseAuthView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v RegisterResponseAuthView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v RegisterResponseAuthView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *RegisterResponseAuthView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -678,7 +843,20 @@ func (v *RegisterResponseAuthView) UnmarshalJSON(b []byte) error { return nil } var x RegisterResponseAuth - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *RegisterResponseAuthView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x RegisterResponseAuth + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -726,8 +904,17 @@ func (v RegisterRequestView) AsStruct() *RegisterRequest { return v.ж.Clone() } -func (v RegisterRequestView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v RegisterRequestView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v RegisterRequestView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *RegisterRequestView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -736,7 +923,20 @@ func (v *RegisterRequestView) UnmarshalJSON(b []byte) error { return nil } var x RegisterRequest - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *RegisterRequestView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x RegisterRequest + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -816,8 +1016,17 @@ func (v DERPHomeParamsView) AsStruct() *DERPHomeParams { return v.ж.Clone() } -func (v DERPHomeParamsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPHomeParamsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPHomeParamsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPHomeParamsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -826,7 +1035,20 @@ func (v *DERPHomeParamsView) UnmarshalJSON(b []byte) error { return nil } var x DERPHomeParams - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *DERPHomeParamsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x DERPHomeParams + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -870,8 +1092,17 @@ func (v DERPRegionView) AsStruct() *DERPRegion { return v.ж.Clone() } -func (v DERPRegionView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPRegionView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPRegionView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPRegionView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -880,7 +1111,20 @@ func (v *DERPRegionView) UnmarshalJSON(b []byte) error { return nil } var x DERPRegion - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DERPRegionView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x DERPRegion + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -938,8 +1182,17 @@ func (v DERPMapView) AsStruct() *DERPMap { return v.ж.Clone() } -func (v DERPMapView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPMapView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPMapView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *DERPMapView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -948,7 +1201,20 @@ func (v *DERPMapView) UnmarshalJSON(b []byte) error { return nil } var x DERPMap - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *DERPMapView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x DERPMap + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -999,8 +1265,17 @@ func (v DERPNodeView) AsStruct() *DERPNode { return v.ж.Clone() } -func (v DERPNodeView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v DERPNodeView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v DERPNodeView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *DERPNodeView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1009,7 +1284,20 @@ func (v *DERPNodeView) UnmarshalJSON(b []byte) error { return nil } var x DERPNode - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *DERPNodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x DERPNode + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1073,8 +1361,17 @@ func (v SSHRuleView) AsStruct() *SSHRule { return v.ж.Clone() } -func (v SSHRuleView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHRuleView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHRuleView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *SSHRuleView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1083,7 +1380,20 @@ func (v *SSHRuleView) UnmarshalJSON(b []byte) error { return nil } var x SSHRule - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHRuleView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x SSHRule + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1139,8 +1449,17 @@ func (v SSHActionView) AsStruct() *SSHAction { return v.ж.Clone() } -func (v SSHActionView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHActionView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHActionView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *SSHActionView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1149,7 +1468,20 @@ func (v *SSHActionView) UnmarshalJSON(b []byte) error { return nil } var x SSHAction - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHActionView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x SSHAction + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1211,8 +1543,17 @@ func (v SSHPrincipalView) AsStruct() *SSHPrincipal { return v.ж.Clone() } -func (v SSHPrincipalView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHPrincipalView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHPrincipalView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *SSHPrincipalView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1221,7 +1562,20 @@ func (v *SSHPrincipalView) UnmarshalJSON(b []byte) error { return nil } var x SSHPrincipal - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *SSHPrincipalView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x SSHPrincipal + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1273,8 +1627,17 @@ func (v ControlDialPlanView) AsStruct() *ControlDialPlan { return v.ж.Clone() } -func (v ControlDialPlanView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ControlDialPlanView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ControlDialPlanView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ControlDialPlanView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1283,7 +1646,20 @@ func (v *ControlDialPlanView) UnmarshalJSON(b []byte) error { return nil } var x ControlDialPlan - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ControlDialPlanView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x ControlDialPlan + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1327,8 +1703,17 @@ func (v LocationView) AsStruct() *Location { return v.ж.Clone() } -func (v LocationView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v LocationView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v LocationView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *LocationView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1337,7 +1722,20 @@ func (v *LocationView) UnmarshalJSON(b []byte) error { return nil } var x Location - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *LocationView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Location + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1391,8 +1789,17 @@ func (v UserProfileView) AsStruct() *UserProfile { return v.ж.Clone() } -func (v UserProfileView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v UserProfileView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v UserProfileView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *UserProfileView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1401,7 +1808,20 @@ func (v *UserProfileView) UnmarshalJSON(b []byte) error { return nil } var x UserProfile - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *UserProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x UserProfile + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -1450,8 +1870,17 @@ func (v VIPServiceView) AsStruct() *VIPService { return v.ж.Clone() } -func (v VIPServiceView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v VIPServiceView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v VIPServiceView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *VIPServiceView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -1460,7 +1889,20 @@ func (v *VIPServiceView) UnmarshalJSON(b []byte) error { return nil } var x VIPService - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *VIPServiceView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x VIPService + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/types/dnstype/dnstype_view.go b/types/dnstype/dnstype_view.go index c77ff9a406106..3d374ab47f76c 100644 --- a/types/dnstype/dnstype_view.go +++ b/types/dnstype/dnstype_view.go @@ -6,10 +6,12 @@ package dnstype import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/views" ) @@ -43,8 +45,17 @@ func (v ResolverView) AsStruct() *Resolver { return v.ж.Clone() } -func (v ResolverView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ResolverView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ResolverView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *ResolverView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -53,7 +64,20 @@ func (v *ResolverView) UnmarshalJSON(b []byte) error { return nil } var x Resolver - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *ResolverView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Resolver + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 55eb40c51ac47..99a86a6a52bbd 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -6,9 +6,11 @@ package persist import ( - "encoding/json" + jsonv1 "encoding/json" "errors" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/structs" @@ -45,8 +47,17 @@ func (v PersistView) AsStruct() *Persist { return v.ж.Clone() } -func (v PersistView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v PersistView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v PersistView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *PersistView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -55,7 +66,20 @@ func (v *PersistView) UnmarshalJSON(b []byte) error { return nil } var x Persist - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Persist + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/types/prefs/prefs_example/prefs_example_view.go b/types/prefs/prefs_example/prefs_example_view.go index 9aaac6e9c3ed6..afc9f1781f565 100644 --- a/types/prefs/prefs_example/prefs_example_view.go +++ b/types/prefs/prefs_example/prefs_example_view.go @@ -6,10 +6,12 @@ package prefs_example import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "tailscale.com/drive" "tailscale.com/tailcfg" "tailscale.com/types/opt" @@ -48,8 +50,17 @@ func (v PrefsView) AsStruct() *Prefs { return v.ж.Clone() } -func (v PrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v PrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v PrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *PrefsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -58,7 +69,20 @@ func (v *PrefsView) UnmarshalJSON(b []byte) error { return nil } var x Prefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Prefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -160,8 +184,17 @@ func (v AutoUpdatePrefsView) AsStruct() *AutoUpdatePrefs { return v.ж.Clone() } -func (v AutoUpdatePrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v AutoUpdatePrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v AutoUpdatePrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *AutoUpdatePrefsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -170,7 +203,20 @@ func (v *AutoUpdatePrefsView) UnmarshalJSON(b []byte) error { return nil } var x AutoUpdatePrefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *AutoUpdatePrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x AutoUpdatePrefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -214,8 +260,17 @@ func (v AppConnectorPrefsView) AsStruct() *AppConnectorPrefs { return v.ж.Clone() } -func (v AppConnectorPrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v AppConnectorPrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v AppConnectorPrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *AppConnectorPrefsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -224,7 +279,20 @@ func (v *AppConnectorPrefsView) UnmarshalJSON(b []byte) error { return nil } var x AppConnectorPrefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *AppConnectorPrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x AppConnectorPrefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/types/prefs/prefs_view_test.go b/types/prefs/prefs_view_test.go index f6cfc918d02c0..44c3beb877097 100644 --- a/types/prefs/prefs_view_test.go +++ b/types/prefs/prefs_view_test.go @@ -6,9 +6,12 @@ package prefs import ( - "encoding/json" + jsonv1 "encoding/json" "errors" "net/netip" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" ) //go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=TestPrefs,TestBundle,TestValueStruct,TestGenericStruct,TestPrefsGroup -tags=test @@ -41,8 +44,17 @@ func (v TestPrefsView) AsStruct() *TestPrefs { return v.ж.Clone() } -func (v TestPrefsView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestPrefsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestPrefsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. 
func (v *TestPrefsView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -51,7 +63,20 @@ func (v *TestPrefsView) UnmarshalJSON(b []byte) error { return nil } var x TestPrefs - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TestPrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TestPrefs + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -145,8 +170,17 @@ func (v TestBundleView) AsStruct() *TestBundle { return v.ж.Clone() } -func (v TestBundleView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestBundleView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestBundleView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestBundleView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -155,7 +189,20 @@ func (v *TestBundleView) UnmarshalJSON(b []byte) error { return nil } var x TestBundle - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *TestBundleView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TestBundle + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -200,8 +247,17 @@ func (v TestValueStructView) AsStruct() *TestValueStruct { return v.ж.Clone() } -func (v TestValueStructView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestValueStructView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestValueStructView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestValueStructView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -210,7 +266,20 @@ func (v *TestValueStructView) UnmarshalJSON(b []byte) error { return nil } var x TestValueStruct - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TestValueStructView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TestValueStruct + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -253,8 +322,17 @@ func (v TestGenericStructView[T]) AsStruct() *TestGenericStruct[T] { return v.ж.Clone() } -func (v TestGenericStructView[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestGenericStructView[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v TestGenericStructView[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestGenericStructView[T]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -263,7 +341,20 @@ func (v *TestGenericStructView[T]) UnmarshalJSON(b []byte) error { return nil } var x TestGenericStruct[T] - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *TestGenericStructView[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TestGenericStruct[T] + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x @@ -308,8 +399,17 @@ func (v TestPrefsGroupView) AsStruct() *TestPrefsGroup { return v.ж.Clone() } -func (v TestPrefsGroupView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v TestPrefsGroupView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v TestPrefsGroupView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} +// UnmarshalJSON implements [jsonv1.Unmarshaler]. func (v *TestPrefsGroupView) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") @@ -318,7 +418,20 @@ func (v *TestPrefsGroupView) UnmarshalJSON(b []byte) error { return nil } var x TestPrefsGroup - if err := json.Unmarshal(b, &x); err != nil { + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (v *TestPrefsGroupView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x TestPrefsGroup + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { return err } v.ж = &x diff --git a/types/views/views.go b/types/views/views.go index 3911f111258a8..6d15b80d4e499 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -7,7 +7,7 @@ package views import ( "bytes" - "encoding/json" + jsonv1 "encoding/json" "errors" "fmt" "iter" @@ -15,20 +15,12 @@ import ( "reflect" "slices" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" "go4.org/mem" "tailscale.com/types/ptr" ) -func unmarshalSliceFromJSON[T any](b []byte, x *[]T) error { - if *x != nil { - return errors.New("already initialized") - } - if len(b) == 0 { - return nil - } - return json.Unmarshal(b, x) -} - // ByteSlice is a read-only accessor for types that are backed by a []byte. type ByteSlice[T ~[]byte] struct { // ж is the underlying mutable value, named with a hard-to-type @@ -93,15 +85,32 @@ func (v ByteSlice[T]) SliceTo(i int) ByteSlice[T] { return ByteSlice[T]{v.ж[:i] // Slice returns v[i:j] func (v ByteSlice[T]) Slice(i, j int) ByteSlice[T] { return ByteSlice[T]{v.ж[i:j]} } -// MarshalJSON implements json.Marshaler. -func (v ByteSlice[T]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ByteSlice[T]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} -// UnmarshalJSON implements json.Unmarshaler. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ByteSlice[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized ByteSlice. 
func (v *ByteSlice[T]) UnmarshalJSON(b []byte) error { if v.ж != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &v.ж) + return jsonv1.Unmarshal(b, &v.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized ByteSlice. +func (v *ByteSlice[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &v.ж) } // StructView represents the corresponding StructView of a Viewable. The concrete types are @@ -159,11 +168,35 @@ func (v SliceView[T, V]) All() iter.Seq2[int, V] { } } -// MarshalJSON implements json.Marshaler. -func (v SliceView[T, V]) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) } +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SliceView[T, V]) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SliceView[T, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} -// UnmarshalJSON implements json.Unmarshaler. -func (v *SliceView[T, V]) UnmarshalJSON(b []byte) error { return unmarshalSliceFromJSON(b, &v.ж) } +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized SliceView. +func (v *SliceView[T, V]) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } else if len(b) == 0 { + return nil + } + return jsonv1.Unmarshal(b, &v.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized SliceView. +func (v *SliceView[T, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &v.ж) +} // IsNil reports whether the underlying slice is nil. 
func (v SliceView[T, V]) IsNil() bool { return v.ж == nil } @@ -252,14 +285,34 @@ func SliceOf[T any](x []T) Slice[T] { return Slice[T]{x} } -// MarshalJSON implements json.Marshaler. +// MarshalJSON implements [jsonv1.Marshaler]. func (v Slice[T]) MarshalJSON() ([]byte, error) { - return json.Marshal(v.ж) + return jsonv1.Marshal(v.ж) } -// UnmarshalJSON implements json.Unmarshaler. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v Slice[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized Slice. func (v *Slice[T]) UnmarshalJSON(b []byte) error { - return unmarshalSliceFromJSON(b, &v.ж) + if v.ж != nil { + return errors.New("already initialized") + } else if len(b) == 0 { + return nil + } + return jsonv1.Unmarshal(b, &v.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized Slice. +func (v *Slice[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &v.ж) } // IsNil reports whether the underlying slice is nil. @@ -512,18 +565,32 @@ func (m MapSlice[K, V]) GetOk(k K) (Slice[V], bool) { return SliceOf(v), ok } -// MarshalJSON implements json.Marshaler. +// MarshalJSON implements [jsonv1.Marshaler]. func (m MapSlice[K, V]) MarshalJSON() ([]byte, error) { - return json.Marshal(m.ж) + return jsonv1.Marshal(m.ж) } -// UnmarshalJSON implements json.Unmarshaler. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (m MapSlice[K, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, m.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. // It should only be called on an uninitialized Map. 
func (m *MapSlice[K, V]) UnmarshalJSON(b []byte) error { if m.ж != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &m.ж) + return jsonv1.Unmarshal(b, &m.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It should only be called on an uninitialized MapSlice. +func (m *MapSlice[K, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if m.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &m.ж) } // AsMap returns a shallow-clone of the underlying map. @@ -600,18 +667,32 @@ func (m Map[K, V]) GetOk(k K) (V, bool) { return v, ok } -// MarshalJSON implements json.Marshaler. +// MarshalJSON implements [jsonv1.Marshaler]. func (m Map[K, V]) MarshalJSON() ([]byte, error) { - return json.Marshal(m.ж) + return jsonv1.Marshal(m.ж) } -// UnmarshalJSON implements json.Unmarshaler. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (m Map[K, V]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, m.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. // It should only be called on an uninitialized Map. func (m *Map[K, V]) UnmarshalJSON(b []byte) error { if m.ж != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &m.ж) + return jsonv1.Unmarshal(b, &m.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized Map. +func (m *Map[K, V]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if m.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &m.ж) } // AsMap returns a shallow-clone of the underlying map. @@ -809,17 +890,32 @@ func ValuePointerOf[T any](v *T) ValuePointer[T] { return ValuePointer[T]{v} } -// MarshalJSON implements [json.Marshaler]. +// MarshalJSON implements [jsonv1.Marshaler]. 
func (p ValuePointer[T]) MarshalJSON() ([]byte, error) { - return json.Marshal(p.ж) + return jsonv1.Marshal(p.ж) } -// UnmarshalJSON implements [json.Unmarshaler]. +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (p ValuePointer[T]) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, p.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +// It must only be called on an uninitialized ValuePointer. func (p *ValuePointer[T]) UnmarshalJSON(b []byte) error { if p.ж != nil { return errors.New("already initialized") } - return json.Unmarshal(b, &p.ж) + return jsonv1.Unmarshal(b, &p.ж) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +// It must only be called on an uninitialized ValuePointer. +func (p *ValuePointer[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if p.ж != nil { + return errors.New("already initialized") + } + return jsonv2.UnmarshalDecode(dec, &p.ж) } // ContainsPointers reports whether T contains any pointers, diff --git a/types/views/views_test.go b/types/views/views_test.go index 2205cbc03ab74..5a30c11a13c86 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -4,8 +4,7 @@ package views import ( - "bytes" - "encoding/json" + jsonv1 "encoding/json" "fmt" "net/netip" "reflect" @@ -15,9 +14,27 @@ import ( "unsafe" qt "github.com/frankban/quicktest" + jsonv2 "github.com/go-json-experiment/json" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/types/structs" ) +// Statically verify that each type implements the following interfaces. 
+var _ = []interface { + jsonv1.Marshaler + jsonv1.Unmarshaler + jsonv2.MarshalerTo + jsonv2.UnmarshalerFrom +}{ + (*ByteSlice[[]byte])(nil), + (*SliceView[*testStruct, testStructView])(nil), + (*Slice[testStruct])(nil), + (*MapSlice[*testStruct, testStructView])(nil), + (*Map[*testStruct, testStructView])(nil), + (*ValuePointer[testStruct])(nil), +} + type viewStruct struct { Int int Addrs Slice[netip.Prefix] @@ -83,14 +100,16 @@ func TestViewsJSON(t *testing.T) { ipp := SliceOf(mustCIDR("192.168.0.0/24")) ss := SliceOf([]string{"bar"}) tests := []struct { - name string - in viewStruct - wantJSON string + name string + in viewStruct + wantJSONv1 string + wantJSONv2 string }{ { - name: "empty", - in: viewStruct{}, - wantJSON: `{"Int":0,"Addrs":null,"Strings":null}`, + name: "empty", + in: viewStruct{}, + wantJSONv1: `{"Int":0,"Addrs":null,"Strings":null}`, + wantJSONv2: `{"Int":0,"Addrs":[],"Strings":[]}`, }, { name: "everything", @@ -101,30 +120,49 @@ func TestViewsJSON(t *testing.T) { StringsPtr: &ss, Strings: ss, }, - wantJSON: `{"Int":1234,"Addrs":["192.168.0.0/24"],"Strings":["bar"],"AddrsPtr":["192.168.0.0/24"],"StringsPtr":["bar"]}`, + wantJSONv1: `{"Int":1234,"Addrs":["192.168.0.0/24"],"Strings":["bar"],"AddrsPtr":["192.168.0.0/24"],"StringsPtr":["bar"]}`, + wantJSONv2: `{"Int":1234,"Addrs":["192.168.0.0/24"],"Strings":["bar"],"AddrsPtr":["192.168.0.0/24"],"StringsPtr":["bar"]}`, }, } - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - encoder.SetIndent("", "") for _, tc := range tests { - buf.Reset() - if err := encoder.Encode(&tc.in); err != nil { - t.Fatal(err) - } - b := buf.Bytes() - gotJSON := strings.TrimSpace(string(b)) - if tc.wantJSON != gotJSON { - t.Fatalf("JSON: %v; want: %v", gotJSON, tc.wantJSON) - } - var got viewStruct - if err := json.Unmarshal(b, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, tc.in) { - t.Fatalf("unmarshal resulted in different output: %+v; want %+v", got, tc.in) + cmpOpts := cmp.Options{ + 
cmp.AllowUnexported(Slice[string]{}), + cmp.AllowUnexported(Slice[netip.Prefix]{}), + cmpopts.EquateComparable(netip.Prefix{}), } + t.Run("JSONv1", func(t *testing.T) { + gotJSON, err := jsonv1.Marshal(tc.in) + if err != nil { + t.Fatal(err) + } + if string(gotJSON) != tc.wantJSONv1 { + t.Fatalf("JSON: %s; want: %s", gotJSON, tc.wantJSONv1) + } + var got viewStruct + if err := jsonv1.Unmarshal(gotJSON, &got); err != nil { + t.Fatal(err) + } + if d := cmp.Diff(got, tc.in, cmpOpts); d != "" { + t.Fatalf("unmarshal mismatch (-got +want):\n%s", d) + } + }) + t.Run("JSONv2", func(t *testing.T) { + gotJSON, err := jsonv2.Marshal(tc.in) + if err != nil { + t.Fatal(err) + } + if string(gotJSON) != tc.wantJSONv2 { + t.Fatalf("JSON: %s; want: %s", gotJSON, tc.wantJSONv2) + } + var got viewStruct + if err := jsonv2.Unmarshal(gotJSON, &got); err != nil { + t.Fatal(err) + } + if d := cmp.Diff(got, tc.in, cmpOpts, cmpopts.EquateEmpty()); d != "" { + t.Fatalf("unmarshal mismatch (-got +want):\n%s", d) + } + }) } } diff --git a/util/codegen/codegen.go b/util/codegen/codegen.go index 1b3af10e03ee1..ec02d652b8760 100644 --- a/util/codegen/codegen.go +++ b/util/codegen/codegen.go @@ -85,28 +85,35 @@ func NewImportTracker(thisPkg *types.Package) *ImportTracker { } } +type namePkgPath struct { + name string // optional import name + pkgPath string +} + // ImportTracker provides a mechanism to track and build import paths. type ImportTracker struct { thisPkg *types.Package - packages map[string]bool + packages map[namePkgPath]bool } -func (it *ImportTracker) Import(pkg string) { - if pkg != "" && !it.packages[pkg] { - mak.Set(&it.packages, pkg, true) +// Import imports pkgPath under an optional import name. +func (it *ImportTracker) Import(name, pkgPath string) { + if pkgPath != "" && !it.packages[namePkgPath{name, pkgPath}] { + mak.Set(&it.packages, namePkgPath{name, pkgPath}, true) } } -// Has reports whether the specified package has been imported. 
-func (it *ImportTracker) Has(pkg string) bool { - return it.packages[pkg] +// Has reports whether the specified package path has been imported +// under the particular import name. +func (it *ImportTracker) Has(name, pkgPath string) bool { + return it.packages[namePkgPath{name, pkgPath}] } func (it *ImportTracker) qualifier(pkg *types.Package) string { if it.thisPkg == pkg { return "" } - it.Import(pkg.Path()) + it.Import("", pkg.Path()) // TODO(maisem): handle conflicts? return pkg.Name() } @@ -128,7 +135,11 @@ func (it *ImportTracker) PackagePrefix(pkg *types.Package) string { func (it *ImportTracker) Write(w io.Writer) { fmt.Fprintf(w, "import (\n") for s := range it.packages { - fmt.Fprintf(w, "\t%q\n", s) + if s.name == "" { + fmt.Fprintf(w, "\t%q\n", s.pkgPath) + } else { + fmt.Fprintf(w, "\t%s %q\n", s.name, s.pkgPath) + } } fmt.Fprintf(w, ")\n\n") } From b8c45a6a8f27315b6cc65e26d455254be7d35196 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Thu, 14 Aug 2025 13:46:51 -0700 Subject: [PATCH 0216/1093] client/systray: add CLI error output if operator is missing We already show a message in the menu itself, this just adds it to the CLI output as well. Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/client/systray/systray.go b/client/systray/systray.go index d5a19f91c3c54..98c6156b82184 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -160,6 +160,17 @@ func (menu *Menu) onReady() { log.Printf("starting") setAppIcon(disconnected) menu.rebuild() + + menu.mu.Lock() + if menu.readonly { + fmt.Fprintln(os.Stderr, ` +No permission to manage Tailscale. Set operator by running: + +sudo tailscale set --operator=$USER + +See https://tailscale.com/s/cli-operator for more information.`) + } + menu.mu.Unlock() } // updateState updates the Menu state from the Tailscale local client. 
From 3f1851a6d9507ed2ffe46098835c53e63dff93b8 Mon Sep 17 00:00:00 2001 From: Michael Ben-Ami Date: Mon, 11 Aug 2025 12:10:33 -0400 Subject: [PATCH 0217/1093] types/dnstype, ipn/ipnlocal: allow other DNS resolvers with exit nodes dnstype.Resolver adds a boolean UseWithExitNode that controls whether the resolver should be used in tailscale exit node contexts (not wireguard exit nodes). If UseWithExitNode resolvers are found, they are installed as the global resolvers. If no UseWithExitNode resolvers are found, the exit node resolver continues to be installed as the global resolver. Split DNS Routes referencing UseWithExitNode resolvers are also installed. Updates #8237 Fixes tailscale/corp#30906 Fixes tailscale/corp#30907 Signed-off-by: Michael Ben-Ami --- client/tailscale/apitype/controltype.go | 49 ++++++++-- ipn/ipnlocal/local_test.go | 113 ++++++++++++++++++++---- ipn/ipnlocal/node_backend.go | 87 +++++++++++++----- tailcfg/tailcfg.go | 10 +-- types/dnstype/dnstype.go | 10 ++- types/dnstype/dnstype_clone.go | 1 + types/dnstype/dnstype_test.go | 14 ++- types/dnstype/dnstype_view.go | 2 + 8 files changed, 235 insertions(+), 51 deletions(-) diff --git a/client/tailscale/apitype/controltype.go b/client/tailscale/apitype/controltype.go index 9a623be319606..d9d79f0ade38b 100644 --- a/client/tailscale/apitype/controltype.go +++ b/client/tailscale/apitype/controltype.go @@ -3,17 +3,50 @@ package apitype +// DNSConfig is the DNS configuration for a tailnet +// used in /tailnet/{tailnet}/dns/config. type DNSConfig struct { - Resolvers []DNSResolver `json:"resolvers"` - FallbackResolvers []DNSResolver `json:"fallbackResolvers"` - Routes map[string][]DNSResolver `json:"routes"` - Domains []string `json:"domains"` - Nameservers []string `json:"nameservers"` - Proxied bool `json:"proxied"` - TempCorpIssue13969 string `json:"TempCorpIssue13969,omitempty"` + // Resolvers are the global DNS resolvers to use + // overriding the local OS configuration. 
+ Resolvers []DNSResolver `json:"resolvers"` + + // FallbackResolvers are used as global resolvers when + // the client is unable to determine the OS's preferred DNS servers. + FallbackResolvers []DNSResolver `json:"fallbackResolvers"` + + // Routes map DNS name suffixes to a set of DNS resolvers, + // used for Split DNS and other advanced routing overlays. + Routes map[string][]DNSResolver `json:"routes"` + + // Domains are the search domains to use. + Domains []string `json:"domains"` + + // Proxied means MagicDNS is enabled. + Proxied bool `json:"proxied"` + + // TempCorpIssue13969 is from an internal hack day prototype, + // See tailscale/corp#13969. + TempCorpIssue13969 string `json:"TempCorpIssue13969,omitempty"` + + // Nameservers are the IP addresses of global nameservers to use. + // This is a deprecated format but may still be found in tailnets + // that were configured a long time ago. When making updates, + // set Resolvers and leave Nameservers empty. + Nameservers []string `json:"nameservers"` } +// DNSResolver is a DNS resolver in a DNS configuration. type DNSResolver struct { - Addr string `json:"addr"` + // Addr is the address of the DNS resolver. + // It is usually an IP address or a DoH URL. + // See dnstype.Resolver.Addr for full details. + Addr string `json:"addr"` + + // BootstrapResolution is an optional suggested resolution for + // the DoT/DoH resolver. BootstrapResolution []string `json:"bootstrapResolution,omitempty"` + + // UseWithExitNode signals this resolver should be used + // even when a tailscale exit node is configured on a device. 
+ UseWithExitNode bool `json:"useWithExitNode,omitempty"` } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 30833e748ea1b..49cfc3e071569 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2080,7 +2080,14 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { wantRoutes map[dnsname.FQDN][]*dnstype.Resolver } - defaultResolvers := []*dnstype.Resolver{{Addr: "default.example.com"}} + const tsUseWithExitNodeResolverAddr = "usewithexitnode.example.com" + defaultResolvers := []*dnstype.Resolver{ + {Addr: "default.example.com"}, + } + containsFlaggedResolvers := append([]*dnstype.Resolver{ + {Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}, + }, defaultResolvers...) + wgResolvers := []*dnstype.Resolver{{Addr: "wg.example.com"}} peers := []tailcfg.NodeView{ (&tailcfg.Node{ @@ -2099,9 +2106,33 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { }).View(), } exitDOH := peerAPIBase(&netmap.NetworkMap{Peers: peers}, peers[0]) + "/dns-query" - routes := map[dnsname.FQDN][]*dnstype.Resolver{ + baseRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "route.example.com.": {{Addr: "route.example.com"}}, + } + containsEmptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ "route.example.com.": {{Addr: "route.example.com"}}, + "empty.example.com.": {}, + } + containsFlaggedRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "route.example.com.": {{Addr: "route.example.com"}}, + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + } + containsFlaggedAndEmptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "empty.example.com.": {}, + "route.example.com.": {{Addr: "route.example.com"}}, + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, } + flaggedRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + } + emptyRoutes := 
map[dnsname.FQDN][]*dnstype.Resolver{ + "empty.example.com.": {}, + } + flaggedAndEmptyRoutes := map[dnsname.FQDN][]*dnstype.Resolver{ + "empty.example.com.": {}, + "withexit.example.com.": {{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + } + stringifyRoutes := func(routes map[dnsname.FQDN][]*dnstype.Resolver) map[string][]*dnstype.Resolver { if routes == nil { return nil @@ -2138,19 +2169,23 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, wantRoutes: nil, }, + { + name: "tsExit/noRoutes/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: nil, + }, - // The following two cases may need to be revisited. For a shared-in - // exit node split-DNS may effectively break, furthermore in the future - // if different nodes observe different DNS configurations, even a - // tailnet local exit node may present a different DNS configuration, - // which may not meet expectations in some use cases. - // In the case where a default resolver is set, the default resolver - // should also perhaps take precedence also. + // When at tailscale exit node is in use, + // only routes that reference resolvers with the UseWithExitNode should be installed, + // as well as routes with 0-length resolver lists, which should be installed in all cases. 
{ name: "tsExit/routes/noResolver", exitNode: "ts", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes)}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes)}, wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, wantRoutes: nil, }, @@ -2158,10 +2193,58 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { name: "tsExit/routes/defaultResolver", exitNode: "ts", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes), Resolvers: defaultResolvers}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes), Resolvers: defaultResolvers}, wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, wantRoutes: nil, }, + { + name: "tsExit/routes/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes), Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: nil, + }, + { + name: "tsExit/flaggedRoutesOnly/defaultResolver", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedRoutes), Resolvers: defaultResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, + wantRoutes: flaggedRoutes, + }, + { + name: "tsExit/flaggedRoutesOnly/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedRoutes), Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: flaggedRoutes, + }, + { + name: "tsExit/emptyRoutesOnly/defaultResolver", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsEmptyRoutes), Resolvers: defaultResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, + wantRoutes: emptyRoutes, + }, + { + name: 
"tsExit/flaggedAndEmptyRoutesOnly/defaultResolver", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedAndEmptyRoutes), Resolvers: defaultResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: exitDOH}}, + wantRoutes: flaggedAndEmptyRoutes, + }, + { + name: "tsExit/flaggedAndEmptyRoutesOnly/flaggedResolverOnly", + exitNode: "ts", + peers: peers, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(containsFlaggedAndEmptyRoutes), Resolvers: containsFlaggedResolvers}, + wantDefaultResolvers: []*dnstype.Resolver{{Addr: tsUseWithExitNodeResolverAddr, UseWithExitNode: true}}, + wantRoutes: flaggedAndEmptyRoutes, + }, // WireGuard exit nodes with DNS capabilities provide a "fallback" type // behavior, they have a lower precedence than a default resolver, but @@ -2187,17 +2270,17 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { name: "wgExit/routes/defaultResolver", exitNode: "wg", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes), Resolvers: defaultResolvers}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes), Resolvers: defaultResolvers}, wantDefaultResolvers: defaultResolvers, - wantRoutes: routes, + wantRoutes: baseRoutes, }, { name: "wgExit/routes/noResolver", exitNode: "wg", peers: peers, - dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(routes)}, + dnsConfig: &tailcfg.DNSConfig{Routes: stringifyRoutes(baseRoutes)}, wantDefaultResolvers: wgResolvers, - wantRoutes: routes, + wantRoutes: baseRoutes, }, } diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index ec503f1300ca5..a3889b6434c40 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -578,6 +578,42 @@ func (nb *nodeBackend) doShutdown(cause error) { nb.eventClient.Close() } +// useWithExitNodeResolvers filters out resolvers so the ones that remain +// are all the ones marked for use with exit nodes. 
+func useWithExitNodeResolvers(resolvers []*dnstype.Resolver) []*dnstype.Resolver { + var filtered []*dnstype.Resolver + for _, res := range resolvers { + if res.UseWithExitNode { + filtered = append(filtered, res) + } + } + return filtered +} + +// useWithExitNodeRoutes filters out routes so the ones that remain +// are either zero-length resolver lists, or lists containing only +// resolvers marked for use with exit nodes. +func useWithExitNodeRoutes(routes map[string][]*dnstype.Resolver) map[string][]*dnstype.Resolver { + var filtered map[string][]*dnstype.Resolver + for suffix, resolvers := range routes { + // Suffixes with no resolvers represent a valid configuration, + // and should persist regardless of exit node considerations. + if len(resolvers) == 0 { + mak.Set(&filtered, suffix, make([]*dnstype.Resolver, 0)) + continue + } + + // In exit node contexts, we filter out resolvers not configured for use with + // exit nodes. If there are no such configured resolvers, there should not be an entry for that suffix. + filteredResolvers := useWithExitNodeResolvers(resolvers) + if len(filteredResolvers) > 0 { + mak.Set(&filtered, suffix, filteredResolvers) + } + } + + return filtered +} + // dnsConfigForNetmap returns a *dns.Config for the given netmap, // prefs, client OS version, and cloud hosting environment. // @@ -700,10 +736,36 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. dcfg.DefaultResolvers = append(dcfg.DefaultResolvers, resolvers...) } + addSplitDNSRoutes := func(routes map[string][]*dnstype.Resolver) { + for suffix, resolvers := range routes { + fqdn, err := dnsname.ToFQDN(suffix) + if err != nil { + logf("[unexpected] non-FQDN route suffix %q", suffix) + } + + // Create map entry even if len(resolvers) == 0; Issue 2706. 
+ // This lets the control plane send ExtraRecords for which we + // can authoritatively answer "name not exists" for when the + // control plane also sends this explicit but empty route + // making it as something we handle. + dcfg.Routes[fqdn] = slices.Clone(resolvers) + } + } + // If we're using an exit node and that exit node is new enough (1.19.x+) - // to run a DoH DNS proxy, then send all our DNS traffic through it. + // to run a DoH DNS proxy, then send all our DNS traffic through it, + // unless we find resolvers with UseWithExitNode set, in which case we use that. if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { - addDefault([]*dnstype.Resolver{{Addr: dohURL}}) + filtered := useWithExitNodeResolvers(nm.DNS.Resolvers) + if len(filtered) > 0 { + addDefault(filtered) + } else { + // If no default global resolvers with the override + // are configured, configure the exit node's resolver. + addDefault([]*dnstype.Resolver{{Addr: dohURL}}) + } + + addSplitDNSRoutes(useWithExitNodeRoutes(nm.DNS.Routes)) return dcfg } @@ -718,25 +780,8 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. } } - for suffix, resolvers := range nm.DNS.Routes { - fqdn, err := dnsname.ToFQDN(suffix) - if err != nil { - logf("[unexpected] non-FQDN route suffix %q", suffix) - } - - // Create map entry even if len(resolvers) == 0; Issue 2706. - // This lets the control plane send ExtraRecords for which we - // can authoritatively answer "name not exists" for when the - // control plane also sends this explicit but empty route - // making it as something we handle. - // - // While we're already populating it, might as well size the - // slice appropriately. - // Per #9498 the exact requirements of nil vs empty slice remain - // unclear, this is a haunted graveyard to be resolved. - dcfg.Routes[fqdn] = make([]*dnstype.Resolver, 0, len(resolvers)) - dcfg.Routes[fqdn] = append(dcfg.Routes[fqdn], resolvers...) 
- } + // Add split DNS routes, with no regard to exit node configuration. + addSplitDNSRoutes(nm.DNS.Routes) // Set FallbackResolvers as the default resolvers in the // scenarios that can't handle a purely split-DNS config. See diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 9f4734f1fc7d0..d2125684d3f37 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -169,7 +169,8 @@ type CapabilityVersion int // - 122: 2025-07-21: Client sends Hostinfo.ExitNodeID to report which exit node it has selected, if any. // - 123: 2025-07-28: fix deadlock regression from cryptokey routing change (issue #16651) // - 124: 2025-08-08: removed NodeAttrDisableMagicSockCryptoRouting support, crypto routing is now mandatory -const CurrentCapabilityVersion CapabilityVersion = 124 +// - 125: 2025-08-11: dnstype.Resolver adds UseWithExitNode field. +const CurrentCapabilityVersion CapabilityVersion = 125 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -1730,10 +1731,9 @@ type DNSConfig struct { // proxying to be enabled. Proxied bool `json:",omitempty"` - // The following fields are only set and used by - // MapRequest.Version >=9 and <14. - - // Nameservers are the IP addresses of the nameservers to use. + // Nameservers are the IP addresses of the global nameservers to use. + // + // Deprecated: this is only set and used by MapRequest.Version >=9 and <14. Use Resolvers instead. Nameservers []netip.Addr `json:",omitempty"` // CertDomains are the set of DNS names for which the control diff --git a/types/dnstype/dnstype.go b/types/dnstype/dnstype.go index b7f5b9d02fe47..a3ba1b0a981e2 100644 --- a/types/dnstype/dnstype.go +++ b/types/dnstype/dnstype.go @@ -35,6 +35,12 @@ type Resolver struct { // // As of 2022-09-08, BootstrapResolution is not yet used. BootstrapResolution []netip.Addr `json:",omitempty"` + + // UseWithExitNode designates that this resolver should continue to be used when an + // exit node is in use. 
Normally, DNS resolution is delegated to the exit node but + // there are situations where it is preferable to still use a Split DNS server and/or + // global DNS server instead of the exit node. + UseWithExitNode bool `json:",omitempty"` } // IPPort returns r.Addr as an IP address and port if either @@ -64,5 +70,7 @@ func (r *Resolver) Equal(other *Resolver) bool { return true } - return r.Addr == other.Addr && slices.Equal(r.BootstrapResolution, other.BootstrapResolution) + return r.Addr == other.Addr && + slices.Equal(r.BootstrapResolution, other.BootstrapResolution) && + r.UseWithExitNode == other.UseWithExitNode } diff --git a/types/dnstype/dnstype_clone.go b/types/dnstype/dnstype_clone.go index 86ca0535fa476..3985704aa0638 100644 --- a/types/dnstype/dnstype_clone.go +++ b/types/dnstype/dnstype_clone.go @@ -25,6 +25,7 @@ func (src *Resolver) Clone() *Resolver { var _ResolverCloneNeedsRegeneration = Resolver(struct { Addr string BootstrapResolution []netip.Addr + UseWithExitNode bool }{}) // Clone duplicates src into dst and reports whether it succeeded. 
diff --git a/types/dnstype/dnstype_test.go b/types/dnstype/dnstype_test.go index e3a941a2040fc..ada5f687def9f 100644 --- a/types/dnstype/dnstype_test.go +++ b/types/dnstype/dnstype_test.go @@ -17,7 +17,7 @@ func TestResolverEqual(t *testing.T) { fieldNames = append(fieldNames, field.Name) } sort.Strings(fieldNames) - if !slices.Equal(fieldNames, []string{"Addr", "BootstrapResolution"}) { + if !slices.Equal(fieldNames, []string{"Addr", "BootstrapResolution", "UseWithExitNode"}) { t.Errorf("Resolver fields changed; update test") } @@ -68,6 +68,18 @@ func TestResolverEqual(t *testing.T) { }, want: false, }, + { + name: "equal UseWithExitNode", + a: &Resolver{Addr: "dns.example.com", UseWithExitNode: true}, + b: &Resolver{Addr: "dns.example.com", UseWithExitNode: true}, + want: true, + }, + { + name: "not equal UseWithExitNode", + a: &Resolver{Addr: "dns.example.com", UseWithExitNode: true}, + b: &Resolver{Addr: "dns.example.com", UseWithExitNode: false}, + want: false, + }, } for _, tt := range tests { diff --git a/types/dnstype/dnstype_view.go b/types/dnstype/dnstype_view.go index 3d374ab47f76c..0704670a29606 100644 --- a/types/dnstype/dnstype_view.go +++ b/types/dnstype/dnstype_view.go @@ -88,10 +88,12 @@ func (v ResolverView) Addr() string { return v.ж.Addr } func (v ResolverView) BootstrapResolution() views.Slice[netip.Addr] { return views.SliceOf(v.ж.BootstrapResolution) } +func (v ResolverView) UseWithExitNode() bool { return v.ж.UseWithExitNode } func (v ResolverView) Equal(v2 ResolverView) bool { return v.ж.Equal(v2.ж) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _ResolverViewNeedsRegeneration = Resolver(struct { Addr string BootstrapResolution []netip.Addr + UseWithExitNode bool }{}) From 5b6c64b1873a6cb824f120343bf268d4cc6ddbf5 Mon Sep 17 00:00:00 2001 From: Will Hannah Date: Fri, 15 Aug 2025 06:11:27 -0700 Subject: [PATCH 0218/1093] net/tshttpproxy: use errors.New for error creation (#16860) Updates tailscale/corp#30668 Signed-off-by: Will Hannah --- net/tshttpproxy/tshttpproxy.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/tshttpproxy/tshttpproxy.go b/net/tshttpproxy/tshttpproxy.go index ab2fd39e37858..0456009ed9a81 100644 --- a/net/tshttpproxy/tshttpproxy.go +++ b/net/tshttpproxy/tshttpproxy.go @@ -7,6 +7,7 @@ package tshttpproxy import ( "context" + "errors" "fmt" "log" "net" @@ -48,7 +49,7 @@ func SetProxyFunc(fn func(*url.URL) (*url.URL, error)) error { // Allow override only if config is not set if config != nil { - return fmt.Errorf("tshttpproxy: SetProxyFunc can only be called when config is not set") + return errors.New("tshttpproxy: SetProxyFunc can only be called when config is not set") } proxyFunc = fn From 55beba40948ea406e82e79eca5504be02bbf8c9f Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Fri, 15 Aug 2025 11:36:30 -0700 Subject: [PATCH 0219/1093] types/key: init HardwareAttestionKey implementation (#16867) Define the HardwareAttestionKey interface describing a platform-specific hardware backed node identity attestation key. Clients will register the key type implementations for their platform. 
Updates tailscale/corp#31269 Signed-off-by: Patrick O'Doherty --- types/key/hardware_attestation.go | 68 +++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 types/key/hardware_attestation.go diff --git a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go new file mode 100644 index 0000000000000..be2eefb78319e --- /dev/null +++ b/types/key/hardware_attestation.go @@ -0,0 +1,68 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package key + +import ( + "crypto" + "encoding/json" + "fmt" +) + +var ErrUnsupported = fmt.Errorf("key type not supported on this platform") + +// HardwareAttestationKey describes a hardware-backed key that is used to +// identify a node. Implementation details will +// vary based on the platform in use (SecureEnclave for Apple, TPM for +// Windows/Linux, Android Hardware-backed Keystore). +// This key can only be marshalled and unmarshalled on the same machine. +type HardwareAttestationKey interface { + crypto.Signer + json.Marshaler + json.Unmarshaler +} + +// emptyHardwareAttestationKey is a function that returns an empty +// HardwareAttestationKey suitable for use with JSON unmarshalling. +var emptyHardwareAttestationKey func() HardwareAttestationKey + +// createHardwareAttestationKey is a function that creates a new +// HardwareAttestationKey for the current platform. +var createHardwareAttestationKey func() (HardwareAttestationKey, error) + +// HardwareAttestationKeyFn is a callback function type that returns a HardwareAttestationKey +// and an error. It is used to register platform-specific implementations of +// HardwareAttestationKey. +type HardwareAttestationKeyFn func() (HardwareAttestationKey, error) + +// RegisterHardwareAttestationKeyFns registers a hardware attestation +// key implementation for the current platform. 
+func RegisterHardwareAttestationKeyFns(emptyFn func() HardwareAttestationKey, createFn HardwareAttestationKeyFn) { + if emptyHardwareAttestationKey != nil { + panic("emptyPlatformHardwareAttestationKey already registered") + } + emptyHardwareAttestationKey = emptyFn + + if createHardwareAttestationKey != nil { + panic("createPlatformHardwareAttestationKey already registered") + } + createHardwareAttestationKey = createFn +} + +// NewEmptyHardwareAttestationKey returns an empty HardwareAttestationKey +// suitable for JSON unmarshalling. +func NewEmptyHardwareAttestationKey() (HardwareAttestationKey, error) { + if emptyHardwareAttestationKey == nil { + return nil, ErrUnsupported + } + return emptyHardwareAttestationKey(), nil +} + +// NewHardwareAttestationKey returns a newly created HardwareAttestationKey for +// the current platform. +func NewHardwareAttestationKey() (HardwareAttestationKey, error) { + if createHardwareAttestationKey == nil { + return nil, ErrUnsupported + } + return createHardwareAttestationKey() +} From 9c39296ab51c0088f4cf285456dbf5dd04f43f90 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 15 Aug 2025 13:43:45 -0500 Subject: [PATCH 0220/1093] release/dist/qnap: verify code signing This pulls in a change from github.com/tailscale/QDK to verify code signing when using QNAP_SIGNING_SCRIPT. It also upgrades to the latest Google Cloud PKCS#11 library, and reorders the Dockerfile to allow for more efficient future upgrades to the included QDK. 
Updates tailscale/corp#23528 Signed-off-by: Percy Wegmann --- release/dist/qnap/files/scripts/Dockerfile.qpkg | 12 ++++++------ release/dist/qnap/files/scripts/sign-qpkg.sh | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/release/dist/qnap/files/scripts/Dockerfile.qpkg b/release/dist/qnap/files/scripts/Dockerfile.qpkg index dbcaac11668f0..8e99630d1e098 100644 --- a/release/dist/qnap/files/scripts/Dockerfile.qpkg +++ b/release/dist/qnap/files/scripts/Dockerfile.qpkg @@ -9,13 +9,13 @@ RUN apt-get update -y && \ curl \ patch -# Install QNAP QDK (force a specific version to pick up updates) -RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 6aba74f6b4c8ea0c30b8aec9f3476f428f6a58a1 -RUN cd /QDK && ./InstallToUbuntu.sh install -ENV PATH="/usr/share/QDK/bin:${PATH}" - # Install Google Cloud PKCS11 module RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list RUN apt-get update -y && apt-get install -y --no-install-recommends google-cloud-cli libengine-pkcs11-openssl -RUN curl -L https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/pkcs11-v1.6/libkmsp11-1.6-linux-amd64.tar.gz | tar xz +RUN curl -L https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/pkcs11-v1.7/libkmsp11-1.7-linux-amd64.tar.gz | tar xz + +# Install QNAP QDK (force a specific version to pick up updates) +RUN git clone https://github.com/tailscale/QDK.git && cd /QDK && git reset --hard 8478a990decf0b0bb259ae11c636e66bfeff2433 +RUN cd /QDK && ./InstallToUbuntu.sh install +ENV PATH="/usr/share/QDK/bin:${PATH}" diff --git a/release/dist/qnap/files/scripts/sign-qpkg.sh b/release/dist/qnap/files/scripts/sign-qpkg.sh index 5629672f85e95..b6b99a3b1b2ef 100755 --- 
a/release/dist/qnap/files/scripts/sign-qpkg.sh +++ b/release/dist/qnap/files/scripts/sign-qpkg.sh @@ -13,7 +13,7 @@ log_directory: "/tmp/kmsp11" chmod 0600 pkcs11-config.yaml export KMS_PKCS11_CONFIG=`readlink -f pkcs11-config.yaml` -export PKCS11_MODULE_PATH=/libkmsp11-1.6-linux-amd64/libkmsp11.so +export PKCS11_MODULE_PATH=/libkmsp11-1.7-linux-amd64/libkmsp11.so # Verify signature of pkcs11 module # See https://github.com/GoogleCloudPlatform/kms-integrations/blob/master/kmsp11/docs/user_guide.md#downloading-and-verifying-the-library From 6006bc92b5d1fd6a71f776826fc8e200ebc9b755 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Fri, 15 Aug 2025 16:04:23 -0400 Subject: [PATCH 0221/1093] net/{netns, netmon}: use LastKnownDefaultInterface if set and check for utun (#16873) fixes tailscale/corp#31299 Fixes two issues: getInterfaceIndex would occasionally race with netmon's state, returning the cached default interface index after it had be changed by NWNetworkMonitor. This had the potential to cause connections to bind to the prior default. The fix here is to preferentially use the interface index provided by NWNetworkMonitor preferentially. When no interfaces are available, macOS will set the tunnel as the default interface when an exit node is enabled, potentially causing getInterfaceIndex to return utun's index. We now guard against this when taking the defaultIdx path. 
Signed-off-by: Jonathan Nobels --- net/netmon/defaultroute_darwin.go | 106 +++++++++++++++++---------- net/netmon/interfaces_darwin_test.go | 22 ++++++ net/netns/netns_darwin.go | 41 +++++++++-- 3 files changed, 124 insertions(+), 45 deletions(-) diff --git a/net/netmon/defaultroute_darwin.go b/net/netmon/defaultroute_darwin.go index 4efe2f1aa61bf..57f7e22b7ddce 100644 --- a/net/netmon/defaultroute_darwin.go +++ b/net/netmon/defaultroute_darwin.go @@ -6,6 +6,8 @@ package netmon import ( + "errors" + "fmt" "log" "net" @@ -16,14 +18,26 @@ var ( lastKnownDefaultRouteIfName syncs.AtomicValue[string] ) -// UpdateLastKnownDefaultRouteInterface is called by ipn-go-bridge in the iOS app when +// UpdateLastKnownDefaultRouteInterface is called by ipn-go-bridge from apple network extensions when // our NWPathMonitor instance detects a network path transition. func UpdateLastKnownDefaultRouteInterface(ifName string) { if ifName == "" { return } if old := lastKnownDefaultRouteIfName.Swap(ifName); old != ifName { - log.Printf("defaultroute_darwin: update from Swift, ifName = %s (was %s)", ifName, old) + interfaces, err := netInterfaces() + if err != nil { + log.Printf("defaultroute_darwin: UpdateLastKnownDefaultRouteInterface could not get interfaces: %v", err) + return + } + + netif, err := getInterfaceByName(ifName, interfaces) + if err != nil { + log.Printf("defaultroute_darwin: UpdateLastKnownDefaultRouteInterface could not find interface index for %s: %v", ifName, err) + return + } + + log.Printf("defaultroute_darwin: updated last known default if from OS, ifName = %s index: %d (was %s)", ifName, netif.Index, old) } } @@ -40,57 +54,69 @@ func defaultRoute() (d DefaultRouteDetails, err error) { // // If for any reason the Swift machinery didn't work and we don't get any updates, we will // fallback to the BSD logic. + osRoute, osRouteErr := OSDefaultRoute() + if osRouteErr == nil { + // If we got a valid interface from the OS, use it. 
+ d.InterfaceName = osRoute.InterfaceName + d.InterfaceIndex = osRoute.InterfaceIndex + return d, nil + } - // Start by getting all available interfaces. - interfaces, err := netInterfaces() + // Fallback to the BSD logic + idx, err := DefaultRouteInterfaceIndex() if err != nil { - log.Printf("defaultroute_darwin: could not get interfaces: %v", err) - return d, ErrNoGatewayIndexFound + return d, err } - - getInterfaceByName := func(name string) *Interface { - for _, ifc := range interfaces { - if ifc.Name != name { - continue - } - - if !ifc.IsUp() { - log.Printf("defaultroute_darwin: %s is down", name) - return nil - } - - addrs, _ := ifc.Addrs() - if len(addrs) == 0 { - log.Printf("defaultroute_darwin: %s has no addresses", name) - return nil - } - return &ifc - } - return nil + iface, err := net.InterfaceByIndex(idx) + if err != nil { + return d, err } + d.InterfaceName = iface.Name + d.InterfaceIndex = idx + return d, nil +} + +// OSDefaultRoute returns the DefaultRouteDetails for the default interface as provided by the OS +// via UpdateLastKnownDefaultRouteInterface. If UpdateLastKnownDefaultRouteInterface has not been called, +// the interface name is not valid, or we cannot find its index, an error is returned. +func OSDefaultRoute() (d DefaultRouteDetails, err error) { // Did Swift set lastKnownDefaultRouteInterface? If so, we should use it and don't bother // with anything else. However, for sanity, do check whether Swift gave us with an interface - // that exists, is up, and has an address. + // that exists, is up, and has an address and is not the tunnel itself. if swiftIfName := lastKnownDefaultRouteIfName.Load(); swiftIfName != "" { - ifc := getInterfaceByName(swiftIfName) - if ifc != nil { + // Start by getting all available interfaces. 
+ interfaces, err := netInterfaces() + if err != nil { + log.Printf("defaultroute_darwin: could not get interfaces: %v", err) + return d, err + } + + if ifc, err := getInterfaceByName(swiftIfName, interfaces); err == nil { d.InterfaceName = ifc.Name d.InterfaceIndex = ifc.Index return d, nil } } + err = errors.New("no os provided default route interface found") + return d, err +} - // Fallback to the BSD logic - idx, err := DefaultRouteInterfaceIndex() - if err != nil { - return d, err - } - iface, err := net.InterfaceByIndex(idx) - if err != nil { - return d, err +func getInterfaceByName(name string, interfaces []Interface) (*Interface, error) { + for _, ifc := range interfaces { + if ifc.Name != name { + continue + } + + if !ifc.IsUp() { + return nil, fmt.Errorf("defaultroute_darwin: %s is down", name) + } + + addrs, _ := ifc.Addrs() + if len(addrs) == 0 { + return nil, fmt.Errorf("defaultroute_darwin: %s has no addresses", name) + } + return &ifc, nil } - d.InterfaceName = iface.Name - d.InterfaceIndex = idx - return d, nil + return nil, errors.New("no interfaces found") } diff --git a/net/netmon/interfaces_darwin_test.go b/net/netmon/interfaces_darwin_test.go index d756d13348bc3..c3d40a6f0e34e 100644 --- a/net/netmon/interfaces_darwin_test.go +++ b/net/netmon/interfaces_darwin_test.go @@ -112,3 +112,25 @@ func TestFetchRoutingTable(t *testing.T) { } } } + +func TestUpdateLastKnownDefaultRouteInterface(t *testing.T) { + // Pick some interface on the machine + interfaces, err := netInterfaces() + if err != nil || len(interfaces) == 0 { + t.Fatalf("netInterfaces() error: %v", err) + } + + // Set it as our last known default route interface + ifName := interfaces[0].Name + UpdateLastKnownDefaultRouteInterface(ifName) + + // And make sure we can get it back + route, err := OSDefaultRoute() + if err != nil { + t.Fatalf("OSDefaultRoute() error: %v", err) + } + want, got := ifName, route.InterfaceName + if want != got { + t.Errorf("OSDefaultRoute() = %q, want %q", got, 
want) + } +} diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go index ac5e89d76cc2e..f2ed16601b88e 100644 --- a/net/netns/netns_darwin.go +++ b/net/netns/netns_darwin.go @@ -78,10 +78,38 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string) return -1, errInterfaceStateInvalid } - if iface, ok := state.Interface[state.DefaultRouteInterface]; ok { - return iface.Index, nil + // Netmon's cached view of the default inteface + cachedIdx, ok := state.Interface[state.DefaultRouteInterface] + // OSes view (if available) of the default interface + osIf, osIferr := netmon.OSDefaultRoute() + + idx := -1 + errOut := errInterfaceStateInvalid + // Preferentially choose the OS's view of the default if index. Due to the way darwin sets the delegated + // interface on tunnel creation only, it is possible for netmon to have a stale view of the default and + // netmon's view is often temporarily wrong during network transitions, or for us to not have the + // the the oses view of the defaultIf yet. 
+ if osIferr == nil { + idx = osIf.InterfaceIndex + errOut = nil + } else if ok { + idx = cachedIdx.Index + errOut = nil + } + + if osIferr == nil && ok && (osIf.InterfaceIndex != cachedIdx.Index) { + logf("netns: [unexpected] os default if %q (%d) != netmon cached if %q (%d)", osIf.InterfaceName, osIf.InterfaceIndex, cachedIdx.Name, cachedIdx.Index) + } + + // Sanity check to make sure we didn't pick the tailscale interface + if tsif, err2 := tailscaleInterface(); tsif != nil && err2 == nil && errOut == nil { + if tsif.Index == idx { + idx = -1 + errOut = errInterfaceStateInvalid + } } - return -1, errInterfaceStateInvalid + + return idx, errOut } useRoute := bindToInterfaceByRoute.Load() || bindToInterfaceByRouteEnv() @@ -100,7 +128,7 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string) idx, err := interfaceIndexFor(addr, true /* canRecurse */) if err != nil { - logf("netns: error in interfaceIndexFor: %v", err) + logf("netns: error getting interface index for %q: %v", address, err) return defaultIdx() } @@ -108,10 +136,13 @@ func getInterfaceIndex(logf logger.Logf, netMon *netmon.Monitor, address string) // if so, we fall back to binding from the default. tsif, err2 := tailscaleInterface() if err2 == nil && tsif != nil && tsif.Index == idx { - logf("[unexpected] netns: interfaceIndexFor returned Tailscale interface") + // note: with an exit node enabled, this is almost always true. defaultIdx() is the + // right thing to do here. 
return defaultIdx() } + logf("netns: completed success interfaceIndexFor(%s) = %d", address, idx) + return idx, err } From 192fa6f05d12cfadaa3044d57e0a74f2b9f46a55 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 15 Aug 2025 15:45:17 -0500 Subject: [PATCH 0222/1093] {cmd/dist,release/dist}: add support for intermediary QNAP signing certificates Updates #23528 Signed-off-by: Percy Wegmann --- cmd/dist/dist.go | 20 +++++++++++--------- release/dist/qnap/files/scripts/sign-qpkg.sh | 7 +++++-- release/dist/qnap/pkgs.go | 12 +++++++----- release/dist/qnap/targets.go | 15 ++++++++------- 4 files changed, 31 insertions(+), 23 deletions(-) diff --git a/cmd/dist/dist.go b/cmd/dist/dist.go index 038ced708e0f0..c7406298d8188 100644 --- a/cmd/dist/dist.go +++ b/cmd/dist/dist.go @@ -21,12 +21,13 @@ import ( ) var ( - synologyPackageCenter bool - gcloudCredentialsBase64 string - gcloudProject string - gcloudKeyring string - qnapKeyName string - qnapCertificateBase64 string + synologyPackageCenter bool + gcloudCredentialsBase64 string + gcloudProject string + gcloudKeyring string + qnapKeyName string + qnapCertificateBase64 string + qnapCertificateIntermediariesBase64 string ) func getTargets() ([]dist.Target, error) { @@ -47,11 +48,11 @@ func getTargets() ([]dist.Target, error) { // To build for package center, run // ./tool/go run ./cmd/dist build --synology-package-center synology ret = append(ret, synology.Targets(synologyPackageCenter, nil)...) - qnapSigningArgs := []string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64} + qnapSigningArgs := []string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64, qnapCertificateIntermediariesBase64} if cmp.Or(qnapSigningArgs...) 
!= "" && slices.Contains(qnapSigningArgs, "") { - return nil, errors.New("all of --gcloud-credentials, --gcloud-project, --gcloud-keyring, --qnap-key-name and --qnap-certificate must be set") + return nil, errors.New("all of --gcloud-credentials, --gcloud-project, --gcloud-keyring, --qnap-key-name, --qnap-certificate and --qnap-certificate-intermediaries must be set") } - ret = append(ret, qnap.Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64)...) + ret = append(ret, qnap.Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, qnapKeyName, qnapCertificateBase64, qnapCertificateIntermediariesBase64)...) return ret, nil } @@ -65,6 +66,7 @@ func main() { subcmd.FlagSet.StringVar(&gcloudKeyring, "gcloud-keyring", "", "path to keyring in GCP KMS (used when signing QNAP builds)") subcmd.FlagSet.StringVar(&qnapKeyName, "qnap-key-name", "", "name of GCP key to use when signing QNAP builds") subcmd.FlagSet.StringVar(&qnapCertificateBase64, "qnap-certificate", "", "base64 encoded certificate to use when signing QNAP builds") + subcmd.FlagSet.StringVar(&qnapCertificateIntermediariesBase64, "qnap-certificate-intermediaries", "", "base64 encoded intermediary certificate to use when signing QNAP builds") } } diff --git a/release/dist/qnap/files/scripts/sign-qpkg.sh b/release/dist/qnap/files/scripts/sign-qpkg.sh index b6b99a3b1b2ef..1dacb876f3b16 100755 --- a/release/dist/qnap/files/scripts/sign-qpkg.sh +++ b/release/dist/qnap/files/scripts/sign-qpkg.sh @@ -24,7 +24,9 @@ MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEtfLbXkHUVc9oUPTNyaEK3hIwmuGRoTtd -----END PUBLIC KEY-----" > pkcs11-release-signing-key.pem openssl dgst -sha384 -verify pkcs11-release-signing-key.pem -signature "$PKCS11_MODULE_PATH.sig" "$PKCS11_MODULE_PATH" -echo "$QNAP_SIGNING_CERT_BASE64" | base64 --decode > cert.crt +echo "$QNAP_SIGNING_CERT_BASE64" | base64 --decode > signer.pem + +echo "$QNAP_SIGNING_CERT_INTERMEDIARIES_BASE64" | base64 --decode > certs.pem openssl 
cms \ -sign \ @@ -35,6 +37,7 @@ openssl cms \ -inkey "pkcs11:object=$QNAP_SIGNING_KEY_NAME" \ -keyopt rsa_padding_mode:pss \ -keyopt rsa_pss_saltlen:digest \ - -signer cert.crt \ + -signer signer.pem \ + -certfile certs.pem \ -in "$1" \ -out - diff --git a/release/dist/qnap/pkgs.go b/release/dist/qnap/pkgs.go index 7dc3b94958639..5062011f06ea6 100644 --- a/release/dist/qnap/pkgs.go +++ b/release/dist/qnap/pkgs.go @@ -27,11 +27,12 @@ type target struct { } type signer struct { - gcloudCredentialsBase64 string - gcloudProject string - gcloudKeyring string - keyName string - certificateBase64 string + gcloudCredentialsBase64 string + gcloudProject string + gcloudKeyring string + keyName string + certificateBase64 string + certificateIntermediariesBase64 string } func (t *target) String() string { @@ -90,6 +91,7 @@ func (t *target) buildQPKG(b *dist.Build, qnapBuilds *qnapBuilds, inner *innerPk "-e", fmt.Sprintf("GCLOUD_KEYRING=%s", t.signer.gcloudKeyring), "-e", fmt.Sprintf("QNAP_SIGNING_KEY_NAME=%s", t.signer.keyName), "-e", fmt.Sprintf("QNAP_SIGNING_CERT_BASE64=%s", t.signer.certificateBase64), + "-e", fmt.Sprintf("QNAP_SIGNING_CERT_INTERMEDIARIES_BASE64=%s", t.signer.certificateIntermediariesBase64), "-e", fmt.Sprintf("QNAP_SIGNING_SCRIPT=%s", "/sign-qpkg.sh"), "-v", fmt.Sprintf("%s:/sign-qpkg.sh", filepath.Join(qnapBuilds.tmpDir, "files/scripts/sign-qpkg.sh")), ) diff --git a/release/dist/qnap/targets.go b/release/dist/qnap/targets.go index 1c1818a700cd1..0a02139548b17 100644 --- a/release/dist/qnap/targets.go +++ b/release/dist/qnap/targets.go @@ -18,15 +18,16 @@ import ( // gcloudKeyring is the full path to the Google Cloud keyring containing the signing key. // keyName is the name of the key. // certificateBase64 is the PEM certificate to use in the signature, base64 encoded. 
-func Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64 string) []dist.Target { +func Targets(gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64, certificateIntermediariesBase64 string) []dist.Target { var signerInfo *signer - if !slices.Contains([]string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64}, "") { + if !slices.Contains([]string{gcloudCredentialsBase64, gcloudProject, gcloudKeyring, keyName, certificateBase64, certificateIntermediariesBase64}, "") { signerInfo = &signer{ - gcloudCredentialsBase64: gcloudCredentialsBase64, - gcloudProject: gcloudProject, - gcloudKeyring: gcloudKeyring, - keyName: keyName, - certificateBase64: certificateBase64, + gcloudCredentialsBase64: gcloudCredentialsBase64, + gcloudProject: gcloudProject, + gcloudKeyring: gcloudKeyring, + keyName: keyName, + certificateBase64: certificateBase64, + certificateIntermediariesBase64: certificateIntermediariesBase64, } } return []dist.Target{ From 6d45663dd4305b41c27354febe30b0b7dd0d273d Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Sat, 16 Aug 2025 09:42:25 -0400 Subject: [PATCH 0223/1093] cmd/derpprobe,prober: add run all probes handler (#16875) Add a Run all probes handler that executes all probes except those that are continuous or the derpmap probe. This is leveraged by other tooling to confirm DERP stability after a deploy. 
Updates tailscale/corp#27370 Signed-off-by: Mike O'Driscoll --- cmd/derpprobe/derpprobe.go | 1 + prober/prober.go | 64 +++++++++++++++++++- prober/prober_test.go | 118 +++++++++++++++++++++++++++++++++++++ 3 files changed, 182 insertions(+), 1 deletion(-) diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 25159d649408e..5d2179b512c23 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -107,6 +107,7 @@ func main() { mux := http.NewServeMux() d := tsweb.Debugger(mux) d.Handle("probe-run", "Run a probe", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunHandler), tsweb.HandlerOptions{Logf: log.Printf})) + d.Handle("probe-all", "Run all configured probes", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{Logf: log.Printf})) mux.Handle("/", tsweb.StdHandler(p.StatusHandler( prober.WithTitle("DERP Prober"), prober.WithPageLink("Prober metrics", "/debug/varz"), diff --git a/prober/prober.go b/prober/prober.go index 1237611f4e0c9..b69d26821dfba 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -7,6 +7,7 @@ package prober import ( + "bytes" "cmp" "container/ring" "context" @@ -21,6 +22,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sync/errgroup" "tailscale.com/syncs" "tailscale.com/tsweb" ) @@ -574,7 +576,67 @@ func (p *Prober) RunHandler(w http.ResponseWriter, r *http.Request) error { return tsweb.Error(respStatus, fmt.Sprintf("Probe failed: %s\n%s", err.Error(), stats), err) } w.WriteHeader(respStatus) - w.Write([]byte(fmt.Sprintf("Probe succeeded in %v\n%s", info.Latency, stats))) + fmt.Fprintf(w, "Probe succeeded in %v\n%s", info.Latency, stats) + return nil +} + +type RunHandlerAllResponse struct { + Results map[string]RunHandlerResponse +} + +func (p *Prober) RunAllHandler(w http.ResponseWriter, r *http.Request) error { + probes := make(map[string]*Probe) + p.mu.Lock() + for _, probe := range p.probes { + if !probe.IsContinuous() && probe.name != 
"derpmap-probe" { + probes[probe.name] = probe + } + } + p.mu.Unlock() + + // Do not abort running probes just because one of them has failed. + g := new(errgroup.Group) + + var resultsMu sync.Mutex + results := make(map[string]RunHandlerResponse) + + for name, probe := range probes { + g.Go(func() error { + probe.mu.Lock() + prevInfo := probe.probeInfoLocked() + probe.mu.Unlock() + + info, err := probe.run() + + resultsMu.Lock() + results[name] = RunHandlerResponse{ + ProbeInfo: info, + PreviousSuccessRatio: prevInfo.RecentSuccessRatio(), + PreviousMedianLatency: prevInfo.RecentMedianLatency(), + } + resultsMu.Unlock() + return err + }) + } + + respStatus := http.StatusOK + if err := g.Wait(); err != nil { + respStatus = http.StatusFailedDependency + } + + // Return serialized JSON response if the client requested JSON + resp := &RunHandlerAllResponse{ + Results: results, + } + var b bytes.Buffer + if err := json.NewEncoder(&b).Encode(resp); err != nil { + return tsweb.Error(http.StatusInternalServerError, "error encoding JSON response", err) + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(respStatus) + w.Write(b.Bytes()) + return nil } diff --git a/prober/prober_test.go b/prober/prober_test.go index 21c975a73a655..7cb841936b33f 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -639,6 +639,124 @@ func TestProberRunHandler(t *testing.T) { } +func TestRunAllHandler(t *testing.T) { + clk := newFakeTime() + + tests := []struct { + name string + probeFunc []func(context.Context) error + wantResponseCode int + wantJSONResponse RunHandlerAllResponse + wantPlaintextResponse string + }{ + { + name: "successProbe", + probeFunc: []func(context.Context) error{func(context.Context) error { return nil }, func(context.Context) error { return nil }}, + wantResponseCode: http.StatusOK, + wantJSONResponse: RunHandlerAllResponse{ + Results: map[string]RunHandlerResponse{ + "successProbe-0": { + ProbeInfo: ProbeInfo{ + Name: 
"successProbe-0", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + "successProbe-1": { + ProbeInfo: ProbeInfo{ + Name: "successProbe-1", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + }, + }, + wantPlaintextResponse: "Probe successProbe-0: succeeded\n\tLast run: 0s\n\tPrevious success rate: 100.0%\n\tPrevious median latency: 0s\nProbe successProbe-1: succeeded\n\tLast run: 0s\n\tPrevious success rate: 100.0%\n\tPrevious median latency: 0s\n\n", + }, + { + name: "successAndFailureProbes", + probeFunc: []func(context.Context) error{func(context.Context) error { return nil }, func(context.Context) error { return fmt.Errorf("error2") }}, + wantResponseCode: http.StatusFailedDependency, + wantJSONResponse: RunHandlerAllResponse{ + Results: map[string]RunHandlerResponse{ + "successAndFailureProbes-0": { + ProbeInfo: ProbeInfo{ + Name: "successAndFailureProbes-0", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + "successAndFailureProbes-1": { + ProbeInfo: ProbeInfo{ + Name: "successAndFailureProbes-1", + Interval: probeInterval, + Status: ProbeStatusFailed, + Error: "error2", + RecentResults: []bool{false, false}, + }, + }, + }, + }, + wantPlaintextResponse: "Probe successAndFailureProbes-0: succeeded\n\tLast run: 0s\n\tPrevious success rate: 100.0%\n\tPrevious median latency: 0s\nProbe successAndFailureProbes-1: failed\n\tLast run: 0s\n\tPrevious success rate: 0.0%\n\tPrevious median latency: 0s\n\n\tLast error: error2\n\n", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + p := newForTest(clk.Now, clk.NewTicker).WithOnce(true) + for i, pfunc := range tc.probeFunc { + probe := p.Run(fmt.Sprintf("%s-%d", tc.name, i), probeInterval, nil, FuncProbe(pfunc)) + defer probe.Close() + 
<-probe.stopped // wait for the first run. + } + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.Handle("/prober/runall/", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{})) + + req, err := http.NewRequest("GET", server.URL+"/prober/runall/", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("failed to make request: %v", err) + } + + if resp.StatusCode != tc.wantResponseCode { + t.Errorf("unexpected response code: got %d, want %d", resp.StatusCode, tc.wantResponseCode) + } + + if resp.Header.Get("Content-Type") != "application/json" { + t.Errorf("unexpected content type: got %q, want application/json", resp.Header.Get("Content-Type")) + } + var gotJSON RunHandlerAllResponse + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if err := json.Unmarshal(body, &gotJSON); err != nil { + t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, body) + } + if diff := cmp.Diff(tc.wantJSONResponse, gotJSON, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Labels", "RecentLatencies")); diff != "" { + t.Errorf("unexpected JSON response (-want +got):\n%s", diff) + } + + }) + } + +} + type fakeTicker struct { ch chan time.Time interval time.Duration From 9d9a70d81d87849971add8588dc47120db81bc9d Mon Sep 17 00:00:00 2001 From: Will Norris Date: Sun, 17 Aug 2025 08:26:59 -0700 Subject: [PATCH 0224/1093] client/systray: disable 'more settings' menu if backend not running Updates #1708 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index 98c6156b82184..b1bc45fa82100 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -306,11 
+306,14 @@ func (menu *Menu) rebuild() { menu.rebuildExitNodeMenu(ctx) } - if menu.status != nil { - menu.more = systray.AddMenuItem("More settings", "") + menu.more = systray.AddMenuItem("More settings", "") + if menu.status != nil && menu.status.BackendState == "Running" { + // web client is only available if backend is running onClick(ctx, menu.more, func(_ context.Context) { webbrowser.Open("http://100.100.100.100/") }) + } else { + menu.more.Disable() } // TODO(#15528): this menu item shouldn't be necessary at all, From 02f6030dbd3b48a30d5e33803eb04a8fcdce7856 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 21 Jul 2025 14:14:02 -0600 Subject: [PATCH 0225/1093] tool, tool/gocross: update gocross to support building natively on Windows and add a PowerShell Core wrapper script gocross-wrapper.ps1 is a PowerShell core script that is essentially a straight port of gocross-wrapper.sh. It requires PowerShell 7.4, which is the latest LTS release of PSCore. Why use PowerShell Core instead of Windows PowerShell? Essentially because the former is much better to script with and is the edition that is currently maintained. Because we're using PowerShell Core, but many people will be running scripts from a machine that only has Windows PowerShell, go.cmd has been updated to prompt the user for PowerShell core installation if necessary. gocross-wrapper.sh has also been updated to utilize the PSCore script when running under cygwin or msys. gocross itself required a couple of updates: We update gocross to output the PowerShell Core wrapper alongside the bash wrapper, which will propagate the revised scripts to other repos as necessary. We also fix a couple of things in gocross that didn't work on Windows: we change the toolchain resolution code to use os.UserHomeDir instead of directly referencing the HOME environment variable, and we fix a bug in the way arguments were being passed into exec.Command on non-Unix systems. 
Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- .github/workflows/test.yml | 2 +- tool/go.cmd | 36 ++- tool/gocross/exec_other.go | 2 +- tool/gocross/gocross-wrapper.ps1 | 220 +++++++++++++++++++ tool/gocross/gocross-wrapper.sh | 6 + tool/gocross/gocross.go | 15 +- tool/gocross/gocross_wrapper_test.go | 2 +- tool/gocross/gocross_wrapper_windows_test.go | 25 +++ tool/gocross/toolchain.go | 10 +- 9 files changed, 310 insertions(+), 8 deletions(-) create mode 100644 tool/gocross/gocross-wrapper.ps1 create mode 100644 tool/gocross/gocross_wrapper_windows_test.go diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7ccb3986968fc..fe7849af6f992 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -177,7 +177,7 @@ jobs: run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper - name: test all working-directory: src - run: NOBASHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}} + run: NOBASHDEBUG=true NOPWSHDEBUG=true PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}} env: GOARCH: ${{ matrix.goarch }} TS_TEST_SHARD: ${{ matrix.shard }} diff --git a/tool/go.cmd b/tool/go.cmd index 04172a28d5b25..b7b5d0483b972 100644 --- a/tool/go.cmd +++ b/tool/go.cmd @@ -1,2 +1,36 @@ @echo off -powershell -NoProfile -ExecutionPolicy Bypass -File "%~dp0go-win.ps1" %* +rem Checking for PowerShell Core using PowerShell for Windows... +powershell -NoProfile -NonInteractive -Command "& {Get-Command -Name pwsh -ErrorAction Stop}" > NUL +if ERRORLEVEL 1 ( + rem Ask the user whether they should install the dependencies. Note that this + rem code path never runs in CI because pwsh is always explicitly installed. + + rem Time out after 5 minutes, defaulting to 'N' + choice /c yn /t 300 /d n /m "PowerShell Core is required. Install now" + if ERRORLEVEL 2 ( + echo Aborting due to unmet dependencies. 
+ exit /b 1 + ) + + rem Check for a .NET Core runtime using PowerShell for Windows... + powershell -NoProfile -NonInteractive -Command "& {if (-not (dotnet --list-runtimes | Select-String 'Microsoft\.NETCore\.App' -Quiet)) {exit 1}}" > NUL + rem Install .NET Core if missing to provide PowerShell Core's runtime library. + if ERRORLEVEL 1 ( + rem Time out after 5 minutes, defaulting to 'N' + choice /c yn /t 300 /d n /m "PowerShell Core requires .NET Core for its runtime library. Install now" + if ERRORLEVEL 2 ( + echo Aborting due to unmet dependencies. + exit /b 1 + ) + + winget install --accept-package-agreements --id Microsoft.DotNet.Runtime.8 -e --source winget + ) + + rem Now install PowerShell Core. + winget install --accept-package-agreements --id Microsoft.PowerShell -e --source winget + if ERRORLEVEL 0 echo Please re-run this script within a new console session to pick up PATH changes. + rem Either way we didn't build, so return 1. + exit /b 1 +) + +pwsh -NoProfile -ExecutionPolicy Bypass "%~dp0..\tool\gocross\gocross-wrapper.ps1" %* diff --git a/tool/gocross/exec_other.go b/tool/gocross/exec_other.go index 8d4df0db334dd..7bce0c0993620 100644 --- a/tool/gocross/exec_other.go +++ b/tool/gocross/exec_other.go @@ -11,7 +11,7 @@ import ( ) func doExec(cmd string, args []string, env []string) error { - c := exec.Command(cmd, args...) + c := exec.Command(cmd, args[1:]...) 
c.Env = env c.Stdin = os.Stdin c.Stdout = os.Stdout diff --git a/tool/gocross/gocross-wrapper.ps1 b/tool/gocross/gocross-wrapper.ps1 new file mode 100644 index 0000000000000..fcc010dce735a --- /dev/null +++ b/tool/gocross/gocross-wrapper.ps1 @@ -0,0 +1,220 @@ +# Copyright (c) Tailscale Inc & AUTHORS +# SPDX-License-Identifier: BSD-3-Clause + +#Requires -Version 7.4 + +$ErrorActionPreference = 'Stop' +Set-StrictMode -Version 3.0 + +if (($Env:CI -eq 'true') -and ($Env:NOPWSHDEBUG -ne 'true')) { + Set-PSDebug -Trace 1 +} + +<# + .DESCRIPTION + Copies the script's $args variable into an array, which is easier to work with + when preparing to start child processes. +#> +function Copy-ScriptArgs { + $list = [System.Collections.Generic.List[string]]::new($Script:args.Count) + foreach ($arg in $Script:args) { + $list.Add($arg) + } + return $list.ToArray() +} + +<# + .DESCRIPTION + Copies the current environment into a hashtable, which is easier to work with + when preparing to start child processes. +#> +function Copy-Environment { + $result = @{} + foreach ($pair in (Get-Item -Path Env:)) { + $result[$pair.Key] = $pair.Value + } + return $result +} + +<# + .DESCRIPTION + Outputs the fully-qualified path to the repository's root directory. This + function expects to be run from somewhere within a git repository. + The directory containing the git executable must be somewhere in the PATH. +#> +function Get-RepoRoot { + Get-Command -Name 'git' | Out-Null + $repoRoot = & git rev-parse --show-toplevel + if ($LASTEXITCODE -ne 0) { + throw "failed obtaining repo root: git failed with code $LASTEXITCODE" + } + + # Git outputs a path containing forward slashes. Canonicalize. + return [System.IO.Path]::GetFullPath($repoRoot) +} + +<# + .DESCRIPTION + Runs the provided ScriptBlock in a child scope, restoring any changes to the + current working directory once the script block completes. 
+#> +function Start-ChildScope { + param ( + [Parameter(Mandatory = $true)] + [ScriptBlock]$ScriptBlock + ) + + $initialLocation = Get-Location + try { + Invoke-Command -ScriptBlock $ScriptBlock + } + finally { + Set-Location -Path $initialLocation + } +} + +<# + .SYNOPSIS + Write-Output with timestamps prepended to each line. +#> +function Write-Log { + param ($message) + $timestamp = (Get-Date).ToString('yyyy-MM-dd HH:mm:ss') + Write-Output "$timestamp - $message" +} + +$bootstrapScriptBlock = { + + $repoRoot = Get-RepoRoot + + Set-Location -LiteralPath $repoRoot + + switch -Wildcard -File .\go.toolchain.rev { + "/*" { $toolchain = $_ } + default { + $rev = $_ + $tsgo = Join-Path $Env:USERPROFILE '.cache' 'tsgo' + $toolchain = Join-Path $tsgo $rev + if (-not (Test-Path -LiteralPath "$toolchain.extracted" -PathType Leaf -ErrorAction SilentlyContinue)) { + New-Item -Force -Path $tsgo -ItemType Directory | Out-Null + Remove-Item -Force -Recurse -LiteralPath $toolchain -ErrorAction SilentlyContinue + Write-Log "Downloading Go toolchain $rev" + + # Values from https://web.archive.org/web/20250227081443/https://learn.microsoft.com/en-us/dotnet/api/system.runtime.interopservices.architecture?view=net-9.0 + $cpuArch = ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture | Out-String -NoNewline) + # Comparison in switch is case-insensitive by default. 
+ switch ($cpuArch) { + 'x86' { $goArch = '386' } + 'x64' { $goArch = 'amd64' } + default { $goArch = $cpuArch } + } + + Invoke-WebRequest -Uri "https://github.com/tailscale/go/releases/download/build-$rev/windows-$goArch.tar.gz" -OutFile "$toolchain.tar.gz" + try { + New-Item -Force -Path $toolchain -ItemType Directory | Out-Null + Start-ChildScope -ScriptBlock { + Set-Location -LiteralPath $toolchain + tar --strip-components=1 -xf "$toolchain.tar.gz" + if ($LASTEXITCODE -ne 0) { + throw "tar failed with exit code $LASTEXITCODE" + } + } + $rev | Out-File -FilePath "$toolchain.extracted" + } + finally { + Remove-Item -Force "$toolchain.tar.gz" -ErrorAction Continue + } + + # Cleanup old toolchains. + $maxDays = 90 + $oldFiles = Get-ChildItem -Path $tsgo -Filter '*.extracted' -File -Recurse -Depth 1 | Where-Object { $_.LastWriteTime -lt (Get-Date).AddDays(-$maxDays) } + foreach ($file in $oldFiles) { + Write-Log "Cleaning up old Go toolchain $($file.Basename)" + Remove-Item -LiteralPath $file.FullName -Force -ErrorAction Continue + $dirName = Join-Path $file.DirectoryName $file.Basename -Resolve -ErrorAction Continue + if ($dirName -and (Test-Path -LiteralPath $dirName -PathType Container -ErrorAction Continue)) { + Remove-Item -LiteralPath $dirName -Recurse -Force -ErrorAction Continue + } + } + } + } + } + + if ($Env:TS_USE_GOCROSS -ne '1') { + return + } + + if (Test-Path -LiteralPath $toolchain -PathType Container -ErrorAction SilentlyContinue) { + $goMod = Join-Path $repoRoot 'go.mod' -Resolve + $goLine = Get-Content -LiteralPath $goMod | Select-String -Pattern '^go (.*)$' -List + $wantGoMinor = $goLine.Matches.Groups[1].Value.split('.')[1] + $versionFile = Join-Path $toolchain 'VERSION' + if (Test-Path -LiteralPath $versionFile -PathType Leaf -ErrorAction SilentlyContinue) { + try { + $haveGoMinor = ((Get-Content -LiteralPath $versionFile -TotalCount 1).split('.')[1]) -replace 'rc.*', '' + } + catch { + } + } + + if ([string]::IsNullOrEmpty($haveGoMinor) -or 
($haveGoMinor -lt $wantGoMinor)) { + Remove-Item -Force -Recurse -LiteralPath $toolchain -ErrorAction Continue + Remove-Item -Force -LiteralPath "$toolchain.extracted" -ErrorAction Continue + } + } + + $wantVer = & git rev-parse HEAD + $gocrossOk = $false + $gocrossPath = '.\gocross.exe' + if (Get-Command -Name $gocrossPath -CommandType Application -ErrorAction SilentlyContinue) { + $gotVer = & $gocrossPath gocross-version 2> $null + if ($gotVer -eq $wantVer) { + $gocrossOk = $true + } + } + + if (-not $gocrossOk) { + $goBuildEnv = Copy-Environment + $goBuildEnv['CGO_ENABLED'] = '0' + $goBuildEnv.Remove('GOOS') + $goBuildEnv.Remove('GOARCH') + $goBuildEnv.Remove('GO111MODULE') + $goBuildEnv.Remove('GOROOT') + + $procExe = Join-Path $toolchain 'bin' 'go.exe' -Resolve + $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $goBuildEnv -ArgumentList 'build', '-o', $gocrossPath, "-ldflags=-X=tailscale.com/version.gitCommitStamp=$wantVer", 'tailscale.com/tool/gocross' -NoNewWindow -Wait -PassThru + if ($proc.ExitCode -ne 0) { + throw 'error building gocross' + } + } + +} # bootstrapScriptBlock + +Start-ChildScope -ScriptBlock $bootstrapScriptBlock + +$repoRoot = Get-RepoRoot + +$execEnv = Copy-Environment +$execEnv.Remove('GOROOT') + +$argList = Copy-ScriptArgs + +if ($Env:TS_USE_GOCROSS -ne '1') { + $revFile = Join-Path $repoRoot 'go.toolchain.rev' -Resolve + switch -Wildcard -File $revFile { + "/*" { $toolchain = $_ } + default { + $rev = $_ + $tsgo = Join-Path $Env:USERPROFILE '.cache' 'tsgo' + $toolchain = Join-Path $tsgo $rev -Resolve + } + } + + $procExe = Join-Path $toolchain 'bin' 'go.exe' -Resolve + $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $execEnv -ArgumentList $argList -NoNewWindow -Wait -PassThru + exit $proc.ExitCode +} + +$procExe = Join-Path $repoRoot 'gocross.exe' -Resolve +$proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $execEnv -ArgumentList $argList 
-NoNewWindow -Wait -PassThru +exit $proc.ExitCode diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index 90485d31b95af..d93b137aab6f5 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -15,6 +15,12 @@ if [[ "${CI:-}" == "true" && "${NOBASHDEBUG:-}" != "true" ]]; then set -x fi +if [[ "${OSTYPE:-}" == "cygwin" || "${OSTYPE:-}" == "msys" ]]; then + hash pwsh 2>/dev/null || { echo >&2 "This operation requires PowerShell Core."; exit 1; } + pwsh -NoProfile -ExecutionPolicy Bypass "${BASH_SOURCE%/*}/gocross-wrapper.ps1" "$@" + exit +fi + # Locate a bootstrap toolchain and (re)build gocross if necessary. We run all of # this in a subshell because posix shell semantics make it very easy to # accidentally mutate the input environment that will get passed to gocross at diff --git a/tool/gocross/gocross.go b/tool/gocross/gocross.go index d14ea03885868..6d5d06aebace4 100644 --- a/tool/gocross/gocross.go +++ b/tool/gocross/gocross.go @@ -16,6 +16,7 @@ import ( "os" "path/filepath" "runtime/debug" + "strings" "tailscale.com/atomicfile" ) @@ -68,8 +69,13 @@ func main() { fmt.Fprintf(os.Stderr, "usage: gocross write-wrapper-script \n") os.Exit(1) } - if err := atomicfile.WriteFile(os.Args[2], wrapperScript, 0755); err != nil { - fmt.Fprintf(os.Stderr, "writing wrapper script: %v\n", err) + if err := atomicfile.WriteFile(os.Args[2], wrapperScriptBash, 0755); err != nil { + fmt.Fprintf(os.Stderr, "writing bash wrapper script: %v\n", err) + os.Exit(1) + } + psFileName := strings.TrimSuffix(os.Args[2], filepath.Ext(os.Args[2])) + ".ps1" + if err := atomicfile.WriteFile(psFileName, wrapperScriptPowerShell, 0755); err != nil { + fmt.Fprintf(os.Stderr, "writing PowerShell wrapper script: %v\n", err) os.Exit(1) } os.Exit(0) @@ -112,7 +118,10 @@ func main() { } //go:embed gocross-wrapper.sh -var wrapperScript []byte +var wrapperScriptBash []byte + +//go:embed gocross-wrapper.ps1 +var wrapperScriptPowerShell []byte func 
debugf(format string, args ...any) { debug := os.Getenv("GOCROSS_DEBUG") diff --git a/tool/gocross/gocross_wrapper_test.go b/tool/gocross/gocross_wrapper_test.go index f4dcec4292695..6937ccec7188f 100644 --- a/tool/gocross/gocross_wrapper_test.go +++ b/tool/gocross/gocross_wrapper_test.go @@ -21,7 +21,7 @@ func TestGocrossWrapper(t *testing.T) { t.Fatalf("gocross-wrapper.sh failed: %v\n%s", err, out) } if i > 0 && !strings.Contains(string(out), "gocross_ok=1\n") { - t.Errorf("expected to find 'gocross-ok=1'; got output:\n%s", out) + t.Errorf("expected to find 'gocross_ok=1'; got output:\n%s", out) } } } diff --git a/tool/gocross/gocross_wrapper_windows_test.go b/tool/gocross/gocross_wrapper_windows_test.go new file mode 100644 index 0000000000000..aa4277425d442 --- /dev/null +++ b/tool/gocross/gocross_wrapper_windows_test.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "os" + "os/exec" + "strings" + "testing" +) + +func TestGocrossWrapper(t *testing.T) { + for i := range 2 { // once to build gocross; second to test it's cached + cmd := exec.Command("pwsh", "-NoProfile", "-ExecutionPolicy", "Bypass", ".\\gocross-wrapper.ps1", "version") + cmd.Env = append(os.Environ(), "CI=true", "NOPWSHDEBUG=false", "TS_USE_GOCROSS=1") // for Set-PSDebug verbosity + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("gocross-wrapper.ps1 failed: %v\n%s", err, out) + } + if i > 0 && !strings.Contains(string(out), "$gocrossOk = $true\r\n") { + t.Errorf("expected to find '$gocrossOk = $true'; got output:\n%s", out) + } + } +} diff --git a/tool/gocross/toolchain.go b/tool/gocross/toolchain.go index f422e289e3571..9cf7f892b9b17 100644 --- a/tool/gocross/toolchain.go +++ b/tool/gocross/toolchain.go @@ -60,7 +60,15 @@ func getToolchain() (toolchainDir, gorootDir string, err error) { return "", "", err } - cache := filepath.Join(os.Getenv("HOME"), ".cache") + homeDir, err := os.UserHomeDir() + 
if err != nil { + return "", "", err + } + + // We use ".cache" instead of os.UserCacheDir for legacy reasons and we + // don't want to break that on platforms where the latter returns a different + // result. + cache := filepath.Join(homeDir, ".cache") toolchainDir = filepath.Join(cache, "tsgo", rev) gorootDir = filepath.Join(cache, "tsgoroot", rev) From 55698c8511cb2e52fb41fa89dd71093a02cdff93 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 18 Aug 2025 10:56:17 -0700 Subject: [PATCH 0226/1093] ipn/localapi: plumb an event bus through the localapi.Handler (#16892) Some of the operations of the local API need an event bus to correctly instantiate other components (notably including the portmapper). This commit adds that, and as the parameter list is starting to get a bit long and hard to read, I took the opportunity to move the arguments to a config type. Only a few call sites needed to be updated and this API is not intended for general use, so I did not bother to stage the change. Updates #15160 Updates #16842 Change-Id: I7b057d71161bd859f5acb96e2f878a34c85be0ef Signed-off-by: M. J. 
Fromberger --- ipn/ipnserver/server.go | 8 +++++++- ipn/localapi/localapi.go | 25 ++++++++++++++++++++++--- net/portmapper/portmapper.go | 7 ++----- tsnet/tsnet.go | 16 ++++++++++++++-- 4 files changed, 45 insertions(+), 11 deletions(-) diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index a7ded9c0088ec..fdbd82b0b9e33 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -199,7 +199,13 @@ func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) { ci = actorWithAccessOverride(actor, string(reason)) } - lah := localapi.NewHandler(ci, lb, s.logf, s.backendLogID) + lah := localapi.NewHandler(localapi.HandlerConfig{ + Actor: ci, + Backend: lb, + Logf: s.logf, + LogID: s.backendLogID, + EventBus: lb.Sys().Bus.Get(), + }) if actor, ok := ci.(*actor); ok { lah.PermitRead, lah.PermitWrite = actor.Permissions(lb.OperatorUserID()) lah.PermitCert = actor.CanFetchCerts() diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 0acc5a65fca8a..a199a29082aab 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -172,9 +172,26 @@ var ( metrics = map[string]*clientmetric.Metric{} ) -// NewHandler creates a new LocalAPI HTTP handler. All parameters are required. -func NewHandler(actor ipnauth.Actor, b *ipnlocal.LocalBackend, logf logger.Logf, logID logid.PublicID) *Handler { - return &Handler{Actor: actor, b: b, logf: logf, backendLogID: logID, clock: tstime.StdClock{}} +// NewHandler creates a new LocalAPI HTTP handler from the given config. +func NewHandler(cfg HandlerConfig) *Handler { + return &Handler{ + Actor: cfg.Actor, + b: cfg.Backend, + logf: cfg.Logf, + backendLogID: cfg.LogID, + clock: tstime.StdClock{}, + eventBus: cfg.EventBus, + } +} + +// HandlerConfig carries the settings for a local API handler. +// All fields are required. 
+type HandlerConfig struct { + Actor ipnauth.Actor + Backend *ipnlocal.LocalBackend + Logf logger.Logf + LogID logid.PublicID + EventBus *eventbus.Bus } type Handler struct { @@ -203,6 +220,7 @@ type Handler struct { logf logger.Logf backendLogID logid.PublicID clock tstime.Clock + eventBus *eventbus.Bus // read-only after initialization } func (h *Handler) Logf(format string, args ...any) { @@ -850,6 +868,7 @@ func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { NetMon: h.b.NetMon(), DebugKnobs: debugKnobs, ControlKnobs: h.b.ControlKnobs(), + EventBus: h.eventBus, OnChange: func() { logf("portmapping changed.") logf("have mapping: %v", c.HaveMapping()) diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 30535157cc892..a1ab868155219 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -209,11 +209,8 @@ func (m *pmpMapping) Release(ctx context.Context) { // Config carries the settings for a [Client]. type Config struct { - // EventBus, if non-nil, is used for event publication and subscription by - // portmapper clients created from this config. - // - // TODO(creachadair): As of 2025-03-19 this is optional, but is intended to - // become required non-nil. + // EventBus, which must be non-nil, is used for event publication and + // subscription by portmapper clients created from this config. EventBus *eventbus.Bus // Logf is called to generate text logs for the client. If nil, logger.Discard is used. diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 2715917a2f1e9..06709bf8b017d 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -274,7 +274,13 @@ func (s *Server) Loopback() (addr string, proxyCred, localAPICred string, err er // out the CONNECT code from tailscaled/proxy.go that uses // httputil.ReverseProxy and adding auth support. 
go func() { - lah := localapi.NewHandler(ipnauth.Self, s.lb, s.logf, s.logid) + lah := localapi.NewHandler(localapi.HandlerConfig{ + Actor: ipnauth.Self, + Backend: s.lb, + Logf: s.logf, + LogID: s.logid, + EventBus: s.sys.Bus.Get(), + }) lah.PermitWrite = true lah.PermitRead = true lah.RequiredPassword = s.localAPICred @@ -676,7 +682,13 @@ func (s *Server) start() (reterr error) { go s.printAuthURLLoop() // Run the localapi handler, to allow fetching LetsEncrypt certs. - lah := localapi.NewHandler(ipnauth.Self, lb, tsLogf, s.logid) + lah := localapi.NewHandler(localapi.HandlerConfig{ + Actor: ipnauth.Self, + Backend: lb, + Logf: tsLogf, + LogID: s.logid, + EventBus: sys.Bus.Get(), + }) lah.PermitWrite = true lah.PermitRead = true From d92789affa7f46cf52f94f0d5ed36a9095ece00b Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 18 Aug 2025 14:33:02 -0600 Subject: [PATCH 0227/1093] tool/gocross: don't set executable bits on PowerShell script Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- tool/gocross/gocross.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tool/gocross/gocross.go b/tool/gocross/gocross.go index 6d5d06aebace4..c71012d73778b 100644 --- a/tool/gocross/gocross.go +++ b/tool/gocross/gocross.go @@ -74,7 +74,7 @@ func main() { os.Exit(1) } psFileName := strings.TrimSuffix(os.Args[2], filepath.Ext(os.Args[2])) + ".ps1" - if err := atomicfile.WriteFile(psFileName, wrapperScriptPowerShell, 0755); err != nil { + if err := atomicfile.WriteFile(psFileName, wrapperScriptPowerShell, 0644); err != nil { fmt.Fprintf(os.Stderr, "writing PowerShell wrapper script: %v\n", err) os.Exit(1) } From 84472167dd9cf0a9e1c3c911ac91df91a3ce6de8 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 18 Aug 2025 15:27:16 -0600 Subject: [PATCH 0228/1093] tool/gocross: fix environment variable clearing in gocross-wrapper.ps1 The -Environment argument to Start-Process is essentially being treated as a delta; removing a 
particular variable from the argument's hash table does not indicate to delete. Instead we must set the value of each unwanted variable to $null. Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- tool/gocross/gocross-wrapper.ps1 | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/tool/gocross/gocross-wrapper.ps1 b/tool/gocross/gocross-wrapper.ps1 index fcc010dce735a..fe0b46996204d 100644 --- a/tool/gocross/gocross-wrapper.ps1 +++ b/tool/gocross/gocross-wrapper.ps1 @@ -176,10 +176,13 @@ $bootstrapScriptBlock = { if (-not $gocrossOk) { $goBuildEnv = Copy-Environment $goBuildEnv['CGO_ENABLED'] = '0' - $goBuildEnv.Remove('GOOS') - $goBuildEnv.Remove('GOARCH') - $goBuildEnv.Remove('GO111MODULE') - $goBuildEnv.Remove('GOROOT') + # Start-Process's -Environment arg applies diffs, so instead of removing + # these variables from $goBuildEnv, we must set them to $null to indicate + # that they should be cleared. + $goBuildEnv['GOOS'] = $null + $goBuildEnv['GOARCH'] = $null + $goBuildEnv['GO111MODULE'] = $null + $goBuildEnv['GOROOT'] = $null $procExe = Join-Path $toolchain 'bin' 'go.exe' -Resolve $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $goBuildEnv -ArgumentList 'build', '-o', $gocrossPath, "-ldflags=-X=tailscale.com/version.gitCommitStamp=$wantVer", 'tailscale.com/tool/gocross' -NoNewWindow -Wait -PassThru @@ -195,7 +198,10 @@ Start-ChildScope -ScriptBlock $bootstrapScriptBlock $repoRoot = Get-RepoRoot $execEnv = Copy-Environment -$execEnv.Remove('GOROOT') +# Start-Process's -Environment arg applies diffs, so instead of removing +# these variables from $execEnv, we must set them to $null to indicate +# that they should be cleared. 
+$execEnv['GOROOT'] = $null $argList = Copy-ScriptArgs From e4031daa086e4a6dce10ab0ffdca1e32b889320c Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 19 Aug 2025 10:46:07 -0400 Subject: [PATCH 0229/1093] .github/Makefile/flake: update nix flake support (#16636) Cleanup nix support, make flake easier to read with nix-systems. This also harmonizes with golinks flake setup and reduces an input dependency by 1. Update deps test to ensure the vendor hash stays harmonized with go.mod. Update make tidy to ensure vendor hash stays current. Overlay the current version of golang, tailscale runs recent releases faster than nixpkgs can update them into the unstable branch. Updates #16637 Signed-off-by: Mike O'Driscoll --- .github/workflows/test.yml | 4 +- Makefile | 3 +- flake.lock | 22 +--------- flake.nix | 90 +++++++++++++++++++++++--------------- go.mod.sri | 2 +- shell.nix | 2 +- 6 files changed, 62 insertions(+), 61 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index fe7849af6f992..17e08ae9dc251 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -720,10 +720,10 @@ jobs: - name: check that 'go mod tidy' is clean working-directory: src run: | - ./tool/go mod tidy + make tidy echo echo - git diff --name-only --exit-code || (echo "Please run 'go mod tidy'."; exit 1) + git diff --name-only --exit-code || (echo "Please run 'make tidy'"; exit 1) licenses: runs-on: ubuntu-24.04 diff --git a/Makefile b/Makefile index 0a7fc28dde8a3..532bded9413b9 100644 --- a/Makefile +++ b/Makefile @@ -8,8 +8,9 @@ PLATFORM ?= "flyio" ## flyio==linux/amd64. Set to "" to build all platforms. vet: ## Run go vet ./tool/go vet ./... 
-tidy: ## Run go mod tidy +tidy: ## Run go mod tidy and update nix flake hashes ./tool/go mod tidy + ./update-flake.sh lint: ## Run golangci-lint ./tool/go run github.com/golangci/golangci-lint/cmd/golangci-lint run diff --git a/flake.lock b/flake.lock index 87f234e3ecab1..1623342c62407 100644 --- a/flake.lock +++ b/flake.lock @@ -16,24 +16,6 @@ "type": "github" } }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1710146030, - "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, "nixpkgs": { "locked": { "lastModified": 1753151930, @@ -53,8 +35,8 @@ "root": { "inputs": { "flake-compat": "flake-compat", - "flake-utils": "flake-utils", - "nixpkgs": "nixpkgs" + "nixpkgs": "nixpkgs", + "systems": "systems" } }, "systems": { diff --git a/flake.nix b/flake.nix index 17d263a8dd3c9..311c422fb0400 100644 --- a/flake.nix +++ b/flake.nix @@ -32,7 +32,7 @@ { inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - flake-utils.url = "github:numtide/flake-utils"; + systems.url = "github:nix-systems/default"; # Used by shell.nix as a compat shim. flake-compat = { url = "github:edolstra/flake-compat"; @@ -43,13 +43,29 @@ outputs = { self, nixpkgs, - flake-utils, + systems, flake-compat, }: let - # tailscaleRev is the git commit at which this flake was imported, - # or the empty string when building from a local checkout of the - # tailscale repo. 
+ go124Version = "1.24.6"; + goHash = "sha256-4ctVgqq1iGaLwEwH3hhogHD2uMmyqvNh+CHhm9R8/b0="; + eachSystem = f: + nixpkgs.lib.genAttrs (import systems) (system: + f (import nixpkgs { + system = system; + overlays = [ + (final: prev: { + go_1_24 = prev.go_1_24.overrideAttrs { + version = go124Version; + src = prev.fetchurl { + url = "https://go.dev/dl/go${go124Version}.src.tar.gz"; + hash = goHash; + }; + }; + }) + ]; + })); tailscaleRev = self.rev or ""; + in { # tailscale takes a nixpkgs package set, and builds Tailscale from # the same commit as this flake. IOW, it provides "tailscale built # from HEAD", where HEAD is "whatever commit you imported the @@ -67,16 +83,20 @@ # So really, this flake is for tailscale devs to dogfood with, if # you're an end user you should be prepared for this flake to not # build periodically. - tailscale = pkgs: - pkgs.buildGo124Module rec { + packages = eachSystem (pkgs: rec { + default = pkgs.buildGo124Module { name = "tailscale"; - + pname = "tailscale"; src = ./.; vendorHash = pkgs.lib.fileContents ./go.mod.sri; - nativeBuildInputs = pkgs.lib.optionals pkgs.stdenv.isLinux [pkgs.makeWrapper]; + nativeBuildInputs = [pkgs.makeWrapper pkgs.installShellFiles]; ldflags = ["-X tailscale.com/version.gitCommitStamp=${tailscaleRev}"]; env.CGO_ENABLED = 0; - subPackages = ["cmd/tailscale" "cmd/tailscaled"]; + subPackages = [ + "cmd/tailscale" + "cmd/tailscaled" + "cmd/tsidp" + ]; doCheck = false; # NOTE: We strip the ${PORT} and $FLAGS because they are unset in the @@ -84,32 +104,31 @@ # point, there should be a NixOS module that allows configuration of these # things, but for now, we hardcode the default of port 41641 (taken from # ./cmd/tailscaled/tailscaled.defaults). 
- postInstall = pkgs.lib.optionalString pkgs.stdenv.isLinux '' - wrapProgram $out/bin/tailscaled --prefix PATH : ${pkgs.lib.makeBinPath [pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.shadow]} - wrapProgram $out/bin/tailscale --suffix PATH : ${pkgs.lib.makeBinPath [pkgs.procps]} + postInstall = + pkgs.lib.optionalString pkgs.stdenv.isLinux '' + wrapProgram $out/bin/tailscaled --prefix PATH : ${pkgs.lib.makeBinPath [pkgs.iproute2 pkgs.iptables pkgs.getent pkgs.shadow]} + wrapProgram $out/bin/tailscale --suffix PATH : ${pkgs.lib.makeBinPath [pkgs.procps]} - sed -i \ - -e "s#/usr/sbin#$out/bin#" \ - -e "/^EnvironmentFile/d" \ - -e 's/''${PORT}/41641/' \ - -e 's/$FLAGS//' \ - ./cmd/tailscaled/tailscaled.service + sed -i \ + -e "s#/usr/sbin#$out/bin#" \ + -e "/^EnvironmentFile/d" \ + -e 's/''${PORT}/41641/' \ + -e 's/$FLAGS//' \ + ./cmd/tailscaled/tailscaled.service - install -D -m0444 -t $out/lib/systemd/system ./cmd/tailscaled/tailscaled.service - ''; + install -D -m0444 -t $out/lib/systemd/system ./cmd/tailscaled/tailscaled.service + '' + + pkgs.lib.optionalString (pkgs.stdenv.buildPlatform.canExecute pkgs.stdenv.hostPlatform) '' + installShellCompletion --cmd tailscale \ + --bash <($out/bin/tailscale completion bash) \ + --fish <($out/bin/tailscale completion fish) \ + --zsh <($out/bin/tailscale completion zsh) + ''; }; + tailscale = default; + }); - # This whole blob makes the tailscale package available for all - # OS/CPU combos that nix supports, as well as a dev shell so that - # "nix develop" and "nix-shell" give you a dev env. 
- flakeForSystem = nixpkgs: system: let - pkgs = nixpkgs.legacyPackages.${system}; - ts = tailscale pkgs; - in { - packages = { - default = ts; - tailscale = ts; - }; + devShells = eachSystem (pkgs: { devShell = pkgs.mkShell { packages = with pkgs; [ curl @@ -126,9 +145,8 @@ e2fsprogs ]; }; - }; - in - flake-utils.lib.eachDefaultSystem (system: flakeForSystem nixpkgs system); + }); + }; } -# nix-direnv cache busting line: sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= +# nix-direnv cache busting line: sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= diff --git a/go.mod.sri b/go.mod.sri index 845086191699a..34e9a57de84ed 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= +sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= diff --git a/shell.nix b/shell.nix index 2eb5b441a2d87..9dfdf4935b5af 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-4QTSspHLYJfzlontQ7msXyOB5gzq7ZwSvWmKuYY5klA= +# nix-direnv cache busting line: sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= From 2581e387899413e9933d28101a1a3707331f0327 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 19 Aug 2025 12:13:55 -0400 Subject: [PATCH 0230/1093] prober: update runall handler to be generic (#16895) Update the runall handler to be more generic with an exclude param to exclude multiple probes as the requesters definition. 
Updates tailscale/corp#27370 Signed-off-by: Mike O'Driscoll --- prober/prober.go | 5 +++- prober/prober_test.go | 69 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 72 insertions(+), 2 deletions(-) diff --git a/prober/prober.go b/prober/prober.go index b69d26821dfba..9c494c3c98d62 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -18,6 +18,7 @@ import ( "maps" "math/rand" "net/http" + "slices" "sync" "time" @@ -585,10 +586,12 @@ type RunHandlerAllResponse struct { } func (p *Prober) RunAllHandler(w http.ResponseWriter, r *http.Request) error { + excluded := r.URL.Query()["exclude"] + probes := make(map[string]*Probe) p.mu.Lock() for _, probe := range p.probes { - if !probe.IsContinuous() && probe.name != "derpmap-probe" { + if !probe.IsContinuous() && !slices.Contains(excluded, probe.name) { probes[probe.name] = probe } } diff --git a/prober/prober_test.go b/prober/prober_test.go index 7cb841936b33f..15db21a5efe5b 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -11,6 +11,7 @@ import ( "io" "net/http" "net/http/httptest" + "net/url" "strings" "sync" "sync/atomic" @@ -722,7 +723,7 @@ func TestRunAllHandler(t *testing.T) { mux.Handle("/prober/runall/", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{})) - req, err := http.NewRequest("GET", server.URL+"/prober/runall/", nil) + req, err := http.NewRequest("GET", server.URL+"/prober/runall", nil) if err != nil { t.Fatalf("failed to create request: %v", err) } @@ -757,6 +758,72 @@ func TestRunAllHandler(t *testing.T) { } +func TestExcludeInRunAll(t *testing.T) { + clk := newFakeTime() + p := newForTest(clk.Now, clk.NewTicker).WithOnce(true) + + wantJSONResponse := RunHandlerAllResponse{ + Results: map[string]RunHandlerResponse{ + "includedProbe": { + ProbeInfo: ProbeInfo{ + Name: "includedProbe", + Interval: probeInterval, + Status: ProbeStatusSucceeded, + RecentResults: []bool{true, true}, + }, + PreviousSuccessRatio: 1, + }, + }, + } + + 
p.Run("includedProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + p.Run("excludedProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + p.Run("excludedOtherProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + defer server.Close() + + mux.Handle("/prober/runall", tsweb.StdHandler(tsweb.ReturnHandlerFunc(p.RunAllHandler), tsweb.HandlerOptions{})) + + req, err := http.NewRequest("GET", server.URL+"/prober/runall", nil) + if err != nil { + t.Fatalf("failed to create request: %v", err) + } + + // Exclude probes with "excluded" in their name + req.URL.RawQuery = url.Values{ + "exclude": []string{"excludedProbe", "excludedOtherProbe"}, + }.Encode() + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("failed to make request: %v", err) + } + + if resp.StatusCode != http.StatusOK { + t.Errorf("unexpected response code: got %d, want %d", resp.StatusCode, http.StatusOK) + } + + var gotJSON RunHandlerAllResponse + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read response body: %v", err) + } + + if err := json.Unmarshal(body, &gotJSON); err != nil { + t.Fatalf("failed to unmarshal JSON response: %v; body: %s", err, body) + } + + if resp.Header.Get("Content-Type") != "application/json" { + t.Errorf("unexpected content type: got %q, want application/json", resp.Header.Get("Content-Type")) + } + + if diff := cmp.Diff(wantJSONResponse, gotJSON, cmpopts.IgnoreFields(ProbeInfo{}, "Start", "End", "Labels", "RecentLatencies")); diff != "" { + t.Errorf("unexpected JSON response (-want +got):\n%s", diff) + } +} + type fakeTicker struct { ch chan time.Time interval time.Duration From b28699cd316e339c86e1f0a4751ed7021db3c787 Mon Sep 17 00:00:00 2001 From: Adrian Dewhurst Date: Tue, 19 Aug 2025 12:05:41 -0400 Subject: [PATCH 0231/1093] types/views: add min/max helpers to 
views.Slice This has come up in a few situations recently and adding these helpers is much better than copying the slice (calling AsSlice()) in order to use slices.Max and friends. Updates #cleanup Change-Id: Ib289a07d23c3687220c72c4ce341b9695cd875bf Signed-off-by: Adrian Dewhurst --- types/views/views.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/types/views/views.go b/types/views/views.go index 6d15b80d4e499..252f126a79f57 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -7,6 +7,7 @@ package views import ( "bytes" + "cmp" jsonv1 "encoding/json" "errors" "fmt" @@ -363,6 +364,20 @@ func (v Slice[T]) ContainsFunc(f func(T) bool) bool { return slices.ContainsFunc(v.ж, f) } +// MaxFunc returns the maximal value in v, using cmp to compare elements. It +// panics if v is empty. If there is more than one maximal element according to +// the cmp function, MaxFunc returns the first one. See also [slices.MaxFunc]. +func (v Slice[T]) MaxFunc(cmp func(a, b T) int) T { + return slices.MaxFunc(v.ж, cmp) +} + +// MinFunc returns the minimal value in v, using cmp to compare elements. It +// panics if v is empty. If there is more than one minimal element according to +// the cmp function, MinFunc returns the first one. See also [slices.MinFunc]. +func (v Slice[T]) MinFunc(cmp func(a, b T) int) T { + return slices.MinFunc(v.ж, cmp) +} + // AppendStrings appends the string representation of each element in v to dst. func AppendStrings[T fmt.Stringer](dst []string, v Slice[T]) []string { for _, x := range v.ж { @@ -383,6 +398,20 @@ func SliceEqual[T comparable](a, b Slice[T]) bool { return slices.Equal(a.ж, b.ж) } +// SliceMax returns the maximal value in v. It panics if v is empty. For +// floating point T, SliceMax propagates NaNs (any NaN value in v forces the +// output to be NaN). See also [slices.Max]. +func SliceMax[T cmp.Ordered](v Slice[T]) T { + return slices.Max(v.ж) +} + +// SliceMin returns the minimal value in v. 
It panics if v is empty. For +// floating point T, SliceMin propagates NaNs (any NaN value in v forces the +// output to be NaN). See also [slices.Min]. +func SliceMin[T cmp.Ordered](v Slice[T]) T { + return slices.Min(v.ж) +} + // shortOOOLen (short Out-of-Order length) is the slice length at or // under which we attempt to compare two slices quadratically rather // than allocating memory for a map in SliceEqualAnyOrder and From 5c560d748903b06713a0506b4f1f6f58aa273973 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Fri, 15 Aug 2025 13:29:56 -0700 Subject: [PATCH 0232/1093] tsconsensus: check for bootstrap error We have been unintentionally ignoring errors from calling bootstrap. bootstrap sometimes calls raft.BootstrapCluster which sometimes returns a safe to ignore error, handle that case appropriately. Updates #14667 Signed-off-by: Fran Bull --- tsconsensus/tsconsensus.go | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/tsconsensus/tsconsensus.go b/tsconsensus/tsconsensus.go index b6bf373102aa6..53a2c3f54fa64 100644 --- a/tsconsensus/tsconsensus.go +++ b/tsconsensus/tsconsensus.go @@ -209,7 +209,21 @@ func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag strin } c.raft = r - c.bootstrap(auth.AllowedPeers()) + // we may already be in a consensus (see comment above before startRaft) but we're going to + // try to bootstrap anyway in case this is a fresh start. + err = c.bootstrap(auth.AllowedPeers()) + if err != nil { + if errors.Is(err, raft.ErrCantBootstrap) { + // Raft cluster state can be persisted, if we try to call raft.BootstrapCluster when + // we already have cluster state it will return raft.ErrCantBootstrap. It's safe to + // ignore (according to the comment in the raft code), and we can expect that the other + // nodes of the cluster will become available at some point and we can get back into the + // consensus. 
+ log.Print("Bootstrap: raft has cluster state, waiting for peers") + } else { + return nil, err + } + } if cfg.ServeDebugMonitor { srv, err = serveMonitor(&c, ts, netip.AddrPortFrom(c.self.hostAddr, cfg.MonitorPort).String()) @@ -292,9 +306,9 @@ type Consensus struct { // bootstrap tries to join a raft cluster, or start one. // // We need to do the very first raft cluster configuration, but after that raft manages it. -// bootstrap is called at start up, and we are not currently aware of what the cluster config might be, +// bootstrap is called at start up, and we may not currently be aware of what the cluster config might be, // our node may already be in it. Try to join the raft cluster of all the other nodes we know about, and -// if unsuccessful, assume we are the first and start our own. +// if unsuccessful, assume we are the first and try to start our own. // // It's possible for bootstrap to return an error, or start a errant breakaway cluster. // From d4b720012987f67a3b3a636a33f89c446590c467 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 19 Aug 2025 14:44:39 -0700 Subject: [PATCH 0233/1093] net/udprelay: use batching.Conn (#16866) This significantly improves throughput of a peer relay server on Linux. Server.packetReadLoop no longer passes sockets down the stack. Instead, packet handling methods return a netip.AddrPort and []byte, which packetReadLoop gathers together for eventual batched writes on the appropriate socket(s). 
Updates tailscale/corp#31164 Signed-off-by: Jordan Whited --- cmd/tailscaled/depaware.txt | 2 +- net/batching/conn.go | 1 - net/batching/conn_default.go | 2 + net/batching/conn_linux.go | 6 +- net/batching/conn_linux_test.go | 4 +- net/udprelay/server.go | 201 +++++++++++++++++++++++--------- 6 files changed, 153 insertions(+), 63 deletions(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 07f5958ca37f6..e60c1cb0410c2 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -311,7 +311,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ - 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ diff --git a/net/batching/conn.go b/net/batching/conn.go index 2c6100258cb04..77cdf8c849ca4 100644 --- a/net/batching/conn.go +++ b/net/batching/conn.go @@ -32,7 +32,6 @@ type Conn interface { // message may fall on either side of a nonzero. // // Each [ipv6.Message.OOB] must be sized to at least MinControlMessageSize(). - // len(msgs) must be at least MinReadBatchMsgsLen(). ReadBatch(msgs []ipv6.Message, flags int) (n int, err error) // WriteBatchTo writes buffs to addr. 
// diff --git a/net/batching/conn_default.go b/net/batching/conn_default.go index ed5c494f3fb3a..37d644f50624c 100644 --- a/net/batching/conn_default.go +++ b/net/batching/conn_default.go @@ -19,3 +19,5 @@ var controlMessageSize = 0 func MinControlMessageSize() int { return controlMessageSize } + +const IdealBatchSize = 1 diff --git a/net/batching/conn_linux.go b/net/batching/conn_linux.go index 09a80ed9f5e34..7f6c4ed422e31 100644 --- a/net/batching/conn_linux.go +++ b/net/batching/conn_linux.go @@ -384,7 +384,7 @@ func setGSOSizeInControl(control *[]byte, gsoSize uint16) { } // TryUpgradeToConn probes the capabilities of the OS and pconn, and upgrades -// pconn to a [Conn] if appropriate. A batch size of MinReadBatchMsgsLen() is +// pconn to a [Conn] if appropriate. A batch size of [IdealBatchSize] is // suggested for the best performance. func TryUpgradeToConn(pconn nettype.PacketConn, network string, batchSize int) nettype.PacketConn { if runtime.GOOS != "linux" { @@ -457,6 +457,4 @@ func MinControlMessageSize() int { return controlMessageSize } -func MinReadBatchMsgsLen() int { - return 128 -} +const IdealBatchSize = 128 diff --git a/net/batching/conn_linux_test.go b/net/batching/conn_linux_test.go index e33ad6d7aad75..e518c3f9f06d9 100644 --- a/net/batching/conn_linux_test.go +++ b/net/batching/conn_linux_test.go @@ -310,7 +310,7 @@ func TestMinReadBatchMsgsLen(t *testing.T) { // So long as magicsock uses [Conn], and [wireguard-go/conn.Bind] API is // shaped for wireguard-go to control packet memory, these values should be // aligned. 
- if MinReadBatchMsgsLen() != conn.IdealBatchSize { - t.Fatalf("MinReadBatchMsgsLen():%d != conn.IdealBatchSize(): %d", MinReadBatchMsgsLen(), conn.IdealBatchSize) + if IdealBatchSize != conn.IdealBatchSize { + t.Fatalf("IdealBatchSize: %d != conn.IdealBatchSize(): %d", IdealBatchSize, conn.IdealBatchSize) } } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index e138c33f23f32..a039c99302752 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -20,8 +20,11 @@ import ( "time" "go4.org/mem" + "golang.org/x/net/ipv6" "tailscale.com/client/local" "tailscale.com/disco" + "tailscale.com/net/batching" + "tailscale.com/net/netaddr" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" "tailscale.com/net/packet" @@ -57,10 +60,10 @@ type Server struct { bindLifetime time.Duration steadyStateLifetime time.Duration bus *eventbus.Bus - uc4 *net.UDPConn // always non-nil - uc4Port uint16 // always nonzero - uc6 *net.UDPConn // may be nil if IPv6 bind fails during initialization - uc6Port uint16 // may be zero if IPv6 bind fails during initialization + uc4 batching.Conn // always non-nil + uc4Port uint16 // always nonzero + uc6 batching.Conn // may be nil if IPv6 bind fails during initialization + uc6Port uint16 // may be zero if IPv6 bind fails during initialization closeOnce sync.Once wg sync.WaitGroup closeCh chan struct{} @@ -96,9 +99,9 @@ type serverEndpoint struct { allocatedAt time.Time } -func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, conn *net.UDPConn, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, serverDisco key.DiscoPublic) (write []byte, to netip.AddrPort) { if senderIndex != 0 && senderIndex != 1 { - return + return nil, netip.AddrPort{} } otherSender := 0 @@ -121,15 +124,15 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex err := 
validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon) if err != nil { // silently drop - return + return nil, netip.AddrPort{} } if discoMsg.Generation == 0 { // Generation must be nonzero, silently drop - return + return nil, netip.AddrPort{} } if e.handshakeGeneration[senderIndex] == discoMsg.Generation { // we've seen this generation before, silently drop - return + return nil, netip.AddrPort{} } e.handshakeGeneration[senderIndex] = discoMsg.Generation e.handshakeAddrPorts[senderIndex] = from @@ -144,19 +147,18 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex gh.VNI.Set(e.vni) err = gh.Encode(reply) if err != nil { - return + return nil, netip.AddrPort{} } reply = append(reply, disco.Magic...) reply = serverDisco.AppendTo(reply) box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil)) reply = append(reply, box...) - conn.WriteMsgUDPAddrPort(reply, nil, from) - return + return reply, from case *disco.BindUDPRelayEndpointAnswer: err := validateVNIAndRemoteKey(discoMsg.BindUDPRelayEndpointCommon) if err != nil { // silently drop - return + return nil, netip.AddrPort{} } generation := e.handshakeGeneration[senderIndex] if generation == 0 || // we have no active handshake @@ -164,23 +166,23 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex e.handshakeAddrPorts[senderIndex] != from || // mismatching source for the active handshake !bytes.Equal(e.challenge[senderIndex][:], discoMsg.Challenge[:]) { // mismatching answer for the active handshake // silently drop - return + return nil, netip.AddrPort{} } // Handshake complete. Update the binding for this sender. 
e.boundAddrPorts[senderIndex] = from e.lastSeen[senderIndex] = time.Now() // record last seen as bound time - return + return nil, netip.AddrPort{} default: // unexpected message types, silently drop - return + return nil, netip.AddrPort{} } } -func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, conn *net.UDPConn, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic) (write []byte, to netip.AddrPort) { senderRaw, isDiscoMsg := disco.Source(b) if !isDiscoMsg { // Not a Disco message - return + return nil, netip.AddrPort{} } sender := key.DiscoPublicFromRaw32(mem.B(senderRaw)) senderIndex := -1 @@ -191,63 +193,51 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []by senderIndex = 1 default: // unknown Disco public key - return + return nil, netip.AddrPort{} } const headerLen = len(disco.Magic) + key.DiscoPublicRawLen discoPayload, ok := e.discoSharedSecrets[senderIndex].Open(b[headerLen:]) if !ok { // unable to decrypt the Disco payload - return + return nil, netip.AddrPort{} } discoMsg, err := disco.Parse(discoPayload) if err != nil { // unable to parse the Disco payload - return + return nil, netip.AddrPort{} } - e.handleDiscoControlMsg(from, senderIndex, discoMsg, conn, serverDisco) + return e.handleDiscoControlMsg(from, senderIndex, discoMsg, serverDisco) } -func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeader, b []byte, rxSocket, otherAFSocket *net.UDPConn, serverDisco key.DiscoPublic) { +func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeader, b []byte, serverDisco key.DiscoPublic) (write []byte, to netip.AddrPort) { if !gh.Control { if !e.isBound() { // not a control packet, but serverEndpoint isn't bound - return + return nil, netip.AddrPort{} } - var to netip.AddrPort switch { case from == e.boundAddrPorts[0]: e.lastSeen[0] = time.Now() - to = 
e.boundAddrPorts[1] + return b, e.boundAddrPorts[1] case from == e.boundAddrPorts[1]: e.lastSeen[1] = time.Now() - to = e.boundAddrPorts[0] + return b, e.boundAddrPorts[0] default: // unrecognized source - return - } - // Relay the packet towards the other party via the socket associated - // with the destination's address family. If source and destination - // address families are matching we tx on the same socket the packet - // was received (rxSocket), otherwise we use the "other" socket - // (otherAFSocket). [Server] makes no use of dual-stack sockets. - if from.Addr().Is4() == to.Addr().Is4() { - rxSocket.WriteMsgUDPAddrPort(b, nil, to) - } else if otherAFSocket != nil { - otherAFSocket.WriteMsgUDPAddrPort(b, nil, to) + return nil, netip.AddrPort{} } - return } if gh.Protocol != packet.GeneveProtocolDisco { // control packet, but not Disco - return + return nil, netip.AddrPort{} } msg := b[packet.GeneveFixedHeaderLength:] - e.handleSealedDiscoControlMsg(from, msg, rxSocket, serverDisco) + return e.handleSealedDiscoControlMsg(from, msg, serverDisco) } func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifetime time.Duration) bool { @@ -338,10 +328,10 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve } s.wg.Add(1) - go s.packetReadLoop(s.uc4, s.uc6) + go s.packetReadLoop(s.uc4, s.uc6, true) if s.uc6 != nil { s.wg.Add(1) - go s.packetReadLoop(s.uc6, s.uc4) + go s.packetReadLoop(s.uc6, s.uc4, false) } s.wg.Add(1) go s.endpointGCLoop() @@ -425,6 +415,41 @@ func (s *Server) addrDiscoveryLoop() { } } +// This is a compile-time assertion that [singlePacketConn] implements the +// [batching.Conn] interface. +var _ batching.Conn = (*singlePacketConn)(nil) + +// singlePacketConn implements [batching.Conn] with single packet syscall +// operations. 
+type singlePacketConn struct { + *net.UDPConn +} + +func (c *singlePacketConn) ReadBatch(msgs []ipv6.Message, _ int) (int, error) { + n, ap, err := c.UDPConn.ReadFromUDPAddrPort(msgs[0].Buffers[0]) + if err != nil { + return 0, err + } + msgs[0].N = n + msgs[0].Addr = net.UDPAddrFromAddrPort(netaddr.Unmap(ap)) + return 1, nil +} + +func (c *singlePacketConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, geneve packet.GeneveHeader, offset int) error { + for _, buff := range buffs { + if geneve.VNI.IsSet() { + geneve.Encode(buff) + } else { + buff = buff[offset:] + } + _, err := c.UDPConn.WriteToUDPAddrPort(buff, addr) + if err != nil { + return err + } + } + return nil +} + // listenOn binds an IPv4 and IPv6 socket to port. We consider it successful if // we manage to bind the IPv4 socket. // @@ -433,7 +458,10 @@ func (s *Server) addrDiscoveryLoop() { // across IPv4 and IPv6 if the requested port is zero. // // TODO: make these "re-bindable" in similar fashion to magicsock as a means to -// deal with EDR software closing them. http://go/corp/30118 +// deal with EDR software closing them. http://go/corp/30118. We could re-use +// [magicsock.RebindingConn], which would also remove the need for +// [singlePacketConn], as [magicsock.RebindingConn] also handles fallback to +// single packet syscall operations. 
func (s *Server) listenOn(port int) error { for _, network := range []string{"udp4", "udp6"} { uc, err := net.ListenUDP(network, &net.UDPAddr{Port: port}) @@ -462,11 +490,16 @@ func (s *Server) listenOn(port int) error { } return err } + pc := batching.TryUpgradeToConn(uc, network, batching.IdealBatchSize) + bc, ok := pc.(batching.Conn) + if !ok { + bc = &singlePacketConn{uc} + } if network == "udp4" { - s.uc4 = uc + s.uc4 = bc s.uc4Port = uint16(portUint) } else { - s.uc6 = uc + s.uc6 = bc s.uc6Port = uint16(portUint) } } @@ -526,18 +559,18 @@ func (s *Server) endpointGCLoop() { } } -func (s *Server) handlePacket(from netip.AddrPort, b []byte, rxSocket, otherAFSocket *net.UDPConn) { +func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to netip.AddrPort) { if stun.Is(b) && b[1] == 0x01 { // A b[1] value of 0x01 (STUN method binding) is sufficiently // non-overlapping with the Geneve header where the LSB is always 0 // (part of 6 "reserved" bits). s.netChecker.ReceiveSTUNPacket(b, from) - return + return nil, netip.AddrPort{} } gh := packet.GeneveHeader{} err := gh.Decode(b) if err != nil { - return + return nil, netip.AddrPort{} } // TODO: consider performance implications of holding s.mu for the remainder // of this method, which does a bunch of disco/crypto work depending. 
Keep @@ -547,13 +580,13 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte, rxSocket, otherAFSo e, ok := s.byVNI[gh.VNI.Get()] if !ok { // unknown VNI - return + return nil, netip.AddrPort{} } - e.handlePacket(from, gh, b, rxSocket, otherAFSocket, s.discoPublic) + return e.handlePacket(from, gh, b, s.discoPublic) } -func (s *Server) packetReadLoop(readFromSocket, otherSocket *net.UDPConn) { +func (s *Server) packetReadLoop(readFromSocket, otherSocket batching.Conn, readFromSocketIsIPv4 bool) { defer func() { // We intentionally close the [Server] if we encounter a socket read // error below, at least until socket "re-binding" is implemented as @@ -564,15 +597,73 @@ func (s *Server) packetReadLoop(readFromSocket, otherSocket *net.UDPConn) { s.wg.Done() s.Close() }() - b := make([]byte, 1<<16-1) + + msgs := make([]ipv6.Message, batching.IdealBatchSize) + for i := range msgs { + msgs[i].OOB = make([]byte, batching.MinControlMessageSize()) + msgs[i].Buffers = make([][]byte, 1) + msgs[i].Buffers[0] = make([]byte, 1<<16-1) + } + writeBuffsByDest := make(map[netip.AddrPort][][]byte, batching.IdealBatchSize) + for { + for i := range msgs { + msgs[i] = ipv6.Message{Buffers: msgs[i].Buffers, OOB: msgs[i].OOB[:cap(msgs[i].OOB)]} + } + // TODO: extract laddr from IP_PKTINFO for use in reply - n, from, err := readFromSocket.ReadFromUDPAddrPort(b) + // ReadBatch will split coalesced datagrams before returning, which + // WriteBatchTo will re-coalesce further down. We _could_ be more + // efficient and not split datagrams that belong to the same VNI if they + // are non-control/handshake packets. We pay the memmove/memcopy + // performance penalty for now in the interest of simple single packet + // handlers. 
+ n, err := readFromSocket.ReadBatch(msgs, 0) if err != nil { s.logf("error reading from socket(%v): %v", readFromSocket.LocalAddr(), err) return } - s.handlePacket(from, b[:n], readFromSocket, otherSocket) + + for _, msg := range msgs[:n] { + if msg.N == 0 { + continue + } + buf := msg.Buffers[0][:msg.N] + from := msg.Addr.(*net.UDPAddr).AddrPort() + write, to := s.handlePacket(from, buf) + if !to.IsValid() { + continue + } + if from.Addr().Is4() == to.Addr().Is4() || otherSocket != nil { + buffs, ok := writeBuffsByDest[to] + if !ok { + buffs = make([][]byte, 0, batching.IdealBatchSize) + } + buffs = append(buffs, write) + writeBuffsByDest[to] = buffs + } else { + // This is unexpected. We should never produce a packet to write + // to the "other" socket if the other socket is nil/unbound. + // [server.handlePacket] has to see a packet from a particular + // address family at least once in order for it to return a + // packet to write towards a dest for the same address family. + s.logf("[unexpected] packet from: %v produced packet to: %v while otherSocket is nil", from, to) + } + } + + for dest, buffs := range writeBuffsByDest { + // Write the packet batches via the socket associated with the + // destination's address family. If source and destination address + // families are matching we tx on the same socket the packet was + // received, otherwise we use the "other" socket. [Server] makes no + // use of dual-stack sockets. 
+ if dest.Addr().Is4() == readFromSocketIsIPv4 { + readFromSocket.WriteBatchTo(buffs, dest, packet.GeneveHeader{}, 0) + } else { + otherSocket.WriteBatchTo(buffs, dest, packet.GeneveHeader{}, 0) + } + delete(writeBuffsByDest, dest) + } } } From d986baa18fbca65462f85fcc0f1c19a38a042fcc Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 6 Aug 2025 07:43:58 -0700 Subject: [PATCH 0234/1093] tsconsensus,cmd/natc: add 'follower only' bootstrap option Currently consensus has a bootstrap routine where a tsnet node tries to join each other node with the cluster tag, and if it is not able to join any other node it starts its own cluster. That algorithm is racy, and can result in split brain (more than one leader/cluster) if all the nodes for a cluster are started at the same time. Add a FollowOnly argument to the bootstrap function. If provided this tsnet node will never lead, it will try (and retry with exponential back off) to follow any node it can contact. Add a --follow-only flag to cmd/natc that uses this new tsconsensus functionality. Also slightly reorganize some arguments into opts structs. Updates #14667 Signed-off-by: Fran Bull --- cmd/natc/ippool/consensusippool.go | 15 ++++- cmd/natc/natc.go | 31 ++++++----- tsconsensus/tsconsensus.go | 89 ++++++++++++++++++++++-------- tsconsensus/tsconsensus_test.go | 30 ++++++++-- 4 files changed, 121 insertions(+), 44 deletions(-) diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go index 3bc21bd0357dd..821f12faef801 100644 --- a/cmd/natc/ippool/consensusippool.go +++ b/cmd/natc/ippool/consensusippool.go @@ -149,12 +149,21 @@ func (ipp *ConsensusIPPool) domainLookup(from tailcfg.NodeID, addr netip.Addr) ( return ww, true } +type ClusterOpts struct { + Tag string + StateDir string + FollowOnly bool +} + // StartConsensus is part of the IPPool interface. It starts the raft background routines that handle consensus. 
-func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server, clusterTag string, clusterStateDir string) error { +func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server, opts ClusterOpts) error { cfg := tsconsensus.DefaultConfig() cfg.ServeDebugMonitor = true - cfg.StateDirPath = clusterStateDir - cns, err := tsconsensus.Start(ctx, ts, ipp, clusterTag, cfg) + cfg.StateDirPath = opts.StateDir + cns, err := tsconsensus.Start(ctx, ts, ipp, tsconsensus.BootstrapOpts{ + Tag: opts.Tag, + FollowOnly: opts.FollowOnly, + }, cfg) if err != nil { return err } diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index fdbce3da189b2..2007f0a242719 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -50,18 +50,19 @@ func main() { // Parse flags fs := flag.NewFlagSet("natc", flag.ExitOnError) var ( - debugPort = fs.Int("debug-port", 8893, "Listening port for debug/metrics endpoint") - hostname = fs.String("hostname", "", "Hostname to register the service under") - siteID = fs.Uint("site-id", 1, "an integer site ID to use for the ULA prefix which allows for multiple proxies to act in a HA configuration") - v4PfxStr = fs.String("v4-pfx", "100.64.1.0/24", "comma-separated list of IPv4 prefixes to advertise") - dnsServers = fs.String("dns-servers", "", "comma separated list of upstream DNS to use, including host and port (use system if empty)") - verboseTSNet = fs.Bool("verbose-tsnet", false, "enable verbose logging in tsnet") - printULA = fs.Bool("print-ula", false, "print the ULA prefix and exit") - ignoreDstPfxStr = fs.String("ignore-destinations", "", "comma-separated list of prefixes to ignore") - wgPort = fs.Uint("wg-port", 0, "udp port for wireguard and peer to peer traffic") - clusterTag = fs.String("cluster-tag", "", "optionally run in a consensus cluster with other nodes with this tag") - server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") - stateDir = fs.String("state-dir", "", "path to 
directory in which to store app state") + debugPort = fs.Int("debug-port", 8893, "Listening port for debug/metrics endpoint") + hostname = fs.String("hostname", "", "Hostname to register the service under") + siteID = fs.Uint("site-id", 1, "an integer site ID to use for the ULA prefix which allows for multiple proxies to act in a HA configuration") + v4PfxStr = fs.String("v4-pfx", "100.64.1.0/24", "comma-separated list of IPv4 prefixes to advertise") + dnsServers = fs.String("dns-servers", "", "comma separated list of upstream DNS to use, including host and port (use system if empty)") + verboseTSNet = fs.Bool("verbose-tsnet", false, "enable verbose logging in tsnet") + printULA = fs.Bool("print-ula", false, "print the ULA prefix and exit") + ignoreDstPfxStr = fs.String("ignore-destinations", "", "comma-separated list of prefixes to ignore") + wgPort = fs.Uint("wg-port", 0, "udp port for wireguard and peer to peer traffic") + clusterTag = fs.String("cluster-tag", "", "optionally run in a consensus cluster with other nodes with this tag") + server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") + stateDir = fs.String("state-dir", "", "path to directory in which to store app state") + clusterFollowOnly = fs.Bool("follow-only", false, "Try to find a leader with the cluster tag or exit.") ) ff.Parse(fs, os.Args[1:], ff.WithEnvVarPrefix("TS_NATC")) @@ -163,7 +164,11 @@ func main() { if err != nil { log.Fatalf("Creating cluster state dir failed: %v", err) } - err = cipp.StartConsensus(ctx, ts, *clusterTag, clusterStateDir) + err = cipp.StartConsensus(ctx, ts, ippool.ClusterOpts{ + Tag: *clusterTag, + StateDir: clusterStateDir, + FollowOnly: *clusterFollowOnly, + }) if err != nil { log.Fatalf("StartConsensus: %v", err) } diff --git a/tsconsensus/tsconsensus.go b/tsconsensus/tsconsensus.go index 53a2c3f54fa64..11b039d57223b 100644 --- a/tsconsensus/tsconsensus.go +++ b/tsconsensus/tsconsensus.go @@ -157,13 +157,18 @@ func (sl 
StreamLayer) Accept() (net.Conn, error) { } } +type BootstrapOpts struct { + Tag string + FollowOnly bool +} + // Start returns a pointer to a running Consensus instance. // Calling it with a *tsnet.Server will cause that server to join or start a consensus cluster // with other nodes on the tailnet tagged with the clusterTag. The *tsnet.Server will run the state // machine defined by the raft.FSM also provided, and keep it in sync with the other cluster members' // state machines using Raft. -func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag string, cfg Config) (*Consensus, error) { - if clusterTag == "" { +func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, bootstrapOpts BootstrapOpts, cfg Config) (*Consensus, error) { + if bootstrapOpts.Tag == "" { return nil, errors.New("cluster tag must be provided") } @@ -185,7 +190,7 @@ func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag strin shutdownCtxCancel: shutdownCtxCancel, } - auth := newAuthorization(ts, clusterTag) + auth := newAuthorization(ts, bootstrapOpts.Tag) err := auth.Refresh(shutdownCtx) if err != nil { return nil, fmt.Errorf("auth refresh: %w", err) @@ -211,7 +216,7 @@ func Start(ctx context.Context, ts *tsnet.Server, fsm raft.FSM, clusterTag strin // we may already be in a consensus (see comment above before startRaft) but we're going to // try to bootstrap anyway in case this is a fresh start. 
- err = c.bootstrap(auth.AllowedPeers()) + err = c.bootstrap(shutdownCtx, auth, bootstrapOpts) if err != nil { if errors.Is(err, raft.ErrCantBootstrap) { // Raft cluster state can be persisted, if we try to call raft.BootstrapCluster when @@ -303,14 +308,59 @@ type Consensus struct { shutdownCtxCancel context.CancelFunc } +func (c *Consensus) bootstrapTryToJoinAnyTarget(targets views.Slice[*ipnstate.PeerStatus]) bool { + log.Printf("Bootstrap: Trying to find cluster: num targets to try: %d", targets.Len()) + for _, p := range targets.All() { + if !p.Online { + log.Printf("Bootstrap: Trying to find cluster: tailscale reports not online: %s", p.TailscaleIPs[0]) + continue + } + log.Printf("Bootstrap: Trying to find cluster: trying %s", p.TailscaleIPs[0]) + err := c.commandClient.join(p.TailscaleIPs[0].String(), joinRequest{ + RemoteHost: c.self.hostAddr.String(), + RemoteID: c.self.id, + }) + if err != nil { + log.Printf("Bootstrap: Trying to find cluster: could not join %s: %v", p.TailscaleIPs[0], err) + continue + } + log.Printf("Bootstrap: Trying to find cluster: joined %s", p.TailscaleIPs[0]) + return true + } + return false +} + +func (c *Consensus) retryFollow(ctx context.Context, auth *authorization) bool { + waitFor := 500 * time.Millisecond + nRetries := 10 + attemptCount := 1 + for true { + log.Printf("Bootstrap: trying to follow any cluster member: attempt %v", attemptCount) + joined := c.bootstrapTryToJoinAnyTarget(auth.AllowedPeers()) + if joined || attemptCount == nRetries { + return joined + } + log.Printf("Bootstrap: Failed to follow. Retrying in %v", waitFor) + time.Sleep(waitFor) + waitFor *= 2 + attemptCount++ + auth.Refresh(ctx) + } + return false +} + // bootstrap tries to join a raft cluster, or start one. // // We need to do the very first raft cluster configuration, but after that raft manages it. // bootstrap is called at start up, and we may not currently be aware of what the cluster config might be, // our node may already be in it. 
Try to join the raft cluster of all the other nodes we know about, and -// if unsuccessful, assume we are the first and try to start our own. +// if unsuccessful, assume we are the first and try to start our own. If the FollowOnly option is set, only try +// to join, never start our own. // -// It's possible for bootstrap to return an error, or start a errant breakaway cluster. +// It's possible for bootstrap to start an errant breakaway cluster if for example all nodes are having a fresh +// start, they're racing bootstrap and multiple nodes were unable to join a peer and so start their own new cluster. +// To avoid this operators should either ensure bootstrap is called for a single node first and allow it to become +// leader before starting the other nodes. Or start all but one node with the FollowOnly option. // // We have a list of expected cluster members already from control (the members of the tailnet with the tag) // so we could do the initial configuration with all servers specified. @@ -318,27 +368,20 @@ type Consensus struct { // - We want to handle machines joining after start anyway. // - Not all tagged nodes tailscale believes are active are necessarily actually responsive right now, // so let each node opt in when able. 
-func (c *Consensus) bootstrap(targets views.Slice[*ipnstate.PeerStatus]) error { - log.Printf("Trying to find cluster: num targets to try: %d", targets.Len()) - for _, p := range targets.All() { - if !p.Online { - log.Printf("Trying to find cluster: tailscale reports not online: %s", p.TailscaleIPs[0]) - continue - } - log.Printf("Trying to find cluster: trying %s", p.TailscaleIPs[0]) - err := c.commandClient.join(p.TailscaleIPs[0].String(), joinRequest{ - RemoteHost: c.self.hostAddr.String(), - RemoteID: c.self.id, - }) - if err != nil { - log.Printf("Trying to find cluster: could not join %s: %v", p.TailscaleIPs[0], err) - continue +func (c *Consensus) bootstrap(ctx context.Context, auth *authorization, opts BootstrapOpts) error { + if opts.FollowOnly { + joined := c.retryFollow(ctx, auth) + if !joined { + return errors.New("unable to join cluster") } - log.Printf("Trying to find cluster: joined %s", p.TailscaleIPs[0]) return nil } - log.Printf("Trying to find cluster: unsuccessful, starting as leader: %s", c.self.hostAddr.String()) + joined := c.bootstrapTryToJoinAnyTarget(auth.AllowedPeers()) + if joined { + return nil + } + log.Printf("Bootstrap: Trying to find cluster: unsuccessful, starting as leader: %s", c.self.hostAddr.String()) f := c.raft.BootstrapCluster( raft.Configuration{ Servers: []raft.Server{ diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index bfb6b3e0688cc..3b51a093f12ad 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -262,7 +262,7 @@ func TestStart(t *testing.T) { waitForNodesToBeTaggedInStatus(t, ctx, one, []key.NodePublic{k}, clusterTag) sm := &fsm{} - r, err := Start(ctx, one, sm, clusterTag, warnLogConfig()) + r, err := Start(ctx, one, sm, BootstrapOpts{Tag: clusterTag}, warnLogConfig()) if err != nil { t.Fatal(err) } @@ -334,7 +334,7 @@ func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string t.Helper() participants[0].sm = &fsm{} myCfg := 
addIDedLogger("0", cfg) - first, err := Start(ctx, participants[0].ts, participants[0].sm, clusterTag, myCfg) + first, err := Start(ctx, participants[0].ts, participants[0].sm, BootstrapOpts{Tag: clusterTag}, myCfg) if err != nil { t.Fatal(err) } @@ -347,7 +347,7 @@ func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string for i := 1; i < len(participants); i++ { participants[i].sm = &fsm{} myCfg := addIDedLogger(fmt.Sprintf("%d", i), cfg) - c, err := Start(ctx, participants[i].ts, participants[i].sm, clusterTag, myCfg) + c, err := Start(ctx, participants[i].ts, participants[i].sm, BootstrapOpts{Tag: clusterTag}, myCfg) if err != nil { t.Fatal(err) } @@ -530,7 +530,7 @@ func TestFollowerFailover(t *testing.T) { // follower comes back smThreeAgain := &fsm{} cfg = addIDedLogger("2 after restarting", warnLogConfig()) - rThreeAgain, err := Start(ctx, ps[2].ts, smThreeAgain, clusterTag, cfg) + rThreeAgain, err := Start(ctx, ps[2].ts, smThreeAgain, BootstrapOpts{Tag: clusterTag}, cfg) if err != nil { t.Fatal(err) } @@ -565,7 +565,7 @@ func TestRejoin(t *testing.T) { tagNodes(t, control, []key.NodePublic{keyJoiner}, clusterTag) waitForNodesToBeTaggedInStatus(t, ctx, ps[0].ts, []key.NodePublic{keyJoiner}, clusterTag) smJoiner := &fsm{} - cJoiner, err := Start(ctx, tsJoiner, smJoiner, clusterTag, cfg) + cJoiner, err := Start(ctx, tsJoiner, smJoiner, BootstrapOpts{Tag: clusterTag}, cfg) if err != nil { t.Fatal(err) } @@ -744,3 +744,23 @@ func TestOnlyTaggedPeersCanJoin(t *testing.T) { t.Fatalf("join req when not tagged, expected body: %s, got: %s", expected, sBody) } } + +func TestFollowOnly(t *testing.T) { + testConfig(t) + ctx := context.Background() + clusterTag := "tag:whatever" + ps, _, _ := startNodesAndWaitForPeerStatus(t, ctx, clusterTag, 3) + cfg := warnLogConfig() + + // start the leader + _, err := Start(ctx, ps[0].ts, ps[0].sm, BootstrapOpts{Tag: clusterTag}, cfg) + if err != nil { + t.Fatal(err) + } + + // start the follower with 
FollowOnly + _, err = Start(ctx, ps[1].ts, ps[1].sm, BootstrapOpts{Tag: clusterTag, FollowOnly: true}, cfg) + if err != nil { + t.Fatal(err) + } +} From b48d2de6ab7ad2588395e896649f4bf6bea73fcf Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Tue, 19 Aug 2025 13:11:10 -0700 Subject: [PATCH 0235/1093] cmd/natc,tsconsensus: add cluster config admin Add the ability for operators of natc in consensus mode to remove servers from the raft cluster config, without losing other state. Updates #14667 Signed-off-by: Fran Bull --- cmd/natc/ippool/consensusippool.go | 17 +++++++++++++ cmd/natc/natc.go | 38 ++++++++++++++++++++++++++++++ tsconsensus/tsconsensus.go | 20 ++++++++++++++++ 3 files changed, 75 insertions(+) diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go index 821f12faef801..64807b6c272f5 100644 --- a/cmd/natc/ippool/consensusippool.go +++ b/cmd/natc/ippool/consensusippool.go @@ -30,6 +30,7 @@ type ConsensusIPPool struct { IPSet *netipx.IPSet perPeerMap *syncs.Map[tailcfg.NodeID, *consensusPerPeerState] consensus commandExecutor + clusterController clusterController unusedAddressLifetime time.Duration } @@ -168,6 +169,7 @@ func (ipp *ConsensusIPPool) StartConsensus(ctx context.Context, ts *tsnet.Server return err } ipp.consensus = cns + ipp.clusterController = cns return nil } @@ -442,3 +444,18 @@ func (ipp *ConsensusIPPool) Apply(l *raft.Log) any { type commandExecutor interface { ExecuteCommand(tsconsensus.Command) (tsconsensus.CommandResult, error) } + +type clusterController interface { + GetClusterConfiguration() (raft.Configuration, error) + DeleteClusterServer(id raft.ServerID) (uint64, error) +} + +// GetClusterConfiguration gets the consensus implementation's cluster configuration +func (ipp *ConsensusIPPool) GetClusterConfiguration() (raft.Configuration, error) { + return ipp.clusterController.GetClusterConfiguration() +} + +// DeleteClusterServer removes a server from the consensus implementation's cluster configuration 
+func (ipp *ConsensusIPPool) DeleteClusterServer(id raft.ServerID) (uint64, error) { + return ipp.clusterController.DeleteClusterServer(id) +} diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 2007f0a242719..a4f53d657d98e 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -8,6 +8,7 @@ package main import ( "context" + "encoding/json" "errors" "expvar" "flag" @@ -23,6 +24,7 @@ import ( "time" "github.com/gaissmai/bart" + "github.com/hashicorp/raft" "github.com/inetaf/tcpproxy" "github.com/peterbourgon/ff/v3" "go4.org/netipx" @@ -63,6 +65,7 @@ func main() { server = fs.String("login-server", ipn.DefaultControlURL, "the base URL of control server") stateDir = fs.String("state-dir", "", "path to directory in which to store app state") clusterFollowOnly = fs.Bool("follow-only", false, "Try to find a leader with the cluster tag or exit.") + clusterAdminPort = fs.Int("cluster-admin-port", 8081, "Port on localhost for the cluster admin HTTP API") ) ff.Parse(fs, os.Args[1:], ff.WithEnvVarPrefix("TS_NATC")) @@ -179,6 +182,12 @@ func main() { } }() ipp = cipp + + go func() { + // This listens on localhost only, so that only those with access to the host machine + // can remove servers from the cluster config. 
+ log.Print(http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", *clusterAdminPort), httpClusterAdmin(cipp))) + }() } else { ipp = &ippool.SingleMachineIPPool{IPSet: addrPool} } @@ -633,3 +642,32 @@ func getClusterStatePath(stateDirFlag string) (string, error) { return dirPath, nil } + +func httpClusterAdmin(ipp *ippool.ConsensusIPPool) http.Handler { + mux := http.NewServeMux() + mux.HandleFunc("GET /{$}", func(w http.ResponseWriter, r *http.Request) { + c, err := ipp.GetClusterConfiguration() + if err != nil { + log.Printf("cluster admin http: error getClusterConfig: %v", err) + http.Error(w, "", http.StatusInternalServerError) + return + } + if err := json.NewEncoder(w).Encode(c); err != nil { + log.Printf("cluster admin http: error encoding raft configuration: %v", err) + } + }) + mux.HandleFunc("DELETE /{id}", func(w http.ResponseWriter, r *http.Request) { + idString := r.PathValue("id") + id := raft.ServerID(idString) + idx, err := ipp.DeleteClusterServer(id) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if err := json.NewEncoder(w).Encode(idx); err != nil { + log.Printf("cluster admin http: error encoding delete index: %v", err) + return + } + }) + return mux +} diff --git a/tsconsensus/tsconsensus.go b/tsconsensus/tsconsensus.go index 11b039d57223b..1f7dc1b7b6a5e 100644 --- a/tsconsensus/tsconsensus.go +++ b/tsconsensus/tsconsensus.go @@ -525,3 +525,23 @@ func (c *Consensus) raftAddr(host netip.Addr) string { func (c *Consensus) commandAddr(host netip.Addr) string { return netip.AddrPortFrom(host, c.config.CommandPort).String() } + +// GetClusterConfiguration returns the result of the underlying raft instance's GetConfiguration +func (c *Consensus) GetClusterConfiguration() (raft.Configuration, error) { + fut := c.raft.GetConfiguration() + err := fut.Error() + if err != nil { + return raft.Configuration{}, err + } + return fut.Configuration(), nil +} + +// DeleteClusterServer returns the result of the underlying 
raft instance's RemoveServer +func (c *Consensus) DeleteClusterServer(id raft.ServerID) (uint64, error) { + fut := c.raft.RemoveServer(id, 0, 1*time.Second) + err := fut.Error() + if err != nil { + return 0, err + } + return fut.Index(), nil +} From 641a90ea33b07e4550eb244ad02f6d1b4b30baeb Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 20 Aug 2025 16:24:00 -0700 Subject: [PATCH 0236/1093] net/sockopts,wgengine/magicsock: export socket buffer sizing logic (#16909) For eventual use by net/udprelay.Server Updates tailscale/corp#31164 Signed-off-by: Jordan Whited --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + net/sockopts/sockopts.go | 37 +++++++++++++++++ net/sockopts/sockopts_default.go | 21 ++++++++++ net/sockopts/sockopts_linux.go | 40 +++++++++++++++++++ .../sockopts/sockopts_unix_test.go | 7 ++-- tsnet/depaware.txt | 1 + wgengine/magicsock/magicsock.go | 26 ++++++------ wgengine/magicsock/magicsock_default.go | 7 ---- wgengine/magicsock/magicsock_linux.go | 29 -------------- 11 files changed, 119 insertions(+), 52 deletions(-) create mode 100644 net/sockopts/sockopts.go create mode 100644 net/sockopts/sockopts_default.go create mode 100644 net/sockopts/sockopts_linux.go rename wgengine/magicsock/magicsock_unix_test.go => net/sockopts/sockopts_unix_test.go (87%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 1ecef4953ea9f..d9cc43e6b602b 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -867,6 +867,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from 
tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index e60c1cb0410c2..219de5b0cdbff 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -339,6 +339,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/cmd/tailscaled tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/cmd/tailscaled tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 5e558a0cd3fc9..2cd76f91ad6d0 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -297,6 +297,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/net/sockopts/sockopts.go b/net/sockopts/sockopts.go new file mode 100644 index 0000000000000..0c0ee7692cf6a --- /dev/null +++ b/net/sockopts/sockopts.go @@ -0,0 +1,37 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package sockopts contains logic for applying socket options. 
+package sockopts + +import ( + "net" + "runtime" + + "tailscale.com/types/nettype" +) + +// BufferDirection represents either the read/receive or write/send direction +// of a socket buffer. +type BufferDirection string + +const ( + ReadDirection BufferDirection = "read" + WriteDirection BufferDirection = "write" +) + +func portableSetBufferSize(pconn nettype.PacketConn, direction BufferDirection, size int) error { + if runtime.GOOS == "plan9" { + // Not supported. Don't try. Avoid logspam. + return nil + } + var err error + if c, ok := pconn.(*net.UDPConn); ok { + if direction == WriteDirection { + err = c.SetWriteBuffer(size) + } else { + err = c.SetReadBuffer(size) + } + } + return err +} diff --git a/net/sockopts/sockopts_default.go b/net/sockopts/sockopts_default.go new file mode 100644 index 0000000000000..3cc8679b512c1 --- /dev/null +++ b/net/sockopts/sockopts_default.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package sockopts + +import ( + "tailscale.com/types/nettype" +) + +// SetBufferSize sets pconn's buffer to size for direction. size may be silently +// capped depending on platform. +// +// errForce is only relevant for Linux, and will always be nil otherwise, +// but we maintain a consistent cross-platform API. +// +// If pconn is not a [*net.UDPConn], then SetBufferSize is no-op. 
+func SetBufferSize(pconn nettype.PacketConn, direction BufferDirection, size int) (errForce error, errPortable error) { + return nil, portableSetBufferSize(pconn, direction, size) +} diff --git a/net/sockopts/sockopts_linux.go b/net/sockopts/sockopts_linux.go new file mode 100644 index 0000000000000..5d778d380f5c9 --- /dev/null +++ b/net/sockopts/sockopts_linux.go @@ -0,0 +1,40 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package sockopts + +import ( + "net" + "syscall" + + "tailscale.com/types/nettype" +) + +// SetBufferSize sets pconn's buffer to size for direction. It attempts +// (errForce) to set SO_SNDBUFFORCE or SO_RECVBUFFORCE which can overcome the +// limit of net.core.{r,w}mem_max, but require CAP_NET_ADMIN. It falls back to +// the portable implementation (errPortable) if that fails, which may be +// silently capped to net.core.{r,w}mem_max. +// +// If pconn is not a [*net.UDPConn], then SetBufferSize is no-op. 
+func SetBufferSize(pconn nettype.PacketConn, direction BufferDirection, size int) (errForce error, errPortable error) { + opt := syscall.SO_RCVBUFFORCE + if direction == WriteDirection { + opt = syscall.SO_SNDBUFFORCE + } + if c, ok := pconn.(*net.UDPConn); ok { + var rc syscall.RawConn + rc, errForce = c.SyscallConn() + if errForce == nil { + rc.Control(func(fd uintptr) { + errForce = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, opt, size) + }) + } + if errForce != nil { + errPortable = portableSetBufferSize(pconn, direction, size) + } + } + return errForce, errPortable +} diff --git a/wgengine/magicsock/magicsock_unix_test.go b/net/sockopts/sockopts_unix_test.go similarity index 87% rename from wgengine/magicsock/magicsock_unix_test.go rename to net/sockopts/sockopts_unix_test.go index b0700a8ebe870..ebb4354ac1385 100644 --- a/wgengine/magicsock/magicsock_unix_test.go +++ b/net/sockopts/sockopts_unix_test.go @@ -3,7 +3,7 @@ //go:build unix -package magicsock +package sockopts import ( "net" @@ -13,7 +13,7 @@ import ( "tailscale.com/types/nettype" ) -func TestTrySetSocketBuffer(t *testing.T) { +func TestSetBufferSize(t *testing.T) { c, err := net.ListenPacket("udp", ":0") if err != nil { t.Fatal(err) @@ -42,7 +42,8 @@ func TestTrySetSocketBuffer(t *testing.T) { curRcv, curSnd := getBufs() - trySetSocketBuffer(c.(nettype.PacketConn), t.Logf) + SetBufferSize(c.(nettype.PacketConn), ReadDirection, 7<<20) + SetBufferSize(c.(nettype.PacketConn), WriteDirection, 7<<20) newRcv, newSnd := getBufs() diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 9ad340c908876..d7d5be65813ba 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -293,6 +293,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/sockopts from 
tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a99a0a8e34285..a59a38f655bbd 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -45,6 +45,7 @@ import ( "tailscale.com/net/packet" "tailscale.com/net/ping" "tailscale.com/net/portmapper" + "tailscale.com/net/sockopts" "tailscale.com/net/sockstats" "tailscale.com/net/stun" "tailscale.com/net/tstun" @@ -3857,20 +3858,19 @@ func (c *Conn) DebugForcePreferDERP(n int) { c.netChecker.SetForcePreferredDERP(n) } -// portableTrySetSocketBuffer sets SO_SNDBUF and SO_RECVBUF on pconn to socketBufferSize, -// logging an error if it occurs. -func portableTrySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { - if runtime.GOOS == "plan9" { - // Not supported. Don't try. Avoid logspam. - return - } - if c, ok := pconn.(*net.UDPConn); ok { - // Attempt to increase the buffer size, and allow failures. - if err := c.SetReadBuffer(socketBufferSize); err != nil { - logf("magicsock: failed to set UDP read buffer size to %d: %v", socketBufferSize, err) +// trySetSocketBuffer attempts to set SO_SNDBUFFORCE and SO_RECVBUFFORCE which +// can overcome the limit of net.core.{r,w}mem_max, but require CAP_NET_ADMIN. +// It falls back to the portable implementation if that fails, which may be +// silently capped to net.core.{r,w}mem_max. 
+func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { + directions := []sockopts.BufferDirection{sockopts.ReadDirection, sockopts.WriteDirection} + for _, direction := range directions { + forceErr, portableErr := sockopts.SetBufferSize(pconn, direction, socketBufferSize) + if forceErr != nil { + logf("magicsock: [warning] failed to force-set UDP %v buffer size to %d: %v; using kernel default values (impacts throughput only)", direction, socketBufferSize, forceErr) } - if err := c.SetWriteBuffer(socketBufferSize); err != nil { - logf("magicsock: failed to set UDP write buffer size to %d: %v", socketBufferSize, err) + if portableErr != nil { + logf("magicsock: failed to set UDP %v buffer size to %d: %v", direction, socketBufferSize, portableErr) } } } diff --git a/wgengine/magicsock/magicsock_default.go b/wgengine/magicsock/magicsock_default.go index 4922f2c096bc4..1c315034a6f75 100644 --- a/wgengine/magicsock/magicsock_default.go +++ b/wgengine/magicsock/magicsock_default.go @@ -9,15 +9,8 @@ import ( "errors" "fmt" "io" - - "tailscale.com/types/logger" - "tailscale.com/types/nettype" ) func (c *Conn) listenRawDisco(family string) (io.Closer, error) { return nil, fmt.Errorf("raw disco listening not supported on this OS: %w", errors.ErrUnsupported) } - -func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { - portableTrySetSocketBuffer(pconn, logf) -} diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index 3369bcb89eca3..cad0e9b5e3134 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -13,7 +13,6 @@ import ( "net" "net/netip" "strings" - "syscall" "time" "github.com/mdlayher/socket" @@ -28,7 +27,6 @@ import ( "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/types/nettype" ) const ( @@ -489,30 +487,3 @@ func printSockaddr(sa unix.Sockaddr) string { return fmt.Sprintf("unknown(%T)", sa) } } - -// 
trySetSocketBuffer attempts to set SO_SNDBUFFORCE and SO_RECVBUFFORCE which -// can overcome the limit of net.core.{r,w}mem_max, but require CAP_NET_ADMIN. -// It falls back to the portable implementation if that fails, which may be -// silently capped to net.core.{r,w}mem_max. -func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { - if c, ok := pconn.(*net.UDPConn); ok { - var errRcv, errSnd error - rc, err := c.SyscallConn() - if err == nil { - rc.Control(func(fd uintptr) { - errRcv = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUFFORCE, socketBufferSize) - if errRcv != nil { - logf("magicsock: [warning] failed to force-set UDP read buffer size to %d: %v; using kernel default values (impacts throughput only)", socketBufferSize, errRcv) - } - errSnd = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDBUFFORCE, socketBufferSize) - if errSnd != nil { - logf("magicsock: [warning] failed to force-set UDP write buffer size to %d: %v; using kernel default values (impacts throughput only)", socketBufferSize, errSnd) - } - }) - } - - if err != nil || errRcv != nil || errSnd != nil { - portableTrySetSocketBuffer(pconn, logf) - } - } -} From 3e198f6d5f859c75cd049932d82ee26a5d6df8fa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 21 Aug 2025 09:48:31 -0600 Subject: [PATCH 0237/1093] .github: Bump github/codeql-action from 3.29.7 to 3.29.8 (#16828) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.7 to 3.29.8. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/51f77329afa6477de8c49fc9c7046c15b9a4e79d...76621b61decf072c1cee8dd1ce2d2a82d33c17ed) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.29.8 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 90a20e2f03f7d..2f5ae7d923eb5 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@51f77329afa6477de8c49fc9c7046c15b9a4e79d # v3.29.5 + uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5 From e296a6be8dcf2ad8f6a16a9e84afa11fd0546bec Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Thu, 21 Aug 2025 13:56:11 -0400 Subject: [PATCH 0238/1093] cmd/tsidp: update oidc-funnel-clients.json store path (#16845) Update odic-funnel-clients.json to take a path, this allows setting the location of the file and prevents it from landing in the root directory or users home directory. Move setting of rootPath until after tsnet has started. Previously this was added for the lazy creation of the oidc-key.json. It's now needed earlier in the flow. Updates #16734 Fixes #16844 Signed-off-by: Mike O'Driscoll --- cmd/tsidp/tsidp.go | 43 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index e68e55ca914fb..2fc6d27e45181 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -142,8 +142,6 @@ func main() { Hostname: *flagHostname, Dir: *flagDir, } - rootPath = ts.GetRootPath() - log.Printf("tsidp root path: %s", rootPath) if *flagVerbose { ts.Logf = log.Printf } @@ -168,6 +166,9 @@ func main() { log.Fatal(err) } lns = append(lns, ln) + + rootPath = ts.GetRootPath() + log.Printf("tsidp root path: %s", rootPath) } srv := &idpServer{ @@ -185,14 +186,18 @@ func main() { // Load funnel clients from disk if they exist, regardless of whether funnel is enabled // This ensures OIDC clients persist across restarts - f, err := os.Open(funnelClientsFile) + funnelClientsFilePath, err := getConfigFilePath(rootPath, funnelClientsFile) + if err != nil { + log.Fatalf("could not get funnel clients file path: %v", err) + } + f, err := os.Open(funnelClientsFilePath) if err == nil { if err := 
json.NewDecoder(f).Decode(&srv.funnelClients); err != nil { - log.Fatalf("could not parse %s: %v", funnelClientsFile, err) + log.Fatalf("could not parse %s: %v", funnelClientsFilePath, err) } f.Close() } else if !errors.Is(err, os.ErrNotExist) { - log.Fatalf("could not open %s: %v", funnelClientsFile, err) + log.Fatalf("could not open %s: %v", funnelClientsFilePath, err) } log.Printf("Running tsidp at %s ...", srv.serverURL) @@ -839,7 +844,10 @@ func (s *idpServer) oidcSigner() (jose.Signer, error) { func (s *idpServer) oidcPrivateKey() (*signingKey, error) { return s.lazySigningKey.GetErr(func() (*signingKey, error) { - keyPath := filepath.Join(s.rootPath, oidcKeyFile) + keyPath, err := getConfigFilePath(s.rootPath, oidcKeyFile) + if err != nil { + return nil, fmt.Errorf("could not get OIDC key file path: %w", err) + } var sk signingKey b, err := os.ReadFile(keyPath) if err == nil { @@ -1147,7 +1155,13 @@ func (s *idpServer) storeFunnelClientsLocked() error { if err := json.NewEncoder(&buf).Encode(s.funnelClients); err != nil { return err } - return os.WriteFile(funnelClientsFile, buf.Bytes(), 0600) + + funnelClientsFilePath, err := getConfigFilePath(s.rootPath, funnelClientsFile) + if err != nil { + return fmt.Errorf("storeFunnelClientsLocked: %v", err) + } + + return os.WriteFile(funnelClientsFilePath, buf.Bytes(), 0600) } const ( @@ -1260,3 +1274,18 @@ func isFunnelRequest(r *http.Request) bool { } return false } + +// getConfigFilePath returns the path to the config file for the given file name. +// The oidc-key.json and funnel-clients.json files were originally opened and written +// to without paths, and ended up in /root dir or home directory of the user running +// the process. To maintain backward compatibility, we return the naked file name if that +// file exists already, otherwise we return the full path in the rootPath. 
+func getConfigFilePath(rootPath string, fileName string) (string, error) { + if _, err := os.Stat(fileName); err == nil { + return fileName, nil + } else if errors.Is(err, os.ErrNotExist) { + return filepath.Join(rootPath, fileName), nil + } else { + return "", err + } +} From cf739256caa86d8ba48f107bb22c623de0d0822d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 21 Aug 2025 11:03:05 -0700 Subject: [PATCH 0239/1093] net/udprelay: increase socket buffer size (#16910) This increases throughput over long fat networks, and in the presence of crypto/syscall-induced delay. Updates tailscale/corp#31164 Signed-off-by: Jordan Whited --- cmd/tailscaled/depaware.txt | 2 +- net/udprelay/server.go | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 219de5b0cdbff..25f8ee3a11877 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -339,7 +339,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/cmd/tailscaled tailscale.com/net/routetable from tailscale.com/doctor/routetable - tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock+ tailscale.com/net/socks5 from tailscale.com/cmd/tailscaled tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/net/udprelay/server.go b/net/udprelay/server.go index a039c99302752..8aea8ae558724 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -28,11 +28,13 @@ import ( "tailscale.com/net/netcheck" "tailscale.com/net/netmon" "tailscale.com/net/packet" + "tailscale.com/net/sockopts" "tailscale.com/net/stun" "tailscale.com/net/udprelay/endpoint" "tailscale.com/tstime" "tailscale.com/types/key" 
"tailscale.com/types/logger" + "tailscale.com/types/nettype" "tailscale.com/util/eventbus" "tailscale.com/util/set" ) @@ -450,6 +452,25 @@ func (c *singlePacketConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, gen return nil } +// UDP socket read/write buffer size (7MB). At the time of writing (2025-08-21) +// this value was heavily influenced by magicsock, with similar motivations for +// its increase relative to typical defaults, e.g. long fat networks and +// reducing packet loss around crypto/syscall-induced delay. +const socketBufferSize = 7 << 20 + +func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { + directions := []sockopts.BufferDirection{sockopts.ReadDirection, sockopts.WriteDirection} + for _, direction := range directions { + errForce, errPortable := sockopts.SetBufferSize(pconn, direction, socketBufferSize) + if errForce != nil { + logf("[warning] failed to force-set UDP %v buffer size to %d: %v; using kernel default values (impacts throughput only)", direction, socketBufferSize, errForce) + } + if errPortable != nil { + logf("failed to set UDP %v buffer size to %d: %v", direction, socketBufferSize, errPortable) + } + } +} + // listenOn binds an IPv4 and IPv6 socket to port. We consider it successful if // we manage to bind the IPv4 socket. // @@ -473,6 +494,7 @@ func (s *Server) listenOn(port int) error { break } } + trySetSocketBuffer(uc, s.logf) // TODO: set IP_PKTINFO sockopt _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) if err != nil { From b17cfe4aed58e6802a45800863670ef299c70891 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 21 Aug 2025 13:44:13 -0700 Subject: [PATCH 0240/1093] wgengine/magicsock,net/sockopts: export Windows ICMP suppression logic (#16917) For eventual use by net/udprelay.Server. 
Updates tailscale/corp#31506 Signed-off-by: Jordan Whited --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- .../sockopts/sockopts_notwindows.go | 8 +++++--- .../sockopts/sockopts_windows.go | 20 +++++++++++-------- tsnet/depaware.txt | 2 +- wgengine/magicsock/magicsock.go | 12 +++++------ 7 files changed, 27 insertions(+), 21 deletions(-) rename wgengine/magicsock/magicsock_notwindows.go => net/sockopts/sockopts_notwindows.go (52%) rename wgengine/magicsock/magicsock_windows.go => net/sockopts/sockopts_windows.go (67%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d9cc43e6b602b..555407421cce5 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -867,7 +867,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable - tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 25f8ee3a11877..be490a9437ecd 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -339,7 +339,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/cmd/tailscaled tailscale.com/net/routetable from tailscale.com/doctor/routetable - tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock+ + 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock+ tailscale.com/net/socks5 
from tailscale.com/cmd/tailscaled tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 2cd76f91ad6d0..577050194e620 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -297,7 +297,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable - tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/wgengine/magicsock/magicsock_notwindows.go b/net/sockopts/sockopts_notwindows.go similarity index 52% rename from wgengine/magicsock/magicsock_notwindows.go rename to net/sockopts/sockopts_notwindows.go index 7c31c8202b35e..f1bc7fd442ee1 100644 --- a/wgengine/magicsock/magicsock_notwindows.go +++ b/net/sockopts/sockopts_notwindows.go @@ -3,11 +3,13 @@ //go:build !windows -package magicsock +package sockopts import ( - "tailscale.com/types/logger" "tailscale.com/types/nettype" ) -func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) {} +// SetICMPErrImmunity is no-op on non-Windows. 
+func SetICMPErrImmunity(pconn nettype.PacketConn) error { + return nil +} diff --git a/wgengine/magicsock/magicsock_windows.go b/net/sockopts/sockopts_windows.go similarity index 67% rename from wgengine/magicsock/magicsock_windows.go rename to net/sockopts/sockopts_windows.go index fe2a80e0ba951..1e6c3f69d3af5 100644 --- a/wgengine/magicsock/magicsock_windows.go +++ b/net/sockopts/sockopts_windows.go @@ -3,28 +3,31 @@ //go:build windows -package magicsock +package sockopts import ( + "fmt" "net" "unsafe" "golang.org/x/sys/windows" - "tailscale.com/types/logger" "tailscale.com/types/nettype" ) -func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { +// SetICMPErrImmunity sets socket options on pconn to prevent ICMP reception, +// e.g. ICMP Port Unreachable, from surfacing as a syscall error. +// +// If pconn is not a [*net.UDPConn], then SetICMPErrImmunity is no-op. +func SetICMPErrImmunity(pconn nettype.PacketConn) error { c, ok := pconn.(*net.UDPConn) if !ok { // not a UDP connection; nothing to do - return + return nil } sysConn, err := c.SyscallConn() if err != nil { - logf("trySetUDPSocketOptions: getting SyscallConn failed: %v", err) - return + return fmt.Errorf("SetICMPErrImmunity: getting SyscallConn failed: %v", err) } // Similar to https://github.com/golang/go/issues/5834 (which involved @@ -50,9 +53,10 @@ func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { ) }) if ioctlErr != nil { - logf("trySetUDPSocketOptions: could not set SIO_UDP_NETRESET: %v", ioctlErr) + return fmt.Errorf("SetICMPErrImmunity: could not set SIO_UDP_NETRESET: %v", ioctlErr) } if err != nil { - logf("trySetUDPSocketOptions: SyscallConn.Control failed: %v", err) + return fmt.Errorf("SetICMPErrImmunity: SyscallConn.Control failed: %v", err) } + return nil } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index d7d5be65813ba..1e25090fd3d50 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -293,7 +293,7 @@ tailscale.com/tsnet 
dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable - tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a59a38f655bbd..7fb3517e923d5 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3537,7 +3537,6 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur } } } - trySetSocketBuffer(pconn, c.logf) trySetUDPSocketOptions(pconn, c.logf) // Success. @@ -3858,11 +3857,7 @@ func (c *Conn) DebugForcePreferDERP(n int) { c.netChecker.SetForcePreferredDERP(n) } -// trySetSocketBuffer attempts to set SO_SNDBUFFORCE and SO_RECVBUFFORCE which -// can overcome the limit of net.core.{r,w}mem_max, but require CAP_NET_ADMIN. -// It falls back to the portable implementation if that fails, which may be -// silently capped to net.core.{r,w}mem_max. 
-func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { +func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { directions := []sockopts.BufferDirection{sockopts.ReadDirection, sockopts.WriteDirection} for _, direction := range directions { forceErr, portableErr := sockopts.SetBufferSize(pconn, direction, socketBufferSize) @@ -3873,6 +3868,11 @@ func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { logf("magicsock: failed to set UDP %v buffer size to %d: %v", direction, socketBufferSize, portableErr) } } + + err := sockopts.SetICMPErrImmunity(pconn) + if err != nil { + logf("magicsock: %v", err) + } } // derpStr replaces DERP IPs in s with "derp-". From c85cdabdfc4959d4d2c43b3cf56b2950fbb908d4 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 21 Aug 2025 13:59:23 -0700 Subject: [PATCH 0241/1093] net/udprelay: set ICMP err immunity sock opt (#16918) Updates tailscale/corp#31506 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 8aea8ae558724..123813c165dfc 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -458,7 +458,7 @@ func (c *singlePacketConn) WriteBatchTo(buffs [][]byte, addr netip.AddrPort, gen // reducing packet loss around crypto/syscall-induced delay. 
const socketBufferSize = 7 << 20 -func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { +func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { directions := []sockopts.BufferDirection{sockopts.ReadDirection, sockopts.WriteDirection} for _, direction := range directions { errForce, errPortable := sockopts.SetBufferSize(pconn, direction, socketBufferSize) @@ -469,6 +469,11 @@ func trySetSocketBuffer(pconn nettype.PacketConn, logf logger.Logf) { logf("failed to set UDP %v buffer size to %d: %v", direction, socketBufferSize, errPortable) } } + + err := sockopts.SetICMPErrImmunity(pconn) + if err != nil { + logf("failed to set ICMP error immunity: %v", err) + } } // listenOn binds an IPv4 and IPv6 socket to port. We consider it successful if @@ -494,7 +499,7 @@ func (s *Server) listenOn(port int) error { break } } - trySetSocketBuffer(uc, s.logf) + trySetUDPSocketOptions(uc, s.logf) // TODO: set IP_PKTINFO sockopt _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) if err != nil { From 3eeecb4c7f340a43ee133c85985111cf0e00e537 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Fri, 22 Aug 2025 16:07:05 +0100 Subject: [PATCH 0242/1093] cmd/k8s-proxy,k8s-operator: fix serve config for userspace mode (#16919) The serve code leaves it up to the system's DNS resolver and netstack to figure out how to reach the proxy destination. Combined with k8s-proxy running in userspace mode, this means we can't rely on MagicDNS being available or tailnet IPs being routable. I'd like to implement that as a feature for serve in userspace mode, but for now the safer fix to get kube-apiserver ProxyGroups consistently working in all environments is to switch to using localhost as the proxy target instead. This has a small knock-on in the code that does WhoIs lookups, which now needs to check the X-Forwarded-For header that serve populates to get the correct tailnet IP to look up, because the request's remote address will be loopback. 
Fixes #16920 Change-Id: I869ddcaf93102da50e66071bb00114cc1acc1288 Signed-off-by: Tom Proctor --- cmd/k8s-proxy/k8s-proxy.go | 2 +- k8s-operator/api-proxy/proxy.go | 30 +++++++++++++++++++++++------- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 448bbe3971c0d..7a77072140568 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -453,7 +453,7 @@ func setServeConfig(ctx context.Context, lc *local.Client, cm *certs.CertManager serviceHostPort: { Handlers: map[string]*ipn.HTTPHandler{ "/": { - Proxy: fmt.Sprintf("http://%s:80", strings.TrimSuffix(status.Self.DNSName, ".")), + Proxy: "http://localhost:80", }, }, }, diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index ff0373270b2c0..a0f2f930b8067 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -123,11 +123,11 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { if ap.authMode { mode = "auth" } - var tsLn net.Listener + var proxyLn net.Listener var serve func(ln net.Listener) error if ap.https { var err error - tsLn, err = ap.ts.Listen("tcp", ":443") + proxyLn, err = ap.ts.Listen("tcp", ":443") if err != nil { return fmt.Errorf("could not listen on :443: %w", err) } @@ -143,7 +143,7 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { } } else { var err error - tsLn, err = ap.ts.Listen("tcp", ":80") + proxyLn, err = net.Listen("tcp", "localhost:80") if err != nil { return fmt.Errorf("could not listen on :80: %w", err) } @@ -152,8 +152,8 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { errs := make(chan error) go func() { - ap.log.Infof("API server proxy in %s mode is listening on tailnet addresses %s", mode, tsLn.Addr()) - if err := serve(tsLn); err != nil && err != http.ErrServerClosed { + ap.log.Infof("API server proxy in %s mode is listening on %s", mode, proxyLn.Addr()) + if err := serve(proxyLn); err != nil && err != 
http.ErrServerClosed { errs <- fmt.Errorf("error serving: %w", err) } }() @@ -179,7 +179,7 @@ type APIServerProxy struct { rp *httputil.ReverseProxy authMode bool // Whether to run with impersonation using caller's tailnet identity. - https bool // Whether to serve on https for the device hostname; true for k8s-operator, false for k8s-proxy. + https bool // Whether to serve on https for the device hostname; true for k8s-operator, false (and localhost) for k8s-proxy. ts *tsnet.Server hs *http.Server upstreamURL *url.URL @@ -317,7 +317,23 @@ func (ap *APIServerProxy) addImpersonationHeadersAsRequired(r *http.Request) { } func (ap *APIServerProxy) whoIs(r *http.Request) (*apitype.WhoIsResponse, error) { - return ap.lc.WhoIs(r.Context(), r.RemoteAddr) + who, remoteErr := ap.lc.WhoIs(r.Context(), r.RemoteAddr) + if remoteErr == nil { + ap.log.Debugf("WhoIs from remote addr: %s", r.RemoteAddr) + return who, nil + } + + var fwdErr error + fwdFor := r.Header.Get("X-Forwarded-For") + if fwdFor != "" && !ap.https { + who, fwdErr = ap.lc.WhoIs(r.Context(), fwdFor) + if fwdErr == nil { + ap.log.Debugf("WhoIs from X-Forwarded-For header: %s", fwdFor) + return who, nil + } + } + + return nil, errors.Join(remoteErr, fwdErr) } func (ap *APIServerProxy) authError(w http.ResponseWriter, err error) { From b558f81a82bac09222b2320dbee5a4dfe96a3a17 Mon Sep 17 00:00:00 2001 From: Need-an-AwP <113933967+Need-an-AwP@users.noreply.github.com> Date: Sat, 23 Aug 2025 02:51:24 +0800 Subject: [PATCH 0243/1093] fix: invalid memory address or nil pointer dereference (#16922) Signed-off-by: Need-an-AwP <113933967+Need-an-AwP@users.noreply.github.com> --- cmd/tsconnect/wasm/wasm_js.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index ebf7284aa0d43..87f8148668be3 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -110,6 +110,7 @@ func newIPN(jsConfig js.Value) map[string]any { ControlKnobs: 
sys.ControlKnobs(), HealthTracker: sys.HealthTracker(), Metrics: sys.UserMetricsRegistry(), + EventBus: sys.Bus.Get(), }) if err != nil { log.Fatal(err) From 86a5292c03bce774b5ffedaccb768b2d5ff9f0a4 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 22 Aug 2025 15:11:51 -0700 Subject: [PATCH 0244/1093] ipn/localapi: make tailscale debug derp STUNOnly-aware (#16927) Fixes #16926 Signed-off-by: Jordan Whited --- ipn/localapi/debugderp.go | 92 ++++++++++++++++++++------------------- 1 file changed, 48 insertions(+), 44 deletions(-) diff --git a/ipn/localapi/debugderp.go b/ipn/localapi/debugderp.go index 6636fd2535e4f..017b906922835 100644 --- a/ipn/localapi/debugderp.go +++ b/ipn/localapi/debugderp.go @@ -228,55 +228,59 @@ func (h *Handler) serveDebugDERPRegion(w http.ResponseWriter, r *http.Request) { // Start by checking whether we can establish a HTTP connection for _, derpNode := range reg.Nodes { - connSuccess := checkConn(derpNode) + if !derpNode.STUNOnly { + connSuccess := checkConn(derpNode) - // Verify that the /generate_204 endpoint works - captivePortalURL := fmt.Sprintf("http://%s/generate_204?t=%d", derpNode.HostName, time.Now().Unix()) - req, err := http.NewRequest("GET", captivePortalURL, nil) - if err != nil { - st.Warnings = append(st.Warnings, fmt.Sprintf("Internal error creating request for captive portal check: %v", err)) - continue - } - req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0") - resp, err := client.Do(req) - if err != nil { - st.Warnings = append(st.Warnings, fmt.Sprintf("Error making request to the captive portal check %q; is port 80 blocked?", captivePortalURL)) - } else { - resp.Body.Close() - } + // Verify that the /generate_204 endpoint works + captivePortalURL := fmt.Sprintf("http://%s/generate_204?t=%d", derpNode.HostName, time.Now().Unix()) + req, err := http.NewRequest("GET", captivePortalURL, nil) + if err != nil { + st.Warnings = append(st.Warnings, fmt.Sprintf("Internal error 
creating request for captive portal check: %v", err)) + continue + } + req.Header.Set("Cache-Control", "no-cache, no-store, must-revalidate, no-transform, max-age=0") + resp, err := client.Do(req) + if err != nil { + st.Warnings = append(st.Warnings, fmt.Sprintf("Error making request to the captive portal check %q; is port 80 blocked?", captivePortalURL)) + } else { + resp.Body.Close() + } - if !connSuccess { - continue - } + if !connSuccess { + continue + } - fakePrivKey := key.NewNode() - - // Next, repeatedly get the server key to see if the node is - // behind a load balancer (incorrectly). - serverPubKeys := make(map[key.NodePublic]bool) - for i := range 5 { - func() { - rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.b.NetMon(), func() *tailcfg.DERPRegion { - return &tailcfg.DERPRegion{ - RegionID: reg.RegionID, - RegionCode: reg.RegionCode, - RegionName: reg.RegionName, - Nodes: []*tailcfg.DERPNode{derpNode}, + fakePrivKey := key.NewNode() + + // Next, repeatedly get the server key to see if the node is + // behind a load balancer (incorrectly). 
+ serverPubKeys := make(map[key.NodePublic]bool) + for i := range 5 { + func() { + rc := derphttp.NewRegionClient(fakePrivKey, h.logf, h.b.NetMon(), func() *tailcfg.DERPRegion { + return &tailcfg.DERPRegion{ + RegionID: reg.RegionID, + RegionCode: reg.RegionCode, + RegionName: reg.RegionName, + Nodes: []*tailcfg.DERPNode{derpNode}, + } + }) + if err := rc.Connect(ctx); err != nil { + st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ try %d: %v", derpNode.HostName, i, err)) + return } - }) - if err := rc.Connect(ctx); err != nil { - st.Errors = append(st.Errors, fmt.Sprintf("Error connecting to node %q @ try %d: %v", derpNode.HostName, i, err)) - return - } - if len(serverPubKeys) == 0 { - st.Info = append(st.Info, fmt.Sprintf("Successfully established a DERP connection with node %q", derpNode.HostName)) - } - serverPubKeys[rc.ServerPublicKey()] = true - }() - } - if len(serverPubKeys) > 1 { - st.Errors = append(st.Errors, fmt.Sprintf("Received multiple server public keys (%d); is the DERP server behind a load balancer?", len(serverPubKeys))) + if len(serverPubKeys) == 0 { + st.Info = append(st.Info, fmt.Sprintf("Successfully established a DERP connection with node %q", derpNode.HostName)) + } + serverPubKeys[rc.ServerPublicKey()] = true + }() + } + if len(serverPubKeys) > 1 { + st.Errors = append(st.Errors, fmt.Sprintf("Received multiple server public keys (%d); is the DERP server behind a load balancer?", len(serverPubKeys))) + } + } else { + st.Info = append(st.Info, fmt.Sprintf("Node %q is marked STUNOnly; skipped non-STUN checks", derpNode.HostName)) } // Send a STUN query to this node to verify whether or not it From fa0e83ab4f890120f957b34c82f3ff32fb979664 Mon Sep 17 00:00:00 2001 From: Mohammed Naser Date: Sat, 23 Aug 2025 05:00:09 -0400 Subject: [PATCH 0245/1093] tsnet: add Server.AdvertiseTags option (#15840) Updates #8531 Change-Id: I9b6653872c66929e692bd592ef3f438430c657b5 Signed-off-by: Valentin Alekseev Co-authored-by: Valentin 
Alekseev --- tsnet/tsnet.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 06709bf8b017d..8f2f7bdcd684f 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -124,6 +124,13 @@ type Server struct { // field at zero unless you know what you are doing. Port uint16 + // AdvertiseTags specifies groups that this embedded server wants to join, for + // purposes of ACL enforcement. These can be referenced from the ACL + // security policy. Note that advertising a tag doesn't guarantee that + // the control server will allow you to take on the rights for that + // tag. + AdvertiseTags []string + getCertForTesting func(*tls.ClientHelloInfo) (*tls.Certificate, error) initOnce sync.Once @@ -662,6 +669,7 @@ func (s *Server) start() (reterr error) { prefs.WantRunning = true prefs.ControlURL = s.ControlURL prefs.RunWebClient = s.RunWebClient + prefs.AdvertiseTags = s.AdvertiseTags authKey := s.getAuthKey() err = lb.Start(ipn.Options{ UpdatePrefs: prefs, From 6c8fef961eab77a51e2b30dcce0f84d7478892b2 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Sat, 23 Aug 2025 02:07:22 -0700 Subject: [PATCH 0246/1093] ipn/ipnlocal: replace the LockedOnEntry pattern with conventional lock/unlock discipline (#16925) There are several methods within the LocalBackend that used an unusual and error-prone lock discipline whereby they require the caller to hold the backend mutex on entry, but release it on the way out. In #11650 we added some support code to make this pattern more visible. Now it is time to eliminate the pattern (at least within this package). This is intended to produce no semantic changes, though I am relying on integration tests and careful inspection to achieve that. To the extent possible I preserved the existing control flow. In a few places, however, I replaced this with an unlock/lock closure. 
This means we will sometimes reacquire a lock only to release it again one frame up the stack, but these operations are not performance sensitive and the legibility gain seems worthwhile. We can probably also pull some of these out into separate methods, but I did not do that here so as to avoid other variable scope changes that might be hard to see. I would like to do some more cleanup separately. As a follow-up, we could also remove the unlockOnce helper, but I did not do that here either. Updates #11649 Change-Id: I4c92d4536eca629cfcd6187528381c33f4d64e20 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/local.go | 316 +++++++++++++++++++------------------ ipn/ipnlocal/local_test.go | 2 +- ipn/ipnlocal/profiles.go | 15 +- 3 files changed, 173 insertions(+), 160 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5fb3d5771b4cb..5e6724701847c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -806,7 +806,7 @@ func (b *LocalBackend) ReloadConfig() (ok bool, err error) { if err != nil { return false, err } - if err := b.setConfigLockedOnEntry(conf, unlock); err != nil { + if err := b.setConfigLocked(conf); err != nil { return false, fmt.Errorf("error setting config: %w", err) } @@ -863,10 +863,9 @@ func (b *LocalBackend) setStateLocked(state ipn.State) { } } -// setConfigLockedOnEntry uses the provided config to update the backend's prefs +// setConfigLocked uses the provided config to update the backend's prefs // and other state. 
-func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlockOnce) error { - defer unlock() +func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { p := b.pm.CurrentPrefs().AsStruct() mp, err := conf.Parsed.ToPrefs() if err != nil { @@ -874,8 +873,7 @@ func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlo } p.ApplyEdits(&mp) b.setStaticEndpointsFromConfigLocked(conf) - b.setPrefsLockedOnEntry(p, unlock) - + b.setPrefsLocked(p) b.conf = conf return nil } @@ -1959,12 +1957,12 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { // b.mu must not be held. func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { unlock := b.lockAndGetUnlock() + defer unlock() prefs := b.pm.CurrentPrefs().AsStruct() if !b.reconcilePrefsLocked(prefs) { - unlock.UnlockEarly() return prefs.View(), false } - return b.setPrefsLockedOnEntry(prefs, unlock), true + return b.setPrefsLocked(prefs), true } // sysPolicyChanged is a callback triggered by syspolicy when it detects @@ -2492,8 +2490,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // regress tsnet.Server restarts. 
cc.Login(controlclient.LoginDefault) } - b.stateMachineLockedOnEntry(unlock) - + b.stateMachineLocked() return nil } @@ -3512,14 +3509,14 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) - _, err := b.editPrefsLockedOnEntry( + _, err := b.editPrefsLocked( ipnauth.Self, &ipn.MaskedPrefs{ Prefs: *prefsClone, AutoUpdateSet: ipn.AutoUpdatePrefsMask{ ApplySet: true, }, - }, unlock) + }) if err != nil { b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) return @@ -3979,7 +3976,7 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { action = "connected" } reason := fmt.Sprintf("client %s (%s)", action, userIdentifier) - b.switchToBestProfileLockedOnEntry(reason, unlock) + b.switchToBestProfileLocked(reason) } // SwitchToBestProfile selects the best profile to use, @@ -3989,13 +3986,14 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { // or disconnecting, or a change in the desktop session state, and is used // for logging. func (b *LocalBackend) SwitchToBestProfile(reason string) { - b.switchToBestProfileLockedOnEntry(reason, b.lockAndGetUnlock()) + unlock := b.lockAndGetUnlock() + defer unlock() + b.switchToBestProfileLocked(reason) } -// switchToBestProfileLockedOnEntry is like [LocalBackend.SwitchToBestProfile], -// but b.mu must held on entry. It is released on exit. -func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { - defer unlock() +// switchToBestProfileLocked is like [LocalBackend.SwitchToBestProfile], but +// the caller must hold b.mu. 
+func (b *LocalBackend) switchToBestProfileLocked(reason string) { oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() profile, background := b.resolveBestProfileLocked() cp, switched, err := b.pm.SwitchToProfile(profile) @@ -4026,7 +4024,7 @@ func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock un if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { b.resetDialPlan() } - if err := b.resetForProfileChangeLockedOnEntry(unlock); err != nil { + if err := b.resetForProfileChangeLocked(); err != nil { // TODO(nickkhyl): The actual reset cannot fail. However, // the TKA initialization or [LocalBackend.Start] can fail. // These errors are not critical as far as we're concerned. @@ -4304,7 +4302,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P mp.InternalExitNodePrior = p0.ExitNodeID() } } - return b.editPrefsLockedOnEntry(actor, mp, unlock) + return b.editPrefsLocked(actor, mp) } // MaybeClearAppConnector clears the routes from any AppConnector if @@ -4333,7 +4331,9 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return ipn.PrefsView{}, errors.New("can't set Internal fields") } - return b.editPrefsLockedOnEntry(actor, mp, b.lockAndGetUnlock()) + unlock := b.lockAndGetUnlock() + defer unlock() + return b.editPrefsLocked(actor, mp) } // checkEditPrefsAccessLocked checks whether the current user has access @@ -4540,7 +4540,7 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } mp := &ipn.MaskedPrefs{WantRunningSet: true, Prefs: ipn.Prefs{WantRunning: true}} - if _, err := b.editPrefsLockedOnEntry(ipnauth.Self, mp, unlock); err != nil { + if _, err := b.editPrefsLocked(ipnauth.Self, mp); err != nil { b.logf("failed to automatically reconnect as %q after %v: %v", cp.Name(), d, err) } else { b.logf("automatically reconnected as %q after %v", cp.Name(), d) @@ -4569,11 +4569,8 @@ func (b *LocalBackend) 
stopReconnectTimerLocked() { } } -// Warning: b.mu must be held on entry, but it unlocks it on the way out. -// TODO(bradfitz): redo the locking on all these weird methods like this. -func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { - defer unlock() // for error paths - +// Warning: b.mu must be held on entry. +func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { p0 := b.pm.CurrentPrefs() // Check if the changes in mp are allowed. @@ -4610,12 +4607,10 @@ func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.Maske // before the modified prefs are actually set for the current profile. b.onEditPrefsLocked(actor, mp, p0, p1.View()) - newPrefs := b.setPrefsLockedOnEntry(p1, unlock) - - // Note: don't perform any actions for the new prefs here. Not - // every prefs change goes through EditPrefs. Put your actions - // in setPrefsLocksOnEntry instead. + newPrefs := b.setPrefsLocked(p1) + // Note: don't perform any actions for the new prefs here. Not every prefs + // change goes through EditPrefs. Put your actions in setPrefsLocked instead. // This should return the public prefs, not the private ones. return stripKeysFromPrefs(newPrefs), nil } @@ -4663,12 +4658,9 @@ func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() } -// setPrefsLockedOnEntry requires b.mu be held to call it, but it -// unlocks b.mu when done. newp ownership passes to this function. -// It returns a read-only copy of the new prefs. -func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) ipn.PrefsView { - defer unlock() - +// setPrefsLocked requires b.mu be held to call it. It returns a read-only +// copy of the new prefs. 
+func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { cn := b.currentNode() netMap := cn.NetMap() b.setAtomicValuesFromPrefsLocked(newp.View()) @@ -4737,28 +4729,33 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.stopOfflineAutoUpdate() } - unlock.UnlockEarly() + // Update status that needs to happen outside the lock, but reacquire it + // before returning (including in case of panics). + func() { + b.mu.Unlock() + defer b.mu.Lock() - if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { - b.doSetHostinfoFilterServices() - } + if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { + b.doSetHostinfoFilterServices() + } - if netMap != nil { - b.MagicConn().SetDERPMap(netMap.DERPMap) - } + if netMap != nil { + b.MagicConn().SetDERPMap(netMap.DERPMap) + } - if !oldp.WantRunning() && newp.WantRunning && cc != nil { - b.logf("transitioning to running; doing Login...") - cc.Login(controlclient.LoginDefault) - } + if !oldp.WantRunning() && newp.WantRunning && cc != nil { + b.logf("transitioning to running; doing Login...") + cc.Login(controlclient.LoginDefault) + } - if oldp.WantRunning() != newp.WantRunning { - b.stateMachine() - } else { - b.authReconfig() - } + if oldp.WantRunning() != newp.WantRunning { + b.stateMachine() + } else { + b.authReconfig() + } - b.send(ipn.Notify{Prefs: &prefs}) + b.send(ipn.Notify{Prefs: &prefs}) + }() return prefs } @@ -5620,12 +5617,12 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // happen". func (b *LocalBackend) enterState(newState ipn.State) { unlock := b.lockAndGetUnlock() - b.enterStateLockedOnEntry(newState, unlock) + defer unlock() + b.enterStateLocked(newState) } -// enterStateLockedOnEntry is like enterState but requires b.mu be held to call -// it, but it unlocks b.mu when done (via unlock, a once func). 
-func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlockOnce) { +// enterStateLocked is like enterState but requires the caller to hold b.mu. +func (b *LocalBackend) enterStateLocked(newState ipn.State) { cn := b.currentNode() oldState := b.state b.setStateLocked(newState) @@ -5674,51 +5671,56 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock b.maybeStartOfflineAutoUpdate(prefs) } - unlock.UnlockEarly() - - // prefs may change irrespective of state; WantRunning should be explicitly - // set before potential early return even if the state is unchanged. - b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) - if oldState == newState { - return - } - b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", - oldState, newState, prefs.WantRunning(), netMap != nil) - b.send(ipn.Notify{State: &newState}) + // Resolve the state transition outside the lock, but reacquire it before + // returning (including in case of panics). + func() { + b.mu.Unlock() + defer b.mu.Lock() - switch newState { - case ipn.NeedsLogin: - systemd.Status("Needs login: %s", authURL) - if b.seamlessRenewalEnabled() { - break - } - b.blockEngineUpdates(true) - fallthrough - case ipn.Stopped, ipn.NoState: - // Unconfigure the engine if it has stopped (WantRunning is set to false) - // or if we've switched to a different profile and the state is unknown. - err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) - if err != nil { - b.logf("Reconfig(down): %v", err) + // prefs may change irrespective of state; WantRunning should be explicitly + // set before potential early return even if the state is unchanged. 
+ b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) + if oldState == newState { + return } + b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", + oldState, newState, prefs.WantRunning(), netMap != nil) + b.send(ipn.Notify{State: &newState}) + + switch newState { + case ipn.NeedsLogin: + systemd.Status("Needs login: %s", authURL) + if b.seamlessRenewalEnabled() { + break + } + b.blockEngineUpdates(true) + fallthrough + case ipn.Stopped, ipn.NoState: + // Unconfigure the engine if it has stopped (WantRunning is set to false) + // or if we've switched to a different profile and the state is unknown. + err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) + if err != nil { + b.logf("Reconfig(down): %v", err) + } - if newState == ipn.Stopped && authURL == "" { - systemd.Status("Stopped; run 'tailscale up' to log in") + if newState == ipn.Stopped && authURL == "" { + systemd.Status("Stopped; run 'tailscale up' to log in") + } + case ipn.Starting, ipn.NeedsMachineAuth: + b.authReconfig() + // Needed so that UpdateEndpoints can run + b.e.RequestStatus() + case ipn.Running: + var addrStrs []string + addrs := netMap.GetAddresses() + for _, p := range addrs.All() { + addrStrs = append(addrStrs, p.Addr().String()) + } + systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) + default: + b.logf("[unexpected] unknown newState %#v", newState) } - case ipn.Starting, ipn.NeedsMachineAuth: - b.authReconfig() - // Needed so that UpdateEndpoints can run - b.e.RequestStatus() - case ipn.Running: - var addrStrs []string - addrs := netMap.GetAddresses() - for _, p := range addrs.All() { - addrStrs = append(addrStrs, p.Addr().String()) - } - systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) - default: - b.logf("[unexpected] unknown newState %#v", newState) - } + }() } func (b *LocalBackend) hasNodeKeyLocked() bool { @@ -5819,26 +5821,28 @@ func (b *LocalBackend) nextStateLocked() 
ipn.State { // Or maybe just call the state machine from fewer places. func (b *LocalBackend) stateMachine() { unlock := b.lockAndGetUnlock() - b.stateMachineLockedOnEntry(unlock) + defer unlock() + b.stateMachineLocked() } -// stateMachineLockedOnEntry is like stateMachine but requires b.mu be held to -// call it, but it unlocks b.mu when done (via unlock, a once func). -func (b *LocalBackend) stateMachineLockedOnEntry(unlock unlockOnce) { - b.enterStateLockedOnEntry(b.nextStateLocked(), unlock) +// stateMachineLocked is like stateMachine but requires b.mu be held. +func (b *LocalBackend) stateMachineLocked() { + b.enterStateLocked(b.nextStateLocked()) } -// lockAndGetUnlock locks b.mu and returns a sync.OnceFunc function that will -// unlock it at most once. +// lockAndGetUnlock locks b.mu and returns a function that will unlock it at +// most once. +// +// TODO(creachadair): This was added as a guardrail against the unfortunate +// "LockedOnEntry" methods that were originally used in this package (primarily +// enterStateLockedOnEntry) that required b.mu held to be locked on entry to +// the function but unlocked the mutex on their way out. // -// This is all very unfortunate but exists as a guardrail against the -// unfortunate "lockedOnEntry" methods in this package (primarily -// enterStateLockedOnEntry) that require b.mu held to be locked on entry to the -// function but unlock the mutex on their way out. As a stepping stone to -// cleaning things up (as of 2024-04-06), we at least pass the unlock func -// around now and defer unlock in the caller to avoid missing unlocks and double -// unlocks. TODO(bradfitz,maisem): make the locking in this package more -// traditional (simple). See https://github.com/tailscale/tailscale/issues/11649 +// Now that these have all been updated, we could remove this type and acquire +// and release locks directly. For now, however, I've left it alone to reduce +// the scope of lock-related changes. 
+// +// See: https://github.com/tailscale/tailscale/issues/11649 func (b *LocalBackend) lockAndGetUnlock() (unlock unlockOnce) { b.mu.Lock() var unlocked atomic.Bool @@ -6006,30 +6010,35 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool { // Logout logs out the current profile, if any, and waits for the logout to // complete. func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { - unlock := b.lockAndGetUnlock() - defer unlock() + // These values are initialized inside the lock on success. + var cc controlclient.Client + var profile ipn.LoginProfileView - if !b.hasNodeKeyLocked() { - // Already logged out. - return nil - } - cc := b.cc + if err := func() error { + unlock := b.lockAndGetUnlock() + defer unlock() - // Grab the current profile before we unlock the mutex, so that we can - // delete it later. - profile := b.pm.CurrentProfile() - - _, err := b.editPrefsLockedOnEntry( - actor, - &ipn.MaskedPrefs{ - WantRunningSet: true, - LoggedOutSet: true, - Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, - }, unlock) - if err != nil { + if !b.hasNodeKeyLocked() { + // Already logged out. + return nil + } + cc = b.cc + + // Grab the current profile before we unlock the mutex, so that we can + // delete it later. + profile = b.pm.CurrentProfile() + + _, err := b.editPrefsLocked( + actor, + &ipn.MaskedPrefs{ + WantRunningSet: true, + LoggedOutSet: true, + Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, + }) + return err + }(); err != nil { return err } - // b.mu is now unlocked, after editPrefsLockedOnEntry. // Clear any previous dial plan(s), if set. 
b.resetDialPlan() @@ -6049,14 +6058,14 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { return err } - unlock = b.lockAndGetUnlock() + unlock := b.lockAndGetUnlock() defer unlock() if err := b.pm.DeleteProfile(profile.ID()); err != nil { b.logf("error deleting profile: %v", err) return err } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // setNetInfo sets b.hostinfo.NetInfo to ni, and passes ni along to the @@ -7245,7 +7254,7 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { b.resetDialPlan() } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } func (b *LocalBackend) initTKALocked() error { @@ -7325,12 +7334,10 @@ func (b *LocalBackend) getHardwareAddrs() ([]string, error) { return addrs, nil } -// resetForProfileChangeLockedOnEntry resets the backend for a profile change. +// resetForProfileChangeLocked resets the backend for a profile change. // // b.mu must held on entry. It is released on exit. -func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) error { - defer unlock() - +func (b *LocalBackend) resetForProfileChangeLocked() error { if b.shutdownCalled { // Prevent a call back to Start during Shutdown, which calls Logout for // ephemeral nodes, which can then call back here. But we're shutting @@ -7361,12 +7368,19 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.resetAlwaysOnOverrideLocked() b.extHost.NotifyProfileChange(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) - b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu - b.health.SetLocalLogConfigHealth(nil) - if tkaErr != nil { - return tkaErr - } - return b.Start(ipn.Options{}) + b.enterStateLocked(ipn.NoState) + + // Update health status and start outside the lock. 
+ return func() error { + b.mu.Unlock() + defer b.mu.Lock() + + b.health.SetLocalLogConfigHealth(nil) + if tkaErr != nil { + return tkaErr + } + return b.Start(ipn.Options{}) + }() } // DeleteProfile deletes a profile with the given ID. @@ -7385,7 +7399,7 @@ func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { if !needToRestart { return nil } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // CurrentProfile returns the current LoginProfile. @@ -7407,7 +7421,7 @@ func (b *LocalBackend) NewProfile() error { // set. Conservatively reset the dialPlan. b.resetDialPlan() - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // ListProfiles returns a list of all LoginProfiles. @@ -7436,7 +7450,7 @@ func (b *LocalBackend) ResetAuth() error { return err } b.resetDialPlan() // always reset if we're removing everything - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } func (b *LocalBackend) GetPeerEndpointChanges(ctx context.Context, ip netip.Addr) ([]magicsock.EndpointChange, error) { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 49cfc3e071569..60b5b2c5be33d 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4300,7 +4300,7 @@ func (b *LocalBackend) SetPrefsForTest(newp *ipn.Prefs) { } unlock := b.lockAndGetUnlock() defer unlock() - b.setPrefsLockedOnEntry(newp, unlock) + b.setPrefsLocked(newp) } type peerOptFunc func(*tailcfg.Node) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 1d312cfa606b3..7519ee157a029 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -180,7 +180,7 @@ func (pm *profileManager) SwitchToProfile(profile ipn.LoginProfileView) (cp ipn. 
f(pm.currentProfile, pm.prefs, false) } // Do not call pm.extHost.NotifyProfileChange here; it is invoked in - // [LocalBackend.resetForProfileChangeLockedOnEntry] after the netmap reset. + // [LocalBackend.resetForProfileChangeLocked] after the netmap reset. // TODO(nickkhyl): Consider moving it here (or into the stateChangeCb handler // in [LocalBackend]) once the profile/node state, including the netmap, // is actually tied to the current profile. @@ -359,9 +359,9 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) // where prefsIn is the previous profile's prefs with an updated Persist, LoggedOut, // WantRunning and possibly other fields. This may not be the desired behavior. // - // Additionally, LocalBackend doesn't treat it as a proper profile switch, meaning that - // [LocalBackend.resetForProfileChangeLockedOnEntry] is not called and certain - // node/profile-specific state may not be reset as expected. + // Additionally, LocalBackend doesn't treat it as a proper profile switch, + // meaning that [LocalBackend.resetForProfileChangeLocked] is not called and + // certain node/profile-specific state may not be reset as expected. // // However, [profileManager] notifies [ipnext.Extension]s about the profile change, // so features migrated from LocalBackend to external packages should not be affected. @@ -494,10 +494,9 @@ func (pm *profileManager) setProfilePrefsNoPermCheck(profile ipn.LoginProfileVie oldPrefs := pm.prefs pm.prefs = clonedPrefs - // Sadly, profile prefs can be changed in multiple ways. - // It's pretty chaotic, and in many cases callers use - // unexported methods of the profile manager instead of - // going through [LocalBackend.setPrefsLockedOnEntry] + // Sadly, profile prefs can be changed in multiple ways. It's pretty + // chaotic, and in many cases callers use unexported methods of the + // profile manager instead of going through [LocalBackend.setPrefsLocked] // or at least using [profileManager.SetPrefs]. 
// // While we should definitely clean this up to improve From 16bd60f9caff91549b40e470d04b3fdfc2e90c47 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Sat, 23 Aug 2025 08:07:36 -0700 Subject: [PATCH 0247/1093] ipn,tsnet: update AdvertiseTags documentation (#16931) Instead of referring to groups, which is a term of art for a different entity, update the doc comments to more accurately describe what tags are in reference to the policy document. Updates #cleanup Change-Id: Iefff6f84981985f834bae7c6a6c34044f53f2ea2 Signed-off-by: M. J. Fromberger --- ipn/prefs.go | 9 ++++----- tsnet/tsnet.go | 9 ++++----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/ipn/prefs.go b/ipn/prefs.go index 71a80b1828760..2eb0ccf0c61e5 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -158,11 +158,10 @@ type Prefs struct { // connections. This overrides tailcfg.Hostinfo's ShieldsUp. ShieldsUp bool - // AdvertiseTags specifies groups that this node wants to join, for - // purposes of ACL enforcement. These can be referenced from the ACL - // security policy. Note that advertising a tag doesn't guarantee that - // the control server will allow you to take on the rights for that - // tag. + // AdvertiseTags specifies tags that should be applied to this node, for + // purposes of ACL enforcement. These can be referenced from the ACL policy + // document. Note that advertising a tag on the client doesn't guarantee + // that the control server will allow the node to adopt that tag. AdvertiseTags []string // Hostname is the hostname to use for identifying the node. If diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 8f2f7bdcd684f..4cb977c73708e 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -124,11 +124,10 @@ type Server struct { // field at zero unless you know what you are doing. Port uint16 - // AdvertiseTags specifies groups that this embedded server wants to join, for - // purposes of ACL enforcement. These can be referenced from the ACL - // security policy. 
Note that advertising a tag doesn't guarantee that - // the control server will allow you to take on the rights for that - // tag. + // AdvertiseTags specifies tags that should be applied to this node, for + // purposes of ACL enforcement. These can be referenced from the ACL policy + // document. Note that advertising a tag on the client doesn't guarantee + // that the control server will allow the node to adopt that tag. AdvertiseTags []string getCertForTesting func(*tls.ClientHelloInfo) (*tls.Certificate, error) From 4236a759f3a1c2ae1e78729d677b2bb33c26232b Mon Sep 17 00:00:00 2001 From: Kot C Date: Sat, 23 Aug 2025 17:52:23 -0700 Subject: [PATCH 0248/1093] cmd/tsidp: Add Docker image to README (#16915) Signed-off-by: Kot C --- cmd/tsidp/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index 780d9ab95b037..ffc296b87862a 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -12,6 +12,10 @@ ## Installation using Docker +### Pre-built image + +A pre-built tsidp image exists at `tailscale/tsidp:unstable`. + ### Building from Source ```bash From fafb51453838ddcac71f0ebee44ff1093168f105 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Mon, 25 Aug 2025 08:58:12 -0400 Subject: [PATCH 0249/1093] client/systray: go back to using upstream library (#16938) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We had a fix in a local branch, but upstream has merged it now. 
Updates #1708 Signed-off-by: Claus Lensbøl --- client/systray/logo.go | 2 +- client/systray/systray.go | 2 +- cmd/tailscale/depaware.txt | 12 ++++++------ flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- 8 files changed, 14 insertions(+), 14 deletions(-) diff --git a/client/systray/logo.go b/client/systray/logo.go index d9b0932bc522b..3467d1b741f93 100644 --- a/client/systray/logo.go +++ b/client/systray/logo.go @@ -15,9 +15,9 @@ import ( "sync" "time" + "fyne.io/systray" ico "github.com/Kodeworks/golang-image-ico" "github.com/fogleman/gg" - "github.com/tailscale/systray" ) // tsLogo represents the Tailscale logo displayed as the systray icon. diff --git a/client/systray/systray.go b/client/systray/systray.go index b1bc45fa82100..bd7c1597204ed 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -24,10 +24,10 @@ import ( "syscall" "time" + "fyne.io/systray" ico "github.com/Kodeworks/golang-image-ico" "github.com/atotto/clipboard" dbus "github.com/godbus/dbus/v5" - "github.com/tailscale/systray" "github.com/toqueteos/webbrowser" "tailscale.com/client/local" "tailscale.com/ipn" diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 8e28e29332278..020479ebb1867 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -2,6 +2,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 + L fyne.io/systray from tailscale.com/client/systray + L fyne.io/systray/internal/generated/menu from fyne.io/systray + L fyne.io/systray/internal/generated/notifier from fyne.io/systray L github.com/Kodeworks/golang-image-ico from tailscale.com/client/systray W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate @@ -22,9 +25,9 
@@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - L 💣 github.com/godbus/dbus/v5 from github.com/godbus/dbus/v5/introspect+ - L github.com/godbus/dbus/v5/introspect from github.com/godbus/dbus/v5/prop+ - L github.com/godbus/dbus/v5/prop from github.com/tailscale/systray + L 💣 github.com/godbus/dbus/v5 from fyne.io/systray+ + L github.com/godbus/dbus/v5/introspect from fyne.io/systray+ + L github.com/godbus/dbus/v5/prop from fyne.io/systray L github.com/golang/freetype/raster from github.com/fogleman/gg+ L github.com/golang/freetype/truetype from github.com/fogleman/gg github.com/golang/groupcache/lru from tailscale.com/net/dnscache @@ -66,9 +69,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink - L github.com/tailscale/systray from tailscale.com/client/systray - L github.com/tailscale/systray/internal/generated/menu from github.com/tailscale/systray - L github.com/tailscale/systray/internal/generated/notifier from github.com/tailscale/systray github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ L github.com/vishvananda/netns from github.com/tailscale/netlink+ diff --git a/flake.nix b/flake.nix index 311c422fb0400..4ed2ab3244a90 100644 --- a/flake.nix +++ b/flake.nix @@ -148,5 +148,5 @@ }); }; } -# nix-direnv cache busting line: sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= +# nix-direnv cache busting line: 
sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= diff --git a/go.mod b/go.mod index fba5a4f54d3a1..380c325bc3f19 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.24.6 require ( filippo.io/mkcert v1.4.4 + fyne.io/systray v1.11.1-0.20250812065214-4856ac3adc3c github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9 github.com/akutz/memconn v0.1.0 github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa @@ -87,7 +88,6 @@ require ( github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb - github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78 github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da diff --git a/go.mod.sri b/go.mod.sri index 34e9a57de84ed..69c69b8db0bb2 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= +sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= diff --git a/go.sum b/go.sum index df5d27313731e..f2544b9acdc27 100644 --- a/go.sum +++ b/go.sum @@ -43,6 +43,8 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= +fyne.io/systray v1.11.1-0.20250812065214-4856ac3adc3c h1:km4PIleGtbbF1oxmFQuO93CyNCldwuRTPB8WlzNWNZs= +fyne.io/systray v1.11.1-0.20250812065214-4856ac3adc3c/go.mod h1:RVwqP9nYMo7h5zViCBHri2FgjXF7H2cub7MAq4NSoLs= github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= 
github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= @@ -990,8 +992,6 @@ github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+y github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb h1:Rtklwm6HUlCtf/MR2MB9iY4FoA16acWWlC5pLrTVa90= github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb/go.mod h1:R8iCVJnbOB05pGexHK/bKHneIRHpZ3jLl7wMQ0OM/jw= -github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78 h1:H7/LOg6wgQ116wFRVa8tz9KTB8pc6jeNtqS9tyKgeVw= -github.com/tailscale/systray v0.0.0-20250807194015-d0cacc864c78/go.mod h1:1NbyArqaFj+AzkSWl0odw7flO9DsHIYWC4lMkwCKVAo= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= diff --git a/shell.nix b/shell.nix index 9dfdf4935b5af..e0f6e79f1151f 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-Ue1rD4m+rg/J84XhpNZAa3ni/r5FVQZTh6wZcbp7GsE= +# nix-direnv cache busting line: sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= From b5f834aef83cfa24bc3d4ed39e2e41f7e2cad944 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 25 Aug 2025 10:16:52 -0600 Subject: [PATCH 0250/1093] cmd/tailscaled: add Dnscache as a service dependency Updates https://github.com/tailscale/corp/issues/30961 Signed-off-by: Aaron Klotz --- cmd/tailscaled/install_windows.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/tailscaled/install_windows.go b/cmd/tailscaled/install_windows.go index 3e5036fba6bc8..e98a6461ea57b 100644 --- a/cmd/tailscaled/install_windows.go +++ 
b/cmd/tailscaled/install_windows.go @@ -29,6 +29,7 @@ func init() { // This list must be kept in sync with the TailscaledDependencies preprocessor // variable in the installer. var serviceDependencies = []string{ + "Dnscache", "iphlpsvc", "netprofm", "WinHttpAutoProxySvc", From 9403ba8c69eb4664f0c78a08f049c31ea4906a0f Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 25 Aug 2025 09:40:15 -0700 Subject: [PATCH 0251/1093] wgengine/magicsock: trigger peer relay path discovery on CallMeMaybe RX (#16929) Updates tailscale/corp#30333 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 951e59011d32a..aba4242c2db49 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1964,7 +1964,25 @@ func (de *endpoint) handleCallMeMaybe(m *disco.CallMeMaybe) { for _, st := range de.endpointState { st.lastPing = 0 } - de.sendDiscoPingsLocked(mono.Now(), false) + monoNow := mono.Now() + de.sendDiscoPingsLocked(monoNow, false) + + // This hook is required to trigger peer relay path discovery around + // disco "tailscale ping" initiated by de. We may be configured with peer + // relay servers that differ from de. + // + // The only other peer relay path discovery hook is in [endpoint.heartbeat], + // which is kicked off around outbound WireGuard packet flow, or if you are + // the "tailscale ping" initiator. Disco "tailscale ping" does not propagate + // into wireguard-go. + // + // We choose not to hook this around disco ping reception since peer relay + // path discovery can also trigger disco ping transmission, which *could* + // lead to an infinite loop of peer relay path discovery between two peers, + // absent intended triggers. 
+ if de.wantUDPRelayPathDiscoveryLocked(monoNow) { + de.discoverUDPRelayPathsLocked(monoNow) + } } func (de *endpoint) populatePeerStatus(ps *ipnstate.PeerStatus) { From 2fb9472990ea76b30b7ac7c138b856ba9500dfa1 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 25 Aug 2025 10:49:06 -0700 Subject: [PATCH 0252/1093] ipn/ipnlocal: remove unnecessary uses of lockAndGetUnlock In places where we are locking the LocalBackend and immediately deferring an unlock, and where there is no shortcut path in the control flow below the deferral, we do not need the unlockOnce helper. Replace all these with use of the lock directly. Updates #11649 Change-Id: I3e6a7110dfc9ec6c1d38d2585c5367a0d4e76514 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/local.go | 72 +++++++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5e6724701847c..a5c4e1f22c689 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -797,8 +797,8 @@ func (b *LocalBackend) Dialer() *tsdial.Dialer { // It returns (false, nil) if not running in declarative mode, (true, nil) on // success, or (false, error) on failure. func (b *LocalBackend) ReloadConfig() (ok bool, err error) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if b.conf == nil { return false, nil } @@ -1956,8 +1956,8 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { // // b.mu must not be held.
func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() prefs := b.pm.CurrentPrefs().AsStruct() if !b.reconcilePrefsLocked(prefs) { return prefs.View(), false @@ -2284,8 +2284,8 @@ func (b *LocalBackend) Start(opts ipn.Options) error { clientToShutdown.Shutdown() } }() - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if opts.UpdatePrefs != nil { if err := b.checkPrefsLocked(opts.UpdatePrefs); err != nil { @@ -3486,8 +3486,8 @@ func (b *LocalBackend) onClientVersion(v *tailcfg.ClientVersion) { } func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() prefs := b.pm.CurrentPrefs() if !prefs.Valid() { @@ -3953,8 +3953,8 @@ func (b *LocalBackend) shouldUploadServices() bool { // // On non-multi-user systems, the actor should be set to nil. func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() var userIdentifier string if user := cmp.Or(actor, b.currentUser); user != nil { @@ -3986,8 +3986,8 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { // or disconnecting, or a change in the desktop session state, and is used // for logging. func (b *LocalBackend) SwitchToBestProfile(reason string) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() b.switchToBestProfileLocked(reason) } @@ -4260,8 +4260,8 @@ func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { // Setting the value to false when use of an exit node is already false is not an error, // nor is true when the exit node is already in use. 
func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.PrefsView, error) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() p0 := b.pm.CurrentPrefs() if v && p0.ExitNodeID() != "" { @@ -4331,8 +4331,8 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return ipn.PrefsView{}, errors.New("can't set Internal fields") } - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() return b.editPrefsLocked(actor, mp) } @@ -4521,8 +4521,8 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { profileID := b.pm.CurrentProfile().ID() var reconnectTimer tstime.TimerController reconnectTimer = b.clock.AfterFunc(d, func() { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if b.reconnectTimer != reconnectTimer { // We're either not the most recent timer, or we lost the race when @@ -4569,7 +4569,7 @@ func (b *LocalBackend) stopReconnectTimerLocked() { } } -// Warning: b.mu must be held on entry. +// The caller must hold b.mu. func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { p0 := b.pm.CurrentPrefs() @@ -5616,8 +5616,8 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // really this is more "one of several places in which random things // happen". func (b *LocalBackend) enterState(newState ipn.State) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() b.enterStateLocked(newState) } @@ -5820,8 +5820,8 @@ func (b *LocalBackend) nextStateLocked() ipn.State { // TODO(apenwarr): use a channel or something to prevent reentrancy? // Or maybe just call the state machine from fewer places. 
func (b *LocalBackend) stateMachine() { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() b.stateMachineLocked() } @@ -6015,8 +6015,8 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { var profile ipn.LoginProfileView if err := func() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if !b.hasNodeKeyLocked() { // Already logged out. @@ -6058,8 +6058,8 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { return err } - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if err := b.pm.DeleteProfile(profile.ID()); err != nil { b.logf("error deleting profile: %v", err) @@ -7241,8 +7241,8 @@ func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool // It will restart the backend on success. // If the profile is not known, it returns an errProfileNotFound. func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() if _, changed, err := b.pm.SwitchToProfileByID(profile); !changed || err != nil { @@ -7336,7 +7336,7 @@ func (b *LocalBackend) getHardwareAddrs() ([]string, error) { // resetForProfileChangeLocked resets the backend for a profile change. // -// b.mu must held on entry. It is released on exit. +// The caller must hold b.mu. func (b *LocalBackend) resetForProfileChangeLocked() error { if b.shutdownCalled { // Prevent a call back to Start during Shutdown, which calls Logout for @@ -7386,8 +7386,8 @@ func (b *LocalBackend) resetForProfileChangeLocked() error { // DeleteProfile deletes a profile with the given ID. // If the profile is not known, it is a no-op. 
func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() needToRestart := b.pm.CurrentProfile().ID() == p if err := b.pm.DeleteProfile(p); err != nil { @@ -7412,8 +7412,8 @@ func (b *LocalBackend) CurrentProfile() ipn.LoginProfileView { // NewProfile creates and switches to the new profile. func (b *LocalBackend) NewProfile() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() b.pm.SwitchToNewProfile() @@ -7436,8 +7436,8 @@ func (b *LocalBackend) ListProfiles() []ipn.LoginProfileView { // backend is left with a new profile, ready for StartLoginInterative to be // called to register it as new node. func (b *LocalBackend) ResetAuth() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() prevCC := b.resetControlClientLocked() if prevCC != nil { From 9002e5fd6b8ede093ad05916db0755834f0ab5c9 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Sat, 23 Aug 2025 08:44:43 -0700 Subject: [PATCH 0253/1093] ipn/ipnlocal: remove an unnecessary unlock shortcut The early unlock on this branch was required because the "send" method goes on to acquire the mutex itself. Rather than release the lock just to acquire it again, call the underlying locked helper directly. Updates #11649 Change-Id: I50d81864a00150fc41460b7486a9c65655f282f5 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/local.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a5c4e1f22c689..a1d2df24c8a52 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1501,8 +1501,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control return } if st.Err != nil { - // The following do not depend on any data for which we need b locked. 
- unlock.UnlockEarly() if errors.Is(st.Err, io.EOF) { b.logf("[v1] Received error: EOF") return @@ -1511,7 +1509,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control var uerr controlclient.UserVisibleError if errors.As(st.Err, &uerr) { s := uerr.UserVisibleError() - b.send(ipn.Notify{ErrMessage: &s}) + b.sendToLocked(ipn.Notify{ErrMessage: &s}, allClients) } return } From b411ffb52f1336e5284dd70641ccc654fd2b407f Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 25 Aug 2025 09:16:29 -0700 Subject: [PATCH 0254/1093] ipn/ipnlocal: remove UnlockEarly from doSetHostinfoFilterServices Pull the lock-bearing code into a closure, and use a clone rather than a shallow copy of the hostinfo record. Updates #11649 Change-Id: I4f1d42c42ce45e493b204baae0d50b1cbf82b102 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/local.go | 46 +++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a1d2df24c8a52..26f0155a1d6bf 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4896,36 +4896,34 @@ func (b *LocalBackend) peerAPIServicesLocked() (ret []tailcfg.Service) { // TODO(danderson): we shouldn't be mangling hostinfo here after // painstakingly constructing it in twelvety other places. func (b *LocalBackend) doSetHostinfoFilterServices() { - unlock := b.lockAndGetUnlock() - defer unlock() + // Check the control client, hostinfo, and services under the mutex. + // On return, either both the client and hostinfo are nil, or both are non-nil. + // When non-nil, the Hostinfo is a clone of the value carried by b, safe to modify. + cc, hi, peerAPIServices := func() (controlclient.Client, *tailcfg.Hostinfo, []tailcfg.Service) { + b.mu.Lock() + defer b.mu.Unlock() - cc := b.cc - if cc == nil { - // Control client isn't up yet. 
- return - } - if b.hostinfo == nil { - b.logf("[unexpected] doSetHostinfoFilterServices with nil hostinfo") + if b.cc == nil { + return nil, nil, nil // control client isn't up yet + } else if b.hostinfo == nil { + b.logf("[unexpected] doSetHostinfoFilterServices with nil hostinfo") + return nil, nil, nil + } + svc := b.peerAPIServicesLocked() + if b.egg { + svc = append(svc, tailcfg.Service{Proto: "egg", Port: 1}) + } + // Make a clone of hostinfo so we can mutate the service field, below. + return b.cc, b.hostinfo.Clone(), svc + }() + if cc == nil || hi == nil { return } - peerAPIServices := b.peerAPIServicesLocked() - if b.egg { - peerAPIServices = append(peerAPIServices, tailcfg.Service{Proto: "egg", Port: 1}) - } - - // TODO(maisem,bradfitz): store hostinfo as a view, not as a mutable struct. - hi := *b.hostinfo // shallow copy - unlock.UnlockEarly() - // Make a shallow copy of hostinfo so we can mutate - // at the Service field. if !b.shouldUploadServices() { hi.Services = []tailcfg.Service{} } - // Don't mutate hi.Service's underlying array. Append to - // the slice with no free capacity. - c := len(hi.Services) - hi.Services = append(hi.Services[:c:c], peerAPIServices...) + hi.Services = append(hi.Services, peerAPIServices...) hi.PushDeviceToken = b.pushDeviceToken.Load() // Compare the expected ports from peerAPIServices to the actual ports in hi.Services. 
@@ -4935,7 +4933,7 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { b.logf("Hostinfo peerAPI ports changed: expected %v, got %v", expectedPorts, actualPorts) } - cc.SetHostinfo(&hi) + cc.SetHostinfo(hi) } type portPair struct { From c5429cd49c60b766077e792e805f9e42df607c9e Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Tue, 26 Aug 2025 07:44:26 -0700 Subject: [PATCH 0255/1093] go.toolchain.branch: bump to go1.25 (#16954) go.toolchain.rev: bump go1.25 version flake.nix: bump Go to 1.25 Updates #16330 Signed-off-by: Patrick O'Doherty --- .github/workflows/golangci-lint.yml | 2 +- Dockerfile | 2 +- cmd/derper/depaware.txt | 19 +++++++++++++++---- cmd/k8s-operator/depaware.txt | 15 +++++++++++++-- cmd/stund/depaware.txt | 20 +++++++++++++++----- cmd/tailscale/depaware.txt | 15 +++++++++++++-- cmd/tailscaled/depaware.txt | 15 +++++++++++++-- cmd/tsidp/depaware.txt | 15 +++++++++++++-- flake.nix | 14 +++++++------- go.mod | 2 +- go.toolchain.branch | 2 +- go.toolchain.rev | 2 +- tsnet/depaware.txt | 15 +++++++++++++-- 13 files changed, 107 insertions(+), 31 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index ee62f04bed91c..bcf17f8e66243 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -33,7 +33,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0 with: - version: v2.0.2 + version: v2.4.0 # Show only new issues if it's a pull request. 
only-new-issues: true diff --git a/Dockerfile b/Dockerfile index fbc0d1194ffc3..bd0f2840fddc1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,7 +36,7 @@ # $ docker exec tailscaled tailscale status -FROM golang:1.24-alpine AS build-env +FROM golang:1.25-alpine AS build-env WORKDIR /go/src/tailscale diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 20b6bfb6e1e17..6b149e5f54cdf 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -205,13 +205,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ L golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http + golang.org/x/net/http/httpguts from net/http+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2/hpack from net/http + golang.org/x/net/http2/hpack from net/http+ golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+ golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sync/singleflight from github.com/tailscale/setec/client/setec golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ @@ -241,6 +241,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -270,6 +272,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ 
crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -337,21 +340,27 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ @@ -373,6 +382,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/http/httptrace from net/http+ net/http/internal from net/http net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ @@ -395,6 +405,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa sort from 
compress/flate+ strconv from compress/flate+ strings from bufio+ + W structs from internal/syscall/windows sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ @@ -407,4 +418,4 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 555407421cce5..85bec4a791800 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1022,7 +1022,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/net/websocket from tailscale.com/k8s-operator/sessionrecording/ws golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator @@ -1059,6 +1059,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -1088,6 +1090,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from 
crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -1170,22 +1173,28 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -1211,6 +1220,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from sigs.k8s.io/controller-runtime/pkg/manager+ net/netip from github.com/gaissmai/bart+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -1233,6 +1243,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ 
sync/atomic from context+ syscall from archive/tar+ @@ -1245,4 +1256,4 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 8e4db75aebb8e..c8a18eb0752bc 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -98,11 +98,10 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/tsweb/varz golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http + golang.org/x/net/http/httpguts from net/http+ golang.org/x/net/http/httpproxy from net/http - golang.org/x/net/http2/hpack from net/http + golang.org/x/net/http2/hpack from net/http+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - D golang.org/x/net/route from net golang.org/x/sys/cpu from golang.org/x/crypto/blake2b+ LD golang.org/x/sys/unix from github.com/prometheus/procfs+ W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus @@ -126,6 +125,8 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -155,6 +156,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ 
crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -221,21 +223,27 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ @@ -253,9 +261,10 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from net/http + net/http/httptrace from net/http+ net/http/internal from net/http net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ @@ -276,6 +285,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar sort from compress/flate+ strconv from compress/flate+ strings from bufio+ + W structs from 
internal/syscall/windows sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ @@ -286,4 +296,4 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 020479ebb1867..b121a411f38bd 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -246,7 +246,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/tailscale/cli golang.org/x/oauth2/internal from golang.org/x/oauth2+ @@ -280,6 +280,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -309,6 +311,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -385,22 +388,28 @@ 
tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -424,6 +433,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ net/textproto from golang.org/x/net/http/httpguts+ @@ -445,6 +455,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -457,4 +468,4 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep unicode/utf8 from bufio+ unique 
from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index be490a9437ecd..a83c67cca03e5 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -499,7 +499,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sync/singleflight from github.com/jellydator/ttlcache/v3 golang.org/x/sys/cpu from github.com/tailscale/certstore+ @@ -534,6 +534,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -563,6 +565,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -634,22 +637,28 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from 
internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -673,6 +682,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from github.com/tailscale/wireguard-go/conn+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -694,6 +704,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -706,4 +717,4 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 577050194e620..a695aa5f362ff 100644 --- 
a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -452,7 +452,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ golang.org/x/net/proxy from tailscale.com/net/netns - D golang.org/x/net/route from net+ + D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ LD golang.org/x/sys/unix from github.com/google/nftables+ @@ -485,6 +485,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -514,6 +516,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -585,22 +588,28 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + D internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + 
internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LD internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -623,6 +632,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -644,6 +654,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -656,4 +667,4 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ diff --git a/flake.nix b/flake.nix index 4ed2ab3244a90..c739e87203109 100644 --- a/flake.nix +++ b/flake.nix @@ -46,18 +46,18 @@ systems, flake-compat, }: let - go124Version = "1.24.6"; - goHash = "sha256-4ctVgqq1iGaLwEwH3hhogHD2uMmyqvNh+CHhm9R8/b0="; + go125Version = "1.25.0"; + goHash = "sha256-S9AekSlyB7+kUOpA1NWpOxtTGl5DhHOyoG4Y4HciciU="; eachSystem = f: 
nixpkgs.lib.genAttrs (import systems) (system: f (import nixpkgs { system = system; overlays = [ (final: prev: { - go_1_24 = prev.go_1_24.overrideAttrs { - version = go124Version; + go_1_25 = prev.go_1_25.overrideAttrs { + version = go125Version; src = prev.fetchurl { - url = "https://go.dev/dl/go${go124Version}.src.tar.gz"; + url = "https://go.dev/dl/go${go125Version}.src.tar.gz"; hash = goHash; }; }; @@ -84,7 +84,7 @@ # you're an end user you should be prepared for this flake to not # build periodically. packages = eachSystem (pkgs: rec { - default = pkgs.buildGo124Module { + default = pkgs.buildGo125Module { name = "tailscale"; pname = "tailscale"; src = ./.; @@ -137,7 +137,7 @@ gotools graphviz perl - go_1_24 + go_1_25 yarn # qemu and e2fsprogs are needed for natlab diff --git a/go.mod b/go.mod index 380c325bc3f19..ecd229427416d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.24.6 +go 1.25.0 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.branch b/go.toolchain.branch index 5e1cd0620554a..a2bebbeb7858e 100644 --- a/go.toolchain.branch +++ b/go.toolchain.branch @@ -1 +1 @@ -tailscale.go1.24 +tailscale.go1.25 diff --git a/go.toolchain.rev b/go.toolchain.rev index 6e3bd7ff9e3b6..e3dfee5401e9e 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -54f31cd8fc7b3d7d87c1ea455c8bb4b33372f706 +9a1a6a51164c9c7a23f711052bb8776326cd30cd diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 1e25090fd3d50..67c182430ebf7 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -445,7 +445,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/net/ipv4 from github.com/miekg/dns+ golang.org/x/net/ipv6 from github.com/miekg/dns+ LDW golang.org/x/net/proxy from tailscale.com/net/netns - DI golang.org/x/net/route from net+ + DI golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from 
github.com/tailscale/certstore+ LDAI golang.org/x/sys/unix from github.com/google/nftables+ @@ -478,6 +478,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ @@ -507,6 +509,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ crypto/internal/fips140/tls12 from crypto/tls crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ @@ -578,22 +581,28 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ + DI internal/routebsd from net internal/runtime/atomic from internal/runtime/exithook+ + LA internal/runtime/cgroup from runtime internal/runtime/exithook from runtime + internal/runtime/gc from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ LA internal/runtime/syscall from runtime+ W internal/saferio from debug/pe internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ + internal/synctest from sync internal/syscall/execenv from os+ LDAI internal/syscall/unix from crypto/internal/sysrand+ W internal/syscall/windows from crypto/internal/sysrand+ W internal/syscall/windows/registry from mime+ W 
internal/syscall/windows/sysdll from internal/syscall/windows+ internal/testlog from os + internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ @@ -616,6 +625,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) net/http/httputil from github.com/aws/smithy-go/transport/http+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ @@ -636,6 +646,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) sort from compress/flate+ strconv from archive/tar+ strings from archive/tar+ + W structs from internal/syscall/windows sync from archive/tar+ sync/atomic from context+ syscall from archive/tar+ @@ -648,4 +659,4 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) unicode/utf8 from bufio+ unique from net/netip unsafe from bytes+ - weak from unique + weak from unique+ From 575664b26358533466fa3a881a15b821f6176ae2 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 26 Aug 2025 09:22:36 -0700 Subject: [PATCH 0256/1093] wgengine/magicsock: make endpoint.discoPing peer relay aware (#16946) Updates tailscale/corp#30333 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index aba4242c2db49..37892176b0925 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -994,13 +994,30 @@ func (de *endpoint) discoPing(res *ipnstate.PingResult, size int, cb func(*ipnst if derpAddr.IsValid() { de.startDiscoPingLocked(epAddr{ap: derpAddr}, now, pingCLI, size, resCB) } - if udpAddr.ap.IsValid() && now.Before(de.trustBestAddrUntil) 
{ - // Already have an active session, so just ping the address we're using. - // Otherwise "tailscale ping" results to a node on the local network - // can look like they're bouncing between, say 10.0.0.0/9 and the peer's - // IPv6 address, both 1ms away, and it's random who replies first. + + switch { + case udpAddr.ap.IsValid() && now.Before(de.trustBestAddrUntil): + // We have a "trusted" direct OR peer relay address, ping it. de.startDiscoPingLocked(udpAddr, now, pingCLI, size, resCB) - } else { + if !udpAddr.vni.IsSet() { + // If the path is direct we do not want to fallthrough to pinging + // all candidate direct paths, otherwise "tailscale ping" results to + // a node on the local network can look like they're bouncing + // between, say 10.0.0.0/8 and the peer's IPv6 address, both 1ms + // away, and it's random who replies first. cb() is called with the + // first reply, vs background path discovery that is subject to + // betterAddr() comparison and hysteresis + break + } + // If the trusted path is via a peer relay we want to fallthrough in + // order to also try all candidate direct paths. + fallthrough + default: + // Ping all candidate direct paths. This work overlaps with what + // [de.heartbeat] will periodically fire when it calls + // [de.sendDiscoPingsLocked], but a user-initiated [pingCLI] is a + // "do it now" operation that should not be subject to + // [heartbeatInterval] tick or [discoPingInterval] rate-limiting. for ep := range de.endpointState { de.startDiscoPingLocked(epAddr{ap: ep}, now, pingCLI, size, resCB) } From 6542a00ab04d85c1157b6e4b44b33184071b0569 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 27 Aug 2025 09:45:31 -0400 Subject: [PATCH 0257/1093] tailcfg: add mac-ui-v2 node capability (#16940) updates tailscale/corp#29841 Adds a node cap macOS UIs can query to determine whether they should enable the new windowed UI. 
Signed-off-by: Jonathan Nobels --- tailcfg/tailcfg.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index d2125684d3f37..6383af486f414 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2405,6 +2405,9 @@ const ( CapabilityDebug NodeCapability = "https://tailscale.com/cap/debug" // exposes debug endpoints over the PeerAPI CapabilityHTTPS NodeCapability = "https" + // CapabilityMacUIV2 makes the macOS GUI enable its v2 mode. + CapabilityMacUIV2 NodeCapability = "https://tailscale.com/cap/mac-ui-v2" + // CapabilityBindToInterfaceByRoute changes how Darwin nodes create // sockets (in the net/netns package). See that package for more // details on the behaviour of this capability. From 80f5a00e7604632c302b021013df03e4f6ee5a0b Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 27 Aug 2025 09:51:28 -0400 Subject: [PATCH 0258/1093] ipn/local: add the suggested exit node to the ipn bus (#16748) fixes tailscale/corp#26369 The suggested exit node is currently only calculated during a localAPI request. For older UIs, this wasn't a bad choice - we could just fetch it on-demand when a menu presented itself. For newer incarnations however, this is an always-visible field that needs to react to changes in the suggested exit node's value. This change recalculates the suggested exit node ID on netmap updates and broadcasts it on the IPN bus. The localAPI version of this remains intact for the time being. 
Signed-off-by: Jonathan Nobels --- ipn/backend.go | 24 ++++++++++++------ ipn/ipnlocal/bus.go | 3 ++- ipn/ipnlocal/local.go | 57 ++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 75 insertions(+), 9 deletions(-) diff --git a/ipn/backend.go b/ipn/backend.go index ab01d2fdef57a..fd4442f7160db 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -83,6 +83,8 @@ const ( NotifyRateLimit NotifyWatchOpt = 1 << 8 // if set, rate limit spammy netmap updates to every few seconds NotifyHealthActions NotifyWatchOpt = 1 << 9 // if set, include PrimaryActions in health.State. Otherwise append the action URL to the text + + NotifyInitialSuggestedExitNode NotifyWatchOpt = 1 << 10 // if set, the first Notify message (sent immediately) will contain the current SuggestedExitNode if available ) // Notify is a communication from a backend (e.g. tailscaled) to a frontend @@ -98,7 +100,7 @@ type Notify struct { // This field is only set in the first message when requesting // NotifyInitialState. Clients must store it on their side as // following notifications will not include this field. - SessionID string `json:",omitempty"` + SessionID string `json:",omitzero"` // ErrMessage, if non-nil, contains a critical error message. // For State InUseOtherUser, ErrMessage is not critical and just contains the details. @@ -116,7 +118,7 @@ type Notify struct { // user's preferred storage location. // // Deprecated: use LocalClient.AwaitWaitingFiles instead. - FilesWaiting *empty.Message `json:",omitempty"` + FilesWaiting *empty.Message `json:",omitzero"` // IncomingFiles, if non-nil, specifies which files are in the // process of being received. A nil IncomingFiles means this @@ -125,22 +127,22 @@ type Notify struct { // of being transferred. // // Deprecated: use LocalClient.AwaitWaitingFiles instead. 
- IncomingFiles []PartialFile `json:",omitempty"` + IncomingFiles []PartialFile `json:",omitzero"` // OutgoingFiles, if non-nil, tracks which files are in the process of // being sent via TailDrop, including files that finished, whether // successful or failed. This slice is sorted by Started time, then Name. - OutgoingFiles []*OutgoingFile `json:",omitempty"` + OutgoingFiles []*OutgoingFile `json:",omitzero"` // LocalTCPPort, if non-nil, informs the UI frontend which // (non-zero) localhost TCP port it's listening on. // This is currently only used by Tailscale when run in the // macOS Network Extension. - LocalTCPPort *uint16 `json:",omitempty"` + LocalTCPPort *uint16 `json:",omitzero"` // ClientVersion, if non-nil, describes whether a client version update // is available. - ClientVersion *tailcfg.ClientVersion `json:",omitempty"` + ClientVersion *tailcfg.ClientVersion `json:",omitzero"` // DriveShares tracks the full set of current DriveShares that we're // publishing. Some client applications, like the MacOS and Windows clients, @@ -153,7 +155,11 @@ type Notify struct { // Health is the last-known health state of the backend. When this field is // non-nil, a change in health verified, and the API client should surface // any changes to the user in the UI. - Health *health.State `json:",omitempty"` + Health *health.State `json:",omitzero"` + + // SuggestedExitNode, if non-nil, is the node that the backend has determined to + // be the best exit node for the current network conditions. 
+ SuggestedExitNode *tailcfg.StableNodeID `json:",omitzero"` // type is mirrored in xcode/IPN/Core/LocalAPI/Model/LocalAPIModel.swift } @@ -194,6 +200,10 @@ func (n Notify) String() string { if n.Health != nil { sb.WriteString("Health{...} ") } + if n.SuggestedExitNode != nil { + fmt.Fprintf(&sb, "SuggestedExitNode=%v ", *n.SuggestedExitNode) + } + s := sb.String() return s[0:len(s)-1] + "}" } diff --git a/ipn/ipnlocal/bus.go b/ipn/ipnlocal/bus.go index 111a877d849d8..910e4e774c958 100644 --- a/ipn/ipnlocal/bus.go +++ b/ipn/ipnlocal/bus.go @@ -156,5 +156,6 @@ func isNotableNotify(n *ipn.Notify) bool { n.Health != nil || len(n.IncomingFiles) > 0 || len(n.OutgoingFiles) > 0 || - n.FilesWaiting != nil + n.FilesWaiting != nil || + n.SuggestedExitNode != nil } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 26f0155a1d6bf..43d7e121652cc 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1737,6 +1737,10 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.send(ipn.Notify{NetMap: st.NetMap}) + // The error here is unimportant as is the result. This will recalculate the suggested exit node + // cache the value and push any changes to the IPN bus. + b.SuggestExitNode() + // Check and update the exit node if needed, now that we have a new netmap. 
// // This must happen after the netmap change is sent via [ipn.Notify], @@ -2033,7 +2037,13 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo } } + if cn.NetMap() != nil && mutationsAreWorthyOfRecalculatingSuggestedExitNode(muts, cn, b.lastSuggestedExitNode) { + // Recompute the suggested exit node + b.suggestExitNodeLocked() + } + if cn.NetMap() != nil && mutationsAreWorthyOfTellingIPNBus(muts) { + nm := cn.netMapWithPeers() notify = &ipn.Notify{NetMap: nm} } else if testenv.InTest() { @@ -2045,6 +2055,41 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo return true } +// mutationsAreWorthyOfRecalculatingSuggestedExitNode reports whether any mutation type in muts is +// worthy of recalculating the suggested exit node. +func mutationsAreWorthyOfRecalculatingSuggestedExitNode(muts []netmap.NodeMutation, cn *nodeBackend, sid tailcfg.StableNodeID) bool { + for _, m := range muts { + n, ok := cn.NodeByID(m.NodeIDBeingMutated()) + if !ok { + // The node being mutated is not in the netmap. + continue + } + + // The previously suggested exit node itself is being mutated. + if sid != "" && n.StableID() == sid { + return true + } + + allowed := n.AllowedIPs().AsSlice() + isExitNode := slices.Contains(allowed, tsaddr.AllIPv4()) || slices.Contains(allowed, tsaddr.AllIPv6()) + // The node being mutated is not an exit node. We don't care about it - unless + // it was our previously suggested exit node which we catch above. + if !isExitNode { + continue + } + + // Some exit node is being mutated. We care about it if it's online + // or offline state has changed. We *might* eventually care about it for other reasons + // but for the sake of finding a "better" suggested exit node, this is probably + // sufficient.
+ switch m.(type) { + case netmap.NodeMutationOnline: + return true + } + } + return false +} + // mutationsAreWorthyOfTellingIPNBus reports whether any mutation type in muts is // worthy of spamming the IPN bus (the Windows & Mac GUIs, basically) to tell them // about the update. @@ -3063,7 +3108,7 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A b.mu.Lock() - const initialBits = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap | ipn.NotifyInitialDriveShares + const initialBits = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap | ipn.NotifyInitialDriveShares | ipn.NotifyInitialSuggestedExitNode if mask&initialBits != 0 { cn := b.currentNode() ini = &ipn.Notify{Version: version.Long()} @@ -3086,6 +3131,11 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A if mask&ipn.NotifyInitialHealthState != 0 { ini.Health = b.HealthTracker().CurrentState() } + if mask&ipn.NotifyInitialSuggestedExitNode != 0 { + if en, err := b.SuggestExitNode(); err == nil { + ini.SuggestedExitNode = &en.ID + } + } } ctx, cancel := context.WithCancel(ctx) @@ -7716,7 +7766,12 @@ func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggest if err != nil { return res, err } + if prevSuggestion != res.ID { + // Notify the clients via the IPN bus if the exit node suggestion has changed. + b.sendToLocked(ipn.Notify{SuggestedExitNode: &res.ID}, allClients) + } b.lastSuggestedExitNode = res.ID + return res, err } From 882b05fff9b67b4a63e9a9d7486c0981b2c73016 Mon Sep 17 00:00:00 2001 From: Maisem Ali <3953239+maisem@users.noreply.github.com> Date: Wed, 27 Aug 2025 00:06:28 -0700 Subject: [PATCH 0259/1093] cmd/viewer: add field comments to generated view methods Extract field comments from AST and include them in generated view methods. Comments are preserved from the original struct fields to provide documentation for the view accessors.
Fixes #16958 Signed-off-by: Maisem Ali <3953239+maisem@users.noreply.github.com> --- cmd/viewer/tests/tests_view.go | 35 +- cmd/viewer/viewer.go | 146 ++- cmd/viewer/viewer_test.go | 3 +- drive/drive_view.go | 17 +- ipn/ipn_view.go | 365 ++++++- tailcfg/tailcfg_view.go | 945 +++++++++++++++--- types/dnstype/dnstype_view.go | 25 + types/persist/persist_view.go | 9 +- .../prefs/prefs_example/prefs_example_view.go | 74 +- types/prefs/prefs_view_test.go | 7 + 10 files changed, 1382 insertions(+), 244 deletions(-) diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index bc95fea015656..e50a71c9e0220 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -247,47 +247,41 @@ func (v *MapView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v MapView) Int() views.Map[string, int] { return views.MapOf(v.ж.Int) } - +func (v MapView) Int() views.Map[string, int] { return views.MapOf(v.ж.Int) } func (v MapView) SliceInt() views.MapSlice[string, int] { return views.MapSliceOf(v.ж.SliceInt) } - func (v MapView) StructPtrWithPtr() views.MapFn[string, *StructWithPtrs, StructWithPtrsView] { return views.MapFnOf(v.ж.StructPtrWithPtr, func(t *StructWithPtrs) StructWithPtrsView { return t.View() }) } - func (v MapView) StructPtrWithoutPtr() views.MapFn[string, *StructWithoutPtrs, StructWithoutPtrsView] { return views.MapFnOf(v.ж.StructPtrWithoutPtr, func(t *StructWithoutPtrs) StructWithoutPtrsView { return t.View() }) } - func (v MapView) StructWithoutPtr() views.Map[string, StructWithoutPtrs] { return views.MapOf(v.ж.StructWithoutPtr) } - func (v MapView) SlicesWithPtrs() views.MapFn[string, []*StructWithPtrs, views.SliceView[*StructWithPtrs, StructWithPtrsView]] { return views.MapFnOf(v.ж.SlicesWithPtrs, func(t []*StructWithPtrs) views.SliceView[*StructWithPtrs, StructWithPtrsView] { return views.SliceOfViews[*StructWithPtrs, StructWithPtrsView](t) }) } - func (v MapView) SlicesWithoutPtrs() 
views.MapFn[string, []*StructWithoutPtrs, views.SliceView[*StructWithoutPtrs, StructWithoutPtrsView]] { return views.MapFnOf(v.ж.SlicesWithoutPtrs, func(t []*StructWithoutPtrs) views.SliceView[*StructWithoutPtrs, StructWithoutPtrsView] { return views.SliceOfViews[*StructWithoutPtrs, StructWithoutPtrsView](t) }) } - func (v MapView) StructWithoutPtrKey() views.Map[StructWithoutPtrs, int] { return views.MapOf(v.ж.StructWithoutPtrKey) } - func (v MapView) StructWithPtr() views.MapFn[string, StructWithPtrs, StructWithPtrsView] { return views.MapFnOf(v.ж.StructWithPtr, func(t StructWithPtrs) StructWithPtrsView { return t.View() }) } + +// Unsupported views. func (v MapView) SliceIntPtr() map[string][]*int { panic("unsupported") } func (v MapView) PointerKey() map[*string]int { panic("unsupported") } func (v MapView) StructWithPtrKey() map[StructWithPtrs]int { panic("unsupported") } @@ -389,8 +383,10 @@ func (v StructWithSlicesView) Prefixes() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.Prefixes) } func (v StructWithSlicesView) Data() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.Data) } -func (v StructWithSlicesView) Structs() StructWithPtrs { panic("unsupported") } -func (v StructWithSlicesView) Ints() *int { panic("unsupported") } + +// Unsupported views. +func (v StructWithSlicesView) Structs() StructWithPtrs { panic("unsupported") } +func (v StructWithSlicesView) Ints() *int { panic("unsupported") } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _StructWithSlicesViewNeedsRegeneration = StructWithSlices(struct { @@ -554,9 +550,10 @@ func (v GenericIntStructView[T]) Pointer() views.ValuePointer[T] { return views.ValuePointerOf(v.ж.Pointer) } -func (v GenericIntStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.ж.Slice) } +func (v GenericIntStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.ж.Slice) } +func (v GenericIntStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.ж.Map) } -func (v GenericIntStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.ж.Map) } +// Unsupported views. func (v GenericIntStructView[T]) PtrSlice() *T { panic("unsupported") } func (v GenericIntStructView[T]) PtrKeyMap() map[*T]string { panic("unsupported") } func (v GenericIntStructView[T]) PtrValueMap() map[string]*T { panic("unsupported") } @@ -648,9 +645,10 @@ func (v GenericNoPtrsStructView[T]) Pointer() views.ValuePointer[T] { return views.ValuePointerOf(v.ж.Pointer) } -func (v GenericNoPtrsStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.ж.Slice) } +func (v GenericNoPtrsStructView[T]) Slice() views.Slice[T] { return views.SliceOf(v.ж.Slice) } +func (v GenericNoPtrsStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.ж.Map) } -func (v GenericNoPtrsStructView[T]) Map() views.Map[string, T] { return views.MapOf(v.ж.Map) } +// Unsupported views. 
func (v GenericNoPtrsStructView[T]) PtrSlice() *T { panic("unsupported") } func (v GenericNoPtrsStructView[T]) PtrKeyMap() map[*T]string { panic("unsupported") } func (v GenericNoPtrsStructView[T]) PtrValueMap() map[string]*T { panic("unsupported") } @@ -741,12 +739,13 @@ func (v GenericCloneableStructView[T, V]) Value() V { return v.ж.Value.View() } func (v GenericCloneableStructView[T, V]) Slice() views.SliceView[T, V] { return views.SliceOfViews[T, V](v.ж.Slice) } - func (v GenericCloneableStructView[T, V]) Map() views.MapFn[string, T, V] { return views.MapFnOf(v.ж.Map, func(t T) V { return t.View() }) } + +// Unsupported views. func (v GenericCloneableStructView[T, V]) Pointer() map[string]T { panic("unsupported") } func (v GenericCloneableStructView[T, V]) PtrSlice() *T { panic("unsupported") } func (v GenericCloneableStructView[T, V]) PtrKeyMap() map[*T]string { panic("unsupported") } @@ -942,25 +941,21 @@ func (v StructWithTypeAliasFieldsView) SliceWithPtrs() views.SliceView[*StructWi func (v StructWithTypeAliasFieldsView) SliceWithoutPtrs() views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { return views.SliceOfViews[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView](v.ж.SliceWithoutPtrs) } - func (v StructWithTypeAliasFieldsView) MapWithPtrs() views.MapFn[string, *StructWithPtrsAlias, StructWithPtrsAliasView] { return views.MapFnOf(v.ж.MapWithPtrs, func(t *StructWithPtrsAlias) StructWithPtrsAliasView { return t.View() }) } - func (v StructWithTypeAliasFieldsView) MapWithoutPtrs() views.MapFn[string, *StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { return views.MapFnOf(v.ж.MapWithoutPtrs, func(t *StructWithoutPtrsAlias) StructWithoutPtrsAliasView { return t.View() }) } - func (v StructWithTypeAliasFieldsView) MapOfSlicesWithPtrs() views.MapFn[string, []*StructWithPtrsAlias, views.SliceView[*StructWithPtrsAlias, StructWithPtrsAliasView]] { return views.MapFnOf(v.ж.MapOfSlicesWithPtrs, func(t []*StructWithPtrsAlias) 
views.SliceView[*StructWithPtrsAlias, StructWithPtrsAliasView] { return views.SliceOfViews[*StructWithPtrsAlias, StructWithPtrsAliasView](t) }) } - func (v StructWithTypeAliasFieldsView) MapOfSlicesWithoutPtrs() views.MapFn[string, []*StructWithoutPtrsAlias, views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView]] { return views.MapFnOf(v.ж.MapOfSlicesWithoutPtrs, func(t []*StructWithoutPtrsAlias) views.SliceView[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView] { return views.SliceOfViews[*StructWithoutPtrsAlias, StructWithoutPtrsAliasView](t) diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index a9617ac1064e6..4fd81ea510d40 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -9,6 +9,8 @@ import ( "bytes" "flag" "fmt" + "go/ast" + "go/token" "go/types" "html/template" "log" @@ -17,6 +19,7 @@ import ( "strings" "tailscale.com/util/codegen" + "tailscale.com/util/mak" "tailscale.com/util/must" ) @@ -104,16 +107,13 @@ func (v *{{.ViewName}}{{.TypeParamNames}}) UnmarshalJSONFrom(dec *jsontext.Decod {{define "valuePointerField"}}func (v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.ValuePointer[{{.FieldType}}] { return views.ValuePointerOf(v.ж.{{.FieldName}}) } {{end}} -{{define "mapField"}} -func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.Map[{{.MapKeyType}},{{.MapValueType}}] { return views.MapOf(v.ж.{{.FieldName}})} +{{define "mapField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.Map[{{.MapKeyType}},{{.MapValueType}}] { return views.MapOf(v.ж.{{.FieldName}})} {{end}} -{{define "mapFnField"}} -func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapFn[{{.MapKeyType}},{{.MapValueType}},{{.MapValueView}}] { return views.MapFnOf(v.ж.{{.FieldName}}, func (t {{.MapValueType}}) {{.MapValueView}} { +{{define "mapFnField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapFn[{{.MapKeyType}},{{.MapValueType}},{{.MapValueView}}] { return 
views.MapFnOf(v.ж.{{.FieldName}}, func (t {{.MapValueType}}) {{.MapValueView}} { return {{.MapFn}} })} {{end}} -{{define "mapSliceField"}} -func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapSlice[{{.MapKeyType}},{{.MapValueType}}] { return views.MapSliceOf(v.ж.{{.FieldName}}) } +{{define "mapSliceField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() views.MapSlice[{{.MapKeyType}},{{.MapValueType}}] { return views.MapSliceOf(v.ж.{{.FieldName}}) } {{end}} {{define "unsupportedField"}}func(v {{.ViewName}}{{.TypeParamNames}}) {{.FieldName}}() {{.FieldType}} {panic("unsupported")} {{end}} @@ -142,7 +142,81 @@ func requiresCloning(t types.Type) (shallow, deep bool, base types.Type) { return p, p, t } -func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ *types.Package) { +type fieldNameKey struct { + typeName string + fieldName string +} + +// getFieldComments extracts field comments from the AST for a given struct type. +func getFieldComments(syntax []*ast.File) map[fieldNameKey]string { + if len(syntax) == 0 { + return nil + } + var fieldComments map[fieldNameKey]string + + // Search through all AST files in the package + for _, file := range syntax { + // Look for the type declaration + for _, decl := range file.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.TYPE { + continue + } + + for _, spec := range genDecl.Specs { + typeSpec, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + typeName := typeSpec.Name.Name + + // Check if it's a struct type + structType, ok := typeSpec.Type.(*ast.StructType) + if !ok { + continue + } + + // Extract field comments + for _, field := range structType.Fields.List { + if len(field.Names) == 0 { + // Anonymous field or no names + continue + } + + // Get the field name + fieldName := field.Names[0].Name + key := fieldNameKey{typeName, fieldName} + + // Get the comment + var comment string + if field.Doc != nil && field.Doc.Text() != "" { + // 
Format the comment for Go code generation + comment = strings.TrimSpace(field.Doc.Text()) + // Convert multi-line comments to proper Go comment format + var sb strings.Builder + for line := range strings.Lines(comment) { + sb.WriteString("// ") + sb.WriteString(line) + } + if sb.Len() > 0 { + comment = sb.String() + } + } else if field.Comment != nil && field.Comment.Text() != "" { + // Handle inline comments + comment = "// " + strings.TrimSpace(field.Comment.Text()) + } + if comment != "" { + mak.Set(&fieldComments, key, comment) + } + } + } + } + } + + return fieldComments +} + +func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, fieldComments map[fieldNameKey]string) { t, ok := typ.Underlying().(*types.Struct) if !ok || codegen.IsViewType(t) { return @@ -182,6 +256,15 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * log.Fatal(err) } } + writeTemplateWithComment := func(name, fieldName string) { + // Write the field comment if it exists + key := fieldNameKey{args.StructName, fieldName} + if comment, ok := fieldComments[key]; ok && comment != "" { + fmt.Fprintln(buf, comment) + } + writeTemplate(name) + } + writeTemplate("common") for i := range t.NumFields() { f := t.Field(i) @@ -196,7 +279,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * } if !codegen.ContainsPointers(fieldType) || codegen.IsViewType(fieldType) || codegen.HasNoClone(t.Tag(i)) { args.FieldType = it.QualifiedName(fieldType) - writeTemplate("valueField") + writeTemplateWithComment("valueField", fname) continue } switch underlying := fieldType.Underlying().(type) { @@ -207,7 +290,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * case "byte": args.FieldType = it.QualifiedName(fieldType) it.Import("", "tailscale.com/types/views") - writeTemplate("byteSliceField") + writeTemplateWithComment("byteSliceField", fname) default: args.FieldType = it.QualifiedName(elem) 
it.Import("", "tailscale.com/types/views") @@ -217,35 +300,35 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * case *types.Pointer: if _, isIface := base.Underlying().(*types.Interface); !isIface { args.FieldViewName = appendNameSuffix(it.QualifiedName(base), "View") - writeTemplate("viewSliceField") + writeTemplateWithComment("viewSliceField", fname) } else { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } continue case *types.Interface: if viewType := viewTypeForValueType(elem); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewSliceField") + writeTemplateWithComment("viewSliceField", fname) continue } } - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) continue } else if shallow { switch base.Underlying().(type) { case *types.Basic, *types.Interface: - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) default: if _, isIface := base.Underlying().(*types.Interface); !isIface { args.FieldViewName = appendNameSuffix(it.QualifiedName(base), "View") - writeTemplate("viewSliceField") + writeTemplateWithComment("viewSliceField", fname) } else { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } } continue } - writeTemplate("sliceField") + writeTemplateWithComment("sliceField", fname) } continue case *types.Struct: @@ -254,26 +337,26 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if codegen.ContainsPointers(strucT) { if viewType := viewTypeForValueType(fieldType); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } if viewType, makeViewFn := viewTypeForContainerType(fieldType); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) args.MakeViewFnName = it.PackagePrefix(makeViewFn.Pkg()) + 
makeViewFn.Name() - writeTemplate("makeViewField") + writeTemplateWithComment("makeViewField", fname) continue } - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) continue } - writeTemplate("valueField") + writeTemplateWithComment("valueField", fname) continue case *types.Map: m := underlying args.FieldType = it.QualifiedName(fieldType) shallow, deep, key := requiresCloning(m.Key()) if shallow || deep { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) continue } it.Import("", "tailscale.com/types/views") @@ -358,7 +441,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * default: template = "unsupportedField" } - writeTemplate(template) + writeTemplateWithComment(template, fname) continue case *types.Pointer: ptr := underlying @@ -368,9 +451,9 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if _, isIface := base.Underlying().(*types.Interface); !isIface { args.FieldType = it.QualifiedName(base) args.FieldViewName = appendNameSuffix(args.FieldType, "View") - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) } else { - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } continue } @@ -379,7 +462,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * if viewType := viewTypeForValueType(base); viewType != nil { args.FieldType = it.QualifiedName(base) args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } @@ -389,7 +472,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * baseTypeName := it.QualifiedName(base) args.FieldType = baseTypeName args.FieldViewName = appendNameSuffix(args.FieldType, "View") - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } @@ -397,18 +480,18 @@ func 
genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, _ * // and will not have a generated view type, use views.ValuePointer[T] as the field's view type. // Its Get/GetOk methods return stack-allocated shallow copies of the field's value. args.FieldType = it.QualifiedName(base) - writeTemplate("valuePointerField") + writeTemplateWithComment("valuePointerField", fname) continue case *types.Interface: // If fieldType is an interface with a "View() {ViewType}" method, it can be used to clone the field. // This includes scenarios where fieldType is a constrained type parameter. if viewType := viewTypeForValueType(underlying); viewType != nil { args.FieldViewName = it.QualifiedName(viewType) - writeTemplate("viewField") + writeTemplateWithComment("viewField", fname) continue } } - writeTemplate("unsupportedField") + writeTemplateWithComment("unsupportedField", fname) } for i := range typ.NumMethods() { f := typ.Method(i) @@ -627,6 +710,7 @@ func main() { log.Fatal(err) } it := codegen.NewImportTracker(pkg.Types) + fieldComments := getFieldComments(pkg.Syntax) cloneOnlyType := map[string]bool{} for _, t := range strings.Split(*flagCloneOnlyTypes, ",") { @@ -654,7 +738,7 @@ func main() { if !hasClone { runCloner = true } - genView(buf, it, typ, pkg.Types) + genView(buf, it, typ, fieldComments) } out := pkg.Name + "_view" if *flagBuildTags == "test" { diff --git a/cmd/viewer/viewer_test.go b/cmd/viewer/viewer_test.go index d12d496551327..1e24b705069d7 100644 --- a/cmd/viewer/viewer_test.go +++ b/cmd/viewer/viewer_test.go @@ -53,6 +53,7 @@ func TestViewerImports(t *testing.T) { if err != nil { t.Fatal(err) } + var fieldComments map[fieldNameKey]string // don't need it for this test. 
var output bytes.Buffer tracker := codegen.NewImportTracker(pkg) @@ -65,7 +66,7 @@ func TestViewerImports(t *testing.T) { if !ok { t.Fatalf("%q is not a named type", tt.typeNames[i]) } - genView(&output, tracker, namedType, pkg) + genView(&output, tracker, namedType, fieldComments) } for _, pkg := range tt.wantImports { diff --git a/drive/drive_view.go b/drive/drive_view.go index 6338705a6f469..b481751bb3bff 100644 --- a/drive/drive_view.go +++ b/drive/drive_view.go @@ -83,9 +83,24 @@ func (v *ShareView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// Name is how this share appears on remote nodes. func (v ShareView) Name() string { return v.ж.Name } + +// Path is the path to the directory on this machine that's being shared. func (v ShareView) Path() string { return v.ж.Path } -func (v ShareView) As() string { return v.ж.As } + +// As is the UNIX or Windows username of the local account used for this +// share. File read/write permissions are enforced based on this username. +// Can be left blank to use the default value of "whoever is running the +// Tailscale GUI". +func (v ShareView) As() string { return v.ж.As } + +// BookmarkData contains security-scoped bookmark data for the Sandboxed +// Mac application. The Sandboxed Mac application gains permission to +// access the Share's folder as a result of a user selecting it in a file +// picker. In order to retain access to it across restarts, it needs to +// hold on to a security-scoped bookmark. That bookmark is stored here. 
See +// https://developer.apple.com/documentation/security/app_sandbox/accessing_files_from_the_macos_app_sandbox#4144043 func (v ShareView) BookmarkData() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.BookmarkData) } diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 0f0f652d11922..170dc409b2095 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -89,14 +89,47 @@ func (v *LoginProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v LoginProfileView) ID() ProfileID { return v.ж.ID } -func (v LoginProfileView) Name() string { return v.ж.Name } -func (v LoginProfileView) NetworkProfile() NetworkProfile { return v.ж.NetworkProfile } -func (v LoginProfileView) Key() StateKey { return v.ж.Key } +// ID is a unique identifier for this profile. +// It is assigned on creation and never changes. +// It may seem redundant to have both ID and UserProfile.ID +// but they are different things. UserProfile.ID may change +// over time (e.g. if a device is tagged). +func (v LoginProfileView) ID() ProfileID { return v.ж.ID } + +// Name is the user-visible name of this profile. +// It is filled in from the UserProfile.LoginName field. +func (v LoginProfileView) Name() string { return v.ж.Name } + +// NetworkProfile is a subset of netmap.NetworkMap that we +// store to remember information about the tailnet that this +// profile was logged in with. +// +// This field was added on 2023-11-17. +func (v LoginProfileView) NetworkProfile() NetworkProfile { return v.ж.NetworkProfile } + +// Key is the StateKey under which the profile is stored. +// It is assigned once at profile creation time and never changes. +func (v LoginProfileView) Key() StateKey { return v.ж.Key } + +// UserProfile is the server provided UserProfile for this profile. +// This is updated whenever the server provides a new UserProfile. 
func (v LoginProfileView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } -func (v LoginProfileView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } -func (v LoginProfileView) LocalUserID() WindowsUserID { return v.ж.LocalUserID } -func (v LoginProfileView) ControlURL() string { return v.ж.ControlURL } + +// NodeID is the NodeID of the node that this profile is logged into. +// This should be stable across tagging and untagging nodes. +// It may seem redundant to check against both the UserProfile.UserID +// and the NodeID. However the NodeID can change if the node is deleted +// from the admin panel. +func (v LoginProfileView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } + +// LocalUserID is the user ID of the user who created this profile. +// It is only relevant on Windows where we have a multi-user system. +// It is assigned once at profile creation time and never changes. +func (v LoginProfileView) LocalUserID() WindowsUserID { return v.ж.LocalUserID } + +// ControlURL is the URL of the control server that this profile is logged +// into. +func (v LoginProfileView) ControlURL() string { return v.ж.ControlURL } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _LoginProfileViewNeedsRegeneration = LoginProfile(struct { @@ -177,48 +210,253 @@ func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v PrefsView) ControlURL() string { return v.ж.ControlURL } -func (v PrefsView) RouteAll() bool { return v.ж.RouteAll } -func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.ж.ExitNodeID } -func (v PrefsView) ExitNodeIP() netip.Addr { return v.ж.ExitNodeIP } -func (v PrefsView) AutoExitNode() ExitNodeExpression { return v.ж.AutoExitNode } +// ControlURL is the URL of the control server to use. +// +// If empty, the default for new installs, DefaultControlURL +// is used. 
It's set non-empty once the daemon has been started +// for the first time. +// +// TODO(apenwarr): Make it safe to update this with EditPrefs(). +// Right now, you have to pass it in the initial prefs in Start(), +// which is the only code that actually uses the ControlURL value. +// It would be more consistent to restart controlclient +// automatically whenever this variable changes. +// +// Meanwhile, you have to provide this as part of +// Options.LegacyMigrationPrefs or Options.UpdatePrefs when +// calling Backend.Start(). +func (v PrefsView) ControlURL() string { return v.ж.ControlURL } + +// RouteAll specifies whether to accept subnets advertised by +// other nodes on the Tailscale network. Note that this does not +// include default routes (0.0.0.0/0 and ::/0), those are +// controlled by ExitNodeID/IP below. +func (v PrefsView) RouteAll() bool { return v.ж.RouteAll } + +// ExitNodeID and ExitNodeIP specify the node that should be used +// as an exit node for internet traffic. At most one of these +// should be non-zero. +// +// The preferred way to express the chosen node is ExitNodeID, but +// in some cases it's not possible to use that ID (e.g. in the +// linux CLI, before tailscaled has a netmap). For those +// situations, we allow specifying the exit node by IP, and +// ipnlocal.LocalBackend will translate the IP into an ID when the +// node is found in the netmap. +// +// If the selected exit node doesn't exist (e.g. it's not part of +// the current tailnet), or it doesn't offer exit node services, a +// blackhole route will be installed on the local system to +// prevent any traffic escaping to the local network. +func (v PrefsView) ExitNodeID() tailcfg.StableNodeID { return v.ж.ExitNodeID } +func (v PrefsView) ExitNodeIP() netip.Addr { return v.ж.ExitNodeIP } + +// AutoExitNode is an optional expression that specifies whether and how +// tailscaled should pick an exit node automatically. 
+// +// If specified, tailscaled will use an exit node based on the expression, +// and will re-evaluate the selection periodically as network conditions, +// available exit nodes, or policy settings change. A blackhole route will +// be installed to prevent traffic from escaping to the local network until +// an exit node is selected. It takes precedence over ExitNodeID and ExitNodeIP. +// +// If empty, tailscaled will not automatically select an exit node. +// +// If the specified expression is invalid or unsupported by the client, +// it falls back to the behavior of [AnyExitNode]. +// +// As of 2025-07-02, the only supported value is [AnyExitNode]. +// It's a string rather than a boolean to allow future extensibility +// (e.g., AutoExitNode = "mullvad" or AutoExitNode = "geo:us"). +func (v PrefsView) AutoExitNode() ExitNodeExpression { return v.ж.AutoExitNode } + +// InternalExitNodePrior is the most recently used ExitNodeID in string form. It is set by +// the backend on transition from exit node on to off and used by the +// backend. +// +// As an Internal field, it can't be set by LocalAPI clients, rather it is set indirectly +// when the ExitNodeID value is zero'd and via the set-use-exit-node-enabled endpoint. 
func (v PrefsView) InternalExitNodePrior() tailcfg.StableNodeID { return v.ж.InternalExitNodePrior } -func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.ж.ExitNodeAllowLANAccess } -func (v PrefsView) CorpDNS() bool { return v.ж.CorpDNS } -func (v PrefsView) RunSSH() bool { return v.ж.RunSSH } -func (v PrefsView) RunWebClient() bool { return v.ж.RunWebClient } -func (v PrefsView) WantRunning() bool { return v.ж.WantRunning } -func (v PrefsView) LoggedOut() bool { return v.ж.LoggedOut } -func (v PrefsView) ShieldsUp() bool { return v.ж.ShieldsUp } -func (v PrefsView) AdvertiseTags() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseTags) } -func (v PrefsView) Hostname() string { return v.ж.Hostname } -func (v PrefsView) NotepadURLs() bool { return v.ж.NotepadURLs } -func (v PrefsView) ForceDaemon() bool { return v.ж.ForceDaemon } -func (v PrefsView) Egg() bool { return v.ж.Egg } + +// ExitNodeAllowLANAccess indicates whether locally accessible subnets should be +// routed directly or via the exit node. +func (v PrefsView) ExitNodeAllowLANAccess() bool { return v.ж.ExitNodeAllowLANAccess } + +// CorpDNS specifies whether to install the Tailscale network's +// DNS configuration, if it exists. +func (v PrefsView) CorpDNS() bool { return v.ж.CorpDNS } + +// RunSSH bool is whether this node should run an SSH +// server, permitting access to peers according to the +// policies as configured by the Tailnet's admin(s). +func (v PrefsView) RunSSH() bool { return v.ж.RunSSH } + +// RunWebClient bool is whether this node should expose +// its web client over Tailscale at port 5252, +// permitting access to peers according to the +// policies as configured by the Tailnet's admin(s). +func (v PrefsView) RunWebClient() bool { return v.ж.RunWebClient } + +// WantRunning indicates whether networking should be active on +// this node. +func (v PrefsView) WantRunning() bool { return v.ж.WantRunning } + +// LoggedOut indicates whether the user intends to be logged out. 
+// There are other reasons we may be logged out, including no valid +// keys. +// We need to remember this state so that, on next startup, we can +// generate the "Login" vs "Connect" buttons correctly, without having +// to contact the server to confirm our nodekey status first. +func (v PrefsView) LoggedOut() bool { return v.ж.LoggedOut } + +// ShieldsUp indicates whether to block all incoming connections, +// regardless of the control-provided packet filter. If false, we +// use the packet filter as provided. If true, we block incoming +// connections. This overrides tailcfg.Hostinfo's ShieldsUp. +func (v PrefsView) ShieldsUp() bool { return v.ж.ShieldsUp } + +// AdvertiseTags specifies tags that should be applied to this node, for +// purposes of ACL enforcement. These can be referenced from the ACL policy +// document. Note that advertising a tag on the client doesn't guarantee +// that the control server will allow the node to adopt that tag. +func (v PrefsView) AdvertiseTags() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseTags) } + +// Hostname is the hostname to use for identifying the node. If +// not set, os.Hostname is used. +func (v PrefsView) Hostname() string { return v.ж.Hostname } + +// NotepadURLs is a debugging setting that opens OAuth URLs in +// notepad.exe on Windows, rather than loading them in a browser. +// +// apenwarr 2020-04-29: Unfortunately this is still needed sometimes. +// Windows' default browser setting is sometimes screwy and this helps +// users narrow it down a bit. +func (v PrefsView) NotepadURLs() bool { return v.ж.NotepadURLs } + +// ForceDaemon specifies whether a platform that normally +// operates in "client mode" (that is, requires an active user +// logged in with the GUI app running) should keep running after the +// GUI ends and/or the user logs out. +// +// The only current applicable platform is Windows. 
This +// forced Windows to go into "server mode" where Tailscale is +// running even with no users logged in. This might also be +// used for macOS in the future. This setting has no effect +// for Linux/etc, which always operate in daemon mode. +func (v PrefsView) ForceDaemon() bool { return v.ж.ForceDaemon } + +// Egg is a optional debug flag. +func (v PrefsView) Egg() bool { return v.ж.Egg } + +// AdvertiseRoutes specifies CIDR prefixes to advertise into the +// Tailscale network as reachable through the current +// node. func (v PrefsView) AdvertiseRoutes() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.AdvertiseRoutes) } + +// AdvertiseServices specifies the list of services that this +// node can serve as a destination for. Note that an advertised +// service must still go through the approval process from the +// control server. func (v PrefsView) AdvertiseServices() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseServices) } -func (v PrefsView) NoSNAT() bool { return v.ж.NoSNAT } -func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.ж.NoStatefulFiltering } + +// NoSNAT specifies whether to source NAT traffic going to +// destinations in AdvertiseRoutes. The default is to apply source +// NAT, which makes the traffic appear to come from the router +// machine rather than the peer's Tailscale IP. +// +// Disabling SNAT requires additional manual configuration in your +// network to route Tailscale traffic back to the subnet relay +// machine. +// +// Linux-only. +func (v PrefsView) NoSNAT() bool { return v.ж.NoSNAT } + +// NoStatefulFiltering specifies whether to apply stateful filtering when +// advertising routes in AdvertiseRoutes. The default is to not apply +// stateful filtering. +// +// To allow inbound connections from advertised routes, both NoSNAT and +// NoStatefulFiltering must be true. +// +// This is an opt.Bool because it was first added after NoSNAT, with a +// backfill based on the value of that parameter. 
The backfill has been +// removed since then, but the field remains an opt.Bool. +// +// Linux-only. +func (v PrefsView) NoStatefulFiltering() opt.Bool { return v.ж.NoStatefulFiltering } + +// NetfilterMode specifies how much to manage netfilter rules for +// Tailscale, if at all. func (v PrefsView) NetfilterMode() preftype.NetfilterMode { return v.ж.NetfilterMode } -func (v PrefsView) OperatorUser() string { return v.ж.OperatorUser } -func (v PrefsView) ProfileName() string { return v.ж.ProfileName } -func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate } -func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector } -func (v PrefsView) PostureChecking() bool { return v.ж.PostureChecking } -func (v PrefsView) NetfilterKind() string { return v.ж.NetfilterKind } + +// OperatorUser is the local machine user name who is allowed to +// operate tailscaled without being root or using sudo. +func (v PrefsView) OperatorUser() string { return v.ж.OperatorUser } + +// ProfileName is the desired name of the profile. If empty, then the user's +// LoginName is used. It is only used for display purposes in the client UI +// and CLI. +func (v PrefsView) ProfileName() string { return v.ж.ProfileName } + +// AutoUpdate sets the auto-update preferences for the node agent. See +// AutoUpdatePrefs docs for more details. +func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate } + +// AppConnector sets the app connector preferences for the node agent. See +// AppConnectorPrefs docs for more details. +func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector } + +// PostureChecking enables the collection of information used for device +// posture checks. +// +// Note: this should be named ReportPosture, but it was shipped as +// PostureChecking in some early releases and this JSON field is written to +// disk, so we just keep its old name. 
(akin to CorpDNS which is an internal +// pref name that doesn't match the public interface) +func (v PrefsView) PostureChecking() bool { return v.ж.PostureChecking } + +// NetfilterKind specifies what netfilter implementation to use. +// +// Linux-only. +func (v PrefsView) NetfilterKind() string { return v.ж.NetfilterKind } + +// DriveShares are the configured DriveShares, stored in increasing order +// by name. func (v PrefsView) DriveShares() views.SliceView[*drive.Share, drive.ShareView] { return views.SliceOfViews[*drive.Share, drive.ShareView](v.ж.DriveShares) } + +// RelayServerPort is the UDP port number for the relay server to bind to, +// on all interfaces. A non-nil zero value signifies a random unused port +// should be used. A nil value signifies relay server functionality +// should be disabled. This field is currently experimental, and therefore +// no guarantees are made about its current naming and functionality when +// non-nil/enabled. func (v PrefsView) RelayServerPort() views.ValuePointer[int] { return views.ValuePointerOf(v.ж.RelayServerPort) } +// AllowSingleHosts was a legacy field that was always true +// for the past 4.5 years. It controlled whether Tailscale +// peers got /32 or /127 routes for each other. +// As of 2024-05-17 we're starting to ignore it, but to let +// people still downgrade Tailscale versions and not break +// all peer-to-peer networking we still write it to disk (as JSON) +// so it can be loaded back by old versions. +// TODO(bradfitz): delete this in 2025 sometime. See #12058. func (v PrefsView) AllowSingleHosts() marshalAsTrueInJSON { return v.ж.AllowSingleHosts } -func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } + +// The Persist field is named 'Config' in the file for backward +// compatibility with earlier versions. +// TODO(apenwarr): We should move this out of here, it's not a pref. 
+// +// We can maybe do that once we're sure which module should persist +// it (backend or frontend?) +func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PrefsViewNeedsRegeneration = Prefs(struct { @@ -324,33 +562,52 @@ func (v *ServeConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// TCP are the list of TCP port numbers that tailscaled should handle for +// the Tailscale IP addresses. (not subnet routers, etc) func (v ServeConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] { return views.MapFnOf(v.ж.TCP, func(t *TCPPortHandler) TCPPortHandlerView { return t.View() }) } +// Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers +// keyed by mount point ("/", "/foo", etc) func (v ServeConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] { return views.MapFnOf(v.ж.Web, func(t *WebServerConfig) WebServerConfigView { return t.View() }) } +// Services maps from service name (in the form "svc:dns-label") to a ServiceConfig. +// Which describes the L3, L4, and L7 forwarding information for the service. func (v ServeConfigView) Services() views.MapFn[tailcfg.ServiceName, *ServiceConfig, ServiceConfigView] { return views.MapFnOf(v.ж.Services, func(t *ServiceConfig) ServiceConfigView { return t.View() }) } +// AllowFunnel is the set of SNI:port values for which funnel +// traffic is allowed, from trusted ingress peers. func (v ServeConfigView) AllowFunnel() views.Map[HostPort, bool] { return views.MapOf(v.ж.AllowFunnel) } +// Foreground is a map of an IPN Bus session ID to an alternate foreground serve config that's valid for the +// life of that WatchIPNBus session ID. 
This allows the config to specify ephemeral configs that are used +// in the CLI's foreground mode to ensure ungraceful shutdowns of either the client or the LocalBackend does not +// expose ports that users are not aware of. In practice this contains any serve config set via 'tailscale +// serve' command run without the '--bg' flag. ServeConfig contained by Foreground is not expected itself to contain +// another Foreground block. func (v ServeConfigView) Foreground() views.MapFn[string, *ServeConfig, ServeConfigView] { return views.MapFnOf(v.ж.Foreground, func(t *ServeConfig) ServeConfigView { return t.View() }) } + +// ETag is the checksum of the serve config that's populated +// by the LocalClient through the HTTP ETag header during a +// GetServeConfig request and is translated to an If-Match header +// during a SetServeConfig request. func (v ServeConfigView) ETag() string { return v.ж.ETag } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -430,17 +687,23 @@ func (v *ServiceConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// TCP are the list of TCP port numbers that tailscaled should handle for +// the Tailscale IP addresses. (not subnet routers, etc) func (v ServiceConfigView) TCP() views.MapFn[uint16, *TCPPortHandler, TCPPortHandlerView] { return views.MapFnOf(v.ж.TCP, func(t *TCPPortHandler) TCPPortHandlerView { return t.View() }) } +// Web maps from "$SNI_NAME:$PORT" to a set of HTTP handlers +// keyed by mount point ("/", "/foo", etc) func (v ServiceConfigView) Web() views.MapFn[HostPort, *WebServerConfig, WebServerConfigView] { return views.MapFnOf(v.ж.Web, func(t *WebServerConfig) WebServerConfigView { return t.View() }) } + +// Tun determines if the service should be using L3 forwarding (Tun mode). func (v ServiceConfigView) Tun() bool { return v.ж.Tun } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
@@ -517,9 +780,29 @@ func (v *TCPPortHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v TCPPortHandlerView) HTTPS() bool { return v.ж.HTTPS } -func (v TCPPortHandlerView) HTTP() bool { return v.ж.HTTP } -func (v TCPPortHandlerView) TCPForward() string { return v.ж.TCPForward } +// HTTPS, if true, means that tailscaled should handle this connection as an +// HTTPS request as configured by ServeConfig.Web. +// +// It is mutually exclusive with TCPForward. +func (v TCPPortHandlerView) HTTPS() bool { return v.ж.HTTPS } + +// HTTP, if true, means that tailscaled should handle this connection as an +// HTTP request as configured by ServeConfig.Web. +// +// It is mutually exclusive with TCPForward. +func (v TCPPortHandlerView) HTTP() bool { return v.ж.HTTP } + +// TCPForward is the IP:port to forward TCP connections to. +// Whether or not TLS is terminated by tailscaled depends on +// TerminateTLS. +// +// It is mutually exclusive with HTTPS. +func (v TCPPortHandlerView) TCPForward() string { return v.ж.TCPForward } + +// TerminateTLS, if non-empty, means that tailscaled should terminate the +// TLS connections before forwarding them to TCPForward, permitting only the +// SNI name with this value. It is only used if TCPForward is non-empty. +// (the HTTPS mode uses ServeConfig.Web) func (v TCPPortHandlerView) TerminateTLS() string { return v.ж.TerminateTLS } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
@@ -597,9 +880,14 @@ func (v *HTTPHandlerView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v HTTPHandlerView) Path() string { return v.ж.Path } +// absolute path to directory or file to serve +func (v HTTPHandlerView) Path() string { return v.ж.Path } + +// http://localhost:3000/, localhost:3030, 3030 func (v HTTPHandlerView) Proxy() string { return v.ж.Proxy } -func (v HTTPHandlerView) Text() string { return v.ж.Text } + +// plaintext to serve (primarily for testing) +func (v HTTPHandlerView) Text() string { return v.ж.Text } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct { @@ -675,6 +963,7 @@ func (v *WebServerConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// mountPoint => handler func (v WebServerConfigView) Handlers() views.MapFn[string, *HTTPHandler, HTTPHandlerView] { return views.MapFnOf(v.ж.Handlers, func(t *HTTPHandler) HTTPHandlerView { return t.View() diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 8dc4f1ca80e49..e44d0bbef326b 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -90,8 +90,12 @@ func (v *UserView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v UserView) ID() UserID { return v.ж.ID } -func (v UserView) DisplayName() string { return v.ж.DisplayName } +func (v UserView) ID() UserID { return v.ж.ID } + +// if non-empty overrides Login field +func (v UserView) DisplayName() string { return v.ж.DisplayName } + +// if non-empty overrides Login field func (v UserView) ProfilePicURL() string { return v.ж.ProfilePicURL } func (v UserView) Created() time.Time { return v.ж.Created } @@ -172,53 +176,202 @@ func (v *NodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v NodeView) ID() NodeID { return v.ж.ID } func (v NodeView) StableID() StableNodeID { return v.ж.StableID } -func (v NodeView) Name() string { 
return v.ж.Name } -func (v NodeView) User() UserID { return v.ж.User } -func (v NodeView) Sharer() UserID { return v.ж.Sharer } -func (v NodeView) Key() key.NodePublic { return v.ж.Key } -func (v NodeView) KeyExpiry() time.Time { return v.ж.KeyExpiry } + +// Name is the FQDN of the node. +// It is also the MagicDNS name for the node. +// It has a trailing dot. +// e.g. "host.tail-scale.ts.net." +func (v NodeView) Name() string { return v.ж.Name } + +// User is the user who created the node. If ACL tags are in use for the +// node then it doesn't reflect the ACL identity that the node is running +// as. +func (v NodeView) User() UserID { return v.ж.User } + +// Sharer, if non-zero, is the user who shared this node, if different than User. +func (v NodeView) Sharer() UserID { return v.ж.Sharer } +func (v NodeView) Key() key.NodePublic { return v.ж.Key } + +// the zero value if this node does not expire +func (v NodeView) KeyExpiry() time.Time { return v.ж.KeyExpiry } func (v NodeView) KeySignature() views.ByteSlice[tkatype.MarshaledSignature] { return views.ByteSliceOf(v.ж.KeySignature) } -func (v NodeView) Machine() key.MachinePublic { return v.ж.Machine } -func (v NodeView) DiscoKey() key.DiscoPublic { return v.ж.DiscoKey } -func (v NodeView) Addresses() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.Addresses) } -func (v NodeView) AllowedIPs() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.AllowedIPs) } -func (v NodeView) Endpoints() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Endpoints) } -func (v NodeView) LegacyDERPString() string { return v.ж.LegacyDERPString } -func (v NodeView) HomeDERP() int { return v.ж.HomeDERP } -func (v NodeView) Hostinfo() HostinfoView { return v.ж.Hostinfo } -func (v NodeView) Created() time.Time { return v.ж.Created } -func (v NodeView) Cap() CapabilityVersion { return v.ж.Cap } -func (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) } +func (v NodeView) Machine() key.MachinePublic { 
return v.ж.Machine } +func (v NodeView) DiscoKey() key.DiscoPublic { return v.ж.DiscoKey } + +// Addresses are the IP addresses of this Node directly. +func (v NodeView) Addresses() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.Addresses) } + +// AllowedIPs are the IP ranges to route to this node. +// +// As of CapabilityVersion 112, this may be nil (null or undefined) on the wire +// to mean the same as Addresses. Internally, it is always filled in with +// its possibly-implicit value. +func (v NodeView) AllowedIPs() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.AllowedIPs) } + +// IP+port (public via STUN, and local LANs) +func (v NodeView) Endpoints() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Endpoints) } + +// LegacyDERPString is this node's home LegacyDERPString region ID integer, but shoved into an +// IP:port string for legacy reasons. The IP address is always "127.3.3.40" +// (a loopback address (127) followed by the digits over the letters DERP on +// a QWERTY keyboard (3.3.40)). The "port number" is the home LegacyDERPString region ID +// integer. +// +// Deprecated: HomeDERP has replaced this, but old servers might still send +// this field. See tailscale/tailscale#14636. Do not use this field in code +// other than in the upgradeNode func, which canonicalizes it to HomeDERP +// if it arrives as a LegacyDERPString string on the wire. +func (v NodeView) LegacyDERPString() string { return v.ж.LegacyDERPString } + +// HomeDERP is the modern version of the DERP string field, with just an +// integer. The client advertises support for this as of capver 111. +// +// HomeDERP may be zero if not (yet) known, but ideally always be non-zero +// for magicsock connectivity to function normally. 
+func (v NodeView) HomeDERP() int { return v.ж.HomeDERP } +func (v NodeView) Hostinfo() HostinfoView { return v.ж.Hostinfo } +func (v NodeView) Created() time.Time { return v.ж.Created } + +// if non-zero, the node's capability version; old servers might not send +func (v NodeView) Cap() CapabilityVersion { return v.ж.Cap } + +// Tags are the list of ACL tags applied to this node. +// Tags take the form of `tag:` where value starts +// with a letter and only contains alphanumerics and dashes `-`. +// Some valid tag examples: +// +// `tag:prod` +// `tag:database` +// `tag:lab-1` +func (v NodeView) Tags() views.Slice[string] { return views.SliceOf(v.ж.Tags) } + +// PrimaryRoutes are the routes from AllowedIPs that this node +// is currently the primary subnet router for, as determined +// by the control plane. It does not include the self address +// values from Addresses that are in AllowedIPs. func (v NodeView) PrimaryRoutes() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.PrimaryRoutes) } + +// LastSeen is when the node was last online. It is not +// updated when Online is true. It is nil if the current +// node doesn't have permission to know, or the node +// has never been online. func (v NodeView) LastSeen() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.LastSeen) } +// Online is whether the node is currently connected to the +// coordination server. A value of nil means unknown, or the +// current node doesn't have permission to know. func (v NodeView) Online() views.ValuePointer[bool] { return views.ValuePointerOf(v.ж.Online) } -func (v NodeView) MachineAuthorized() bool { return v.ж.MachineAuthorized } +// TODO(crawshaw): replace with MachineStatus +func (v NodeView) MachineAuthorized() bool { return v.ж.MachineAuthorized } + +// Capabilities are capabilities that the node has. 
+// They're free-form strings, but should be in the form of URLs/URIs +// such as: +// +// "https://tailscale.com/cap/is-admin" +// "https://tailscale.com/cap/file-sharing" +// +// Deprecated: use CapMap instead. See https://github.com/tailscale/tailscale/issues/11508 func (v NodeView) Capabilities() views.Slice[NodeCapability] { return views.SliceOf(v.ж.Capabilities) } +// CapMap is a map of capabilities to their optional argument/data values. +// +// It is valid for a capability to not have any argument/data values; such +// capabilities can be tested for using the HasCap method. These type of +// capabilities are used to indicate that a node has a capability, but there +// is no additional data associated with it. These were previously +// represented by the Capabilities field, but can now be represented by +// CapMap with an empty value. +// +// See NodeCapability for more information on keys. +// +// Metadata about nodes can be transmitted in 3 ways: +// 1. MapResponse.Node.CapMap describes attributes that affect behavior for +// this node, such as which features have been enabled through the admin +// panel and any associated configuration details. +// 2. MapResponse.PacketFilter(s) describes access (both IP and application +// based) that should be granted to peers. +// 3. MapResponse.Peers[].CapMap describes attributes regarding a peer node, +// such as which features the peer supports or if that peer is preferred +// for a particular task vs other peers that could also be chosen. func (v NodeView) CapMap() views.MapSlice[NodeCapability, RawMessage] { return views.MapSliceOf(v.ж.CapMap) } -func (v NodeView) UnsignedPeerAPIOnly() bool { return v.ж.UnsignedPeerAPIOnly } -func (v NodeView) ComputedName() string { return v.ж.ComputedName } + +// UnsignedPeerAPIOnly means that this node is not signed nor subject to TKA +// restrictions. However, in exchange for that privilege, it does not get +// network access. 
It can only access this node's peerapi, which may not let +// it do anything. It is the tailscaled client's job to double-check the +// MapResponse's PacketFilter to verify that its AllowedIPs will not be +// accepted by the packet filter. +func (v NodeView) UnsignedPeerAPIOnly() bool { return v.ж.UnsignedPeerAPIOnly } + +// MagicDNS base name (for normal non-shared-in nodes), FQDN (without trailing dot, for shared-in nodes), or Hostname (if no MagicDNS) +func (v NodeView) ComputedName() string { return v.ж.ComputedName } + +// either "ComputedName" or "ComputedName (computedHostIfDifferent)", if computedHostIfDifferent is set func (v NodeView) ComputedNameWithHost() string { return v.ж.ComputedNameWithHost } -func (v NodeView) DataPlaneAuditLogID() string { return v.ж.DataPlaneAuditLogID } -func (v NodeView) Expired() bool { return v.ж.Expired } + +// DataPlaneAuditLogID is the per-node logtail ID used for data plane audit logging. +func (v NodeView) DataPlaneAuditLogID() string { return v.ж.DataPlaneAuditLogID } + +// Expired is whether this node's key has expired. Control may send +// this; clients are only allowed to set this from false to true. On +// the client, this is calculated client-side based on a timestamp sent +// from control, to avoid clock skew issues. +func (v NodeView) Expired() bool { return v.ж.Expired } + +// SelfNodeV4MasqAddrForThisPeer is the IPv4 that this peer knows the current node as. +// It may be empty if the peer knows the current node by its native +// IPv4 address. +// This field is only populated in a MapResponse for peers and not +// for the current node. +// +// If set, it should be used to masquerade traffic originating from the +// current node to this peer. The masquerade address is only relevant +// for this peer and not for other peers. +// +// This only applies to traffic originating from the current node to the +// peer or any of its subnets. Traffic originating from subnet routes will +// not be masqueraded (e.g. 
in case of --snat-subnet-routes). func (v NodeView) SelfNodeV4MasqAddrForThisPeer() views.ValuePointer[netip.Addr] { return views.ValuePointerOf(v.ж.SelfNodeV4MasqAddrForThisPeer) } +// SelfNodeV6MasqAddrForThisPeer is the IPv6 that this peer knows the current node as. +// It may be empty if the peer knows the current node by its native +// IPv6 address. +// This field is only populated in a MapResponse for peers and not +// for the current node. +// +// If set, it should be used to masquerade traffic originating from the +// current node to this peer. The masquerade address is only relevant +// for this peer and not for other peers. +// +// This only applies to traffic originating from the current node to the +// peer or any of its subnets. Traffic originating from subnet routes will +// not be masqueraded (e.g. in case of --snat-subnet-routes). func (v NodeView) SelfNodeV6MasqAddrForThisPeer() views.ValuePointer[netip.Addr] { return views.ValuePointerOf(v.ж.SelfNodeV6MasqAddrForThisPeer) } +// IsWireGuardOnly indicates that this is a non-Tailscale WireGuard peer, it +// is not expected to speak Disco or DERP, and it must have Endpoints in +// order to be reachable. func (v NodeView) IsWireGuardOnly() bool { return v.ж.IsWireGuardOnly } -func (v NodeView) IsJailed() bool { return v.ж.IsJailed } + +// IsJailed indicates that this node is jailed and should not be allowed +// initiate connections, however outbound connections to it should still be +// allowed. +func (v NodeView) IsJailed() bool { return v.ж.IsJailed } + +// ExitNodeDNSResolvers is the list of DNS servers that should be used when this +// node is marked IsWireGuardOnly and being used as an exit node. 
func (v NodeView) ExitNodeDNSResolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.ж.ExitNodeDNSResolvers) } @@ -331,47 +484,144 @@ func (v *HostinfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v HostinfoView) IPNVersion() string { return v.ж.IPNVersion } -func (v HostinfoView) FrontendLogID() string { return v.ж.FrontendLogID } -func (v HostinfoView) BackendLogID() string { return v.ж.BackendLogID } -func (v HostinfoView) OS() string { return v.ж.OS } -func (v HostinfoView) OSVersion() string { return v.ж.OSVersion } -func (v HostinfoView) Container() opt.Bool { return v.ж.Container } -func (v HostinfoView) Env() string { return v.ж.Env } -func (v HostinfoView) Distro() string { return v.ж.Distro } -func (v HostinfoView) DistroVersion() string { return v.ж.DistroVersion } -func (v HostinfoView) DistroCodeName() string { return v.ж.DistroCodeName } -func (v HostinfoView) App() string { return v.ж.App } -func (v HostinfoView) Desktop() opt.Bool { return v.ж.Desktop } -func (v HostinfoView) Package() string { return v.ж.Package } -func (v HostinfoView) DeviceModel() string { return v.ж.DeviceModel } -func (v HostinfoView) PushDeviceToken() string { return v.ж.PushDeviceToken } -func (v HostinfoView) Hostname() string { return v.ж.Hostname } -func (v HostinfoView) ShieldsUp() bool { return v.ж.ShieldsUp } -func (v HostinfoView) ShareeNode() bool { return v.ж.ShareeNode } -func (v HostinfoView) NoLogsNoSupport() bool { return v.ж.NoLogsNoSupport } -func (v HostinfoView) WireIngress() bool { return v.ж.WireIngress } -func (v HostinfoView) IngressEnabled() bool { return v.ж.IngressEnabled } -func (v HostinfoView) AllowsUpdate() bool { return v.ж.AllowsUpdate } -func (v HostinfoView) Machine() string { return v.ж.Machine } -func (v HostinfoView) GoArch() string { return v.ж.GoArch } -func (v HostinfoView) GoArchVar() string { return v.ж.GoArchVar } -func (v 
HostinfoView) GoVersion() string { return v.ж.GoVersion } +// version of this code (in version.Long format) +func (v HostinfoView) IPNVersion() string { return v.ж.IPNVersion } + +// logtail ID of frontend instance +func (v HostinfoView) FrontendLogID() string { return v.ж.FrontendLogID } + +// logtail ID of backend instance +func (v HostinfoView) BackendLogID() string { return v.ж.BackendLogID } + +// operating system the client runs on (a version.OS value) +func (v HostinfoView) OS() string { return v.ж.OS } + +// OSVersion is the version of the OS, if available. +// +// For Android, it's like "10", "11", "12", etc. For iOS and macOS it's like +// "15.6.1" or "12.4.0". For Windows it's like "10.0.19044.1889". For +// FreeBSD it's like "12.3-STABLE". +// +// For Linux, prior to Tailscale 1.32, we jammed a bunch of fields into this +// string on Linux, like "Debian 10.4; kernel=xxx; container; env=kn" and so +// on. As of Tailscale 1.32, this is simply the kernel version on Linux, like +// "5.10.0-17-amd64". +func (v HostinfoView) OSVersion() string { return v.ж.OSVersion } + +// best-effort whether the client is running in a container +func (v HostinfoView) Container() opt.Bool { return v.ж.Container } + +// a hostinfo.EnvType in string form +func (v HostinfoView) Env() string { return v.ж.Env } + +// "debian", "ubuntu", "nixos", ... +func (v HostinfoView) Distro() string { return v.ж.Distro } + +// "20.04", ... +func (v HostinfoView) DistroVersion() string { return v.ж.DistroVersion } + +// "jammy", "bullseye", ... +func (v HostinfoView) DistroCodeName() string { return v.ж.DistroCodeName } + +// App is used to disambiguate Tailscale clients that run using tsnet. 
+func (v HostinfoView) App() string { return v.ж.App } + +// if a desktop was detected on Linux +func (v HostinfoView) Desktop() opt.Bool { return v.ж.Desktop } + +// Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) +func (v HostinfoView) Package() string { return v.ж.Package } + +// mobile phone model ("Pixel 3a", "iPhone12,3") +func (v HostinfoView) DeviceModel() string { return v.ж.DeviceModel } + +// macOS/iOS APNs device token for notifications (and Android in the future) +func (v HostinfoView) PushDeviceToken() string { return v.ж.PushDeviceToken } + +// name of the host the client runs on +func (v HostinfoView) Hostname() string { return v.ж.Hostname } + +// indicates whether the host is blocking incoming connections +func (v HostinfoView) ShieldsUp() bool { return v.ж.ShieldsUp } + +// indicates this node exists in netmap because it's owned by a shared-to user +func (v HostinfoView) ShareeNode() bool { return v.ж.ShareeNode } + +// indicates that the user has opted out of sending logs and support +func (v HostinfoView) NoLogsNoSupport() bool { return v.ж.NoLogsNoSupport } + +// WireIngress indicates that the node would like to be wired up server-side +// (DNS, etc) to be able to use Tailscale Funnel, even if it's not currently +// enabled. For example, the user might only use it for intermittent +// foreground CLI serve sessions, for which they'd like it to work right +// away, even if it's disabled most of the time. As an optimization, this is +// only sent if IngressEnabled is false, as IngressEnabled implies that this +// option is true. 
+func (v HostinfoView) WireIngress() bool { return v.ж.WireIngress } + +// if the node has any funnel endpoint enabled +func (v HostinfoView) IngressEnabled() bool { return v.ж.IngressEnabled } + +// indicates that the node has opted-in to admin-console-drive remote updates +func (v HostinfoView) AllowsUpdate() bool { return v.ж.AllowsUpdate } + +// the current host's machine type (uname -m) +func (v HostinfoView) Machine() string { return v.ж.Machine } + +// GOARCH value (of the built binary) +func (v HostinfoView) GoArch() string { return v.ж.GoArch } + +// GOARM, GOAMD64, etc (of the built binary) +func (v HostinfoView) GoArchVar() string { return v.ж.GoArchVar } + +// Go version binary was built with +func (v HostinfoView) GoVersion() string { return v.ж.GoVersion } + +// set of IP ranges this client can route func (v HostinfoView) RoutableIPs() views.Slice[netip.Prefix] { return views.SliceOf(v.ж.RoutableIPs) } -func (v HostinfoView) RequestTags() views.Slice[string] { return views.SliceOf(v.ж.RequestTags) } -func (v HostinfoView) WoLMACs() views.Slice[string] { return views.SliceOf(v.ж.WoLMACs) } -func (v HostinfoView) Services() views.Slice[Service] { return views.SliceOf(v.ж.Services) } -func (v HostinfoView) NetInfo() NetInfoView { return v.ж.NetInfo.View() } -func (v HostinfoView) SSH_HostKeys() views.Slice[string] { return views.SliceOf(v.ж.SSH_HostKeys) } -func (v HostinfoView) Cloud() string { return v.ж.Cloud } -func (v HostinfoView) Userspace() opt.Bool { return v.ж.Userspace } -func (v HostinfoView) UserspaceRouter() opt.Bool { return v.ж.UserspaceRouter } -func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector } -func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash } -func (v HostinfoView) ExitNodeID() StableNodeID { return v.ж.ExitNodeID } -func (v HostinfoView) Location() LocationView { return v.ж.Location.View() } -func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.ж.TPM) } 
+// set of ACL tags this node wants to claim +func (v HostinfoView) RequestTags() views.Slice[string] { return views.SliceOf(v.ж.RequestTags) } + +// MAC address(es) to send Wake-on-LAN packets to wake this node (lowercase hex w/ colons) +func (v HostinfoView) WoLMACs() views.Slice[string] { return views.SliceOf(v.ж.WoLMACs) } + +// services advertised by this machine +func (v HostinfoView) Services() views.Slice[Service] { return views.SliceOf(v.ж.Services) } +func (v HostinfoView) NetInfo() NetInfoView { return v.ж.NetInfo.View() } + +// if advertised +func (v HostinfoView) SSH_HostKeys() views.Slice[string] { return views.SliceOf(v.ж.SSH_HostKeys) } +func (v HostinfoView) Cloud() string { return v.ж.Cloud } + +// if the client is running in userspace (netstack) mode +func (v HostinfoView) Userspace() opt.Bool { return v.ж.Userspace } + +// if the client's subnet router is running in userspace (netstack) mode +func (v HostinfoView) UserspaceRouter() opt.Bool { return v.ж.UserspaceRouter } + +// if the client is running the app-connector service +func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector } + +// opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n +func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash } + +// the client’s selected exit node, empty when unselected. +func (v HostinfoView) ExitNodeID() StableNodeID { return v.ж.ExitNodeID } + +// Location represents geographical location data about a +// Tailscale host. Location is optional and only set if +// explicitly declared by a node. +func (v HostinfoView) Location() LocationView { return v.ж.Location.View() } + +// TPM device metadata, if available +func (v HostinfoView) TPM() views.ValuePointer[TPMInfo] { return views.ValuePointerOf(v.ж.TPM) } + +// StateEncrypted reports whether the node state is stored encrypted on +// disk. 
The actual mechanism is platform-specific: +// - Apple nodes use the Keychain +// - Linux and Windows nodes use the TPM +// - Android apps use EncryptedSharedPreferences func (v HostinfoView) StateEncrypted() opt.Bool { return v.ж.StateEncrypted } func (v HostinfoView) Equal(v2 HostinfoView) bool { return v.ж.Equal(v2.ж) } @@ -487,22 +737,74 @@ func (v *NetInfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// MappingVariesByDestIP says whether the host's NAT mappings +// vary based on the destination IP. func (v NetInfoView) MappingVariesByDestIP() opt.Bool { return v.ж.MappingVariesByDestIP } -func (v NetInfoView) HairPinning() opt.Bool { return v.ж.HairPinning } -func (v NetInfoView) WorkingIPv6() opt.Bool { return v.ж.WorkingIPv6 } -func (v NetInfoView) OSHasIPv6() opt.Bool { return v.ж.OSHasIPv6 } -func (v NetInfoView) WorkingUDP() opt.Bool { return v.ж.WorkingUDP } -func (v NetInfoView) WorkingICMPv4() opt.Bool { return v.ж.WorkingICMPv4 } -func (v NetInfoView) HavePortMap() bool { return v.ж.HavePortMap } -func (v NetInfoView) UPnP() opt.Bool { return v.ж.UPnP } -func (v NetInfoView) PMP() opt.Bool { return v.ж.PMP } -func (v NetInfoView) PCP() opt.Bool { return v.ж.PCP } -func (v NetInfoView) PreferredDERP() int { return v.ж.PreferredDERP } -func (v NetInfoView) LinkType() string { return v.ж.LinkType } +// HairPinning is whether their router does hairpinning. +// It reports true even if there's no NAT involved. +func (v NetInfoView) HairPinning() opt.Bool { return v.ж.HairPinning } + +// WorkingIPv6 is whether the host has IPv6 internet connectivity. +func (v NetInfoView) WorkingIPv6() opt.Bool { return v.ж.WorkingIPv6 } + +// OSHasIPv6 is whether the OS supports IPv6 at all, regardless of +// whether IPv6 internet connectivity is available. +func (v NetInfoView) OSHasIPv6() opt.Bool { return v.ж.OSHasIPv6 } + +// WorkingUDP is whether the host has UDP internet connectivity. 
+func (v NetInfoView) WorkingUDP() opt.Bool { return v.ж.WorkingUDP } + +// WorkingICMPv4 is whether ICMPv4 works. +// Empty means not checked. +func (v NetInfoView) WorkingICMPv4() opt.Bool { return v.ж.WorkingICMPv4 } + +// HavePortMap is whether we have an existing portmap open +// (UPnP, PMP, or PCP). +func (v NetInfoView) HavePortMap() bool { return v.ж.HavePortMap } + +// UPnP is whether UPnP appears present on the LAN. +// Empty means not checked. +func (v NetInfoView) UPnP() opt.Bool { return v.ж.UPnP } + +// PMP is whether NAT-PMP appears present on the LAN. +// Empty means not checked. +func (v NetInfoView) PMP() opt.Bool { return v.ж.PMP } + +// PCP is whether PCP appears present on the LAN. +// Empty means not checked. +func (v NetInfoView) PCP() opt.Bool { return v.ж.PCP } + +// PreferredDERP is this node's preferred (home) DERP region ID. +// This is where the node expects to be contacted to begin a +// peer-to-peer connection. The node might be temporarily +// connected to multiple DERP servers (to speak to other nodes +// that are located elsewhere) but PreferredDERP is the region ID +// that the node subscribes to traffic at. +// Zero means disconnected or unknown. +func (v NetInfoView) PreferredDERP() int { return v.ж.PreferredDERP } + +// LinkType is the current link type, if known. +func (v NetInfoView) LinkType() string { return v.ж.LinkType } + +// DERPLatency is the fastest recent time to reach various +// DERP STUN servers, in seconds. The map key is the +// "regionID-v4" or "-v6"; it was previously the DERP server's +// STUN host:port. +// +// This should only be updated rarely, or when there's a +// material change, as any change here also gets uploaded to +// the control plane. 
func (v NetInfoView) DERPLatency() views.Map[string, float64] { return views.MapOf(v.ж.DERPLatency) } -func (v NetInfoView) FirewallMode() string { return v.ж.FirewallMode } -func (v NetInfoView) String() string { return v.ж.String() } + +// FirewallMode encodes both which firewall mode was selected and why. +// It is Linux-specific (at least as of 2023-08-19) and is meant to help +// debug iptables-vs-nftables issues. The string is of the form +// "{nft,ift}-REASON", like "nft-forced" or "ipt-default". Empty means +// either not Linux or a configuration in which the host firewall rules +// are not managed by tailscaled. +func (v NetInfoView) FirewallMode() string { return v.ж.FirewallMode } +func (v NetInfoView) String() string { return v.ж.String() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _NetInfoViewNeedsRegeneration = NetInfo(struct { @@ -589,10 +891,19 @@ func (v *LoginView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v LoginView) ID() LoginID { return v.ж.ID } -func (v LoginView) Provider() string { return v.ж.Provider } -func (v LoginView) LoginName() string { return v.ж.LoginName } -func (v LoginView) DisplayName() string { return v.ж.DisplayName } +// unused in the Tailscale client +func (v LoginView) ID() LoginID { return v.ж.ID } + +// "google", "github", "okta_foo", etc. +func (v LoginView) Provider() string { return v.ж.Provider } + +// an email address or "email-ish" string (like alice@github) +func (v LoginView) LoginName() string { return v.ж.LoginName } + +// from the IdP +func (v LoginView) DisplayName() string { return v.ж.DisplayName } + +// from the IdP func (v LoginView) ProfilePicURL() string { return v.ж.ProfilePicURL } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
@@ -672,26 +983,82 @@ func (v *DNSConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// Resolvers are the DNS resolvers to use, in order of preference. func (v DNSConfigView) Resolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.ж.Resolvers) } +// Routes maps DNS name suffixes to a set of DNS resolvers to +// use. It is used to implement "split DNS" and other advanced DNS +// routing overlays. +// +// Map keys are fully-qualified DNS name suffixes; they may +// optionally contain a trailing dot but no leading dot. +// +// If the value is an empty slice, that means the suffix should still +// be handled by Tailscale's built-in resolver (100.100.100.100), such +// as for the purpose of handling ExtraRecords. func (v DNSConfigView) Routes() views.MapFn[string, []*dnstype.Resolver, views.SliceView[*dnstype.Resolver, dnstype.ResolverView]] { return views.MapFnOf(v.ж.Routes, func(t []*dnstype.Resolver) views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](t) }) } + +// FallbackResolvers is like Resolvers, but is only used if a +// split DNS configuration is requested in a configuration that +// doesn't work yet without explicit default resolvers. +// https://github.com/tailscale/tailscale/issues/1743 func (v DNSConfigView) FallbackResolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.ж.FallbackResolvers) } -func (v DNSConfigView) Domains() views.Slice[string] { return views.SliceOf(v.ж.Domains) } -func (v DNSConfigView) Proxied() bool { return v.ж.Proxied } + +// Domains are the search domains to use. +// Search domains must be FQDNs, but *without* the trailing dot. 
+func (v DNSConfigView) Domains() views.Slice[string] { return views.SliceOf(v.ж.Domains) } + +// Proxied turns on automatic resolution of hostnames for devices +// in the network map, aka MagicDNS. +// Despite the (legacy) name, does not necessarily cause request +// proxying to be enabled. +func (v DNSConfigView) Proxied() bool { return v.ж.Proxied } + +// Nameservers are the IP addresses of the global nameservers to use. +// +// Deprecated: this is only set and used by MapRequest.Version >=9 and <14. Use Resolvers instead. func (v DNSConfigView) Nameservers() views.Slice[netip.Addr] { return views.SliceOf(v.ж.Nameservers) } -func (v DNSConfigView) CertDomains() views.Slice[string] { return views.SliceOf(v.ж.CertDomains) } + +// CertDomains are the set of DNS names for which the control +// plane server will assist with provisioning TLS +// certificates. See SetDNSRequest, which can be used to +// answer dns-01 ACME challenges for e.g. LetsEncrypt. +// These names are FQDNs without trailing periods, and without +// any "_acme-challenge." prefix. +func (v DNSConfigView) CertDomains() views.Slice[string] { return views.SliceOf(v.ж.CertDomains) } + +// ExtraRecords contains extra DNS records to add to the +// MagicDNS config. func (v DNSConfigView) ExtraRecords() views.Slice[DNSRecord] { return views.SliceOf(v.ж.ExtraRecords) } + +// ExitNodeFilteredSuffixes are the DNS suffixes that the +// node, when being an exit node DNS proxy, should not answer. +// +// The entries do not contain trailing periods and are always +// all lowercase. +// +// If an entry starts with a period, it's a suffix match (but +// suffix ".a.b" doesn't match "a.b"; a prefix is required). +// +// If an entry does not start with a period, it's an exact +// match. +// +// Matches are case insensitive. 
func (v DNSConfigView) ExitNodeFilteredSet() views.Slice[string] { return views.SliceOf(v.ж.ExitNodeFilteredSet) } + +// TempCorpIssue13969 is a temporary (2023-08-16) field for an internal hack day prototype. +// It contains a user inputed URL that should have a list of domains to be blocked. +// See https://github.com/tailscale/corp/issues/13969. func (v DNSConfigView) TempCorpIssue13969() string { return v.ж.TempCorpIssue13969 } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -775,14 +1142,26 @@ func (v *RegisterResponseView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v RegisterResponseView) User() User { return v.ж.User } -func (v RegisterResponseView) Login() Login { return v.ж.Login } -func (v RegisterResponseView) NodeKeyExpired() bool { return v.ж.NodeKeyExpired } +func (v RegisterResponseView) User() User { return v.ж.User } +func (v RegisterResponseView) Login() Login { return v.ж.Login } + +// if true, the NodeKey needs to be replaced +func (v RegisterResponseView) NodeKeyExpired() bool { return v.ж.NodeKeyExpired } + +// TODO(crawshaw): move to using MachineStatus func (v RegisterResponseView) MachineAuthorized() bool { return v.ж.MachineAuthorized } -func (v RegisterResponseView) AuthURL() string { return v.ж.AuthURL } + +// if set, authorization pending +func (v RegisterResponseView) AuthURL() string { return v.ж.AuthURL } + +// If set, this is the current node-key signature that needs to be +// re-signed for the node's new node-key. func (v RegisterResponseView) NodeKeySignature() views.ByteSlice[tkatype.MarshaledSignature] { return views.ByteSliceOf(v.ж.NodeKeySignature) } + +// Error indicates that authorization failed. If this is non-empty, +// other status fields should be ignored. func (v RegisterResponseView) Error() string { return v.ж.Error } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
@@ -863,6 +1242,7 @@ func (v *RegisterResponseAuthView) UnmarshalJSONFrom(dec *jsontext.Decoder) erro return nil } +// used by pre-1.66 Android only func (v RegisterResponseAuthView) Oauth2Token() views.ValuePointer[Oauth2Token] { return views.ValuePointerOf(v.ж.Oauth2Token) } @@ -943,29 +1323,69 @@ func (v *RegisterRequestView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// Version is the client's capabilities when using the Noise +// transport. +// +// When using the original nacl crypto_box transport, the +// value must be 1. func (v RegisterRequestView) Version() CapabilityVersion { return v.ж.Version } func (v RegisterRequestView) NodeKey() key.NodePublic { return v.ж.NodeKey } func (v RegisterRequestView) OldNodeKey() key.NodePublic { return v.ж.OldNodeKey } func (v RegisterRequestView) NLKey() key.NLPublic { return v.ж.NLKey } func (v RegisterRequestView) Auth() RegisterResponseAuthView { return v.ж.Auth.View() } -func (v RegisterRequestView) Expiry() time.Time { return v.ж.Expiry } -func (v RegisterRequestView) Followup() string { return v.ж.Followup } -func (v RegisterRequestView) Hostinfo() HostinfoView { return v.ж.Hostinfo.View() } -func (v RegisterRequestView) Ephemeral() bool { return v.ж.Ephemeral } + +// Expiry optionally specifies the requested key expiry. +// The server policy may override. +// As a special case, if Expiry is in the past and NodeKey is +// the node's current key, the key is expired. +func (v RegisterRequestView) Expiry() time.Time { return v.ж.Expiry } + +// response waits until AuthURL is visited +func (v RegisterRequestView) Followup() string { return v.ж.Followup } +func (v RegisterRequestView) Hostinfo() HostinfoView { return v.ж.Hostinfo.View() } + +// Ephemeral is whether the client is requesting that this +// node be considered ephemeral and be automatically deleted +// when it stops being active. 
+func (v RegisterRequestView) Ephemeral() bool { return v.ж.Ephemeral } + +// NodeKeySignature is the node's own node-key signature, re-signed +// for its new node key using its network-lock key. +// +// This field is set when the client retries registration after learning +// its NodeKeySignature (which is in need of rotation). func (v RegisterRequestView) NodeKeySignature() views.ByteSlice[tkatype.MarshaledSignature] { return views.ByteSliceOf(v.ж.NodeKeySignature) } + +// The following fields are not used for SignatureNone and are required for +// SignatureV1: func (v RegisterRequestView) SignatureType() SignatureType { return v.ж.SignatureType } + +// creation time of request to prevent replay func (v RegisterRequestView) Timestamp() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.Timestamp) } +// X.509 certificate for client device func (v RegisterRequestView) DeviceCert() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.DeviceCert) } + +// as described by SignatureType func (v RegisterRequestView) Signature() views.ByteSlice[[]byte] { return views.ByteSliceOf(v.ж.Signature) } + +// Tailnet is an optional identifier specifying the name of the recommended or required +// network that the node should join. Its exact form should not be depended on; new +// forms are coming later. The identifier is generally a domain name (for an organization) +// or e-mail address (for a personal account on a shared e-mail provider). It is the same name +// used by the API, as described in /api.md#tailnet. +// If Tailnet begins with the prefix "required:" then the server should prevent logging in to a different +// network than the one specified. Otherwise, the server should recommend the specified network +// but still permit logging in to other networks. +// If empty, no recommendation is offered to the server and the login page should show all options. 
func (v RegisterRequestView) Tailnet() string { return v.ж.Tailnet } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -1055,6 +1475,19 @@ func (v *DERPHomeParamsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// RegionScore scales latencies of DERP regions by a given scaling +// factor when determining which region to use as the home +// ("preferred") DERP. Scores in the range (0, 1) will cause this +// region to be proportionally more preferred, and scores in the range +// (1, ∞) will penalize a region. +// +// If a region is not present in this map, it is treated as having a +// score of 1.0. +// +// Scores should not be 0 or negative; such scores will be ignored. +// +// A nil map means no change from the previous value (if any); an empty +// non-nil map can be sent to reset all scores back to 1.0. func (v DERPHomeParamsView) RegionScore() views.Map[int, float64] { return views.MapOf(v.ж.RegionScore) } @@ -1131,13 +1564,71 @@ func (v *DERPRegionView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v DERPRegionView) RegionID() int { return v.ж.RegionID } -func (v DERPRegionView) RegionCode() string { return v.ж.RegionCode } -func (v DERPRegionView) RegionName() string { return v.ж.RegionName } -func (v DERPRegionView) Latitude() float64 { return v.ж.Latitude } -func (v DERPRegionView) Longitude() float64 { return v.ж.Longitude } -func (v DERPRegionView) Avoid() bool { return v.ж.Avoid } +// RegionID is a unique integer for a geographic region. +// +// It corresponds to the legacy derpN.tailscale.com hostnames +// used by older clients. (Older clients will continue to resolve +// derpN.tailscale.com when contacting peers, rather than use +// the server-provided DERPMap) +// +// RegionIDs must be non-zero, positive, and guaranteed to fit +// in a JavaScript number. +// +// RegionIDs in range 900-999 are reserved for end users to run their +// own DERP nodes. 
+func (v DERPRegionView) RegionID() int { return v.ж.RegionID } + +// RegionCode is a short name for the region. It's usually a popular +// city or airport code in the region: "nyc", "sf", "sin", +// "fra", etc. +func (v DERPRegionView) RegionCode() string { return v.ж.RegionCode } + +// RegionName is a long English name for the region: "New York City", +// "San Francisco", "Singapore", "Frankfurt", etc. +func (v DERPRegionView) RegionName() string { return v.ж.RegionName } + +// Latitude, Longitude are optional geographical coordinates of the DERP region's city, in degrees. +func (v DERPRegionView) Latitude() float64 { return v.ж.Latitude } +func (v DERPRegionView) Longitude() float64 { return v.ж.Longitude } + +// Avoid is whether the client should avoid picking this as its home region. +// The region should only be used if a peer is there. Clients already using +// this region as their home should migrate away to a new region without +// Avoid set. +// +// Deprecated: because of bugs in past implementations combined with unclear +// docs that caused people to think the bugs were intentional, this field is +// deprecated. It was never supposed to cause STUN/DERP measurement probes, +// but due to bugs, it sometimes did. And then some parts of the code began +// to rely on that property. But then we were unable to use this field for +// its original purpose, nor its later imagined purpose, because various +// parts of the codebase thought it meant one thing and others thought it +// meant another. But it did something in the middle instead. So we're retiring +// it. Use NoMeasureNoHome instead. +func (v DERPRegionView) Avoid() bool { return v.ж.Avoid } + +// NoMeasureNoHome says that this regions should not be measured for its +// latency distance (STUN, HTTPS, etc) or availability (e.g. captive portal +// checks) and should never be selected as the node's home region. 
However, +// if a peer declares this region as its home, then this client is allowed +// to connect to it for the purpose of communicating with that peer. +// +// This is what the now deprecated Avoid bool was supposed to mean +// originally but had implementation bugs and documentation omissions. func (v DERPRegionView) NoMeasureNoHome() bool { return v.ж.NoMeasureNoHome } + +// Nodes are the DERP nodes running in this region, in +// priority order for the current client. Client TLS +// connections should ideally only go to the first entry +// (falling back to the second if necessary). STUN packets +// should go to the first 1 or 2. +// +// If nodes within a region route packets amongst themselves, +// but not to other regions. That said, each user/domain +// should get the same preferred node order, so if all nodes +// for a user/network pick the first one (as they should, when +// things are healthy), the inter-cluster routing is minimal +// to zero. func (v DERPRegionView) Nodes() views.SliceView[*DERPNode, DERPNodeView] { return views.SliceOfViews[*DERPNode, DERPNodeView](v.ж.Nodes) } @@ -1221,13 +1712,26 @@ func (v *DERPMapView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// HomeParams, if non-nil, is a change in home parameters. +// +// The rest of the DERPMap fields, if zero, means unchanged. func (v DERPMapView) HomeParams() DERPHomeParamsView { return v.ж.HomeParams.View() } +// Regions is the set of geographic regions running DERP node(s). +// +// It's keyed by the DERPRegion.RegionID. +// +// The numbers are not necessarily contiguous. func (v DERPMapView) Regions() views.MapFn[int, *DERPRegion, DERPRegionView] { return views.MapFnOf(v.ж.Regions, func(t *DERPRegion) DERPRegionView { return t.View() }) } + +// OmitDefaultRegions specifies to not use Tailscale's DERP servers, and only use those +// specified in this DERPMap. If there are none set outside of the defaults, this is a noop. 
+// +// This field is only meaningful if the Regions map is non-nil (indicating a change). func (v DERPMapView) OmitDefaultRegions() bool { return v.ж.OmitDefaultRegions } // A compilation failure here means this code must be regenerated, with the command at the top of this file. @@ -1304,18 +1808,74 @@ func (v *DERPNodeView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v DERPNodeView) Name() string { return v.ж.Name } -func (v DERPNodeView) RegionID() int { return v.ж.RegionID } -func (v DERPNodeView) HostName() string { return v.ж.HostName } -func (v DERPNodeView) CertName() string { return v.ж.CertName } -func (v DERPNodeView) IPv4() string { return v.ж.IPv4 } -func (v DERPNodeView) IPv6() string { return v.ж.IPv6 } -func (v DERPNodeView) STUNPort() int { return v.ж.STUNPort } -func (v DERPNodeView) STUNOnly() bool { return v.ж.STUNOnly } -func (v DERPNodeView) DERPPort() int { return v.ж.DERPPort } +// Name is a unique node name (across all regions). +// It is not a host name. +// It's typically of the form "1b", "2a", "3b", etc. (region +// ID + suffix within that region) +func (v DERPNodeView) Name() string { return v.ж.Name } + +// RegionID is the RegionID of the DERPRegion that this node +// is running in. +func (v DERPNodeView) RegionID() int { return v.ж.RegionID } + +// HostName is the DERP node's hostname. +// +// It is required but need not be unique; multiple nodes may +// have the same HostName but vary in configuration otherwise. +func (v DERPNodeView) HostName() string { return v.ж.HostName } + +// CertName optionally specifies the expected TLS cert common +// name. If empty, HostName is used. If CertName is non-empty, +// HostName is only used for the TCP dial (if IPv4/IPv6 are +// not present) + TLS ClientHello. +// +// As a special case, if CertName starts with "sha256-raw:", +// then the rest of the string is a hex-encoded SHA256 of the +// cert to expect. This is used for self-signed certs. 
+// In this case, the HostName field will typically be an IP +// address literal. +func (v DERPNodeView) CertName() string { return v.ж.CertName } + +// IPv4 optionally forces an IPv4 address to use, instead of using DNS. +// If empty, A record(s) from DNS lookups of HostName are used. +// If the string is not an IPv4 address, IPv4 is not used; the +// conventional string to disable IPv4 (and not use DNS) is +// "none". +func (v DERPNodeView) IPv4() string { return v.ж.IPv4 } + +// IPv6 optionally forces an IPv6 address to use, instead of using DNS. +// If empty, AAAA record(s) from DNS lookups of HostName are used. +// If the string is not an IPv6 address, IPv6 is not used; the +// conventional string to disable IPv6 (and not use DNS) is +// "none". +func (v DERPNodeView) IPv6() string { return v.ж.IPv6 } + +// Port optionally specifies a STUN port to use. +// Zero means 3478. +// To disable STUN on this node, use -1. +func (v DERPNodeView) STUNPort() int { return v.ж.STUNPort } + +// STUNOnly marks a node as only a STUN server and not a DERP +// server. +func (v DERPNodeView) STUNOnly() bool { return v.ж.STUNOnly } + +// DERPPort optionally provides an alternate TLS port number +// for the DERP HTTPS server. +// +// If zero, 443 is used. +func (v DERPNodeView) DERPPort() int { return v.ж.DERPPort } + +// InsecureForTests is used by unit tests to disable TLS verification. +// It should not be set by users. func (v DERPNodeView) InsecureForTests() bool { return v.ж.InsecureForTests } -func (v DERPNodeView) STUNTestIP() string { return v.ж.STUNTestIP } -func (v DERPNodeView) CanPort80() bool { return v.ж.CanPort80 } + +// STUNTestIP is used in tests to override the STUN server's IP. +// If empty, it's assumed to be the same as the DERP server. +func (v DERPNodeView) STUNTestIP() string { return v.ж.STUNTestIP } + +// CanPort80 specifies whether this DERP node is accessible over HTTP +// on port 80 specifically. This is used for captive portal checks. 
+func (v DERPNodeView) CanPort80() bool { return v.ж.CanPort80 } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _DERPNodeViewNeedsRegeneration = DERPNode(struct { @@ -1400,17 +1960,49 @@ func (v *SSHRuleView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// RuleExpires, if non-nil, is when this rule expires. +// +// For example, a (principal,sshuser) tuple might be granted +// prompt-free SSH access for N minutes, so this rule would be +// before a expiration-free rule for the same principal that +// required an auth prompt. This permits the control plane to +// be out of the path for already-authorized SSH pairs. +// +// Once a rule matches, the lifetime of any accepting connection +// is subject to the SSHAction.SessionExpires time, if any. func (v SSHRuleView) RuleExpires() views.ValuePointer[time.Time] { return views.ValuePointerOf(v.ж.RuleExpires) } +// Principals matches an incoming connection. If the connection +// matches anything in this list and also matches SSHUsers, +// then Action is applied. func (v SSHRuleView) Principals() views.SliceView[*SSHPrincipal, SSHPrincipalView] { return views.SliceOfViews[*SSHPrincipal, SSHPrincipalView](v.ж.Principals) } +// SSHUsers are the SSH users that this rule matches. It is a +// map from either ssh-user|"*" => local-user. The map must +// contain a key for either ssh-user or, as a fallback, "*" to +// match anything. If it does, the map entry's value is the +// actual user that's logged in. +// If the map value is the empty string (for either the +// requested SSH user or "*"), the rule doesn't match. +// If the map value is "=", it means the ssh-user should map +// directly to the local-user. +// It may be nil if the Action is reject. 
func (v SSHRuleView) SSHUsers() views.Map[string, string] { return views.MapOf(v.ж.SSHUsers) } -func (v SSHRuleView) Action() SSHActionView { return v.ж.Action.View() } -func (v SSHRuleView) AcceptEnv() views.Slice[string] { return views.SliceOf(v.ж.AcceptEnv) } + +// Action is the outcome to task. +// A nil or invalid action means to deny. +func (v SSHRuleView) Action() SSHActionView { return v.ж.Action.View() } + +// AcceptEnv is a slice of environment variable names that are allowlisted +// for the SSH rule in the policy file. +// +// AcceptEnv values may contain * and ? wildcard characters which match against +// an arbitrary number of characters or a single character respectively. +func (v SSHRuleView) AcceptEnv() views.Slice[string] { return views.SliceOf(v.ж.AcceptEnv) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _SSHRuleViewNeedsRegeneration = SSHRule(struct { @@ -1488,15 +2080,61 @@ func (v *SSHActionView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v SSHActionView) Message() string { return v.ж.Message } -func (v SSHActionView) Reject() bool { return v.ж.Reject } -func (v SSHActionView) Accept() bool { return v.ж.Accept } -func (v SSHActionView) SessionDuration() time.Duration { return v.ж.SessionDuration } -func (v SSHActionView) AllowAgentForwarding() bool { return v.ж.AllowAgentForwarding } -func (v SSHActionView) HoldAndDelegate() string { return v.ж.HoldAndDelegate } -func (v SSHActionView) AllowLocalPortForwarding() bool { return v.ж.AllowLocalPortForwarding } -func (v SSHActionView) AllowRemotePortForwarding() bool { return v.ж.AllowRemotePortForwarding } +// Message, if non-empty, is shown to the user before the +// action occurs. +func (v SSHActionView) Message() string { return v.ж.Message } + +// Reject, if true, terminates the connection. This action +// has higher priority that Accept, if given. 
+// The reason this exists is primarily so a response
+// from HoldAndDelegate has a way to stop the poll.
+func (v SSHActionView) Reject() bool { return v.ж.Reject }
+
+// Accept, if true, accepts the connection immediately
+// without further prompts.
+func (v SSHActionView) Accept() bool { return v.ж.Accept }
+
+// SessionDuration, if non-zero, is how long the session can stay open
+// before being forcefully terminated.
+func (v SSHActionView) SessionDuration() time.Duration { return v.ж.SessionDuration }
+
+// AllowAgentForwarding, if true, allows accepted connections to forward
+// the ssh agent if requested.
+func (v SSHActionView) AllowAgentForwarding() bool { return v.ж.AllowAgentForwarding }
+
+// HoldAndDelegate, if non-empty, is a URL that serves an
+// outcome verdict. The connection will be accepted and will
+// block until the provided long-polling URL serves a new
+// SSHAction JSON value. The URL must be fetched using the
+// Noise transport (in package control/control{base,http}).
+// If the long poll breaks before returning a complete HTTP
+// response, it should be re-fetched as long as the SSH
+// session is open.
+//
+// The following variables in the URL are expanded by tailscaled:
+//
+// - $SRC_NODE_IP (URL escaped)
+// - $SRC_NODE_ID (Node.ID as int64 string)
+// - $DST_NODE_IP (URL escaped)
+// - $DST_NODE_ID (Node.ID as int64 string)
+// - $SSH_USER (URL escaped, ssh user requested)
+// - $LOCAL_USER (URL escaped, local user mapped)
+func (v SSHActionView) HoldAndDelegate() string { return v.ж.HoldAndDelegate }
+
+// AllowLocalPortForwarding, if true, allows accepted connections
+// to use local port forwarding if requested.
+func (v SSHActionView) AllowLocalPortForwarding() bool { return v.ж.AllowLocalPortForwarding }
+
+// AllowRemotePortForwarding, if true, allows accepted connections
+// to use remote port forwarding if requested.
+func (v SSHActionView) AllowRemotePortForwarding() bool { return v.ж.AllowRemotePortForwarding } + +// Recorders defines the destinations of the SSH session recorders. +// The recording will be uploaded to http://addr:port/record. func (v SSHActionView) Recorders() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Recorders) } + +// OnRecorderFailure is the action to take if recording fails. +// If nil, the default action is to fail open. func (v SSHActionView) OnRecordingFailure() views.ValuePointer[SSHRecorderFailureAction] { return views.ValuePointerOf(v.ж.OnRecordingFailure) } @@ -1584,8 +2222,19 @@ func (v *SSHPrincipalView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v SSHPrincipalView) Node() StableNodeID { return v.ж.Node } func (v SSHPrincipalView) NodeIP() string { return v.ж.NodeIP } -func (v SSHPrincipalView) UserLogin() string { return v.ж.UserLogin } -func (v SSHPrincipalView) Any() bool { return v.ж.Any } + +// email-ish: foo@example.com, bar@github +func (v SSHPrincipalView) UserLogin() string { return v.ж.UserLogin } + +// if true, match any connection +func (v SSHPrincipalView) Any() bool { return v.ж.Any } + +// UnusedPubKeys was public key support. It never became an official product +// feature and so as of 2024-12-12 is being removed. +// This stub exists to remind us not to re-use the JSON field name "pubKeys" +// in the future if we bring it back with different semantics. +// +// Deprecated: do not use. It does nothing. func (v SSHPrincipalView) UnusedPubKeys() views.Slice[string] { return views.SliceOf(v.ж.UnusedPubKeys) } @@ -1666,6 +2315,7 @@ func (v *ControlDialPlanView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// An empty list means the default: use DNS (unspecified which DNS). 
func (v ControlDialPlanView) Candidates() views.Slice[ControlIPCandidate] { return views.SliceOf(v.ж.Candidates) } @@ -1742,13 +2392,35 @@ func (v *LocationView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v LocationView) Country() string { return v.ж.Country } +// User friendly country name, with proper capitalization ("Canada") +func (v LocationView) Country() string { return v.ж.Country } + +// ISO 3166-1 alpha-2 in upper case ("CA") func (v LocationView) CountryCode() string { return v.ж.CountryCode } -func (v LocationView) City() string { return v.ж.City } -func (v LocationView) CityCode() string { return v.ж.CityCode } -func (v LocationView) Latitude() float64 { return v.ж.Latitude } -func (v LocationView) Longitude() float64 { return v.ж.Longitude } -func (v LocationView) Priority() int { return v.ж.Priority } + +// User friendly city name, with proper capitalization ("Squamish") +func (v LocationView) City() string { return v.ж.City } + +// CityCode is a short code representing the city in upper case. +// CityCode is used to disambiguate a city from another location +// with the same city name. It uniquely identifies a particular +// geographical location, within the tailnet. +// IATA, ICAO or ISO 3166-2 codes are recommended ("YSE") +func (v LocationView) CityCode() string { return v.ж.CityCode } + +// Latitude, Longitude are optional geographical coordinates of the node, in degrees. +// No particular accuracy level is promised; the coordinates may simply be the center of the city or country. +func (v LocationView) Latitude() float64 { return v.ж.Latitude } +func (v LocationView) Longitude() float64 { return v.ж.Longitude } + +// Priority determines the order of use of an exit node when a +// location based preference matches more than one exit node, +// the node with the highest priority wins. Nodes of equal +// probability may be selected arbitrarily. 
+// +// A value of 0 means the exit node does not have a priority +// preference. A negative int is not allowed. +func (v LocationView) Priority() int { return v.ж.Priority } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _LocationViewNeedsRegeneration = Location(struct { @@ -1828,8 +2500,12 @@ func (v *UserProfileView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v UserProfileView) ID() UserID { return v.ж.ID } -func (v UserProfileView) LoginName() string { return v.ж.LoginName } +func (v UserProfileView) ID() UserID { return v.ж.ID } + +// "alice@smith.com"; for display purposes only (provider is not listed) +func (v UserProfileView) LoginName() string { return v.ж.LoginName } + +// "Alice Smith" func (v UserProfileView) DisplayName() string { return v.ж.DisplayName } func (v UserProfileView) ProfilePicURL() string { return v.ж.ProfilePicURL } func (v UserProfileView) Equal(v2 UserProfileView) bool { return v.ж.Equal(v2.ж) } @@ -1909,9 +2585,18 @@ func (v *VIPServiceView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v VIPServiceView) Name() ServiceName { return v.ж.Name } +// Name is the name of the service. The Name uniquely identifies a service +// on a particular tailnet, and so also corresponds uniquely to the pair of +// IP addresses belonging to the VIP service. +func (v VIPServiceView) Name() ServiceName { return v.ж.Name } + +// Ports specify which ProtoPorts are made available by this node +// on the service's IPs. func (v VIPServiceView) Ports() views.Slice[ProtoPortRange] { return views.SliceOf(v.ж.Ports) } -func (v VIPServiceView) Active() bool { return v.ж.Active } + +// Active specifies whether new requests for the service should be +// sent to this node by control. +func (v VIPServiceView) Active() bool { return v.ж.Active } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _VIPServiceViewNeedsRegeneration = VIPService(struct { diff --git a/types/dnstype/dnstype_view.go b/types/dnstype/dnstype_view.go index 0704670a29606..a983864d0ce42 100644 --- a/types/dnstype/dnstype_view.go +++ b/types/dnstype/dnstype_view.go @@ -84,10 +84,35 @@ func (v *ResolverView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// Addr is the address of the DNS resolver, one of: +// - A plain IP address for a "classic" UDP+TCP DNS resolver. +// This is the common format as sent by the control plane. +// - An IP:port, for tests. +// - "https://resolver.com/path" for DNS over HTTPS; currently +// as of 2022-09-08 only used for certain well-known resolvers +// (see the publicdns package) for which the IP addresses to dial DoH are +// known ahead of time, so bootstrap DNS resolution is not required. +// - "http://node-address:port/path" for DNS over HTTP over WireGuard. This +// is implemented in the PeerAPI for exit nodes and app connectors. +// - [TODO] "tls://resolver.com" for DNS over TCP+TLS func (v ResolverView) Addr() string { return v.ж.Addr } + +// BootstrapResolution is an optional suggested resolution for the +// DoT/DoH resolver, if the resolver URL does not reference an IP +// address directly. +// BootstrapResolution may be empty, in which case clients should +// look up the DoT/DoH server using their local "classic" DNS +// resolver. +// +// As of 2022-09-08, BootstrapResolution is not yet used. func (v ResolverView) BootstrapResolution() views.Slice[netip.Addr] { return views.SliceOf(v.ж.BootstrapResolution) } + +// UseWithExitNode designates that this resolver should continue to be used when an +// exit node is in use. Normally, DNS resolution is delegated to the exit node but +// there are situations where it is preferable to still use a Split DNS server and/or +// global DNS server instead of the exit node. 
func (v ResolverView) UseWithExitNode() bool { return v.ж.UseWithExitNode } func (v ResolverView) Equal(v2 ResolverView) bool { return v.ж.Equal(v2.ж) } diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 99a86a6a52bbd..7d1507468fc65 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -86,11 +86,18 @@ func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } +func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } + +// needed to request key rotation func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } + +// DisallowedTKAStateIDs stores the tka.State.StateID values which +// this node will not operate network lock on. This is used to +// prevent bootstrapping TKA onto a key authority which was forcibly +// disabled. 
func (v PersistView) DisallowedTKAStateIDs() views.Slice[string] { return views.SliceOf(v.ж.DisallowedTKAStateIDs) } diff --git a/types/prefs/prefs_example/prefs_example_view.go b/types/prefs/prefs_example/prefs_example_view.go index afc9f1781f565..6a1a36865fe00 100644 --- a/types/prefs/prefs_example/prefs_example_view.go +++ b/types/prefs/prefs_example/prefs_example_view.go @@ -89,38 +89,68 @@ func (v *PrefsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } -func (v PrefsView) ControlURL() prefs.Item[string] { return v.ж.ControlURL } -func (v PrefsView) RouteAll() prefs.Item[bool] { return v.ж.RouteAll } -func (v PrefsView) ExitNodeID() prefs.Item[tailcfg.StableNodeID] { return v.ж.ExitNodeID } -func (v PrefsView) ExitNodeIP() prefs.Item[netip.Addr] { return v.ж.ExitNodeIP } -func (v PrefsView) ExitNodePrior() tailcfg.StableNodeID { return v.ж.ExitNodePrior } -func (v PrefsView) ExitNodeAllowLANAccess() prefs.Item[bool] { return v.ж.ExitNodeAllowLANAccess } -func (v PrefsView) CorpDNS() prefs.Item[bool] { return v.ж.CorpDNS } -func (v PrefsView) RunSSH() prefs.Item[bool] { return v.ж.RunSSH } -func (v PrefsView) RunWebClient() prefs.Item[bool] { return v.ж.RunWebClient } -func (v PrefsView) WantRunning() prefs.Item[bool] { return v.ж.WantRunning } -func (v PrefsView) LoggedOut() prefs.Item[bool] { return v.ж.LoggedOut } -func (v PrefsView) ShieldsUp() prefs.Item[bool] { return v.ж.ShieldsUp } -func (v PrefsView) AdvertiseTags() prefs.ListView[string] { return v.ж.AdvertiseTags.View() } -func (v PrefsView) Hostname() prefs.Item[string] { return v.ж.Hostname } -func (v PrefsView) NotepadURLs() prefs.Item[bool] { return v.ж.NotepadURLs } -func (v PrefsView) ForceDaemon() prefs.Item[bool] { return v.ж.ForceDaemon } -func (v PrefsView) Egg() prefs.Item[bool] { return v.ж.Egg } +func (v PrefsView) ControlURL() prefs.Item[string] { return v.ж.ControlURL } +func (v PrefsView) RouteAll() prefs.Item[bool] { return v.ж.RouteAll } +func (v PrefsView) 
ExitNodeID() prefs.Item[tailcfg.StableNodeID] { return v.ж.ExitNodeID } +func (v PrefsView) ExitNodeIP() prefs.Item[netip.Addr] { return v.ж.ExitNodeIP } + +// ExitNodePrior is an internal state rather than a preference. +// It can be kept in the Prefs structure but should not be wrapped +// and is ignored by the [prefs] package. +func (v PrefsView) ExitNodePrior() tailcfg.StableNodeID { return v.ж.ExitNodePrior } +func (v PrefsView) ExitNodeAllowLANAccess() prefs.Item[bool] { return v.ж.ExitNodeAllowLANAccess } +func (v PrefsView) CorpDNS() prefs.Item[bool] { return v.ж.CorpDNS } +func (v PrefsView) RunSSH() prefs.Item[bool] { return v.ж.RunSSH } +func (v PrefsView) RunWebClient() prefs.Item[bool] { return v.ж.RunWebClient } +func (v PrefsView) WantRunning() prefs.Item[bool] { return v.ж.WantRunning } +func (v PrefsView) LoggedOut() prefs.Item[bool] { return v.ж.LoggedOut } +func (v PrefsView) ShieldsUp() prefs.Item[bool] { return v.ж.ShieldsUp } + +// AdvertiseTags is a preference whose value is a slice of strings. +// The value is atomic, and individual items in the slice should +// not be modified after the preference is set. +// Since the item type (string) is immutable, we can use [prefs.List]. +func (v PrefsView) AdvertiseTags() prefs.ListView[string] { return v.ж.AdvertiseTags.View() } +func (v PrefsView) Hostname() prefs.Item[string] { return v.ж.Hostname } +func (v PrefsView) NotepadURLs() prefs.Item[bool] { return v.ж.NotepadURLs } +func (v PrefsView) ForceDaemon() prefs.Item[bool] { return v.ж.ForceDaemon } +func (v PrefsView) Egg() prefs.Item[bool] { return v.ж.Egg } + +// AdvertiseRoutes is a preference whose value is a slice of netip.Prefix. +// The value is atomic, and individual items in the slice should +// not be modified after the preference is set. +// Since the item type (netip.Prefix) is immutable, we can use [prefs.List]. 
func (v PrefsView) AdvertiseRoutes() prefs.ListView[netip.Prefix] { return v.ж.AdvertiseRoutes.View() } func (v PrefsView) NoSNAT() prefs.Item[bool] { return v.ж.NoSNAT } func (v PrefsView) NoStatefulFiltering() prefs.Item[opt.Bool] { return v.ж.NoStatefulFiltering } func (v PrefsView) NetfilterMode() prefs.Item[preftype.NetfilterMode] { return v.ж.NetfilterMode } func (v PrefsView) OperatorUser() prefs.Item[string] { return v.ж.OperatorUser } func (v PrefsView) ProfileName() prefs.Item[string] { return v.ж.ProfileName } -func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate } -func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector } -func (v PrefsView) PostureChecking() prefs.Item[bool] { return v.ж.PostureChecking } -func (v PrefsView) NetfilterKind() prefs.Item[string] { return v.ж.NetfilterKind } + +// AutoUpdate contains auto-update preferences. +// Each preference in the group can be configured and managed individually. +func (v PrefsView) AutoUpdate() AutoUpdatePrefs { return v.ж.AutoUpdate } + +// AppConnector contains app connector-related preferences. +// Each preference in the group can be configured and managed individually. +func (v PrefsView) AppConnector() AppConnectorPrefs { return v.ж.AppConnector } +func (v PrefsView) PostureChecking() prefs.Item[bool] { return v.ж.PostureChecking } +func (v PrefsView) NetfilterKind() prefs.Item[string] { return v.ж.NetfilterKind } + +// DriveShares is a preference whose value is a slice of *[drive.Share]. +// The value is atomic, and individual items in the slice should +// not be modified after the preference is set. +// Since the item type (*drive.Share) is mutable and implements [views.ViewCloner], +// we need to use [prefs.StructList] instead of [prefs.List]. 
func (v PrefsView) DriveShares() prefs.StructListView[*drive.Share, drive.ShareView] { return prefs.StructListViewOf(&v.ж.DriveShares) } func (v PrefsView) AllowSingleHosts() prefs.Item[marshalAsTrueInJSON] { return v.ж.AllowSingleHosts } -func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } + +// Persist is an internal state rather than a preference. +// It can be kept in the Prefs structure but should not be wrapped +// and is ignored by the [prefs] package. +func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PrefsViewNeedsRegeneration = Prefs(struct { diff --git a/types/prefs/prefs_view_test.go b/types/prefs/prefs_view_test.go index 44c3beb877097..8993cb535bd67 100644 --- a/types/prefs/prefs_view_test.go +++ b/types/prefs/prefs_view_test.go @@ -95,6 +95,9 @@ func (v TestPrefsView) AddrItem() Item[netip.Addr] { return v.ж.A func (v TestPrefsView) StringStringMap() MapView[string, string] { return v.ж.StringStringMap.View() } func (v TestPrefsView) IntStringMap() MapView[int, string] { return v.ж.IntStringMap.View() } func (v TestPrefsView) AddrIntMap() MapView[netip.Addr, int] { return v.ж.AddrIntMap.View() } + +// Bundles are complex preferences that usually consist of +// multiple parameters that must be configured atomically. func (v TestPrefsView) Bundle1() ItemView[*TestBundle, TestBundleView] { return ItemViewOf(&v.ж.Bundle1) } @@ -116,6 +119,10 @@ func (v TestPrefsView) IntBundleMap() StructMapView[int, *TestBundle, TestBundle func (v TestPrefsView) AddrBundleMap() StructMapView[netip.Addr, *TestBundle, TestBundleView] { return StructMapViewOf(&v.ж.AddrBundleMap) } + +// Group is a nested struct that contains one or more preferences. +// Each preference in a group can be configured individually. +// Preference groups should be included directly rather than by pointers. 
func (v TestPrefsView) Group() TestPrefsGroup { return v.ж.Group } // A compilation failure here means this code must be regenerated, with the command at the top of this file. From 48dbe70b540e1316fcf2cc5e481b950dae47f658 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 28 Aug 2025 13:01:20 -0700 Subject: [PATCH 0260/1093] go.mod: bump Go 1.25 release (#16969) Bump Go 1.25 release to include a go/types patch and resolve govulncheck CI exceptions. Updates tailscale/corp#31755 Signed-off-by: Patrick O'Doherty --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index e3dfee5401e9e..9c2417e7c103b 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -9a1a6a51164c9c7a23f711052bb8776326cd30cd +f3339c88ea24212cc3cd49b64ad1045b85db23bf From 4b9a1a008781df6d967de73686b59d1c39ed4e4e Mon Sep 17 00:00:00 2001 From: License Updater Date: Thu, 28 Aug 2025 21:31:44 +0000 Subject: [PATCH 0261/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 52 +++++++++---------------------------------- licenses/apple.md | 28 +++++++++++------------ licenses/tailscale.md | 32 +++++++++++++------------- licenses/windows.md | 41 +++++++++++++++++----------------- 4 files changed, 63 insertions(+), 90 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index 37961b74c44fe..0e68f0caca238 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -9,72 +9,42 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) - - 
[github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.44.7/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - 
[github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) + - [github.com/google/go-tpm](https://pkg.go.dev/github.com/google/go-tpm) ([Apache-2.0](https://github.com/google/go-tpm/blob/v0.9.4/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) 
([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) 
([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/0b8b35511f19/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - - [go4.org/intern](https://pkg.go.dev/go4.org/intern) ([BSD-3-Clause](https://github.com/go4org/intern/blob/ae77deb06f29/LICENSE)) 
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.38.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) - - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) - - 
[golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.30.0:LICENSE)) + - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.24.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.40.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.14.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.33.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.32.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.25.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.11.0:LICENSE)) + - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.33.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - - [inet.af/netaddr](https://pkg.go.dev/inet.af/netaddr) ([BSD-3-Clause](Unknown)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 5a017076e9f38..81359b27021c0 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -33,9 +33,9 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) + - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) @@ -53,29 +53,29 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) + - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.65/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) + - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.22/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - - 
[github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/91a0587fb251/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) + - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.5/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.12.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) 
([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.30.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.40.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.42.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.34.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.33.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.27.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 206734fb41f47..6feb85aafcea6 100644 --- a/licenses/tailscale.md 
+++ b/licenses/tailscale.md @@ -14,9 +14,12 @@ Some packages may only be included on certain architectures or operating systems - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) + - [fyne.io/systray](https://pkg.go.dev/fyne.io/systray) ([Apache-2.0](https://github.com/fyne-io/systray/blob/4856ac3adc3c/LICENSE)) + - [github.com/Kodeworks/golang-image-ico](https://pkg.go.dev/github.com/Kodeworks/golang-image-ico) ([BSD-3-Clause](https://github.com/Kodeworks/golang-image-ico/blob/73f0f4cfade9/LICENSE)) - [github.com/akutz/memconn](https://pkg.go.dev/github.com/akutz/memconn) ([Apache-2.0](https://github.com/akutz/memconn/blob/v0.1.0/LICENSE)) - [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/anmitsu/go-shlex](https://pkg.go.dev/github.com/anmitsu/go-shlex) ([MIT](https://github.com/anmitsu/go-shlex/blob/38f4b401e2be/LICENSE)) + - [github.com/atotto/clipboard](https://pkg.go.dev/github.com/atotto/clipboard) ([BSD-3-Clause](https://github.com/atotto/clipboard/blob/v0.1.4/LICENSE)) - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) @@ -38,18 +41,18 @@ Some packages may only be included on certain architectures or operating systems - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) 
([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/a09d6be7affa/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) + - [github.com/fogleman/gg](https://pkg.go.dev/github.com/fogleman/gg) ([MIT](https://github.com/fogleman/gg/blob/v1.3.0/LICENSE.md)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - [github.com/go-ole/go-ole](https://pkg.go.dev/github.com/go-ole/go-ole) ([MIT](https://github.com/go-ole/go-ole/blob/v1.3.0/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) + - [github.com/golang/freetype/raster](https://pkg.go.dev/github.com/golang/freetype/raster) ([Unknown](Unknown)) + - [github.com/golang/freetype/truetype](https://pkg.go.dev/github.com/golang/freetype/truetype) ([Unknown](Unknown)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) 
([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - - [github.com/gorilla/csrf](https://pkg.go.dev/github.com/gorilla/csrf) ([BSD-3-Clause](https://github.com/gorilla/csrf/blob/9dd6af1f6d30/LICENSE)) - - [github.com/gorilla/securecookie](https://pkg.go.dev/github.com/gorilla/securecookie) ([BSD-3-Clause](https://github.com/gorilla/securecookie/blob/v1.1.2/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -61,7 +64,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/kr/fs](https://pkg.go.dev/github.com/kr/fs) ([BSD-3-Clause](https://github.com/kr/fs/blob/v0.1.0/LICENSE)) - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) - [github.com/mattn/go-isatty](https://pkg.go.dev/github.com/mattn/go-isatty) ([MIT](https://github.com/mattn/go-isatty/blob/v0.0.20/LICENSE)) - - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) 
([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) @@ -69,14 +71,13 @@ Some packages may only be included on certain architectures or operating systems - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - [github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) ([BSD-2-Clause](https://github.com/pkg/sftp/blob/v1.13.6/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) 
([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/d4cd19a26976/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/91a0587fb251/LICENSE)) + - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/toqueteos/webbrowser](https://pkg.go.dev/github.com/toqueteos/webbrowser) ([MIT](https://github.com/toqueteos/webbrowser/blob/v1.2.0/LICENSE.md)) - [github.com/u-root/u-root/pkg/termios](https://pkg.go.dev/github.com/u-root/u-root/pkg/termios) ([BSD-3-Clause](https://github.com/u-root/u-root/blob/v0.14.0/LICENSE)) @@ -84,15 +85,16 @@ Some packages may only be included on certain architectures or operating systems - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.38.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - 
[golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.26.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) + - [golang.org/x/image](https://pkg.go.dev/golang.org/x/image) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.40.0:LICENSE)) + - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.30.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.14.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.33.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.32.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.25.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) 
([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.11.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index e47bc3227b3f9..5c000cc9fd098 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -31,12 +31,13 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) - [github.com/cespare/xxhash/v2](https://pkg.go.dev/github.com/cespare/xxhash/v2) ([MIT](https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt)) + - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - 
[github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) + - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/go-cmp/cmp](https://pkg.go.dev/github.com/google/go-cmp/cmp) ([BSD-3-Clause](https://github.com/google/go-cmp/blob/v0.7.0/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) @@ -51,38 +52,38 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) + - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.65/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/munnerz/goautoneg](https://pkg.go.dev/github.com/munnerz/goautoneg) ([BSD-3-Clause](https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE)) - [github.com/nfnt/resize](https://pkg.go.dev/github.com/nfnt/resize) ([ISC](https://github.com/nfnt/resize/blob/83c6a9932646/LICENSE)) - [github.com/peterbourgon/diskv](https://pkg.go.dev/github.com/peterbourgon/diskv) ([MIT](https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE)) - - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.19.1/LICENSE)) - - [github.com/prometheus/client_model/go](https://pkg.go.dev/github.com/prometheus/client_model/go) ([Apache-2.0](https://github.com/prometheus/client_model/blob/v0.6.1/LICENSE)) - - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.55.0/LICENSE)) + - 
[github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.23.0/LICENSE)) + - [github.com/prometheus/client_model/go](https://pkg.go.dev/github.com/prometheus/client_model/go) ([Apache-2.0](https://github.com/prometheus/client_model/blob/v0.6.2/LICENSE)) + - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.65.0/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/ec1d1c113d33/LICENSE)) + - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/b2c15a420186/LICENSE)) - - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/5992cb43ca35/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/6376defdac3f/LICENSE)) + - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/f4da2b8ee071/LICENSE)) - 
[github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) + - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.5/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.24.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.12.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE)) - - 
[golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.30.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.40.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) + - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.26.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.42.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.34.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.33.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.27.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) 
([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.35.1/LICENSE)) + - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.6/LICENSE)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) - [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) ([MIT](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) From d42f0b6a21dc088a3b7b0366e144e148a118c642 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 28 Aug 2025 12:24:21 -0700 Subject: [PATCH 0262/1093] util/ringbuffer: rename to ringlog I need a ringbuffer in the more traditional sense, one that has a notion of item removal as well as tail loss on overrun. This implementation is really a clearable log window, and is used as such where it is used. 
Updates #cleanup Updates tailscale/corp#31762 Signed-off-by: James Tucker --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- tsnet/depaware.txt | 2 +- .../ringbuffer.go => ringlog/ringlog.go} | 35 +++++++++---------- .../ringlog_test.go} | 4 +-- wgengine/magicsock/endpoint.go | 4 +-- wgengine/magicsock/magicsock.go | 4 +-- 8 files changed, 27 insertions(+), 28 deletions(-) rename util/{ringbuffer/ringbuffer.go => ringlog/ringlog.go} (51%) rename util/{ringbuffer/ringbuffer_test.go => ringlog/ringlog_test.go} (95%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 85bec4a791800..843ce27f2146d 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -947,7 +947,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/cmd/k8s-operator+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a83c67cca03e5..fdc48718cdeb3 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -424,7 +424,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from 
tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index a695aa5f362ff..503454f50bf9b 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -376,7 +376,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/cmd/tsidp+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 67c182430ebf7..b490fcbcaf3b5 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -371,7 +371,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/ringbuffer from tailscale.com/wgengine/magicsock + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ diff --git a/util/ringbuffer/ringbuffer.go b/util/ringlog/ringlog.go similarity index 51% rename from util/ringbuffer/ringbuffer.go rename to util/ringlog/ringlog.go index baca2afe8c2c1..85e0c48611821 100644 --- a/util/ringbuffer/ringbuffer.go +++ b/util/ringlog/ringlog.go @@ -1,32 +1,31 @@ // Copyright (c) Tailscale Inc & 
AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package ringbuffer contains a fixed-size concurrency-safe generic ring -// buffer. -package ringbuffer +// Package ringlog contains a limited-size concurrency-safe generic ring log. +package ringlog import "sync" -// New creates a new RingBuffer containing at most max items. -func New[T any](max int) *RingBuffer[T] { - return &RingBuffer[T]{ +// New creates a new [RingLog] containing at most max items. +func New[T any](max int) *RingLog[T] { + return &RingLog[T]{ max: max, } } -// RingBuffer is a concurrency-safe ring buffer. -type RingBuffer[T any] struct { +// RingLog is a concurrency-safe fixed size log window containing entries of [T]. +type RingLog[T any] struct { mu sync.Mutex pos int buf []T max int } -// Add appends a new item to the RingBuffer, possibly overwriting the oldest -// item in the buffer if it is already full. +// Add appends a new item to the [RingLog], possibly overwriting the oldest +// item in the log if it is already full. // // It does nothing if rb is nil. -func (rb *RingBuffer[T]) Add(t T) { +func (rb *RingLog[T]) Add(t T) { if rb == nil { return } @@ -40,11 +39,11 @@ func (rb *RingBuffer[T]) Add(t T) { } } -// GetAll returns a copy of all the entries in the ring buffer in the order they +// GetAll returns a copy of all the entries in the ring log in the order they // were added. // // It returns nil if rb is nil. -func (rb *RingBuffer[T]) GetAll() []T { +func (rb *RingLog[T]) GetAll() []T { if rb == nil { return nil } @@ -58,10 +57,10 @@ func (rb *RingBuffer[T]) GetAll() []T { return out } -// Len returns the number of elements in the ring buffer. Note that this value +// Len returns the number of elements in the ring log. Note that this value // could change immediately after being returned if a concurrent caller -// modifies the buffer. -func (rb *RingBuffer[T]) Len() int { +// modifies the log. 
+func (rb *RingLog[T]) Len() int { if rb == nil { return 0 } @@ -70,8 +69,8 @@ func (rb *RingBuffer[T]) Len() int { return len(rb.buf) } -// Clear will empty the ring buffer. -func (rb *RingBuffer[T]) Clear() { +// Clear will empty the ring log. +func (rb *RingLog[T]) Clear() { rb.mu.Lock() defer rb.mu.Unlock() rb.pos = 0 diff --git a/util/ringbuffer/ringbuffer_test.go b/util/ringlog/ringlog_test.go similarity index 95% rename from util/ringbuffer/ringbuffer_test.go rename to util/ringlog/ringlog_test.go index e10096bfbd771..d6776e181a4f8 100644 --- a/util/ringbuffer/ringbuffer_test.go +++ b/util/ringlog/ringlog_test.go @@ -1,14 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package ringbuffer +package ringlog import ( "reflect" "testing" ) -func TestRingBuffer(t *testing.T) { +func TestRingLog(t *testing.T) { const numItems = 10 rb := New[int](numItems) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 37892176b0925..b8778b8d845d5 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -33,7 +33,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/mak" - "tailscale.com/util/ringbuffer" + "tailscale.com/util/ringlog" "tailscale.com/util/slicesx" ) @@ -60,7 +60,7 @@ type endpoint struct { lastRecvWG mono.Time // last time there were incoming packets from this peer destined for wireguard-go (e.g. not disco) lastRecvUDPAny mono.Time // last time there were incoming UDP packets from this peer of any kind numStopAndResetAtomic int64 - debugUpdates *ringbuffer.RingBuffer[EndpointChange] + debugUpdates *ringlog.RingLog[EndpointChange] // These fields are initialized once and never modified. 
c *Conn diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 7fb3517e923d5..a7f84e3521df7 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -62,7 +62,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/mak" - "tailscale.com/util/ringbuffer" + "tailscale.com/util/ringlog" "tailscale.com/util/set" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" @@ -3112,7 +3112,7 @@ func (c *Conn) updateNodes(update NodeViewsUpdate) (peersChanged bool) { // ~1MB on mobile but we never used the data so the memory was just // wasted. default: - ep.debugUpdates = ringbuffer.New[EndpointChange](entriesPerBuffer) + ep.debugUpdates = ringlog.New[EndpointChange](entriesPerBuffer) } if n.Addresses().Len() > 0 { ep.nodeAddr = n.Addresses().At(0).Addr() From f5d3c59a925b2f0ea249a32ddc0decdb43ff7ee9 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 28 Aug 2025 12:00:03 -0700 Subject: [PATCH 0263/1093] wgengine/magicsock: shorten process internal DERP queue DERP writes go via TCP and the host OS will have plenty of buffer space. We've observed in the wild with a backed up TCP socket kernel side buffers of >2.4MB. The DERP internal queue being larger causes an increase in the probability that the contents of the backbuffer are "dead letters" - packets that were assumed to be lost. A first step to improvement is to size this queue only large enough to avoid some of the initial connect stall problem, but not large enough that it is contributing in a substantial way to buffer bloat / dead-letter retention. 
Updates tailscale/corp#31762 Signed-off-by: James Tucker --- cmd/k8s-operator/depaware.txt | 1 - cmd/tailscaled/depaware.txt | 1 - cmd/tsidp/depaware.txt | 1 - tsnet/depaware.txt | 1 - wgengine/magicsock/derp.go | 72 ++++++---------------------- wgengine/magicsock/magicsock_test.go | 8 ---- 6 files changed, 15 insertions(+), 69 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 843ce27f2146d..4b1e4a1e4a3b3 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -958,7 +958,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index fdc48718cdeb3..c2d9f3d00fbb6 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -435,7 +435,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ tailscale.com/util/truncate from tailscale.com/logtail diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 503454f50bf9b..e8bc2b254785c 100644 --- a/cmd/tsidp/depaware.txt +++ 
b/cmd/tsidp/depaware.txt @@ -387,7 +387,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index b490fcbcaf3b5..aea6baf93ef11 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -382,7 +382,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/sysresources from tailscale.com/wgengine/magicsock tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 5afdbc6d8718b..9c60e4893a2cd 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -11,9 +11,7 @@ import ( "net" "net/netip" "reflect" - "runtime" "slices" - "sync" "time" "unsafe" @@ -32,7 +30,6 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/mak" "tailscale.com/util/rands" - "tailscale.com/util/sysresources" "tailscale.com/util/testenv" ) @@ -282,59 +279,20 @@ func (c *Conn) goDerpConnect(regionID int) { go c.derpWriteChanForRegion(regionID, key.NodePublic{}) } -var ( - bufferedDerpWrites int - bufferedDerpWritesOnce sync.Once -) - -// bufferedDerpWritesBeforeDrop 
returns how many packets writes can be queued -// up the DERP client to write on the wire before we start dropping. -func bufferedDerpWritesBeforeDrop() int { - // For mobile devices, always return the previous minimum value of 32; - // we can do this outside the sync.Once to avoid that overhead. - if runtime.GOOS == "ios" || runtime.GOOS == "android" { - return 32 - } - - bufferedDerpWritesOnce.Do(func() { - // Some rough sizing: for the previous fixed value of 32, the - // total consumed memory can be: - // = numDerpRegions * messages/region * sizeof(message) - // - // For sake of this calculation, assume 100 DERP regions; at - // time of writing (2023-04-03), we have 24. - // - // A reasonable upper bound for the worst-case average size of - // a message is a *disco.CallMeMaybe message with 16 endpoints; - // since sizeof(netip.AddrPort) = 32, that's 512 bytes. Thus: - // = 100 * 32 * 512 - // = 1638400 (1.6MiB) - // - // On a reasonably-small node with 4GiB of memory that's - // connected to each region and handling a lot of load, 1.6MiB - // is about 0.04% of the total system memory. - // - // For sake of this calculation, then, let's double that memory - // usage to 0.08% and scale based on total system memory. - // - // For a 16GiB Linux box, this should buffer just over 256 - // messages. - systemMemory := sysresources.TotalMemory() - memoryUsable := float64(systemMemory) * 0.0008 - - const ( - theoreticalDERPRegions = 100 - messageMaximumSizeBytes = 512 - ) - bufferedDerpWrites = int(memoryUsable / (theoreticalDERPRegions * messageMaximumSizeBytes)) - - // Never drop below the previous minimum value. - if bufferedDerpWrites < 32 { - bufferedDerpWrites = 32 - } - }) - return bufferedDerpWrites -} +// derpWriteQueueDepth is the depth of the in-process write queue to a single +// DERP region. 
DERP connections are TCP, and so the actual write queue depth is +// substantially larger than this suggests - often scaling into megabytes +// depending on dynamic TCP parameters and platform TCP tuning. This queue is +// excess of the TCP buffer depth, which means it's almost pure buffer bloat, +// and does not want to be deep - if there are key situations where a node can't +// keep up, either the TCP link to DERP is too slow, or there is a +// synchronization issue in the write path, fixes should be focused on those +// paths, rather than extending this queue. +// TODO(raggi): make this even shorter, ideally this should be a fairly direct +// line into a socket TCP buffer. The challenge at present is that connect and +// reconnect are in the write path and we don't want to block other write +// operations on those. +const derpWriteQueueDepth = 32 // derpWriteChanForRegion returns a channel to which to send DERP packet write // requests. It creates a new DERP connection to regionID if necessary. @@ -429,7 +387,7 @@ func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan<- dc.DNSCache = dnscache.Get() ctx, cancel := context.WithCancel(c.connCtx) - ch := make(chan derpWriteRequest, bufferedDerpWritesBeforeDrop()) + ch := make(chan derpWriteRequest, derpWriteQueueDepth) ad.c = dc ad.writeCh = ch diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 5e348b02b7a24..5774432d5a0b9 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -2137,14 +2137,6 @@ func TestOnNodeViewsUpdateWithNoPeers(t *testing.T) { } } -func TestBufferedDerpWritesBeforeDrop(t *testing.T) { - vv := bufferedDerpWritesBeforeDrop() - if vv < 32 { - t.Fatalf("got bufferedDerpWritesBeforeDrop=%d, which is < 32", vv) - } - t.Logf("bufferedDerpWritesBeforeDrop = %d", vv) -} - // newWireguard starts up a new wireguard-go device attached to a test tun, and // returns the device, tun and endpoint port. 
To add peers call device.IpcSet with UAPI instructions. func newWireguard(t *testing.T, uapi string, aips []netip.Prefix) (*device.Device, *tuntest.ChannelTUN, uint16) { From 3aea0e095a411cc98f3ad0b7c1706a00ca7662b0 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 28 Aug 2025 14:09:01 -0700 Subject: [PATCH 0264/1093] syncs: delete WaitGroup and use sync.WaitGroup.Go in Go 1.25 Our own WaitGroup wrapper type was a prototype implementation for the Go method on the standard sync.WaitGroup type. Now that there is first-class support for Go, we should migrate over to using it and delete syncs.WaitGroup. Updates #cleanup Updates tailscale/tailscale#16330 Change-Id: Ib52b10f9847341ce29b4ca0da927dc9321691235 Signed-off-by: Joe Tsai --- cmd/containerboot/egressservices.go | 5 ++--- cmd/tailscale/cli/file.go | 4 ++-- cmd/tailscale/depaware.txt | 2 +- feature/taildrop/delete.go | 3 +-- syncs/syncs.go | 16 ---------------- syncs/syncs_test.go | 5 +++-- 6 files changed, 9 insertions(+), 26 deletions(-) diff --git a/cmd/containerboot/egressservices.go b/cmd/containerboot/egressservices.go index 71141f17a9bb6..64ca0a13a4ed7 100644 --- a/cmd/containerboot/egressservices.go +++ b/cmd/containerboot/egressservices.go @@ -18,6 +18,7 @@ import ( "reflect" "strconv" "strings" + "sync" "time" "github.com/fsnotify/fsnotify" @@ -26,7 +27,6 @@ import ( "tailscale.com/kube/egressservices" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" - "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/util/httpm" "tailscale.com/util/linuxfw" @@ -666,8 +666,7 @@ func (ep *egressProxy) waitTillSafeToShutdown(ctx context.Context, cfgs *egresss return } log.Printf("Ensuring that cluster traffic for egress targets is no longer routed via this Pod...") - wg := syncs.WaitGroup{} - + var wg sync.WaitGroup for s, cfg := range *cfgs { hep := cfg.HealthCheckEndpoint if hep == "" { diff --git a/cmd/tailscale/cli/file.go b/cmd/tailscale/cli/file.go index 6f3aa40b5a806..e0879197e2dbb 100644 
--- a/cmd/tailscale/cli/file.go +++ b/cmd/tailscale/cli/file.go @@ -20,6 +20,7 @@ import ( "path" "path/filepath" "strings" + "sync" "sync/atomic" "time" "unicode/utf8" @@ -32,7 +33,6 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn/ipnstate" "tailscale.com/net/tsaddr" - "tailscale.com/syncs" "tailscale.com/tailcfg" tsrate "tailscale.com/tstime/rate" "tailscale.com/util/quarantine" @@ -176,7 +176,7 @@ func runCp(ctx context.Context, args []string) error { log.Printf("sending %q to %v/%v/%v ...", name, target, ip, stableID) } - var group syncs.WaitGroup + var group sync.WaitGroup ctxProgress, cancelProgress := context.WithCancel(ctx) defer cancelProgress() if isatty.IsTerminal(os.Stderr.Fd()) { diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index b121a411f38bd..02ffec0ea009c 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -140,7 +140,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ - tailscale.com/syncs from tailscale.com/cmd/tailscale/cli+ + tailscale.com/syncs from tailscale.com/control/controlhttp+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+ tailscale.com/tka from tailscale.com/client/local+ diff --git a/feature/taildrop/delete.go b/feature/taildrop/delete.go index 0b7259879f941..8b03a125f445e 100644 --- a/feature/taildrop/delete.go +++ b/feature/taildrop/delete.go @@ -12,7 +12,6 @@ import ( "time" "tailscale.com/ipn" - "tailscale.com/syncs" "tailscale.com/tstime" "tailscale.com/types/logger" ) @@ -33,7 +32,7 @@ type fileDeleter struct { byName map[string]*list.Element emptySignal chan struct{} // signal that the queue is empty - group syncs.WaitGroup + group sync.WaitGroup shutdownCtx 
context.Context shutdown context.CancelFunc fs FileOps // must be used for all filesystem operations diff --git a/syncs/syncs.go b/syncs/syncs.go index cf0be919b5b6b..e85b474c9bc5f 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -402,19 +402,3 @@ func (m *Map[K, V]) Swap(key K, value V) (oldValue V) { mak.Set(&m.m, key, value) return oldValue } - -// WaitGroup is identical to [sync.WaitGroup], -// but provides a Go method to start a goroutine. -type WaitGroup struct{ sync.WaitGroup } - -// Go calls the given function in a new goroutine. -// It automatically increments the counter before execution and -// automatically decrements the counter after execution. -// It must not be called concurrently with Wait. -func (wg *WaitGroup) Go(f func()) { - wg.Add(1) - go func() { - defer wg.Done() - f() - }() -} diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index 2439b6068391b..d99c3d1a96e56 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -7,6 +7,7 @@ import ( "context" "io" "os" + "sync" "testing" "time" @@ -98,7 +99,7 @@ func TestMutexValue(t *testing.T) { t.Errorf("Load = %v, want %v", v.Load(), now) } - var group WaitGroup + var group sync.WaitGroup var v2 MutexValue[int] var sum int for i := range 10 { @@ -237,7 +238,7 @@ func TestMap(t *testing.T) { t.Run("LoadOrStore", func(t *testing.T) { var m Map[string, string] - var wg WaitGroup + var wg sync.WaitGroup var ok1, ok2 bool wg.Go(func() { _, ok1 = m.LoadOrStore("", "") }) wg.Go(func() { _, ok2 = m.LoadOrStore("", "") }) From 1a98943204ef628ddcb257891152988d0d20916b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 28 Aug 2025 19:05:57 -0700 Subject: [PATCH 0265/1093] go.mod: bump github.com/ulikunitz/xz for security warning Doesn't look to affect us, but pacifies security scanners. See https://github.com/ulikunitz/xz/commit/88ddf1d0d98d688db65de034f48960b2760d2ae2 It's for decoding. We only use this package for encoding (via github.com/google/rpmpack / github.com/goreleaser/nfpm/v2). 
Updates #8043 Change-Id: I87631aa5048f9514bb83baf1424f6abb34329c46 Signed-off-by: Brad Fitzpatrick --- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.nix b/flake.nix index c739e87203109..8cb5e078e11e2 100644 --- a/flake.nix +++ b/flake.nix @@ -148,5 +148,5 @@ }); }; } -# nix-direnv cache busting line: sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= +# nix-direnv cache busting line: sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= diff --git a/go.mod b/go.mod index ecd229427416d..e6c480494ed2f 100644 --- a/go.mod +++ b/go.mod @@ -391,7 +391,7 @@ require ( github.com/tomarrell/wrapcheck/v2 v2.8.3 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 // indirect - github.com/ulikunitz/xz v0.5.11 // indirect + github.com/ulikunitz/xz v0.5.15 // indirect github.com/ultraware/funlen v0.1.0 // indirect github.com/ultraware/whitespace v0.1.0 // indirect github.com/uudashr/gocognit v1.1.2 // indirect diff --git a/go.mod.sri b/go.mod.sri index 69c69b8db0bb2..781799de5eae1 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= +sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= diff --git a/go.sum b/go.sum index f2544b9acdc27..72ddb730fdf84 100644 --- a/go.sum +++ b/go.sum @@ -1031,8 +1031,8 @@ github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg= github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE= github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= 
+github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= +github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= github.com/ultraware/whitespace v0.1.0 h1:O1HKYoh0kIeqE8sFqZf1o0qbORXUCOQFrlaQyZsczZw= diff --git a/shell.nix b/shell.nix index e0f6e79f1151f..883d71befe9d6 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-15aaW3lqRgXQxBKEWRJTEV1GPmG7Gc9XwsTTu+M5rTY= +# nix-direnv cache busting line: sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= From 7cbcc10eb10cdea7cc42511f7d5c4f584c8ead7a Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Fri, 29 Aug 2025 10:33:14 -0700 Subject: [PATCH 0266/1093] syncs: add Semaphore.Len (#16981) The Len reports the number of acquired tokens for metrics. Updates tailscale/corp#31252 Signed-off-by: Joe Tsai --- syncs/syncs.go | 7 +++++++ syncs/syncs_test.go | 14 ++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/syncs/syncs.go b/syncs/syncs.go index e85b474c9bc5f..3b37bca085c89 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -201,6 +201,13 @@ func NewSemaphore(n int) Semaphore { return Semaphore{c: make(chan struct{}, n)} } +// Len reports the number of in-flight acquisitions. +// It is incremented whenever the semaphore is acquired. +// It is decremented whenever the semaphore is released. +func (s Semaphore) Len() int { + return len(s.c) +} + // Acquire blocks until a resource is acquired. 
func (s Semaphore) Acquire() { s.c <- struct{}{} diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index d99c3d1a96e56..a546b8d0a2343 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -162,10 +162,20 @@ func TestClosedChan(t *testing.T) { func TestSemaphore(t *testing.T) { s := NewSemaphore(2) + assertLen := func(want int) { + t.Helper() + if got := s.Len(); got != want { + t.Fatalf("Len = %d, want %d", got, want) + } + } + + assertLen(0) s.Acquire() + assertLen(1) if !s.TryAcquire() { t.Fatal("want true") } + assertLen(2) if s.TryAcquire() { t.Fatal("want false") } @@ -175,11 +185,15 @@ func TestSemaphore(t *testing.T) { t.Fatal("want false") } s.Release() + assertLen(1) if !s.AcquireContext(context.Background()) { t.Fatal("want true") } + assertLen(2) s.Release() + assertLen(1) s.Release() + assertLen(0) } func TestMap(t *testing.T) { From 76fc02be09a069bcc4e440f07ba2640a56cfb5d8 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Fri, 29 Aug 2025 14:25:58 -0400 Subject: [PATCH 0267/1093] words: just an ordinary commit, nothing fishy at all (#16982) * words: just an ordinary commit, nothing fishy at all Updates #words Signed-off-by: Naman Sood --- words/tails.txt | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/words/tails.txt b/words/tails.txt index 7e35c69702d5d..20ff326c1e6fd 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -722,3 +722,45 @@ follow stalk caudal chronicle +trout +sturgeon +swordfish +catfish +pike +angler +anchovy +angelfish +cod +icefish +carp +mackarel +salmon +grayling +lungfish +dragonfish +barracuda +barreleye +bass +ridgehead +bigscale +blowfish +bream +bullhead +pufferfish +sardine +sunfish +mullet +snapper +pipefish +seahorse +flounder +tilapia +chub +dorado +shad +lionfish +crayfish +sailfish +billfish +taimen +sargo From 89fe2e1f126d9de3567500fa0b240cc0ac489c09 Mon Sep 17 00:00:00 2001 From: Remy Guercio Date: Fri, 29 Aug 2025 15:16:39 -0500 Subject: [PATCH 0268/1093] 
cmd/tsidp: add allow-insecure-no-client-registration and JSON file migration (#16881) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a ternary flag that unless set explicitly to false keeps the insecure behavior of TSIDP. If the flag is false, add functionality on startup to migrate oidc-funnel-clients.json to oauth-clients.json if it doesn’t exist. If the flag is false, modify endpoints to behave similarly regardless of funnel, tailnet, or localhost. They will all verify client ID & secret when appropriate per RFC 6749. The authorize endpoint will no longer change based on funnel status or nodeID. Add extra tests verifying TSIDP endpoints behave as expected with the new flag. Safely create the redirect URL from what's passed into the authorize endpoint. Fixes #16880 Signed-off-by: Remy Guercio --- cmd/tsidp/tsidp.go | 375 +++++++++++-- cmd/tsidp/tsidp_test.go | 1140 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 1441 insertions(+), 74 deletions(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 2fc6d27e45181..c02b09745aec8 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -47,6 +47,7 @@ import ( "tailscale.com/tsnet" "tailscale.com/types/key" "tailscale.com/types/lazy" + "tailscale.com/types/opt" "tailscale.com/types/views" "tailscale.com/util/mak" "tailscale.com/util/must" @@ -61,20 +62,40 @@ type ctxConn struct{} // accessing the IDP over Funnel are persisted. const funnelClientsFile = "oidc-funnel-clients.json" +// oauthClientsFile is the new file name for OAuth clients when running in secure mode. +const oauthClientsFile = "oauth-clients.json" + +// deprecatedFunnelClientsFile is the name used when renaming the old file. +const deprecatedFunnelClientsFile = "deprecated-oidc-funnel-clients.json" + // oidcKeyFile is where the OIDC private key is persisted. 
const oidcKeyFile = "oidc-key.json" var ( - flagVerbose = flag.Bool("verbose", false, "be verbose") - flagPort = flag.Int("port", 443, "port to listen on") - flagLocalPort = flag.Int("local-port", -1, "allow requests from localhost") - flagUseLocalTailscaled = flag.Bool("use-local-tailscaled", false, "use local tailscaled instead of tsnet") - flagFunnel = flag.Bool("funnel", false, "use Tailscale Funnel to make tsidp available on the public internet") - flagHostname = flag.String("hostname", "idp", "tsnet hostname to use instead of idp") - flagDir = flag.String("dir", "", "tsnet state directory; a default one will be created if not provided") + flagVerbose = flag.Bool("verbose", false, "be verbose") + flagPort = flag.Int("port", 443, "port to listen on") + flagLocalPort = flag.Int("local-port", -1, "allow requests from localhost") + flagUseLocalTailscaled = flag.Bool("use-local-tailscaled", false, "use local tailscaled instead of tsnet") + flagFunnel = flag.Bool("funnel", false, "use Tailscale Funnel to make tsidp available on the public internet") + flagHostname = flag.String("hostname", "idp", "tsnet hostname to use instead of idp") + flagDir = flag.String("dir", "", "tsnet state directory; a default one will be created if not provided") + flagAllowInsecureRegistrationBool opt.Bool + flagAllowInsecureRegistration = opt.BoolFlag{Bool: &flagAllowInsecureRegistrationBool} ) +// getAllowInsecureRegistration returns whether to allow OAuth flows without pre-registered clients. +// Default is true for backward compatibility; explicitly set to false for strict OAuth compliance. 
+func getAllowInsecureRegistration() bool { + v, ok := flagAllowInsecureRegistration.Get() + if !ok { + // Flag not set, default to true (allow insecure for backward compatibility) + return true + } + return v +} + func main() { + flag.Var(&flagAllowInsecureRegistration, "allow-insecure-registration", "allow OAuth flows without pre-registered client credentials (default: true for backward compatibility; set to false for strict OAuth compliance)") flag.Parse() ctx := context.Background() if !envknob.UseWIPCode() { @@ -172,10 +193,11 @@ func main() { } srv := &idpServer{ - lc: lc, - funnel: *flagFunnel, - localTSMode: *flagUseLocalTailscaled, - rootPath: rootPath, + lc: lc, + funnel: *flagFunnel, + localTSMode: *flagUseLocalTailscaled, + rootPath: rootPath, + allowInsecureRegistration: getAllowInsecureRegistration(), } if *flagPort != 443 { @@ -184,20 +206,29 @@ func main() { srv.serverURL = fmt.Sprintf("https://%s", strings.TrimSuffix(st.Self.DNSName, ".")) } - // Load funnel clients from disk if they exist, regardless of whether funnel is enabled - // This ensures OIDC clients persist across restarts - funnelClientsFilePath, err := getConfigFilePath(rootPath, funnelClientsFile) - if err != nil { - log.Fatalf("could not get funnel clients file path: %v", err) + // If allowInsecureRegistration is enabled, the old oidc-funnel-clients.json path is used. + // If allowInsecureRegistration is disabled, attempt to migrate the old path to oidc-clients.json and use this new path. 
+ var clientsFilePath string + if !srv.allowInsecureRegistration { + clientsFilePath, err = migrateOAuthClients(rootPath) + if err != nil { + log.Fatalf("could not migrate OAuth clients: %v", err) + } + } else { + clientsFilePath, err = getConfigFilePath(rootPath, funnelClientsFile) + if err != nil { + log.Fatalf("could not get funnel clients file path: %v", err) + } } - f, err := os.Open(funnelClientsFilePath) + + f, err := os.Open(clientsFilePath) if err == nil { if err := json.NewDecoder(f).Decode(&srv.funnelClients); err != nil { - log.Fatalf("could not parse %s: %v", funnelClientsFilePath, err) + log.Fatalf("could not parse %s: %v", clientsFilePath, err) } f.Close() } else if !errors.Is(err, os.ErrNotExist) { - log.Fatalf("could not open %s: %v", funnelClientsFilePath, err) + log.Fatalf("could not open %s: %v", clientsFilePath, err) } log.Printf("Running tsidp at %s ...", srv.serverURL) @@ -304,12 +335,13 @@ func serveOnLocalTailscaled(ctx context.Context, lc *local.Client, st *ipnstate. } type idpServer struct { - lc *local.Client - loopbackURL string - serverURL string // "https://foo.bar.ts.net" - funnel bool - localTSMode bool - rootPath string // root path, used for storing state files + lc *local.Client + loopbackURL string + serverURL string // "https://foo.bar.ts.net" + funnel bool + localTSMode bool + rootPath string // root path, used for storing state files + allowInsecureRegistration bool // If true, allow OAuth without pre-registered clients lazyMux lazy.SyncValue[*http.ServeMux] lazySigningKey lazy.SyncValue[*signingKey] @@ -393,14 +425,15 @@ func (ar *authRequest) allowRelyingParty(r *http.Request, lc *local.Client) erro } func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { + // This URL is visited by the user who is being authenticated. If they are // visiting the URL over Funnel, that means they are not part of the // tailnet that they are trying to be authenticated for. 
+ // NOTE: Funnel request behavior is the same regardless of secure or insecure mode. if isFunnelRequest(r) { http.Error(w, "tsidp: unauthorized", http.StatusUnauthorized) return } - uq := r.URL.Query() redirectURI := uq.Get("redirect_uri") @@ -409,6 +442,86 @@ func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { return } + clientID := uq.Get("client_id") + if clientID == "" { + http.Error(w, "tsidp: must specify client_id", http.StatusBadRequest) + return + } + + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, validate client_id exists but defer client_secret validation to token endpoint + // This follows RFC 6749 which specifies client authentication should occur at token endpoint, not authorization endpoint + + s.mu.Lock() + c, ok := s.funnelClients[clientID] + s.mu.Unlock() + if !ok { + http.Error(w, "tsidp: invalid client ID", http.StatusBadRequest) + return + } + + // Validate client_id matches (public identifier validation) + clientIDcmp := subtle.ConstantTimeCompare([]byte(clientID), []byte(c.ID)) + if clientIDcmp != 1 { + http.Error(w, "tsidp: invalid client ID", http.StatusBadRequest) + return + } + + // Validate redirect URI + if redirectURI != c.RedirectURI { + http.Error(w, "tsidp: redirect_uri mismatch", http.StatusBadRequest) + return + } + + // Get user information + var remoteAddr string + if s.localTSMode { + remoteAddr = r.Header.Get("X-Forwarded-For") + } else { + remoteAddr = r.RemoteAddr + } + + // Check who is visiting the authorize endpoint. 
+ var who *apitype.WhoIsResponse + var err error + who, err = s.lc.WhoIs(r.Context(), remoteAddr) + if err != nil { + log.Printf("Error getting WhoIs: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + code := rands.HexString(32) + ar := &authRequest{ + nonce: uq.Get("nonce"), + remoteUser: who, + redirectURI: redirectURI, + clientID: clientID, + funnelRP: c, // Store the validated client + } + + s.mu.Lock() + mak.Set(&s.code, code, ar) + s.mu.Unlock() + + q := make(url.Values) + q.Set("code", code) + if state := uq.Get("state"); state != "" { + q.Set("state", state) + } + parsedURL, err := url.Parse(redirectURI) + if err != nil { + http.Error(w, "invalid redirect URI", http.StatusInternalServerError) + return + } + parsedURL.RawQuery = q.Encode() + u := parsedURL.String() + log.Printf("Redirecting to %q", u) + + http.Redirect(w, r, u, http.StatusFound) + return + } + var remoteAddr string if s.localTSMode { // in local tailscaled mode, the local tailscaled is forwarding us @@ -430,7 +543,7 @@ func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { nonce: uq.Get("nonce"), remoteUser: who, redirectURI: redirectURI, - clientID: uq.Get("client_id"), + clientID: clientID, } if r.URL.Path == "/authorize/funnel" { @@ -466,7 +579,13 @@ func (s *idpServer) authorize(w http.ResponseWriter, r *http.Request) { if state := uq.Get("state"); state != "" { q.Set("state", state) } - u := redirectURI + "?" 
+ q.Encode() + parsedURL, err := url.Parse(redirectURI) + if err != nil { + http.Error(w, "invalid redirect URI", http.StatusInternalServerError) + return + } + parsedURL.RawQuery = q.Encode() + u := parsedURL.String() log.Printf("Redirecting to %q", u) http.Redirect(w, r, u, http.StatusFound) @@ -476,7 +595,13 @@ func (s *idpServer) newMux() *http.ServeMux { mux := http.NewServeMux() mux.HandleFunc(oidcJWKSPath, s.serveJWKS) mux.HandleFunc(oidcConfigPath, s.serveOpenIDConfig) - mux.HandleFunc("/authorize/", s.authorize) + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, use a single /authorize endpoint + mux.HandleFunc("/authorize", s.authorize) + } else { + // When insecure registration is allowed, preserve original behavior with path-based routing + mux.HandleFunc("/authorize/", s.authorize) + } mux.HandleFunc("/userinfo", s.serveUserInfo) mux.HandleFunc("/token", s.serveToken) mux.HandleFunc("/clients/", s.serveClients) @@ -513,6 +638,24 @@ func (s *idpServer) serveUserInfo(w http.ResponseWriter, r *http.Request) { s.mu.Lock() delete(s.accessToken, tk) s.mu.Unlock() + return + } + + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, validate that the token was issued to a valid client. 
+ if ar.clientID == "" { + http.Error(w, "tsidp: no client associated with token", http.StatusBadRequest) + return + } + + // Validate client still exists + s.mu.Lock() + _, clientExists := s.funnelClients[ar.clientID] + s.mu.Unlock() + if !clientExists { + http.Error(w, "tsidp: client no longer exists", http.StatusUnauthorized) + return + } } ui := userInfo{} @@ -722,11 +865,58 @@ func (s *idpServer) serveToken(w http.ResponseWriter, r *http.Request) { http.Error(w, "tsidp: code not found", http.StatusBadRequest) return } - if err := ar.allowRelyingParty(r, s.lc); err != nil { - log.Printf("Error allowing relying party: %v", err) - http.Error(w, err.Error(), http.StatusForbidden) - return + + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, always validate client credentials regardless of request source + clientID := r.FormValue("client_id") + clientSecret := r.FormValue("client_secret") + + // Try basic auth if form values are empty + if clientID == "" || clientSecret == "" { + if basicClientID, basicClientSecret, ok := r.BasicAuth(); ok { + if clientID == "" { + clientID = basicClientID + } + if clientSecret == "" { + clientSecret = basicClientSecret + } + } + } + + if clientID == "" || clientSecret == "" { + http.Error(w, "tsidp: client credentials required in when insecure registration is not allowed", http.StatusUnauthorized) + return + } + + // Validate against the stored auth request + if ar.clientID != clientID { + http.Error(w, "tsidp: client_id mismatch", http.StatusBadRequest) + return + } + + // Validate client credentials against stored clients + if ar.funnelRP == nil { + http.Error(w, "tsidp: no client information found", http.StatusBadRequest) + return + } + + clientIDcmp := subtle.ConstantTimeCompare([]byte(clientID), []byte(ar.funnelRP.ID)) + clientSecretcmp := subtle.ConstantTimeCompare([]byte(clientSecret), []byte(ar.funnelRP.Secret)) + if clientIDcmp != 1 || clientSecretcmp != 1 { + http.Error(w, "tsidp: invalid 
client credentials", http.StatusUnauthorized) + return + } + } else { + // Original behavior when insecure registration is allowed + // Only checks ClientID and Client Secret when over funnel. + // Local connections are allowed and tailnet connections only check matching nodeIDs. + if err := ar.allowRelyingParty(r, s.lc); err != nil { + log.Printf("Error allowing relying party: %v", err) + http.Error(w, err.Error(), http.StatusForbidden) + return + } } + if ar.redirectURI != r.FormValue("redirect_uri") { http.Error(w, "tsidp: redirect_uri mismatch", http.StatusBadRequest) return @@ -977,24 +1167,38 @@ func (s *idpServer) serveOpenIDConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, "tsidp: not found", http.StatusNotFound) return } - ap, err := netip.ParseAddrPort(r.RemoteAddr) - if err != nil { - log.Printf("Error parsing remote addr: %v", err) - return - } + var authorizeEndpoint string rpEndpoint := s.serverURL - if isFunnelRequest(r) { - authorizeEndpoint = fmt.Sprintf("%s/authorize/funnel", s.serverURL) - } else if who, err := s.lc.WhoIs(r.Context(), r.RemoteAddr); err == nil { - authorizeEndpoint = fmt.Sprintf("%s/authorize/%d", s.serverURL, who.Node.ID) - } else if ap.Addr().IsLoopback() { - rpEndpoint = s.loopbackURL - authorizeEndpoint = fmt.Sprintf("%s/authorize/localhost", s.serverURL) + + if !s.allowInsecureRegistration { + // When insecure registration is NOT allowed, use a single authorization endpoint for all request types + // This will be the same regardless of if the user is on localhost, tailscale, or funnel. 
+ authorizeEndpoint = fmt.Sprintf("%s/authorize", s.serverURL) + rpEndpoint = s.serverURL } else { - log.Printf("Error getting WhoIs: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return + // When insecure registration is allowed TSIDP uses the requestors nodeID + // (typically that of the resource server during auto discovery) when on the tailnet + // and adds it to the authorize URL as a replacement clientID for when the user authorizes. + // The behavior over funnel drops the nodeID & clientID replacement behvaior and does require a + // previously created clientID and client secret. + ap, err := netip.ParseAddrPort(r.RemoteAddr) + if err != nil { + log.Printf("Error parsing remote addr: %v", err) + return + } + if isFunnelRequest(r) { + authorizeEndpoint = fmt.Sprintf("%s/authorize/funnel", s.serverURL) + } else if who, err := s.lc.WhoIs(r.Context(), r.RemoteAddr); err == nil { + authorizeEndpoint = fmt.Sprintf("%s/authorize/%d", s.serverURL, who.Node.ID) + } else if ap.Addr().IsLoopback() { + rpEndpoint = s.loopbackURL + authorizeEndpoint = fmt.Sprintf("%s/authorize/localhost", s.serverURL) + } else { + log.Printf("Error getting WhoIs: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } } w.Header().Set("Content-Type", "application/json") @@ -1148,20 +1352,27 @@ func (s *idpServer) serveDeleteClient(w http.ResponseWriter, r *http.Request, cl } // storeFunnelClientsLocked writes the current mapping of OIDC client ID/secret -// pairs for RPs that access the IDP over funnel. s.mu must be held while -// calling this. +// pairs for RPs that access the IDP. When insecure registration is NOT allowed, uses oauth-clients.json; +// otherwise uses oidc-funnel-clients.json. s.mu must be held while calling this. 
func (s *idpServer) storeFunnelClientsLocked() error { var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(s.funnelClients); err != nil { return err } - funnelClientsFilePath, err := getConfigFilePath(s.rootPath, funnelClientsFile) + var clientsFilePath string + var err error + if !s.allowInsecureRegistration { + clientsFilePath, err = getConfigFilePath(s.rootPath, oauthClientsFile) + } else { + clientsFilePath, err = getConfigFilePath(s.rootPath, funnelClientsFile) + } + if err != nil { return fmt.Errorf("storeFunnelClientsLocked: %v", err) } - return os.WriteFile(funnelClientsFilePath, buf.Bytes(), 0600) + return os.WriteFile(clientsFilePath, buf.Bytes(), 0600) } const ( @@ -1275,9 +1486,67 @@ func isFunnelRequest(r *http.Request) bool { return false } +// migrateOAuthClients migrates from oidc-funnel-clients.json to oauth-clients.json. +// If oauth-clients.json already exists, no migration is performed. +// If both files are missing a new configuration is created. +// The path to the new configuration file is returned. 
+func migrateOAuthClients(rootPath string) (string, error) { + // First, check for oauth-clients.json (new file) + oauthPath, err := getConfigFilePath(rootPath, oauthClientsFile) + if err != nil { + return "", fmt.Errorf("could not get oauth clients file path: %w", err) + } + if _, err := os.Stat(oauthPath); err == nil { + // oauth-clients.json already exists, use it + return oauthPath, nil + } + + // Check for old oidc-funnel-clients.json + oldPath, err := getConfigFilePath(rootPath, funnelClientsFile) + if err != nil { + return "", fmt.Errorf("could not get funnel clients file path: %w", err) + } + if _, err := os.Stat(oldPath); err == nil { + // Old file exists, migrate it + log.Printf("Migrating OAuth clients from %s to %s", oldPath, oauthPath) + + // Read the old file + data, err := os.ReadFile(oldPath) + if err != nil { + return "", fmt.Errorf("could not read old funnel clients file: %w", err) + } + + // Write to new location + if err := os.WriteFile(oauthPath, data, 0600); err != nil { + return "", fmt.Errorf("could not write new oauth clients file: %w", err) + } + + // Rename old file to deprecated name + deprecatedPath, err := getConfigFilePath(rootPath, deprecatedFunnelClientsFile) + if err != nil { + return "", fmt.Errorf("could not get deprecated file path: %w", err) + } + if err := os.Rename(oldPath, deprecatedPath); err != nil { + log.Printf("Warning: could not rename old file to deprecated name: %v", err) + } else { + log.Printf("Renamed old file to %s", deprecatedPath) + } + + return oauthPath, nil + } + + // Neither file exists, create empty oauth-clients.json + log.Printf("Creating empty OAuth clients file at %s", oauthPath) + if err := os.WriteFile(oauthPath, []byte("{}"), 0600); err != nil { + return "", fmt.Errorf("could not create empty oauth clients file: %w", err) + } + + return oauthPath, nil +} + // getConfigFilePath returns the path to the config file for the given file name. 
// The oidc-key.json and funnel-clients.json files were originally opened and written -// to without paths, and ended up in /root dir or home directory of the user running +// to without paths, and ended up in /root or home directory of the user running // the process. To maintain backward compatibility, we return the naked file name if that // file exists already, otherwise we return the full path in the rootPath. func getConfigFilePath(rootPath string, fileName string) (string, error) { diff --git a/cmd/tsidp/tsidp_test.go b/cmd/tsidp/tsidp_test.go index e5465d3cfbf62..4f5af9e598e65 100644 --- a/cmd/tsidp/tsidp_test.go +++ b/cmd/tsidp/tsidp_test.go @@ -1,6 +1,19 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +// Package main tests for tsidp focus on OAuth security boundaries and +// correct implementation of the OpenID Connect identity provider. +// +// Test Strategy: +// - Tests are intentionally granular to provide clear failure signals when +// security-critical logic breaks +// - OAuth flow tests cover both strict mode (registered clients only) and +// legacy mode (local funnel clients) to ensure proper access controls +// - Helper functions like normalizeMap ensure deterministic comparisons +// despite JSON marshaling order variations +// - The privateKey global is reused across tests for performance (RSA key +// generation is expensive) + package main import ( @@ -16,21 +29,28 @@ import ( "net/netip" "net/url" "os" + "path/filepath" "reflect" "sort" "strings" + "sync" "testing" "time" "gopkg.in/square/go-jose.v2" "gopkg.in/square/go-jose.v2/jwt" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/types/opt" "tailscale.com/types/views" ) -// normalizeMap recursively sorts []any values in a map[string]any +// normalizeMap recursively sorts []any values in a map[string]any to ensure +// deterministic test comparisons. 
This is necessary because JSON marshaling +// doesn't guarantee array order, and we need stable comparisons when testing +// claim merging and flattening logic. func normalizeMap(t *testing.T, m map[string]any) map[string]any { t.Helper() normalized := make(map[string]any, len(m)) @@ -66,7 +86,13 @@ func mustMarshalJSON(t *testing.T, v any) tailcfg.RawMessage { return tailcfg.RawMessage(b) } -var privateKey *rsa.PrivateKey = nil +// privateKey is a shared RSA private key used across tests. It's lazily +// initialized on first use to avoid the expensive key generation cost +// for every test. Protected by privateKeyMu for thread safety. +var ( + privateKey *rsa.PrivateKey + privateKeyMu sync.Mutex +) func oidcTestingSigner(t *testing.T) jose.Signer { t.Helper() @@ -86,6 +112,9 @@ func oidcTestingPublicKey(t *testing.T) *rsa.PublicKey { func mustGeneratePrivateKey(t *testing.T) *rsa.PrivateKey { t.Helper() + privateKeyMu.Lock() + defer privateKeyMu.Unlock() + if privateKey != nil { return privateKey } @@ -181,7 +210,7 @@ func TestFlattenExtraClaims(t *testing.T) { {ExtraClaims: map[string]any{"foo": []any{"baz"}}}, }, expected: map[string]any{ - "foo": []any{"bar", "baz"}, // since first was scalar, second being a slice forces slice output + "foo": []any{"bar", "baz"}, // converts to slice when any rule provides a slice }, }, { @@ -462,6 +491,7 @@ func TestServeToken(t *testing.T) { omitCode bool redirectURI string remoteAddr string + strictMode bool expectError bool expected map[string]any }{ @@ -469,12 +499,14 @@ func TestServeToken(t *testing.T) { name: "GET not allowed", method: "GET", grantType: "authorization_code", + strictMode: false, expectError: true, }, { name: "unsupported grant type", method: "POST", grantType: "pkcs", + strictMode: false, expectError: true, }, { @@ -482,6 +514,7 @@ func TestServeToken(t *testing.T) { method: "POST", grantType: "authorization_code", code: "invalid-code", + strictMode: false, expectError: true, }, { @@ -489,6 +522,7 @@ 
func TestServeToken(t *testing.T) { method: "POST", grantType: "authorization_code", omitCode: true, + strictMode: false, expectError: true, }, { @@ -498,6 +532,7 @@ func TestServeToken(t *testing.T) { code: "valid-code", redirectURI: "https://invalid.example.com/callback", remoteAddr: "127.0.0.1:12345", + strictMode: false, expectError: true, }, { @@ -507,15 +542,17 @@ func TestServeToken(t *testing.T) { redirectURI: "https://rp.example.com/callback", code: "valid-code", remoteAddr: "192.168.0.1:12345", + strictMode: false, expectError: true, }, { - name: "extra claim included", + name: "extra claim included (non-strict)", method: "POST", grantType: "authorization_code", redirectURI: "https://rp.example.com/callback", code: "valid-code", remoteAddr: "127.0.0.1:12345", + strictMode: false, caps: tailcfg.PeerCapMap{ tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ @@ -531,11 +568,12 @@ func TestServeToken(t *testing.T) { }, }, { - name: "attempt to overwrite protected claim", + name: "attempt to overwrite protected claim (non-strict)", method: "POST", grantType: "authorization_code", redirectURI: "https://rp.example.com/callback", code: "valid-code", + strictMode: false, caps: tailcfg.PeerCapMap{ tailcfg.PeerCapabilityTsIDP: { mustMarshalJSON(t, capRule{ @@ -554,6 +592,9 @@ func TestServeToken(t *testing.T) { t.Run(tt.name, func(t *testing.T) { now := time.Now() + // Use setupTestServer helper + s := setupTestServer(t, tt.strictMode) + // Fake user/node profile := &tailcfg.UserProfile{ LoginName: "alice@example.com", @@ -575,20 +616,27 @@ func TestServeToken(t *testing.T) { CapMap: tt.caps, } - s := &idpServer{ - code: map[string]*authRequest{ - "valid-code": { - clientID: "client-id", - nonce: "nonce123", - redirectURI: "https://rp.example.com/callback", - validTill: now.Add(5 * time.Minute), - remoteUser: remoteUser, - localRP: true, - }, - }, + // Setup auth request with appropriate configuration for strict mode + var funnelClientPtr *funnelClient + if 
tt.strictMode { + funnelClientPtr = &funnelClient{ + ID: "client-id", + Secret: "test-secret", + Name: "Test Client", + RedirectURI: "https://rp.example.com/callback", + } + s.funnelClients["client-id"] = funnelClientPtr + } + + s.code["valid-code"] = &authRequest{ + clientID: "client-id", + nonce: "nonce123", + redirectURI: "https://rp.example.com/callback", + validTill: now.Add(5 * time.Minute), + remoteUser: remoteUser, + localRP: !tt.strictMode, + funnelRP: funnelClientPtr, } - // Inject a working signer - s.lazySigner.Set(oidcTestingSigner(t)) form := url.Values{} form.Set("grant_type", tt.grantType) @@ -596,6 +644,11 @@ func TestServeToken(t *testing.T) { if !tt.omitCode { form.Set("code", tt.code) } + // Add client credentials for strict mode + if tt.strictMode { + form.Set("client_id", "client-id") + form.Set("client_secret", "test-secret") + } req := httptest.NewRequest(tt.method, "/token", strings.NewReader(form.Encode())) req.RemoteAddr = tt.remoteAddr @@ -779,6 +832,7 @@ func TestExtraUserInfo(t *testing.T) { // Insert a valid token into the idpServer s := &idpServer{ + allowInsecureRegistration: true, // Default to allowing insecure registration for backward compatibility accessToken: map[string]*authRequest{ token: { validTill: tt.tokenValidTill, @@ -854,7 +908,7 @@ func TestFunnelClientsPersistence(t *testing.T) { t.Fatalf("failed to write test file: %v", err) } - t.Run("step1_load_from_existing_file", func(t *testing.T) { + t.Run("load_from_existing_file", func(t *testing.T) { srv := &idpServer{} // Simulate the funnel clients loading logic from main() @@ -887,7 +941,7 @@ func TestFunnelClientsPersistence(t *testing.T) { } }) - t.Run("step2_initialize_empty_when_no_file", func(t *testing.T) { + t.Run("initialize_empty_when_no_file", func(t *testing.T) { nonExistentFile := t.TempDir() + "/non-existent.json" srv := &idpServer{} @@ -913,7 +967,7 @@ func TestFunnelClientsPersistence(t *testing.T) { } }) - t.Run("step3_persist_and_reload_clients", func(t 
*testing.T) { + t.Run("persist_and_reload_clients", func(t *testing.T) { tmpFile2 := t.TempDir() + "/test-persistence.json" // Create initial server with one client @@ -962,4 +1016,1048 @@ func TestFunnelClientsPersistence(t *testing.T) { } } }) + + t.Run("strict_mode_file_handling", func(t *testing.T) { + tmpDir := t.TempDir() + + // Test strict mode uses oauth-clients.json + srv1 := setupTestServer(t, true) + srv1.rootPath = tmpDir + srv1.funnelClients["oauth-client"] = &funnelClient{ + ID: "oauth-client", + Secret: "oauth-secret", + Name: "OAuth Client", + RedirectURI: "https://oauth.example.com/callback", + } + + // Test storeFunnelClientsLocked in strict mode + srv1.mu.Lock() + err := srv1.storeFunnelClientsLocked() + srv1.mu.Unlock() + + if err != nil { + t.Fatalf("failed to store clients in strict mode: %v", err) + } + + // Verify oauth-clients.json was created + oauthPath := tmpDir + "/" + oauthClientsFile + if _, err := os.Stat(oauthPath); err != nil { + t.Errorf("expected oauth-clients.json to be created: %v", err) + } + + // Verify oidc-funnel-clients.json was NOT created + funnelPath := tmpDir + "/" + funnelClientsFile + if _, err := os.Stat(funnelPath); !os.IsNotExist(err) { + t.Error("expected oidc-funnel-clients.json NOT to be created in strict mode") + } + }) + + t.Run("non_strict_mode_file_handling", func(t *testing.T) { + tmpDir := t.TempDir() + + // Test non-strict mode uses oidc-funnel-clients.json + srv1 := setupTestServer(t, false) + srv1.rootPath = tmpDir + srv1.funnelClients["funnel-client"] = &funnelClient{ + ID: "funnel-client", + Secret: "funnel-secret", + Name: "Funnel Client", + RedirectURI: "https://funnel.example.com/callback", + } + + // Test storeFunnelClientsLocked in non-strict mode + srv1.mu.Lock() + err := srv1.storeFunnelClientsLocked() + srv1.mu.Unlock() + + if err != nil { + t.Fatalf("failed to store clients in non-strict mode: %v", err) + } + + // Verify oidc-funnel-clients.json was created + funnelPath := tmpDir + "/" + 
funnelClientsFile + if _, err := os.Stat(funnelPath); err != nil { + t.Errorf("expected oidc-funnel-clients.json to be created: %v", err) + } + + // Verify oauth-clients.json was NOT created + oauthPath := tmpDir + "/" + oauthClientsFile + if _, err := os.Stat(oauthPath); !os.IsNotExist(err) { + t.Error("expected oauth-clients.json NOT to be created in non-strict mode") + } + }) +} + +// Test helper functions for strict OAuth mode testing +func setupTestServer(t *testing.T, strictMode bool) *idpServer { + return setupTestServerWithClient(t, strictMode, nil) +} + +// setupTestServerWithClient creates a test server with an optional LocalClient. +// If lc is nil, the server will have no LocalClient (original behavior). +// If lc is provided, it will be used for WhoIs calls during testing. +func setupTestServerWithClient(t *testing.T, strictMode bool, lc *local.Client) *idpServer { + t.Helper() + + srv := &idpServer{ + allowInsecureRegistration: !strictMode, + code: make(map[string]*authRequest), + accessToken: make(map[string]*authRequest), + funnelClients: make(map[string]*funnelClient), + serverURL: "https://test.ts.net", + rootPath: t.TempDir(), + lc: lc, + } + + // Add a test client for funnel/strict mode testing + srv.funnelClients["test-client"] = &funnelClient{ + ID: "test-client", + Secret: "test-secret", + Name: "Test Client", + RedirectURI: "https://rp.example.com/callback", + } + + // Inject a working signer for token tests + srv.lazySigner.Set(oidcTestingSigner(t)) + + return srv +} + +func TestGetAllowInsecureRegistration(t *testing.T) { + tests := []struct { + name string + flagSet bool + flagValue bool + expectAllowInsecureRegistration bool + }{ + { + name: "flag explicitly set to false - insecure registration disabled (strict mode)", + flagSet: true, + flagValue: false, + expectAllowInsecureRegistration: false, + }, + { + name: "flag explicitly set to true - insecure registration enabled", + flagSet: true, + flagValue: true, + 
expectAllowInsecureRegistration: true, + }, + { + name: "flag unset - insecure registration enabled (default for backward compatibility)", + flagSet: false, + flagValue: false, // not used when unset + expectAllowInsecureRegistration: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Save original state + originalFlag := flagAllowInsecureRegistration + defer func() { + flagAllowInsecureRegistration = originalFlag + }() + + // Set up test state by creating a new BoolFlag and setting values + var b opt.Bool + flagAllowInsecureRegistration = opt.BoolFlag{Bool: &b} + if tt.flagSet { + flagAllowInsecureRegistration.Bool.Set(tt.flagValue) + } + // Note: when tt.flagSet is false, the Bool remains unset (which is what we want) + + got := getAllowInsecureRegistration() + if got != tt.expectAllowInsecureRegistration { + t.Errorf("getAllowInsecureRegistration() = %v, want %v", got, tt.expectAllowInsecureRegistration) + } + }) + } +} + +// TestMigrateOAuthClients verifies the migration from legacy funnel clients +// to OAuth clients. This migration is necessary when transitioning from +// non-strict to strict OAuth mode. 
The migration logic should: +// - Copy clients from oidc-funnel-clients.json to oauth-clients.json +// - Rename the old file to mark it as deprecated +// - Handle cases where files already exist or are missing +func TestMigrateOAuthClients(t *testing.T) { + tests := []struct { + name string + setupOldFile bool + setupNewFile bool + oldFileContent map[string]*funnelClient + newFileContent map[string]*funnelClient + expectError bool + expectNewFileExists bool + expectOldRenamed bool + }{ + { + name: "migrate from old file to new file", + setupOldFile: true, + oldFileContent: map[string]*funnelClient{ + "old-client": { + ID: "old-client", + Secret: "old-secret", + Name: "Old Client", + RedirectURI: "https://old.example.com/callback", + }, + }, + expectNewFileExists: true, + expectOldRenamed: true, + }, + { + name: "new file already exists - no migration", + setupNewFile: true, + newFileContent: map[string]*funnelClient{ + "existing-client": { + ID: "existing-client", + Secret: "existing-secret", + Name: "Existing Client", + RedirectURI: "https://existing.example.com/callback", + }, + }, + expectNewFileExists: true, + expectOldRenamed: false, + }, + { + name: "neither file exists - create empty new file", + expectNewFileExists: true, + expectOldRenamed: false, + }, + { + name: "both files exist - prefer new file", + setupOldFile: true, + setupNewFile: true, + oldFileContent: map[string]*funnelClient{ + "old-client": { + ID: "old-client", + Secret: "old-secret", + Name: "Old Client", + RedirectURI: "https://old.example.com/callback", + }, + }, + newFileContent: map[string]*funnelClient{ + "new-client": { + ID: "new-client", + Secret: "new-secret", + Name: "New Client", + RedirectURI: "https://new.example.com/callback", + }, + }, + expectNewFileExists: true, + expectOldRenamed: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rootPath := t.TempDir() + + // Setup old file if needed + if tt.setupOldFile { + oldData, err := 
json.Marshal(tt.oldFileContent) + if err != nil { + t.Fatalf("failed to marshal old file content: %v", err) + } + oldPath := rootPath + "/" + funnelClientsFile + if err := os.WriteFile(oldPath, oldData, 0600); err != nil { + t.Fatalf("failed to create old file: %v", err) + } + } + + // Setup new file if needed + if tt.setupNewFile { + newData, err := json.Marshal(tt.newFileContent) + if err != nil { + t.Fatalf("failed to marshal new file content: %v", err) + } + newPath := rootPath + "/" + oauthClientsFile + if err := os.WriteFile(newPath, newData, 0600); err != nil { + t.Fatalf("failed to create new file: %v", err) + } + } + + // Call migrateOAuthClients + resultPath, err := migrateOAuthClients(rootPath) + + if tt.expectError && err == nil { + t.Fatalf("expected error but got none") + } + if !tt.expectError && err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tt.expectError { + return + } + + // Verify result path points to oauth-clients.json + expectedPath := filepath.Join(rootPath, oauthClientsFile) + if resultPath != expectedPath { + t.Errorf("expected result path %s, got %s", expectedPath, resultPath) + } + + // Verify new file exists if expected + if tt.expectNewFileExists { + if _, err := os.Stat(resultPath); err != nil { + t.Errorf("expected new file to exist at %s: %v", resultPath, err) + } + + // Verify content + data, err := os.ReadFile(resultPath) + if err != nil { + t.Fatalf("failed to read new file: %v", err) + } + + var clients map[string]*funnelClient + if err := json.Unmarshal(data, &clients); err != nil { + t.Fatalf("failed to unmarshal new file: %v", err) + } + + // Determine expected content + var expectedContent map[string]*funnelClient + if tt.setupNewFile { + expectedContent = tt.newFileContent + } else if tt.setupOldFile { + expectedContent = tt.oldFileContent + } else { + expectedContent = make(map[string]*funnelClient) + } + + if len(clients) != len(expectedContent) { + t.Errorf("expected %d clients, got %d", 
len(expectedContent), len(clients)) + } + + for id, expectedClient := range expectedContent { + actualClient, ok := clients[id] + if !ok { + t.Errorf("expected client %s not found", id) + continue + } + if actualClient.ID != expectedClient.ID || + actualClient.Secret != expectedClient.Secret || + actualClient.Name != expectedClient.Name || + actualClient.RedirectURI != expectedClient.RedirectURI { + t.Errorf("client %s mismatch: got %+v, want %+v", id, actualClient, expectedClient) + } + } + } + + // Verify old file renamed if expected + if tt.expectOldRenamed { + deprecatedPath := rootPath + "/" + deprecatedFunnelClientsFile + if _, err := os.Stat(deprecatedPath); err != nil { + t.Errorf("expected old file to be renamed to %s: %v", deprecatedPath, err) + } + + // Verify original old file is gone + oldPath := rootPath + "/" + funnelClientsFile + if _, err := os.Stat(oldPath); !os.IsNotExist(err) { + t.Errorf("expected old file %s to be removed", oldPath) + } + } + }) + } +} + +// TestGetConfigFilePath verifies backward compatibility for config file location. +// The function must check current directory first (legacy deployments) before +// falling back to rootPath (new installations) to prevent breaking existing +// tsidp deployments that have config files in unexpected locations. 
+func TestGetConfigFilePath(t *testing.T) { + tests := []struct { + name string + fileName string + createInCwd bool + createInRoot bool + expectInCwd bool + expectError bool + }{ + { + name: "file exists in current directory - use current directory", + fileName: "test-config.json", + createInCwd: true, + expectInCwd: true, + }, + { + name: "file does not exist - use root path", + fileName: "test-config.json", + createInCwd: false, + expectInCwd: false, + }, + { + name: "file exists in both - prefer current directory", + fileName: "test-config.json", + createInCwd: true, + createInRoot: true, + expectInCwd: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create temporary directories + rootPath := t.TempDir() + originalWd, err := os.Getwd() + if err != nil { + t.Fatalf("failed to get working directory: %v", err) + } + + // Create a temporary working directory + tmpWd := t.TempDir() + if err := os.Chdir(tmpWd); err != nil { + t.Fatalf("failed to change to temp directory: %v", err) + } + defer func() { + os.Chdir(originalWd) + }() + + // Setup files as needed + if tt.createInCwd { + if err := os.WriteFile(tt.fileName, []byte("{}"), 0600); err != nil { + t.Fatalf("failed to create file in cwd: %v", err) + } + } + if tt.createInRoot { + rootFilePath := filepath.Join(rootPath, tt.fileName) + if err := os.WriteFile(rootFilePath, []byte("{}"), 0600); err != nil { + t.Fatalf("failed to create file in root: %v", err) + } + } + + // Call getConfigFilePath + resultPath, err := getConfigFilePath(rootPath, tt.fileName) + + if tt.expectError && err == nil { + t.Fatalf("expected error but got none") + } + if !tt.expectError && err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tt.expectError { + return + } + + // Verify result + if tt.expectInCwd { + if resultPath != tt.fileName { + t.Errorf("expected path %s, got %s", tt.fileName, resultPath) + } + } else { + expectedPath := filepath.Join(rootPath, tt.fileName) + if resultPath 
!= expectedPath { + t.Errorf("expected path %s, got %s", expectedPath, resultPath) + } + } + }) + } +} + +// TestAuthorizeStrictMode verifies OAuth authorization endpoint security and validation logic. +// Tests both the security boundary (funnel rejection) and the business logic (strict mode validation). +func TestAuthorizeStrictMode(t *testing.T) { + tests := []struct { + name string + strictMode bool + clientID string + redirectURI string + state string + nonce string + setupClient bool + clientRedirect string + useFunnel bool // whether to simulate funnel request + mockWhoIsError bool // whether to make WhoIs return an error + expectError bool + expectCode int + expectRedirect bool + }{ + // Security boundary test: funnel rejection + { + name: "funnel requests are always rejected for security", + strictMode: true, + clientID: "test-client", + redirectURI: "https://rp.example.com/callback", + state: "random-state", + nonce: "random-nonce", + setupClient: true, + clientRedirect: "https://rp.example.com/callback", + useFunnel: true, + expectError: true, + expectCode: http.StatusUnauthorized, + }, + + // Strict mode parameter validation tests (non-funnel) + { + name: "strict mode - missing client_id", + strictMode: true, + clientID: "", + redirectURI: "https://rp.example.com/callback", + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - missing redirect_uri", + strictMode: true, + clientID: "test-client", + redirectURI: "", + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + + // Strict mode client validation tests (non-funnel) + { + name: "strict mode - invalid client_id", + strictMode: true, + clientID: "invalid-client", + redirectURI: "https://rp.example.com/callback", + setupClient: false, + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - redirect_uri mismatch", + strictMode: true, + clientID: "test-client", + 
redirectURI: "https://wrong.example.com/callback", + setupClient: true, + clientRedirect: "https://rp.example.com/callback", + useFunnel: false, + expectError: true, + expectCode: http.StatusBadRequest, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := setupTestServer(t, tt.strictMode) + + // For non-funnel tests, we'll test the parameter validation logic + // without needing to mock WhoIs, since the validation happens before WhoIs calls + + // Setup client if needed + if tt.setupClient { + srv.funnelClients["test-client"] = &funnelClient{ + ID: "test-client", + Secret: "test-secret", + Name: "Test Client", + RedirectURI: tt.clientRedirect, + } + } else if !tt.strictMode { + // For non-strict mode tests that don't need a specific client setup + // but might reference one, clear the default client + delete(srv.funnelClients, "test-client") + } + + // Create request + reqURL := "/authorize" + if !tt.strictMode { + // In non-strict mode, use the node-specific endpoint + reqURL = "/authorize/123" + } + + query := url.Values{} + if tt.clientID != "" { + query.Set("client_id", tt.clientID) + } + if tt.redirectURI != "" { + query.Set("redirect_uri", tt.redirectURI) + } + if tt.state != "" { + query.Set("state", tt.state) + } + if tt.nonce != "" { + query.Set("nonce", tt.nonce) + } + + reqURL += "?" 
+ query.Encode() + req := httptest.NewRequest("GET", reqURL, nil) + req.RemoteAddr = "127.0.0.1:12345" + + // Set funnel header only when explicitly testing funnel behavior + if tt.useFunnel { + req.Header.Set("Tailscale-Funnel-Request", "true") + } + + rr := httptest.NewRecorder() + srv.authorize(rr, req) + + if tt.expectError { + if rr.Code != tt.expectCode { + t.Errorf("expected status code %d, got %d: %s", tt.expectCode, rr.Code, rr.Body.String()) + } + } else if tt.expectRedirect { + if rr.Code != http.StatusFound { + t.Errorf("expected redirect (302), got %d: %s", rr.Code, rr.Body.String()) + } + + location := rr.Header().Get("Location") + if location == "" { + t.Error("expected Location header in redirect response") + } else { + // Parse the redirect URL to verify it contains a code + redirectURL, err := url.Parse(location) + if err != nil { + t.Errorf("failed to parse redirect URL: %v", err) + } else { + code := redirectURL.Query().Get("code") + if code == "" { + t.Error("expected 'code' parameter in redirect URL") + } + + // Verify state is preserved if provided + if tt.state != "" { + returnedState := redirectURL.Query().Get("state") + if returnedState != tt.state { + t.Errorf("expected state '%s', got '%s'", tt.state, returnedState) + } + } + + // Verify the auth request was stored + srv.mu.Lock() + ar, ok := srv.code[code] + srv.mu.Unlock() + + if !ok { + t.Error("expected authorization request to be stored") + } else { + if ar.clientID != tt.clientID { + t.Errorf("expected clientID '%s', got '%s'", tt.clientID, ar.clientID) + } + if ar.redirectURI != tt.redirectURI { + t.Errorf("expected redirectURI '%s', got '%s'", tt.redirectURI, ar.redirectURI) + } + if ar.nonce != tt.nonce { + t.Errorf("expected nonce '%s', got '%s'", tt.nonce, ar.nonce) + } + } + } + } + } else { + t.Errorf("unexpected test case: not expecting error or redirect") + } + }) + } +} + +// TestServeTokenWithClientValidation verifies OAuth token endpoint security in both strict and 
non-strict modes. +// In strict mode, the token endpoint must: +// - Require and validate client credentials (client_id + client_secret) +// - Only accept tokens from registered funnel clients +// - Validate that redirect_uri matches the registered client +// - Support both form-based and HTTP Basic authentication for client credentials +func TestServeTokenWithClientValidation(t *testing.T) { + tests := []struct { + name string + strictMode bool + method string + grantType string + code string + clientID string + clientSecret string + redirectURI string + useBasicAuth bool + setupAuthRequest bool + authRequestClient string + authRequestRedirect string + expectError bool + expectCode int + expectIDToken bool + }{ + { + name: "strict mode - valid token exchange with form credentials", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "test-client", + clientSecret: "test-secret", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectIDToken: true, + }, + { + name: "strict mode - valid token exchange with basic auth", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + redirectURI: "https://rp.example.com/callback", + useBasicAuth: true, + clientID: "test-client", + clientSecret: "test-secret", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectIDToken: true, + }, + { + name: "strict mode - missing client credentials", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectError: true, + expectCode: http.StatusUnauthorized, + }, + { + name: "strict mode - 
client_id mismatch", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "wrong-client", + clientSecret: "test-secret", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - invalid client secret", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "test-client", + clientSecret: "wrong-secret", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectError: true, + expectCode: http.StatusUnauthorized, + }, + { + name: "strict mode - redirect_uri mismatch", + strictMode: true, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + clientID: "test-client", + clientSecret: "test-secret", + redirectURI: "https://wrong.example.com/callback", + setupAuthRequest: true, + authRequestClient: "test-client", + authRequestRedirect: "https://rp.example.com/callback", + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "non-strict mode - no client validation required", + strictMode: false, + method: "POST", + grantType: "authorization_code", + code: "valid-code", + redirectURI: "https://rp.example.com/callback", + setupAuthRequest: true, + authRequestRedirect: "https://rp.example.com/callback", + expectIDToken: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := setupTestServer(t, tt.strictMode) + + // Setup authorization request if needed + if tt.setupAuthRequest { + now := time.Now() + profile := &tailcfg.UserProfile{ + LoginName: "alice@example.com", + DisplayName: "Alice Example", + ProfilePicURL: "https://example.com/alice.jpg", + } + node := &tailcfg.Node{ + ID: 123, + Name: "test-node.test.ts.net.", + User: 456, + 
Key: key.NodePublic{}, + Cap: 1, + DiscoKey: key.DiscoPublic{}, + } + remoteUser := &apitype.WhoIsResponse{ + Node: node, + UserProfile: profile, + CapMap: tailcfg.PeerCapMap{}, + } + + var funnelClientPtr *funnelClient + if tt.strictMode && tt.authRequestClient != "" { + funnelClientPtr = &funnelClient{ + ID: tt.authRequestClient, + Secret: "test-secret", + Name: "Test Client", + RedirectURI: tt.authRequestRedirect, + } + srv.funnelClients[tt.authRequestClient] = funnelClientPtr + } + + srv.code["valid-code"] = &authRequest{ + clientID: tt.authRequestClient, + nonce: "nonce123", + redirectURI: tt.authRequestRedirect, + validTill: now.Add(5 * time.Minute), + remoteUser: remoteUser, + localRP: !tt.strictMode, + funnelRP: funnelClientPtr, + } + } + + // Create form data + form := url.Values{} + form.Set("grant_type", tt.grantType) + form.Set("code", tt.code) + form.Set("redirect_uri", tt.redirectURI) + + if !tt.useBasicAuth { + if tt.clientID != "" { + form.Set("client_id", tt.clientID) + } + if tt.clientSecret != "" { + form.Set("client_secret", tt.clientSecret) + } + } + + req := httptest.NewRequest(tt.method, "/token", strings.NewReader(form.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.RemoteAddr = "127.0.0.1:12345" + + if tt.useBasicAuth && tt.clientID != "" && tt.clientSecret != "" { + req.SetBasicAuth(tt.clientID, tt.clientSecret) + } + + rr := httptest.NewRecorder() + srv.serveToken(rr, req) + + if tt.expectError { + if rr.Code != tt.expectCode { + t.Errorf("expected status code %d, got %d: %s", tt.expectCode, rr.Code, rr.Body.String()) + } + } else if tt.expectIDToken { + if rr.Code != http.StatusOK { + t.Errorf("expected 200 OK, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp struct { + IDToken string `json:"id_token"` + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + } + + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + 
t.Fatalf("failed to unmarshal response: %v", err) + } + + if resp.IDToken == "" { + t.Error("expected id_token in response") + } + if resp.AccessToken == "" { + t.Error("expected access_token in response") + } + if resp.TokenType != "Bearer" { + t.Errorf("expected token_type 'Bearer', got '%s'", resp.TokenType) + } + if resp.ExpiresIn != 300 { + t.Errorf("expected expires_in 300, got %d", resp.ExpiresIn) + } + + // Verify access token was stored + srv.mu.Lock() + _, ok := srv.accessToken[resp.AccessToken] + srv.mu.Unlock() + + if !ok { + t.Error("expected access token to be stored") + } + + // Verify authorization code was consumed + srv.mu.Lock() + _, ok = srv.code[tt.code] + srv.mu.Unlock() + + if ok { + t.Error("expected authorization code to be consumed") + } + } + }) + } +} + +// TestServeUserInfoWithClientValidation verifies UserInfo endpoint security in both strict and non-strict modes. +// In strict mode, the UserInfo endpoint must: +// - Validate that access tokens are associated with registered clients +// - Reject tokens for clients that have been deleted/unregistered +// - Enforce token expiration properly +// - Return appropriate user claims based on client capabilities +func TestServeUserInfoWithClientValidation(t *testing.T) { + tests := []struct { + name string + strictMode bool + setupToken bool + setupClient bool + clientID string + token string + tokenValidTill time.Time + expectError bool + expectCode int + expectUserInfo bool + }{ + { + name: "strict mode - valid token with existing client", + strictMode: true, + setupToken: true, + setupClient: true, + clientID: "test-client", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectUserInfo: true, + }, + { + name: "strict mode - valid token but client no longer exists", + strictMode: true, + setupToken: true, + setupClient: false, + clientID: "deleted-client", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectError: true, + 
expectCode: http.StatusUnauthorized, + }, + { + name: "strict mode - expired token", + strictMode: true, + setupToken: true, + setupClient: true, + clientID: "test-client", + token: "expired-token", + tokenValidTill: time.Now().Add(-5 * time.Minute), + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - invalid token", + strictMode: true, + setupToken: false, + token: "invalid-token", + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "strict mode - token without client association", + strictMode: true, + setupToken: true, + setupClient: false, + clientID: "", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectError: true, + expectCode: http.StatusBadRequest, + }, + { + name: "non-strict mode - no client validation required", + strictMode: false, + setupToken: true, + setupClient: false, + clientID: "", + token: "valid-token", + tokenValidTill: time.Now().Add(5 * time.Minute), + expectUserInfo: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := setupTestServer(t, tt.strictMode) + + // Setup client if needed + if tt.setupClient { + srv.funnelClients[tt.clientID] = &funnelClient{ + ID: tt.clientID, + Secret: "test-secret", + Name: "Test Client", + RedirectURI: "https://rp.example.com/callback", + } + } + + // Setup token if needed + if tt.setupToken { + profile := &tailcfg.UserProfile{ + LoginName: "alice@example.com", + DisplayName: "Alice Example", + ProfilePicURL: "https://example.com/alice.jpg", + } + node := &tailcfg.Node{ + ID: 123, + Name: "test-node.test.ts.net.", + User: 456, + Key: key.NodePublic{}, + Cap: 1, + DiscoKey: key.DiscoPublic{}, + } + remoteUser := &apitype.WhoIsResponse{ + Node: node, + UserProfile: profile, + CapMap: tailcfg.PeerCapMap{}, + } + + srv.accessToken[tt.token] = &authRequest{ + clientID: tt.clientID, + validTill: tt.tokenValidTill, + remoteUser: remoteUser, + } + } + + // Create request + req := 
httptest.NewRequest("GET", "/userinfo", nil) + req.Header.Set("Authorization", "Bearer "+tt.token) + req.RemoteAddr = "127.0.0.1:12345" + + rr := httptest.NewRecorder() + srv.serveUserInfo(rr, req) + + if tt.expectError { + if rr.Code != tt.expectCode { + t.Errorf("expected status code %d, got %d: %s", tt.expectCode, rr.Code, rr.Body.String()) + } + } else if tt.expectUserInfo { + if rr.Code != http.StatusOK { + t.Errorf("expected 200 OK, got %d: %s", rr.Code, rr.Body.String()) + } + + var resp map[string]any + if err := json.Unmarshal(rr.Body.Bytes(), &resp); err != nil { + t.Fatalf("failed to parse JSON response: %v", err) + } + + // Check required fields + expectedFields := []string{"sub", "name", "email", "picture", "username"} + for _, field := range expectedFields { + if _, ok := resp[field]; !ok { + t.Errorf("expected field '%s' in user info response", field) + } + } + + // Verify specific values + if resp["name"] != "Alice Example" { + t.Errorf("expected name 'Alice Example', got '%v'", resp["name"]) + } + if resp["email"] != "alice@example.com" { + t.Errorf("expected email 'alice@example.com', got '%v'", resp["email"]) + } + if resp["username"] != "alice" { + t.Errorf("expected username 'alice', got '%v'", resp["username"]) + } + } + }) + } } From 3b68d607be1e3069e9ddbd99d85966e4f059c237 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 28 Aug 2025 21:29:11 -0700 Subject: [PATCH 0269/1093] wgengine/magicsock: drop DERP queue from head rather than tail If the DERP queue is full, drop the oldest item first, rather than the youngest, on the assumption that older data is more likely to be unanswerable. 
Updates tailscale/corp#31762 Signed-off-by: James Tucker --- wgengine/magicsock/derp.go | 4 ++-- wgengine/magicsock/magicsock.go | 32 +++++++++++++++++++++----------- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 9c60e4893a2cd..b5fc36bb8aa9c 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -91,7 +91,7 @@ func (c *Conn) fallbackDERPRegionForPeer(peer key.NodePublic) (regionID int) { type activeDerp struct { c *derphttp.Client cancel context.CancelFunc - writeCh chan<- derpWriteRequest + writeCh chan derpWriteRequest // lastWrite is the time of the last request for its write // channel (currently even if there was no write). // It is always non-nil and initialized to a non-zero Time. @@ -302,7 +302,7 @@ const derpWriteQueueDepth = 32 // // It returns nil if the network is down, the Conn is closed, or the regionID is // not known. -func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan<- derpWriteRequest { +func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan derpWriteRequest { if c.networkDown() { return nil } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a7f84e3521df7..a11e8a1cd4f80 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1642,18 +1642,27 @@ func (c *Conn) sendAddr(addr netip.AddrPort, pubKey key.NodePublic, b []byte, is // internal locks. pkt := bytes.Clone(b) - select { - case <-c.donec: - metricSendDERPErrorClosed.Add(1) - return false, errConnClosed - case ch <- derpWriteRequest{addr, pubKey, pkt, isDisco}: - metricSendDERPQueued.Add(1) - return true, nil - default: - metricSendDERPErrorQueue.Add(1) - // Too many writes queued. Drop packet. 
- return false, errDropDerpPacket + wr := derpWriteRequest{addr, pubKey, pkt, isDisco} + for range 3 { + select { + case <-c.donec: + metricSendDERPErrorClosed.Add(1) + return false, errConnClosed + case ch <- wr: + metricSendDERPQueued.Add(1) + return true, nil + default: + select { + case <-ch: + metricSendDERPDropped.Add(1) + default: + } + } } + // gave up after 3 write attempts + metricSendDERPErrorQueue.Add(1) + // Too many writes queued. Drop packet. + return false, errDropDerpPacket } type receiveBatch struct { @@ -3937,6 +3946,7 @@ var ( metricSendDERPErrorChan = clientmetric.NewCounter("magicsock_send_derp_error_chan") metricSendDERPErrorClosed = clientmetric.NewCounter("magicsock_send_derp_error_closed") metricSendDERPErrorQueue = clientmetric.NewCounter("magicsock_send_derp_error_queue") + metricSendDERPDropped = clientmetric.NewCounter("magicsock_send_derp_dropped") metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") metricSendUDPError = clientmetric.NewCounter("magicsock_send_udp_error") metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") From 442f4758a9f4df88138eee2d24d282f0bb1f5c06 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Sun, 31 Aug 2025 12:30:17 -0400 Subject: [PATCH 0270/1093] .github/workflows: reviewing depaware.txt is unnecessary (#16989) @tailscale/dataplane almost never needs to review depaware.txt, when it is the only change to the DERP implementation. 
Related #16372 Updates #cleanup Signed-off-by: Simon Law --- .github/workflows/request-dataplane-review.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml index 836fef6fbce7c..d5ef78d17ff53 100644 --- a/.github/workflows/request-dataplane-review.yml +++ b/.github/workflows/request-dataplane-review.yml @@ -8,6 +8,8 @@ on: - ".github/workflows/request-dataplane-review.yml" - "**/*derp*" - "**/derp*/**" + paths-ignore: + - "**/depaware.txt" jobs: request-dataplane-review: From 6d45fcfc931d78d9796a885544c0053a3d5f033e Mon Sep 17 00:00:00 2001 From: Simon Law Date: Sun, 31 Aug 2025 13:29:25 -0400 Subject: [PATCH 0271/1093] .github/workflows: reviewing depaware.txt is unnecessary (#16990) Apparently, #16989 introduced a bug in request-dataplane-review.yml: > you may only define one of `paths` and `paths-ignore` for a single event Related #16372 Updates #cleanup Signed-off-by: Simon Law --- .github/workflows/request-dataplane-review.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml index d5ef78d17ff53..4a86b0541afaa 100644 --- a/.github/workflows/request-dataplane-review.yml +++ b/.github/workflows/request-dataplane-review.yml @@ -8,8 +8,7 @@ on: - ".github/workflows/request-dataplane-review.yml" - "**/*derp*" - "**/derp*/**" - paths-ignore: - - "**/depaware.txt" + - "!**/depaware.txt" jobs: request-dataplane-review: From cc532efc20004522b99e2d1c1029734205caec7f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 30 Aug 2025 08:02:35 -0700 Subject: [PATCH 0272/1093] util/syspolicy/*: move syspolicy keys to new const leaf "pkey" package This is step 1 of ~3, breaking up #14720 into reviewable chunks, with the aim to make syspolicy be a build-time configurable feature. 
In this first (very noisy) step, all the syspolicy string key constants move to a new constant-only (code-free) package. This will make future steps more reviewable, without this movement noise. There are no code or behavior changes here. The future steps of this series can be seen in #14720: removing global funcs from syspolicy resolution and using an interface that's plumbed around instead. Then adding build tags. Updates #12614 Change-Id: If73bf2c28b9c9b1a408fe868b0b6a25b03eeabd1 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tailscaled/tailscaled.go | 3 +- cmd/tailscaled/tailscaled_windows.go | 6 +- cmd/tsidp/depaware.txt | 1 + control/controlclient/direct.go | 3 +- control/controlclient/sign_supported.go | 3 +- ipn/desktop/extension.go | 3 +- ipn/ipnauth/policy.go | 5 +- ipn/ipnlocal/c2n.go | 3 +- ipn/ipnlocal/local.go | 53 ++-- ipn/ipnlocal/local_test.go | 117 ++++---- ipn/prefs.go | 3 +- logpolicy/logpolicy.go | 3 +- net/dns/manager_windows.go | 5 +- posture/serialnumber_syspolicy.go | 3 +- tsnet/depaware.txt | 1 + .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + .../tailscaled_deps_test_windows.go | 1 + util/syspolicy/handler.go | 9 +- util/syspolicy/internal/metrics/metrics.go | 7 +- .../internal/metrics/metrics_test.go | 3 +- util/syspolicy/pkey/pkey.go | 177 ++++++++++++ util/syspolicy/policy_keys.go | 253 +++--------------- util/syspolicy/policy_keys_test.go | 7 +- util/syspolicy/rsop/change_callbacks.go | 7 +- util/syspolicy/rsop/resultant_policy_test.go | 59 ++-- util/syspolicy/setting/key.go | 13 - util/syspolicy/setting/raw_item.go | 3 +- util/syspolicy/setting/setting.go | 13 +- util/syspolicy/setting/setting_test.go | 7 +- util/syspolicy/setting/snapshot.go | 25 +- 
util/syspolicy/setting/snapshot_test.go | 145 +++++----- util/syspolicy/source/env_policy_store.go | 15 +- .../syspolicy/source/env_policy_store_test.go | 5 +- util/syspolicy/source/policy_reader.go | 5 +- util/syspolicy/source/policy_reader_test.go | 9 +- util/syspolicy/source/policy_source.go | 9 +- util/syspolicy/source/policy_store_windows.go | 29 +- .../source/policy_store_windows_test.go | 7 +- util/syspolicy/source/test_store.go | 29 +- util/syspolicy/syspolicy.go | 21 +- util/syspolicy/syspolicy_test.go | 77 +++--- 48 files changed, 601 insertions(+), 554 deletions(-) create mode 100644 util/syspolicy/pkey/pkey.go delete mode 100644 util/syspolicy/setting/key.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 6b149e5f54cdf..ccea25a8a8932 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -174,6 +174,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 4b1e4a1e4a3b3..a0214575bc625 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -955,6 +955,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics 
from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 02ffec0ea009c..7f09be33f4bd8 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -195,6 +195,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index c2d9f3d00fbb6..46efa5b211fe4 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -432,6 +432,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git 
a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 06d366aa6e68e..f55535470d7a6 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -65,6 +65,7 @@ import ( "tailscale.com/util/multierr" "tailscale.com/util/osshare" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" @@ -1011,6 +1012,6 @@ func defaultEncryptState() bool { // (plan9/FreeBSD/etc). return false } - v, _ := syspolicy.GetBoolean(syspolicy.EncryptState, false) + v, _ := syspolicy.GetBoolean(pkey.EncryptState, false) return v } diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 1b50688922968..2d4e71d3cb430 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -56,6 +56,7 @@ import ( "tailscale.com/types/logid" "tailscale.com/util/osdiag" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" "tailscale.com/version" @@ -155,7 +156,7 @@ func runWindowsService(pol *logpolicy.Policy) error { if syslog, err := eventlog.Open(serviceName); err == nil { syslogf = func(format string, args ...any) { - if logSCMInteractions, _ := syspolicy.GetBoolean(syspolicy.LogSCMInteractions, false); logSCMInteractions { + if logSCMInteractions, _ := syspolicy.GetBoolean(pkey.LogSCMInteractions, false); logSCMInteractions { syslog.Info(0, fmt.Sprintf(format, args...)) } } @@ -389,8 +390,7 @@ func handleSessionChange(chgRequest svc.ChangeRequest) { if chgRequest.Cmd != svc.SessionChange || chgRequest.EventType != windows.WTS_SESSION_UNLOCK { return } - - if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(syspolicy.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { + if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(pkey.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { log.Printf("Received WTS_SESSION_UNLOCK 
event, initiating DNS flush.") go func() { err := dns.Flush() diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index e8bc2b254785c..f1e22efbfcfe6 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -384,6 +384,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 78a86e935551d..cee9387795ef0 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -54,6 +54,7 @@ import ( "tailscale.com/util/multierr" "tailscale.com/util/singleflight" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/zstdframe" @@ -616,7 +617,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new return regen, opt.URL, nil, err } - tailnet, err := syspolicy.GetString(syspolicy.Tailnet, "") + tailnet, err := syspolicy.GetString(pkey.Tailnet, "") if err != nil { c.logf("unable to provide Tailnet field in register request. 
err: %v", err) } diff --git a/control/controlclient/sign_supported.go b/control/controlclient/sign_supported.go index a5d42ad7df4a2..fab7cd16b0810 100644 --- a/control/controlclient/sign_supported.go +++ b/control/controlclient/sign_supported.go @@ -19,6 +19,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" ) // getMachineCertificateSubject returns the exact name of a Subject that needs @@ -31,7 +32,7 @@ import ( // // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" func getMachineCertificateSubject() string { - machineCertSubject, _ := syspolicy.GetString(syspolicy.MachineCertificateSubject, "") + machineCertSubject, _ := syspolicy.GetString(pkey.MachineCertificateSubject, "") return machineCertSubject } diff --git a/ipn/desktop/extension.go b/ipn/desktop/extension.go index f204a90dee048..15d239f89c713 100644 --- a/ipn/desktop/extension.go +++ b/ipn/desktop/extension.go @@ -19,6 +19,7 @@ import ( "tailscale.com/ipn/ipnext" "tailscale.com/types/logger" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" ) // featureName is the name of the feature implemented by this package. @@ -135,7 +136,7 @@ func (e *desktopSessionsExt) getBackgroundProfile(profiles ipnext.ProfileStore) e.mu.Lock() defer e.mu.Unlock() - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { + if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); !alwaysOn { // If the Always-On mode is disabled, there's no background profile // as far as the desktop session extension is concerned. 
return ipn.LoginProfileView{} diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index aa4ec4100ff93..36004b293ead2 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -11,6 +11,7 @@ import ( "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" ) type actorWithPolicyChecks struct{ Actor } @@ -50,10 +51,10 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, // TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] // and corp to this package. func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error { - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { + if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); !alwaysOn { return nil } - if allowWithReason, _ := syspolicy.GetBoolean(syspolicy.AlwaysOnOverrideWithReason, false); !allowWithReason { + if allowWithReason, _ := syspolicy.GetBoolean(pkey.AlwaysOnOverrideWithReason, false); !allowWithReason { return errors.New("disconnect not allowed: always-on mode is enabled") } if reason == "" { diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 4b91c3cb9453d..8c3bf7b26a50f 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -30,6 +30,7 @@ import ( "tailscale.com/util/goroutines" "tailscale.com/util/set" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -342,7 +343,7 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http // this will first check syspolicy, MDM settings like Registry // on Windows or defaults on macOS. If they are not set, it falls // back to the cli-flag, `--posture-checking`. 
- choice, err := syspolicy.GetPreferenceOption(syspolicy.PostureChecking) + choice, err := syspolicy.GetPreferenceOption(pkey.PostureChecking) if err != nil { b.logf( "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 43d7e121652cc..bcfb99b09af34 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -108,6 +108,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/systemd" "tailscale.com/util/testenv" @@ -1762,51 +1763,51 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control } type preferencePolicyInfo struct { - key syspolicy.Key + key pkey.Key get func(ipn.PrefsView) bool set func(*ipn.Prefs, bool) } var preferencePolicies = []preferencePolicyInfo{ { - key: syspolicy.EnableIncomingConnections, + key: pkey.EnableIncomingConnections, // Allow Incoming (used by the UI) is the negation of ShieldsUp (used by the // backend), so this has to convert between the two conventions. 
get: func(p ipn.PrefsView) bool { return !p.ShieldsUp() }, set: func(p *ipn.Prefs, v bool) { p.ShieldsUp = !v }, }, { - key: syspolicy.EnableServerMode, + key: pkey.EnableServerMode, get: func(p ipn.PrefsView) bool { return p.ForceDaemon() }, set: func(p *ipn.Prefs, v bool) { p.ForceDaemon = v }, }, { - key: syspolicy.ExitNodeAllowLANAccess, + key: pkey.ExitNodeAllowLANAccess, get: func(p ipn.PrefsView) bool { return p.ExitNodeAllowLANAccess() }, set: func(p *ipn.Prefs, v bool) { p.ExitNodeAllowLANAccess = v }, }, { - key: syspolicy.EnableTailscaleDNS, + key: pkey.EnableTailscaleDNS, get: func(p ipn.PrefsView) bool { return p.CorpDNS() }, set: func(p *ipn.Prefs, v bool) { p.CorpDNS = v }, }, { - key: syspolicy.EnableTailscaleSubnets, + key: pkey.EnableTailscaleSubnets, get: func(p ipn.PrefsView) bool { return p.RouteAll() }, set: func(p *ipn.Prefs, v bool) { p.RouteAll = v }, }, { - key: syspolicy.CheckUpdates, + key: pkey.CheckUpdates, get: func(p ipn.PrefsView) bool { return p.AutoUpdate().Check }, set: func(p *ipn.Prefs, v bool) { p.AutoUpdate.Check = v }, }, { - key: syspolicy.ApplyUpdates, + key: pkey.ApplyUpdates, get: func(p ipn.PrefsView) bool { v, _ := p.AutoUpdate().Apply.Get(); return v }, set: func(p *ipn.Prefs, v bool) { p.AutoUpdate.Apply.Set(v) }, }, { - key: syspolicy.EnableRunExitNode, + key: pkey.EnableRunExitNode, get: func(p ipn.PrefsView) bool { return p.AdvertisesExitNode() }, set: func(p *ipn.Prefs, v bool) { p.SetAdvertiseExitNode(v) }, }, @@ -1817,13 +1818,13 @@ var preferencePolicies = []preferencePolicyInfo{ // // b.mu must be held. 
func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { - if controlURL, err := syspolicy.GetString(syspolicy.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { + if controlURL, err := syspolicy.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true } const sentinel = "HostnameDefaultValue" - hostnameFromPolicy, _ := syspolicy.GetString(syspolicy.Hostname, sentinel) + hostnameFromPolicy, _ := syspolicy.GetString(pkey.Hostname, sentinel) switch hostnameFromPolicy { case sentinel: // An empty string for this policy value means that the admin wants to delete @@ -1858,7 +1859,7 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { anyChange = true } - if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { + if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { prefs.WantRunning = true anyChange = true } @@ -1882,7 +1883,7 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { // // b.mu must be held. 
func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { - if exitNodeIDStr, _ := syspolicy.GetString(syspolicy.ExitNodeID, ""); exitNodeIDStr != "" { + if exitNodeIDStr, _ := syspolicy.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) // Try to parse the policy setting value as an "auto:"-prefixed [ipn.ExitNodeExpression], @@ -1923,7 +1924,7 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange prefs.ExitNodeIP = netip.Addr{} anyChange = true } - } else if exitNodeIPStr, _ := syspolicy.GetString(syspolicy.ExitNodeIP, ""); exitNodeIPStr != "" { + } else if exitNodeIPStr, _ := syspolicy.GetString(pkey.ExitNodeIP, ""); exitNodeIPStr != "" { if prefs.AutoExitNode != "" { prefs.AutoExitNode = "" // mutually exclusive with ExitNodeIP anyChange = true @@ -1970,7 +1971,7 @@ func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { // sysPolicyChanged is a callback triggered by syspolicy when it detects // a change in one or more syspolicy settings. func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { - if policy.HasChangedAnyOf(syspolicy.AlwaysOn, syspolicy.AlwaysOnOverrideWithReason) { + if policy.HasChangedAnyOf(pkey.AlwaysOn, pkey.AlwaysOnOverrideWithReason) { // If the AlwaysOn or the AlwaysOnOverrideWithReason policy has changed, // we should reset the overrideAlwaysOn flag, as the override might // no longer be valid. @@ -1979,7 +1980,7 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { b.mu.Unlock() } - if policy.HasChangedAnyOf(syspolicy.ExitNodeID, syspolicy.ExitNodeIP, syspolicy.AllowExitNodeOverride) { + if policy.HasChangedAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP, pkey.AllowExitNodeOverride) { // Reset the exit node override if a policy that enforces exit node usage // or allows the user to override automatic exit node selection has changed. 
b.mu.Lock() @@ -1987,7 +1988,7 @@ func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { b.mu.Unlock() } - if policy.HasChanged(syspolicy.AllowedSuggestedExitNodes) { + if policy.HasChanged(pkey.AllowedSuggestedExitNodes) { b.refreshAllowedSuggestions() // Re-evaluate exit node suggestion now that the policy setting has changed. if _, err := b.SuggestExitNode(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { @@ -2348,7 +2349,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { } if b.state != ipn.Running && b.conf == nil && opts.AuthKey == "" { - sysak, _ := syspolicy.GetString(syspolicy.AuthKey, "") + sysak, _ := syspolicy.GetString(pkey.AuthKey, "") if sysak != "" { b.logf("Start: setting opts.AuthKey by syspolicy, len=%v", len(sysak)) opts.AuthKey = strings.TrimSpace(sysak) @@ -4407,7 +4408,7 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn // Prevent users from changing exit node preferences // when exit node usage is managed by policy. if mp.ExitNodeIDSet || mp.ExitNodeIPSet || mp.AutoExitNodeSet { - isManaged, err := syspolicy.HasAnyOf(syspolicy.ExitNodeID, syspolicy.ExitNodeIP) + isManaged, err := syspolicy.HasAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP) if err != nil { err = fmt.Errorf("policy check failed: %w", err) } else if isManaged { @@ -4415,7 +4416,7 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn // if permitted by [syspolicy.AllowExitNodeOverride]. // // Disabling exit node usage entirely is not allowed. - allowExitNodeOverride, _ := syspolicy.GetBoolean(syspolicy.AllowExitNodeOverride, false) + allowExitNodeOverride, _ := syspolicy.GetBoolean(pkey.AllowExitNodeOverride, false) if !allowExitNodeOverride || b.changeDisablesExitNodeLocked(prefs, mp) { err = errManagedByPolicy } @@ -4519,7 +4520,7 @@ func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, o // mode on them until the policy changes, they switch to a different profile, etc. 
b.overrideAlwaysOn = true - if reconnectAfter, _ := syspolicy.GetDuration(syspolicy.ReconnectAfter, 0); reconnectAfter > 0 { + if reconnectAfter, _ := syspolicy.GetDuration(pkey.ReconnectAfter, 0); reconnectAfter > 0 { b.startReconnectTimerLocked(reconnectAfter) } } @@ -4530,7 +4531,7 @@ func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, o b.overrideExitNodePolicy = false } if mp.AutoExitNodeSet || mp.ExitNodeIDSet || mp.ExitNodeIPSet { - if allowExitNodeOverride, _ := syspolicy.GetBoolean(syspolicy.AllowExitNodeOverride, false); allowExitNodeOverride { + if allowExitNodeOverride, _ := syspolicy.GetBoolean(pkey.AllowExitNodeOverride, false); allowExitNodeOverride { // If applying exit node policy settings to the new prefs results in no change, // the user is not overriding the policy. Otherwise, it is an override. b.overrideExitNodePolicy = b.applyExitNodeSysPolicyLocked(newPrefs.AsStruct()) @@ -7807,9 +7808,9 @@ type selectRegionFunc func(views.Slice[int]) int type selectNodeFunc func(nodes views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] { - nodes, err := syspolicy.GetStringArray(syspolicy.AllowedSuggestedExitNodes, nil) + nodes, err := syspolicy.GetStringArray(pkey.AllowedSuggestedExitNodes, nil) if err != nil { - log.Printf("fillAllowedSuggestions: unable to look up %q policy: %v", syspolicy.AllowedSuggestedExitNodes, err) + log.Printf("fillAllowedSuggestions: unable to look up %q policy: %v", pkey.AllowedSuggestedExitNodes, err) return nil } if nodes == nil { @@ -8176,7 +8177,7 @@ func isAllowedAutoExitNodeID(exitNodeID tailcfg.StableNodeID) bool { if exitNodeID == "" { return false // an exit node is required } - if nodes, _ := syspolicy.GetStringArray(syspolicy.AllowedSuggestedExitNodes, nil); nodes != nil { + if nodes, _ := syspolicy.GetStringArray(pkey.AllowedSuggestedExitNodes, nil); nodes != nil { return slices.Contains(nodes, 
string(exitNodeID)) } @@ -8339,7 +8340,7 @@ func (b *LocalBackend) stateEncrypted() opt.Bool { // the Keychain. A future release will clean up the on-disk state // files. // TODO(#15830): always return true here once MacSys is fully migrated. - sp, _ := syspolicy.GetBoolean(syspolicy.EncryptState, false) + sp, _ := syspolicy.GetBoolean(pkey.EncryptState, false) return opt.NewBool(sp) default: // Probably self-compiled tailscaled, we don't use the Keychain diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 60b5b2c5be33d..2b83e47f884b0 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -62,6 +62,7 @@ import ( "tailscale.com/util/must" "tailscale.com/util/set" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" @@ -1182,16 +1183,16 @@ func TestConfigureExitNode(t *testing.T) { // Configure policy settings, if any. store := source.NewTestStore(t) if tt.exitNodeIDPolicy != nil { - store.SetStrings(source.TestSettingOf(syspolicy.ExitNodeID, string(*tt.exitNodeIDPolicy))) + store.SetStrings(source.TestSettingOf(pkey.ExitNodeID, string(*tt.exitNodeIDPolicy))) } if tt.exitNodeIPPolicy != nil { - store.SetStrings(source.TestSettingOf(syspolicy.ExitNodeIP, tt.exitNodeIPPolicy.String())) + store.SetStrings(source.TestSettingOf(pkey.ExitNodeIP, tt.exitNodeIPPolicy.String())) } if tt.exitNodeAllowedIDs != nil { - store.SetStringLists(source.TestSettingOf(syspolicy.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs))) + store.SetStringLists(source.TestSettingOf(pkey.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs))) } if tt.exitNodeAllowOverride { - store.SetBooleans(source.TestSettingOf(syspolicy.AllowExitNodeOverride, true)) + store.SetBooleans(source.TestSettingOf(pkey.AllowExitNodeOverride, true)) } if store.IsEmpty() { // No syspolicy settings, so don't register a store. 
@@ -2890,10 +2891,10 @@ func TestSetExitNodeIDPolicy(t *testing.T) { policyStore := source.NewTestStore(t) if test.exitNodeIDKey { - policyStore.SetStrings(source.TestSettingOf(syspolicy.ExitNodeID, test.exitNodeID)) + policyStore.SetStrings(source.TestSettingOf(pkey.ExitNodeID, test.exitNodeID)) } if test.exitNodeIPKey { - policyStore.SetStrings(source.TestSettingOf(syspolicy.ExitNodeIP, test.exitNodeIP)) + policyStore.SetStrings(source.TestSettingOf(pkey.ExitNodeIP, test.exitNodeIP)) } syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) @@ -3029,7 +3030,7 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { syspolicy.RegisterWellKnownSettingsForTest(t) policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, "auto:any", + pkey.ExitNodeID, "auto:any", )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) @@ -3114,7 +3115,7 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { b.cc = cc syspolicy.RegisterWellKnownSettingsForTest(t) policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, "auto:any", + pkey.ExitNodeID, "auto:any", )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) peer1 := makePeer(1, withCap(26), withDERP(3), withSuggest(), withExitRoutes()) @@ -3223,7 +3224,7 @@ func TestSetControlClientStatusAutoExitNode(t *testing.T) { b := newTestLocalBackend(t) syspolicy.RegisterWellKnownSettingsForTest(t) policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.ExitNodeID, "auto:any", + pkey.ExitNodeID, "auto:any", )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) b.currentNode().SetNetMap(nm) @@ -3255,7 +3256,7 @@ func TestApplySysPolicy(t *testing.T) { prefs ipn.Prefs wantPrefs ipn.Prefs wantAnyChange bool - stringPolicies map[syspolicy.Key]string + stringPolicies map[pkey.Key]string }{ { name: "empty prefs without policies", @@ 
-3290,13 +3291,13 @@ func TestApplySysPolicy(t *testing.T) { RouteAll: true, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "1", - syspolicy.EnableIncomingConnections: "never", - syspolicy.EnableServerMode: "always", - syspolicy.ExitNodeAllowLANAccess: "always", - syspolicy.EnableTailscaleDNS: "always", - syspolicy.EnableTailscaleSubnets: "always", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "1", + pkey.EnableIncomingConnections: "never", + pkey.EnableServerMode: "always", + pkey.ExitNodeAllowLANAccess: "always", + pkey.EnableTailscaleDNS: "always", + pkey.EnableTailscaleSubnets: "always", }, }, { @@ -3311,13 +3312,13 @@ func TestApplySysPolicy(t *testing.T) { ShieldsUp: true, ForceDaemon: true, }, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "1", - syspolicy.EnableIncomingConnections: "never", - syspolicy.EnableServerMode: "always", - syspolicy.ExitNodeAllowLANAccess: "never", - syspolicy.EnableTailscaleDNS: "never", - syspolicy.EnableTailscaleSubnets: "never", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "1", + pkey.EnableIncomingConnections: "never", + pkey.EnableServerMode: "always", + pkey.ExitNodeAllowLANAccess: "never", + pkey.EnableTailscaleDNS: "never", + pkey.EnableTailscaleSubnets: "never", }, }, { @@ -3339,13 +3340,13 @@ func TestApplySysPolicy(t *testing.T) { RouteAll: true, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "2", - syspolicy.EnableIncomingConnections: "always", - syspolicy.EnableServerMode: "never", - syspolicy.ExitNodeAllowLANAccess: "always", - syspolicy.EnableTailscaleDNS: "never", - syspolicy.EnableTailscaleSubnets: "always", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "2", + pkey.EnableIncomingConnections: "always", + pkey.EnableServerMode: "never", + pkey.ExitNodeAllowLANAccess: "always", + pkey.EnableTailscaleDNS: "never", + pkey.EnableTailscaleSubnets: "always", }, }, { @@ 
-3366,12 +3367,12 @@ func TestApplySysPolicy(t *testing.T) { CorpDNS: true, RouteAll: true, }, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.EnableIncomingConnections: "user-decides", - syspolicy.EnableServerMode: "user-decides", - syspolicy.ExitNodeAllowLANAccess: "user-decides", - syspolicy.EnableTailscaleDNS: "user-decides", - syspolicy.EnableTailscaleSubnets: "user-decides", + stringPolicies: map[pkey.Key]string{ + pkey.EnableIncomingConnections: "user-decides", + pkey.EnableServerMode: "user-decides", + pkey.ExitNodeAllowLANAccess: "user-decides", + pkey.EnableTailscaleDNS: "user-decides", + pkey.EnableTailscaleSubnets: "user-decides", }, }, { @@ -3380,8 +3381,8 @@ func TestApplySysPolicy(t *testing.T) { ControlURL: "set", }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ControlURL: "set", + stringPolicies: map[pkey.Key]string{ + pkey.ControlURL: "set", }, }, { @@ -3399,8 +3400,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ApplyUpdates: "always", + stringPolicies: map[pkey.Key]string{ + pkey.ApplyUpdates: "always", }, }, { @@ -3418,8 +3419,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.ApplyUpdates: "never", + stringPolicies: map[pkey.Key]string{ + pkey.ApplyUpdates: "never", }, }, { @@ -3437,8 +3438,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.CheckUpdates: "always", + stringPolicies: map[pkey.Key]string{ + pkey.CheckUpdates: "always", }, }, { @@ -3456,8 +3457,8 @@ func TestApplySysPolicy(t *testing.T) { }, }, wantAnyChange: true, - stringPolicies: map[syspolicy.Key]string{ - syspolicy.CheckUpdates: "never", + stringPolicies: map[pkey.Key]string{ + pkey.CheckUpdates: "never", }, }, } @@ -5574,7 +5575,7 @@ func TestFillAllowedSuggestions(t *testing.T) { for _, tt := range tests 
{ t.Run(tt.name, func(t *testing.T) { policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - syspolicy.AllowedSuggestedExitNodes, tt.allowPolicy, + pkey.AllowedSuggestedExitNodes, tt.allowPolicy, )) syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) @@ -6480,23 +6481,23 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { }{ { name: "ShieldsUp/True", - stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "never")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.EnableIncomingConnections, "never")}, want: wantPrefsChanges(fieldChange{"ShieldsUp", true}), }, { name: "ShieldsUp/False", initialPrefs: &ipn.Prefs{ShieldsUp: true}, - stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableIncomingConnections, "always")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.EnableIncomingConnections, "always")}, want: wantPrefsChanges(fieldChange{"ShieldsUp", false}), }, { name: "ExitNodeID", - stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.ExitNodeID, "foo")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.ExitNodeID, "foo")}, want: wantPrefsChanges(fieldChange{"ExitNodeID", tailcfg.StableNodeID("foo")}), }, { name: "EnableRunExitNode", - stringSettings: []source.TestSetting[string]{source.TestSettingOf(syspolicy.EnableRunExitNode, "always")}, + stringSettings: []source.TestSetting[string]{source.TestSettingOf(pkey.EnableRunExitNode, "always")}, want: wantPrefsChanges(fieldChange{"AdvertiseRoutes", []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}}), }, { @@ -6505,9 +6506,9 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { ExitNodeAllowLANAccess: true, }, stringSettings: []source.TestSetting[string]{ - source.TestSettingOf(syspolicy.EnableServerMode, "always"), - source.TestSettingOf(syspolicy.ExitNodeAllowLANAccess, "never"), - 
source.TestSettingOf(syspolicy.ExitNodeIP, "127.0.0.1"), + source.TestSettingOf(pkey.EnableServerMode, "always"), + source.TestSettingOf(pkey.ExitNodeAllowLANAccess, "never"), + source.TestSettingOf(pkey.ExitNodeIP, "127.0.0.1"), }, want: wantPrefsChanges( fieldChange{"ForceDaemon", true}, @@ -6523,9 +6524,9 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { AdvertiseRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, stringSettings: []source.TestSetting[string]{ - source.TestSettingOf(syspolicy.EnableTailscaleDNS, "always"), - source.TestSettingOf(syspolicy.ExitNodeID, "foo"), - source.TestSettingOf(syspolicy.EnableRunExitNode, "always"), + source.TestSettingOf(pkey.EnableTailscaleDNS, "always"), + source.TestSettingOf(pkey.ExitNodeID, "foo"), + source.TestSettingOf(pkey.EnableRunExitNode, "always"), }, want: nil, // syspolicy settings match the preferences; no change notification is expected. }, diff --git a/ipn/prefs.go b/ipn/prefs.go index 2eb0ccf0c61e5..4c049688ccbe2 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -29,6 +29,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" ) @@ -726,7 +727,7 @@ func (p PrefsView) ControlURLOrDefault() string { // If not configured, or if the configured value is a legacy name equivalent to // the default, then DefaultControlURL is returned instead. 
func (p *Prefs) ControlURLOrDefault() string { - controlURL, err := syspolicy.GetString(syspolicy.ControlURL, p.ControlURL) + controlURL, err := syspolicy.GetString(pkey.ControlURL, p.ControlURL) if err != nil { controlURL = p.ControlURL } diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index f5c475712afe3..295dc6fff24f6 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -52,6 +52,7 @@ import ( "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/testenv" "tailscale.com/version" "tailscale.com/version/distro" @@ -65,7 +66,7 @@ var getLogTargetOnce struct { func getLogTarget() string { getLogTargetOnce.Do(func() { envTarget, _ := os.LookupEnv("TS_LOG_TARGET") - getLogTargetOnce.v, _ = syspolicy.GetString(syspolicy.LogTarget, envTarget) + getLogTargetOnce.v, _ = syspolicy.GetString(pkey.LogTarget, envTarget) }) return getLogTargetOnce.v diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 6ed5d3ba61f7e..d1cec2a00ed03 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -30,6 +30,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/dnsname" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil" @@ -508,7 +509,7 @@ func (m *windowsManager) Close() error { // sysPolicyChanged is a callback triggered by [syspolicy] when it detects // a change in one or more syspolicy settings. func (m *windowsManager) sysPolicyChanged(policy *rsop.PolicyChange) { - if policy.HasChanged(syspolicy.EnableDNSRegistration) { + if policy.HasChanged(pkey.EnableDNSRegistration) { m.reconfigureDNSRegistration() } } @@ -520,7 +521,7 @@ func (m *windowsManager) reconfigureDNSRegistration() { // Disable DNS registration by default (if the policy setting is not configured). 
// This is primarily for historical reasons and to avoid breaking existing // setups that rely on this behavior. - enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(syspolicy.EnableDNSRegistration, setting.NeverByPolicy) + enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(pkey.EnableDNSRegistration, setting.NeverByPolicy) if err != nil { m.logf("error getting DNSRegistration policy setting: %v", err) // non-fatal; we'll use the default } diff --git a/posture/serialnumber_syspolicy.go b/posture/serialnumber_syspolicy.go index d6491ff214b95..5123d561db2cd 100644 --- a/posture/serialnumber_syspolicy.go +++ b/posture/serialnumber_syspolicy.go @@ -10,13 +10,14 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" ) // GetSerialNumbers returns the serial number of the device as reported by an // MDM solution. It requires configuration via the DeviceSerialNumber system policy. // This is the only way to gather serial numbers on iOS, tvOS and Android. 
func GetSerialNumbers(_ logger.Logf) ([]string, error) { - s, err := syspolicy.GetString(syspolicy.DeviceSerialNumber, "") + s, err := syspolicy.GetString(pkey.DeviceSerialNumber, "") if err != nil { return nil, fmt.Errorf("failed to get serial number from MDM: %v", err) } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index aea6baf93ef11..bdf90c9a8c03c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -379,6 +379,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index a73c6ebf649f2..c8a0bb2740b64 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -52,6 +52,7 @@ import ( _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index a73c6ebf649f2..c8a0bb2740b64 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -52,6 +52,7 @@ import ( _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" 
_ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index a73c6ebf649f2..c8a0bb2740b64 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -52,6 +52,7 @@ import ( _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index a73c6ebf649f2..c8a0bb2740b64 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -52,6 +52,7 @@ import ( _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index b5919b9628760..c9a1cd0cf188e 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -63,6 +63,7 @@ import ( _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy" + _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/winutil" _ "tailscale.com/util/winutil/gp" _ "tailscale.com/version" diff --git a/util/syspolicy/handler.go b/util/syspolicy/handler.go index c4bfd9de92594..cdf32a7f78503 100644 --- a/util/syspolicy/handler.go +++ b/util/syspolicy/handler.go @@ -4,6 +4,7 @@ package syspolicy import ( + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" 
"tailscale.com/util/syspolicy/source" @@ -85,22 +86,22 @@ func (s handlerStore) RegisterChangeCallback(callback func()) (unregister func() } // ReadString implements [source.Store]. -func (s handlerStore) ReadString(key setting.Key) (string, error) { +func (s handlerStore) ReadString(key pkey.Key) (string, error) { return s.h.ReadString(string(key)) } // ReadUInt64 implements [source.Store]. -func (s handlerStore) ReadUInt64(key setting.Key) (uint64, error) { +func (s handlerStore) ReadUInt64(key pkey.Key) (uint64, error) { return s.h.ReadUInt64(string(key)) } // ReadBoolean implements [source.Store]. -func (s handlerStore) ReadBoolean(key setting.Key) (bool, error) { +func (s handlerStore) ReadBoolean(key pkey.Key) (bool, error) { return s.h.ReadBoolean(string(key)) } // ReadStringArray implements [source.Store]. -func (s handlerStore) ReadStringArray(key setting.Key) ([]string, error) { +func (s handlerStore) ReadStringArray(key pkey.Key) ([]string, error) { return s.h.ReadStringArray(string(key)) } diff --git a/util/syspolicy/internal/metrics/metrics.go b/util/syspolicy/internal/metrics/metrics.go index 43f2a285a26ea..8f27456735ca6 100644 --- a/util/syspolicy/internal/metrics/metrics.go +++ b/util/syspolicy/internal/metrics/metrics.go @@ -17,6 +17,7 @@ import ( "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" ) @@ -209,7 +210,7 @@ func scopeMetrics(origin *setting.Origin) *policyScopeMetrics { var ( settingMetricsMu sync.RWMutex - settingMetricsMap map[setting.Key]*settingMetrics + settingMetricsMap map[pkey.Key]*settingMetrics ) func settingMetricsFor(setting *setting.Definition) *settingMetrics { @@ -283,8 +284,8 @@ func SetHooksForTest(tb testenv.TB, addMetric, setMetric metricFn) { lazyUserMetrics.SetForTest(tb, newScopeMetrics(setting.UserSetting), nil) } -func newSettingMetric(key 
setting.Key, scope setting.Scope, suffix string, typ clientmetric.Type) metric { - name := strings.ReplaceAll(string(key), string(setting.KeyPathSeparator), "_") +func newSettingMetric(key pkey.Key, scope setting.Scope, suffix string, typ clientmetric.Type) metric { + name := strings.ReplaceAll(string(key), string(pkey.KeyPathSeparator), "_") name = strings.ReplaceAll(name, ".", "_") // dots are not allowed in metric names return newMetric([]string{name, metricScopeName(scope), suffix}, typ) } diff --git a/util/syspolicy/internal/metrics/metrics_test.go b/util/syspolicy/internal/metrics/metrics_test.go index 07be4773c9fcb..a99938769712f 100644 --- a/util/syspolicy/internal/metrics/metrics_test.go +++ b/util/syspolicy/internal/metrics/metrics_test.go @@ -10,13 +10,14 @@ import ( "tailscale.com/types/lazy" "tailscale.com/util/clientmetric" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) func TestSettingMetricNames(t *testing.T) { tests := []struct { name string - key setting.Key + key pkey.Key scope setting.Scope suffix string typ clientmetric.Type diff --git a/util/syspolicy/pkey/pkey.go b/util/syspolicy/pkey/pkey.go new file mode 100644 index 0000000000000..cfef9e17a333a --- /dev/null +++ b/util/syspolicy/pkey/pkey.go @@ -0,0 +1,177 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package pkey defines the keys used to store system policies in the registry. +// +// This is a leaf package meant to only contain string constants, not code. +package pkey + +// Key is a string that uniquely identifies a policy and must remain unchanged +// once established and documented for a given policy setting. It may contain +// alphanumeric characters and zero or more [KeyPathSeparator]s to group +// individual policy settings into categories. +type Key string + +// KeyPathSeparator allows logical grouping of policy settings into categories. 
+const KeyPathSeparator = '/' + +// The const block below lists known policy keys. +// When adding a key to this list, remember to add a corresponding +// [setting.Definition] to [implicitDefinitions] in util/syspolicy/policy_keys.go. +// Otherwise, the [TestKnownKeysRegistered] test will fail as a reminder. + +const ( + // Keys with a string value + ControlURL Key = "LoginURL" // default ""; if blank, ipn uses ipn.DefaultControlURL. + LogTarget Key = "LogTarget" // default ""; if blank logging uses logtail.DefaultHost. + Tailnet Key = "Tailnet" // default ""; if blank, no tailnet name is sent to the server. + + // AlwaysOn is a boolean key that controls whether Tailscale + // should always remain in a connected state, and the user should + // not be able to disconnect at their discretion. + // + // Warning: This policy setting is experimental and may change or be removed in the future. + // It may also not be fully supported by all Tailscale clients until it is out of experimental status. + // See tailscale/corp#26247, tailscale/corp#26248 and tailscale/corp#26249 for more information. + AlwaysOn Key = "AlwaysOn.Enabled" + + // AlwaysOnOverrideWithReason is a boolean key that alters the behavior + // of [AlwaysOn]. When true, the user is allowed to disconnect Tailscale + // by providing a reason. The reason is logged and sent to the control + // for auditing purposes. It has no effect when [AlwaysOn] is false. + AlwaysOnOverrideWithReason Key = "AlwaysOn.OverrideWithReason" + + // ReconnectAfter is a string value formatted for use with time.ParseDuration() + // that defines the duration after which the client should automatically reconnect + // to the Tailscale network following a user-initiated disconnect. + // An empty string or a zero duration disables automatic reconnection. + ReconnectAfter Key = "ReconnectAfter" + + // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. + // Exit node ID takes precedence over exit node IP. 
+ // To find the node ID, go to /api.md#device. + ExitNodeID Key = "ExitNodeID" + ExitNodeIP Key = "ExitNodeIP" // default ""; if blank, no exit node is forced. Value is exit node IP. + + // AllowExitNodeOverride is a boolean key that allows the user to override exit node policy settings + // and manually select an exit node. It does not allow disabling exit node usage entirely. + // It is typically used in conjunction with [ExitNodeID] set to "auto:any". + // + // Warning: This policy setting is experimental and may change, be renamed or removed in the future. + // It may also not be fully supported by all Tailscale clients until it is out of experimental status. + // See tailscale/corp#29969. + AllowExitNodeOverride Key = "ExitNode.AllowOverride" + + // Keys with a string value that specifies an option: "always", "never", "user-decides". + // The default is "user-decides" unless otherwise stated. Enforcement of + // these policies is typically performed in ipnlocal.applySysPolicy(). GUIs + // typically hide menu items related to policies that are enforced. + EnableIncomingConnections Key = "AllowIncomingConnections" + EnableServerMode Key = "UnattendedMode" + ExitNodeAllowLANAccess Key = "ExitNodeAllowLANAccess" + EnableTailscaleDNS Key = "UseTailscaleDNSSettings" + EnableTailscaleSubnets Key = "UseTailscaleSubnets" + + // EnableDNSRegistration is a string value that can be set to "always", "never" + // or "user-decides". It controls whether DNS registration and dynamic DNS + // updates are enabled for the Tailscale interface. For historical reasons + // and to maintain compatibility with existing setups, the default is "never". + // It is only used on Windows. + EnableDNSRegistration Key = "EnableDNSRegistration" + + // CheckUpdates is the key to signal if the updater should periodically + // check for updates. + CheckUpdates Key = "CheckUpdates" + // ApplyUpdates is the key to signal if updates should be automatically + // installed. 
Its value is "InstallUpdates" because of an awkwardly-named + // visibility option "ApplyUpdates" on MacOS. + ApplyUpdates Key = "InstallUpdates" + // EnableRunExitNode controls if the device acts as an exit node. Even when + // running as an exit node, the device must be approved by a tailnet + // administrator. Its name is slightly awkward because RunExitNodeVisibility + // predates this option but is preserved for backwards compatibility. + EnableRunExitNode Key = "AdvertiseExitNode" + + // Keys with a string value that controls visibility: "show", "hide". + // The default is "show" unless otherwise stated. Enforcement of these + // policies is typically performed by the UI code for the relevant operating + // system. + AdminConsoleVisibility Key = "AdminConsole" + NetworkDevicesVisibility Key = "NetworkDevices" + TestMenuVisibility Key = "TestMenu" + UpdateMenuVisibility Key = "UpdateMenu" + ResetToDefaultsVisibility Key = "ResetToDefaults" + // RunExitNodeVisibility controls if the "run as exit node" menu item is + // visible, without controlling the setting itself. This is preserved for + // backwards compatibility but prefer EnableRunExitNode in new deployments. + RunExitNodeVisibility Key = "RunExitNode" + PreferencesMenuVisibility Key = "PreferencesMenu" + ExitNodeMenuVisibility Key = "ExitNodesPicker" + // AutoUpdateVisibility is the key to signal if the menu item for automatic + // installation of updates should be visible. It is only used by macsys + // installations and uses the Sparkle naming convention, even though it does + // not actually control updates, merely the UI for that setting. + AutoUpdateVisibility Key = "ApplyUpdates" + // SuggestedExitNodeVisibility controls the visibility of suggested exit nodes in the client GUI. + // When this system policy is set to 'hide', an exit node suggestion won't be presented to the user as part of the exit nodes picker. 
+ SuggestedExitNodeVisibility Key = "SuggestedExitNode" + // OnboardingFlowVisibility controls the visibility of the onboarding flow in the client GUI. + // When this system policy is set to 'hide', the onboarding flow is never shown to the user. + OnboardingFlowVisibility Key = "OnboardingFlow" + + // Keys with a string value formatted for use with time.ParseDuration(). + KeyExpirationNoticeTime Key = "KeyExpirationNotice" // default 24 hours + + // Boolean Keys that are only applicable on Windows. Booleans are stored in the registry as + // DWORD or QWORD (either is acceptable). 0 means false, and anything else means true. + // The default is 0 unless otherwise stated. + LogSCMInteractions Key = "LogSCMInteractions" + FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" + + // EncryptState is a boolean setting that specifies whether to encrypt the + // tailscaled state file with a TPM device. + EncryptState Key = "EncryptState" + + // PostureChecking indicates if posture checking is enabled and the client shall gather + // posture data. + // Key is a string value that specifies an option: "always", "never", "user-decides". + // The default is "user-decides" unless otherwise stated. + PostureChecking Key = "PostureChecking" + // DeviceSerialNumber is the serial number of the device that is running Tailscale. + // This is used on Android, iOS and tvOS to allow IT administrators to manually give us a serial number via MDM. + // We are unable to programmatically get the serial number on mobile due to sandboxing restrictions. + DeviceSerialNumber Key = "DeviceSerialNumber" + + // ManagedByOrganizationName indicates the name of the organization managing the Tailscale + // install. It is displayed inside the client UI in a prominent location. + ManagedByOrganizationName Key = "ManagedByOrganizationName" + // ManagedByCaption is an info message displayed inside the client UI as a caption when + // ManagedByOrganizationName is set. 
It can be used to provide a pointer to support resources + // for Tailscale within the organization. + ManagedByCaption Key = "ManagedByCaption" + // ManagedByURL is a valid URL pointing to a support help desk for Tailscale within the + // organization. A button in the client UI provides easy access to this URL. + ManagedByURL Key = "ManagedByURL" + + // AuthKey is an auth key that will be used to login whenever the backend starts. This can be used to + // automatically authenticate managed devices, without requiring user interaction. + AuthKey Key = "AuthKey" + + // MachineCertificateSubject is the exact name of a Subject that needs + // to be present in an identity's certificate chain to sign a RegisterRequest, + // formatted as per pkix.Name.String(). The Subject may be that of the identity + // itself, an intermediate CA or the root CA. + // + // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" + MachineCertificateSubject Key = "MachineCertificateSubject" + + // Hostname is the hostname of the device that is running Tailscale. + // When this policy is set, it overrides the hostname that the client + // would otherwise obtain from the OS, e.g. by calling os.Hostname(). + Hostname Key = "Hostname" + + // Keys with a string array value. + + // AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes. 
+ AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes" +) diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index cd5f8172c159a..e32d9cdf4ddf5 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -6,225 +6,60 @@ package syspolicy import ( "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" ) -// Key is a string that uniquely identifies a policy and must remain unchanged -// once established and documented for a given policy setting. It may contain -// alphanumeric characters and zero or more [KeyPathSeparator]s to group -// individual policy settings into categories. -type Key = setting.Key - -// The const block below lists known policy keys. -// When adding a key to this list, remember to add a corresponding -// [setting.Definition] to [implicitDefinitions] below. -// Otherwise, the [TestKnownKeysRegistered] test will fail as a reminder. - -const ( - // Keys with a string value - ControlURL Key = "LoginURL" // default ""; if blank, ipn uses ipn.DefaultControlURL. - LogTarget Key = "LogTarget" // default ""; if blank logging uses logtail.DefaultHost. - Tailnet Key = "Tailnet" // default ""; if blank, no tailnet name is sent to the server. - - // AlwaysOn is a boolean key that controls whether Tailscale - // should always remain in a connected state, and the user should - // not be able to disconnect at their discretion. - // - // Warning: This policy setting is experimental and may change or be removed in the future. - // It may also not be fully supported by all Tailscale clients until it is out of experimental status. - // See tailscale/corp#26247, tailscale/corp#26248 and tailscale/corp#26249 for more information. - AlwaysOn Key = "AlwaysOn.Enabled" - - // AlwaysOnOverrideWithReason is a boolean key that alters the behavior - // of [AlwaysOn]. 
When true, the user is allowed to disconnect Tailscale - // by providing a reason. The reason is logged and sent to the control - // for auditing purposes. It has no effect when [AlwaysOn] is false. - AlwaysOnOverrideWithReason Key = "AlwaysOn.OverrideWithReason" - - // ReconnectAfter is a string value formatted for use with time.ParseDuration() - // that defines the duration after which the client should automatically reconnect - // to the Tailscale network following a user-initiated disconnect. - // An empty string or a zero duration disables automatic reconnection. - ReconnectAfter Key = "ReconnectAfter" - - // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. - // Exit node ID takes precedence over exit node IP. - // To find the node ID, go to /api.md#device. - ExitNodeID Key = "ExitNodeID" - ExitNodeIP Key = "ExitNodeIP" // default ""; if blank, no exit node is forced. Value is exit node IP. - - // AllowExitNodeOverride is a boolean key that allows the user to override exit node policy settings - // and manually select an exit node. It does not allow disabling exit node usage entirely. - // It is typically used in conjunction with [ExitNodeID] set to "auto:any". - // - // Warning: This policy setting is experimental and may change, be renamed or removed in the future. - // It may also not be fully supported by all Tailscale clients until it is out of experimental status. - // See tailscale/corp#29969. - AllowExitNodeOverride Key = "ExitNode.AllowOverride" - - // Keys with a string value that specifies an option: "always", "never", "user-decides". - // The default is "user-decides" unless otherwise stated. Enforcement of - // these policies is typically performed in ipnlocal.applySysPolicy(). GUIs - // typically hide menu items related to policies that are enforced. 
- EnableIncomingConnections Key = "AllowIncomingConnections" - EnableServerMode Key = "UnattendedMode" - ExitNodeAllowLANAccess Key = "ExitNodeAllowLANAccess" - EnableTailscaleDNS Key = "UseTailscaleDNSSettings" - EnableTailscaleSubnets Key = "UseTailscaleSubnets" - - // EnableDNSRegistration is a string value that can be set to "always", "never" - // or "user-decides". It controls whether DNS registration and dynamic DNS - // updates are enabled for the Tailscale interface. For historical reasons - // and to maintain compatibility with existing setups, the default is "never". - // It is only used on Windows. - EnableDNSRegistration Key = "EnableDNSRegistration" - - // CheckUpdates is the key to signal if the updater should periodically - // check for updates. - CheckUpdates Key = "CheckUpdates" - // ApplyUpdates is the key to signal if updates should be automatically - // installed. Its value is "InstallUpdates" because of an awkwardly-named - // visibility option "ApplyUpdates" on MacOS. - ApplyUpdates Key = "InstallUpdates" - // EnableRunExitNode controls if the device acts as an exit node. Even when - // running as an exit node, the device must be approved by a tailnet - // administrator. Its name is slightly awkward because RunExitNodeVisibility - // predates this option but is preserved for backwards compatibility. - EnableRunExitNode Key = "AdvertiseExitNode" - - // Keys with a string value that controls visibility: "show", "hide". - // The default is "show" unless otherwise stated. Enforcement of these - // policies is typically performed by the UI code for the relevant operating - // system. - AdminConsoleVisibility Key = "AdminConsole" - NetworkDevicesVisibility Key = "NetworkDevices" - TestMenuVisibility Key = "TestMenu" - UpdateMenuVisibility Key = "UpdateMenu" - ResetToDefaultsVisibility Key = "ResetToDefaults" - // RunExitNodeVisibility controls if the "run as exit node" menu item is - // visible, without controlling the setting itself. 
This is preserved for - // backwards compatibility but prefer EnableRunExitNode in new deployments. - RunExitNodeVisibility Key = "RunExitNode" - PreferencesMenuVisibility Key = "PreferencesMenu" - ExitNodeMenuVisibility Key = "ExitNodesPicker" - // AutoUpdateVisibility is the key to signal if the menu item for automatic - // installation of updates should be visible. It is only used by macsys - // installations and uses the Sparkle naming convention, even though it does - // not actually control updates, merely the UI for that setting. - AutoUpdateVisibility Key = "ApplyUpdates" - // SuggestedExitNodeVisibility controls the visibility of suggested exit nodes in the client GUI. - // When this system policy is set to 'hide', an exit node suggestion won't be presented to the user as part of the exit nodes picker. - SuggestedExitNodeVisibility Key = "SuggestedExitNode" - // OnboardingFlowVisibility controls the visibility of the onboarding flow in the client GUI. - // When this system policy is set to 'hide', the onboarding flow is never shown to the user. - OnboardingFlowVisibility Key = "OnboardingFlow" - - // Keys with a string value formatted for use with time.ParseDuration(). - KeyExpirationNoticeTime Key = "KeyExpirationNotice" // default 24 hours - - // Boolean Keys that are only applicable on Windows. Booleans are stored in the registry as - // DWORD or QWORD (either is acceptable). 0 means false, and anything else means true. - // The default is 0 unless otherwise stated. - LogSCMInteractions Key = "LogSCMInteractions" - FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" - - // EncryptState is a boolean setting that specifies whether to encrypt the - // tailscaled state file with a TPM device. - EncryptState Key = "EncryptState" - - // PostureChecking indicates if posture checking is enabled and the client shall gather - // posture data. - // Key is a string value that specifies an option: "always", "never", "user-decides". 
- // The default is "user-decides" unless otherwise stated. - PostureChecking Key = "PostureChecking" - // DeviceSerialNumber is the serial number of the device that is running Tailscale. - // This is used on Android, iOS and tvOS to allow IT administrators to manually give us a serial number via MDM. - // We are unable to programmatically get the serial number on mobile due to sandboxing restrictions. - DeviceSerialNumber Key = "DeviceSerialNumber" - - // ManagedByOrganizationName indicates the name of the organization managing the Tailscale - // install. It is displayed inside the client UI in a prominent location. - ManagedByOrganizationName Key = "ManagedByOrganizationName" - // ManagedByCaption is an info message displayed inside the client UI as a caption when - // ManagedByOrganizationName is set. It can be used to provide a pointer to support resources - // for Tailscale within the organization. - ManagedByCaption Key = "ManagedByCaption" - // ManagedByURL is a valid URL pointing to a support help desk for Tailscale within the - // organization. A button in the client UI provides easy access to this URL. - ManagedByURL Key = "ManagedByURL" - - // AuthKey is an auth key that will be used to login whenever the backend starts. This can be used to - // automatically authenticate managed devices, without requiring user interaction. - AuthKey Key = "AuthKey" - - // MachineCertificateSubject is the exact name of a Subject that needs - // to be present in an identity's certificate chain to sign a RegisterRequest, - // formatted as per pkix.Name.String(). The Subject may be that of the identity - // itself, an intermediate CA or the root CA. - // - // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" - MachineCertificateSubject Key = "MachineCertificateSubject" - - // Hostname is the hostname of the device that is running Tailscale. 
- // When this policy is set, it overrides the hostname that the client - // would otherwise obtain from the OS, e.g. by calling os.Hostname(). - Hostname Key = "Hostname" - - // Keys with a string array value. - // AllowedSuggestedExitNodes's string array value is a list of exit node IDs that restricts which exit nodes are considered when generating suggestions for exit nodes. - AllowedSuggestedExitNodes Key = "AllowedSuggestedExitNodes" -) - // implicitDefinitions is a list of [setting.Definition] that will be registered // automatically when the policy setting definitions are first used by the syspolicy package hierarchy. // This includes the first time a policy needs to be read from any source. var implicitDefinitions = []*setting.Definition{ // Device policy settings (can only be configured on a per-device basis): - setting.NewDefinition(AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), - setting.NewDefinition(AllowExitNodeOverride, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(AlwaysOn, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(AuthKey, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ControlURL, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(DeviceSerialNumber, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(EnableDNSRegistration, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableIncomingConnections, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableRunExitNode, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableServerMode, 
setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableTailscaleDNS, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(EnableTailscaleSubnets, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ExitNodeAllowLANAccess, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ExitNodeID, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(ExitNodeIP, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(EncryptState, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(Hostname, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(LogSCMInteractions, setting.DeviceSetting, setting.BooleanValue), - setting.NewDefinition(LogTarget, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(MachineCertificateSubject, setting.DeviceSetting, setting.StringValue), - setting.NewDefinition(PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), - setting.NewDefinition(ReconnectAfter, setting.DeviceSetting, setting.DurationValue), - setting.NewDefinition(Tailnet, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), + setting.NewDefinition(pkey.AllowExitNodeOverride, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.AlwaysOn, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.AuthKey, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.CheckUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), + 
setting.NewDefinition(pkey.ControlURL, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.DeviceSerialNumber, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.EnableDNSRegistration, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableIncomingConnections, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableRunExitNode, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableServerMode, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableTailscaleDNS, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.EnableTailscaleSubnets, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ExitNodeAllowLANAccess, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ExitNodeID, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.ExitNodeIP, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.FlushDNSOnSessionUnlock, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.EncryptState, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.Hostname, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.LogSCMInteractions, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.LogTarget, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.MachineCertificateSubject, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), + setting.NewDefinition(pkey.ReconnectAfter, setting.DeviceSetting, setting.DurationValue), + setting.NewDefinition(pkey.Tailnet, setting.DeviceSetting, setting.StringValue), // User policy settings (can be configured on a user- or device-basis): - 
setting.NewDefinition(AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(AutoUpdateVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(ExitNodeMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(KeyExpirationNoticeTime, setting.UserSetting, setting.DurationValue), - setting.NewDefinition(ManagedByCaption, setting.UserSetting, setting.StringValue), - setting.NewDefinition(ManagedByOrganizationName, setting.UserSetting, setting.StringValue), - setting.NewDefinition(ManagedByURL, setting.UserSetting, setting.StringValue), - setting.NewDefinition(NetworkDevicesVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(PreferencesMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(ResetToDefaultsVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(RunExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(SuggestedExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(TestMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(UpdateMenuVisibility, setting.UserSetting, setting.VisibilityValue), - setting.NewDefinition(OnboardingFlowVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.AutoUpdateVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.ExitNodeMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.KeyExpirationNoticeTime, setting.UserSetting, setting.DurationValue), + setting.NewDefinition(pkey.ManagedByCaption, setting.UserSetting, setting.StringValue), + setting.NewDefinition(pkey.ManagedByOrganizationName, setting.UserSetting, setting.StringValue), + 
setting.NewDefinition(pkey.ManagedByURL, setting.UserSetting, setting.StringValue), + setting.NewDefinition(pkey.NetworkDevicesVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.PreferencesMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.ResetToDefaultsVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.RunExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.SuggestedExitNodeVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.TestMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.UpdateMenuVisibility, setting.UserSetting, setting.VisibilityValue), + setting.NewDefinition(pkey.OnboardingFlowVisibility, setting.UserSetting, setting.VisibilityValue), } func init() { @@ -248,7 +83,7 @@ var implicitDefinitionMap lazy.SyncValue[setting.DefinitionMap] // WellKnownSettingDefinition returns a well-known, implicit setting definition by its key, // or an [ErrNoSuchKey] if a policy setting with the specified key does not exist // among implicit policy definitions. 
-func WellKnownSettingDefinition(k Key) (*setting.Definition, error) { +func WellKnownSettingDefinition(k pkey.Key) (*setting.Definition, error) { m, err := implicitDefinitionMap.GetErr(func() (setting.DefinitionMap, error) { return setting.DefinitionMapOf(implicitDefinitions) }) diff --git a/util/syspolicy/policy_keys_test.go b/util/syspolicy/policy_keys_test.go index 4d3260f3e0e60..490353c8144ae 100644 --- a/util/syspolicy/policy_keys_test.go +++ b/util/syspolicy/policy_keys_test.go @@ -14,14 +14,19 @@ import ( "strconv" "testing" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) func TestKnownKeysRegistered(t *testing.T) { - keyConsts, err := listStringConsts[Key]("policy_keys.go") + const file = "pkey/pkey.go" + keyConsts, err := listStringConsts[pkey.Key](file) if err != nil { t.Fatalf("listStringConsts failed: %v", err) } + if len(keyConsts) == 0 { + t.Fatalf("no key constants found in %s", file) + } m, err := setting.DefinitionMapOf(implicitDefinitions) if err != nil { diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index 87b45b654709d..59dba07c6a93c 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -11,6 +11,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -37,8 +38,8 @@ func (c PolicyChange) Old() *setting.Snapshot { return c.snapshots.Old } -// HasChanged reports whether a policy setting with the specified [setting.Key], has changed. -func (c PolicyChange) HasChanged(key setting.Key) bool { +// HasChanged reports whether a policy setting with the specified [pkey.Key], has changed. 
+func (c PolicyChange) HasChanged(key pkey.Key) bool { new, newErr := c.snapshots.New.GetErr(key) old, oldErr := c.snapshots.Old.GetErr(key) if newErr != nil && oldErr != nil { @@ -60,7 +61,7 @@ func (c PolicyChange) HasChanged(key setting.Key) bool { } // HasChangedAnyOf reports whether any of the specified policy settings has changed. -func (c PolicyChange) HasChangedAnyOf(keys ...setting.Key) bool { +func (c PolicyChange) HasChangedAnyOf(keys ...pkey.Key) bool { return slices.ContainsFunc(keys, c.HasChanged) } diff --git a/util/syspolicy/rsop/resultant_policy_test.go b/util/syspolicy/rsop/resultant_policy_test.go index e4bfb1a886878..2da46a8ca958a 100644 --- a/util/syspolicy/rsop/resultant_policy_test.go +++ b/util/syspolicy/rsop/resultant_policy_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/tstest" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" @@ -80,7 +81,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { type sourceConfig struct { name string scope setting.PolicyScope - settingKey setting.Key + settingKey pkey.Key settingValue string wantEffective bool } @@ -113,7 +114,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, @@ -129,7 +130,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", 
setting.DeviceScope)), }, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), }, @@ -159,7 +160,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueA", nil, setting.NewNamedOrigin("TestSourceA", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), "TestKeyC": setting.RawItemWith("TestValueC", nil, setting.NewNamedOrigin("TestSourceC", setting.DeviceScope)), @@ -191,7 +192,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueC", nil, setting.NewNamedOrigin("TestSourceC", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), }, setting.DeviceScope), @@ -245,7 +246,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("TestValueF", nil, setting.NewNamedOrigin("TestSourceF", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("TestValueB", nil, setting.NewNamedOrigin("TestSourceB", setting.DeviceScope)), "TestKeyC": setting.RawItemWith("TestValueE", nil, setting.NewNamedOrigin("TestSourceE", setting.DeviceScope)), @@ -263,7 +264,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": 
setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, setting.CurrentUserScope, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, @@ -288,7 +289,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("UserValue", nil, setting.NewNamedOrigin("TestSourceUser", setting.CurrentUserScope)), }, setting.CurrentUserScope), @@ -321,7 +322,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: true, }, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), "TestKeyB": setting.RawItemWith("ProfileValue", nil, setting.NewNamedOrigin("TestSourceProfile", setting.CurrentProfileScope)), }, setting.CurrentUserScope), @@ -347,7 +348,7 @@ func TestRegisterSourceAndGetEffectivePolicy(t *testing.T) { wantEffective: false, // Registering a user source should have no impact on the device policy. 
}, }, - wantSnapshot: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + wantSnapshot: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "TestKeyA": setting.RawItemWith("DeviceValue", nil, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, setting.NewNamedOrigin("TestSourceDevice", setting.DeviceScope)), }, @@ -497,61 +498,61 @@ func TestPolicyFor(t *testing.T) { func TestPolicyChangeHasChanged(t *testing.T) { tests := []struct { name string - old, new map[setting.Key]setting.RawItem - wantChanged []setting.Key - wantUnchanged []setting.Key + old, new map[pkey.Key]setting.RawItem + wantChanged []pkey.Key + wantUnchanged []pkey.Key }{ { name: "String-Settings", - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf("Old"), "UnchangedSetting": setting.RawItemOf("Value"), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf("New"), "UnchangedSetting": setting.RawItemOf("Value"), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, { name: "UInt64-Settings", - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(uint64(0)), "UnchangedSetting": setting.RawItemOf(uint64(42)), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(uint64(1)), "UnchangedSetting": setting.RawItemOf(uint64(42)), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, { name: "StringSlice-Settings", - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf([]string{"Chicago"}), 
"UnchangedSetting": setting.RawItemOf([]string{"String1", "String2"}), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf([]string{"New York"}), "UnchangedSetting": setting.RawItemOf([]string{"String1", "String2"}), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, { name: "Int8-Settings", // We don't have actual int8 settings, but this should still work. - old: map[setting.Key]setting.RawItem{ + old: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(int8(0)), "UnchangedSetting": setting.RawItemOf(int8(42)), }, - new: map[setting.Key]setting.RawItem{ + new: map[pkey.Key]setting.RawItem{ "ChangedSetting": setting.RawItemOf(int8(1)), "UnchangedSetting": setting.RawItemOf(int8(42)), }, - wantChanged: []setting.Key{"ChangedSetting"}, - wantUnchanged: []setting.Key{"UnchangedSetting"}, + wantChanged: []pkey.Key{"ChangedSetting"}, + wantUnchanged: []pkey.Key{"UnchangedSetting"}, }, } for _, tt := range tests { diff --git a/util/syspolicy/setting/key.go b/util/syspolicy/setting/key.go deleted file mode 100644 index aa7606d36324a..0000000000000 --- a/util/syspolicy/setting/key.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package setting - -// Key is a string that uniquely identifies a policy and must remain unchanged -// once established and documented for a given policy setting. It may contain -// alphanumeric characters and zero or more [KeyPathSeparator]s to group -// individual policy settings into categories. -type Key string - -// KeyPathSeparator allows logical grouping of policy settings into categories. 
-const KeyPathSeparator = '/' diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go index 9a96073b01297..ea97865f5a396 100644 --- a/util/syspolicy/setting/raw_item.go +++ b/util/syspolicy/setting/raw_item.go @@ -11,6 +11,7 @@ import ( "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/opt" "tailscale.com/types/structs" + "tailscale.com/util/syspolicy/pkey" ) // RawItem contains a raw policy setting value as read from a policy store, or an @@ -169,4 +170,4 @@ func (v *RawValue) UnmarshalJSON(b []byte) error { } // RawValues is a map of keyed setting values that can be read from a JSON. -type RawValues map[Key]RawValue +type RawValues map[pkey.Key]RawValue diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 13c7a2a5fc1a9..9285afade50b9 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -16,6 +16,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/testenv" ) @@ -134,7 +135,7 @@ type ValueType interface { // Definition defines policy key, scope and value type. type Definition struct { - key Key + key pkey.Key scope Scope typ Type platforms PlatformList @@ -142,12 +143,12 @@ type Definition struct { // NewDefinition returns a new [Definition] with the specified // key, scope, type and supported platforms (see [PlatformList]). -func NewDefinition(k Key, s Scope, t Type, platforms ...string) *Definition { +func NewDefinition(k pkey.Key, s Scope, t Type, platforms ...string) *Definition { return &Definition{key: k, scope: s, typ: t, platforms: platforms} } // Key returns a policy setting's identifier. -func (d *Definition) Key() Key { +func (d *Definition) Key() pkey.Key { if d == nil { return "" } @@ -208,7 +209,7 @@ func (d *Definition) Equal(d2 *Definition) bool { } // DefinitionMap is a map of setting [Definition] by [Key]. 
-type DefinitionMap map[Key]*Definition +type DefinitionMap map[pkey.Key]*Definition var ( definitions lazy.SyncValue[DefinitionMap] @@ -224,7 +225,7 @@ var ( // invoking any functions that use the registered policy definitions. This // includes calling [Definitions] or [DefinitionOf] directly, or reading any // policy settings via syspolicy. -func Register(k Key, s Scope, t Type, platforms ...string) { +func Register(k pkey.Key, s Scope, t Type, platforms ...string) { RegisterDefinition(NewDefinition(k, s, t, platforms...)) } @@ -290,7 +291,7 @@ func SetDefinitionsForTest(tb testenv.TB, ds ...*Definition) error { // DefinitionOf returns a setting definition by key, // or [ErrNoSuchKey] if the specified key does not exist, // or an error if there are conflicting policy definitions. -func DefinitionOf(k Key) (*Definition, error) { +func DefinitionOf(k pkey.Key) (*Definition, error) { ds, err := settingDefinitions() if err != nil { return nil, err diff --git a/util/syspolicy/setting/setting_test.go b/util/syspolicy/setting/setting_test.go index 3cc08e7da3d8d..e43495a160e12 100644 --- a/util/syspolicy/setting/setting_test.go +++ b/util/syspolicy/setting/setting_test.go @@ -11,6 +11,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/types/ptr" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" ) func TestSettingDefinition(t *testing.T) { @@ -18,7 +19,7 @@ func TestSettingDefinition(t *testing.T) { name string setting *Definition osOverride string - wantKey Key + wantKey pkey.Key wantScope Scope wantType Type wantIsSupported bool @@ -163,10 +164,10 @@ func TestSettingDefinition(t *testing.T) { } func TestRegisterSettingDefinition(t *testing.T) { - const testPolicySettingKey Key = "TestPolicySetting" + const testPolicySettingKey pkey.Key = "TestPolicySetting" tests := []struct { name string - key Key + key pkey.Key wantEq *Definition wantErr error }{ diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 
3a40785dce9de..94c7ecadb2533 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -15,34 +15,35 @@ import ( "github.com/go-json-experiment/json/jsontext" xmaps "golang.org/x/exp/maps" "tailscale.com/util/deephash" + "tailscale.com/util/syspolicy/pkey" ) // Snapshot is an immutable collection of ([Key], [RawItem]) pairs, representing // a set of policy settings applied at a specific moment in time. // A nil pointer to [Snapshot] is valid. type Snapshot struct { - m map[Key]RawItem + m map[pkey.Key]RawItem sig deephash.Sum // of m summary Summary } // NewSnapshot returns a new [Snapshot] with the specified items and options. -func NewSnapshot(items map[Key]RawItem, opts ...SummaryOption) *Snapshot { +func NewSnapshot(items map[pkey.Key]RawItem, opts ...SummaryOption) *Snapshot { return &Snapshot{m: xmaps.Clone(items), sig: deephash.Hash(&items), summary: SummaryWith(opts...)} } // All returns an iterator over policy settings in s. The iteration order is not // specified and is not guaranteed to be the same from one call to the next. -func (s *Snapshot) All() iter.Seq2[Key, RawItem] { +func (s *Snapshot) All() iter.Seq2[pkey.Key, RawItem] { if s == nil { - return func(yield func(Key, RawItem) bool) {} + return func(yield func(pkey.Key, RawItem) bool) {} } return maps.All(s.m) } // Get returns the value of the policy setting with the specified key // or nil if it is not configured or has an error. -func (s *Snapshot) Get(k Key) any { +func (s *Snapshot) Get(k pkey.Key) any { v, _ := s.GetErr(k) return v } @@ -50,7 +51,7 @@ func (s *Snapshot) Get(k Key) any { // GetErr returns the value of the policy setting with the specified key, // [ErrNotConfigured] if it is not configured, or an error returned by // the policy Store if the policy setting could not be read. 
-func (s *Snapshot) GetErr(k Key) (any, error) { +func (s *Snapshot) GetErr(k pkey.Key) (any, error) { if s != nil { if s, ok := s.m[k]; ok { return s.Value(), s.Error() @@ -62,7 +63,7 @@ func (s *Snapshot) GetErr(k Key) (any, error) { // GetSetting returns the untyped policy setting with the specified key and true // if a policy setting with such key has been configured; // otherwise, it returns zero, false. -func (s *Snapshot) GetSetting(k Key) (setting RawItem, ok bool) { +func (s *Snapshot) GetSetting(k pkey.Key) (setting RawItem, ok bool) { setting, ok = s.m[k] return setting, ok } @@ -94,9 +95,9 @@ func (s *Snapshot) EqualItems(s2 *Snapshot) bool { // Keys return an iterator over keys in s. The iteration order is not specified // and is not guaranteed to be the same from one call to the next. -func (s *Snapshot) Keys() iter.Seq[Key] { +func (s *Snapshot) Keys() iter.Seq[pkey.Key] { if s.m == nil { - return func(yield func(Key) bool) {} + return func(yield func(pkey.Key) bool) {} } return maps.Keys(s.m) } @@ -144,8 +145,8 @@ func (s *Snapshot) String() string { // snapshotJSON holds JSON-marshallable data for [Snapshot]. 
type snapshotJSON struct { - Summary Summary `json:",omitzero"` - Settings map[Key]RawItem `json:",omitempty"` + Summary Summary `json:",omitzero"` + Settings map[pkey.Key]RawItem `json:",omitempty"` } var ( @@ -232,7 +233,7 @@ func MergeSnapshots(snapshot1, snapshot2 *Snapshot) *Snapshot { } return &Snapshot{snapshot2.m, snapshot2.sig, SummaryWith(summaryOpts...)} } - m := make(map[Key]RawItem, snapshot1.Len()+snapshot2.Len()) + m := make(map[pkey.Key]RawItem, snapshot1.Len()+snapshot2.Len()) xmaps.Copy(m, snapshot1.m) xmaps.Copy(m, snapshot2.m) // snapshot2 has higher precedence return &Snapshot{m, deephash.Hash(&m), SummaryWith(summaryOpts...)} diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go index 19f014acaa831..99c619cd99bb8 100644 --- a/util/syspolicy/setting/snapshot_test.go +++ b/util/syspolicy/setting/snapshot_test.go @@ -11,6 +11,7 @@ import ( jsonv2 "github.com/go-json-experiment/json" "tailscale.com/util/syspolicy/internal" + "tailscale.com/util/syspolicy/pkey" ) func TestMergeSnapshots(t *testing.T) { @@ -23,23 +24,23 @@ func TestMergeSnapshots(t *testing.T) { name: "both-nil", s1: nil, s2: nil, - want: NewSnapshot(map[Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{}), }, { name: "both-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{}), - want: NewSnapshot(map[Key]RawItem{}), + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{}), }, { name: "first-nil", s1: nil, - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -47,13 +48,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "first-empty", - s1: 
NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -61,13 +62,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "second-nil", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }), s2: nil, - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -75,13 +76,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "second-empty", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{}), - want: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -89,17 +90,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "no-conflicts", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting4": RawItemOf(2 * time.Hour), "Setting5": RawItemOf(VisibleByPolicy), "Setting6": RawItemOf(ShowChoiceByPolicy), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -110,17 
+111,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-conflicts", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting3": RawItemOf(false), "Setting4": RawItemOf(2 * time.Hour), }), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -129,17 +130,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-first-wins", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting3": RawItemOf(false), "Setting4": RawItemOf(2 * time.Hour), }, CurrentUserScope), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -148,17 +149,17 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-second-wins", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting3": RawItemOf(false), "Setting4": RawItemOf(2 * time.Hour), }, DeviceScope), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(456), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -167,18 +168,18 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-both-empty", - s1: 
NewSnapshot(map[Key]RawItem{}, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{}, DeviceScope), - want: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), + s1: NewSnapshot(map[pkey.Key]RawItem{}, CurrentUserScope), + s2: NewSnapshot(map[pkey.Key]RawItem{}, DeviceScope), + want: NewSnapshot(map[pkey.Key]RawItem{}, CurrentUserScope), }, { name: "with-scope-first-empty", - s1: NewSnapshot(map[Key]RawItem{}, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{}, CurrentUserScope), + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true)}, DeviceScope, NewNamedOrigin("TestPolicy", DeviceScope)), - want: NewSnapshot(map[Key]RawItem{ + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -186,13 +187,13 @@ func TestMergeSnapshots(t *testing.T) { }, { name: "with-scope-second-empty", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), }, CurrentUserScope), - s2: NewSnapshot(map[Key]RawItem{}), - want: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{}), + want: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -226,28 +227,28 @@ func TestSnapshotEqual(t *testing.T) { { name: "nil-empty", s1: nil, - s2: NewSnapshot(map[Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), wantEqual: true, wantEqualItems: true, }, { name: "empty-nil", - s1: NewSnapshot(map[Key]RawItem{}), + s1: NewSnapshot(map[pkey.Key]RawItem{}), s2: nil, wantEqual: true, wantEqualItems: true, }, { name: "empty-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{}), + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), wantEqual: true, 
wantEqualItems: true, }, { name: "first-nil", s1: nil, - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -257,8 +258,8 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "first-empty", - s1: NewSnapshot(map[Key]RawItem{}), - s2: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -268,7 +269,7 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "second-nil", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(true), @@ -279,23 +280,23 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "second-empty", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{}), + s2: NewSnapshot(map[pkey.Key]RawItem{}), wantEqual: false, wantEqualItems: false, }, { name: "same-items-same-order-no-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -305,12 +306,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "same-items-same-order-same-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), 
"Setting3": RawItemOf(false), @@ -320,12 +321,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "same-items-different-order-same-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting3": RawItemOf(false), "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), @@ -335,12 +336,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "same-items-same-order-different-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), @@ -350,12 +351,12 @@ func TestSnapshotEqual(t *testing.T) { }, { name: "different-items-same-scope", - s1: NewSnapshot(map[Key]RawItem{ + s1: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(123), "Setting2": RawItemOf("String"), "Setting3": RawItemOf(false), }, DeviceScope), - s2: NewSnapshot(map[Key]RawItem{ + s2: NewSnapshot(map[pkey.Key]RawItem{ "Setting4": RawItemOf(2 * time.Hour), "Setting5": RawItemOf(VisibleByPolicy), "Setting6": RawItemOf(ShowChoiceByPolicy), @@ -404,7 +405,7 @@ func TestSnapshotString(t *testing.T) { }, { name: "non-empty", - snapshot: NewSnapshot(map[Key]RawItem{ + snapshot: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemOf(2 * time.Hour), "Setting2": RawItemOf(VisibleByPolicy), "Setting3": RawItemOf(ShowChoiceByPolicy), @@ -416,14 +417,14 @@ Setting3 = user-decides`, }, { name: "non-empty-with-item-origin", - snapshot: NewSnapshot(map[Key]RawItem{ + snapshot: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemWith(42, nil, NewNamedOrigin("Test Policy", DeviceScope)), }), wantString: 
`Setting1 = 42 - {Test Policy (Device)}`, }, { name: "non-empty-with-item-error", - snapshot: NewSnapshot(map[Key]RawItem{ + snapshot: NewSnapshot(map[pkey.Key]RawItem{ "Setting1": RawItemWith(nil, NewErrorText("bang!"), nil), }), wantString: `Setting1 = Error{"bang!"}`, @@ -458,55 +459,55 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { }, { name: "Bool/True", - snapshot: NewSnapshot(map[Key]RawItem{"BoolPolicy": RawItemOf(true)}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"BoolPolicy": RawItemOf(true)}), wantJSON: `{"Settings": {"BoolPolicy": {"Value": true}}}`, }, { name: "Bool/False", - snapshot: NewSnapshot(map[Key]RawItem{"BoolPolicy": RawItemOf(false)}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"BoolPolicy": RawItemOf(false)}), wantJSON: `{"Settings": {"BoolPolicy": {"Value": false}}}`, }, { name: "String/Non-Empty", - snapshot: NewSnapshot(map[Key]RawItem{"StringPolicy": RawItemOf("StringValue")}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"StringPolicy": RawItemOf("StringValue")}), wantJSON: `{"Settings": {"StringPolicy": {"Value": "StringValue"}}}`, }, { name: "String/Empty", - snapshot: NewSnapshot(map[Key]RawItem{"StringPolicy": RawItemOf("")}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"StringPolicy": RawItemOf("")}), wantJSON: `{"Settings": {"StringPolicy": {"Value": ""}}}`, }, { name: "Integer/NonZero", - snapshot: NewSnapshot(map[Key]RawItem{"IntPolicy": RawItemOf(uint64(42))}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"IntPolicy": RawItemOf(uint64(42))}), wantJSON: `{"Settings": {"IntPolicy": {"Value": 42}}}`, }, { name: "Integer/Zero", - snapshot: NewSnapshot(map[Key]RawItem{"IntPolicy": RawItemOf(uint64(0))}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"IntPolicy": RawItemOf(uint64(0))}), wantJSON: `{"Settings": {"IntPolicy": {"Value": 0}}}`, }, { name: "String-List", - snapshot: NewSnapshot(map[Key]RawItem{"ListPolicy": RawItemOf([]string{"Value1", "Value2"})}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"ListPolicy": 
RawItemOf([]string{"Value1", "Value2"})}), wantJSON: `{"Settings": {"ListPolicy": {"Value": ["Value1", "Value2"]}}}`, }, { name: "Duration/Zero", - snapshot: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf(time.Duration(0))}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf(time.Duration(0))}), wantJSON: `{"Settings": {"DurationPolicy": {"Value": "0s"}}}`, - wantBack: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf("0s")}), + wantBack: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf("0s")}), }, { name: "Duration/NonZero", - snapshot: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf(2 * time.Hour)}), + snapshot: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf(2 * time.Hour)}), wantJSON: `{"Settings": {"DurationPolicy": {"Value": "2h0m0s"}}}`, - wantBack: NewSnapshot(map[Key]RawItem{"DurationPolicy": RawItemOf("2h0m0s")}), + wantBack: NewSnapshot(map[pkey.Key]RawItem{"DurationPolicy": RawItemOf("2h0m0s")}), }, { name: "Empty/With-Summary", snapshot: NewSnapshot( - map[Key]RawItem{}, + map[pkey.Key]RawItem{}, SummaryWith(CurrentUserScope, NewNamedOrigin("TestSource", DeviceScope)), ), wantJSON: `{"Summary": {"Origin": {"Name": "TestSource", "Scope": "Device"}, "Scope": "User"}}`, @@ -514,7 +515,7 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { { name: "Setting/With-Summary", snapshot: NewSnapshot( - map[Key]RawItem{"PolicySetting": RawItemOf(uint64(42))}, + map[pkey.Key]RawItem{"PolicySetting": RawItemOf(uint64(42))}, SummaryWith(CurrentUserScope, NewNamedOrigin("TestSource", DeviceScope)), ), wantJSON: `{ @@ -525,7 +526,7 @@ func TestMarshalUnmarshalSnapshot(t *testing.T) { { name: "Settings/With-Origins", snapshot: NewSnapshot( - map[Key]RawItem{ + map[pkey.Key]RawItem{ "SettingA": RawItemWith(uint64(42), nil, NewNamedOrigin("SourceA", DeviceScope)), "SettingB": RawItemWith("B", nil, NewNamedOrigin("SourceB", CurrentProfileScope)), "SettingC": RawItemWith(true, nil, 
NewNamedOrigin("SourceC", CurrentUserScope)), diff --git a/util/syspolicy/source/env_policy_store.go b/util/syspolicy/source/env_policy_store.go index 299132b4e11b3..be363b79a84eb 100644 --- a/util/syspolicy/source/env_policy_store.go +++ b/util/syspolicy/source/env_policy_store.go @@ -11,6 +11,7 @@ import ( "strings" "unicode/utf8" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -22,7 +23,7 @@ var _ Store = (*EnvPolicyStore)(nil) type EnvPolicyStore struct{} // ReadString implements [Store]. -func (s *EnvPolicyStore) ReadString(key setting.Key) (string, error) { +func (s *EnvPolicyStore) ReadString(key pkey.Key) (string, error) { _, str, err := s.lookupSettingVariable(key) if err != nil { return "", err @@ -31,7 +32,7 @@ func (s *EnvPolicyStore) ReadString(key setting.Key) (string, error) { } // ReadUInt64 implements [Store]. -func (s *EnvPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { +func (s *EnvPolicyStore) ReadUInt64(key pkey.Key) (uint64, error) { name, str, err := s.lookupSettingVariable(key) if err != nil { return 0, err @@ -47,7 +48,7 @@ func (s *EnvPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { } // ReadBoolean implements [Store]. -func (s *EnvPolicyStore) ReadBoolean(key setting.Key) (bool, error) { +func (s *EnvPolicyStore) ReadBoolean(key pkey.Key) (bool, error) { name, str, err := s.lookupSettingVariable(key) if err != nil { return false, err @@ -63,7 +64,7 @@ func (s *EnvPolicyStore) ReadBoolean(key setting.Key) (bool, error) { } // ReadStringArray implements [Store]. 
-func (s *EnvPolicyStore) ReadStringArray(key setting.Key) ([]string, error) { +func (s *EnvPolicyStore) ReadStringArray(key pkey.Key) ([]string, error) { _, str, err := s.lookupSettingVariable(key) if err != nil || str == "" { return nil, err @@ -79,7 +80,7 @@ func (s *EnvPolicyStore) ReadStringArray(key setting.Key) ([]string, error) { return res[0:dst], nil } -func (s *EnvPolicyStore) lookupSettingVariable(key setting.Key) (name, value string, err error) { +func (s *EnvPolicyStore) lookupSettingVariable(key pkey.Key) (name, value string, err error) { name, err = keyToEnvVarName(key) if err != nil { return "", "", err @@ -103,7 +104,7 @@ var ( // // It's fine to use this in [EnvPolicyStore] without caching variable names since it's not a hot path. // [EnvPolicyStore] is not a [Changeable] policy store, so the conversion will only happen once. -func keyToEnvVarName(key setting.Key) (string, error) { +func keyToEnvVarName(key pkey.Key) (string, error) { if len(key) == 0 { return "", errEmptyKey } @@ -135,7 +136,7 @@ func keyToEnvVarName(key setting.Key) (string, error) { } case isDigit(c): split = currentWord.Len() > 0 && !isDigit(key[i-1]) - case c == setting.KeyPathSeparator: + case c == pkey.KeyPathSeparator: words = append(words, currentWord.String()) currentWord.Reset() continue diff --git a/util/syspolicy/source/env_policy_store_test.go b/util/syspolicy/source/env_policy_store_test.go index 9eacf6378b450..3255095b2d286 100644 --- a/util/syspolicy/source/env_policy_store_test.go +++ b/util/syspolicy/source/env_policy_store_test.go @@ -11,13 +11,14 @@ import ( "strconv" "testing" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) func TestKeyToEnvVarName(t *testing.T) { tests := []struct { name string - key setting.Key + key pkey.Key want string // suffix after "TS_DEBUGSYSPOLICY_" wantErr error }{ @@ -166,7 +167,7 @@ func TestEnvPolicyStore(t *testing.T) { } tests := []struct { name string - key setting.Key + key pkey.Key lookup 
func(string) (string, bool) want any wantErr error diff --git a/util/syspolicy/source/policy_reader.go b/util/syspolicy/source/policy_reader.go index a1bd3147ea85e..e6360e5f86a42 100644 --- a/util/syspolicy/source/policy_reader.go +++ b/util/syspolicy/source/policy_reader.go @@ -16,6 +16,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/internal/metrics" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -138,9 +139,9 @@ func (r *Reader) reload(force bool) (*setting.Snapshot, error) { metrics.Reset(r.origin) - var m map[setting.Key]setting.RawItem + var m map[pkey.Key]setting.RawItem if lastPolicyCount := r.lastPolicy.Len(); lastPolicyCount > 0 { - m = make(map[setting.Key]setting.RawItem, lastPolicyCount) + m = make(map[pkey.Key]setting.RawItem, lastPolicyCount) } for _, s := range r.settings { if !r.origin.Scope().IsConfigurableSetting(s) { diff --git a/util/syspolicy/source/policy_reader_test.go b/util/syspolicy/source/policy_reader_test.go index 57676e67da614..06246a209a875 100644 --- a/util/syspolicy/source/policy_reader_test.go +++ b/util/syspolicy/source/policy_reader_test.go @@ -9,6 +9,7 @@ import ( "time" "tailscale.com/util/must" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -72,7 +73,7 @@ func TestReaderLifecycle(t *testing.T) { initWant: setting.NewSnapshot(nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), addStrings: []TestSetting[string]{TestSettingOf("StringValue", "S1")}, addStringLists: []TestSetting[[]string]{TestSettingOf("StringListValue", []string{"S1", "S2", "S3"})}, - newWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + newWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "StringValue": setting.RawItemWith("S1", nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), "StringListValue": setting.RawItemWith([]string{"S1", "S2", "S3"}, nil, setting.NewNamedOrigin("Test", 
setting.DeviceScope)), }, setting.NewNamedOrigin("Test", setting.DeviceScope)), @@ -136,7 +137,7 @@ func TestReaderLifecycle(t *testing.T) { TestSettingOf("PreferenceOptionValue", "always"), TestSettingOf("VisibilityValue", "show"), }, - initWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + initWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "DurationValue": setting.RawItemWith(must.Get(time.ParseDuration("2h30m")), nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), "PreferenceOptionValue": setting.RawItemWith(setting.AlwaysByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), "VisibilityValue": setting.RawItemWith(setting.VisibleByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), @@ -165,7 +166,7 @@ func TestReaderLifecycle(t *testing.T) { initUInt64s: []TestSetting[uint64]{ TestSettingOf[uint64]("VisibilityValue", 42), // type mismatch }, - initWant: setting.NewSnapshot(map[setting.Key]setting.RawItem{ + initWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "DurationValue1": setting.RawItemWith(nil, setting.NewErrorText("time: invalid duration \"soon\""), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), "DurationValue2": setting.RawItemWith(nil, setting.NewErrorText("bang!"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), "PreferenceOptionValue": setting.RawItemWith(setting.ShowChoiceByPolicy, nil, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), @@ -277,7 +278,7 @@ func TestReadingSession(t *testing.T) { t.Fatalf("the session was closed prematurely") } - want := setting.NewSnapshot(map[setting.Key]setting.RawItem{ + want := setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "StringValue": setting.RawItemWith("S1", nil, origin), }, origin) if got := session.GetSettings(); !got.Equal(want) { diff --git a/util/syspolicy/source/policy_source.go b/util/syspolicy/source/policy_source.go index 7f2821b596e62..c4774217c09ac 100644 --- a/util/syspolicy/source/policy_source.go 
+++ b/util/syspolicy/source/policy_source.go @@ -13,6 +13,7 @@ import ( "io" "tailscale.com/types/lazy" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" ) @@ -31,19 +32,19 @@ type Store interface { // ReadString returns the value of a [setting.StringValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadString(key setting.Key) (string, error) + ReadString(key pkey.Key) (string, error) // ReadUInt64 returns the value of a [setting.IntegerValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadUInt64(key setting.Key) (uint64, error) + ReadUInt64(key pkey.Key) (uint64, error) // ReadBoolean returns the value of a [setting.BooleanValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadBoolean(key setting.Key) (bool, error) + ReadBoolean(key pkey.Key) (bool, error) // ReadStringArray returns the value of a [setting.StringListValue] with the specified key, // an [setting.ErrNotConfigured] if the policy setting is not configured, or // an error on failure. - ReadStringArray(key setting.Key) ([]string, error) + ReadStringArray(key pkey.Key) ([]string, error) } // Lockable is an optional interface that [Store] implementations may support. 
diff --git a/util/syspolicy/source/policy_store_windows.go b/util/syspolicy/source/policy_store_windows.go index 621701e84f23c..f97b17f3afee6 100644 --- a/util/syspolicy/source/policy_store_windows.go +++ b/util/syspolicy/source/policy_store_windows.go @@ -13,6 +13,7 @@ import ( "golang.org/x/sys/windows/registry" "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil/gp" ) @@ -251,7 +252,7 @@ func (ps *PlatformPolicyStore) onChange() { // ReadString retrieves a string policy with the specified key. // It returns [setting.ErrNotConfigured] if the policy setting does not exist. -func (ps *PlatformPolicyStore) ReadString(key setting.Key) (val string, err error) { +func (ps *PlatformPolicyStore) ReadString(key pkey.Key) (val string, err error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) (string, error) { val, _, err := key.GetStringValue(valueName) @@ -261,7 +262,7 @@ func (ps *PlatformPolicyStore) ReadString(key setting.Key) (val string, err erro // ReadUInt64 retrieves an integer policy with the specified key. // It returns [setting.ErrNotConfigured] if the policy setting does not exist. -func (ps *PlatformPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { +func (ps *PlatformPolicyStore) ReadUInt64(key pkey.Key) (uint64, error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) (uint64, error) { val, _, err := key.GetIntegerValue(valueName) @@ -271,7 +272,7 @@ func (ps *PlatformPolicyStore) ReadUInt64(key setting.Key) (uint64, error) { // ReadBoolean retrieves a boolean policy with the specified key. // It returns [setting.ErrNotConfigured] if the policy setting does not exist. 
-func (ps *PlatformPolicyStore) ReadBoolean(key setting.Key) (bool, error) { +func (ps *PlatformPolicyStore) ReadBoolean(key pkey.Key) (bool, error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) (bool, error) { val, _, err := key.GetIntegerValue(valueName) @@ -283,8 +284,8 @@ func (ps *PlatformPolicyStore) ReadBoolean(key setting.Key) (bool, error) { } // ReadString retrieves a multi-string policy with the specified key. -// It returns [setting.ErrNotConfigured] if the policy setting does not exist. -func (ps *PlatformPolicyStore) ReadStringArray(key setting.Key) ([]string, error) { +// It returns [setting.ErrNotConfigured] if the policy setting does not exist. +func (ps *PlatformPolicyStore) ReadStringArray(key pkey.Key) ([]string, error) { return getPolicyValue(ps, key, func(key registry.Key, valueName string) ([]string, error) { val, _, err := key.GetStringsValue(valueName) @@ -322,25 +323,25 @@ func (ps *PlatformPolicyStore) ReadStringArray(key setting.Key) ([]string, error }) } -// splitSettingKey extracts the registry key name and value name from a [setting.Key]. -// The [setting.Key] format allows grouping settings into nested categories using one -// or more [setting.KeyPathSeparator]s in the path. How individual policy settings are +// splitSettingKey extracts the registry key name and value name from a [pkey.Key]. +// The [pkey.Key] format allows grouping settings into nested categories using one +// or more [pkey.KeyPathSeparator]s in the path. How individual policy settings are // stored is an implementation detail of each [Store]. In the [PlatformPolicyStore] // for Windows, we map nested policy categories onto the Registry key hierarchy. -// The last component after a [setting.KeyPathSeparator] is treated as the value name, +// The last component after a [pkey.KeyPathSeparator] is treated as the value name, // while everything preceding it is considered a subpath (relative to the {HKLM,HKCU}\Software\Policies\Tailscale key). 
-// If there are no [setting.KeyPathSeparator]s in the key, the policy setting value +// If there are no [pkey.KeyPathSeparator]s in the key, the policy setting value // is meant to be stored directly under {HKLM,HKCU}\Software\Policies\Tailscale. -func splitSettingKey(key setting.Key) (path, valueName string) { - if idx := strings.LastIndexByte(string(key), setting.KeyPathSeparator); idx != -1 { - path = strings.ReplaceAll(string(key[:idx]), string(setting.KeyPathSeparator), `\`) +func splitSettingKey(key pkey.Key) (path, valueName string) { + if idx := strings.LastIndexByte(string(key), pkey.KeyPathSeparator); idx != -1 { + path = strings.ReplaceAll(string(key[:idx]), string(pkey.KeyPathSeparator), `\`) valueName = string(key[idx+1:]) return path, valueName } return "", string(key) } -func getPolicyValue[T any](ps *PlatformPolicyStore, key setting.Key, getter registryValueGetter[T]) (T, error) { +func getPolicyValue[T any](ps *PlatformPolicyStore, key pkey.Key, getter registryValueGetter[T]) (T, error) { var zero T ps.mu.Lock() diff --git a/util/syspolicy/source/policy_store_windows_test.go b/util/syspolicy/source/policy_store_windows_test.go index 33f85dc0b2b7e..4ab1da805d6c8 100644 --- a/util/syspolicy/source/policy_store_windows_test.go +++ b/util/syspolicy/source/policy_store_windows_test.go @@ -19,6 +19,7 @@ import ( "tailscale.com/tstest" "tailscale.com/util/cibuild" "tailscale.com/util/mak" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" @@ -31,7 +32,7 @@ import ( type subkeyStrings []string type testPolicyValue struct { - name setting.Key + name pkey.Key value any } @@ -100,7 +101,7 @@ func TestReadPolicyStore(t *testing.T) { t.Skipf("test requires running as elevated user") } tests := []struct { - name setting.Key + name pkey.Key newValue any legacyValue any want any @@ -269,7 +270,7 @@ func TestPolicyStoreChangeNotifications(t *testing.T) { func 
TestSplitSettingKey(t *testing.T) { tests := []struct { name string - key setting.Key + key pkey.Key wantPath string wantValue string }{ diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go index efaf4cd5a7c0f..ddec9efbb2d01 100644 --- a/util/syspolicy/source/test_store.go +++ b/util/syspolicy/source/test_store.go @@ -12,6 +12,7 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/slicesx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" ) @@ -31,7 +32,7 @@ type TestValueType interface { // TestSetting is a policy setting in a [TestStore]. type TestSetting[T TestValueType] struct { // Key is the setting's unique identifier. - Key setting.Key + Key pkey.Key // Error is the error to be returned by the [TestStore] when reading // a policy setting with the specified key. Error error @@ -43,20 +44,20 @@ type TestSetting[T TestValueType] struct { // TestSettingOf returns a [TestSetting] representing a policy setting // configured with the specified key and value. -func TestSettingOf[T TestValueType](key setting.Key, value T) TestSetting[T] { +func TestSettingOf[T TestValueType](key pkey.Key, value T) TestSetting[T] { return TestSetting[T]{Key: key, Value: value} } // TestSettingWithError returns a [TestSetting] representing a policy setting // with the specified key and error. -func TestSettingWithError[T TestValueType](key setting.Key, err error) TestSetting[T] { +func TestSettingWithError[T TestValueType](key pkey.Key, err error) TestSetting[T] { return TestSetting[T]{Key: key, Error: err} } // testReadOperation describes a single policy setting read operation. type testReadOperation struct { // Key is the setting's unique identifier. - Key setting.Key + Key pkey.Key // Type is a value type of a read operation. 
// [setting.BooleanValue], [setting.IntegerValue], [setting.StringValue] or [setting.StringListValue] Type setting.Type @@ -65,7 +66,7 @@ type testReadOperation struct { // TestExpectedReads is the number of read operations with the specified details. type TestExpectedReads struct { // Key is the setting's unique identifier. - Key setting.Key + Key pkey.Key // Type is a value type of a read operation. // [setting.BooleanValue], [setting.IntegerValue], [setting.StringValue] or [setting.StringListValue] Type setting.Type @@ -87,8 +88,8 @@ type TestStore struct { storeLockCount atomic.Int32 mu sync.RWMutex - suspendCount int // change callback are suspended if > 0 - mr, mw map[setting.Key]any // maps for reading and writing; they're the same unless the store is suspended. + suspendCount int // change callback are suspended if > 0 + mr, mw map[pkey.Key]any // maps for reading and writing; they're the same unless the store is suspended. cbs set.HandleSet[func()] closed bool @@ -99,7 +100,7 @@ type TestStore struct { // NewTestStore returns a new [TestStore]. // The tb will be used to report coding errors detected by the [TestStore]. func NewTestStore(tb testenv.TB) *TestStore { - m := make(map[setting.Key]any) + m := make(map[pkey.Key]any) store := &TestStore{ tb: tb, done: make(chan struct{}), @@ -162,7 +163,7 @@ func (s *TestStore) IsEmpty() bool { } // ReadString implements [Store]. -func (s *TestStore) ReadString(key setting.Key) (string, error) { +func (s *TestStore) ReadString(key pkey.Key) (string, error) { defer s.recordRead(key, setting.StringValue) s.mu.RLock() defer s.mu.RUnlock() @@ -181,7 +182,7 @@ func (s *TestStore) ReadString(key setting.Key) (string, error) { } // ReadUInt64 implements [Store]. 
-func (s *TestStore) ReadUInt64(key setting.Key) (uint64, error) { +func (s *TestStore) ReadUInt64(key pkey.Key) (uint64, error) { defer s.recordRead(key, setting.IntegerValue) s.mu.RLock() defer s.mu.RUnlock() @@ -200,7 +201,7 @@ func (s *TestStore) ReadUInt64(key setting.Key) (uint64, error) { } // ReadBoolean implements [Store]. -func (s *TestStore) ReadBoolean(key setting.Key) (bool, error) { +func (s *TestStore) ReadBoolean(key pkey.Key) (bool, error) { defer s.recordRead(key, setting.BooleanValue) s.mu.RLock() defer s.mu.RUnlock() @@ -219,7 +220,7 @@ func (s *TestStore) ReadBoolean(key setting.Key) (bool, error) { } // ReadStringArray implements [Store]. -func (s *TestStore) ReadStringArray(key setting.Key) ([]string, error) { +func (s *TestStore) ReadStringArray(key pkey.Key) ([]string, error) { defer s.recordRead(key, setting.StringListValue) s.mu.RLock() defer s.mu.RUnlock() @@ -237,7 +238,7 @@ func (s *TestStore) ReadStringArray(key setting.Key) ([]string, error) { return slice, nil } -func (s *TestStore) recordRead(key setting.Key, typ setting.Type) { +func (s *TestStore) recordRead(key pkey.Key, typ setting.Type) { s.readsMu.Lock() op := testReadOperation{key, typ} num := s.reads[op] @@ -399,7 +400,7 @@ func (s *TestStore) SetStringLists(settings ...TestSetting[[]string]) { } // Delete deletes the specified settings from s. 
-func (s *TestStore) Delete(keys ...setting.Key) { +func (s *TestStore) Delete(keys ...pkey.Key) { s.storeLock.Lock() for _, key := range keys { s.mu.Lock() diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 6555a58ac4564..0ac1d251745d3 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -17,6 +17,7 @@ import ( "time" "tailscale.com/util/syspolicy/internal/loggerx" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" @@ -58,7 +59,7 @@ func MustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicySc // HasAnyOf returns whether at least one of the specified policy settings is configured, // or an error if no keys are provided or the check fails. -func HasAnyOf(keys ...Key) (bool, error) { +func HasAnyOf(keys ...pkey.Key) (bool, error) { if len(keys) == 0 { return false, errors.New("at least one key must be specified") } @@ -82,25 +83,25 @@ func HasAnyOf(keys ...Key) (bool, error) { // GetString returns a string policy setting with the specified key, // or defaultValue if it does not exist. -func GetString(key Key, defaultValue string) (string, error) { +func GetString(key pkey.Key, defaultValue string) (string, error) { return getCurrentPolicySettingValue(key, defaultValue) } // GetUint64 returns a numeric policy setting with the specified key, // or defaultValue if it does not exist. -func GetUint64(key Key, defaultValue uint64) (uint64, error) { +func GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { return getCurrentPolicySettingValue(key, defaultValue) } // GetBoolean returns a boolean policy setting with the specified key, // or defaultValue if it does not exist. 
-func GetBoolean(key Key, defaultValue bool) (bool, error) { +func GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { return getCurrentPolicySettingValue(key, defaultValue) } // GetStringArray returns a multi-string policy setting with the specified key, // or defaultValue if it does not exist. -func GetStringArray(key Key, defaultValue []string) ([]string, error) { +func GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { return getCurrentPolicySettingValue(key, defaultValue) } @@ -110,14 +111,14 @@ func GetStringArray(key Key, defaultValue []string) ([]string, error) { // the authority to set. It describes user-decides/always/never options, where // "always" and "never" remove the user's ability to make a selection. If not // present or set to a different value, "user-decides" is the default. -func GetPreferenceOption(name Key) (setting.PreferenceOption, error) { +func GetPreferenceOption(name pkey.Key) (setting.PreferenceOption, error) { return getCurrentPolicySettingValue(name, setting.ShowChoiceByPolicy) } // GetPreferenceOptionOrDefault is like [GetPreferenceOption], but allows // specifying a default value to return if the policy setting is not configured. // It can be used in situations where "user-decides" is not the default. -func GetPreferenceOptionOrDefault(name Key, defaultValue setting.PreferenceOption) (setting.PreferenceOption, error) { +func GetPreferenceOptionOrDefault(name pkey.Key, defaultValue setting.PreferenceOption) (setting.PreferenceOption, error) { return getCurrentPolicySettingValue(name, defaultValue) } @@ -126,7 +127,7 @@ func GetPreferenceOptionOrDefault(name Key, defaultValue setting.PreferenceOptio // for UI elements. The registry value should be a string set to "show" (return // true) or "hide" (return true). If not present or set to a different value, // "show" (return false) is the default. 
-func GetVisibility(name Key) (setting.Visibility, error) { +func GetVisibility(name pkey.Key) (setting.Visibility, error) { return getCurrentPolicySettingValue(name, setting.VisibleByPolicy) } @@ -135,7 +136,7 @@ func GetVisibility(name Key) (setting.Visibility, error) { // action. The registry value should be a string that time.ParseDuration // understands. If the registry value is "" or can not be processed, // defaultValue is returned instead. -func GetDuration(name Key, defaultValue time.Duration) (time.Duration, error) { +func GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { d, err := getCurrentPolicySettingValue(name, defaultValue) if err != nil { return d, err @@ -160,7 +161,7 @@ func RegisterChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), er // specified by its key from the [rsop.Policy] of the [setting.DefaultScope]. It // returns def if the policy setting is not configured, or an error if it has // an error or could not be converted to the specified type T. 
-func getCurrentPolicySettingValue[T setting.ValueType](key Key, def T) (T, error) { +func getCurrentPolicySettingValue[T setting.ValueType](key pkey.Key, def T) (T, error) { effective, err := rsop.PolicyFor(setting.DefaultScope()) if err != nil { return def, err diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index fc01f364597c1..5e822a0b7a2aa 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -12,6 +12,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/internal/metrics" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/util/testenv" @@ -22,7 +23,7 @@ var someOtherError = errors.New("error other than not found") func TestGetString(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error defaultValue string @@ -32,7 +33,7 @@ func TestGetString(t *testing.T) { }{ { name: "read existing value", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "hide", wantValue: "hide", wantMetrics: []metrics.TestState{ @@ -42,13 +43,13 @@ func TestGetString(t *testing.T) { }, { name: "read non-existing value", - key: EnableServerMode, + key: pkey.EnableServerMode, handlerError: ErrNotConfigured, wantError: nil, }, { name: "read non-existing value, non-blank default", - key: EnableServerMode, + key: pkey.EnableServerMode, handlerError: ErrNotConfigured, defaultValue: "test", wantValue: "test", @@ -56,7 +57,7 @@ func TestGetString(t *testing.T) { }, { name: "reading value returns other error", - key: NetworkDevicesVisibility, + key: pkey.NetworkDevicesVisibility, handlerError: someOtherError, wantError: someOtherError, wantMetrics: []metrics.TestState{ @@ -103,7 +104,7 @@ func TestGetString(t *testing.T) { func TestGetUint64(t *testing.T) { tests := []struct { name string - key Key + 
key pkey.Key handlerValue uint64 handlerError error defaultValue uint64 @@ -112,27 +113,27 @@ func TestGetUint64(t *testing.T) { }{ { name: "read existing value", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, handlerValue: 1, wantValue: 1, }, { name: "read non-existing value", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, handlerValue: 0, handlerError: ErrNotConfigured, wantValue: 0, }, { name: "read non-existing value, non-zero default", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, defaultValue: 2, handlerError: ErrNotConfigured, wantValue: 2, }, { name: "reading value returns other error", - key: FlushDNSOnSessionUnlock, + key: pkey.FlushDNSOnSessionUnlock, handlerError: someOtherError, wantError: someOtherError, }, @@ -169,7 +170,7 @@ func TestGetUint64(t *testing.T) { func TestGetBoolean(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue bool handlerError error defaultValue bool @@ -179,7 +180,7 @@ func TestGetBoolean(t *testing.T) { }{ { name: "read existing value", - key: FlushDNSOnSessionUnlock, + key: pkey.FlushDNSOnSessionUnlock, handlerValue: true, wantValue: true, wantMetrics: []metrics.TestState{ @@ -189,14 +190,14 @@ func TestGetBoolean(t *testing.T) { }, { name: "read non-existing value", - key: LogSCMInteractions, + key: pkey.LogSCMInteractions, handlerValue: false, handlerError: ErrNotConfigured, wantValue: false, }, { name: "reading value returns other error", - key: FlushDNSOnSessionUnlock, + key: pkey.FlushDNSOnSessionUnlock, handlerError: someOtherError, wantError: someOtherError, // expect error... 
defaultValue: true, @@ -245,7 +246,7 @@ func TestGetBoolean(t *testing.T) { func TestGetPreferenceOption(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error wantValue setting.PreferenceOption @@ -254,7 +255,7 @@ func TestGetPreferenceOption(t *testing.T) { }{ { name: "always by policy", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerValue: "always", wantValue: setting.AlwaysByPolicy, wantMetrics: []metrics.TestState{ @@ -264,7 +265,7 @@ func TestGetPreferenceOption(t *testing.T) { }, { name: "never by policy", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerValue: "never", wantValue: setting.NeverByPolicy, wantMetrics: []metrics.TestState{ @@ -274,7 +275,7 @@ func TestGetPreferenceOption(t *testing.T) { }, { name: "use default", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerValue: "", wantValue: setting.ShowChoiceByPolicy, wantMetrics: []metrics.TestState{ @@ -284,13 +285,13 @@ func TestGetPreferenceOption(t *testing.T) { }, { name: "read non-existing value", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerError: ErrNotConfigured, wantValue: setting.ShowChoiceByPolicy, }, { name: "other error is returned", - key: EnableIncomingConnections, + key: pkey.EnableIncomingConnections, handlerError: someOtherError, wantValue: setting.ShowChoiceByPolicy, wantError: someOtherError, @@ -338,7 +339,7 @@ func TestGetPreferenceOption(t *testing.T) { func TestGetVisibility(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error wantValue setting.Visibility @@ -347,7 +348,7 @@ func TestGetVisibility(t *testing.T) { }{ { name: "hidden by policy", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "hide", wantValue: setting.HiddenByPolicy, wantMetrics: []metrics.TestState{ @@ -357,7 +358,7 @@ func 
TestGetVisibility(t *testing.T) { }, { name: "visibility default", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "show", wantValue: setting.VisibleByPolicy, wantMetrics: []metrics.TestState{ @@ -367,14 +368,14 @@ func TestGetVisibility(t *testing.T) { }, { name: "read non-existing value", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "show", handlerError: ErrNotConfigured, wantValue: setting.VisibleByPolicy, }, { name: "other error is returned", - key: AdminConsoleVisibility, + key: pkey.AdminConsoleVisibility, handlerValue: "show", handlerError: someOtherError, wantValue: setting.VisibleByPolicy, @@ -423,7 +424,7 @@ func TestGetVisibility(t *testing.T) { func TestGetDuration(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue string handlerError error defaultValue time.Duration @@ -433,7 +434,7 @@ func TestGetDuration(t *testing.T) { }{ { name: "read existing value", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerValue: "2h", wantValue: 2 * time.Hour, defaultValue: 24 * time.Hour, @@ -444,7 +445,7 @@ func TestGetDuration(t *testing.T) { }, { name: "invalid duration value", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerValue: "-20", wantValue: 24 * time.Hour, wantError: errors.New(`time: missing unit in duration "-20"`), @@ -456,21 +457,21 @@ func TestGetDuration(t *testing.T) { }, { name: "read non-existing value", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerError: ErrNotConfigured, wantValue: 24 * time.Hour, defaultValue: 24 * time.Hour, }, { name: "read non-existing value different default", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerError: ErrNotConfigured, wantValue: 0 * time.Second, defaultValue: 0 * time.Second, }, { name: "other error is returned", - key: KeyExpirationNoticeTime, + key: pkey.KeyExpirationNoticeTime, handlerError: 
someOtherError, wantValue: 24 * time.Hour, wantError: someOtherError, @@ -519,7 +520,7 @@ func TestGetDuration(t *testing.T) { func TestGetStringArray(t *testing.T) { tests := []struct { name string - key Key + key pkey.Key handlerValue []string handlerError error defaultValue []string @@ -529,7 +530,7 @@ func TestGetStringArray(t *testing.T) { }{ { name: "read existing value", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerValue: []string{"foo", "bar"}, wantValue: []string{"foo", "bar"}, wantMetrics: []metrics.TestState{ @@ -539,13 +540,13 @@ func TestGetStringArray(t *testing.T) { }, { name: "read non-existing value", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerError: ErrNotConfigured, wantError: nil, }, { name: "read non-existing value, non nil default", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerError: ErrNotConfigured, defaultValue: []string{"foo", "bar"}, wantValue: []string{"foo", "bar"}, @@ -553,7 +554,7 @@ func TestGetStringArray(t *testing.T) { }, { name: "reading value returns other error", - key: AllowedSuggestedExitNodes, + key: pkey.AllowedSuggestedExitNodes, handlerError: someOtherError, wantError: someOtherError, wantMetrics: []metrics.TestState{ @@ -607,11 +608,11 @@ func BenchmarkGetString(b *testing.B) { RegisterWellKnownSettingsForTest(b) wantControlURL := "https://login.tailscale.com" - registerSingleSettingStoreForTest(b, source.TestSettingOf(ControlURL, wantControlURL)) + registerSingleSettingStoreForTest(b, source.TestSettingOf(pkey.ControlURL, wantControlURL)) b.ResetTimer() for i := 0; i < b.N; i++ { - gotControlURL, _ := GetString(ControlURL, "https://controlplane.tailscale.com") + gotControlURL, _ := GetString(pkey.ControlURL, "https://controlplane.tailscale.com") if gotControlURL != wantControlURL { b.Fatalf("got %v; want %v", gotControlURL, wantControlURL) } From 921d53904c29761649057bdd2610cd6733dd030e Mon Sep 17 00:00:00 2001 
From: Erisa A Date: Mon, 1 Sep 2025 13:02:01 +0100 Subject: [PATCH 0273/1093] CODE_OF_CONDUCT.md: fix duplicate entry (#16814) Remove duplicate entry not present on approved wording Updates #cleanup Signed-off-by: Erisa A --- CODE_OF_CONDUCT.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 3d33bba98c863..a5877cb112eff 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -32,8 +32,6 @@ Examples of unacceptable behavior include without limitation: * Distributing or promoting malware; * Other conduct which could reasonably be considered inappropriate in a professional setting. -* Please also see the Tailscale Acceptable Use Policy, available at - [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). From d05e6dc09e7a36e2b6082ce259e33eb3eecd0c0c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 1 Sep 2025 08:04:17 -0700 Subject: [PATCH 0274/1093] util/syspolicy/policyclient: add policyclient.Client interface, start plumbing This is step 2 of ~4, breaking up #14720 into reviewable chunks, with the aim to make syspolicy be a build-time configurable feature. Step 1 was #16984. In this second step, the util/syspolicy/policyclient package is added with the policyclient.Client interface. This is the interface that's always present (regardless of build tags), and is what code around the tree uses to ask syspolicy/MDM questions. There are two implementations of policyclient.Client for now: 1) NoPolicyClient, which only returns default values. 2) the unexported, temporary 'globalSyspolicy', which is implemented in terms of the global functions we wish to later eliminate. This then starts to plumb around the policyclient.Client to most callers. Future changes will plumb it more. 
When the last of the global func callers are gone, then we can unexport the global functions and make a proper policyclient.Client type and constructor in the syspolicy package, removing the globalSyspolicy impl out of tsd. The final change will sprinkle build tags in a few more places and lock it in with dependency tests to make sure the dependencies don't later creep back in. Updates #16998 Updates #12614 Change-Id: Ib2c93d15c15c1f2b981464099177cd492d50391c Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 5 +- control/controlclient/direct.go | 10 ++- control/controlclient/sign_supported.go | 10 +-- control/controlclient/sign_unsupported.go | 3 +- ipn/ipnlocal/c2n.go | 2 +- ipn/ipnlocal/local.go | 9 ++- net/dns/manager_windows.go | 4 +- posture/serialnumber_macos.go | 3 +- posture/serialnumber_macos_test.go | 3 +- posture/serialnumber_notmacos.go | 3 +- posture/serialnumber_notmacos_test.go | 3 +- posture/serialnumber_stub.go | 3 +- posture/serialnumber_syspolicy.go | 6 +- posture/serialnumber_test.go | 3 +- tsd/syspolicy_off.go | 12 ++++ tsd/syspolicy_on.go | 41 ++++++++++++ tsd/tsd.go | 7 +++ tsnet/depaware.txt | 5 +- util/syspolicy/policyclient/policyclient.go | 66 ++++++++++++++++++++ util/syspolicy/rsop/change_callbacks.go | 3 +- util/syspolicy/rsop/resultant_policy_test.go | 13 ++-- 25 files changed, 184 insertions(+), 36 deletions(-) create mode 100644 tsd/syspolicy_off.go create mode 100644 tsd/syspolicy_on.go create mode 100644 util/syspolicy/policyclient/policyclient.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index ccea25a8a8932..0597d5d1f0210 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -175,6 +175,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/syspolicy/internal/loggerx from 
tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index a0214575bc625..40c8abb0813cb 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -951,11 +951,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/set from tailscale.com/cmd/k8s-operator+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy from tailscale.com/ipn+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 7f09be33f4bd8..cf1691c71a52d 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -196,6 +196,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by 
github.com/tailscale/dep tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 46efa5b211fe4..f08601f81eb27 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -433,6 +433,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index f1e22efbfcfe6..743492904f796 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -380,12 +380,13 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy from tailscale.com/ipn+ 
tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ - tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/systemd from tailscale.com/control/controlclient+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index cee9387795ef0..47283a673c935 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -6,6 +6,7 @@ package controlclient import ( "bufio" "bytes" + "cmp" "context" "encoding/binary" "encoding/json" @@ -53,8 +54,8 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/multierr" "tailscale.com/util/singleflight" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/zstdframe" @@ -77,6 +78,7 @@ type Direct struct { debugFlags []string skipIPForwardingCheck bool pinger Pinger + polc policyclient.Client // always non-nil popBrowser func(url string) // or nil c2nHandler http.Handler // or nil onClientVersion func(*tailcfg.ClientVersion) // or nil @@ -125,6 +127,7 @@ type Options struct { Clock tstime.Clock Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc DiscoPublicKey key.DiscoPublic + PolicyClient policyclient.Client // or nil for none Logf logger.Logf HTTPTestClient *http.Client // optional HTTP client to use (for 
tests only) NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only) @@ -299,6 +302,7 @@ func NewDirect(opts Options) (*Direct, error) { health: opts.HealthTracker, skipIPForwardingCheck: opts.SkipIPForwardingCheck, pinger: opts.Pinger, + polc: cmp.Or(opts.PolicyClient, policyclient.Client(policyclient.NoPolicyClient{})), popBrowser: opts.PopBrowserURL, onClientVersion: opts.OnClientVersion, onTailnetDefaultAutoUpdate: opts.OnTailnetDefaultAutoUpdate, @@ -617,7 +621,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new return regen, opt.URL, nil, err } - tailnet, err := syspolicy.GetString(pkey.Tailnet, "") + tailnet, err := c.polc.GetString(pkey.Tailnet, "") if err != nil { c.logf("unable to provide Tailnet field in register request. err: %v", err) } @@ -647,7 +651,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new AuthKey: authKey, } } - err = signRegisterRequest(&request, c.serverURL, c.serverLegacyKey, machinePrivKey.Public()) + err = signRegisterRequest(c.polc, &request, c.serverURL, c.serverLegacyKey, machinePrivKey.Public()) if err != nil { // If signing failed, clear all related fields request.SignatureType = tailcfg.SignatureNone diff --git a/control/controlclient/sign_supported.go b/control/controlclient/sign_supported.go index fab7cd16b0810..439e6d36b4fe3 100644 --- a/control/controlclient/sign_supported.go +++ b/control/controlclient/sign_supported.go @@ -18,8 +18,8 @@ import ( "github.com/tailscale/certstore" "tailscale.com/tailcfg" "tailscale.com/types/key" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) // getMachineCertificateSubject returns the exact name of a Subject that needs @@ -31,8 +31,8 @@ import ( // each RegisterRequest will be unsigned. 
// // Example: "CN=Tailscale Inc Test Root CA,OU=Tailscale Inc Test Certificate Authority,O=Tailscale Inc,ST=ON,C=CA" -func getMachineCertificateSubject() string { - machineCertSubject, _ := syspolicy.GetString(pkey.MachineCertificateSubject, "") +func getMachineCertificateSubject(polc policyclient.Client) string { + machineCertSubject, _ := polc.GetString(pkey.MachineCertificateSubject, "") return machineCertSubject } @@ -137,7 +137,7 @@ func findIdentity(subject string, st certstore.Store) (certstore.Identity, []*x5 // using that identity's public key. In addition to the signature, the full // certificate chain is included so that the control server can validate the // certificate from a copy of the root CA's certificate. -func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) (err error) { +func signRegisterRequest(polc policyclient.Client, req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) (err error) { defer func() { if err != nil { err = fmt.Errorf("signRegisterRequest: %w", err) @@ -148,7 +148,7 @@ func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverP return errBadRequest } - machineCertificateSubject := getMachineCertificateSubject() + machineCertificateSubject := getMachineCertificateSubject(polc) if machineCertificateSubject == "" { return errCertificateNotConfigured } diff --git a/control/controlclient/sign_unsupported.go b/control/controlclient/sign_unsupported.go index 5e161dcbce453..f6c4ddc6288fb 100644 --- a/control/controlclient/sign_unsupported.go +++ b/control/controlclient/sign_unsupported.go @@ -8,9 +8,10 @@ package controlclient import ( "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/util/syspolicy/policyclient" ) // signRegisterRequest on non-supported platforms always returns errNoCertStore. 
-func signRegisterRequest(req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) error { +func signRegisterRequest(polc policyclient.Client, req *tailcfg.RegisterRequest, serverURL string, serverPubKey, machinePubKey key.MachinePublic) error { return errNoCertStore } diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 8c3bf7b26a50f..b1a780cc10c64 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -353,7 +353,7 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http } if choice.ShouldEnable(b.Prefs().PostureChecking()) { - res.SerialNumbers, err = posture.GetSerialNumbers(b.logf) + res.SerialNumbers, err = posture.GetSerialNumbers(b.polc, b.logf) if err != nil { b.logf("c2n: GetSerialNumbers returned error: %v", err) } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bcfb99b09af34..61bde31e48405 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -109,7 +109,7 @@ import ( "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" @@ -203,7 +203,8 @@ type LocalBackend struct { keyLogf logger.Logf // for printing list of peers on change statsLogf logger.Logf // for printing peers stats on change sys *tsd.System - health *health.Tracker // always non-nil + health *health.Tracker // always non-nil + polc policyclient.Client // always non-nil metrics metrics e wgengine.Engine // non-nil; TODO(bradfitz): remove; use sys store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys @@ -515,6 +516,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo keyLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), sys: sys, + polc: 
sys.PolicyClientOrDefault(), health: sys.HealthTracker(), metrics: m, e: e, @@ -1970,7 +1972,7 @@ func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { // sysPolicyChanged is a callback triggered by syspolicy when it detects // a change in one or more syspolicy settings. -func (b *LocalBackend) sysPolicyChanged(policy *rsop.PolicyChange) { +func (b *LocalBackend) sysPolicyChanged(policy policyclient.PolicyChange) { if policy.HasChangedAnyOf(pkey.AlwaysOn, pkey.AlwaysOnOverrideWithReason) { // If the AlwaysOn or the AlwaysOnOverrideWithReason policy has changed, // we should reset the overrideAlwaysOn flag, as the override might @@ -2468,6 +2470,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { DiscoPublicKey: discoPublic, DebugFlags: debugFlags, HealthTracker: b.health, + PolicyClient: b.sys.PolicyClientOrDefault(), Pinger: b, PopBrowserURL: b.tellClientToBrowseToURL, OnClientVersion: b.onClientVersion, diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index d1cec2a00ed03..901ab6dd062bd 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -31,7 +31,7 @@ import ( "tailscale.com/util/dnsname" "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/winutil" ) @@ -508,7 +508,7 @@ func (m *windowsManager) Close() error { // sysPolicyChanged is a callback triggered by [syspolicy] when it detects // a change in one or more syspolicy settings. 
-func (m *windowsManager) sysPolicyChanged(policy *rsop.PolicyChange) { +func (m *windowsManager) sysPolicyChanged(policy policyclient.PolicyChange) { if policy.HasChanged(pkey.EnableDNSRegistration) { m.reconfigureDNSRegistration() } diff --git a/posture/serialnumber_macos.go b/posture/serialnumber_macos.go index 48355d31393ee..18c929107a768 100644 --- a/posture/serialnumber_macos.go +++ b/posture/serialnumber_macos.go @@ -59,10 +59,11 @@ import ( "strings" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // GetSerialNumber returns the platform serial sumber as reported by IOKit. -func GetSerialNumbers(_ logger.Logf) ([]string, error) { +func GetSerialNumbers(policyclient.Client, logger.Logf) ([]string, error) { csn := C.getSerialNumber() serialNumber := C.GoString(csn) diff --git a/posture/serialnumber_macos_test.go b/posture/serialnumber_macos_test.go index 9f0ce1c6a76d6..9d9b9f578da55 100644 --- a/posture/serialnumber_macos_test.go +++ b/posture/serialnumber_macos_test.go @@ -11,6 +11,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/cibuild" + "tailscale.com/util/syspolicy/policyclient" ) func TestGetSerialNumberMac(t *testing.T) { @@ -20,7 +21,7 @@ func TestGetSerialNumberMac(t *testing.T) { t.Skip() } - sns, err := GetSerialNumbers(logger.Discard) + sns, err := GetSerialNumbers(policyclient.NoPolicyClient{}, logger.Discard) if err != nil { t.Fatalf("failed to get serial number: %s", err) } diff --git a/posture/serialnumber_notmacos.go b/posture/serialnumber_notmacos.go index 8b91738b04bfa..132fa08f6a56e 100644 --- a/posture/serialnumber_notmacos.go +++ b/posture/serialnumber_notmacos.go @@ -13,6 +13,7 @@ import ( "github.com/digitalocean/go-smbios/smbios" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // getByteFromSmbiosStructure retrieves a 8-bit unsigned integer at the given specOffset. 
@@ -71,7 +72,7 @@ func init() { numOfTables = len(validTables) } -func GetSerialNumbers(logf logger.Logf) ([]string, error) { +func GetSerialNumbers(polc policyclient.Client, logf logger.Logf) ([]string, error) { // Find SMBIOS data in operating system-specific location. rc, _, err := smbios.Stream() if err != nil { diff --git a/posture/serialnumber_notmacos_test.go b/posture/serialnumber_notmacos_test.go index f2a15e0373caf..da5aada8509e3 100644 --- a/posture/serialnumber_notmacos_test.go +++ b/posture/serialnumber_notmacos_test.go @@ -12,6 +12,7 @@ import ( "testing" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) func TestGetSerialNumberNotMac(t *testing.T) { @@ -21,7 +22,7 @@ func TestGetSerialNumberNotMac(t *testing.T) { // Comment out skip for local testing. t.Skip() - sns, err := GetSerialNumbers(logger.Discard) + sns, err := GetSerialNumbers(policyclient.NoPolicyClient{}, logger.Discard) if err != nil { t.Fatalf("failed to get serial number: %s", err) } diff --git a/posture/serialnumber_stub.go b/posture/serialnumber_stub.go index 4cc84fa133489..854a0014bd1bf 100644 --- a/posture/serialnumber_stub.go +++ b/posture/serialnumber_stub.go @@ -14,9 +14,10 @@ import ( "errors" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // GetSerialNumber returns client machine serial number(s). 
-func GetSerialNumbers(_ logger.Logf) ([]string, error) { +func GetSerialNumbers(polc policyclient.Client, _ logger.Logf) ([]string, error) { return nil, errors.New("not implemented") } diff --git a/posture/serialnumber_syspolicy.go b/posture/serialnumber_syspolicy.go index 5123d561db2cd..64a154a2cae0b 100644 --- a/posture/serialnumber_syspolicy.go +++ b/posture/serialnumber_syspolicy.go @@ -9,15 +9,15 @@ import ( "fmt" "tailscale.com/types/logger" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) // GetSerialNumbers returns the serial number of the device as reported by an // MDM solution. It requires configuration via the DeviceSerialNumber system policy. // This is the only way to gather serial numbers on iOS, tvOS and Android. -func GetSerialNumbers(_ logger.Logf) ([]string, error) { - s, err := syspolicy.GetString(pkey.DeviceSerialNumber, "") +func GetSerialNumbers(polc policyclient.Client, _ logger.Logf) ([]string, error) { + s, err := polc.GetString(pkey.DeviceSerialNumber, "") if err != nil { return nil, fmt.Errorf("failed to get serial number from MDM: %v", err) } diff --git a/posture/serialnumber_test.go b/posture/serialnumber_test.go index fac4392fab7d3..6db3651e21cd7 100644 --- a/posture/serialnumber_test.go +++ b/posture/serialnumber_test.go @@ -7,10 +7,11 @@ import ( "testing" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) func TestGetSerialNumber(t *testing.T) { // ensure GetSerialNumbers is implemented // or covered by a stub on a given platform. 
- _, _ = GetSerialNumbers(logger.Discard) + _, _ = GetSerialNumbers(policyclient.NoPolicyClient{}, logger.Discard) } diff --git a/tsd/syspolicy_off.go b/tsd/syspolicy_off.go new file mode 100644 index 0000000000000..221b8f223eadc --- /dev/null +++ b/tsd/syspolicy_off.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_syspolicy + +package tsd + +import ( + "tailscale.com/util/syspolicy/policyclient" +) + +func getPolicyClient() policyclient.Client { return policyclient.NoPolicyClient{} } diff --git a/tsd/syspolicy_on.go b/tsd/syspolicy_on.go new file mode 100644 index 0000000000000..8d7762bd9c5c8 --- /dev/null +++ b/tsd/syspolicy_on.go @@ -0,0 +1,41 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package tsd + +import ( + "tailscale.com/util/syspolicy" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" +) + +func getPolicyClient() policyclient.Client { return globalSyspolicy{} } + +// globalSyspolicy implements [policyclient.Client] using the syspolicy global +// functions and global registrations. +// +// TODO: de-global-ify. This implementation using the old global functions +// is an intermediate stage while changing policyclient to be modular. 
+type globalSyspolicy struct{} + +func (globalSyspolicy) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { + return syspolicy.GetBoolean(key, defaultValue) +} + +func (globalSyspolicy) GetString(key pkey.Key, defaultValue string) (string, error) { + return syspolicy.GetString(key, defaultValue) +} + +func (globalSyspolicy) GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { + return syspolicy.GetStringArray(key, defaultValue) +} + +func (globalSyspolicy) SetDebugLoggingEnabled(enabled bool) { + syspolicy.SetDebugLoggingEnabled(enabled) +} + +func (globalSyspolicy) RegisterChangeCallback(cb func(policyclient.PolicyChange)) (unregister func(), err error) { + return syspolicy.RegisterChangeCallback(cb) +} diff --git a/tsd/tsd.go b/tsd/tsd.go index ccd804f816aaa..b7194a3d7b5e6 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -33,6 +33,7 @@ import ( "tailscale.com/proxymap" "tailscale.com/types/netmap" "tailscale.com/util/eventbus" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/usermetric" "tailscale.com/wgengine" "tailscale.com/wgengine/magicsock" @@ -165,6 +166,12 @@ func (s *System) UserMetricsRegistry() *usermetric.Registry { return &s.userMetricsRegistry } +// PolicyClientOrDefault returns the policy client if set or a no-op default +// otherwise. It always returns a non-nil value. +func (s *System) PolicyClientOrDefault() policyclient.Client { + return getPolicyClient() +} + // SubSystem represents some subsystem of the Tailscale node daemon. // // A subsystem can be set to a value, and then later retrieved. 
A subsystem diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index bdf90c9a8c03c..f4b0dc775415b 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -375,12 +375,13 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy from tailscale.com/ipn+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ - tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/systemd from tailscale.com/control/controlclient+ diff --git a/util/syspolicy/policyclient/policyclient.go b/util/syspolicy/policyclient/policyclient.go new file mode 100644 index 0000000000000..0b15599c1591a --- /dev/null +++ b/util/syspolicy/policyclient/policyclient.go @@ -0,0 +1,66 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package policyclient contains the minimal syspolicy interface as needed by +// client code using syspolicy. It's the part that's always linked in, even if the rest +// of syspolicy is omitted from the build. 
+package policyclient + +import "tailscale.com/util/syspolicy/pkey" + +// Client is the interface between code making questions about the system policy +// and the actual implementation. +type Client interface { + // GetString returns a string policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetString(key pkey.Key, defaultValue string) (string, error) + + // GetStringArray returns a string array policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) + + // GetBoolean returns a boolean policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetBoolean(key pkey.Key, defaultValue bool) (bool, error) + + // SetDebugLoggingEnabled enables or disables debug logging for the policy client. + SetDebugLoggingEnabled(enabled bool) + + // RegisterChangeCallback registers a callback function that will be called + // whenever a policy change is detected. It returns a function to unregister + // the callback and an error if the registration fails. + RegisterChangeCallback(cb func(PolicyChange)) (unregister func(), err error) +} + +// PolicyChange is the interface representing a change in policy settings. +type PolicyChange interface { + // HasChanged reports whether the policy setting identified by the given key + // has changed. + HasChanged(pkey.Key) bool + + // HasChangedAnyOf reports whether any of the provided policy settings + // changed in this change. + HasChangedAnyOf(keys ...pkey.Key) bool +} + +// NoPolicyClient is a no-op implementation of [Client] that only +// returns default values. 
+type NoPolicyClient struct{} + +func (NoPolicyClient) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetString(key pkey.Key, defaultValue string) (string, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { + return defaultValue, nil +} + +func (NoPolicyClient) SetDebugLoggingEnabled(enabled bool) {} + +func (NoPolicyClient) RegisterChangeCallback(cb func(PolicyChange)) (unregister func(), err error) { + return func() {}, nil +} diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index 59dba07c6a93c..4e71f683a943d 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -12,6 +12,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/setting" ) @@ -21,7 +22,7 @@ type Change[T any] struct { } // PolicyChangeCallback is a function called whenever a policy changes. -type PolicyChangeCallback func(*PolicyChange) +type PolicyChangeCallback func(policyclient.PolicyChange) // PolicyChange describes a policy change. type PolicyChange struct { diff --git a/util/syspolicy/rsop/resultant_policy_test.go b/util/syspolicy/rsop/resultant_policy_test.go index 2da46a8ca958a..3ff1421197b1f 100644 --- a/util/syspolicy/rsop/resultant_policy_test.go +++ b/util/syspolicy/rsop/resultant_policy_test.go @@ -16,6 +16,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/tstest" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" @@ -602,8 +603,8 @@ func TestChangePolicySetting(t *testing.T) { } // Subscribe to the policy change callback... 
- policyChanged := make(chan *PolicyChange) - unregister := policy.RegisterChangeCallback(func(pc *PolicyChange) { policyChanged <- pc }) + policyChanged := make(chan policyclient.PolicyChange) + unregister := policy.RegisterChangeCallback(func(pc policyclient.PolicyChange) { policyChanged <- pc }) t.Cleanup(unregister) // ...make the change, and measure the time between initiating the change @@ -611,7 +612,7 @@ func TestChangePolicySetting(t *testing.T) { start := time.Now() const wantValueA = "TestValueA" store.SetStrings(source.TestSettingOf(settingA.Key(), wantValueA)) - change := <-policyChanged + change := (<-policyChanged).(*PolicyChange) gotDelay := time.Since(start) // Ensure there is at least a [policyReloadMinDelay] delay between @@ -653,7 +654,7 @@ func TestChangePolicySetting(t *testing.T) { // The callback should be invoked only once, even though the policy setting // has changed N times. - change = <-policyChanged + change = (<-policyChanged).(*PolicyChange) gotDelay = time.Since(start) gotCallbacks := 1 drain: @@ -853,8 +854,8 @@ func TestReplacePolicySource(t *testing.T) { } // Subscribe to the policy change callback. - policyChanged := make(chan *PolicyChange, 1) - unregister := policy.RegisterChangeCallback(func(pc *PolicyChange) { policyChanged <- pc }) + policyChanged := make(chan policyclient.PolicyChange, 1) + unregister := policy.RegisterChangeCallback(func(pc policyclient.PolicyChange) { policyChanged <- pc }) t.Cleanup(unregister) // Now, let's replace the initial store with the new store. From 12ad630128846919908fbd0b08580864b86bb913 Mon Sep 17 00:00:00 2001 From: David Bond Date: Tue, 2 Sep 2025 13:10:03 +0100 Subject: [PATCH 0275/1093] cmd/k8s-operator: allow specifying replicas for connectors (#16721) This commit adds a `replicas` field to the `Connector` custom resource that allows users to specify the number of desired replicas deployed for their connectors. 
This allows users to deploy exit nodes, subnet routers and app connectors in a highly available fashion. Fixes #14020 Signed-off-by: David Bond --- cmd/k8s-operator/connector.go | 41 +- cmd/k8s-operator/connector_test.go | 121 +++++- .../deploy/crds/tailscale.com_connectors.yaml | 47 ++- .../deploy/manifests/operator.yaml | 47 ++- cmd/k8s-operator/ingress.go | 29 +- cmd/k8s-operator/ingress_test.go | 3 +- cmd/k8s-operator/operator_test.go | 13 + cmd/k8s-operator/sts.go | 354 +++++++++++------- cmd/k8s-operator/svc.go | 20 +- cmd/k8s-operator/testutils_test.go | 84 ++++- k8s-operator/api.md | 25 +- k8s-operator/apis/v1alpha1/types_connector.go | 41 +- .../apis/v1alpha1/zz_generated.deepcopy.go | 32 ++ 13 files changed, 660 insertions(+), 197 deletions(-) diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index 8406a1156fc8f..7fa311532238b 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -25,7 +25,6 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -176,6 +175,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge if cn.Spec.Hostname != "" { hostname = string(cn.Spec.Hostname) } + crl := childResourceLabels(cn.Name, a.tsnamespace, "connector") proxyClass := cn.Spec.ProxyClass @@ -188,10 +188,17 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge } } + var replicas int32 = 1 + if cn.Spec.Replicas != nil { + replicas = *cn.Spec.Replicas + } + sts := &tailscaleSTSConfig{ + Replicas: replicas, ParentResourceName: cn.Name, ParentResourceUID: string(cn.UID), Hostname: hostname, + HostnamePrefix: string(cn.Spec.HostnamePrefix), ChildResourceLabels: crl, Tags: cn.Spec.Tags.Stringify(), Connector: &connector{ @@ -219,16 +226,19 @@ func (a 
*ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge } else { a.exitNodes.Remove(cn.UID) } + if cn.Spec.SubnetRouter != nil { a.subnetRouters.Add(cn.GetUID()) } else { a.subnetRouters.Remove(cn.GetUID()) } + if cn.Spec.AppConnector != nil { a.appConnectors.Add(cn.GetUID()) } else { a.appConnectors.Remove(cn.GetUID()) } + a.mu.Unlock() gaugeConnectorSubnetRouterResources.Set(int64(a.subnetRouters.Len())) gaugeConnectorExitNodeResources.Set(int64(a.exitNodes.Len())) @@ -244,21 +254,23 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge return err } - dev, err := a.ssr.DeviceInfo(ctx, crl, logger) + devices, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { return err } - if dev == nil || dev.hostname == "" { - logger.Debugf("no Tailscale hostname known yet, waiting for Connector Pod to finish auth") - // No hostname yet. Wait for the connector pod to auth. - cn.Status.TailnetIPs = nil - cn.Status.Hostname = "" - return nil + cn.Status.Devices = make([]tsapi.ConnectorDevice, len(devices)) + for i, dev := range devices { + cn.Status.Devices[i] = tsapi.ConnectorDevice{ + Hostname: dev.hostname, + TailnetIPs: dev.ips, + } } - cn.Status.TailnetIPs = dev.ips - cn.Status.Hostname = dev.hostname + if len(cn.Status.Devices) > 0 { + cn.Status.Hostname = cn.Status.Devices[0].Hostname + cn.Status.TailnetIPs = cn.Status.Devices[0].TailnetIPs + } return nil } @@ -302,6 +314,15 @@ func (a *ConnectorReconciler) validate(cn *tsapi.Connector) error { if (cn.Spec.SubnetRouter != nil || cn.Spec.ExitNode) && cn.Spec.AppConnector != nil { return errors.New("invalid spec: a Connector that is configured as an app connector must not be also configured as a subnet router or exit node") } + + // These two checks should be caught by the Connector schema validation. 
+ if cn.Spec.Replicas != nil && *cn.Spec.Replicas > 1 && cn.Spec.Hostname != "" { + return errors.New("invalid spec: a Connector that is configured with multiple replicas cannot specify a hostname. Instead, use a hostnamePrefix") + } + if cn.Spec.HostnamePrefix != "" && cn.Spec.Hostname != "" { + return errors.New("invalid spec: a Connect cannot use both a hostname and hostname prefix") + } + if cn.Spec.AppConnector != nil { return validateAppConnector(cn.Spec.AppConnector) } diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index d5829c37fe596..afc7d2d6e3975 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -7,6 +7,8 @@ package main import ( "context" + "strconv" + "strings" "testing" "time" @@ -20,6 +22,7 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" + "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -36,6 +39,7 @@ func TestConnector(t *testing.T) { APIVersion: "tailscale.com/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: ptr.To[int32](1), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, @@ -55,7 +59,8 @@ func TestConnector(t *testing.T) { cl := tstest.NewClock(tstest.ClockOpts{}) cr := &ConnectorReconciler{ - Client: fc, + Client: fc, + recorder: record.NewFakeRecorder(10), ssr: &tailscaleSTSReconciler{ Client: fc, tsClient: ft, @@ -78,6 +83,7 @@ func TestConnector(t *testing.T) { isExitNode: true, subnetRoutes: "10.40.0.0/14", app: kubetypes.AppConnector, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) @@ -94,6 +100,10 @@ func TestConnector(t *testing.T) { cn.Status.IsExitNode = cn.Spec.ExitNode cn.Status.SubnetRoutes = cn.Spec.SubnetRouter.AdvertiseRoutes.Stringify() cn.Status.Hostname = hostname + cn.Status.Devices = []tsapi.ConnectorDevice{{ + Hostname: hostname, + 
TailnetIPs: []string{"127.0.0.1", "::1"}, + }} cn.Status.TailnetIPs = []string{"127.0.0.1", "::1"} expectEqual(t, fc, cn, func(o *tsapi.Connector) { o.Status.Conditions = nil @@ -156,6 +166,7 @@ func TestConnector(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: ptr.To[int32](1), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, @@ -174,6 +185,7 @@ func TestConnector(t *testing.T) { subnetRoutes: "10.40.0.0/14", hostname: "test-connector", app: kubetypes.AppConnector, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) @@ -217,9 +229,11 @@ func TestConnectorWithProxyClass(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: ptr.To[int32](1), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, + ExitNode: true, }, } @@ -260,6 +274,7 @@ func TestConnectorWithProxyClass(t *testing.T) { isExitNode: true, subnetRoutes: "10.40.0.0/14", app: kubetypes.AppConnector, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) @@ -311,6 +326,7 @@ func TestConnectorWithAppConnector(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ + Replicas: ptr.To[int32](1), AppConnector: &tsapi.AppConnector{}, }, } @@ -340,7 +356,7 @@ func TestConnectorWithAppConnector(t *testing.T) { recorder: fr, } - // 1. Connector with app connnector is created and becomes ready + // 1. 
Connector with app connector is created and becomes ready expectReconciled(t, cr, "", "test") fullName, shortName := findGenName(t, fc, "", "test", "connector") opts := configOpts{ @@ -350,6 +366,7 @@ func TestConnectorWithAppConnector(t *testing.T) { hostname: "test-connector", app: kubetypes.AppConnector, isAppConnector: true, + replicas: cn.Spec.Replicas, } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs) @@ -357,6 +374,7 @@ func TestConnectorWithAppConnector(t *testing.T) { cn.ObjectMeta.Finalizers = append(cn.ObjectMeta.Finalizers, "tailscale.com/finalizer") cn.Status.IsAppConnector = true + cn.Status.Devices = []tsapi.ConnectorDevice{} cn.Status.Conditions = []metav1.Condition{{ Type: string(tsapi.ConnectorReady), Status: metav1.ConditionTrue, @@ -368,9 +386,9 @@ func TestConnectorWithAppConnector(t *testing.T) { // 2. Connector with invalid app connector routes has status set to invalid mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { - conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")} + conn.Spec.AppConnector.Routes = tsapi.Routes{"1.2.3.4/5"} }) - cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("1.2.3.4/5")} + cn.Spec.AppConnector.Routes = tsapi.Routes{"1.2.3.4/5"} expectReconciled(t, cr, "", "test") cn.Status.Conditions = []metav1.Condition{{ Type: string(tsapi.ConnectorReady), @@ -383,9 +401,9 @@ func TestConnectorWithAppConnector(t *testing.T) { // 3. 
Connector with valid app connnector routes becomes ready mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { - conn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")} + conn.Spec.AppConnector.Routes = tsapi.Routes{"10.88.2.21/32"} }) - cn.Spec.AppConnector.Routes = tsapi.Routes{tsapi.Route("10.88.2.21/32")} + cn.Spec.AppConnector.Routes = tsapi.Routes{"10.88.2.21/32"} cn.Status.Conditions = []metav1.Condition{{ Type: string(tsapi.ConnectorReady), Status: metav1.ConditionTrue, @@ -395,3 +413,94 @@ func TestConnectorWithAppConnector(t *testing.T) { }} expectReconciled(t, cr, "", "test") } + +func TestConnectorWithMultipleReplicas(t *testing.T) { + cn := &tsapi.Connector{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + UID: types.UID("1234-UID"), + }, + TypeMeta: metav1.TypeMeta{ + Kind: tsapi.ConnectorKind, + APIVersion: "tailscale.io/v1alpha1", + }, + Spec: tsapi.ConnectorSpec{ + Replicas: ptr.To[int32](3), + AppConnector: &tsapi.AppConnector{}, + HostnamePrefix: "test-connector", + }, + } + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(cn). + WithStatusSubresource(cn). + Build() + ft := &fakeTSClient{} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + cl := tstest.NewClock(tstest.ClockOpts{}) + fr := record.NewFakeRecorder(1) + cr := &ConnectorReconciler{ + Client: fc, + clock: cl, + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + recorder: fr, + } + + // 1. Ensure that our connector resource is reconciled. + expectReconciled(t, cr, "", "test") + + // 2. Ensure we have a number of secrets matching the number of replicas. + names := findGenNames(t, fc, "", "test", "connector") + if int32(len(names)) != *cn.Spec.Replicas { + t.Fatalf("expected %d secrets, got %d", *cn.Spec.Replicas, len(names)) + } + + // 3. 
Ensure each device has the correct hostname prefix and ordinal suffix. + for i, name := range names { + expected := expectedSecret(t, fc, configOpts{ + secretName: name, + hostname: string(cn.Spec.HostnamePrefix) + "-" + strconv.Itoa(i), + isAppConnector: true, + parentType: "connector", + namespace: cr.tsnamespace, + }) + + expectEqual(t, fc, expected) + } + + // 4. Ensure the generated stateful set has the matching number of replicas + shortName := strings.TrimSuffix(names[0], "-0") + + var sts appsv1.StatefulSet + if err = fc.Get(t.Context(), types.NamespacedName{Namespace: "operator-ns", Name: shortName}, &sts); err != nil { + t.Fatalf("failed to get StatefulSet %q: %v", shortName, err) + } + + if sts.Spec.Replicas == nil { + t.Fatalf("actual StatefulSet %q does not have replicas set", shortName) + } + + if *sts.Spec.Replicas != *cn.Spec.Replicas { + t.Fatalf("expected %d replicas, got %d", *cn.Spec.Replicas, *sts.Spec.Replicas) + } + + // 5. We'll scale the connector down by 1 replica and make sure its secret is cleaned up + mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { + conn.Spec.Replicas = ptr.To[int32](2) + }) + expectReconciled(t, cr, "", "test") + names = findGenNames(t, fc, "", "test", "connector") + if len(names) != 2 { + t.Fatalf("expected 2 secrets, got %d", len(names)) + } +} diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml index d645e39228062..74d32d53d2199 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml @@ -115,9 +115,19 @@ spec: Connector node. If unset, hostname defaults to -connector. Hostname can contain lower case letters, numbers and dashes, it must not start or end with a dash and must be between 2 - and 63 characters long. + and 63 characters long. 
This field should only be used when creating a connector + with an unspecified number of replicas, or a single replica. type: string pattern: ^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$ + hostnamePrefix: + description: |- + HostnamePrefix specifies the hostname prefix for each + replica. Each device will have the integer number + from its StatefulSet pod appended to this prefix to form the full hostname. + HostnamePrefix can contain lower case letters, numbers and dashes, it + must not start with a dash and must be between 1 and 62 characters long. + type: string + pattern: ^[a-z0-9][a-z0-9-]{0,61}$ proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that @@ -125,6 +135,14 @@ spec: resources created for this Connector. If unset, the operator will create resources with the default configuration. type: string + replicas: + description: |- + Replicas specifies how many devices to create. Set this to enable + high availability for app connectors, subnet routers, or exit nodes. + https://tailscale.com/kb/1115/high-availability. Defaults to 1. + type: integer + format: int32 + minimum: 0 subnetRouter: description: |- SubnetRouter defines subnet routes that the Connector device should @@ -168,6 +186,10 @@ spec: message: A Connector needs to have at least one of exit node, subnet router or app connector configured. - rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))' message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields. + - rule: '!(has(self.hostname) && has(self.replicas) && self.replicas > 1)' + message: The hostname field cannot be specified when replicas is greater than 1. + - rule: '!(has(self.hostname) && has(self.hostnamePrefix))' + message: The hostname and hostnamePrefix fields are mutually exclusive. status: description: |- ConnectorStatus describes the status of the Connector. 
This is set @@ -235,11 +257,32 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + devices: + description: Devices contains information on each device managed by the Connector resource. + type: array + items: + type: object + properties: + hostname: + description: |- + Hostname is the fully qualified domain name of the Connector replica. + If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + node. + type: string + tailnetIPs: + description: |- + TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + assigned to the Connector replica. + type: array + items: + type: string hostname: description: |- Hostname is the fully qualified domain name of the Connector node. If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the - node. + node. When using multiple replicas, this field will be populated with the + first replica's hostname. Use the Hostnames field for the full list + of hostnames. type: string isAppConnector: description: IsAppConnector is set to true if the Connector acts as an app connector. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 5e0cca9b59339..766d7f0d647a9 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -140,9 +140,19 @@ spec: Connector node. If unset, hostname defaults to -connector. Hostname can contain lower case letters, numbers and dashes, it must not start or end with a dash and must be between 2 - and 63 characters long. + and 63 characters long. This field should only be used when creating a connector + with an unspecified number of replicas, or a single replica. pattern: ^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$ type: string + hostnamePrefix: + description: |- + HostnamePrefix specifies the hostname prefix for each + replica. Each device will have the integer number + from its StatefulSet pod appended to this prefix to form the full hostname. 
+ HostnamePrefix can contain lower case letters, numbers and dashes, it + must not start with a dash and must be between 1 and 62 characters long. + pattern: ^[a-z0-9][a-z0-9-]{0,61}$ + type: string proxyClass: description: |- ProxyClass is the name of the ProxyClass custom resource that @@ -150,6 +160,14 @@ spec: resources created for this Connector. If unset, the operator will create resources with the default configuration. type: string + replicas: + description: |- + Replicas specifies how many devices to create. Set this to enable + high availability for app connectors, subnet routers, or exit nodes. + https://tailscale.com/kb/1115/high-availability. Defaults to 1. + format: int32 + minimum: 0 + type: integer subnetRouter: description: |- SubnetRouter defines subnet routes that the Connector device should @@ -194,6 +212,10 @@ spec: rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector) - message: The appConnector field is mutually exclusive with exitNode and subnetRouter fields. rule: '!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))' + - message: The hostname field cannot be specified when replicas is greater than 1. + rule: '!(has(self.hostname) && has(self.replicas) && self.replicas > 1)' + - message: The hostname and hostnamePrefix fields are mutually exclusive. + rule: '!(has(self.hostname) && has(self.hostnamePrefix))' status: description: |- ConnectorStatus describes the status of the Connector. This is set @@ -260,11 +282,32 @@ spec: x-kubernetes-list-map-keys: - type x-kubernetes-list-type: map + devices: + description: Devices contains information on each device managed by the Connector resource. + items: + properties: + hostname: + description: |- + Hostname is the fully qualified domain name of the Connector replica. + If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + node. 
+ type: string + tailnetIPs: + description: |- + TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + assigned to the Connector replica. + items: + type: string + type: array + type: object + type: array hostname: description: |- Hostname is the fully qualified domain name of the Connector node. If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the - node. + node. When using multiple replicas, this field will be populated with the + first replica's hostname. Use the Hostnames field for the full list + of hostnames. type: string isAppConnector: description: IsAppConnector is set to true if the Connector acts as an app connector. diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index d66cf9116f14a..fb11f717de04e 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -212,6 +212,7 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga hostname := hostnameForIngress(ing) sts := &tailscaleSTSConfig{ + Replicas: 1, Hostname: hostname, ParentResourceName: ing.Name, ParentResourceUID: string(ing.UID), @@ -227,27 +228,23 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga sts.ForwardClusterTrafficViaL7IngressProxy = true } - if _, err := a.ssr.Provision(ctx, logger, sts); err != nil { + if _, err = a.ssr.Provision(ctx, logger, sts); err != nil { return fmt.Errorf("failed to provision: %w", err) } - dev, err := a.ssr.DeviceInfo(ctx, crl, logger) + devices, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { return fmt.Errorf("failed to retrieve Ingress HTTPS endpoint status: %w", err) } - if dev == nil || dev.ingressDNSName == "" { - logger.Debugf("no Ingress DNS name known yet, waiting for proxy Pod initialize and start serving Ingress") - // No hostname yet. Wait for the proxy pod to auth. 
- ing.Status.LoadBalancer.Ingress = nil - if err := a.Status().Update(ctx, ing); err != nil { - return fmt.Errorf("failed to update ingress status: %w", err) + + ing.Status.LoadBalancer.Ingress = nil + for _, dev := range devices { + if dev.ingressDNSName == "" { + continue } - return nil - } - logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName) - ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ - { + logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName) + ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networkingv1.IngressLoadBalancerIngress{ Hostname: dev.ingressDNSName, Ports: []networkingv1.IngressPortStatus{ { @@ -255,11 +252,13 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga Port: 443, }, }, - }, + }) } - if err := a.Status().Update(ctx, ing); err != nil { + + if err = a.Status().Update(ctx, ing); err != nil { return fmt.Errorf("failed to update ingress status: %w", err) } + return nil } diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index fe4d90c785c47..f5e23cfe92043 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -57,6 +57,7 @@ func TestTailscaleIngress(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "ingress") opts := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -766,7 +767,7 @@ func ingress() *networkingv1.Ingress { ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", - UID: types.UID("1234-UID"), + UID: "1234-UID", }, Spec: networkingv1.IngressSpec{ IngressClassName: ptr.To("tailscale"), diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 1f700f13a4fc0..50f8738cefc39 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -122,6 +122,7 @@ func TestLoadBalancerClass(t *testing.T) { 
fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -260,6 +261,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -372,6 +374,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -623,6 +626,7 @@ func TestAnnotations(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -729,6 +733,7 @@ func TestAnnotationIntoLB(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -859,6 +864,7 @@ func TestLBIntoAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -999,6 +1005,7 @@ func TestCustomHostname(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -1111,6 +1118,7 @@ func TestCustomPriorityClassName(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -1359,6 +1367,7 @@ func TestProxyClassForService(t *testing.T) { expectReconciled(t, sr, "default", 
"test") fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -1454,6 +1463,7 @@ func TestDefaultLoadBalancer(t *testing.T) { expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -1509,6 +1519,7 @@ func TestProxyFirewallMode(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", @@ -1800,6 +1811,7 @@ func Test_authKeyRemoval(t *testing.T) { hostname: "default-test", clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, + replicas: ptr.To[int32](1), } expectEqual(t, fc, expectedSecret(t, fc, opts)) @@ -1867,6 +1879,7 @@ func Test_externalNameService(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ + replicas: ptr.To[int32](1), stsName: shortName, secretName: fullName, namespace: "default", diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 911d0283242d7..9a87d26438b8a 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -13,6 +13,7 @@ import ( "fmt" "net/http" "os" + "path" "slices" "strconv" "strings" @@ -20,6 +21,7 @@ import ( "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -114,6 +116,7 @@ var ( ) type tailscaleSTSConfig struct { + Replicas int32 ParentResourceName string ParentResourceUID string ChildResourceLabels map[string]string @@ -144,6 +147,10 @@ type tailscaleSTSConfig struct { // LoginServer denotes the URL of the control plane that should be 
used by the proxy. LoginServer string + + // HostnamePrefix specifies the desired prefix for the device's hostname. The hostname will be suffixed with the + // ordinal number generated by the StatefulSet. + HostnamePrefix string } type connector struct { @@ -205,11 +212,12 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga } sts.ProxyClass = proxyClass - secretName, _, err := a.createOrGetSecret(ctx, logger, sts, hsvc) + secretNames, err := a.provisionSecrets(ctx, logger, sts, hsvc) if err != nil { return nil, fmt.Errorf("failed to create or get API key secret: %w", err) } - _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretName) + + _, err = a.reconcileSTS(ctx, logger, sts, hsvc, secretNames) if err != nil { return nil, fmt.Errorf("failed to reconcile statefulset: %w", err) } @@ -239,6 +247,7 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare if err != nil { return false, fmt.Errorf("getting statefulset: %w", err) } + if sts != nil { if !sts.GetDeletionTimestamp().IsZero() { // Deletion in progress, check again later. 
We'll get another @@ -246,29 +255,39 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare logger.Debugf("waiting for statefulset %s/%s deletion", sts.GetNamespace(), sts.GetName()) return false, nil } - err := a.DeleteAllOf(ctx, &appsv1.StatefulSet{}, client.InNamespace(a.operatorNamespace), client.MatchingLabels(labels), client.PropagationPolicy(metav1.DeletePropagationForeground)) - if err != nil { + + options := []client.DeleteAllOfOption{ + client.InNamespace(a.operatorNamespace), + client.MatchingLabels(labels), + client.PropagationPolicy(metav1.DeletePropagationForeground), + } + + if err = a.DeleteAllOf(ctx, &appsv1.StatefulSet{}, options...); err != nil { return false, fmt.Errorf("deleting statefulset: %w", err) } + logger.Debugf("started deletion of statefulset %s/%s", sts.GetNamespace(), sts.GetName()) return false, nil } - dev, err := a.DeviceInfo(ctx, labels, logger) + devices, err := a.DeviceInfo(ctx, labels, logger) if err != nil { return false, fmt.Errorf("getting device info: %w", err) } - if dev != nil && dev.id != "" { - logger.Debugf("deleting device %s from control", string(dev.id)) - if err := a.tsClient.DeleteDevice(ctx, string(dev.id)); err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { - logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) + + for _, dev := range devices { + if dev.id != "" { + logger.Debugf("deleting device %s from control", string(dev.id)) + if err = a.tsClient.DeleteDevice(ctx, string(dev.id)); err != nil { + errResp := &tailscale.ErrResponse{} + if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { + logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) + } else { + return false, fmt.Errorf("deleting device: %w", err) + } } else { - return false, fmt.Errorf("deleting 
device: %w", err) + logger.Debugf("device %s deleted from control", string(dev.id)) } - } else { - logger.Debugf("device %s deleted from control", string(dev.id)) } } @@ -286,9 +305,10 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare tsNamespace: a.operatorNamespace, proxyType: typ, } - if err := maybeCleanupMetricsResources(ctx, mo, a.Client); err != nil { + if err = maybeCleanupMetricsResources(ctx, mo, a.Client); err != nil { return false, fmt.Errorf("error cleaning up metrics resources: %w", err) } + return true, nil } @@ -339,91 +359,139 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) } -func (a *tailscaleSTSReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) (secretName string, configs tailscaledConfigs, _ error) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - // Hardcode a -0 suffix so that in future, if we support - // multiple StatefulSet replicas, we can provision -N for - // those. - Name: hsvc.Name + "-0", - Namespace: a.operatorNamespace, - Labels: stsC.ChildResourceLabels, - }, - } - var orig *corev1.Secret // unmodified copy of secret - if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil { - logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName()) - orig = secret.DeepCopy() - } else if !apierrors.IsNotFound(err) { - return "", nil, err - } +func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) ([]string, error) { + secretNames := make([]string, stsC.Replicas) + + // Start by ensuring we have Secrets for the desired number of replicas. This will handle both creating and scaling + // up a StatefulSet. 
+ for i := range stsC.Replicas { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", hsvc.Name, i), + Namespace: a.operatorNamespace, + Labels: stsC.ChildResourceLabels, + }, + } - var authKey string - if orig == nil { - // Initially it contains only tailscaled config, but when the - // proxy starts, it will also store there the state, certs and - // ACME account key. - sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, stsC.ChildResourceLabels) - if err != nil { - return "", nil, err + // If we only have a single replica, use the hostname verbatim. Otherwise, use the hostname prefix and add + // an ordinal suffix. + hostname := stsC.Hostname + if stsC.HostnamePrefix != "" { + hostname = fmt.Sprintf("%s-%d", stsC.HostnamePrefix, i) } - if sts != nil { - // StatefulSet exists, so we have already created the secret. - // If the secret is missing, they should delete the StatefulSet. - logger.Errorf("Tailscale proxy secret doesn't exist, but the corresponding StatefulSet %s/%s already does. Something is wrong, please delete the StatefulSet.", sts.GetNamespace(), sts.GetName()) - return "", nil, nil + + secretNames[i] = secret.Name + + var orig *corev1.Secret // unmodified copy of secret + if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil { + logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName()) + orig = secret.DeepCopy() + } else if !apierrors.IsNotFound(err) { + return nil, err } - // Create API Key secret which is going to be used by the statefulset - // to authenticate with Tailscale. - logger.Debugf("creating authkey for new tailscale proxy") - tags := stsC.Tags - if len(tags) == 0 { - tags = a.defaultTags + + var ( + authKey string + err error + ) + if orig == nil { + // Create API Key secret which is going to be used by the statefulset + // to authenticate with Tailscale. 
+ logger.Debugf("creating authkey for new tailscale proxy") + tags := stsC.Tags + if len(tags) == 0 { + tags = a.defaultTags + } + authKey, err = newAuthKey(ctx, a.tsClient, tags) + if err != nil { + return nil, err + } } - authKey, err = newAuthKey(ctx, a.tsClient, tags) + + configs, err := tailscaledConfig(stsC, authKey, orig, hostname) if err != nil { - return "", nil, err + return nil, fmt.Errorf("error creating tailscaled config: %w", err) + } + + latest := tailcfg.CapabilityVersion(-1) + var latestConfig ipn.ConfigVAlpha + for key, val := range configs { + fn := tsoperator.TailscaledConfigFileName(key) + b, err := json.Marshal(val) + if err != nil { + return nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + } + + mak.Set(&secret.StringData, fn, string(b)) + if key > latest { + latest = key + latestConfig = val + } + } + + if stsC.ServeConfig != nil { + j, err := json.Marshal(stsC.ServeConfig) + if err != nil { + return nil, err + } + + mak.Set(&secret.StringData, "serve-config", string(j)) + } + + if orig != nil && !apiequality.Semantic.DeepEqual(latest, orig) { + logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) + if err = a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { + return nil, err + } + } else { + logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) + if err = a.Create(ctx, secret); err != nil { + return nil, err + } } } - configs, err := tailscaledConfig(stsC, authKey, orig) - if err != nil { - return "", nil, fmt.Errorf("error creating tailscaled config: %w", err) + + // Next, we check if we have additional secrets and remove them and their associated device. This happens when we + // scale an StatefulSet down. 
+ var secrets corev1.SecretList + if err := a.List(ctx, &secrets, client.InNamespace(a.operatorNamespace), client.MatchingLabels(stsC.ChildResourceLabels)); err != nil { + return nil, err } - latest := tailcfg.CapabilityVersion(-1) - var latestConfig ipn.ConfigVAlpha - for key, val := range configs { - fn := tsoperator.TailscaledConfigFileName(key) - b, err := json.Marshal(val) - if err != nil { - return "", nil, fmt.Errorf("error marshalling tailscaled config: %w", err) + + for _, secret := range secrets.Items { + var ordinal int32 + if _, err := fmt.Sscanf(secret.Name, hsvc.Name+"-%d", &ordinal); err != nil { + return nil, err } - mak.Set(&secret.StringData, fn, string(b)) - if key > latest { - latest = key - latestConfig = val + + if ordinal < stsC.Replicas { + continue } - } - if stsC.ServeConfig != nil { - j, err := json.Marshal(stsC.ServeConfig) + dev, err := deviceInfo(&secret, "", logger) if err != nil { - return "", nil, err + return nil, err } - mak.Set(&secret.StringData, "serve-config", string(j)) - } - if orig != nil { - logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) - if err := a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { - return "", nil, err + if dev != nil && dev.id != "" { + var errResp *tailscale.ErrResponse + + err = a.tsClient.DeleteDevice(ctx, string(dev.id)) + switch { + case errors.As(err, &errResp) && errResp.Status == http.StatusNotFound: + // This device has possibly already been deleted in the admin console. So we can ignore this + // and move on to removing the secret. 
+ case err != nil: + return nil, err + } } - } else { - logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) - if err := a.Create(ctx, secret); err != nil { - return "", nil, err + + if err = a.Delete(ctx, &secret); err != nil { + return nil, err } } - return secret.Name, configs, nil + + return secretNames, nil } // sanitizeConfigBytes returns ipn.ConfigVAlpha in string form with redacted @@ -443,22 +511,38 @@ func sanitizeConfigBytes(c ipn.ConfigVAlpha) string { // It retrieves info from a Kubernetes Secret labeled with the provided labels. Capver is cross-validated against the // Pod to ensure that it is the currently running Pod that set the capver. If the Pod or the Secret does not exist, the // returned capver is -1. Either of device ID, hostname and IPs can be empty string if not found in the Secret. -func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) (dev *device, err error) { - sec, err := getSingleObject[corev1.Secret](ctx, a.Client, a.operatorNamespace, childLabels) - if err != nil { - return dev, err - } - if sec == nil { - return dev, nil +func (a *tailscaleSTSReconciler) DeviceInfo(ctx context.Context, childLabels map[string]string, logger *zap.SugaredLogger) ([]*device, error) { + var secrets corev1.SecretList + if err := a.List(ctx, &secrets, client.InNamespace(a.operatorNamespace), client.MatchingLabels(childLabels)); err != nil { + return nil, err } - podUID := "" - pod := new(corev1.Pod) - if err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod); err != nil && !apierrors.IsNotFound(err) { - return dev, err - } else if err == nil { - podUID = string(pod.ObjectMeta.UID) + + devices := make([]*device, 0) + for _, sec := range secrets.Items { + podUID := "" + pod := new(corev1.Pod) + err := a.Get(ctx, types.NamespacedName{Namespace: sec.Namespace, Name: sec.Name}, pod) + switch { + case 
apierrors.IsNotFound(err): + // If the Pod is not found, we won't have its UID. We can still get the device information but the + // capability version will be unknown. + case err != nil: + return nil, err + default: + podUID = string(pod.ObjectMeta.UID) + } + + info, err := deviceInfo(&sec, podUID, logger) + if err != nil { + return nil, err + } + + if info != nil { + devices = append(devices, info) + } } - return deviceInfo(sec, podUID, logger) + + return devices, nil } // device contains tailscale state of a proxy device as gathered from its tailscale state Secret. @@ -534,7 +618,7 @@ var proxyYaml []byte //go:embed deploy/manifests/userspace-proxy.yaml var userspaceProxyYaml []byte -func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecret string) (*appsv1.StatefulSet, error) { +func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig, headlessSvc *corev1.Service, proxySecrets []string) (*appsv1.StatefulSet, error) { ss := new(appsv1.StatefulSet) if sts.ServeConfig != nil && sts.ForwardClusterTrafficViaL7IngressProxy != true { // If forwarding cluster traffic via is required we need non-userspace + NET_ADMIN + forwarding if err := yaml.Unmarshal(userspaceProxyYaml, &ss); err != nil { @@ -573,18 +657,22 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S pod.Labels[key] = val // sync StatefulSet labels to Pod to make it easier for users to select the Pod } + if sts.Replicas > 0 { + ss.Spec.Replicas = ptr.To(sts.Replicas) + } + // Generic containerboot configuration options. container.Env = append(container.Env, corev1.EnvVar{ Name: "TS_KUBE_SECRET", - Value: proxySecret, + Value: "$(POD_NAME)", }, corev1.EnvVar{ - // New style is in the form of cap-.hujson. 
Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", - Value: "/etc/tsconfig", + Value: "/etc/tsconfig/$(POD_NAME)", }, ) + if sts.ForwardClusterTrafficViaL7IngressProxy { container.Env = append(container.Env, corev1.EnvVar{ Name: "EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS", @@ -592,20 +680,23 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S }) } - configVolume := corev1.Volume{ - Name: "tailscaledconfig", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: proxySecret, + for i, secret := range proxySecrets { + configVolume := corev1.Volume{ + Name: "tailscaledconfig-" + strconv.Itoa(i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret, + }, }, - }, + } + + pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume) + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ + Name: fmt.Sprintf("tailscaledconfig-%d", i), + ReadOnly: true, + MountPath: path.Join("/etc/tsconfig/", secret), + }) } - pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, configVolume) - container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ - Name: "tailscaledconfig", - ReadOnly: true, - MountPath: "/etc/tsconfig", - }) if a.tsFirewallMode != "" { container.Env = append(container.Env, corev1.EnvVar{ @@ -643,22 +734,27 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S } else if sts.ServeConfig != nil { container.Env = append(container.Env, corev1.EnvVar{ Name: "TS_SERVE_CONFIG", - Value: "/etc/tailscaled/serve-config", - }) - container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ - Name: "serve-config", - ReadOnly: true, - MountPath: "/etc/tailscaled", + Value: "/etc/tailscaled/$(POD_NAME)/serve-config", }) - pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ - Name: "serve-config", - VolumeSource: corev1.VolumeSource{ - Secret: 
&corev1.SecretVolumeSource{ - SecretName: proxySecret, - Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}, + + for i, secret := range proxySecrets { + container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ + Name: "serve-config-" + strconv.Itoa(i), + ReadOnly: true, + MountPath: path.Join("/etc/tailscaled", secret), + }) + + pod.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "serve-config-" + strconv.Itoa(i), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret, + Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}, + }, }, - }, - }) + }) + } + } app, err := appInfoForProxy(sts) @@ -918,13 +1014,13 @@ func isMainContainer(c *corev1.Container) bool { // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy // state and auth key and returns tailscaled config files for currently supported proxy versions. 
-func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret) (tailscaledConfigs, error) { +func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret, hostname string) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", AcceptRoutes: "false", // AcceptRoutes defaults to true Locked: "false", - Hostname: &stsC.Hostname, + Hostname: &hostname, NoStatefulFiltering: "true", // Explicitly enforce default value, see #14216 AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, } diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 52c8bec7ff32a..51ad1aea3c808 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -23,7 +23,6 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -265,6 +264,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga } sts := &tailscaleSTSConfig{ + Replicas: 1, ParentResourceName: svc.Name, ParentResourceUID: string(svc.UID), Hostname: nameForService(svc), @@ -332,11 +332,12 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } - dev, err := a.ssr.DeviceInfo(ctx, crl, logger) + devices, err := a.ssr.DeviceInfo(ctx, crl, logger) if err != nil { return fmt.Errorf("failed to get device ID: %w", err) } - if dev == nil || dev.hostname == "" { + + if len(devices) == 0 || devices[0].hostname == "" { msg := "no Tailscale hostname known yet, waiting for proxy pod to finish auth" logger.Debug(msg) // No hostname yet. Wait for the proxy pod to auth. 
@@ -345,26 +346,29 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } + dev := devices[0] logger.Debugf("setting Service LoadBalancer status to %q, %s", dev.hostname, strings.Join(dev.ips, ", ")) - ingress := []corev1.LoadBalancerIngress{ - {Hostname: dev.hostname}, - } + svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{ + Hostname: dev.hostname, + }) + clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP) if err != nil { msg := fmt.Sprintf("failed to parse cluster IP: %v", err) tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionFalse, reasonProxyFailed, msg, a.clock, logger) return errors.New(msg) } + for _, ip := range dev.ips { addr, err := netip.ParseAddr(ip) if err != nil { continue } if addr.Is4() == clusterIPAddr.Is4() { // only add addresses of the same family - ingress = append(ingress, corev1.LoadBalancerIngress{IP: ip}) + svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{IP: ip}) } } - svc.Status.LoadBalancer.Ingress = ingress + tsoperator.SetServiceCondition(svc, tsapi.ProxyReady, metav1.ConditionTrue, reasonProxyCreated, reasonProxyCreated, a.clock, logger) return nil } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 6ae32d6fbac13..b4c468c8e8e94 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -11,6 +11,7 @@ import ( "fmt" "net/http" "net/netip" + "path" "reflect" "strings" "sync" @@ -69,9 +70,9 @@ type configOpts struct { shouldRemoveAuthKey bool secretExtraData map[string][]byte resourceVersion string - - enableMetrics bool - serviceMonitorLabels tsapi.Labels + replicas *int32 + enableMetrics bool + serviceMonitorLabels tsapi.Labels } func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.StatefulSet { @@ -88,8 +89,8 @@ func expectedSTS(t *testing.T, cl client.Client, opts 
configOpts) *appsv1.Statef {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, - {Name: "TS_KUBE_SECRET", Value: opts.secretName}, - {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig"}, + {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, + {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, }, SecurityContext: &corev1.SecurityContext{ Privileged: ptr.To(true), @@ -106,7 +107,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef var volumes []corev1.Volume volumes = []corev1.Volume{ { - Name: "tailscaledconfig", + Name: "tailscaledconfig-0", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: opts.secretName, @@ -115,9 +116,9 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef }, } tsContainer.VolumeMounts = []corev1.VolumeMount{{ - Name: "tailscaledconfig", + Name: "tailscaledconfig-0", ReadOnly: true, - MountPath: "/etc/tsconfig", + MountPath: "/etc/tsconfig/" + opts.secretName, }} if opts.firewallMode != "" { tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ @@ -154,10 +155,21 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef if opts.serveConfig != nil { tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_SERVE_CONFIG", - Value: "/etc/tailscaled/serve-config", + Value: "/etc/tailscaled/$(POD_NAME)/serve-config", + }) + volumes = 
append(volumes, corev1.Volume{ + Name: "serve-config-0", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: opts.secretName, + Items: []corev1.KeyToPath{{ + Key: "serve-config", + Path: "serve-config", + }}, + }, + }, }) - volumes = append(volumes, corev1.Volume{Name: "serve-config", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}}) - tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"}) + tsContainer.VolumeMounts = append(tsContainer.VolumeMounts, corev1.VolumeMount{Name: "serve-config-0", ReadOnly: true, MountPath: path.Join("/etc/tailscaled", opts.secretName)}) } tsContainer.Env = append(tsContainer.Env, corev1.EnvVar{ Name: "TS_INTERNAL_APP", @@ -202,7 +214,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef }, }, Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To[int32](1), + Replicas: opts.replicas, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "1234-UID"}, }, @@ -266,15 +278,15 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps {Name: "POD_IP", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "status.podIP"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, - {Name: "TS_KUBE_SECRET", Value: opts.secretName}, - {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: 
"/etc/tsconfig"}, - {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/serve-config"}, + {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, + {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, + {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/$(POD_NAME)/serve-config"}, {Name: "TS_INTERNAL_APP", Value: opts.app}, }, ImagePullPolicy: "Always", VolumeMounts: []corev1.VolumeMount{ - {Name: "tailscaledconfig", ReadOnly: true, MountPath: "/etc/tsconfig"}, - {Name: "serve-config", ReadOnly: true, MountPath: "/etc/tailscaled"}, + {Name: "tailscaledconfig-0", ReadOnly: true, MountPath: path.Join("/etc/tsconfig", opts.secretName)}, + {Name: "serve-config-0", ReadOnly: true, MountPath: path.Join("/etc/tailscaled", opts.secretName)}, }, } if opts.enableMetrics { @@ -302,16 +314,22 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps } volumes := []corev1.Volume{ { - Name: "tailscaledconfig", + Name: "tailscaledconfig-0", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: opts.secretName, }, }, }, - {Name: "serve-config", + { + Name: "serve-config-0", VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{SecretName: opts.secretName, Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}}}}, + Secret: &corev1.SecretVolumeSource{ + SecretName: opts.secretName, + Items: []corev1.KeyToPath{{Key: "serve-config", Path: "serve-config"}}, + }, + }, + }, } ss := &appsv1.StatefulSet{ TypeMeta: metav1.TypeMeta{ @@ -592,6 +610,32 @@ func findGenName(t *testing.T, client client.Client, ns, name, typ string) (full return s.GetName(), strings.TrimSuffix(s.GetName(), "-0") } +func findGenNames(t *testing.T, cl client.Client, ns, name, typ string) []string { + t.Helper() + labels := map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: name, + LabelParentNamespace: ns, + LabelParentType: typ, + } + + var list corev1.SecretList + if err := 
cl.List(t.Context(), &list, client.InNamespace(ns), client.MatchingLabels(labels)); err != nil { + t.Fatalf("finding secrets for %q: %v", name, err) + } + + if len(list.Items) == 0 { + t.Fatalf("no secrets found for %q %s %+#v", name, ns, labels) + } + + names := make([]string, len(list.Items)) + for i, secret := range list.Items { + names[i] = secret.GetName() + } + + return names +} + func mustCreate(t *testing.T, client client.Client, obj client.Object) { t.Helper() if err := client.Create(context.Background(), obj); err != nil { diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 93a024b31d3c9..79c8469e11bbc 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -81,6 +81,23 @@ _Appears in:_ | `status` _[ConnectorStatus](#connectorstatus)_ | ConnectorStatus describes the status of the Connector. This is set
        and managed by the Tailscale operator. | | | +#### ConnectorDevice + + + + + + + +_Appears in:_ +- [ConnectorStatus](#connectorstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector replica.
        If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
        node. | | | +| `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
        assigned to the Connector replica. | | | + + #### ConnectorList @@ -115,11 +132,13 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `tags` _[Tags](#tags)_ | Tags that the Tailscale node will be tagged with.
        Defaults to [tag:k8s].
        To autoapprove the subnet routes or exit node defined by a Connector,
        you can configure Tailscale ACLs to give these tags the necessary
        permissions.
        See https://tailscale.com/kb/1337/acl-syntax#autoapprovers.
        If you specify custom tags here, you must also make the operator an owner of these tags.
        See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
        Tags cannot be changed once a Connector node has been created.
        Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
        Type: string
        | -| `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the
        Connector node. If unset, hostname defaults to name>-connector. Hostname can contain lower case letters, numbers and
        dashes, it must not start or end with a dash and must be between 2
        and 63 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
        Type: string
        | +| `hostname` _[Hostname](#hostname)_ | Hostname is the tailnet hostname that should be assigned to the
        Connector node. If unset, hostname defaults to name>-connector. Hostname can contain lower case letters, numbers and
        dashes, it must not start or end with a dash and must be between 2
        and 63 characters long. This field should only be used when creating a connector
        with an unspecified number of replicas, or a single replica. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`
        Type: string
        | +| `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix specifies the hostname prefix for each
        replica. Each device will have the integer number
        from its StatefulSet pod appended to this prefix to form the full hostname.
        HostnamePrefix can contain lower case letters, numbers and dashes, it
        must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
        Type: string
        | | `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that
        contains configuration options that should be applied to the
        resources created for this Connector. If unset, the operator will
        create resources with the default configuration. | | | | `subnetRouter` _[SubnetRouter](#subnetrouter)_ | SubnetRouter defines subnet routes that the Connector device should
        expose to tailnet as a Tailscale subnet router.
        https://tailscale.com/kb/1019/subnets/
        If this field is unset, the device does not get configured as a Tailscale subnet router.
        This field is mutually exclusive with the appConnector field. | | | | `appConnector` _[AppConnector](#appconnector)_ | AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
        configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
        Connector does not act as an app connector.
        Note that you will need to manually configure the permissions and the domains for the app connector via the
        Admin panel.
        Note also that the main tested and supported use case of this config option is to deploy an app connector on
        Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
        cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
        tested or optimised for.
        If you are using the app connector to access SaaS applications because you need a predictable egress IP that
        can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
        via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
        device with a static IP address.
        https://tailscale.com/kb/1281/app-connectors | | | | `exitNode` _boolean_ | ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
        This field is mutually exclusive with the appConnector field.
        https://tailscale.com/kb/1103/exit-nodes | | | +| `replicas` _integer_ | Replicas specifies how many devices to create. Set this to enable
        high availability for app connectors, subnet routers, or exit nodes.
        https://tailscale.com/kb/1115/high-availability. Defaults to 1. | | Minimum: 0
        | #### ConnectorStatus @@ -140,7 +159,8 @@ _Appears in:_ | `isExitNode` _boolean_ | IsExitNode is set to true if the Connector acts as an exit node. | | | | `isAppConnector` _boolean_ | IsAppConnector is set to true if the Connector acts as an app connector. | | | | `tailnetIPs` _string array_ | TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6)
        assigned to the Connector node. | | | -| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector node.
        If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
        node. | | | +| `hostname` _string_ | Hostname is the fully qualified domain name of the Connector node.
        If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the
        node. When using multiple replicas, this field will be populated with the
        first replica's hostname. Use the Hostnames field for the full list
        of hostnames. | | | +| `devices` _[ConnectorDevice](#connectordevice) array_ | Devices contains information on each device managed by the Connector resource. | | | #### Container @@ -324,6 +344,7 @@ _Validation:_ - Type: string _Appears in:_ +- [ConnectorSpec](#connectorspec) - [ProxyGroupSpec](#proxygroupspec) diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index ce6a1411b9ea8..58457500f6c34 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -59,6 +59,8 @@ type ConnectorList struct { // ConnectorSpec describes a Tailscale node to be deployed in the cluster. // +kubebuilder:validation:XValidation:rule="has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector)",message="A Connector needs to have at least one of exit node, subnet router or app connector configured." // +kubebuilder:validation:XValidation:rule="!((has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true)) && has(self.appConnector))",message="The appConnector field is mutually exclusive with exitNode and subnetRouter fields." +// +kubebuilder:validation:XValidation:rule="!(has(self.hostname) && has(self.replicas) && self.replicas > 1)",message="The hostname field cannot be specified when replicas is greater than 1." +// +kubebuilder:validation:XValidation:rule="!(has(self.hostname) && has(self.hostnamePrefix))",message="The hostname and hostnamePrefix fields are mutually exclusive." type ConnectorSpec struct { // Tags that the Tailscale node will be tagged with. // Defaults to [tag:k8s]. @@ -76,9 +78,19 @@ type ConnectorSpec struct { // Connector node. If unset, hostname defaults to -connector. Hostname can contain lower case letters, numbers and // dashes, it must not start or end with a dash and must be between 2 - // and 63 characters long. + // and 63 characters long. 
This field should only be used when creating a connector + // with an unspecified number of replicas, or a single replica. // +optional Hostname Hostname `json:"hostname,omitempty"` + + // HostnamePrefix specifies the hostname prefix for each + // replica. Each device will have the integer number + // from its StatefulSet pod appended to this prefix to form the full hostname. + // HostnamePrefix can contain lower case letters, numbers and dashes, it + // must not start with a dash and must be between 1 and 62 characters long. + // +optional + HostnamePrefix HostnamePrefix `json:"hostnamePrefix,omitempty"` + // ProxyClass is the name of the ProxyClass custom resource that // contains configuration options that should be applied to the // resources created for this Connector. If unset, the operator will @@ -108,11 +120,19 @@ type ConnectorSpec struct { // https://tailscale.com/kb/1281/app-connectors // +optional AppConnector *AppConnector `json:"appConnector,omitempty"` + // ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false. // This field is mutually exclusive with the appConnector field. // https://tailscale.com/kb/1103/exit-nodes // +optional ExitNode bool `json:"exitNode"` + + // Replicas specifies how many devices to create. Set this to enable + // high availability for app connectors, subnet routers, or exit nodes. + // https://tailscale.com/kb/1115/high-availability. Defaults to 1. + // +optional + // +kubebuilder:validation:Minimum=0 + Replicas *int32 `json:"replicas,omitempty"` } // SubnetRouter defines subnet routes that should be exposed to tailnet via a @@ -197,9 +217,26 @@ type ConnectorStatus struct { TailnetIPs []string `json:"tailnetIPs,omitempty"` // Hostname is the fully qualified domain name of the Connector node. // If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the - // node. + // node. 
When using multiple replicas, this field will be populated with the + // first replica's hostname. Use the Hostnames field for the full list + // of hostnames. // +optional Hostname string `json:"hostname,omitempty"` + // Devices contains information on each device managed by the Connector resource. + // +optional + Devices []ConnectorDevice `json:"devices"` +} + +type ConnectorDevice struct { + // Hostname is the fully qualified domain name of the Connector replica. + // If MagicDNS is enabled in your tailnet, it is the MagicDNS name of the + // node. + // +optional + Hostname string `json:"hostname"` + // TailnetIPs is the set of tailnet IP addresses (both IPv4 and IPv6) + // assigned to the Connector replica. + // +optional + TailnetIPs []string `json:"tailnetIPs,omitempty"` } type ConditionType string diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 6586c13546f4f..d7a90ad0fd895 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -60,6 +60,26 @@ func (in *Connector) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectorDevice) DeepCopyInto(out *ConnectorDevice) { + *out = *in + if in.TailnetIPs != nil { + in, out := &in.TailnetIPs, &out.TailnetIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorDevice. +func (in *ConnectorDevice) DeepCopy() *ConnectorDevice { + if in == nil { + return nil + } + out := new(ConnectorDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ConnectorList) DeepCopyInto(out *ConnectorList) { *out = *in @@ -110,6 +130,11 @@ func (in *ConnectorSpec) DeepCopyInto(out *ConnectorSpec) { *out = new(AppConnector) (*in).DeepCopyInto(*out) } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorSpec. @@ -137,6 +162,13 @@ func (in *ConnectorStatus) DeepCopyInto(out *ConnectorStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]ConnectorDevice, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectorStatus. From 61d3693e61072dea3899d860f99a0c0b91255b1a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 08:21:47 -0700 Subject: [PATCH 0276/1093] cmd/tailscale/cli: add a debug command to force a risky action For testing risky action flows. 
Updates #15445 Change-Id: Id81e54678a1fe5ccedb5dd9c6542ff48c162b349 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/debug.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index fb062fd17c7aa..6fe15b238ca46 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -374,6 +374,17 @@ func debugCmd() *ffcli.Command { ShortHelp: "Print the current set of candidate peer relay servers", Exec: runPeerRelayServers, }, + { + Name: "test-risk", + ShortUsage: "tailscale debug test-risk", + ShortHelp: "Do a fake risky action", + Exec: runTestRisk, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("test-risk") + fs.StringVar(&testRiskArgs.acceptedRisk, "accept-risk", "", "comma-separated list of accepted risks") + return fs + })(), + }, }...), } } @@ -1403,3 +1414,18 @@ func runPeerRelayServers(ctx context.Context, args []string) error { e.Encode(v) return nil } + +var testRiskArgs struct { + acceptedRisk string +} + +func runTestRisk(ctx context.Context, args []string) error { + if len(args) > 0 { + return errors.New("unexpected arguments") + } + if err := presentRiskToUser("test-risk", "This is a test risky action.", testRiskArgs.acceptedRisk); err != nil { + return err + } + fmt.Println("did-test-risky-action") + return nil +} From 0f5d3969cad44527d371c4f0b0403b4c305bd1ac Mon Sep 17 00:00:00 2001 From: nikiUppal-TS Date: Tue, 2 Sep 2025 11:26:10 -0500 Subject: [PATCH 0277/1093] tailcfg: add tailnet display name field (#16907) Updates the NodeCapabilities to contain Tailnet Display Name Updates tailscale/corp#30462 Signed-off-by: nikiUppal-TS --- tailcfg/tailcfg.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 6383af486f414..94d0b19d5c700 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2656,6 +2656,14 @@ const ( // NodeAttrTrafficSteering configures the node to use the traffic // steering 
subsystem for via routes. See tailscale/corp#29966. NodeAttrTrafficSteering NodeCapability = "traffic-steering" + + // NodeAttrTailnetDisplayName is an optional alternate name for the tailnet + // to be displayed to the user. + // If empty or absent, a default is used. + // If this value is present and set by a user this will only include letters, + // numbers, apostrophe, spaces, and hyphens. This may not be true for the default. + // Values can look like "foo.com" or "Foo's Test Tailnet - Staging". + NodeAttrTailnetDisplayName NodeCapability = "tailnet-display-name" ) // SetDNSRequest is a request to add a DNS record. From dbc54addd0170fadcb7271db550f72887721fc9c Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 1 Sep 2025 15:02:24 +0000 Subject: [PATCH 0278/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/licenses/android.md b/licenses/android.md index 0e68f0caca238..881f3ed3df9ea 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -12,7 +12,7 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/go-tpm](https://pkg.go.dev/github.com/google/go-tpm) ([Apache-2.0](https://github.com/google/go-tpm/blob/v0.9.4/LICENSE)) From 42a215e12adb3a4da9012de9e450faecc24f88dd Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Tue, 2 Sep 2025 09:25:21 -0500 Subject: [PATCH 0279/1093] cmd/tailscale/cli: prompt for y/n when attempting risky action Previously, when attempting a risky action, the CLI printed a 5 second countdown saying "Continuing in 5 seconds...". When the countdown finished, the CLI aborted rather than continuing. To avoid confusion, but also avoid accidentally continuing if someone (or an automated process) fails to manually abort within the countdown, we now explicitly prompt for a y/n response on whether or not to continue. 
Updates #15445 Co-authored-by: Kot C Signed-off-by: Percy Wegmann --- cmd/tailscale/cli/risks.go | 29 ++++------------------------- 1 file changed, 4 insertions(+), 25 deletions(-) diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index 9b03025a83a0b..dfde87f640a16 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -7,15 +7,11 @@ import ( "context" "errors" "flag" - "fmt" - "os" - "os/signal" "runtime" "strings" - "syscall" - "time" "tailscale.com/ipn" + "tailscale.com/util/prompt" "tailscale.com/util/testenv" ) @@ -57,11 +53,6 @@ func isRiskAccepted(riskType, acceptedRisks string) bool { var errAborted = errors.New("aborted, no changes made") -// riskAbortTimeSeconds is the number of seconds to wait after displaying the -// risk message before continuing with the operation. -// It is used by the presentRiskToUser function below. -const riskAbortTimeSeconds = 5 - // presentRiskToUser displays the risk message and waits for the user to cancel. // It returns errorAborted if the user aborts. In tests it returns errAborted // immediately unless the risk has been explicitly accepted. 
@@ -75,22 +66,10 @@ func presentRiskToUser(riskType, riskMessage, acceptedRisks string) error { outln(riskMessage) printf("To skip this warning, use --accept-risk=%s\n", riskType) - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, syscall.SIGINT) - var msgLen int - for left := riskAbortTimeSeconds; left > 0; left-- { - msg := fmt.Sprintf("\rContinuing in %d seconds...", left) - msgLen = len(msg) - printf("%s", msg) - select { - case <-interrupt: - printf("\r%s\r", strings.Repeat("x", msgLen+1)) - return errAborted - case <-time.After(time.Second): - continue - } + if prompt.YesNo("Continue?") { + return nil } - printf("\r%s\r", strings.Repeat(" ", msgLen)) + return errAborted } From 2434bc69fc67fd146021fcb6743e692a51953ab8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 1 Sep 2025 14:37:45 -0700 Subject: [PATCH 0280/1093] util/syspolicy/{setting,ptype}: move PreferenceOption and Visibility to new leaf package Step 3 in the series. See earlier cc532efc2000 and d05e6dc09e. This step moves some types into a new leaf "ptype" package out of the big "settings" package. The policyclient.Client will later get new methods to return those things (as well as Duration and Uint64, which weren't done at the time of the earlier prototype). 
Updates #16998 Updates #12614 Change-Id: I4d72d8079de3b5351ed602eaa72863372bd474a2 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + net/dns/manager_windows.go | 4 ++-- tsnet/depaware.txt | 1 + tstest/deptest/deptest.go | 4 ++++ .../{setting/types.go => ptype/ptype.go} | 22 +++++++---------- util/syspolicy/ptype/ptype_test.go | 24 +++++++++++++++++++ util/syspolicy/rsop/change_callbacks.go | 3 ++- util/syspolicy/setting/setting.go | 3 ++- util/syspolicy/setting/snapshot_test.go | 6 +++++ util/syspolicy/source/policy_reader.go | 9 +++---- util/syspolicy/source/policy_reader_test.go | 9 +++---- util/syspolicy/syspolicy.go | 11 +++++---- util/syspolicy/syspolicy_test.go | 23 +++++++++--------- 17 files changed, 83 insertions(+), 41 deletions(-) rename util/syspolicy/{setting/types.go => ptype/ptype.go} (88%) create mode 100644 util/syspolicy/ptype/ptype_test.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 0597d5d1f0210..2c6c4690ca885 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -176,6 +176,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 40c8abb0813cb..ccba967070b03 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -957,6 +957,7 
@@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index cf1691c71a52d..047bac6c274c9 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -197,6 +197,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index f08601f81eb27..ee55f914cfc1e 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -434,6 +434,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ 
tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 743492904f796..155ad03e3b029 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -386,6 +386,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 901ab6dd062bd..8830861d10ae2 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -32,7 +32,7 @@ import ( "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" - "tailscale.com/util/syspolicy/setting" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/winutil" ) @@ -521,7 +521,7 @@ func (m *windowsManager) reconfigureDNSRegistration() { // Disable DNS registration by default (if the policy setting is not configured). // This is primarily for historical reasons and to avoid breaking existing // setups that rely on this behavior. 
- enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(pkey.EnableDNSRegistration, setting.NeverByPolicy) + enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(pkey.EnableDNSRegistration, ptype.NeverByPolicy) if err != nil { m.logf("error getting DNSRegistration policy setting: %v", err) // non-fatal; we'll use the default } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index f4b0dc775415b..1c2be4781e29c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -381,6 +381,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/control/controlclient+ tailscale.com/util/syspolicy/policyclient from tailscale.com/control/controlclient+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go index 4effd4a7883af..c248d6c20845b 100644 --- a/tstest/deptest/deptest.go +++ b/tstest/deptest/deptest.go @@ -24,6 +24,7 @@ import ( type DepChecker struct { GOOS string // optional GOARCH string // optional + OnDep func(string) // if non-nil, called per import BadDeps map[string]string // package => why WantDeps set.Set[string] // packages expected Tags string // comma-separated @@ -66,6 +67,9 @@ func (c DepChecker) Check(t *testing.T) { }) for _, dep := range res.Deps { + if c.OnDep != nil { + c.OnDep(dep) + } if why, ok := c.BadDeps[dep]; ok { t.Errorf("package %q is not allowed as a dependency (env: %q); reason: %s", dep, extraEnv, why) } diff --git a/util/syspolicy/setting/types.go b/util/syspolicy/ptype/ptype.go similarity index 88% rename from util/syspolicy/setting/types.go rename 
to util/syspolicy/ptype/ptype.go index 9f110ab034c83..65ca9e63108eb 100644 --- a/util/syspolicy/setting/types.go +++ b/util/syspolicy/ptype/ptype.go @@ -1,11 +1,11 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package setting - -import ( - "encoding" -) +// Package ptype contains types used by syspolicy. +// +// It's a leaf package for dependency reasons and should not contain much if any +// code, and should not import much (or anything). +package ptype // PreferenceOption is a policy that governs whether a boolean variable // is forcibly assigned an administrator-defined value, or allowed to receive @@ -18,9 +18,10 @@ const ( AlwaysByPolicy ) -// Show returns if the UI option that controls the choice administered by this -// policy should be shown. Currently this is true if and only if the policy is -// [ShowChoiceByPolicy]. +// Show reports whether the UI option that controls the choice administered by +// this policy should be shown (that is, available for users to change). +// +// Currently this is true if and only if the policy is [ShowChoiceByPolicy]. func (p PreferenceOption) Show() bool { return p == ShowChoiceByPolicy } @@ -91,11 +92,6 @@ func (p *PreferenceOption) UnmarshalText(text []byte) error { // component of a user interface is to be shown. 
type Visibility byte -var ( - _ encoding.TextMarshaler = (*Visibility)(nil) - _ encoding.TextUnmarshaler = (*Visibility)(nil) -) - const ( VisibleByPolicy Visibility = 'v' HiddenByPolicy Visibility = 'h' diff --git a/util/syspolicy/ptype/ptype_test.go b/util/syspolicy/ptype/ptype_test.go new file mode 100644 index 0000000000000..7c963398b41b1 --- /dev/null +++ b/util/syspolicy/ptype/ptype_test.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ptype + +import ( + "encoding" + "testing" + + "tailscale.com/tstest/deptest" +) + +var ( + _ encoding.TextMarshaler = (*Visibility)(nil) + _ encoding.TextUnmarshaler = (*Visibility)(nil) +) + +func TestImports(t *testing.T) { + deptest.DepChecker{ + OnDep: func(dep string) { + t.Errorf("unexpected dep %q in leaf package; this package should not contain much code", dep) + }, + }.Check(t) +} diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index 4e71f683a943d..fdf51c253cbd7 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -13,6 +13,7 @@ import ( "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" ) @@ -50,7 +51,7 @@ func (c PolicyChange) HasChanged(key pkey.Key) bool { return true } switch newVal := new.(type) { - case bool, uint64, string, setting.Visibility, setting.PreferenceOption, time.Duration: + case bool, uint64, string, ptype.Visibility, ptype.PreferenceOption, time.Duration: return newVal != old case []string: oldVal, ok := old.([]string) diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 9285afade50b9..091cf58d31b71 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -17,6 +17,7 @@ import ( "tailscale.com/types/lazy" 
"tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/testenv" ) @@ -130,7 +131,7 @@ func (t Type) String() string { // ValueType is a constraint that allows Go types corresponding to [Type]. type ValueType interface { - bool | uint64 | string | []string | Visibility | PreferenceOption | time.Duration + bool | uint64 | string | []string | ptype.Visibility | ptype.PreferenceOption | time.Duration } // Definition defines policy key, scope and value type. diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go index 99c619cd99bb8..762a9681c6d7e 100644 --- a/util/syspolicy/setting/snapshot_test.go +++ b/util/syspolicy/setting/snapshot_test.go @@ -12,6 +12,12 @@ import ( jsonv2 "github.com/go-json-experiment/json" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" +) + +const ( + VisibleByPolicy = ptype.VisibleByPolicy + ShowChoiceByPolicy = ptype.ShowChoiceByPolicy ) func TestMergeSnapshots(t *testing.T) { diff --git a/util/syspolicy/source/policy_reader.go b/util/syspolicy/source/policy_reader.go index e6360e5f86a42..33ef22912f172 100644 --- a/util/syspolicy/source/policy_reader.go +++ b/util/syspolicy/source/policy_reader.go @@ -17,6 +17,7 @@ import ( "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/internal/metrics" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" ) @@ -365,21 +366,21 @@ func readPolicySettingValue(store Store, s *setting.Definition) (value any, err case setting.PreferenceOptionValue: s, err := store.ReadString(key) if err == nil { - var value setting.PreferenceOption + var value ptype.PreferenceOption if err = value.UnmarshalText([]byte(s)); err == nil { return value, nil } } - return setting.ShowChoiceByPolicy, err + return ptype.ShowChoiceByPolicy, err case 
setting.VisibilityValue: s, err := store.ReadString(key) if err == nil { - var value setting.Visibility + var value ptype.Visibility if err = value.UnmarshalText([]byte(s)); err == nil { return value, nil } } - return setting.VisibleByPolicy, err + return ptype.VisibleByPolicy, err case setting.DurationValue: s, err := store.ReadString(key) if err == nil { diff --git a/util/syspolicy/source/policy_reader_test.go b/util/syspolicy/source/policy_reader_test.go index 06246a209a875..32e8c51a6d3c9 100644 --- a/util/syspolicy/source/policy_reader_test.go +++ b/util/syspolicy/source/policy_reader_test.go @@ -10,6 +10,7 @@ import ( "tailscale.com/util/must" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" ) @@ -139,8 +140,8 @@ func TestReaderLifecycle(t *testing.T) { }, initWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "DurationValue": setting.RawItemWith(must.Get(time.ParseDuration("2h30m")), nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), - "PreferenceOptionValue": setting.RawItemWith(setting.AlwaysByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), - "VisibilityValue": setting.RawItemWith(setting.VisibleByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + "PreferenceOptionValue": setting.RawItemWith(ptype.AlwaysByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), + "VisibilityValue": setting.RawItemWith(ptype.VisibleByPolicy, nil, setting.NewNamedOrigin("Test", setting.DeviceScope)), }, setting.NewNamedOrigin("Test", setting.DeviceScope)), }, { @@ -169,8 +170,8 @@ func TestReaderLifecycle(t *testing.T) { initWant: setting.NewSnapshot(map[pkey.Key]setting.RawItem{ "DurationValue1": setting.RawItemWith(nil, setting.NewErrorText("time: invalid duration \"soon\""), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), "DurationValue2": setting.RawItemWith(nil, setting.NewErrorText("bang!"), setting.NewNamedOrigin("Test", 
setting.CurrentUserScope)), - "PreferenceOptionValue": setting.RawItemWith(setting.ShowChoiceByPolicy, nil, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), - "VisibilityValue": setting.RawItemWith(setting.VisibleByPolicy, setting.NewErrorText("type mismatch in ReadString: got uint64"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + "PreferenceOptionValue": setting.RawItemWith(ptype.ShowChoiceByPolicy, nil, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), + "VisibilityValue": setting.RawItemWith(ptype.VisibleByPolicy, setting.NewErrorText("type mismatch in ReadString: got uint64"), setting.NewNamedOrigin("Test", setting.CurrentUserScope)), }, setting.NewNamedOrigin("Test", setting.CurrentUserScope)), }, } diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 0ac1d251745d3..189f4110707e1 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -18,6 +18,7 @@ import ( "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" @@ -111,14 +112,14 @@ func GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { // the authority to set. It describes user-decides/always/never options, where // "always" and "never" remove the user's ability to make a selection. If not // present or set to a different value, "user-decides" is the default. -func GetPreferenceOption(name pkey.Key) (setting.PreferenceOption, error) { - return getCurrentPolicySettingValue(name, setting.ShowChoiceByPolicy) +func GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { + return getCurrentPolicySettingValue(name, ptype.ShowChoiceByPolicy) } // GetPreferenceOptionOrDefault is like [GetPreferenceOption], but allows // specifying a default value to return if the policy setting is not configured. 
// It can be used in situations where "user-decides" is not the default. -func GetPreferenceOptionOrDefault(name pkey.Key, defaultValue setting.PreferenceOption) (setting.PreferenceOption, error) { +func GetPreferenceOptionOrDefault(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { return getCurrentPolicySettingValue(name, defaultValue) } @@ -127,8 +128,8 @@ func GetPreferenceOptionOrDefault(name pkey.Key, defaultValue setting.Preference // for UI elements. The registry value should be a string set to "show" (return // true) or "hide" (return true). If not present or set to a different value, // "show" (return false) is the default. -func GetVisibility(name pkey.Key) (setting.Visibility, error) { - return getCurrentPolicySettingValue(name, setting.VisibleByPolicy) +func GetVisibility(name pkey.Key) (ptype.Visibility, error) { + return getCurrentPolicySettingValue(name, ptype.VisibleByPolicy) } // GetDuration loads a policy from the registry that can be managed diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index 5e822a0b7a2aa..3130f5d077ea0 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -13,6 +13,7 @@ import ( "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/internal/metrics" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/util/testenv" @@ -249,7 +250,7 @@ func TestGetPreferenceOption(t *testing.T) { key pkey.Key handlerValue string handlerError error - wantValue setting.PreferenceOption + wantValue ptype.PreferenceOption wantError error wantMetrics []metrics.TestState }{ @@ -257,7 +258,7 @@ func TestGetPreferenceOption(t *testing.T) { name: "always by policy", key: pkey.EnableIncomingConnections, handlerValue: "always", - wantValue: setting.AlwaysByPolicy, + wantValue: ptype.AlwaysByPolicy, wantMetrics: 
[]metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, @@ -267,7 +268,7 @@ func TestGetPreferenceOption(t *testing.T) { name: "never by policy", key: pkey.EnableIncomingConnections, handlerValue: "never", - wantValue: setting.NeverByPolicy, + wantValue: ptype.NeverByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, @@ -277,7 +278,7 @@ func TestGetPreferenceOption(t *testing.T) { name: "use default", key: pkey.EnableIncomingConnections, handlerValue: "", - wantValue: setting.ShowChoiceByPolicy, + wantValue: ptype.ShowChoiceByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AllowIncomingConnections", Value: 1}, @@ -287,13 +288,13 @@ func TestGetPreferenceOption(t *testing.T) { name: "read non-existing value", key: pkey.EnableIncomingConnections, handlerError: ErrNotConfigured, - wantValue: setting.ShowChoiceByPolicy, + wantValue: ptype.ShowChoiceByPolicy, }, { name: "other error is returned", key: pkey.EnableIncomingConnections, handlerError: someOtherError, - wantValue: setting.ShowChoiceByPolicy, + wantValue: ptype.ShowChoiceByPolicy, wantError: someOtherError, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_errors", Value: 1}, @@ -342,7 +343,7 @@ func TestGetVisibility(t *testing.T) { key pkey.Key handlerValue string handlerError error - wantValue setting.Visibility + wantValue ptype.Visibility wantError error wantMetrics []metrics.TestState }{ @@ -350,7 +351,7 @@ func TestGetVisibility(t *testing.T) { name: "hidden by policy", key: pkey.AdminConsoleVisibility, handlerValue: "hide", - wantValue: setting.HiddenByPolicy, + wantValue: ptype.HiddenByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AdminConsole", Value: 1}, @@ -360,7 +361,7 @@ func TestGetVisibility(t *testing.T) { name: "visibility 
default", key: pkey.AdminConsoleVisibility, handlerValue: "show", - wantValue: setting.VisibleByPolicy, + wantValue: ptype.VisibleByPolicy, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_any", Value: 1}, {Name: "$os_syspolicy_AdminConsole", Value: 1}, @@ -371,14 +372,14 @@ func TestGetVisibility(t *testing.T) { key: pkey.AdminConsoleVisibility, handlerValue: "show", handlerError: ErrNotConfigured, - wantValue: setting.VisibleByPolicy, + wantValue: ptype.VisibleByPolicy, }, { name: "other error is returned", key: pkey.AdminConsoleVisibility, handlerValue: "show", handlerError: someOtherError, - wantValue: setting.VisibleByPolicy, + wantValue: ptype.VisibleByPolicy, wantError: someOtherError, wantMetrics: []metrics.TestState{ {Name: "$os_syspolicy_errors", Value: 1}, From 1ca4ae598a8369c53f91eec09e19c7f2326ed539 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 1 Sep 2025 15:05:06 -0700 Subject: [PATCH 0281/1093] ipn/ipnlocal: use policyclient.Client always, stop using global syspolicy funcs Step 4 of N. See earlier commits in the series (via the issue) for the plan. This adds the missing methods to policyclient.Client and then uses it everywhere in ipn/ipnlocal and locks it in with a new dep test. Still plenty of users of the global syspolicy elsewhere in the tree, but this is a lot of them. 
Updates #16998 Updates #12614 Change-Id: I25b136539ae1eedbcba80124de842970db0ca314 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/c2n.go | 3 +- ipn/ipnlocal/local.go | 71 ++++++++++----------- ipn/ipnlocal/local_test.go | 50 +++++++++++++-- tsd/syspolicy_on.go | 23 +++++++ tstest/deptest/deptest.go | 12 +++- util/syspolicy/policyclient/policyclient.go | 56 +++++++++++++++- 6 files changed, 168 insertions(+), 47 deletions(-) diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index b1a780cc10c64..339fad50afeb7 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -29,7 +29,6 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/goroutines" "tailscale.com/util/set" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" "tailscale.com/version/distro" @@ -343,7 +342,7 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http // this will first check syspolicy, MDM settings like Registry // on Windows or defaults on macOS. If they are not set, it falls // back to the cli-flag, `--posture-checking`. - choice, err := syspolicy.GetPreferenceOption(pkey.PostureChecking) + choice, err := b.polc.GetPreferenceOption(pkey.PostureChecking) if err != nil { b.logf( "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 61bde31e48405..5f70ae8ef0d00 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -107,7 +107,6 @@ import ( "tailscale.com/util/rands" "tailscale.com/util/set" "tailscale.com/util/slicesx" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/systemd" @@ -382,7 +381,7 @@ type LocalBackend struct { lastSuggestedExitNode tailcfg.StableNodeID // allowedSuggestedExitNodes is a set of exit nodes permitted by the most recent - // [syspolicy.AllowedSuggestedExitNodes] value. 
The allowedSuggestedExitNodesMu + // [pkey.AllowedSuggestedExitNodes] value. The allowedSuggestedExitNodesMu // mutex guards access to this set. allowedSuggestedExitNodesMu sync.Mutex allowedSuggestedExitNodes set.Set[tailcfg.StableNodeID] @@ -405,10 +404,10 @@ type LocalBackend struct { // (sending false). needsCaptiveDetection chan bool - // overrideAlwaysOn is whether [syspolicy.AlwaysOn] is overridden by the user + // overrideAlwaysOn is whether [pkey.AlwaysOn] is overridden by the user // and should have no impact on the WantRunning state until the policy changes, // or the user re-connects manually, switches to a different profile, etc. - // Notably, this is true when [syspolicy.AlwaysOnOverrideWithReason] is enabled, + // Notably, this is true when [pkey.AlwaysOnOverrideWithReason] is enabled, // and the user has disconnected with a reason. // See tailscale/corp#26146. overrideAlwaysOn bool @@ -418,9 +417,9 @@ type LocalBackend struct { reconnectTimer tstime.TimerController // overrideExitNodePolicy is whether the user has overridden the exit node policy - // by manually selecting an exit node, as allowed by [syspolicy.AllowExitNodeOverride]. + // by manually selecting an exit node, as allowed by [pkey.AllowExitNodeOverride]. // - // If true, the [syspolicy.ExitNodeID] and [syspolicy.ExitNodeIP] policy settings are ignored, + // If true, the [pkey.ExitNodeID] and [pkey.ExitNodeIP] policy settings are ignored, // and the suggested exit node is not applied automatically. 
// // It is cleared when the user switches back to the state required by policy (typically, auto:any), @@ -679,7 +678,7 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim } } case "syspolicy": - setEnabled = syspolicy.SetDebugLoggingEnabled + setEnabled = b.polc.SetDebugLoggingEnabled } if setEnabled == nil || !slices.Contains(ipn.DebuggableComponents, component) { return fmt.Errorf("unknown component %q", component) @@ -1820,13 +1819,13 @@ var preferencePolicies = []preferencePolicyInfo{ // // b.mu must be held. func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { - if controlURL, err := syspolicy.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { + if controlURL, err := b.polc.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true } const sentinel = "HostnameDefaultValue" - hostnameFromPolicy, _ := syspolicy.GetString(pkey.Hostname, sentinel) + hostnameFromPolicy, _ := b.polc.GetString(pkey.Hostname, sentinel) switch hostnameFromPolicy { case sentinel: // An empty string for this policy value means that the admin wants to delete @@ -1861,13 +1860,13 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { anyChange = true } - if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { + if alwaysOn, _ := b.polc.GetBoolean(pkey.AlwaysOn, false); alwaysOn && !b.overrideAlwaysOn && !prefs.WantRunning { prefs.WantRunning = true anyChange = true } for _, opt := range preferencePolicies { - if po, err := syspolicy.GetPreferenceOption(opt.key); err == nil { + if po, err := b.polc.GetPreferenceOption(opt.key); err == nil { curVal := opt.get(prefs.View()) newVal := po.ShouldEnable(curVal) if curVal != newVal { @@ -1885,7 +1884,7 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange 
bool) { // // b.mu must be held. func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { - if exitNodeIDStr, _ := syspolicy.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { + if exitNodeIDStr, _ := b.polc.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) // Try to parse the policy setting value as an "auto:"-prefixed [ipn.ExitNodeExpression], @@ -1914,7 +1913,7 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange // or requires an auto exit node ID and the current one isn't allowed, // then update the exit node ID. if prefs.ExitNodeID != exitNodeID { - if !useAutoExitNode || !isAllowedAutoExitNodeID(prefs.ExitNodeID) { + if !useAutoExitNode || !isAllowedAutoExitNodeID(b.polc, prefs.ExitNodeID) { prefs.ExitNodeID = exitNodeID anyChange = true } @@ -1926,7 +1925,7 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange prefs.ExitNodeIP = netip.Addr{} anyChange = true } - } else if exitNodeIPStr, _ := syspolicy.GetString(pkey.ExitNodeIP, ""); exitNodeIPStr != "" { + } else if exitNodeIPStr, _ := b.polc.GetString(pkey.ExitNodeIP, ""); exitNodeIPStr != "" { if prefs.AutoExitNode != "" { prefs.AutoExitNode = "" // mutually exclusive with ExitNodeIP anyChange = true @@ -1946,7 +1945,7 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange // registerSysPolicyWatch subscribes to syspolicy change notifications // and immediately applies the effective syspolicy settings to the current profile. 
func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { - if unregister, err = syspolicy.RegisterChangeCallback(b.sysPolicyChanged); err != nil { + if unregister, err = b.polc.RegisterChangeCallback(b.sysPolicyChanged); err != nil { return nil, fmt.Errorf("syspolicy: LocalBacked failed to register policy change callback: %v", err) } if prefs, anyChange := b.reconcilePrefs(); anyChange { @@ -1996,7 +1995,7 @@ func (b *LocalBackend) sysPolicyChanged(policy policyclient.PolicyChange) { if _, err := b.SuggestExitNode(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { b.logf("failed to select auto exit node: %v", err) } - // If [syspolicy.ExitNodeID] is set to `auto:any`, the suggested exit node ID + // If [pkey.ExitNodeID] is set to `auto:any`, the suggested exit node ID // will be used when [applySysPolicy] updates the current profile's prefs. } @@ -2132,7 +2131,7 @@ func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged if !b.lastSuggestedExitNode.IsZero() { // If we have a suggested exit node, use it. newExitNodeID = b.lastSuggestedExitNode - } else if isAllowedAutoExitNodeID(prefs.ExitNodeID) { + } else if isAllowedAutoExitNodeID(b.polc, prefs.ExitNodeID) { // If we don't have a suggested exit node, but the prefs already // specify an allowed auto exit node ID, retain it. 
newExitNodeID = prefs.ExitNodeID @@ -2351,7 +2350,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { } if b.state != ipn.Running && b.conf == nil && opts.AuthKey == "" { - sysak, _ := syspolicy.GetString(pkey.AuthKey, "") + sysak, _ := b.polc.GetString(pkey.AuthKey, "") if sysak != "" { b.logf("Start: setting opts.AuthKey by syspolicy, len=%v", len(sysak)) opts.AuthKey = strings.TrimSpace(sysak) @@ -4111,7 +4110,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac if b.currentUser != nil { profile := b.pm.CurrentProfile() // TODO(nickkhyl): check if the current profile is allowed on the device, - // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. + // such as when [pkey.Tailnet] policy setting requires a specific Tailnet. // See tailscale/corp#26249. if uid := b.currentUser.UserID(); profile.LocalUserID() != uid { profile = b.pm.DefaultUserProfile(uid) @@ -4138,7 +4137,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac // using the current profile. // // TODO(nickkhyl): check if the current profile is allowed on the device, - // such as when [syspolicy.Tailnet] policy setting requires a specific Tailnet. + // such as when [pkey.Tailnet] policy setting requires a specific Tailnet. // See tailscale/corp#26249. return b.pm.CurrentProfile(), false } @@ -4411,15 +4410,15 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn // Prevent users from changing exit node preferences // when exit node usage is managed by policy. 
if mp.ExitNodeIDSet || mp.ExitNodeIPSet || mp.AutoExitNodeSet { - isManaged, err := syspolicy.HasAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP) + isManaged, err := b.polc.HasAnyOf(pkey.ExitNodeID, pkey.ExitNodeIP) if err != nil { err = fmt.Errorf("policy check failed: %w", err) } else if isManaged { // Allow users to override ExitNode policy settings and select an exit node manually - // if permitted by [syspolicy.AllowExitNodeOverride]. + // if permitted by [pkey.AllowExitNodeOverride]. // // Disabling exit node usage entirely is not allowed. - allowExitNodeOverride, _ := syspolicy.GetBoolean(pkey.AllowExitNodeOverride, false) + allowExitNodeOverride, _ := b.polc.GetBoolean(pkey.AllowExitNodeOverride, false) if !allowExitNodeOverride || b.changeDisablesExitNodeLocked(prefs, mp) { err = errManagedByPolicy } @@ -4517,13 +4516,13 @@ func (b *LocalBackend) adjustEditPrefsLocked(prefs ipn.PrefsView, mp *ipn.Masked // b.mu must be held; mp must not be mutated by this method. func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, oldPrefs, newPrefs ipn.PrefsView) { if mp.WantRunningSet && !mp.WantRunning && oldPrefs.WantRunning() { - // If a user has enough rights to disconnect, such as when [syspolicy.AlwaysOn] - // is disabled, or [syspolicy.AlwaysOnOverrideWithReason] is also set and the user + // If a user has enough rights to disconnect, such as when [pkey.AlwaysOn] + // is disabled, or [pkey.AlwaysOnOverrideWithReason] is also set and the user // provides a reason for disconnecting, then we should not force the "always on" // mode on them until the policy changes, they switch to a different profile, etc. 
b.overrideAlwaysOn = true - if reconnectAfter, _ := syspolicy.GetDuration(pkey.ReconnectAfter, 0); reconnectAfter > 0 { + if reconnectAfter, _ := b.polc.GetDuration(pkey.ReconnectAfter, 0); reconnectAfter > 0 { b.startReconnectTimerLocked(reconnectAfter) } } @@ -4534,7 +4533,7 @@ func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, o b.overrideExitNodePolicy = false } if mp.AutoExitNodeSet || mp.ExitNodeIDSet || mp.ExitNodeIPSet { - if allowExitNodeOverride, _ := syspolicy.GetBoolean(pkey.AllowExitNodeOverride, false); allowExitNodeOverride { + if allowExitNodeOverride, _ := b.polc.GetBoolean(pkey.AllowExitNodeOverride, false); allowExitNodeOverride { // If applying exit node policy settings to the new prefs results in no change, // the user is not overriding the policy. Otherwise, it is an override. b.overrideExitNodePolicy = b.applyExitNodeSysPolicyLocked(newPrefs.AsStruct()) @@ -5643,7 +5642,7 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // was selected, if any. // // If auto exit node is enabled (via [ipn.Prefs.AutoExitNode] or - // [syspolicy.ExitNodeID]), or an exit node is specified by ExitNodeIP + // [pkey.ExitNodeID]), or an exit node is specified by ExitNodeIP // instead of ExitNodeID , and we don't yet have enough info to resolve // it (usually due to missing netmap or net report), then ExitNodeID in // the prefs may be invalid (typically, [unresolvedExitNodeID]) until @@ -7786,7 +7785,7 @@ func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionRes } // getAllowedSuggestions returns a set of exit nodes permitted by the most recent -// [syspolicy.AllowedSuggestedExitNodes] value. Callers must not mutate the returned set. +// [pkey.AllowedSuggestedExitNodes] value. Callers must not mutate the returned set. 
func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] { b.allowedSuggestedExitNodesMu.Lock() defer b.allowedSuggestedExitNodesMu.Unlock() @@ -7794,11 +7793,11 @@ func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] { } // refreshAllowedSuggestions rebuilds the set of permitted exit nodes -// from the current [syspolicy.AllowedSuggestedExitNodes] value. +// from the current [pkey.AllowedSuggestedExitNodes] value. func (b *LocalBackend) refreshAllowedSuggestions() { b.allowedSuggestedExitNodesMu.Lock() defer b.allowedSuggestedExitNodesMu.Unlock() - b.allowedSuggestedExitNodes = fillAllowedSuggestions() + b.allowedSuggestedExitNodes = fillAllowedSuggestions(b.polc) } // selectRegionFunc returns a DERP region from the slice of candidate regions. @@ -7810,8 +7809,8 @@ type selectRegionFunc func(views.Slice[int]) int // choice. type selectNodeFunc func(nodes views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView -func fillAllowedSuggestions() set.Set[tailcfg.StableNodeID] { - nodes, err := syspolicy.GetStringArray(pkey.AllowedSuggestedExitNodes, nil) +func fillAllowedSuggestions(polc policyclient.Client) set.Set[tailcfg.StableNodeID] { + nodes, err := polc.GetStringArray(pkey.AllowedSuggestedExitNodes, nil) if err != nil { log.Printf("fillAllowedSuggestions: unable to look up %q policy: %v", pkey.AllowedSuggestedExitNodes, err) return nil @@ -8176,11 +8175,11 @@ const ( unresolvedExitNodeID tailcfg.StableNodeID = "auto:any" ) -func isAllowedAutoExitNodeID(exitNodeID tailcfg.StableNodeID) bool { +func isAllowedAutoExitNodeID(polc policyclient.Client, exitNodeID tailcfg.StableNodeID) bool { if exitNodeID == "" { return false // an exit node is required } - if nodes, _ := syspolicy.GetStringArray(pkey.AllowedSuggestedExitNodes, nil); nodes != nil { + if nodes, _ := polc.GetStringArray(pkey.AllowedSuggestedExitNodes, nil); nodes != nil { return slices.Contains(nodes, string(exitNodeID)) } @@ -8343,7 +8342,7 
@@ func (b *LocalBackend) stateEncrypted() opt.Bool { // the Keychain. A future release will clean up the on-disk state // files. // TODO(#15830): always return true here once MacSys is fully migrated. - sp, _ := syspolicy.GetBoolean(pkey.EncryptState, false) + sp, _ := b.polc.GetBoolean(pkey.EncryptState, false) return opt.NewBool(sp) default: // Probably self-compiled tailscaled, we don't use the Keychain diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 2b83e47f884b0..0967bf1ff0996 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -47,6 +47,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/tstest" + "tailscale.com/tstest/deptest" "tailscale.com/types/dnstype" "tailscale.com/types/ipproto" "tailscale.com/types/key" @@ -63,6 +64,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" @@ -5541,6 +5543,28 @@ func TestReadWriteRouteInfo(t *testing.T) { } } +// staticPolicy maps policy keys to their corresponding values, +// which must be of the correct type (string, []string, bool, etc). +// +// It is used for testing purposes to simulate policy client behavior. +// It panics if the values are the wrong type. 
+type staticPolicy map[pkey.Key]any + +type testPolicy struct { + staticPolicy + policyclient.Client +} + +func (sp testPolicy) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { + if val, ok := sp.staticPolicy[key]; ok { + if arr, ok := val.([]string); ok { + return arr, nil + } + return nil, fmt.Errorf("key %s is not a []string", key) + } + return defaultVal, nil +} + func TestFillAllowedSuggestions(t *testing.T) { tests := []struct { name string @@ -5571,15 +5595,16 @@ func TestFillAllowedSuggestions(t *testing.T) { want: []tailcfg.StableNodeID{"ABC", "def", "gHiJ"}, }, } - syspolicy.RegisterWellKnownSettingsForTest(t) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - pkey.AllowedSuggestedExitNodes, tt.allowPolicy, - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + polc := testPolicy{ + staticPolicy: staticPolicy{ + pkey.AllowedSuggestedExitNodes: tt.allowPolicy, + }, + } - got := fillAllowedSuggestions() + got := fillAllowedSuggestions(polc) if got == nil { if tt.want == nil { return @@ -7008,6 +7033,19 @@ func TestDisplayMessageIPNBus(t *testing.T) { } } +func TestDeps(t *testing.T) { + deptest.DepChecker{ + OnImport: func(pkg string) { + switch pkg { + case "tailscale.com/util/syspolicy", + "tailscale.com/util/syspolicy/setting", + "tailscale.com/util/syspolicy/rsop": + t.Errorf("ipn/ipnlocal: importing syspolicy package %q is not allowed; only policyclient and its deps should be used by ipn/ipnlocal", pkg) + } + }, + }.Check(t) +} + func checkError(tb testing.TB, got, want error, fatal bool) { tb.Helper() f := tb.Errorf diff --git a/tsd/syspolicy_on.go b/tsd/syspolicy_on.go index 8d7762bd9c5c8..e9811b88b0c61 100644 --- a/tsd/syspolicy_on.go +++ b/tsd/syspolicy_on.go @@ -6,9 +6,12 @@ package tsd import ( + "time" + "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" 
"tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" ) func getPolicyClient() policyclient.Client { return globalSyspolicy{} } @@ -36,6 +39,26 @@ func (globalSyspolicy) SetDebugLoggingEnabled(enabled bool) { syspolicy.SetDebugLoggingEnabled(enabled) } +func (globalSyspolicy) GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { + return syspolicy.GetUint64(key, defaultValue) +} + +func (globalSyspolicy) GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { + return syspolicy.GetDuration(name, defaultValue) +} + +func (globalSyspolicy) GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { + return syspolicy.GetPreferenceOption(name) +} + +func (globalSyspolicy) GetVisibility(name pkey.Key) (ptype.Visibility, error) { + return syspolicy.GetVisibility(name) +} + +func (globalSyspolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { + return syspolicy.HasAnyOf(keys...) +} + func (globalSyspolicy) RegisterChangeCallback(cb func(policyclient.PolicyChange)) (unregister func(), err error) { return syspolicy.RegisterChangeCallback(cb) } diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go index c248d6c20845b..c0b6d8b8cffb5 100644 --- a/tstest/deptest/deptest.go +++ b/tstest/deptest/deptest.go @@ -24,7 +24,8 @@ import ( type DepChecker struct { GOOS string // optional GOARCH string // optional - OnDep func(string) // if non-nil, called per import + OnDep func(string) // if non-nil, called per dependency + OnImport func(string) // if non-nil, called per import BadDeps map[string]string // package => why WantDeps set.Set[string] // packages expected Tags string // comma-separated @@ -52,7 +53,8 @@ func (c DepChecker) Check(t *testing.T) { t.Fatal(err) } var res struct { - Deps []string + Imports []string + Deps []string } if err := json.Unmarshal(out, &res); err != nil { t.Fatal(err) @@ -66,6 +68,12 @@ func (c DepChecker) Check(t *testing.T) { return strings.TrimSpace(string(out)) }) + if 
c.OnImport != nil { + for _, imp := range res.Imports { + c.OnImport(imp) + } + } + for _, dep := range res.Deps { if c.OnDep != nil { c.OnDep(dep) diff --git a/util/syspolicy/policyclient/policyclient.go b/util/syspolicy/policyclient/policyclient.go index 0b15599c1591a..aadcbc60e91db 100644 --- a/util/syspolicy/policyclient/policyclient.go +++ b/util/syspolicy/policyclient/policyclient.go @@ -6,7 +6,12 @@ // of syspolicy is omitted from the build. package policyclient -import "tailscale.com/util/syspolicy/pkey" +import ( + "time" + + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" +) // Client is the interface between code making questions about the system policy // and the actual implementation. @@ -23,9 +28,38 @@ type Client interface { // or defaultValue (and a nil error) if it does not exist. GetBoolean(key pkey.Key, defaultValue bool) (bool, error) + // GetUint64 returns a numeric policy setting with the specified key, + // or defaultValue (and a nil error) if it does not exist. + GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) + + // GetDuration loads a policy from the registry that can be managed by an + // enterprise policy management system and describes a duration for some + // action. The registry value should be a string that time.ParseDuration + // understands. If the registry value is "" or can not be processed, + // defaultValue (and a nil error) is returned instead. + GetDuration(key pkey.Key, defaultValue time.Duration) (time.Duration, error) + + // GetPreferenceOption loads a policy from the registry that can be + // managed by an enterprise policy management system and allows administrative + // overrides of users' choices in a way that we do not want tailcontrol to have + // the authority to set. It describes user-decides/always/never options, where + // "always" and "never" remove the user's ability to make a selection. If not + // present or set to a different value, "user-decides" is the default. 
+ GetPreferenceOption(key pkey.Key) (ptype.PreferenceOption, error) + + // GetVisibility returns whether a UI element should be visible based on + // the system's configuration. + // If unconfigured, implementations should return [ptype.VisibleByPolicy] + // and a nil error. + GetVisibility(key pkey.Key) (ptype.Visibility, error) + // SetDebugLoggingEnabled enables or disables debug logging for the policy client. SetDebugLoggingEnabled(enabled bool) + // HasAnyOf returns whether at least one of the specified policy settings is + // configured, or an error if no keys are provided or the check fails. + HasAnyOf(keys ...pkey.Key) (bool, error) + // RegisterChangeCallback registers a callback function that will be called // whenever a policy change is detected. It returns a function to unregister // the callback and an error if the registration fails. @@ -59,6 +93,26 @@ func (NoPolicyClient) GetStringArray(key pkey.Key, defaultValue []string) ([]str return defaultValue, nil } +func (NoPolicyClient) GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { + return defaultValue, nil +} + +func (NoPolicyClient) GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { + return ptype.ShowChoiceByPolicy, nil +} + +func (NoPolicyClient) GetVisibility(name pkey.Key) (ptype.Visibility, error) { + return ptype.VisibleByPolicy, nil +} + +func (NoPolicyClient) HasAnyOf(keys ...pkey.Key) (bool, error) { + return false, nil +} + func (NoPolicyClient) SetDebugLoggingEnabled(enabled bool) {} func (NoPolicyClient) RegisterChangeCallback(cb func(PolicyChange)) (unregister func(), err error) { From 0d23490e1a7593661d9ef3b76dd151d2d70778b9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 1 Sep 2025 15:31:49 -0700 Subject: [PATCH 0282/1093] ipn/ipnlocal: simplify a test with a new simpler syspolicy client test type Less 
indirection. Updates #16998 Updates #12614 Change-Id: I5a3a3c3f3b195486b2731ec002d2532337b3d211 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local_test.go | 97 ++++++++++++++++++++++++-------------- tsd/tsd.go | 6 +++ 2 files changed, 68 insertions(+), 35 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 0967bf1ff0996..4843a941f6acc 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -632,7 +632,7 @@ func TestConfigureExitNode(t *testing.T) { exitNodeIDPolicy *tailcfg.StableNodeID exitNodeIPPolicy *netip.Addr exitNodeAllowedIDs []tailcfg.StableNodeID // nil if all IDs are allowed for auto exit nodes - exitNodeAllowOverride bool // whether [syspolicy.AllowExitNodeOverride] should be set to true + exitNodeAllowOverride bool // whether [pkey.AllowExitNodeOverride] should be set to true wantChangePrefsErr error // if non-nil, the error we expect from [LocalBackend.EditPrefsAs] wantPrefs ipn.Prefs wantExitNodeToggleErr error // if non-nil, the error we expect from [LocalBackend.SetUseExitNodeEnabled] @@ -970,7 +970,7 @@ func TestConfigureExitNode(t *testing.T) { name: "auto-any-via-policy/no-netmap/with-disallowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID prefs: ipn.Prefs{ ControlURL: controlURL, - ExitNodeID: exitNode2.StableID(), // not allowed by [syspolicy.AllowedSuggestedExitNodes] + ExitNodeID: exitNode2.StableID(), // not allowed by [pkey.AllowedSuggestedExitNodes] }, netMap: nil, report: report, @@ -989,7 +989,7 @@ func TestConfigureExitNode(t *testing.T) { name: "auto-any-via-policy/with-netmap/with-allowed-existing", // same, but now with a syspolicy setting that does not allow the existing exit node ID prefs: ipn.Prefs{ ControlURL: controlURL, - ExitNodeID: exitNode1.StableID(), // not allowed by [syspolicy.AllowedSuggestedExitNodes] + ExitNodeID: exitNode1.StableID(), // not allowed by [pkey.AllowedSuggestedExitNodes] }, netMap: 
clientNetmap, report: report, @@ -1072,7 +1072,7 @@ func TestConfigureExitNode(t *testing.T) { wantHostinfoExitNodeID: exitNode1.StableID(), }, { - name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [syspolicy.AllowExitNodeOverride] + name: "auto-any-via-policy/allow-override/change", // changing the exit node is allowed by [pkey.AllowExitNodeOverride] prefs: ipn.Prefs{ ControlURL: controlURL, }, @@ -1094,7 +1094,7 @@ func TestConfigureExitNode(t *testing.T) { wantHostinfoExitNodeID: exitNode2.StableID(), }, { - name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [syspolicy.AllowExitNodeOverride] + name: "auto-any-via-policy/allow-override/clear", // clearing the exit node ID is not allowed by [pkey.AllowExitNodeOverride] prefs: ipn.Prefs{ ControlURL: controlURL, }, @@ -1118,7 +1118,7 @@ func TestConfigureExitNode(t *testing.T) { wantHostinfoExitNodeID: exitNode1.StableID(), }, { - name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [syspolicy.AllowExitNodeOverride] + name: "auto-any-via-policy/allow-override/toggle-off", // similarly, toggling off the exit node is not allowed even with [pkey.AllowExitNodeOverride] prefs: ipn.Prefs{ ControlURL: controlURL, }, @@ -1179,36 +1179,32 @@ func TestConfigureExitNode(t *testing.T) { wantHostinfoExitNodeID: exitNode1.StableID(), }, } - syspolicy.RegisterWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + var pol testPolicy // Configure policy settings, if any. 
- store := source.NewTestStore(t) if tt.exitNodeIDPolicy != nil { - store.SetStrings(source.TestSettingOf(pkey.ExitNodeID, string(*tt.exitNodeIDPolicy))) + pol.Set(pkey.ExitNodeID, string(*tt.exitNodeIDPolicy)) } if tt.exitNodeIPPolicy != nil { - store.SetStrings(source.TestSettingOf(pkey.ExitNodeIP, tt.exitNodeIPPolicy.String())) + pol.Set(pkey.ExitNodeIP, tt.exitNodeIPPolicy.String()) } if tt.exitNodeAllowedIDs != nil { - store.SetStringLists(source.TestSettingOf(pkey.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs))) + pol.Set(pkey.AllowedSuggestedExitNodes, toStrings(tt.exitNodeAllowedIDs)) } if tt.exitNodeAllowOverride { - store.SetBooleans(source.TestSettingOf(pkey.AllowExitNodeOverride, true)) - } - if store.IsEmpty() { - // No syspolicy settings, so don't register a store. - // This allows the test to run in parallel with other tests. - t.Parallel() - } else { - // Register the store for syspolicy settings to make them available to the LocalBackend. - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, store) + pol.Set(pkey.AllowExitNodeOverride, true) } // Create a new LocalBackend with the given prefs. // Any syspolicy settings will be applied to the initial prefs. - lb := newTestLocalBackend(t) + sys := tsd.NewSystem() + sys.PolicyClient.Set(pol) + lb := newTestLocalBackendWithSys(t, sys) lb.SetPrefsForTest(tt.prefs.Clone()) + // Then set the netcheck report and netmap, if any. if tt.report != nil { lb.MagicConn().SetLastNetcheckReportForTest(t.Context(), tt.report) @@ -5543,28 +5539,62 @@ func TestReadWriteRouteInfo(t *testing.T) { } } -// staticPolicy maps policy keys to their corresponding values, -// which must be of the correct type (string, []string, bool, etc). +// testPolicy is a [policyclient.Client] with a static mapping of values. +// The map value must be of the correct type (string, []string, bool, etc). // // It is used for testing purposes to simulate policy client behavior. 
// It panics if the values are the wrong type. -type staticPolicy map[pkey.Key]any - type testPolicy struct { - staticPolicy - policyclient.Client + v map[pkey.Key]any + policyclient.NoPolicyClient +} + +func (sp *testPolicy) Set(key pkey.Key, value any) { + if sp.v == nil { + sp.v = make(map[pkey.Key]any) + } + sp.v[key] = value } func (sp testPolicy) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { - if val, ok := sp.staticPolicy[key]; ok { + if val, ok := sp.v[key]; ok { if arr, ok := val.([]string); ok { return arr, nil } - return nil, fmt.Errorf("key %s is not a []string", key) + panic(fmt.Sprintf("key %s is not a []string", key)) + } + return defaultVal, nil +} + +func (sp testPolicy) GetString(key pkey.Key, defaultVal string) (string, error) { + if val, ok := sp.v[key]; ok { + if str, ok := val.(string); ok { + return str, nil + } + panic(fmt.Sprintf("key %s is not a string", key)) + } + return defaultVal, nil +} + +func (sp testPolicy) GetBoolean(key pkey.Key, defaultVal bool) (bool, error) { + if val, ok := sp.v[key]; ok { + if b, ok := val.(bool); ok { + return b, nil + } + panic(fmt.Sprintf("key %s is not a bool", key)) } return defaultVal, nil } +func (sp testPolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { + for _, key := range keys { + if _, ok := sp.v[key]; ok { + return true, nil + } + } + return false, nil +} + func TestFillAllowedSuggestions(t *testing.T) { tests := []struct { name string @@ -5598,13 +5628,10 @@ func TestFillAllowedSuggestions(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - polc := testPolicy{ - staticPolicy: staticPolicy{ - pkey.AllowedSuggestedExitNodes: tt.allowPolicy, - }, - } + var pol testPolicy + pol.Set(pkey.AllowedSuggestedExitNodes, tt.allowPolicy) - got := fillAllowedSuggestions(polc) + got := fillAllowedSuggestions(pol) if got == nil { if tt.want == nil { return diff --git a/tsd/tsd.go b/tsd/tsd.go index b7194a3d7b5e6..17795d3c52e86 100644 --- a/tsd/tsd.go +++ 
b/tsd/tsd.go @@ -59,6 +59,7 @@ type System struct { Netstack SubSystem[NetstackImpl] // actually a *netstack.Impl DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] + PolicyClient SubSystem[policyclient.Client] // InitialConfig is initial server config, if any. // It is nil if the node is not in declarative mode. @@ -127,6 +128,8 @@ func (s *System) Set(v any) { s.DriveForLocal.Set(v) case drive.FileSystemForRemote: s.DriveForRemote.Set(v) + case policyclient.Client: + s.PolicyClient.Set(v) default: panic(fmt.Sprintf("unknown type %T", v)) } @@ -169,6 +172,9 @@ func (s *System) UserMetricsRegistry() *usermetric.Registry { // PolicyClientOrDefault returns the policy client if set or a no-op default // otherwise. It always returns a non-nil value. func (s *System) PolicyClientOrDefault() policyclient.Client { + if client, ok := s.PolicyClient.GetOK(); ok { + return client + } return getPolicyClient() } From 9e9bf130633fc1816646e5f1834054c0d7e551dc Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 2 Sep 2025 15:57:31 -0700 Subject: [PATCH 0283/1093] ipn/ipnlocal: revert some locking changes ahead of release branch cut (#17011) --- ipn/ipnlocal/local.go | 416 ++++++++++++++++++------------------- ipn/ipnlocal/local_test.go | 2 +- ipn/ipnlocal/profiles.go | 15 +- 3 files changed, 212 insertions(+), 221 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5f70ae8ef0d00..54dcda30aa095 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -799,8 +799,8 @@ func (b *LocalBackend) Dialer() *tsdial.Dialer { // It returns (false, nil) if not running in declarative mode, (true, nil) on // success, or (false, error) on failure. 
func (b *LocalBackend) ReloadConfig() (ok bool, err error) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() if b.conf == nil { return false, nil } @@ -808,7 +808,7 @@ func (b *LocalBackend) ReloadConfig() (ok bool, err error) { if err != nil { return false, err } - if err := b.setConfigLocked(conf); err != nil { + if err := b.setConfigLockedOnEntry(conf, unlock); err != nil { return false, fmt.Errorf("error setting config: %w", err) } @@ -865,9 +865,10 @@ func (b *LocalBackend) setStateLocked(state ipn.State) { } } -// setConfigLocked uses the provided config to update the backend's prefs +// setConfigLockedOnEntry uses the provided config to update the backend's prefs // and other state. -func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { +func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlockOnce) error { + defer unlock() p := b.pm.CurrentPrefs().AsStruct() mp, err := conf.Parsed.ToPrefs() if err != nil { @@ -875,7 +876,8 @@ func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { } p.ApplyEdits(&mp) b.setStaticEndpointsFromConfigLocked(conf) - b.setPrefsLocked(p) + b.setPrefsLockedOnEntry(p, unlock) + b.conf = conf return nil } @@ -1503,6 +1505,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control return } if st.Err != nil { + // The following do not depend on any data for which we need b locked. + unlock.UnlockEarly() if errors.Is(st.Err, io.EOF) { b.logf("[v1] Received error: EOF") return @@ -1511,7 +1515,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control var uerr controlclient.UserVisibleError if errors.As(st.Err, &uerr) { s := uerr.UserVisibleError() - b.sendToLocked(ipn.Notify{ErrMessage: &s}, allClients) + b.send(ipn.Notify{ErrMessage: &s}) } return } @@ -1960,13 +1964,13 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { // // b.mu must not be held. 
func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() prefs := b.pm.CurrentPrefs().AsStruct() if !b.reconcilePrefsLocked(prefs) { + unlock.UnlockEarly() return prefs.View(), false } - return b.setPrefsLocked(prefs), true + return b.setPrefsLockedOnEntry(prefs, unlock), true } // sysPolicyChanged is a callback triggered by syspolicy when it detects @@ -2329,8 +2333,8 @@ func (b *LocalBackend) Start(opts ipn.Options) error { clientToShutdown.Shutdown() } }() - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() if opts.UpdatePrefs != nil { if err := b.checkPrefsLocked(opts.UpdatePrefs); err != nil { @@ -2536,7 +2540,8 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // regress tsnet.Server restarts. cc.Login(controlclient.LoginDefault) } - b.stateMachineLocked() + b.stateMachineLockedOnEntry(unlock) + return nil } @@ -3537,8 +3542,8 @@ func (b *LocalBackend) onClientVersion(v *tailcfg.ClientVersion) { } func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() prefs := b.pm.CurrentPrefs() if !prefs.Valid() { @@ -3560,14 +3565,14 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) - _, err := b.editPrefsLocked( + _, err := b.editPrefsLockedOnEntry( ipnauth.Self, &ipn.MaskedPrefs{ Prefs: *prefsClone, AutoUpdateSet: ipn.AutoUpdatePrefsMask{ ApplySet: true, }, - }) + }, unlock) if err != nil { b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) return @@ -4004,8 +4009,8 @@ func (b *LocalBackend) shouldUploadServices() bool { // // On non-multi-user systems, the actor should be set to nil. 
func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() var userIdentifier string if user := cmp.Or(actor, b.currentUser); user != nil { @@ -4027,7 +4032,7 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { action = "connected" } reason := fmt.Sprintf("client %s (%s)", action, userIdentifier) - b.switchToBestProfileLocked(reason) + b.switchToBestProfileLockedOnEntry(reason, unlock) } // SwitchToBestProfile selects the best profile to use, @@ -4037,14 +4042,13 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { // or disconnecting, or a change in the desktop session state, and is used // for logging. func (b *LocalBackend) SwitchToBestProfile(reason string) { - b.mu.Lock() - defer b.mu.Unlock() - b.switchToBestProfileLocked(reason) + b.switchToBestProfileLockedOnEntry(reason, b.lockAndGetUnlock()) } -// switchToBestProfileLocked is like [LocalBackend.SwitchToBestProfile], but -// the caller must hold b.mu. -func (b *LocalBackend) switchToBestProfileLocked(reason string) { +// switchToBestProfileLockedOnEntry is like [LocalBackend.SwitchToBestProfile], +// but b.mu must held on entry. It is released on exit. +func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { + defer unlock() oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() profile, background := b.resolveBestProfileLocked() cp, switched, err := b.pm.SwitchToProfile(profile) @@ -4075,7 +4079,7 @@ func (b *LocalBackend) switchToBestProfileLocked(reason string) { if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { b.resetDialPlan() } - if err := b.resetForProfileChangeLocked(); err != nil { + if err := b.resetForProfileChangeLockedOnEntry(unlock); err != nil { // TODO(nickkhyl): The actual reset cannot fail. However, // the TKA initialization or [LocalBackend.Start] can fail. 
// These errors are not critical as far as we're concerned. @@ -4311,8 +4315,8 @@ func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { // Setting the value to false when use of an exit node is already false is not an error, // nor is true when the exit node is already in use. func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.PrefsView, error) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() p0 := b.pm.CurrentPrefs() if v && p0.ExitNodeID() != "" { @@ -4353,7 +4357,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P mp.InternalExitNodePrior = p0.ExitNodeID() } } - return b.editPrefsLocked(actor, mp) + return b.editPrefsLockedOnEntry(actor, mp, unlock) } // MaybeClearAppConnector clears the routes from any AppConnector if @@ -4382,9 +4386,7 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip return ipn.PrefsView{}, errors.New("can't set Internal fields") } - b.mu.Lock() - defer b.mu.Unlock() - return b.editPrefsLocked(actor, mp) + return b.editPrefsLockedOnEntry(actor, mp, b.lockAndGetUnlock()) } // checkEditPrefsAccessLocked checks whether the current user has access @@ -4572,8 +4574,8 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { profileID := b.pm.CurrentProfile().ID() var reconnectTimer tstime.TimerController reconnectTimer = b.clock.AfterFunc(d, func() { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() if b.reconnectTimer != reconnectTimer { // We're either not the most recent timer, or we lost the race when @@ -4591,7 +4593,7 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } mp := &ipn.MaskedPrefs{WantRunningSet: true, Prefs: ipn.Prefs{WantRunning: true}} - if _, err := b.editPrefsLocked(ipnauth.Self, mp); err != nil { + if _, err := b.editPrefsLockedOnEntry(ipnauth.Self, mp, unlock); err != nil { b.logf("failed to automatically 
reconnect as %q after %v: %v", cp.Name(), d, err) } else { b.logf("automatically reconnected as %q after %v", cp.Name(), d) @@ -4620,8 +4622,11 @@ func (b *LocalBackend) stopReconnectTimerLocked() { } } -// The caller must hold b.mu. -func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { +// Warning: b.mu must be held on entry, but it unlocks it on the way out. +// TODO(bradfitz): redo the locking on all these weird methods like this. +func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { + defer unlock() // for error paths + p0 := b.pm.CurrentPrefs() // Check if the changes in mp are allowed. @@ -4658,10 +4663,12 @@ func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) // before the modified prefs are actually set for the current profile. b.onEditPrefsLocked(actor, mp, p0, p1.View()) - newPrefs := b.setPrefsLocked(p1) + newPrefs := b.setPrefsLockedOnEntry(p1, unlock) + + // Note: don't perform any actions for the new prefs here. Not + // every prefs change goes through EditPrefs. Put your actions + // in setPrefsLocksOnEntry instead. - // Note: don't perform any actions for the new prefs here. Not every prefs - // change goes through EditPrefs. Put your actions in setPrefsLocked instead. // This should return the public prefs, not the private ones. return stripKeysFromPrefs(newPrefs), nil } @@ -4709,9 +4716,12 @@ func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() } -// setPrefsLocked requires b.mu be held to call it. It returns a read-only -// copy of the new prefs. -func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { +// setPrefsLockedOnEntry requires b.mu be held to call it, but it +// unlocks b.mu when done. newp ownership passes to this function. 
+// It returns a read-only copy of the new prefs. +func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) ipn.PrefsView { + defer unlock() + cn := b.currentNode() netMap := cn.NetMap() b.setAtomicValuesFromPrefsLocked(newp.View()) @@ -4780,33 +4790,28 @@ func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { b.stopOfflineAutoUpdate() } - // Update status that needs to happen outside the lock, but reacquire it - // before returning (including in case of panics). - func() { - b.mu.Unlock() - defer b.mu.Lock() + unlock.UnlockEarly() - if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { - b.doSetHostinfoFilterServices() - } + if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { + b.doSetHostinfoFilterServices() + } - if netMap != nil { - b.MagicConn().SetDERPMap(netMap.DERPMap) - } + if netMap != nil { + b.MagicConn().SetDERPMap(netMap.DERPMap) + } - if !oldp.WantRunning() && newp.WantRunning && cc != nil { - b.logf("transitioning to running; doing Login...") - cc.Login(controlclient.LoginDefault) - } + if !oldp.WantRunning() && newp.WantRunning && cc != nil { + b.logf("transitioning to running; doing Login...") + cc.Login(controlclient.LoginDefault) + } - if oldp.WantRunning() != newp.WantRunning { - b.stateMachine() - } else { - b.authReconfig() - } + if oldp.WantRunning() != newp.WantRunning { + b.stateMachine() + } else { + b.authReconfig() + } - b.send(ipn.Notify{Prefs: &prefs}) - }() + b.send(ipn.Notify{Prefs: &prefs}) return prefs } @@ -4949,34 +4954,36 @@ func (b *LocalBackend) peerAPIServicesLocked() (ret []tailcfg.Service) { // TODO(danderson): we shouldn't be mangling hostinfo here after // painstakingly constructing it in twelvety other places. func (b *LocalBackend) doSetHostinfoFilterServices() { - // Check the control client, hostinfo, and services under the mutex. - // On return, either both the client and hostinfo are nil, or both are non-nil. 
- // When non-nil, the Hostinfo is a clone of the value carried by b, safe to modify. - cc, hi, peerAPIServices := func() (controlclient.Client, *tailcfg.Hostinfo, []tailcfg.Service) { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() - if b.cc == nil { - return nil, nil, nil // control client isn't up yet - } else if b.hostinfo == nil { - b.logf("[unexpected] doSetHostinfoFilterServices with nil hostinfo") - return nil, nil, nil - } - svc := b.peerAPIServicesLocked() - if b.egg { - svc = append(svc, tailcfg.Service{Proto: "egg", Port: 1}) - } - // Make a clone of hostinfo so we can mutate the service field, below. - return b.cc, b.hostinfo.Clone(), svc - }() - if cc == nil || hi == nil { + cc := b.cc + if cc == nil { + // Control client isn't up yet. return } + if b.hostinfo == nil { + b.logf("[unexpected] doSetHostinfoFilterServices with nil hostinfo") + return + } + peerAPIServices := b.peerAPIServicesLocked() + if b.egg { + peerAPIServices = append(peerAPIServices, tailcfg.Service{Proto: "egg", Port: 1}) + } + // TODO(maisem,bradfitz): store hostinfo as a view, not as a mutable struct. + hi := *b.hostinfo // shallow copy + unlock.UnlockEarly() + + // Make a shallow copy of hostinfo so we can mutate + // at the Service field. if !b.shouldUploadServices() { hi.Services = []tailcfg.Service{} } - hi.Services = append(hi.Services, peerAPIServices...) + // Don't mutate hi.Service's underlying array. Append to + // the slice with no free capacity. + c := len(hi.Services) + hi.Services = append(hi.Services[:c:c], peerAPIServices...) hi.PushDeviceToken = b.pushDeviceToken.Load() // Compare the expected ports from peerAPIServices to the actual ports in hi.Services. 
@@ -4986,7 +4993,7 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { b.logf("Hostinfo peerAPI ports changed: expected %v, got %v", expectedPorts, actualPorts) } - cc.SetHostinfo(hi) + cc.SetHostinfo(&hi) } type portPair struct { @@ -5665,13 +5672,13 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // really this is more "one of several places in which random things // happen". func (b *LocalBackend) enterState(newState ipn.State) { - b.mu.Lock() - defer b.mu.Unlock() - b.enterStateLocked(newState) + unlock := b.lockAndGetUnlock() + b.enterStateLockedOnEntry(newState, unlock) } -// enterStateLocked is like enterState but requires the caller to hold b.mu. -func (b *LocalBackend) enterStateLocked(newState ipn.State) { +// enterStateLockedOnEntry is like enterState but requires b.mu be held to call +// it, but it unlocks b.mu when done (via unlock, a once func). +func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlockOnce) { cn := b.currentNode() oldState := b.state b.setStateLocked(newState) @@ -5720,56 +5727,51 @@ func (b *LocalBackend) enterStateLocked(newState ipn.State) { b.maybeStartOfflineAutoUpdate(prefs) } - // Resolve the state transition outside the lock, but reacquire it before - // returning (including in case of panics). - func() { - b.mu.Unlock() - defer b.mu.Lock() + unlock.UnlockEarly() - // prefs may change irrespective of state; WantRunning should be explicitly - // set before potential early return even if the state is unchanged. - b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) - if oldState == newState { - return - } - b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", - oldState, newState, prefs.WantRunning(), netMap != nil) - b.send(ipn.Notify{State: &newState}) + // prefs may change irrespective of state; WantRunning should be explicitly + // set before potential early return even if the state is unchanged. 
+ b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) + if oldState == newState { + return + } + b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", + oldState, newState, prefs.WantRunning(), netMap != nil) + b.send(ipn.Notify{State: &newState}) - switch newState { - case ipn.NeedsLogin: - systemd.Status("Needs login: %s", authURL) - if b.seamlessRenewalEnabled() { - break - } - b.blockEngineUpdates(true) - fallthrough - case ipn.Stopped, ipn.NoState: - // Unconfigure the engine if it has stopped (WantRunning is set to false) - // or if we've switched to a different profile and the state is unknown. - err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) - if err != nil { - b.logf("Reconfig(down): %v", err) - } + switch newState { + case ipn.NeedsLogin: + systemd.Status("Needs login: %s", authURL) + if b.seamlessRenewalEnabled() { + break + } + b.blockEngineUpdates(true) + fallthrough + case ipn.Stopped, ipn.NoState: + // Unconfigure the engine if it has stopped (WantRunning is set to false) + // or if we've switched to a different profile and the state is unknown. 
+ err := b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) + if err != nil { + b.logf("Reconfig(down): %v", err) + } - if newState == ipn.Stopped && authURL == "" { - systemd.Status("Stopped; run 'tailscale up' to log in") - } - case ipn.Starting, ipn.NeedsMachineAuth: - b.authReconfig() - // Needed so that UpdateEndpoints can run - b.e.RequestStatus() - case ipn.Running: - var addrStrs []string - addrs := netMap.GetAddresses() - for _, p := range addrs.All() { - addrStrs = append(addrStrs, p.Addr().String()) - } - systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) - default: - b.logf("[unexpected] unknown newState %#v", newState) + if newState == ipn.Stopped && authURL == "" { + systemd.Status("Stopped; run 'tailscale up' to log in") } - }() + case ipn.Starting, ipn.NeedsMachineAuth: + b.authReconfig() + // Needed so that UpdateEndpoints can run + b.e.RequestStatus() + case ipn.Running: + var addrStrs []string + addrs := netMap.GetAddresses() + for _, p := range addrs.All() { + addrStrs = append(addrStrs, p.Addr().String()) + } + systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) + default: + b.logf("[unexpected] unknown newState %#v", newState) + } } func (b *LocalBackend) hasNodeKeyLocked() bool { @@ -5869,29 +5871,27 @@ func (b *LocalBackend) nextStateLocked() ipn.State { // TODO(apenwarr): use a channel or something to prevent reentrancy? // Or maybe just call the state machine from fewer places. func (b *LocalBackend) stateMachine() { - b.mu.Lock() - defer b.mu.Unlock() - b.stateMachineLocked() + unlock := b.lockAndGetUnlock() + b.stateMachineLockedOnEntry(unlock) } -// stateMachineLocked is like stateMachine but requires b.mu be held. -func (b *LocalBackend) stateMachineLocked() { - b.enterStateLocked(b.nextStateLocked()) +// stateMachineLockedOnEntry is like stateMachine but requires b.mu be held to +// call it, but it unlocks b.mu when done (via unlock, a once func). 
+func (b *LocalBackend) stateMachineLockedOnEntry(unlock unlockOnce) { + b.enterStateLockedOnEntry(b.nextStateLocked(), unlock) } -// lockAndGetUnlock locks b.mu and returns a function that will unlock it at -// most once. +// lockAndGetUnlock locks b.mu and returns a sync.OnceFunc function that will +// unlock it at most once. // -// TODO(creachadair): This was added as a guardrail against the unfortunate -// "LockedOnEntry" methods that were originally used in this package (primarily -// enterStateLockedOnEntry) that required b.mu held to be locked on entry to -// the function but unlocked the mutex on their way out. -// -// Now that these have all been updated, we could remove this type and acquire -// and release locks directly. For now, however, I've left it alone to reduce -// the scope of lock-related changes. -// -// See: https://github.com/tailscale/tailscale/issues/11649 +// This is all very unfortunate but exists as a guardrail against the +// unfortunate "lockedOnEntry" methods in this package (primarily +// enterStateLockedOnEntry) that require b.mu held to be locked on entry to the +// function but unlock the mutex on their way out. As a stepping stone to +// cleaning things up (as of 2024-04-06), we at least pass the unlock func +// around now and defer unlock in the caller to avoid missing unlocks and double +// unlocks. TODO(bradfitz,maisem): make the locking in this package more +// traditional (simple). See https://github.com/tailscale/tailscale/issues/11649 func (b *LocalBackend) lockAndGetUnlock() (unlock unlockOnce) { b.mu.Lock() var unlocked atomic.Bool @@ -6059,35 +6059,30 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool { // Logout logs out the current profile, if any, and waits for the logout to // complete. func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { - // These values are initialized inside the lock on success. 
- var cc controlclient.Client - var profile ipn.LoginProfileView - - if err := func() error { - b.mu.Lock() - defer b.mu.Unlock() - - if !b.hasNodeKeyLocked() { - // Already logged out. - return nil - } - cc = b.cc + unlock := b.lockAndGetUnlock() + defer unlock() - // Grab the current profile before we unlock the mutex, so that we can - // delete it later. - profile = b.pm.CurrentProfile() + if !b.hasNodeKeyLocked() { + // Already logged out. + return nil + } + cc := b.cc - _, err := b.editPrefsLocked( - actor, - &ipn.MaskedPrefs{ - WantRunningSet: true, - LoggedOutSet: true, - Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, - }) - return err - }(); err != nil { + // Grab the current profile before we unlock the mutex, so that we can + // delete it later. + profile := b.pm.CurrentProfile() + + _, err := b.editPrefsLockedOnEntry( + actor, + &ipn.MaskedPrefs{ + WantRunningSet: true, + LoggedOutSet: true, + Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, + }, unlock) + if err != nil { return err } + // b.mu is now unlocked, after editPrefsLockedOnEntry. // Clear any previous dial plan(s), if set. b.resetDialPlan() @@ -6107,14 +6102,14 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { return err } - b.mu.Lock() - defer b.mu.Unlock() + unlock = b.lockAndGetUnlock() + defer unlock() if err := b.pm.DeleteProfile(profile.ID()); err != nil { b.logf("error deleting profile: %v", err) return err } - return b.resetForProfileChangeLocked() + return b.resetForProfileChangeLockedOnEntry(unlock) } // setNetInfo sets b.hostinfo.NetInfo to ni, and passes ni along to the @@ -7290,8 +7285,8 @@ func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool // It will restart the backend on success. // If the profile is not known, it returns an errProfileNotFound. 
func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() if _, changed, err := b.pm.SwitchToProfileByID(profile); !changed || err != nil { @@ -7303,7 +7298,7 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { b.resetDialPlan() } - return b.resetForProfileChangeLocked() + return b.resetForProfileChangeLockedOnEntry(unlock) } func (b *LocalBackend) initTKALocked() error { @@ -7383,10 +7378,12 @@ func (b *LocalBackend) getHardwareAddrs() ([]string, error) { return addrs, nil } -// resetForProfileChangeLocked resets the backend for a profile change. +// resetForProfileChangeLockedOnEntry resets the backend for a profile change. // -// The caller must hold b.mu. -func (b *LocalBackend) resetForProfileChangeLocked() error { +// b.mu must held on entry. It is released on exit. +func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) error { + defer unlock() + if b.shutdownCalled { // Prevent a call back to Start during Shutdown, which calls Logout for // ephemeral nodes, which can then call back here. But we're shutting @@ -7417,26 +7414,19 @@ func (b *LocalBackend) resetForProfileChangeLocked() error { b.resetAlwaysOnOverrideLocked() b.extHost.NotifyProfileChange(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) - b.enterStateLocked(ipn.NoState) - - // Update health status and start outside the lock. - return func() error { - b.mu.Unlock() - defer b.mu.Lock() - - b.health.SetLocalLogConfigHealth(nil) - if tkaErr != nil { - return tkaErr - } - return b.Start(ipn.Options{}) - }() + b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu + b.health.SetLocalLogConfigHealth(nil) + if tkaErr != nil { + return tkaErr + } + return b.Start(ipn.Options{}) } // DeleteProfile deletes a profile with the given ID. 
// If the profile is not known, it is a no-op. func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() needToRestart := b.pm.CurrentProfile().ID() == p if err := b.pm.DeleteProfile(p); err != nil { @@ -7448,7 +7438,7 @@ func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { if !needToRestart { return nil } - return b.resetForProfileChangeLocked() + return b.resetForProfileChangeLockedOnEntry(unlock) } // CurrentProfile returns the current LoginProfile. @@ -7461,8 +7451,8 @@ func (b *LocalBackend) CurrentProfile() ipn.LoginProfileView { // NewProfile creates and switches to the new profile. func (b *LocalBackend) NewProfile() error { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() b.pm.SwitchToNewProfile() @@ -7470,7 +7460,7 @@ func (b *LocalBackend) NewProfile() error { // set. Conservatively reset the dialPlan. b.resetDialPlan() - return b.resetForProfileChangeLocked() + return b.resetForProfileChangeLockedOnEntry(unlock) } // ListProfiles returns a list of all LoginProfiles. @@ -7485,8 +7475,8 @@ func (b *LocalBackend) ListProfiles() []ipn.LoginProfileView { // backend is left with a new profile, ready for StartLoginInterative to be // called to register it as new node. 
func (b *LocalBackend) ResetAuth() error { - b.mu.Lock() - defer b.mu.Unlock() + unlock := b.lockAndGetUnlock() + defer unlock() prevCC := b.resetControlClientLocked() if prevCC != nil { @@ -7499,7 +7489,7 @@ func (b *LocalBackend) ResetAuth() error { return err } b.resetDialPlan() // always reset if we're removing everything - return b.resetForProfileChangeLocked() + return b.resetForProfileChangeLockedOnEntry(unlock) } func (b *LocalBackend) GetPeerEndpointChanges(ctx context.Context, ip netip.Addr) ([]magicsock.EndpointChange, error) { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 4843a941f6acc..a3a26af042012 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4299,7 +4299,7 @@ func (b *LocalBackend) SetPrefsForTest(newp *ipn.Prefs) { } unlock := b.lockAndGetUnlock() defer unlock() - b.setPrefsLocked(newp) + b.setPrefsLockedOnEntry(newp, unlock) } type peerOptFunc func(*tailcfg.Node) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 7519ee157a029..1d312cfa606b3 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -180,7 +180,7 @@ func (pm *profileManager) SwitchToProfile(profile ipn.LoginProfileView) (cp ipn. f(pm.currentProfile, pm.prefs, false) } // Do not call pm.extHost.NotifyProfileChange here; it is invoked in - // [LocalBackend.resetForProfileChangeLocked] after the netmap reset. + // [LocalBackend.resetForProfileChangeLockedOnEntry] after the netmap reset. // TODO(nickkhyl): Consider moving it here (or into the stateChangeCb handler // in [LocalBackend]) once the profile/node state, including the netmap, // is actually tied to the current profile. @@ -359,9 +359,9 @@ func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) // where prefsIn is the previous profile's prefs with an updated Persist, LoggedOut, // WantRunning and possibly other fields. This may not be the desired behavior. 
// - // Additionally, LocalBackend doesn't treat it as a proper profile switch, - // meaning that [LocalBackend.resetForProfileChangeLocked] is not called and - // certain node/profile-specific state may not be reset as expected. + // Additionally, LocalBackend doesn't treat it as a proper profile switch, meaning that + // [LocalBackend.resetForProfileChangeLockedOnEntry] is not called and certain + // node/profile-specific state may not be reset as expected. // // However, [profileManager] notifies [ipnext.Extension]s about the profile change, // so features migrated from LocalBackend to external packages should not be affected. @@ -494,9 +494,10 @@ func (pm *profileManager) setProfilePrefsNoPermCheck(profile ipn.LoginProfileVie oldPrefs := pm.prefs pm.prefs = clonedPrefs - // Sadly, profile prefs can be changed in multiple ways. It's pretty - // chaotic, and in many cases callers use unexported methods of the - // profile manager instead of going through [LocalBackend.setPrefsLocked] + // Sadly, profile prefs can be changed in multiple ways. + // It's pretty chaotic, and in many cases callers use + // unexported methods of the profile manager instead of + // going through [LocalBackend.setPrefsLockedOnEntry] // or at least using [profileManager.SetPrefs]. // // While we should definitely clean this up to improve From 2b3e53304871fccb4f91fdef32a59ef8a30c9752 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 12:49:37 -0700 Subject: [PATCH 0284/1093] util/syspolicy: finish plumbing policyclient, add feature/syspolicy, move global impl This is step 4 of making syspolicy a build-time feature. This adds a policyclient.Get() accessor to return the correct implementation to use: either the real one, or the no-op one. 
(A third type, a static one for testing, also exists, so in general a policyclient.Client should be plumbed around and not always fetched via policyclient.Get whenever possible, especially if tests need to use alternate syspolicy) Updates #16998 Updates #12614 Change-Id: Iaf19670744a596d5918acfa744f5db4564272978 Signed-off-by: Brad Fitzpatrick --- client/web/auth.go | 2 +- client/web/web.go | 12 +- client/web/web_test.go | 2 + cmd/derper/depaware.txt | 20 ++-- cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscale/cli/maybe_syspolicy.go | 8 ++ cmd/tailscale/cli/up.go | 3 +- cmd/tailscale/depaware.txt | 13 ++- cmd/tailscaled/depaware.txt | 3 +- cmd/tailscaled/tailscaled.go | 8 +- cmd/tailscaled/tailscaled_windows.go | 6 +- cmd/tsidp/depaware.txt | 3 +- feature/condregister/maybe_syspolicy.go | 8 ++ feature/syspolicy/syspolicy.go | 7 ++ ipn/desktop/extension.go | 4 +- ipn/ipnauth/policy.go | 6 +- ipn/ipnlocal/c2n.go | 3 +- ipn/ipnlocal/local.go | 21 ++-- ipn/prefs.go | 16 +-- ipn/prefs_test.go | 8 +- logpolicy/logpolicy.go | 4 +- logpolicy/maybe_syspolicy.go | 8 ++ net/dns/manager.go | 3 +- net/dns/manager_darwin.go | 3 +- net/dns/manager_default.go | 3 +- net/dns/manager_freebsd.go | 3 +- net/dns/manager_linux.go | 3 +- net/dns/manager_openbsd.go | 3 +- net/dns/manager_plan9.go | 3 +- net/dns/manager_solaris.go | 3 +- net/dns/manager_windows.go | 12 +- net/dns/manager_windows_test.go | 5 +- tsd/syspolicy_off.go | 12 -- tsd/syspolicy_on.go | 64 ----------- tsd/tsd.go | 2 +- tsnet/depaware.txt | 3 +- .../tailscaled_deps_test_darwin.go | 2 +- .../tailscaled_deps_test_freebsd.go | 2 +- .../integration/tailscaled_deps_test_linux.go | 2 +- .../tailscaled_deps_test_openbsd.go | 2 +- .../tailscaled_deps_test_windows.go | 2 +- util/syspolicy/policyclient/policyclient.go | 25 +++- util/syspolicy/syspolicy.go | 108 ++++++++++++------ util/syspolicy/syspolicy_test.go | 16 +-- 44 files changed, 242 insertions(+), 207 deletions(-) create mode 100644 
cmd/tailscale/cli/maybe_syspolicy.go create mode 100644 feature/condregister/maybe_syspolicy.go create mode 100644 feature/syspolicy/syspolicy.go create mode 100644 logpolicy/maybe_syspolicy.go delete mode 100644 tsd/syspolicy_off.go delete mode 100644 tsd/syspolicy_on.go diff --git a/client/web/auth.go b/client/web/auth.go index 8b195a417f415..27eb24ee444c5 100644 --- a/client/web/auth.go +++ b/client/web/auth.go @@ -192,7 +192,7 @@ func (s *Server) controlSupportsCheckMode(ctx context.Context) bool { if err != nil { return true } - controlURL, err := url.Parse(prefs.ControlURLOrDefault()) + controlURL, err := url.Parse(prefs.ControlURLOrDefault(s.polc)) if err != nil { return true } diff --git a/client/web/web.go b/client/web/web.go index f3158cd1f6ff5..71a015daba465 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -5,6 +5,7 @@ package web import ( + "cmp" "context" "encoding/json" "errors" @@ -36,6 +37,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/httpm" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -49,6 +51,7 @@ type Server struct { mode ServerMode logf logger.Logf + polc policyclient.Client // must be non-nil lc *local.Client timeNow func() time.Time @@ -139,9 +142,13 @@ type ServerOpts struct { TimeNow func() time.Time // Logf optionally provides a logger function. - // log.Printf is used as default. + // If nil, log.Printf is used as default. Logf logger.Logf + // PolicyClient, if non-nil, will be used to fetch policy settings. + // If nil, the default policy client will be used. + PolicyClient policyclient.Client + // The following two fields are required and used exclusively // in ManageServerMode to facilitate the control server login // check step for authorizing browser sessions. 
@@ -178,6 +185,7 @@ func NewServer(opts ServerOpts) (s *Server, err error) { } s = &Server{ mode: opts.Mode, + polc: cmp.Or(opts.PolicyClient, policyclient.Get()), logf: opts.Logf, devMode: envknob.Bool("TS_DEBUG_WEB_CLIENT_DEV"), lc: opts.LocalClient, @@ -950,7 +958,7 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { UnraidToken: os.Getenv("UNRAID_CSRF_TOKEN"), RunningSSHServer: prefs.RunSSH, URLPrefix: strings.TrimSuffix(s.pathPrefix, "/"), - ControlAdminURL: prefs.AdminPageURL(), + ControlAdminURL: prefs.AdminPageURL(s.polc), LicensesURL: licenses.LicensesURL(), Features: availableFeatures(), diff --git a/client/web/web_test.go b/client/web/web_test.go index 12dbb5c79b13a..9ba16bccf4884 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/views" "tailscale.com/util/httpm" + "tailscale.com/util/syspolicy/policyclient" ) func TestQnapAuthnURL(t *testing.T) { @@ -576,6 +577,7 @@ func TestServeAuth(t *testing.T) { timeNow: func() time.Time { return timeNow }, newAuthURL: mockNewAuthURL, waitAuthURL: mockWaitAuthURL, + polc: policyclient.NoPolicyClient{}, } successCookie := "ts-cookie-success" diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 2c6c4690ca885..52b82b2289b49 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -170,21 +170,15 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/cmd/derper+ - tailscale.com/util/syspolicy from tailscale.com/ipn - tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ - tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ - tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source + 
tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ - tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop - tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ - tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy - tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ - tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/testenv from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/ipn + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy/policyclient+ + tailscale.com/util/syspolicy/setting from tailscale.com/client/local + tailscale.com/util/testenv from tailscale.com/net/bakedroots+ tailscale.com/util/usermetric from tailscale.com/health tailscale.com/util/vizerror from tailscale.com/tailcfg+ W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ - W 💣 tailscale.com/util/winutil/gp from tailscale.com/util/syspolicy/source W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/version from tailscale.com/derp+ tailscale.com/version/distro from tailscale.com/envknob+ @@ -205,7 +199,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/util/winutil+ - golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ + golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting L golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http+ @@ -393,7 +387,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa os from crypto/internal/sysrand+ 
os/exec from github.com/coreos/go-iptables/iptables+ os/signal from tailscale.com/cmd/derper - W os/user from tailscale.com/util/winutil+ + W os/user from tailscale.com/util/winutil path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ reflect from crypto/x509+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index ccba967070b03..d94b5b6cf52f7 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -798,6 +798,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -951,7 +952,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/set from tailscale.com/cmd/k8s-operator+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/ipn+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source diff --git a/cmd/tailscale/cli/maybe_syspolicy.go b/cmd/tailscale/cli/maybe_syspolicy.go new file mode 100644 index 0000000000000..937a278334fd9 --- /dev/null +++ b/cmd/tailscale/cli/maybe_syspolicy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + 
+//go:build !ts_omit_syspolicy + +package cli + +import _ "tailscale.com/feature/syspolicy" diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 1863957d3f143..ebbe3b19e10d9 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -39,6 +39,7 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/views" "tailscale.com/util/dnsname" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version/distro" ) @@ -609,7 +610,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if env.upArgs.json { printUpDoneJSON(ipn.NeedsMachineAuth, "") } else { - fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL()) + fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL(policyclient.Get())) } case ipn.Running: // Done full authentication process diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 047bac6c274c9..4453206366ded 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -106,6 +106,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/envknob/featureknob from tailscale.com/client/web tailscale.com/feature from tailscale.com/tsweb tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ @@ -191,15 +192,15 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/singleflight from tailscale.com/net/dnscache+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ L tailscale.com/util/stringsx from tailscale.com/client/systray - tailscale.com/util/syspolicy from tailscale.com/ipn + tailscale.com/util/syspolicy 
from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ - tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ + tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source tailscale.com/util/syspolicy/pkey from tailscale.com/ipn+ - tailscale.com/util/syspolicy/policyclient from tailscale.com/util/syspolicy/rsop - tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/client/web+ + tailscale.com/util/syspolicy/ptype from tailscale.com/util/syspolicy/policyclient+ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy - tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ + tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ tailscale.com/util/testenv from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/truncate from tailscale.com/cmd/tailscale/cli @@ -228,7 +229,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/pbkdf2 from software.sslmate.com/src/go-pkcs12 golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ - golang.org/x/exp/maps from tailscale.com/util/syspolicy/internal/metrics+ + golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting+ L golang.org/x/image/draw from github.com/fogleman/gg L golang.org/x/image/font from github.com/fogleman/gg+ L golang.org/x/image/font/basicfont from github.com/fogleman/gg diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index ee55f914cfc1e..3d93681439ab1 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -276,6 +276,7 @@ 
tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/relayserver from tailscale.com/feature/condregister + tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/tpm from tailscale.com/feature/condregister @@ -428,7 +429,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ - tailscale.com/util/syspolicy from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index f55535470d7a6..ddf6d9ef68f5d 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -64,8 +64,8 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/multierr" "tailscale.com/util/osshare" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" @@ -773,7 +773,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo // configuration being unavailable (from the noop // manager). More in Issue 4017. 
// TODO(bradfitz): add a Synology-specific DNS manager. - conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.ControlKnobs(), "") // empty interface name + conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name if err != nil { return false, fmt.Errorf("dns.NewOSConfigurator: %w", err) } @@ -807,7 +807,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo return false, fmt.Errorf("creating router: %w", err) } - d, err := dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.ControlKnobs(), devName) + d, err := dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) if err != nil { dev.Close() r.Close() @@ -1012,6 +1012,6 @@ func defaultEncryptState() bool { // (plan9/FreeBSD/etc). return false } - v, _ := syspolicy.GetBoolean(pkey.EncryptState, false) + v, _ := policyclient.Get().GetBoolean(pkey.EncryptState, false) return v } diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 2d4e71d3cb430..3a2edcac51886 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -55,8 +55,8 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/osdiag" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" "tailscale.com/version" @@ -156,7 +156,7 @@ func runWindowsService(pol *logpolicy.Policy) error { if syslog, err := eventlog.Open(serviceName); err == nil { syslogf = func(format string, args ...any) { - if logSCMInteractions, _ := syspolicy.GetBoolean(pkey.LogSCMInteractions, false); logSCMInteractions { + if logSCMInteractions, _ := policyclient.Get().GetBoolean(pkey.LogSCMInteractions, false); logSCMInteractions { syslog.Info(0, fmt.Sprintf(format, args...)) } } 
@@ -390,7 +390,7 @@ func handleSessionChange(chgRequest svc.ChangeRequest) { if chgRequest.Cmd != svc.SessionChange || chgRequest.EventType != windows.WTS_SESSION_UNLOCK { return } - if flushDNSOnSessionUnlock, _ := syspolicy.GetBoolean(pkey.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { + if flushDNSOnSessionUnlock, _ := policyclient.Get().GetBoolean(pkey.FlushDNSOnSessionUnlock, false); flushDNSOnSessionUnlock { log.Printf("Received WTS_SESSION_UNLOCK event, initiating DNS flush.") go func() { err := dns.Flush() diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 155ad03e3b029..efe9456d814c7 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -240,6 +240,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -380,7 +381,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/ipn+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source diff --git a/feature/condregister/maybe_syspolicy.go b/feature/condregister/maybe_syspolicy.go new file mode 100644 index 
0000000000000..49ec5c02c63e1 --- /dev/null +++ b/feature/condregister/maybe_syspolicy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package condregister + +import _ "tailscale.com/feature/syspolicy" diff --git a/feature/syspolicy/syspolicy.go b/feature/syspolicy/syspolicy.go new file mode 100644 index 0000000000000..08c3cf3736b29 --- /dev/null +++ b/feature/syspolicy/syspolicy.go @@ -0,0 +1,7 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package syspolicy provides an interface for system-wide policy management. +package syspolicy + +import _ "tailscale.com/util/syspolicy" // for its registration side effects diff --git a/ipn/desktop/extension.go b/ipn/desktop/extension.go index 15d239f89c713..0277726714512 100644 --- a/ipn/desktop/extension.go +++ b/ipn/desktop/extension.go @@ -18,8 +18,8 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/types/logger" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) // featureName is the name of the feature implemented by this package. @@ -136,7 +136,7 @@ func (e *desktopSessionsExt) getBackgroundProfile(profiles ipnext.ProfileStore) e.mu.Lock() defer e.mu.Unlock() - if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); !alwaysOn { + if alwaysOn, _ := policyclient.Get().GetBoolean(pkey.AlwaysOn, false); !alwaysOn { // If the Always-On mode is disabled, there's no background profile // as far as the desktop session extension is concerned. 
return ipn.LoginProfileView{} diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index 36004b293ead2..42366dbd94990 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -10,8 +10,8 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" "tailscale.com/tailcfg" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" ) type actorWithPolicyChecks struct{ Actor } @@ -51,10 +51,10 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, // TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] // and corp to this package. func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error { - if alwaysOn, _ := syspolicy.GetBoolean(pkey.AlwaysOn, false); !alwaysOn { + if alwaysOn, _ := policyclient.Get().GetBoolean(pkey.AlwaysOn, false); !alwaysOn { return nil } - if allowWithReason, _ := syspolicy.GetBoolean(pkey.AlwaysOnOverrideWithReason, false); !allowWithReason { + if allowWithReason, _ := policyclient.Get().GetBoolean(pkey.AlwaysOnOverrideWithReason, false); !allowWithReason { return errors.New("disconnect not allowed: always-on mode is enabled") } if reason == "" { diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 339fad50afeb7..2c13f06198455 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -30,6 +30,7 @@ import ( "tailscale.com/util/goroutines" "tailscale.com/util/set" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/version" "tailscale.com/version/distro" ) @@ -342,7 +343,7 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http // this will first check syspolicy, MDM settings like Registry // on Windows or defaults on macOS. If they are not set, it falls // back to the cli-flag, `--posture-checking`. 
- choice, err := b.polc.GetPreferenceOption(pkey.PostureChecking) + choice, err := b.polc.GetPreferenceOption(pkey.PostureChecking, ptype.ShowChoiceByPolicy) if err != nil { b.logf( "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 54dcda30aa095..700e2de37778a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -109,6 +109,7 @@ import ( "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" @@ -1610,7 +1611,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // future "tailscale up" to start checking for // implicit setting reverts, which it doesn't do when // ControlURL is blank. - prefs.ControlURL = prefs.ControlURLOrDefault() + prefs.ControlURL = prefs.ControlURLOrDefault(b.polc) prefsChanged = true } if st.Persist.Valid() { @@ -1870,7 +1871,7 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { } for _, opt := range preferencePolicies { - if po, err := b.polc.GetPreferenceOption(opt.key); err == nil { + if po, err := b.polc.GetPreferenceOption(opt.key, ptype.ShowChoiceByPolicy); err == nil { curVal := opt.get(prefs.View()) newVal := po.ShouldEnable(curVal) if curVal != newVal { @@ -2425,7 +2426,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { loggedOut := prefs.LoggedOut() - serverURL := prefs.ControlURLOrDefault() + serverURL := prefs.ControlURLOrDefault(b.polc) if inServerMode := prefs.ForceDaemon(); inServerMode || runtime.GOOS == "windows" { b.logf("Start: serverMode=%v", inServerMode) } @@ -3498,7 +3499,7 @@ func (b *LocalBackend) validPopBrowserURLLocked(urlStr string) bool { if err != nil { return false } - serverURL := b.sanitizedPrefsLocked().ControlURLOrDefault() + 
serverURL := b.sanitizedPrefsLocked().ControlURLOrDefault(b.polc) if ipn.IsLoginServerSynonym(serverURL) { // When connected to the official Tailscale control plane, only allow // URLs from tailscale.com or its subdomains. @@ -4049,7 +4050,7 @@ func (b *LocalBackend) SwitchToBestProfile(reason string) { // but b.mu must held on entry. It is released on exit. func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { defer unlock() - oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() + oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc) profile, background := b.resolveBestProfileLocked() cp, switched, err := b.pm.SwitchToProfile(profile) switch { @@ -4076,7 +4077,7 @@ func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock un return } // As an optimization, only reset the dialPlan if the control URL changed. - if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { + if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc); oldControlURL != newControlURL { b.resetDialPlan() } if err := b.resetForProfileChangeLockedOnEntry(unlock); err != nil { @@ -4250,7 +4251,7 @@ func (b *LocalBackend) isDefaultServerLocked() bool { if !prefs.Valid() { return true // assume true until set otherwise } - return prefs.ControlURLOrDefault() == ipn.DefaultControlURL + return prefs.ControlURLOrDefault(b.polc) == ipn.DefaultControlURL } var exitNodeMisconfigurationWarnable = health.Register(&health.Warnable{ @@ -5687,7 +5688,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // Some temporary (2024-05-05) debugging code to help us catch // https://github.com/tailscale/tailscale/issues/11962 in the act. 
if prefs.WantRunning() && - prefs.ControlURLOrDefault() == ipn.DefaultControlURL && + prefs.ControlURLOrDefault(b.polc) == ipn.DefaultControlURL && envknob.Bool("TS_PANIC_IF_HIT_MAIN_CONTROL") { panic("[unexpected] use of main control server in integration test") } @@ -7288,13 +7289,13 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { unlock := b.lockAndGetUnlock() defer unlock() - oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault() + oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc) if _, changed, err := b.pm.SwitchToProfileByID(profile); !changed || err != nil { return err // nil if we're already on the target profile } // As an optimization, only reset the dialPlan if the control URL changed. - if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(); oldControlURL != newControlURL { + if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc); oldControlURL != newControlURL { b.resetDialPlan() } diff --git a/ipn/prefs.go b/ipn/prefs.go index 4c049688ccbe2..14b8078c0f55b 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -28,8 +28,8 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/views" "tailscale.com/util/dnsname" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version" ) @@ -718,16 +718,16 @@ func NewPrefs() *Prefs { // // If not configured, or if the configured value is a legacy name equivalent to // the default, then DefaultControlURL is returned instead. -func (p PrefsView) ControlURLOrDefault() string { - return p.ж.ControlURLOrDefault() +func (p PrefsView) ControlURLOrDefault(polc policyclient.Client) string { + return p.ж.ControlURLOrDefault(polc) } // ControlURLOrDefault returns the coordination server's URL base. // // If not configured, or if the configured value is a legacy name equivalent to // the default, then DefaultControlURL is returned instead. 
-func (p *Prefs) ControlURLOrDefault() string { - controlURL, err := syspolicy.GetString(pkey.ControlURL, p.ControlURL) +func (p *Prefs) ControlURLOrDefault(polc policyclient.Client) string { + controlURL, err := polc.GetString(pkey.ControlURL, p.ControlURL) if err != nil { controlURL = p.ControlURL } @@ -756,11 +756,11 @@ func (p *Prefs) DefaultRouteAll(goos string) bool { } // AdminPageURL returns the admin web site URL for the current ControlURL. -func (p PrefsView) AdminPageURL() string { return p.ж.AdminPageURL() } +func (p PrefsView) AdminPageURL(polc policyclient.Client) string { return p.ж.AdminPageURL(polc) } // AdminPageURL returns the admin web site URL for the current ControlURL. -func (p *Prefs) AdminPageURL() string { - url := p.ControlURLOrDefault() +func (p *Prefs) AdminPageURL(polc policyclient.Client) string { + url := p.ControlURLOrDefault(polc) if IsLoginServerSynonym(url) { // TODO(crawshaw): In future release, make this https://console.tailscale.com url = "https://login.tailscale.com" diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 43e360c6af0c2..7aac20c807716 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -23,6 +23,7 @@ import ( "tailscale.com/types/opt" "tailscale.com/types/persist" "tailscale.com/types/preftype" + "tailscale.com/util/syspolicy/policyclient" ) func fieldsOf(t reflect.Type) (fields []string) { @@ -1032,15 +1033,16 @@ func TestExitNodeIPOfArg(t *testing.T) { func TestControlURLOrDefault(t *testing.T) { var p Prefs - if got, want := p.ControlURLOrDefault(), DefaultControlURL; got != want { + polc := policyclient.NoPolicyClient{} + if got, want := p.ControlURLOrDefault(polc), DefaultControlURL; got != want { t.Errorf("got %q; want %q", got, want) } p.ControlURL = "http://foo.bar" - if got, want := p.ControlURLOrDefault(), "http://foo.bar"; got != want { + if got, want := p.ControlURLOrDefault(polc), "http://foo.bar"; got != want { t.Errorf("got %q; want %q", got, want) } p.ControlURL = 
"https://login.tailscale.com" - if got, want := p.ControlURLOrDefault(), DefaultControlURL; got != want { + if got, want := p.ControlURLOrDefault(polc), DefaultControlURL; got != want { t.Errorf("got %q; want %q", got, want) } } diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 295dc6fff24f6..587b421f3c4cc 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -51,8 +51,8 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/must" "tailscale.com/util/racebuild" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/testenv" "tailscale.com/version" "tailscale.com/version/distro" @@ -66,7 +66,7 @@ var getLogTargetOnce struct { func getLogTarget() string { getLogTargetOnce.Do(func() { envTarget, _ := os.LookupEnv("TS_LOG_TARGET") - getLogTargetOnce.v, _ = syspolicy.GetString(pkey.LogTarget, envTarget) + getLogTargetOnce.v, _ = policyclient.Get().GetString(pkey.LogTarget, envTarget) }) return getLogTargetOnce.v diff --git a/logpolicy/maybe_syspolicy.go b/logpolicy/maybe_syspolicy.go new file mode 100644 index 0000000000000..8b2836c97411c --- /dev/null +++ b/logpolicy/maybe_syspolicy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package logpolicy + +import _ "tailscale.com/feature/syspolicy" diff --git a/net/dns/manager.go b/net/dns/manager.go index 5d6f225ce032f..4a5c4925cf092 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -30,6 +30,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" "tailscale.com/util/slicesx" + "tailscale.com/util/syspolicy/policyclient" ) var ( @@ -576,7 +577,7 @@ func (m *Manager) FlushCaches() error { // // health must not be nil func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, interfaceName string) { - oscfg, err := NewOSConfigurator(logf, nil, nil, interfaceName) + 
oscfg, err := NewOSConfigurator(logf, health, policyclient.Get(), nil, interfaceName) if err != nil { logf("creating dns cleanup: %v", err) return diff --git a/net/dns/manager_darwin.go b/net/dns/manager_darwin.go index ccfafaa457f16..d73ad71a829a5 100644 --- a/net/dns/manager_darwin.go +++ b/net/dns/manager_darwin.go @@ -14,12 +14,13 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/types/logger" "tailscale.com/util/mak" + "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // // The health tracker and the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, _ *health.Tracker, _ *controlknobs.Knobs, ifName string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, _ *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, ifName string) (OSConfigurator, error) { return &darwinConfigurator{logf: logf, ifName: ifName}, nil } diff --git a/net/dns/manager_default.go b/net/dns/manager_default.go index dbe985cacdfc9..1a86690c5d829 100644 --- a/net/dns/manager_default.go +++ b/net/dns/manager_default.go @@ -9,11 +9,12 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // // The health tracker and the knobs may be nil and are ignored on this platform. 
-func NewOSConfigurator(logger.Logf, *health.Tracker, *controlknobs.Knobs, string) (OSConfigurator, error) { +func NewOSConfigurator(logger.Logf, *health.Tracker, policyclient.Client, *controlknobs.Knobs, string) (OSConfigurator, error) { return NewNoopManager() } diff --git a/net/dns/manager_freebsd.go b/net/dns/manager_freebsd.go index 1ec9ea841d77a..3237fb382fbd3 100644 --- a/net/dns/manager_freebsd.go +++ b/net/dns/manager_freebsd.go @@ -10,12 +10,13 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, _ string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, _ string) (OSConfigurator, error) { bs, err := os.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { return newDirectManager(logf, health), nil diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 643cc280af1e3..8b66ac3a685e3 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -22,6 +22,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/cmpver" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version/distro" ) @@ -38,7 +39,7 @@ var publishOnce sync.Once // NewOSConfigurator created a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. 
-func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { if distro.Get() == distro.JetKVM { return NewNoopManager() } diff --git a/net/dns/manager_openbsd.go b/net/dns/manager_openbsd.go index 1a1c4390c943f..6168a9e0818cd 100644 --- a/net/dns/manager_openbsd.go +++ b/net/dns/manager_openbsd.go @@ -11,6 +11,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) type kv struct { @@ -24,7 +25,7 @@ func (kv kv) String() string { // NewOSConfigurator created a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { return newOSConfigurator(logf, health, interfaceName, newOSConfigEnv{ rcIsResolvd: rcIsResolvd, diff --git a/net/dns/manager_plan9.go b/net/dns/manager_plan9.go index ca179f27fcc8a..ef1ceea17787a 100644 --- a/net/dns/manager_plan9.go +++ b/net/dns/manager_plan9.go @@ -21,9 +21,10 @@ import ( "tailscale.com/health" "tailscale.com/types/logger" "tailscale.com/util/set" + "tailscale.com/util/syspolicy/policyclient" ) -func NewOSConfigurator(logf logger.Logf, ht *health.Tracker, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, ht *health.Tracker, _ policyclient.Client, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { return &plan9DNSManager{ logf: logf, ht: ht, diff --git 
a/net/dns/manager_solaris.go b/net/dns/manager_solaris.go index 1f48efb9e61a1..de7e72bb52436 100644 --- a/net/dns/manager_solaris.go +++ b/net/dns/manager_solaris.go @@ -7,8 +7,9 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/policyclient" ) -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ *controlknobs.Knobs, iface string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, iface string) (OSConfigurator, error) { return newDirectManager(logf, health), nil } diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 8830861d10ae2..444c5d37debf4 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -29,7 +29,6 @@ import ( "tailscale.com/health" "tailscale.com/types/logger" "tailscale.com/util/dnsname" - "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" @@ -48,6 +47,7 @@ type windowsManager struct { knobs *controlknobs.Knobs // or nil nrptDB *nrptRuleDatabase wslManager *wslManager + polc policyclient.Client unregisterPolicyChangeCb func() // called when the manager is closing @@ -58,11 +58,15 @@ type windowsManager struct { // NewOSConfigurator created a new OS configurator. // // The health tracker and the knobs may be nil. 
-func NewOSConfigurator(logf logger.Logf, health *health.Tracker, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, polc policyclient.Client, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { + if polc == nil { + panic("nil policyclient.Client") + } ret := &windowsManager{ logf: logf, guid: interfaceName, knobs: knobs, + polc: polc, wslManager: newWSLManager(logf, health), } @@ -71,7 +75,7 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, knobs *controlk } var err error - if ret.unregisterPolicyChangeCb, err = syspolicy.RegisterChangeCallback(ret.sysPolicyChanged); err != nil { + if ret.unregisterPolicyChangeCb, err = polc.RegisterChangeCallback(ret.sysPolicyChanged); err != nil { logf("error registering policy change callback: %v", err) // non-fatal } @@ -521,7 +525,7 @@ func (m *windowsManager) reconfigureDNSRegistration() { // Disable DNS registration by default (if the policy setting is not configured). // This is primarily for historical reasons and to avoid breaking existing // setups that rely on this behavior. 
- enableDNSRegistration, err := syspolicy.GetPreferenceOptionOrDefault(pkey.EnableDNSRegistration, ptype.NeverByPolicy) + enableDNSRegistration, err := m.polc.GetPreferenceOption(pkey.EnableDNSRegistration, ptype.NeverByPolicy) if err != nil { m.logf("error getting DNSRegistration policy setting: %v", err) // non-fatal; we'll use the default } diff --git a/net/dns/manager_windows_test.go b/net/dns/manager_windows_test.go index edcf24ec04240..7c0139f455d70 100644 --- a/net/dns/manager_windows_test.go +++ b/net/dns/manager_windows_test.go @@ -17,6 +17,7 @@ import ( "golang.org/x/sys/windows/registry" "tailscale.com/types/logger" "tailscale.com/util/dnsname" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/winutil" "tailscale.com/util/winutil/gp" ) @@ -133,7 +134,7 @@ func TestManagerWindowsGPCopy(t *testing.T) { } defer delIfKey() - cfg, err := NewOSConfigurator(logf, nil, nil, fakeInterface.String()) + cfg, err := NewOSConfigurator(logf, nil, policyclient.NoPolicyClient{}, nil, fakeInterface.String()) if err != nil { t.Fatalf("NewOSConfigurator: %v\n", err) } @@ -262,7 +263,7 @@ func runTest(t *testing.T, isLocal bool) { } defer delIfKey() - cfg, err := NewOSConfigurator(logf, nil, nil, fakeInterface.String()) + cfg, err := NewOSConfigurator(logf, nil, policyclient.NoPolicyClient{}, nil, fakeInterface.String()) if err != nil { t.Fatalf("NewOSConfigurator: %v\n", err) } diff --git a/tsd/syspolicy_off.go b/tsd/syspolicy_off.go deleted file mode 100644 index 221b8f223eadc..0000000000000 --- a/tsd/syspolicy_off.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ts_omit_syspolicy - -package tsd - -import ( - "tailscale.com/util/syspolicy/policyclient" -) - -func getPolicyClient() policyclient.Client { return policyclient.NoPolicyClient{} } diff --git a/tsd/syspolicy_on.go b/tsd/syspolicy_on.go deleted file mode 100644 index e9811b88b0c61..0000000000000 --- 
a/tsd/syspolicy_on.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ts_omit_syspolicy - -package tsd - -import ( - "time" - - "tailscale.com/util/syspolicy" - "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/policyclient" - "tailscale.com/util/syspolicy/ptype" -) - -func getPolicyClient() policyclient.Client { return globalSyspolicy{} } - -// globalSyspolicy implements [policyclient.Client] using the syspolicy global -// functions and global registrations. -// -// TODO: de-global-ify. This implementation using the old global functions -// is an intermediate stage while changing policyclient to be modular. -type globalSyspolicy struct{} - -func (globalSyspolicy) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { - return syspolicy.GetBoolean(key, defaultValue) -} - -func (globalSyspolicy) GetString(key pkey.Key, defaultValue string) (string, error) { - return syspolicy.GetString(key, defaultValue) -} - -func (globalSyspolicy) GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { - return syspolicy.GetStringArray(key, defaultValue) -} - -func (globalSyspolicy) SetDebugLoggingEnabled(enabled bool) { - syspolicy.SetDebugLoggingEnabled(enabled) -} - -func (globalSyspolicy) GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { - return syspolicy.GetUint64(key, defaultValue) -} - -func (globalSyspolicy) GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { - return syspolicy.GetDuration(name, defaultValue) -} - -func (globalSyspolicy) GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { - return syspolicy.GetPreferenceOption(name) -} - -func (globalSyspolicy) GetVisibility(name pkey.Key) (ptype.Visibility, error) { - return syspolicy.GetVisibility(name) -} - -func (globalSyspolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { - return syspolicy.HasAnyOf(keys...) 
-} - -func (globalSyspolicy) RegisterChangeCallback(cb func(policyclient.PolicyChange)) (unregister func(), err error) { - return syspolicy.RegisterChangeCallback(cb) -} diff --git a/tsd/tsd.go b/tsd/tsd.go index 17795d3c52e86..bd333bd31b027 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -175,7 +175,7 @@ func (s *System) PolicyClientOrDefault() policyclient.Client { if client, ok := s.PolicyClient.GetOK(); ok { return client } - return getPolicyClient() + return policyclient.Get() } // SubSystem represents some subsystem of the Tailscale node daemon. diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 1c2be4781e29c..187237e2f8b7d 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -236,6 +236,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -375,7 +376,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ - tailscale.com/util/syspolicy from tailscale.com/ipn+ + tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/internal/metrics from tailscale.com/util/syspolicy/source diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 
c8a0bb2740b64..a87a3ec658ccb 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -51,8 +51,8 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index c8a0bb2740b64..a87a3ec658ccb 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -51,8 +51,8 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index c8a0bb2740b64..a87a3ec658ccb 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -51,8 +51,8 @@ import ( _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index c8a0bb2740b64..a87a3ec658ccb 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -51,8 +51,8 @@ import ( _ 
"tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/version" _ "tailscale.com/version/distro" _ "tailscale.com/wgengine" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index c9a1cd0cf188e..54e1bcc04dbbc 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -62,8 +62,8 @@ import ( _ "tailscale.com/util/multierr" _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" - _ "tailscale.com/util/syspolicy" _ "tailscale.com/util/syspolicy/pkey" + _ "tailscale.com/util/syspolicy/policyclient" _ "tailscale.com/util/winutil" _ "tailscale.com/util/winutil/gp" _ "tailscale.com/version" diff --git a/util/syspolicy/policyclient/policyclient.go b/util/syspolicy/policyclient/policyclient.go index aadcbc60e91db..5a78424481955 100644 --- a/util/syspolicy/policyclient/policyclient.go +++ b/util/syspolicy/policyclient/policyclient.go @@ -44,8 +44,8 @@ type Client interface { // overrides of users' choices in a way that we do not want tailcontrol to have // the authority to set. It describes user-decides/always/never options, where // "always" and "never" remove the user's ability to make a selection. If not - // present or set to a different value, "user-decides" is the default. - GetPreferenceOption(key pkey.Key) (ptype.PreferenceOption, error) + // present or set to a different value, defaultValue (and a nil error) is returned. + GetPreferenceOption(key pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) // GetVisibility returns whether a UI element should be visible based on // the system's configuration. 
@@ -66,6 +66,21 @@ type Client interface { RegisterChangeCallback(cb func(PolicyChange)) (unregister func(), err error) } +// Get returns a non-nil [Client] implementation as a function of the +// build tags. It returns a no-op implementation if the full syspolicy +// package is omitted from the build. +func Get() Client { + return client +} + +// RegisterClientImpl registers a [Client] implementation to be returned by +// [Get]. +func RegisterClientImpl(c Client) { + client = c +} + +var client Client = NoPolicyClient{} + // PolicyChange is the interface representing a change in policy settings. type PolicyChange interface { // HasChanged reports whether the policy setting identified by the given key @@ -81,6 +96,8 @@ type PolicyChange interface { // returns default values. type NoPolicyClient struct{} +var _ Client = NoPolicyClient{} + func (NoPolicyClient) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { return defaultValue, nil } @@ -101,8 +118,8 @@ func (NoPolicyClient) GetDuration(name pkey.Key, defaultValue time.Duration) (ti return defaultValue, nil } -func (NoPolicyClient) GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { - return ptype.ShowChoiceByPolicy, nil +func (NoPolicyClient) GetPreferenceOption(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { + return defaultValue, nil } func (NoPolicyClient) GetVisibility(name pkey.Key) (ptype.Visibility, error) { diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 189f4110707e1..2367e21eb2ad3 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -1,13 +1,9 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package syspolicy facilitates retrieval of the current policy settings -// applied to the device or user and receiving notifications when the policy -// changes. 
-// -// It provides functions that return specific policy settings by their unique -// [setting.Key]s, such as [GetBoolean], [GetUint64], [GetString], -// [GetStringArray], [GetPreferenceOption], [GetVisibility] and [GetDuration]. +// Package syspolicy contains the implementation of system policy management. +// Calling code should use the client interface in +// tailscale.com/util/syspolicy/policyclient. package syspolicy import ( @@ -18,6 +14,7 @@ import ( "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" @@ -58,9 +55,9 @@ func MustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicySc return reg } -// HasAnyOf returns whether at least one of the specified policy settings is configured, +// hasAnyOf returns whether at least one of the specified policy settings is configured, // or an error if no keys are provided or the check fails. -func HasAnyOf(keys ...pkey.Key) (bool, error) { +func hasAnyOf(keys ...pkey.Key) (bool, error) { if len(keys) == 0 { return false, errors.New("at least one key must be specified") } @@ -82,62 +79,55 @@ func HasAnyOf(keys ...pkey.Key) (bool, error) { return false, nil } -// GetString returns a string policy setting with the specified key, +// getString returns a string policy setting with the specified key, // or defaultValue if it does not exist. -func GetString(key pkey.Key, defaultValue string) (string, error) { +func getString(key pkey.Key, defaultValue string) (string, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetUint64 returns a numeric policy setting with the specified key, +// getUint64 returns a numeric policy setting with the specified key, // or defaultValue if it does not exist. 
-func GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { +func getUint64(key pkey.Key, defaultValue uint64) (uint64, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetBoolean returns a boolean policy setting with the specified key, +// getBoolean returns a boolean policy setting with the specified key, // or defaultValue if it does not exist. -func GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { +func getBoolean(key pkey.Key, defaultValue bool) (bool, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetStringArray returns a multi-string policy setting with the specified key, +// getStringArray returns a multi-string policy setting with the specified key, // or defaultValue if it does not exist. -func GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { +func getStringArray(key pkey.Key, defaultValue []string) ([]string, error) { return getCurrentPolicySettingValue(key, defaultValue) } -// GetPreferenceOption loads a policy from the registry that can be +// getPreferenceOption loads a policy from the registry that can be // managed by an enterprise policy management system and allows administrative // overrides of users' choices in a way that we do not want tailcontrol to have // the authority to set. It describes user-decides/always/never options, where // "always" and "never" remove the user's ability to make a selection. If not -// present or set to a different value, "user-decides" is the default. -func GetPreferenceOption(name pkey.Key) (ptype.PreferenceOption, error) { - return getCurrentPolicySettingValue(name, ptype.ShowChoiceByPolicy) -} - -// GetPreferenceOptionOrDefault is like [GetPreferenceOption], but allows -// specifying a default value to return if the policy setting is not configured. -// It can be used in situations where "user-decides" is not the default. 
-func GetPreferenceOptionOrDefault(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { +// present or set to a different value, defaultValue (and a nil error) is returned. +func getPreferenceOption(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { return getCurrentPolicySettingValue(name, defaultValue) } -// GetVisibility loads a policy from the registry that can be managed +// getVisibility loads a policy from the registry that can be managed // by an enterprise policy management system and describes show/hide decisions // for UI elements. The registry value should be a string set to "show" (return // true) or "hide" (return true). If not present or set to a different value, // "show" (return false) is the default. -func GetVisibility(name pkey.Key) (ptype.Visibility, error) { +func getVisibility(name pkey.Key) (ptype.Visibility, error) { return getCurrentPolicySettingValue(name, ptype.VisibleByPolicy) } -// GetDuration loads a policy from the registry that can be managed +// getDuration loads a policy from the registry that can be managed // by an enterprise policy management system and describes a duration for some // action. The registry value should be a string that time.ParseDuration // understands. If the registry value is "" or can not be processed, // defaultValue is returned instead. -func GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { +func getDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { d, err := getCurrentPolicySettingValue(name, defaultValue) if err != nil { return d, err @@ -148,9 +138,9 @@ func GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, erro return d, nil } -// RegisterChangeCallback adds a function that will be called whenever the effective policy +// registerChangeCallback adds a function that will be called whenever the effective policy // for the default scope changes. 
The returned function can be used to unregister the callback. -func RegisterChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), err error) { +func registerChangeCallback(cb rsop.PolicyChangeCallback) (unregister func(), err error) { effective, err := rsop.PolicyFor(setting.DefaultScope()) if err != nil { return nil, err @@ -233,7 +223,53 @@ func SelectControlURL(reg, disk string) string { return def } -// SetDebugLoggingEnabled controls whether spammy debug logging is enabled. -func SetDebugLoggingEnabled(v bool) { - loggerx.SetDebugLoggingEnabled(v) +func init() { + policyclient.RegisterClientImpl(globalSyspolicy{}) +} + +// globalSyspolicy implements [policyclient.Client] using the syspolicy global +// functions and global registrations. +// +// TODO: de-global-ify. This implementation using the old global functions +// is an intermediate stage while changing policyclient to be modular. +type globalSyspolicy struct{} + +func (globalSyspolicy) GetBoolean(key pkey.Key, defaultValue bool) (bool, error) { + return getBoolean(key, defaultValue) +} + +func (globalSyspolicy) GetString(key pkey.Key, defaultValue string) (string, error) { + return getString(key, defaultValue) +} + +func (globalSyspolicy) GetStringArray(key pkey.Key, defaultValue []string) ([]string, error) { + return getStringArray(key, defaultValue) +} + +func (globalSyspolicy) SetDebugLoggingEnabled(enabled bool) { + loggerx.SetDebugLoggingEnabled(enabled) +} + +func (globalSyspolicy) GetUint64(key pkey.Key, defaultValue uint64) (uint64, error) { + return getUint64(key, defaultValue) +} + +func (globalSyspolicy) GetDuration(name pkey.Key, defaultValue time.Duration) (time.Duration, error) { + return getDuration(name, defaultValue) +} + +func (globalSyspolicy) GetPreferenceOption(name pkey.Key, defaultValue ptype.PreferenceOption) (ptype.PreferenceOption, error) { + return getPreferenceOption(name, defaultValue) +} + +func (globalSyspolicy) GetVisibility(name pkey.Key) (ptype.Visibility, 
error) { + return getVisibility(name) +} + +func (globalSyspolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { + return hasAnyOf(keys...) +} + +func (globalSyspolicy) RegisterChangeCallback(cb func(policyclient.PolicyChange)) (unregister func(), err error) { + return registerChangeCallback(cb) } diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index 3130f5d077ea0..0ee62efb11af5 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -82,7 +82,7 @@ func TestGetString(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetString(tt.key, tt.defaultValue) + value, err := getString(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -157,7 +157,7 @@ func TestGetUint64(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetUint64(tt.key, tt.defaultValue) + value, err := getUint64(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -224,7 +224,7 @@ func TestGetBoolean(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetBoolean(tt.key, tt.defaultValue) + value, err := getBoolean(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -317,7 +317,7 @@ func TestGetPreferenceOption(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - option, err := GetPreferenceOption(tt.key) + option, err := getPreferenceOption(tt.key, ptype.ShowChoiceByPolicy) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -402,7 +402,7 @@ func TestGetVisibility(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - visibility, err := GetVisibility(tt.key) + visibility, err := getVisibility(tt.key) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -498,7 +498,7 @@ func 
TestGetDuration(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - duration, err := GetDuration(tt.key, tt.defaultValue) + duration, err := getDuration(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -579,7 +579,7 @@ func TestGetStringArray(t *testing.T) { } registerSingleSettingStoreForTest(t, s) - value, err := GetStringArray(tt.key, tt.defaultValue) + value, err := getStringArray(tt.key, tt.defaultValue) if !errorsMatchForTest(err, tt.wantError) { t.Errorf("err=%q, want %q", err, tt.wantError) } @@ -613,7 +613,7 @@ func BenchmarkGetString(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - gotControlURL, _ := GetString(pkey.ControlURL, "https://controlplane.tailscale.com") + gotControlURL, _ := getString(pkey.ControlURL, "https://controlplane.tailscale.com") if gotControlURL != wantControlURL { b.Fatalf("got %v; want %v", gotControlURL, wantControlURL) } From 24b8a57b1e9c61154d45d87402fadcb56ff27843 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 16:50:10 -0700 Subject: [PATCH 0285/1093] util/syspolicy/policytest: move policy test helper to its own package Updates #16998 Updates #12614 Change-Id: I9fd27d653ebee547951705dc5597481e85b60747 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local_test.go | 62 +------------ util/syspolicy/policytest/policytest.go | 117 ++++++++++++++++++++++++ 2 files changed, 120 insertions(+), 59 deletions(-) create mode 100644 util/syspolicy/policytest/policytest.go diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index a3a26af042012..bd81a09c3a7a5 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -64,7 +64,7 @@ import ( "tailscale.com/util/set" "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/policytest" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" 
"tailscale.com/wgengine" @@ -1183,7 +1183,7 @@ func TestConfigureExitNode(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - var pol testPolicy + var pol policytest.Config // Configure policy settings, if any. if tt.exitNodeIDPolicy != nil { pol.Set(pkey.ExitNodeID, string(*tt.exitNodeIDPolicy)) @@ -5539,62 +5539,6 @@ func TestReadWriteRouteInfo(t *testing.T) { } } -// testPolicy is a [policyclient.Client] with a static mapping of values. -// The map value must be of the correct type (string, []string, bool, etc). -// -// It is used for testing purposes to simulate policy client behavior. -// It panics if the values are the wrong type. -type testPolicy struct { - v map[pkey.Key]any - policyclient.NoPolicyClient -} - -func (sp *testPolicy) Set(key pkey.Key, value any) { - if sp.v == nil { - sp.v = make(map[pkey.Key]any) - } - sp.v[key] = value -} - -func (sp testPolicy) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { - if val, ok := sp.v[key]; ok { - if arr, ok := val.([]string); ok { - return arr, nil - } - panic(fmt.Sprintf("key %s is not a []string", key)) - } - return defaultVal, nil -} - -func (sp testPolicy) GetString(key pkey.Key, defaultVal string) (string, error) { - if val, ok := sp.v[key]; ok { - if str, ok := val.(string); ok { - return str, nil - } - panic(fmt.Sprintf("key %s is not a string", key)) - } - return defaultVal, nil -} - -func (sp testPolicy) GetBoolean(key pkey.Key, defaultVal bool) (bool, error) { - if val, ok := sp.v[key]; ok { - if b, ok := val.(bool); ok { - return b, nil - } - panic(fmt.Sprintf("key %s is not a bool", key)) - } - return defaultVal, nil -} - -func (sp testPolicy) HasAnyOf(keys ...pkey.Key) (bool, error) { - for _, key := range keys { - if _, ok := sp.v[key]; ok { - return true, nil - } - } - return false, nil -} - func TestFillAllowedSuggestions(t *testing.T) { tests := []struct { name string @@ -5628,7 +5572,7 @@ func TestFillAllowedSuggestions(t *testing.T) { for _, tt := range 
tests { t.Run(tt.name, func(t *testing.T) { - var pol testPolicy + var pol policytest.Config pol.Set(pkey.AllowedSuggestedExitNodes, tt.allowPolicy) got := fillAllowedSuggestions(pol) diff --git a/util/syspolicy/policytest/policytest.go b/util/syspolicy/policytest/policytest.go new file mode 100644 index 0000000000000..e05d8938e2ad3 --- /dev/null +++ b/util/syspolicy/policytest/policytest.go @@ -0,0 +1,117 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package policytest contains test helpers for the syspolicy packages. +package policytest + +import ( + "fmt" + "time" + + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policyclient" + "tailscale.com/util/syspolicy/ptype" +) + +// Config is a [policyclient.Client] implementation with a static mapping of +// values. +// +// It is used for testing purposes to simulate policy client behavior. +// +// It panics if a value is Set with one type and then accessed with a different +// expected type. 
+type Config map[pkey.Key]any + +var _ policyclient.Client = Config{} + +func (c *Config) Set(key pkey.Key, value any) { + if *c == nil { + *c = make(map[pkey.Key]any) + } + (*c)[key] = value +} + +func (c Config) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { + if val, ok := c[key]; ok { + if arr, ok := val.([]string); ok { + return arr, nil + } + panic(fmt.Sprintf("key %s is not a []string", key)) + } + return defaultVal, nil +} + +func (c Config) GetString(key pkey.Key, defaultVal string) (string, error) { + if val, ok := c[key]; ok { + if str, ok := val.(string); ok { + return str, nil + } + panic(fmt.Sprintf("key %s is not a string", key)) + } + return defaultVal, nil +} + +func (c Config) GetBoolean(key pkey.Key, defaultVal bool) (bool, error) { + if val, ok := c[key]; ok { + if b, ok := val.(bool); ok { + return b, nil + } + panic(fmt.Sprintf("key %s is not a bool", key)) + } + return defaultVal, nil +} + +func (c Config) GetUint64(key pkey.Key, defaultVal uint64) (uint64, error) { + if val, ok := c[key]; ok { + if u, ok := val.(uint64); ok { + return u, nil + } + panic(fmt.Sprintf("key %s is not a uint64", key)) + } + return defaultVal, nil +} + +func (c Config) GetDuration(key pkey.Key, defaultVal time.Duration) (time.Duration, error) { + if val, ok := c[key]; ok { + if d, ok := val.(time.Duration); ok { + return d, nil + } + panic(fmt.Sprintf("key %s is not a time.Duration", key)) + } + return defaultVal, nil +} + +func (c Config) GetPreferenceOption(key pkey.Key, defaultVal ptype.PreferenceOption) (ptype.PreferenceOption, error) { + if val, ok := c[key]; ok { + if p, ok := val.(ptype.PreferenceOption); ok { + return p, nil + } + panic(fmt.Sprintf("key %s is not a ptype.PreferenceOption", key)) + } + return defaultVal, nil +} + +func (c Config) GetVisibility(key pkey.Key) (ptype.Visibility, error) { + if val, ok := c[key]; ok { + if p, ok := val.(ptype.Visibility); ok { + return p, nil + } + panic(fmt.Sprintf("key %s is not a 
ptype.Visibility", key)) + } + return ptype.Visibility(ptype.ShowChoiceByPolicy), nil +} + +func (c Config) HasAnyOf(keys ...pkey.Key) (bool, error) { + for _, key := range keys { + if _, ok := c[key]; ok { + return true, nil + } + } + return false, nil +} + +func (sp Config) RegisterChangeCallback(callback func(policyclient.PolicyChange)) (func(), error) { + return func() {}, nil +} + +func (sp Config) SetDebugLoggingEnabled(enabled bool) {} From 21f21bd2a2be7999a328b29ef1ff05e4c973ec35 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 16:50:10 -0700 Subject: [PATCH 0286/1093] util/syspolicy: finish adding ts_omit_syspolicy build tags, tests Fixes #16998 Updates #12614 Change-Id: Idf2b1657898111df4be31f356091b2376d0d7f0b Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- client/local/local.go | 28 -------------- client/local/syspolicy.go | 40 +++++++++++++++++++ cmd/tailscale/cli/cli.go | 3 +- cmd/tailscale/cli/syspolicy.go | 71 ++++++++++++++++++---------------- cmd/tailscaled/deps_test.go | 14 +++++++ ipn/localapi/localapi.go | 50 ------------------------ ipn/localapi/syspolicy_api.go | 68 ++++++++++++++++++++++++++++++++ 8 files changed, 163 insertions(+), 113 deletions(-) create mode 100644 client/local/syspolicy.go create mode 100644 ipn/localapi/syspolicy_api.go diff --git a/build_dist.sh b/build_dist.sh index 0fc123ade3824..12f366e061730 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy" ;; --box) if [ ! 
-z "${TAGS:-}" ]; then diff --git a/client/local/local.go b/client/local/local.go index 55d14f95eee5a..0257c7a260b7a 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -43,7 +43,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/tkatype" "tailscale.com/util/eventbus" - "tailscale.com/util/syspolicy/setting" ) // defaultClient is the default Client when using the legacy @@ -926,33 +925,6 @@ func (lc *Client) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Pref return decodeJSON[*ipn.Prefs](body) } -// GetEffectivePolicy returns the effective policy for the specified scope. -func (lc *Client) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { - scopeID, err := scope.MarshalText() - if err != nil { - return nil, err - } - body, err := lc.get200(ctx, "/localapi/v0/policy/"+string(scopeID)) - if err != nil { - return nil, err - } - return decodeJSON[*setting.Snapshot](body) -} - -// ReloadEffectivePolicy reloads the effective policy for the specified scope -// by reading and merging policy settings from all applicable policy sources. -func (lc *Client) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { - scopeID, err := scope.MarshalText() - if err != nil { - return nil, err - } - body, err := lc.send(ctx, "POST", "/localapi/v0/policy/"+string(scopeID), 200, http.NoBody) - if err != nil { - return nil, err - } - return decodeJSON[*setting.Snapshot](body) -} - // GetDNSOSConfig returns the system DNS configuration for the current device. // That is, it returns the DNS configuration that the system would use if Tailscale weren't being used. 
func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { diff --git a/client/local/syspolicy.go b/client/local/syspolicy.go new file mode 100644 index 0000000000000..6eff177833786 --- /dev/null +++ b/client/local/syspolicy.go @@ -0,0 +1,40 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package local + +import ( + "context" + "net/http" + + "tailscale.com/util/syspolicy/setting" +) + +// GetEffectivePolicy returns the effective policy for the specified scope. +func (lc *Client) GetEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { + scopeID, err := scope.MarshalText() + if err != nil { + return nil, err + } + body, err := lc.get200(ctx, "/localapi/v0/policy/"+string(scopeID)) + if err != nil { + return nil, err + } + return decodeJSON[*setting.Snapshot](body) +} + +// ReloadEffectivePolicy reloads the effective policy for the specified scope +// by reading and merging policy settings from all applicable policy sources. +func (lc *Client) ReloadEffectivePolicy(ctx context.Context, scope setting.PolicyScope) (*setting.Snapshot, error) { + scopeID, err := scope.MarshalText() + if err != nil { + return nil, err + } + body, err := lc.send(ctx, "POST", "/localapi/v0/policy/"+string(scopeID), 200, http.NoBody) + if err != nil { + return nil, err + } + return decodeJSON[*setting.Snapshot](body) +} diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 208ee93fd9388..5db0308887efa 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -209,6 +209,7 @@ func noDupFlagify(c *ffcli.Command) { } var fileCmd func() *ffcli.Command +var sysPolicyCmd func() *ffcli.Command func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") @@ -239,7 +240,7 @@ change in the future. 
logoutCmd, switchCmd, configureCmd(), - syspolicyCmd, + nilOrCall(sysPolicyCmd), netcheckCmd, ipCmd, dnsCmd, diff --git a/cmd/tailscale/cli/syspolicy.go b/cmd/tailscale/cli/syspolicy.go index a71952a9f7f62..97f3f2122b40c 100644 --- a/cmd/tailscale/cli/syspolicy.go +++ b/cmd/tailscale/cli/syspolicy.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_syspolicy + package cli import ( @@ -20,38 +22,42 @@ var syspolicyArgs struct { json bool // JSON output mode } -var syspolicyCmd = &ffcli.Command{ - Name: "syspolicy", - ShortHelp: "Diagnose the MDM and system policy configuration", - LongHelp: "The 'tailscale syspolicy' command provides tools for diagnosing the MDM and system policy configuration.", - ShortUsage: "tailscale syspolicy ", - UsageFunc: usageFuncNoDefaultValues, - Subcommands: []*ffcli.Command{ - { - Name: "list", - ShortUsage: "tailscale syspolicy list", - Exec: runSysPolicyList, - ShortHelp: "Print effective policy settings", - LongHelp: "The 'tailscale syspolicy list' subcommand displays the effective policy settings and their sources (e.g., MDM or environment variables).", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("syspolicy list") - fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") - return fs - })(), - }, - { - Name: "reload", - ShortUsage: "tailscale syspolicy reload", - Exec: runSysPolicyReload, - ShortHelp: "Force a reload of policy settings, even if no changes are detected, and prints the result", - LongHelp: "The 'tailscale syspolicy reload' subcommand forces a reload of policy settings, even if no changes are detected, and prints the result.", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("syspolicy reload") - fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") - return fs - })(), - }, - }, +func init() { + sysPolicyCmd = func() *ffcli.Command { + return &ffcli.Command{ + Name: "syspolicy", + ShortHelp: "Diagnose 
the MDM and system policy configuration", + LongHelp: "The 'tailscale syspolicy' command provides tools for diagnosing the MDM and system policy configuration.", + ShortUsage: "tailscale syspolicy ", + UsageFunc: usageFuncNoDefaultValues, + Subcommands: []*ffcli.Command{ + { + Name: "list", + ShortUsage: "tailscale syspolicy list", + Exec: runSysPolicyList, + ShortHelp: "Print effective policy settings", + LongHelp: "The 'tailscale syspolicy list' subcommand displays the effective policy settings and their sources (e.g., MDM or environment variables).", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("syspolicy list") + fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") + return fs + })(), + }, + { + Name: "reload", + ShortUsage: "tailscale syspolicy reload", + Exec: runSysPolicyReload, + ShortHelp: "Force a reload of policy settings, even if no changes are detected, and prints the result", + LongHelp: "The 'tailscale syspolicy reload' subcommand forces a reload of policy settings, even if no changes are detected, and prints the result.", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("syspolicy reload") + fs.BoolVar(&syspolicyArgs.json, "json", false, "output in JSON format") + return fs + })(), + }, + }, + } + } } func runSysPolicyList(ctx context.Context, args []string) error { @@ -61,7 +67,6 @@ func runSysPolicyList(ctx context.Context, args []string) error { } printPolicySettings(policy) return nil - } func runSysPolicyReload(ctx context.Context, args []string) error { diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 7f06abc6c5ba1..6d2ea383780a6 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -27,3 +27,17 @@ func TestOmitSSH(t *testing.T) { }, }.Check(t) } + +func TestOmitSyspolicy(t *testing.T) { + const msg = "unexpected syspolicy usage with ts_omit_syspolicy" + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_syspolicy,ts_include_cli", + 
BadDeps: map[string]string{ + "tailscale.com/util/syspolicy": msg, + "tailscale.com/util/syspolicy/setting": msg, + "tailscale.com/util/syspolicy/rsop": msg, + }, + }.Check(t) +} diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index a199a29082aab..2dc75c0d936b3 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -58,8 +58,6 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/osdiag" "tailscale.com/util/rands" - "tailscale.com/util/syspolicy/rsop" - "tailscale.com/util/syspolicy/setting" "tailscale.com/version" "tailscale.com/wgengine/magicsock" ) @@ -79,7 +77,6 @@ type LocalAPIHandler func(*Handler, http.ResponseWriter, *http.Request) var handler = map[string]LocalAPIHandler{ // The prefix match handlers end with a slash: "cert/": (*Handler).serveCert, - "policy/": (*Handler).servePolicy, "profiles/": (*Handler).serveProfiles, // The other /localapi/v0/NAME handlers are exact matches and contain only NAME @@ -1603,53 +1600,6 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { e.Encode(prefs) } -func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "policy access denied", http.StatusForbidden) - return - } - - suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/policy/") - if !ok { - http.Error(w, "misconfigured", http.StatusInternalServerError) - return - } - - var scope setting.PolicyScope - if suffix == "" { - scope = setting.DefaultScope() - } else if err := scope.UnmarshalText([]byte(suffix)); err != nil { - http.Error(w, fmt.Sprintf("%q is not a valid scope", suffix), http.StatusBadRequest) - return - } - - policy, err := rsop.PolicyFor(scope) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - var effectivePolicy *setting.Snapshot - switch r.Method { - case httpm.GET: - effectivePolicy = policy.Get() - case httpm.POST: - effectivePolicy, err = policy.Reload() - if err != nil { - 
http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - default: - http.Error(w, "unsupported method", http.StatusMethodNotAllowed) - return - } - - w.Header().Set("Content-Type", "application/json") - e := json.NewEncoder(w) - e.SetIndent("", "\t") - e.Encode(effectivePolicy) -} - type resJSON struct { Error string `json:",omitempty"` } diff --git a/ipn/localapi/syspolicy_api.go b/ipn/localapi/syspolicy_api.go new file mode 100644 index 0000000000000..a438d352b52e1 --- /dev/null +++ b/ipn/localapi/syspolicy_api.go @@ -0,0 +1,68 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_syspolicy + +package localapi + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + "tailscale.com/util/httpm" + "tailscale.com/util/syspolicy/rsop" + "tailscale.com/util/syspolicy/setting" +) + +func init() { + handler["policy/"] = (*Handler).servePolicy +} + +func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "policy access denied", http.StatusForbidden) + return + } + + suffix, ok := strings.CutPrefix(r.URL.EscapedPath(), "/localapi/v0/policy/") + if !ok { + http.Error(w, "misconfigured", http.StatusInternalServerError) + return + } + + var scope setting.PolicyScope + if suffix == "" { + scope = setting.DefaultScope() + } else if err := scope.UnmarshalText([]byte(suffix)); err != nil { + http.Error(w, fmt.Sprintf("%q is not a valid scope", suffix), http.StatusBadRequest) + return + } + + policy, err := rsop.PolicyFor(scope) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + var effectivePolicy *setting.Snapshot + switch r.Method { + case httpm.GET: + effectivePolicy = policy.Get() + case httpm.POST: + effectivePolicy, err = policy.Reload() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + default: + http.Error(w, "unsupported method", 
http.StatusMethodNotAllowed) + return + } + + w.Header().Set("Content-Type", "application/json") + e := json.NewEncoder(w) + e.SetIndent("", "\t") + e.Encode(effectivePolicy) +} From d06d9007a6854b381fede40e25047c213c5e9bc3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 18:47:48 -0700 Subject: [PATCH 0287/1093] ipn/ipnlocal: convert more tests to use policytest, de-global-ify Now that we have policytest and the policyclient.Client interface, we can de-global-ify many of the tests, letting them run concurrently with each other, and just removing global variable complexity. This does ~half of the LocalBackend ones. Updates #16998 Change-Id: Iece754e1ef4e49744ccd967fa83629d0dca6f66a Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local_test.go | 96 ++++++++++++------------- ipn/ipnlocal/serve_test.go | 13 +++- util/syspolicy/policytest/policytest.go | 87 ++++++++++++++++------ 3 files changed, 125 insertions(+), 71 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index bd81a09c3a7a5..4debcdd8dcf71 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2881,20 +2881,16 @@ func TestSetExitNodeIDPolicy(t *testing.T) { }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - for _, test := range tests { t.Run(test.name, func(t *testing.T) { - b := newTestBackend(t) - - policyStore := source.NewTestStore(t) + var polc policytest.Config if test.exitNodeIDKey { - policyStore.SetStrings(source.TestSettingOf(pkey.ExitNodeID, test.exitNodeID)) + polc.Set(pkey.ExitNodeID, test.exitNodeID) } if test.exitNodeIPKey { - policyStore.SetStrings(source.TestSettingOf(pkey.ExitNodeIP, test.exitNodeIP)) + polc.Set(pkey.ExitNodeIP, test.exitNodeIP) } - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + b := newTestBackend(t, polc) if test.nm == nil { test.nm = new(netmap.NetworkMap) @@ -3026,15 +3022,13 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { }, } - 
syspolicy.RegisterWellKnownSettingsForTest(t) - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - pkey.ExitNodeID, "auto:any", - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - b := newTestLocalBackend(t) + sys := tsd.NewSystem() + sys.PolicyClient.Set(policytest.Config{ + pkey.ExitNodeID: "auto:any", + }) + b := newTestLocalBackendWithSys(t, sys) b.currentNode().SetNetMap(tt.netmap) b.lastSuggestedExitNode = tt.lastSuggestedExitNode b.sys.MagicSock.Get().SetLastNetcheckReportForTest(b.ctx, tt.report) @@ -3094,7 +3088,13 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { } func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { - b := newTestLocalBackend(t) + polc := policytest.Config{ + pkey.ExitNodeID: "auto:any", + } + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + b := newTestLocalBackendWithSys(t, sys) hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni @@ -3106,16 +3106,12 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), - Logf: b.logf, + Dialer: tsdial.NewDialer(netmon.NewStatic()), + Logf: b.logf, + PolicyClient: polc, } cc = newClient(t, opts) b.cc = cc - syspolicy.RegisterWellKnownSettingsForTest(t) - policyStore := source.NewTestStoreOf(t, source.TestSettingOf( - pkey.ExitNodeID, "auto:any", - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) peer1 := makePeer(1, withCap(26), withDERP(3), withSuggest(), withExitRoutes()) peer2 := makePeer(2, withCap(26), withDERP(2), withSuggest(), withExitRoutes()) selfNode := tailcfg.Node{ @@ -3219,12 +3215,14 @@ func TestSetControlClientStatusAutoExitNode(t *testing.T) { }, DERPMap: derpMap, } - b := newTestLocalBackend(t) - syspolicy.RegisterWellKnownSettingsForTest(t) - policyStore 
:= source.NewTestStoreOf(t, source.TestSettingOf( - pkey.ExitNodeID, "auto:any", - )) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) + + polc := policytest.Config{ + pkey.ExitNodeID: "auto:any", + } + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + b := newTestLocalBackendWithSys(t, sys) b.currentNode().SetNetMap(nm) // Peer 2 should be the initial exit node, as it's better than peer 1 // in terms of latency and DERP region. @@ -3461,21 +3459,20 @@ func TestApplySysPolicy(t *testing.T) { }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - settings := make([]source.TestSetting[string], 0, len(tt.stringPolicies)) - for p, v := range tt.stringPolicies { - settings = append(settings, source.TestSettingOf(p, v)) + var polc policytest.Config + for k, v := range tt.stringPolicies { + polc.Set(k, v) } - policyStore := source.NewTestStoreOf(t, settings...) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) t.Run("unit", func(t *testing.T) { prefs := tt.prefs.Clone() - lb := newTestLocalBackend(t) + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + lb := newTestLocalBackendWithSys(t, sys) gotAnyChange := lb.applySysPolicyLocked(prefs) if gotAnyChange && prefs.Equals(&tt.prefs) { @@ -3508,7 +3505,7 @@ func TestApplySysPolicy(t *testing.T) { pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) pm.prefs = usePrefs.View() - b := newTestBackend(t) + b := newTestBackend(t, polc) b.mu.Lock() b.pm = pm b.mu.Unlock() @@ -3607,24 +3604,26 @@ func TestPreferencePolicyInfo(t *testing.T) { }, } - syspolicy.RegisterWellKnownSettingsForTest(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { for _, pp := range preferencePolicies { t.Run(string(pp.key), func(t *testing.T) { - s := source.TestSetting[string]{ - Key: pp.key, - Error: tt.policyError, - Value: tt.policyValue, + t.Parallel() + + var 
polc policytest.Config + if tt.policyError != nil { + polc.Set(pp.key, tt.policyError) + } else { + polc.Set(pp.key, tt.policyValue) } - policyStore := source.NewTestStoreOf(t, s) - syspolicy.MustRegisterStoreForTest(t, "TestStore", setting.DeviceScope, policyStore) prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - lb := newTestLocalBackend(t) + sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) + + lb := newTestLocalBackendWithSys(t, sys) gotAnyChange := lb.applySysPolicyLocked(prefs) if gotAnyChange != tt.wantChange { @@ -6534,7 +6533,8 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { store := source.NewTestStoreOf[string](t) syspolicy.MustRegisterStoreForTest(t, "TestSource", setting.DeviceScope, store) - lb := newLocalBackendWithTestControl(t, enableLogging, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + sys := tsd.NewSystem() + lb := newLocalBackendWithSysAndTestControl(t, enableLogging, sys, func(tb testing.TB, opts controlclient.Options) controlclient.Client { return newClient(tb, opts) }) if tt.initialPrefs != nil { diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 57d1a4745a4a3..e2561cba9ef22 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/util/mak" "tailscale.com/util/must" + "tailscale.com/util/syspolicy/policyclient" "tailscale.com/wgengine" ) @@ -870,7 +871,7 @@ func mustCreateURL(t *testing.T, u string) url.URL { return *uParsed } -func newTestBackend(t *testing.T) *LocalBackend { +func newTestBackend(t *testing.T, opts ...any) *LocalBackend { var logf logger.Logf = logger.Discard const debug = true if debug { @@ -878,6 +879,16 @@ func newTestBackend(t *testing.T) *LocalBackend { } sys := tsd.NewSystem() + + for _, o := range opts { + switch v := o.(type) { + case policyclient.Client: + sys.PolicyClient.Set(v) + default: + panic(fmt.Sprintf("unsupported option type %T", v)) + } + } 
+ e, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ SetSubsystem: sys.Set, HealthTracker: sys.HealthTracker(), diff --git a/util/syspolicy/policytest/policytest.go b/util/syspolicy/policytest/policytest.go index e05d8938e2ad3..7ea0ad91ff8c8 100644 --- a/util/syspolicy/policytest/policytest.go +++ b/util/syspolicy/policytest/policytest.go @@ -19,7 +19,12 @@ import ( // It is used for testing purposes to simulate policy client behavior. // // It panics if a value is Set with one type and then accessed with a different -// expected type. +// expected type and/or value. Some accessors such as GetPreferenceOption and +// GetVisibility support either a ptype.PreferenceOption/ptype.Visibility in the +// map, or the string representation as supported by their UnmarshalText +// methods. +// +// The map value may be an error to return that error value from the accessor. type Config map[pkey.Key]any var _ policyclient.Client = Config{} @@ -33,70 +38,108 @@ func (c *Config) Set(key pkey.Key, value any) { func (c Config) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { if val, ok := c[key]; ok { - if arr, ok := val.([]string); ok { - return arr, nil + switch val := val.(type) { + case []string: + return val, nil + case error: + return nil, val + default: + panic(fmt.Sprintf("key %s is not a []string; got %T", key, val)) } - panic(fmt.Sprintf("key %s is not a []string", key)) } return defaultVal, nil } func (c Config) GetString(key pkey.Key, defaultVal string) (string, error) { if val, ok := c[key]; ok { - if str, ok := val.(string); ok { - return str, nil + switch val := val.(type) { + case string: + return val, nil + case error: + return "", val + default: + panic(fmt.Sprintf("key %s is not a string; got %T", key, val)) } - panic(fmt.Sprintf("key %s is not a string", key)) } return defaultVal, nil } func (c Config) GetBoolean(key pkey.Key, defaultVal bool) (bool, error) { if val, ok := c[key]; ok { - if b, ok := val.(bool); ok { - return b, nil 
+ switch val := val.(type) { + case bool: + return val, nil + case error: + return false, val + default: + panic(fmt.Sprintf("key %s is not a bool; got %T", key, val)) } - panic(fmt.Sprintf("key %s is not a bool", key)) } return defaultVal, nil } func (c Config) GetUint64(key pkey.Key, defaultVal uint64) (uint64, error) { if val, ok := c[key]; ok { - if u, ok := val.(uint64); ok { - return u, nil + switch val := val.(type) { + case uint64: + return val, nil + case error: + return 0, val + default: + panic(fmt.Sprintf("key %s is not a uint64; got %T", key, val)) } - panic(fmt.Sprintf("key %s is not a uint64", key)) } return defaultVal, nil } func (c Config) GetDuration(key pkey.Key, defaultVal time.Duration) (time.Duration, error) { if val, ok := c[key]; ok { - if d, ok := val.(time.Duration); ok { - return d, nil + switch val := val.(type) { + case time.Duration: + return val, nil + case error: + return 0, val + default: + panic(fmt.Sprintf("key %s is not a time.Duration; got %T", key, val)) } - panic(fmt.Sprintf("key %s is not a time.Duration", key)) } return defaultVal, nil } func (c Config) GetPreferenceOption(key pkey.Key, defaultVal ptype.PreferenceOption) (ptype.PreferenceOption, error) { if val, ok := c[key]; ok { - if p, ok := val.(ptype.PreferenceOption); ok { - return p, nil + switch val := val.(type) { + case ptype.PreferenceOption: + return val, nil + case error: + var zero ptype.PreferenceOption + return zero, val + case string: + var p ptype.PreferenceOption + err := p.UnmarshalText(([]byte)(val)) + return p, err + default: + panic(fmt.Sprintf("key %s is not a ptype.PreferenceOption", key)) } - panic(fmt.Sprintf("key %s is not a ptype.PreferenceOption", key)) } return defaultVal, nil } func (c Config) GetVisibility(key pkey.Key) (ptype.Visibility, error) { if val, ok := c[key]; ok { - if p, ok := val.(ptype.Visibility); ok { - return p, nil + switch val := val.(type) { + case ptype.Visibility: + return val, nil + case error: + var zero 
ptype.Visibility + return zero, val + case string: + var p ptype.Visibility + err := p.UnmarshalText(([]byte)(val)) + return p, err + default: + panic(fmt.Sprintf("key %s is not a ptype.Visibility", key)) } - panic(fmt.Sprintf("key %s is not a ptype.Visibility", key)) } return ptype.Visibility(ptype.ShowChoiceByPolicy), nil } From c9f214e503af5357c2cea77629441d8647e6402f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 3 Sep 2025 13:47:32 +0100 Subject: [PATCH 0288/1093] ipn: warn about self as the exit node if backend is running (#17018) Before: $ tailscale ip -4 1.2.3.4 $ tailscale set --exit-node=1.2.3.4 no node found in netmap with IP 1.2.3.4 After: $ tailscale set --exit-node=1.2.3.4 cannot use 1.2.3.4 as an exit node as it is a local IP address to this machine; did you mean --advertise-exit-node? The new error message already existed in the code, but would only be triggered if the backend wasn't running -- which means, in practice, it would almost never be triggered. The old error message is technically true, but could be confusing if you don't know the distinction between "netmap" and "tailnet" -- it could sound like the exit node isn't part of your tailnet. A node is never in its own netmap, but it is part of your tailnet. This error confused me when I was doing some local dev work, and it's confused customers before (e.g. #7513). Using the more specific error message should reduce confusion. 
Updates #7513 Updates https://github.com/tailscale/corp/issues/23596 Signed-off-by: Alex Chan --- ipn/prefs.go | 6 +++--- ipn/prefs_test.go | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/ipn/prefs.go b/ipn/prefs.go index 14b8078c0f55b..88c73ead3e365 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -847,6 +847,9 @@ func exitNodeIPOfArg(s string, st *ipnstate.Status) (ip netip.Addr, err error) { } ip, err = netip.ParseAddr(s) if err == nil { + if !isRemoteIP(st, ip) { + return ip, ExitNodeLocalIPError{s} + } // If we're online already and have a netmap, double check that the IP // address specified is valid. if st.BackendState == "Running" { @@ -858,9 +861,6 @@ func exitNodeIPOfArg(s string, st *ipnstate.Status) (ip netip.Addr, err error) { return ip, fmt.Errorf("node %v is not advertising an exit node", ip) } } - if !isRemoteIP(st, ip) { - return ip, ExitNodeLocalIPError{s} - } return ip, nil } match := 0 diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 7aac20c807716..3339a631ce827 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -897,6 +897,23 @@ func TestExitNodeIPOfArg(t *testing.T) { }, wantErr: `no node found in netmap with IP 1.2.3.4`, }, + { + name: "ip_is_self", + arg: "1.2.3.4", + st: &ipnstate.Status{ + TailscaleIPs: []netip.Addr{mustIP("1.2.3.4")}, + }, + wantErr: "cannot use 1.2.3.4 as an exit node as it is a local IP address to this machine", + }, + { + name: "ip_is_self_when_backend_running", + arg: "1.2.3.4", + st: &ipnstate.Status{ + BackendState: "Running", + TailscaleIPs: []netip.Addr{mustIP("1.2.3.4")}, + }, + wantErr: "cannot use 1.2.3.4 as an exit node as it is a local IP address to this machine", + }, { name: "ip_not_exit", arg: "1.2.3.4", From 0f3598b46741cbd0c005dc7d95c6e24fc8cf1924 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 3 Sep 2025 09:17:24 -0700 Subject: [PATCH 0289/1093] util/syspolicy: delete some unused code in handler.go There's a TODO to delete all of handler.go, 
but part of it's still used in another repo. But this deletes some. Updates #17022 Change-Id: Ic5a8a5a694ca258440307436731cd92b45ee2d21 Signed-off-by: Brad Fitzpatrick --- util/syspolicy/handler.go | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/util/syspolicy/handler.go b/util/syspolicy/handler.go index cdf32a7f78503..690ff2162f9b9 100644 --- a/util/syspolicy/handler.go +++ b/util/syspolicy/handler.go @@ -8,7 +8,6 @@ import ( "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" - "tailscale.com/util/testenv" ) // TODO(nickkhyl): delete this file once other repos are updated. @@ -36,19 +35,10 @@ type Handler interface { // // Deprecated: using [RegisterStore] should be preferred. func RegisterHandler(h Handler) { - rsop.RegisterStore("DeviceHandler", setting.DeviceScope, WrapHandler(h)) + rsop.RegisterStore("DeviceHandler", setting.DeviceScope, handlerStore{h}) } -// SetHandlerForTest wraps and sets the specified handler as the device's policy -// [source.Store] for the duration of tb. -// -// Deprecated: using [MustRegisterStoreForTest] should be preferred. -func SetHandlerForTest(tb testenv.TB, h Handler) { - RegisterWellKnownSettingsForTest(tb) - MustRegisterStoreForTest(tb, "DeviceHandler-TestOnly", setting.DefaultScope(), WrapHandler(h)) -} - -var _ source.Store = (*handlerStore)(nil) +var _ source.Store = handlerStore{} // handlerStore is a [source.Store] that calls the underlying [Handler]. // @@ -57,11 +47,6 @@ type handlerStore struct { h Handler } -// WrapHandler returns a [source.Store] that wraps the specified [Handler]. -func WrapHandler(h Handler) source.Store { - return handlerStore{h} -} - // Lock implements [source.Lockable]. 
func (s handlerStore) Lock() error { if lockable, ok := s.h.(source.Lockable); ok { From 2b9d055101a0a2731af9ef5d2caf513bfb7da75e Mon Sep 17 00:00:00 2001 From: Craig Hesling Date: Tue, 2 Sep 2025 02:27:34 -0700 Subject: [PATCH 0290/1093] drive: fix StatCache mishandling of paths with spaces Fix "file not found" errors when WebDAV clients access files/dirs inside directories with spaces. The issue occurred because StatCache was mixing URL-escaped and unescaped paths, causing cache key mismatches. Specifically, StatCache.set() parsed WebDAV responses containing URL-escaped paths (ex. "Dir%20Space/file1.txt") and stored them alongside unescaped cache keys (ex. "Dir Space/file1.txt"). This mismatch prevented StatCache.get() from correctly determining whether a child file existed. See https://github.com/tailscale/tailscale/issues/13632#issuecomment-3243522449 for the full explanation of the issue. The decision to keep all paths references unescaped inside the StatCache is consistent with net/http.Request.URL.Path and rewrite.go (sole consumer) Update unit test to detect this directory space mishandling. 
Fixes tailscale#13632 Signed-off-by: Craig Hesling --- drive/driveimpl/compositedav/stat_cache.go | 8 +++++++- drive/driveimpl/compositedav/stat_cache_test.go | 8 ++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drive/driveimpl/compositedav/stat_cache.go b/drive/driveimpl/compositedav/stat_cache.go index fc57ff0648300..36463fe7e137f 100644 --- a/drive/driveimpl/compositedav/stat_cache.go +++ b/drive/driveimpl/compositedav/stat_cache.go @@ -8,6 +8,7 @@ import ( "encoding/xml" "log" "net/http" + "net/url" "sync" "time" @@ -165,7 +166,12 @@ func (c *StatCache) set(name string, depth int, ce *cacheEntry) { children = make(map[string]*cacheEntry, len(ms.Responses)-1) for i := 0; i < len(ms.Responses); i++ { response := ms.Responses[i] - name := shared.Normalize(response.Href) + name, err := url.PathUnescape(response.Href) + if err != nil { + log.Printf("statcache.set child parse error: %s", err) + return + } + name = shared.Normalize(name) raw := marshalMultiStatus(response) entry := newCacheEntry(ce.Status, raw) if i == 0 { diff --git a/drive/driveimpl/compositedav/stat_cache_test.go b/drive/driveimpl/compositedav/stat_cache_test.go index fa63457a256d3..baa4fdda2c7f7 100644 --- a/drive/driveimpl/compositedav/stat_cache_test.go +++ b/drive/driveimpl/compositedav/stat_cache_test.go @@ -16,12 +16,12 @@ import ( "tailscale.com/tstest" ) -var parentPath = "/parent" +var parentPath = "/parent with spaces" -var childPath = "/parent/child.txt" +var childPath = "/parent with spaces/child.txt" var parentResponse = ` -/parent/ +/parent%20with%20spaces/ Mon, 29 Apr 2024 19:52:23 GMT @@ -36,7 +36,7 @@ var parentResponse = ` var childResponse = ` -/parent/child.txt +/parent%20with%20spaces/child.txt Mon, 29 Apr 2024 19:52:23 GMT From a2f2ac6ba15283dcf0a6e8a62d64cf4122ea9360 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 3 Sep 2025 15:35:05 -0400 Subject: [PATCH 0291/1093] ipn/local: fix deadlock in initial suggested exit node query (#17025) updates 
tailscale/corp#26369 b.mu is locked here. We need to use suggestExitNodeLocked. Signed-off-by: Jonathan Nobels --- ipn/ipnlocal/local.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 700e2de37778a..7592e9b4ba105 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3141,7 +3141,7 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A ini.Health = b.HealthTracker().CurrentState() } if mask&ipn.NotifyInitialSuggestedExitNode != 0 { - if en, err := b.SuggestExitNode(); err != nil { + if en, err := b.suggestExitNodeLocked(); err == nil { ini.SuggestedExitNode = &en.ID } } From 04f00339b6079f5afb3512dfe8cf929f42097cd8 Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 3 Sep 2025 22:08:45 +0100 Subject: [PATCH 0292/1093] cmd/k8s-operator: update connector example (#17020) This commit modifies the connector example to use the new hostname prefix and replicas fields Signed-off-by: David Bond --- cmd/k8s-operator/deploy/examples/connector.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-operator/deploy/examples/connector.yaml b/cmd/k8s-operator/deploy/examples/connector.yaml index d29f27cf51c98..f5447400e8722 100644 --- a/cmd/k8s-operator/deploy/examples/connector.yaml +++ b/cmd/k8s-operator/deploy/examples/connector.yaml @@ -11,7 +11,8 @@ metadata: spec: tags: - "tag:prod" - hostname: ts-prod + hostnamePrefix: ts-prod + replicas: 2 subnetRouter: advertiseRoutes: - "10.40.0.0/14" From d8ac539bf9617bc18cd2c5f231c77b1edb48849e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 3 Sep 2025 15:05:38 -0700 Subject: [PATCH 0293/1093] util/syspolicy: remove handler, other dead code Fixes #17022 Change-Id: I6a0f6488ae3ea75c5844dfcba68e1e8024e930be Signed-off-by: Brad Fitzpatrick --- util/syspolicy/handler.go | 99 ------------------------------ util/syspolicy/policy_keys.go | 19 ------ util/syspolicy/policy_keys_test.go | 7 --- 3 
files changed, 125 deletions(-) delete mode 100644 util/syspolicy/handler.go diff --git a/util/syspolicy/handler.go b/util/syspolicy/handler.go deleted file mode 100644 index 690ff2162f9b9..0000000000000 --- a/util/syspolicy/handler.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package syspolicy - -import ( - "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/rsop" - "tailscale.com/util/syspolicy/setting" - "tailscale.com/util/syspolicy/source" -) - -// TODO(nickkhyl): delete this file once other repos are updated. - -// Handler reads system policies from OS-specific storage. -// -// Deprecated: implementing a [source.Store] should be preferred. -type Handler interface { - // ReadString reads the policy setting's string value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadString(key string) (string, error) - // ReadUInt64 reads the policy setting's uint64 value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadUInt64(key string) (uint64, error) - // ReadBool reads the policy setting's boolean value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadBoolean(key string) (bool, error) - // ReadStringArray reads the policy setting's string array value for the given key. - // It should return ErrNoSuchKey if the key does not have a value set. - ReadStringArray(key string) ([]string, error) -} - -// RegisterHandler wraps and registers the specified handler as the device's -// policy [source.Store] for the program's lifetime. -// -// Deprecated: using [RegisterStore] should be preferred. -func RegisterHandler(h Handler) { - rsop.RegisterStore("DeviceHandler", setting.DeviceScope, handlerStore{h}) -} - -var _ source.Store = handlerStore{} - -// handlerStore is a [source.Store] that calls the underlying [Handler]. 
-// -// TODO(nickkhyl): remove it when the corp and android repos are updated. -type handlerStore struct { - h Handler -} - -// Lock implements [source.Lockable]. -func (s handlerStore) Lock() error { - if lockable, ok := s.h.(source.Lockable); ok { - return lockable.Lock() - } - return nil -} - -// Unlock implements [source.Lockable]. -func (s handlerStore) Unlock() { - if lockable, ok := s.h.(source.Lockable); ok { - lockable.Unlock() - } -} - -// RegisterChangeCallback implements [source.Changeable]. -func (s handlerStore) RegisterChangeCallback(callback func()) (unregister func(), err error) { - if changeable, ok := s.h.(source.Changeable); ok { - return changeable.RegisterChangeCallback(callback) - } - return func() {}, nil -} - -// ReadString implements [source.Store]. -func (s handlerStore) ReadString(key pkey.Key) (string, error) { - return s.h.ReadString(string(key)) -} - -// ReadUInt64 implements [source.Store]. -func (s handlerStore) ReadUInt64(key pkey.Key) (uint64, error) { - return s.h.ReadUInt64(string(key)) -} - -// ReadBoolean implements [source.Store]. -func (s handlerStore) ReadBoolean(key pkey.Key) (bool, error) { - return s.h.ReadBoolean(string(key)) -} - -// ReadStringArray implements [source.Store]. -func (s handlerStore) ReadStringArray(key pkey.Key) ([]string, error) { - return s.h.ReadStringArray(string(key)) -} - -// Done implements [source.Expirable]. 
-func (s handlerStore) Done() <-chan struct{} { - if expirable, ok := s.h.(source.Expirable); ok { - return expirable.Done() - } - return nil -} diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index e32d9cdf4ddf5..1bbcfe6ca4645 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -4,7 +4,6 @@ package syspolicy import ( - "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/setting" @@ -78,24 +77,6 @@ func init() { }) } -var implicitDefinitionMap lazy.SyncValue[setting.DefinitionMap] - -// WellKnownSettingDefinition returns a well-known, implicit setting definition by its key, -// or an [ErrNoSuchKey] if a policy setting with the specified key does not exist -// among implicit policy definitions. -func WellKnownSettingDefinition(k pkey.Key) (*setting.Definition, error) { - m, err := implicitDefinitionMap.GetErr(func() (setting.DefinitionMap, error) { - return setting.DefinitionMapOf(implicitDefinitions) - }) - if err != nil { - return nil, err - } - if d, ok := m[k]; ok { - return d, nil - } - return nil, ErrNoSuchKey -} - // RegisterWellKnownSettingsForTest registers all implicit setting definitions // for the duration of the test. 
func RegisterWellKnownSettingsForTest(tb testenv.TB) { diff --git a/util/syspolicy/policy_keys_test.go b/util/syspolicy/policy_keys_test.go index 490353c8144ae..c2b8d5741831d 100644 --- a/util/syspolicy/policy_keys_test.go +++ b/util/syspolicy/policy_keys_test.go @@ -46,13 +46,6 @@ func TestKnownKeysRegistered(t *testing.T) { } } -func TestNotAWellKnownSetting(t *testing.T) { - d, err := WellKnownSettingDefinition("TestSettingDoesNotExist") - if d != nil || err == nil { - t.Fatalf("got %v, %v; want nil, %v", d, err, ErrNoSuchKey) - } -} - func listStringConsts[T ~string](filename string) (map[string]T, error) { fset := token.NewFileSet() src, err := os.ReadFile(filename) From 624cdd2961ac88ac2c187072dc2cb322d05a653b Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 4 Sep 2025 12:40:55 +0100 Subject: [PATCH 0294/1093] cmd/containerboot: do not reset state on non-existant secret (#17021) This commit modifies containerboot's state reset process to handle the state secret not existing. During other parts of the boot process we gracefully handle the state secret not being created yet, but missed that check within `resetContainerbootState` Fixes https://github.com/tailscale/tailscale/issues/16804 Signed-off-by: David Bond --- cmd/containerboot/kube.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index d4a974e6f3a24..4873ae13f753a 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -124,10 +124,13 @@ func (kc *kubeClient) deleteAuthKey(ctx context.Context) error { // ensure the operator doesn't use stale state when a Pod is first recreated. func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string) error { existingSecret, err := kc.GetSecret(ctx, kc.stateSecret) - if err != nil { + switch { + case kubeclient.IsNotFoundErr(err): + // In the case that the Secret doesn't exist, we don't have any state to reset and can return early. 
+ return nil + case err != nil: return fmt.Errorf("failed to read state Secret %q to reset state: %w", kc.stateSecret, err) } - s := &kubeapi.Secret{ Data: map[string][]byte{ kubetypes.KeyCapVer: fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion), From b034f7cca95476c89394b3419b8fb7b9d7e3534c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 3 Sep 2025 16:06:39 -0700 Subject: [PATCH 0295/1093] ipn/ipnlocal, util/syspolicy: convert last RegisterWellKnownSettingsForTest caller, remove Updates #16998 Change-Id: I735d75129a97a929092e9075107e41cdade18944 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local_test.go | 14 ++-- util/syspolicy/policy_keys.go | 10 --- util/syspolicy/policytest/policytest.go | 93 ++++++++++++++++++++++++- util/syspolicy/syspolicy.go | 11 --- util/syspolicy/syspolicy_test.go | 37 +++++++--- 5 files changed, 129 insertions(+), 36 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 4debcdd8dcf71..7d1c452f30697 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -65,7 +65,6 @@ import ( "tailscale.com/util/syspolicy" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policytest" - "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" "tailscale.com/wgengine/filter" @@ -6529,12 +6528,13 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - syspolicy.RegisterWellKnownSettingsForTest(t) - store := source.NewTestStoreOf[string](t) - syspolicy.MustRegisterStoreForTest(t, "TestSource", setting.DeviceScope, store) + var polc policytest.Config + polc.EnableRegisterChangeCallback() sys := tsd.NewSystem() + sys.PolicyClient.Set(polc) lb := newLocalBackendWithSysAndTestControl(t, enableLogging, sys, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + opts.PolicyClient = polc return newClient(tb, opts) }) if tt.initialPrefs != nil { @@ 
-6551,7 +6551,11 @@ func TestUpdatePrefsOnSysPolicyChange(t *testing.T) { nw.watch(0, nil, unexpectedPrefsChange) } - store.SetStrings(tt.stringSettings...) + var batch policytest.Config + for _, ss := range tt.stringSettings { + batch.Set(ss.Key, ss.Value) + } + polc.SetMultiple(batch) nw.check() }) diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index 1bbcfe6ca4645..ef2ac430dbccc 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -76,13 +76,3 @@ func init() { return nil }) } - -// RegisterWellKnownSettingsForTest registers all implicit setting definitions -// for the duration of the test. -func RegisterWellKnownSettingsForTest(tb testenv.TB) { - tb.Helper() - err := setting.SetDefinitionsForTest(tb, implicitDefinitions...) - if err != nil { - tb.Fatalf("Failed to register well-known settings: %v", err) - } -} diff --git a/util/syspolicy/policytest/policytest.go b/util/syspolicy/policytest/policytest.go index 7ea0ad91ff8c8..e5c1c7856d0a3 100644 --- a/util/syspolicy/policytest/policytest.go +++ b/util/syspolicy/policytest/policytest.go @@ -6,8 +6,12 @@ package policytest import ( "fmt" + "maps" + "slices" + "sync" "time" + "tailscale.com/util/set" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" @@ -29,11 +33,85 @@ type Config map[pkey.Key]any var _ policyclient.Client = Config{} +// Set sets key to value. The value should be of the correct type that it will +// be read as later. For PreferenceOption and Visibility, you may also set them +// to 'string' values and they'll be UnmarshalText'ed into their correct value +// at Get time. +// +// As a special case, the value can also be of type error to make the accessors +// return that error value. 
func (c *Config) Set(key pkey.Key, value any) { if *c == nil { *c = make(map[pkey.Key]any) } (*c)[key] = value + + if w, ok := (*c)[watchersKey].(*watchers); ok && key != watchersKey { + w.mu.Lock() + vals := slices.Collect(maps.Values(w.s)) + w.mu.Unlock() + for _, f := range vals { + f(policyChange(key)) + } + } +} + +// SetMultiple is a batch version of [Config.Set]. It copies the contents of o +// into c and does at most one notification wake-up for the whole batch. +func (c *Config) SetMultiple(o Config) { + if *c == nil { + *c = make(map[pkey.Key]any) + } + + maps.Copy(*c, o) + + if w, ok := (*c)[watchersKey].(*watchers); ok { + w.mu.Lock() + vals := slices.Collect(maps.Values(w.s)) + w.mu.Unlock() + for _, f := range vals { + f(policyChanges(o)) + } + } +} + +type policyChange pkey.Key + +func (pc policyChange) HasChanged(v pkey.Key) bool { return pkey.Key(pc) == v } +func (pc policyChange) HasChangedAnyOf(keys ...pkey.Key) bool { + return slices.Contains(keys, pkey.Key(pc)) +} + +type policyChanges map[pkey.Key]any + +func (pc policyChanges) HasChanged(v pkey.Key) bool { + _, ok := pc[v] + return ok +} +func (pc policyChanges) HasChangedAnyOf(keys ...pkey.Key) bool { + for _, k := range keys { + if pc.HasChanged(k) { + return true + } + } + return false +} + +const watchersKey = "_policytest_watchers" + +type watchers struct { + mu sync.Mutex + s set.HandleSet[func(policyclient.PolicyChange)] +} + +// EnableRegisterChangeCallback makes c support the RegisterChangeCallback +// for testing. Without calling this, the RegisterChangeCallback does nothing. +// For watchers to be notified, use the [Config.Set] method. Changing the map +// directly obviously wouldn't work. 
+func (c *Config) EnableRegisterChangeCallback() { + if _, ok := (*c)[watchersKey]; !ok { + c.Set(watchersKey, new(watchers)) + } } func (c Config) GetStringArray(key pkey.Key, defaultVal []string) ([]string, error) { @@ -153,8 +231,19 @@ func (c Config) HasAnyOf(keys ...pkey.Key) (bool, error) { return false, nil } -func (sp Config) RegisterChangeCallback(callback func(policyclient.PolicyChange)) (func(), error) { - return func() {}, nil +func (c Config) RegisterChangeCallback(callback func(policyclient.PolicyChange)) (func(), error) { + w, ok := c[watchersKey].(*watchers) + if !ok { + return func() {}, nil + } + w.mu.Lock() + defer w.mu.Unlock() + h := w.s.Add(callback) + return func() { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.s, h) + }, nil } func (sp Config) SetDebugLoggingEnabled(enabled bool) {} diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 2367e21eb2ad3..48e430b674e35 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -19,7 +19,6 @@ import ( "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" - "tailscale.com/util/testenv" ) var ( @@ -45,16 +44,6 @@ func RegisterStore(name string, scope setting.PolicyScope, store source.Store) ( return rsop.RegisterStore(name, scope, store) } -// MustRegisterStoreForTest is like [rsop.RegisterStoreForTest], but it fails the test if the store could not be registered. -func MustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicyScope, store source.Store) *rsop.StoreRegistration { - tb.Helper() - reg, err := rsop.RegisterStoreForTest(tb, name, scope, store) - if err != nil { - tb.Fatalf("Failed to register policy store %q as a %v policy source: %v", name, scope, err) - } - return reg -} - // hasAnyOf returns whether at least one of the specified policy settings is configured, // or an error if no keys are provided or the check fails. 
func hasAnyOf(keys ...pkey.Key) (bool, error) { diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index 0ee62efb11af5..10f8da48657d3 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -14,6 +14,7 @@ import ( "tailscale.com/util/syspolicy/internal/metrics" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/ptype" + "tailscale.com/util/syspolicy/rsop" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/syspolicy/source" "tailscale.com/util/testenv" @@ -21,6 +22,16 @@ import ( var someOtherError = errors.New("error other than not found") +// registerWellKnownSettingsForTest registers all implicit setting definitions +// for the duration of the test. +func registerWellKnownSettingsForTest(tb testenv.TB) { + tb.Helper() + err := setting.SetDefinitionsForTest(tb, implicitDefinitions...) + if err != nil { + tb.Fatalf("Failed to register well-known settings: %v", err) + } +} + func TestGetString(t *testing.T) { tests := []struct { name string @@ -68,7 +79,7 @@ func TestGetString(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -210,7 +221,7 @@ func TestGetBoolean(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -303,7 +314,7 @@ func TestGetPreferenceOption(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -388,7 +399,7 @@ func TestGetVisibility(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -484,7 +495,7 @@ func TestGetDuration(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := 
range tests { t.Run(tt.name, func(t *testing.T) { @@ -565,7 +576,7 @@ func TestGetStringArray(t *testing.T) { }, } - RegisterWellKnownSettingsForTest(t) + registerWellKnownSettingsForTest(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -599,14 +610,24 @@ func TestGetStringArray(t *testing.T) { } } +// mustRegisterStoreForTest is like [rsop.RegisterStoreForTest], but it fails the test if the store could not be registered. +func mustRegisterStoreForTest(tb testenv.TB, name string, scope setting.PolicyScope, store source.Store) *rsop.StoreRegistration { + tb.Helper() + reg, err := rsop.RegisterStoreForTest(tb, name, scope, store) + if err != nil { + tb.Fatalf("Failed to register policy store %q as a %v policy source: %v", name, scope, err) + } + return reg +} + func registerSingleSettingStoreForTest[T source.TestValueType](tb testenv.TB, s source.TestSetting[T]) { policyStore := source.NewTestStoreOf(tb, s) - MustRegisterStoreForTest(tb, "TestStore", setting.DeviceScope, policyStore) + mustRegisterStoreForTest(tb, "TestStore", setting.DeviceScope, policyStore) } func BenchmarkGetString(b *testing.B) { loggerx.SetForTest(b, logger.Discard, logger.Discard) - RegisterWellKnownSettingsForTest(b) + registerWellKnownSettingsForTest(b) wantControlURL := "https://login.tailscale.com" registerSingleSettingStoreForTest(b, source.TestSettingOf(pkey.ControlURL, wantControlURL)) From 46369f06af2729b2e553433aef16c821670c2455 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Sep 2025 21:41:06 -0700 Subject: [PATCH 0296/1093] util/syspolicy/policyclient: always use no-op policyclient in tests by default We should never use the real syspolicy implementation in tests by default. (the machine's configuration shouldn't affect tests) You either specify a test policy, or you get a no-op one. 
Updates #16998 Change-Id: I3350d392aad11573a5ad7caab919bb3bbaecb225 Signed-off-by: Brad Fitzpatrick --- util/syspolicy/policyclient/policyclient.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/util/syspolicy/policyclient/policyclient.go b/util/syspolicy/policyclient/policyclient.go index 5a78424481955..728a16718e8e4 100644 --- a/util/syspolicy/policyclient/policyclient.go +++ b/util/syspolicy/policyclient/policyclient.go @@ -11,6 +11,7 @@ import ( "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/ptype" + "tailscale.com/util/testenv" ) // Client is the interface between code making questions about the system policy @@ -68,8 +69,15 @@ type Client interface { // Get returns a non-nil [Client] implementation as a function of the // build tags. It returns a no-op implementation if the full syspolicy -// package is omitted from the build. +// package is omitted from the build, or in tests. func Get() Client { + if testenv.InTest() { + // This is a little redundant (the Windows implementation at least + // already does this) but it's here for redundancy and clarity, that we + // don't want to accidentally use the real system policy when running + // tests. 
+ return NoPolicyClient{} + } return client } From 046b8830c76b29f04fc95f3880e6abe41eeb16e7 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Fri, 5 Sep 2025 14:52:44 +0100 Subject: [PATCH 0297/1093] ipn/ipnlocal: add state change test for key expiry Updates tailscale/corp#31478 Signed-off-by: James Sanderson --- ipn/ipnlocal/state_test.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index c29589acc698c..4097a37735b5c 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1349,6 +1349,21 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Hosts: hostsFor(node3), }, }, + { + name: "Start/Connect/Login/Expire", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node3) + cc().send(nil, "", false, &netmap.NetworkMap{ + Expiry: time.Now().Add(-time.Minute), + }) + }, + wantState: ipn.NeedsLogin, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, } for _, tt := range tests { From 23297da10d180a4b30b1a6db9e131e463b447813 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Fri, 5 Sep 2025 15:56:23 -0400 Subject: [PATCH 0298/1093] cmd/tailscale/cli: add new line for set --webclient (#17043) Fixes #17042 Signed-off-by: Mike O'Driscoll --- cmd/tailscale/cli/set.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index f1b21995ec388..d265090e2e571 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -264,7 +264,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { } if setArgs.runWebClient && len(st.TailscaleIPs) > 0 { - printf("\nWeb interface now running at %s:%d", st.TailscaleIPs[0], web.ListenPort) + printf("\nWeb interface now running at %s:%d\n", st.TailscaleIPs[0], web.ListenPort) } return nil From 
a29545e9cc6a71439741836ea9ba0e8cbfbc7134 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 5 Sep 2025 17:58:36 -0700 Subject: [PATCH 0299/1093] wgengine/magicsock: log the peer failing disco writes are intended for Updates tailscale/corp#31762 Signed-off-by: James Tucker --- wgengine/magicsock/magicsock.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a11e8a1cd4f80..695039ea640a5 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2026,7 +2026,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. // Can't send. (e.g. no IPv6 locally) } else { if !c.networkDown() && pmtuShouldLogDiscoTxErr(m, err) { - c.logf("magicsock: disco: failed to send %v to %v: %v", disco.MessageSummary(m), dst, err) + c.logf("magicsock: disco: failed to send %v to %v %s: %v", disco.MessageSummary(m), dst, dstKey.ShortString(), err) } } return sent, err From ed6aa50bd549bdc5e79dcf0326c358f40e9aced2 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Sat, 6 Sep 2025 09:28:07 +0100 Subject: [PATCH 0300/1093] prober: include current probe results in run-probe text response It was a bit confusing that provided history did not include the current probe results. 
Updates tailscale/corp#20583 Signed-off-by: Anton Tolchanov --- prober/prober.go | 6 +++--- prober/prober_test.go | 12 +++++++----- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/prober/prober.go b/prober/prober.go index 9c494c3c98d62..af0e199343b2d 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -570,9 +570,9 @@ func (p *Prober) RunHandler(w http.ResponseWriter, r *http.Request) error { return nil } - stats := fmt.Sprintf("Last %d probes: success rate %d%%, median latency %v\n", - len(prevInfo.RecentResults), - int(prevInfo.RecentSuccessRatio()*100), prevInfo.RecentMedianLatency()) + stats := fmt.Sprintf("Last %d probes (including this one): success rate %d%%, median latency %v\n", + len(info.RecentResults), + int(info.RecentSuccessRatio()*100), info.RecentMedianLatency()) if err != nil { return tsweb.Error(respStatus, fmt.Sprintf("Probe failed: %s\n%s", err.Error(), stats), err) } diff --git a/prober/prober_test.go b/prober/prober_test.go index 15db21a5efe5b..1e045fa8971b0 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -12,6 +12,7 @@ import ( "net/http" "net/http/httptest" "net/url" + "regexp" "strings" "sync" "sync/atomic" @@ -546,7 +547,7 @@ func TestProberRunHandler(t *testing.T) { probeFunc func(context.Context) error wantResponseCode int wantJSONResponse RunHandlerResponse - wantPlaintextResponse string + wantPlaintextResponse *regexp.Regexp }{ { name: "success", @@ -561,7 +562,7 @@ func TestProberRunHandler(t *testing.T) { }, PreviousSuccessRatio: 1, }, - wantPlaintextResponse: "Probe succeeded", + wantPlaintextResponse: regexp.MustCompile("(?s)Probe succeeded .*Last 2 probes.*success rate 100%"), }, { name: "failure", @@ -576,7 +577,7 @@ func TestProberRunHandler(t *testing.T) { RecentResults: []bool{false, false}, }, }, - wantPlaintextResponse: "Probe failed", + wantPlaintextResponse: regexp.MustCompile("(?s)Probe failed: .*Last 2 probes.*success rate 0%"), }, } @@ -607,6 +608,7 @@ func TestProberRunHandler(t 
*testing.T) { if err != nil { t.Fatalf("failed to make request: %v", err) } + defer resp.Body.Close() if resp.StatusCode != tt.wantResponseCode { t.Errorf("unexpected response code: got %d, want %d", resp.StatusCode, tt.wantResponseCode) @@ -630,8 +632,8 @@ func TestProberRunHandler(t *testing.T) { } } else { body, _ := io.ReadAll(resp.Body) - if !strings.Contains(string(body), tt.wantPlaintextResponse) { - t.Errorf("unexpected response body: got %q, want to contain %q", body, tt.wantPlaintextResponse) + if !tt.wantPlaintextResponse.MatchString(string(body)) { + t.Errorf("unexpected response body: got %q, want to match %q", body, tt.wantPlaintextResponse) } } }) From ff8900583ceb1876a8453c6d2c03dbd2985a2857 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 8 Sep 2025 15:25:50 +0100 Subject: [PATCH 0301/1093] cmd/tailscale/cli: fix the spelling of "routes" (#17039) Updates #cleanup Signed-off-by: Alex Chan --- cmd/tailscale/cli/set.go | 2 +- cmd/tailscale/cli/up.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index d265090e2e571..a1c6987e8fc4b 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -185,7 +185,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { } } - warnOnAdvertiseRouts(ctx, &maskedPrefs.Prefs) + warnOnAdvertiseRoutes(ctx, &maskedPrefs.Prefs) if err := checkExitNodeRisk(ctx, &maskedPrefs.Prefs, setArgs.acceptedRisks); err != nil { return err } diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index ebbe3b19e10d9..097af725b9d78 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -486,7 +486,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE fatalf("%s", err) } - warnOnAdvertiseRouts(ctx, prefs) + warnOnAdvertiseRoutes(ctx, prefs) if err := checkExitNodeRisk(ctx, prefs, upArgs.acceptedRisks); err != nil { return err } @@ -1184,7 +1184,7 @@ func resolveAuthKey(ctx 
context.Context, v, tags string) (string, error) { return authkey, nil } -func warnOnAdvertiseRouts(ctx context.Context, prefs *ipn.Prefs) { +func warnOnAdvertiseRoutes(ctx context.Context, prefs *ipn.Prefs) { if len(prefs.AdvertiseRoutes) > 0 || prefs.AppConnector.Advertise { // TODO(jwhited): compress CheckIPForwarding and CheckUDPGROForwarding // into a single HTTP req. From 14adf5b71783d039c1a8a978eea5ce75f081144f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 8 Sep 2025 15:27:24 +0100 Subject: [PATCH 0302/1093] utils/expvarx, tstest/integration: mark two tests as known flaky (#17052) * utils/expvarx: mark TestSafeFuncHappyPath as known flaky Updates #15348 Signed-off-by: Alex Chan * tstest/integration: mark TestCollectPanic as known flaky Updates #15865 Signed-off-by: Alex Chan --------- Signed-off-by: Alex Chan --- tstest/integration/integration_test.go | 1 + util/expvarx/expvarx_test.go | 3 +++ 2 files changed, 4 insertions(+) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 7cb251f31c344..de464108c44dd 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -170,6 +170,7 @@ func TestControlKnobs(t *testing.T) { } func TestCollectPanic(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15865") tstest.Shard(t) tstest.Parallel(t) env := NewTestEnv(t) diff --git a/util/expvarx/expvarx_test.go b/util/expvarx/expvarx_test.go index 74ec152f476b9..50131dfb3845a 100644 --- a/util/expvarx/expvarx_test.go +++ b/util/expvarx/expvarx_test.go @@ -10,6 +10,8 @@ import ( "sync/atomic" "testing" "time" + + "tailscale.com/cmd/testwrapper/flakytest" ) func ExampleNewSafeFunc() { @@ -52,6 +54,7 @@ func ExampleNewSafeFunc() { } func TestSafeFuncHappyPath(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15348") var count int f := NewSafeFunc(expvar.Func(func() any { count++ From 
1cb855fb3682c9c1f0052bfe298d058fe76a0b03 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 8 Sep 2025 08:46:53 -0700 Subject: [PATCH 0303/1093] util/expvarx: deflake TestSafeFuncHappyPath with synctest I probably could've deflaked this without synctest, but might as well use it now that Go 1.25 has it. Fixes #15348 Change-Id: I81c9253fcb7eada079f3e943ab5f1e29ba8e8e31 Signed-off-by: Brad Fitzpatrick --- util/expvarx/expvarx_test.go | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/util/expvarx/expvarx_test.go b/util/expvarx/expvarx_test.go index 50131dfb3845a..9ed2e8f209115 100644 --- a/util/expvarx/expvarx_test.go +++ b/util/expvarx/expvarx_test.go @@ -9,9 +9,8 @@ import ( "sync" "sync/atomic" "testing" + "testing/synctest" "time" - - "tailscale.com/cmd/testwrapper/flakytest" ) func ExampleNewSafeFunc() { @@ -54,19 +53,21 @@ func ExampleNewSafeFunc() { } func TestSafeFuncHappyPath(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15348") - var count int - f := NewSafeFunc(expvar.Func(func() any { - count++ - return count - }), time.Millisecond, nil) - - if got, want := f.Value(), 1; got != want { - t.Errorf("got %v, want %v", got, want) - } - if got, want := f.Value(), 2; got != want { - t.Errorf("got %v, want %v", got, want) - } + synctest.Test(t, func(t *testing.T) { + var count int + f := NewSafeFunc(expvar.Func(func() any { + count++ + return count + }), time.Second, nil) + + if got, want := f.Value(), 1; got != want { + t.Errorf("got %v, want %v", got, want) + } + time.Sleep(5 * time.Second) // (fake time in synctest) + if got, want := f.Value(), 2; got != want { + t.Errorf("got %v, want %v", got, want) + } + }) } func TestSafeFuncSlow(t *testing.T) { From 71cb6d4cbd8758197a82449ffa86b3288a35d29c Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 8 Sep 2025 17:51:59 +0100 Subject: [PATCH 0304/1093] cmd/tailscale/cli, derp: use client/local instead of deprecated 
client/tailscale (#17061) * cmd/tailscale/cli: use client/local instead of deprecated client/tailscale Updates tailscale/corp#22748 Signed-off-by: Alex Chan * derp: use client/local instead of deprecated client/tailscale Updates tailscale/corp#22748 Signed-off-by: Alex Chan --------- Signed-off-by: Alex Chan --- cmd/derper/depaware.txt | 8 +++----- cmd/tailscale/cli/bugreport.go | 4 ++-- cmd/tailscale/cli/cli.go | 5 ++--- cmd/tailscale/cli/debug.go | 4 ++-- cmd/tailscale/cli/ping.go | 4 ++-- cmd/tailscale/cli/serve_legacy.go | 4 ++-- cmd/tailscale/cli/serve_legacy_test.go | 4 ++-- cmd/tailscale/cli/serve_v2.go | 6 +++--- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 9 ++++----- cmd/tsidp/depaware.txt | 2 +- derp/derp_server.go | 3 +-- tsnet/depaware.txt | 2 +- 13 files changed, 26 insertions(+), 31 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 52b82b2289b49..8adb2d3382b13 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -89,9 +89,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp - tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ + tailscale.com/client/local from tailscale.com/derp + tailscale.com/client/tailscale/apitype from tailscale.com/client/local tailscale.com/derp from tailscale.com/cmd/derper+ tailscale.com/derp/derpconst from tailscale.com/derp+ tailscale.com/derp/derphttp from tailscale.com/cmd/derper @@ -142,7 +141,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/lazy from tailscale.com/version+ tailscale.com/types/logger from 
tailscale.com/cmd/derper+ tailscale.com/types/netmap from tailscale.com/ipn - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/envknob+ tailscale.com/types/persist from tailscale.com/ipn tailscale.com/types/preftype from tailscale.com/ipn tailscale.com/types/ptr from tailscale.com/hostinfo+ @@ -160,7 +159,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/dnsname from tailscale.com/hostinfo+ tailscale.com/util/eventbus from tailscale.com/net/netmon+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/client/tailscale tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/health+ diff --git a/cmd/tailscale/cli/bugreport.go b/cmd/tailscale/cli/bugreport.go index d671f3df60d76..50e6ffd82bedc 100644 --- a/cmd/tailscale/cli/bugreport.go +++ b/cmd/tailscale/cli/bugreport.go @@ -10,7 +10,7 @@ import ( "fmt" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" ) var bugReportCmd = &ffcli.Command{ @@ -40,7 +40,7 @@ func runBugReport(ctx context.Context, args []string) error { default: return errors.New("unknown arguments") } - opts := tailscale.BugReportOpts{ + opts := local.BugReportOpts{ Note: note, Diagnose: bugReportArgs.diagnose, } diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 5db0308887efa..42f1cb3a526fb 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -23,7 +23,6 @@ import ( "github.com/mattn/go-isatty" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/local" - "tailscale.com/client/tailscale" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/envknob" "tailscale.com/paths" @@ -113,7 +112,7 @@ func Run(args []string) (err error) { } var warnOnce sync.Once - 
tailscale.SetVersionMismatchHandler(func(clientVer, serverVer string) { + local.SetVersionMismatchHandler(func(clientVer, serverVer string) { warnOnce.Do(func() { fmt.Fprintf(Stderr, "Warning: client version %q != tailscaled server version %q\n", clientVer, serverVer) }) @@ -164,7 +163,7 @@ func Run(args []string) (err error) { } err = rootCmd.Run(context.Background()) - if tailscale.IsAccessDeniedError(err) && os.Getuid() != 0 && runtime.GOOS != "windows" { + if local.IsAccessDeniedError(err) && os.Getuid() != 0 && runtime.GOOS != "windows" { return fmt.Errorf("%v\n\nUse 'sudo tailscale %s'.\nTo not require root, use 'sudo tailscale set --operator=$USER' once.", err, strings.Join(args, " ")) } if errors.Is(err, flag.ErrHelp) { diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 6fe15b238ca46..4960aeec2d50a 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -30,7 +30,7 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "golang.org/x/net/http/httpproxy" "golang.org/x/net/http2" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/control/controlhttp" "tailscale.com/hostinfo" @@ -1219,7 +1219,7 @@ var debugPortmapArgs struct { } func debugPortmap(ctx context.Context, args []string) error { - opts := &tailscale.DebugPortmapOpts{ + opts := &local.DebugPortmapOpts{ Duration: debugPortmapArgs.duration, Type: debugPortmapArgs.ty, LogHTTP: debugPortmapArgs.logHTTP, diff --git a/cmd/tailscale/cli/ping.go b/cmd/tailscale/cli/ping.go index d438cb2286d4c..8ece7c93d2311 100644 --- a/cmd/tailscale/cli/ping.go +++ b/cmd/tailscale/cli/ping.go @@ -16,7 +16,7 @@ import ( "time" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -128,7 +128,7 @@ func runPing(ctx context.Context, args []string) error { for { n++ ctx, 
cancel := context.WithTimeout(ctx, pingArgs.timeout) - pr, err := localClient.PingWithOpts(ctx, netip.MustParseAddr(ip), pingType(), tailscale.PingOpts{Size: pingArgs.size}) + pr, err := localClient.PingWithOpts(ctx, netip.MustParseAddr(ip), pingType(), local.PingOpts{Size: pingArgs.size}) cancel() if err != nil { if errors.Is(err, context.DeadlineExceeded) { diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 1a05d0543f58e..3fbddeabf8d4e 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -23,7 +23,7 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -139,7 +139,7 @@ type localServeClient interface { GetServeConfig(context.Context) (*ipn.ServeConfig, error) SetServeConfig(context.Context, *ipn.ServeConfig) error QueryFeature(ctx context.Context, feature string) (*tailcfg.QueryFeatureResponse, error) - WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, error) + WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*local.IPNBusWatcher, error) IncrementCounter(ctx context.Context, name string, delta int) error GetPrefs(ctx context.Context) (*ipn.Prefs, error) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index 1ea76e72ca818..c509508dfb1f0 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -925,7 +925,7 @@ func (lc *fakeLocalServeClient) QueryFeature(ctx context.Context, feature string return &tailcfg.QueryFeatureResponse{Complete: 
true}, nil // fallback to already enabled } -func (lc *fakeLocalServeClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*tailscale.IPNBusWatcher, error) { +func (lc *fakeLocalServeClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*local.IPNBusWatcher, error) { return nil, nil // unused in tests } diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index acefd881f01b0..903036db4a6e7 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -24,7 +24,7 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/tailscale" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -365,7 +365,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } } - var watcher *tailscale.IPNBusWatcher + var watcher *local.IPNBusWatcher svcName := noService if forService { @@ -426,7 +426,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } if err := e.lc.SetServeConfig(ctx, parentSC); err != nil { - if tailscale.IsPreconditionsFailedError(err) { + if local.IsPreconditionsFailedError(err) { fmt.Fprintln(e.stderr(), "Another client is changing the serve config; please try again.") } return err diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 4453206366ded..a983f1c09f0bf 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -85,7 +85,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/local from tailscale.com/client/tailscale+ L tailscale.com/client/systray from tailscale.com/cmd/tailscale/cli - tailscale.com/client/tailscale from tailscale.com/cmd/tailscale/cli+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ 
tailscale.com/client/web from tailscale.com/cmd/tailscale/cli tailscale.com/clientupdate from tailscale.com/client/web+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 3d93681439ab1..3ca57077254f7 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -244,9 +244,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp - tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ + tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate @@ -388,7 +387,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ tailscale.com/types/ptr from tailscale.com/control/controlclient+ @@ -410,7 +409,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httphdr from tailscale.com/feature/taildrop - 
tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/control/controlclient+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index efe9456d814c7..38d2c76c0fb8e 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -218,7 +218,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp+ + tailscale.com/client/tailscale from tailscale.com/tsnet tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ diff --git a/derp/derp_server.go b/derp/derp_server.go index bd67e7eeca22f..f0c635a5aef50 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -38,7 +38,6 @@ import ( "go4.org/mem" "golang.org/x/sync/errgroup" "tailscale.com/client/local" - "tailscale.com/client/tailscale" "tailscale.com/derp/derpconst" "tailscale.com/disco" "tailscale.com/envknob" @@ -1384,7 +1383,7 @@ func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, inf // tailscaled-based verification: if s.verifyClientsLocalTailscaled { _, err := s.localClient.WhoIsNodeKey(ctx, clientKey) - if err == tailscale.ErrPeerNotFound { + if err == local.ErrPeerNotFound { return fmt.Errorf("peer %v not authorized (not found in local tailscaled)", clientKey) } if err != nil { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 187237e2f8b7d..ed61de5312f88 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -214,7 +214,7 @@ tailscale.com/tsnet dependencies: 
(generated by github.com/tailscale/depaware) tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/derp+ + tailscale.com/client/tailscale from tailscale.com/tsnet tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ LDW tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ From 2da52dce7aba5150b7b9e637b9fb0d7307fed916 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 8 Sep 2025 15:02:43 +0000 Subject: [PATCH 0305/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/apple.md | 10 +++++----- licenses/windows.md | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index 81359b27021c0..91ba966981785 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -68,13 +68,13 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.40.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.41.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.42.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.43.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.34.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.33.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.27.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.35.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.34.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) 
([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.28.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 5c000cc9fd098..aff149d4d4ba4 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -72,18 +72,18 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.40.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.41.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.26.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.42.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) 
([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.43.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.34.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.33.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.27.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.35.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.34.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.28.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.6/LICENSE)) + - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.7/LICENSE)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) - [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) ([MIT](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE)) - 
[tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) From 6f9f190f4d8655a1699c8424a7e0f7860349023d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 8 Sep 2025 11:23:32 -0700 Subject: [PATCH 0306/1093] go.toolchain.rev: bump to Go 1.25.1 Updates #17064 Change-Id: Ibbca837e0921fe9f82fc931dde8bb51b017e4e48 Signed-off-by: Brad Fitzpatrick --- go.mod | 2 +- go.toolchain.rev | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index e6c480494ed2f..6883d2552e447 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.0 +go 1.25.1 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index 9c2417e7c103b..1fd4f3df25747 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -f3339c88ea24212cc3cd49b64ad1045b85db23bf +aa85d1541af0921f830f053f29d91971fa5838f6 From ffc82ad82014b03533e7214a2b259e62801d2191 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 8 Sep 2025 09:33:39 -0700 Subject: [PATCH 0307/1093] util/eventbus: add ts_omit_debugeventbus Updates #17063 Change-Id: Ibc98dd2088f82c829effa71f72f3e2a5abda5038 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- util/eventbus/debughttp.go | 2 +- util/eventbus/debughttp_off.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/build_dist.sh b/build_dist.sh index 12f366e061730..9514d53b6d53c 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy" + 
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy,ts_omit_debugeventbus" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go index a94eaa9cf7ba2..617502b93752c 100644 --- a/util/eventbus/debughttp.go +++ b/util/eventbus/debughttp.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android +//go:build !ios && !android && !ts_omit_debugeventbus package eventbus diff --git a/util/eventbus/debughttp_off.go b/util/eventbus/debughttp_off.go index 85330579c8329..7d9fb327c494f 100644 --- a/util/eventbus/debughttp_off.go +++ b/util/eventbus/debughttp_off.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || android +//go:build ios || android || ts_omit_debugeventbus package eventbus From 3e4b0c1516819ea47a90189a4f116a2e44b97e39 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 8 Sep 2025 12:09:29 -0700 Subject: [PATCH 0308/1093] cmd/tailscale, ipn/ipnlocal: add ts_omit_webclient Fixes #17063 Updates #12614 Change-Id: I0a189f6a4d1c4558351e3195839867725774fa96 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- cmd/tailscale/cli/cli.go | 3 ++- cmd/tailscale/cli/set.go | 4 ++-- cmd/tailscale/cli/web.go | 41 ++++++++++++++++++++------------- cmd/tailscaled/deps_test.go | 20 ++++++++++++++++ ipn/ipnlocal/web_client.go | 5 ++-- ipn/ipnlocal/web_client_stub.go | 2 +- tsconst/webclient.go | 9 ++++++++ 8 files changed, 63 insertions(+), 23 deletions(-) create mode 100644 tsconst/webclient.go diff --git a/build_dist.sh b/build_dist.sh index 9514d53b6d53c..57231eb7079ea 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - 
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy,ts_omit_debugeventbus" + tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy,ts_omit_debugeventbus,ts_omit_webclient" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 42f1cb3a526fb..46aa29c710333 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -209,6 +209,7 @@ func noDupFlagify(c *ffcli.Command) { var fileCmd func() *ffcli.Command var sysPolicyCmd func() *ffcli.Command +var maybeWebCmd func() *ffcli.Command func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") @@ -251,7 +252,7 @@ change in the future. funnelCmd(), serveCmd(), versionCmd, - webCmd, + nilOrCall(maybeWebCmd), nilOrCall(fileCmd), bugReportCmd, certCmd, diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index a1c6987e8fc4b..c0ce0b1c137ac 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -15,13 +15,13 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/client/web" "tailscale.com/clientupdate" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/ipn" "tailscale.com/net/netutil" "tailscale.com/net/tsaddr" "tailscale.com/safesocket" + "tailscale.com/tsconst" "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/types/views" @@ -264,7 +264,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { } if setArgs.runWebClient && len(st.TailscaleIPs) > 0 { - printf("\nWeb interface now running at %s:%d\n", st.TailscaleIPs[0], web.ListenPort) + printf("\nWeb interface now running at %s:%d\n", st.TailscaleIPs[0], tsconst.WebListenPort) } return 
nil diff --git a/cmd/tailscale/cli/web.go b/cmd/tailscale/cli/web.go index 5e1821dd011eb..2713f730bf600 100644 --- a/cmd/tailscale/cli/web.go +++ b/cmd/tailscale/cli/web.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_webclient + package cli import ( @@ -22,14 +24,20 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/web" "tailscale.com/ipn" + "tailscale.com/tsconst" ) -var webCmd = &ffcli.Command{ - Name: "web", - ShortUsage: "tailscale web [flags]", - ShortHelp: "Run a web server for controlling Tailscale", +func init() { + maybeWebCmd = webCmd +} + +func webCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "web", + ShortUsage: "tailscale web [flags]", + ShortHelp: "Run a web server for controlling Tailscale", - LongHelp: strings.TrimSpace(` + LongHelp: strings.TrimSpace(` "tailscale web" runs a webserver for controlling the Tailscale daemon. It's primarily intended for use on Synology, QNAP, and other @@ -37,16 +45,17 @@ NAS devices where a web interface is the natural place to control Tailscale, as opposed to a CLI or a native app. 
`), - FlagSet: (func() *flag.FlagSet { - webf := newFlagSet("web") - webf.StringVar(&webArgs.listen, "listen", "localhost:8088", "listen address; use port 0 for automatic") - webf.BoolVar(&webArgs.cgi, "cgi", false, "run as CGI script") - webf.StringVar(&webArgs.prefix, "prefix", "", "URL prefix added to requests (for cgi or reverse proxies)") - webf.BoolVar(&webArgs.readonly, "readonly", false, "run web UI in read-only mode") - webf.StringVar(&webArgs.origin, "origin", "", "origin at which the web UI is served (if behind a reverse proxy or used with cgi)") - return webf - })(), - Exec: runWeb, + FlagSet: (func() *flag.FlagSet { + webf := newFlagSet("web") + webf.StringVar(&webArgs.listen, "listen", "localhost:8088", "listen address; use port 0 for automatic") + webf.BoolVar(&webArgs.cgi, "cgi", false, "run as CGI script") + webf.StringVar(&webArgs.prefix, "prefix", "", "URL prefix added to requests (for cgi or reverse proxies)") + webf.BoolVar(&webArgs.readonly, "readonly", false, "run web UI in read-only mode") + webf.StringVar(&webArgs.origin, "origin", "", "origin at which the web UI is served (if behind a reverse proxy or used with cgi)") + return webf + })(), + Exec: runWeb, + } } var webArgs struct { @@ -101,7 +110,7 @@ func runWeb(ctx context.Context, args []string) error { var startedManagementClient bool // we started the management client if !existingWebClient && !webArgs.readonly { // Also start full client in tailscaled. 
- log.Printf("starting tailscaled web client at http://%s\n", netip.AddrPortFrom(selfIP, web.ListenPort)) + log.Printf("starting tailscaled web client at http://%s\n", netip.AddrPortFrom(selfIP, tsconst.WebListenPort)) if err := setRunWebClient(ctx, true); err != nil { return fmt.Errorf("starting web client in tailscaled: %w", err) } diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 6d2ea383780a6..a672e32e2d63e 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -4,6 +4,7 @@ package main import ( + "strings" "testing" "tailscale.com/tstest/deptest" @@ -41,3 +42,22 @@ func TestOmitSyspolicy(t *testing.T) { }, }.Check(t) } + +// Test that we can build a binary without reflect.MethodByName. +// See https://github.com/tailscale/tailscale/issues/17063 +func TestOmitReflectThings(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_include_cli,ts_omit_systray,ts_omit_debugeventbus,ts_omit_webclient", + BadDeps: map[string]string{ + "text/template": "unexpected text/template usage", + "html/template": "unexpected text/template usage", + }, + OnDep: func(dep string) { + if strings.Contains(dep, "systray") { + t.Errorf("unexpected systray dep %q", dep) + } + }, + }.Check(t) +} diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index 18145d1bb7e46..7cfb30ca4efeb 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android +//go:build !ios && !android && !ts_omit_webclient package ipnlocal @@ -22,11 +22,12 @@ import ( "tailscale.com/logtail/backoff" "tailscale.com/net/netutil" "tailscale.com/tailcfg" + "tailscale.com/tsconst" "tailscale.com/types/logger" "tailscale.com/util/mak" ) -const webClientPort = web.ListenPort +const webClientPort = tsconst.WebListenPort // webClient holds state for the web interface for managing this // 
tailscale instance. The web interface is not used by default, diff --git a/ipn/ipnlocal/web_client_stub.go b/ipn/ipnlocal/web_client_stub.go index 31735de250d54..5f37560cc6ddb 100644 --- a/ipn/ipnlocal/web_client_stub.go +++ b/ipn/ipnlocal/web_client_stub.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || android +//go:build ios || android || ts_omit_webclient package ipnlocal diff --git a/tsconst/webclient.go b/tsconst/webclient.go new file mode 100644 index 0000000000000..d4b3c8db51b2a --- /dev/null +++ b/tsconst/webclient.go @@ -0,0 +1,9 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +// WebListenPort is the static port used for the web client when run inside +// tailscaled. (5252 are the numbers above the letters "TSTS" on a qwerty +// keyboard.) +const WebListenPort = 5252 From f4ae81e015c32918e1198f0f2e2b0dd6332d4c99 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 9 Sep 2025 14:12:08 +0100 Subject: [PATCH 0309/1093] tsnet: remove APIClient() which is deprecated and now unused (#17073) Updates tailscale/corp#22748 Signed-off-by: Alex Chan --- cmd/tsidp/depaware.txt | 7 +++---- tsnet/depaware.txt | 7 +++---- tsnet/tsnet.go | 20 -------------------- 3 files changed, 6 insertions(+), 28 deletions(-) diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 38d2c76c0fb8e..cfe44d1dc1934 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -217,8 +217,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/tsnet + tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from 
tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ @@ -343,7 +342,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/cmd/tsidp+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ tailscale.com/types/ptr from tailscale.com/control/controlclient+ @@ -364,7 +363,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/appc+ diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index ed61de5312f88..74f3f8c539a66 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -213,8 +213,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ - tailscale.com/client/local from tailscale.com/client/tailscale+ - tailscale.com/client/tailscale from tailscale.com/tsnet + tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ LDW tailscale.com/client/web from 
tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ @@ -338,7 +337,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ - tailscale.com/types/opt from tailscale.com/client/tailscale+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ tailscale.com/types/ptr from tailscale.com/control/controlclient+ @@ -359,7 +358,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/client/tailscale+ + tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/appc+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 4cb977c73708e..359fbc1c5246d 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -27,7 +27,6 @@ import ( "time" "tailscale.com/client/local" - "tailscale.com/client/tailscale" "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/health" @@ -910,25 +909,6 @@ func (s *Server) getUDPHandlerForFlow(src, dst netip.AddrPort) (handler func(net return func(c nettype.ConnPacketConn) { ln.handle(c) }, true } -// APIClient returns a tailscale.Client that can be used to make authenticated -// requests to the Tailscale control server. -// It requires the user to set tailscale.I_Acknowledge_This_API_Is_Unstable. 
-// -// Deprecated: use AuthenticatedAPITransport with tailscale.com/client/tailscale/v2 instead. -func (s *Server) APIClient() (*tailscale.Client, error) { - if !tailscale.I_Acknowledge_This_API_Is_Unstable { - return nil, errors.New("use of Client without setting I_Acknowledge_This_API_Is_Unstable") - } - if err := s.Start(); err != nil { - return nil, err - } - - c := tailscale.NewClient("-", nil) - c.UserAgent = "tailscale-tsnet" - c.HTTPClient = &http.Client{Transport: s.lb.KeyProvingNoiseRoundTripper()} - return c, nil -} - // I_Acknowledge_This_API_Is_Experimental must be set true to use AuthenticatedAPITransport() // for now. var I_Acknowledge_This_API_Is_Experimental = false From f1ded844540f66c1a426fa54700ee626a0f9e658 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 9 Sep 2025 07:36:55 -0700 Subject: [PATCH 0310/1093] cmd/tailscaled: add disabled debug file to force reflect for binary size experiments This adds a file that's not compiled by default that exists just to make it easier to do binary size checks, probing what a binary would be like if it included reflect methods (as used by html/template, etc). As an example, once tailscaled uses reflect.Type.MethodByName(non-const-string) anywhere, the build jumps up by 14.5 MB: $ GOOS=linux GOARCH=amd64 ./tool/go build -tags=ts_include_cli,ts_omit_webclient,ts_omit_systray,ts_omit_debugeventbus -o before ./cmd/tailscaled $ GOOS=linux GOARCH=amd64 ./tool/go build -tags=ts_include_cli,ts_omit_webclient,ts_omit_systray,ts_omit_debugeventbus,ts_debug_forcereflect -o after ./cmd/tailscaled $ ls -l before after -rwxr-xr-x@ 1 bradfitz staff 41011861 Sep 9 07:28 before -rwxr-xr-x@ 1 bradfitz staff 55610948 Sep 9 07:29 after This is particularly pronounced with large deps like the AWS SDK. 
If you compare using ts_omit_aws: -rwxr-xr-x@ 1 bradfitz staff 38284771 Sep 9 07:40 no-aws-no-reflect -rwxr-xr-x@ 1 bradfitz staff 45546491 Sep 9 07:41 no-aws-with-reflect That means adding AWS to a non-reflect binary adds 2.7 MB but adding AWS to a reflect binary adds 10 MB. Updates #17063 Updates #12614 Change-Id: I18e9b77c9cf33565ce5bba65ac5584fa9433f7fb Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/debug_forcereflect.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 cmd/tailscaled/debug_forcereflect.go diff --git a/cmd/tailscaled/debug_forcereflect.go b/cmd/tailscaled/debug_forcereflect.go new file mode 100644 index 0000000000000..7378753ceb64c --- /dev/null +++ b/cmd/tailscaled/debug_forcereflect.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_debug_forcereflect + +// This file exists for benchmarking binary sizes. When the build tag is +// enabled, it forces use of part of the reflect package that makes the Go +// linker go into conservative retention mode where its deadcode pass can't +// eliminate exported method. + +package main + +import ( + "reflect" + "time" +) + +func init() { + // See Go's src/cmd/compile/internal/walk/expr.go:usemethod for + // why this is isn't a const. 
+ name := []byte("Bar") + if time.Now().Unix()&1 == 0 { + name[0] = 'X' + } + _, _ = reflect.TypeOf(12).MethodByName(string(name)) +} From 77250a301aee83d67c1bbe497391500f7c70e7b4 Mon Sep 17 00:00:00 2001 From: Nick O'Neill Date: Tue, 9 Sep 2025 09:03:01 -0700 Subject: [PATCH 0311/1093] ipn/ipnlocal, types: plumb tailnet display name cap through to network profile (#17045) Updates tailscale/corp#30456 Signed-off-by: Nick O'Neill --- ipn/ipnlocal/local.go | 8 ++++++++ ipn/ipnlocal/node_backend.go | 1 + ipn/prefs.go | 1 + types/netmap/netmap.go | 16 ++++++++++++++++ 4 files changed, 26 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7592e9b4ba105..2d917ae545545 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1650,12 +1650,18 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefsChanged = true } + // If the tailnet's display name has changed, update prefs. + if st.NetMap != nil && st.NetMap.TailnetDisplayName() != b.pm.CurrentProfile().NetworkProfile().DisplayName { + prefsChanged = true + } + // Perform all mutations of prefs based on the netmap here. if prefsChanged { // Prefs will be written out if stale; this is not safe unless locked or cloned. 
if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ MagicDNSName: curNetMap.MagicDNSSuffix(), DomainName: curNetMap.DomainName(), + DisplayName: curNetMap.TailnetDisplayName(), }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } @@ -1716,6 +1722,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control if err := b.pm.SetPrefs(p, ipn.NetworkProfile{ MagicDNSName: st.NetMap.MagicDNSSuffix(), DomainName: st.NetMap.DomainName(), + DisplayName: st.NetMap.TailnetDisplayName(), }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } @@ -6185,6 +6192,7 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { if err := b.pm.SetPrefs(prefs.View(), ipn.NetworkProfile{ MagicDNSName: nm.MagicDNSSuffix(), DomainName: nm.DomainName(), + DisplayName: nm.TailnetDisplayName(), }); err != nil { b.logf("failed to save exit node changes: %v", err) } diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index a3889b6434c40..4319ed372222f 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -168,6 +168,7 @@ func (nb *nodeBackend) NetworkProfile() ipn.NetworkProfile { // These are ok to call with nil netMap. 
MagicDNSName: nb.netMap.MagicDNSSuffix(), DomainName: nb.netMap.DomainName(), + DisplayName: nb.netMap.TailnetDisplayName(), } } diff --git a/ipn/prefs.go b/ipn/prefs.go index 88c73ead3e365..7c3c50f73bcd8 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -988,6 +988,7 @@ type WindowsUserID string type NetworkProfile struct { MagicDNSName string DomainName string + DisplayName string } // RequiresBackfill returns whether this object does not have all the data diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 963f80a441ee4..cc6bec1db8edb 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -252,6 +252,22 @@ func (nm *NetworkMap) DomainName() string { return nm.Domain } +// TailnetDisplayName returns the admin-editable name contained in +// NodeAttrTailnetDisplayName. If the capability is not present it +// returns an empty string. +func (nm *NetworkMap) TailnetDisplayName() string { + if nm == nil || !nm.SelfNode.Valid() { + return "" + } + + tailnetDisplayNames, err := tailcfg.UnmarshalNodeCapViewJSON[string](nm.SelfNode.CapMap(), tailcfg.NodeAttrTailnetDisplayName) + if err != nil || len(tailnetDisplayNames) == 0 { + return "" + } + + return tailnetDisplayNames[0] +} + // HasSelfCapability reports whether nm.SelfNode contains capability c. // // It exists to satisify an unused (as of 2025-01-04) interface in the logknob package. 
From 88d7db33dab4bab8a0ae7beb3838b82898488a87 Mon Sep 17 00:00:00 2001 From: nikiUppal-TS Date: Tue, 9 Sep 2025 16:02:56 -0500 Subject: [PATCH 0312/1093] cmd/tailscale: use tailnet display name on cli (#17079) Updates cli to use tailnet display name Updates tailscale/corp#32108 Signed-off-by: nikiUppal-TS --- cmd/tailscale/cli/switch.go | 18 +++++++++++++----- ipn/prefs.go | 8 ++++++++ 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index af8b513263d37..0677da1b31868 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -24,7 +24,7 @@ var switchCmd = &ffcli.Command{ LongHelp: `"tailscale switch" switches between logged in accounts. You can use the ID that's returned from 'tailnet switch -list' to pick which profile you want to switch to. Alternatively, you -can use the Tailnet or the account names to switch as well. +can use the Tailnet, account names, or display names to switch as well. This command is currently in alpha and may change in the future.`, @@ -46,7 +46,7 @@ func init() { seen := make(map[string]bool, 3*len(all)) wordfns := []func(prof ipn.LoginProfile) string{ func(prof ipn.LoginProfile) string { return string(prof.ID) }, - func(prof ipn.LoginProfile) string { return prof.NetworkProfile.DomainName }, + func(prof ipn.LoginProfile) string { return prof.NetworkProfile.DisplayNameOrDefault() }, func(prof ipn.LoginProfile) string { return prof.Name }, } @@ -57,7 +57,7 @@ func init() { continue } seen[word] = true - words = append(words, fmt.Sprintf("%s\tid: %s, tailnet: %s, account: %s", word, prof.ID, prof.NetworkProfile.DomainName, prof.Name)) + words = append(words, fmt.Sprintf("%s\tid: %s, tailnet: %s, account: %s", word, prof.ID, prof.NetworkProfile.DisplayNameOrDefault(), prof.Name)) } } return words, ffcomplete.ShellCompDirectiveNoFileComp, nil @@ -86,7 +86,7 @@ func listProfiles(ctx context.Context) error { } printRow( string(prof.ID), - 
prof.NetworkProfile.DomainName, + prof.NetworkProfile.DisplayNameOrDefault(), name, ) } @@ -107,7 +107,7 @@ func switchProfile(ctx context.Context, args []string) error { os.Exit(1) } var profID ipn.ProfileID - // Allow matching by ID, Tailnet, or Account + // Allow matching by ID, Tailnet, Account, or Display Name // in that order. for _, p := range all { if p.ID == ipn.ProfileID(args[0]) { @@ -131,6 +131,14 @@ func switchProfile(ctx context.Context, args []string) error { } } } + if profID == "" { + for _, p := range all { + if p.NetworkProfile.DisplayName == args[0] { + profID = p.ID + break + } + } + } if profID == "" { errf("No profile named %q\n", args[0]) os.Exit(1) diff --git a/ipn/prefs.go b/ipn/prefs.go index 7c3c50f73bcd8..1efb5d0feabd9 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -5,6 +5,7 @@ package ipn import ( "bytes" + "cmp" "encoding/json" "errors" "fmt" @@ -1001,6 +1002,13 @@ func (n NetworkProfile) RequiresBackfill() bool { return n == NetworkProfile{} } +// DisplayNameOrDefault will always return a non-empty string. +// If there is a defined display name, it will return that. +// If they did not it will default to their domain name. +func (n NetworkProfile) DisplayNameOrDefault() string { + return cmp.Or(n.DisplayName, n.DomainName) +} + // LoginProfile represents a single login profile as managed // by the ProfileManager. 
type LoginProfile struct { From 09bfee2e06418c48397c988f1bb6d535186b67ca Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 9 Sep 2025 14:54:22 -0700 Subject: [PATCH 0313/1093] disco: add missing message types to MessageSummary (#17081) Updates tailscale/corp#30818 Signed-off-by: Jordan Whited --- disco/disco.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/disco/disco.go b/disco/disco.go index 1689d2a93da77..f58bc1b8c1ba1 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -287,12 +287,18 @@ func MessageSummary(m Message) string { return fmt.Sprintf("pong tx=%x", m.TxID[:6]) case *CallMeMaybe: return "call-me-maybe" + case *CallMeMaybeVia: + return "call-me-maybe-via" case *BindUDPRelayEndpoint: return "bind-udp-relay-endpoint" case *BindUDPRelayEndpointChallenge: return "bind-udp-relay-endpoint-challenge" case *BindUDPRelayEndpointAnswer: return "bind-udp-relay-endpoint-answer" + case *AllocateUDPRelayEndpointRequest: + return "allocate-udp-relay-endpoint-request" + case *AllocateUDPRelayEndpointResponse: + return "allocate-udp-relay-endpoint-response" default: return fmt.Sprintf("%#v", m) } From 2d9d869d3dbdf485c9d04276a84435b329d2739f Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 9 Sep 2025 15:38:08 -0700 Subject: [PATCH 0314/1093] wgengine/magicsock: fix debug disco printing of alloc resp disco keys (#17087) Updates tailscale/corp#30818 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 695039ea640a5..1bff7153bdb3c 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2411,11 +2411,11 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } else { - c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s, for %d<->%d", + c.dlogf("[v1] magicsock: disco: %v<-%v 
(%v, %v) got %s for %v<->%v", c.discoShort, epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), msgType, - req.ClientDisco[0], req.ClientDisco[1]) + req.ClientDisco[0].ShortString(), req.ClientDisco[1].ShortString()) } if c.filt == nil { From 1ec3d20d10d4cf400b26b938187820f111e912e3 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 10 Sep 2025 13:02:59 +0100 Subject: [PATCH 0315/1093] cmd/k8s-operator: simplify scope of e2e tests (#17076) Removes ACL edits from e2e tests in favour of trying to simplify the tests and separate the actual test logic from the environment setup logic as much as possible. Also aims to fit in with the requirements that will generally be filled anyway for most devs working on the operator; in particular using tags that fit in with our documentation. Updates tailscale/corp#32085 Change-Id: I7659246e39ec0b7bcc4ec0a00c6310f25fe6fac2 Signed-off-by: Tom Proctor --- cmd/k8s-operator/e2e/acl.hujson | 33 +++++ cmd/k8s-operator/e2e/ingress_test.go | 70 ++++++---- cmd/k8s-operator/e2e/main_test.go | 202 +++++++++------------------ cmd/k8s-operator/e2e/proxy_test.go | 88 +++--------- 4 files changed, 168 insertions(+), 225 deletions(-) create mode 100644 cmd/k8s-operator/e2e/acl.hujson diff --git a/cmd/k8s-operator/e2e/acl.hujson b/cmd/k8s-operator/e2e/acl.hujson new file mode 100644 index 0000000000000..1a7b61767c92b --- /dev/null +++ b/cmd/k8s-operator/e2e/acl.hujson @@ -0,0 +1,33 @@ +// To run the e2e tests against a tailnet, ensure its access controls are a +// superset of the following: +{ + "tagOwners": { + "tag:k8s-operator": [], + "tag:k8s": ["tag:k8s-operator"], + "tag:k8s-recorder": ["tag:k8s-operator"], + }, + "autoApprovers": { + // Could be relaxed if we coordinated with the cluster config, but this + // wide subnet maximises compatibility for most clusters. 
+ "routes": { + "10.0.0.0/8": ["tag:k8s"], + }, + "services": { + "tag:k8s": ["tag:k8s"], + }, + }, + "grants": [ + { + "src": ["tag:k8s"], + "dst": ["tag:k8s", "tag:k8s-operator"], + "ip": ["tcp:80", "tcp:443"], + "app": { + "tailscale.com/cap/kubernetes": [{ + "impersonate": { + "groups": ["ts:e2e-test-proxy"], + }, + }], + }, + }, + ], +} \ No newline at end of file diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go index 373dd2c7dc88f..23f0711ec9906 100644 --- a/cmd/k8s-operator/e2e/ingress_test.go +++ b/cmd/k8s-operator/e2e/ingress_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -17,45 +18,63 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" kube "tailscale.com/k8s-operator" "tailscale.com/tstest" + "tailscale.com/types/ptr" + "tailscale.com/util/httpm" ) // See [TestMain] for test requirements. 
func TestIngress(t *testing.T) { - if tsClient == nil { - t.Skip("TestIngress requires credentials for a tailscale client") + if apiClient == nil { + t.Skip("TestIngress requires TS_API_CLIENT_SECRET set") } - ctx := context.Background() cfg := config.GetConfigOrDie() cl, err := client.New(cfg, client.Options{}) if err != nil { t.Fatal(err) } // Apply nginx - createAndCleanup(t, ctx, cl, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "nginx", - Namespace: "default", - Labels: map[string]string{ - "app.kubernetes.io/name": "nginx", + createAndCleanup(t, cl, + &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nginx", + Namespace: "default", + Labels: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "nginx", - Image: "nginx", + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "nginx", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx", + }, + }, + }, }, }, - }, - }) + }) // Apply service to expose it as ingress svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test-ingress", Namespace: "default", Annotations: map[string]string{ - "tailscale.com/expose": "true", + "tailscale.com/expose": "true", + "tailscale.com/proxy-class": "prod", }, }, Spec: corev1.ServiceSpec{ @@ -71,10 +90,10 @@ func TestIngress(t *testing.T) { }, }, } - createAndCleanup(t, ctx, cl, svc) + createAndCleanup(t, cl, svc) // TODO: instead of timing out only when test times out, cancel context after 60s or so. 
- if err := wait.PollUntilContextCancel(ctx, time.Millisecond*100, true, func(ctx context.Context) (done bool, err error) { + if err := wait.PollUntilContextCancel(t.Context(), time.Millisecond*100, true, func(ctx context.Context) (done bool, err error) { maybeReadySvc := &corev1.Service{ObjectMeta: objectMeta("default", "test-ingress")} if err := get(ctx, cl, maybeReadySvc); err != nil { return false, err @@ -89,17 +108,20 @@ func TestIngress(t *testing.T) { } var resp *http.Response - if err := tstest.WaitFor(time.Second*60, func() error { + if err := tstest.WaitFor(time.Minute, func() error { // TODO(tomhjp): Get the tailnet DNS name from the associated secret instead. // If we are not the first tailnet node with the requested name, we'll get // a -N suffix. - resp, err = tsClient.HTTPClient.Get(fmt.Sprintf("http://%s-%s:80", svc.Namespace, svc.Name)) + req, err := http.NewRequest(httpm.GET, fmt.Sprintf("http://%s-%s:80", svc.Namespace, svc.Name), nil) if err != nil { return err } - return nil + ctx, cancel := context.WithTimeout(t.Context(), time.Second) + defer cancel() + resp, err = tailnetClient.HTTPClient().Do(req.WithContext(ctx)) + return err }); err != nil { - t.Fatalf("error trying to reach service: %v", err) + t.Fatalf("error trying to reach Service: %v", err) } if resp.StatusCode != http.StatusOK { diff --git a/cmd/k8s-operator/e2e/main_test.go b/cmd/k8s-operator/e2e/main_test.go index 5a1364e09d0d7..fb5e5c8597cef 100644 --- a/cmd/k8s-operator/e2e/main_test.go +++ b/cmd/k8s-operator/e2e/main_test.go @@ -6,167 +6,89 @@ package e2e import ( "context" "errors" - "fmt" "log" "os" - "slices" "strings" "testing" + "time" - "github.com/go-logr/zapr" - "github.com/tailscale/hujson" - "go.uber.org/zap/zapcore" "golang.org/x/oauth2/clientcredentials" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - logf "sigs.k8s.io/controller-runtime/pkg/log" - kzap 
"sigs.k8s.io/controller-runtime/pkg/log/zap" "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn/store/mem" + "tailscale.com/tsnet" ) -const ( - e2eManagedComment = "// This is managed by the k8s-operator e2e tests" -) - +// This test suite is currently not run in CI. +// It requires some setup not handled by this code: +// - Kubernetes cluster with local kubeconfig for it (direct connection, no API server proxy) +// - Tailscale operator installed with --set apiServerProxyConfig.mode="true" +// - ACLs from acl.hujson +// - OAuth client secret in TS_API_CLIENT_SECRET env, with at least auth_keys write scope and tag:k8s tag var ( - tsClient *tailscale.Client - testGrants = map[string]string{ - "test-proxy": `{ - "src": ["tag:e2e-test-proxy"], - "dst": ["tag:k8s-operator"], - "app": { - "tailscale.com/cap/kubernetes": [{ - "impersonate": { - "groups": ["ts:e2e-test-proxy"], - }, - }], - }, - }`, - } + apiClient *tailscale.Client // For API calls to control. + tailnetClient *tsnet.Server // For testing real tailnet traffic. ) -// This test suite is currently not run in CI. -// It requires some setup not handled by this code: -// - Kubernetes cluster with tailscale operator installed -// - Current kubeconfig context set to connect to that cluster (directly, no operator proxy) -// - Operator installed with --set apiServerProxyConfig.mode="true" -// - ACLs that define tag:e2e-test-proxy tag. 
TODO(tomhjp): Can maybe replace this prereq onwards with an API key -// - OAuth client ID and secret in TS_API_CLIENT_ID and TS_API_CLIENT_SECRET env -// - OAuth client must have auth_keys and policy_file write for tag:e2e-test-proxy tag func TestMain(m *testing.M) { code, err := runTests(m) if err != nil { - log.Fatal(err) + log.Printf("Error: %v", err) + os.Exit(1) } os.Exit(code) } func runTests(m *testing.M) (int, error) { - zlog := kzap.NewRaw([]kzap.Opts{kzap.UseDevMode(true), kzap.Level(zapcore.DebugLevel)}...).Sugar() - logf.SetLogger(zapr.NewLogger(zlog.Desugar())) + secret := os.Getenv("TS_API_CLIENT_SECRET") + if secret != "" { + secretParts := strings.Split(secret, "-") + if len(secretParts) != 4 { + return 0, errors.New("TS_API_CLIENT_SECRET is not valid") + } + ctx := context.Background() + credentials := clientcredentials.Config{ + ClientID: secretParts[2], + ClientSecret: secret, + TokenURL: "https://login.tailscale.com/api/v2/oauth/token", + Scopes: []string{"auth_keys"}, + } + apiClient = tailscale.NewClient("-", nil) + apiClient.HTTPClient = credentials.Client(ctx) + + caps := tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Preauthorized: true, + Ephemeral: true, + Tags: []string{"tag:k8s"}, + }, + }, + } - if clientID := os.Getenv("TS_API_CLIENT_ID"); clientID != "" { - cleanup, err := setupClientAndACLs() + authKey, authKeyMeta, err := apiClient.CreateKeyWithExpiry(ctx, caps, 10*time.Minute) if err != nil { return 0, err } - defer func() { - err = errors.Join(err, cleanup()) - }() - } - - return m.Run(), nil -} - -func setupClientAndACLs() (cleanup func() error, _ error) { - ctx := context.Background() - credentials := clientcredentials.Config{ - ClientID: os.Getenv("TS_API_CLIENT_ID"), - ClientSecret: os.Getenv("TS_API_CLIENT_SECRET"), - TokenURL: "https://login.tailscale.com/api/v2/oauth/token", - Scopes: []string{"auth_keys", "policy_file"}, - } - 
tsClient = tailscale.NewClient("-", nil) - tsClient.HTTPClient = credentials.Client(ctx) - - if err := patchACLs(ctx, tsClient, func(acls *hujson.Value) { - for test, grant := range testGrants { - deleteTestGrants(test, acls) - addTestGrant(test, grant, acls) - } - }); err != nil { - return nil, err - } - - return func() error { - return patchACLs(ctx, tsClient, func(acls *hujson.Value) { - for test := range testGrants { - deleteTestGrants(test, acls) - } - }) - }, nil -} - -func patchACLs(ctx context.Context, tsClient *tailscale.Client, patchFn func(*hujson.Value)) error { - acls, err := tsClient.ACLHuJSON(ctx) - if err != nil { - return err - } - hj, err := hujson.Parse([]byte(acls.ACL)) - if err != nil { - return err - } - - patchFn(&hj) - - hj.Format() - acls.ACL = hj.String() - if _, err := tsClient.SetACLHuJSON(ctx, *acls, true); err != nil { - return err - } - - return nil -} + defer apiClient.DeleteKey(context.Background(), authKeyMeta.ID) -func addTestGrant(test, grant string, acls *hujson.Value) error { - v, err := hujson.Parse([]byte(grant)) - if err != nil { - return err - } - - // Add the managed comment to the first line of the grant object contents. 
- v.Value.(*hujson.Object).Members[0].Name.BeforeExtra = hujson.Extra(fmt.Sprintf("%s: %s\n", e2eManagedComment, test)) - - if err := acls.Patch([]byte(fmt.Sprintf(`[{"op": "add", "path": "/grants/-", "value": %s}]`, v.String()))); err != nil { - return err - } - - return nil -} - -func deleteTestGrants(test string, acls *hujson.Value) error { - grants := acls.Find("/grants") - - var patches []string - for i, g := range grants.Value.(*hujson.Array).Elements { - members := g.Value.(*hujson.Object).Members - if len(members) == 0 { - continue + tailnetClient = &tsnet.Server{ + Hostname: "test-proxy", + Ephemeral: true, + Store: &mem.Store{}, + AuthKey: authKey, } - comment := strings.TrimSpace(string(members[0].Name.BeforeExtra)) - if name, found := strings.CutPrefix(comment, e2eManagedComment+": "); found && name == test { - patches = append(patches, fmt.Sprintf(`{"op": "remove", "path": "/grants/%d"}`, i)) + _, err = tailnetClient.Up(ctx) + if err != nil { + return 0, err } + defer tailnetClient.Close() } - // Remove in reverse order so we don't affect the found indices as we mutate. 
- slices.Reverse(patches) - - if err := acls.Patch([]byte(fmt.Sprintf("[%s]", strings.Join(patches, ",")))); err != nil { - return err - } - - return nil + return m.Run(), nil } func objectMeta(namespace, name string) metav1.ObjectMeta { @@ -176,13 +98,25 @@ func objectMeta(namespace, name string) metav1.ObjectMeta { } } -func createAndCleanup(t *testing.T, ctx context.Context, cl client.Client, obj client.Object) { +func createAndCleanup(t *testing.T, cl client.Client, obj client.Object) { t.Helper() - if err := cl.Create(ctx, obj); err != nil { - t.Fatal(err) + + // Try to create the object first + err := cl.Create(t.Context(), obj) + if err != nil { + if apierrors.IsAlreadyExists(err) { + if updateErr := cl.Update(t.Context(), obj); updateErr != nil { + t.Fatal(updateErr) + } + } else { + t.Fatal(err) + } } + t.Cleanup(func() { - if err := cl.Delete(ctx, obj); err != nil { + // Use context.Background() for cleanup, as t.Context() is cancelled + // just before cleanup functions are called. + if err := cl.Delete(context.Background(), obj); err != nil { t.Errorf("error cleaning up %s %s/%s: %s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err) } }) diff --git a/cmd/k8s-operator/e2e/proxy_test.go b/cmd/k8s-operator/e2e/proxy_test.go index eac983e88d613..b3010f97e28c8 100644 --- a/cmd/k8s-operator/e2e/proxy_test.go +++ b/cmd/k8s-operator/e2e/proxy_test.go @@ -4,10 +4,8 @@ package e2e import ( - "context" "encoding/json" "fmt" - "strings" "testing" "time" @@ -17,18 +15,16 @@ import ( "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" - "tailscale.com/client/tailscale" - "tailscale.com/tsnet" + "tailscale.com/ipn" "tailscale.com/tstest" ) // See [TestMain] for test requirements. 
func TestProxy(t *testing.T) { - if tsClient == nil { - t.Skip("TestProxy requires credentials for a tailscale client") + if apiClient == nil { + t.Skip("TestIngress requires TS_API_CLIENT_SECRET set") } - ctx := context.Background() cfg := config.GetConfigOrDie() cl, err := client.New(cfg, client.Options{}) if err != nil { @@ -36,7 +32,7 @@ func TestProxy(t *testing.T) { } // Create role and role binding to allow a group we'll impersonate to do stuff. - createAndCleanup(t, ctx, cl, &rbacv1.Role{ + createAndCleanup(t, cl, &rbacv1.Role{ ObjectMeta: objectMeta("tailscale", "read-secrets"), Rules: []rbacv1.PolicyRule{{ APIGroups: []string{""}, @@ -44,7 +40,7 @@ func TestProxy(t *testing.T) { Resources: []string{"secrets"}, }}, }) - createAndCleanup(t, ctx, cl, &rbacv1.RoleBinding{ + createAndCleanup(t, cl, &rbacv1.RoleBinding{ ObjectMeta: objectMeta("tailscale", "read-secrets"), Subjects: []rbacv1.Subject{{ Kind: "Group", @@ -60,16 +56,14 @@ func TestProxy(t *testing.T) { operatorSecret := corev1.Secret{ ObjectMeta: objectMeta("tailscale", "operator"), } - if err := get(ctx, cl, &operatorSecret); err != nil { + if err := get(t.Context(), cl, &operatorSecret); err != nil { t.Fatal(err) } - // Connect to tailnet with test-specific tag so we can use the - // [testGrants] ACLs when connecting to the API server proxy - ts := tsnetServerWithTag(t, ctx, "tag:e2e-test-proxy") + // Join tailnet as a client of the API server proxy. proxyCfg := &rest.Config{ Host: fmt.Sprintf("https://%s:443", hostNameFromOperatorSecret(t, operatorSecret)), - Dial: ts.Dial, + Dial: tailnetClient.Dial, } proxyCl, err := client.New(proxyCfg, client.Options{}) if err != nil { @@ -82,8 +76,8 @@ func TestProxy(t *testing.T) { } // Wait for up to a minute the first time we use the proxy, to give it time // to provision the TLS certs. 
- if err := tstest.WaitFor(time.Second*60, func() error { - return get(ctx, proxyCl, &allowedSecret) + if err := tstest.WaitFor(time.Minute, func() error { + return get(t.Context(), proxyCl, &allowedSecret) }); err != nil { t.Fatal(err) } @@ -92,65 +86,25 @@ func TestProxy(t *testing.T) { forbiddenSecret := corev1.Secret{ ObjectMeta: objectMeta("default", "operator"), } - if err := get(ctx, proxyCl, &forbiddenSecret); err == nil || !apierrors.IsForbidden(err) { + if err := get(t.Context(), proxyCl, &forbiddenSecret); err == nil || !apierrors.IsForbidden(err) { t.Fatalf("expected forbidden error fetching secret from default namespace: %s", err) } } -func tsnetServerWithTag(t *testing.T, ctx context.Context, tag string) *tsnet.Server { - caps := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - Preauthorized: true, - Ephemeral: true, - Tags: []string{tag}, - }, - }, - } - - authKey, authKeyMeta, err := tsClient.CreateKey(ctx, caps) - if err != nil { - t.Fatal(err) +func hostNameFromOperatorSecret(t *testing.T, s corev1.Secret) string { + t.Helper() + prefsBytes, ok := s.Data[string(s.Data["_current-profile"])] + if !ok { + t.Fatalf("no state in operator Secret data: %#v", s.Data) } - t.Cleanup(func() { - if err := tsClient.DeleteKey(ctx, authKeyMeta.ID); err != nil { - t.Errorf("error deleting auth key: %s", err) - } - }) - ts := &tsnet.Server{ - Hostname: "test-proxy", - Ephemeral: true, - Dir: t.TempDir(), - AuthKey: authKey, - } - _, err = ts.Up(ctx) - if err != nil { + prefs := ipn.Prefs{} + if err := json.Unmarshal(prefsBytes, &prefs); err != nil { t.Fatal(err) } - t.Cleanup(func() { - if err := ts.Close(); err != nil { - t.Errorf("error shutting down tsnet.Server: %s", err) - } - }) - - return ts -} -func hostNameFromOperatorSecret(t *testing.T, s corev1.Secret) string { - profiles := map[string]any{} - if err := json.Unmarshal(s.Data["_profiles"], &profiles); err != nil 
{ - t.Fatal(err) + if prefs.Persist == nil { + t.Fatalf("no hostname in operator Secret data: %#v", s.Data) } - key, ok := strings.CutPrefix(string(s.Data["_current-profile"]), "profile-") - if !ok { - t.Fatal(string(s.Data["_current-profile"])) - } - profile, ok := profiles[key] - if !ok { - t.Fatal(profiles) - } - - return ((profile.(map[string]any))["Name"]).(string) + return prefs.Persist.UserProfile.LoginName } From 6feb6f3c753aca44d284a3b1a103692e96c62aee Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 10 Sep 2025 12:36:53 -0700 Subject: [PATCH 0316/1093] wgengine/magicsock: add relayManager event logs (#17091) These are gated behind magicsock component debug logging. Updates tailscale/corp#30818 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 2 +- wgengine/magicsock/relaymanager.go | 78 ++++++++++++++++++++++++++---- 2 files changed, 69 insertions(+), 11 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 1bff7153bdb3c..8ab7957ca2bb6 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2411,7 +2411,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake msgType, sender.ShortString(), derpNodeSrc.ShortString()) return } else { - c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s for %v<->%v", + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s disco[0]=%v disco[1]=%v", c.discoShort, epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), msgType, diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 8a1a4fcf57fe8..4680832d96bb8 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -6,6 +6,7 @@ package magicsock import ( "context" "errors" + "fmt" "net/netip" "sync" "time" @@ -76,8 +77,11 @@ type serverDiscoVNI struct { // relayHandshakeWork serves to track in-progress relay handshake work for a // [udprelay.ServerEndpoint]. 
This structure is immutable once initialized. type relayHandshakeWork struct { - wlb endpointWithLastBest - se udprelay.ServerEndpoint + wlb endpointWithLastBest + se udprelay.ServerEndpoint + server candidatePeerRelay + + handshakeGen uint32 // handshakeServerEndpoint() always writes to doneCh (len 1) when it // returns. It may end up writing the same event afterward to @@ -91,6 +95,26 @@ type relayHandshakeWork struct { cancel context.CancelFunc } +func (r *relayHandshakeWork) dlogf(format string, args ...any) { + if !r.wlb.ep.c.debugLogging.Load() { + return + } + var relay string + if r.server.nodeKey.IsZero() { + relay = "from-call-me-maybe-via" + } else { + relay = r.server.nodeKey.ShortString() + } + r.wlb.ep.c.logf("%s node=%v relay=%v handshakeGen=%d disco[0]=%v disco[1]=%v", + fmt.Sprintf(format, args...), + r.wlb.ep.publicKey.ShortString(), + relay, + r.handshakeGen, + r.se.ClientDisco[0].ShortString(), + r.se.ClientDisco[1].ShortString(), + ) +} + // newRelayServerEndpointEvent indicates a new [udprelay.ServerEndpoint] has // become known either via allocation with a relay server, or via // [disco.CallMeMaybeVia] reception. This structure is immutable once @@ -257,7 +281,9 @@ type relayDiscoMsgEvent struct { type relayEndpointAllocWork struct { wlb endpointWithLastBest discoKeys key.SortedPairOfDiscoPublic - candidatePeerRelay candidatePeerRelay + candidatePeerRelay candidatePeerRelay // zero value if learned via [disco.CallMeMaybeVia] + + allocGen uint32 // allocateServerEndpoint() always writes to doneCh (len 1) when it // returns. 
It may end up writing the same event afterward to @@ -271,6 +297,20 @@ type relayEndpointAllocWork struct { cancel context.CancelFunc } +func (r *relayEndpointAllocWork) dlogf(format string, args ...any) { + if !r.wlb.ep.c.debugLogging.Load() { + return + } + r.wlb.ep.c.logf("%s node=%v relay=%v allocGen=%d disco[0]=%v disco[1]=%v", + fmt.Sprintf(format, args...), + r.wlb.ep.publicKey.ShortString(), + r.candidatePeerRelay.nodeKey.ShortString(), + r.allocGen, + r.discoKeys.Get()[0].ShortString(), + r.discoKeys.Get()[1].ShortString(), + ) +} + // init initializes [relayManager] if it is not already initialized. func (r *relayManager) init() { r.initOnce.Do(func() { @@ -712,6 +752,7 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay work := &relayHandshakeWork{ wlb: newServerEndpoint.wlb, se: newServerEndpoint.se, + server: newServerEndpoint.server, rxDiscoMsgCh: make(chan relayDiscoMsgEvent), doneCh: make(chan relayEndpointHandshakeWorkDoneEvent, 1), ctx: ctx, @@ -728,8 +769,9 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay if r.handshakeGeneration == 0 { // generation must be nonzero r.handshakeGeneration++ } + work.handshakeGen = r.handshakeGeneration - go r.handshakeServerEndpoint(work, r.handshakeGeneration) + go r.handshakeServerEndpoint(work) } // sendCallMeMaybeVia sends a [disco.CallMeMaybeVia] to ep over DERP. 
It must be @@ -758,7 +800,7 @@ func (r *relayManager) sendCallMeMaybeVia(ep *endpoint, se udprelay.ServerEndpoi ep.c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, epDisco.key, callMeMaybeVia, discoVerboseLog) } -func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generation uint32) { +func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork) { done := relayEndpointHandshakeWorkDoneEvent{work: work} r.ensureDiscoInfoFor(work) @@ -777,10 +819,13 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat common := disco.BindUDPRelayEndpointCommon{ VNI: work.se.VNI, - Generation: generation, + Generation: work.handshakeGen, RemoteKey: epDisco.key, } + work.dlogf("[v1] magicsock: relayManager: starting handshake addrPorts=%v", + work.se.AddrPorts, + ) sentBindAny := false bind := &disco.BindUDPRelayEndpoint{ BindUDPRelayEndpointCommon: common, @@ -848,6 +893,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat for { select { case <-work.ctx.Done(): + work.dlogf("[v1] magicsock: relayManager: handshake canceled") return case msgEvent := <-work.rxDiscoMsgCh: switch msg := msgEvent.msg.(type) { @@ -859,12 +905,14 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat if handshakeState >= disco.BindUDPRelayHandshakeStateAnswerSent { continue } + work.dlogf("[v1] magicsock: relayManager: got handshake challenge from %v", msgEvent.from) txPing(msgEvent.from, &msg.Challenge) handshakeState = disco.BindUDPRelayHandshakeStateAnswerSent case *disco.Ping: if handshakeState < disco.BindUDPRelayHandshakeStateAnswerSent { continue } + work.dlogf("[v1] magicsock: relayManager: got relayed ping from %v", msgEvent.from) // An inbound ping from the remote peer indicates we completed a // handshake with the relay server (our answer msg was // received). 
Chances are our ping was dropped before the remote @@ -885,6 +933,10 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat // round-trip latency and return. done.pongReceivedFrom = msgEvent.from done.latency = time.Since(at) + work.dlogf("[v1] magicsock: relayManager: got relayed pong from %v latency=%v", + msgEvent.from, + done.latency.Round(time.Millisecond), + ) return default: // unexpected message type, silently discard @@ -892,6 +944,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat } case <-timer.C: // The handshake timed out. + work.dlogf("[v1] magicsock: relayManager: handshake timed out") return } } @@ -899,7 +952,7 @@ func (r *relayManager) handshakeServerEndpoint(work *relayHandshakeWork, generat const allocateUDPRelayEndpointRequestTimeout = time.Second * 10 -func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork, generation uint32) { +func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork) { done := relayEndpointAllocWorkDoneEvent{work: work} defer func() { @@ -910,7 +963,7 @@ func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork, gene dm := &disco.AllocateUDPRelayEndpointRequest{ ClientDisco: work.discoKeys.Get(), - Generation: generation, + Generation: work.allocGen, } sendAllocReq := func() { @@ -923,6 +976,7 @@ func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork, gene dm, discoVerboseLog, ) + work.dlogf("[v1] magicsock: relayManager: sent alloc request") } go sendAllocReq() @@ -938,16 +992,19 @@ func (r *relayManager) allocateServerEndpoint(work *relayEndpointAllocWork, gene for { select { case <-work.ctx.Done(): + work.dlogf("[v1] magicsock: relayManager: alloc request canceled") return case <-returnAfterTimer.C: + work.dlogf("[v1] magicsock: relayManager: alloc request timed out") return case <-retryAfterTimer.C: go sendAllocReq() case resp := <-work.rxDiscoMsgCh: - if resp.Generation != generation 
|| + if resp.Generation != work.allocGen || !work.discoKeys.Equal(key.NewSortedPairOfDiscoPublic(resp.ClientDisco[0], resp.ClientDisco[1])) { continue } + work.dlogf("[v1] magicsock: relayManager: got alloc response") done.allocated = udprelay.ServerEndpoint{ ServerDisco: resp.ServerDisco, ClientDisco: resp.ClientDisco, @@ -1004,6 +1061,7 @@ func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { } byCandidatePeerRelay[v] = started r.allocGeneration++ - go r.allocateServerEndpoint(started, r.allocGeneration) + started.allocGen = r.allocGeneration + go r.allocateServerEndpoint(started) } } From 32bfd7275234d336b6e2fc22d4e3889ba4f4c3cf Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 10 Sep 2025 16:30:25 -0700 Subject: [PATCH 0317/1093] tstest/integration/testcontrol: propagate CapVer (#17093) To support integration testing of client features that rely on it, e.g. peer relay. Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- tstest/integration/testcontrol/testcontrol.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 739795bb3d245..2fbf37de9a15e 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -674,6 +674,7 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. 
AllowedIPs: allowedIPs, Hostinfo: req.Hostinfo.View(), Name: req.Hostinfo.Hostname, + Cap: req.Version, Capabilities: []tailcfg.NodeCapability{ tailcfg.CapabilityHTTPS, tailcfg.NodeAttrFunnel, @@ -811,6 +812,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi endpoints := filterInvalidIPv6Endpoints(req.Endpoints) node.Endpoints = endpoints node.DiscoKey = req.DiscoKey + node.Cap = req.Version if req.Hostinfo != nil { node.Hostinfo = req.Hostinfo.View() if ni := node.Hostinfo.NetInfo(); ni.Valid() { From fb9d9ba86e42680cde20c890de8857cbfe40f2c3 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 10 Sep 2025 16:48:40 -0700 Subject: [PATCH 0318/1093] wgengine/magicsock: add TS_DEBUG_NEVER_DIRECT_UDP debug knob (#17094) Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- wgengine/magicsock/debugknobs.go | 3 +++ wgengine/magicsock/debugknobs_stubs.go | 1 + wgengine/magicsock/endpoint.go | 3 +++ 3 files changed, 7 insertions(+) diff --git a/wgengine/magicsock/debugknobs.go b/wgengine/magicsock/debugknobs.go index f8fd9f0407d44..b0a47ff87f31b 100644 --- a/wgengine/magicsock/debugknobs.go +++ b/wgengine/magicsock/debugknobs.go @@ -62,6 +62,9 @@ var ( // //lint:ignore U1000 used on Linux/Darwin only debugPMTUD = envknob.RegisterBool("TS_DEBUG_PMTUD") + // debugNeverDirectUDP disables the use of direct UDP connections, forcing + // all peer communication over DERP or peer relay. + debugNeverDirectUDP = envknob.RegisterBool("TS_DEBUG_NEVER_DIRECT_UDP") // Hey you! Adding a new debugknob? Make sure to stub it out in the // debugknobs_stubs.go file too. 
) diff --git a/wgengine/magicsock/debugknobs_stubs.go b/wgengine/magicsock/debugknobs_stubs.go index 336d7baa19645..7dee1d6b0b91c 100644 --- a/wgengine/magicsock/debugknobs_stubs.go +++ b/wgengine/magicsock/debugknobs_stubs.go @@ -31,3 +31,4 @@ func debugRingBufferMaxSizeBytes() int { return 0 } func inTest() bool { return false } func debugPeerMap() bool { return false } func pretendpoints() []netip.AddrPort { return []netip.AddrPort{} } +func debugNeverDirectUDP() bool { return false } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index b8778b8d845d5..1f36aabd3baf8 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1286,6 +1286,9 @@ func (de *endpoint) startDiscoPingLocked(ep epAddr, now mono.Time, purpose disco if runtime.GOOS == "js" { return } + if debugNeverDirectUDP() && !ep.vni.IsSet() && ep.ap.Addr() != tailcfg.DerpMagicIPAddr { + return + } epDisco := de.disco.Load() if epDisco == nil { return From 49aa798d18ac070de48aafec65cbd853ba18ed96 Mon Sep 17 00:00:00 2001 From: Will Hannah Date: Thu, 11 Sep 2025 10:56:02 -0700 Subject: [PATCH 0319/1093] VERSION.txt: this is v1.88.0 (#17098) Signed-off-by: Will Hannah --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index f6342716723fc..59be592144c28 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.87.0 +1.88.0 From 1be9c6b23ed08befba62c3ca44b2e3f98f335a59 Mon Sep 17 00:00:00 2001 From: Will Hannah Date: Thu, 11 Sep 2025 11:19:17 -0700 Subject: [PATCH 0320/1093] VERSION.txt: this is v1.89.0 (#17099) Signed-off-by: Will Hannah --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 59be592144c28..636ea711ad968 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.88.0 +1.89.0 From 921d77062ebfb4b4d26629278abea7ea55cfc942 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 7 Sep 2025 20:25:54 -0700 Subject: [PATCH 
0321/1093] cmd/omitsize: add tool to dump build sizes Updates #12614 Change-Id: I8f85d7275bc8eecedbabe6631b50e1cf70791d2d Signed-off-by: Brad Fitzpatrick --- cmd/omitsize/omitsize.go | 124 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 cmd/omitsize/omitsize.go diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go new file mode 100644 index 0000000000000..d8e1a65403396 --- /dev/null +++ b/cmd/omitsize/omitsize.go @@ -0,0 +1,124 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The omitsize tool prints out how large the Tailscale binaries are with +// different build tags. +package main + +import ( + "crypto/sha256" + "flag" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "slices" + "strconv" + "strings" + + "tailscale.com/util/must" +) + +var ( + cacheDir = flag.String("cachedir", "", "if non-empty, use this directory to store cached size results to speed up subsequent runs. The tool does not consider the git status when deciding whether to use the cache. 
It's on you to nuke it between runs if the tree changed.") + features = flag.String("features", "", "comma-separated list of features to consider, with or without the ts_omit_ prefix (default: all detected in build_dist.sh)") +) + +func main() { + flag.Parse() + + var all []string + if *features == "" { + sh := must.Get(os.ReadFile("build_dist.sh")) + omitRx := regexp.MustCompile(`\b(ts_omit_\w+)\b`) + all = omitRx.FindAllString(string(sh), -1) + } else { + for v := range strings.SplitSeq(*features, ",") { + if !strings.HasPrefix(v, "ts_omit_") { + v = "ts_omit_" + v + } + all = append(all, v) + } + } + + slices.Sort(all) + all = slices.Compact(all) + + baseD := measure("tailscaled") + baseC := measure("tailscale") + baseBoth := measure("tailscaled", "ts_include_cli") + + fmt.Printf("(a) starting with everything and removing a feature...\n\n") + + fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") + fmt.Printf("%9d %9d %9d\n", baseD, baseC, baseBoth) + + minD := measure("tailscaled", all...) + minC := measure("tailscale", all...) + minBoth := measure("tailscaled", append(slices.Clone(all), "ts_include_cli")...) + fmt.Printf("-%8d -%8d -%8d omit-all\n", baseD-minD, baseC-minC, baseBoth-minBoth) + + for _, t := range all { + sizeD := measure("tailscaled", t) + sizeC := measure("tailscale", t) + sizeBoth := measure("tailscaled", append([]string{t}, "ts_include_cli")...) + saveD := max(baseD-sizeD, 0) + saveC := max(baseC-sizeC, 0) + saveBoth := max(baseBoth-sizeBoth, 0) + fmt.Printf("-%8d -%8d -%8d %s\n", saveD, saveC, saveBoth, t) + } + + fmt.Printf("\n(b) or, starting at minimal and adding one feature back...\n") + fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") + fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) + for _, t := range all { + tags := allExcept(all, t) + sizeD := measure("tailscaled", tags...) + sizeC := measure("tailscale", tags...) 
+ sizeBoth := measure("tailscaled", append(tags, "ts_include_cli")...) + fmt.Printf("+%8d +%8d +%8d .. add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), strings.TrimPrefix(t, "ts_omit_")) + } + +} + +func allExcept(all []string, omit string) []string { + return slices.DeleteFunc(slices.Clone(all), func(s string) bool { return s == omit }) +} + +func measure(bin string, tags ...string) int64 { + tags = slices.Clone(tags) + slices.Sort(tags) + tags = slices.Compact(tags) + comma := strings.Join(tags, ",") + + var cacheFile string + if *cacheDir != "" { + cacheFile = filepath.Join(*cacheDir, fmt.Sprintf("%02x", sha256.Sum256(fmt.Appendf(nil, "%s-%s.size", bin, comma)))) + if v, err := os.ReadFile(cacheFile); err == nil { + if size, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil { + return size + } + } + } + + cmd := exec.Command("go", "build", "-tags", strings.Join(tags, ","), "-o", "tmpbin", "./cmd/"+bin) + log.Printf("# Measuring %v", cmd.Args) + cmd.Env = append(os.Environ(), "CGO_ENABLED=0", "GOOS=linux", "GOARCH=amd64") + out, err := cmd.CombinedOutput() + if err != nil { + log.Fatalf("error measuring %q: %v, %s\n", bin, err, out) + } + fi, err := os.Stat("tmpbin") + if err != nil { + log.Fatal(err) + } + n := fi.Size() + if cacheFile != "" { + if err := os.WriteFile(cacheFile, fmt.Appendf(nil, "%d", n), 0644); err != nil { + log.Fatalf("error writing size to cache: %v\n", err) + } + } + return n +} From 82c5024f036c440ce22c6a2ff9bfe73b2fb991e3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 23 Jun 2025 21:24:37 -0700 Subject: [PATCH 0322/1093] net/netns: fix controlLogf doc Its doc said its signature matched a std signature, but it used Tailscale-specific types. Nowadays it's the caller (func control) that curries the logf/netmon and returns the std-matching signature. 
Updates #cleanup (while answering a question on Slack) Change-Id: Ic99de41fc6a1c720575a7f33c564d0bcfd9a2c30 Signed-off-by: Brad Fitzpatrick --- net/netns/netns_darwin.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go index f2ed16601b88e..1f30f00d2a870 100644 --- a/net/netns/netns_darwin.go +++ b/net/netns/netns_darwin.go @@ -33,10 +33,8 @@ var bindToInterfaceByRouteEnv = envknob.RegisterBool("TS_BIND_TO_INTERFACE_BY_RO var errInterfaceStateInvalid = errors.New("interface state invalid") -// controlLogf marks c as necessary to dial in a separate network namespace. -// -// It's intentionally the same signature as net.Dialer.Control -// and net.ListenConfig.Control. +// controlLogf binds c to a particular interface as necessary to dial the +// provided (network, address). func controlLogf(logf logger.Logf, netMon *netmon.Monitor, network, address string, c syscall.RawConn) error { if isLocalhost(address) { // Don't bind to an interface for localhost connections. From a1dcf12b671e8668b1bd3eedc7cfcb4381b9d29c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 8 Sep 2025 08:13:49 -0700 Subject: [PATCH 0323/1093] feature/drive: start factoring out Taildrive, add ts_omit_drive build tag As of this commit (per the issue), the Taildrive code remains where it was, but in new files that are protected by the new ts_omit_drive build tag. Future commits will move it. 
Updates #17058 Change-Id: Idf0a51db59e41ae8da6ea2b11d238aefc48b219e Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- cmd/tailscale/cli/cli.go | 3 +- cmd/tailscale/cli/drive.go | 80 +++++---- cmd/tailscaled/depaware.txt | 1 + cmd/tailscaled/deps_test.go | 16 ++ cmd/tailscaled/tailscaled.go | 45 ++--- cmd/tailscaled/tailscaled_drive.go | 56 ++++++ feature/condregister/maybe_drive.go | 8 + feature/drive/drive.go | 5 + ipn/ipnlocal/drive.go | 163 ++++++++++++++++-- ipn/ipnlocal/drive_tomove.go | 30 ++++ ipn/ipnlocal/local.go | 139 +-------------- ipn/ipnlocal/peerapi.go | 95 ---------- ipn/ipnlocal/peerapi_drive.go | 110 ++++++++++++ ipn/localapi/localapi.go | 123 ------------- ipn/localapi/localapi_drive.go | 141 +++++++++++++++ .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + .../tailscaled_deps_test_windows.go | 1 + 21 files changed, 582 insertions(+), 440 deletions(-) create mode 100644 cmd/tailscaled/tailscaled_drive.go create mode 100644 feature/condregister/maybe_drive.go create mode 100644 feature/drive/drive.go create mode 100644 ipn/ipnlocal/drive_tomove.go create mode 100644 ipn/ipnlocal/peerapi_drive.go create mode 100644 ipn/localapi/localapi_drive.go diff --git a/build_dist.sh b/build_dist.sh index 57231eb7079ea..9dc879b1ed466 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy,ts_omit_debugeventbus,ts_omit_webclient" + 
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube,ts_omit_completion,ts_omit_ssh,ts_omit_wakeonlan,ts_omit_capture,ts_omit_relayserver,ts_omit_systray,ts_omit_taildrop,ts_omit_tpm,ts_omit_syspolicy,ts_omit_debugeventbus,ts_omit_webclient,ts_omit_drive" ;; --box) if [ ! -z "${TAGS:-}" ]; then diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 46aa29c710333..39fdce60d1d3b 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -210,6 +210,7 @@ func noDupFlagify(c *ffcli.Command) { var fileCmd func() *ffcli.Command var sysPolicyCmd func() *ffcli.Command var maybeWebCmd func() *ffcli.Command +var maybeDriveCmd func() *ffcli.Command func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") @@ -262,7 +263,7 @@ change in the future. updateCmd, whoisCmd, debugCmd(), - driveCmd, + nilOrCall(maybeDriveCmd), idTokenCmd, configureHostCmd(), systrayCmd, diff --git a/cmd/tailscale/cli/drive.go b/cmd/tailscale/cli/drive.go index 929852b4c5a32..67536ace07367 100644 --- a/cmd/tailscale/cli/drive.go +++ b/cmd/tailscale/cli/drive.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_drive + package cli import ( @@ -20,43 +22,49 @@ const ( driveListUsage = "tailscale drive list" ) -var driveCmd = &ffcli.Command{ - Name: "drive", - ShortHelp: "Share a directory with your tailnet", - ShortUsage: strings.Join([]string{ - driveShareUsage, - driveRenameUsage, - driveUnshareUsage, - driveListUsage, - }, "\n"), - LongHelp: buildShareLongHelp(), - UsageFunc: usageFuncNoDefaultValues, - Subcommands: []*ffcli.Command{ - { - Name: "share", - ShortUsage: driveShareUsage, - Exec: runDriveShare, - ShortHelp: "[ALPHA] Create or modify a share", - }, - { - Name: "rename", - ShortUsage: driveRenameUsage, - ShortHelp: "[ALPHA] Rename a share", - Exec: runDriveRename, - }, - { - Name: "unshare", - ShortUsage: driveUnshareUsage, - ShortHelp: "[ALPHA] Remove a share", - 
Exec: runDriveUnshare, - }, - { - Name: "list", - ShortUsage: driveListUsage, - ShortHelp: "[ALPHA] List current shares", - Exec: runDriveList, +func init() { + maybeDriveCmd = driveCmd +} + +func driveCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "drive", + ShortHelp: "Share a directory with your tailnet", + ShortUsage: strings.Join([]string{ + driveShareUsage, + driveRenameUsage, + driveUnshareUsage, + driveListUsage, + }, "\n"), + LongHelp: buildShareLongHelp(), + UsageFunc: usageFuncNoDefaultValues, + Subcommands: []*ffcli.Command{ + { + Name: "share", + ShortUsage: driveShareUsage, + Exec: runDriveShare, + ShortHelp: "[ALPHA] Create or modify a share", + }, + { + Name: "rename", + ShortUsage: driveRenameUsage, + ShortHelp: "[ALPHA] Rename a share", + Exec: runDriveRename, + }, + { + Name: "unshare", + ShortUsage: driveUnshareUsage, + ShortHelp: "[ALPHA] Remove a share", + Exec: runDriveUnshare, + }, + { + Name: "list", + ShortUsage: driveListUsage, + ShortHelp: "[ALPHA] List current shares", + Exec: runDriveList, + }, }, - }, + } } // runDriveShare is the entry point for the "tailscale drive share" command. 
diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 3ca57077254f7..a0842b45bec60 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -274,6 +274,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature from tailscale.com/feature/wakeonlan+ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/drive from tailscale.com/feature/condregister tailscale.com/feature/relayserver from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a672e32e2d63e..5c71a62fd7457 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -61,3 +61,19 @@ func TestOmitReflectThings(t *testing.T) { }, }.Check(t) } + +func TestOmitDrive(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_drive,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "driveimpl") { + t.Errorf("unexpected dep with ts_omit_drive: %q", dep) + } + if strings.Contains(dep, "webdav") { + t.Errorf("unexpected dep with ts_omit_drive: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index ddf6d9ef68f5d..890ff7bf8f8fd 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -33,8 +33,8 @@ import ( "tailscale.com/client/local" "tailscale.com/cmd/tailscaled/childproc" "tailscale.com/control/controlclient" - "tailscale.com/drive/driveimpl" "tailscale.com/envknob" + "tailscale.com/feature" _ "tailscale.com/feature/condregister" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -153,7 +153,6 @@ var subCommands = map[string]*func([]string) error{ 
"uninstall-system-daemon": &uninstallSystemDaemon, "debug": &debugModeFunc, "be-child": &beChildFunc, - "serve-taildrive": &serveDriveFunc, } var beCLI func() // non-nil if CLI is linked in with the "ts_include_cli" build tag @@ -480,7 +479,9 @@ func run() (err error) { debugMux = newDebugMux() } - sys.Set(driveimpl.NewFileSystemForRemote(logf)) + if f, ok := hookSetSysDrive.GetOk(); ok { + f(sys, logf) + } if app := envknob.App(); app != "" { hostinfo.SetApp(app) @@ -489,6 +490,11 @@ func run() (err error) { return startIPNServer(context.Background(), logf, pol.PublicID, sys) } +var ( + hookSetSysDrive feature.Hook[func(*tsd.System, logger.Logf)] + hookSetWgEnginConfigDrive feature.Hook[func(*wgengine.Config, logger.Logf)] +) + var sigPipe os.Signal // set by sigpipe.go func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) error { @@ -749,7 +755,9 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), EventBus: sys.Bus.Get(), - DriveForLocal: driveimpl.NewFileSystemForLocal(logf), + } + if f, ok := hookSetWgEnginConfigDrive.GetOk(); ok { + f(&conf, logf) } sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) @@ -943,35 +951,6 @@ func beChild(args []string) error { return f(args[1:]) } -var serveDriveFunc = serveDrive - -// serveDrive serves one or more Taildrives on localhost using the WebDAV -// protocol. On UNIX and MacOS tailscaled environment, Taildrive spawns child -// tailscaled processes in serve-taildrive mode in order to access the fliesystem -// as specific (usually unprivileged) users. -// -// serveDrive prints the address on which it's listening to stdout so that the -// parent process knows where to connect to. 
-func serveDrive(args []string) error { - if len(args) == 0 { - return errors.New("missing shares") - } - if len(args)%2 != 0 { - return errors.New("need pairs") - } - s, err := driveimpl.NewFileServer() - if err != nil { - return fmt.Errorf("unable to start Taildrive file server: %v", err) - } - shares := make(map[string]string) - for i := 0; i < len(args); i += 2 { - shares[args[i]] = args[i+1] - } - s.SetShares(shares) - fmt.Printf("%v\n", s.Addr()) - return s.Serve() -} - // dieOnPipeReadErrorOfFD reads from the pipe named by fd and exit the process // when the pipe becomes readable. We use this in tests as a somewhat more // portable mechanism for the Linux PR_SET_PDEATHSIG, which we wish existed on diff --git a/cmd/tailscaled/tailscaled_drive.go b/cmd/tailscaled/tailscaled_drive.go new file mode 100644 index 0000000000000..49f35a3811404 --- /dev/null +++ b/cmd/tailscaled/tailscaled_drive.go @@ -0,0 +1,56 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package main + +import ( + "errors" + "fmt" + + "tailscale.com/drive/driveimpl" + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/wgengine" +) + +func init() { + subCommands["serve-taildrive"] = &serveDriveFunc + + hookSetSysDrive.Set(func(sys *tsd.System, logf logger.Logf) { + sys.Set(driveimpl.NewFileSystemForRemote(logf)) + }) + hookSetWgEnginConfigDrive.Set(func(conf *wgengine.Config, logf logger.Logf) { + conf.DriveForLocal = driveimpl.NewFileSystemForLocal(logf) + }) +} + +var serveDriveFunc = serveDrive + +// serveDrive serves one or more Taildrives on localhost using the WebDAV +// protocol. On UNIX and MacOS tailscaled environment, Taildrive spawns child +// tailscaled processes in serve-taildrive mode in order to access the fliesystem +// as specific (usually unprivileged) users. +// +// serveDrive prints the address on which it's listening to stdout so that the +// parent process knows where to connect to. 
+func serveDrive(args []string) error { + if len(args) == 0 { + return errors.New("missing shares") + } + if len(args)%2 != 0 { + return errors.New("need pairs") + } + s, err := driveimpl.NewFileServer() + if err != nil { + return fmt.Errorf("unable to start Taildrive file server: %v", err) + } + shares := make(map[string]string) + for i := 0; i < len(args); i += 2 { + shares[args[i]] = args[i+1] + } + s.SetShares(shares) + fmt.Printf("%v\n", s.Addr()) + return s.Serve() +} diff --git a/feature/condregister/maybe_drive.go b/feature/condregister/maybe_drive.go new file mode 100644 index 0000000000000..cb447ff289a29 --- /dev/null +++ b/feature/condregister/maybe_drive.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package condregister + +import _ "tailscale.com/feature/drive" diff --git a/feature/drive/drive.go b/feature/drive/drive.go new file mode 100644 index 0000000000000..3660a2b959643 --- /dev/null +++ b/feature/drive/drive.go @@ -0,0 +1,5 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package drive registers the Taildrive (file server) feature. +package drive diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index d77481903fc09..7d6dc2427adae 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -1,38 +1,35 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_drive + package ipnlocal import ( + "errors" "fmt" + "io" + "net/http" + "net/netip" "os" "slices" "tailscale.com/drive" "tailscale.com/ipn" "tailscale.com/tailcfg" + "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" + "tailscale.com/util/httpm" ) -const ( - // DriveLocalPort is the port on which the Taildrive listens for location - // connections on quad 100. 
- DriveLocalPort = 8080 -) - -// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is -// enabled. This is currently based on checking for the drive:share node -// attribute. -func (b *LocalBackend) DriveSharingEnabled() bool { - return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare) +func init() { + hookSetNetMapLockedDrive.Set(setNetMapLockedDrive) } -// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes -// is enabled. This is currently based on checking for the drive:access node -// attribute. -func (b *LocalBackend) DriveAccessEnabled() bool { - return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess) +func setNetMapLockedDrive(b *LocalBackend, nm *netmap.NetworkMap) { + b.updateDrivePeersLocked(nm) + b.driveNotifyCurrentSharesLocked() } // DriveSetServerAddr tells Taildrive to use the given address for connecting @@ -363,3 +360,137 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem } return driveRemotes } + +// responseBodyWrapper wraps an io.ReadCloser and stores +// the number of bytesRead. +type responseBodyWrapper struct { + io.ReadCloser + logVerbose bool + bytesRx int64 + bytesTx int64 + log logger.Logf + method string + statusCode int + contentType string + fileExtension string + shareNodeKey string + selfNodeKey string + contentLength int64 +} + +// logAccess logs the taildrive: access: log line. If the logger is nil, +// the log will not be written. +func (rbw *responseBodyWrapper) logAccess(err string) { + if rbw.log == nil { + return + } + + // Some operating systems create and copy lots of 0 length hidden files for + // tracking various states. Omit these to keep logs from being too verbose. 
+ if rbw.logVerbose || rbw.contentLength > 0 { + levelPrefix := "" + if rbw.logVerbose { + levelPrefix = "[v1] " + } + rbw.log( + "%staildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", + levelPrefix, + rbw.method, + rbw.selfNodeKey, + rbw.shareNodeKey, + rbw.statusCode, + rbw.fileExtension, + rbw.contentType, + roundTraffic(rbw.contentLength), + roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) + } +} + +// Read implements the io.Reader interface. +func (rbw *responseBodyWrapper) Read(b []byte) (int, error) { + n, err := rbw.ReadCloser.Read(b) + rbw.bytesRx += int64(n) + if err != nil && !errors.Is(err, io.EOF) { + rbw.logAccess(err.Error()) + } + + return n, err +} + +// Close implements the io.Close interface. +func (rbw *responseBodyWrapper) Close() error { + err := rbw.ReadCloser.Close() + var errStr string + if err != nil { + errStr = err.Error() + } + rbw.logAccess(errStr) + + return err +} + +// driveTransport is an http.RoundTripper that wraps +// b.Dialer().PeerAPITransport() with metrics tracking. +type driveTransport struct { + b *LocalBackend + tr *http.Transport +} + +func (b *LocalBackend) newDriveTransport() *driveTransport { + return &driveTransport{ + b: b, + tr: b.Dialer().PeerAPITransport(), + } +} + +func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + // Some WebDAV clients include origin and refer headers, which peerapi does + // not like. Remove them. 
+ req.Header.Del("origin") + req.Header.Del("referer") + + bw := &requestBodyWrapper{} + if req.Body != nil { + bw.ReadCloser = req.Body + req.Body = bw + } + + defer func() { + contentType := "unknown" + if ct := req.Header.Get("Content-Type"); ct != "" { + contentType = ct + } + + dt.b.mu.Lock() + selfNodeKey := dt.b.currentNode().Self().Key().ShortString() + dt.b.mu.Unlock() + n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) + shareNodeKey := "unknown" + if ok { + shareNodeKey = string(n.Key().ShortString()) + } + + rbw := responseBodyWrapper{ + log: dt.b.logf, + logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level + method: req.Method, + bytesTx: int64(bw.bytesRead), + selfNodeKey: selfNodeKey, + shareNodeKey: shareNodeKey, + contentType: contentType, + contentLength: resp.ContentLength, + fileExtension: parseDriveFileExtensionForLog(req.URL.Path), + statusCode: resp.StatusCode, + ReadCloser: resp.Body, + } + + if resp.StatusCode >= 400 { + // in case of error response, just log immediately + rbw.logAccess("") + } else { + resp.Body = &rbw + } + }() + + return dt.tr.RoundTrip(req) +} diff --git a/ipn/ipnlocal/drive_tomove.go b/ipn/ipnlocal/drive_tomove.go new file mode 100644 index 0000000000000..290fe097022fd --- /dev/null +++ b/ipn/ipnlocal/drive_tomove.go @@ -0,0 +1,30 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// This is the Taildrive stuff that should ideally be registered in init only when +// the ts_omit_drive is not set, but for transition reasons is currently (2025-09-08) +// always defined, as we work to pull it out of LocalBackend. + +package ipnlocal + +import "tailscale.com/tailcfg" + +const ( + // DriveLocalPort is the port on which the Taildrive listens for location + // connections on quad 100. 
+ DriveLocalPort = 8080 +) + +// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is +// enabled. This is currently based on checking for the drive:share node +// attribute. +func (b *LocalBackend) DriveSharingEnabled() bool { + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare) +} + +// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes +// is enabled. This is currently based on checking for the drive:access node +// attribute. +func (b *LocalBackend) DriveAccessEnabled() bool { + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess) +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 2d917ae545545..8a6d0e013454a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -52,6 +52,7 @@ import ( "tailscale.com/drive" "tailscale.com/envknob" "tailscale.com/envknob/featureknob" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -100,7 +101,6 @@ import ( "tailscale.com/util/deephash" "tailscale.com/util/dnsname" "tailscale.com/util/goroutines" - "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/multierr" "tailscale.com/util/osuser" @@ -6326,143 +6326,12 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.metrics.approvedRoutes.Set(approved) } - b.updateDrivePeersLocked(nm) - b.driveNotifyCurrentSharesLocked() -} - -// responseBodyWrapper wraps an io.ReadCloser and stores -// the number of bytesRead. -type responseBodyWrapper struct { - io.ReadCloser - logVerbose bool - bytesRx int64 - bytesTx int64 - log logger.Logf - method string - statusCode int - contentType string - fileExtension string - shareNodeKey string - selfNodeKey string - contentLength int64 -} - -// logAccess logs the taildrive: access: log line. If the logger is nil, -// the log will not be written. 
-func (rbw *responseBodyWrapper) logAccess(err string) { - if rbw.log == nil { - return - } - - // Some operating systems create and copy lots of 0 length hidden files for - // tracking various states. Omit these to keep logs from being too verbose. - if rbw.logVerbose || rbw.contentLength > 0 { - levelPrefix := "" - if rbw.logVerbose { - levelPrefix = "[v1] " - } - rbw.log( - "%staildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", - levelPrefix, - rbw.method, - rbw.selfNodeKey, - rbw.shareNodeKey, - rbw.statusCode, - rbw.fileExtension, - rbw.contentType, - roundTraffic(rbw.contentLength), - roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) - } -} - -// Read implements the io.Reader interface. -func (rbw *responseBodyWrapper) Read(b []byte) (int, error) { - n, err := rbw.ReadCloser.Read(b) - rbw.bytesRx += int64(n) - if err != nil && !errors.Is(err, io.EOF) { - rbw.logAccess(err.Error()) - } - - return n, err -} - -// Close implements the io.Close interface. -func (rbw *responseBodyWrapper) Close() error { - err := rbw.ReadCloser.Close() - var errStr string - if err != nil { - errStr = err.Error() + if f, ok := hookSetNetMapLockedDrive.GetOk(); ok { + f(b, nm) } - rbw.logAccess(errStr) - - return err -} - -// driveTransport is an http.RoundTripper that wraps -// b.Dialer().PeerAPITransport() with metrics tracking. -type driveTransport struct { - b *LocalBackend - tr *http.Transport } -func (b *LocalBackend) newDriveTransport() *driveTransport { - return &driveTransport{ - b: b, - tr: b.Dialer().PeerAPITransport(), - } -} - -func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - // Some WebDAV clients include origin and refer headers, which peerapi does - // not like. Remove them. 
- req.Header.Del("origin") - req.Header.Del("referer") - - bw := &requestBodyWrapper{} - if req.Body != nil { - bw.ReadCloser = req.Body - req.Body = bw - } - - defer func() { - contentType := "unknown" - if ct := req.Header.Get("Content-Type"); ct != "" { - contentType = ct - } - - dt.b.mu.Lock() - selfNodeKey := dt.b.currentNode().Self().Key().ShortString() - dt.b.mu.Unlock() - n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) - shareNodeKey := "unknown" - if ok { - shareNodeKey = string(n.Key().ShortString()) - } - - rbw := responseBodyWrapper{ - log: dt.b.logf, - logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level - method: req.Method, - bytesTx: int64(bw.bytesRead), - selfNodeKey: selfNodeKey, - shareNodeKey: shareNodeKey, - contentType: contentType, - contentLength: resp.ContentLength, - fileExtension: parseDriveFileExtensionForLog(req.URL.Path), - statusCode: resp.StatusCode, - ReadCloser: resp.Body, - } - - if resp.StatusCode >= 400 { - // in case of error response, just log immediately - rbw.logAccess("") - } else { - resp.Body = &rbw - } - }() - - return dt.tr.RoundTrip(req) -} +var hookSetNetMapLockedDrive feature.Hook[func(*LocalBackend, *netmap.NetworkMap)] // roundTraffic rounds bytes. This is used to preserve user privacy within logs. 
func roundTraffic(bytes int64) float64 { diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 89554f0ff9eb1..23c349087caf8 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -16,7 +16,6 @@ import ( "net/http" "net/netip" "os" - "path/filepath" "runtime" "slices" "strconv" @@ -26,7 +25,6 @@ import ( "golang.org/x/net/dns/dnsmessage" "golang.org/x/net/http/httpguts" - "tailscale.com/drive" "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/hostinfo" @@ -39,14 +37,9 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/views" "tailscale.com/util/clientmetric" - "tailscale.com/util/httpm" "tailscale.com/wgengine/filter" ) -const ( - taildrivePrefix = "/v0/drive" -) - var initListenConfig func(*net.ListenConfig, netip.Addr, *netmon.State, string) error // addH2C is non-nil on platforms where we want to add H2C @@ -369,10 +362,6 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.handleDNSQuery(w, r) return } - if strings.HasPrefix(r.URL.Path, taildrivePrefix) { - h.handleServeDrive(w, r) - return - } switch r.URL.Path { case "/v0/goroutines": h.handleServeGoroutines(w, r) @@ -1018,90 +1007,6 @@ func (rbw *requestBodyWrapper) Read(b []byte) (int, error) { return n, err } -func (h *peerAPIHandler) handleServeDrive(w http.ResponseWriter, r *http.Request) { - h.logfv1("taildrive: got %s request from %s", r.Method, h.peerNode.Key().ShortString()) - if !h.ps.b.DriveSharingEnabled() { - h.logf("taildrive: not enabled") - http.Error(w, "taildrive not enabled", http.StatusNotFound) - return - } - - capsMap := h.PeerCaps() - driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive] - if !ok { - h.logf("taildrive: not permitted") - http.Error(w, "taildrive not permitted", http.StatusForbidden) - return - } - - rawPerms := make([][]byte, 0, len(driveCaps)) - for _, cap := range driveCaps { - rawPerms = append(rawPerms, []byte(cap)) - } - - p, err := drive.ParsePermissions(rawPerms) - if err != nil { 
- h.logf("taildrive: error parsing permissions: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - fs, ok := h.ps.b.sys.DriveForRemote.GetOK() - if !ok { - h.logf("taildrive: not supported on platform") - http.Error(w, "taildrive not supported on platform", http.StatusNotFound) - return - } - wr := &httpResponseWrapper{ - ResponseWriter: w, - } - bw := &requestBodyWrapper{ - ReadCloser: r.Body, - } - r.Body = bw - - defer func() { - switch wr.statusCode { - case 304: - // 304s are particularly chatty so skip logging. - default: - log := h.logf - if r.Method != httpm.PUT && r.Method != httpm.GET { - log = h.logfv1 - } - contentType := "unknown" - if ct := wr.Header().Get("Content-Type"); ct != "" { - contentType = ct - } - - log("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead)) - } - }() - - r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix) - fs.ServeHTTPWithPerms(p, wr, r) -} - -// parseDriveFileExtensionForLog parses the file extension, if available. -// If a file extension is not present or parsable, the file extension is -// set to "unknown". If the file extension contains a double quote, it is -// replaced with "removed". -// All whitespace is removed from a parsed file extension. -// File extensions including the leading ., e.g. ".gif". -func parseDriveFileExtensionForLog(path string) string { - fileExt := "unknown" - if fe := filepath.Ext(path); fe != "" { - if strings.Contains(fe, "\"") { - // Do not log include file extensions with quotes within them. - return "removed" - } - // Remove white space from user defined inputs. 
- fileExt = strings.ReplaceAll(fe, " ", "") - } - - return fileExt -} - // peerAPIURL returns an HTTP URL for the peer's peerapi service, // without a trailing slash. // diff --git a/ipn/ipnlocal/peerapi_drive.go b/ipn/ipnlocal/peerapi_drive.go new file mode 100644 index 0000000000000..8dffacd9a2513 --- /dev/null +++ b/ipn/ipnlocal/peerapi_drive.go @@ -0,0 +1,110 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package ipnlocal + +import ( + "net/http" + "path/filepath" + "strings" + + "tailscale.com/drive" + "tailscale.com/tailcfg" + "tailscale.com/util/httpm" +) + +const ( + taildrivePrefix = "/v0/drive" +) + +func init() { + peerAPIHandlerPrefixes[taildrivePrefix] = handleServeDrive +} + +func handleServeDrive(hi PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + h := hi.(*peerAPIHandler) + + h.logfv1("taildrive: got %s request from %s", r.Method, h.peerNode.Key().ShortString()) + if !h.ps.b.DriveSharingEnabled() { + h.logf("taildrive: not enabled") + http.Error(w, "taildrive not enabled", http.StatusNotFound) + return + } + + capsMap := h.PeerCaps() + driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive] + if !ok { + h.logf("taildrive: not permitted") + http.Error(w, "taildrive not permitted", http.StatusForbidden) + return + } + + rawPerms := make([][]byte, 0, len(driveCaps)) + for _, cap := range driveCaps { + rawPerms = append(rawPerms, []byte(cap)) + } + + p, err := drive.ParsePermissions(rawPerms) + if err != nil { + h.logf("taildrive: error parsing permissions: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + fs, ok := h.ps.b.sys.DriveForRemote.GetOK() + if !ok { + h.logf("taildrive: not supported on platform") + http.Error(w, "taildrive not supported on platform", http.StatusNotFound) + return + } + wr := &httpResponseWrapper{ + ResponseWriter: w, + } + bw := &requestBodyWrapper{ + ReadCloser: r.Body, + } + r.Body = bw + + defer 
func() { + switch wr.statusCode { + case 304: + // 304s are particularly chatty so skip logging. + default: + log := h.logf + if r.Method != httpm.PUT && r.Method != httpm.GET { + log = h.logfv1 + } + contentType := "unknown" + if ct := wr.Header().Get("Content-Type"); ct != "" { + contentType = ct + } + + log("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead)) + } + }() + + r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix) + fs.ServeHTTPWithPerms(p, wr, r) +} + +// parseDriveFileExtensionForLog parses the file extension, if available. +// If a file extension is not present or parsable, the file extension is +// set to "unknown". If the file extension contains a double quote, it is +// replaced with "removed". +// All whitespace is removed from a parsed file extension. +// File extensions including the leading ., e.g. ".gif". +func parseDriveFileExtensionForLog(path string) string { + fileExt := "unknown" + if fe := filepath.Ext(path); fe != "" { + if strings.Contains(fe, "\"") { + // Do not log include file extensions with quotes within them. + return "removed" + } + // Remove white space from user defined inputs. 
+ fileExt = strings.ReplaceAll(fe, " ", "") + } + + return fileExt +} diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 2dc75c0d936b3..2a245be27f197 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -18,8 +18,6 @@ import ( "net/http" "net/netip" "net/url" - "os" - "path" "reflect" "runtime" "slices" @@ -31,7 +29,6 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" - "tailscale.com/drive" "tailscale.com/envknob" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -104,8 +101,6 @@ var handler = map[string]LocalAPIHandler{ "disconnect-control": (*Handler).disconnectControl, "dns-osconfig": (*Handler).serveDNSOSConfig, "dns-query": (*Handler).serveDNSQuery, - "drive/fileserver-address": (*Handler).serveDriveServerAddr, - "drive/shares": (*Handler).serveShares, "goroutines": (*Handler).serveGoroutines, "handle-push-message": (*Handler).serveHandlePushMessage, "id-token": (*Handler).serveIDToken, @@ -2661,124 +2656,6 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { }) } -// serveDriveServerAddr handles updates of the Taildrive file server address. -func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.PUT { - http.Error(w, "only PUT allowed", http.StatusMethodNotAllowed) - return - } - - b, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - h.b.DriveSetServerAddr(string(b)) - w.WriteHeader(http.StatusCreated) -} - -// serveShares handles the management of Taildrive shares. 
-// -// PUT - adds or updates an existing share -// DELETE - removes a share -// GET - gets a list of all shares, sorted by name -// POST - renames an existing share -func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { - if !h.b.DriveSharingEnabled() { - http.Error(w, `taildrive sharing not enabled, please add the attribute "drive:share" to this node in your ACLs' "nodeAttrs" section`, http.StatusForbidden) - return - } - switch r.Method { - case httpm.PUT: - var share drive.Share - err := json.NewDecoder(r.Body).Decode(&share) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - share.Path = path.Clean(share.Path) - fi, err := os.Stat(share.Path) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - if !fi.IsDir() { - http.Error(w, "not a directory", http.StatusBadRequest) - return - } - if drive.AllowShareAs() { - // share as the connected user - username, err := h.Actor.Username() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - share.As = username - } - err = h.b.DriveSetShare(&share) - if err != nil { - if errors.Is(err, drive.ErrInvalidShareName) { - http.Error(w, "invalid share name", http.StatusBadRequest) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusCreated) - case httpm.DELETE: - b, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - err = h.b.DriveRemoveShare(string(b)) - if err != nil { - if os.IsNotExist(err) { - http.Error(w, "share not found", http.StatusNotFound) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - case httpm.POST: - var names [2]string - err := json.NewDecoder(r.Body).Decode(&names) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - err = h.b.DriveRenameShare(names[0], 
names[1]) - if err != nil { - if os.IsNotExist(err) { - http.Error(w, "share not found", http.StatusNotFound) - return - } - if os.IsExist(err) { - http.Error(w, "share name already used", http.StatusBadRequest) - return - } - if errors.Is(err, drive.ErrInvalidShareName) { - http.Error(w, "invalid share name", http.StatusBadRequest) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - case httpm.GET: - shares := h.b.DriveGetShares() - err := json.NewEncoder(w).Encode(shares) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - default: - http.Error(w, "unsupported method", http.StatusMethodNotAllowed) - } -} - // serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { if r.Method != httpm.GET { diff --git a/ipn/localapi/localapi_drive.go b/ipn/localapi/localapi_drive.go new file mode 100644 index 0000000000000..eb765ec2eabba --- /dev/null +++ b/ipn/localapi/localapi_drive.go @@ -0,0 +1,141 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package localapi + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "os" + "path" + + "tailscale.com/drive" + "tailscale.com/util/httpm" +) + +func init() { + Register("drive/fileserver-address", (*Handler).serveDriveServerAddr) + Register("drive/shares", (*Handler).serveShares) +} + +// serveDriveServerAddr handles updates of the Taildrive file server address. 
+func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.PUT { + http.Error(w, "only PUT allowed", http.StatusMethodNotAllowed) + return + } + + b, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + h.b.DriveSetServerAddr(string(b)) + w.WriteHeader(http.StatusCreated) +} + +// serveShares handles the management of Taildrive shares. +// +// PUT - adds or updates an existing share +// DELETE - removes a share +// GET - gets a list of all shares, sorted by name +// POST - renames an existing share +func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { + if !h.b.DriveSharingEnabled() { + http.Error(w, `taildrive sharing not enabled, please add the attribute "drive:share" to this node in your ACLs' "nodeAttrs" section`, http.StatusForbidden) + return + } + switch r.Method { + case httpm.PUT: + var share drive.Share + err := json.NewDecoder(r.Body).Decode(&share) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + share.Path = path.Clean(share.Path) + fi, err := os.Stat(share.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if !fi.IsDir() { + http.Error(w, "not a directory", http.StatusBadRequest) + return + } + if drive.AllowShareAs() { + // share as the connected user + username, err := h.Actor.Username() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + share.As = username + } + err = h.b.DriveSetShare(&share) + if err != nil { + if errors.Is(err, drive.ErrInvalidShareName) { + http.Error(w, "invalid share name", http.StatusBadRequest) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusCreated) + case httpm.DELETE: + b, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = 
h.b.DriveRemoveShare(string(b)) + if err != nil { + if os.IsNotExist(err) { + http.Error(w, "share not found", http.StatusNotFound) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + case httpm.POST: + var names [2]string + err := json.NewDecoder(r.Body).Decode(&names) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = h.b.DriveRenameShare(names[0], names[1]) + if err != nil { + if os.IsNotExist(err) { + http.Error(w, "share not found", http.StatusNotFound) + return + } + if os.IsExist(err) { + http.Error(w, "share name already used", http.StatusBadRequest) + return + } + if errors.Is(err, drive.ErrInvalidShareName) { + http.Error(w, "invalid share name", http.StatusBadRequest) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + case httpm.GET: + shares := h.b.DriveGetShares() + err := json.NewEncoder(w).Encode(shares) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + default: + http.Error(w, "unsupported method", http.StatusMethodNotAllowed) + } +} diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index a87a3ec658ccb..b025e3a4304bb 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index a87a3ec658ccb..b025e3a4304bb 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -17,6 +17,7 @@ 
import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index a87a3ec658ccb..b025e3a4304bb 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index a87a3ec658ccb..b025e3a4304bb 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -17,6 +17,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 54e1bcc04dbbc..32f95357dc039 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -25,6 +25,7 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" From cfb2ca724b5faf8007576014a8350893868f7629 Mon Sep 17 00:00:00 2001 From: David Bond Date: Fri, 12 Sep 2025 12:04:39 +0100 Subject: [PATCH 0324/1093] tsnet: expose logtail's Logf method (#17057) This commit adds a new method to the tsnet.Server type 
named `Logtailf` that returns the underlying logtail instance's Logf method. This is intended to be used within the Kubernetes operator to wrap its existing logger in a way such that operator-specific logs can also be sent to control for support & debugging purposes. Updates https://github.com/tailscale/corp/issues/32037 Signed-off-by: David Bond --- tsnet/tsnet.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 359fbc1c5246d..d25da0996d3a6 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -492,6 +492,16 @@ func (s *Server) TailscaleIPs() (ip4, ip6 netip.Addr) { return ip4, ip6 } +// Logtailf returns a [logger.Logf] that outputs to Tailscale's logging service and will be only visible to Tailscale's +// support team. Logs written there cannot be retrieved by the user. This method always returns a non-nil value. +func (s *Server) Logtailf() logger.Logf { + if s.logtail == nil { + return logger.Discard + } + + return s.logtail.Logf +} + func (s *Server) getAuthKey() string { if v := s.AuthKey; v != "" { return v From 0e3d942e39030e886e19bd0083969a192b340026 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 12 Sep 2025 11:22:36 -0700 Subject: [PATCH 0325/1093] feature/featuretags: move list of omit-able features to a Go package Updates #12614 Change-Id: I4012c33095c6a7ccf80ad36dbab5cedbae5b3d47 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 4 +- cmd/featuretags/featuretags.go | 73 ++++++++++++++++++++++++++++++ cmd/omitsize/omitsize.go | 11 ++--- feature/featuretags/featuretags.go | 27 +++++++++++ 4 files changed, 107 insertions(+), 8 deletions(-) create mode 100644 cmd/featuretags/featuretags.go create mode 100644 feature/featuretags/featuretags.go diff --git a/build_dist.sh b/build_dist.sh index 9dc879b1ed466..45d471be0a6e0 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -18,7 +18,7 @@ fi eval `CGO_ENABLED=0 GOOS=$($go env GOHOSTOS) GOARCH=$($go env GOHOSTARCH) $go run ./cmd/mkversion` -if [ "$1" = 
"shellvars" ]; then +if [ "$#" -ge 1 ] && [ "$1" = "shellvars" ]; then cat < Date: Fri, 12 Sep 2025 09:51:00 -0700 Subject: [PATCH 0326/1093] all: add ts_omit_tailnetlock as a start of making it build-time modular Updates #17115 Change-Id: I6b083c0db4c4d359e49eb129d626b7f128f0a9d2 Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 187 ------------ client/local/tailnetlock.go | 204 +++++++++++++ cmd/tailscale/cli/cli.go | 14 +- cmd/tailscale/cli/network-lock.go | 6 + cmd/tailscaled/deps_test.go | 13 + feature/featuretags/featuretags.go | 1 + ipn/ipnlocal/local.go | 48 ---- ipn/ipnlocal/network-lock.go | 49 ++++ ipn/ipnlocal/network-lock_test.go | 2 + ipn/ipnlocal/tailnetlock_disabled.go | 31 ++ ipn/localapi/localapi.go | 394 ------------------------- ipn/localapi/tailnetlock.go | 413 +++++++++++++++++++++++++++ tka/aum.go | 2 + tka/builder.go | 2 + tka/deeplink.go | 2 + tka/disabled_stub.go | 149 ++++++++++ tka/sig.go | 2 + tka/state.go | 2 + tka/state_test.go | 2 + tka/sync.go | 2 + tka/tailchonk.go | 2 + tka/tka.go | 2 + types/netlogtype/netlogtype_test.go | 2 + 23 files changed, 897 insertions(+), 634 deletions(-) create mode 100644 client/local/tailnetlock.go create mode 100644 ipn/ipnlocal/tailnetlock_disabled.go create mode 100644 ipn/localapi/tailnetlock.go create mode 100644 tka/disabled_stub.go diff --git a/client/local/local.go b/client/local/local.go index 0257c7a260b7a..03ca10bb75a4b 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -38,10 +38,8 @@ import ( "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" - "tailscale.com/tka" "tailscale.com/types/dnstype" "tailscale.com/types/key" - "tailscale.com/types/tkatype" "tailscale.com/util/eventbus" ) @@ -1219,183 +1217,6 @@ func (lc *Client) Ping(ctx context.Context, ip netip.Addr, pingtype tailcfg.Ping return lc.PingWithOpts(ctx, ip, pingtype, PingOpts{}) } -// NetworkLockStatus fetches information about the tailnet key authority, if one is configured. 
-func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) { - body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[*ipnstate.NetworkLockStatus](body) -} - -// NetworkLockInit initializes the tailnet key authority. -// -// TODO(tom): Plumb through disablement secrets. -func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) { - var b bytes.Buffer - type initRequest struct { - Keys []tka.Key - DisablementValues [][]byte - SupportDisablement []byte - } - - if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil { - return nil, err - } - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/init", 200, &b) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[*ipnstate.NetworkLockStatus](body) -} - -// NetworkLockWrapPreauthKey wraps a pre-auth key with information to -// enable unattended bringup in the locked tailnet. -func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) { - encodedPrivate, err := tkaKey.MarshalText() - if err != nil { - return "", err - } - - var b bytes.Buffer - type wrapRequest struct { - TSKey string - TKAKey string // key.NLPrivate.MarshalText - } - if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil { - return "", err - } - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b) - if err != nil { - return "", fmt.Errorf("error: %w", err) - } - return string(body), nil -} - -// NetworkLockModify adds and/or removes key(s) to the tailnet key authority. 
-func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error { - var b bytes.Buffer - type modifyRequest struct { - AddKeys []tka.Key - RemoveKeys []tka.Key - } - - if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockSign signs the specified node-key and transmits that signature to the control plane. -// rotationPublic, if specified, must be an ed25519 public key. -func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error { - var b bytes.Buffer - type signRequest struct { - NodeKey key.NodePublic - RotationPublic []byte - } - - if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockAffectedSigs returns all signatures signed by the specified keyID. -func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) { - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID)) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[[]tkatype.MarshaledSignature](body) -} - -// NetworkLockLog returns up to maxEntries number of changes to network-lock state. 
-func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) { - v := url.Values{} - v.Set("limit", fmt.Sprint(maxEntries)) - body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil) - if err != nil { - return nil, fmt.Errorf("error %w: %s", err, body) - } - return decodeJSON[[]ipnstate.NetworkLockUpdate](body) -} - -// NetworkLockForceLocalDisable forcibly shuts down network lock on this node. -func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error { - // This endpoint expects an empty JSON stanza as the payload. - var b bytes.Buffer - if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained -// in url and returns information extracted from it. -func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) { - vr := struct { - URL string - }{url} - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr)) - if err != nil { - return nil, fmt.Errorf("sending verify-deeplink: %w", err) - } - - return decodeJSON[*tka.DeeplinkValidationResult](body) -} - -// NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise. 
-func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) { - vr := struct { - Keys []tkatype.KeyID - ForkFrom string - }{removeKeys, forkFrom.String()} - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/generate-recovery-aum", 200, jsonBody(vr)) - if err != nil { - return nil, fmt.Errorf("sending generate-recovery-aum: %w", err) - } - - return body, nil -} - -// NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key. -func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) { - r := bytes.NewReader(aum.Serialize()) - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r) - if err != nil { - return nil, fmt.Errorf("sending cosign-recovery-aum: %w", err) - } - - return body, nil -} - -// NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane. -func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error { - r := bytes.NewReader(aum.Serialize()) - _, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r) - if err != nil { - return fmt.Errorf("sending cosign-recovery-aum: %w", err) - } - return nil -} - // SetServeConfig sets or replaces the serving settings. // If config is nil, settings are cleared and serving is disabled. func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { @@ -1421,14 +1242,6 @@ func (lc *Client) DisconnectControl(ctx context.Context) error { return nil } -// NetworkLockDisable shuts down network-lock across the tailnet. -func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error { - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - // GetServeConfig return the current serve config. // // If the serve config is empty, it returns (nil, nil). 
diff --git a/client/local/tailnetlock.go b/client/local/tailnetlock.go new file mode 100644 index 0000000000000..9d37d2f3553d5 --- /dev/null +++ b/client/local/tailnetlock.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package local + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/url" + + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" +) + +// NetworkLockStatus fetches information about the tailnet key authority, if one is configured. +func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) { + body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[*ipnstate.NetworkLockStatus](body) +} + +// NetworkLockInit initializes the tailnet key authority. +// +// TODO(tom): Plumb through disablement secrets. +func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) { + var b bytes.Buffer + type initRequest struct { + Keys []tka.Key + DisablementValues [][]byte + SupportDisablement []byte + } + + if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil { + return nil, err + } + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/init", 200, &b) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[*ipnstate.NetworkLockStatus](body) +} + +// NetworkLockWrapPreauthKey wraps a pre-auth key with information to +// enable unattended bringup in the locked tailnet. 
+func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) { + encodedPrivate, err := tkaKey.MarshalText() + if err != nil { + return "", err + } + + var b bytes.Buffer + type wrapRequest struct { + TSKey string + TKAKey string // key.NLPrivate.MarshalText + } + if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil { + return "", err + } + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b) + if err != nil { + return "", fmt.Errorf("error: %w", err) + } + return string(body), nil +} + +// NetworkLockModify adds and/or removes key(s) to the tailnet key authority. +func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error { + var b bytes.Buffer + type modifyRequest struct { + AddKeys []tka.Key + RemoveKeys []tka.Key + } + + if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockSign signs the specified node-key and transmits that signature to the control plane. +// rotationPublic, if specified, must be an ed25519 public key. +func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error { + var b bytes.Buffer + type signRequest struct { + NodeKey key.NodePublic + RotationPublic []byte + } + + if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockAffectedSigs returns all signatures signed by the specified keyID. 
+func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) { + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID)) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[[]tkatype.MarshaledSignature](body) +} + +// NetworkLockLog returns up to maxEntries number of changes to network-lock state. +func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) { + v := url.Values{} + v.Set("limit", fmt.Sprint(maxEntries)) + body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil) + if err != nil { + return nil, fmt.Errorf("error %w: %s", err, body) + } + return decodeJSON[[]ipnstate.NetworkLockUpdate](body) +} + +// NetworkLockForceLocalDisable forcibly shuts down network lock on this node. +func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error { + // This endpoint expects an empty JSON stanza as the payload. + var b bytes.Buffer + if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained +// in url and returns information extracted from it. +func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) { + vr := struct { + URL string + }{url} + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr)) + if err != nil { + return nil, fmt.Errorf("sending verify-deeplink: %w", err) + } + + return decodeJSON[*tka.DeeplinkValidationResult](body) +} + +// NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise. 
+func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) { + vr := struct { + Keys []tkatype.KeyID + ForkFrom string + }{removeKeys, forkFrom.String()} + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/generate-recovery-aum", 200, jsonBody(vr)) + if err != nil { + return nil, fmt.Errorf("sending generate-recovery-aum: %w", err) + } + + return body, nil +} + +// NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key. +func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) { + r := bytes.NewReader(aum.Serialize()) + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r) + if err != nil { + return nil, fmt.Errorf("sending cosign-recovery-aum: %w", err) + } + + return body, nil +} + +// NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane. +func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error { + r := bytes.NewReader(aum.Serialize()) + _, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r) + if err != nil { + return fmt.Errorf("sending cosign-recovery-aum: %w", err) + } + return nil +} + +// NetworkLockDisable shuts down network-lock across the tailnet. 
+func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error { + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 39fdce60d1d3b..ef0dc98209237 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -207,10 +207,14 @@ func noDupFlagify(c *ffcli.Command) { } } -var fileCmd func() *ffcli.Command -var sysPolicyCmd func() *ffcli.Command -var maybeWebCmd func() *ffcli.Command -var maybeDriveCmd func() *ffcli.Command +var ( + fileCmd, + sysPolicyCmd, + maybeWebCmd, + maybeDriveCmd, + maybeNetlockCmd, + _ func() *ffcli.Command +) func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") @@ -257,7 +261,7 @@ change in the future. nilOrCall(fileCmd), bugReportCmd, certCmd, - netlockCmd, + nilOrCall(maybeNetlockCmd), licensesCmd, exitNodeCmd(), updateCmd, diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index d19909576c090..ec3b01ad61291 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package cli import ( @@ -27,6 +29,10 @@ import ( "tailscale.com/util/prompt" ) +func init() { + maybeNetlockCmd = func() *ffcli.Command { return netlockCmd } +} + var netlockCmd = &ffcli.Command{ Name: "lock", ShortUsage: "tailscale lock [arguments...]", diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 5c71a62fd7457..a334eb9b76d4b 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -77,3 +77,16 @@ func TestOmitDrive(t *testing.T) { }, }.Check(t) } + +func TestOmitTailnetLock(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_tailnetlock,ts_include_cli", + OnDep: func(dep 
string) { + if strings.Contains(dep, "cbor") { + t.Errorf("unexpected dep with ts_omit_tailnetlock: %q", dep) + } + }, + }.Check(t) +} diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 6f87dab7667e2..00ad0b4c24adf 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -19,6 +19,7 @@ var Features = map[string]string{ "syspolicy": "System policy configuration (MDM) support", "systray": "Linux system tray", "taildrop": "Taildrop (file sending) support", + "tailnetlock": "Tailnet Lock support", "tap": "Experimental Layer 2 (ethernet) support", "tka": "Tailnet Lock (TKA) support", "tpm": "TPM support", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8a6d0e013454a..6108aa83061bc 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -82,7 +82,6 @@ import ( "tailscale.com/posture" "tailscale.com/syncs" "tailscale.com/tailcfg" - "tailscale.com/tka" "tailscale.com/tsd" "tailscale.com/tstime" "tailscale.com/types/appctype" @@ -7179,53 +7178,6 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { return b.resetForProfileChangeLockedOnEntry(unlock) } -func (b *LocalBackend) initTKALocked() error { - cp := b.pm.CurrentProfile() - if cp.ID() == "" { - b.tka = nil - return nil - } - if b.tka != nil { - if b.tka.profile == cp.ID() { - // Already initialized. - return nil - } - // As we're switching profiles, we need to reset the TKA to nil. - b.tka = nil - } - root := b.TailscaleVarRoot() - if root == "" { - b.tka = nil - b.logf("network-lock unavailable; no state directory") - return nil - } - - chonkDir := b.chonkPathLocked() - if _, err := os.Stat(chonkDir); err == nil { - // The directory exists, which means network-lock has been initialized. 
- storage, err := tka.ChonkDir(chonkDir) - if err != nil { - return fmt.Errorf("opening tailchonk: %v", err) - } - authority, err := tka.Open(storage) - if err != nil { - return fmt.Errorf("initializing tka: %v", err) - } - if err := authority.Compact(storage, tkaCompactionDefaults); err != nil { - b.logf("tka compaction failed: %v", err) - } - - b.tka = &tkaState{ - profile: cp.ID(), - authority: authority, - storage: storage, - } - b.logf("tka initialized at head %x", authority.Head()) - } - - return nil -} - // resetDialPlan resets the dialPlan for this LocalBackend. It will log if // anything is reset. // diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 10f0cc8278109..4990824453c47 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package ipnlocal import ( @@ -56,6 +58,53 @@ type tkaState struct { filtered []ipnstate.TKAPeer } +func (b *LocalBackend) initTKALocked() error { + cp := b.pm.CurrentProfile() + if cp.ID() == "" { + b.tka = nil + return nil + } + if b.tka != nil { + if b.tka.profile == cp.ID() { + // Already initialized. + return nil + } + // As we're switching profiles, we need to reset the TKA to nil. + b.tka = nil + } + root := b.TailscaleVarRoot() + if root == "" { + b.tka = nil + b.logf("network-lock unavailable; no state directory") + return nil + } + + chonkDir := b.chonkPathLocked() + if _, err := os.Stat(chonkDir); err == nil { + // The directory exists, which means network-lock has been initialized. 
+ storage, err := tka.ChonkDir(chonkDir) + if err != nil { + return fmt.Errorf("opening tailchonk: %v", err) + } + authority, err := tka.Open(storage) + if err != nil { + return fmt.Errorf("initializing tka: %v", err) + } + if err := authority.Compact(storage, tkaCompactionDefaults); err != nil { + b.logf("tka compaction failed: %v", err) + } + + b.tka = &tkaState{ + profile: cp.ID(), + authority: authority, + storage: storage, + } + b.logf("tka initialized at head %x", authority.Head()) + } + + return nil +} + // tkaFilterNetmapLocked checks the signatures on each node key, dropping // nodes from the netmap whose signature does not verify. // diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 443539aecc2cb..842b75c437799 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package ipnlocal import ( diff --git a/ipn/ipnlocal/tailnetlock_disabled.go b/ipn/ipnlocal/tailnetlock_disabled.go new file mode 100644 index 0000000000000..85cf4bd3f4ea5 --- /dev/null +++ b/ipn/ipnlocal/tailnetlock_disabled.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package ipnlocal + +import ( + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/netmap" +) + +type tkaState struct { + authority *tka.Authority +} + +func (b *LocalBackend) initTKALocked() error { + return nil +} + +func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsView) error { + return nil +} + +func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {} + +func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { + return &ipnstate.NetworkLockStatus{Enabled: false} +} diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go 
index 2a245be27f197..ac5b0ee7db06e 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -41,14 +41,12 @@ import ( "tailscale.com/net/netutil" "tailscale.com/net/portmapper" "tailscale.com/tailcfg" - "tailscale.com/tka" "tailscale.com/tstime" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/ptr" - "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/httpm" @@ -124,19 +122,6 @@ var handler = map[string]LocalAPIHandler{ "start": (*Handler).serveStart, "status": (*Handler).serveStatus, "suggest-exit-node": (*Handler).serveSuggestExitNode, - "tka/affected-sigs": (*Handler).serveTKAAffectedSigs, - "tka/cosign-recovery-aum": (*Handler).serveTKACosignRecoveryAUM, - "tka/disable": (*Handler).serveTKADisable, - "tka/force-local-disable": (*Handler).serveTKALocalDisable, - "tka/generate-recovery-aum": (*Handler).serveTKAGenerateRecoveryAUM, - "tka/init": (*Handler).serveTKAInit, - "tka/log": (*Handler).serveTKALog, - "tka/modify": (*Handler).serveTKAModify, - "tka/sign": (*Handler).serveTKASign, - "tka/status": (*Handler).serveTKAStatus, - "tka/submit-recovery-aum": (*Handler).serveTKASubmitRecoveryAUM, - "tka/verify-deeplink": (*Handler).serveTKAVerifySigningDeeplink, - "tka/wrap-preauth-key": (*Handler).serveTKAWrapPreauthKey, "update/check": (*Handler).serveUpdateCheck, "update/install": (*Handler).serveUpdateInstall, "update/progress": (*Handler).serveUpdateProgress, @@ -1892,25 +1877,6 @@ func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Reques json.NewEncoder(w).Encode(struct{}{}) } -func (h *Handler) serveTKAStatus(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "lock status access denied", http.StatusForbidden) - return - } - if r.Method != httpm.GET { - http.Error(w, "use GET", http.StatusMethodNotAllowed) - return - } - - j, err := 
json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - func (h *Handler) serveSetGUIVisible(w http.ResponseWriter, r *http.Request) { if r.Method != httpm.POST { http.Error(w, "use POST", http.StatusMethodNotAllowed) @@ -1958,366 +1924,6 @@ func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Requ e.Encode(prefs) } -func (h *Handler) serveTKASign(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "lock sign access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type signRequest struct { - NodeKey key.NodePublic - RotationPublic []byte - } - var req signRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockSign(req.NodeKey, req.RotationPublic); err != nil { - http.Error(w, "signing failed: "+err.Error(), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKAInit(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "lock init access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type initRequest struct { - Keys []tka.Key - DisablementValues [][]byte - SupportDisablement []byte - } - var req initRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if !h.b.NetworkLockAllowed() { - http.Error(w, "Tailnet Lock is not supported on your pricing plan", http.StatusForbidden) - return - } - - if err := h.b.NetworkLockInit(req.Keys, req.DisablementValues, 
req.SupportDisablement); err != nil { - http.Error(w, "initialization failed: "+err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAModify(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type modifyRequest struct { - AddKeys []tka.Key - RemoveKeys []tka.Key - } - var req modifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockModify(req.AddKeys, req.RemoveKeys); err != nil { - http.Error(w, "network-lock modify failed: "+err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(204) -} - -func (h *Handler) serveTKAWrapPreauthKey(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type wrapRequest struct { - TSKey string - TKAKey string // key.NLPrivate.MarshalText - } - var req wrapRequest - if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 12*1024)).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - var priv key.NLPrivate - if err := priv.UnmarshalText([]byte(req.TKAKey)); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - wrappedKey, err := h.b.NetworkLockWrapPreauthKey(req.TSKey, priv) - if err != nil { - http.Error(w, err.Error(), 
http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(wrappedKey)) -} - -func (h *Handler) serveTKAVerifySigningDeeplink(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "signing deeplink verification access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type verifyRequest struct { - URL string - } - var req verifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) - return - } - - res := h.b.NetworkLockVerifySigningDeeplink(req.URL) - j, err := json.MarshalIndent(res, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKADisable(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - secret, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading secret", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockDisable(secret); err != nil { - http.Error(w, "network-lock disable failed: "+err.Error(), http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKALocalDisable(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - // Require a JSON stanza for the body as an additional CSRF protection. 
- var req struct{} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockForceLocalDisable(); err != nil { - http.Error(w, "network-lock local disable failed: "+err.Error(), http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "use GET", http.StatusMethodNotAllowed) - return - } - - limit := 50 - if limitStr := r.FormValue("limit"); limitStr != "" { - l, err := strconv.Atoi(limitStr) - if err != nil { - http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) - return - } - limit = int(l) - } - - updates, err := h.b.NetworkLockLog(limit) - if err != nil { - http.Error(w, "reading log failed: "+err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(updates, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAAffectedSigs(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - keyID, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 2048)) - if err != nil { - http.Error(w, "reading body", http.StatusBadRequest) - return - } - - sigs, err := h.b.NetworkLockAffectedSigs(keyID) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(sigs, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAGenerateRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", 
http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type verifyRequest struct { - Keys []tkatype.KeyID - ForkFrom string - } - var req verifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) - return - } - - var forkFrom tka.AUMHash - if req.ForkFrom != "" { - if err := forkFrom.UnmarshalText([]byte(req.ForkFrom)); err != nil { - http.Error(w, "decoding fork-from: "+err.Error(), http.StatusBadRequest) - return - } - } - - res, err := h.b.NetworkLockGenerateRecoveryAUM(req.Keys, forkFrom) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/octet-stream") - w.Write(res.Serialize()) -} - -func (h *Handler) serveTKACosignRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - aumBytes, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading AUM", http.StatusBadRequest) - return - } - var aum tka.AUM - if err := aum.Unserialize(aumBytes); err != nil { - http.Error(w, "decoding AUM", http.StatusBadRequest) - return - } - - res, err := h.b.NetworkLockCosignRecoveryAUM(&aum) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/octet-stream") - w.Write(res.Serialize()) -} - -func (h *Handler) serveTKASubmitRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 
1024*1024) - aumBytes, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading AUM", http.StatusBadRequest) - return - } - var aum tka.AUM - if err := aum.Unserialize(aumBytes); err != nil { - http.Error(w, "decoding AUM", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockSubmitRecoveryAUM(&aum); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) -} - // serveProfiles serves profile switching-related endpoints. Supported methods // and paths are: // - GET /profiles/: list all profiles (JSON-encoded array of ipn.LoginProfiles) diff --git a/ipn/localapi/tailnetlock.go b/ipn/localapi/tailnetlock.go new file mode 100644 index 0000000000000..7971509384cf0 --- /dev/null +++ b/ipn/localapi/tailnetlock.go @@ -0,0 +1,413 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package localapi + +import ( + "encoding/json" + "io" + "net/http" + "strconv" + + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" + "tailscale.com/util/httpm" +) + +func init() { + handler["tka/affected-sigs"] = (*Handler).serveTKAAffectedSigs + handler["tka/cosign-recovery-aum"] = (*Handler).serveTKACosignRecoveryAUM + handler["tka/disable"] = (*Handler).serveTKADisable + handler["tka/force-local-disable"] = (*Handler).serveTKALocalDisable + handler["tka/generate-recovery-aum"] = (*Handler).serveTKAGenerateRecoveryAUM + handler["tka/init"] = (*Handler).serveTKAInit + handler["tka/log"] = (*Handler).serveTKALog + handler["tka/modify"] = (*Handler).serveTKAModify + handler["tka/sign"] = (*Handler).serveTKASign + handler["tka/status"] = (*Handler).serveTKAStatus + handler["tka/submit-recovery-aum"] = (*Handler).serveTKASubmitRecoveryAUM + handler["tka/verify-deeplink"] = (*Handler).serveTKAVerifySigningDeeplink + handler["tka/wrap-preauth-key"] = (*Handler).serveTKAWrapPreauthKey +} + +func (h *Handler) 
serveTKAStatus(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "lock status access denied", http.StatusForbidden) + return + } + if r.Method != httpm.GET { + http.Error(w, "use GET", http.StatusMethodNotAllowed) + return + } + + j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKASign(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "lock sign access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type signRequest struct { + NodeKey key.NodePublic + RotationPublic []byte + } + var req signRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockSign(req.NodeKey, req.RotationPublic); err != nil { + http.Error(w, "signing failed: "+err.Error(), http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKAInit(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "lock init access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type initRequest struct { + Keys []tka.Key + DisablementValues [][]byte + SupportDisablement []byte + } + var req initRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if !h.b.NetworkLockAllowed() { + http.Error(w, "Tailnet Lock is not supported on your pricing plan", http.StatusForbidden) + return + } + + if err := h.b.NetworkLockInit(req.Keys, req.DisablementValues, req.SupportDisablement); err 
!= nil { + http.Error(w, "initialization failed: "+err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAModify(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type modifyRequest struct { + AddKeys []tka.Key + RemoveKeys []tka.Key + } + var req modifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockModify(req.AddKeys, req.RemoveKeys); err != nil { + http.Error(w, "network-lock modify failed: "+err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(204) +} + +func (h *Handler) serveTKAWrapPreauthKey(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type wrapRequest struct { + TSKey string + TKAKey string // key.NLPrivate.MarshalText + } + var req wrapRequest + if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 12*1024)).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + var priv key.NLPrivate + if err := priv.UnmarshalText([]byte(req.TKAKey)); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + wrappedKey, err := h.b.NetworkLockWrapPreauthKey(req.TSKey, priv) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + 
w.WriteHeader(http.StatusOK) + w.Write([]byte(wrappedKey)) +} + +func (h *Handler) serveTKAVerifySigningDeeplink(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "signing deeplink verification access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type verifyRequest struct { + URL string + } + var req verifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) + return + } + + res := h.b.NetworkLockVerifySigningDeeplink(req.URL) + j, err := json.MarshalIndent(res, "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKADisable(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 1024*1024) + secret, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading secret", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockDisable(secret); err != nil { + http.Error(w, "network-lock disable failed: "+err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKALocalDisable(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + // Require a JSON stanza for the body as an additional CSRF protection. 
+ var req struct{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockForceLocalDisable(); err != nil { + http.Error(w, "network-lock local disable failed: "+err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "use GET", http.StatusMethodNotAllowed) + return + } + + limit := 50 + if limitStr := r.FormValue("limit"); limitStr != "" { + l, err := strconv.Atoi(limitStr) + if err != nil { + http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) + return + } + limit = int(l) + } + + updates, err := h.b.NetworkLockLog(limit) + if err != nil { + http.Error(w, "reading log failed: "+err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(updates, "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAAffectedSigs(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + keyID, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 2048)) + if err != nil { + http.Error(w, "reading body", http.StatusBadRequest) + return + } + + sigs, err := h.b.NetworkLockAffectedSigs(keyID) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(sigs, "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAGenerateRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", 
http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type verifyRequest struct { + Keys []tkatype.KeyID + ForkFrom string + } + var req verifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) + return + } + + var forkFrom tka.AUMHash + if req.ForkFrom != "" { + if err := forkFrom.UnmarshalText([]byte(req.ForkFrom)); err != nil { + http.Error(w, "decoding fork-from: "+err.Error(), http.StatusBadRequest) + return + } + } + + res, err := h.b.NetworkLockGenerateRecoveryAUM(req.Keys, forkFrom) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/octet-stream") + w.Write(res.Serialize()) +} + +func (h *Handler) serveTKACosignRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 1024*1024) + aumBytes, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading AUM", http.StatusBadRequest) + return + } + var aum tka.AUM + if err := aum.Unserialize(aumBytes); err != nil { + http.Error(w, "decoding AUM", http.StatusBadRequest) + return + } + + res, err := h.b.NetworkLockCosignRecoveryAUM(&aum) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/octet-stream") + w.Write(res.Serialize()) +} + +func (h *Handler) serveTKASubmitRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 
1024*1024) + aumBytes, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading AUM", http.StatusBadRequest) + return + } + var aum tka.AUM + if err := aum.Unserialize(aumBytes); err != nil { + http.Error(w, "decoding AUM", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockSubmitRecoveryAUM(&aum); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) +} diff --git a/tka/aum.go b/tka/aum.go index 07a34b4f62458..08d70897ee70f 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/builder.go b/tka/builder.go index ec38bb6fa15f7..642f39d77422d 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/deeplink.go b/tka/deeplink.go index 5cf24fc5c2c82..5570a19d7371b 100644 --- a/tka/deeplink.go +++ b/tka/deeplink.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/disabled_stub.go b/tka/disabled_stub.go new file mode 100644 index 0000000000000..15bf12c333fc8 --- /dev/null +++ b/tka/disabled_stub.go @@ -0,0 +1,149 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package tka + +import ( + "crypto/ed25519" + "errors" + + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/tkatype" +) + +type Authority struct { + head AUM + oldestAncestor AUM + state State +} + +func (*Authority) Head() AUMHash { return AUMHash{} } + +func (AUMHash) MarshalText() ([]byte, error) { return nil, errNoTailnetLock } + +type State struct{} + +// AUMKind describes valid AUM 
types. +type AUMKind uint8 + +type AUMHash [32]byte + +type AUM struct { + MessageKind AUMKind `cbor:"1,keyasint"` + PrevAUMHash []byte `cbor:"2,keyasint"` + + // Key encodes a public key to be added to the key authority. + // This field is used for AddKey AUMs. + Key *Key `cbor:"3,keyasint,omitempty"` + + // KeyID references a public key which is part of the key authority. + // This field is used for RemoveKey and UpdateKey AUMs. + KeyID tkatype.KeyID `cbor:"4,keyasint,omitempty"` + + // State describes the full state of the key authority. + // This field is used for Checkpoint AUMs. + State *State `cbor:"5,keyasint,omitempty"` + + // Votes and Meta describe properties of a key in the key authority. + // These fields are used for UpdateKey AUMs. + Votes *uint `cbor:"6,keyasint,omitempty"` + Meta map[string]string `cbor:"7,keyasint,omitempty"` + + // Signatures lists the signatures over this AUM. + // CBOR key 23 is the last key which can be encoded as a single byte. + Signatures []tkatype.Signature `cbor:"23,keyasint,omitempty"` +} + +type Chonk interface { + // AUM returns the AUM with the specified digest. + // + // If the AUM does not exist, then os.ErrNotExist is returned. + AUM(hash AUMHash) (AUM, error) + + // ChildAUMs returns all AUMs with a specified previous + // AUM hash. + ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) + + // CommitVerifiedAUMs durably stores the provided AUMs. + // Callers MUST ONLY provide AUMs which are verified (specifically, + // a call to aumVerify() must return a nil error). + // as the implementation assumes that only verified AUMs are stored. + CommitVerifiedAUMs(updates []AUM) error + + // Heads returns AUMs for which there are no children. In other + // words, the latest AUM in all possible chains (the 'leaves'). + Heads() ([]AUM, error) + + // SetLastActiveAncestor is called to record the oldest-known AUM + // that contributed to the current state. 
This value is used as + // a hint on next startup to determine which chain to pick when computing + // the current state, if there are multiple distinct chains. + SetLastActiveAncestor(hash AUMHash) error + + // LastActiveAncestor returns the oldest-known AUM that was (in a + // previous run) an ancestor of the current state. This is used + // as a hint to pick the correct chain in the event that the Chonk stores + // multiple distinct chains. + LastActiveAncestor() (*AUMHash, error) +} + +// SigKind describes valid NodeKeySignature types. +type SigKind uint8 + +type NodeKeySignature struct { + // SigKind identifies the variety of signature. + SigKind SigKind `cbor:"1,keyasint"` + // Pubkey identifies the key.NodePublic which is being authorized. + // SigCredential signatures do not use this field. + Pubkey []byte `cbor:"2,keyasint,omitempty"` + + // KeyID identifies which key in the tailnet key authority should + // be used to verify this signature. Only set for SigDirect and + // SigCredential signature kinds. + KeyID []byte `cbor:"3,keyasint,omitempty"` + + // Signature is the packed (R, S) ed25519 signature over all other + // fields of the structure. + Signature []byte `cbor:"4,keyasint,omitempty"` + + // Nested describes a NodeKeySignature which authorizes the node-key + // used as Pubkey. Only used for SigRotation signatures. + Nested *NodeKeySignature `cbor:"5,keyasint,omitempty"` + + // WrappingPubkey specifies the ed25519 public key which must be used + // to sign a Signature which embeds this one. + // + // For SigRotation signatures multiple levels deep, intermediate + // signatures may omit this value, in which case the parent WrappingPubkey + // is used. + // + // SigCredential signatures use this field to specify the public key + // they are certifying, following the usual semanticsfor WrappingPubkey. 
+ WrappingPubkey []byte `cbor:"6,keyasint,omitempty"` +} + +type DeeplinkValidationResult struct { +} + +func (h *AUMHash) UnmarshalText(text []byte) error { + return errNoTailnetLock +} + +var errNoTailnetLock = errors.New("tailnet lock is not enabled") + +func DecodeWrappedAuthkey(wrappedAuthKey string, logf logger.Logf) (authKey string, isWrapped bool, sig *NodeKeySignature, priv ed25519.PrivateKey) { + return wrappedAuthKey, false, nil, nil +} + +func ResignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.MarshaledSignature) (tkatype.MarshaledSignature, error) { + return nil, nil +} + +func SignByCredential(privKey []byte, wrapped *NodeKeySignature, nodeKey key.NodePublic) (tkatype.MarshaledSignature, error) { + return nil, nil +} + +func (s NodeKeySignature) String() string { return "" } diff --git a/tka/sig.go b/tka/sig.go index c82f9715c33fb..7b1838d409130 100644 --- a/tka/sig.go +++ b/tka/sig.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/state.go b/tka/state.go index 0a459bd9a1b24..0a30c56a02fa8 100644 --- a/tka/state.go +++ b/tka/state.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/state_test.go b/tka/state_test.go index 060bd9350dd06..32b6563145ee7 100644 --- a/tka/state_test.go +++ b/tka/state_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/sync.go b/tka/sync.go index 6131f54d0dfca..6c2b7cbb8c81a 100644 --- a/tka/sync.go +++ b/tka/sync.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 
32d2215dec9a1..6c441669a6853 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/tka.go b/tka/tka.go index ade621bc689e3..3929ff22a607e 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + // Package tka (WIP) implements the Tailnet Key Authority. package tka diff --git a/types/netlogtype/netlogtype_test.go b/types/netlogtype/netlogtype_test.go index 7f29090c5f757..403cb950883c7 100644 --- a/types/netlogtype/netlogtype_test.go +++ b/types/netlogtype/netlogtype_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package netlogtype import ( From 0cc1b2ff76560ee4675909272fa37ba6b397744c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 12 Sep 2025 08:10:48 -0700 Subject: [PATCH 0327/1093] cmd/derper: add start of ACE support Updates tailscale/corp#32168 Updates tailscale/corp#32226 Change-Id: Ia46abcaa09dcfd53bf8d4699909537bacf84d57a Signed-off-by: Brad Fitzpatrick --- cmd/derper/ace.go | 50 +++++++++++++++++++++++++++++++++++++++++ cmd/derper/depaware.txt | 1 + cmd/derper/derper.go | 8 +++++++ 3 files changed, 59 insertions(+) create mode 100644 cmd/derper/ace.go diff --git a/cmd/derper/ace.go b/cmd/derper/ace.go new file mode 100644 index 0000000000000..301b029ccf1cf --- /dev/null +++ b/cmd/derper/ace.go @@ -0,0 +1,50 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// TODO: docs about all this + +package main + +import ( + "errors" + "fmt" + "net" + "net/http" + "strings" + + "tailscale.com/derp" + "tailscale.com/net/connectproxy" +) + +// serveConnect handles a CONNECT request for ACE support. 
+func serveConnect(s *derp.Server, w http.ResponseWriter, r *http.Request) { + if !*flagACEEnabled { + http.Error(w, "CONNECT not enabled", http.StatusForbidden) + return + } + if r.TLS == nil { + // This should already be enforced by the caller of serveConnect, but + // double check. + http.Error(w, "CONNECT requires TLS", http.StatusForbidden) + return + } + + ch := &connectproxy.Handler{ + Check: func(hostPort string) error { + host, port, err := net.SplitHostPort(hostPort) + if err != nil { + return err + } + if port != "443" { + return fmt.Errorf("only port 443 is allowed") + } + // TODO(bradfitz): make policy configurable from flags and/or come + // from local tailscaled nodeAttrs + if !strings.HasSuffix(host, ".tailscale.com") || strings.Contains(host, "derp") { + return errors.New("bad host") + } + return nil + }, + } + ch.ServeHTTP(w, r) +} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 8adb2d3382b13..61e42ede14a41 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -105,6 +105,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/cmd/derper+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + tailscale.com/net/connectproxy from tailscale.com/cmd/derper tailscale.com/net/dnscache from tailscale.com/derp/derphttp tailscale.com/net/ktimeout from tailscale.com/cmd/derper tailscale.com/net/netaddr from tailscale.com/ipn+ diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 7ea404beb50af..b25bf22de72d7 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -91,6 +91,9 @@ var ( tcpUserTimeout = flag.Duration("tcp-user-timeout", 15*time.Second, "TCP user timeout") // tcpWriteTimeout is the timeout for writing to client TCP connections. It does not apply to mesh connections. 
tcpWriteTimeout = flag.Duration("tcp-write-timeout", derp.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") + + // ACE + flagACEEnabled = flag.Bool("ace", false, "whether to enable embedded ACE server [experimental + in-development as of 2025-09-12; not yet documented]") ) var ( @@ -373,6 +376,11 @@ func main() { tlsRequestVersion.Add(label, 1) tlsActiveVersion.Add(label, 1) defer tlsActiveVersion.Add(label, -1) + + if r.Method == "CONNECT" { + serveConnect(s, w, r) + return + } } mux.ServeHTTP(w, r) From 7d2101f3520f16b86f2ed5e15f23c44d720534e6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 12 Sep 2025 17:09:23 -0700 Subject: [PATCH 0328/1093] cmd/omitsize: add flag to disable the removal table And remove a bogus omit feature from feature/featuretags. Updates #12614 Change-Id: I0a08183fb75c73ae75b6fd4216d134e352dcf5a0 Signed-off-by: Brad Fitzpatrick --- cmd/featuretags/featuretags.go | 22 ++++++++--------- cmd/omitsize/omitsize.go | 39 ++++++++++++++++++------------ feature/featuretags/featuretags.go | 33 +++++++++++++++++++++++-- 3 files changed, 65 insertions(+), 29 deletions(-) diff --git a/cmd/featuretags/featuretags.go b/cmd/featuretags/featuretags.go index c97d66c471328..5213fda4c1e47 100644 --- a/cmd/featuretags/featuretags.go +++ b/cmd/featuretags/featuretags.go @@ -35,16 +35,14 @@ func main() { return } - var keep = map[string]bool{} + var keep = map[featuretags.FeatureTag]bool{} for t := range strings.SplitSeq(*add, ",") { if t != "" { - keep[t] = true + keep[featuretags.FeatureTag(t)] = true } } var tags []string - if keep["cli"] { - // The "cli" --add value is special in that it's a build tag - // that adds something, rather than removes something. 
+ if keep[featuretags.CLI] { tags = append(tags, "ts_include_cli") } if *min { @@ -52,22 +50,24 @@ func main() { if f == "" { continue } - if !keep[f] { - tags = append(tags, "ts_omit_"+f) + if !keep[f] && f.IsOmittable() { + tags = append(tags, f.OmitTag()) } } } - for f := range strings.SplitSeq(*remove, ",") { - if f == "" { + for v := range strings.SplitSeq(*remove, ",") { + if v == "" { continue } + f := featuretags.FeatureTag(v) if _, ok := features[f]; !ok { log.Fatalf("unknown feature %q in --remove", f) } - tags = append(tags, "ts_omit_"+f) + tags = append(tags, f.OmitTag()) } + slices.Sort(tags) + tags = slices.Compact(tags) if len(tags) != 0 { fmt.Println(strings.Join(tags, ",")) } - } diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go index bce0729736490..841f3ab9e8c30 100644 --- a/cmd/omitsize/omitsize.go +++ b/cmd/omitsize/omitsize.go @@ -23,6 +23,8 @@ import ( var ( cacheDir = flag.String("cachedir", "", "if non-empty, use this directory to store cached size results to speed up subsequent runs. The tool does not consider the git status when deciding whether to use the cache. 
It's on you to nuke it between runs if the tree changed.") features = flag.String("features", "", "comma-separated list of features to consider, with or without the ts_omit_ prefix") + + showRemovals = flag.Bool("show-removals", false, "if true, show a table of sizes removing one feature at a time from the full set") ) func main() { @@ -31,7 +33,9 @@ func main() { var all []string if *features == "" { for k := range featuretags.Features { - all = append(all, "ts_omit_"+k) + if k.IsOmittable() { + all = append(all, k.OmitTag()) + } } } else { for v := range strings.SplitSeq(*features, ",") { @@ -49,27 +53,30 @@ func main() { baseC := measure("tailscale") baseBoth := measure("tailscaled", "ts_include_cli") - fmt.Printf("(a) starting with everything and removing a feature...\n\n") - - fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") - fmt.Printf("%9d %9d %9d\n", baseD, baseC, baseBoth) - minD := measure("tailscaled", all...) minC := measure("tailscale", all...) minBoth := measure("tailscaled", append(slices.Clone(all), "ts_include_cli")...) - fmt.Printf("-%8d -%8d -%8d omit-all\n", baseD-minD, baseC-minC, baseBoth-minBoth) - for _, t := range all { - sizeD := measure("tailscaled", t) - sizeC := measure("tailscale", t) - sizeBoth := measure("tailscaled", append([]string{t}, "ts_include_cli")...) 
- saveD := max(baseD-sizeD, 0) - saveC := max(baseC-sizeC, 0) - saveBoth := max(baseBoth-sizeBoth, 0) - fmt.Printf("-%8d -%8d -%8d %s\n", saveD, saveC, saveBoth, t) + if *showRemovals { + fmt.Printf("Starting with everything and removing a feature...\n\n") + + fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") + fmt.Printf("%9d %9d %9d\n", baseD, baseC, baseBoth) + + fmt.Printf("-%8d -%8d -%8d omit-all\n", baseD-minD, baseC-minC, baseBoth-minBoth) + + for _, t := range all { + sizeD := measure("tailscaled", t) + sizeC := measure("tailscale", t) + sizeBoth := measure("tailscaled", append([]string{t}, "ts_include_cli")...) + saveD := max(baseD-sizeD, 0) + saveC := max(baseC-sizeC, 0) + saveBoth := max(baseBoth-sizeBoth, 0) + fmt.Printf("-%8d -%8d -%8d %s\n", saveD, saveC, saveBoth, t) + } } - fmt.Printf("\n(b) or, starting at minimal and adding one feature back...\n") + fmt.Printf("\nStarting at a minimal binary and adding one feature back...\n") fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) for _, t := range all { diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 00ad0b4c24adf..87bc22fc64007 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -4,7 +4,37 @@ // The featuretags package is a registry of all the ts_omit-able build tags. package featuretags -var Features = map[string]string{ +// CLI is a special feature in the [Features] map that works opposite +// from the others: it is opt-in, rather than opt-out, having a different +// build tag format. +const CLI FeatureTag = "cli" + +// FeatureTag names a Tailscale feature that can be selectively added or removed +// via build tags. +type FeatureTag string + +// IsOmittable reports whether this feature tag is one that can be +// omitted via a ts_omit_ build tag. 
+func (ft FeatureTag) IsOmittable() bool { + switch ft { + case CLI: + return false + } + return true +} + +// OmitTag returns the ts_omit_ build tag for this feature tag. +// It panics if the feature tag is not omitable. +func (ft FeatureTag) OmitTag() string { + if !ft.IsOmittable() { + panic("not omitable: " + string(ft)) + } + return "ts_omit_" + string(ft) +} + +// Features are the known Tailscale features that can be selectively included or +// excluded via build tags, and a description of each. +var Features = map[FeatureTag]string{ "aws": "AWS integration", "bird": "Bird BGP integration", "capture": "Packet capture", @@ -21,7 +51,6 @@ var Features = map[string]string{ "taildrop": "Taildrop (file sending) support", "tailnetlock": "Tailnet Lock support", "tap": "Experimental Layer 2 (ethernet) support", - "tka": "Tailnet Lock (TKA) support", "tpm": "TPM support", "wakeonlan": "Wake-on-LAN support", "webclient": "Web client support", From 782c16c5138fb0f83ea80ed1793e3be93791d280 Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 15 Sep 2025 12:37:28 +0100 Subject: [PATCH 0329/1093] k8s-operator: reset service status before append (#17120) This commit fixes an issue within the service reconciler where we end up in a constant reconciliation loop. When reconciling, the loadbalancer status is appended to but not reset between each reconciliation, leading to an ever growing slice of duplicate statuses. 
Fixes https://github.com/tailscale/tailscale/issues/17105 Fixes https://github.com/tailscale/tailscale/issues/17107 Signed-off-by: David Bond --- cmd/k8s-operator/operator_test.go | 4 ++++ cmd/k8s-operator/svc.go | 10 ++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 50f8738cefc39..5af237342e8cd 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -173,6 +173,10 @@ func TestLoadBalancerClass(t *testing.T) { }, }, } + + // Perform an additional reconciliation loop here to ensure resources don't change through side effects. Mainly + // to prevent infinite reconciliation + expectReconciled(t, sr, "default", "test") expectEqual(t, fc, want) // Turn the service back into a ClusterIP service, which should make the diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 51ad1aea3c808..eec1924e7902c 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -348,9 +348,10 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga dev := devices[0] logger.Debugf("setting Service LoadBalancer status to %q, %s", dev.hostname, strings.Join(dev.ips, ", ")) - svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{ - Hostname: dev.hostname, - }) + + ingress := []corev1.LoadBalancerIngress{ + {Hostname: dev.hostname}, + } clusterIPAddr, err := netip.ParseAddr(svc.Spec.ClusterIP) if err != nil { @@ -365,10 +366,11 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga continue } if addr.Is4() == clusterIPAddr.Is4() { // only add addresses of the same family - svc.Status.LoadBalancer.Ingress = append(svc.Status.LoadBalancer.Ingress, corev1.LoadBalancerIngress{IP: ip}) + ingress = append(ingress, corev1.LoadBalancerIngress{IP: ip}) } } + svc.Status.LoadBalancer.Ingress = ingress tsoperator.SetServiceCondition(svc, 
tsapi.ProxyReady, metav1.ConditionTrue, reasonProxyCreated, reasonProxyCreated, a.clock, logger) return nil } From b816fd71176132ee6e2912f8bdb2e55e9eb181ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Mon, 15 Sep 2025 10:36:17 -0400 Subject: [PATCH 0330/1093] control/controlclient: introduce eventbus messages instead of callbacks (#16956) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a small introduction of the eventbus into controlclient that communicates with mainly ipnlocal. While ipnlocal is a complicated part of the codebase, the subscribers here are from the perspective of ipnlocal already called async. Updates #15160 Signed-off-by: Claus Lensbøl --- control/controlclient/auto.go | 6 +- control/controlclient/client.go | 3 + control/controlclient/controlclient_test.go | 7 + control/controlclient/direct.go | 174 +++++++++++--------- control/controlclient/direct_test.go | 5 + ipn/ipnlocal/expiry.go | 43 ++++- ipn/ipnlocal/expiry_test.go | 10 +- ipn/ipnlocal/local.go | 94 +++++++---- ipn/ipnlocal/local_test.go | 13 +- ipn/ipnlocal/network-lock_test.go | 3 + ipn/ipnlocal/serve_test.go | 29 ++-- ipn/ipnlocal/state_test.go | 31 ++-- ipn/localapi/localapi_test.go | 4 +- tsd/tsd.go | 12 +- util/eventbus/eventbustest/eventbustest.go | 2 +- 15 files changed, 293 insertions(+), 143 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index e6335e54d251b..7bca6c8d8b316 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -205,7 +205,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { } }) return c, nil - } // SetPaused controls whether HTTP activity should be paused. 
@@ -424,6 +423,11 @@ func (c *Auto) unpausedChanLocked() <-chan bool { return unpaused } +// ClientID returns the ClientID of the direct controlClient +func (c *Auto) ClientID() int64 { + return c.direct.ClientID() +} + // mapRoutineState is the state of Auto.mapRoutine while it's running. type mapRoutineState struct { c *Auto diff --git a/control/controlclient/client.go b/control/controlclient/client.go index 8df64f9e8139a..d0aa129ae95b4 100644 --- a/control/controlclient/client.go +++ b/control/controlclient/client.go @@ -81,6 +81,9 @@ type Client interface { // in a separate http request. It has nothing to do with the rest of // the state machine. UpdateEndpoints(endpoints []tailcfg.Endpoint) + // ClientID returns the ClientID of a client. This ID is meant to + // distinguish one client from another. + ClientID() int64 } // UserVisibleError is an error that should be shown to users. diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 792c26955e5d1..2efc27b5e8a19 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" ) func fieldsOf(t reflect.Type) (fields []string) { @@ -218,6 +219,8 @@ func TestDirectProxyManual(t *testing.T) { t.Skip("skipping without --live-network-test") } + bus := eventbustest.NewBus(t) + dialer := &tsdial.Dialer{} dialer.SetNetMon(netmon.NewStatic()) @@ -239,6 +242,7 @@ func TestDirectProxyManual(t *testing.T) { }, Dialer: dialer, ControlKnobs: &controlknobs.Knobs{}, + Bus: bus, } d, err := NewDirect(opts) if err != nil { @@ -263,6 +267,8 @@ func TestHTTPSWithProxy(t *testing.T) { testHTTPS(t, true) } func testHTTPS(t *testing.T, withProxy bool) { bakedroots.ResetForTest(t, tlstest.TestRootCA()) + bus := eventbustest.NewBus(t) + controlLn, err := tls.Listen("tcp", 
"127.0.0.1:0", tlstest.ControlPlane.ServerTLSConfig()) if err != nil { t.Fatal(err) @@ -327,6 +333,7 @@ func testHTTPS(t *testing.T, withProxy bool) { t.Logf("PopBrowserURL: %q", url) }, Dialer: dialer, + Bus: bus, } d, err := NewDirect(opts) if err != nil { diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 47283a673c935..b9e26cc9823cc 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -14,6 +14,7 @@ import ( "fmt" "io" "log" + "math/rand/v2" "net" "net/http" "net/netip" @@ -52,6 +53,7 @@ import ( "tailscale.com/types/ptr" "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/multierr" "tailscale.com/util/singleflight" "tailscale.com/util/syspolicy/pkey" @@ -63,30 +65,31 @@ import ( // Direct is the client that connects to a tailcontrol server for a node. type Direct struct { - httpc *http.Client // HTTP client used to talk to tailcontrol - interceptedDial *atomic.Bool // if non-nil, pointer to bool whether ScreenTime intercepted our dial - dialer *tsdial.Dialer - dnsCache *dnscache.Resolver - controlKnobs *controlknobs.Knobs // always non-nil - serverURL string // URL of the tailcontrol server - clock tstime.Clock - logf logger.Logf - netMon *netmon.Monitor // non-nil - health *health.Tracker - discoPubKey key.DiscoPublic - getMachinePrivKey func() (key.MachinePrivate, error) - debugFlags []string - skipIPForwardingCheck bool - pinger Pinger - polc policyclient.Client // always non-nil - popBrowser func(url string) // or nil - c2nHandler http.Handler // or nil - onClientVersion func(*tailcfg.ClientVersion) // or nil - onControlTime func(time.Time) // or nil - onTailnetDefaultAutoUpdate func(bool) // or nil - panicOnUse bool // if true, panic if client is used (for testing) - closedCtx context.Context // alive until Direct.Close is called - closeCtx context.CancelFunc // cancels closedCtx + httpc *http.Client // HTTP client used to 
talk to tailcontrol + interceptedDial *atomic.Bool // if non-nil, pointer to bool whether ScreenTime intercepted our dial + dialer *tsdial.Dialer + dnsCache *dnscache.Resolver + controlKnobs *controlknobs.Knobs // always non-nil + serverURL string // URL of the tailcontrol server + clock tstime.Clock + logf logger.Logf + netMon *netmon.Monitor // non-nil + health *health.Tracker + discoPubKey key.DiscoPublic + busClient *eventbus.Client + clientVersionPub *eventbus.Publisher[tailcfg.ClientVersion] + autoUpdatePub *eventbus.Publisher[AutoUpdate] + controlTimePub *eventbus.Publisher[ControlTime] + getMachinePrivKey func() (key.MachinePrivate, error) + debugFlags []string + skipIPForwardingCheck bool + pinger Pinger + popBrowser func(url string) // or nil + polc policyclient.Client // always non-nil + c2nHandler http.Handler // or nil + panicOnUse bool // if true, panic if client is used (for testing) + closedCtx context.Context // alive until Direct.Close is called + closeCtx context.CancelFunc // cancels closedCtx dialPlan ControlDialPlanner // can be nil @@ -107,6 +110,8 @@ type Direct struct { tkaHead string lastPingURL string // last PingRequest.URL received, for dup suppression connectionHandleForTest string // sent in MapRequest.ConnectionHandleForTest + + controlClientID int64 // Random ID used to differentiate clients for consumers of messages. 
} // Observer is implemented by users of the control client (such as LocalBackend) @@ -120,26 +125,24 @@ type Observer interface { } type Options struct { - Persist persist.Persist // initial persistent data - GetMachinePrivateKey func() (key.MachinePrivate, error) // returns the machine key to use - ServerURL string // URL of the tailcontrol server - AuthKey string // optional node auth key for auto registration - Clock tstime.Clock - Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc - DiscoPublicKey key.DiscoPublic - PolicyClient policyclient.Client // or nil for none - Logf logger.Logf - HTTPTestClient *http.Client // optional HTTP client to use (for tests only) - NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only) - DebugFlags []string // debug settings to send to control - HealthTracker *health.Tracker - PopBrowserURL func(url string) // optional func to open browser - OnClientVersion func(*tailcfg.ClientVersion) // optional func to inform GUI of client version status - OnControlTime func(time.Time) // optional func to notify callers of new time from control - OnTailnetDefaultAutoUpdate func(bool) // optional func to inform GUI of default auto-update setting for the tailnet - Dialer *tsdial.Dialer // non-nil - C2NHandler http.Handler // or nil - ControlKnobs *controlknobs.Knobs // or nil to ignore + Persist persist.Persist // initial persistent data + GetMachinePrivateKey func() (key.MachinePrivate, error) // returns the machine key to use + ServerURL string // URL of the tailcontrol server + AuthKey string // optional node auth key for auto registration + Clock tstime.Clock + Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc + DiscoPublicKey key.DiscoPublic + PolicyClient policyclient.Client // or nil for none + Logf logger.Logf + HTTPTestClient *http.Client // optional HTTP client to use (for tests only) + 
NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only) + DebugFlags []string // debug settings to send to control + HealthTracker *health.Tracker + PopBrowserURL func(url string) // optional func to open browser + Dialer *tsdial.Dialer // non-nil + C2NHandler http.Handler // or nil + ControlKnobs *controlknobs.Knobs // or nil to ignore + Bus *eventbus.Bus // Observer is called when there's a change in status to report // from the control client. @@ -287,33 +290,32 @@ func NewDirect(opts Options) (*Direct, error) { } c := &Direct{ - httpc: httpc, - interceptedDial: interceptedDial, - controlKnobs: opts.ControlKnobs, - getMachinePrivKey: opts.GetMachinePrivateKey, - serverURL: opts.ServerURL, - clock: opts.Clock, - logf: opts.Logf, - persist: opts.Persist.View(), - authKey: opts.AuthKey, - discoPubKey: opts.DiscoPublicKey, - debugFlags: opts.DebugFlags, - netMon: netMon, - health: opts.HealthTracker, - skipIPForwardingCheck: opts.SkipIPForwardingCheck, - pinger: opts.Pinger, - polc: cmp.Or(opts.PolicyClient, policyclient.Client(policyclient.NoPolicyClient{})), - popBrowser: opts.PopBrowserURL, - onClientVersion: opts.OnClientVersion, - onTailnetDefaultAutoUpdate: opts.OnTailnetDefaultAutoUpdate, - onControlTime: opts.OnControlTime, - c2nHandler: opts.C2NHandler, - dialer: opts.Dialer, - dnsCache: dnsCache, - dialPlan: opts.DialPlan, + httpc: httpc, + interceptedDial: interceptedDial, + controlKnobs: opts.ControlKnobs, + getMachinePrivKey: opts.GetMachinePrivateKey, + serverURL: opts.ServerURL, + clock: opts.Clock, + logf: opts.Logf, + persist: opts.Persist.View(), + authKey: opts.AuthKey, + discoPubKey: opts.DiscoPublicKey, + debugFlags: opts.DebugFlags, + netMon: netMon, + health: opts.HealthTracker, + skipIPForwardingCheck: opts.SkipIPForwardingCheck, + pinger: opts.Pinger, + polc: cmp.Or(opts.PolicyClient, policyclient.Client(policyclient.NoPolicyClient{})), + popBrowser: opts.PopBrowserURL, + c2nHandler: opts.C2NHandler, + dialer: 
opts.Dialer, + dnsCache: dnsCache, + dialPlan: opts.DialPlan, } c.closedCtx, c.closeCtx = context.WithCancel(context.Background()) + c.controlClientID = rand.Int64() + if opts.Hostinfo == nil { c.SetHostinfo(hostinfo.New()) } else { @@ -331,6 +333,12 @@ func NewDirect(opts Options) (*Direct, error) { if strings.Contains(opts.ServerURL, "controlplane.tailscale.com") && envknob.Bool("TS_PANIC_IF_HIT_MAIN_CONTROL") { c.panicOnUse = true } + + c.busClient = opts.Bus.Client("controlClient.direct") + c.clientVersionPub = eventbus.Publish[tailcfg.ClientVersion](c.busClient) + c.autoUpdatePub = eventbus.Publish[AutoUpdate](c.busClient) + c.controlTimePub = eventbus.Publish[ControlTime](c.busClient) + return c, nil } @@ -340,6 +348,7 @@ func (c *Direct) Close() error { c.mu.Lock() defer c.mu.Unlock() + c.busClient.Close() if c.noiseClient != nil { if err := c.noiseClient.Close(); err != nil { return err @@ -826,6 +835,23 @@ func (c *Direct) SendUpdate(ctx context.Context) error { return c.sendMapRequest(ctx, false, nil) } +// ClientID returns the ControlClientID of the controlClient +func (c *Direct) ClientID() int64 { + return c.controlClientID +} + +// AutoUpdate wraps a bool for naming on the eventbus +type AutoUpdate struct { + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct + Value bool +} + +// ControlTime wraps a [time.Time] for naming on the eventbus +type ControlTime struct { + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct + Value time.Time +} + // If we go more than watchdogTimeout without hearing from the server, // end the long poll. We should be receiving a keep alive ping // every minute. 
@@ -1085,14 +1111,12 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap c.logf("netmap: control says to open URL %v; no popBrowser func", u) } } - if resp.ClientVersion != nil && c.onClientVersion != nil { - c.onClientVersion(resp.ClientVersion) + if resp.ClientVersion != nil { + c.clientVersionPub.Publish(*resp.ClientVersion) } if resp.ControlTime != nil && !resp.ControlTime.IsZero() { c.logf.JSON(1, "controltime", resp.ControlTime.UTC()) - if c.onControlTime != nil { - c.onControlTime(*resp.ControlTime) - } + c.controlTimePub.Publish(ControlTime{c.controlClientID, *resp.ControlTime}) } if resp.KeepAlive { vlogf("netmap: got keep-alive") @@ -1112,9 +1136,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap continue } if au, ok := resp.DefaultAutoUpdate.Get(); ok { - if c.onTailnetDefaultAutoUpdate != nil { - c.onTailnetDefaultAutoUpdate(au) - } + c.autoUpdatePub.Publish(AutoUpdate{c.controlClientID, au}) } metricMapResponseMap.Add(1) diff --git a/control/controlclient/direct_test.go b/control/controlclient/direct_test.go index e2a6f9fa4b93f..bba76d6f05c0d 100644 --- a/control/controlclient/direct_test.go +++ b/control/controlclient/direct_test.go @@ -17,12 +17,14 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/util/eventbus/eventbustest" ) func TestNewDirect(t *testing.T) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() opts := Options{ @@ -32,6 +34,7 @@ func TestNewDirect(t *testing.T) { return k, nil }, Dialer: tsdial.NewDialer(netmon.NewStatic()), + Bus: bus, } c, err := NewDirect(opts) if err != nil { @@ -99,6 +102,7 @@ func TestTsmpPing(t *testing.T) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() opts := Options{ @@ -108,6 +112,7 @@ func TestTsmpPing(t *testing.T) { 
return k, nil }, Dialer: tsdial.NewDialer(netmon.NewStatic()), + Bus: bus, } c, err := NewDirect(opts) diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index d1119981594da..3d20d57b464e5 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -6,12 +6,14 @@ package ipnlocal import ( "time" + "tailscale.com/control/controlclient" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus" ) // For extra defense-in-depth, when we're testing expired nodes we check @@ -40,14 +42,46 @@ type expiryManager struct { logf logger.Logf clock tstime.Clock + + eventClient *eventbus.Client + controlTimeSub *eventbus.Subscriber[controlclient.ControlTime] + subsDoneCh chan struct{} // closed when consumeEventbusTopics returns } -func newExpiryManager(logf logger.Logf) *expiryManager { - return &expiryManager{ +func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { + em := &expiryManager{ previouslyExpired: map[tailcfg.StableNodeID]bool{}, logf: logf, clock: tstime.StdClock{}, } + + em.eventClient = bus.Client("ipnlocal.expiryManager") + em.controlTimeSub = eventbus.Subscribe[controlclient.ControlTime](em.eventClient) + + em.subsDoneCh = make(chan struct{}) + go em.consumeEventbusTopics() + + return em +} + +// consumeEventbusTopics consumes events from all relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [controlclient.ControlTime] subscriber is closed, which is interpreted to be the +// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either +// all open or all closed). 
+func (em *expiryManager) consumeEventbusTopics() { + defer close(em.subsDoneCh) + + for { + select { + case <-em.controlTimeSub.Done(): + return + case time := <-em.controlTimeSub.Events(): + em.onControlTime(time.Value) + } + } } // onControlTime is called whenever we receive a new timestamp from the control @@ -218,6 +252,11 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim return nextExpiry } +func (em *expiryManager) close() { + em.eventClient.Close() + <-em.subsDoneCh +} + // ControlNow estimates the current time on the control server, calculated as // localNow + the delta between local and control server clocks as recorded // when the LocalBackend last received a time message from the control server. diff --git a/ipn/ipnlocal/expiry_test.go b/ipn/ipnlocal/expiry_test.go index a2b10fe325b8a..2c646ca724efd 100644 --- a/ipn/ipnlocal/expiry_test.go +++ b/ipn/ipnlocal/expiry_test.go @@ -14,6 +14,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" ) func TestFlagExpiredPeers(t *testing.T) { @@ -110,7 +111,8 @@ func TestFlagExpiredPeers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) if tt.controlTime != nil { em.onControlTime(*tt.controlTime) @@ -240,7 +242,8 @@ func TestNextPeerExpiry(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) got := em.nextPeerExpiry(tt.netmap, now) if !got.Equal(tt.want) { @@ -253,7 +256,8 @@ func TestNextPeerExpiry(t *testing.T) { t.Run("ClockSkew", func(t *testing.T) { t.Logf("local time: %q", now.Format(time.RFC3339)) - em := 
newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) // The local clock is "running fast"; our clock skew is -2h diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 6108aa83061bc..c98a0810d2cd2 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -99,6 +99,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/deephash" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/goroutines" "tailscale.com/util/mak" "tailscale.com/util/multierr" @@ -202,6 +203,10 @@ type LocalBackend struct { keyLogf logger.Logf // for printing list of peers on change statsLogf logger.Logf // for printing peers stats on change sys *tsd.System + eventClient *eventbus.Client + clientVersionSub *eventbus.Subscriber[tailcfg.ClientVersion] + autoUpdateSub *eventbus.Subscriber[controlclient.AutoUpdate] + subsDoneCh chan struct{} // closed when consumeEventbusTopics returns health *health.Tracker // always non-nil polc policyclient.Client // always non-nil metrics metrics @@ -525,7 +530,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo backendLogID: logID, state: ipn.NoState, portpoll: new(portlist.Poller), - em: newExpiryManager(logf), + em: newExpiryManager(logf, sys.Bus.Get()), loginFlags: loginFlags, clock: clock, selfUpdateProgress: make([]ipnstate.UpdateProgress, 0), @@ -533,7 +538,11 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCtx: captiveCtx, captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), + subsDoneCh: make(chan struct{}), } + b.eventClient = b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") + b.clientVersionSub = eventbus.Subscribe[tailcfg.ClientVersion](b.eventClient) + b.autoUpdateSub = eventbus.Subscribe[controlclient.AutoUpdate](b.eventClient) nb := newNodeBackend(ctx, 
b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -604,9 +613,32 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } } + go b.consumeEventbusTopics() return b, nil } +// consumeEventbusTopics consumes events from all relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [tailcfg.ClientVersion] subscriber is closed, which is interpreted to be the +// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either +// all open or all closed). +func (b *LocalBackend) consumeEventbusTopics() { + defer close(b.subsDoneCh) + + for { + select { + case <-b.clientVersionSub.Done(): + return + case clientVersion := <-b.clientVersionSub.Events(): + b.onClientVersion(&clientVersion) + case au := <-b.autoUpdateSub.Events(): + b.onTailnetDefaultAutoUpdate(au.Value) + } + } +} + func (b *LocalBackend) Clock() tstime.Clock { return b.clock } func (b *LocalBackend) Sys() *tsd.System { return b.sys } @@ -1065,6 +1097,17 @@ func (b *LocalBackend) ClearCaptureSink() { // Shutdown halts the backend and all its sub-components. The backend // can no longer be used after Shutdown returns. func (b *LocalBackend) Shutdown() { + // Close the [eventbus.Client] and wait for LocalBackend.consumeEventbusTopics + // to return. Do this before acquiring b.mu: + // 1. LocalBackend.consumeEventbusTopics event handlers also acquire b.mu, + // they can deadlock with c.Shutdown(). + // 2. LocalBackend.consumeEventbusTopics event handlers may not guard against + // undesirable post/in-progress LocalBackend.Shutdown() behaviors. 
+ b.eventClient.Close() + <-b.subsDoneCh + + b.em.close() + b.mu.Lock() if b.shutdownCalled { b.mu.Unlock() @@ -2465,33 +2508,32 @@ func (b *LocalBackend) Start(opts ipn.Options) error { cb() } } + // TODO(apenwarr): The only way to change the ServerURL is to // re-run b.Start, because this is the only place we create a // new controlclient. EditPrefs allows you to overwrite ServerURL, // but it won't take effect until the next Start. cc, err := b.getNewControlClientFuncLocked()(controlclient.Options{ - GetMachinePrivateKey: b.createGetMachinePrivateKeyFunc(), - Logf: logger.WithPrefix(b.logf, "control: "), - Persist: *persistv, - ServerURL: serverURL, - AuthKey: opts.AuthKey, - Hostinfo: hostinfo, - HTTPTestClient: httpTestClient, - DiscoPublicKey: discoPublic, - DebugFlags: debugFlags, - HealthTracker: b.health, - PolicyClient: b.sys.PolicyClientOrDefault(), - Pinger: b, - PopBrowserURL: b.tellClientToBrowseToURL, - OnClientVersion: b.onClientVersion, - OnTailnetDefaultAutoUpdate: b.onTailnetDefaultAutoUpdate, - OnControlTime: b.em.onControlTime, - Dialer: b.Dialer(), - Observer: b, - C2NHandler: http.HandlerFunc(b.handleC2N), - DialPlan: &b.dialPlan, // pointer because it can't be copied - ControlKnobs: b.sys.ControlKnobs(), - Shutdown: ccShutdown, + GetMachinePrivateKey: b.createGetMachinePrivateKeyFunc(), + Logf: logger.WithPrefix(b.logf, "control: "), + Persist: *persistv, + ServerURL: serverURL, + AuthKey: opts.AuthKey, + Hostinfo: hostinfo, + HTTPTestClient: httpTestClient, + DiscoPublicKey: discoPublic, + DebugFlags: debugFlags, + HealthTracker: b.health, + PolicyClient: b.sys.PolicyClientOrDefault(), + Pinger: b, + PopBrowserURL: b.tellClientToBrowseToURL, + Dialer: b.Dialer(), + Observer: b, + C2NHandler: http.HandlerFunc(b.handleC2N), + DialPlan: &b.dialPlan, // pointer because it can't be copied + ControlKnobs: b.sys.ControlKnobs(), + Shutdown: ccShutdown, + Bus: b.sys.Bus.Get(), // Don't warn about broken Linux IP forwarding when // netstack is being 
used. @@ -4482,7 +4524,6 @@ func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change // but wasn't empty before, then the change disables // exit node usage. return tmpPrefs.ExitNodeID == "" - } // adjustEditPrefsLocked applies additional changes to mp if necessary, @@ -8001,7 +8042,6 @@ func isAllowedAutoExitNodeID(polc policyclient.Client, exitNodeID tailcfg.Stable } if nodes, _ := polc.GetStringArray(pkey.AllowedSuggestedExitNodes, nil); nodes != nil { return slices.Contains(nodes, string(exitNodeID)) - } return true // no policy configured; allow all exit nodes } @@ -8145,9 +8185,7 @@ func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcf return servicesList } -var ( - metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") -) +var metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") func (b *LocalBackend) stateEncrypted() opt.Bool { switch runtime.GOOS { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 7d1c452f30697..261d5c4c20682 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -59,6 +59,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/set" @@ -455,7 +456,8 @@ func (panicOnUseTransport) RoundTrip(*http.Request) (*http.Response, error) { } func newTestLocalBackend(t testing.TB) *LocalBackend { - return newTestLocalBackendWithSys(t, tsd.NewSystem()) + bus := eventbustest.NewBus(t) + return newTestLocalBackendWithSys(t, tsd.NewSystemWithBus(bus)) } // newTestLocalBackendWithSys creates a new LocalBackend with the given tsd.System. 
@@ -533,7 +535,6 @@ func TestZeroExitNodeViaLocalAPI(t *testing.T) { ExitNodeID: "", }, }, user) - if err != nil { t.Fatalf("enabling first exit node: %v", err) } @@ -543,7 +544,6 @@ func TestZeroExitNodeViaLocalAPI(t *testing.T) { if got, want := pv.InternalExitNodePrior(), tailcfg.StableNodeID(""); got != want { t.Fatalf("unexpected InternalExitNodePrior %q, want: %q", got, want) } - } func TestSetUseExitNodeEnabled(t *testing.T) { @@ -3619,7 +3619,8 @@ func TestPreferencePolicyInfo(t *testing.T) { prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) sys.PolicyClient.Set(polc) lb := newTestLocalBackendWithSys(t, sys) @@ -5786,7 +5787,8 @@ func TestNotificationTargetMatch(t *testing.T) { type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend { - return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystem(), newControl) + bus := eventbustest.NewBus(t) + return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystemWithBus(bus), newControl) } func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { @@ -5945,7 +5947,6 @@ func (w *notificationWatcher) watch(mask ipn.NotifyWatchOpt, wanted []wantedNoti return true }) - }() <-watchAddedCh } diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 842b75c437799..93ecd977f6152 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/tkatype" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/set" ) @@ -49,6 +50,7 @@ func fakeControlClient(t *testing.T, c 
*http.Client) *controlclient.Auto { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() opts := controlclient.Options{ @@ -61,6 +63,7 @@ func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { NoiseTestClient: c, Observer: observerFunc(func(controlclient.Status) {}), Dialer: tsdial.NewDialer(netmon.NewStatic()), + Bus: bus, } cc, err := controlclient.NewNoStart(opts) diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index e2561cba9ef22..86b56ab4b585f 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -33,6 +33,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/syspolicy/policyclient" @@ -240,11 +241,15 @@ func TestServeConfigForeground(t *testing.T) { err := b.SetServeConfig(&ipn.ServeConfig{ Foreground: map[string]*ipn.ServeConfig{ - session1: {TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {TCPForward: "http://localhost:3000"}}, + session1: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {TCPForward: "http://localhost:3000"}, + }, }, - session2: {TCP: map[uint16]*ipn.TCPPortHandler{ - 999: {TCPForward: "http://localhost:4000"}}, + session2: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 999: {TCPForward: "http://localhost:4000"}, + }, }, }, }, "") @@ -267,8 +272,10 @@ func TestServeConfigForeground(t *testing.T) { 5000: {TCPForward: "http://localhost:5000"}, }, Foreground: map[string]*ipn.ServeConfig{ - session2: {TCP: map[uint16]*ipn.TCPPortHandler{ - 999: {TCPForward: "http://localhost:4000"}}, + session2: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 999: {TCPForward: "http://localhost:4000"}, + }, }, }, }, "") @@ -491,7 +498,6 @@ func TestServeConfigServices(t *testing.T) { } }) } - } func TestServeConfigETag(t *testing.T) { @@ -659,6 +665,7 @@ func 
TestServeHTTPProxyPath(t *testing.T) { }) } } + func TestServeHTTPProxyHeaders(t *testing.T) { b := newTestBackend(t) @@ -859,7 +866,6 @@ func Test_reverseProxyConfiguration(t *testing.T) { wantsURL: mustCreateURL(t, "https://example3.com"), }, }) - } func mustCreateURL(t *testing.T, u string) url.URL { @@ -878,7 +884,8 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { logf = logger.WithPrefix(tstest.WhileTestRunningLogger(t), "... ") } - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) for _, o := range opts { switch v := o.(type) { @@ -952,13 +959,13 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { func TestServeFileOrDirectory(t *testing.T) { td := t.TempDir() writeFile := func(suffix, contents string) { - if err := os.WriteFile(filepath.Join(td, suffix), []byte(contents), 0600); err != nil { + if err := os.WriteFile(filepath.Join(td, suffix), []byte(contents), 0o600); err != nil { t.Fatal(err) } } writeFile("foo", "this is foo") writeFile("bar", "this is bar") - os.MkdirAll(filepath.Join(td, "subdir"), 0700) + os.MkdirAll(filepath.Join(td, "subdir"), 0o700) writeFile("subdir/file-a", "this is A") writeFile("subdir/file-b", "this is B") writeFile("subdir/file-c", "this is C") diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 4097a37735b5c..30538f2c824e1 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "math/rand/v2" "net/netip" "strings" "sync" @@ -39,6 +40,7 @@ import ( "tailscale.com/types/persist" "tailscale.com/types/preftype" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/wgengine" @@ -113,10 +115,11 @@ func (nt *notifyThrottler) drain(count int) []ipn.Notify { // in the controlclient.Client, so by controlling it, we can check that // the state machine works as expected. 
type mockControl struct { - tb testing.TB - logf logger.Logf - opts controlclient.Options - paused atomic.Bool + tb testing.TB + logf logger.Logf + opts controlclient.Options + paused atomic.Bool + controlClientID int64 mu sync.Mutex persist *persist.Persist @@ -127,12 +130,13 @@ type mockControl struct { func newClient(tb testing.TB, opts controlclient.Options) *mockControl { return &mockControl{ - tb: tb, - authBlocked: true, - logf: opts.Logf, - opts: opts, - shutdown: make(chan struct{}), - persist: opts.Persist.Clone(), + tb: tb, + authBlocked: true, + logf: opts.Logf, + opts: opts, + shutdown: make(chan struct{}), + persist: opts.Persist.Clone(), + controlClientID: rand.Int64(), } } @@ -287,6 +291,10 @@ func (cc *mockControl) UpdateEndpoints(endpoints []tailcfg.Endpoint) { cc.called("UpdateEndpoints") } +func (cc *mockControl) ClientID() int64 { + return cc.controlClientID +} + func (b *LocalBackend) nonInteractiveLoginForStateTest() { b.mu.Lock() if b.cc == nil { @@ -1507,7 +1515,8 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( dialer := &tsdial.Dialer{Logf: logf} dialer.SetNetMon(netmon.NewStatic()) - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) sys.Set(dialer) sys.Set(dialer.NetMon()) diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 970f798d05005..046eb744d460a 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/slicesx" "tailscale.com/wgengine" ) @@ -158,7 +159,6 @@ func TestWhoIsArgTypes(t *testing.T) { t.Fatalf("backend called with %v; want %v", k, keyStr) } return match() - }, peerCaps: map[netip.Addr]tailcfg.PeerCapMap{ netip.MustParseAddr("100.101.102.103"): map[tailcfg.PeerCapability][]tailcfg.RawMessage{ @@ -336,7 +336,7 @@ func 
TestServeWatchIPNBus(t *testing.T) { func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { var logf logger.Logf = logger.Discard - sys := tsd.NewSystem() + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) store := new(mem.Store) sys.Set(store) eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) diff --git a/tsd/tsd.go b/tsd/tsd.go index bd333bd31b027..e4a512e4b6eba 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -80,9 +80,17 @@ type System struct { // NewSystem constructs a new otherwise-empty [System] with a // freshly-constructed event bus populated. -func NewSystem() *System { +func NewSystem() *System { return NewSystemWithBus(eventbus.New()) } + +// NewSystemWithBus constructs a new otherwise-empty [System] with an +// eventbus provided by the caller. The provided bus must not be nil. +// This is mainly intended for testing; for production use call [NewBus]. +func NewSystemWithBus(bus *eventbus.Bus) *System { + if bus == nil { + panic("nil eventbus") + } sys := new(System) - sys.Set(eventbus.New()) + sys.Set(bus) return sys } diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index 98536ae0affc8..b7375adc40ed3 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -15,7 +15,7 @@ import ( // NewBus constructs an [eventbus.Bus] that will be shut automatically when // its controlling test ends. 
-func NewBus(t *testing.T) *eventbus.Bus { +func NewBus(t testing.TB) *eventbus.Bus { bus := eventbus.New() t.Cleanup(bus.Close) return bus From 082c6a25b02fc4caeea8cfeb1705b00f52dc132e Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 15 Sep 2025 09:04:00 -0700 Subject: [PATCH 0331/1093] client/systray: only send clipboard notification on success Fixes #14430 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index bd7c1597204ed..536cfe1825cd5 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -540,9 +540,9 @@ func (menu *Menu) copyTailscaleIP(device *ipnstate.PeerStatus) { err := clipboard.WriteAll(ip) if err != nil { log.Printf("clipboard error: %v", err) + } else { + menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) } - - menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) } // sendNotification sends a desktop notification with the given title and content. From 17ffa8013835d41801f3a18ea957ddab0215d247 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 13 Sep 2025 20:20:08 -0700 Subject: [PATCH 0332/1093] feature/featuretags: add auto-generated constants for all modular features So code (in upcoming PRs) can test for the build tags with consts and get dead code elimination from the compiler+linker. 
Updates #12614 Change-Id: If6160453ffd01b798f09894141e7631a93385941 Signed-off-by: Brad Fitzpatrick --- cmd/featuretags/featuretags.go | 2 +- feature/featuretags/feature_aws_disabled.go | 13 +++++ feature/featuretags/feature_aws_enabled.go | 13 +++++ feature/featuretags/feature_bird_disabled.go | 13 +++++ feature/featuretags/feature_bird_enabled.go | 13 +++++ .../featuretags/feature_capture_disabled.go | 13 +++++ .../featuretags/feature_capture_enabled.go | 13 +++++ .../feature_completion_disabled.go | 13 +++++ .../featuretags/feature_completion_enabled.go | 13 +++++ .../feature_debugeventbus_disabled.go | 13 +++++ .../feature_debugeventbus_enabled.go | 13 +++++ .../feature_desktop_sessions_disabled.go | 13 +++++ .../feature_desktop_sessions_enabled.go | 13 +++++ feature/featuretags/feature_drive_disabled.go | 13 +++++ feature/featuretags/feature_drive_enabled.go | 13 +++++ feature/featuretags/feature_kube_disabled.go | 13 +++++ feature/featuretags/feature_kube_enabled.go | 13 +++++ .../feature_relayserver_disabled.go | 13 +++++ .../feature_relayserver_enabled.go | 13 +++++ feature/featuretags/feature_serve_disabled.go | 13 +++++ feature/featuretags/feature_serve_enabled.go | 13 +++++ feature/featuretags/feature_ssh_disabled.go | 13 +++++ feature/featuretags/feature_ssh_enabled.go | 13 +++++ .../featuretags/feature_syspolicy_disabled.go | 13 +++++ .../featuretags/feature_syspolicy_enabled.go | 13 +++++ .../featuretags/feature_systray_disabled.go | 13 +++++ .../featuretags/feature_systray_enabled.go | 13 +++++ .../featuretags/feature_taildrop_disabled.go | 13 +++++ .../featuretags/feature_taildrop_enabled.go | 13 +++++ .../feature_tailnetlock_disabled.go | 13 +++++ .../feature_tailnetlock_enabled.go | 13 +++++ feature/featuretags/feature_tap_disabled.go | 13 +++++ feature/featuretags/feature_tap_enabled.go | 13 +++++ feature/featuretags/feature_tpm_disabled.go | 13 +++++ feature/featuretags/feature_tpm_enabled.go | 13 +++++ 
.../featuretags/feature_wakeonlan_disabled.go | 13 +++++ .../featuretags/feature_wakeonlan_enabled.go | 13 +++++ .../featuretags/feature_webclient_disabled.go | 13 +++++ .../featuretags/feature_webclient_enabled.go | 13 +++++ feature/featuretags/featuretags.go | 50 +++++++++++-------- feature/featuretags/gen-featuretags.go | 49 ++++++++++++++++++ 41 files changed, 574 insertions(+), 21 deletions(-) create mode 100644 feature/featuretags/feature_aws_disabled.go create mode 100644 feature/featuretags/feature_aws_enabled.go create mode 100644 feature/featuretags/feature_bird_disabled.go create mode 100644 feature/featuretags/feature_bird_enabled.go create mode 100644 feature/featuretags/feature_capture_disabled.go create mode 100644 feature/featuretags/feature_capture_enabled.go create mode 100644 feature/featuretags/feature_completion_disabled.go create mode 100644 feature/featuretags/feature_completion_enabled.go create mode 100644 feature/featuretags/feature_debugeventbus_disabled.go create mode 100644 feature/featuretags/feature_debugeventbus_enabled.go create mode 100644 feature/featuretags/feature_desktop_sessions_disabled.go create mode 100644 feature/featuretags/feature_desktop_sessions_enabled.go create mode 100644 feature/featuretags/feature_drive_disabled.go create mode 100644 feature/featuretags/feature_drive_enabled.go create mode 100644 feature/featuretags/feature_kube_disabled.go create mode 100644 feature/featuretags/feature_kube_enabled.go create mode 100644 feature/featuretags/feature_relayserver_disabled.go create mode 100644 feature/featuretags/feature_relayserver_enabled.go create mode 100644 feature/featuretags/feature_serve_disabled.go create mode 100644 feature/featuretags/feature_serve_enabled.go create mode 100644 feature/featuretags/feature_ssh_disabled.go create mode 100644 feature/featuretags/feature_ssh_enabled.go create mode 100644 feature/featuretags/feature_syspolicy_disabled.go create mode 100644 
feature/featuretags/feature_syspolicy_enabled.go create mode 100644 feature/featuretags/feature_systray_disabled.go create mode 100644 feature/featuretags/feature_systray_enabled.go create mode 100644 feature/featuretags/feature_taildrop_disabled.go create mode 100644 feature/featuretags/feature_taildrop_enabled.go create mode 100644 feature/featuretags/feature_tailnetlock_disabled.go create mode 100644 feature/featuretags/feature_tailnetlock_enabled.go create mode 100644 feature/featuretags/feature_tap_disabled.go create mode 100644 feature/featuretags/feature_tap_enabled.go create mode 100644 feature/featuretags/feature_tpm_disabled.go create mode 100644 feature/featuretags/feature_tpm_enabled.go create mode 100644 feature/featuretags/feature_wakeonlan_disabled.go create mode 100644 feature/featuretags/feature_wakeonlan_enabled.go create mode 100644 feature/featuretags/feature_webclient_disabled.go create mode 100644 feature/featuretags/feature_webclient_enabled.go create mode 100644 feature/featuretags/gen-featuretags.go diff --git a/cmd/featuretags/featuretags.go b/cmd/featuretags/featuretags.go index 5213fda4c1e47..c34adbb3f1f3e 100644 --- a/cmd/featuretags/featuretags.go +++ b/cmd/featuretags/featuretags.go @@ -30,7 +30,7 @@ func main() { if *list { for _, f := range slices.Sorted(maps.Keys(features)) { - fmt.Printf("%20s: %s\n", f, features[f]) + fmt.Printf("%20s: %s\n", f, features[f].Desc) } return } diff --git a/feature/featuretags/feature_aws_disabled.go b/feature/featuretags/feature_aws_disabled.go new file mode 100644 index 0000000000000..045feb269844e --- /dev/null +++ b/feature/featuretags/feature_aws_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_aws + +package featuretags + +// AWS is whether the binary was built with support for modular feature "AWS integration". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. +// It's a const so it can be used for dead code elimination. +const AWS = false diff --git a/feature/featuretags/feature_aws_enabled.go b/feature/featuretags/feature_aws_enabled.go new file mode 100644 index 0000000000000..d935c9d262cef --- /dev/null +++ b/feature/featuretags/feature_aws_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_aws + +package featuretags + +// AWS is whether the binary was built with support for modular feature "AWS integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. +// It's a const so it can be used for dead code elimination. +const AWS = true diff --git a/feature/featuretags/feature_bird_disabled.go b/feature/featuretags/feature_bird_disabled.go new file mode 100644 index 0000000000000..986c984584f9f --- /dev/null +++ b/feature/featuretags/feature_bird_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_bird + +package featuretags + +// Bird is whether the binary was built with support for modular feature "Bird BGP integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. +// It's a const so it can be used for dead code elimination. +const Bird = false diff --git a/feature/featuretags/feature_bird_enabled.go b/feature/featuretags/feature_bird_enabled.go new file mode 100644 index 0000000000000..ac9404704e880 --- /dev/null +++ b/feature/featuretags/feature_bird_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. 
+ +//go:build !ts_omit_bird + +package featuretags + +// Bird is whether the binary was built with support for modular feature "Bird BGP integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. +// It's a const so it can be used for dead code elimination. +const Bird = true diff --git a/feature/featuretags/feature_capture_disabled.go b/feature/featuretags/feature_capture_disabled.go new file mode 100644 index 0000000000000..cee42454291f7 --- /dev/null +++ b/feature/featuretags/feature_capture_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_capture + +package featuretags + +// Capture is whether the binary was built with support for modular feature "Packet capture". +// Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. +// It's a const so it can be used for dead code elimination. +const Capture = false diff --git a/feature/featuretags/feature_capture_enabled.go b/feature/featuretags/feature_capture_enabled.go new file mode 100644 index 0000000000000..40aabf11064c8 --- /dev/null +++ b/feature/featuretags/feature_capture_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_capture + +package featuretags + +// Capture is whether the binary was built with support for modular feature "Packet capture". +// Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. +// It's a const so it can be used for dead code elimination. 
+const Capture = true diff --git a/feature/featuretags/feature_completion_disabled.go b/feature/featuretags/feature_completion_disabled.go new file mode 100644 index 0000000000000..7b3f3cb6dcfbf --- /dev/null +++ b/feature/featuretags/feature_completion_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_completion + +package featuretags + +// Completion is whether the binary was built with support for modular feature "CLI shell completion". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. +// It's a const so it can be used for dead code elimination. +const Completion = false diff --git a/feature/featuretags/feature_completion_enabled.go b/feature/featuretags/feature_completion_enabled.go new file mode 100644 index 0000000000000..b6d5218f2f8e0 --- /dev/null +++ b/feature/featuretags/feature_completion_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_completion + +package featuretags + +// Completion is whether the binary was built with support for modular feature "CLI shell completion". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. +// It's a const so it can be used for dead code elimination. +const Completion = true diff --git a/feature/featuretags/feature_debugeventbus_disabled.go b/feature/featuretags/feature_debugeventbus_disabled.go new file mode 100644 index 0000000000000..c826de6912e30 --- /dev/null +++ b/feature/featuretags/feature_debugeventbus_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. 
+ +//go:build ts_omit_debugeventbus + +package featuretags + +// DebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. +// It's a const so it can be used for dead code elimination. +const DebugEventBus = false diff --git a/feature/featuretags/feature_debugeventbus_enabled.go b/feature/featuretags/feature_debugeventbus_enabled.go new file mode 100644 index 0000000000000..068efa8598856 --- /dev/null +++ b/feature/featuretags/feature_debugeventbus_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_debugeventbus + +package featuretags + +// DebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. +// It's a const so it can be used for dead code elimination. +const DebugEventBus = true diff --git a/feature/featuretags/feature_desktop_sessions_disabled.go b/feature/featuretags/feature_desktop_sessions_disabled.go new file mode 100644 index 0000000000000..73644d91190b8 --- /dev/null +++ b/feature/featuretags/feature_desktop_sessions_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_desktop_sessions + +package featuretags + +// DesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. +// It's a const so it can be used for dead code elimination. 
+const DesktopSessions = false diff --git a/feature/featuretags/feature_desktop_sessions_enabled.go b/feature/featuretags/feature_desktop_sessions_enabled.go new file mode 100644 index 0000000000000..93c776a047dc2 --- /dev/null +++ b/feature/featuretags/feature_desktop_sessions_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_desktop_sessions + +package featuretags + +// DesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. +// It's a const so it can be used for dead code elimination. +const DesktopSessions = true diff --git a/feature/featuretags/feature_drive_disabled.go b/feature/featuretags/feature_drive_disabled.go new file mode 100644 index 0000000000000..550ed0bd16a6d --- /dev/null +++ b/feature/featuretags/feature_drive_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_drive + +package featuretags + +// Drive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. +// It's a const so it can be used for dead code elimination. +const Drive = false diff --git a/feature/featuretags/feature_drive_enabled.go b/feature/featuretags/feature_drive_enabled.go new file mode 100644 index 0000000000000..2ed83b271c7d2 --- /dev/null +++ b/feature/featuretags/feature_drive_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. 
+ +//go:build !ts_omit_drive + +package featuretags + +// Drive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. +// It's a const so it can be used for dead code elimination. +const Drive = true diff --git a/feature/featuretags/feature_kube_disabled.go b/feature/featuretags/feature_kube_disabled.go new file mode 100644 index 0000000000000..3a140e8693ad2 --- /dev/null +++ b/feature/featuretags/feature_kube_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_kube + +package featuretags + +// Kube is whether the binary was built with support for modular feature "Kubernetes integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. +// It's a const so it can be used for dead code elimination. +const Kube = false diff --git a/feature/featuretags/feature_kube_enabled.go b/feature/featuretags/feature_kube_enabled.go new file mode 100644 index 0000000000000..1dd119a2b2431 --- /dev/null +++ b/feature/featuretags/feature_kube_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_kube + +package featuretags + +// Kube is whether the binary was built with support for modular feature "Kubernetes integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. +// It's a const so it can be used for dead code elimination. 
+const Kube = true diff --git a/feature/featuretags/feature_relayserver_disabled.go b/feature/featuretags/feature_relayserver_disabled.go new file mode 100644 index 0000000000000..e6122ef9cb657 --- /dev/null +++ b/feature/featuretags/feature_relayserver_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_relayserver + +package featuretags + +// RelayServer is whether the binary was built with support for modular feature "Relay server". +// Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. +// It's a const so it can be used for dead code elimination. +const RelayServer = false diff --git a/feature/featuretags/feature_relayserver_enabled.go b/feature/featuretags/feature_relayserver_enabled.go new file mode 100644 index 0000000000000..34ed23a847f28 --- /dev/null +++ b/feature/featuretags/feature_relayserver_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_relayserver + +package featuretags + +// RelayServer is whether the binary was built with support for modular feature "Relay server". +// Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. +// It's a const so it can be used for dead code elimination. +const RelayServer = true diff --git a/feature/featuretags/feature_serve_disabled.go b/feature/featuretags/feature_serve_disabled.go new file mode 100644 index 0000000000000..a143e951f7ddf --- /dev/null +++ b/feature/featuretags/feature_serve_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. 
+ +//go:build ts_omit_serve + +package featuretags + +// Serve is whether the binary was built with support for modular feature "Serve and Funnel support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. +// It's a const so it can be used for dead code elimination. +const Serve = false diff --git a/feature/featuretags/feature_serve_enabled.go b/feature/featuretags/feature_serve_enabled.go new file mode 100644 index 0000000000000..1d1af0809dcb8 --- /dev/null +++ b/feature/featuretags/feature_serve_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_serve + +package featuretags + +// Serve is whether the binary was built with support for modular feature "Serve and Funnel support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. +// It's a const so it can be used for dead code elimination. +const Serve = true diff --git a/feature/featuretags/feature_ssh_disabled.go b/feature/featuretags/feature_ssh_disabled.go new file mode 100644 index 0000000000000..c22be29453cc0 --- /dev/null +++ b/feature/featuretags/feature_ssh_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_ssh + +package featuretags + +// SSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. +// It's a const so it can be used for dead code elimination. 
+const SSH = false diff --git a/feature/featuretags/feature_ssh_enabled.go b/feature/featuretags/feature_ssh_enabled.go new file mode 100644 index 0000000000000..52fa10b581e24 --- /dev/null +++ b/feature/featuretags/feature_ssh_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_ssh + +package featuretags + +// SSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. +// It's a const so it can be used for dead code elimination. +const SSH = true diff --git a/feature/featuretags/feature_syspolicy_disabled.go b/feature/featuretags/feature_syspolicy_disabled.go new file mode 100644 index 0000000000000..db73b02612ca3 --- /dev/null +++ b/feature/featuretags/feature_syspolicy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_syspolicy + +package featuretags + +// SystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. +// It's a const so it can be used for dead code elimination. +const SystemPolicy = false diff --git a/feature/featuretags/feature_syspolicy_enabled.go b/feature/featuretags/feature_syspolicy_enabled.go new file mode 100644 index 0000000000000..2ad332676a474 --- /dev/null +++ b/feature/featuretags/feature_syspolicy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. 
+ +//go:build !ts_omit_syspolicy + +package featuretags + +// SystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. +// It's a const so it can be used for dead code elimination. +const SystemPolicy = true diff --git a/feature/featuretags/feature_systray_disabled.go b/feature/featuretags/feature_systray_disabled.go new file mode 100644 index 0000000000000..a358bbf6fd657 --- /dev/null +++ b/feature/featuretags/feature_systray_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_systray + +package featuretags + +// SysTray is whether the binary was built with support for modular feature "Linux system tray". +// Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. +// It's a const so it can be used for dead code elimination. +const SysTray = false diff --git a/feature/featuretags/feature_systray_enabled.go b/feature/featuretags/feature_systray_enabled.go new file mode 100644 index 0000000000000..aebf3ad9e47fb --- /dev/null +++ b/feature/featuretags/feature_systray_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_systray + +package featuretags + +// SysTray is whether the binary was built with support for modular feature "Linux system tray". +// Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. +// It's a const so it can be used for dead code elimination. 
+const SysTray = true diff --git a/feature/featuretags/feature_taildrop_disabled.go b/feature/featuretags/feature_taildrop_disabled.go new file mode 100644 index 0000000000000..5c95c28b6624b --- /dev/null +++ b/feature/featuretags/feature_taildrop_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_taildrop + +package featuretags + +// Taildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. +// It's a const so it can be used for dead code elimination. +const Taildrop = false diff --git a/feature/featuretags/feature_taildrop_enabled.go b/feature/featuretags/feature_taildrop_enabled.go new file mode 100644 index 0000000000000..e5212f03a06ab --- /dev/null +++ b/feature/featuretags/feature_taildrop_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_taildrop + +package featuretags + +// Taildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. +// It's a const so it can be used for dead code elimination. +const Taildrop = true diff --git a/feature/featuretags/feature_tailnetlock_disabled.go b/feature/featuretags/feature_tailnetlock_disabled.go new file mode 100644 index 0000000000000..2a07233decb9c --- /dev/null +++ b/feature/featuretags/feature_tailnetlock_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. 
+ +//go:build ts_omit_tailnetlock + +package featuretags + +// TailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. +// It's a const so it can be used for dead code elimination. +const TailnetLock = false diff --git a/feature/featuretags/feature_tailnetlock_enabled.go b/feature/featuretags/feature_tailnetlock_enabled.go new file mode 100644 index 0000000000000..1abf0c3bcba18 --- /dev/null +++ b/feature/featuretags/feature_tailnetlock_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_tailnetlock + +package featuretags + +// TailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. +// It's a const so it can be used for dead code elimination. +const TailnetLock = true diff --git a/feature/featuretags/feature_tap_disabled.go b/feature/featuretags/feature_tap_disabled.go new file mode 100644 index 0000000000000..d4dfded2b29aa --- /dev/null +++ b/feature/featuretags/feature_tap_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_tap + +package featuretags + +// Tap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. +// It's a const so it can be used for dead code elimination. 
+const Tap = false diff --git a/feature/featuretags/feature_tap_enabled.go b/feature/featuretags/feature_tap_enabled.go new file mode 100644 index 0000000000000..a6ce1415c764b --- /dev/null +++ b/feature/featuretags/feature_tap_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_tap + +package featuretags + +// Tap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. +// It's a const so it can be used for dead code elimination. +const Tap = true diff --git a/feature/featuretags/feature_tpm_disabled.go b/feature/featuretags/feature_tpm_disabled.go new file mode 100644 index 0000000000000..15d888cfead9a --- /dev/null +++ b/feature/featuretags/feature_tpm_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_tpm + +package featuretags + +// TPM is whether the binary was built with support for modular feature "TPM support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. +// It's a const so it can be used for dead code elimination. +const TPM = false diff --git a/feature/featuretags/feature_tpm_enabled.go b/feature/featuretags/feature_tpm_enabled.go new file mode 100644 index 0000000000000..3525f744c9e4b --- /dev/null +++ b/feature/featuretags/feature_tpm_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_tpm + +package featuretags + +// TPM is whether the binary was built with support for modular feature "TPM support". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. +// It's a const so it can be used for dead code elimination. +const TPM = true diff --git a/feature/featuretags/feature_wakeonlan_disabled.go b/feature/featuretags/feature_wakeonlan_disabled.go new file mode 100644 index 0000000000000..7b2b39c443374 --- /dev/null +++ b/feature/featuretags/feature_wakeonlan_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_wakeonlan + +package featuretags + +// WakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. +// It's a const so it can be used for dead code elimination. +const WakeOnLAN = false diff --git a/feature/featuretags/feature_wakeonlan_enabled.go b/feature/featuretags/feature_wakeonlan_enabled.go new file mode 100644 index 0000000000000..87eed5abf194d --- /dev/null +++ b/feature/featuretags/feature_wakeonlan_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_wakeonlan + +package featuretags + +// WakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. +// It's a const so it can be used for dead code elimination. 
+const WakeOnLAN = true diff --git a/feature/featuretags/feature_webclient_disabled.go b/feature/featuretags/feature_webclient_disabled.go new file mode 100644 index 0000000000000..d49cbf8a71193 --- /dev/null +++ b/feature/featuretags/feature_webclient_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build ts_omit_webclient + +package featuretags + +// WebClient is whether the binary was built with support for modular feature "Web client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. +// It's a const so it can be used for dead code elimination. +const WebClient = false diff --git a/feature/featuretags/feature_webclient_enabled.go b/feature/featuretags/feature_webclient_enabled.go new file mode 100644 index 0000000000000..020ff64a05b14 --- /dev/null +++ b/feature/featuretags/feature_webclient_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen-featuretags.go; DO NOT EDIT. + +//go:build !ts_omit_webclient + +package featuretags + +// WebClient is whether the binary was built with support for modular feature "Web client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. +// It's a const so it can be used for dead code elimination. +const WebClient = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 87bc22fc64007..55945075b5c7d 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:generate go run gen-featuretags.go + // The featuretags package is a registry of all the ts_omit-able build tags. 
package featuretags @@ -32,26 +34,34 @@ func (ft FeatureTag) OmitTag() string { return "ts_omit_" + string(ft) } +// FeatureMeta describes a modular feature that can be conditionally linked into +// the binary. +type FeatureMeta struct { + Sym string // exported Go symbol for boolean const + Desc string // human-readable description +} + // Features are the known Tailscale features that can be selectively included or // excluded via build tags, and a description of each. -var Features = map[FeatureTag]string{ - "aws": "AWS integration", - "bird": "Bird BGP integration", - "capture": "Packet capture", - "cli": "embed the CLI into the tailscaled binary", - "completion": "CLI shell completion", - "debugeventbus": "eventbus debug support", - "desktop_sessions": "Desktop sessions support", - "drive": "Tailscale Drive (file server) support", - "kube": "Kubernetes integration", - "relayserver": "Relay server", - "ssh": "Tailscale SSH support", - "syspolicy": "System policy configuration (MDM) support", - "systray": "Linux system tray", - "taildrop": "Taildrop (file sending) support", - "tailnetlock": "Tailnet Lock support", - "tap": "Experimental Layer 2 (ethernet) support", - "tpm": "TPM support", - "wakeonlan": "Wake-on-LAN support", - "webclient": "Web client support", +var Features = map[FeatureTag]FeatureMeta{ + "aws": {"AWS", "AWS integration"}, + "bird": {"Bird", "Bird BGP integration"}, + "capture": {"Capture", "Packet capture"}, + "cli": {"CLI", "embed the CLI into the tailscaled binary"}, + "completion": {"Completion", "CLI shell completion"}, + "debugeventbus": {"DebugEventBus", "eventbus debug support"}, + "desktop_sessions": {"DesktopSessions", "Desktop sessions support"}, + "drive": {"Drive", "Tailscale Drive (file server) support"}, + "kube": {"Kube", "Kubernetes integration"}, + "relayserver": {"RelayServer", "Relay server"}, + "serve": {"Serve", "Serve and Funnel support"}, + "ssh": {"SSH", "Tailscale SSH support"}, + "syspolicy": {"SystemPolicy", "System 
policy configuration (MDM) support"}, + "systray": {"SysTray", "Linux system tray"}, + "taildrop": {"Taildrop", "Taildrop (file sending) support"}, + "tailnetlock": {"TailnetLock", "Tailnet Lock support"}, + "tap": {"Tap", "Experimental Layer 2 (ethernet) support"}, + "tpm": {"TPM", "TPM support"}, + "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support"}, + "webclient": {"WebClient", "Web client support"}, } diff --git a/feature/featuretags/gen-featuretags.go b/feature/featuretags/gen-featuretags.go new file mode 100644 index 0000000000000..27701fb78d1d7 --- /dev/null +++ b/feature/featuretags/gen-featuretags.go @@ -0,0 +1,49 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ignore + +// The gen-featuretags.go program generates the feature__enabled.go +// and feature__disabled.go files for each feature tag. +package main + +import ( + "cmp" + "fmt" + "os" + "strings" + + "tailscale.com/feature/featuretags" + "tailscale.com/util/must" +) + +const header = `// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code g|e|n|e|r|a|t|e|d by gen-featuretags.go; D|O N|OT E|D|I|T. + +` + +func main() { + header := strings.ReplaceAll(header, "|", "") // to avoid this file being marked as generated + for k, m := range featuretags.Features { + if !k.IsOmittable() { + continue + } + sym := cmp.Or(m.Sym, strings.ToUpper(string(k)[:1])+string(k)[1:]) + for _, suf := range []string{"enabled", "disabled"} { + bang := "" + if suf == "enabled" { + bang = "!" // !ts_omit_... 
+ } + must.Do(os.WriteFile("feature_"+string(k)+"_"+suf+".go", + fmt.Appendf(nil, "%s//go:build %s%s\n\npackage featuretags\n\n"+ + "// %s is whether the binary was built with support for modular feature %q.\n"+ + "// Specifically, it's whether the binary was NOT built with the %q build tag.\n"+ + "// It's a const so it can be used for dead code elimination.\n"+ + "const %s = %t\n", + header, bang, k.OmitTag(), sym, m.Desc, k.OmitTag(), sym, suf == "enabled"), 0644)) + + } + } +} From 510830ca7aa2987cce1e76b92efbe5fede6eff8b Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Mon, 15 Sep 2025 10:11:38 -0700 Subject: [PATCH 0333/1093] tailcfg: add HardwareAttestationKey to MapRequest (#17102) Extend the client state management to generate a hardware attestation key if none exists. Extend MapRequest with HardwareAttestationKey{,Signature} fields that optionally contain the public component of the hardware attestation key and a signature of the node's node key using it. This will be used by control to associate hardware attestation keys with node identities on a TOFU basis. Updates tailscale/corp#31269 Signed-off-by: Patrick O'Doherty --- tailcfg/tailcfg.go | 7 +++ types/key/hardware_attestation.go | 97 ++++++++++++++++++++++++++++++- 2 files changed, 102 insertions(+), 2 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 94d0b19d5c700..6c1357a6336c3 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1360,6 +1360,13 @@ type MapRequest struct { NodeKey key.NodePublic DiscoKey key.DiscoPublic + // HardwareAttestationKey is the public key of the node's hardware-backed + // identity attestation key, if any. + HardwareAttestationKey key.HardwareAttestationPublic `json:",omitzero"` + // HardwareAttestationKeySignature is the signature of the NodeKey + // serialized using MarshalText using its hardware attestation key, if any. 
+ HardwareAttestationKeySignature []byte `json:",omitempty"` + // Stream is whether the client wants to receive multiple MapResponses over // the same HTTP connection. // diff --git a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go index be2eefb78319e..ead077a5d1fa4 100644 --- a/types/key/hardware_attestation.go +++ b/types/key/hardware_attestation.go @@ -5,12 +5,19 @@ package key import ( "crypto" + "crypto/ecdsa" + "crypto/elliptic" "encoding/json" "fmt" + "io" + + "go4.org/mem" ) var ErrUnsupported = fmt.Errorf("key type not supported on this platform") +const hardwareAttestPublicHexPrefix = "hwattestpub:" + // HardwareAttestationKey describes a hardware-backed key that is used to // identify a node. Implementation details will // vary based on the platform in use (SecureEnclave for Apple, TPM for @@ -20,10 +27,96 @@ type HardwareAttestationKey interface { crypto.Signer json.Marshaler json.Unmarshaler + io.Closer + Clone() HardwareAttestationKey +} + +// HardwareAttestationPublicFromPlatformKey creates a HardwareAttestationPublic +// for communicating the public component of the hardware attestation key +// with control and other nodes. +func HardwareAttestationPublicFromPlatformKey(k HardwareAttestationKey) HardwareAttestationPublic { + if k == nil { + return HardwareAttestationPublic{} + } + pub := k.Public() + ecdsaPub, ok := pub.(*ecdsa.PublicKey) + if !ok { + panic("hardware attestation key is not ECDSA") + } + return HardwareAttestationPublic{k: ecdsaPub} +} + +// HardwareAttestationPublic is the public key counterpart to +// HardwareAttestationKey. +type HardwareAttestationPublic struct { + k *ecdsa.PublicKey +} + +func (k HardwareAttestationPublic) Equal(o HardwareAttestationPublic) bool { + if k.k == nil || o.k == nil { + return k.k == o.k + } + return k.k.X.Cmp(o.k.X) == 0 && k.k.Y.Cmp(o.k.Y) == 0 && k.k.Curve == o.k.Curve +} + +// IsZero reports whether k is the zero value. 
+func (k HardwareAttestationPublic) IsZero() bool { + return k.k == nil +} + +// String returns the hex-encoded public key with a type prefix. +func (k HardwareAttestationPublic) String() string { + bs, err := k.MarshalText() + if err != nil { + panic(err) + } + return string(bs) +} + +// MarshalText implements encoding.TextMarshaler. +func (k HardwareAttestationPublic) MarshalText() ([]byte, error) { + if k.k == nil { + return nil, nil + } + return k.AppendText(nil) +} + +// UnmarshalText implements encoding.TextUnmarshaler. It expects a typed prefix +// followed by a hex encoded representation of k. +func (k *HardwareAttestationPublic) UnmarshalText(b []byte) error { + if len(b) == 0 { + *k = HardwareAttestationPublic{} + return nil + } + + kb := make([]byte, 65) + if err := parseHex(kb, mem.B(b), mem.S(hardwareAttestPublicHexPrefix)); err != nil { + return err + } + + pk, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), kb) + if err != nil { + return err + } + k.k = pk + return nil +} + +func (k HardwareAttestationPublic) AppendText(dst []byte) ([]byte, error) { + b, err := k.k.Bytes() + if err != nil { + return nil, err + } + return appendHexKey(dst, hardwareAttestPublicHexPrefix, b), nil +} + +// Verifier returns the ECDSA public key for verifying signatures made by k. +func (k HardwareAttestationPublic) Verifier() *ecdsa.PublicKey { + return k.k } // emptyHardwareAttestationKey is a function that returns an empty -// HardwareAttestationKey suitable for use with JSON unmarshalling. +// HardwareAttestationKey suitable for use with JSON unmarshaling. var emptyHardwareAttestationKey func() HardwareAttestationKey // createHardwareAttestationKey is a function that creates a new @@ -50,7 +143,7 @@ func RegisterHardwareAttestationKeyFns(emptyFn func() HardwareAttestationKey, cr } // NewEmptyHardwareAttestationKey returns an empty HardwareAttestationKey -// suitable for JSON unmarshalling. +// suitable for JSON unmarshaling. 
func NewEmptyHardwareAttestationKey() (HardwareAttestationKey, error) { if emptyHardwareAttestationKey == nil { return nil, ErrUnsupported From 6fb316f5edceb5d534a22058dc6804263971e269 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 15 Sep 2025 09:52:17 -0700 Subject: [PATCH 0334/1093] feature/buildfeatures: split const bools out of the featuretags package, add Has prefix This renames the package+symbols in the earlier 17ffa8013835d4 to be in their own package ("buildfeatures") and start with the word "Has" like "if buildfeatures.HasFoo {". Updates #12614 Change-Id: I510e5f65993e5b76a0e163e3aa4543755213cbf6 Signed-off-by: Brad Fitzpatrick --- feature/buildfeatures/buildfeatures.go | 10 ++++++++++ .../feature_aws_disabled.go | 8 ++++---- .../feature_aws_enabled.go | 8 ++++---- .../feature_bird_disabled.go | 8 ++++---- .../feature_bird_enabled.go | 8 ++++---- .../feature_capture_disabled.go | 8 ++++---- .../feature_capture_enabled.go | 8 ++++---- .../feature_completion_disabled.go | 8 ++++---- .../feature_completion_enabled.go | 8 ++++---- .../feature_debugeventbus_disabled.go | 8 ++++---- .../feature_debugeventbus_enabled.go | 8 ++++---- .../feature_desktop_sessions_disabled.go | 8 ++++---- .../feature_desktop_sessions_enabled.go | 8 ++++---- .../feature_drive_disabled.go | 8 ++++---- .../feature_drive_enabled.go | 8 ++++---- .../feature_kube_disabled.go | 8 ++++---- .../feature_kube_enabled.go | 8 ++++---- .../feature_relayserver_disabled.go | 8 ++++---- .../feature_relayserver_enabled.go | 8 ++++---- .../feature_serve_disabled.go | 8 ++++---- .../feature_serve_enabled.go | 8 ++++---- .../feature_ssh_disabled.go | 8 ++++---- .../feature_ssh_enabled.go | 8 ++++---- .../feature_syspolicy_disabled.go | 8 ++++---- .../feature_syspolicy_enabled.go | 8 ++++---- .../feature_systray_disabled.go | 8 ++++---- .../feature_systray_enabled.go | 8 ++++---- .../feature_taildrop_disabled.go | 8 ++++---- .../feature_taildrop_enabled.go | 8 ++++---- 
.../feature_tailnetlock_disabled.go | 8 ++++---- .../feature_tailnetlock_enabled.go | 8 ++++---- .../feature_tap_disabled.go | 8 ++++---- .../feature_tap_enabled.go | 8 ++++---- .../feature_tpm_disabled.go | 8 ++++---- .../feature_tpm_enabled.go | 8 ++++---- .../feature_wakeonlan_disabled.go | 8 ++++---- .../feature_wakeonlan_enabled.go | 8 ++++---- .../feature_webclient_disabled.go | 8 ++++---- .../feature_webclient_enabled.go | 8 ++++---- .../gen-featuretags.go => buildfeatures/gen.go} | 8 ++++---- feature/featuretags/featuretags.go | 2 -- 41 files changed, 166 insertions(+), 158 deletions(-) create mode 100644 feature/buildfeatures/buildfeatures.go rename feature/{featuretags => buildfeatures}/feature_aws_disabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_aws_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_bird_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_bird_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_capture_disabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_capture_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_completion_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_completion_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_debugeventbus_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_debugeventbus_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_desktop_sessions_disabled.go (56%) rename feature/{featuretags => buildfeatures}/feature_desktop_sessions_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_drive_disabled.go (55%) rename feature/{featuretags => buildfeatures}/feature_drive_enabled.go (55%) rename feature/{featuretags => buildfeatures}/feature_kube_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_kube_enabled.go (57%) rename feature/{featuretags => 
buildfeatures}/feature_relayserver_disabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_relayserver_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_serve_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_serve_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_ssh_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_ssh_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_syspolicy_disabled.go (54%) rename feature/{featuretags => buildfeatures}/feature_syspolicy_enabled.go (54%) rename feature/{featuretags => buildfeatures}/feature_systray_disabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_systray_enabled.go (58%) rename feature/{featuretags => buildfeatures}/feature_taildrop_disabled.go (56%) rename feature/{featuretags => buildfeatures}/feature_taildrop_enabled.go (56%) rename feature/{featuretags => buildfeatures}/feature_tailnetlock_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_tailnetlock_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_tap_disabled.go (55%) rename feature/{featuretags => buildfeatures}/feature_tap_enabled.go (55%) rename feature/{featuretags => buildfeatures}/feature_tpm_disabled.go (59%) rename feature/{featuretags => buildfeatures}/feature_tpm_enabled.go (59%) rename feature/{featuretags => buildfeatures}/feature_wakeonlan_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_wakeonlan_enabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_webclient_disabled.go (57%) rename feature/{featuretags => buildfeatures}/feature_webclient_enabled.go (57%) rename feature/{featuretags/gen-featuretags.go => buildfeatures/gen.go} (80%) diff --git a/feature/buildfeatures/buildfeatures.go b/feature/buildfeatures/buildfeatures.go new file mode 100644 index 0000000000000..cdb31dc015673 --- /dev/null +++ 
b/feature/buildfeatures/buildfeatures.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:generate go run gen.go + +// The buildfeatures package contains boolean constants indicating which +// features were included in the binary (via build tags), for use in dead code +// elimination when using separate build tag protected files is impractical +// or undesirable. +package buildfeatures diff --git a/feature/featuretags/feature_aws_disabled.go b/feature/buildfeatures/feature_aws_disabled.go similarity index 58% rename from feature/featuretags/feature_aws_disabled.go rename to feature/buildfeatures/feature_aws_disabled.go index 045feb269844e..66b670c1fe451 100644 --- a/feature/featuretags/feature_aws_disabled.go +++ b/feature/buildfeatures/feature_aws_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_aws -package featuretags +package buildfeatures -// AWS is whether the binary was built with support for modular feature "AWS integration". +// HasAWS is whether the binary was built with support for modular feature "AWS integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. // It's a const so it can be used for dead code elimination. 
-const AWS = false +const HasAWS = false diff --git a/feature/featuretags/feature_aws_enabled.go b/feature/buildfeatures/feature_aws_enabled.go similarity index 58% rename from feature/featuretags/feature_aws_enabled.go rename to feature/buildfeatures/feature_aws_enabled.go index d935c9d262cef..30203b2aa6df8 100644 --- a/feature/featuretags/feature_aws_enabled.go +++ b/feature/buildfeatures/feature_aws_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_aws -package featuretags +package buildfeatures -// AWS is whether the binary was built with support for modular feature "AWS integration". +// HasAWS is whether the binary was built with support for modular feature "AWS integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. // It's a const so it can be used for dead code elimination. -const AWS = true +const HasAWS = true diff --git a/feature/featuretags/feature_bird_disabled.go b/feature/buildfeatures/feature_bird_disabled.go similarity index 57% rename from feature/featuretags/feature_bird_disabled.go rename to feature/buildfeatures/feature_bird_disabled.go index 986c984584f9f..469aa41f954a9 100644 --- a/feature/featuretags/feature_bird_disabled.go +++ b/feature/buildfeatures/feature_bird_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_bird -package featuretags +package buildfeatures -// Bird is whether the binary was built with support for modular feature "Bird BGP integration". +// HasBird is whether the binary was built with support for modular feature "Bird BGP integration". 
// Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. // It's a const so it can be used for dead code elimination. -const Bird = false +const HasBird = false diff --git a/feature/featuretags/feature_bird_enabled.go b/feature/buildfeatures/feature_bird_enabled.go similarity index 58% rename from feature/featuretags/feature_bird_enabled.go rename to feature/buildfeatures/feature_bird_enabled.go index ac9404704e880..792129f64f567 100644 --- a/feature/featuretags/feature_bird_enabled.go +++ b/feature/buildfeatures/feature_bird_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_bird -package featuretags +package buildfeatures -// Bird is whether the binary was built with support for modular feature "Bird BGP integration". +// HasBird is whether the binary was built with support for modular feature "Bird BGP integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. // It's a const so it can be used for dead code elimination. -const Bird = true +const HasBird = true diff --git a/feature/featuretags/feature_capture_disabled.go b/feature/buildfeatures/feature_capture_disabled.go similarity index 58% rename from feature/featuretags/feature_capture_disabled.go rename to feature/buildfeatures/feature_capture_disabled.go index cee42454291f7..58535958f26e8 100644 --- a/feature/featuretags/feature_capture_disabled.go +++ b/feature/buildfeatures/feature_capture_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. 
//go:build ts_omit_capture -package featuretags +package buildfeatures -// Capture is whether the binary was built with support for modular feature "Packet capture". +// HasCapture is whether the binary was built with support for modular feature "Packet capture". // Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. // It's a const so it can be used for dead code elimination. -const Capture = false +const HasCapture = false diff --git a/feature/featuretags/feature_capture_enabled.go b/feature/buildfeatures/feature_capture_enabled.go similarity index 58% rename from feature/featuretags/feature_capture_enabled.go rename to feature/buildfeatures/feature_capture_enabled.go index 40aabf11064c8..7120a3d06fa7d 100644 --- a/feature/featuretags/feature_capture_enabled.go +++ b/feature/buildfeatures/feature_capture_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_capture -package featuretags +package buildfeatures -// Capture is whether the binary was built with support for modular feature "Packet capture". +// HasCapture is whether the binary was built with support for modular feature "Packet capture". // Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. // It's a const so it can be used for dead code elimination. 
-const Capture = true +const HasCapture = true diff --git a/feature/featuretags/feature_completion_disabled.go b/feature/buildfeatures/feature_completion_disabled.go similarity index 57% rename from feature/featuretags/feature_completion_disabled.go rename to feature/buildfeatures/feature_completion_disabled.go index 7b3f3cb6dcfbf..ea319beb0af3e 100644 --- a/feature/featuretags/feature_completion_disabled.go +++ b/feature/buildfeatures/feature_completion_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_completion -package featuretags +package buildfeatures -// Completion is whether the binary was built with support for modular feature "CLI shell completion". +// HasCompletion is whether the binary was built with support for modular feature "CLI shell completion". // Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. // It's a const so it can be used for dead code elimination. -const Completion = false +const HasCompletion = false diff --git a/feature/featuretags/feature_completion_enabled.go b/feature/buildfeatures/feature_completion_enabled.go similarity index 57% rename from feature/featuretags/feature_completion_enabled.go rename to feature/buildfeatures/feature_completion_enabled.go index b6d5218f2f8e0..6db41c97b3e76 100644 --- a/feature/featuretags/feature_completion_enabled.go +++ b/feature/buildfeatures/feature_completion_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_completion -package featuretags +package buildfeatures -// Completion is whether the binary was built with support for modular feature "CLI shell completion". 
+// HasCompletion is whether the binary was built with support for modular feature "CLI shell completion". // Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. // It's a const so it can be used for dead code elimination. -const Completion = true +const HasCompletion = true diff --git a/feature/featuretags/feature_debugeventbus_disabled.go b/feature/buildfeatures/feature_debugeventbus_disabled.go similarity index 57% rename from feature/featuretags/feature_debugeventbus_disabled.go rename to feature/buildfeatures/feature_debugeventbus_disabled.go index c826de6912e30..2eb59993444af 100644 --- a/feature/featuretags/feature_debugeventbus_disabled.go +++ b/feature/buildfeatures/feature_debugeventbus_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_debugeventbus -package featuretags +package buildfeatures -// DebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// HasDebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". // Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. // It's a const so it can be used for dead code elimination. 
-const DebugEventBus = false +const HasDebugEventBus = false diff --git a/feature/featuretags/feature_debugeventbus_enabled.go b/feature/buildfeatures/feature_debugeventbus_enabled.go similarity index 57% rename from feature/featuretags/feature_debugeventbus_enabled.go rename to feature/buildfeatures/feature_debugeventbus_enabled.go index 068efa8598856..df13b6fa23167 100644 --- a/feature/featuretags/feature_debugeventbus_enabled.go +++ b/feature/buildfeatures/feature_debugeventbus_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_debugeventbus -package featuretags +package buildfeatures -// DebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// HasDebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". // Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. // It's a const so it can be used for dead code elimination. -const DebugEventBus = true +const HasDebugEventBus = true diff --git a/feature/featuretags/feature_desktop_sessions_disabled.go b/feature/buildfeatures/feature_desktop_sessions_disabled.go similarity index 56% rename from feature/featuretags/feature_desktop_sessions_disabled.go rename to feature/buildfeatures/feature_desktop_sessions_disabled.go index 73644d91190b8..1536c886fec25 100644 --- a/feature/featuretags/feature_desktop_sessions_disabled.go +++ b/feature/buildfeatures/feature_desktop_sessions_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. 
//go:build ts_omit_desktop_sessions -package featuretags +package buildfeatures -// DesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// HasDesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". // Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. // It's a const so it can be used for dead code elimination. -const DesktopSessions = false +const HasDesktopSessions = false diff --git a/feature/featuretags/feature_desktop_sessions_enabled.go b/feature/buildfeatures/feature_desktop_sessions_enabled.go similarity index 57% rename from feature/featuretags/feature_desktop_sessions_enabled.go rename to feature/buildfeatures/feature_desktop_sessions_enabled.go index 93c776a047dc2..84658de952c86 100644 --- a/feature/featuretags/feature_desktop_sessions_enabled.go +++ b/feature/buildfeatures/feature_desktop_sessions_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_desktop_sessions -package featuretags +package buildfeatures -// DesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// HasDesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". // Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. // It's a const so it can be used for dead code elimination. 
-const DesktopSessions = true +const HasDesktopSessions = true diff --git a/feature/featuretags/feature_drive_disabled.go b/feature/buildfeatures/feature_drive_disabled.go similarity index 55% rename from feature/featuretags/feature_drive_disabled.go rename to feature/buildfeatures/feature_drive_disabled.go index 550ed0bd16a6d..07202638952e8 100644 --- a/feature/featuretags/feature_drive_disabled.go +++ b/feature/buildfeatures/feature_drive_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_drive -package featuretags +package buildfeatures -// Drive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// HasDrive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. // It's a const so it can be used for dead code elimination. -const Drive = false +const HasDrive = false diff --git a/feature/featuretags/feature_drive_enabled.go b/feature/buildfeatures/feature_drive_enabled.go similarity index 55% rename from feature/featuretags/feature_drive_enabled.go rename to feature/buildfeatures/feature_drive_enabled.go index 2ed83b271c7d2..9f58836a43fc7 100644 --- a/feature/featuretags/feature_drive_enabled.go +++ b/feature/buildfeatures/feature_drive_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_drive -package featuretags +package buildfeatures -// Drive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". 
+// HasDrive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. // It's a const so it can be used for dead code elimination. -const Drive = true +const HasDrive = true diff --git a/feature/featuretags/feature_kube_disabled.go b/feature/buildfeatures/feature_kube_disabled.go similarity index 57% rename from feature/featuretags/feature_kube_disabled.go rename to feature/buildfeatures/feature_kube_disabled.go index 3a140e8693ad2..2b76c57e78b94 100644 --- a/feature/featuretags/feature_kube_disabled.go +++ b/feature/buildfeatures/feature_kube_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_kube -package featuretags +package buildfeatures -// Kube is whether the binary was built with support for modular feature "Kubernetes integration". +// HasKube is whether the binary was built with support for modular feature "Kubernetes integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. // It's a const so it can be used for dead code elimination. -const Kube = false +const HasKube = false diff --git a/feature/featuretags/feature_kube_enabled.go b/feature/buildfeatures/feature_kube_enabled.go similarity index 57% rename from feature/featuretags/feature_kube_enabled.go rename to feature/buildfeatures/feature_kube_enabled.go index 1dd119a2b2431..7abca1759fc49 100644 --- a/feature/featuretags/feature_kube_enabled.go +++ b/feature/buildfeatures/feature_kube_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. 
//go:build !ts_omit_kube -package featuretags +package buildfeatures -// Kube is whether the binary was built with support for modular feature "Kubernetes integration". +// HasKube is whether the binary was built with support for modular feature "Kubernetes integration". // Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. // It's a const so it can be used for dead code elimination. -const Kube = true +const HasKube = true diff --git a/feature/featuretags/feature_relayserver_disabled.go b/feature/buildfeatures/feature_relayserver_disabled.go similarity index 58% rename from feature/featuretags/feature_relayserver_disabled.go rename to feature/buildfeatures/feature_relayserver_disabled.go index e6122ef9cb657..08ced83101f96 100644 --- a/feature/featuretags/feature_relayserver_disabled.go +++ b/feature/buildfeatures/feature_relayserver_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_relayserver -package featuretags +package buildfeatures -// RelayServer is whether the binary was built with support for modular feature "Relay server". +// HasRelayServer is whether the binary was built with support for modular feature "Relay server". // Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. // It's a const so it can be used for dead code elimination. 
-const RelayServer = false +const HasRelayServer = false diff --git a/feature/featuretags/feature_relayserver_enabled.go b/feature/buildfeatures/feature_relayserver_enabled.go similarity index 58% rename from feature/featuretags/feature_relayserver_enabled.go rename to feature/buildfeatures/feature_relayserver_enabled.go index 34ed23a847f28..6a35f8305d68f 100644 --- a/feature/featuretags/feature_relayserver_enabled.go +++ b/feature/buildfeatures/feature_relayserver_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_relayserver -package featuretags +package buildfeatures -// RelayServer is whether the binary was built with support for modular feature "Relay server". +// HasRelayServer is whether the binary was built with support for modular feature "Relay server". // Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. // It's a const so it can be used for dead code elimination. -const RelayServer = true +const HasRelayServer = true diff --git a/feature/featuretags/feature_serve_disabled.go b/feature/buildfeatures/feature_serve_disabled.go similarity index 57% rename from feature/featuretags/feature_serve_disabled.go rename to feature/buildfeatures/feature_serve_disabled.go index a143e951f7ddf..6d79713500e29 100644 --- a/feature/featuretags/feature_serve_disabled.go +++ b/feature/buildfeatures/feature_serve_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_serve -package featuretags +package buildfeatures -// Serve is whether the binary was built with support for modular feature "Serve and Funnel support". 
+// HasServe is whether the binary was built with support for modular feature "Serve and Funnel support". // Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. // It's a const so it can be used for dead code elimination. -const Serve = false +const HasServe = false diff --git a/feature/featuretags/feature_serve_enabled.go b/feature/buildfeatures/feature_serve_enabled.go similarity index 57% rename from feature/featuretags/feature_serve_enabled.go rename to feature/buildfeatures/feature_serve_enabled.go index 1d1af0809dcb8..57bf2c6b0fc2b 100644 --- a/feature/featuretags/feature_serve_enabled.go +++ b/feature/buildfeatures/feature_serve_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_serve -package featuretags +package buildfeatures -// Serve is whether the binary was built with support for modular feature "Serve and Funnel support". +// HasServe is whether the binary was built with support for modular feature "Serve and Funnel support". // Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. // It's a const so it can be used for dead code elimination. -const Serve = true +const HasServe = true diff --git a/feature/featuretags/feature_ssh_disabled.go b/feature/buildfeatures/feature_ssh_disabled.go similarity index 57% rename from feature/featuretags/feature_ssh_disabled.go rename to feature/buildfeatures/feature_ssh_disabled.go index c22be29453cc0..754f50eb6a816 100644 --- a/feature/featuretags/feature_ssh_disabled.go +++ b/feature/buildfeatures/feature_ssh_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. 
//go:build ts_omit_ssh -package featuretags +package buildfeatures -// SSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// HasSSH is whether the binary was built with support for modular feature "Tailscale SSH support". // Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. // It's a const so it can be used for dead code elimination. -const SSH = false +const HasSSH = false diff --git a/feature/featuretags/feature_ssh_enabled.go b/feature/buildfeatures/feature_ssh_enabled.go similarity index 58% rename from feature/featuretags/feature_ssh_enabled.go rename to feature/buildfeatures/feature_ssh_enabled.go index 52fa10b581e24..dbdc3a89fa027 100644 --- a/feature/featuretags/feature_ssh_enabled.go +++ b/feature/buildfeatures/feature_ssh_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_ssh -package featuretags +package buildfeatures -// SSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// HasSSH is whether the binary was built with support for modular feature "Tailscale SSH support". // Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. // It's a const so it can be used for dead code elimination. 
-const SSH = true +const HasSSH = true diff --git a/feature/featuretags/feature_syspolicy_disabled.go b/feature/buildfeatures/feature_syspolicy_disabled.go similarity index 54% rename from feature/featuretags/feature_syspolicy_disabled.go rename to feature/buildfeatures/feature_syspolicy_disabled.go index db73b02612ca3..54d32e32e71d8 100644 --- a/feature/featuretags/feature_syspolicy_disabled.go +++ b/feature/buildfeatures/feature_syspolicy_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_syspolicy -package featuretags +package buildfeatures -// SystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". +// HasSystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. // It's a const so it can be used for dead code elimination. -const SystemPolicy = false +const HasSystemPolicy = false diff --git a/feature/featuretags/feature_syspolicy_enabled.go b/feature/buildfeatures/feature_syspolicy_enabled.go similarity index 54% rename from feature/featuretags/feature_syspolicy_enabled.go rename to feature/buildfeatures/feature_syspolicy_enabled.go index 2ad332676a474..f7c403ae9d68b 100644 --- a/feature/featuretags/feature_syspolicy_enabled.go +++ b/feature/buildfeatures/feature_syspolicy_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. 
//go:build !ts_omit_syspolicy -package featuretags +package buildfeatures -// SystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". +// HasSystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. // It's a const so it can be used for dead code elimination. -const SystemPolicy = true +const HasSystemPolicy = true diff --git a/feature/featuretags/feature_systray_disabled.go b/feature/buildfeatures/feature_systray_disabled.go similarity index 58% rename from feature/featuretags/feature_systray_disabled.go rename to feature/buildfeatures/feature_systray_disabled.go index a358bbf6fd657..4ae1edb0ab83f 100644 --- a/feature/featuretags/feature_systray_disabled.go +++ b/feature/buildfeatures/feature_systray_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_systray -package featuretags +package buildfeatures -// SysTray is whether the binary was built with support for modular feature "Linux system tray". +// HasSysTray is whether the binary was built with support for modular feature "Linux system tray". // Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. // It's a const so it can be used for dead code elimination. 
-const SysTray = false +const HasSysTray = false diff --git a/feature/featuretags/feature_systray_enabled.go b/feature/buildfeatures/feature_systray_enabled.go similarity index 58% rename from feature/featuretags/feature_systray_enabled.go rename to feature/buildfeatures/feature_systray_enabled.go index aebf3ad9e47fb..5fd7fd220325a 100644 --- a/feature/featuretags/feature_systray_enabled.go +++ b/feature/buildfeatures/feature_systray_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_systray -package featuretags +package buildfeatures -// SysTray is whether the binary was built with support for modular feature "Linux system tray". +// HasSysTray is whether the binary was built with support for modular feature "Linux system tray". // Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. // It's a const so it can be used for dead code elimination. -const SysTray = true +const HasSysTray = true diff --git a/feature/featuretags/feature_taildrop_disabled.go b/feature/buildfeatures/feature_taildrop_disabled.go similarity index 56% rename from feature/featuretags/feature_taildrop_disabled.go rename to feature/buildfeatures/feature_taildrop_disabled.go index 5c95c28b6624b..8ffe90617839f 100644 --- a/feature/featuretags/feature_taildrop_disabled.go +++ b/feature/buildfeatures/feature_taildrop_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_taildrop -package featuretags +package buildfeatures -// Taildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". 
+// HasTaildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. // It's a const so it can be used for dead code elimination. -const Taildrop = false +const HasTaildrop = false diff --git a/feature/featuretags/feature_taildrop_enabled.go b/feature/buildfeatures/feature_taildrop_enabled.go similarity index 56% rename from feature/featuretags/feature_taildrop_enabled.go rename to feature/buildfeatures/feature_taildrop_enabled.go index e5212f03a06ab..4f55d2801c516 100644 --- a/feature/featuretags/feature_taildrop_enabled.go +++ b/feature/buildfeatures/feature_taildrop_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_taildrop -package featuretags +package buildfeatures -// Taildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// HasTaildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. // It's a const so it can be used for dead code elimination. 
-const Taildrop = true +const HasTaildrop = true diff --git a/feature/featuretags/feature_tailnetlock_disabled.go b/feature/buildfeatures/feature_tailnetlock_disabled.go similarity index 57% rename from feature/featuretags/feature_tailnetlock_disabled.go rename to feature/buildfeatures/feature_tailnetlock_disabled.go index 2a07233decb9c..6b5a57f24ba4f 100644 --- a/feature/featuretags/feature_tailnetlock_disabled.go +++ b/feature/buildfeatures/feature_tailnetlock_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_tailnetlock -package featuretags +package buildfeatures -// TailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// HasTailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. // It's a const so it can be used for dead code elimination. -const TailnetLock = false +const HasTailnetLock = false diff --git a/feature/featuretags/feature_tailnetlock_enabled.go b/feature/buildfeatures/feature_tailnetlock_enabled.go similarity index 57% rename from feature/featuretags/feature_tailnetlock_enabled.go rename to feature/buildfeatures/feature_tailnetlock_enabled.go index 1abf0c3bcba18..afedb7faad312 100644 --- a/feature/featuretags/feature_tailnetlock_enabled.go +++ b/feature/buildfeatures/feature_tailnetlock_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_tailnetlock -package featuretags +package buildfeatures -// TailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". 
+// HasTailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. // It's a const so it can be used for dead code elimination. -const TailnetLock = true +const HasTailnetLock = true diff --git a/feature/featuretags/feature_tap_disabled.go b/feature/buildfeatures/feature_tap_disabled.go similarity index 55% rename from feature/featuretags/feature_tap_disabled.go rename to feature/buildfeatures/feature_tap_disabled.go index d4dfded2b29aa..f0b3eec8d7e6f 100644 --- a/feature/featuretags/feature_tap_disabled.go +++ b/feature/buildfeatures/feature_tap_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_tap -package featuretags +package buildfeatures -// Tap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// HasTap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. // It's a const so it can be used for dead code elimination. -const Tap = false +const HasTap = false diff --git a/feature/featuretags/feature_tap_enabled.go b/feature/buildfeatures/feature_tap_enabled.go similarity index 55% rename from feature/featuretags/feature_tap_enabled.go rename to feature/buildfeatures/feature_tap_enabled.go index a6ce1415c764b..1363c4b44afb2 100644 --- a/feature/featuretags/feature_tap_enabled.go +++ b/feature/buildfeatures/feature_tap_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. 
//go:build !ts_omit_tap -package featuretags +package buildfeatures -// Tap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// HasTap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. // It's a const so it can be used for dead code elimination. -const Tap = true +const HasTap = true diff --git a/feature/featuretags/feature_tpm_disabled.go b/feature/buildfeatures/feature_tpm_disabled.go similarity index 59% rename from feature/featuretags/feature_tpm_disabled.go rename to feature/buildfeatures/feature_tpm_disabled.go index 15d888cfead9a..b9d55815ef5df 100644 --- a/feature/featuretags/feature_tpm_disabled.go +++ b/feature/buildfeatures/feature_tpm_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_tpm -package featuretags +package buildfeatures -// TPM is whether the binary was built with support for modular feature "TPM support". +// HasTPM is whether the binary was built with support for modular feature "TPM support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. // It's a const so it can be used for dead code elimination. 
-const TPM = false +const HasTPM = false diff --git a/feature/featuretags/feature_tpm_enabled.go b/feature/buildfeatures/feature_tpm_enabled.go similarity index 59% rename from feature/featuretags/feature_tpm_enabled.go rename to feature/buildfeatures/feature_tpm_enabled.go index 3525f744c9e4b..dcfc8a30442ad 100644 --- a/feature/featuretags/feature_tpm_enabled.go +++ b/feature/buildfeatures/feature_tpm_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_tpm -package featuretags +package buildfeatures -// TPM is whether the binary was built with support for modular feature "TPM support". +// HasTPM is whether the binary was built with support for modular feature "TPM support". // Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. // It's a const so it can be used for dead code elimination. -const TPM = true +const HasTPM = true diff --git a/feature/featuretags/feature_wakeonlan_disabled.go b/feature/buildfeatures/feature_wakeonlan_disabled.go similarity index 57% rename from feature/featuretags/feature_wakeonlan_disabled.go rename to feature/buildfeatures/feature_wakeonlan_disabled.go index 7b2b39c443374..816ac661f78ce 100644 --- a/feature/featuretags/feature_wakeonlan_disabled.go +++ b/feature/buildfeatures/feature_wakeonlan_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build ts_omit_wakeonlan -package featuretags +package buildfeatures -// WakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// HasWakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". 
// Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. // It's a const so it can be used for dead code elimination. -const WakeOnLAN = false +const HasWakeOnLAN = false diff --git a/feature/featuretags/feature_wakeonlan_enabled.go b/feature/buildfeatures/feature_wakeonlan_enabled.go similarity index 57% rename from feature/featuretags/feature_wakeonlan_enabled.go rename to feature/buildfeatures/feature_wakeonlan_enabled.go index 87eed5abf194d..34b3348a10fef 100644 --- a/feature/featuretags/feature_wakeonlan_enabled.go +++ b/feature/buildfeatures/feature_wakeonlan_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_wakeonlan -package featuretags +package buildfeatures -// WakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// HasWakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". // Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. // It's a const so it can be used for dead code elimination. -const WakeOnLAN = true +const HasWakeOnLAN = true diff --git a/feature/featuretags/feature_webclient_disabled.go b/feature/buildfeatures/feature_webclient_disabled.go similarity index 57% rename from feature/featuretags/feature_webclient_disabled.go rename to feature/buildfeatures/feature_webclient_disabled.go index d49cbf8a71193..a7b24f4ac2dda 100644 --- a/feature/featuretags/feature_webclient_disabled.go +++ b/feature/buildfeatures/feature_webclient_disabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. 
//go:build ts_omit_webclient -package featuretags +package buildfeatures -// WebClient is whether the binary was built with support for modular feature "Web client support". +// HasWebClient is whether the binary was built with support for modular feature "Web client support". // Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. // It's a const so it can be used for dead code elimination. -const WebClient = false +const HasWebClient = false diff --git a/feature/featuretags/feature_webclient_enabled.go b/feature/buildfeatures/feature_webclient_enabled.go similarity index 57% rename from feature/featuretags/feature_webclient_enabled.go rename to feature/buildfeatures/feature_webclient_enabled.go index 020ff64a05b14..e40dad33c6ebb 100644 --- a/feature/featuretags/feature_webclient_enabled.go +++ b/feature/buildfeatures/feature_webclient_enabled.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code generated by gen-featuretags.go; DO NOT EDIT. +// Code generated by gen.go; DO NOT EDIT. //go:build !ts_omit_webclient -package featuretags +package buildfeatures -// WebClient is whether the binary was built with support for modular feature "Web client support". +// HasWebClient is whether the binary was built with support for modular feature "Web client support". // Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. // It's a const so it can be used for dead code elimination. 
-const WebClient = true +const HasWebClient = true diff --git a/feature/featuretags/gen-featuretags.go b/feature/buildfeatures/gen.go similarity index 80% rename from feature/featuretags/gen-featuretags.go rename to feature/buildfeatures/gen.go index 27701fb78d1d7..e967cb8ff1906 100644 --- a/feature/featuretags/gen-featuretags.go +++ b/feature/buildfeatures/gen.go @@ -3,7 +3,7 @@ //go:build ignore -// The gen-featuretags.go program generates the feature__enabled.go +// The gen.go program generates the feature__enabled.go // and feature__disabled.go files for each feature tag. package main @@ -20,7 +20,7 @@ import ( const header = `// Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Code g|e|n|e|r|a|t|e|d by gen-featuretags.go; D|O N|OT E|D|I|T. +// Code g|e|n|e|r|a|t|e|d by gen.go; D|O N|OT E|D|I|T. ` @@ -30,14 +30,14 @@ func main() { if !k.IsOmittable() { continue } - sym := cmp.Or(m.Sym, strings.ToUpper(string(k)[:1])+string(k)[1:]) + sym := "Has" + cmp.Or(m.Sym, strings.ToUpper(string(k)[:1])+string(k)[1:]) for _, suf := range []string{"enabled", "disabled"} { bang := "" if suf == "enabled" { bang = "!" // !ts_omit_... 
} must.Do(os.WriteFile("feature_"+string(k)+"_"+suf+".go", - fmt.Appendf(nil, "%s//go:build %s%s\n\npackage featuretags\n\n"+ + fmt.Appendf(nil, "%s//go:build %s%s\n\npackage buildfeatures\n\n"+ "// %s is whether the binary was built with support for modular feature %q.\n"+ "// Specifically, it's whether the binary was NOT built with the %q build tag.\n"+ "// It's a const so it can be used for dead code elimination.\n"+ diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 55945075b5c7d..6778593fae903 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -1,8 +1,6 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:generate go run gen-featuretags.go - // The featuretags package is a registry of all the ts_omit-able build tags. package featuretags From 4bb03609bc95734644855976525d7203bb0da7f6 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 15 Sep 2025 11:40:34 -0600 Subject: [PATCH 0335/1093] tool/gocross: ensure child process error codes are propagated on non-Unix The Unix implementation of doExec propagates error codes by virtue of the fact that it does an execve; the replacement binary will return the exit code. On non-Unix, we need to simulate these semantics by checking for an ExitError and, when present, passing that value on to os.Exit. We also add error handling to the doExec call for the benefit of handling any errors where doExec fails before being able to execute the desired binary. 
Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- tool/gocross/exec_other.go | 12 +++++++++++- tool/gocross/gocross.go | 6 +++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/tool/gocross/exec_other.go b/tool/gocross/exec_other.go index 7bce0c0993620..4dd74f84d7d2b 100644 --- a/tool/gocross/exec_other.go +++ b/tool/gocross/exec_other.go @@ -6,6 +6,7 @@ package main import ( + "errors" "os" "os/exec" ) @@ -16,5 +17,14 @@ func doExec(cmd string, args []string, env []string) error { c.Stdin = os.Stdin c.Stdout = os.Stdout c.Stderr = os.Stderr - return c.Run() + err := c.Run() + + // Propagate ExitErrors within this func to give us similar semantics to + // the Unix variant. + var ee *exec.ExitError + if errors.As(err, &ee) { + os.Exit(ee.ExitCode()) + } + + return err } diff --git a/tool/gocross/gocross.go b/tool/gocross/gocross.go index c71012d73778b..41fab3d584260 100644 --- a/tool/gocross/gocross.go +++ b/tool/gocross/gocross.go @@ -114,7 +114,11 @@ func main() { } - doExec(filepath.Join(toolchain, "bin/go"), args, os.Environ()) + // Note that doExec only returns if the exec call failed. + if err := doExec(filepath.Join(toolchain, "bin", "go"), args, os.Environ()); err != nil { + fmt.Fprintf(os.Stderr, "executing process: %v\n", err) + os.Exit(1) + } } //go:embed gocross-wrapper.sh From 09dfd94613ebe181217fabec46a254cbd04f94e5 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 15 Sep 2025 12:10:58 -0700 Subject: [PATCH 0336/1093] cmd/omitsize: fix the --features flag When you say --features=foo,bar, that was supposed to mean to only show features "foo" and "bar" in the table. But it was also being used as the set of all features that are omittable, which was wrong, leading to misleading numbers when --features was non-empty. 
Updates #12614 Change-Id: Idad2fa67fb49c39454032e84a3dede967890fdf5 Signed-off-by: Brad Fitzpatrick --- cmd/omitsize/omitsize.go | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go index 841f3ab9e8c30..a4bce63295f25 100644 --- a/cmd/omitsize/omitsize.go +++ b/cmd/omitsize/omitsize.go @@ -31,12 +31,16 @@ func main() { flag.Parse() var all []string - if *features == "" { - for k := range featuretags.Features { - if k.IsOmittable() { - all = append(all, k.OmitTag()) - } + var allOmittable []string + + for k := range featuretags.Features { + if k.IsOmittable() { + allOmittable = append(allOmittable, k.OmitTag()) } + } + + if *features == "" { + all = slices.Clone(allOmittable) } else { for v := range strings.SplitSeq(*features, ",") { if !strings.HasPrefix(v, "ts_omit_") { @@ -49,15 +53,15 @@ func main() { slices.Sort(all) all = slices.Compact(all) - baseD := measure("tailscaled") - baseC := measure("tailscale") - baseBoth := measure("tailscaled", "ts_include_cli") - - minD := measure("tailscaled", all...) - minC := measure("tailscale", all...) - minBoth := measure("tailscaled", append(slices.Clone(all), "ts_include_cli")...) + minD := measure("tailscaled", allOmittable...) + minC := measure("tailscale", allOmittable...) + minBoth := measure("tailscaled", append(slices.Clone(allOmittable), "ts_include_cli")...) 
if *showRemovals { + baseD := measure("tailscaled") + baseC := measure("tailscale") + baseBoth := measure("tailscaled", "ts_include_cli") + fmt.Printf("Starting with everything and removing a feature...\n\n") fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") @@ -80,7 +84,7 @@ func main() { fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) for _, t := range all { - tags := allExcept(all, t) + tags := allExcept(allOmittable, t) sizeD := measure("tailscaled", tags...) sizeC := measure("tailscale", tags...) sizeBoth := measure("tailscaled", append(tags, "ts_include_cli")...) From 998a667cd5eb42a1f49374c328007db647405d11 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 15 Sep 2025 15:22:13 -0700 Subject: [PATCH 0337/1093] wgengine/magicsock: don't add DERP addrs to endpointState (#17147) endpointState is used for tracking UDP direct connection candidate addresses. If it contains a DERP addr, then direct connection path discovery will always send a wasteful disco ping over it. Additionally, CLI "tailscale ping" via peer relay will race over DERP, leading to a misleading result if pong arrives via DERP first. Disco pongs arriving via DERP never influence path selection. Disco ping/pong via DERP only serves "tailscale ping" reporting. Updates #17121 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 8ab7957ca2bb6..fa1f1f88f6cda 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2539,10 +2539,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // Remember this route if not present. 
var dup bool if isDerp { - if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - return - } + if _, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { numNodes = 1 } } else { From 5c24f0ed803a0f60d3a05f148f3e20f99f3d00d7 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 15 Sep 2025 06:53:41 +0100 Subject: [PATCH 0338/1093] wgengine/magicsock: send a valid payload in TestNetworkDownSendErrors This test ostensibly checks whether we record an error metric if a packet is dropped because the network is down, but the network connectivity is irrelevant -- the send error is actually because the arguments to Send() are invalid: RebindingUDPConn.WriteWireGuardBatchTo: [unexpected] offset (0) != Geneve header length (8) This patch changes the test so we try to send a valid packet, and we verify this by sending it once before taking the network down. The new error is: magicsock: network down which is what we're trying to test. We then test sending an invalid payload as a separate test case. 
Updates tailscale/corp#22075 Signed-off-by: Alex Chan --- wgengine/magicsock/magicsock_test.go | 83 +++++++++++++++++++++++----- 1 file changed, 69 insertions(+), 14 deletions(-) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 5774432d5a0b9..bb5922c8c352d 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -3131,34 +3131,89 @@ func TestMaybeRebindOnError(t *testing.T) { }) } -func TestNetworkDownSendErrors(t *testing.T) { +func newTestConnAndRegistry(t *testing.T) (*Conn, *usermetric.Registry, func()) { + t.Helper() bus := eventbus.New() - defer bus.Close() - netMon := must.Get(netmon.New(bus, t.Logf)) - defer netMon.Close() reg := new(usermetric.Registry) + conn := must.Get(NewConn(Options{ DisablePortMapper: true, Logf: t.Logf, NetMon: netMon, - Metrics: reg, EventBus: bus, + Metrics: reg, })) - defer conn.Close() - conn.SetNetworkUp(false) - if err := conn.Send([][]byte{{00}}, &lazyEndpoint{}, 0); err == nil { - t.Error("expected error, got nil") - } - resp := httptest.NewRecorder() - reg.Handler(resp, new(http.Request)) - if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { - t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String()) + return conn, reg, func() { + bus.Close() + netMon.Close() + conn.Close() } } +func TestNetworkSendErrors(t *testing.T) { + t.Run("network-down", func(t *testing.T) { + // TODO(alexc): This test case fails on Windows because it never + // successfully sends the first packet: + // + // expected successful Send, got err: "write udp4 0.0.0.0:57516->127.0.0.1:9999: + // wsasendto: The requested address is not valid in its context." + // + // It would be nice to run this test on Windows, but I was already + // on a side quest and it was unclear if this test has ever worked + // correctly on Windows. 
+ if runtime.GOOS == "windows" { + t.Skipf("skipping on %s", runtime.GOOS) + } + + conn, reg, close := newTestConnAndRegistry(t) + defer close() + + buffs := [][]byte{{00, 00, 00, 00, 00, 00, 00, 00}} + ep := &lazyEndpoint{ + src: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:9999")}, + } + offset := 8 + + // Check this is a valid payload to send when the network is up + conn.SetNetworkUp(true) + if err := conn.Send(buffs, ep, offset); err != nil { + t.Errorf("expected successful Send, got err: %q", err) + } + + // Now we know the payload would be sent if the network is up, + // send it again when the network is down + conn.SetNetworkUp(false) + err := conn.Send(buffs, ep, offset) + if err == nil { + t.Error("expected error, got nil") + } + resp := httptest.NewRecorder() + reg.Handler(resp, new(http.Request)) + if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { + t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String()) + } + }) + + t.Run("invalid-payload", func(t *testing.T) { + conn, reg, close := newTestConnAndRegistry(t) + defer close() + + conn.SetNetworkUp(false) + err := conn.Send([][]byte{{00}}, &lazyEndpoint{}, 0) + if err == nil { + t.Error("expected error, got nil") + } + resp := httptest.NewRecorder() + reg.Handler(resp, new(http.Request)) + if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { + t.Errorf("expected invalid payload to increment packet dropped metric; got %q", resp.Body.String()) + } + }) +} + func Test_packetLooksLike(t *testing.T) { discoPub := key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 30: 30, 31: 31})) nakedDisco := make([]byte, 0, 512) From 8b48f3847d91d9a309b9593dcd17d7fe6aae1291 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 15 Sep 2025 15:49:56 -0700 Subject: [PATCH 0339/1093] net/netmon, wgengine/magicsock: simplify LinkChangeLogLimiter signature Remove the need for the 
caller to hold on to and call an unregister function. Both two callers (one real, one test) already have a context they can use. Use context.AfterFunc instead. There are no observable side effects from scheduling too late if the goroutine doesn't run sync. Updates #17148 Change-Id: Ie697dae0e797494fa8ef27fbafa193bfe5ceb307 Signed-off-by: Brad Fitzpatrick --- net/netmon/loghelper.go | 12 +++++++----- net/netmon/loghelper_test.go | 19 ++++++++++++++----- wgengine/magicsock/magicsock.go | 12 ++++-------- 3 files changed, 25 insertions(+), 18 deletions(-) diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go index 824faeef09b1c..96991644c38b9 100644 --- a/net/netmon/loghelper.go +++ b/net/netmon/loghelper.go @@ -4,6 +4,7 @@ package netmon import ( + "context" "sync" "tailscale.com/types/logger" @@ -12,16 +13,17 @@ import ( // LinkChangeLogLimiter returns a new [logger.Logf] that logs each unique // format string to the underlying logger only once per major LinkChange event. // -// The returned function should be called when the logger is no longer needed, -// to release resources from the Monitor. -func LinkChangeLogLimiter(logf logger.Logf, nm *Monitor) (_ logger.Logf, unregister func()) { +// The logger stops tracking seen format strings when the provided context is +// done. +func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) logger.Logf { var formatSeen sync.Map // map[string]bool - unregister = nm.RegisterChangeCallback(func(cd *ChangeDelta) { + unregister := nm.RegisterChangeCallback(func(cd *ChangeDelta) { // If we're in a major change or a time jump, clear the seen map. if cd.Major || cd.TimeJumped { formatSeen.Clear() } }) + context.AfterFunc(ctx, unregister) return func(format string, args ...any) { // We only store 'true' in the map, so if it's present then it @@ -38,5 +40,5 @@ func LinkChangeLogLimiter(logf logger.Logf, nm *Monitor) (_ logger.Logf, unregis } logf(format, args...) 
- }, unregister + } } diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go index 44aa46783de07..aeac9f03191aa 100644 --- a/net/netmon/loghelper_test.go +++ b/net/netmon/loghelper_test.go @@ -5,13 +5,17 @@ package netmon import ( "bytes" + "context" "fmt" "testing" + "testing/synctest" "tailscale.com/util/eventbus" ) -func TestLinkChangeLogLimiter(t *testing.T) { +func TestLinkChangeLogLimiter(t *testing.T) { synctest.Test(t, syncTestLinkChangeLogLimiter) } + +func syncTestLinkChangeLogLimiter(t *testing.T) { bus := eventbus.New() defer bus.Close() mon, err := New(bus, t.Logf) @@ -30,8 +34,10 @@ func TestLinkChangeLogLimiter(t *testing.T) { fmt.Fprintf(&logBuffer, format, args...) } - logf, unregister := LinkChangeLogLimiter(logf, mon) - defer unregister() + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + logf = LinkChangeLogLimiter(ctx, logf, mon) // Log once, which should write to our log buffer. logf("hello %s", "world") @@ -72,8 +78,11 @@ func TestLinkChangeLogLimiter(t *testing.T) { t.Errorf("unexpected log buffer contents: %q", got) } - // Unregistering the callback should clear our 'cbs' set. - unregister() + // Canceling the context we passed to LinkChangeLogLimiter should + // unregister the callback from the netmon. + cancel() + synctest.Wait() + mon.mu.Lock() if len(mon.cbs) != 0 { t.Errorf("expected no callbacks, got %v", mon.cbs) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index fa1f1f88f6cda..36402122c9448 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -209,10 +209,6 @@ type Conn struct { // port mappings from NAT devices. portMapper *portmapper.Client - // portMapperLogfUnregister is the function to call to unregister - // the portmapper log limiter. - portMapperLogfUnregister func() - // derpRecvCh is used by receiveDERP to read DERP messages. // It must have buffer size > 0; see issue 3736. 
derpRecvCh chan derpReadResult @@ -748,10 +744,13 @@ func NewConn(opts Options) (*Conn, error) { c.subsDoneCh = make(chan struct{}) go c.consumeEventbusTopics() + c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) + c.donec = c.connCtx.Done() + // Don't log the same log messages possibly every few seconds in our // portmapper. portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") - portmapperLogf, c.portMapperLogfUnregister = netmon.LinkChangeLogLimiter(portmapperLogf, opts.NetMon) + portmapperLogf = netmon.LinkChangeLogLimiter(c.connCtx, portmapperLogf, opts.NetMon) portMapOpts := &portmapper.DebugKnobs{ DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() }, } @@ -772,8 +771,6 @@ func NewConn(opts Options) (*Conn, error) { return nil, err } - c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) - c.donec = c.connCtx.Done() c.netChecker = &netcheck.Client{ Logf: logger.WithPrefix(c.logf, "netcheck: "), NetMon: c.netMon, @@ -3330,7 +3327,6 @@ func (c *Conn) Close() error { } c.stopPeriodicReSTUNTimerLocked() c.portMapper.Close() - c.portMapperLogfUnregister() c.peerMap.forEachEndpoint(func(ep *endpoint) { ep.stopAndReset() From 24dd19c9a01235363f20b762fbf3b83a7d488313 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 15 Sep 2025 16:32:12 -0700 Subject: [PATCH 0340/1093] tstest/integration{/testcontrol}: add peer relay integration test (#17103) Updates tailscale/corp#30903 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 26 ++++- tstest/integration/integration.go | 18 ++- tstest/integration/integration_test.go | 103 ++++++++++++++++++ tstest/integration/testcontrol/testcontrol.go | 17 ++- 4 files changed, 155 insertions(+), 9 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index b90a6234508f2..24304e8eccbad 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -6,9 +6,13 @@ package 
relayserver import ( + "log" + "net/netip" + "strings" "sync" "tailscale.com/disco" + "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" @@ -115,6 +119,26 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV e.handleBusLifetimeLocked() } +// overrideAddrs returns TS_DEBUG_RELAY_SERVER_ADDRS as []netip.Addr, if set. It +// can be between 0 and 3 comma-separated Addrs. TS_DEBUG_RELAY_SERVER_ADDRS is +// not a stable interface, and is subject to change. +var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { + all := envknob.String("TS_DEBUG_RELAY_SERVER_ADDRS") + const max = 3 + remain := all + for remain != "" && len(ret) < max { + var s string + s, remain, _ = strings.Cut(remain, ",") + addr, err := netip.ParseAddr(s) + if err != nil { + log.Printf("ignoring invalid Addr %q in TS_DEBUG_RELAY_SERVER_ADDRS %q: %v", s, all, err) + continue + } + ret = append(ret, addr) + } + return +}) + func (e *extension) consumeEventbusTopics(port int) { defer close(e.busDoneCh) @@ -140,7 +164,7 @@ func (e *extension) consumeEventbusTopics(port int) { case req := <-reqSub.Events(): if rs == nil { var err error - rs, err = udprelay.NewServer(e.logf, port, nil) + rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) if err != nil { e.logf("error initializing server: %v", err) continue diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 987bb569a4f66..b28ebaba1fbdc 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -480,11 +480,13 @@ func (lc *LogCatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { // TestEnv contains the test environment (set of servers) used by one // or more nodes. 
type TestEnv struct { - t testing.TB - tunMode bool - cli string - daemon string - loopbackPort *int + t testing.TB + tunMode bool + cli string + daemon string + loopbackPort *int + neverDirectUDP bool + relayServerUseLoopback bool LogCatcher *LogCatcher LogCatcherServer *httptest.Server @@ -842,6 +844,12 @@ func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { if n.env.loopbackPort != nil { cmd.Env = append(cmd.Env, "TS_DEBUG_NETSTACK_LOOPBACK_PORT="+strconv.Itoa(*n.env.loopbackPort)) } + if n.env.neverDirectUDP { + cmd.Env = append(cmd.Env, "TS_DEBUG_NEVER_DIRECT_UDP=1") + } + if n.env.relayServerUseLoopback { + cmd.Env = append(cmd.Env, "TS_DEBUG_RELAY_SERVER_ADDRS=::1,127.0.0.1") + } if version.IsRace() { cmd.Env = append(cmd.Env, "GORACE=halt_on_error=1") } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index de464108c44dd..b282adcf86249 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -44,6 +44,7 @@ import ( "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/must" + "tailscale.com/util/set" ) func TestMain(m *testing.M) { @@ -1530,3 +1531,105 @@ func TestEncryptStateMigration(t *testing.T) { runNode(t, wantPlaintextStateKeys) }) } + +// TestPeerRelayPing creates three nodes with one acting as a peer relay. +// The test succeeds when "tailscale ping" flows through the peer +// relay between all 3 nodes. 
+func TestPeerRelayPing(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl(func(server *testcontrol.Server) { + server.PeerRelayGrants = true + })) + env.neverDirectUDP = true + env.relayServerUseLoopback = true + + n1 := NewTestNode(t, env) + n2 := NewTestNode(t, env) + peerRelay := NewTestNode(t, env) + + allNodes := []*TestNode{n1, n2, peerRelay} + wantPeerRelayServers := make(set.Set[string]) + for _, n := range allNodes { + n.StartDaemon() + n.AwaitResponding() + n.MustUp() + wantPeerRelayServers.Add(n.AwaitIP4().String()) + n.AwaitRunning() + } + + if err := peerRelay.Tailscale("set", "--relay-server-port=0").Run(); err != nil { + t.Fatal(err) + } + + errCh := make(chan error) + for _, a := range allNodes { + go func() { + err := tstest.WaitFor(time.Second*5, func() error { + out, err := a.Tailscale("debug", "peer-relay-servers").CombinedOutput() + if err != nil { + return fmt.Errorf("debug peer-relay-servers failed: %v", err) + } + servers := make([]string, 0) + err = json.Unmarshal(out, &servers) + if err != nil { + return fmt.Errorf("failed to unmarshal debug peer-relay-servers: %v", err) + } + gotPeerRelayServers := make(set.Set[string]) + for _, server := range servers { + gotPeerRelayServers.Add(server) + } + if !gotPeerRelayServers.Equal(wantPeerRelayServers) { + return fmt.Errorf("got peer relay servers: %v want: %v", gotPeerRelayServers, wantPeerRelayServers) + } + return nil + }) + errCh <- err + }() + } + for range allNodes { + err := <-errCh + if err != nil { + t.Fatal(err) + } + } + + pingPairs := make([][2]*TestNode, 0) + for _, a := range allNodes { + for _, z := range allNodes { + if a == z { + continue + } + pingPairs = append(pingPairs, [2]*TestNode{a, z}) + } + } + for _, pair := range pingPairs { + go func() { + a := pair[0] + z := pair[1] + err := tstest.WaitFor(time.Second*10, func() error { + remoteKey := z.MustStatus().Self.PublicKey + if err := a.Tailscale("ping", "--until-direct=false", 
"--c=1", "--timeout=1s", z.AwaitIP4().String()).Run(); err != nil { + return err + } + remotePeer, ok := a.MustStatus().Peer[remoteKey] + if !ok { + return fmt.Errorf("%v->%v remote peer not found", a.MustStatus().Self.ID, z.MustStatus().Self.ID) + } + if len(remotePeer.PeerRelay) == 0 { + return fmt.Errorf("%v->%v not using peer relay, curAddr=%v relay=%v", a.MustStatus().Self.ID, z.MustStatus().Self.ID, remotePeer.CurAddr, remotePeer.Relay) + } + t.Logf("%v->%v using peer relay addr: %v", a.MustStatus().Self.ID, z.MustStatus().Self.ID, remotePeer.PeerRelay) + return nil + }) + errCh <- err + }() + } + for range pingPairs { + err := <-errCh + if err != nil { + t.Fatal(err) + } + } +} diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 2fbf37de9a15e..66d868aca6294 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -55,6 +55,10 @@ type Server struct { MagicDNSDomain string HandleC2N http.Handler // if non-nil, used for /some-c2n-path/ in tests + // PeerRelayGrants, if true, inserts relay capabilities into the wildcard + // grants rules. + PeerRelayGrants bool + // AllNodesSameUser, if true, makes all created nodes // belong to the same user. 
AllNodesSameUser bool @@ -931,14 +935,21 @@ var keepAliveMsg = &struct { KeepAlive: true, } -func packetFilterWithIngressCaps() []tailcfg.FilterRule { +func packetFilterWithIngress(addRelayCaps bool) []tailcfg.FilterRule { out := slices.Clone(tailcfg.FilterAllowAll) + caps := []tailcfg.PeerCapability{ + tailcfg.PeerCapabilityIngress, + } + if addRelayCaps { + caps = append(caps, tailcfg.PeerCapabilityRelay) + caps = append(caps, tailcfg.PeerCapabilityRelayTarget) + } out = append(out, tailcfg.FilterRule{ SrcIPs: []string{"*"}, CapGrant: []tailcfg.CapGrant{ { Dsts: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}, - Caps: []tailcfg.PeerCapability{tailcfg.PeerCapabilityIngress}, + Caps: caps, }, }, }) @@ -977,7 +988,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, DERPMap: s.DERPMap, Domain: domain, CollectServices: "true", - PacketFilter: packetFilterWithIngressCaps(), + PacketFilter: packetFilterWithIngress(s.PeerRelayGrants), DNSConfig: dns, ControlTime: &t, } From 5ad3bd9f47ab631bf10d6f480e7c22850e7c024f Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Mon, 15 Sep 2025 19:45:07 -0400 Subject: [PATCH 0341/1093] flake.nix: fix go version (#17152) Bump to 1.25.1 to match go.mod Fixes #17150 Signed-off-by: Mike O'Driscoll --- flake.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 8cb5e078e11e2..8f1fe026d2d9c 100644 --- a/flake.nix +++ b/flake.nix @@ -46,8 +46,8 @@ systems, flake-compat, }: let - go125Version = "1.25.0"; - goHash = "sha256-S9AekSlyB7+kUOpA1NWpOxtTGl5DhHOyoG4Y4HciciU="; + go125Version = "1.25.1"; + goHash = "sha256-0BDBCc7pTYDv5oHqtGvepJGskGv0ZYPDLp8NuwvRpZQ="; eachSystem = f: nixpkgs.lib.genAttrs (import systems) (system: f (import nixpkgs { From 5b5ae2b2eea44f30ea4afe78f2176d1b3fcd4809 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Tue, 16 Sep 2025 07:44:08 -0700 Subject: [PATCH 0342/1093] util/eventbus: add a Done channel to the Client (#17118) Subscribers already have a Done channel that the caller can use to detect when the subscriber has been closed. Typically this happens when the governing Client closes, which in turn is typically because the Bus closed. But clients and subscribers can stop at other times too, and a caller has no good way to tell the difference between "this subscriber closed but the rest are OK" and "the client closed and all these subscribers are finished". We've worked around this in practice by knowing the closure of one subscriber implies the fate of the rest, but we can do better: Add a Done method to the Client that allows us to tell when that has been closed explicitly, after all the publishers and subscribers associated with that client have been closed. This allows the caller to be sure that, by the time that occurs, no further pending events are forthcoming on that client. Updates #15160 Change-Id: Id601a79ba043365ecdb47dd035f1fdadd984f303 Signed-off-by: M. J. Fromberger --- util/eventbus/bus_test.go | 45 ++++++++++++++++++++++++++++++++++++++- util/eventbus/client.go | 13 ++++++++--- 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index e159b6a12608a..9fd0e440948e1 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -27,7 +27,16 @@ func TestBus(t *testing.T) { defer b.Close() c := b.Client("TestSub") - defer c.Close() + cdone := c.Done() + defer func() { + c.Close() + select { + case <-cdone: + t.Log("Client close signal received (OK)") + case <-time.After(time.Second): + t.Error("timed out waiting for client close signal") + } + }() s := eventbus.Subscribe[EventA](c) go func() { @@ -178,6 +187,40 @@ func TestSpam(t *testing.T) { // subsequences of the received slices. 
} +func TestClient_Done(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client(t.Name()) + s := eventbus.Subscribe[string](c) + + // The client is not Done until closed. + select { + case <-c.Done(): + t.Fatal("Client done before being closed") + default: + // OK + } + + go c.Close() + + // Once closed, the client becomes Done. + select { + case <-c.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Client to be done") + } + + // Thereafter, the subscriber should also be closed. + select { + case <-s.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timoeout waiting for Subscriber to be done") + } +} + type queueChecker struct { t *testing.T want []any diff --git a/util/eventbus/client.go b/util/eventbus/client.go index a6266a4d8f823..176b6f2bc8e60 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -21,9 +21,10 @@ type Client struct { bus *Bus publishDebug hook[PublishedEvent] - mu sync.Mutex - pub set.Set[publisher] - sub *subscribeState // Lazily created on first subscribe + mu sync.Mutex + pub set.Set[publisher] + sub *subscribeState // Lazily created on first subscribe + stop stopFlag // signaled on Close } func (c *Client) Name() string { return c.name } @@ -47,8 +48,14 @@ func (c *Client) Close() { for p := range pub { p.Close() } + c.stop.Stop() } +// Done returns a channel that is closed when [Client.Close] is called. +// The channel is closed after all the publishers and subscribers governed by +// the client have been closed. 
+func (c *Client) Done() <-chan struct{} { return c.stop.Done() } + func (c *Client) snapshotSubscribeQueue() []DeliveredEvent { return c.peekSubscribeState().snapshotQueue() } From 4cca9f7c673f0a3b027b28170bd218520875ea4c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 13 Sep 2025 20:20:08 -0700 Subject: [PATCH 0343/1093] all: add ts_omit_serve, start making tailscale serve/funnel be modular tailscaled tailscale combined (linux/amd64) 29853147 17384418 31412596 omitting everything + 621570 + 219277 + 554256 .. add serve Updates #17128 Change-Id: I87c2c6c3d3fc2dc026c3de8ef7000a813b41d31c Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 40 ---- client/local/serve.go | 55 +++++ cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/cli/cli.go | 6 +- cmd/tailscale/cli/funnel.go | 45 ++++ cmd/tailscale/cli/serve_legacy.go | 6 + cmd/tailscale/cli/serve_v2.go | 2 + cmd/tailscale/cli/status.go | 41 +--- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + ipn/ipnlocal/c2n.go | 13 -- ipn/ipnlocal/local.go | 277 ++++--------------------- ipn/ipnlocal/peerapi.go | 69 +------ ipn/ipnlocal/serve.go | 332 ++++++++++++++++++++++++++++++ ipn/ipnlocal/serve_disabled.go | 34 +++ ipn/ipnlocal/serve_test.go | 2 + ipn/localapi/localapi.go | 86 -------- ipn/localapi/serve.go | 108 ++++++++++ tsnet/depaware.txt | 1 + wgengine/netstack/netstack.go | 20 +- 21 files changed, 651 insertions(+), 491 deletions(-) create mode 100644 client/local/serve.go create mode 100644 ipn/ipnlocal/serve_disabled.go create mode 100644 ipn/localapi/serve.go diff --git a/client/local/local.go b/client/local/local.go index 03ca10bb75a4b..32e8208da2fed 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -1217,20 +1217,6 @@ func (lc *Client) Ping(ctx context.Context, ip netip.Addr, pingtype tailcfg.Ping return lc.PingWithOpts(ctx, ip, pingtype, PingOpts{}) } -// SetServeConfig sets or replaces the serving settings. 
-// If config is nil, settings are cleared and serving is disabled. -func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { - h := make(http.Header) - if config != nil { - h.Set("If-Match", config.ETag) - } - _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config), h) - if err != nil { - return fmt.Errorf("sending serve config: %w", err) - } - return nil -} - // DisconnectControl shuts down all connections to control, thus making control consider this node inactive. This can be // run on HA subnet router or app connector replicas before shutting them down to ensure peers get told to switch over // to another replica whilst there is still some grace period for the existing connections to terminate. @@ -1242,32 +1228,6 @@ func (lc *Client) DisconnectControl(ctx context.Context) error { return nil } -// GetServeConfig return the current serve config. -// -// If the serve config is empty, it returns (nil, nil). -func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { - body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil) - if err != nil { - return nil, fmt.Errorf("getting serve config: %w", err) - } - sc, err := getServeConfigFromJSON(body) - if err != nil { - return nil, err - } - if sc == nil { - sc = new(ipn.ServeConfig) - } - sc.ETag = h.Get("Etag") - return sc, nil -} - -func getServeConfigFromJSON(body []byte) (sc *ipn.ServeConfig, err error) { - if err := json.Unmarshal(body, &sc); err != nil { - return nil, err - } - return sc, nil -} - // tailscaledConnectHint gives a little thing about why tailscaled (or // platform equivalent) is not answering localapi connections. 
// diff --git a/client/local/serve.go b/client/local/serve.go new file mode 100644 index 0000000000000..51d15e7e5439b --- /dev/null +++ b/client/local/serve.go @@ -0,0 +1,55 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package local + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "tailscale.com/ipn" +) + +// GetServeConfig return the current serve config. +// +// If the serve config is empty, it returns (nil, nil). +func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { + body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil) + if err != nil { + return nil, fmt.Errorf("getting serve config: %w", err) + } + sc, err := getServeConfigFromJSON(body) + if err != nil { + return nil, err + } + if sc == nil { + sc = new(ipn.ServeConfig) + } + sc.ETag = h.Get("Etag") + return sc, nil +} + +func getServeConfigFromJSON(body []byte) (sc *ipn.ServeConfig, err error) { + if err := json.Unmarshal(body, &sc); err != nil { + return nil, err + } + return sc, nil +} + +// SetServeConfig sets or replaces the serving settings. +// If config is nil, settings are cleared and serving is disabled. 
+func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { + h := make(http.Header) + if config != nil { + h.Set("If-Match", config.ETag) + } + _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config), h) + if err != nil { + return fmt.Errorf("sending serve config: %w", err) + } + return nil +} diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d94b5b6cf52f7..87bae60c89b34 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -798,6 +798,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index ef0dc98209237..d039be607b6a2 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -213,6 +213,8 @@ var ( maybeWebCmd, maybeDriveCmd, maybeNetlockCmd, + maybeFunnelCmd, + maybeServeCmd, _ func() *ffcli.Command ) @@ -254,8 +256,8 @@ change in the future. 
pingCmd, ncCmd, sshCmd, - funnelCmd(), - serveCmd(), + nilOrCall(maybeFunnelCmd), + nilOrCall(maybeServeCmd), versionCmd, nilOrCall(maybeWebCmd), nilOrCall(fileCmd), diff --git a/cmd/tailscale/cli/funnel.go b/cmd/tailscale/cli/funnel.go index f4a1c6bfdb3b8..34b0c74c23949 100644 --- a/cmd/tailscale/cli/funnel.go +++ b/cmd/tailscale/cli/funnel.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( @@ -16,6 +18,10 @@ import ( "tailscale.com/tailcfg" ) +func init() { + maybeFunnelCmd = funnelCmd +} + var funnelCmd = func() *ffcli.Command { se := &serveEnv{lc: &localClient} // previously used to serve legacy newFunnelCommand unless useWIPCode is true @@ -174,3 +180,42 @@ func printFunnelWarning(sc *ipn.ServeConfig) { fmt.Fprintf(Stderr, " run: `tailscale serve --help` to see how to configure handlers\n") } } + +func init() { + hookPrintFunnelStatus.Set(printFunnelStatus) +} + +// printFunnelStatus prints the status of the funnel, if it's running. +// It prints nothing if the funnel is not running. 
+func printFunnelStatus(ctx context.Context) { + sc, err := localClient.GetServeConfig(ctx) + if err != nil { + outln() + printf("# Funnel:\n") + printf("# - Unable to get Funnel status: %v\n", err) + return + } + if !sc.IsFunnelOn() { + return + } + outln() + printf("# Funnel on:\n") + for hp, on := range sc.AllowFunnel { + if !on { // if present, should be on + continue + } + sni, portStr, _ := net.SplitHostPort(string(hp)) + p, _ := strconv.ParseUint(portStr, 10, 16) + isTCP := sc.IsTCPForwardingOnPort(uint16(p), noService) + url := "https://" + if isTCP { + url = "tcp://" + } + url += sni + if isTCP || p != 443 { + url += ":" + portStr + } + printf("# - %s\n", url) + } + outln() +} diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 3fbddeabf8d4e..b60e9833bc86f 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( @@ -31,6 +33,10 @@ import ( "tailscale.com/version" ) +func init() { + maybeServeCmd = serveCmd +} + var serveCmd = func() *ffcli.Command { se := &serveEnv{lc: &localClient} // previously used to serve legacy newFunnelCommand unless useWIPCode is true diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 903036db4a6e7..058d80649fd3a 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 726606109aa15..97f6708db675a 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -15,12 +15,12 @@ import ( "net/http" "net/netip" "os" - "strconv" "strings" "github.com/peterbourgon/ff/v3/ffcli" "github.com/toqueteos/webbrowser" "golang.org/x/net/idna" + 
"tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netmon" @@ -238,44 +238,13 @@ func runStatus(ctx context.Context, args []string) error { outln() printHealth() } - printFunnelStatus(ctx) + if f, ok := hookPrintFunnelStatus.GetOk(); ok { + f(ctx) + } return nil } -// printFunnelStatus prints the status of the funnel, if it's running. -// It prints nothing if the funnel is not running. -func printFunnelStatus(ctx context.Context) { - sc, err := localClient.GetServeConfig(ctx) - if err != nil { - outln() - printf("# Funnel:\n") - printf("# - Unable to get Funnel status: %v\n", err) - return - } - if !sc.IsFunnelOn() { - return - } - outln() - printf("# Funnel on:\n") - for hp, on := range sc.AllowFunnel { - if !on { // if present, should be on - continue - } - sni, portStr, _ := net.SplitHostPort(string(hp)) - p, _ := strconv.ParseUint(portStr, 10, 16) - isTCP := sc.IsTCPForwardingOnPort(uint16(p), noService) - url := "https://" - if isTCP { - url = "tcp://" - } - url += sni - if isTCP || p != 443 { - url += ":" + portStr - } - printf("# - %s\n", url) - } - outln() -} +var hookPrintFunnelStatus feature.Hook[func(context.Context)] // isRunningOrStarting reports whether st is in state Running or Starting. // It also returns a description of the status suitable to display to a user. 
diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index a983f1c09f0bf..a39363353f4c7 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -104,7 +104,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web - tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature from tailscale.com/tsweb+ tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a0842b45bec60..736c268dcdb02 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -272,6 +272,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/drive from tailscale.com/feature/condregister diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index cfe44d1dc1934..c9cd12d4118e8 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -239,6 +239,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ 
tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 2c13f06198455..b5f50f3bccc70 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -72,9 +72,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // Linux netfilter. req("POST /netfilter-kind"): handleC2NSetNetfilterKind, - - // VIP services. - req("GET /vip-services"): handleC2NVIPServicesGet, } // RegisterC2N registers a new c2n handler for the given pattern. @@ -280,16 +277,6 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R w.WriteHeader(http.StatusNoContent) } -func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /vip-services received") - var res tailcfg.C2NVIPServicesResponse - res.VIPServices = b.VIPServices() - res.ServicesHash = b.vipServiceHash(res.VIPServices) - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { b.logf("c2n: GET /update received") diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c98a0810d2cd2..6d92e58d0c111 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -18,7 +18,6 @@ import ( "fmt" "io" "log" - "maps" "math" "math/rand/v2" "net" @@ -53,6 +52,7 @@ import ( "tailscale.com/envknob" "tailscale.com/envknob/featureknob" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -585,7 +585,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.e.SetJailedFilter(noneFilter) b.setTCPPortsIntercepted(nil) - b.setVIPServicesTCPPortsIntercepted(nil) b.statusChanged = sync.NewCond(&b.statusLock) 
b.e.SetStatusCallback(b.setWgengineStatus) @@ -3759,46 +3758,6 @@ func generateInterceptVIPServicesTCPPortFunc(svcAddrPorts map[netip.Addr]func(ui } } -// setVIPServicesTCPPortsIntercepted populates b.shouldInterceptVIPServicesTCPPortAtomic with an -// efficient func for ShouldInterceptTCPPort to use, which is called on every incoming packet. -func (b *LocalBackend) setVIPServicesTCPPortsIntercepted(svcPorts map[tailcfg.ServiceName][]uint16) { - b.mu.Lock() - defer b.mu.Unlock() - b.setVIPServicesTCPPortsInterceptedLocked(svcPorts) -} - -func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tailcfg.ServiceName][]uint16) { - if len(svcPorts) == 0 { - b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) - return - } - nm := b.currentNode().NetMap() - if nm == nil { - b.logf("can't set intercept function for Service TCP Ports, netMap is nil") - return - } - vipServiceIPMap := nm.GetVIPServiceIPMap() - if len(vipServiceIPMap) == 0 { - // No approved VIP Services - return - } - - svcAddrPorts := make(map[netip.Addr]func(uint16) bool) - // Only set the intercept function if the service has been assigned a VIP. - for svcName, ports := range svcPorts { - addrs, ok := vipServiceIPMap[svcName] - if !ok { - continue - } - interceptFn := generateInterceptTCPPortFunc(ports) - for _, addr := range addrs { - svcAddrPorts[addr] = interceptFn - } - } - - b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) -} - // setAtomicValuesFromPrefsLocked populates sshAtomicBool, containsViaIPFuncAtomic, // shouldInterceptTCPPortAtomic, and exposeRemoteWebClientAtomicBool from the prefs p, // which may be !Valid(). 
@@ -3809,7 +3768,9 @@ func (b *LocalBackend) setAtomicValuesFromPrefsLocked(p ipn.PrefsView) { if !p.Valid() { b.containsViaIPFuncAtomic.Store(ipset.FalseContainsIPFunc()) b.setTCPPortsIntercepted(nil) - b.setVIPServicesTCPPortsInterceptedLocked(nil) + if f, ok := hookServeClearVIPServicesTCPPortsInterceptedLocked.GetOk(); ok { + f(b) + } b.lastServeConfJSON = mem.B(nil) b.serveConfig = ipn.ServeConfigView{} } else { @@ -4738,32 +4699,6 @@ func (b *LocalBackend) checkProfileNameLocked(p *ipn.Prefs) error { return nil } -// wantIngressLocked reports whether this node has ingress configured. This bool -// is sent to the coordination server (in Hostinfo.WireIngress) as an -// optimization hint to know primarily which nodes are NOT using ingress, to -// avoid doing work for regular nodes. -// -// Even if the user's ServeConfig.AllowFunnel map was manually edited in raw -// mode and contains map entries with false values, sending true (from Len > 0) -// is still fine. This is only an optimization hint for the control plane and -// doesn't affect security or correctness. And we also don't expect people to -// modify their ServeConfig in raw mode. -func (b *LocalBackend) wantIngressLocked() bool { - return b.serveConfig.Valid() && b.serveConfig.HasAllowFunnel() -} - -// hasIngressEnabledLocked reports whether the node has any funnel endpoint enabled. This bool is sent to control (in -// Hostinfo.IngressEnabled) to determine whether 'Funnel' badge should be displayed on this node in the admin panel. -func (b *LocalBackend) hasIngressEnabledLocked() bool { - return b.serveConfig.Valid() && b.serveConfig.IsFunnelOn() -} - -// shouldWireInactiveIngressLocked reports whether the node is in a state where funnel is not actively enabled, but it -// seems that it is intended to be used with funnel. 
-func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { - return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() -} - // setPrefsLockedOnEntry requires b.mu be held to call it, but it // unlocks b.mu when done. newp ownership passes to this function. // It returns a read-only copy of the new prefs. @@ -4907,6 +4842,16 @@ var ( magicDNSIPv6 = tsaddr.TailscaleServiceIPv6() ) +// Hook exclusively for serve. +var ( + hookServeTCPHandlerForVIPService feature.Hook[func(b *LocalBackend, dst netip.AddrPort, src netip.AddrPort) (handler func(c net.Conn) error)] + hookTCPHandlerForServe feature.Hook[func(b *LocalBackend, dport uint16, srcAddr netip.AddrPort, f *funnelFlow) (handler func(net.Conn) error)] + hookServeUpdateServeTCPPortNetMapAddrListenersLocked feature.Hook[func(b *LocalBackend, ports []uint16)] + + hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked feature.Hook[func(b *LocalBackend, prefs ipn.PrefsView) (handlePorts []uint16)] + hookServeClearVIPServicesTCPPortsInterceptedLocked feature.Hook[func(*LocalBackend)] +) + // TCPHandlerForDst returns a TCP handler for connections to dst, or nil if // no handler is needed. It also returns a list of TCP socket options to // apply to the socket before calling the handler. @@ -4929,10 +4874,10 @@ func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c } } - // TODO(tailscale/corp#26001): Get handler for VIP services and Local IPs using - // the same function. - if handler := b.tcpHandlerForVIPService(dst, src); handler != nil { - return handler, opts + if f, ok := hookServeTCPHandlerForVIPService.GetOk(); ok { + if handler := f(b, dst, src); handler != nil { + return handler, opts + } } // Then handle external connections to the local IP. 
if !b.isLocalIP(dst.Addr()) { @@ -4958,8 +4903,10 @@ func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c return nil }, opts } - if handler := b.tcpHandlerForServe(dst.Port(), src, nil); handler != nil { - return handler, opts + if f, ok := hookTCPHandlerForServe.GetOk(); ok { + if handler := f(b, dst.Port(), src, nil); handler != nil { + return handler, opts + } } return nil, nil } @@ -6341,7 +6288,9 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { netns.SetDisableBindConnToInterface(nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface)) b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) - b.ipVIPServiceMap = nm.GetIPVIPServiceMap() + if buildfeatures.HasServe { + b.ipVIPServiceMap = nm.GetIPVIPServiceMap() + } if !oldSelf.Equal(nm.SelfNodeOrZero()) { for _, f := range b.extHost.Hooks().OnSelfChange { @@ -6411,55 +6360,12 @@ func (b *LocalBackend) setDebugLogsByCapabilityLocked(nm *netmap.NetworkMap) { } } -// reloadServeConfigLocked reloads the serve config from the store or resets the -// serve config to nil if not logged in. The "changed" parameter, when false, instructs -// the method to only run the reset-logic and not reload the store from memory to ensure -// foreground sessions are not removed if they are not saved on disk. -func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { - if !b.currentNode().Self().Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { - // We're not logged in, so we don't have a profile. - // Don't try to load the serve config. - b.lastServeConfJSON = mem.B(nil) - b.serveConfig = ipn.ServeConfigView{} - return - } - - confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID()) - // TODO(maisem,bradfitz): prevent reading the config from disk - // if the profile has not changed. 
- confj, err := b.store.ReadState(confKey) - if err != nil { - b.lastServeConfJSON = mem.B(nil) - b.serveConfig = ipn.ServeConfigView{} - return - } - if b.lastServeConfJSON.Equal(mem.B(confj)) { - return - } - b.lastServeConfJSON = mem.B(confj) - var conf ipn.ServeConfig - if err := json.Unmarshal(confj, &conf); err != nil { - b.logf("invalid ServeConfig %q in StateStore: %v", confKey, err) - b.serveConfig = ipn.ServeConfigView{} - return - } - - // remove inactive sessions - maps.DeleteFunc(conf.Foreground, func(sessionID string, sc *ipn.ServeConfig) bool { - _, ok := b.notifyWatchers[sessionID] - return !ok - }) - - b.serveConfig = conf.View() -} - // setTCPPortsInterceptedFromNetmapAndPrefsLocked calls setTCPPortsIntercepted with // the ports that tailscaled should handle as a function of b.netMap and b.prefs. // // b.mu must be held. func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn.PrefsView) { handlePorts := make([]uint16, 0, 4) - var vipServicesPorts map[tailcfg.ServiceName][]uint16 if prefs.Valid() && prefs.RunSSH() && envknob.CanSSHD() { handlePorts = append(handlePorts, 22) @@ -6473,42 +6379,14 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. } } - b.reloadServeConfigLocked(prefs) - if b.serveConfig.Valid() { - servePorts := make([]uint16, 0, 3) - for port := range b.serveConfig.TCPs() { - if port > 0 { - servePorts = append(servePorts, uint16(port)) - } - } - handlePorts = append(handlePorts, servePorts...) 
- - for svc, cfg := range b.serveConfig.Services().All() { - servicePorts := make([]uint16, 0, 3) - for port := range cfg.TCP().All() { - if port > 0 { - servicePorts = append(servicePorts, uint16(port)) - } - } - if _, ok := vipServicesPorts[svc]; !ok { - mak.Set(&vipServicesPorts, svc, servicePorts) - } else { - mak.Set(&vipServicesPorts, svc, append(vipServicesPorts[svc], servicePorts...)) - } - } - - b.setServeProxyHandlersLocked() - - // don't listen on netmap addresses if we're in userspace mode - if !b.sys.IsNetstack() { - b.updateServeTCPPortNetMapAddrListenersLocked(servePorts) - } + if f, ok := hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked.GetOk(); ok { + v := f(b, prefs) + handlePorts = append(handlePorts, v...) } // Update funnel and service hash info in hostinfo and kick off control update if needed. b.updateIngressAndServiceHashLocked(prefs) b.setTCPPortsIntercepted(handlePorts) - b.setVIPServicesTCPPortsInterceptedLocked(vipServicesPorts) } // updateIngressAndServiceHashLocked updates the hostinfo.ServicesHash, hostinfo.WireIngress and @@ -6541,51 +6419,6 @@ func (b *LocalBackend) updateIngressAndServiceHashLocked(prefs ipn.PrefsView) { } } -// setServeProxyHandlersLocked ensures there is an http proxy handler for each -// backend specified in serveConfig. It expects serveConfig to be valid and -// up-to-date, so should be called after reloadServeConfigLocked. -func (b *LocalBackend) setServeProxyHandlersLocked() { - if !b.serveConfig.Valid() { - return - } - var backends map[string]bool - for _, conf := range b.serveConfig.Webs() { - for _, h := range conf.Handlers().All() { - backend := h.Proxy() - if backend == "" { - // Only create proxy handlers for servers with a proxy backend. 
- continue - } - mak.Set(&backends, backend, true) - if _, ok := b.serveProxyHandlers.Load(backend); ok { - continue - } - - b.logf("serve: creating a new proxy handler for %s", backend) - p, err := b.proxyHandlerForBackend(backend) - if err != nil { - // The backend endpoint (h.Proxy) should have been validated by expandProxyTarget - // in the CLI, so just log the error here. - b.logf("[unexpected] could not create proxy for %v: %s", backend, err) - continue - } - b.serveProxyHandlers.Store(backend, p) - } - } - - // Clean up handlers for proxy backends that are no longer present - // in configuration. - b.serveProxyHandlers.Range(func(key, value any) bool { - backend := key.(string) - if !backends[backend] { - b.logf("serve: closing idle connections to %s", backend) - b.serveProxyHandlers.Delete(backend) - value.(*reverseProxy).close() - } - return true - }) -} - // operatorUserName returns the current pref's OperatorUser's name, or the // empty string if none. func (b *LocalBackend) operatorUserName() string { @@ -7196,7 +7029,14 @@ func (b *LocalBackend) ShouldInterceptTCPPort(port uint16) bool { // ShouldInterceptVIPServiceTCPPort reports whether the given TCP port number // to a VIP service should be intercepted by Tailscaled and handled in-process. func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool { - return b.shouldInterceptVIPServicesTCPPortAtomic.Load()(ap) + if !buildfeatures.HasServe { + return false + } + f := b.shouldInterceptVIPServicesTCPPortAtomic.Load() + if f == nil { + return false + } + return f(ap) } // SwitchProfile switches to the profile with the given id. @@ -8131,15 +7971,6 @@ func maybeUsernameOf(actor ipnauth.Actor) string { return username } -// VIPServices returns the list of tailnet services that this node -// is serving as a destination for. -// The returned memory is owned by the caller. 
-func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { - b.mu.Lock() - defer b.mu.Unlock() - return b.vipServicesFromPrefsLocked(b.pm.CurrentPrefs()) -} - func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { if len(services) == 0 { return "" @@ -8153,39 +7984,9 @@ func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { return hex.EncodeToString(hash[:]) } -func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { - // keyed by service name - var services map[tailcfg.ServiceName]*tailcfg.VIPService - if b.serveConfig.Valid() { - for svc, config := range b.serveConfig.Services().All() { - mak.Set(&services, svc, &tailcfg.VIPService{ - Name: svc, - Ports: config.ServicePortRange(), - }) - } - } - - for _, s := range prefs.AdvertiseServices().All() { - sn := tailcfg.ServiceName(s) - if services == nil || services[sn] == nil { - mak.Set(&services, sn, &tailcfg.VIPService{ - Name: sn, - }) - } - services[sn].Active = true - } - - servicesList := slicesx.MapValues(services) - // [slicesx.MapValues] provides the values in an indeterminate order, but since we'll - // be hashing a representation of this list later we want it to be in a consistent - // order. 
- slices.SortFunc(servicesList, func(a, b *tailcfg.VIPService) int { - return strings.Compare(a.Name.String(), b.Name.String()) - }) - return servicesList -} - -var metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") +var ( + metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") +) func (b *LocalBackend) stateEncrypted() opt.Bool { switch runtime.GOOS { diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 23c349087caf8..886a7129120b8 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -28,7 +28,6 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/hostinfo" - "tailscale.com/ipn" "tailscale.com/net/netaddr" "tailscale.com/net/netmon" "tailscale.com/net/netutil" @@ -387,10 +386,6 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { case "/v0/sockstats": h.handleServeSockStats(w, r) return - case "/v0/ingress": - metricIngressCalls.Add(1) - h.handleServeIngress(w, r) - return } if ph, ok := peerAPIHandlers[r.URL.Path]; ok { ph(h, w, r) @@ -413,67 +408,6 @@ This is my Tailscale device. Your device is %v. 
} } -func (h *peerAPIHandler) handleServeIngress(w http.ResponseWriter, r *http.Request) { - // http.Errors only useful if hitting endpoint manually - // otherwise rely on log lines when debugging ingress connections - // as connection is hijacked for bidi and is encrypted tls - if !h.canIngress() { - h.logf("ingress: denied; no ingress cap from %v", h.remoteAddr) - http.Error(w, "denied; no ingress cap", http.StatusForbidden) - return - } - logAndError := func(code int, publicMsg string) { - h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg) - http.Error(w, publicMsg, code) - } - bad := func(publicMsg string) { - logAndError(http.StatusBadRequest, publicMsg) - } - if r.Method != "POST" { - logAndError(http.StatusMethodNotAllowed, "only POST allowed") - return - } - srcAddrStr := r.Header.Get("Tailscale-Ingress-Src") - if srcAddrStr == "" { - bad("Tailscale-Ingress-Src header not set") - return - } - srcAddr, err := netip.ParseAddrPort(srcAddrStr) - if err != nil { - bad("Tailscale-Ingress-Src header invalid; want ip:port") - return - } - target := ipn.HostPort(r.Header.Get("Tailscale-Ingress-Target")) - if target == "" { - bad("Tailscale-Ingress-Target header not set") - return - } - if _, _, err := net.SplitHostPort(string(target)); err != nil { - bad("Tailscale-Ingress-Target header invalid; want host:port") - return - } - - getConnOrReset := func() (net.Conn, bool) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - h.logf("ingress: failed hijacking conn") - http.Error(w, "failed hijacking conn", http.StatusInternalServerError) - return nil, false - } - io.WriteString(conn, "HTTP/1.1 101 Switching Protocols\r\n\r\n") - return &ipn.FunnelConn{ - Conn: conn, - Src: srcAddr, - Target: target, - }, true - } - sendRST := func() { - http.Error(w, "denied", http.StatusForbidden) - } - - h.ps.b.HandleIngressTCPConn(h.peerNode, target, srcAddr, getConnOrReset, sendRST) -} - func (h *peerAPIHandler) handleServeInterfaces(w 
http.ResponseWriter, r *http.Request) { if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) @@ -1099,6 +1033,5 @@ var ( metricInvalidRequests = clientmetric.NewCounter("peerapi_invalid_requests") // Non-debug PeerAPI endpoints. - metricDNSCalls = clientmetric.NewCounter("peerapi_dns") - metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") + metricDNSCalls = clientmetric.NewCounter("peerapi_dns") ) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 36738b88119f5..cbf84fb29a1d5 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -1,6 +1,10 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + +// TODO: move this whole file to its own package, out of ipnlocal. + package ipnlocal import ( @@ -12,6 +16,7 @@ import ( "errors" "fmt" "io" + "maps" "mime" "net" "net/http" @@ -28,6 +33,7 @@ import ( "time" "unicode/utf8" + "go4.org/mem" "golang.org/x/net/http2" "tailscale.com/ipn" "tailscale.com/logtail/backoff" @@ -36,11 +42,26 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/util/clientmetric" "tailscale.com/util/ctxkey" "tailscale.com/util/mak" + "tailscale.com/util/slicesx" "tailscale.com/version" ) +func init() { + hookServeTCPHandlerForVIPService.Set((*LocalBackend).tcpHandlerForVIPService) + hookTCPHandlerForServe.Set((*LocalBackend).tcpHandlerForServe) + hookServeUpdateServeTCPPortNetMapAddrListenersLocked.Set((*LocalBackend).updateServeTCPPortNetMapAddrListenersLocked) + + hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked.Set(serveSetTCPPortsInterceptedFromNetmapAndPrefsLocked) + hookServeClearVIPServicesTCPPortsInterceptedLocked.Set(func(b *LocalBackend) { + b.setVIPServicesTCPPortsInterceptedLocked(nil) + }) + + RegisterC2N("GET /vip-services", handleC2NVIPServicesGet) +} + const ( contentTypeHeader = "Content-Type" grpcBaseContentType = "application/grpc" @@ -222,6 
+243,10 @@ func (s *localListener) handleListenersAccept(ln net.Listener) error { // // b.mu must be held. func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint16) { + if b.sys.IsNetstack() { + // don't listen on netmap addresses if we're in userspace mode + return + } // close existing listeners where port // is no longer in incoming ports list for ap, sl := range b.serveListeners { @@ -439,6 +464,38 @@ func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target handler(c) } +func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { + // keyed by service name + var services map[tailcfg.ServiceName]*tailcfg.VIPService + if b.serveConfig.Valid() { + for svc, config := range b.serveConfig.Services().All() { + mak.Set(&services, svc, &tailcfg.VIPService{ + Name: svc, + Ports: config.ServicePortRange(), + }) + } + } + + for _, s := range prefs.AdvertiseServices().All() { + sn := tailcfg.ServiceName(s) + if services == nil || services[sn] == nil { + mak.Set(&services, sn, &tailcfg.VIPService{ + Name: sn, + }) + } + services[sn].Active = true + } + + servicesList := slicesx.MapValues(services) + // [slicesx.MapValues] provides the values in an indeterminate order, but since we'll + // be hashing a representation of this list later we want it to be in a consistent + // order. + slices.SortFunc(servicesList, func(a, b *tailcfg.VIPService) int { + return strings.Compare(a.Name.String(), b.Name.String()) + }) + return servicesList +} + // tcpHandlerForVIPService returns a handler for a TCP connection to a VIP service // that is being served via the ipn.ServeConfig. It returns nil if the destination // address is not a VIP service or if the VIP service does not have a TCP handler set. 
@@ -1046,3 +1103,278 @@ func (b *LocalBackend) getTLSServeCertForPort(port uint16, forVIPService tailcfg return &cert, nil } } + +// setServeProxyHandlersLocked ensures there is an http proxy handler for each +// backend specified in serveConfig. It expects serveConfig to be valid and +// up-to-date, so should be called after reloadServeConfigLocked. +func (b *LocalBackend) setServeProxyHandlersLocked() { + if !b.serveConfig.Valid() { + return + } + var backends map[string]bool + for _, conf := range b.serveConfig.Webs() { + for _, h := range conf.Handlers().All() { + backend := h.Proxy() + if backend == "" { + // Only create proxy handlers for servers with a proxy backend. + continue + } + mak.Set(&backends, backend, true) + if _, ok := b.serveProxyHandlers.Load(backend); ok { + continue + } + + b.logf("serve: creating a new proxy handler for %s", backend) + p, err := b.proxyHandlerForBackend(backend) + if err != nil { + // The backend endpoint (h.Proxy) should have been validated by expandProxyTarget + // in the CLI, so just log the error here. + b.logf("[unexpected] could not create proxy for %v: %s", backend, err) + continue + } + b.serveProxyHandlers.Store(backend, p) + } + } + + // Clean up handlers for proxy backends that are no longer present + // in configuration. + b.serveProxyHandlers.Range(func(key, value any) bool { + backend := key.(string) + if !backends[backend] { + b.logf("serve: closing idle connections to %s", backend) + b.serveProxyHandlers.Delete(backend) + value.(*reverseProxy).close() + } + return true + }) +} + +// VIPServices returns the list of tailnet services that this node +// is serving as a destination for. +// The returned memory is owned by the caller. 
+func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { + b.mu.Lock() + defer b.mu.Unlock() + return b.vipServicesFromPrefsLocked(b.pm.CurrentPrefs()) +} + +func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + b.logf("c2n: GET /vip-services received") + var res tailcfg.C2NVIPServicesResponse + res.VIPServices = b.VIPServices() + res.ServicesHash = b.vipServiceHash(res.VIPServices) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +var metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") + +func init() { + RegisterPeerAPIHandler("/v0/ingress", handleServeIngress) + +} + +func handleServeIngress(ph PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + h := ph.(*peerAPIHandler) + metricIngressCalls.Add(1) + + // http.Errors only useful if hitting endpoint manually + // otherwise rely on log lines when debugging ingress connections + // as connection is hijacked for bidi and is encrypted tls + if !h.canIngress() { + h.logf("ingress: denied; no ingress cap from %v", h.remoteAddr) + http.Error(w, "denied; no ingress cap", http.StatusForbidden) + return + } + logAndError := func(code int, publicMsg string) { + h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg) + http.Error(w, publicMsg, code) + } + bad := func(publicMsg string) { + logAndError(http.StatusBadRequest, publicMsg) + } + if r.Method != "POST" { + logAndError(http.StatusMethodNotAllowed, "only POST allowed") + return + } + srcAddrStr := r.Header.Get("Tailscale-Ingress-Src") + if srcAddrStr == "" { + bad("Tailscale-Ingress-Src header not set") + return + } + srcAddr, err := netip.ParseAddrPort(srcAddrStr) + if err != nil { + bad("Tailscale-Ingress-Src header invalid; want ip:port") + return + } + target := ipn.HostPort(r.Header.Get("Tailscale-Ingress-Target")) + if target == "" { + bad("Tailscale-Ingress-Target header not set") + return + } + if _, _, err := net.SplitHostPort(string(target)); 
err != nil { + bad("Tailscale-Ingress-Target header invalid; want host:port") + return + } + + getConnOrReset := func() (net.Conn, bool) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + h.logf("ingress: failed hijacking conn") + http.Error(w, "failed hijacking conn", http.StatusInternalServerError) + return nil, false + } + io.WriteString(conn, "HTTP/1.1 101 Switching Protocols\r\n\r\n") + return &ipn.FunnelConn{ + Conn: conn, + Src: srcAddr, + Target: target, + }, true + } + sendRST := func() { + http.Error(w, "denied", http.StatusForbidden) + } + + h.ps.b.HandleIngressTCPConn(h.peerNode, target, srcAddr, getConnOrReset, sendRST) +} + +// wantIngressLocked reports whether this node has ingress configured. This bool +// is sent to the coordination server (in Hostinfo.WireIngress) as an +// optimization hint to know primarily which nodes are NOT using ingress, to +// avoid doing work for regular nodes. +// +// Even if the user's ServeConfig.AllowFunnel map was manually edited in raw +// mode and contains map entries with false values, sending true (from Len > 0) +// is still fine. This is only an optimization hint for the control plane and +// doesn't affect security or correctness. And we also don't expect people to +// modify their ServeConfig in raw mode. +func (b *LocalBackend) wantIngressLocked() bool { + return b.serveConfig.Valid() && b.serveConfig.HasAllowFunnel() +} + +// hasIngressEnabledLocked reports whether the node has any funnel endpoint enabled. This bool is sent to control (in +// Hostinfo.IngressEnabled) to determine whether 'Funnel' badge should be displayed on this node in the admin panel. +func (b *LocalBackend) hasIngressEnabledLocked() bool { + return b.serveConfig.Valid() && b.serveConfig.IsFunnelOn() +} + +// shouldWireInactiveIngressLocked reports whether the node is in a state where funnel is not actively enabled, but it +// seems that it is intended to be used with funnel. 
+func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { + return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() +} + +func serveSetTCPPortsInterceptedFromNetmapAndPrefsLocked(b *LocalBackend, prefs ipn.PrefsView) (handlePorts []uint16) { + var vipServicesPorts map[tailcfg.ServiceName][]uint16 + + b.reloadServeConfigLocked(prefs) + if b.serveConfig.Valid() { + servePorts := make([]uint16, 0, 3) + for port := range b.serveConfig.TCPs() { + if port > 0 { + servePorts = append(servePorts, uint16(port)) + } + } + handlePorts = append(handlePorts, servePorts...) + + for svc, cfg := range b.serveConfig.Services().All() { + servicePorts := make([]uint16, 0, 3) + for port := range cfg.TCP().All() { + if port > 0 { + servicePorts = append(servicePorts, uint16(port)) + } + } + if _, ok := vipServicesPorts[svc]; !ok { + mak.Set(&vipServicesPorts, svc, servicePorts) + } else { + mak.Set(&vipServicesPorts, svc, append(vipServicesPorts[svc], servicePorts...)) + } + } + + b.setServeProxyHandlersLocked() + + // don't listen on netmap addresses if we're in userspace mode + if !b.sys.IsNetstack() { + b.updateServeTCPPortNetMapAddrListenersLocked(servePorts) + } + } + + b.setVIPServicesTCPPortsInterceptedLocked(vipServicesPorts) + + return handlePorts +} + +// reloadServeConfigLocked reloads the serve config from the store or resets the +// serve config to nil if not logged in. The "changed" parameter, when false, instructs +// the method to only run the reset-logic and not reload the store from memory to ensure +// foreground sessions are not removed if they are not saved on disk. +func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { + if !b.currentNode().Self().Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { + // We're not logged in, so we don't have a profile. + // Don't try to load the serve config. 
+ b.lastServeConfJSON = mem.B(nil) + b.serveConfig = ipn.ServeConfigView{} + return + } + + confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID()) + // TODO(maisem,bradfitz): prevent reading the config from disk + // if the profile has not changed. + confj, err := b.store.ReadState(confKey) + if err != nil { + b.lastServeConfJSON = mem.B(nil) + b.serveConfig = ipn.ServeConfigView{} + return + } + if b.lastServeConfJSON.Equal(mem.B(confj)) { + return + } + b.lastServeConfJSON = mem.B(confj) + var conf ipn.ServeConfig + if err := json.Unmarshal(confj, &conf); err != nil { + b.logf("invalid ServeConfig %q in StateStore: %v", confKey, err) + b.serveConfig = ipn.ServeConfigView{} + return + } + + // remove inactive sessions + maps.DeleteFunc(conf.Foreground, func(sessionID string, sc *ipn.ServeConfig) bool { + _, ok := b.notifyWatchers[sessionID] + return !ok + }) + + b.serveConfig = conf.View() +} + +func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tailcfg.ServiceName][]uint16) { + if len(svcPorts) == 0 { + b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) + return + } + nm := b.currentNode().NetMap() + if nm == nil { + b.logf("can't set intercept function for Service TCP Ports, netMap is nil") + return + } + vipServiceIPMap := nm.GetVIPServiceIPMap() + if len(vipServiceIPMap) == 0 { + // No approved VIP Services + return + } + + svcAddrPorts := make(map[netip.Addr]func(uint16) bool) + // Only set the intercept function if the service has been assigned a VIP. 
+ for svcName, ports := range svcPorts { + addrs, ok := vipServiceIPMap[svcName] + if !ok { + continue + } + interceptFn := generateInterceptTCPPortFunc(ports) + for _, addr := range addrs { + svcAddrPorts[addr] = interceptFn + } + } + + b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) +} diff --git a/ipn/ipnlocal/serve_disabled.go b/ipn/ipnlocal/serve_disabled.go new file mode 100644 index 0000000000000..a97112941d844 --- /dev/null +++ b/ipn/ipnlocal/serve_disabled.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_serve + +// These are temporary (2025-09-13) stubs for when tailscaled is built with the +// ts_omit_serve build tag, disabling serve. +// +// TODO: move serve to a separate package, out of ipnlocal, and delete this +// file. One step at a time. + +package ipnlocal + +import ( + "tailscale.com/ipn" + "tailscale.com/tailcfg" +) + +const serveEnabled = false + +type localListener = struct{} + +func (b *LocalBackend) DeleteForegroundSession(sessionID string) error { + return nil +} + +type funnelFlow = struct{} + +func (*LocalBackend) hasIngressEnabledLocked() bool { return false } +func (*LocalBackend) shouldWireInactiveIngressLocked() bool { return false } + +func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { + return nil +} diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 86b56ab4b585f..d18ee4db90618 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package ipnlocal import ( diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index ac5b0ee7db06e..7e54cef854de2 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -8,8 +8,6 @@ import ( "bytes" "cmp" "context" - "crypto/sha256" - 
"encoding/hex" "encoding/json" "errors" "fmt" @@ -112,7 +110,6 @@ var handler = map[string]LocalAPIHandler{ "query-feature": (*Handler).serveQueryFeature, "reload-config": (*Handler).reloadConfig, "reset-auth": (*Handler).serveResetAuth, - "serve-config": (*Handler).serveServeConfig, "set-dns": (*Handler).serveSetDNS, "set-expiry-sooner": (*Handler).serveSetExpirySooner, "set-gui-visible": (*Handler).serveSetGUIVisible, @@ -1209,89 +1206,6 @@ func (h *Handler) serveResetAuth(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) } -func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case httpm.GET: - if !h.PermitRead { - http.Error(w, "serve config denied", http.StatusForbidden) - return - } - config := h.b.ServeConfig() - bts, err := json.Marshal(config) - if err != nil { - http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError) - return - } - sum := sha256.Sum256(bts) - etag := hex.EncodeToString(sum[:]) - w.Header().Set("Etag", etag) - w.Header().Set("Content-Type", "application/json") - w.Write(bts) - case httpm.POST: - if !h.PermitWrite { - http.Error(w, "serve config denied", http.StatusForbidden) - return - } - configIn := new(ipn.ServeConfig) - if err := json.NewDecoder(r.Body).Decode(configIn); err != nil { - WriteErrorJSON(w, fmt.Errorf("decoding config: %w", err)) - return - } - - // require a local admin when setting a path handler - // TODO: roll-up this Windows-specific check into either PermitWrite - // or a global admin escalation check. 
- if err := authorizeServeConfigForGOOSAndUserContext(runtime.GOOS, configIn, h); err != nil { - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - - etag := r.Header.Get("If-Match") - if err := h.b.SetServeConfig(configIn, etag); err != nil { - if errors.Is(err, ipnlocal.ErrETagMismatch) { - http.Error(w, err.Error(), http.StatusPreconditionFailed) - return - } - WriteErrorJSON(w, fmt.Errorf("updating config: %w", err)) - return - } - w.WriteHeader(http.StatusOK) - default: - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) - } -} - -func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeConfig, h *Handler) error { - switch goos { - case "windows", "linux", "darwin", "illumos", "solaris": - default: - return nil - } - // Only check for local admin on tailscaled-on-mac (based on "sudo" - // permissions). On sandboxed variants (MacSys and AppStore), tailscaled - // cannot serve files outside of the sandbox and this check is not - // relevant. - if goos == "darwin" && version.IsSandboxedMacOS() { - return nil - } - if !configIn.HasPathHandler() { - return nil - } - if h.Actor.IsLocalAdmin(h.b.OperatorUserID()) { - return nil - } - switch goos { - case "windows": - return errors.New("must be a Windows local admin to serve a path") - case "linux", "darwin", "illumos", "solaris": - return errors.New("must be root, or be an operator and able to run 'sudo tailscale' to serve a path") - default: - // We filter goos at the start of the func, this default case - // should never happen. 
- panic("unreachable") - } -} - func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { http.Error(w, "IP forwarding check access denied", http.StatusForbidden) diff --git a/ipn/localapi/serve.go b/ipn/localapi/serve.go new file mode 100644 index 0000000000000..56c8b486cf93c --- /dev/null +++ b/ipn/localapi/serve.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package localapi + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net/http" + "runtime" + + "tailscale.com/ipn" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/util/httpm" + "tailscale.com/version" +) + +func init() { + Register("serve-config", (*Handler).serveServeConfig) +} + +func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case httpm.GET: + if !h.PermitRead { + http.Error(w, "serve config denied", http.StatusForbidden) + return + } + config := h.b.ServeConfig() + bts, err := json.Marshal(config) + if err != nil { + http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError) + return + } + sum := sha256.Sum256(bts) + etag := hex.EncodeToString(sum[:]) + w.Header().Set("Etag", etag) + w.Header().Set("Content-Type", "application/json") + w.Write(bts) + case httpm.POST: + if !h.PermitWrite { + http.Error(w, "serve config denied", http.StatusForbidden) + return + } + configIn := new(ipn.ServeConfig) + if err := json.NewDecoder(r.Body).Decode(configIn); err != nil { + WriteErrorJSON(w, fmt.Errorf("decoding config: %w", err)) + return + } + + // require a local admin when setting a path handler + // TODO: roll-up this Windows-specific check into either PermitWrite + // or a global admin escalation check. 
+ if err := authorizeServeConfigForGOOSAndUserContext(runtime.GOOS, configIn, h); err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + + etag := r.Header.Get("If-Match") + if err := h.b.SetServeConfig(configIn, etag); err != nil { + if errors.Is(err, ipnlocal.ErrETagMismatch) { + http.Error(w, err.Error(), http.StatusPreconditionFailed) + return + } + WriteErrorJSON(w, fmt.Errorf("updating config: %w", err)) + return + } + w.WriteHeader(http.StatusOK) + default: + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + } +} + +func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeConfig, h *Handler) error { + switch goos { + case "windows", "linux", "darwin", "illumos", "solaris": + default: + return nil + } + // Only check for local admin on tailscaled-on-mac (based on "sudo" + // permissions). On sandboxed variants (MacSys and AppStore), tailscaled + // cannot serve files outside of the sandbox and this check is not + // relevant. + if goos == "darwin" && version.IsSandboxedMacOS() { + return nil + } + if !configIn.HasPathHandler() { + return nil + } + if h.Actor.IsLocalAdmin(h.b.OperatorUserID()) { + return nil + } + switch goos { + case "windows": + return errors.New("must be a Windows local admin to serve a path") + case "linux", "darwin", "illumos", "solaris": + return errors.New("must be root, or be an operator and able to run 'sudo tailscale' to serve a path") + default: + // We filter goos at the start of the func, this default case + // should never happen. 
+ panic("unreachable") + } +} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 74f3f8c539a66..9b93ce8dbf2ec 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -235,6 +235,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index d97c669463d78..7381c515aba3c 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -33,6 +33,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/udp" "gvisor.dev/gvisor/pkg/waiter" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnlocal" "tailscale.com/metrics" "tailscale.com/net/dns" @@ -643,13 +644,15 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { var selfNode tailcfg.NodeView var serviceAddrSet set.Set[netip.Addr] if nm != nil { - vipServiceIPMap := nm.GetVIPServiceIPMap() - serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2) - for _, addrs := range vipServiceIPMap { - serviceAddrSet.AddSlice(addrs) - } ns.atomicIsLocalIPFunc.Store(ipset.NewContainsIPFunc(nm.GetAddresses())) - ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains) + if buildfeatures.HasServe { + vipServiceIPMap := nm.GetVIPServiceIPMap() + serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2) + for _, addrs := range vipServiceIPMap { + serviceAddrSet.AddSlice(addrs) + } + ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains) + } selfNode = nm.SelfNode } else { 
ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc()) @@ -1032,6 +1035,9 @@ func (ns *Impl) isLocalIP(ip netip.Addr) bool { // isVIPServiceIP reports whether ip is an IP address that's // assigned to a VIP service. func (ns *Impl) isVIPServiceIP(ip netip.Addr) bool { + if !buildfeatures.HasServe { + return false + } return ns.atomicIsVIPServiceIPFunc.Load()(ip) } @@ -1074,7 +1080,7 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool { return true } } - if isService { + if buildfeatures.HasServe && isService { if p.IsEchoRequest() { return true } From 2015ce40814dd175f7d441c83d7517a2128b37e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Tue, 16 Sep 2025 11:25:29 -0400 Subject: [PATCH 0344/1093] health,ipn/ipnlocal: introduce eventbus in health.Tracker (#17085) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Tracker was using direct callbacks to ipnlocal. This PR moves those to be triggered via the eventbus. Additionally, the eventbus is now closed on exit from tailscaled explicitly, and health is now a SubSystem in tsd.
Updates #15160 Signed-off-by: Claus Lensbøl --- cmd/tailscaled/debug.go | 4 +- cmd/tailscaled/tailscaled.go | 14 +- cmd/tsconnect/wasm/wasm_js.go | 2 +- control/controlclient/controlclient_test.go | 4 +- control/controlclient/map_test.go | 5 +- control/controlhttp/http_test.go | 5 +- health/health.go | 46 +++ health/health_test.go | 323 +++++++++++++------- ipn/ipnlocal/extension_host_test.go | 3 +- ipn/ipnlocal/local.go | 17 +- ipn/ipnlocal/local_test.go | 8 +- ipn/ipnlocal/loglines_test.go | 2 +- ipn/ipnlocal/network-lock_test.go | 39 +-- ipn/ipnlocal/peerapi_test.go | 21 +- ipn/ipnlocal/profiles.go | 5 +- ipn/ipnlocal/profiles_test.go | 25 +- ipn/ipnlocal/serve_test.go | 4 +- ipn/ipnlocal/ssh_test.go | 3 +- ipn/ipnlocal/state_test.go | 6 +- ipn/lapitest/backend.go | 2 +- ipn/localapi/localapi_test.go | 2 +- net/dns/manager_tcp_test.go | 5 +- net/dns/manager_test.go | 5 +- net/dns/resolver/forwarder_test.go | 7 +- net/dns/resolver/tsdns_test.go | 9 +- net/tlsdial/tlsdial_test.go | 3 +- ssh/tailssh/tailssh_test.go | 2 +- tsd/tsd.go | 13 +- tsnet/tsnet.go | 6 +- util/eventbus/eventbustest/eventbustest.go | 4 +- wgengine/bench/wg.go | 4 +- wgengine/magicsock/magicsock_test.go | 13 +- wgengine/netstack/netstack_test.go | 4 +- wgengine/router/router_linux_test.go | 2 +- wgengine/userspace_ext_test.go | 4 +- wgengine/userspace_test.go | 17 +- wgengine/watchdog_test.go | 7 +- 37 files changed, 402 insertions(+), 243 deletions(-) diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index 2f469a0d189f7..85dd787c1b128 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -161,7 +161,9 @@ func getURL(ctx context.Context, urlStr string) error { } func checkDerp(ctx context.Context, derpRegion string) (err error) { - ht := new(health.Tracker) + bus := eventbus.New() + defer bus.Close() + ht := health.NewTracker(bus) req, err := http.NewRequestWithContext(ctx, "GET", ipn.DefaultControlURL+"/derpmap/default", nil) if err != nil { return 
fmt.Errorf("create derp map request: %w", err) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 890ff7bf8f8fd..734c8e8e88342 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -426,7 +426,7 @@ func run() (err error) { sys.Set(netMon) } - pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker(), nil /* use log.Printf */) + pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker.Get(), nil /* use log.Printf */) pol.SetVerbosityLevel(args.verbose) logPol = pol defer func() { @@ -461,7 +461,7 @@ func run() (err error) { // Always clean up, even if we're going to run the server. This covers cases // such as when a system was rebooted without shutting down, or tailscaled // crashed, and would for example restore system DNS configuration. - dns.CleanUp(logf, netMon, sys.HealthTracker(), args.tunname) + dns.CleanUp(logf, netMon, sys.HealthTracker.Get(), args.tunname) router.CleanUp(logf, netMon, args.tunname) // If the cleanUp flag was passed, then exit. if args.cleanUp { @@ -749,7 +749,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo conf := wgengine.Config{ ListenPort: args.port, NetMon: sys.NetMon.Get(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), Dialer: sys.Dialer.Get(), SetSubsystem: sys.Set, @@ -760,7 +760,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo f(&conf, logf) } - sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) + sys.HealthTracker.Get().SetMetricsRegistry(sys.UserMetricsRegistry()) onlyNetstack = name == "userspace-networking" netstackSubnetRouter := onlyNetstack // but mutated later on some platforms @@ -781,7 +781,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo // configuration being unavailable (from the noop // manager). More in Issue 4017. 
// TODO(bradfitz): add a Synology-specific DNS manager. - conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name + conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name if err != nil { return false, fmt.Errorf("dns.NewOSConfigurator: %w", err) } @@ -809,13 +809,13 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo sys.NetMon.Get().SetTailscaleInterfaceName(devName) } - r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker(), sys.Bus.Get()) + r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker.Get(), sys.Bus.Get()) if err != nil { dev.Close() return false, fmt.Errorf("creating router: %w", err) } - d, err := dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) + d, err := dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) if err != nil { dev.Close() r.Close() diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 87f8148668be3..ea40dba9ccbb1 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -108,7 +108,7 @@ func newIPN(jsConfig js.Value) map[string]any { Dialer: dialer, SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 2efc27b5e8a19..78646d76aca47 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -236,7 +236,7 @@ func TestDirectProxyManual(t *testing.T) { }, DiscoPublicKey: key.NewDisco().Public(), Logf: t.Logf, - HealthTracker: 
&health.Tracker{}, + HealthTracker: health.NewTracker(bus), PopBrowserURL: func(url string) { t.Logf("PopBrowserURL: %q", url) }, @@ -328,7 +328,7 @@ func testHTTPS(t *testing.T, withProxy bool) { }, DiscoPublicKey: key.NewDisco().Public(), Logf: t.Logf, - HealthTracker: &health.Tracker{}, + HealthTracker: health.NewTracker(bus), PopBrowserURL: func(url string) { t.Logf("PopBrowserURL: %q", url) }, diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index ff5df8207ba8f..59b8988fcd46e 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/ptr" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" ) @@ -1326,7 +1327,7 @@ func TestNetmapDisplayMessage(t *testing.T) { // [netmap.NetworkMap] to a [health.Tracker]. func TestNetmapHealthIntegration(t *testing.T) { ms := newTestMapSession(t, nil) - ht := health.Tracker{} + ht := health.NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -1371,7 +1372,7 @@ func TestNetmapHealthIntegration(t *testing.T) { // passing the [netmap.NetworkMap] to a [health.Tracker]. 
func TestNetmapDisplayMessageIntegration(t *testing.T) { ms := newTestMapSession(t, nil) - ht := health.Tracker{} + ht := health.NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index daf262023da97..0b4e117f98928 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/eventbus/eventbustest" ) type httpTestParam struct { @@ -228,7 +229,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { omitCertErrorLogging: true, testFallbackDelay: fallbackDelay, Clock: clock, - HealthTracker: new(health.Tracker), + HealthTracker: health.NewTracker(eventbustest.NewBus(t)), } if param.httpInDial { @@ -730,7 +731,7 @@ func TestDialPlan(t *testing.T) { omitCertErrorLogging: true, testFallbackDelay: 50 * time.Millisecond, Clock: clock, - HealthTracker: new(health.Tracker), + HealthTracker: health.NewTracker(eventbustest.NewBus(t)), } conn, err := a.dial(ctx) diff --git a/health/health.go b/health/health.go index 05887043814ea..c456b53cbf174 100644 --- a/health/health.go +++ b/health/health.go @@ -25,6 +25,7 @@ import ( "tailscale.com/tstime" "tailscale.com/types/opt" "tailscale.com/util/cibuild" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/multierr" "tailscale.com/util/set" @@ -76,6 +77,9 @@ type Tracker struct { testClock tstime.Clock // nil means use time.Now / tstime.StdClock{} + eventClient *eventbus.Client + changePub *eventbus.Publisher[Change] + // mu guards everything that follows. mu sync.Mutex @@ -119,6 +123,20 @@ type Tracker struct { metricHealthMessage *metrics.MultiLabelMap[metricHealthMessageLabel] } +// NewTracker constructs a new [Tracker] and attaches the given eventbus. +// NewTracker will panic if no eventbus is given.
+func NewTracker(bus *eventbus.Bus) *Tracker { + if bus == nil { + panic("no eventbus set") + } + + cli := bus.Client("health.Tracker") + return &Tracker{ + eventClient: cli, + changePub: eventbus.Publish[Change](cli), + } +} + func (t *Tracker) now() time.Time { if t.testClock != nil { return t.testClock.Now() @@ -418,6 +436,28 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { Warnable: w, UnhealthyState: w.unhealthyState(ws), } + // Publish the change to the event bus. If the change is already visible + // now, publish it immediately; otherwise queue a timer to publish it at + // a future time when it becomes visible. + if w.IsVisible(ws, t.now) { + t.changePub.Publish(change) + } else { + visibleIn := w.TimeToVisible - t.now().Sub(brokenSince) + tc := t.clock().AfterFunc(visibleIn, func() { + t.mu.Lock() + defer t.mu.Unlock() + // Check if the Warnable is still unhealthy, as it could have become healthy between the time + // the timer was set for and the time it was executed. + if t.warnableVal[w] != nil { + t.changePub.Publish(change) + delete(t.pendingVisibleTimers, w) + } + }) + mak.Set(&t.pendingVisibleTimers, w, tc) + } + + // Direct callbacks + // TODO(cmol): Remove once all watchers have been moved to events for _, cb := range t.watchers { // If the Warnable has been unhealthy for more than its TimeToVisible, the callback should be // executed immediately. 
Otherwise, the callback should be enqueued to run once the Warnable @@ -473,7 +513,9 @@ func (t *Tracker) setHealthyLocked(w *Warnable) { WarnableChanged: true, Warnable: w, } + t.changePub.Publish(change) for _, cb := range t.watchers { + // TODO(cmol): Remove once all watchers have been moved to events cb(change) } } @@ -484,7 +526,11 @@ func (t *Tracker) notifyWatchersControlChangedLocked() { change := Change{ ControlHealthChanged: true, } + if t.changePub != nil { + t.changePub.Publish(change) + } for _, cb := range t.watchers { + // TODO(cmol): Remove once all watchers have been moved to events cb(change) } } diff --git a/health/health_test.go b/health/health_test.go index d66cea06c0f0b..c55b0e1f3b5a5 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -18,12 +18,34 @@ import ( "tailscale.com/tstest" "tailscale.com/tstime" "tailscale.com/types/opt" + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" "tailscale.com/version" ) +func wantChange(c Change) func(c Change) (bool, error) { + return func(cEv Change) (bool, error) { + if cEv.ControlHealthChanged != c.ControlHealthChanged { + return false, fmt.Errorf("expected ControlHealthChanged %t, got %t", c.ControlHealthChanged, cEv.ControlHealthChanged) + } + if cEv.WarnableChanged != c.WarnableChanged { + return false, fmt.Errorf("expected WarnableChanged %t, got %t", c.WarnableChanged, cEv.WarnableChanged) + } + if c.Warnable != nil && (cEv.Warnable == nil || cEv.Warnable != c.Warnable) { + return false, fmt.Errorf("expected Warnable %+v, got %+v", c.Warnable, cEv.Warnable) + } + + if c.UnhealthyState != nil { + panic("comparison of UnhealthyState is not yet supported") + } + + return true, nil + } +} + func TestAppendWarnableDebugFlags(t *testing.T) { - var tr Tracker + tr := NewTracker(eventbustest.NewBus(t)) for i := range 10 { w := Register(&Warnable{ @@ -68,7 +90,9 @@ func TestNilMethodsDontCrash(t *testing.T) { } func 
TestSetUnhealthyWithDuplicateThenHealthyAgain(t *testing.T) { - ht := Tracker{} + bus := eventbustest.NewBus(t) + watcher := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) if len(ht.Strings()) != 0 { t.Fatalf("before first insertion, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } @@ -92,10 +116,20 @@ func TestSetUnhealthyWithDuplicateThenHealthyAgain(t *testing.T) { if !reflect.DeepEqual(ht.Strings(), want) { t.Fatalf("after setting the healthy, newTracker.Strings() = %v; want = %v", ht.Strings(), want) } + + if err := eventbustest.ExpectExactly(watcher, + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + ); err != nil { + t.Fatalf("expected events, got %q", err) + } } func TestRemoveAllWarnings(t *testing.T) { - ht := Tracker{} + bus := eventbustest.NewBus(t) + watcher := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) if len(ht.Strings()) != 0 { t.Fatalf("before first insertion, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } @@ -109,67 +143,105 @@ func TestRemoveAllWarnings(t *testing.T) { if len(ht.Strings()) != 0 { t.Fatalf("after RemoveAll, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } + if err := eventbustest.ExpectExactly(watcher, + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + ); err != nil { + t.Fatalf("expected events, got %q", err) + } } // TestWatcher tests that a registered watcher function gets called with the correct // Warnable and non-nil/nil UnhealthyState upon setting a Warnable to unhealthy/healthy. 
func TestWatcher(t *testing.T) { - ht := Tracker{} - wantText := "Hello world" - becameUnhealthy := make(chan struct{}) - becameHealthy := make(chan struct{}) - - watcherFunc := func(c Change) { - w := c.Warnable - us := c.UnhealthyState - if w != testWarnable { - t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, testWarnable) - } + tests := []struct { + name string + preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) + }{ + { + name: "with-callbacks", + preFunc: func(t *testing.T, tht *Tracker, _ *eventbus.Bus, fn func(c Change)) { + t.Cleanup(tht.RegisterWatcher(fn)) + if len(tht.watchers) != 1 { + t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(tht.watchers)) + } + }, + }, + { + name: "with-eventbus", + preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { + client := bus.Client("healthwatchertestclient") + sub := eventbus.Subscribe[Change](client) + go func() { + for { + select { + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + }() + }, + }, + } - if us != nil { - if us.Text != wantText { - t.Fatalf("unexpected us.Text: %s, want: %s", us.Text, wantText) - } - if us.Args[ArgError] != wantText { - t.Fatalf("unexpected us.Args[ArgError]: %s, want: %s", us.Args[ArgError], wantText) + for _, tt := range tests { + t.Run(tt.name, func(*testing.T) { + bus := eventbustest.NewBus(t) + ht := NewTracker(bus) + wantText := "Hello world" + becameUnhealthy := make(chan struct{}) + becameHealthy := make(chan struct{}) + + watcherFunc := func(c Change) { + w := c.Warnable + us := c.UnhealthyState + if w != testWarnable { + t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, testWarnable) + } + + if us != nil { + if us.Text != wantText { + t.Fatalf("unexpected us.Text: %q, want: %s", us.Text, wantText) + } + if us.Args[ArgError] != wantText { + t.Fatalf("unexpected us.Args[ArgError]: %q, want: 
%s", us.Args[ArgError], wantText) + } + becameUnhealthy <- struct{}{} + } else { + becameHealthy <- struct{}{} + } } - becameUnhealthy <- struct{}{} - } else { - becameHealthy <- struct{}{} - } - } - unregisterFunc := ht.RegisterWatcher(watcherFunc) - if len(ht.watchers) != 1 { - t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(ht.watchers)) - } - ht.SetUnhealthy(testWarnable, Args{ArgError: wantText}) + // Set up test + tt.preFunc(t, ht, bus, watcherFunc) - select { - case <-becameUnhealthy: - // Test passed because the watcher got notified of an unhealthy state - case <-becameHealthy: - // Test failed because the watcher got of a healthy state instead of an unhealthy one - t.Fatalf("watcherFunc was called with a healthy state") - case <-time.After(1 * time.Second): - t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") - } + // Start running actual test + ht.SetUnhealthy(testWarnable, Args{ArgError: wantText}) - ht.SetHealthy(testWarnable) + select { + case <-becameUnhealthy: + // Test passed because the watcher got notified of an unhealthy state + case <-becameHealthy: + // Test failed because the watcher got of a healthy state instead of an unhealthy one + t.Fatalf("watcherFunc was called with a healthy state") + case <-time.After(5 * time.Second): + t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") + } - select { - case <-becameUnhealthy: - // Test failed because the watcher got of an unhealthy state instead of a healthy one - t.Fatalf("watcherFunc was called with an unhealthy state") - case <-becameHealthy: - // Test passed because the watcher got notified of a healthy state - case <-time.After(1 * time.Second): - t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") - } + ht.SetHealthy(testWarnable) - unregisterFunc() - if len(ht.watchers) != 0 { - t.Fatalf("after unregisterFunc, len(newTracker.watchers) = %d; want = 0", len(ht.watchers)) + select { + case <-becameUnhealthy: + // 
Test failed because the watcher got of an unhealthy state instead of a healthy one + t.Fatalf("watcherFunc was called with an unhealthy state") + case <-becameHealthy: + // Test passed because the watcher got notified of a healthy state + case <-time.After(5 * time.Second): + t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") + } + }) } } @@ -178,45 +250,81 @@ func TestWatcher(t *testing.T) { // has a TimeToVisible set, which means that a watcher should only be notified of an unhealthy state after // the TimeToVisible duration has passed. func TestSetUnhealthyWithTimeToVisible(t *testing.T) { - ht := Tracker{} - mw := Register(&Warnable{ - Code: "test-warnable-3-secs-to-visible", - Title: "Test Warnable with 3 seconds to visible", - Text: StaticMessage("Hello world"), - TimeToVisible: 2 * time.Second, - ImpactsConnectivity: true, - }) - defer unregister(mw) - - becameUnhealthy := make(chan struct{}) - becameHealthy := make(chan struct{}) + tests := []struct { + name string + preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) + }{ + { + name: "with-callbacks", + preFunc: func(t *testing.T, tht *Tracker, _ *eventbus.Bus, fn func(c Change)) { + t.Cleanup(tht.RegisterWatcher(fn)) + if len(tht.watchers) != 1 { + t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(tht.watchers)) + } + }, + }, + { + name: "with-eventbus", + preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { + client := bus.Client("healthwatchertestclient") + sub := eventbus.Subscribe[Change](client) + go func() { + for { + select { + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + }() + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(*testing.T) { + bus := eventbustest.NewBus(t) + ht := NewTracker(bus) + mw := Register(&Warnable{ + Code: "test-warnable-3-secs-to-visible", + Title: "Test Warnable with 3 seconds to visible", + Text: StaticMessage("Hello 
world"), + TimeToVisible: 2 * time.Second, + ImpactsConnectivity: true, + }) - watchFunc := func(c Change) { - w := c.Warnable - us := c.UnhealthyState - if w != mw { - t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, w) - } + becameUnhealthy := make(chan struct{}) + becameHealthy := make(chan struct{}) - if us != nil { - becameUnhealthy <- struct{}{} - } else { - becameHealthy <- struct{}{} - } - } + watchFunc := func(c Change) { + w := c.Warnable + us := c.UnhealthyState + if w != mw { + t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, w) + } - ht.RegisterWatcher(watchFunc) - ht.SetUnhealthy(mw, Args{ArgError: "Hello world"}) + if us != nil { + becameUnhealthy <- struct{}{} + } else { + becameHealthy <- struct{}{} + } + } - select { - case <-becameUnhealthy: - // Test failed because the watcher got notified of an unhealthy state - t.Fatalf("watcherFunc was called with an unhealthy state") - case <-becameHealthy: - // Test failed because the watcher got of a healthy state - t.Fatalf("watcherFunc was called with a healthy state") - case <-time.After(1 * time.Second): - // As expected, watcherFunc still had not been called after 1 second + tt.preFunc(t, ht, bus, watchFunc) + ht.SetUnhealthy(mw, Args{ArgError: "Hello world"}) + + select { + case <-becameUnhealthy: + // Test failed because the watcher got notified of an unhealthy state + t.Fatalf("watcherFunc was called with an unhealthy state") + case <-becameHealthy: + // Test failed because the watcher got of a healthy state + t.Fatalf("watcherFunc was called with a healthy state") + case <-time.After(1 * time.Second): + // As expected, watcherFunc still had not been called after 1 second + } + unregister(mw) + }) } } @@ -242,7 +350,7 @@ func TestRegisterWarnablePanicsWithDuplicate(t *testing.T) { // TestCheckDependsOnAppearsInUnhealthyState asserts that the DependsOn field in the UnhealthyState // is populated with the WarnableCode(s) of the 
Warnable(s) that a warning depends on. func TestCheckDependsOnAppearsInUnhealthyState(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) w1 := Register(&Warnable{ Code: "w1", Text: StaticMessage("W1 Text"), @@ -352,11 +460,11 @@ func TestShowUpdateWarnable(t *testing.T) { } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - tr := &Tracker{ - checkForUpdates: tt.check, - applyUpdates: tt.apply, - latestVersion: tt.cv, - } + tr := NewTracker(eventbustest.NewBus(t)) + tr.checkForUpdates = tt.check + tr.applyUpdates = tt.apply + tr.latestVersion = tt.cv + gotWarnable, gotShow := tr.showUpdateWarnable() if gotWarnable != tt.wantWarnable { t.Errorf("got warnable: %v, want: %v", gotWarnable, tt.wantWarnable) @@ -401,11 +509,10 @@ func TestHealthMetric(t *testing.T) { } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - tr := &Tracker{ - checkForUpdates: tt.check, - applyUpdates: tt.apply, - latestVersion: tt.cv, - } + tr := NewTracker(eventbustest.NewBus(t)) + tr.checkForUpdates = tt.check + tr.applyUpdates = tt.apply + tr.latestVersion = tt.cv tr.SetMetricsRegistry(&usermetric.Registry{}) if val := tr.metricHealthMessage.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { t.Fatalf("metric value: %q, want: %q", val, strconv.Itoa(tt.wantMetricCount)) @@ -426,9 +533,8 @@ func TestNoDERPHomeWarnable(t *testing.T) { Start: time.Unix(123, 0), FollowRealTime: false, }) - ht := &Tracker{ - testClock: clock, - } + ht := NewTracker(eventbustest.NewBus(t)) + ht.testClock = clock ht.SetIPNState("NeedsLogin", true) // Advance 30 seconds to get past the "recentlyLoggedIn" check. 
@@ -448,7 +554,7 @@ func TestNoDERPHomeWarnable(t *testing.T) { // but doesn't use tstest.Clock so avoids the deadlock // I hit: https://github.com/tailscale/tailscale/issues/14798 func TestNoDERPHomeWarnableManual(t *testing.T) { - ht := &Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) // Avoid wantRunning: @@ -462,7 +568,7 @@ func TestNoDERPHomeWarnableManual(t *testing.T) { } func TestControlHealth(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -620,7 +726,7 @@ func TestControlHealthNotifies(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -643,7 +749,7 @@ func TestControlHealthNotifies(t *testing.T) { } func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) gotNotified := false @@ -671,7 +777,7 @@ func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { // created from Control health & returned by [Tracker.CurrentState] is different // when the details of the [tailcfg.DisplayMessage] are different. func TestCurrentStateETagControlHealth(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -776,9 +882,8 @@ func TestCurrentStateETagControlHealth(t *testing.T) { // when the details of the Warnable are different. 
func TestCurrentStateETagWarnable(t *testing.T) { newTracker := func(clock tstime.Clock) *Tracker { - ht := &Tracker{ - testClock: clock, - } + ht := NewTracker(eventbustest.NewBus(t)) + ht.testClock = clock ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() return ht diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index 509833ff6de46..f5c081a5bdb3e 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -32,6 +32,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/types/logger" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) @@ -847,7 +848,7 @@ func TestBackgroundProfileResolver(t *testing.T) { // Create a new profile manager and add the profiles to it. // We expose the profile manager to the extensions via the read-only [ipnext.ProfileStore] interface. - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) for i, p := range tt.profiles { // Generate a unique ID and key for each profile, // unless the profile already has them set diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 6d92e58d0c111..4c27bea45136c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -206,6 +206,7 @@ type LocalBackend struct { eventClient *eventbus.Client clientVersionSub *eventbus.Subscriber[tailcfg.ClientVersion] autoUpdateSub *eventbus.Subscriber[controlclient.AutoUpdate] + healthChangeSub *eventbus.Subscriber[health.Change] subsDoneCh chan struct{} // closed when consumeEventbusTopics returns health *health.Tracker // always non-nil polc policyclient.Client // always non-nil @@ -216,7 +217,6 @@ type LocalBackend struct { pushDeviceToken syncs.AtomicValue[string] backendLogID logid.PublicID unregisterNetMon func() - unregisterHealthWatch func() unregisterSysPolicyWatch func() portpoll 
*portlist.Poller // may be nil portpollOnce sync.Once // guards starting readPoller @@ -488,7 +488,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo if loginFlags&controlclient.LocalBackendStartKeyOSNeutral != 0 { goos = "" } - pm, err := newProfileManagerWithGOOS(store, logf, sys.HealthTracker(), goos) + pm, err := newProfileManagerWithGOOS(store, logf, sys.HealthTracker.Get(), goos) if err != nil { return nil, err } @@ -521,7 +521,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), sys: sys, polc: sys.PolicyClientOrDefault(), - health: sys.HealthTracker(), + health: sys.HealthTracker.Get(), metrics: m, e: e, dialer: dialer, @@ -543,6 +543,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.eventClient = b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") b.clientVersionSub = eventbus.Subscribe[tailcfg.ClientVersion](b.eventClient) b.autoUpdateSub = eventbus.Subscribe[controlclient.AutoUpdate](b.eventClient) + b.healthChangeSub = eventbus.Subscribe[health.Change](b.eventClient) nb := newNodeBackend(ctx, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -570,7 +571,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo }() netMon := sys.NetMon.Get() - b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker()) + b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker.Get()) if err != nil { log.Printf("error setting up sockstat logger: %v", err) } @@ -595,8 +596,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.linkChange(&netmon.ChangeDelta{New: netMon.InterfaceState()}) b.unregisterNetMon = netMon.RegisterChangeCallback(b.linkChange) - b.unregisterHealthWatch = b.health.RegisterWatcher(b.onHealthChange) - if tunWrap, 
ok := b.sys.Tun.GetOK(); ok { tunWrap.PeerAPIPort = b.GetPeerAPIPort } else { @@ -628,12 +627,17 @@ func (b *LocalBackend) consumeEventbusTopics() { for { select { + // TODO(cmol): Move to using b.eventClient.Done() once implemented. + // In the meantime, we rely on the subs not going away until the client is + // closed, closing all its subscribers. case <-b.clientVersionSub.Done(): return case clientVersion := <-b.clientVersionSub.Events(): b.onClientVersion(&clientVersion) case au := <-b.autoUpdateSub.Events(): b.onTailnetDefaultAutoUpdate(au.Value) + case change := <-b.healthChangeSub.Events(): + b.onHealthChange(change) } } } @@ -1162,7 +1166,6 @@ func (b *LocalBackend) Shutdown() { b.stopOfflineAutoUpdate() b.unregisterNetMon() - b.unregisterHealthWatch() b.unregisterSysPolicyWatch() if cc != nil { cc.Shutdown() diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 261d5c4c20682..354cf686440fc 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -470,7 +470,7 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { t.Log("Added memory store for testing") } if _, ok := sys.Engine.GetOK(); !ok { - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -2897,7 +2897,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { if test.prefs == nil { test.prefs = ipn.NewPrefs() } - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) pm.prefs = test.prefs.View() b.currentNode().SetNetMap(test.nm) b.pm = pm @@ -3501,7 +3501,7 @@ func TestApplySysPolicy(t *testing.T) { wantPrefs.ControlURL = ipn.DefaultControlURL } - pm := 
must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) pm.prefs = usePrefs.View() b := newTestBackend(t, polc) @@ -5802,7 +5802,7 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys sys.Set(store) } if _, hasEngine := sys.Engine.GetOK(); !hasEngine { - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/ipn/ipnlocal/loglines_test.go b/ipn/ipnlocal/loglines_test.go index 5bea6cabca4c4..d831aa8b075dc 100644 --- a/ipn/ipnlocal/loglines_test.go +++ b/ipn/ipnlocal/loglines_test.go @@ -50,7 +50,7 @@ func TestLocalLogLines(t *testing.T) { sys := tsd.NewSystem() store := new(mem.Store) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatal(err) } diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 93ecd977f6152..0d3f7db43ff0f 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/tkatype" + "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/set" @@ -46,7 +47,7 @@ func (f observerFunc) SetControlClientStatus(_ controlclient.Client, s controlcl f(s) } -func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { +func fakeControlClient(t *testing.T, c *http.Client) 
(*controlclient.Auto, *eventbus.Bus) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni @@ -70,7 +71,7 @@ func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { if err != nil { t.Fatal(err) } - return cc + return cc, bus } func fakeNoiseServer(t *testing.T, handler http.HandlerFunc) (*httptest.Server, *http.Client) { @@ -158,8 +159,8 @@ func TestTKAEnablementFlow(t *testing.T) { defer ts.Close() temp := t.TempDir() - cc := fakeControlClient(t, client) - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + cc, bus := fakeControlClient(t, client) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(bus))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -199,7 +200,7 @@ func TestTKADisablementFlow(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -267,7 +268,7 @@ func TestTKADisablementFlow(t *testing.T) { })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -391,7 +392,7 @@ func TestTKASync(t *testing.T) { t.Run(tc.name, func(t *testing.T) { nodePriv := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -518,7 +519,7 @@ func TestTKASync(t *testing.T) { defer ts.Close() // Setup the client. 
- cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -707,7 +708,7 @@ func TestTKADisable(t *testing.T) { disablementSecret := bytes.Repeat([]byte{0xa5}, 32) nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -769,7 +770,7 @@ func TestTKADisable(t *testing.T) { })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -798,7 +799,7 @@ func TestTKASign(t *testing.T) { toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -860,7 +861,7 @@ func TestTKASign(t *testing.T) { } })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -887,7 +888,7 @@ func TestTKAForceDisable(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -940,7 +941,7 @@ func TestTKAForceDisable(t *testing.T) { })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) sys := tsd.NewSystem() sys.Set(pm.Store()) @@ -985,7 +986,7 @@ func TestTKAAffectedSigs(t *testing.T) { // 
toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -1076,7 +1077,7 @@ func TestTKAAffectedSigs(t *testing.T) { } })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1118,7 +1119,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { cosignPriv := key.NewNLPrivate() compromisedPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -1188,7 +1189,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { } })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1209,7 +1210,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { // Cosign using the cosigning key. 
{ - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 5654cf27799e2..db01dd608b2a7 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -25,6 +25,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/usermetric" "tailscale.com/wgengine" @@ -194,10 +195,9 @@ func TestPeerAPIReplyToDNSQueries(t *testing.T) { h.isSelf = false h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) @@ -249,10 +249,9 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) @@ -323,11 +322,10 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := 
tsd.NewSystemWithBus(eventbustest.NewBus(t)) rc := &appctest.RouteCollector{} - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) reg := new(usermetric.Registry) @@ -392,10 +390,9 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) reg := new(usermetric.Registry) rc := &appctest.RouteCollector{} eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 1d312cfa606b3..6e1db4ff25bbd 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -21,6 +21,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" ) var debug = envknob.RegisterBool("TS_DEBUG_PROFILES") @@ -838,7 +839,9 @@ func (pm *profileManager) CurrentPrefs() ipn.PrefsView { // ReadStartupPrefsForTest reads the startup prefs from disk. It is only used for testing. 
func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsView, error) { - ht := new(health.Tracker) // in tests, don't care about the health status + bus := eventbus.New() + defer bus.Close() + ht := health.NewTracker(bus) // in tests, don't care about the health status pm, err := newProfileManager(store, logf, ht) if err != nil { return ipn.PrefsView{}, err diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 52b095be1a5fe..8dce388bcd7aa 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -20,13 +20,14 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) func TestProfileCurrentUserSwitch(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -63,7 +64,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) { t.Fatalf("CurrentPrefs() = %v, want emptyPrefs", pm.CurrentPrefs().Pretty()) } - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -81,7 +82,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) { func TestProfileList(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -285,7 +286,7 @@ func TestProfileDupe(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { store := new(mem.Store) - pm, err := 
newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -318,7 +319,7 @@ func TestProfileDupe(t *testing.T) { func TestProfileManagement(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -416,7 +417,7 @@ func TestProfileManagement(t *testing.T) { t.Logf("Recreate profile manager from store") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -432,7 +433,7 @@ func TestProfileManagement(t *testing.T) { t.Logf("Recreate profile manager from store after deleting default profile") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -474,7 +475,7 @@ func TestProfileManagement(t *testing.T) { t.Fatal("SetPrefs failed to save auto-update setting") } // Re-load profiles to trigger migration for invalid auto-update value. 
- pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -496,7 +497,7 @@ func TestProfileManagementWindows(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -565,7 +566,7 @@ func TestProfileManagementWindows(t *testing.T) { t.Logf("Recreate profile manager from store, should reset prefs") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -588,7 +589,7 @@ func TestProfileManagementWindows(t *testing.T) { } // Recreate the profile manager to ensure that it starts with test profile. 
- pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -1091,7 +1092,7 @@ func TestProfileStateChangeCallback(t *testing.T) { t.Parallel() store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatalf("newProfileManagerWithGOOS: %v", err) } diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index d18ee4db90618..a081ed27bd3e4 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -900,7 +900,7 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { e, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) @@ -918,7 +918,7 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { dir := t.TempDir() b.SetVarRoot(dir) - pm := must.Get(newProfileManager(new(mem.Store), logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), logf, health.NewTracker(bus))) pm.currentProfile = (&ipn.LoginProfile{ID: "id0"}).View() b.pm = pm diff --git a/ipn/ipnlocal/ssh_test.go b/ipn/ipnlocal/ssh_test.go index 6e93b34f05019..b24cd6732f605 100644 --- a/ipn/ipnlocal/ssh_test.go +++ b/ipn/ipnlocal/ssh_test.go @@ -13,6 +13,7 @@ import ( "tailscale.com/health" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) @@ -50,7 +51,7 @@ type fakeSSHServer struct { } func TestGetSSHUsernames(t *testing.T) { - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := 
must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) b := &LocalBackend{pm: pm, store: pm.Store()} b.sshServer = fakeSSHServer{} res, err := b.getSSHUsernames(new(tailcfg.C2NSSHUsernamesRequest)) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 30538f2c824e1..ff21c920c2b7a 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -336,7 +336,7 @@ func TestStateMachine(t *testing.T) { sys := tsd.NewSystem() store := new(testStateStorage) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -974,7 +974,7 @@ func TestEditPrefsHasNoKeys(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) sys := tsd.NewSystem() sys.Set(new(mem.Store)) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -1525,7 +1525,7 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( EventBus: sys.Bus.Get(), NetMon: dialer.NetMon(), Metrics: sys.UserMetricsRegistry(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), DisablePortMapper: true, }) if err != nil { diff --git a/ipn/lapitest/backend.go b/ipn/lapitest/backend.go index ddf48fb2893d8..6a83431f351b1 100644 --- a/ipn/lapitest/backend.go +++ b/ipn/lapitest/backend.go @@ -33,7 +33,7 @@ func newBackend(opts *options) *ipnlocal.LocalBackend { sys.Set(&mem.Store{}) } - e, err := wgengine.NewFakeUserspaceEngine(opts.Logf(), sys.Set, sys.HealthTracker(), 
sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(opts.Logf(), sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { opts.tb.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 046eb744d460a..fa24717f7a942 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -339,7 +339,7 @@ func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) store := new(mem.Store) sys.Set(store) - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go index f4c42791e9b5b..46883a1e7db54 100644 --- a/net/dns/manager_tcp_test.go +++ b/net/dns/manager_tcp_test.go @@ -20,6 +20,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tstest" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" ) func mkDNSRequest(domain dnsname.FQDN, tp dns.Type, modify func(*dns.Builder)) []byte { @@ -89,7 +90,7 @@ func TestDNSOverTCP(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(t.Logf, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + m := NewManager(t.Logf, &f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts( @@ -174,7 +175,7 @@ func TestDNSOverTCP_TooLarge(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(log, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + m := NewManager(log, &f, 
health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts("andrew.ts.com.", "1.2.3.4"), diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index 522f9636abefe..b5a510862580b 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -19,6 +19,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/types/dnstype" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" ) type fakeOSConfigurator struct { @@ -932,7 +933,7 @@ func TestManager(t *testing.T) { goos = "linux" } knobs := &controlknobs.Knobs{} - m := NewManager(t.Logf, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, knobs, goos) + m := NewManager(t.Logf, &f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, knobs, goos) m.resolver.TestOnlySetHook(f.SetResolver) if err := m.Set(test.in); err != nil { @@ -1038,7 +1039,7 @@ func TestConfigRecompilation(t *testing.T) { SearchDomains: fqdns("foo.ts.net"), } - m := NewManager(t.Logf, f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "darwin") + m := NewManager(t.Logf, f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "darwin") var managerConfig *resolver.Config m.resolver.TestOnlySetHook(func(cfg resolver.Config) { diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index f7cda15f6a000..f77388ca721da 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -29,7 +29,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tstest" "tailscale.com/types/dnstype" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) func (rr resolverAndDelay) String() string { @@ -455,8 +455,7 @@ func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) func runTestQuery(tb testing.TB, request []byte, modify 
func(*forwarder), ports ...uint16) ([]byte, error) { logf := tstest.WhileTestRunningLogger(tb) - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(tb) netMon, err := netmon.New(bus, logf) if err != nil { tb.Fatal(err) @@ -465,7 +464,7 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports var dialer tsdial.Dialer dialer.SetNetMon(netMon) - fwd := newForwarder(logf, netMon, nil, &dialer, new(health.Tracker), nil) + fwd := newForwarder(logf, netMon, nil, &dialer, health.NewTracker(bus), nil) if modify != nil { modify(fwd) } diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index 4bbfd4d6a417e..0823ea139bc1a 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -31,7 +31,7 @@ import ( "tailscale.com/types/dnstype" "tailscale.com/types/logger" "tailscale.com/util/dnsname" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) var ( @@ -356,7 +356,7 @@ func newResolver(t testing.TB) *Resolver { return New(t.Logf, nil, // no link selector tsdial.NewDialer(netmon.NewStatic()), - new(health.Tracker), + health.NewTracker(eventbustest.NewBus(t)), nil, // no control knobs ) } @@ -1060,8 +1060,7 @@ func TestForwardLinkSelection(t *testing.T) { // routes differently. specialIP := netaddr.IPv4(1, 2, 3, 4) - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, ".... netmon: ")) if err != nil { @@ -1074,7 +1073,7 @@ func TestForwardLinkSelection(t *testing.T) { return "special" } return "" - }), new(tsdial.Dialer), new(health.Tracker), nil /* no control knobs */) + }), new(tsdial.Dialer), health.NewTracker(bus), nil /* no control knobs */) // Test non-special IP. 
if got, err := fwd.packetListener(netip.Addr{}); err != nil { diff --git a/net/tlsdial/tlsdial_test.go b/net/tlsdial/tlsdial_test.go index e2c4cdd4f51cb..a288d765306e1 100644 --- a/net/tlsdial/tlsdial_test.go +++ b/net/tlsdial/tlsdial_test.go @@ -16,6 +16,7 @@ import ( "tailscale.com/health" "tailscale.com/net/bakedroots" + "tailscale.com/util/eventbus/eventbustest" ) func TestFallbackRootWorks(t *testing.T) { @@ -85,7 +86,7 @@ func TestFallbackRootWorks(t *testing.T) { }, DisableKeepAlives: true, // for test cleanup ease } - ht := new(health.Tracker) + ht := health.NewTracker(eventbustest.NewBus(t)) tr.TLSClientConfig = Config(ht, tr.TLSClientConfig) c := &http.Client{Transport: tr} diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 96fb87f4903c0..44b2d68dfdfb3 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -1062,7 +1062,7 @@ func TestSSHAuthFlow(t *testing.T) { func TestSSH(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) sys := tsd.NewSystem() - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatal(err) } diff --git a/tsd/tsd.go b/tsd/tsd.go index e4a512e4b6eba..263b8de704cbb 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -60,6 +60,7 @@ type System struct { DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] PolicyClient SubSystem[policyclient.Client] + HealthTracker SubSystem[*health.Tracker] // InitialConfig is initial server config, if any. // It is nil if the node is not in declarative mode. 
@@ -74,7 +75,6 @@ type System struct { controlKnobs controlknobs.Knobs proxyMap proxymap.Mapper - healthTracker health.Tracker userMetricsRegistry usermetric.Registry } @@ -91,6 +91,10 @@ func NewSystemWithBus(bus *eventbus.Bus) *System { } sys := new(System) sys.Set(bus) + + tracker := health.NewTracker(bus) + sys.Set(tracker) + return sys } @@ -138,6 +142,8 @@ func (s *System) Set(v any) { s.DriveForRemote.Set(v) case policyclient.Client: s.PolicyClient.Set(v) + case *health.Tracker: + s.HealthTracker.Set(v) default: panic(fmt.Sprintf("unknown type %T", v)) } @@ -167,11 +173,6 @@ func (s *System) ProxyMapper() *proxymap.Mapper { return &s.proxyMap } -// HealthTracker returns the system health tracker. -func (s *System) HealthTracker() *health.Tracker { - return &s.healthTracker -} - // UserMetricsRegistry returns the system usermetrics. func (s *System) UserMetricsRegistry() *usermetric.Registry { return &s.userMetricsRegistry diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index d25da0996d3a6..d9b9b64c1e2b7 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -577,7 +577,7 @@ func (s *Server) start() (reterr error) { sys := tsd.NewSystem() s.sys = sys - if err := s.startLogger(&closePool, sys.HealthTracker(), tsLogf); err != nil { + if err := s.startLogger(&closePool, sys.HealthTracker.Get(), tsLogf); err != nil { return err } @@ -595,7 +595,7 @@ func (s *Server) start() (reterr error) { Dialer: s.dialer, SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), }) if err != nil { @@ -603,7 +603,7 @@ func (s *Server) start() (reterr error) { } closePool.add(s.dialer) sys.Set(eng) - sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) + sys.HealthTracker.Get().SetMetricsRegistry(sys.UserMetricsRegistry()) // TODO(oxtoacart): do we need to support Taildrive on tsnet, and if so, how? 
ns, err := netstack.Create(tsLogf, sys.Tun.Get(), eng, sys.MagicSock.Get(), s.dialer, sys.DNSManager.Get(), sys.ProxyMapper()) diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index b7375adc40ed3..af725ace1f9ee 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -100,7 +100,7 @@ func Expect(tw *Watcher, filters ...any) error { case <-time.After(tw.TimeOut): return fmt.Errorf( "timed out waiting for event, saw %d events, %d was expected", - eventCount, head) + eventCount, len(filters)) case <-tw.chDone: return errors.New("watcher closed while waiting for events") } @@ -138,7 +138,7 @@ func ExpectExactly(tw *Watcher, filters ...any) error { case <-time.After(tw.TimeOut): return fmt.Errorf( "timed out waiting for event, saw %d events, %d was expected", - eventCount, pos) + eventCount, len(filters)) case <-tw.chDone: return errors.New("watcher closed while waiting for events") } diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index 9b195bdb78fde..4de7677f26257 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -53,7 +53,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. ListenPort: 0, Tun: t1, SetSubsystem: s1.Set, - HealthTracker: s1.HealthTracker(), + HealthTracker: s1.HealthTracker.Get(), }) if err != nil { log.Fatalf("e1 init: %v", err) @@ -80,7 +80,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. 
ListenPort: 0, Tun: t2, SetSubsystem: s2.Set, - HealthTracker: s2.HealthTracker(), + HealthTracker: s2.HealthTracker.Get(), }) if err != nil { log.Fatalf("e2 init: %v", err) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index bb5922c8c352d..1b885c3f139a7 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -67,6 +67,7 @@ import ( "tailscale.com/util/cibuild" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/set" @@ -179,14 +180,13 @@ func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, der func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { t.Helper() - bus := eventbus.New() - t.Cleanup(bus.Close) + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logf) if err != nil { t.Fatalf("netmon.New: %v", err) } - ht := new(health.Tracker) + ht := health.NewTracker(bus) var reg usermetric.Registry epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary @@ -1352,8 +1352,7 @@ func newTestConn(t testing.TB) *Conn { t.Helper() port := pickPort(t) - bus := eventbus.New() - t.Cleanup(bus.Close) + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... 
netmon: ")) if err != nil { @@ -1364,7 +1363,7 @@ func newTestConn(t testing.TB) *Conn { conn, err := NewConn(Options{ NetMon: netMon, EventBus: bus, - HealthTracker: new(health.Tracker), + HealthTracker: health.NewTracker(bus), Metrics: new(usermetric.Registry), DisablePortMapper: true, Logf: t.Logf, @@ -3038,7 +3037,7 @@ func TestMaybeSetNearestDERP(t *testing.T) { } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { - ht := new(health.Tracker) + ht := health.NewTracker(eventbustest.NewBus(t)) c := newConn(t.Logf) c.myDerp = tt.old c.derpMap = derpMap diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 584b3babc6004..93022811ce409 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -50,7 +50,7 @@ func TestInjectInboundLeak(t *testing.T) { Tun: tunDev, Dialer: dialer, SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) @@ -110,7 +110,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { Tun: tunDev, Dialer: dialer, SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index b6a5a1ac04753..3b1eb7db6044e 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -375,7 +375,7 @@ ip route add throw 192.168.0.0/24 table 52` + basic, defer mon.Close() fake := NewFakeOS(t) - ht := new(health.Tracker) + ht := health.NewTracker(bus) router, err := newUserspaceRouterAdvanced(t.Logf, "tailscale0", mon, fake, ht, bus) router.(*linuxRouter).nfr = fake.nfr if err != nil { diff --git a/wgengine/userspace_ext_test.go b/wgengine/userspace_ext_test.go index 5e7d1ce6a517d..8e7bbb7a9c5c9 100644 --- a/wgengine/userspace_ext_test.go 
+++ b/wgengine/userspace_ext_test.go @@ -21,7 +21,7 @@ func TestIsNetstack(t *testing.T) { tstest.WhileTestRunningLogger(t), wgengine.Config{ SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }, @@ -73,7 +73,7 @@ func TestIsNetstackRouter(t *testing.T) { } conf := tt.conf conf.SetSubsystem = sys.Set - conf.HealthTracker = sys.HealthTracker() + conf.HealthTracker = sys.HealthTracker.Get() conf.Metrics = sys.UserMetricsRegistry() conf.EventBus = sys.Bus.Get() e, err := wgengine.NewUserspaceEngine(logger.Discard, conf) diff --git a/wgengine/userspace_test.go b/wgengine/userspace_test.go index 87a36c6734f08..89d75b98adafb 100644 --- a/wgengine/userspace_test.go +++ b/wgengine/userspace_test.go @@ -25,7 +25,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/opt" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" @@ -101,10 +101,9 @@ func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView { } func TestUserspaceEngineReconfig(t *testing.T) { - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) - ht := new(health.Tracker) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg, bus) if err != nil { @@ -170,12 +169,11 @@ func TestUserspaceEnginePortReconfig(t *testing.T) { var knobs controlknobs.Knobs - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) // Keep making a wgengine until we find an unused port var ue *userspaceEngine - ht := new(health.Tracker) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) for i := range 100 { attempt := uint16(defaultPort + i) @@ -258,9 +256,8 @@ func TestUserspaceEnginePeerMTUReconfig(t *testing.T) { var knobs controlknobs.Knobs - bus := eventbus.New() - 
defer bus.Close() - ht := new(health.Tracker) + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht, reg, bus) if err != nil { diff --git a/wgengine/watchdog_test.go b/wgengine/watchdog_test.go index a54a0d3fa1e13..35fd8f33105e6 100644 --- a/wgengine/watchdog_test.go +++ b/wgengine/watchdog_test.go @@ -9,7 +9,7 @@ import ( "time" "tailscale.com/health" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" ) @@ -25,9 +25,8 @@ func TestWatchdog(t *testing.T) { t.Run("default watchdog does not fire", func(t *testing.T) { t.Parallel() - bus := eventbus.New() - defer bus.Close() - ht := new(health.Tracker) + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg, bus) if err != nil { From 84659b1dc6afab63c7fca16b250d1ac1624515b4 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 16 Sep 2025 17:39:21 +0100 Subject: [PATCH 0345/1093] ipn: fix the string representation of an empty ipn.Notify Before: `ipn.Notify}` After: `ipn.Notify{}` Updates #cleanup Signed-off-by: Alex Chan --- ipn/backend.go | 6 +++++- ipn/backend_test.go | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 ipn/backend_test.go diff --git a/ipn/backend.go b/ipn/backend.go index fd4442f7160db..91cf81ca52962 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -205,7 +205,11 @@ func (n Notify) String() string { } s := sb.String() - return s[0:len(s)-1] + "}" + if s == "Notify{" { + return "Notify{}" + } else { + return s[0:len(s)-1] + "}" + } } // PartialFile represents an in-progress incoming file transfer. 
diff --git a/ipn/backend_test.go b/ipn/backend_test.go new file mode 100644 index 0000000000000..d72b966152ca3 --- /dev/null +++ b/ipn/backend_test.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipn + +import ( + "testing" + + "tailscale.com/health" + "tailscale.com/types/empty" +) + +func TestNotifyString(t *testing.T) { + for _, tt := range []struct { + name string + value Notify + expected string + }{ + { + name: "notify-empty", + value: Notify{}, + expected: "Notify{}", + }, + { + name: "notify-with-login-finished", + value: Notify{LoginFinished: &empty.Message{}}, + expected: "Notify{LoginFinished}", + }, + { + name: "notify-with-multiple-fields", + value: Notify{LoginFinished: &empty.Message{}, Health: &health.State{}}, + expected: "Notify{LoginFinished Health{...}}", + }, + } { + t.Run(tt.name, func(t *testing.T) { + actual := tt.value.String() + if actual != tt.expected { + t.Fatalf("expected=%q, actual=%q", tt.expected, actual) + } + }) + } +} From b63f5d7e7def89c73b3c4e7262b448164faaa5c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Bojanowski?= Date: Sat, 13 Sep 2025 12:58:46 +0200 Subject: [PATCH 0346/1093] logpolicy/logpolicy: use noopPretendSuccessTransport if NoLogsNoSupport envknob is set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Bojanowski --- logpolicy/logpolicy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 587b421f3c4cc..823c118b76a8e 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -868,7 +868,7 @@ type TransportOptions struct { // New returns an HTTP Transport particularly suited to uploading logs // to the given host name. See [DialContext] for details on how it works. 
func (opts TransportOptions) New() http.RoundTripper { - if testenv.InTest() { + if testenv.InTest() || envknob.NoLogsNoSupport() { return noopPretendSuccessTransport{} } if opts.NetMon == nil { From 8608e421031746187392c838e3008d087aaed4df Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 16 Sep 2025 10:52:39 -0700 Subject: [PATCH 0347/1093] feature,ipn/ipnlocal,wgengine: improve how eventbus shutdown is handled (#17156) Instead of waiting for a designated subscription to close as a canary for the bus being stopped, use the bus Client's own signal for closure added in #17118. Updates #cleanup Change-Id: I384ea39f3f1f6a030a6282356f7b5bdcdf8d7102 Signed-off-by: M. J. Fromberger --- feature/relayserver/relayserver.go | 4 +--- ipn/ipnlocal/expiry.go | 6 ++---- ipn/ipnlocal/local.go | 9 ++------- wgengine/magicsock/magicsock.go | 6 ++---- wgengine/router/router_linux.go | 6 ++---- 5 files changed, 9 insertions(+), 22 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 24304e8eccbad..d77d7145ae59c 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -157,9 +157,7 @@ func (e *extension) consumeEventbusTopics(port int) { select { case <-e.disconnectFromBusCh: return - case <-reqSub.Done(): - // If reqSub is done, the eventClient has been closed, which is a - // signal to return. + case <-eventClient.Done(): return case req := <-reqSub.Events(): if rs == nil { diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index 3d20d57b464e5..9427f07382bd6 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -68,15 +68,13 @@ func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { // [eventbus.Subscriber]'s and passes them to their related handler. Events are // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. 
It returns when the -// [controlclient.ControlTime] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). +// [eventbus.Client] is closed. func (em *expiryManager) consumeEventbusTopics() { defer close(em.subsDoneCh) for { select { - case <-em.controlTimeSub.Done(): + case <-em.eventClient.Done(): return case time := <-em.controlTimeSub.Events(): em.onControlTime(time.Value) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4c27bea45136c..5cdfaf549c7c8 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -619,18 +619,13 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo // [eventbus.Subscriber]'s and passes them to their related handler. Events are // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the -// [tailcfg.ClientVersion] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). +// [eventbus.Client] is closed. func (b *LocalBackend) consumeEventbusTopics() { defer close(b.subsDoneCh) for { select { - // TODO(cmol): Move to using b.eventClient.Done() once implemented. - // In the meantime, we rely on the subs not going away until the client is - // closed, closing all its subscribers. - case <-b.clientVersionSub.Done(): + case <-b.eventClient.Done(): return case clientVersion := <-b.clientVersionSub.Events(): b.onClientVersion(&clientVersion) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 36402122c9448..719cc68a4bed1 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -640,15 +640,13 @@ func newConn(logf logger.Logf) *Conn { // [eventbus.Subscriber]'s and passes them to their related handler. 
Events are // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the -// [portmapper.Mapping] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). +// [eventbus.Client] is closed. func (c *Conn) consumeEventbusTopics() { defer close(c.subsDoneCh) for { select { - case <-c.pmSub.Done(): + case <-c.eventClient.Done(): return case <-c.pmSub.Events(): c.onPortMapChanged() diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index 2382e87cd5185..a9edd7f9608b5 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -158,13 +158,11 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon // [eventbus.Subscriber]'s and passes them to their related handler. Events are // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the -// [portmapper.Mapping] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). +// [eventbus.Client] is closed. 
func (r *linuxRouter) consumeEventbusTopics() { for { select { - case <-r.ruleDeletedSub.Done(): + case <-r.eventClient.Done(): return case rulesDeleted := <-r.ruleDeletedSub.Events(): r.onIPRuleDeleted(rulesDeleted.Table, rulesDeleted.Priority) From 2b0f59cd3880275d786f8546321a1e02509f060c Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 16 Sep 2025 18:35:55 +0100 Subject: [PATCH 0348/1093] logpolicy: remove the deprecated and now-unused `NewWithConfigPath` Updates #cleanup Signed-off-by: Alex Chan --- logpolicy/logpolicy.go | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 823c118b76a8e..4c90378d025d3 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -464,18 +464,6 @@ func New(collection string, netMon *netmon.Monitor, health *health.Tracker, logf }.New() } -// Deprecated: Use [Options.New] instead. -func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, health *health.Tracker, logf logger.Logf) *Policy { - return Options{ - Collection: collection, - Dir: dir, - CmdName: cmdName, - NetMon: netMon, - Health: health, - Logf: logf, - }.New() -} - // Options is used to construct a [Policy]. type Options struct { // Collection is a required collection to upload logs under. From 99b3f69126e503dd18a794e24e822f667b330212 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 15 Sep 2025 19:50:21 -0700 Subject: [PATCH 0349/1093] feature/portmapper: make the portmapper & its debugging tools modular Starting at a minimal binary and adding one feature back... tailscaled tailscale combined (linux/amd64) 30073135 17451704 31543692 omitting everything + 480302 + 10258 + 493896 .. add debugportmapper + 475317 + 151943 + 467660 .. add portmapper + 500086 + 162873 + 510511 .. 
add portmapper+debugportmapper Fixes #17148 Change-Id: I90bd0e9d1bd8cbe64fa2e885e9afef8fb5ee74b1 Signed-off-by: Brad Fitzpatrick --- client/local/debugportmapper.go | 84 ++++++++ client/local/local.go | 64 ------ client/tailscale/localclient_aliases.go | 5 - cmd/k8s-operator/depaware.txt | 7 +- cmd/omitsize/omitsize.go | 25 ++- cmd/tailscale/cli/debug-portmap.go | 79 +++++++ cmd/tailscale/cli/debug.go | 56 +---- cmd/tailscale/cli/netcheck.go | 29 ++- cmd/tailscale/depaware.txt | 11 +- cmd/tailscaled/depaware.txt | 8 +- cmd/tailscaled/deps_test.go | 18 ++ cmd/tsidp/depaware.txt | 7 +- .../feature_debugportmapper_disabled.go | 13 ++ .../feature_debugportmapper_enabled.go | 13 ++ .../feature_portmapper_disabled.go | 13 ++ .../feature_portmapper_enabled.go | 13 ++ feature/condregister/condregister.go | 7 + feature/condregister/maybe_debugportmapper.go | 8 + feature/condregister/portmapper/doc.go | 6 + .../portmapper/maybe_portmapper.go | 8 + feature/debugportmapper/debugportmapper.go | 204 ++++++++++++++++++ feature/featuretags/featuretags.go | 2 + feature/portmapper/portmapper.go | 38 ++++ ipn/ipnlocal/local.go | 5 + ipn/ipnlocal/local_test.go | 1 + ipn/localapi/localapi.go | 163 -------------- net/netcheck/netcheck.go | 6 +- net/portmapper/igd_test.go | 8 +- net/portmapper/portmapper.go | 85 ++++---- net/portmapper/portmapper_test.go | 10 +- .../portmappertype/portmappertype.go | 88 ++++++++ net/portmapper/upnp.go | 4 +- net/portmapper/upnp_test.go | 3 +- tsnet/depaware.txt | 7 +- tsnet/tsnet.go | 1 + wgengine/magicsock/magicsock.go | 58 +++-- 36 files changed, 758 insertions(+), 399 deletions(-) create mode 100644 client/local/debugportmapper.go create mode 100644 cmd/tailscale/cli/debug-portmap.go create mode 100644 feature/buildfeatures/feature_debugportmapper_disabled.go create mode 100644 feature/buildfeatures/feature_debugportmapper_enabled.go create mode 100644 feature/buildfeatures/feature_portmapper_disabled.go create mode 100644 
feature/buildfeatures/feature_portmapper_enabled.go create mode 100644 feature/condregister/maybe_debugportmapper.go create mode 100644 feature/condregister/portmapper/doc.go create mode 100644 feature/condregister/portmapper/maybe_portmapper.go create mode 100644 feature/debugportmapper/debugportmapper.go create mode 100644 feature/portmapper/portmapper.go create mode 100644 net/portmapper/portmappertype/portmappertype.go diff --git a/client/local/debugportmapper.go b/client/local/debugportmapper.go new file mode 100644 index 0000000000000..04ed1c109a54f --- /dev/null +++ b/client/local/debugportmapper.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debugportmapper + +package local + +import ( + "cmp" + "context" + "fmt" + "io" + "net/http" + "net/netip" + "net/url" + "strconv" + "time" + + "tailscale.com/client/tailscale/apitype" +) + +// DebugPortmapOpts contains options for the [Client.DebugPortmap] command. +type DebugPortmapOpts struct { + // Duration is how long the mapping should be created for. It defaults + // to 5 seconds if not set. + Duration time.Duration + + // Type is the kind of portmap to debug. The empty string instructs the + // portmap client to perform all known types. Other valid options are + // "pmp", "pcp", and "upnp". + Type string + + // GatewayAddr specifies the gateway address used during portmapping. + // If set, SelfAddr must also be set. If unset, it will be + // autodetected. + GatewayAddr netip.Addr + + // SelfAddr specifies the gateway address used during portmapping. If + // set, GatewayAddr must also be set. If unset, it will be + // autodetected. + SelfAddr netip.Addr + + // LogHTTP instructs the debug-portmap endpoint to print all HTTP + // requests and responses made to the logs. 
+ LogHTTP bool +} + +// DebugPortmap invokes the debug-portmap endpoint, and returns an +// io.ReadCloser that can be used to read the logs that are printed during this +// process. +// +// opts can be nil; if so, default values will be used. +func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) { + vals := make(url.Values) + if opts == nil { + opts = &DebugPortmapOpts{} + } + + vals.Set("duration", cmp.Or(opts.Duration, 5*time.Second).String()) + vals.Set("type", opts.Type) + vals.Set("log_http", strconv.FormatBool(opts.LogHTTP)) + + if opts.GatewayAddr.IsValid() != opts.SelfAddr.IsValid() { + return nil, fmt.Errorf("both GatewayAddr and SelfAddr must be provided if one is") + } else if opts.GatewayAddr.IsValid() { + vals.Set("gateway_and_self", fmt.Sprintf("%s/%s", opts.GatewayAddr, opts.SelfAddr)) + } + + req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil) + if err != nil { + return nil, err + } + res, err := lc.doLocalRequestNiceError(req) + if err != nil { + return nil, err + } + if res.StatusCode != 200 { + body, _ := io.ReadAll(res.Body) + res.Body.Close() + return nil, fmt.Errorf("HTTP %s: %s", res.Status, body) + } + + return res.Body, nil +} diff --git a/client/local/local.go b/client/local/local.go index 32e8208da2fed..a132e577b9769 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -591,70 +591,6 @@ func (lc *Client) DebugResultJSON(ctx context.Context, action string) (any, erro return x, nil } -// DebugPortmapOpts contains options for the [Client.DebugPortmap] command. -type DebugPortmapOpts struct { - // Duration is how long the mapping should be created for. It defaults - // to 5 seconds if not set. - Duration time.Duration - - // Type is the kind of portmap to debug. The empty string instructs the - // portmap client to perform all known types. Other valid options are - // "pmp", "pcp", and "upnp". 
- Type string - - // GatewayAddr specifies the gateway address used during portmapping. - // If set, SelfAddr must also be set. If unset, it will be - // autodetected. - GatewayAddr netip.Addr - - // SelfAddr specifies the gateway address used during portmapping. If - // set, GatewayAddr must also be set. If unset, it will be - // autodetected. - SelfAddr netip.Addr - - // LogHTTP instructs the debug-portmap endpoint to print all HTTP - // requests and responses made to the logs. - LogHTTP bool -} - -// DebugPortmap invokes the debug-portmap endpoint, and returns an -// io.ReadCloser that can be used to read the logs that are printed during this -// process. -// -// opts can be nil; if so, default values will be used. -func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) { - vals := make(url.Values) - if opts == nil { - opts = &DebugPortmapOpts{} - } - - vals.Set("duration", cmp.Or(opts.Duration, 5*time.Second).String()) - vals.Set("type", opts.Type) - vals.Set("log_http", strconv.FormatBool(opts.LogHTTP)) - - if opts.GatewayAddr.IsValid() != opts.SelfAddr.IsValid() { - return nil, fmt.Errorf("both GatewayAddr and SelfAddr must be provided if one is") - } else if opts.GatewayAddr.IsValid() { - vals.Set("gateway_and_self", fmt.Sprintf("%s/%s", opts.GatewayAddr, opts.SelfAddr)) - } - - req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil) - if err != nil { - return nil, err - } - res, err := lc.doLocalRequestNiceError(req) - if err != nil { - return nil, err - } - if res.StatusCode != 200 { - body, _ := io.ReadAll(res.Body) - res.Body.Close() - return nil, fmt.Errorf("HTTP %s: %s", res.Status, body) - } - - return res.Body, nil -} - // SetDevStoreKeyValue set a statestore key/value. It's only meant for development. // The schema (including when keys are re-read) is not a stable interface. 
func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) error { diff --git a/client/tailscale/localclient_aliases.go b/client/tailscale/localclient_aliases.go index 2b53906b71ae4..58be312b47cc9 100644 --- a/client/tailscale/localclient_aliases.go +++ b/client/tailscale/localclient_aliases.go @@ -32,11 +32,6 @@ type IPNBusWatcher = local.IPNBusWatcher // Deprecated: import [tailscale.com/client/local] instead. type BugReportOpts = local.BugReportOpts -// DebugPortmapOpts is an alias for [tailscale.com/client/local.DebugPortmapOpts]. -// -// Deprecated: import [tailscale.com/client/local] instead. -type DebugPortmapOpts = local.DebugPortmapOpts - // PingOpts is an alias for [tailscale.com/client/local.PingOpts]. // // Deprecated: import [tailscale.com/client/local] instead. diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 87bae60c89b34..faf7b2f838d91 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -798,7 +798,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ - tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ @@ -866,7 +868,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/packet from tailscale.com/net/connstats+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun 
tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go index a4bce63295f25..5940ba5207f29 100644 --- a/cmd/omitsize/omitsize.go +++ b/cmd/omitsize/omitsize.go @@ -22,9 +22,9 @@ import ( var ( cacheDir = flag.String("cachedir", "", "if non-empty, use this directory to store cached size results to speed up subsequent runs. The tool does not consider the git status when deciding whether to use the cache. It's on you to nuke it between runs if the tree changed.") - features = flag.String("features", "", "comma-separated list of features to consider, with or without the ts_omit_ prefix") + features = flag.String("features", "", "comma-separated list of features to list in the table, with or without the ts_omit_ prefix. It may also contain a '+' sign(s) for ANDing features together. 
If empty, all omittable features are considered one at a time.") - showRemovals = flag.Bool("show-removals", false, "if true, show a table of sizes removing one feature at a time from the full set") + showRemovals = flag.Bool("show-removals", false, "if true, show a table of sizes removing one feature at a time from the full set.") ) func main() { @@ -43,10 +43,14 @@ func main() { all = slices.Clone(allOmittable) } else { for v := range strings.SplitSeq(*features, ",") { - if !strings.HasPrefix(v, "ts_omit_") { - v = "ts_omit_" + v + var withOmit []string + for v := range strings.SplitSeq(v, "+") { + if !strings.HasPrefix(v, "ts_omit_") { + v = "ts_omit_" + v + } + withOmit = append(withOmit, v) } - all = append(all, v) + all = append(all, strings.Join(withOmit, "+")) } } @@ -70,6 +74,9 @@ func main() { fmt.Printf("-%8d -%8d -%8d omit-all\n", baseD-minD, baseC-minC, baseBoth-minBoth) for _, t := range all { + if strings.Contains(t, "+") { + log.Fatalf("TODO: make --show-removals support ANDed features like %q", t) + } sizeD := measure("tailscaled", t) sizeC := measure("tailscale", t) sizeBoth := measure("tailscaled", append([]string{t}, "ts_include_cli")...) @@ -84,17 +91,17 @@ func main() { fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) for _, t := range all { - tags := allExcept(allOmittable, t) + tags := allExcept(allOmittable, strings.Split(t, "+")) sizeD := measure("tailscaled", tags...) sizeC := measure("tailscale", tags...) sizeBoth := measure("tailscaled", append(tags, "ts_include_cli")...) - fmt.Printf("+%8d +%8d +%8d .. add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), strings.TrimPrefix(t, "ts_omit_")) + fmt.Printf("+%8d +%8d +%8d .. 
add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), strings.ReplaceAll(t, "ts_omit_", "")) } } -func allExcept(all []string, omit string) []string { - return slices.DeleteFunc(slices.Clone(all), func(s string) bool { return s == omit }) +func allExcept(all, omit []string) []string { + return slices.DeleteFunc(slices.Clone(all), func(s string) bool { return slices.Contains(omit, s) }) } func measure(bin string, tags ...string) int64 { diff --git a/cmd/tailscale/cli/debug-portmap.go b/cmd/tailscale/cli/debug-portmap.go new file mode 100644 index 0000000000000..d8db1442c7073 --- /dev/null +++ b/cmd/tailscale/cli/debug-portmap.go @@ -0,0 +1,79 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_debugportmapper + +package cli + +import ( + "context" + "flag" + "fmt" + "io" + "net/netip" + "os" + "time" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/local" +) + +func init() { + debugPortmapCmd = mkDebugPortmapCmd +} + +func mkDebugPortmapCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "portmap", + ShortUsage: "tailscale debug portmap", + Exec: debugPortmap, + ShortHelp: "Run portmap debugging", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("portmap") + fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping") + fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`) + fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`) + fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`) + fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`) + return fs + })(), + } +} + +var debugPortmapArgs struct { + duration time.Duration + gatewayAddr string + selfAddr string + ty string + logHTTP bool +} + +func 
debugPortmap(ctx context.Context, args []string) error { + opts := &local.DebugPortmapOpts{ + Duration: debugPortmapArgs.duration, + Type: debugPortmapArgs.ty, + LogHTTP: debugPortmapArgs.logHTTP, + } + if (debugPortmapArgs.gatewayAddr != "") != (debugPortmapArgs.selfAddr != "") { + return fmt.Errorf("if one of --gateway-addr and --self-addr is provided, the other must be as well") + } + if debugPortmapArgs.gatewayAddr != "" { + var err error + opts.GatewayAddr, err = netip.ParseAddr(debugPortmapArgs.gatewayAddr) + if err != nil { + return fmt.Errorf("invalid --gateway-addr: %w", err) + } + opts.SelfAddr, err = netip.ParseAddr(debugPortmapArgs.selfAddr) + if err != nil { + return fmt.Errorf("invalid --self-addr: %w", err) + } + } + rc, err := localClient.DebugPortmap(ctx, opts) + if err != nil { + return err + } + defer rc.Close() + + _, err = io.Copy(os.Stdout, rc) + return err +} diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 4960aeec2d50a..39c9748ef5289 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -30,7 +30,6 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "golang.org/x/net/http/httpproxy" "golang.org/x/net/http2" - "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/control/controlhttp" "tailscale.com/hostinfo" @@ -50,6 +49,7 @@ import ( var ( debugCaptureCmd func() *ffcli.Command // or nil + debugPortmapCmd func() *ffcli.Command // or nil ) func debugCmd() *ffcli.Command { @@ -319,21 +319,7 @@ func debugCmd() *ffcli.Command { ShortHelp: "Test a DERP configuration", }, ccall(debugCaptureCmd), - { - Name: "portmap", - ShortUsage: "tailscale debug portmap", - Exec: debugPortmap, - ShortHelp: "Run portmap debugging", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("portmap") - fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping") - fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", 
or "upnp")`) - fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`) - fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`) - fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`) - return fs - })(), - }, + ccall(debugPortmapCmd), { Name: "peer-endpoint-changes", ShortUsage: "tailscale debug peer-endpoint-changes ", @@ -1210,44 +1196,6 @@ func runSetExpire(ctx context.Context, args []string) error { return localClient.DebugSetExpireIn(ctx, setExpireArgs.in) } -var debugPortmapArgs struct { - duration time.Duration - gatewayAddr string - selfAddr string - ty string - logHTTP bool -} - -func debugPortmap(ctx context.Context, args []string) error { - opts := &local.DebugPortmapOpts{ - Duration: debugPortmapArgs.duration, - Type: debugPortmapArgs.ty, - LogHTTP: debugPortmapArgs.logHTTP, - } - if (debugPortmapArgs.gatewayAddr != "") != (debugPortmapArgs.selfAddr != "") { - return fmt.Errorf("if one of --gateway-addr and --self-addr is provided, the other must be as well") - } - if debugPortmapArgs.gatewayAddr != "" { - var err error - opts.GatewayAddr, err = netip.ParseAddr(debugPortmapArgs.gatewayAddr) - if err != nil { - return fmt.Errorf("invalid --gateway-addr: %w", err) - } - opts.SelfAddr, err = netip.ParseAddr(debugPortmapArgs.selfAddr) - if err != nil { - return fmt.Errorf("invalid --self-addr: %w", err) - } - } - rc, err := localClient.DebugPortmap(ctx, opts) - if err != nil { - return err - } - defer rc.Close() - - _, err = io.Copy(os.Stdout, rc) - return err -} - func runPeerEndpointChanges(ctx context.Context, args []string) error { st, err := localClient.Status(ctx) if err != nil { diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 0bdab59cb8beb..5ae8db8fa3fbb 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -17,14 +17,23 @@ 
import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/tlsdial" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/eventbus" + + // The "netcheck" command also wants the portmapper linked. + // + // TODO: make that subcommand either hit LocalAPI for that info, or use a + // tailscaled subcommand, to avoid making the CLI also link in the portmapper. + // For now (2025-09-15), keep doing what we've done for the past five years and + // keep linking it here. + _ "tailscale.com/feature/condregister/portmapper" ) var netcheckCmd = &ffcli.Command{ @@ -56,14 +65,13 @@ func runNetcheck(ctx context.Context, args []string) error { return err } - // Ensure that we close the portmapper after running a netcheck; this - // will release any port mappings created. - pm := portmapper.NewClient(portmapper.Config{ - Logf: logf, - NetMon: netMon, - EventBus: bus, - }) - defer pm.Close() + var pm portmappertype.Client + if buildfeatures.HasPortMapper { + // Ensure that we close the portmapper after running a netcheck; this + // will release any port mappings created. 
+ pm = portmappertype.HookNewPortMapper.Get()(logf, bus, netMon, nil, nil) + defer pm.Close() + } c := &netcheck.Client{ NetMon: netMon, @@ -210,6 +218,9 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error { } func portMapping(r *netcheck.Report) string { + if !buildfeatures.HasPortMapper { + return "binary built without portmapper support" + } if !r.AnyPortMappingChecked() { return "not checked" } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index a39363353f4c7..c86af7ea76645 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -96,7 +96,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp - tailscale.com/control/controlknobs from tailscale.com/net/portmapper tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck @@ -105,7 +104,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web tailscale.com/feature from tailscale.com/tsweb+ + tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli @@ -131,7 +133,8 @@ 
tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlhttp+ tailscale.com/net/ping from tailscale.com/net/netcheck - tailscale.com/net/portmapper from tailscale.com/cmd/tailscale/cli+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/sockstats from tailscale.com/control/controlhttp+ tailscale.com/net/stun from tailscale.com/net/netcheck L tailscale.com/net/tcpinfo from tailscale.com/derp @@ -175,7 +178,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ - tailscale.com/util/eventbus from tailscale.com/net/portmapper+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/groupmember from tailscale.com/client/web 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ @@ -351,7 +354,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/tailscale/goupnp+ + encoding/xml from github.com/godbus/dbus/v5/introspect+ errors from archive/tar+ expvar from tailscale.com/derp+ flag from github.com/peterbourgon/ff/v3+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 736c268dcdb02..d4e1f13bf95b6 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -272,10 +272,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/local+ 
tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ - tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/relayserver from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister @@ -338,7 +341,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/packet from tailscale.com/net/connstats+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/feature/portmapper+ tailscale.com/net/proxymux from tailscale.com/cmd/tailscaled tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a334eb9b76d4b..1609ba63350be 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -90,3 +90,21 @@ func TestOmitTailnetLock(t *testing.T) { }, }.Check(t) } + +func TestOmitPortmapper(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: 
"amd64", + Tags: "ts_omit_portmapper,ts_include_cli,ts_omit_debugportmapper", + OnDep: func(dep string) { + if dep == "tailscale.com/net/portmapper" { + t.Errorf("unexpected dep with ts_omit_portmapper: %q", dep) + return + } + if strings.Contains(dep, "goupnp") || strings.Contains(dep, "/soap") || + strings.Contains(dep, "internetgateway2") { + t.Errorf("unexpected dep with ts_omit_portmapper: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index c9cd12d4118e8..0aafff8e159f9 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -239,7 +239,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ - tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ @@ -295,7 +297,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts 
from tailscale.com/wgengine/magicsock diff --git a/feature/buildfeatures/feature_debugportmapper_disabled.go b/feature/buildfeatures/feature_debugportmapper_disabled.go new file mode 100644 index 0000000000000..eff85b8baaf50 --- /dev/null +++ b/feature/buildfeatures/feature_debugportmapper_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_debugportmapper + +package buildfeatures + +// HasDebugPortMapper is whether the binary was built with support for modular feature "portmapper debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugportmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebugPortMapper = false diff --git a/feature/buildfeatures/feature_debugportmapper_enabled.go b/feature/buildfeatures/feature_debugportmapper_enabled.go new file mode 100644 index 0000000000000..491aa5ed84af1 --- /dev/null +++ b/feature/buildfeatures/feature_debugportmapper_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_debugportmapper + +package buildfeatures + +// HasDebugPortMapper is whether the binary was built with support for modular feature "portmapper debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugportmapper" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDebugPortMapper = true diff --git a/feature/buildfeatures/feature_portmapper_disabled.go b/feature/buildfeatures/feature_portmapper_disabled.go new file mode 100644 index 0000000000000..212b22d40abfb --- /dev/null +++ b/feature/buildfeatures/feature_portmapper_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_portmapper + +package buildfeatures + +// HasPortMapper is whether the binary was built with support for modular feature "NAT-PMP/PCP/UPnP port mapping support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortMapper = false diff --git a/feature/buildfeatures/feature_portmapper_enabled.go b/feature/buildfeatures/feature_portmapper_enabled.go new file mode 100644 index 0000000000000..2f915d277a313 --- /dev/null +++ b/feature/buildfeatures/feature_portmapper_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_portmapper + +package buildfeatures + +// HasPortMapper is whether the binary was built with support for modular feature "NAT-PMP/PCP/UPnP port mapping support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortMapper = true diff --git a/feature/condregister/condregister.go b/feature/condregister/condregister.go index f9025095147f1..69e2b071cc19f 100644 --- a/feature/condregister/condregister.go +++ b/feature/condregister/condregister.go @@ -5,3 +5,10 @@ // by build tags. It is one central package that callers can empty import // to ensure all conditional features are registered. 
package condregister + +// Portmapper is special in that the CLI also needs to link it in, +// so it's pulled out into its own package, rather than using a maybe_*.go +// file in condregister. +import ( + _ "tailscale.com/feature/condregister/portmapper" +) diff --git a/feature/condregister/maybe_debugportmapper.go b/feature/condregister/maybe_debugportmapper.go new file mode 100644 index 0000000000000..4990d09ea5833 --- /dev/null +++ b/feature/condregister/maybe_debugportmapper.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debugportmapper + +package condregister + +import _ "tailscale.com/feature/debugportmapper" diff --git a/feature/condregister/portmapper/doc.go b/feature/condregister/portmapper/doc.go new file mode 100644 index 0000000000000..5c30538c43a11 --- /dev/null +++ b/feature/condregister/portmapper/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmapper registers support for portmapper +// if it's not disabled via the ts_omit_portmapper build tag. 
+package portmapper diff --git a/feature/condregister/portmapper/maybe_portmapper.go b/feature/condregister/portmapper/maybe_portmapper.go new file mode 100644 index 0000000000000..c306fd3d5a1f0 --- /dev/null +++ b/feature/condregister/portmapper/maybe_portmapper.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_portmapper + +package portmapper + +import _ "tailscale.com/feature/portmapper" diff --git a/feature/debugportmapper/debugportmapper.go b/feature/debugportmapper/debugportmapper.go new file mode 100644 index 0000000000000..2625086c64dcf --- /dev/null +++ b/feature/debugportmapper/debugportmapper.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package debugportmapper registers support for debugging Tailscale's +// portmapping support. +package debugportmapper + +import ( + "context" + "fmt" + "net" + "net/http" + "net/netip" + "strconv" + "strings" + "sync" + "time" + + "tailscale.com/ipn/localapi" + "tailscale.com/net/netmon" + "tailscale.com/net/portmapper" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +func init() { + localapi.Register("debug-portmap", serveDebugPortmap) +} + +func serveDebugPortmap(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + w.Header().Set("Content-Type", "text/plain") + + dur, err := time.ParseDuration(r.FormValue("duration")) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + gwSelf := r.FormValue("gateway_and_self") + + trueFunc := func() bool { return true } + // Update portmapper debug flags + debugKnobs := &portmapper.DebugKnobs{VerboseLogs: true} + switch r.FormValue("type") { + case "": + case "pmp": + debugKnobs.DisablePCPFunc = trueFunc + debugKnobs.DisableUPnPFunc = trueFunc + case "pcp": + debugKnobs.DisablePMPFunc = 
trueFunc + debugKnobs.DisableUPnPFunc = trueFunc + case "upnp": + debugKnobs.DisablePCPFunc = trueFunc + debugKnobs.DisablePMPFunc = trueFunc + default: + http.Error(w, "unknown portmap debug type", http.StatusBadRequest) + return + } + if k := h.LocalBackend().ControlKnobs(); k != nil { + if k.DisableUPnP.Load() { + debugKnobs.DisableUPnPFunc = trueFunc + } + } + + if defBool(r.FormValue("log_http"), false) { + debugKnobs.LogHTTP = true + } + + var ( + logLock sync.Mutex + handlerDone bool + ) + logf := func(format string, args ...any) { + if !strings.HasSuffix(format, "\n") { + format = format + "\n" + } + + logLock.Lock() + defer logLock.Unlock() + + // The portmapper can call this log function after the HTTP + // handler returns, which is not allowed and can cause a panic. + // If this happens, ignore the log lines since this typically + // occurs due to a client disconnect. + if handlerDone { + return + } + + // Write and flush each line to the client so that output is streamed + fmt.Fprintf(w, format, args...) 
+ if f, ok := w.(http.Flusher); ok { + f.Flush() + } + } + defer func() { + logLock.Lock() + handlerDone = true + logLock.Unlock() + }() + + ctx, cancel := context.WithTimeout(r.Context(), dur) + defer cancel() + + done := make(chan bool, 1) + + var c *portmapper.Client + c = portmapper.NewClient(portmapper.Config{ + Logf: logger.WithPrefix(logf, "portmapper: "), + NetMon: h.LocalBackend().NetMon(), + DebugKnobs: debugKnobs, + EventBus: h.LocalBackend().EventBus(), + OnChange: func() { + logf("portmapping changed.") + logf("have mapping: %v", c.HaveMapping()) + + if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { + logf("cb: mapping: %v", ext) + select { + case done <- true: + default: + } + return + } + logf("cb: no mapping") + }, + }) + defer c.Close() + + bus := eventbus.New() + defer bus.Close() + netMon, err := netmon.New(bus, logger.WithPrefix(logf, "monitor: ")) + if err != nil { + logf("error creating monitor: %v", err) + return + } + + gatewayAndSelfIP := func() (gw, self netip.Addr, ok bool) { + if a, b, ok := strings.Cut(gwSelf, "/"); ok { + gw = netip.MustParseAddr(a) + self = netip.MustParseAddr(b) + return gw, self, true + } + return netMon.GatewayAndSelfIP() + } + + c.SetGatewayLookupFunc(gatewayAndSelfIP) + + gw, selfIP, ok := gatewayAndSelfIP() + if !ok { + logf("no gateway or self IP; %v", netMon.InterfaceState()) + return + } + logf("gw=%v; self=%v", gw, selfIP) + + uc, err := net.ListenPacket("udp", "0.0.0.0:0") + if err != nil { + return + } + defer uc.Close() + c.SetLocalPort(uint16(uc.LocalAddr().(*net.UDPAddr).Port)) + + res, err := c.Probe(ctx) + if err != nil { + logf("error in Probe: %v", err) + return + } + logf("Probe: %+v", res) + + if !res.PCP && !res.PMP && !res.UPnP { + logf("no portmapping services available") + return + } + + if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { + logf("mapping: %v", ext) + } else { + logf("no mapping") + } + + select { + case <-done: + case <-ctx.Done(): + if r.Context().Err() == nil 
{ + logf("serveDebugPortmap: context done: %v", ctx.Err()) + } else { + h.Logf("serveDebugPortmap: context done: %v", ctx.Err()) + } + } +} + +func defBool(a string, def bool) bool { + if a == "" { + return def + } + v, err := strconv.ParseBool(a) + if err != nil { + return def + } + return v +} diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 6778593fae903..2c5f32310dcb8 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -48,9 +48,11 @@ var Features = map[FeatureTag]FeatureMeta{ "cli": {"CLI", "embed the CLI into the tailscaled binary"}, "completion": {"Completion", "CLI shell completion"}, "debugeventbus": {"DebugEventBus", "eventbus debug support"}, + "debugportmapper": {"DebugPortMapper", "portmapper debug support"}, "desktop_sessions": {"DesktopSessions", "Desktop sessions support"}, "drive": {"Drive", "Tailscale Drive (file server) support"}, "kube": {"Kube", "Kubernetes integration"}, + "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support"}, "relayserver": {"RelayServer", "Relay server"}, "serve": {"Serve", "Serve and Funnel support"}, "ssh": {"SSH", "Tailscale SSH support"}, diff --git a/feature/portmapper/portmapper.go b/feature/portmapper/portmapper.go new file mode 100644 index 0000000000000..e7be00ad17d8c --- /dev/null +++ b/feature/portmapper/portmapper.go @@ -0,0 +1,38 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmapper registers support for NAT-PMP, PCP, and UPnP port +// mapping protocols to help get direction connections through NATs. 
+package portmapper + +import ( + "tailscale.com/net/netmon" + "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +func init() { + portmappertype.HookNewPortMapper.Set(newPortMapper) +} + +func newPortMapper( + logf logger.Logf, + bus *eventbus.Bus, + netMon *netmon.Monitor, + disableUPnPOrNil func() bool, + onlyTCP443OrNil func() bool) portmappertype.Client { + + pm := portmapper.NewClient(portmapper.Config{ + EventBus: bus, + Logf: logf, + NetMon: netMon, + DebugKnobs: &portmapper.DebugKnobs{ + DisableAll: onlyTCP443OrNil, + DisableUPnPFunc: disableUPnPOrNil, + }, + }) + pm.SetGatewayLookupFunc(netMon.GatewayAndSelfIP) + return pm +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5cdfaf549c7c8..988c0b5383fb5 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6780,6 +6780,11 @@ func (b *LocalBackend) ControlKnobs() *controlknobs.Knobs { return b.sys.ControlKnobs() } +// EventBus returns the node's event bus. +func (b *LocalBackend) EventBus() *eventbus.Bus { + return b.sys.Bus.Get() +} + // MagicConn returns the backend's *magicsock.Conn. 
func (b *LocalBackend) MagicConn() *magicsock.Conn { return b.sys.MagicSock.Get() diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 354cf686440fc..0505e068b94d9 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -34,6 +34,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/drive" "tailscale.com/drive/driveimpl" + _ "tailscale.com/feature/condregister/portmapper" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 7e54cef854de2..0c3a0a4edd0b9 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -35,9 +35,7 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" "tailscale.com/logtail" - "tailscale.com/net/netmon" "tailscale.com/net/netutil" - "tailscale.com/net/portmapper" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/dnstype" @@ -90,7 +88,6 @@ var handler = map[string]LocalAPIHandler{ "debug-packet-filter-matches": (*Handler).serveDebugPacketFilterMatches, "debug-packet-filter-rules": (*Handler).serveDebugPacketFilterRules, "debug-peer-endpoint-changes": (*Handler).serveDebugPeerEndpointChanges, - "debug-portmap": (*Handler).serveDebugPortmap, "derpmap": (*Handler).serveDERPMap, "dev-set-state-store": (*Handler).serveDevSetStateStore, "dial": (*Handler).serveDial, @@ -762,166 +759,6 @@ func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.R enc.Encode(nm.PacketFilter) } -func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - w.Header().Set("Content-Type", "text/plain") - - dur, err := time.ParseDuration(r.FormValue("duration")) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - gwSelf := r.FormValue("gateway_and_self") - - // Update portmapper debug flags - debugKnobs := 
&portmapper.DebugKnobs{VerboseLogs: true} - switch r.FormValue("type") { - case "": - case "pmp": - debugKnobs.DisablePCP = true - debugKnobs.DisableUPnP = true - case "pcp": - debugKnobs.DisablePMP = true - debugKnobs.DisableUPnP = true - case "upnp": - debugKnobs.DisablePCP = true - debugKnobs.DisablePMP = true - default: - http.Error(w, "unknown portmap debug type", http.StatusBadRequest) - return - } - - if defBool(r.FormValue("log_http"), false) { - debugKnobs.LogHTTP = true - } - - var ( - logLock sync.Mutex - handlerDone bool - ) - logf := func(format string, args ...any) { - if !strings.HasSuffix(format, "\n") { - format = format + "\n" - } - - logLock.Lock() - defer logLock.Unlock() - - // The portmapper can call this log function after the HTTP - // handler returns, which is not allowed and can cause a panic. - // If this happens, ignore the log lines since this typically - // occurs due to a client disconnect. - if handlerDone { - return - } - - // Write and flush each line to the client so that output is streamed - fmt.Fprintf(w, format, args...) 
- if f, ok := w.(http.Flusher); ok { - f.Flush() - } - } - defer func() { - logLock.Lock() - handlerDone = true - logLock.Unlock() - }() - - ctx, cancel := context.WithTimeout(r.Context(), dur) - defer cancel() - - done := make(chan bool, 1) - - var c *portmapper.Client - c = portmapper.NewClient(portmapper.Config{ - Logf: logger.WithPrefix(logf, "portmapper: "), - NetMon: h.b.NetMon(), - DebugKnobs: debugKnobs, - ControlKnobs: h.b.ControlKnobs(), - EventBus: h.eventBus, - OnChange: func() { - logf("portmapping changed.") - logf("have mapping: %v", c.HaveMapping()) - - if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { - logf("cb: mapping: %v", ext) - select { - case done <- true: - default: - } - return - } - logf("cb: no mapping") - }, - }) - defer c.Close() - - bus := eventbus.New() - defer bus.Close() - netMon, err := netmon.New(bus, logger.WithPrefix(logf, "monitor: ")) - if err != nil { - logf("error creating monitor: %v", err) - return - } - - gatewayAndSelfIP := func() (gw, self netip.Addr, ok bool) { - if a, b, ok := strings.Cut(gwSelf, "/"); ok { - gw = netip.MustParseAddr(a) - self = netip.MustParseAddr(b) - return gw, self, true - } - return netMon.GatewayAndSelfIP() - } - - c.SetGatewayLookupFunc(gatewayAndSelfIP) - - gw, selfIP, ok := gatewayAndSelfIP() - if !ok { - logf("no gateway or self IP; %v", netMon.InterfaceState()) - return - } - logf("gw=%v; self=%v", gw, selfIP) - - uc, err := net.ListenPacket("udp", "0.0.0.0:0") - if err != nil { - return - } - defer uc.Close() - c.SetLocalPort(uint16(uc.LocalAddr().(*net.UDPAddr).Port)) - - res, err := c.Probe(ctx) - if err != nil { - logf("error in Probe: %v", err) - return - } - logf("Probe: %+v", res) - - if !res.PCP && !res.PMP && !res.UPnP { - logf("no portmapping services available") - return - } - - if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { - logf("mapping: %v", ext) - } else { - logf("no mapping") - } - - select { - case <-done: - case <-ctx.Done(): - if r.Context().Err() 
== nil { - logf("serveDebugPortmap: context done: %v", ctx.Err()) - } else { - h.logf("serveDebugPortmap: context done: %v", ctx.Err()) - } - } -} - // EventError provides the JSON encoding of internal errors from event processing. type EventError struct { Error string diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index cb622a339944d..ba9a8cb0f45d5 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -33,7 +33,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/ping" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockstats" "tailscale.com/net/stun" "tailscale.com/syncs" @@ -215,7 +215,7 @@ type Client struct { // PortMapper, if non-nil, is used for portmap queries. // If nil, portmap discovery is not done. - PortMapper *portmapper.Client // lazily initialized on first use + PortMapper portmappertype.Client // UseDNSCache controls whether this client should use a // *dnscache.Resolver to resolve DERP hostnames, when no IP address is @@ -730,7 +730,7 @@ func (rs *reportState) probePortMapServices() { res, err := rs.c.PortMapper.Probe(context.Background()) if err != nil { - if !errors.Is(err, portmapper.ErrGatewayRange) { + if !errors.Is(err, portmappertype.ErrGatewayRange) { // "skipping portmap; gateway range likely lacks support" // is not very useful, and too spammy on cloud systems. // If there are other errors, we want to log those. 
diff --git a/net/portmapper/igd_test.go b/net/portmapper/igd_test.go index cca87e0b8238e..77015f5bfb189 100644 --- a/net/portmapper/igd_test.go +++ b/net/portmapper/igd_test.go @@ -14,7 +14,6 @@ import ( "sync/atomic" "testing" - "tailscale.com/control/controlknobs" "tailscale.com/net/netaddr" "tailscale.com/net/netmon" "tailscale.com/syncs" @@ -273,10 +272,9 @@ func newTestClient(t *testing.T, igd *TestIGD, bus *eventbus.Bus) *Client { } var c *Client c = NewClient(Config{ - Logf: tstest.WhileTestRunningLogger(t), - NetMon: netmon.NewStatic(), - ControlKnobs: new(controlknobs.Knobs), - EventBus: bus, + Logf: tstest.WhileTestRunningLogger(t), + NetMon: netmon.NewStatic(), + EventBus: bus, OnChange: func() { // TODO(creachadair): Remove. t.Logf("port map changed") t.Logf("have mapping: %v", c.HaveMapping()) diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index a1ab868155219..024c6dc784d67 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -8,7 +8,6 @@ package portmapper import ( "context" "encoding/binary" - "errors" "fmt" "io" "net" @@ -20,12 +19,12 @@ import ( "time" "go4.org/mem" - "tailscale.com/control/controlknobs" "tailscale.com/envknob" "tailscale.com/net/netaddr" "tailscale.com/net/neterror" "tailscale.com/net/netmon" "tailscale.com/net/netns" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockstats" "tailscale.com/syncs" "tailscale.com/types/logger" @@ -34,6 +33,13 @@ import ( "tailscale.com/util/eventbus" ) +var ( + ErrNoPortMappingServices = portmappertype.ErrNoPortMappingServices + ErrGatewayRange = portmappertype.ErrGatewayRange + ErrGatewayIPv6 = portmappertype.ErrGatewayIPv6 + ErrPortMappingDisabled = portmappertype.ErrPortMappingDisabled +) + var disablePortMapperEnv = envknob.RegisterBool("TS_DISABLE_PORTMAPPER") // DebugKnobs contains debug configuration that can be provided when creating a @@ -49,15 +55,33 @@ type DebugKnobs struct { LogHTTP bool // Disable* disables a 
specific service from mapping. - DisableUPnP bool - DisablePMP bool - DisablePCP bool + // If the funcs are nil or return false, the service is not disabled. + // Use the corresponding accessor methods without the "Func" suffix + // to check whether a service is disabled. + DisableUPnPFunc func() bool + DisablePMPFunc func() bool + DisablePCPFunc func() bool // DisableAll, if non-nil, is a func that reports whether all port // mapping attempts should be disabled. DisableAll func() bool } +// DisableUPnP reports whether UPnP is disabled. +func (k *DebugKnobs) DisableUPnP() bool { + return k != nil && k.DisableUPnPFunc != nil && k.DisableUPnPFunc() +} + +// DisablePMP reports whether NAT-PMP is disabled. +func (k *DebugKnobs) DisablePMP() bool { + return k != nil && k.DisablePMPFunc != nil && k.DisablePMPFunc() +} + +// DisablePCP reports whether PCP is disabled. +func (k *DebugKnobs) DisablePCP() bool { + return k != nil && k.DisablePCPFunc != nil && k.DisablePCPFunc() +} + func (k *DebugKnobs) disableAll() bool { if disablePortMapperEnv() { return true @@ -88,11 +112,10 @@ type Client struct { // The following two fields must both be non-nil. // Both are immutable after construction. pubClient *eventbus.Client - updates *eventbus.Publisher[Mapping] + updates *eventbus.Publisher[portmappertype.Mapping] logf logger.Logf netMon *netmon.Monitor // optional; nil means interfaces will be looked up on-demand - controlKnobs *controlknobs.Knobs ipAndGateway func() (gw, ip netip.Addr, ok bool) onChange func() // or nil debug DebugKnobs @@ -130,6 +153,8 @@ type Client struct { mapping mapping // non-nil if we have a mapping } +var _ portmappertype.Client = (*Client)(nil) + func (c *Client) vlogf(format string, args ...any) { if c.debug.VerboseLogs { c.logf(format, args...) @@ -159,7 +184,6 @@ type mapping interface { MappingDebug() string } -// HaveMapping reports whether we have a current valid mapping. 
func (c *Client) HaveMapping() bool { c.mu.Lock() defer c.mu.Unlock() @@ -223,10 +247,6 @@ type Config struct { // debugging. If nil, a sensible set of defaults will be used. DebugKnobs *DebugKnobs - // ControlKnobs, if non-nil, specifies knobs from the control plane that - // might disable port mapping. - ControlKnobs *controlknobs.Knobs - // OnChange is called to run in a new goroutine whenever the port mapping // status has changed. If nil, no callback is issued. OnChange func() @@ -246,10 +266,9 @@ func NewClient(c Config) *Client { netMon: c.NetMon, ipAndGateway: netmon.LikelyHomeRouterIP, // TODO(bradfitz): move this to method on netMon onChange: c.OnChange, - controlKnobs: c.ControlKnobs, } ret.pubClient = c.EventBus.Client("portmapper") - ret.updates = eventbus.Publish[Mapping](ret.pubClient) + ret.updates = eventbus.Publish[portmappertype.Mapping](ret.pubClient) if ret.logf == nil { ret.logf = logger.Discard } @@ -448,13 +467,6 @@ func IsNoMappingError(err error) bool { return ok } -var ( - ErrNoPortMappingServices = errors.New("no port mapping services were found") - ErrGatewayRange = errors.New("skipping portmap; gateway range likely lacks support") - ErrGatewayIPv6 = errors.New("skipping portmap; no IPv6 support for portmapping") - ErrPortMappingDisabled = errors.New("port mapping is disabled") -) - // GetCachedMappingOrStartCreatingOne quickly returns with our current cached portmapping, if any. // If there's not one, it starts up a background goroutine to create one. // If the background goroutine ends up creating one, the onChange hook registered with the @@ -512,7 +524,7 @@ func (c *Client) createMapping() { // the control flow to eliminate that possibility. Meanwhile, this // mitigates a panic downstream, cf. #16662. 
} - c.updates.Publish(Mapping{ + c.updates.Publish(portmappertype.Mapping{ External: mapping.External(), Type: mapping.MappingType(), GoodUntil: mapping.GoodUntil(), @@ -524,15 +536,6 @@ func (c *Client) createMapping() { } } -// Mapping is an event recording the allocation of a port mapping. -type Mapping struct { - External netip.AddrPort - Type string - GoodUntil time.Time - - // TODO(creachadair): Record whether we reused an existing mapping? -} - // wildcardIP is used when the previous external IP is not known for PCP port mapping. var wildcardIP = netip.MustParseAddr("0.0.0.0") @@ -545,7 +548,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter if c.debug.disableAll() { return nil, netip.AddrPort{}, NoMappingError{ErrPortMappingDisabled} } - if c.debug.DisableUPnP && c.debug.DisablePCP && c.debug.DisablePMP { + if c.debug.DisableUPnP() && c.debug.DisablePCP() && c.debug.DisablePMP() { return nil, netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} } gw, myIP, ok := c.gatewayAndSelfIP() @@ -624,7 +627,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter prevPort = m.External().Port() } - if c.debug.DisablePCP && c.debug.DisablePMP { + if c.debug.DisablePCP() && c.debug.DisablePMP() { c.mu.Unlock() if external, ok := c.getUPnPPortMapping(ctx, gw, internalAddr, prevPort); ok { return nil, external, nil @@ -675,7 +678,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter pxpAddr := netip.AddrPortFrom(gw, c.pxpPort()) - preferPCP := !c.debug.DisablePCP && (c.debug.DisablePMP || (!haveRecentPMP && haveRecentPCP)) + preferPCP := !c.debug.DisablePCP() && (c.debug.DisablePMP() || (!haveRecentPMP && haveRecentPCP)) // Create a mapping, defaulting to PMP unless only PCP was seen recently. 
if preferPCP { @@ -860,19 +863,13 @@ func parsePMPResponse(pkt []byte) (res pmpResponse, ok bool) { return res, true } -type ProbeResult struct { - PCP bool - PMP bool - UPnP bool -} - // Probe returns a summary of which port mapping services are // available on the network. // // If a probe has run recently and there haven't been any network changes since, // the returned result might be server from the Client's cache, without // sending any network traffic. -func (c *Client) Probe(ctx context.Context) (res ProbeResult, err error) { +func (c *Client) Probe(ctx context.Context) (res portmappertype.ProbeResult, err error) { if c.debug.disableAll() { return res, ErrPortMappingDisabled } @@ -907,19 +904,19 @@ func (c *Client) Probe(ctx context.Context) (res ProbeResult, err error) { // https://github.com/tailscale/tailscale/issues/1001 if c.sawPMPRecently() { res.PMP = true - } else if !c.debug.DisablePMP { + } else if !c.debug.DisablePMP() { metricPMPSent.Add(1) uc.WriteToUDPAddrPort(pmpReqExternalAddrPacket, pxpAddr) } if c.sawPCPRecently() { res.PCP = true - } else if !c.debug.DisablePCP { + } else if !c.debug.DisablePCP() { metricPCPSent.Add(1) uc.WriteToUDPAddrPort(pcpAnnounceRequest(myIP), pxpAddr) } if c.sawUPnPRecently() { res.UPnP = true - } else if !c.debug.DisableUPnP { + } else if !c.debug.DisableUPnP() { // Strictly speaking, you discover UPnP services by sending an // SSDP query (which uPnPPacket is) to udp/1900 on the SSDP // multicast address, and then get a flood of responses back diff --git a/net/portmapper/portmapper_test.go b/net/portmapper/portmapper_test.go index e66d3c159eccb..a697a39089635 100644 --- a/net/portmapper/portmapper_test.go +++ b/net/portmapper/portmapper_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "tailscale.com/control/controlknobs" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/util/eventbus/eventbustest" ) @@ -19,7 +19,7 @@ func TestCreateOrGetMapping(t *testing.T) { if v, _ := 
strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() c.SetLocalPort(1234) for i := range 2 { @@ -35,7 +35,7 @@ func TestClientProbe(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() for i := range 3 { if i > 0 { @@ -50,7 +50,7 @@ func TestClientProbeThenMap(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() c.debug.VerboseLogs = true c.SetLocalPort(1234) @@ -150,7 +150,7 @@ func TestUpdateEvent(t *testing.T) { t.Fatalf("Probe failed: %v", err) } c.GetCachedMappingOrStartCreatingOne() - if err := eventbustest.Expect(tw, eventbustest.Type[Mapping]()); err != nil { + if err := eventbustest.Expect(tw, eventbustest.Type[portmappertype.Mapping]()); err != nil { t.Error(err.Error()) } } diff --git a/net/portmapper/portmappertype/portmappertype.go b/net/portmapper/portmappertype/portmappertype.go new file mode 100644 index 0000000000000..cc8358a4aed12 --- /dev/null +++ b/net/portmapper/portmappertype/portmappertype.go @@ -0,0 +1,88 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmappertype defines the net/portmapper interface, which may or may not be +// linked into the binary. 
+package portmappertype + +import ( + "context" + "errors" + "net/netip" + "time" + + "tailscale.com/feature" + "tailscale.com/net/netmon" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +// HookNewPortMapper is a hook to install the portmapper creation function. +// It must be set by an init function when buildfeatures.HasPortmapper is true. +var HookNewPortMapper feature.Hook[func(logf logger.Logf, + bus *eventbus.Bus, + netMon *netmon.Monitor, + disableUPnPOrNil, + onlyTCP443OrNil func() bool) Client] + +var ( + ErrNoPortMappingServices = errors.New("no port mapping services were found") + ErrGatewayRange = errors.New("skipping portmap; gateway range likely lacks support") + ErrGatewayIPv6 = errors.New("skipping portmap; no IPv6 support for portmapping") + ErrPortMappingDisabled = errors.New("port mapping is disabled") +) + +// ProbeResult is the result of a portmapper probe, saying +// which port mapping protocols were discovered. +type ProbeResult struct { + PCP bool + PMP bool + UPnP bool +} + +// Client is the interface implemented by a portmapper client. +type Client interface { + // Probe returns a summary of which port mapping services are available on + // the network. + // + // If a probe has run recently and there haven't been any network changes + // since, the returned result might be server from the Client's cache, + // without sending any network traffic. + Probe(context.Context) (ProbeResult, error) + + // HaveMapping reports whether we have a current valid mapping. + HaveMapping() bool + + // SetGatewayLookupFunc set the func that returns the machine's default + // gateway IP, and the primary IP address for that gateway. It must be + // called before the client is used. If not called, + // interfaces.LikelyHomeRouterIP is used. + SetGatewayLookupFunc(f func() (gw, myIP netip.Addr, ok bool)) + + // NoteNetworkDown should be called when the network has transitioned to a down state. 
+ // It's too late to release port mappings at this point (the user might've just turned off + // their wifi), but we can make sure we invalidate mappings for later when the network + // comes back. + NoteNetworkDown() + + // GetCachedMappingOrStartCreatingOne quickly returns with our current cached portmapping, if any. + // If there's not one, it starts up a background goroutine to create one. + // If the background goroutine ends up creating one, the onChange hook registered with the + // NewClient constructor (if any) will fire. + GetCachedMappingOrStartCreatingOne() (external netip.AddrPort, ok bool) + + // SetLocalPort updates the local port number to which we want to port + // map UDP traffic + SetLocalPort(localPort uint16) + + Close() error +} + +// Mapping is an event recording the allocation of a port mapping. +type Mapping struct { + External netip.AddrPort + Type string + GoodUntil time.Time + + // TODO(creachadair): Record whether we reused an existing mapping? +} diff --git a/net/portmapper/upnp.go b/net/portmapper/upnp.go index 13418313597f0..d65d6e94d70fd 100644 --- a/net/portmapper/upnp.go +++ b/net/portmapper/upnp.go @@ -209,7 +209,7 @@ func addAnyPortMapping( // The meta is the most recently parsed UDP discovery packet response // from the Internet Gateway Device. 
func getUPnPRootDevice(ctx context.Context, logf logger.Logf, debug DebugKnobs, gw netip.Addr, meta uPnPDiscoResponse) (rootDev *goupnp.RootDevice, loc *url.URL, err error) { - if debug.DisableUPnP { + if debug.DisableUPnP() { return nil, nil, nil } @@ -434,7 +434,7 @@ func (c *Client) getUPnPPortMapping( internal netip.AddrPort, prevPort uint16, ) (external netip.AddrPort, ok bool) { - if disableUPnpEnv() || c.debug.DisableUPnP || (c.controlKnobs != nil && c.controlKnobs.DisableUPnP.Load()) { + if disableUPnpEnv() || c.debug.DisableUPnP() { return netip.AddrPort{}, false } diff --git a/net/portmapper/upnp_test.go b/net/portmapper/upnp_test.go index c07ec020813ed..a954b2beac094 100644 --- a/net/portmapper/upnp_test.go +++ b/net/portmapper/upnp_test.go @@ -18,6 +18,7 @@ import ( "sync/atomic" "testing" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/tstest" ) @@ -1039,7 +1040,7 @@ func (u *upnpServer) handleControl(w http.ResponseWriter, r *http.Request, handl } } -func mustProbeUPnP(tb testing.TB, ctx context.Context, c *Client) ProbeResult { +func mustProbeUPnP(tb testing.TB, ctx context.Context, c *Client) portmappertype.ProbeResult { tb.Helper() res, err := c.Probe(ctx) if err != nil { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 9b93ce8dbf2ec..b3e2b7f0e1f72 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -235,7 +235,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ - tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from 
tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ @@ -291,7 +293,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index d9b9b64c1e2b7..6b083132f86dd 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -29,6 +29,7 @@ import ( "tailscale.com/client/local" "tailscale.com/control/controlclient" "tailscale.com/envknob" + _ "tailscale.com/feature/condregister/portmapper" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 719cc68a4bed1..6eb5660762d0a 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -33,6 +33,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/disco" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" @@ -44,7 +45,7 @@ import ( "tailscale.com/net/netns" "tailscale.com/net/packet" "tailscale.com/net/ping" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockopts" "tailscale.com/net/sockstats" "tailscale.com/net/stun" @@ -177,7 +178,7 @@ type Conn struct { // These [eventbus.Subscriber] fields are solely accessed by // 
consumeEventbusTopics once initialized. - pmSub *eventbus.Subscriber[portmapper.Mapping] + pmSub *eventbus.Subscriber[portmappertype.Mapping] filterSub *eventbus.Subscriber[FilterUpdate] nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] @@ -207,7 +208,8 @@ type Conn struct { // portMapper is the NAT-PMP/PCP/UPnP prober/client, for requesting // port mappings from NAT devices. - portMapper *portmapper.Client + // If nil, the portmapper is disabled. + portMapper portmappertype.Client // derpRecvCh is used by receiveDERP to read DERP messages. // It must have buffer size > 0; see issue 3736. @@ -731,7 +733,7 @@ func NewConn(opts Options) (*Conn, error) { // Subscribe calls must return before NewConn otherwise published // events can be missed. - c.pmSub = eventbus.Subscribe[portmapper.Mapping](c.eventClient) + c.pmSub = eventbus.Subscribe[portmappertype.Mapping](c.eventClient) c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) @@ -747,19 +749,21 @@ func NewConn(opts Options) (*Conn, error) { // Don't log the same log messages possibly every few seconds in our // portmapper. 
- portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") - portmapperLogf = netmon.LinkChangeLogLimiter(c.connCtx, portmapperLogf, opts.NetMon) - portMapOpts := &portmapper.DebugKnobs{ - DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() }, - } - c.portMapper = portmapper.NewClient(portmapper.Config{ - EventBus: c.eventBus, - Logf: portmapperLogf, - NetMon: opts.NetMon, - DebugKnobs: portMapOpts, - ControlKnobs: opts.ControlKnobs, - }) - c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP) + if buildfeatures.HasPortMapper && !opts.DisablePortMapper { + portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") + portmapperLogf = netmon.LinkChangeLogLimiter(c.connCtx, portmapperLogf, opts.NetMon) + var disableUPnP func() bool + if c.controlKnobs != nil { + disableUPnP = c.controlKnobs.DisableUPnP.Load + } + newPortMapper, ok := portmappertype.HookNewPortMapper.GetOk() + if ok { + c.portMapper = newPortMapper(portmapperLogf, opts.EventBus, opts.NetMon, disableUPnP, c.onlyTCP443.Load) + } else if !testenv.InTest() { + panic("unexpected: HookNewPortMapper not set") + } + } + c.netMon = opts.NetMon c.health = opts.HealthTracker c.onPortUpdate = opts.OnPortUpdate @@ -1081,7 +1085,9 @@ func (c *Conn) updateNetInfo(ctx context.Context) (*netcheck.Report, error) { UPnP: report.UPnP, PMP: report.PMP, PCP: report.PCP, - HavePortMap: c.portMapper.HaveMapping(), + } + if c.portMapper != nil { + ni.HavePortMap = c.portMapper.HaveMapping() } for rid, d := range report.RegionV4Latency { ni.DERPLatency[fmt.Sprintf("%d-v4", rid)] = d.Seconds() @@ -1248,7 +1254,7 @@ func (c *Conn) DiscoPublicKey() key.DiscoPublic { func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, error) { var havePortmap bool var portmapExt netip.AddrPort - if runtime.GOOS != "js" { + if runtime.GOOS != "js" && c.portMapper != nil { portmapExt, havePortmap = c.portMapper.GetCachedMappingOrStartCreatingOne() } @@ -1288,7 +1294,7 @@ func (c *Conn) 
determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, erro } // If we didn't have a portmap earlier, maybe it's done by now. - if !havePortmap { + if !havePortmap && c.portMapper != nil { portmapExt, havePortmap = c.portMapper.GetCachedMappingOrStartCreatingOne() } if havePortmap { @@ -2662,7 +2668,9 @@ func (c *Conn) SetNetworkUp(up bool) { if up { c.startDerpHomeConnectLocked() } else { - c.portMapper.NoteNetworkDown() + if c.portMapper != nil { + c.portMapper.NoteNetworkDown() + } c.closeAllDerpLocked("network-down") } } @@ -3324,7 +3332,9 @@ func (c *Conn) Close() error { c.derpCleanupTimer.Stop() } c.stopPeriodicReSTUNTimerLocked() - c.portMapper.Close() + if c.portMapper != nil { + c.portMapper.Close() + } c.peerMap.forEachEndpoint(func(ep *endpoint) { ep.stopAndReset() @@ -3577,7 +3587,9 @@ func (c *Conn) rebind(curPortFate currentPortFate) error { if err := c.bindSocket(&c.pconn4, "udp4", curPortFate); err != nil { return fmt.Errorf("magicsock: Rebind IPv4 failed: %w", err) } - c.portMapper.SetLocalPort(c.LocalPort()) + if c.portMapper != nil { + c.portMapper.SetLocalPort(c.LocalPort()) + } c.UpdatePMTUD() return nil } From e180fc267b2fab61641bce08d075ad3e52b97a97 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 16 Sep 2025 10:07:50 -0700 Subject: [PATCH 0350/1093] feature/featuretags, all: add ts_omit_acme to disable TLS cert support I'd started to do this in the earlier ts_omit_server PR but decided to split it into this separate PR. 
Updates #17128 Change-Id: Ief8823a78d1f7bbb79e64a5cab30a7d0a5d6ff4b Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- client/local/cert.go | 151 ++++++++++++++++++ client/local/local.go | 135 ---------------- client/tailscale/cert.go | 34 ++++ client/tailscale/localclient_aliases.go | 22 --- cmd/tailscale/cli/cert.go | 32 ++-- cmd/tailscale/cli/cli.go | 3 +- cmd/tailscale/cli/configure-synology-cert.go | 6 + .../cli/configure-synology-cert_test.go | 2 + cmd/tailscale/cli/configure.go | 8 +- cmd/tailscaled/deps_test.go | 13 ++ .../buildfeatures/feature_acme_disabled.go | 13 ++ feature/buildfeatures/feature_acme_enabled.go | 13 ++ feature/featuretags/featuretags.go | 1 + ipn/ipnlocal/c2n.go | 56 ------- ipn/ipnlocal/cert.go | 59 ++++++- ipn/ipnlocal/{cert_js.go => cert_disabled.go} | 21 ++- ipn/localapi/cert.go | 6 +- ipn/localapi/localapi.go | 1 - 19 files changed, 342 insertions(+), 236 deletions(-) create mode 100644 client/local/cert.go create mode 100644 client/tailscale/cert.go create mode 100644 feature/buildfeatures/feature_acme_disabled.go create mode 100644 feature/buildfeatures/feature_acme_enabled.go rename ipn/ipnlocal/{cert_js.go => cert_disabled.go} (51%) diff --git a/build_dist.sh b/build_dist.sh index 45d471be0a6e0..be0d4d47e0564 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,7 +41,7 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" - tags="${tags:+$tags,},$($go run ./cmd/featuretags --min)" + tags="${tags:+$tags,},$(GOOS= GOARCH= $go run ./cmd/featuretags --min)" ;; --box) if [ ! 
-z "${TAGS:-}" ]; then diff --git a/client/local/cert.go b/client/local/cert.go new file mode 100644 index 0000000000000..bfaac7303297b --- /dev/null +++ b/client/local/cert.go @@ -0,0 +1,151 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !js && !ts_omit_acme + +package local + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "go4.org/mem" +) + +// SetDNS adds a DNS TXT record for the given domain name, containing +// the provided TXT value. The intended use case is answering +// LetsEncrypt/ACME dns-01 challenges. +// +// The control plane will only permit SetDNS requests with very +// specific names and values. The name should be +// "_acme-challenge." + your node's MagicDNS name. It's expected that +// clients cache the certs from LetsEncrypt (or whichever CA is +// providing them) and only request new ones as needed; the control plane +// rate limits SetDNS requests. +// +// This is a low-level interface; it's expected that most Tailscale +// users use a higher level interface to getting/using TLS +// certificates. +func (lc *Client) SetDNS(ctx context.Context, name, value string) error { + v := url.Values{} + v.Set("name", name) + v.Set("value", value) + _, err := lc.send(ctx, "POST", "/localapi/v0/set-dns?"+v.Encode(), 200, nil) + return err +} + +// CertPair returns a cert and private key for the provided DNS domain. +// +// It returns a cached certificate from disk if it's still valid. +// +// Deprecated: use [Client.CertPair]. +func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return defaultClient.CertPair(ctx, domain) +} + +// CertPair returns a cert and private key for the provided DNS domain. +// +// It returns a cached certificate from disk if it's still valid. +// +// API maturity: this is considered a stable API. 
+func (lc *Client) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return lc.CertPairWithValidity(ctx, domain, 0) +} + +// CertPairWithValidity returns a cert and private key for the provided DNS +// domain. +// +// It returns a cached certificate from disk if it's still valid. +// When minValidity is non-zero, the returned certificate will be valid for at +// least the given duration, if permitted by the CA. If the certificate is +// valid, but for less than minValidity, it will be synchronously renewed. +// +// API maturity: this is considered a stable API. +func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) { + res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil) + if err != nil { + return nil, nil, err + } + // with ?type=pair, the response PEM is first the one private + // key PEM block, then the cert PEM blocks. + i := mem.Index(mem.B(res), mem.S("--\n--")) + if i == -1 { + return nil, nil, fmt.Errorf("unexpected output: no delimiter") + } + i += len("--\n") + keyPEM, certPEM = res[:i], res[i:] + if mem.Contains(mem.B(certPEM), mem.S(" PRIVATE KEY-----")) { + return nil, nil, fmt.Errorf("unexpected output: key in cert") + } + return certPEM, keyPEM, nil +} + +// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. +// +// It returns a cached certificate from disk if it's still valid. +// +// It's the right signature to use as the value of +// [tls.Config.GetCertificate]. +// +// Deprecated: use [Client.GetCertificate]. +func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + return defaultClient.GetCertificate(hi) +} + +// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. +// +// It returns a cached certificate from disk if it's still valid. 
+// +// It's the right signature to use as the value of +// [tls.Config.GetCertificate]. +// +// API maturity: this is considered a stable API. +func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + if hi == nil || hi.ServerName == "" { + return nil, errors.New("no SNI ServerName") + } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + name := hi.ServerName + if !strings.Contains(name, ".") { + if v, ok := lc.ExpandSNIName(ctx, name); ok { + name = v + } + } + certPEM, keyPEM, err := lc.CertPair(ctx, name) + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + return nil, err + } + return &cert, nil +} + +// ExpandSNIName expands bare label name into the most likely actual TLS cert name. +// +// Deprecated: use [Client.ExpandSNIName]. +func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + return defaultClient.ExpandSNIName(ctx, name) +} + +// ExpandSNIName expands bare label name into the most likely actual TLS cert name. +func (lc *Client) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + st, err := lc.StatusWithoutPeers(ctx) + if err != nil { + return "", false + } + for _, d := range st.CertDomains { + if len(d) > len(name)+1 && strings.HasPrefix(d, name) && d[len(name)] == '.' 
{ + return d, true + } + } + return "", false +} diff --git a/client/local/local.go b/client/local/local.go index a132e577b9769..a606fbdf38341 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -9,7 +9,6 @@ import ( "bytes" "cmp" "context" - "crypto/tls" "encoding/base64" "encoding/json" "errors" @@ -28,7 +27,6 @@ import ( "sync" "time" - "go4.org/mem" "tailscale.com/client/tailscale/apitype" "tailscale.com/drive" "tailscale.com/envknob" @@ -907,28 +905,6 @@ func (lc *Client) Logout(ctx context.Context) error { return err } -// SetDNS adds a DNS TXT record for the given domain name, containing -// the provided TXT value. The intended use case is answering -// LetsEncrypt/ACME dns-01 challenges. -// -// The control plane will only permit SetDNS requests with very -// specific names and values. The name should be -// "_acme-challenge." + your node's MagicDNS name. It's expected that -// clients cache the certs from LetsEncrypt (or whichever CA is -// providing them) and only request new ones as needed; the control plane -// rate limits SetDNS requests. -// -// This is a low-level interface; it's expected that most Tailscale -// users use a higher level interface to getting/using TLS -// certificates. -func (lc *Client) SetDNS(ctx context.Context, name, value string) error { - v := url.Values{} - v.Set("name", name) - v.Set("value", value) - _, err := lc.send(ctx, "POST", "/localapi/v0/set-dns?"+v.Encode(), 200, nil) - return err -} - // DialTCP connects to the host's port via Tailscale. // // The host may be a base DNS name (resolved from the netmap inside @@ -1009,117 +985,6 @@ func (lc *Client) CurrentDERPMap(ctx context.Context) (*tailcfg.DERPMap, error) return &derpMap, nil } -// CertPair returns a cert and private key for the provided DNS domain. -// -// It returns a cached certificate from disk if it's still valid. -// -// Deprecated: use [Client.CertPair]. 
-func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return defaultClient.CertPair(ctx, domain) -} - -// CertPair returns a cert and private key for the provided DNS domain. -// -// It returns a cached certificate from disk if it's still valid. -// -// API maturity: this is considered a stable API. -func (lc *Client) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return lc.CertPairWithValidity(ctx, domain, 0) -} - -// CertPairWithValidity returns a cert and private key for the provided DNS -// domain. -// -// It returns a cached certificate from disk if it's still valid. -// When minValidity is non-zero, the returned certificate will be valid for at -// least the given duration, if permitted by the CA. If the certificate is -// valid, but for less than minValidity, it will be synchronously renewed. -// -// API maturity: this is considered a stable API. -func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) { - res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil) - if err != nil { - return nil, nil, err - } - // with ?type=pair, the response PEM is first the one private - // key PEM block, then the cert PEM blocks. - i := mem.Index(mem.B(res), mem.S("--\n--")) - if i == -1 { - return nil, nil, fmt.Errorf("unexpected output: no delimiter") - } - i += len("--\n") - keyPEM, certPEM = res[:i], res[i:] - if mem.Contains(mem.B(certPEM), mem.S(" PRIVATE KEY-----")) { - return nil, nil, fmt.Errorf("unexpected output: key in cert") - } - return certPEM, keyPEM, nil -} - -// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. -// -// It returns a cached certificate from disk if it's still valid. -// -// It's the right signature to use as the value of -// [tls.Config.GetCertificate]. 
-// -// Deprecated: use [Client.GetCertificate]. -func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - return defaultClient.GetCertificate(hi) -} - -// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. -// -// It returns a cached certificate from disk if it's still valid. -// -// It's the right signature to use as the value of -// [tls.Config.GetCertificate]. -// -// API maturity: this is considered a stable API. -func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - if hi == nil || hi.ServerName == "" { - return nil, errors.New("no SNI ServerName") - } - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - name := hi.ServerName - if !strings.Contains(name, ".") { - if v, ok := lc.ExpandSNIName(ctx, name); ok { - name = v - } - } - certPEM, keyPEM, err := lc.CertPair(ctx, name) - if err != nil { - return nil, err - } - cert, err := tls.X509KeyPair(certPEM, keyPEM) - if err != nil { - return nil, err - } - return &cert, nil -} - -// ExpandSNIName expands bare label name into the most likely actual TLS cert name. -// -// Deprecated: use [Client.ExpandSNIName]. -func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - return defaultClient.ExpandSNIName(ctx, name) -} - -// ExpandSNIName expands bare label name into the most likely actual TLS cert name. -func (lc *Client) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - st, err := lc.StatusWithoutPeers(ctx) - if err != nil { - return "", false - } - for _, d := range st.CertDomains { - if len(d) > len(name)+1 && strings.HasPrefix(d, name) && d[len(name)] == '.' { - return d, true - } - } - return "", false -} - // PingOpts contains options for the ping request. // // The zero value is valid, which means to use defaults. 
diff --git a/client/tailscale/cert.go b/client/tailscale/cert.go new file mode 100644 index 0000000000000..4f351ab990984 --- /dev/null +++ b/client/tailscale/cert.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !js && !ts_omit_acme + +package tailscale + +import ( + "context" + "crypto/tls" + + "tailscale.com/client/local" +) + +// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate]. +// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate]. +func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + return local.GetCertificate(hi) +} + +// CertPair is an alias for [tailscale.com/client/local.CertPair]. +// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair]. +func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return local.CertPair(ctx, domain) +} + +// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName]. +// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName]. +func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + return local.ExpandSNIName(ctx, name) +} diff --git a/client/tailscale/localclient_aliases.go b/client/tailscale/localclient_aliases.go index 58be312b47cc9..e3492e841b1c9 100644 --- a/client/tailscale/localclient_aliases.go +++ b/client/tailscale/localclient_aliases.go @@ -5,7 +5,6 @@ package tailscale import ( "context" - "crypto/tls" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" @@ -37,13 +36,6 @@ type BugReportOpts = local.BugReportOpts // Deprecated: import [tailscale.com/client/local] instead. type PingOpts = local.PingOpts -// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate]. 
-func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - return local.GetCertificate(hi) -} - // SetVersionMismatchHandler is an alias for [tailscale.com/client/local.SetVersionMismatchHandler]. // // Deprecated: import [tailscale.com/client/local] instead. @@ -85,17 +77,3 @@ func Status(ctx context.Context) (*ipnstate.Status, error) { func StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { return local.StatusWithoutPeers(ctx) } - -// CertPair is an alias for [tailscale.com/client/local.CertPair]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair]. -func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return local.CertPair(ctx, domain) -} - -// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName]. -func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - return local.ExpandSNIName(ctx, name) -} diff --git a/cmd/tailscale/cli/cert.go b/cmd/tailscale/cli/cert.go index 9c8eca5b7d7d0..171eebe1eafc9 100644 --- a/cmd/tailscale/cli/cert.go +++ b/cmd/tailscale/cli/cert.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !js && !ts_omit_acme + package cli import ( @@ -25,19 +27,23 @@ import ( "tailscale.com/version" ) -var certCmd = &ffcli.Command{ - Name: "cert", - Exec: runCert, - ShortHelp: "Get TLS certs", - ShortUsage: "tailscale cert [flags] ", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("cert") - fs.StringVar(&certArgs.certFile, "cert-file", "", "output cert file or \"-\" for stdout; defaults to DOMAIN.crt if --cert-file and --key-file are both unset") - fs.StringVar(&certArgs.keyFile, "key-file", "", "output key file or \"-\" for stdout; defaults to DOMAIN.key if --cert-file and --key-file are both unset") - fs.BoolVar(&certArgs.serve, 
"serve-demo", false, "if true, serve on port :443 using the cert as a demo, instead of writing out the files to disk") - fs.DurationVar(&certArgs.minValidity, "min-validity", 0, "ensure the certificate is valid for at least this duration; the output certificate is never expired if this flag is unset or 0, but the lifetime may vary; the maximum allowed min-validity depends on the CA") - return fs - })(), +func init() { + maybeCertCmd = func() *ffcli.Command { + return &ffcli.Command{ + Name: "cert", + Exec: runCert, + ShortHelp: "Get TLS certs", + ShortUsage: "tailscale cert [flags] ", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("cert") + fs.StringVar(&certArgs.certFile, "cert-file", "", "output cert file or \"-\" for stdout; defaults to DOMAIN.crt if --cert-file and --key-file are both unset") + fs.StringVar(&certArgs.keyFile, "key-file", "", "output key file or \"-\" for stdout; defaults to DOMAIN.key if --cert-file and --key-file are both unset") + fs.BoolVar(&certArgs.serve, "serve-demo", false, "if true, serve on port :443 using the cert as a demo, instead of writing out the files to disk") + fs.DurationVar(&certArgs.minValidity, "min-validity", 0, "ensure the certificate is valid for at least this duration; the output certificate is never expired if this flag is unset or 0, but the lifetime may vary; the maximum allowed min-validity depends on the CA") + return fs + })(), + } + } } var certArgs struct { diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index d039be607b6a2..dfc8f3249577c 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -215,6 +215,7 @@ var ( maybeNetlockCmd, maybeFunnelCmd, maybeServeCmd, + maybeCertCmd, _ func() *ffcli.Command ) @@ -262,7 +263,7 @@ change in the future. 
nilOrCall(maybeWebCmd), nilOrCall(fileCmd), bugReportCmd, - certCmd, + nilOrCall(maybeCertCmd), nilOrCall(maybeNetlockCmd), licensesCmd, exitNodeCmd(), diff --git a/cmd/tailscale/cli/configure-synology-cert.go b/cmd/tailscale/cli/configure-synology-cert.go index 663d0c8790456..6ceef33ca2ae9 100644 --- a/cmd/tailscale/cli/configure-synology-cert.go +++ b/cmd/tailscale/cli/configure-synology-cert.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_acme + package cli import ( @@ -22,6 +24,10 @@ import ( "tailscale.com/version/distro" ) +func init() { + maybeConfigSynologyCertCmd = synologyConfigureCertCmd +} + func synologyConfigureCertCmd() *ffcli.Command { if runtime.GOOS != "linux" || distro.Get() != distro.Synology { return nil diff --git a/cmd/tailscale/cli/configure-synology-cert_test.go b/cmd/tailscale/cli/configure-synology-cert_test.go index 801285e550d9b..c7da5622fb629 100644 --- a/cmd/tailscale/cli/configure-synology-cert_test.go +++ b/cmd/tailscale/cli/configure-synology-cert_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_acme + package cli import ( diff --git a/cmd/tailscale/cli/configure.go b/cmd/tailscale/cli/configure.go index 0354a19446a8f..20236eb28b5f5 100644 --- a/cmd/tailscale/cli/configure.go +++ b/cmd/tailscale/cli/configure.go @@ -10,7 +10,11 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" ) -var maybeJetKVMConfigureCmd func() *ffcli.Command // non-nil only on Linux/arm for JetKVM +var ( + maybeJetKVMConfigureCmd, + maybeConfigSynologyCertCmd, + _ func() *ffcli.Command // non-nil only on Linux/arm for JetKVM +) func configureCmd() *ffcli.Command { return &ffcli.Command{ @@ -28,7 +32,7 @@ services on the host to use Tailscale in more ways. 
Subcommands: nonNilCmds( configureKubeconfigCmd(), synologyConfigureCmd(), - synologyConfigureCertCmd(), + ccall(maybeConfigSynologyCertCmd), ccall(maybeSysExtCmd), ccall(maybeVPNConfigCmd), ccall(maybeJetKVMConfigureCmd), diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 1609ba63350be..0d56b55d2f958 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -108,3 +108,16 @@ func TestOmitPortmapper(t *testing.T) { }, }.Check(t) } + +func TestOmitACME(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_acme,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "/acme") { + t.Errorf("unexpected dep with ts_omit_acme: %q", dep) + } + }, + }.Check(t) +} diff --git a/feature/buildfeatures/feature_acme_disabled.go b/feature/buildfeatures/feature_acme_disabled.go new file mode 100644 index 0000000000000..0a7f25a821cc5 --- /dev/null +++ b/feature/buildfeatures/feature_acme_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_acme + +package buildfeatures + +// HasACME is whether the binary was built with support for modular feature "ACME TLS certificate management". +// Specifically, it's whether the binary was NOT built with the "ts_omit_acme" build tag. +// It's a const so it can be used for dead code elimination. +const HasACME = false diff --git a/feature/buildfeatures/feature_acme_enabled.go b/feature/buildfeatures/feature_acme_enabled.go new file mode 100644 index 0000000000000..f074bfb4e1a7e --- /dev/null +++ b/feature/buildfeatures/feature_acme_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_acme + +package buildfeatures + +// HasACME is whether the binary was built with support for modular feature "ACME TLS certificate management". +// Specifically, it's whether the binary was NOT built with the "ts_omit_acme" build tag. +// It's a const so it can be used for dead code elimination. +const HasACME = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 2c5f32310dcb8..fc26dd3704605 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -42,6 +42,7 @@ type FeatureMeta struct { // Features are the known Tailscale features that can be selectively included or // excluded via build tags, and a description of each. var Features = map[FeatureTag]FeatureMeta{ + "acme": {"ACME", "ACME TLS certificate management"}, "aws": {"AWS", "AWS integration"}, "bird": {"Bird", "Bird BGP integration"}, "capture": {"Capture", "Packet capture"}, diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index b5f50f3bccc70..0487774dba7e6 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -4,9 +4,7 @@ package ipnlocal import ( - "crypto/x509" "encoding/json" - "encoding/pem" "errors" "fmt" "io" @@ -54,9 +52,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ req("POST /logtail/flush"): handleC2NLogtailFlush, req("POST /sockstats"): handleC2NSockStats, - // Check TLS certificate status. - req("GET /tls-cert-status"): handleC2NTLSCertStatus, - // SSH req("/ssh/usernames"): handleC2NSSHUsernames, @@ -497,54 +492,3 @@ func regularFileExists(path string) bool { fi, err := os.Stat(path) return err == nil && fi.Mode().IsRegular() } - -// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the -// provided domain. This can be called by the controlplane to clean up DNS TXT -// records when they're no longer needed by LetsEncrypt. -// -// It does not kick off a cert fetch or async refresh. 
It only reports anything -// that's already sitting on disk, and only reports metadata about the public -// cert (stuff that'd be the in CT logs anyway). -func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - cs, err := b.getCertStore() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - domain := r.FormValue("domain") - if domain == "" { - http.Error(w, "no 'domain'", http.StatusBadRequest) - return - } - - ret := &tailcfg.C2NTLSCertInfo{} - pair, err := getCertPEMCached(cs, domain, b.clock.Now()) - ret.Valid = err == nil - if err != nil { - ret.Error = err.Error() - if errors.Is(err, errCertExpired) { - ret.Expired = true - } else if errors.Is(err, ipn.ErrStateNotExist) { - ret.Missing = true - ret.Error = "no certificate" - } - } else { - block, _ := pem.Decode(pair.CertPEM) - if block == nil { - ret.Error = "invalid PEM" - ret.Valid = false - } else { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - ret.Error = fmt.Sprintf("invalid certificate: %v", err) - ret.Valid = false - } else { - ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339) - ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339) - } - } - } - - writeJSON(w, ret) -} diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 86052eb8d5861..bf85affa637ef 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !js +//go:build !js && !ts_omit_acme package ipnlocal @@ -24,6 +24,7 @@ import ( "log" randv2 "math/rand/v2" "net" + "net/http" "os" "path/filepath" "runtime" @@ -40,6 +41,7 @@ import ( "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/net/bakedroots" + "tailscale.com/tailcfg" "tailscale.com/tempfork/acme" "tailscale.com/types/logger" "tailscale.com/util/testenv" @@ -47,6 +49,10 @@ import ( "tailscale.com/version/distro" ) +func init() { + 
RegisterC2N("GET /tls-cert-status", handleC2NTLSCertStatus) +} + // Process-wide cache. (A new *Handler is created per connection, // effectively per request) var ( @@ -836,3 +842,54 @@ func checkCertDomain(st *ipnstate.Status, domain string) error { } return fmt.Errorf("invalid domain %q; must be one of %q", domain, st.CertDomains) } + +// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the +// provided domain. This can be called by the controlplane to clean up DNS TXT +// records when they're no longer needed by LetsEncrypt. +// +// It does not kick off a cert fetch or async refresh. It only reports anything +// that's already sitting on disk, and only reports metadata about the public +// cert (stuff that'd be the in CT logs anyway). +func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + cs, err := b.getCertStore() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + domain := r.FormValue("domain") + if domain == "" { + http.Error(w, "no 'domain'", http.StatusBadRequest) + return + } + + ret := &tailcfg.C2NTLSCertInfo{} + pair, err := getCertPEMCached(cs, domain, b.clock.Now()) + ret.Valid = err == nil + if err != nil { + ret.Error = err.Error() + if errors.Is(err, errCertExpired) { + ret.Expired = true + } else if errors.Is(err, ipn.ErrStateNotExist) { + ret.Missing = true + ret.Error = "no certificate" + } + } else { + block, _ := pem.Decode(pair.CertPEM) + if block == nil { + ret.Error = "invalid PEM" + ret.Valid = false + } else { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + ret.Error = fmt.Sprintf("invalid certificate: %v", err) + ret.Valid = false + } else { + ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339) + ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339) + } + } + } + + writeJSON(w, ret) +} diff --git a/ipn/ipnlocal/cert_js.go b/ipn/ipnlocal/cert_disabled.go similarity index 51% rename from 
ipn/ipnlocal/cert_js.go rename to ipn/ipnlocal/cert_disabled.go index 6acc57a60a0ac..17d446c11af39 100644 --- a/ipn/ipnlocal/cert_js.go +++ b/ipn/ipnlocal/cert_disabled.go @@ -1,20 +1,30 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build js || ts_omit_acme + package ipnlocal import ( "context" "errors" + "io" + "net/http" "time" ) +func init() { + RegisterC2N("GET /tls-cert-status", handleC2NTLSCertStatusDisabled) +} + +var errNoCerts = errors.New("cert support not compiled in this build") + type TLSCertKeyPair struct { CertPEM, KeyPEM []byte } func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertKeyPair, error) { - return nil, errors.New("not implemented for js/wasm") + return nil, errNoCerts } var errCertExpired = errors.New("cert expired") @@ -22,9 +32,14 @@ var errCertExpired = errors.New("cert expired") type certStore interface{} func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) { - return nil, errors.New("not implemented for js/wasm") + return nil, errNoCerts } func (b *LocalBackend) getCertStore() (certStore, error) { - return nil, errors.New("not implemented for js/wasm") + return nil, errNoCerts +} + +func handleC2NTLSCertStatusDisabled(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{"Missing":true}`) // a minimal tailcfg.C2NTLSCertInfo } diff --git a/ipn/localapi/cert.go b/ipn/localapi/cert.go index 323406f7ba650..2313631cc3229 100644 --- a/ipn/localapi/cert.go +++ b/ipn/localapi/cert.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android && !js +//go:build !ios && !android && !js && !ts_omit_acme package localapi @@ -14,6 +14,10 @@ import ( "tailscale.com/ipn/ipnlocal" ) +func init() { + Register("cert/", (*Handler).serveCert) +} + func (h *Handler) serveCert(w 
http.ResponseWriter, r *http.Request) { if !h.PermitWrite && !h.PermitCert { http.Error(w, "cert access denied", http.StatusForbidden) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 0c3a0a4edd0b9..01966f84b3826 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -67,7 +67,6 @@ type LocalAPIHandler func(*Handler, http.ResponseWriter, *http.Request) // then it's a prefix match. var handler = map[string]LocalAPIHandler{ // The prefix match handlers end with a slash: - "cert/": (*Handler).serveCert, "profiles/": (*Handler).serveProfiles, // The other /localapi/v0/NAME handlers are exact matches and contain only NAME From 6db30a10f7a160efeaeeb955e92569c767ca8b2d Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 16 Sep 2025 15:49:03 -0700 Subject: [PATCH 0351/1093] cmd/tailscale: shrink QR codes using half blocks (#17084) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When running `tailscale up --qr`, the QR code is rendered using two full blocks ██ to form a square pixel. This is a problem for people with smaller terminals, because the output is 37 lines high. All modern terminals support half block characters, like ▀ and ▄, which only takes 19 lines and can easily fit in a regular terminal window. 
For example, https://login.tailscale.com/a/0123456789 is now rendered: ``` user@host:~$ tailscale up --qr █████████████████████████████████████ █████████████████████████████████████ ████ ▄▄▄▄▄ █ ▀▀ █▄▀▀ ▄ █ ▄▄▄▄▄ ████ ████ █ █ █▀ ▄▄▄█▀█▄▀ ▄█ █ █ ████ ████ █▄▄▄█ ██▄ ▄▀▀▄▄ ▀▀ ▀█ █▄▄▄█ ████ ████▄▄▄▄▄▄▄█ ▀▄▀ █▄▀▄▀▄█ █▄▄▄▄▄▄▄████ ████▄█▄ ▀▄▄▄█▀▄█▀ ▀▄ ▄ ▀▀ ▀▀▄█▄ ████ ████▄▀▄▀▄█▄ █ ▄▄▄▄█▀██▀██▄▄█▀█▄▄▀████ ████▄█▀ ▀ ▄█▄▄▀▄▀█ ▄ ▄█▀█▄▀██▄ ▀▀████ █████▀ ▀ ▄▀▀▀▀▄▀▄▀▀ ▄▄ ▄ ▀ █▄ ▄████ ██████ ▄▄█▄▄▄▄▄▀ █ ▄▀▀▄█▀ █ ▄ ▀ █████ ████▄█▄▄ ▄▀ ▀██▀ ▄█▀▀████▄▀█ ██████ █████▄▄▄█▄▄▄▀▀ █▄▄▄▄▄ ▀█ ▄▄▄ ▀▀████ ████ ▄▄▄▄▄ █ ██▄ ▀ █▀█ ▄ █▄█ █▄█████ ████ █ █ █▀ █ ▀█▄▄ █▀ ▄ ▀▄▀▄████ ████ █▄▄▄█ █▄█▀█▄▀██▀██▄ ▀█▄▀▀▄▀▄████ ████▄▄▄▄▄▄▄█▄▄███▄▄▄███▄▄▄██▄██▄█████ █████████████████████████████████████ ▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀ ``` To render a QR code with full blocks, like we did in the past, use the new `--qr-format` flag: ``` user@host:~$ tailscale up --qr --qr-format=large ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ████████ ██ ████ ██ ████ ██ ████████ ████████ ██████████ ██ ████ ██ ██ ██████████ ████████ ████████ ██ ██ ████ ██████ ██ ██ ██ ██ ████████ ████████ ██ ██ ██ ████████ ████ ████ ██ ██ ████████ ████████ ██ ██ ████ ████ ████ ████ ██ ██ ████████ ████████ ██████████ ██████ ██ ████ ██ ██████████ ████████ ████████ ██ ██ ██ ██ ██ ██ ██ ██ ████████ ████████████████████████ ██ ████ ██ ████ ████████████████████████ ████████ ██ ██ ████ ████ ██ ████ ████ ██ ████████ ██████████████ ████████ ████ ██ ██ ██████ ████████ ████████ ██ ██ ██ ██ ██████████████ ██████ ██████████ ██████████ ██ ██████ ██ ██████████ ████ ██████████ ██████ ████████ ████████ ████ ██ ██ ██ ████ ██████ ██████ ████████████ ████████████ ████████ ██ ██ ██ ████ ████ ██████ 
████████ ████████████ ██ ████████ ██ ████ ██ ██ ████████ ██████████ ██ ██ ██ ████ ██ ████ ██████████ ████████████ ██ ██ ██ ████ ████ ██ ██ ██████████ ████████████ ████████████████ ██ ██ ████ ██ ██ ██████████ ████████ ██ ██ ████████ ██████████████ ████ ████████████ ████████████████ ██ ████ ████ ██████████ ██ ████████████ ██████████ ██ ████ ██ ████ ████████████ ████████████████████████ ████████████ ██ ██████ ████████ ████████ ██ ████ ██ ██████ ██ ██ ██ ██████████ ████████ ██████████ ██ ██████ ██ ██ ██ ██████ ██████████████ ████████ ██ ██ ████ ██ ████ ████ ██ ██ ████████ ████████ ██ ██ ██ ██ ██████ ██ ██ ██ ██████████ ████████ ██ ██ ██ ██████ ████████████ ████ ████ ██ ████████ ████████ ██████████ ██████ ████ ████ ██████ ████ ██ ██████████ ████████ ██ ██████ ██████ ████ ████ ██████████ ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ██████████████████████████████████████████████████████████████████████████ ``` Fixes #17083 Signed-off-by: Simon Law --- cmd/tailscale/cli/up.go | 13 +++++++++++-- cmd/tailscale/cli/up_test.go | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 097af725b9d78..c78a6356965b4 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -95,6 +95,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { // When adding new flags, prefer to put them under "tailscale set" instead // of here. Setting preferences via "tailscale up" is deprecated. 
upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs") + upf.StringVar(&upArgs.qrFormat, "qr-format", "small", "QR code formatting (small or large)") upf.StringVar(&upArgs.authKeyOrFile, "auth-key", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`) upf.StringVar(&upArgs.server, "login-server", ipn.DefaultControlURL, "base URL of control server") @@ -164,6 +165,7 @@ func defaultNetfilterMode() string { // added to it. Add new arguments to setArgsT instead. type upArgsT struct { qr bool + qrFormat string reset bool server string acceptRoutes bool @@ -658,7 +660,14 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { log.Printf("QR code error: %v", err) } else { - fmt.Fprintf(Stderr, "%s\n", q.ToString(false)) + switch upArgs.qrFormat { + case "large": + fmt.Fprintf(Stderr, "%s\n", q.ToString(false)) + case "small": + fmt.Fprintf(Stderr, "%s\n", q.ToSmallString(false)) + default: + log.Printf("unknown QR code format: %q", upArgs.qrFormat) + } } } } @@ -805,7 +814,7 @@ func addPrefFlagMapping(flagName string, prefNames ...string) { // correspond to an ipn.Pref. 
func preflessFlag(flagName string) bool { switch flagName { - case "auth-key", "force-reauth", "reset", "qr", "json", "timeout", "accept-risk", "host-routes": + case "auth-key", "force-reauth", "reset", "qr", "qr-format", "json", "timeout", "accept-risk", "host-routes": return true } return false diff --git a/cmd/tailscale/cli/up_test.go b/cmd/tailscale/cli/up_test.go index eb06f84dce2ea..efddb53249b55 100644 --- a/cmd/tailscale/cli/up_test.go +++ b/cmd/tailscale/cli/up_test.go @@ -35,6 +35,7 @@ var validUpFlags = set.Of( "operator", "report-posture", "qr", + "qr-format", "reset", "shields-up", "snat-subnet-routes", From 697098ed6ccc0f2fd8727fa36a86d952495acf50 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 16 Sep 2025 18:11:28 -0700 Subject: [PATCH 0352/1093] ipn/ipnlocal: fix a case where ts_omit_ssh was still linking in x/crypto/ssh And add a test. Updates #12614 Change-Id: Icb1c77f5890def794a4938583725c1a0886b197d Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/deps_test.go | 3 ++- ipn/ipnlocal/local.go | 2 +- ipn/ipnlocal/ssh.go | 2 +- ipn/ipnlocal/ssh_stub.go | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 0d56b55d2f958..9e6624d9a9e81 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -15,8 +15,9 @@ func TestOmitSSH(t *testing.T) { deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", - Tags: "ts_omit_ssh", + Tags: "ts_omit_ssh,ts_include_cli", BadDeps: map[string]string{ + "golang.org/x/crypto/ssh": msg, "tailscale.com/ssh/tailssh": msg, "tailscale.com/sessionrecording": msg, "github.com/anmitsu/go-shlex": msg, diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 988c0b5383fb5..1340942019477 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5610,7 +5610,7 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip 
b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) var sshHostKeys []string - if prefs.RunSSH() && envknob.CanSSHD() { + if buildfeatures.HasSSH && prefs.RunSSH() && envknob.CanSSHD() { // TODO(bradfitz): this is called with b.mu held. Not ideal. // If the filesystem gets wedged or something we could block for // a long time. But probably fine. diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index e48b1f2f1286e..e2c2f50671386 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 +//go:build ((linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9) && !ts_omit_ssh package ipnlocal diff --git a/ipn/ipnlocal/ssh_stub.go b/ipn/ipnlocal/ssh_stub.go index d129084e4c10c..6b2e36015c2d7 100644 --- a/ipn/ipnlocal/ssh_stub.go +++ b/ipn/ipnlocal/ssh_stub.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9) +//go:build ts_omit_ssh || ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9) package ipnlocal From 312582bdbfca47948453b446a055c87a40b416d4 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 17 Sep 2025 11:11:35 +0100 Subject: [PATCH 0353/1093] ssh/tailssh: mark TestSSHRecordingCancelsSessionsOnUploadFailure as flaky Updates https://github.com/tailscale/tailscale/issues/7707 Signed-off-by: Alex Chan --- ssh/tailssh/tailssh_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 44b2d68dfdfb3..3b6d3c52c391c 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -36,6 +36,7 @@ import ( gossh "golang.org/x/crypto/ssh" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" + 
"tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/net/memnet" @@ -489,6 +490,8 @@ func newSSHRule(action *tailcfg.SSHAction) *tailcfg.SSHRule { } func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7707") + if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS) } From ddc0cd7e1eb289ab1b9d491762b6b5249a960b77 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Wed, 10 Sep 2025 17:31:25 +0100 Subject: [PATCH 0354/1093] ipn/ipnlocal: disconnect and block when key expires even when using seamless Updates tailscale/corp#31478 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 6 +- ipn/ipnlocal/state_test.go | 142 ++++++++++++++++++++++++++++++++++++- 2 files changed, 144 insertions(+), 4 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 1340942019477..a712dc98aff84 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5735,9 +5735,9 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock switch newState { case ipn.NeedsLogin: systemd.Status("Needs login: %s", authURL) - if b.seamlessRenewalEnabled() { - break - } + // always block updates on NeedsLogin even if seamless renewal is enabled, + // to prevent calls to authReconfig from reconfiguring the engine when our + // key has expired and we're waiting to authenticate to use the new key. 
b.blockEngineUpdates(true) fallthrough case ipn.Stopped, ipn.NoState: diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index ff21c920c2b7a..609a51c5bd657 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -204,6 +204,16 @@ func (cc *mockControl) authenticated(nm *netmap.NetworkMap) { cc.send(nil, "", true, nm) } +func (cc *mockControl) sendAuthURL(nm *netmap.NetworkMap) { + s := controlclient.Status{ + URL: "https://example.com/a/foo", + NetMap: nm, + Persist: cc.persist.View(), + } + s.SetStateForTest(controlclient.StateURLVisitRequired) + cc.opts.Observer.SetControlClientStatus(cc, s) +} + // called records that a particular function name was called. func (cc *mockControl) called(s string) { cc.mu.Lock() @@ -1362,11 +1372,141 @@ func TestEngineReconfigOnStateChange(t *testing.T) { steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { mustDo(t)(lb.Start(ipn.Options{})) mustDo2(t)(lb.EditPrefs(connect)) - cc().authenticated(node3) + cc().authenticated(node1) + cc().send(nil, "", false, &netmap.NetworkMap{ + Expiry: time.Now().Add(-time.Minute), + }) + }, + wantState: ipn.NeedsLogin, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/InitReauth", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + }, + // Without seamless renewal, even starting a reauth tears down everything: + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/InitReauth/Login", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + 
cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + + // Complete the re-auth process: + cc().authenticated(node1) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/InitReauth", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + }, + // With seamless renewal, starting a reauth should leave everything up: + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/InitReauth/Login", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + 
lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + + // Complete the re-auth process: + cc().authenticated(node1) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/Expire", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) cc().send(nil, "", false, &netmap.NetworkMap{ Expiry: time.Now().Add(-time.Minute), }) }, + // Even with seamless, if the key we are using expires, we want to disconnect: wantState: ipn.NeedsLogin, wantCfg: &wgcfg.Config{}, wantRouterCfg: &router.Config{}, From db0b9a361c6ae3398b05733086e17b59b27110b6 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 17 Sep 2025 10:46:13 +0100 Subject: [PATCH 0355/1093] net/dns: don't timeout if inotify sends multiple events This fixes a flaky test which has been occasionally timing out in CI. In particular, this test times out if `watchFile` receives multiple notifications from inotify before we cancel the test context. We block processing the second notification, because we've stopped listening to the `callbackDone` channel. This patch changes the test so we only send on the first notification. Testing this locally with `stress` confirms that the test is no longer flaky. 
Fixes #17172 Updates #14699 Signed-off-by: Alex Chan --- net/dns/direct_linux_test.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/dns/direct_linux_test.go b/net/dns/direct_linux_test.go index 079d060ed3323..e8f917b907a80 100644 --- a/net/dns/direct_linux_test.go +++ b/net/dns/direct_linux_test.go @@ -25,8 +25,13 @@ func TestWatchFile(t *testing.T) { var callbackCalled atomic.Bool callbackDone := make(chan bool) callback := func() { - callbackDone <- true - callbackCalled.Store(true) + // We only send to the channel once to avoid blocking if the + // callback is called multiple times -- this happens occasionally + // if inotify sends multiple events before we cancel the context. + if !callbackCalled.Load() { + callbackDone <- true + callbackCalled.Store(true) + } } var eg errgroup.Group From 48029a897df2643bc47446076b9516e1cf55a03c Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 17 Sep 2025 07:20:34 -0700 Subject: [PATCH 0356/1093] util/eventbus: allow test expectations reporting only an error (#17146) Extend the Expect method of a Watcher to allow filter functions that report only an error value, and which "pass" when the reported error is nil. Updates #15160 Change-Id: I582d804554bd1066a9e499c1f3992d068c9e8148 Signed-off-by: M. J. Fromberger --- util/eventbus/eventbustest/eventbustest.go | 24 ++++++++++++++----- .../eventbustest/eventbustest_test.go | 21 ++++++++++++++++ 2 files changed, 39 insertions(+), 6 deletions(-) diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index af725ace1f9ee..d5cfe53950a8c 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -79,6 +79,11 @@ func Type[T any]() func(T) { return func(T) {} } // // The if error != nil, the test helper will return that error immediately. // func(e ExpectedType) (bool, error) // +// // Tests for event type and whatever is defined in the body. 
+// // If a non-nil error is reported, the test helper will return that error +// // immediately; otherwise the expectation is considered to be met. +// func(e ExpectedType) error +// // If the list of events must match exactly with no extra events, // use [ExpectExactly]. func Expect(tw *Watcher, filters ...any) error { @@ -179,15 +184,22 @@ func eventFilter(f any) filter { return []reflect.Value{reflect.ValueOf(true), reflect.Zero(reflect.TypeFor[error]())} } case 1: - if ft.Out(0) != reflect.TypeFor[bool]() { - panic(fmt.Sprintf("result is %T, want bool", ft.Out(0))) - } - fixup = func(vals []reflect.Value) []reflect.Value { - return append(vals, reflect.Zero(reflect.TypeFor[error]())) + switch ft.Out(0) { + case reflect.TypeFor[bool](): + fixup = func(vals []reflect.Value) []reflect.Value { + return append(vals, reflect.Zero(reflect.TypeFor[error]())) + } + case reflect.TypeFor[error](): + fixup = func(vals []reflect.Value) []reflect.Value { + pass := vals[0].IsZero() + return append([]reflect.Value{reflect.ValueOf(pass)}, vals...) 
+ } + default: + panic(fmt.Sprintf("result is %v, want bool or error", ft.Out(0))) } case 2: if ft.Out(0) != reflect.TypeFor[bool]() || ft.Out(1) != reflect.TypeFor[error]() { - panic(fmt.Sprintf("results are %T, %T; want bool, error", ft.Out(0), ft.Out(1))) + panic(fmt.Sprintf("results are %v, %v; want bool, error", ft.Out(0), ft.Out(1))) } fixup = func(vals []reflect.Value) []reflect.Value { return vals } default: diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index fd95973e5538d..351553cc8af2b 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -54,6 +54,27 @@ func TestExpectFilter(t *testing.T) { }, wantErr: false, }, + { + name: "filter-with-nil-error", + events: []int{1, 2, 3}, + expectFunc: func(event EventFoo) error { + if event.Value > 10 { + return fmt.Errorf("value > 10: %d", event.Value) + } + return nil + }, + }, + { + name: "filter-with-non-nil-error", + events: []int{100, 200, 300}, + expectFunc: func(event EventFoo) error { + if event.Value > 10 { + return fmt.Errorf("value > 10: %d", event.Value) + } + return nil + }, + wantErr: true, + }, { name: "first event has to be func", events: []int{24, 42}, From df362d0a0899e57b7e11e5de397b3688e850847b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 17 Sep 2025 10:49:41 -0400 Subject: [PATCH 0357/1093] net/netmon: make ChangeDelta event not a pointer (#17112) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes things work slightly better over the eventbus. Also switches ipnlocal to use the event over the eventbus instead of the direct callback. 
Updates #15160 Signed-off-by: Claus Lensbøl --- ipn/ipnlocal/local.go | 11 +++--- net/netmon/netmon.go | 12 ++---- net/netmon/netmon_test.go | 2 +- wgengine/userspace.go | 78 +++++++++++++++++++++++++-------------- 4 files changed, 61 insertions(+), 42 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a712dc98aff84..0173491650e22 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -207,6 +207,7 @@ type LocalBackend struct { clientVersionSub *eventbus.Subscriber[tailcfg.ClientVersion] autoUpdateSub *eventbus.Subscriber[controlclient.AutoUpdate] healthChangeSub *eventbus.Subscriber[health.Change] + changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] subsDoneCh chan struct{} // closed when consumeEventbusTopics returns health *health.Tracker // always non-nil polc policyclient.Client // always non-nil @@ -216,7 +217,6 @@ type LocalBackend struct { dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys pushDeviceToken syncs.AtomicValue[string] backendLogID logid.PublicID - unregisterNetMon func() unregisterSysPolicyWatch func() portpoll *portlist.Poller // may be nil portpollOnce sync.Once // guards starting readPoller @@ -544,6 +544,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.clientVersionSub = eventbus.Subscribe[tailcfg.ClientVersion](b.eventClient) b.autoUpdateSub = eventbus.Subscribe[controlclient.AutoUpdate](b.eventClient) b.healthChangeSub = eventbus.Subscribe[health.Change](b.eventClient) + b.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](b.eventClient) nb := newNodeBackend(ctx, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -591,10 +592,9 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.e.SetStatusCallback(b.setWgengineStatus) b.prevIfState = netMon.InterfaceState() - // Call our linkChange code once with the current state, and - // then also whenever it changes: + // Call our linkChange code once with 
the current state. + // Following changes are triggered via the eventbus. b.linkChange(&netmon.ChangeDelta{New: netMon.InterfaceState()}) - b.unregisterNetMon = netMon.RegisterChangeCallback(b.linkChange) if tunWrap, ok := b.sys.Tun.GetOK(); ok { tunWrap.PeerAPIPort = b.GetPeerAPIPort @@ -633,6 +633,8 @@ func (b *LocalBackend) consumeEventbusTopics() { b.onTailnetDefaultAutoUpdate(au.Value) case change := <-b.healthChangeSub.Events(): b.onHealthChange(change) + case changeDelta := <-b.changeDeltaSub.Events(): + b.linkChange(&changeDelta) } } } @@ -1160,7 +1162,6 @@ func (b *LocalBackend) Shutdown() { } b.stopOfflineAutoUpdate() - b.unregisterNetMon() b.unregisterSysPolicyWatch() if cc != nil { cc.Shutdown() diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index b97b184d476f4..fcac9c4ee2bee 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -53,7 +53,7 @@ type osMon interface { type Monitor struct { logf logger.Logf b *eventbus.Client - changed *eventbus.Publisher[*ChangeDelta] + changed *eventbus.Publisher[ChangeDelta] om osMon // nil means not supported on this platform change chan bool // send false to wake poller, true to also force ChangeDeltas be sent @@ -84,9 +84,6 @@ type ChangeFunc func(*ChangeDelta) // ChangeDelta describes the difference between two network states. type ChangeDelta struct { - // Monitor is the network monitor that sent this delta. - Monitor *Monitor - // Old is the old interface state, if known. // It's nil if the old state is unknown. // Do not mutate it. 
@@ -126,7 +123,7 @@ func New(bus *eventbus.Bus, logf logger.Logf) (*Monitor, error) { stop: make(chan struct{}), lastWall: wallTime(), } - m.changed = eventbus.Publish[*ChangeDelta](m.b) + m.changed = eventbus.Publish[ChangeDelta](m.b) st, err := m.interfaceStateUncached() if err != nil { return nil, err @@ -401,8 +398,7 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { return } - delta := &ChangeDelta{ - Monitor: m, + delta := ChangeDelta{ Old: oldState, New: newState, TimeJumped: timeJumped, @@ -437,7 +433,7 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { } m.changed.Publish(delta) for _, cb := range m.cbs { - go cb(delta) + go cb(&delta) } } diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index b8ec1b75f97ec..5fcdcc6ccd64e 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -81,7 +81,7 @@ func TestMonitorInjectEventOnBus(t *testing.T) { mon.Start() mon.InjectEvent() - if err := eventbustest.Expect(tw, eventbustest.Type[*ChangeDelta]()); err != nil { + if err := eventbustest.Expect(tw, eventbustest.Type[ChangeDelta]()); err != nil { t.Error(err) } } diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 4a9f321430c12..42c12c008cffe 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -93,26 +93,28 @@ const networkLoggerUploadTimeout = 5 * time.Second type userspaceEngine struct { // eventBus will eventually become required, but for now may be nil. // TODO(creachadair): Enforce that this is non-nil at construction. 
- eventBus *eventbus.Bus - - logf logger.Logf - wgLogger *wglog.Logger // a wireguard-go logging wrapper - reqCh chan struct{} - waitCh chan struct{} // chan is closed when first Close call completes; contrast with closing bool - timeNow func() mono.Time - tundev *tstun.Wrapper - wgdev *device.Device - router router.Router - dialer *tsdial.Dialer - confListenPort uint16 // original conf.ListenPort - dns *dns.Manager - magicConn *magicsock.Conn - netMon *netmon.Monitor - health *health.Tracker - netMonOwned bool // whether we created netMon (and thus need to close it) - netMonUnregister func() // unsubscribes from changes; used regardless of netMonOwned - birdClient BIRDClient // or nil - controlKnobs *controlknobs.Knobs // or nil + eventBus *eventbus.Bus + eventClient *eventbus.Client + changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] + subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + + logf logger.Logf + wgLogger *wglog.Logger // a wireguard-go logging wrapper + reqCh chan struct{} + waitCh chan struct{} // chan is closed when first Close call completes; contrast with closing bool + timeNow func() mono.Time + tundev *tstun.Wrapper + wgdev *device.Device + router router.Router + dialer *tsdial.Dialer + confListenPort uint16 // original conf.ListenPort + dns *dns.Manager + magicConn *magicsock.Conn + netMon *netmon.Monitor + health *health.Tracker + netMonOwned bool // whether we created netMon (and thus need to close it) + birdClient BIRDClient // or nil + controlKnobs *controlknobs.Knobs // or nil testMaybeReconfigHook func() // for tests; if non-nil, fires if maybeReconfigWireguardLocked called @@ -352,7 +354,11 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) controlKnobs: conf.ControlKnobs, reconfigureVPN: conf.ReconfigureVPN, health: conf.HealthTracker, + subsDoneCh: make(chan struct{}), } + e.eventClient = e.eventBus.Client("userspaceEngine") + e.changeDeltaSub = 
eventbus.Subscribe[netmon.ChangeDelta](e.eventClient) + closePool.addFunc(e.eventClient.Close) if e.birdClient != nil { // Disable the protocol at start time. @@ -385,13 +391,6 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) logf("link state: %+v", e.netMon.InterfaceState()) - unregisterMonWatch := e.netMon.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - tshttpproxy.InvalidateCache() - e.linkChange(delta) - }) - closePool.addFunc(unregisterMonWatch) - e.netMonUnregister = unregisterMonWatch - endpointsFn := func(endpoints []tailcfg.Endpoint) { e.mu.Lock() e.endpoints = append(e.endpoints[:0], endpoints...) @@ -546,10 +545,31 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } + go e.consumeEventbusTopics() + e.logf("Engine created.") return e, nil } +// consumeEventbusTopics consumes events from all relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [eventbus.Client] is closed. +func (e *userspaceEngine) consumeEventbusTopics() { + defer close(e.subsDoneCh) + + for { + select { + case <-e.eventClient.Done(): + return + case changeDelta := <-e.changeDeltaSub.Events(): + tshttpproxy.InvalidateCache() + e.linkChange(&changeDelta) + } + } +} + // echoRespondToAll is an inbound post-filter responding to all echo requests. 
func echoRespondToAll(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { if p.IsEchoRequest() { @@ -1208,6 +1228,9 @@ func (e *userspaceEngine) RequestStatus() { } func (e *userspaceEngine) Close() { + e.eventClient.Close() + <-e.subsDoneCh + e.mu.Lock() if e.closing { e.mu.Unlock() @@ -1219,7 +1242,6 @@ func (e *userspaceEngine) Close() { r := bufio.NewReader(strings.NewReader("")) e.wgdev.IpcSetOperation(r) e.magicConn.Close() - e.netMonUnregister() if e.netMonOwned { e.netMon.Close() } From 8a4b1eb6a3cf9c3f082c2d725968239084bfeb51 Mon Sep 17 00:00:00 2001 From: Elliot Blackburn Date: Wed, 17 Sep 2025 16:18:25 +0100 Subject: [PATCH 0358/1093] words: add some more (#17177) Updates #words Signed-off-by: Elliot Blackburn --- words/scales.txt | 9 +++++++++ words/tails.txt | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/words/scales.txt b/words/scales.txt index 532734f6dcf8a..bb623fb6f1ab8 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -442,3 +442,12 @@ salary fujita caiman cichlid +logarithm +exponential +geological +cosmological +barometric +ph +pain +temperature +wyrm diff --git a/words/tails.txt b/words/tails.txt index 20ff326c1e6fd..f5e93bf504687 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -764,3 +764,12 @@ sailfish billfish taimen sargo +story +tale +gecko +wyrm +meteor +ribbon +echo +lemming +worm From 6992f958fc5eb8309f204da953664181256e96ed Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 17 Sep 2025 08:39:29 -0700 Subject: [PATCH 0359/1093] util/eventbus: add an EqualTo helper for testing (#17178) For a common case of events being simple struct types with some exported fields, add a helper to check (reflectively) for equal values using cmp.Diff so that a failed comparison gives a useful diff in the test output. More complex uses will still want to provide their own comparisons; this (intentionally) does not export diff options or other hooks from the cmp package. 
Updates #15160 Change-Id: I86bee1771cad7debd9e3491aa6713afe6fd577a6 Signed-off-by: M. J. Fromberger --- util/eventbus/eventbustest/eventbustest.go | 14 ++++++++ .../eventbustest/eventbustest_test.go | 35 ++++++++++++++----- 2 files changed, 40 insertions(+), 9 deletions(-) diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index d5cfe53950a8c..c32e7114036dc 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "tailscale.com/util/eventbus" ) @@ -249,3 +250,16 @@ func Inject[T any](inj *Injector, event T) { } pub.(*eventbus.Publisher[T]).Publish(event) } + +// EqualTo returns an event-matching function for use with [Expect] and +// [ExpectExactly] that matches on an event of the given type that is equal to +// want by comparison with [cmp.Diff]. The expectation fails with an error +// message including the diff, if present. 
+func EqualTo[T any](want T) func(T) error { + return func(got T) error { + if diff := cmp.Diff(got, want); diff != "" { + return fmt.Errorf("wrong result (-got, +want):\n%s", diff) + } + return nil + } +} diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index 351553cc8af2b..f8b37eefec716 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -5,6 +5,7 @@ package eventbustest_test import ( "fmt" + "strings" "testing" "time" @@ -29,19 +30,17 @@ func TestExpectFilter(t *testing.T) { name string events []int expectFunc any - wantErr bool + wantErr string // if non-empty, an error is expected containing this text }{ { name: "single event", events: []int{42}, expectFunc: eventbustest.Type[EventFoo](), - wantErr: false, }, { name: "multiple events, single expectation", events: []int{42, 1, 2, 3, 4, 5}, expectFunc: eventbustest.Type[EventFoo](), - wantErr: false, }, { name: "filter on event with function", @@ -52,7 +51,6 @@ func TestExpectFilter(t *testing.T) { } return false, nil }, - wantErr: false, }, { name: "filter-with-nil-error", @@ -73,7 +71,7 @@ func TestExpectFilter(t *testing.T) { } return nil }, - wantErr: true, + wantErr: "value > 10", }, { name: "first event has to be func", @@ -84,7 +82,18 @@ func TestExpectFilter(t *testing.T) { } return false, nil }, - wantErr: true, + wantErr: "expected 42, got 24", + }, + { + name: "equal-values", + events: []int{23}, + expectFunc: eventbustest.EqualTo(EventFoo{Value: 23}), + }, + { + name: "unequal-values", + events: []int{37}, + expectFunc: eventbustest.EqualTo(EventFoo{Value: 23}), + wantErr: "wrong result (-got, +want)", }, { name: "no events", @@ -92,7 +101,7 @@ func TestExpectFilter(t *testing.T) { expectFunc: func(event EventFoo) (bool, error) { return true, nil }, - wantErr: true, + wantErr: "timed out waiting", }, } @@ -113,8 +122,16 @@ func TestExpectFilter(t *testing.T) { 
updater.Publish(EventFoo{i}) } - if err := eventbustest.Expect(tw, tt.expectFunc); (err != nil) != tt.wantErr { - t.Errorf("ExpectFilter[EventFoo]: error = %v, wantErr %v", err, tt.wantErr) + if err := eventbustest.Expect(tw, tt.expectFunc); err != nil { + if tt.wantErr == "" { + t.Errorf("Expect[EventFoo]: unexpected error: %v", err) + } else if !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("Expect[EventFoo]: err = %v, want %q", err, tt.wantErr) + } else { + t.Logf("Got expected error: %v (OK)", err) + } + } else if tt.wantErr != "" { + t.Errorf("Expect[EventFoo]: unexpectedly succeeded, want error %q", tt.wantErr) } }) } From 9d661663f33a0cba3c372278864600f62c39a4b4 Mon Sep 17 00:00:00 2001 From: Remy Guercio Date: Wed, 17 Sep 2025 14:05:22 -0500 Subject: [PATCH 0360/1093] cmd/tsidp: update README with new repo location warning Fixes: #17170 Signed-off-by: Remy Guercio --- cmd/tsidp/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index ffc296b87862a..1635feabf22f8 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -1,3 +1,6 @@ +> [!CAUTION] +> Development of tsidp has been moved to [https://github.com/tailscale/tsidp](https://github.com/tailscale/tsidp) and it is no longer maintained here. Please visit the new repository to see the latest updates, file an issue, or contribute. 
+ # `tsidp` - Tailscale OpenID Connect (OIDC) Identity Provider [![status: community project](https://img.shields.io/badge/status-community_project-blue)](https://tailscale.com/kb/1531/community-projects) From bb38bf74144b69130acffdd479db31607a14b339 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 17 Sep 2025 20:22:24 +0100 Subject: [PATCH 0361/1093] docker: bump alpine v3.19 -> 3.22 (#17155) Updates #15328 Change-Id: Ib33baf8756b648176dce461b25169e079cbd5533 Signed-off-by: Tom Proctor --- ALPINE.txt | 2 +- Dockerfile | 6 +++--- Dockerfile.base | 8 ++++---- build_docker.sh | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ALPINE.txt b/ALPINE.txt index 318956c3d51e2..93a84c380075c 100644 --- a/ALPINE.txt +++ b/ALPINE.txt @@ -1 +1 @@ -3.19 \ No newline at end of file +3.22 \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index bd0f2840fddc1..c546cf6574abd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -71,10 +71,10 @@ RUN GOARCH=$TARGETARCH go install -ldflags="\ -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \ -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot -FROM alpine:3.19 +FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables -RUN rm /sbin/iptables && ln -s /sbin/iptables-legacy /sbin/iptables -RUN rm /sbin/ip6tables && ln -s /sbin/ip6tables-legacy /sbin/ip6tables +RUN ln -s /sbin/iptables-legacy /sbin/iptables +RUN ln -s /sbin/ip6tables-legacy /sbin/ip6tables COPY --from=build-env /go/bin/* /usr/local/bin/ # For compat with the previous run.sh, although ideally you should be diff --git a/Dockerfile.base b/Dockerfile.base index b7e79a43c6fdf..6c3c8ed084fce 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -1,12 +1,12 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -FROM alpine:3.19 +FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iptables-legacy iproute2 ip6tables iputils -# Alpine 3.19 replaces legacy iptables with 
nftables based implementation. We +# Alpine 3.19 replaced legacy iptables with nftables based implementation. We # can't be certain that all hosts that run Tailscale containers currently # suppport nftables, so link back to legacy for backwards compatibility reasons. # TODO(irbekrm): add some way how to determine if we still run on nodes that # don't support nftables, so that we can eventually remove these symlinks. -RUN rm /sbin/iptables && ln -s /sbin/iptables-legacy /sbin/iptables -RUN rm /sbin/ip6tables && ln -s /sbin/ip6tables-legacy /sbin/ip6tables +RUN ln -s /sbin/iptables-legacy /sbin/iptables +RUN ln -s /sbin/ip6tables-legacy /sbin/ip6tables diff --git a/build_docker.sh b/build_docker.sh index bdeaa8659b805..37f00bf53e3d9 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -26,7 +26,7 @@ eval "$(./build_dist.sh shellvars)" DEFAULT_TARGET="client" DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}" -DEFAULT_BASE="tailscale/alpine-base:3.19" +DEFAULT_BASE="tailscale/alpine-base:3.22" # Set a few pre-defined OCI annotations. The source annotation is used by tools such as Renovate that scan the linked # Github repo to find release notes for any new image tags. Note that for official Tailscale images the default # annotations defined here will be overriden by release scripts that call this script. 
From 73c371f78403b9e11259d7241caba2ca4654911b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 17 Sep 2025 12:49:00 -0700 Subject: [PATCH 0362/1093] cmd/derper: permit port 80 in ACE targets Updates tailscale/corp#32168 Updates tailscale/corp#32226 Change-Id: Iddc017b060c76e6eab8f6d0c989a775bcaae3518 Signed-off-by: Brad Fitzpatrick --- cmd/derper/ace.go | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/cmd/derper/ace.go b/cmd/derper/ace.go index 301b029ccf1cf..a11539a6e19fb 100644 --- a/cmd/derper/ace.go +++ b/cmd/derper/ace.go @@ -35,8 +35,35 @@ func serveConnect(s *derp.Server, w http.ResponseWriter, r *http.Request) { if err != nil { return err } - if port != "443" { - return fmt.Errorf("only port 443 is allowed") + if port != "443" && port != "80" { + // There are only two types of CONNECT requests the client makes + // via ACE: requests for /key (port 443) and requests to upgrade + // to the bidirectional ts2021 Noise protocol. + // + // The ts2021 layer can bootstrap over port 80 (http) or port + // 443 (https). + // + // Without ACE, we prefer port 80 to avoid unnecessary double + // encryption. But enough places require TLS+port 443 that we do + // support that double encryption path as a fallback. + // + // But ACE adds its own TLS layer (ACE is always CONNECT over + // https). If we don't permit port 80 here as a target, we'd + // have three layers of encryption (TLS + TLS + Noise) which is + // even more silly than two. + // + // So we permit port 80 such that we can only have two layers of + // encryption, varying by the request type: + // + // 1. TLS from client to ACE proxy (CONNECT) + // 2a. TLS from ACE proxy to https://controlplane.tailscale.com/key (port 443) + // 2b. ts2021 Noise from ACE proxy to http://controlplane.tailscale.com/ts2021 (port 80) + // + // But nothing's stopping the client from doing its ts2021 + // upgrade over https anyway and having three layers of + // encryption. 
But we can at least permit the client to do a + // "CONNECT controlplane.tailscale.com:80 HTTP/1.1" if it wants. + return fmt.Errorf("only ports 443 and 80 are allowed") } // TODO(bradfitz): make policy configurable from flags and/or come // from local tailscaled nodeAttrs From 55d0e6d3a8f2622355d9dde1c71c4932731fb319 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 16 Sep 2025 20:32:28 -0700 Subject: [PATCH 0363/1093] net/dns/recursive: remove recursive DNS resolver It doesn't really pull its weight: it adds 577 KB to the binary and is rarely useful. Also, we now have static IPs and other connectivity paths coming soon enough. Updates #5853 Updates #1278 Updates tailscale/corp#32168 Change-Id: If336fed00a9c9ae9745419e6d81f7de6da6f7275 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 6 +- cmd/tailscale/depaware.txt | 12 +- cmd/tailscaled/depaware.txt | 8 +- cmd/tsidp/depaware.txt | 6 +- net/dns/recursive/recursive.go | 622 ----------------------- net/dns/recursive/recursive_test.go | 742 ---------------------------- net/dnsfallback/dnsfallback.go | 155 +----- tsnet/depaware.txt | 6 +- 8 files changed, 15 insertions(+), 1542 deletions(-) delete mode 100644 net/dns/recursive/recursive.go delete mode 100644 net/dns/recursive/recursive_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index faf7b2f838d91..e65977875ca7a 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -168,7 +168,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/mdlayher/netlink/nltest from github.com/google/nftables L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/modern-go/concurrent from github.com/json-iterator/go 💣 github.com/modern-go/reflect2 from 
github.com/json-iterator/go @@ -847,7 +846,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/cmd/k8s-operator+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ @@ -1026,8 +1024,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/net/websocket from tailscale.com/k8s-operator/sessionrecording/ws diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index c86af7ea76645..ae4a7bd4d774b 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -48,7 +48,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ 
github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ @@ -121,7 +120,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/metrics from tailscale.com/derp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ tailscale.com/net/dnsfallback from tailscale.com/control/controlhttp+ tailscale.com/net/netaddr from tailscale.com/ipn+ @@ -192,8 +190,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/set from tailscale.com/derp+ - tailscale.com/util/singleflight from tailscale.com/net/dnscache+ - tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ + tailscale.com/util/singleflight from tailscale.com/net/dnscache + tailscale.com/util/slicesx from tailscale.com/client/systray+ L tailscale.com/util/stringsx from tailscale.com/client/systray tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ @@ -250,8 +248,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from golang.org/x/net/icmp+ + golang.org/x/net/ipv6 from golang.org/x/net/icmp+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/oauth2 from 
golang.org/x/oauth2/clientcredentials @@ -337,7 +335,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/miekg/dns+ + crypto/tls from golang.org/x/net/http2+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index d4e1f13bf95b6..4482ad125075b 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -149,7 +149,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/mdlayher/netlink/nltest from github.com/google/nftables L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio L github.com/pierrec/lz4/v4/internal/lz4block from github.com/pierrec/lz4/v4+ @@ -321,7 +320,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ @@ -433,7 +431,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock tailscale.com/util/set from tailscale.com/derp+ tailscale.com/util/singleflight from 
tailscale.com/control/controlclient+ - tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ + tailscale.com/util/slicesx from tailscale.com/appc+ tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ @@ -504,8 +502,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 0aafff8e159f9..7db7849b74954 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -132,7 +132,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L github.com/mdlayher/netlink/nltest from github.com/google/nftables L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ @@ -276,7 +275,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/connstats from tailscale.com/net/tstun+ 
tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ @@ -455,8 +453,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ diff --git a/net/dns/recursive/recursive.go b/net/dns/recursive/recursive.go deleted file mode 100644 index fd865e37ab737..0000000000000 --- a/net/dns/recursive/recursive.go +++ /dev/null @@ -1,622 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package recursive implements a simple recursive DNS resolver. -package recursive - -import ( - "context" - "errors" - "fmt" - "net" - "net/netip" - "slices" - "strings" - "time" - - "github.com/miekg/dns" - "tailscale.com/envknob" - "tailscale.com/net/netns" - "tailscale.com/types/logger" - "tailscale.com/util/dnsname" - "tailscale.com/util/mak" - "tailscale.com/util/multierr" - "tailscale.com/util/slicesx" -) - -const ( - // maxDepth is how deep from the root nameservers we'll recurse when - // resolving; passing this limit will instead return an error. 
- // - // maxDepth must be at least 20 to resolve "console.aws.amazon.com", - // which is a domain with a moderately complicated DNS setup. The - // current value of 30 was chosen semi-arbitrarily to ensure that we - // have about 50% headroom. - maxDepth = 30 - // numStartingServers is the number of root nameservers that we use as - // initial candidates for our recursion. - numStartingServers = 3 - // udpQueryTimeout is the amount of time we wait for a UDP response - // from a nameserver before falling back to a TCP connection. - udpQueryTimeout = 5 * time.Second - - // These constants aren't typed in the DNS package, so we create typed - // versions here to avoid having to do repeated type casts. - qtypeA dns.Type = dns.Type(dns.TypeA) - qtypeAAAA dns.Type = dns.Type(dns.TypeAAAA) -) - -var ( - // ErrMaxDepth is returned when recursive resolving exceeds the maximum - // depth limit for this package. - ErrMaxDepth = fmt.Errorf("exceeded max depth %d when resolving", maxDepth) - - // ErrAuthoritativeNoResponses is the error returned when an - // authoritative nameserver indicates that there are no responses to - // the given query. - ErrAuthoritativeNoResponses = errors.New("authoritative server returned no responses") - - // ErrNoResponses is returned when our resolution process completes - // with no valid responses from any nameserver, but no authoritative - // server explicitly returned NXDOMAIN. 
- ErrNoResponses = errors.New("no responses to query") -) - -var rootServersV4 = []netip.Addr{ - netip.MustParseAddr("198.41.0.4"), // a.root-servers.net - netip.MustParseAddr("170.247.170.2"), // b.root-servers.net - netip.MustParseAddr("192.33.4.12"), // c.root-servers.net - netip.MustParseAddr("199.7.91.13"), // d.root-servers.net - netip.MustParseAddr("192.203.230.10"), // e.root-servers.net - netip.MustParseAddr("192.5.5.241"), // f.root-servers.net - netip.MustParseAddr("192.112.36.4"), // g.root-servers.net - netip.MustParseAddr("198.97.190.53"), // h.root-servers.net - netip.MustParseAddr("192.36.148.17"), // i.root-servers.net - netip.MustParseAddr("192.58.128.30"), // j.root-servers.net - netip.MustParseAddr("193.0.14.129"), // k.root-servers.net - netip.MustParseAddr("199.7.83.42"), // l.root-servers.net - netip.MustParseAddr("202.12.27.33"), // m.root-servers.net -} - -var rootServersV6 = []netip.Addr{ - netip.MustParseAddr("2001:503:ba3e::2:30"), // a.root-servers.net - netip.MustParseAddr("2801:1b8:10::b"), // b.root-servers.net - netip.MustParseAddr("2001:500:2::c"), // c.root-servers.net - netip.MustParseAddr("2001:500:2d::d"), // d.root-servers.net - netip.MustParseAddr("2001:500:a8::e"), // e.root-servers.net - netip.MustParseAddr("2001:500:2f::f"), // f.root-servers.net - netip.MustParseAddr("2001:500:12::d0d"), // g.root-servers.net - netip.MustParseAddr("2001:500:1::53"), // h.root-servers.net - netip.MustParseAddr("2001:7fe::53"), // i.root-servers.net - netip.MustParseAddr("2001:503:c27::2:30"), // j.root-servers.net - netip.MustParseAddr("2001:7fd::1"), // k.root-servers.net - netip.MustParseAddr("2001:500:9f::42"), // l.root-servers.net - netip.MustParseAddr("2001:dc3::35"), // m.root-servers.net -} - -var debug = envknob.RegisterBool("TS_DEBUG_RECURSIVE_DNS") - -// Resolver is a recursive DNS resolver that is designed for looking up A and AAAA records. -type Resolver struct { - // Dialer is used to create outbound connections. 
If nil, a zero - // net.Dialer will be used instead. - Dialer netns.Dialer - - // Logf is the logging function to use; if none is specified, then logs - // will be dropped. - Logf logger.Logf - - // NoIPv6, if set, will prevent this package from querying for AAAA - // records and will avoid contacting nameservers over IPv6. - NoIPv6 bool - - // Test mocks - testQueryHook func(name dnsname.FQDN, nameserver netip.Addr, protocol string, qtype dns.Type) (*dns.Msg, error) - testExchangeHook func(nameserver netip.Addr, network string, msg *dns.Msg) (*dns.Msg, error) - rootServers []netip.Addr - timeNow func() time.Time - - // Caching - // NOTE(andrew): if we make resolution parallel, this needs a mutex - queryCache map[dnsQuery]dnsMsgWithExpiry - - // Possible future additions: - // - Additional nameservers? From the system maybe? - // - NoIPv4 for IPv4 - // - DNS-over-HTTPS or DNS-over-TLS support -} - -// queryState stores all state during the course of a single query -type queryState struct { - // rootServers are the root nameservers to start from - rootServers []netip.Addr - - // TODO: metrics? -} - -type dnsQuery struct { - nameserver netip.Addr - name dnsname.FQDN - qtype dns.Type -} - -func (q dnsQuery) String() string { - return fmt.Sprintf("dnsQuery{nameserver:%q,name:%q,qtype:%v}", q.nameserver.String(), q.name, q.qtype) -} - -type dnsMsgWithExpiry struct { - *dns.Msg - expiresAt time.Time -} - -func (r *Resolver) now() time.Time { - if r.timeNow != nil { - return r.timeNow() - } - return time.Now() -} - -func (r *Resolver) logf(format string, args ...any) { - if r.Logf == nil { - return - } - r.Logf(format, args...) -} - -func (r *Resolver) depthlogf(depth int, format string, args ...any) { - if r.Logf == nil || !debug() { - return - } - prefix := fmt.Sprintf("[%d] %s", depth, strings.Repeat(" ", depth)) - r.Logf(prefix+format, args...) 
-} - -var defaultDialer net.Dialer - -func (r *Resolver) dialer() netns.Dialer { - if r.Dialer != nil { - return r.Dialer - } - - return &defaultDialer -} - -func (r *Resolver) newState() *queryState { - var rootServers []netip.Addr - if len(r.rootServers) > 0 { - rootServers = r.rootServers - } else { - // Select a random subset of root nameservers to start from, since if - // we don't get responses from those, something else has probably gone - // horribly wrong. - roots4 := slices.Clone(rootServersV4) - slicesx.Shuffle(roots4) - roots4 = roots4[:numStartingServers] - - var roots6 []netip.Addr - if !r.NoIPv6 { - roots6 = slices.Clone(rootServersV6) - slicesx.Shuffle(roots6) - roots6 = roots6[:numStartingServers] - } - - // Interleave the root servers so that we try to contact them over - // IPv4, then IPv6, IPv4, IPv6, etc. - rootServers = slicesx.Interleave(roots4, roots6) - } - - return &queryState{ - rootServers: rootServers, - } -} - -// Resolve will perform a recursive DNS resolution for the provided name, -// starting at a randomly-chosen root DNS server, and return the A and AAAA -// responses as a slice of netip.Addrs along with the minimum TTL for the -// returned records. 
-func (r *Resolver) Resolve(ctx context.Context, name string) (addrs []netip.Addr, minTTL time.Duration, err error) { - dnsName, err := dnsname.ToFQDN(name) - if err != nil { - return nil, 0, err - } - - qstate := r.newState() - - r.logf("querying IPv4 addresses for: %q", name) - addrs4, minTTL4, err4 := r.resolveRecursiveFromRoot(ctx, qstate, 0, dnsName, qtypeA) - - var ( - addrs6 []netip.Addr - minTTL6 time.Duration - err6 error - ) - if !r.NoIPv6 { - r.logf("querying IPv6 addresses for: %q", name) - addrs6, minTTL6, err6 = r.resolveRecursiveFromRoot(ctx, qstate, 0, dnsName, qtypeAAAA) - } - - if err4 != nil && err6 != nil { - if err4 == err6 { - return nil, 0, err4 - } - - return nil, 0, multierr.New(err4, err6) - } - if err4 != nil { - return addrs6, minTTL6, nil - } else if err6 != nil { - return addrs4, minTTL4, nil - } - - minTTL = minTTL4 - if minTTL6 < minTTL { - minTTL = minTTL6 - } - - addrs = append(addrs4, addrs6...) - if len(addrs) == 0 { - return nil, 0, ErrNoResponses - } - - slicesx.Shuffle(addrs) - return addrs, minTTL, nil -} - -func (r *Resolver) resolveRecursiveFromRoot( - ctx context.Context, - qstate *queryState, - depth int, - name dnsname.FQDN, // what we're querying - qtype dns.Type, -) ([]netip.Addr, time.Duration, error) { - r.depthlogf(depth, "resolving %q from root (type: %v)", name, qtype) - - var depthError bool - for _, server := range qstate.rootServers { - addrs, minTTL, err := r.resolveRecursive(ctx, qstate, depth, name, server, qtype) - if err == nil { - return addrs, minTTL, err - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - depthError = true - } - } - - if depthError { - return nil, 0, ErrMaxDepth - } - return nil, 0, ErrNoResponses -} - -func (r *Resolver) resolveRecursive( - ctx context.Context, - qstate *queryState, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, - qtype dns.Type, -) 
([]netip.Addr, time.Duration, error) { - if depth == maxDepth { - r.depthlogf(depth, "not recursing past maximum depth") - return nil, 0, ErrMaxDepth - } - - // Ask this nameserver for an answer. - resp, err := r.queryNameserver(ctx, depth, name, nameserver, qtype) - if err != nil { - return nil, 0, err - } - - // If we get an actual answer from the nameserver, then return it. - var ( - answers []netip.Addr - cnames []dnsname.FQDN - minTTL = 24 * 60 * 60 // 24 hours in seconds - ) - for _, answer := range resp.Answer { - if crec, ok := answer.(*dns.CNAME); ok { - cnameFQDN, err := dnsname.ToFQDN(crec.Target) - if err != nil { - r.logf("bad CNAME %q returned: %v", crec.Target, err) - continue - } - - cnames = append(cnames, cnameFQDN) - continue - } - - addr := addrFromRecord(answer) - if !addr.IsValid() { - r.logf("[unexpected] invalid record in %T answer", answer) - } else if addr.Is4() && qtype != qtypeA { - r.logf("[unexpected] got IPv4 answer but qtype=%v", qtype) - } else if addr.Is6() && qtype != qtypeAAAA { - r.logf("[unexpected] got IPv6 answer but qtype=%v", qtype) - } else { - answers = append(answers, addr) - minTTL = min(minTTL, int(answer.Header().Ttl)) - } - } - - if len(answers) > 0 { - r.depthlogf(depth, "got answers for %q: %v", name, answers) - return answers, time.Duration(minTTL) * time.Second, nil - } - - r.depthlogf(depth, "no answers for %q", name) - - // If we have a non-zero number of CNAMEs, then try resolving those - // (from the root again) and return the first one that succeeds. - // - // TODO: return the union of all responses? - // TODO: parallelism? 
- if len(cnames) > 0 { - r.depthlogf(depth, "got CNAME responses for %q: %v", name, cnames) - } - var cnameDepthError bool - for _, cname := range cnames { - answers, minTTL, err := r.resolveRecursiveFromRoot(ctx, qstate, depth+1, cname, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - cnameDepthError = true - } - } - - // If this is an authoritative response, then we know that continuing - // to look further is not going to result in any answers and we should - // bail out. - if resp.MsgHdr.Authoritative { - // If we failed to recurse into a CNAME due to a depth limit, - // propagate that here. - if cnameDepthError { - return nil, 0, ErrMaxDepth - } - - r.depthlogf(depth, "got authoritative response with no answers; stopping") - return nil, 0, ErrAuthoritativeNoResponses - } - - r.depthlogf(depth, "got %d NS responses and %d ADDITIONAL responses for %q", len(resp.Ns), len(resp.Extra), name) - - // No CNAMEs and no answers; see if we got any AUTHORITY responses, - // which indicate which nameservers to query next. - var authorities []dnsname.FQDN - for _, rr := range resp.Ns { - ns, ok := rr.(*dns.NS) - if !ok { - continue - } - - nsName, err := dnsname.ToFQDN(ns.Ns) - if err != nil { - r.logf("unexpected bad NS name %q: %v", ns.Ns, err) - continue - } - - authorities = append(authorities, nsName) - } - - // Also check for "glue" records, which are IP addresses provided by - // the DNS server for authority responses; these are required when the - // authority server is a subdomain of what's being resolved. 
- glueRecords := make(map[dnsname.FQDN][]netip.Addr) - for _, rr := range resp.Extra { - name, err := dnsname.ToFQDN(rr.Header().Name) - if err != nil { - r.logf("unexpected bad Name %q in Extra addr: %v", rr.Header().Name, err) - continue - } - - if addr := addrFromRecord(rr); addr.IsValid() { - glueRecords[name] = append(glueRecords[name], addr) - } else { - r.logf("unexpected bad Extra %T addr", rr) - } - } - - // Try authorities with glue records first, to minimize the number of - // additional DNS queries that we need to make. - authoritiesGlue, authoritiesNoGlue := slicesx.Partition(authorities, func(aa dnsname.FQDN) bool { - return len(glueRecords[aa]) > 0 - }) - - authorityDepthError := false - - r.depthlogf(depth, "authorities with glue records for recursion: %v", authoritiesGlue) - for _, authority := range authoritiesGlue { - for _, nameserver := range glueRecords[authority] { - answers, minTTL, err := r.resolveRecursive(ctx, qstate, depth+1, name, nameserver, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - authorityDepthError = true - } - } - } - - r.depthlogf(depth, "authorities with no glue records for recursion: %v", authoritiesNoGlue) - for _, authority := range authoritiesNoGlue { - // First, resolve the IP for the authority server from the - // root, querying for both IPv4 and IPv6 addresses regardless - // of what the current question type is. - // - // TODO: check for infinite recursion; it'll get caught by our - // recursion depth, but we want to bail early. 
- for _, authorityQtype := range []dns.Type{qtypeAAAA, qtypeA} { - answers, _, err := r.resolveRecursiveFromRoot(ctx, qstate, depth+1, authority, authorityQtype) - if err != nil { - r.depthlogf(depth, "error querying authority %q: %v", authority, err) - continue - } - r.depthlogf(depth, "resolved authority %q (type %v) to: %v", authority, authorityQtype, answers) - - // Now, query this authority for the final address. - for _, nameserver := range answers { - answers, minTTL, err := r.resolveRecursive(ctx, qstate, depth+1, name, nameserver, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - authorityDepthError = true - } - } - } - } - - if authorityDepthError { - return nil, 0, ErrMaxDepth - } - return nil, 0, ErrNoResponses -} - -// queryNameserver sends a query for "name" to the nameserver "nameserver" for -// records of type "qtype", trying both UDP and TCP connections as -// appropriate. -func (r *Resolver) queryNameserver( - ctx context.Context, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, // destination of query - qtype dns.Type, -) (*dns.Msg, error) { - // TODO(andrew): we should QNAME minimisation here to avoid sending the - // full name to intermediate/root nameservers. See: - // https://www.rfc-editor.org/rfc/rfc7816 - - // Handle the case where UDP is blocked by adding an explicit timeout - // for the UDP portion of this query. 
- udpCtx, udpCtxCancel := context.WithTimeout(ctx, udpQueryTimeout) - defer udpCtxCancel() - - msg, err := r.queryNameserverProto(udpCtx, depth, name, nameserver, "udp", qtype) - if err == nil { - return msg, nil - } - - msg, err2 := r.queryNameserverProto(ctx, depth, name, nameserver, "tcp", qtype) - if err2 == nil { - return msg, nil - } - - return nil, multierr.New(err, err2) -} - -// queryNameserverProto sends a query for "name" to the nameserver "nameserver" -// for records of type "qtype" over the provided protocol (either "udp" -// or "tcp"), and returns the DNS response or an error. -func (r *Resolver) queryNameserverProto( - ctx context.Context, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, // destination of query - protocol string, - qtype dns.Type, -) (resp *dns.Msg, err error) { - if r.testQueryHook != nil { - return r.testQueryHook(name, nameserver, protocol, qtype) - } - - now := r.now() - nameserverStr := nameserver.String() - - cacheKey := dnsQuery{ - nameserver: nameserver, - name: name, - qtype: qtype, - } - cacheEntry, ok := r.queryCache[cacheKey] - if ok && cacheEntry.expiresAt.Before(now) { - r.depthlogf(depth, "using cached response from %s about %q (type: %v)", nameserverStr, name, qtype) - return cacheEntry.Msg, nil - } - - var network string - if nameserver.Is4() { - network = protocol + "4" - } else { - network = protocol + "6" - } - - // Prepare a message asking for an appropriately-typed record - // for the name we're querying. - m := new(dns.Msg) - m.SetEdns0(1232, false /* no DNSSEC */) - m.SetQuestion(name.WithTrailingDot(), uint16(qtype)) - - // Allow mocking out the network components with our exchange hook. - if r.testExchangeHook != nil { - resp, err = r.testExchangeHook(nameserver, network, m) - } else { - // Dial the current nameserver using our dialer. 
- var nconn net.Conn - nconn, err = r.dialer().DialContext(ctx, network, net.JoinHostPort(nameserverStr, "53")) - if err != nil { - return nil, err - } - - var c dns.Client // TODO: share? - conn := &dns.Conn{ - Conn: nconn, - UDPSize: c.UDPSize, - } - - // Send the DNS request to the current nameserver. - r.depthlogf(depth, "asking %s over %s about %q (type: %v)", nameserverStr, protocol, name, qtype) - resp, _, err = c.ExchangeWithConnContext(ctx, m, conn) - } - if err != nil { - return nil, err - } - - // If the message was truncated and we're using UDP, re-run with TCP. - if resp.MsgHdr.Truncated && protocol == "udp" { - r.depthlogf(depth, "response message truncated; re-running query with TCP") - resp, err = r.queryNameserverProto(ctx, depth, name, nameserver, "tcp", qtype) - if err != nil { - return nil, err - } - } - - // Find minimum expiry for all records in this message. - var minTTL int - for _, rr := range resp.Answer { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - for _, rr := range resp.Ns { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - for _, rr := range resp.Extra { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - - mak.Set(&r.queryCache, cacheKey, dnsMsgWithExpiry{ - Msg: resp, - expiresAt: now.Add(time.Duration(minTTL) * time.Second), - }) - return resp, nil -} - -func addrFromRecord(rr dns.RR) netip.Addr { - switch v := rr.(type) { - case *dns.A: - ip, ok := netip.AddrFromSlice(v.A) - if !ok || !ip.Is4() { - return netip.Addr{} - } - return ip - case *dns.AAAA: - ip, ok := netip.AddrFromSlice(v.AAAA) - if !ok || !ip.Is6() { - return netip.Addr{} - } - return ip - } - return netip.Addr{} -} diff --git a/net/dns/recursive/recursive_test.go b/net/dns/recursive/recursive_test.go deleted file mode 100644 index d47e4cebf70f2..0000000000000 --- a/net/dns/recursive/recursive_test.go +++ /dev/null @@ -1,742 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package recursive - -import ( - "context" - 
"errors" - "flag" - "fmt" - "net" - "net/netip" - "reflect" - "strings" - "testing" - "time" - - "slices" - - "github.com/miekg/dns" - "tailscale.com/envknob" - "tailscale.com/tstest" -) - -const testDomain = "tailscale.com" - -// Recursively resolving the AWS console requires being able to handle CNAMEs, -// glue records, falling back from UDP to TCP for oversize queries, and more; -// it's a great integration test for DNS resolution and they can handle the -// traffic :) -const complicatedTestDomain = "console.aws.amazon.com" - -var flagNetworkAccess = flag.Bool("enable-network-access", false, "run tests that need external network access") - -func init() { - envknob.Setenv("TS_DEBUG_RECURSIVE_DNS", "true") -} - -func newResolver(tb testing.TB) *Resolver { - clock := tstest.NewClock(tstest.ClockOpts{ - Step: 50 * time.Millisecond, - }) - return &Resolver{ - Logf: tb.Logf, - timeNow: clock.Now, - } -} - -func TestResolve(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - ctx := context.Background() - r := newResolver(t) - addrs, minTTL, err := r.Resolve(ctx, testDomain) - if err != nil { - t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - t.Logf("minTTL: %v", minTTL) - if len(addrs) < 1 { - t.Fatalf("expected at least one address") - } - - if minTTL <= 10*time.Second || minTTL >= 24*time.Hour { - t.Errorf("invalid minimum TTL: %v", minTTL) - } - - var has4, has6 bool - for _, addr := range addrs { - has4 = has4 || addr.Is4() - has6 = has6 || addr.Is6() - } - - if !has4 { - t.Errorf("expected at least one IPv4 address") - } - if !has6 { - t.Errorf("expected at least one IPv6 address") - } -} - -func TestResolveComplicated(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - ctx := context.Background() - r := newResolver(t) - addrs, minTTL, err := r.Resolve(ctx, complicatedTestDomain) - if err != nil { - t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - t.Logf("minTTL: %v", minTTL) - if len(addrs) < 1 { - t.Fatalf("expected at least one 
address") - } - - if minTTL <= 10*time.Second || minTTL >= 24*time.Hour { - t.Errorf("invalid minimum TTL: %v", minTTL) - } -} - -func TestResolveNoIPv6(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - r := newResolver(t) - r.NoIPv6 = true - - addrs, _, err := r.Resolve(context.Background(), testDomain) - if err != nil { - t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - if len(addrs) < 1 { - t.Fatalf("expected at least one address") - } - - for _, addr := range addrs { - if addr.Is6() { - t.Errorf("got unexpected IPv6 address: %v", addr) - } - } -} - -func TestResolveFallbackToTCP(t *testing.T) { - var udpCalls, tcpCalls int - hook := func(nameserver netip.Addr, network string, req *dns.Msg) (*dns.Msg, error) { - if strings.HasPrefix(network, "udp") { - t.Logf("got %q query; returning truncated result", network) - udpCalls++ - resp := &dns.Msg{} - resp.SetReply(req) - resp.Truncated = true - return resp, nil - } - - t.Logf("got %q query; returning real result", network) - tcpCalls++ - resp := &dns.Msg{} - resp.SetReply(req) - resp.Answer = append(resp.Answer, &dns.A{ - Hdr: dns.RR_Header{ - Name: req.Question[0].Name, - Rrtype: req.Question[0].Qtype, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IPv4(1, 2, 3, 4), - }) - return resp, nil - } - - r := newResolver(t) - r.testExchangeHook = hook - - ctx := context.Background() - resp, err := r.queryNameserverProto(ctx, 0, "tailscale.com", netip.MustParseAddr("9.9.9.9"), "udp", dns.Type(dns.TypeA)) - if err != nil { - t.Fatal(err) - } - - if len(resp.Answer) < 1 { - t.Fatalf("no answers in response: %v", resp) - } - rrA, ok := resp.Answer[0].(*dns.A) - if !ok { - t.Fatalf("invalid RR type: %T", resp.Answer[0]) - } - if !rrA.A.Equal(net.IPv4(1, 2, 3, 4)) { - t.Errorf("wanted A response 1.2.3.4, got: %v", rrA.A) - } - if tcpCalls != 1 { - t.Errorf("got %d, want 1 TCP calls", tcpCalls) - } - if udpCalls != 1 { - t.Errorf("got %d, want 1 UDP calls", udpCalls) - } - - // Verify that we're cached and 
re-run to fetch from the cache. - if len(r.queryCache) < 1 { - t.Errorf("wanted entries in the query cache") - } - - resp2, err := r.queryNameserverProto(ctx, 0, "tailscale.com", netip.MustParseAddr("9.9.9.9"), "udp", dns.Type(dns.TypeA)) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(resp, resp2) { - t.Errorf("expected equal responses; old=%+v new=%+v", resp, resp2) - } - - // We didn't make any more network requests since we loaded from the cache. - if tcpCalls != 1 { - t.Errorf("got %d, want 1 TCP calls", tcpCalls) - } - if udpCalls != 1 { - t.Errorf("got %d, want 1 UDP calls", udpCalls) - } -} - -func dnsIPRR(name string, addr netip.Addr) dns.RR { - if addr.Is4() { - return &dns.A{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IP(addr.AsSlice()), - } - } - - return &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: 300, - }, - AAAA: net.IP(addr.AsSlice()), - } -} - -func cnameRR(name, target string) dns.RR { - return &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: 300, - }, - Target: target, - } -} - -func nsRR(name, target string) dns.RR { - return &dns.NS{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 300, - }, - Ns: target, - } -} - -type mockReply struct { - name string - qtype dns.Type - resp *dns.Msg -} - -type replyMock struct { - tb testing.TB - replies map[netip.Addr][]mockReply -} - -func (r *replyMock) exchangeHook(nameserver netip.Addr, network string, req *dns.Msg) (*dns.Msg, error) { - if len(req.Question) != 1 { - r.tb.Fatalf("unsupported multiple or empty question: %v", req.Question) - } - question := req.Question[0] - - replies := r.replies[nameserver] - if len(replies) == 0 { - r.tb.Fatalf("no configured replies for nameserver: %v", nameserver) - } - - for _, reply := range replies { - if reply.name == question.Name && 
reply.qtype == dns.Type(question.Qtype) { - return reply.resp.Copy(), nil - } - } - - r.tb.Fatalf("no replies found for query %q of type %v to %v", question.Name, question.Qtype, nameserver) - panic("unreachable") -} - -// responses for mocking, shared between the following tests -var ( - rootServerAddr = netip.MustParseAddr("198.41.0.4") // a.root-servers.net. - comNSAddr = netip.MustParseAddr("192.5.6.30") // a.gtld-servers.net. - - // DNS response from the root nameservers for a .com nameserver - comRecord = &dns.Msg{ - Ns: []dns.RR{nsRR("com.", "a.gtld-servers.net.")}, - Extra: []dns.RR{dnsIPRR("a.gtld-servers.net.", comNSAddr)}, - } - - // Random Amazon nameservers that we use in glue records - amazonNS = netip.MustParseAddr("205.251.192.197") - amazonNSv6 = netip.MustParseAddr("2600:9000:5306:1600::1") - - // Nameservers for the tailscale.com domain - tailscaleNameservers = &dns.Msg{ - Ns: []dns.RR{ - nsRR("tailscale.com.", "ns-197.awsdns-24.com."), - nsRR("tailscale.com.", "ns-557.awsdns-05.net."), - nsRR("tailscale.com.", "ns-1558.awsdns-02.co.uk."), - nsRR("tailscale.com.", "ns-1359.awsdns-41.org."), - }, - Extra: []dns.RR{ - dnsIPRR("ns-197.awsdns-24.com.", amazonNS), - }, - } -) - -func TestBasicRecursion(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver works. 
- amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{ - dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131")), - dnsIPRR("tailscale.com.", netip.MustParseAddr("76.223.15.28")), - }, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{ - dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b")), - dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5")), - }, - }}, - }, - }, - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for tailscale.com, verify we get the right responses - ctx := context.Background() - addrs, minTTL, err := r.Resolve(ctx, "tailscale.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("76.223.15.28"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} - -func TestNoAnswers(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to 
the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver returns no responses, authoritatively. - amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get the right responses - _, _, err := r.Resolve(context.Background(), "tailscale.com") - if err == nil { - t.Fatalf("got no error, want error") - } - if !errors.Is(err, ErrAuthoritativeNoResponses) { - t.Fatalf("got err=%v, want %v", err, ErrAuthoritativeNoResponses) - } -} - -func TestRecursionCNAME(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: 
tailscaleNameservers}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver works. - amazonNS: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR("subdomain.otherdomain.com.", "subdomain.tailscale.com.")}, - }}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR("subdomain.otherdomain.com.", "subdomain.tailscale.com.")}, - }}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131"))}, - }}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"))}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get the right responses - addrs, minTTL, err := r.Resolve(context.Background(), "subdomain.otherdomain.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} 
- -func TestRecursionNoGlue(t *testing.T) { - coukNS := netip.MustParseAddr("213.248.216.1") - coukRecord := &dns.Msg{ - Ns: []dns.RR{nsRR("com.", "dns1.nic.uk.")}, - Extra: []dns.RR{dnsIPRR("dns1.nic.uk.", coukNS)}, - } - - intermediateNS := netip.MustParseAddr("205.251.193.66") // g-ns-322.awsdns-02.co.uk. - intermediateRecord := &dns.Msg{ - Ns: []dns.RR{nsRR("awsdns-02.co.uk.", "g-ns-322.awsdns-02.co.uk.")}, - Extra: []dns.RR{dnsIPRR("g-ns-322.awsdns-02.co.uk.", intermediateNS)}, - } - - const amazonNameserver = "ns-1558.awsdns-02.co.uk." - tailscaleNameservers := &dns.Msg{ - Ns: []dns.RR{ - nsRR("tailscale.com.", amazonNameserver), - }, - } - - tailscaleResponses := []mockReply{ - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131"))}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"))}, - }}, - } - - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - rootServerAddr: { - // Query to the root server returns the .com server + a glue record - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - - // Querying the .co.uk nameserver returns the .co.uk nameserver + a glue record. - {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: coukRecord}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: coukRecord}, - }, - - // Queries to the ".com" server return the nameservers - // for tailscale.com, which don't contain a glue - // record. 
- comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Queries to the ".co.uk" nameserver returns the - // address of the intermediate Amazon nameserver. - coukNS: { - {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: intermediateRecord}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: intermediateRecord}, - }, - - // Queries to the intermediate nameserver returns an - // answer for the final Amazon nameserver. - intermediateNS: { - {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR(amazonNameserver, amazonNS)}, - }}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR(amazonNameserver, amazonNSv6)}, - }}, - }, - - // Queries to the actual nameserver work and return - // responses to the query. 
- amazonNS: tailscaleResponses, - amazonNSv6: tailscaleResponses, - }, - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for tailscale.com, verify we get the right responses - addrs, minTTL, err := r.Resolve(context.Background(), "tailscale.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} - -func TestRecursionLimit(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{}, - } - - // Fill out a CNAME chain equal to our recursion limit; we won't get - // this far since each CNAME is more than 1 level "deep", but this - // ensures that we have more than the limit. 
- for i := range maxDepth + 1 { - curr := fmt.Sprintf("%d-tailscale.com.", i) - - tailscaleNameservers := &dns.Msg{ - Ns: []dns.RR{nsRR(curr, "ns-197.awsdns-24.com.")}, - Extra: []dns.RR{dnsIPRR("ns-197.awsdns-24.com.", amazonNS)}, - } - - // Query to the root server returns the .com server + a glue record - mock.replies[rootServerAddr] = append(mock.replies[rootServerAddr], - mockReply{name: curr, qtype: dns.Type(dns.TypeA), resp: comRecord}, - mockReply{name: curr, qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - ) - - // Query to the ".com" server return the nameservers for NN-tailscale.com - mock.replies[comNSAddr] = append(mock.replies[comNSAddr], - mockReply{name: curr, qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - mockReply{name: curr, qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - ) - - // Queries to the nameserver return a CNAME for the n+1th server. - next := fmt.Sprintf("%d-tailscale.com.", i+1) - mock.replies[amazonNS] = append(mock.replies[amazonNS], - mockReply{ - name: curr, - qtype: dns.Type(dns.TypeA), - resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR(curr, next)}, - }, - }, - mockReply{ - name: curr, - qtype: dns.Type(dns.TypeAAAA), - resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR(curr, next)}, - }, - }, - ) - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for the first node in the chain, 0-tailscale.com, and verify - // we get a max-depth error. 
- ctx := context.Background() - _, _, err := r.Resolve(ctx, "0-tailscale.com") - if err == nil { - t.Fatal("expected error, got nil") - } else if !errors.Is(err, ErrMaxDepth) { - t.Fatalf("got err=%v, want ErrMaxDepth", err) - } -} - -func TestInvalidResponses(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver returns an invalid IP address - amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{&dns.A{ - Hdr: dns.RR_Header{ - Name: "tailscale.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - // Note: this is an IPv6 addr in an IPv4 response - A: net.IP(netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5").AsSlice()), - }}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - // This an IPv4 response to an IPv6 query - Answer: []dns.RR{&dns.A{ - Hdr: dns.RR_Header{ - Name: "tailscale.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IP(netip.MustParseAddr("13.248.141.131").AsSlice()), - }}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get no responses since the - // addresses are invalid. 
- _, _, err := r.Resolve(context.Background(), "tailscale.com") - if err == nil { - t.Fatalf("got no error, want error") - } - if !errors.Is(err, ErrAuthoritativeNoResponses) { - t.Fatalf("got err=%v, want %v", err, ErrAuthoritativeNoResponses) - } -} - -// TODO(andrew): test for more edge cases that aren't currently covered: -// * Nameservers that cross between IPv4 and IPv6 -// * Authoritative no replies after following CNAME -// * Authoritative no replies after following non-glue NS record -// * Error querying non-glue NS record followed by success diff --git a/net/dnsfallback/dnsfallback.go b/net/dnsfallback/dnsfallback.go index 8e53c3b293cb4..9843d46f91ab0 100644 --- a/net/dnsfallback/dnsfallback.go +++ b/net/dnsfallback/dnsfallback.go @@ -22,35 +22,20 @@ import ( "net/url" "os" "reflect" - "slices" "sync/atomic" "time" "tailscale.com/atomicfile" - "tailscale.com/envknob" "tailscale.com/health" - "tailscale.com/net/dns/recursive" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/tlsdial" "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" "tailscale.com/types/logger" - "tailscale.com/util/clientmetric" - "tailscale.com/util/singleflight" "tailscale.com/util/slicesx" ) -var ( - optRecursiveResolver = envknob.RegisterOptBool("TS_DNSFALLBACK_RECURSIVE_RESOLVER") - disableRecursiveResolver = envknob.RegisterBool("TS_DNSFALLBACK_DISABLE_RECURSIVE_RESOLVER") // legacy pre-1.52 env knob name -) - -type resolveResult struct { - addrs []netip.Addr - minTTL time.Duration -} - // MakeLookupFunc creates a function that can be used to resolve hostnames // (e.g. as a LookupIPFallback from dnscache.Resolver). // The netMon parameter is optional; if non-nil it's used to do faster interface lookups. 
@@ -68,145 +53,13 @@ type fallbackResolver struct { logf logger.Logf netMon *netmon.Monitor // or nil healthTracker *health.Tracker // or nil - sf singleflight.Group[string, resolveResult] // for tests waitForCompare bool } func (fr *fallbackResolver) Lookup(ctx context.Context, host string) ([]netip.Addr, error) { - // If they've explicitly disabled the recursive resolver with the legacy - // TS_DNSFALLBACK_DISABLE_RECURSIVE_RESOLVER envknob or not set the - // newer TS_DNSFALLBACK_RECURSIVE_RESOLVER to true, then don't use the - // recursive resolver. (tailscale/corp#15261) In the future, we might - // change the default (the opt.Bool being unset) to mean enabled. - if disableRecursiveResolver() || !optRecursiveResolver().EqualBool(true) { - return lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) - } - - addrsCh := make(chan []netip.Addr, 1) - - // Run the recursive resolver in the background so we can - // compare the results. For tests, we also allow waiting for the - // comparison to complete; normally, we do this entirely asynchronously - // so as not to block the caller. - var done chan struct{} - if fr.waitForCompare { - done = make(chan struct{}) - go func() { - defer close(done) - fr.compareWithRecursive(ctx, addrsCh, host) - }() - } else { - go fr.compareWithRecursive(ctx, addrsCh, host) - } - - addrs, err := lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) - if err != nil { - addrsCh <- nil - return nil, err - } - - addrsCh <- slices.Clone(addrs) - if fr.waitForCompare { - select { - case <-done: - case <-ctx.Done(): - } - } - return addrs, nil -} - -// compareWithRecursive is responsible for comparing the DNS resolution -// performed via the "normal" path (bootstrap DNS requests to the DERP servers) -// with DNS resolution performed with our in-process recursive DNS resolver. 
-// -// It will select on addrsCh to read exactly one set of addrs (returned by the -// "normal" path) and compare against the results returned by the recursive -// resolver. If ctx is canceled, then it will abort. -func (fr *fallbackResolver) compareWithRecursive( - ctx context.Context, - addrsCh <-chan []netip.Addr, - host string, -) { - logf := logger.WithPrefix(fr.logf, "recursive: ") - - // Ensure that we catch panics while we're testing this - // code path; this should never panic, but we don't - // want to take down the process by having the panic - // propagate to the top of the goroutine's stack and - // then terminate. - defer func() { - if r := recover(); r != nil { - logf("bootstrap DNS: recovered panic: %v", r) - metricRecursiveErrors.Add(1) - } - }() - - // Don't resolve the same host multiple times - // concurrently; if we end up in a tight loop, this can - // take up a lot of CPU. - var didRun bool - result, err, _ := fr.sf.Do(host, func() (resolveResult, error) { - didRun = true - resolver := &recursive.Resolver{ - Dialer: netns.NewDialer(logf, fr.netMon), - Logf: logf, - } - addrs, minTTL, err := resolver.Resolve(ctx, host) - if err != nil { - logf("error using recursive resolver: %v", err) - metricRecursiveErrors.Add(1) - return resolveResult{}, err - } - return resolveResult{addrs, minTTL}, nil - }) - - // The singleflight function handled errors; return if - // there was one. Additionally, don't bother doing the - // comparison if we waited on another singleflight - // caller; the results are likely to be the same, so - // rather than spam the logs we can just exit and let - // the singleflight call that did execute do the - // comparison. - // - // Returning here is safe because the addrsCh channel - // is buffered, so the main function won't block even - // if we never read from it. 
- if err != nil || !didRun { - return - } - - addrs, minTTL := result.addrs, result.minTTL - compareAddr := func(a, b netip.Addr) int { return a.Compare(b) } - slices.SortFunc(addrs, compareAddr) - - // Wait for a response from the main function; try this once before we - // check whether the context is canceled since selects are - // nondeterministic. - var oldAddrs []netip.Addr - select { - case oldAddrs = <-addrsCh: - // All good; continue - default: - // Now block. - select { - case oldAddrs = <-addrsCh: - case <-ctx.Done(): - return - } - } - slices.SortFunc(oldAddrs, compareAddr) - - matches := slices.Equal(addrs, oldAddrs) - - logf("bootstrap DNS comparison: matches=%v oldAddrs=%v addrs=%v minTTL=%v", matches, oldAddrs, addrs, minTTL) - - if matches { - metricRecursiveMatches.Add(1) - } else { - metricRecursiveMismatches.Add(1) - } + return lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) } func lookup(ctx context.Context, host string, logf logger.Logf, ht *health.Tracker, netMon *netmon.Monitor) ([]netip.Addr, error) { @@ -428,9 +281,3 @@ func SetCachePath(path string, logf logger.Logf) { cachedDERPMap.Store(dm) logf("[v2] dnsfallback: SetCachePath loaded cached DERP map") } - -var ( - metricRecursiveMatches = clientmetric.NewCounter("dnsfallback_recursive_matches") - metricRecursiveMismatches = clientmetric.NewCounter("dnsfallback_recursive_mismatches") - metricRecursiveErrors = clientmetric.NewCounter("dnsfallback_recursive_errors") -) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index b3e2b7f0e1f72..c115332fa8b16 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -132,7 +132,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L github.com/mdlayher/netlink/nltest from github.com/google/nftables L github.com/mdlayher/sdnotify from tailscale.com/util/systemd LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive LDW 💣 
github.com/mitchellh/go-ps from tailscale.com/safesocket DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ @@ -272,7 +271,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ @@ -448,8 +446,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ LDW golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ LDW golang.org/x/net/proxy from tailscale.com/net/netns DI golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ From 5e3e536c2d1faf9109a703f557c96b4144667d53 Mon Sep 17 00:00:00 2001 From: Esteban-Bermudez Date: Thu, 8 May 2025 21:11:33 -0700 Subject: [PATCH 0364/1093] cmd/tailscale/cli: add `remove` subcommand Fixes #12255 Add a new subcommand to `switch` for removing a profile from the local client. This does not delete the profile from the Tailscale account, but removes it from the local machine. This functionality is available on the GUI's, but not yet on the CLI. 
Signed-off-by: Esteban-Bermudez --- cmd/tailscale/cli/switch.go | 62 +++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index 0677da1b31868..a06630f7866b5 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -34,6 +34,22 @@ This command is currently in alpha and may change in the future.`, return fs }(), Exec: switchProfile, + + // Add remove subcommand + Subcommands: []*ffcli.Command{ + { + Name: "remove", + ShortUsage: "tailscale switch remove ", + ShortHelp: "Remove a Tailscale account", + LongHelp: `"tailscale switch remove" removes a Tailscale account from the +local machine. This does not delete the account itself, but +it will no longer be available for switching to. You can +add it back by logging in again. + +This command is currently in alpha and may change in the future.`, + Exec: removeProfile, + }, + }, } func init() { @@ -186,3 +202,49 @@ func switchProfile(ctx context.Context, args []string) error { } } } + +func removeProfile(ctx context.Context, args []string) error { + if len(args) != 1 { + outln("usage: tailscale switch remove NAME") + os.Exit(1) + } + cp, all, err := localClient.ProfileStatus(ctx) + if err != nil { + errf("Failed to remove account: %v\n", err) + os.Exit(1) + } + + profID, ok := matchProfile(args[0], all) + if !ok { + errf("No profile named %q\n", args[0]) + os.Exit(1) + } + + if profID == cp.ID { + printf("Already on account %q\n", args[0]) + os.Exit(0) + } + + return localClient.DeleteProfile(ctx, profID) +} + +func matchProfile(arg string, all []ipn.LoginProfile) (ipn.ProfileID, bool) { + // Allow matching by ID, Tailnet, or Account + // in that order. 
+ for _, p := range all { + if p.ID == ipn.ProfileID(arg) { + return p.ID, true + } + } + for _, p := range all { + if p.NetworkProfile.DomainName == arg { + return p.ID, true + } + } + for _, p := range all { + if p.Name == arg { + return p.ID, true + } + } + return "", false +} From 1c9aaa444da163bf0597cef09a100a4e7a0221b8 Mon Sep 17 00:00:00 2001 From: Esteban-Bermudez Date: Wed, 25 Jun 2025 12:15:53 -0700 Subject: [PATCH 0365/1093] cmd/tailscale/cli: use helper function for matching profiles This makes the `switch` command use the helper `matchProfile` function that was introduced in the `remove` sub command. Signed-off-by: Esteban-Bermudez --- cmd/tailscale/cli/switch.go | 43 +++++++------------------------------ 1 file changed, 8 insertions(+), 35 deletions(-) diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index a06630f7866b5..b315a21e7437f 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -122,40 +122,8 @@ func switchProfile(ctx context.Context, args []string) error { errf("Failed to switch to account: %v\n", err) os.Exit(1) } - var profID ipn.ProfileID - // Allow matching by ID, Tailnet, Account, or Display Name - // in that order. 
- for _, p := range all { - if p.ID == ipn.ProfileID(args[0]) { - profID = p.ID - break - } - } - if profID == "" { - for _, p := range all { - if p.NetworkProfile.DomainName == args[0] { - profID = p.ID - break - } - } - } - if profID == "" { - for _, p := range all { - if p.Name == args[0] { - profID = p.ID - break - } - } - } - if profID == "" { - for _, p := range all { - if p.NetworkProfile.DisplayName == args[0] { - profID = p.ID - break - } - } - } - if profID == "" { + profID, ok := matchProfile(args[0], all) + if !ok { errf("No profile named %q\n", args[0]) os.Exit(1) } @@ -229,7 +197,7 @@ func removeProfile(ctx context.Context, args []string) error { } func matchProfile(arg string, all []ipn.LoginProfile) (ipn.ProfileID, bool) { - // Allow matching by ID, Tailnet, or Account + // Allow matching by ID, Tailnet, Account, or Display Name // in that order. for _, p := range all { if p.ID == ipn.ProfileID(arg) { @@ -246,5 +214,10 @@ func matchProfile(arg string, all []ipn.LoginProfile) (ipn.ProfileID, bool) { return p.ID, true } } + for _, p := range all { + if p.NetworkProfile.DisplayName == arg { + return p.ID, true + } + } return "", false } From cd153aa644dd861602e386e71df20a61733b56a8 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Thu, 11 Sep 2025 13:11:41 +0100 Subject: [PATCH 0366/1093] control, ipn, tailcfg: enable seamless key renewal by default Previously, seamless key renewal was an opt-in feature. Customers had to set a `seamless-key-renewal` node attribute in their policy file. This patch enables seamless key renewal by default for all clients. It includes a `disable-seamless-key-renewal` node attribute we can set in Control, so we can manage the rollout and disable the feature for clients with known bugs. This new attribute makes the feature opt-out. 
Updates tailscale/corp#31479 Signed-off-by: Alex Chan --- control/controlknobs/controlknobs.go | 22 +++++++++++++++++++--- ipn/ipnlocal/local.go | 8 ++++---- tailcfg/tailcfg.go | 18 +++++++++++++++--- 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index 2578744cade65..09c16b8b12f1e 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -62,8 +62,9 @@ type Knobs struct { // netfiltering, unless overridden by the user. LinuxForceNfTables atomic.Bool - // SeamlessKeyRenewal is whether to enable the alpha functionality of - // renewing node keys without breaking connections. + // SeamlessKeyRenewal is whether to renew node keys without breaking connections. + // This is enabled by default in 1.90 and later, but we but we can remotely disable + // it from the control plane if there's a problem. // http://go/seamless-key-renewal SeamlessKeyRenewal atomic.Bool @@ -128,6 +129,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { forceIPTables = has(tailcfg.NodeAttrLinuxMustUseIPTables) forceNfTables = has(tailcfg.NodeAttrLinuxMustUseNfTables) seamlessKeyRenewal = has(tailcfg.NodeAttrSeamlessKeyRenewal) + disableSeamlessKeyRenewal = has(tailcfg.NodeAttrDisableSeamlessKeyRenewal) probeUDPLifetime = has(tailcfg.NodeAttrProbeUDPLifetime) appCStoreRoutes = has(tailcfg.NodeAttrStoreAppCRoutes) userDialUseRoutes = has(tailcfg.NodeAttrUserDialUseRoutes) @@ -154,7 +156,6 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { k.SilentDisco.Store(silentDisco) k.LinuxForceIPTables.Store(forceIPTables) k.LinuxForceNfTables.Store(forceNfTables) - k.SeamlessKeyRenewal.Store(seamlessKeyRenewal) k.ProbeUDPLifetime.Store(probeUDPLifetime) k.AppCStoreRoutes.Store(appCStoreRoutes) k.UserDialUseRoutes.Store(userDialUseRoutes) @@ -162,6 +163,21 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { 
k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT) k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection) k.DisableSkipStatusQueue.Store(disableSkipStatusQueue) + + // If both attributes are present, then "enable" should win. This reflects + // the history of seamless key renewal. + // + // Before 1.90, seamless was a private alpha, opt-in feature. Devices would + // only seamless do if customers opted in using the seamless renewal attr. + // + // In 1.90 and later, seamless is the default behaviour, and devices will use + // seamless unless explicitly told not to by control (e.g. if we discover + // a bug and want clients to use the prior behaviour). + // + // If a customer has opted in to the pre-1.90 seamless implementation, we + // don't want to switch it off for them -- we only want to switch it off for + // devices that haven't opted in. + k.SeamlessKeyRenewal.Store(seamlessKeyRenewal || !disableSeamlessKeyRenewal) } // AsDebugJSON returns k as something that can be marshalled with json.Marshal diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 0173491650e22..1e102d53eedf2 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7420,10 +7420,10 @@ func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { return ri, nil } -// seamlessRenewalEnabled reports whether seamless key renewals are enabled -// (i.e. we saw our self node with the SeamlessKeyRenewal attr in a netmap). -// This enables beta functionality of renewing node keys without breaking -// connections. +// seamlessRenewalEnabled reports whether seamless key renewals are enabled. +// +// As of 2025-09-11, this is the default behaviour unless nodes receive +// [tailcfg.NodeAttrDisableSeamlessKeyRenewal] in their netmap. 
func (b *LocalBackend) seamlessRenewalEnabled() bool { return b.ControlKnobs().SeamlessKeyRenewal.Load() } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 6c1357a6336c3..6130df9013e1d 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -170,7 +170,8 @@ type CapabilityVersion int // - 123: 2025-07-28: fix deadlock regression from cryptokey routing change (issue #16651) // - 124: 2025-08-08: removed NodeAttrDisableMagicSockCryptoRouting support, crypto routing is now mandatory // - 125: 2025-08-11: dnstype.Resolver adds UseWithExitNode field. -const CurrentCapabilityVersion CapabilityVersion = 125 +// - 126: 2025-09-17: Client uses seamless key renewal unless disabled by control (tailscale/corp#31479) +const CurrentCapabilityVersion CapabilityVersion = 126 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2530,8 +2531,19 @@ const ( // This cannot be set simultaneously with NodeAttrLinuxMustUseIPTables. NodeAttrLinuxMustUseNfTables NodeCapability = "linux-netfilter?v=nftables" - // NodeAttrSeamlessKeyRenewal makes clients enable beta functionality - // of renewing node keys without breaking connections. + // NodeAttrDisableSeamlessKeyRenewal disables seamless key renewal, which is + // enabled by default in clients as of 2025-09-17 (1.90 and later). + // + // We will use this attribute to manage the rollout, and disable seamless in + // clients with known bugs. + // http://go/seamless-key-renewal + NodeAttrDisableSeamlessKeyRenewal NodeCapability = "disable-seamless-key-renewal" + + // NodeAttrSeamlessKeyRenewal was used to opt-in to seamless key renewal + // during its private alpha. + // + // Deprecated: NodeAttrSeamlessKeyRenewal is deprecated as of CapabilityVersion 126, + // because seamless key renewal is now enabled by default. 
NodeAttrSeamlessKeyRenewal NodeCapability = "seamless-key-renewal" // NodeAttrProbeUDPLifetime makes the client probe UDP path lifetime at the From 61751a0c9a80ca83dc55ffacff6d595e7c6c348d Mon Sep 17 00:00:00 2001 From: Erisa A Date: Thu, 18 Sep 2025 15:50:01 +0100 Subject: [PATCH 0367/1093] scripts/installer.sh: add Siemens Industrial OS (#17185) Fixes #17179 Signed-off-by: Erisa A --- scripts/installer.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index 4d968cd2b7285..b40177005821b 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -110,6 +110,17 @@ main() { APT_KEY_TYPE="keyring" fi ;; + industrial-os) + OS="debian" + PACKAGETYPE="apt" + if [ "$(printf %.1s "$VERSION_ID")" -lt 5 ]; then + VERSION="buster" + APT_KEY_TYPE="legacy" + else + VERSION="bullseye" + APT_KEY_TYPE="keyring" + fi + ;; parrot|mendel) OS="debian" PACKAGETYPE="apt" From 73bbd7cacaf1990926a24032c04e1fa379d0cf72 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 16 Sep 2025 20:22:14 -0700 Subject: [PATCH 0368/1093] build_dist.sh: add -trimpath Saves 81KB (20320440 to 20238520 bytes for linux/amd64) Updates #1278 Change-Id: Id607480c76220c74c8854ef1a2459aee650ad7b6 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_dist.sh b/build_dist.sh index be0d4d47e0564..564e30221db1c 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -57,4 +57,4 @@ while [ "$#" -gt 1 ]; do esac done -exec $go build ${tags:+-tags=$tags} -ldflags "$ldflags" "$@" +exec $go build ${tags:+-tags=$tags} -trimpath -ldflags "$ldflags" "$@" From 70dfdac609396440308a390f58dff0a97f78f0f4 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 18 Sep 2025 09:10:33 -0700 Subject: [PATCH 0369/1093] prober: allow custom tls.Config for TLS probes (#17186) Updates https://github.com/tailscale/corp/issues/28569 Signed-off-by: Andrew Lytvynov --- prober/derp.go | 5 +++-- 
prober/derp_test.go | 2 +- prober/dns_example_test.go | 3 ++- prober/tls.go | 30 ++++++++++++++++-------------- prober/tls_test.go | 18 ++++++++++++++++-- 5 files changed, 38 insertions(+), 20 deletions(-) diff --git a/prober/derp.go b/prober/derp.go index c7a82317dcabc..52e56fd4eff1e 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -8,6 +8,7 @@ import ( "cmp" "context" crand "crypto/rand" + "crypto/tls" "encoding/binary" "encoding/json" "errors" @@ -68,7 +69,7 @@ type derpProber struct { ProbeMap ProbeClass // Probe classes for probing individual derpers. - tlsProbeFn func(string) ProbeClass + tlsProbeFn func(string, *tls.Config) ProbeClass udpProbeFn func(string, int) ProbeClass meshProbeFn func(string, string) ProbeClass bwProbeFn func(string, string, int64) ProbeClass @@ -206,7 +207,7 @@ func (d *derpProber) probeMapFn(ctx context.Context) error { if d.probes[n] == nil { log.Printf("adding DERP TLS probe for %s (%s) every %v", server.Name, region.RegionName, d.tlsInterval) derpPort := cmp.Or(server.DERPPort, 443) - d.probes[n] = d.p.Run(n, d.tlsInterval, labels, d.tlsProbeFn(fmt.Sprintf("%s:%d", server.HostName, derpPort))) + d.probes[n] = d.p.Run(n, d.tlsInterval, labels, d.tlsProbeFn(fmt.Sprintf("%s:%d", server.HostName, derpPort), nil)) } } diff --git a/prober/derp_test.go b/prober/derp_test.go index 93b8d760b3f18..1ace9983c9ca4 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -74,7 +74,7 @@ func TestDerpProber(t *testing.T) { p: p, derpMapURL: srv.URL, tlsInterval: time.Second, - tlsProbeFn: func(_ string) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, + tlsProbeFn: func(_ string, _ *tls.Config) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, udpInterval: time.Second, udpProbeFn: func(_ string, _ int) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, meshInterval: time.Second, diff --git a/prober/dns_example_test.go b/prober/dns_example_test.go index 
a8326fd721232..089816919489a 100644 --- a/prober/dns_example_test.go +++ b/prober/dns_example_test.go @@ -5,6 +5,7 @@ package prober_test import ( "context" + "crypto/tls" "flag" "fmt" "log" @@ -40,7 +41,7 @@ func ExampleForEachAddr() { // This function is called every time we discover a new IP address to check. makeTLSProbe := func(addr netip.Addr) []*prober.Probe { - pf := prober.TLSWithIP(*hostname, netip.AddrPortFrom(addr, 443)) + pf := prober.TLSWithIP(netip.AddrPortFrom(addr, 443), &tls.Config{ServerName: *hostname}) if *verbose { logger := logger.WithPrefix(log.Printf, fmt.Sprintf("[tls %s]: ", addr)) pf = probeLogWrapper(logger, pf) diff --git a/prober/tls.go b/prober/tls.go index 4fb4aa9c6becf..777b2b5089d8f 100644 --- a/prober/tls.go +++ b/prober/tls.go @@ -9,9 +9,9 @@ import ( "crypto/x509" "fmt" "io" - "net" "net/http" "net/netip" + "slices" "time" "tailscale.com/util/multierr" @@ -28,33 +28,31 @@ const letsEncryptStartedStaplingCRL int64 = 1746576000 // 2025-05-07 00:00:00 UT // The ProbeFunc connects to a hostPort (host:port string), does a TLS // handshake, verifies that the hostname matches the presented certificate, // checks certificate validity time and OCSP revocation status. -func TLS(hostPort string) ProbeClass { +// +// The TLS config is optional and may be nil. +func TLS(hostPort string, config *tls.Config) ProbeClass { return ProbeClass{ Probe: func(ctx context.Context) error { - certDomain, _, err := net.SplitHostPort(hostPort) - if err != nil { - return err - } - return probeTLS(ctx, certDomain, hostPort) + return probeTLS(ctx, config, hostPort) }, Class: "tls", } } -// TLSWithIP is like TLS, but dials the provided dialAddr instead -// of using DNS resolution. The certDomain is the expected name in -// the cert (and the SNI name to send). -func TLSWithIP(certDomain string, dialAddr netip.AddrPort) ProbeClass { +// TLSWithIP is like TLS, but dials the provided dialAddr instead of using DNS +// resolution. 
Use config.ServerName to send SNI and validate the name in the +// cert. +func TLSWithIP(dialAddr netip.AddrPort, config *tls.Config) ProbeClass { return ProbeClass{ Probe: func(ctx context.Context) error { - return probeTLS(ctx, certDomain, dialAddr.String()) + return probeTLS(ctx, config, dialAddr.String()) }, Class: "tls", } } -func probeTLS(ctx context.Context, certDomain string, dialHostPort string) error { - dialer := &tls.Dialer{Config: &tls.Config{ServerName: certDomain}} +func probeTLS(ctx context.Context, config *tls.Config, dialHostPort string) error { + dialer := &tls.Dialer{Config: config} conn, err := dialer.DialContext(ctx, "tcp", dialHostPort) if err != nil { return fmt.Errorf("connecting to %q: %w", dialHostPort, err) @@ -108,6 +106,10 @@ func validateConnState(ctx context.Context, cs *tls.ConnectionState) (returnerr } if len(leafCert.CRLDistributionPoints) == 0 { + if !slices.Contains(leafCert.Issuer.Organization, "Let's Encrypt") { + // LE certs contain a CRL, but certs from other CAs might not. + return + } if leafCert.NotBefore.Before(time.Unix(letsEncryptStartedStaplingCRL, 0)) { // Certificate might not have a CRL. return diff --git a/prober/tls_test.go b/prober/tls_test.go index f6ca4aeb19be6..86fba91b98836 100644 --- a/prober/tls_test.go +++ b/prober/tls_test.go @@ -83,7 +83,7 @@ func TestTLSConnection(t *testing.T) { srv.StartTLS() defer srv.Close() - err = probeTLS(context.Background(), "fail.example.com", srv.Listener.Addr().String()) + err = probeTLS(context.Background(), &tls.Config{ServerName: "fail.example.com"}, srv.Listener.Addr().String()) // The specific error message here is platform-specific ("certificate is not trusted" // on macOS and "certificate signed by unknown authority" on Linux), so only check // that it contains the word 'certificate'. 
@@ -269,40 +269,54 @@ func TestCRL(t *testing.T) { name string cert *x509.Certificate crlBytes []byte + issuer pkix.Name wantErr string }{ { "ValidCert", leafCertParsed, emptyRlBytes, + caCert.Issuer, "", }, { "RevokedCert", leafCertParsed, rlBytes, + caCert.Issuer, "has been revoked on", }, { "EmptyCRL", leafCertParsed, emptyRlBytes, + caCert.Issuer, "", }, { - "NoCRL", + "NoCRLLetsEncrypt", leafCertParsed, nil, + pkix.Name{CommonName: "tlsprobe.test", Organization: []string{"Let's Encrypt"}}, "no CRL server presented in leaf cert for", }, + { + "NoCRLOtherCA", + leafCertParsed, + nil, + caCert.Issuer, + "", + }, { "NotBeforeCRLStaplingDate", noCRLStapledParsed, nil, + caCert.Issuer, "", }, } { t.Run(tt.name, func(t *testing.T) { + tt.cert.Issuer = tt.issuer cs := &tls.ConnectionState{PeerCertificates: []*x509.Certificate{tt.cert, caCert}} if tt.crlBytes != nil { crlServer.crlBytes = tt.crlBytes From 4f211ea5c5d40f14a861d3482a6edc75342b627d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 18 Sep 2025 12:44:06 -0700 Subject: [PATCH 0370/1093] util/eventbus: add a LogAllEvents helper for testing (#17187) When developing (and debugging) tests, it is useful to be able to see all the traffic that transits the event bus during the execution of a test. Updates #15160 Change-Id: I929aee62ccf13bdd4bd07d786924ce9a74acd17a Signed-off-by: M. J. Fromberger --- util/eventbus/eventbustest/eventbustest.go | 22 +++++++++++++++++++ .../eventbustest/eventbustest_test.go | 7 ++++++ 2 files changed, 29 insertions(+) diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index c32e7114036dc..0916ae52280cf 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -263,3 +263,25 @@ func EqualTo[T any](want T) func(T) error { return nil } } + +// LogAllEvents logs summaries of all the events routed via the specified bus +// during the execution of the test governed by t. 
This is intended to support +// development and debugging of tests. +func LogAllEvents(t testing.TB, bus *eventbus.Bus) { + dw := bus.Debugger().WatchBus() + done := make(chan struct{}) + go func() { + defer close(done) + var i int + for { + select { + case <-dw.Done(): + return + case re := <-dw.Events(): + i++ + t.Logf("[eventbus] #%[1]d: %[2]T | %+[2]v", i, re.Event) + } + } + }() + t.Cleanup(func() { dw.Close(); <-done }) +} diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index f8b37eefec716..f1b21ea8fc9ee 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -4,6 +4,7 @@ package eventbustest_test import ( + "flag" "fmt" "strings" "testing" @@ -13,6 +14,8 @@ import ( "tailscale.com/util/eventbus/eventbustest" ) +var doDebug = flag.Bool("debug", false, "Enable debug logging") + type EventFoo struct { Value int } @@ -109,7 +112,11 @@ func TestExpectFilter(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + if *doDebug { + eventbustest.LogAllEvents(t, bus) + } tw := eventbustest.NewWatcher(t, bus) + // TODO(cmol): When synctest is out of experimental, use that instead: // https://go.dev/blog/synctest tw.TimeOut = 10 * time.Millisecond From 78035fb9d2e894b4f307f4a840a7f83aba7e0f2c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 17 Sep 2025 09:03:17 -0700 Subject: [PATCH 0371/1093] feature/featuretags,cmd/omitsize: support feature dependencies This produces the following omitsizes output: Starting with everything and removing a feature... tailscaled tailscale combined (linux/amd64) 27005112 18153656 39727288 - 7696384 - 7282688 -19607552 .. remove * - 167936 - 110592 - 245760 .. remove acme - 1925120 - 0 - 7340032 .. remove aws - 4096 - 0 - 8192 .. remove bird - 20480 - 12288 - 32768 .. remove capture - 0 - 57344 - 61440 .. remove completion - 249856 - 696320 - 692224 .. 
remove debugeventbus - 12288 - 4096 - 24576 .. remove debugportmapper - 0 - 0 - 0 .. remove desktop_sessions - 815104 - 8192 - 544768 .. remove drive - 65536 - 356352 - 425984 .. remove kube - 233472 - 286720 - 311296 .. remove portmapper (and debugportmapper) - 90112 - 0 - 110592 .. remove relayserver - 655360 - 712704 - 598016 .. remove serve (and webclient) - 937984 - 0 - 950272 .. remove ssh - 708608 - 401408 - 344064 .. remove syspolicy - 0 - 4071424 -11132928 .. remove systray - 159744 - 61440 - 225280 .. remove taildrop - 618496 - 454656 - 757760 .. remove tailnetlock - 122880 - 0 - 131072 .. remove tap - 442368 - 0 - 483328 .. remove tpm - 16384 - 0 - 20480 .. remove wakeonlan - 278528 - 368640 - 286720 .. remove webclient Starting at a minimal binary and adding one feature back... tailscaled tailscale combined (linux/amd64) 19308728 10870968 20119736 omitting everything + 352256 + 454656 + 643072 .. add acme + 2035712 + 0 + 2035712 .. add aws + 8192 + 0 + 8192 .. add bird + 20480 + 12288 + 36864 .. add capture + 0 + 57344 + 61440 .. add completion + 262144 + 274432 + 266240 .. add debugeventbus + 344064 + 118784 + 360448 .. add debugportmapper (and portmapper) + 0 + 0 + 0 .. add desktop_sessions + 978944 + 8192 + 991232 .. add drive + 61440 + 364544 + 425984 .. add kube + 331776 + 110592 + 335872 .. add portmapper + 122880 + 0 + 102400 .. add relayserver + 598016 + 155648 + 737280 .. add serve + 1142784 + 0 + 1142784 .. add ssh + 708608 + 860160 + 720896 .. add syspolicy + 0 + 4079616 + 6221824 .. add systray + 180224 + 65536 + 237568 .. add taildrop + 647168 + 393216 + 720896 .. add tailnetlock + 122880 + 0 + 126976 .. add tap + 446464 + 0 + 454656 .. add tpm + 20480 + 0 + 24576 .. add wakeonlan + 1011712 + 1011712 + 1138688 .. 
add webclient (and serve) Fixes #17139 Change-Id: Ia91be2da00de8481a893243d577d20e988a0920a Signed-off-by: Brad Fitzpatrick --- cmd/featuretags/featuretags.go | 17 ++- cmd/omitsize/omitsize.go | 158 ++++++++++++++++++------ feature/featuretags/featuretags.go | 105 ++++++++++++---- feature/featuretags/featuretags_test.go | 81 ++++++++++++ 4 files changed, 299 insertions(+), 62 deletions(-) create mode 100644 feature/featuretags/featuretags_test.go diff --git a/cmd/featuretags/featuretags.go b/cmd/featuretags/featuretags.go index c34adbb3f1f3e..8c8a2ceaf54ff 100644 --- a/cmd/featuretags/featuretags.go +++ b/cmd/featuretags/featuretags.go @@ -14,6 +14,7 @@ import ( "strings" "tailscale.com/feature/featuretags" + "tailscale.com/util/set" ) var ( @@ -38,7 +39,9 @@ func main() { var keep = map[featuretags.FeatureTag]bool{} for t := range strings.SplitSeq(*add, ",") { if t != "" { - keep[featuretags.FeatureTag(t)] = true + for ft := range featuretags.Requires(featuretags.FeatureTag(t)) { + keep[ft] = true + } } } var tags []string @@ -55,6 +58,7 @@ func main() { } } } + removeSet := set.Set[featuretags.FeatureTag]{} for v := range strings.SplitSeq(*remove, ",") { if v == "" { continue @@ -63,7 +67,16 @@ func main() { if _, ok := features[f]; !ok { log.Fatalf("unknown feature %q in --remove", f) } - tags = append(tags, f.OmitTag()) + removeSet.Add(f) + } + for ft := range removeSet { + set := featuretags.RequiredBy(ft) + for dependent := range set { + if !removeSet.Contains(dependent) { + log.Fatalf("cannot remove %q without also removing %q, which depends on it", ft, dependent) + } + } + tags = append(tags, ft.OmitTag()) } slices.Sort(tags) tags = slices.Compact(tags) diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go index 5940ba5207f29..35e03d268e186 100644 --- a/cmd/omitsize/omitsize.go +++ b/cmd/omitsize/omitsize.go @@ -10,56 +10,69 @@ import ( "flag" "fmt" "log" + "maps" "os" "os/exec" "path/filepath" "slices" "strconv" "strings" + "sync" 
"tailscale.com/feature/featuretags" + "tailscale.com/util/set" ) var ( cacheDir = flag.String("cachedir", "", "if non-empty, use this directory to store cached size results to speed up subsequent runs. The tool does not consider the git status when deciding whether to use the cache. It's on you to nuke it between runs if the tree changed.") - features = flag.String("features", "", "comma-separated list of features to list in the table, with or without the ts_omit_ prefix. It may also contain a '+' sign(s) for ANDing features together. If empty, all omittable features are considered one at a time.") + features = flag.String("features", "", "comma-separated list of features to list in the table, without the ts_omit_ prefix. It may also contain a '+' sign(s) for ANDing features together. If empty, all omittable features are considered one at a time.") showRemovals = flag.Bool("show-removals", false, "if true, show a table of sizes removing one feature at a time from the full set.") ) -func main() { - flag.Parse() - - var all []string - var allOmittable []string - +// allOmittable returns the list of all build tags that remove features. 
+var allOmittable = sync.OnceValue(func() []string { + var ret []string // all build tags that can be omitted for k := range featuretags.Features { if k.IsOmittable() { - allOmittable = append(allOmittable, k.OmitTag()) + ret = append(ret, k.OmitTag()) } } + slices.Sort(ret) + return ret +}) + +func main() { + flag.Parse() + + // rows is a set (usually of size 1) of feature(s) to add/remove, without deps + // included at this point (as dep direction depends on whether we're adding or removing, + // so it's expanded later) + var rows []set.Set[featuretags.FeatureTag] if *features == "" { - all = slices.Clone(allOmittable) + for _, k := range slices.Sorted(maps.Keys(featuretags.Features)) { + if k.IsOmittable() { + rows = append(rows, set.Of(k)) + } + } } else { for v := range strings.SplitSeq(*features, ",") { - var withOmit []string - for v := range strings.SplitSeq(v, "+") { - if !strings.HasPrefix(v, "ts_omit_") { - v = "ts_omit_" + v + s := set.Set[featuretags.FeatureTag]{} + for fts := range strings.SplitSeq(v, "+") { + ft := featuretags.FeatureTag(fts) + if _, ok := featuretags.Features[ft]; !ok { + log.Fatalf("unknown feature %q", v) } - withOmit = append(withOmit, v) + s.Add(ft) } - all = append(all, strings.Join(withOmit, "+")) + rows = append(rows, s) } } - slices.Sort(all) - all = slices.Compact(all) - - minD := measure("tailscaled", allOmittable...) - minC := measure("tailscale", allOmittable...) - minBoth := measure("tailscaled", append(slices.Clone(allOmittable), "ts_include_cli")...) + minD := measure("tailscaled", allOmittable()...) + minC := measure("tailscale", allOmittable()...) + minBoth := measure("tailscaled", append(slices.Clone(allOmittable()), "ts_include_cli")...) 
if *showRemovals { baseD := measure("tailscaled") @@ -71,33 +84,108 @@ func main() { fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") fmt.Printf("%9d %9d %9d\n", baseD, baseC, baseBoth) - fmt.Printf("-%8d -%8d -%8d omit-all\n", baseD-minD, baseC-minC, baseBoth-minBoth) + fmt.Printf("-%8d -%8d -%8d .. remove *\n", baseD-minD, baseC-minC, baseBoth-minBoth) - for _, t := range all { - if strings.Contains(t, "+") { - log.Fatalf("TODO: make --show-removals support ANDed features like %q", t) - } - sizeD := measure("tailscaled", t) - sizeC := measure("tailscale", t) - sizeBoth := measure("tailscaled", append([]string{t}, "ts_include_cli")...) + for _, s := range rows { + title, tags := computeRemove(s) + sizeD := measure("tailscaled", tags...) + sizeC := measure("tailscale", tags...) + sizeBoth := measure("tailscaled", append(slices.Clone(tags), "ts_include_cli")...) saveD := max(baseD-sizeD, 0) saveC := max(baseC-sizeC, 0) saveBoth := max(baseBoth-sizeBoth, 0) - fmt.Printf("-%8d -%8d -%8d %s\n", saveD, saveC, saveBoth, t) + fmt.Printf("-%8d -%8d -%8d .. remove %s\n", saveD, saveC, saveBoth, title) + } } - fmt.Printf("\nStarting at a minimal binary and adding one feature back...\n") + fmt.Printf("\nStarting at a minimal binary and adding one feature back...\n\n") fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) - for _, t := range all { - tags := allExcept(allOmittable, strings.Split(t, "+")) + for _, s := range rows { + title, tags := computeAdd(s) sizeD := measure("tailscaled", tags...) sizeC := measure("tailscale", tags...) sizeBoth := measure("tailscaled", append(tags, "ts_include_cli")...) - fmt.Printf("+%8d +%8d +%8d .. add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), strings.ReplaceAll(t, "ts_omit_", "")) + + fmt.Printf("+%8d +%8d +%8d .. 
add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), title) + } + +} + +// computeAdd returns a human-readable title of a set of features and the build +// tags to use to add that set of features to a minimal binary, including their +// feature dependencies. +func computeAdd(s set.Set[featuretags.FeatureTag]) (title string, tags []string) { + allSet := set.Set[featuretags.FeatureTag]{} // s + all their outbound dependencies + var explicitSorted []string // string versions of s, sorted + for ft := range s { + allSet.AddSet(featuretags.Requires(ft)) + if ft.IsOmittable() { + explicitSorted = append(explicitSorted, string(ft)) + } + } + slices.Sort(explicitSorted) + + var removeTags []string + for ft := range allSet { + if ft.IsOmittable() { + removeTags = append(removeTags, ft.OmitTag()) + } + } + + var titleBuf strings.Builder + titleBuf.WriteString(strings.Join(explicitSorted, "+")) + var and []string + for ft := range allSet { + if !s.Contains(ft) { + and = append(and, string(ft)) + } + } + if len(and) > 0 { + slices.Sort(and) + fmt.Fprintf(&titleBuf, " (and %s)", strings.Join(and, "+")) + } + tags = allExcept(allOmittable(), removeTags) + return titleBuf.String(), tags +} + +// computeRemove returns a human-readable title of a set of features and the build +// tags to use to remove that set of features from a full binary, including removing +// any features that depend on features in the provided set. 
+func computeRemove(s set.Set[featuretags.FeatureTag]) (title string, tags []string) { + allSet := set.Set[featuretags.FeatureTag]{} // s + all their inbound dependencies + var explicitSorted []string // string versions of s, sorted + for ft := range s { + allSet.AddSet(featuretags.RequiredBy(ft)) + if ft.IsOmittable() { + explicitSorted = append(explicitSorted, string(ft)) + } + } + slices.Sort(explicitSorted) + + var removeTags []string + for ft := range allSet { + if ft.IsOmittable() { + removeTags = append(removeTags, ft.OmitTag()) + } + } + + var titleBuf strings.Builder + titleBuf.WriteString(strings.Join(explicitSorted, "+")) + + var and []string + for ft := range allSet { + if !s.Contains(ft) { + and = append(and, string(ft)) + } + } + if len(and) > 0 { + slices.Sort(and) + fmt.Fprintf(&titleBuf, " (and %s)", strings.Join(and, "+")) } + return titleBuf.String(), removeTags } func allExcept(all, omit []string) []string { @@ -120,7 +208,7 @@ func measure(bin string, tags ...string) int64 { } } - cmd := exec.Command("go", "build", "-tags", strings.Join(tags, ","), "-o", "tmpbin", "./cmd/"+bin) + cmd := exec.Command("go", "build", "-trimpath", "-ldflags=-w -s", "-tags", strings.Join(tags, ","), "-o", "tmpbin", "./cmd/"+bin) log.Printf("# Measuring %v", cmd.Args) cmd.Env = append(os.Environ(), "CGO_ENABLED=0", "GOOS=linux", "GOARCH=amd64") out, err := cmd.CombinedOutput() diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index fc26dd3704605..6afb40893cb6d 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -4,6 +4,8 @@ // The featuretags package is a registry of all the ts_omit-able build tags. package featuretags +import "tailscale.com/util/set" + // CLI is a special feature in the [Features] map that works opposite // from the others: it is opt-in, rather than opt-out, having a different // build tag format. 
@@ -32,37 +34,90 @@ func (ft FeatureTag) OmitTag() string { return "ts_omit_" + string(ft) } +// Requires returns the set of features that must be included to +// use the given feature, including the provided feature itself. +func Requires(ft FeatureTag) set.Set[FeatureTag] { + s := set.Set[FeatureTag]{} + var add func(FeatureTag) + add = func(ft FeatureTag) { + if !ft.IsOmittable() { + return + } + s.Add(ft) + for _, dep := range Features[ft].Deps { + add(dep) + } + } + add(ft) + return s +} + +// RequiredBy is the inverse of Requires: it returns the set of features that +// depend on the given feature (directly or indirectly), including the feature +// itself. +func RequiredBy(ft FeatureTag) set.Set[FeatureTag] { + s := set.Set[FeatureTag]{} + for f := range Features { + if featureDependsOn(f, ft) { + s.Add(f) + } + } + return s +} + +// featureDependsOn reports whether feature a (directly or indirectly) depends on b. +// It returns true if a == b. +func featureDependsOn(a, b FeatureTag) bool { + if a == b { + return true + } + for _, dep := range Features[a].Deps { + if featureDependsOn(dep, b) { + return true + } + } + return false +} + // FeatureMeta describes a modular feature that can be conditionally linked into // the binary. type FeatureMeta struct { - Sym string // exported Go symbol for boolean const - Desc string // human-readable description + Sym string // exported Go symbol for boolean const + Desc string // human-readable description + Deps []FeatureTag // other features this feature requires } // Features are the known Tailscale features that can be selectively included or // excluded via build tags, and a description of each. 
var Features = map[FeatureTag]FeatureMeta{ - "acme": {"ACME", "ACME TLS certificate management"}, - "aws": {"AWS", "AWS integration"}, - "bird": {"Bird", "Bird BGP integration"}, - "capture": {"Capture", "Packet capture"}, - "cli": {"CLI", "embed the CLI into the tailscaled binary"}, - "completion": {"Completion", "CLI shell completion"}, - "debugeventbus": {"DebugEventBus", "eventbus debug support"}, - "debugportmapper": {"DebugPortMapper", "portmapper debug support"}, - "desktop_sessions": {"DesktopSessions", "Desktop sessions support"}, - "drive": {"Drive", "Tailscale Drive (file server) support"}, - "kube": {"Kube", "Kubernetes integration"}, - "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support"}, - "relayserver": {"RelayServer", "Relay server"}, - "serve": {"Serve", "Serve and Funnel support"}, - "ssh": {"SSH", "Tailscale SSH support"}, - "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support"}, - "systray": {"SysTray", "Linux system tray"}, - "taildrop": {"Taildrop", "Taildrop (file sending) support"}, - "tailnetlock": {"TailnetLock", "Tailnet Lock support"}, - "tap": {"Tap", "Experimental Layer 2 (ethernet) support"}, - "tpm": {"TPM", "TPM support"}, - "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support"}, - "webclient": {"WebClient", "Web client support"}, + "acme": {"ACME", "ACME TLS certificate management", nil}, + "aws": {"AWS", "AWS integration", nil}, + "bird": {"Bird", "Bird BGP integration", nil}, + "capture": {"Capture", "Packet capture", nil}, + "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, + "completion": {"Completion", "CLI shell completion", nil}, + "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, + "debugportmapper": { + Sym: "DebugPortMapper", + Desc: "portmapper debug support", + Deps: []FeatureTag{"portmapper"}, + }, + "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, + "drive": {"Drive", "Tailscale Drive (file server) support", nil}, + "kube": 
{"Kube", "Kubernetes integration", nil}, + "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, + "relayserver": {"RelayServer", "Relay server", nil}, + "serve": {"Serve", "Serve and Funnel support", nil}, + "ssh": {"SSH", "Tailscale SSH support", nil}, + "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, + "systray": {"SysTray", "Linux system tray", nil}, + "taildrop": {"Taildrop", "Taildrop (file sending) support", nil}, + "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, + "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, + "tpm": {"TPM", "TPM support", nil}, + "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, + "webclient": { + Sym: "WebClient", Desc: "Web client support", + Deps: []FeatureTag{"serve"}, + }, } diff --git a/feature/featuretags/featuretags_test.go b/feature/featuretags/featuretags_test.go new file mode 100644 index 0000000000000..4a268c90da311 --- /dev/null +++ b/feature/featuretags/featuretags_test.go @@ -0,0 +1,81 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package featuretags + +import ( + "maps" + "slices" + "testing" + + "tailscale.com/util/set" +) + +func TestRequires(t *testing.T) { + for tag, meta := range Features { + for _, dep := range meta.Deps { + if _, ok := Features[dep]; !ok { + t.Errorf("feature %q has unknown dependency %q", tag, dep) + } + } + + // And indirectly check for cycles. If there were a cycle, + // this would infinitely loop. 
+ deps := Requires(tag) + t.Logf("deps of %q: %v", tag, slices.Sorted(maps.Keys(deps))) + } +} + +func TestDepSet(t *testing.T) { + var setOf = set.Of[FeatureTag] + tests := []struct { + in FeatureTag + want set.Set[FeatureTag] + }{ + { + in: "drive", + want: setOf("drive"), + }, + { + in: "serve", + want: setOf("serve"), + }, + { + in: "webclient", + want: setOf("webclient", "serve"), + }, + } + for _, tt := range tests { + got := Requires(tt.in) + if !maps.Equal(got, tt.want) { + t.Errorf("DepSet(%q) = %v, want %v", tt.in, got, tt.want) + } + } +} + +func TestRequiredBy(t *testing.T) { + var setOf = set.Of[FeatureTag] + tests := []struct { + in FeatureTag + want set.Set[FeatureTag] + }{ + { + in: "drive", + want: setOf("drive"), + }, + { + in: "webclient", + want: setOf("webclient"), + }, + { + in: "serve", + want: setOf("webclient", "serve"), + }, + } + for _, tt := range tests { + got := RequiredBy(tt.in) + if !maps.Equal(got, tt.want) { + t.Errorf("FeaturesWhichDependOn(%q) = %v, want %v", tt.in, got, tt.want) + } + } +} From fc9a74a4055b3b86a1ddcc8a5b816fcab35a1856 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 19 Sep 2025 07:56:45 -0700 Subject: [PATCH 0372/1093] util/eventbus: fix flakes in eventbustest tests (#17198) When tests run in parallel, events from multiple tests on the same bus can intercede with each other. This is working as intended, but for the test cases we want to control exactly what goes through the bus. To fix that, allocate a fresh bus for each subtest. Fixes #17197 Change-Id: I53f285ebed8da82e72a2ed136a61884667ef9a5e Signed-off-by: M. J. 
Fromberger --- util/eventbus/eventbustest/eventbustest_test.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index f1b21ea8fc9ee..7a6b511c7bae0 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -108,10 +108,11 @@ func TestExpectFilter(t *testing.T) { }, } - bus := eventbustest.NewBus(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + bus := eventbustest.NewBus(t) + t.Cleanup(bus.Close) + if *doDebug { eventbustest.LogAllEvents(t, bus) } @@ -241,10 +242,11 @@ func TestExpectEvents(t *testing.T) { }, } - bus := eventbustest.NewBus(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + bus := eventbustest.NewBus(t) + t.Cleanup(bus.Close) + tw := eventbustest.NewWatcher(t, bus) // TODO(cmol): When synctest is out of experimental, use that instead: // https://go.dev/blog/synctest @@ -374,10 +376,11 @@ func TestExpectExactlyEventsFilter(t *testing.T) { }, } - bus := eventbustest.NewBus(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + bus := eventbustest.NewBus(t) + t.Cleanup(bus.Close) + tw := eventbustest.NewWatcher(t, bus) // TODO(cmol): When synctest is out of experimental, use that instead: // https://go.dev/blog/synctest From 394718a4ca78e376e9f20782d3bb2efa07e5280f Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 9 Sep 2025 13:31:01 +0100 Subject: [PATCH 0373/1093] tstest/integration: support multiple C2N handlers in testcontrol Instead of a single hard-coded C2N handler, add support for calling arbitrary C2N endpoints via a node roundtripper. 
Updates tailscale/corp#32095 Signed-off-by: Anton Tolchanov --- tstest/integration/integration_test.go | 56 +++++------- tstest/integration/testcontrol/testcontrol.go | 86 ++++++++++++++++++- 2 files changed, 105 insertions(+), 37 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index b282adcf86249..5fef04488daf7 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -596,22 +596,6 @@ func TestC2NPingRequest(t *testing.T) { env := NewTestEnv(t) - gotPing := make(chan bool, 1) - env.Control.HandleC2N = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - t.Errorf("unexpected ping method %q", r.Method) - } - got, err := io.ReadAll(r.Body) - if err != nil { - t.Errorf("ping body read error: %v", err) - } - const want = "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Type: text/plain; charset=utf-8\r\n\r\nabc" - if string(got) != want { - t.Errorf("body error\n got: %q\nwant: %q", got, want) - } - gotPing <- true - }) - n1 := NewTestNode(t, env) n1.StartDaemon() @@ -635,27 +619,33 @@ func TestC2NPingRequest(t *testing.T) { } cancel() - pr := &tailcfg.PingRequest{ - URL: fmt.Sprintf("https://unused/some-c2n-path/ping-%d", try), - Log: true, - Types: "c2n", - Payload: []byte("POST /echo HTTP/1.0\r\nContent-Length: 3\r\n\r\nabc"), + ctx, cancel = context.WithTimeout(t.Context(), 2*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "POST", "/echo", bytes.NewReader([]byte("abc"))) + if err != nil { + t.Errorf("failed to create request: %v", err) + continue } - if !env.Control.AddPingRequest(nodeKey, pr) { - t.Logf("failed to AddPingRequest") + r, err := env.Control.NodeRoundTripper(nodeKey).RoundTrip(req) + if err != nil { + t.Errorf("RoundTrip failed: %v", err) continue } - - // Wait for PingRequest to come back - pingTimeout := time.NewTimer(2 * time.Second) - defer pingTimeout.Stop() - select { - case 
<-gotPing: - t.Logf("got ping; success") - return - case <-pingTimeout.C: - // Try again. + if r.StatusCode != 200 { + t.Errorf("unexpected status code: %d", r.StatusCode) + continue + } + b, err := io.ReadAll(r.Body) + if err != nil { + t.Errorf("error reading body: %v", err) + continue + } + if string(b) != "abc" { + t.Errorf("body = %q; want %q", b, "abc") + continue } + return } t.Error("all ping attempts failed") } diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 66d868aca6294..7a371ef76df2a 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -5,6 +5,7 @@ package testcontrol import ( + "bufio" "bytes" "cmp" "context" @@ -30,10 +31,12 @@ import ( "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" + "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/rands" @@ -53,7 +56,7 @@ type Server struct { Verbose bool DNSConfig *tailcfg.DNSConfig // nil means no DNS config MagicDNSDomain string - HandleC2N http.Handler // if non-nil, used for /some-c2n-path/ in tests + C2NResponses syncs.Map[string, func(*http.Response)] // token => onResponse func // PeerRelayGrants, if true, inserts relay capabilities into the wildcard // grants rules. @@ -183,6 +186,52 @@ func (s *Server) AddPingRequest(nodeKeyDst key.NodePublic, pr *tailcfg.PingReque return s.addDebugMessage(nodeKeyDst, pr) } +// c2nRoundTripper is an http.RoundTripper that sends requests to a node via C2N. 
+type c2nRoundTripper struct { + s *Server + n key.NodePublic +} + +func (s *Server) NodeRoundTripper(n key.NodePublic) http.RoundTripper { + return c2nRoundTripper{s, n} +} + +func (rt c2nRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + ctx := req.Context() + resc := make(chan *http.Response, 1) + if err := rt.s.SendC2N(rt.n, req, func(r *http.Response) { resc <- r }); err != nil { + return nil, err + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case r := <-resc: + return r, nil + } +} + +// SendC2N sends req to node. When the response is received, onRes is called. +func (s *Server) SendC2N(node key.NodePublic, req *http.Request, onRes func(*http.Response)) error { + var buf bytes.Buffer + if err := req.Write(&buf); err != nil { + return err + } + + token := rands.HexString(10) + pr := &tailcfg.PingRequest{ + URL: "https://unused/c2n/" + token, + Log: true, + Types: "c2n", + Payload: buf.Bytes(), + } + s.C2NResponses.Store(token, onRes) + if !s.AddPingRequest(node, pr) { + s.C2NResponses.Delete(token) + return fmt.Errorf("node %v not connected", node) + } + return nil +} + // AddRawMapResponse delivers the raw MapResponse mr to nodeKeyDst. It's meant // for testing incremental map updates. // @@ -269,9 +318,7 @@ func (s *Server) initMux() { s.mux.HandleFunc("/key", s.serveKey) s.mux.HandleFunc("/machine/", s.serveMachine) s.mux.HandleFunc("/ts2021", s.serveNoiseUpgrade) - if s.HandleC2N != nil { - s.mux.Handle("/some-c2n-path/", s.HandleC2N) - } + s.mux.HandleFunc("/c2n/", s.serveC2N) } func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -285,6 +332,37 @@ func (s *Server) serveUnhandled(w http.ResponseWriter, r *http.Request) { go panic(fmt.Sprintf("testcontrol.Server received unhandled request: %s", got.Bytes())) } +// serveC2N handles a POST from a node containing a c2n response. 
+func (s *Server) serveC2N(w http.ResponseWriter, r *http.Request) { + if err := func() error { + if r.Method != httpm.POST { + return fmt.Errorf("POST required") + } + token, ok := strings.CutPrefix(r.URL.Path, "/c2n/") + if !ok { + return fmt.Errorf("invalid path %q", r.URL.Path) + } + + onRes, ok := s.C2NResponses.Load(token) + if !ok { + return fmt.Errorf("unknown c2n token %q", token) + } + s.C2NResponses.Delete(token) + + res, err := http.ReadResponse(bufio.NewReader(r.Body), nil) + if err != nil { + return fmt.Errorf("error reading c2n response: %w", err) + } + onRes(res) + return nil + }(); err != nil { + s.logf("testcontrol: %s", err) + http.Error(w, err.Error(), 500) + return + } + w.WriteHeader(http.StatusNoContent) +} + type peerMachinePublicContextKey struct{} func (s *Server) serveNoiseUpgrade(w http.ResponseWriter, r *http.Request) { From 4a04161828edf8b950b923530e286d74fe6f329f Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Wed, 13 Aug 2025 15:00:35 +0100 Subject: [PATCH 0374/1093] ipn/ipnlocal: add a C2N endpoint for fetching a netmap For debugging purposes, add a new C2N endpoint returning the current netmap. Optionally, coordination server can send a new "candidate" map response, which the client will generate a separate netmap for. Coordination server can later compare two netmaps, detecting unexpected changes to the client state. 
Updates tailscale/corp#32095 Signed-off-by: Anton Tolchanov --- control/controlclient/direct.go | 21 ++ control/controlclient/map_test.go | 26 +++ ipn/ipnlocal/c2n.go | 65 +++++++ ipn/ipnlocal/c2n_test.go | 183 ++++++++++++++++++ ipn/ipnlocal/local.go | 28 ++- tailcfg/c2ntypes.go | 31 ++- tailcfg/tailcfg.go | 3 +- tstest/integration/integration_test.go | 145 ++++++++++++++ tstest/integration/testcontrol/testcontrol.go | 13 +- 9 files changed, 506 insertions(+), 9 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index b9e26cc9823cc..ea8661bff911f 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1160,6 +1160,27 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap return nil } +// NetmapFromMapResponseForDebug returns a NetworkMap from the given MapResponse. +// It is intended for debugging only. +func NetmapFromMapResponseForDebug(ctx context.Context, pr persist.PersistView, resp *tailcfg.MapResponse) (*netmap.NetworkMap, error) { + if resp == nil { + return nil, errors.New("nil MapResponse") + } + if resp.Node == nil { + return nil, errors.New("MapResponse lacks Node") + } + + nu := &rememberLastNetmapUpdater{} + sess := newMapSession(pr.PrivateNodeKey(), nu, nil) + defer sess.Close() + + if err := sess.HandleNonKeepAliveMapResponse(ctx, resp); err != nil { + return nil, fmt.Errorf("HandleNonKeepAliveMapResponse: %w", err) + } + + return sess.netmap(), nil +} + func (c *Direct) handleDebugMessage(ctx context.Context, debug *tailcfg.Debug) error { if code := debug.Exit; code != nil { c.logf("exiting process with status %v per controlplane", *code) diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 59b8988fcd46e..4e8c911e3d10e 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -20,6 +20,7 @@ import ( "go4.org/mem" "tailscale.com/control/controlknobs" "tailscale.com/health" + 
"tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstime" @@ -27,6 +28,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/types/persist" "tailscale.com/types/ptr" "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" @@ -1419,3 +1421,27 @@ func TestNetmapDisplayMessageIntegration(t *testing.T) { t.Errorf("unexpected message contents (-want +got):\n%s", diff) } } + +func TestNetmapForMapResponseForDebug(t *testing.T) { + mr := &tailcfg.MapResponse{ + Node: &tailcfg.Node{ + ID: 1, + Name: "foo.bar.ts.net.", + }, + Peers: []*tailcfg.Node{ + {ID: 2, Name: "peer1.bar.ts.net.", HomeDERP: 1}, + {ID: 3, Name: "peer2.bar.ts.net.", HomeDERP: 1}, + }, + } + ms := newTestMapSession(t, nil) + nm1 := ms.netmapForResponse(mr) + + prefs := &ipn.Prefs{Persist: &persist.Persist{PrivateNodeKey: ms.privateNodeKey}} + nm2, err := NetmapFromMapResponseForDebug(t.Context(), prefs.View().Persist(), mr) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(nm1, nm2) { + t.Errorf("mismatch\nnm1: %s\nnm2: %s\n", logger.AsJSON(nm1), logger.AsJSON(nm2)) + } +} diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 0487774dba7e6..2b48b19fa90bf 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -13,19 +13,23 @@ import ( "os/exec" "path" "path/filepath" + "reflect" "runtime" "strconv" "strings" "time" "tailscale.com/clientupdate" + "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/net/sockstats" "tailscale.com/posture" "tailscale.com/tailcfg" + "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" "tailscale.com/util/goroutines" + "tailscale.com/util/httpm" "tailscale.com/util/set" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/ptype" @@ -44,6 +48,7 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ req("/debug/metrics"): handleC2NDebugMetrics, req("/debug/component-logging"): 
handleC2NDebugComponentLogging, req("/debug/logheap"): handleC2NDebugLogHeap, + req("/debug/netmap"): handleC2NDebugNetMap, // PPROF - We only expose a subset of typical pprof endpoints for security. req("/debug/pprof/heap"): handleC2NPprof, @@ -142,6 +147,66 @@ func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Reque } } +func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + if r.Method != httpm.POST && r.Method != httpm.GET { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + b.logf("c2n: %s /debug/netmap received", r.Method) + + // redactAndMarshal redacts private keys from the given netmap, clears fields + // that should be omitted, and marshals it to JSON. + redactAndMarshal := func(nm *netmap.NetworkMap, omitFields []string) (json.RawMessage, error) { + for _, f := range omitFields { + field := reflect.ValueOf(nm).Elem().FieldByName(f) + if !field.IsValid() { + b.logf("c2n: /debug/netmap: unknown field %q in omitFields", f) + continue + } + field.SetZero() + } + nm, _ = redactNetmapPrivateKeys(nm) + return json.Marshal(nm) + } + + var omitFields []string + resp := &tailcfg.C2NDebugNetmapResponse{} + + if r.Method == httpm.POST { + var req tailcfg.C2NDebugNetmapRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, fmt.Sprintf("failed to decode request body: %v", err), http.StatusBadRequest) + return + } + omitFields = req.OmitFields + + if req.Candidate != nil { + cand, err := controlclient.NetmapFromMapResponseForDebug(ctx, b.unsanitizedPersist(), req.Candidate) + if err != nil { + http.Error(w, fmt.Sprintf("failed to convert candidate MapResponse: %v", err), http.StatusBadRequest) + return + } + candJSON, err := redactAndMarshal(cand, omitFields) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal candidate netmap: %v", err), http.StatusInternalServerError) + return + } + resp.Candidate = candJSON + } + 
} + + var err error + resp.Current, err = redactAndMarshal(b.currentNode().netMapWithPeers(), omitFields) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal current netmap: %v", err), http.StatusInternalServerError) + return + } + + writeJSON(w, resp) +} + func handleC2NDebugGoroutines(_ *LocalBackend, w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") w.Write(goroutines.ScrubbedGoroutineDump(true)) diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index cc31e284af8a1..04ed8bf5d8685 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -4,9 +4,11 @@ package ipnlocal import ( + "bytes" "cmp" "crypto/x509" "encoding/json" + "fmt" "net/http/httptest" "net/url" "os" @@ -18,8 +20,15 @@ import ( "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/netmap" + "tailscale.com/types/opt" + "tailscale.com/types/views" "tailscale.com/util/must" + + gcmp "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" ) func TestHandleC2NTLSCertStatus(t *testing.T) { @@ -132,3 +141,177 @@ func TestHandleC2NTLSCertStatus(t *testing.T) { } } + +// reflectNonzero returns a non-zero value for a given reflect.Value. 
+func reflectNonzero(t reflect.Type) reflect.Value { + switch t.Kind() { + case reflect.Bool: + return reflect.ValueOf(true) + case reflect.String: + if reflect.TypeFor[opt.Bool]() == t { + return reflect.ValueOf("true").Convert(t) + } + return reflect.ValueOf("foo").Convert(t) + case reflect.Int64: + return reflect.ValueOf(int64(1)).Convert(t) + case reflect.Slice: + return reflect.MakeSlice(t, 1, 1) + case reflect.Ptr: + return reflect.New(t.Elem()) + case reflect.Map: + return reflect.MakeMap(t) + case reflect.Struct: + switch t { + case reflect.TypeFor[key.NodePrivate](): + return reflect.ValueOf(key.NewNode()) + } + } + panic(fmt.Sprintf("unhandled %v", t)) +} + +// setFieldsToRedact sets fields in the given netmap to non-zero values +// according to the fieldMap, which maps field names to whether they +// should be reset (true) or not (false). +func setFieldsToRedact(t *testing.T, nm *netmap.NetworkMap, fieldMap map[string]bool) { + t.Helper() + v := reflect.ValueOf(nm).Elem() + for i := range v.NumField() { + name := v.Type().Field(i).Name + f := v.Field(i) + if !f.CanSet() { + continue + } + shouldReset, ok := fieldMap[name] + if !ok { + t.Errorf("fieldMap missing field %q", name) + } + if shouldReset { + f.Set(reflectNonzero(f.Type())) + } + } +} + +func TestRedactNetmapPrivateKeys(t *testing.T) { + fieldMap := map[string]bool{ + // Private fields (should be redacted): + "PrivateKey": true, + + // Public fields (should not be redacted): + "AllCaps": false, + "CollectServices": false, + "DERPMap": false, + "DNS": false, + "DisplayMessages": false, + "Domain": false, + "DomainAuditLogID": false, + "Expiry": false, + "MachineKey": false, + "Name": false, + "NodeKey": false, + "PacketFilter": false, + "PacketFilterRules": false, + "Peers": false, + "SSHPolicy": false, + "SelfNode": false, + "TKAEnabled": false, + "TKAHead": false, + "UserProfiles": false, + } + + nm := &netmap.NetworkMap{} + setFieldsToRedact(t, nm, fieldMap) + + got, _ := 
redactNetmapPrivateKeys(nm) + if !reflect.DeepEqual(got, &netmap.NetworkMap{}) { + t.Errorf("redacted netmap is not empty: %+v", got) + } +} + +func TestHandleC2NDebugNetmap(t *testing.T) { + nm := &netmap.NetworkMap{ + Name: "myhost", + SelfNode: (&tailcfg.Node{ + ID: 100, + Name: "myhost", + StableID: "deadbeef", + Key: key.NewNode().Public(), + Hostinfo: (&tailcfg.Hostinfo{Hostname: "myhost"}).View(), + }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 101, + Name: "peer1", + StableID: "deadbeef", + Key: key.NewNode().Public(), + Hostinfo: (&tailcfg.Hostinfo{Hostname: "peer1"}).View(), + }).View(), + }, + PrivateKey: key.NewNode(), + } + withoutPrivateKey := *nm + withoutPrivateKey.PrivateKey = key.NodePrivate{} + + for _, tt := range []struct { + name string + req *tailcfg.C2NDebugNetmapRequest + want *netmap.NetworkMap + }{ + { + name: "simple_get", + want: &withoutPrivateKey, + }, + { + name: "post_no_omit", + req: &tailcfg.C2NDebugNetmapRequest{}, + want: &withoutPrivateKey, + }, + { + name: "post_omit_peers_and_name", + req: &tailcfg.C2NDebugNetmapRequest{OmitFields: []string{"Peers", "Name"}}, + want: &netmap.NetworkMap{ + SelfNode: nm.SelfNode, + }, + }, + { + name: "post_omit_nonexistent_field", + req: &tailcfg.C2NDebugNetmapRequest{OmitFields: []string{"ThisFieldDoesNotExist"}}, + want: &withoutPrivateKey, + }, + } { + t.Run(tt.name, func(t *testing.T) { + b := newTestLocalBackend(t) + b.currentNode().SetNetMap(nm) + + rec := httptest.NewRecorder() + req := httptest.NewRequest("GET", "/debug/netmap", nil) + if tt.req != nil { + b, err := json.Marshal(tt.req) + if err != nil { + t.Fatalf("json.Marshal: %v", err) + } + req = httptest.NewRequest("POST", "/debug/netmap", bytes.NewReader(b)) + } + handleC2NDebugNetMap(b, rec, req) + res := rec.Result() + wantStatus := 200 + if res.StatusCode != wantStatus { + t.Fatalf("status code = %v; want %v. 
Body: %s", res.Status, wantStatus, rec.Body.Bytes()) + } + var resp tailcfg.C2NDebugNetmapResponse + if err := json.Unmarshal(rec.Body.Bytes(), &resp); err != nil { + t.Fatalf("bad JSON: %v", err) + } + got := &netmap.NetworkMap{} + if err := json.Unmarshal(resp.Current, got); err != nil { + t.Fatalf("bad JSON: %v", err) + } + + if diff := gcmp.Diff(tt.want, got, + gcmp.AllowUnexported(netmap.NetworkMap{}, key.NodePublic{}, views.Slice[tailcfg.FilterRule]{}), + cmpopts.EquateComparable(key.MachinePublic{}), + ); diff != "" { + t.Errorf("netmap mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 1e102d53eedf2..7ac8f0ecbf8ba 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1223,6 +1223,13 @@ func (b *LocalBackend) sanitizedPrefsLocked() ipn.PrefsView { return stripKeysFromPrefs(b.pm.CurrentPrefs()) } +// unsanitizedPersist returns the current PersistView, including any private keys. +func (b *LocalBackend) unsanitizedPersist() persist.PersistView { + b.mu.Lock() + defer b.mu.Unlock() + return b.pm.CurrentPrefs().Persist() +} + // Status returns the latest status of the backend and its // sub-components. func (b *LocalBackend) Status() *ipnstate.Status { @@ -3257,21 +3264,34 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A // listener. func filterPrivateKeys(fn func(roNotify *ipn.Notify) (keepGoing bool)) func(*ipn.Notify) bool { return func(n *ipn.Notify) bool { - if n.NetMap == nil || n.NetMap.PrivateKey.IsZero() { + redacted, changed := redactNetmapPrivateKeys(n.NetMap) + if !changed { return fn(n) } // The netmap in n is shared across all watchers, so to mutate it for a // single watcher we have to clone the notify and the netmap. We can // make shallow clones, at least. 
- nm2 := *n.NetMap n2 := *n - n2.NetMap = &nm2 - n2.NetMap.PrivateKey = key.NodePrivate{} + n2.NetMap = redacted return fn(&n2) } } +// redactNetmapPrivateKeys returns a copy of nm with private keys zeroed out. +// If no change was needed, it returns nm unmodified. +func redactNetmapPrivateKeys(nm *netmap.NetworkMap) (redacted *netmap.NetworkMap, changed bool) { + if nm == nil || nm.PrivateKey.IsZero() { + return nm, false + } + + // The netmap might be shared across watchers, so make at least a shallow + // clone before mutating it. + nm2 := *nm + nm2.PrivateKey = key.NodePrivate{} + return &nm2, true +} + // appendHealthActions returns an IPN listener func that wraps the supplied IPN // listener func and transforms health messages passed to the wrapped listener. // If health messages with PrimaryActions are present, it appends the label & diff --git a/tailcfg/c2ntypes.go b/tailcfg/c2ntypes.go index 66f95785c4a83..d78baef1c29a4 100644 --- a/tailcfg/c2ntypes.go +++ b/tailcfg/c2ntypes.go @@ -5,7 +5,10 @@ package tailcfg -import "net/netip" +import ( + "encoding/json" + "net/netip" +) // C2NSSHUsernamesRequest is the request for the /ssh/usernames. // A GET request without a request body is equivalent to the zero value of this type. @@ -117,3 +120,29 @@ type C2NVIPServicesResponse struct { // changes. This value matches what is reported in latest [Hostinfo.ServicesHash]. ServicesHash string } + +// C2NDebugNetmapRequest is the request (from control to node) for the +// /debug/netmap handler. +type C2NDebugNetmapRequest struct { + // Candidate is an optional full MapResponse to be used for generating a candidate + // network map. If unset, only the current network map is returned. + Candidate *MapResponse `json:"candidate,omitzero"` + + // OmitFields is an optional list of netmap fields to omit from the response. + // If unset, no fields are omitted. 
+ OmitFields []string `json:"omitFields,omitzero"` +} + +// C2NDebugNetmapResponse is the response (from node to control) from the +// /debug/netmap handler. It contains the current network map and, if a +// candidate full MapResponse was provided in the request, a candidate network +// map generated from it. +// To avoid import cycles, and reflect the non-stable nature of +// netmap.NetworkMap values, they are returned as json.RawMessage. +type C2NDebugNetmapResponse struct { + // Current is the current network map (netmap.NetworkMap). + Current json.RawMessage `json:"current"` + + // Candidate is a network map produced based on the candidate MapResponse. + Candidate json.RawMessage `json:"candidate,omitzero"` +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 6130df9013e1d..057e1a54b6522 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -171,7 +171,8 @@ type CapabilityVersion int // - 124: 2025-08-08: removed NodeAttrDisableMagicSockCryptoRouting support, crypto routing is now mandatory // - 125: 2025-08-11: dnstype.Resolver adds UseWithExitNode field. // - 126: 2025-09-17: Client uses seamless key renewal unless disabled by control (tailscale/corp#31479) -const CurrentCapabilityVersion CapabilityVersion = 126 +// - 127: 2025-09-19: can handle C2N /debug/netmap. +const CurrentCapabilityVersion CapabilityVersion = 127 // ID is an integer ID for a user, node, or login allocated by the // control plane. 
diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 5fef04488daf7..6e0dc87eb4130 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -27,6 +27,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/miekg/dns" "go4.org/mem" "tailscale.com/client/local" @@ -41,6 +42,7 @@ import ( "tailscale.com/tstest" "tailscale.com/tstest/integration/testcontrol" "tailscale.com/types/key" + "tailscale.com/types/netmap" "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/must" @@ -1623,3 +1625,146 @@ func TestPeerRelayPing(t *testing.T) { } } } + +func TestC2NDebugNetmap(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + + var testNodes []*TestNode + var nodes []*tailcfg.Node + for i := range 2 { + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + n.AwaitResponding() + n.MustUp() + n.AwaitRunning() + testNodes = append(testNodes, n) + + controlNodes := env.Control.AllNodes() + if len(controlNodes) != i+1 { + t.Fatalf("expected %d nodes, got %d nodes", i+1, len(controlNodes)) + } + for _, cn := range controlNodes { + if n.MustStatus().Self.PublicKey == cn.Key { + nodes = append(nodes, cn) + break + } + } + } + + // getC2NNetmap fetches the current netmap. If a candidate map response is provided, + // a candidate netmap is also fetched and compared to the current netmap. 
+ getC2NNetmap := func(node key.NodePublic, cand *tailcfg.MapResponse) *netmap.NetworkMap { + t.Helper() + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + var req *http.Request + if cand != nil { + body := must.Get(json.Marshal(&tailcfg.C2NDebugNetmapRequest{Candidate: cand})) + req = must.Get(http.NewRequestWithContext(ctx, "POST", "/debug/netmap", bytes.NewReader(body))) + } else { + req = must.Get(http.NewRequestWithContext(ctx, "GET", "/debug/netmap", nil)) + } + httpResp := must.Get(env.Control.NodeRoundTripper(node).RoundTrip(req)) + defer httpResp.Body.Close() + + if httpResp.StatusCode != 200 { + t.Errorf("unexpected status code: %d", httpResp.StatusCode) + return nil + } + + respBody := must.Get(io.ReadAll(httpResp.Body)) + var resp tailcfg.C2NDebugNetmapResponse + must.Do(json.Unmarshal(respBody, &resp)) + + var current netmap.NetworkMap + must.Do(json.Unmarshal(resp.Current, ¤t)) + + if !current.PrivateKey.IsZero() { + t.Errorf("current netmap has non-zero private key: %v", current.PrivateKey) + } + // Check candidate netmap if we sent a map response. + if cand != nil { + var candidate netmap.NetworkMap + must.Do(json.Unmarshal(resp.Candidate, &candidate)) + if !candidate.PrivateKey.IsZero() { + t.Errorf("candidate netmap has non-zero private key: %v", candidate.PrivateKey) + } + if diff := cmp.Diff(current.SelfNode, candidate.SelfNode); diff != "" { + t.Errorf("SelfNode differs (-current +candidate):\n%s", diff) + } + if diff := cmp.Diff(current.Peers, candidate.Peers); diff != "" { + t.Errorf("Peers differ (-current +candidate):\n%s", diff) + } + } + return ¤t + } + + for _, n := range nodes { + mr := must.Get(env.Control.MapResponse(&tailcfg.MapRequest{NodeKey: n.Key})) + nm := getC2NNetmap(n.Key, mr) + + // Make sure peers do not have "testcap" initially (we'll change this later). 
+		if len(nm.Peers) != 1 || nm.Peers[0].CapMap().Contains("testcap") {
+			t.Fatalf("expected 1 peer without testcap, got: %v", nm.Peers)
+		}
+
+		// Make sure nodes think each other are offline initially.
+		if nm.Peers[0].Online().Get() {
+			t.Fatalf("expected 1 peer to be offline, got: %v", nm.Peers)
+		}
+	}
+
+	// Send a delta update to n0, setting "testcap" on node 1.
+	env.Control.AddRawMapResponse(nodes[0].Key, &tailcfg.MapResponse{
+		PeersChangedPatch: []*tailcfg.PeerChange{{
+			NodeID: nodes[1].ID, CapMap: tailcfg.NodeCapMap{"testcap": []tailcfg.RawMessage{}},
+		}},
+	})
+
+	// node 0 should see node 1 with "testcap".
+	must.Do(tstest.WaitFor(5*time.Second, func() error {
+		st := testNodes[0].MustStatus()
+		p, ok := st.Peer[nodes[1].Key]
+		if !ok {
+			return fmt.Errorf("node 0 (%s) doesn't see node 1 (%s) as peer\n%v", nodes[0].Key, nodes[1].Key, st)
+		}
+		if _, ok := p.CapMap["testcap"]; !ok {
+			return fmt.Errorf("node 0 (%s) sees node 1 (%s) as peer but without testcap\n%v", nodes[0].Key, nodes[1].Key, p)
+		}
+		return nil
+	}))
+
+	// Check that node 0's current netmap has "testcap" for node 1.
+	nm := getC2NNetmap(nodes[0].Key, nil)
+	if len(nm.Peers) != 1 || !nm.Peers[0].CapMap().Contains("testcap") {
+		t.Errorf("current netmap missing testcap: %v", nm.Peers[0].CapMap())
+	}
+
+	// Send a delta update to n1, marking node 0 as online.
+	env.Control.AddRawMapResponse(nodes[1].Key, &tailcfg.MapResponse{
+		PeersChangedPatch: []*tailcfg.PeerChange{{
+			NodeID: nodes[0].ID, Online: ptr.To(true),
+		}},
+	})
+
+	// node 1 should see node 0 as online.
+	must.Do(tstest.WaitFor(5*time.Second, func() error {
+		st := testNodes[1].MustStatus()
+		p, ok := st.Peer[nodes[0].Key]
+		if !ok || !p.Online {
+			return fmt.Errorf("node 1 (%s) doesn't see node 0 (%s) as an online peer\n%v", nodes[1].Key, nodes[0].Key, st)
+		}
+		return nil
+	}))
+
+	// The netmap from node 1 should show node 0 as online. 
+ nm = getC2NNetmap(nodes[1].Key, nil) + if len(nm.Peers) != 1 || !nm.Peers[0].Online().Get() { + t.Errorf("expected peer to be online; got %+v", nm.Peers[0].AsStruct()) + } +} diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 7a371ef76df2a..7ce7186e7426a 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -336,7 +336,7 @@ func (s *Server) serveUnhandled(w http.ResponseWriter, r *http.Request) { func (s *Server) serveC2N(w http.ResponseWriter, r *http.Request) { if err := func() error { if r.Method != httpm.POST { - return fmt.Errorf("POST required") + return errors.New("POST required") } token, ok := strings.CutPrefix(r.URL.Path, "/c2n/") if !ok { @@ -1148,18 +1148,25 @@ func (s *Server) canGenerateAutomaticMapResponseFor(nk key.NodePublic) bool { func (s *Server) hasPendingRawMapMessage(nk key.NodePublic) bool { s.mu.Lock() defer s.mu.Unlock() - _, ok := s.msgToSend[nk].(*tailcfg.MapResponse) + _, ok := s.msgToSend[nk] return ok } func (s *Server) takeRawMapMessage(nk key.NodePublic) (mapResJSON []byte, ok bool) { s.mu.Lock() defer s.mu.Unlock() - mr, ok := s.msgToSend[nk].(*tailcfg.MapResponse) + mr, ok := s.msgToSend[nk] if !ok { return nil, false } delete(s.msgToSend, nk) + + // If it's a bare PingRequest, wrap it in a MapResponse. + switch pr := mr.(type) { + case *tailcfg.PingRequest: + mr = &tailcfg.MapResponse{PingRequest: pr} + } + var err error mapResJSON, err = json.Marshal(mr) if err != nil { From 2351cc0d0ec6b21635058e29652c5dcbf362e64b Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Fri, 19 Sep 2025 16:18:14 +0100 Subject: [PATCH 0375/1093] ipn/ipnlocal: make the redactNetmapPrivateKeys test recursive Expand TestRedactNetmapPrivateKeys to cover all sub-structs of NetworkMap and confirm that a) all fields are annotated as private or public, and b) all private fields are getting redacted. 
Updates tailscale/corp#32095 Signed-off-by: Anton Tolchanov --- ipn/ipnlocal/c2n_test.go | 389 ++++++++++++++++++++++++++++++++------- 1 file changed, 319 insertions(+), 70 deletions(-) diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 04ed8bf5d8685..75a57dee5b79b 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -8,24 +8,26 @@ import ( "cmp" "crypto/x509" "encoding/json" - "fmt" "net/http/httptest" "net/url" "os" "path/filepath" "reflect" + "strings" "testing" "time" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/types/opt" "tailscale.com/types/views" "tailscale.com/util/must" + "tailscale.com/util/set" + "tailscale.com/wgengine/filter/filtertype" gcmp "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -142,88 +144,335 @@ func TestHandleC2NTLSCertStatus(t *testing.T) { } -// reflectNonzero returns a non-zero value for a given reflect.Value. -func reflectNonzero(t reflect.Type) reflect.Value { - switch t.Kind() { - case reflect.Bool: - return reflect.ValueOf(true) - case reflect.String: - if reflect.TypeFor[opt.Bool]() == t { - return reflect.ValueOf("true").Convert(t) - } - return reflect.ValueOf("foo").Convert(t) - case reflect.Int64: - return reflect.ValueOf(int64(1)).Convert(t) - case reflect.Slice: - return reflect.MakeSlice(t, 1, 1) - case reflect.Ptr: - return reflect.New(t.Elem()) - case reflect.Map: - return reflect.MakeMap(t) - case reflect.Struct: - switch t { - case reflect.TypeFor[key.NodePrivate](): - return reflect.ValueOf(key.NewNode()) +// eachStructField calls cb for each struct field in struct type tp, recursively. +func eachStructField(tp reflect.Type, cb func(reflect.Type, reflect.StructField)) { + if !strings.HasPrefix(tp.PkgPath(), "tailscale.com/") { + // Stop traversing when we reach a non-tailscale type. 
+ return + } + + for i := range tp.NumField() { + cb(tp, tp.Field(i)) + + switch tp.Field(i).Type.Kind() { + case reflect.Struct: + eachStructField(tp.Field(i).Type, cb) + case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: + if tp.Field(i).Type.Elem().Kind() == reflect.Struct { + eachStructField(tp.Field(i).Type.Elem(), cb) + } } } - panic(fmt.Sprintf("unhandled %v", t)) } -// setFieldsToRedact sets fields in the given netmap to non-zero values -// according to the fieldMap, which maps field names to whether they -// should be reset (true) or not (false). -func setFieldsToRedact(t *testing.T, nm *netmap.NetworkMap, fieldMap map[string]bool) { - t.Helper() - v := reflect.ValueOf(nm).Elem() +// eachStructValue calls cb for each struct field in the struct value v, recursively. +func eachStructValue(v reflect.Value, cb func(reflect.Type, reflect.StructField, reflect.Value)) { + if v.IsZero() { + return + } + for i := range v.NumField() { - name := v.Type().Field(i).Name - f := v.Field(i) - if !f.CanSet() { - continue - } - shouldReset, ok := fieldMap[name] - if !ok { - t.Errorf("fieldMap missing field %q", name) - } - if shouldReset { - f.Set(reflectNonzero(f.Type())) + cb(v.Type(), v.Type().Field(i), v.Field(i)) + + switch v.Type().Field(i).Type.Kind() { + case reflect.Struct: + eachStructValue(v.Field(i), cb) + case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: + if v.Field(i).Type().Elem().Kind() == reflect.Struct { + eachStructValue(v.Field(i).Addr().Elem(), cb) + } } } } +// TestRedactNetmapPrivateKeys tests that redactNetmapPrivateKeys redacts all private keys +// and other private fields from a netmap.NetworkMap, and only those fields. 
func TestRedactNetmapPrivateKeys(t *testing.T) { - fieldMap := map[string]bool{ - // Private fields (should be redacted): - "PrivateKey": true, + type field struct { + t reflect.Type + f string + } + f := func(t any, f string) field { + return field{reflect.TypeOf(t), f} + } + // fields is a map of all struct fields in netmap.NetworkMap and its + // sub-structs, marking each field as private (true) or public (false). + // If you add a new field to netmap.NetworkMap or its sub-structs, + // you must add it to this list, marking it as private or public. + fields := map[field]bool{ + // Private fields to be redacted. + f(netmap.NetworkMap{}, "PrivateKey"): true, + + // All other fields are public. + f(netmap.NetworkMap{}, "AllCaps"): false, + f(netmap.NetworkMap{}, "CollectServices"): false, + f(netmap.NetworkMap{}, "DERPMap"): false, + f(netmap.NetworkMap{}, "DNS"): false, + f(netmap.NetworkMap{}, "DisplayMessages"): false, + f(netmap.NetworkMap{}, "Domain"): false, + f(netmap.NetworkMap{}, "DomainAuditLogID"): false, + f(netmap.NetworkMap{}, "Expiry"): false, + f(netmap.NetworkMap{}, "MachineKey"): false, + f(netmap.NetworkMap{}, "Name"): false, + f(netmap.NetworkMap{}, "NodeKey"): false, + f(netmap.NetworkMap{}, "PacketFilter"): false, + f(netmap.NetworkMap{}, "PacketFilterRules"): false, + f(netmap.NetworkMap{}, "Peers"): false, + f(netmap.NetworkMap{}, "SSHPolicy"): false, + f(netmap.NetworkMap{}, "SelfNode"): false, + f(netmap.NetworkMap{}, "TKAEnabled"): false, + f(netmap.NetworkMap{}, "TKAHead"): false, + f(netmap.NetworkMap{}, "UserProfiles"): false, + f(filtertype.CapMatch{}, "Cap"): false, + f(filtertype.CapMatch{}, "Dst"): false, + f(filtertype.CapMatch{}, "Values"): false, + f(filtertype.Match{}, "Caps"): false, + f(filtertype.Match{}, "Dsts"): false, + f(filtertype.Match{}, "IPProto"): false, + f(filtertype.Match{}, "SrcCaps"): false, + f(filtertype.Match{}, "Srcs"): false, + f(filtertype.Match{}, "SrcsContains"): false, + f(filtertype.NetPortRange{}, 
"Net"): false, + f(filtertype.NetPortRange{}, "Ports"): false, + f(filtertype.PortRange{}, "First"): false, + f(filtertype.PortRange{}, "Last"): false, + f(key.DiscoPublic{}, "k"): false, + f(key.MachinePublic{}, "k"): false, + f(key.NodePrivate{}, "_"): false, + f(key.NodePrivate{}, "k"): false, + f(key.NodePublic{}, "k"): false, + f(tailcfg.CapGrant{}, "CapMap"): false, + f(tailcfg.CapGrant{}, "Caps"): false, + f(tailcfg.CapGrant{}, "Dsts"): false, + f(tailcfg.DERPHomeParams{}, "RegionScore"): false, + f(tailcfg.DERPMap{}, "HomeParams"): false, + f(tailcfg.DERPMap{}, "OmitDefaultRegions"): false, + f(tailcfg.DERPMap{}, "Regions"): false, + f(tailcfg.DNSConfig{}, "CertDomains"): false, + f(tailcfg.DNSConfig{}, "Domains"): false, + f(tailcfg.DNSConfig{}, "ExitNodeFilteredSet"): false, + f(tailcfg.DNSConfig{}, "ExtraRecords"): false, + f(tailcfg.DNSConfig{}, "FallbackResolvers"): false, + f(tailcfg.DNSConfig{}, "Nameservers"): false, + f(tailcfg.DNSConfig{}, "Proxied"): false, + f(tailcfg.DNSConfig{}, "Resolvers"): false, + f(tailcfg.DNSConfig{}, "Routes"): false, + f(tailcfg.DNSConfig{}, "TempCorpIssue13969"): false, + f(tailcfg.DNSRecord{}, "Name"): false, + f(tailcfg.DNSRecord{}, "Type"): false, + f(tailcfg.DNSRecord{}, "Value"): false, + f(tailcfg.DisplayMessageAction{}, "Label"): false, + f(tailcfg.DisplayMessageAction{}, "URL"): false, + f(tailcfg.DisplayMessage{}, "ImpactsConnectivity"): false, + f(tailcfg.DisplayMessage{}, "PrimaryAction"): false, + f(tailcfg.DisplayMessage{}, "Severity"): false, + f(tailcfg.DisplayMessage{}, "Text"): false, + f(tailcfg.DisplayMessage{}, "Title"): false, + f(tailcfg.FilterRule{}, "CapGrant"): false, + f(tailcfg.FilterRule{}, "DstPorts"): false, + f(tailcfg.FilterRule{}, "IPProto"): false, + f(tailcfg.FilterRule{}, "SrcBits"): false, + f(tailcfg.FilterRule{}, "SrcIPs"): false, + f(tailcfg.HostinfoView{}, "ж"): false, + f(tailcfg.Hostinfo{}, "AllowsUpdate"): false, + f(tailcfg.Hostinfo{}, "App"): false, + f(tailcfg.Hostinfo{}, 
"AppConnector"): false, + f(tailcfg.Hostinfo{}, "BackendLogID"): false, + f(tailcfg.Hostinfo{}, "Cloud"): false, + f(tailcfg.Hostinfo{}, "Container"): false, + f(tailcfg.Hostinfo{}, "Desktop"): false, + f(tailcfg.Hostinfo{}, "DeviceModel"): false, + f(tailcfg.Hostinfo{}, "Distro"): false, + f(tailcfg.Hostinfo{}, "DistroCodeName"): false, + f(tailcfg.Hostinfo{}, "DistroVersion"): false, + f(tailcfg.Hostinfo{}, "Env"): false, + f(tailcfg.Hostinfo{}, "ExitNodeID"): false, + f(tailcfg.Hostinfo{}, "FrontendLogID"): false, + f(tailcfg.Hostinfo{}, "GoArch"): false, + f(tailcfg.Hostinfo{}, "GoArchVar"): false, + f(tailcfg.Hostinfo{}, "GoVersion"): false, + f(tailcfg.Hostinfo{}, "Hostname"): false, + f(tailcfg.Hostinfo{}, "IPNVersion"): false, + f(tailcfg.Hostinfo{}, "IngressEnabled"): false, + f(tailcfg.Hostinfo{}, "Location"): false, + f(tailcfg.Hostinfo{}, "Machine"): false, + f(tailcfg.Hostinfo{}, "NetInfo"): false, + f(tailcfg.Hostinfo{}, "NoLogsNoSupport"): false, + f(tailcfg.Hostinfo{}, "OS"): false, + f(tailcfg.Hostinfo{}, "OSVersion"): false, + f(tailcfg.Hostinfo{}, "Package"): false, + f(tailcfg.Hostinfo{}, "PushDeviceToken"): false, + f(tailcfg.Hostinfo{}, "RequestTags"): false, + f(tailcfg.Hostinfo{}, "RoutableIPs"): false, + f(tailcfg.Hostinfo{}, "SSH_HostKeys"): false, + f(tailcfg.Hostinfo{}, "Services"): false, + f(tailcfg.Hostinfo{}, "ServicesHash"): false, + f(tailcfg.Hostinfo{}, "ShareeNode"): false, + f(tailcfg.Hostinfo{}, "ShieldsUp"): false, + f(tailcfg.Hostinfo{}, "StateEncrypted"): false, + f(tailcfg.Hostinfo{}, "TPM"): false, + f(tailcfg.Hostinfo{}, "Userspace"): false, + f(tailcfg.Hostinfo{}, "UserspaceRouter"): false, + f(tailcfg.Hostinfo{}, "WireIngress"): false, + f(tailcfg.Hostinfo{}, "WoLMACs"): false, + f(tailcfg.Location{}, "City"): false, + f(tailcfg.Location{}, "CityCode"): false, + f(tailcfg.Location{}, "Country"): false, + f(tailcfg.Location{}, "CountryCode"): false, + f(tailcfg.Location{}, "Latitude"): false, + f(tailcfg.Location{}, 
"Longitude"): false, + f(tailcfg.Location{}, "Priority"): false, + f(tailcfg.NetInfo{}, "DERPLatency"): false, + f(tailcfg.NetInfo{}, "FirewallMode"): false, + f(tailcfg.NetInfo{}, "HairPinning"): false, + f(tailcfg.NetInfo{}, "HavePortMap"): false, + f(tailcfg.NetInfo{}, "LinkType"): false, + f(tailcfg.NetInfo{}, "MappingVariesByDestIP"): false, + f(tailcfg.NetInfo{}, "OSHasIPv6"): false, + f(tailcfg.NetInfo{}, "PCP"): false, + f(tailcfg.NetInfo{}, "PMP"): false, + f(tailcfg.NetInfo{}, "PreferredDERP"): false, + f(tailcfg.NetInfo{}, "UPnP"): false, + f(tailcfg.NetInfo{}, "WorkingICMPv4"): false, + f(tailcfg.NetInfo{}, "WorkingIPv6"): false, + f(tailcfg.NetInfo{}, "WorkingUDP"): false, + f(tailcfg.NetPortRange{}, "Bits"): false, + f(tailcfg.NetPortRange{}, "IP"): false, + f(tailcfg.NetPortRange{}, "Ports"): false, + f(tailcfg.NetPortRange{}, "_"): false, + f(tailcfg.NodeView{}, "ж"): false, + f(tailcfg.Node{}, "Addresses"): false, + f(tailcfg.Node{}, "AllowedIPs"): false, + f(tailcfg.Node{}, "Cap"): false, + f(tailcfg.Node{}, "CapMap"): false, + f(tailcfg.Node{}, "Capabilities"): false, + f(tailcfg.Node{}, "ComputedName"): false, + f(tailcfg.Node{}, "ComputedNameWithHost"): false, + f(tailcfg.Node{}, "Created"): false, + f(tailcfg.Node{}, "DataPlaneAuditLogID"): false, + f(tailcfg.Node{}, "DiscoKey"): false, + f(tailcfg.Node{}, "Endpoints"): false, + f(tailcfg.Node{}, "ExitNodeDNSResolvers"): false, + f(tailcfg.Node{}, "Expired"): false, + f(tailcfg.Node{}, "HomeDERP"): false, + f(tailcfg.Node{}, "Hostinfo"): false, + f(tailcfg.Node{}, "ID"): false, + f(tailcfg.Node{}, "IsJailed"): false, + f(tailcfg.Node{}, "IsWireGuardOnly"): false, + f(tailcfg.Node{}, "Key"): false, + f(tailcfg.Node{}, "KeyExpiry"): false, + f(tailcfg.Node{}, "KeySignature"): false, + f(tailcfg.Node{}, "LastSeen"): false, + f(tailcfg.Node{}, "LegacyDERPString"): false, + f(tailcfg.Node{}, "Machine"): false, + f(tailcfg.Node{}, "MachineAuthorized"): false, + f(tailcfg.Node{}, "Name"): false, + 
f(tailcfg.Node{}, "Online"): false, + f(tailcfg.Node{}, "PrimaryRoutes"): false, + f(tailcfg.Node{}, "SelfNodeV4MasqAddrForThisPeer"): false, + f(tailcfg.Node{}, "SelfNodeV6MasqAddrForThisPeer"): false, + f(tailcfg.Node{}, "Sharer"): false, + f(tailcfg.Node{}, "StableID"): false, + f(tailcfg.Node{}, "Tags"): false, + f(tailcfg.Node{}, "UnsignedPeerAPIOnly"): false, + f(tailcfg.Node{}, "User"): false, + f(tailcfg.Node{}, "computedHostIfDifferent"): false, + f(tailcfg.PortRange{}, "First"): false, + f(tailcfg.PortRange{}, "Last"): false, + f(tailcfg.SSHPolicy{}, "Rules"): false, + f(tailcfg.Service{}, "Description"): false, + f(tailcfg.Service{}, "Port"): false, + f(tailcfg.Service{}, "Proto"): false, + f(tailcfg.Service{}, "_"): false, + f(tailcfg.TPMInfo{}, "FirmwareVersion"): false, + f(tailcfg.TPMInfo{}, "Manufacturer"): false, + f(tailcfg.TPMInfo{}, "Model"): false, + f(tailcfg.TPMInfo{}, "SpecRevision"): false, + f(tailcfg.TPMInfo{}, "Vendor"): false, + f(tailcfg.UserProfileView{}, "ж"): false, + f(tailcfg.UserProfile{}, "DisplayName"): false, + f(tailcfg.UserProfile{}, "ID"): false, + f(tailcfg.UserProfile{}, "LoginName"): false, + f(tailcfg.UserProfile{}, "ProfilePicURL"): false, + f(views.Slice[ipproto.Proto]{}, "ж"): false, + f(views.Slice[tailcfg.FilterRule]{}, "ж"): false, + } + + t.Run("field_list_is_complete", func(t *testing.T) { + seen := set.Set[field]{} + eachStructField(reflect.TypeOf(netmap.NetworkMap{}), func(rt reflect.Type, sf reflect.StructField) { + f := field{rt, sf.Name} + seen.Add(f) + if _, ok := fields[f]; !ok { + // Fail the test if netmap has a field not in the list. If you see this test + // failure, please add the new field to the fields map above, marking it as private or public. + t.Errorf("netmap field has not been declared as private or public: %v.%v", rt, sf.Name) + } + }) + + for want := range fields { + if !seen.Contains(want) { + // Fail the test if the list has a field not in netmap. 
If you see this test + // failure, please remove the field from the fields map above. + t.Errorf("field declared that has not been found in netmap: %v.%v", want.t, want.f) + } + } + }) - // Public fields (should not be redacted): - "AllCaps": false, - "CollectServices": false, - "DERPMap": false, - "DNS": false, - "DisplayMessages": false, - "Domain": false, - "DomainAuditLogID": false, - "Expiry": false, - "MachineKey": false, - "Name": false, - "NodeKey": false, - "PacketFilter": false, - "PacketFilterRules": false, - "Peers": false, - "SSHPolicy": false, - "SelfNode": false, - "TKAEnabled": false, - "TKAHead": false, - "UserProfiles": false, + // tests is a list of test cases, each with a non-redacted netmap and the expected redacted netmap. + // If you add a new private field to netmap.NetworkMap or its sub-structs, please add a test case + // here that has that field set in nm, and the expected redacted value in wantRedacted. + tests := []struct { + name string + nm *netmap.NetworkMap + wantRedacted *netmap.NetworkMap + }{ + { + name: "redact_private_key", + nm: &netmap.NetworkMap{ + PrivateKey: key.NewNode(), + }, + wantRedacted: &netmap.NetworkMap{}, + }, } - nm := &netmap.NetworkMap{} - setFieldsToRedact(t, nm, fieldMap) + // confirmedRedacted is a set of all private fields that have been covered by the tests above. + confirmedRedacted := set.Set[field]{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + // Record which of the private fields are set in the non-redacted netmap. + eachStructValue(reflect.ValueOf(tt.nm).Elem(), func(tt reflect.Type, sf reflect.StructField, v reflect.Value) { + f := field{tt, sf.Name} + if shouldRedact := fields[f]; shouldRedact && !v.IsZero() { + confirmedRedacted.Add(f) + } + }) + + got, _ := redactNetmapPrivateKeys(tt.nm) + if !reflect.DeepEqual(got, tt.wantRedacted) { + t.Errorf("unexpected redacted netmap: %+v", got) + } + + // Check that all private fields in the redacted netmap are zero. 
+ eachStructValue(reflect.ValueOf(got).Elem(), func(tt reflect.Type, sf reflect.StructField, v reflect.Value) { + f := field{tt, sf.Name} + if shouldRedact := fields[f]; shouldRedact && !v.IsZero() { + t.Errorf("field not redacted: %v.%v", tt, sf.Name) + } + }) + }) + } - got, _ := redactNetmapPrivateKeys(nm) - if !reflect.DeepEqual(got, &netmap.NetworkMap{}) { - t.Errorf("redacted netmap is not empty: %+v", got) + // Check that all private fields in netmap.NetworkMap and its sub-structs + // are covered by the tests above. If you see a test failure here, + // please add a test case above that has that field set in nm. + for f, shouldRedact := range fields { + if shouldRedact { + if !confirmedRedacted.Contains(f) { + t.Errorf("field not covered by tests: %v.%v", f.t, f.f) + } + } } } From b9cda4bca5a0c5562021deae1512de8e3a3c2bc4 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Fri, 19 Sep 2025 12:31:44 -0400 Subject: [PATCH 0376/1093] tsnet,internal/client/tailscale: resolve OAuth into authkeys in tsnet (#17191) * tsnet,internal/client/tailscale: resolve OAuth into authkeys in tsnet Updates #8403. * internal/client/tailscale: omit OAuth library via build tag Updates #12614. 
Signed-off-by: Naman Sood --- cmd/k8s-operator/depaware.txt | 6 +- cmd/tailscale/cli/up.go | 98 ++-------------- cmd/tailscale/depaware.txt | 6 +- cmd/tailscaled/deps_test.go | 13 +++ cmd/tsidp/depaware.txt | 7 ++ .../feature_oauthkey_disabled.go | 13 +++ .../buildfeatures/feature_oauthkey_enabled.go | 13 +++ feature/condregister/oauthkey/doc.go | 10 ++ .../condregister/oauthkey/maybe_oauthkey.go | 8 ++ feature/featuretags/featuretags.go | 1 + feature/oauthkey/oauthkey.go | 108 ++++++++++++++++++ internal/client/tailscale/oauthkeys.go | 20 ++++ tsnet/depaware.txt | 7 ++ tsnet/tsnet.go | 10 ++ 14 files changed, 226 insertions(+), 94 deletions(-) create mode 100644 feature/buildfeatures/feature_oauthkey_disabled.go create mode 100644 feature/buildfeatures/feature_oauthkey_enabled.go create mode 100644 feature/condregister/oauthkey/doc.go create mode 100644 feature/condregister/oauthkey/maybe_oauthkey.go create mode 100644 feature/oauthkey/oauthkey.go create mode 100644 internal/client/tailscale/oauthkeys.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e65977875ca7a..e5eccf2c2552f 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -798,13 +798,15 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from 
tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator + tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ @@ -1030,7 +1032,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/net/websocket from tailscale.com/k8s-operator/sessionrecording/ws golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ - golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator + golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator+ golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index c78a6356965b4..12c26b21c5e2a 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -12,13 +12,11 @@ import ( "fmt" "log" "net/netip" - "net/url" "os" "os/signal" "reflect" "runtime" "sort" - "strconv" "strings" "syscall" "time" @@ -26,7 +24,7 @@ import ( shellquote "github.com/kballard/go-shellquote" "github.com/peterbourgon/ff/v3/ffcli" qrcode "github.com/skip2/go-qrcode" - "golang.org/x/oauth2/clientcredentials" + _ "tailscale.com/feature/condregister/oauthkey" "tailscale.com/health/healthmsg" "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" @@ -566,9 +564,13 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } - authKey, err = resolveAuthKey(ctx, authKey, upArgs.advertiseTags) - if err != nil { - return err + // Try to use an OAuth secret to generate an auth key if that 
functionality + // is available. + if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { + authKey, err = f(ctx, authKey, strings.Split(upArgs.advertiseTags, ",")) + if err != nil { + return err + } } err = localClient.Start(ctx, ipn.Options{ AuthKey: authKey, @@ -1109,90 +1111,6 @@ func exitNodeIP(p *ipn.Prefs, st *ipnstate.Status) (ip netip.Addr) { return } -// resolveAuthKey either returns v unchanged (in the common case) or, if it -// starts with "tskey-client-" (as Tailscale OAuth secrets do) parses it like -// -// tskey-client-xxxx[?ephemeral=false&bar&preauthorized=BOOL&baseURL=...] -// -// and does the OAuth2 dance to get and return an authkey. The "ephemeral" -// property defaults to true if unspecified. The "preauthorized" defaults to -// false. The "baseURL" defaults to https://api.tailscale.com. -// The passed in tags are required, and must be non-empty. These will be -// set on the authkey generated by the OAuth2 dance. -func resolveAuthKey(ctx context.Context, v, tags string) (string, error) { - if !strings.HasPrefix(v, "tskey-client-") { - return v, nil - } - if tags == "" { - return "", errors.New("oauth authkeys require --advertise-tags") - } - - clientSecret, named, _ := strings.Cut(v, "?") - attrs, err := url.ParseQuery(named) - if err != nil { - return "", err - } - for k := range attrs { - switch k { - case "ephemeral", "preauthorized", "baseURL": - default: - return "", fmt.Errorf("unknown attribute %q", k) - } - } - getBool := func(name string, def bool) (bool, error) { - v := attrs.Get(name) - if v == "" { - return def, nil - } - ret, err := strconv.ParseBool(v) - if err != nil { - return false, fmt.Errorf("invalid attribute boolean attribute %s value %q", name, v) - } - return ret, nil - } - ephemeral, err := getBool("ephemeral", true) - if err != nil { - return "", err - } - preauth, err := getBool("preauthorized", false) - if err != nil { - return "", err - } - - baseURL := "https://api.tailscale.com" - if v := attrs.Get("baseURL"); v != 
"" { - baseURL = v - } - - credentials := clientcredentials.Config{ - ClientID: "some-client-id", // ignored - ClientSecret: clientSecret, - TokenURL: baseURL + "/api/v2/oauth/token", - } - - tsClient := tailscale.NewClient("-", nil) - tsClient.UserAgent = "tailscale-cli" - tsClient.HTTPClient = credentials.Client(ctx) - tsClient.BaseURL = baseURL - - caps := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - Ephemeral: ephemeral, - Preauthorized: preauth, - Tags: strings.Split(tags, ","), - }, - }, - } - - authkey, _, err := tsClient.CreateKey(ctx, caps) - if err != nil { - return "", err - } - return authkey, nil -} - func warnOnAdvertiseRoutes(ctx context.Context, prefs *ipn.Prefs) { if len(prefs.AdvertiseRoutes) > 0 || prefs.AppConnector.Advertise { // TODO(jwhited): compress CheckIPForwarding and CheckUDPGROForwarding diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index ae4a7bd4d774b..e25eece594bcd 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -105,13 +105,15 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/feature from tailscale.com/tsweb+ tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ - 
tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli + tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli+ tailscale.com/internal/noiseconn from tailscale.com/cmd/tailscale/cli tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ @@ -253,7 +255,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials - golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/tailscale/cli + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 9e6624d9a9e81..538cdc115426b 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -122,3 +122,16 @@ func TestOmitACME(t *testing.T) { }, }.Check(t) } + +func TestOmitOAuthKey(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_oauthkey,ts_include_cli", + OnDep: func(dep string) { + if strings.HasPrefix(dep, "golang.org/x/oauth2") { + t.Errorf("unexpected dep with ts_omit_oauthkey: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 7db7849b74954..df5476a602a7c 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -217,6 +217,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale 
tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ @@ -239,12 +240,15 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ + tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ @@ -457,6 +461,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey + golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ LD golang.org/x/sys/unix from github.com/google/nftables+ diff --git 
a/feature/buildfeatures/feature_oauthkey_disabled.go b/feature/buildfeatures/feature_oauthkey_disabled.go new file mode 100644 index 0000000000000..72ad1723b1d14 --- /dev/null +++ b/feature/buildfeatures/feature_oauthkey_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_oauthkey + +package buildfeatures + +// HasOAuthKey is whether the binary was built with support for modular feature "OAuth secret-to-authkey resolution support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_oauthkey" build tag. +// It's a const so it can be used for dead code elimination. +const HasOAuthKey = false diff --git a/feature/buildfeatures/feature_oauthkey_enabled.go b/feature/buildfeatures/feature_oauthkey_enabled.go new file mode 100644 index 0000000000000..39c52a2b0b46d --- /dev/null +++ b/feature/buildfeatures/feature_oauthkey_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_oauthkey + +package buildfeatures + +// HasOAuthKey is whether the binary was built with support for modular feature "OAuth secret-to-authkey resolution support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_oauthkey" build tag. +// It's a const so it can be used for dead code elimination. +const HasOAuthKey = true diff --git a/feature/condregister/oauthkey/doc.go b/feature/condregister/oauthkey/doc.go new file mode 100644 index 0000000000000..4c4ea5e4e3078 --- /dev/null +++ b/feature/condregister/oauthkey/doc.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package oauthkey registers support for OAuth key resolution +// if it's not disabled via the ts_omit_oauthkey build tag. 
+// Currently (2025-09-19), tailscaled does not need OAuth key +// resolution, only the CLI and tsnet do, so this package is +// pulled out separately to avoid linking OAuth packages into +// tailscaled. +package oauthkey diff --git a/feature/condregister/oauthkey/maybe_oauthkey.go b/feature/condregister/oauthkey/maybe_oauthkey.go new file mode 100644 index 0000000000000..be8d04b8ec035 --- /dev/null +++ b/feature/condregister/oauthkey/maybe_oauthkey.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_oauthkey + +package oauthkey + +import _ "tailscale.com/feature/oauthkey" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 6afb40893cb6d..325f46a44906e 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -105,6 +105,7 @@ var Features = map[FeatureTag]FeatureMeta{ "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, "drive": {"Drive", "Tailscale Drive (file server) support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, + "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, "relayserver": {"RelayServer", "Relay server", nil}, "serve": {"Serve", "Serve and Funnel support", nil}, diff --git a/feature/oauthkey/oauthkey.go b/feature/oauthkey/oauthkey.go new file mode 100644 index 0000000000000..5834c33becad6 --- /dev/null +++ b/feature/oauthkey/oauthkey.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package oauthkey registers support for using OAuth client secrets to +// automatically request authkeys for logging in. 
+package oauthkey + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + + "golang.org/x/oauth2/clientcredentials" + "tailscale.com/feature" + "tailscale.com/internal/client/tailscale" +) + +func init() { + feature.Register("oauthkey") + tailscale.HookResolveAuthKey.Set(resolveAuthKey) +} + +// resolveAuthKey either returns v unchanged (in the common case) or, if it +// starts with "tskey-client-" (as Tailscale OAuth secrets do) parses it like +// +// tskey-client-xxxx[?ephemeral=false&bar&preauthorized=BOOL&baseURL=...] +// +// and does the OAuth2 dance to get and return an authkey. The "ephemeral" +// property defaults to true if unspecified. The "preauthorized" defaults to +// false. The "baseURL" defaults to https://api.tailscale.com. +// The passed in tags are required, and must be non-empty. These will be +// set on the authkey generated by the OAuth2 dance. +func resolveAuthKey(ctx context.Context, v string, tags []string) (string, error) { + if !strings.HasPrefix(v, "tskey-client-") { + return v, nil + } + if len(tags) == 0 { + return "", errors.New("oauth authkeys require --advertise-tags") + } + + clientSecret, named, _ := strings.Cut(v, "?") + attrs, err := url.ParseQuery(named) + if err != nil { + return "", err + } + for k := range attrs { + switch k { + case "ephemeral", "preauthorized", "baseURL": + default: + return "", fmt.Errorf("unknown attribute %q", k) + } + } + getBool := func(name string, def bool) (bool, error) { + v := attrs.Get(name) + if v == "" { + return def, nil + } + ret, err := strconv.ParseBool(v) + if err != nil { + return false, fmt.Errorf("invalid attribute boolean attribute %s value %q", name, v) + } + return ret, nil + } + ephemeral, err := getBool("ephemeral", true) + if err != nil { + return "", err + } + preauth, err := getBool("preauthorized", false) + if err != nil { + return "", err + } + + baseURL := "https://api.tailscale.com" + if v := attrs.Get("baseURL"); v != "" { + baseURL = v + } + + 
credentials := clientcredentials.Config{ + ClientID: "some-client-id", // ignored + ClientSecret: clientSecret, + TokenURL: baseURL + "/api/v2/oauth/token", + } + + tsClient := tailscale.NewClient("-", nil) + tsClient.UserAgent = "tailscale-cli" + tsClient.HTTPClient = credentials.Client(ctx) + tsClient.BaseURL = baseURL + + caps := tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Ephemeral: ephemeral, + Preauthorized: preauth, + Tags: tags, + }, + }, + } + + authkey, _, err := tsClient.CreateKey(ctx, caps) + if err != nil { + return "", err + } + return authkey, nil +} diff --git a/internal/client/tailscale/oauthkeys.go b/internal/client/tailscale/oauthkeys.go new file mode 100644 index 0000000000000..21102ce0b5fc8 --- /dev/null +++ b/internal/client/tailscale/oauthkeys.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "context" + + "tailscale.com/feature" +) + +// HookResolveAuthKey resolves to [oauthkey.ResolveAuthKey] when the +// corresponding feature tag is enabled in the build process. +// +// authKey is a standard device auth key or an OAuth client secret to +// resolve into an auth key. +// tags is the list of tags being advertised by the client (required to be +// provided for the OAuth secret case, and required to be the same as the +// list of tags for which the OAuth secret is allowed to issue auth keys). 
+var HookResolveAuthKey feature.Hook[func(ctx context.Context, authKey string, tags []string) (string, error)] diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index c115332fa8b16..4fd9b7dbaacbb 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -213,6 +213,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ LDW tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ @@ -235,12 +236,15 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ + tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ @@ -450,6 +454,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) 
golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ LDW golang.org/x/net/proxy from tailscale.com/net/netns DI golang.org/x/net/route from tailscale.com/net/netmon+ + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey + golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ LDAI golang.org/x/sys/unix from github.com/google/nftables+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 6b083132f86dd..978819519d7dd 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -29,9 +29,11 @@ import ( "tailscale.com/client/local" "tailscale.com/control/controlclient" "tailscale.com/envknob" + _ "tailscale.com/feature/condregister/oauthkey" _ "tailscale.com/feature/condregister/portmapper" "tailscale.com/health" "tailscale.com/hostinfo" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" @@ -680,6 +682,14 @@ func (s *Server) start() (reterr error) { prefs.RunWebClient = s.RunWebClient prefs.AdvertiseTags = s.AdvertiseTags authKey := s.getAuthKey() + // Try to use an OAuth secret to generate an auth key if that functionality + // is available. 
+ if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { + authKey, err = f(s.shutdownCtx, s.getAuthKey(), prefs.AdvertiseTags) + if err != nil { + return fmt.Errorf("resolving auth key: %w", err) + } + } err = lb.Start(ipn.Options{ UpdatePrefs: prefs, AuthKey: authKey, From ecfdd86fc9956631759277d1ddbd78f0456dc365 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 17 Sep 2025 09:44:50 -0700 Subject: [PATCH 0377/1093] net/ace, control/controlhttp: start adding ACE dialing support Updates tailscale/corp#32227 Change-Id: I38afc668f99eb1d6f7632e82554b82922f3ebb9f Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/cli/debug.go | 22 +++++- cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + control/controlhttp/client.go | 78 ++++++++++++++------- net/ace/ace.go | 123 ++++++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 9 ++- tsnet/depaware.txt | 1 + 9 files changed, 211 insertions(+), 26 deletions(-) create mode 100644 net/ace/ace.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e5eccf2c2552f..b962f51f23d8b 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -842,6 +842,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 39c9748ef5289..9e8fa0d7f82a6 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -35,6 +35,7 @@ import ( "tailscale.com/hostinfo" 
"tailscale.com/internal/noiseconn" "tailscale.com/ipn" + "tailscale.com/net/ace" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" "tailscale.com/net/tshttpproxy" @@ -287,6 +288,7 @@ func debugCmd() *ffcli.Command { fs.StringVar(&ts2021Args.host, "host", "controlplane.tailscale.com", "hostname of control plane") fs.IntVar(&ts2021Args.version, "version", int(tailcfg.CurrentCapabilityVersion), "protocol version") fs.BoolVar(&ts2021Args.verbose, "verbose", false, "be extra verbose") + fs.StringVar(&ts2021Args.aceHost, "ace", "", "if non-empty, use this ACE server IP/hostname as a candidate path") return fs })(), }, @@ -964,6 +966,7 @@ var ts2021Args struct { host string // "controlplane.tailscale.com" version int // 27 or whatever verbose bool + aceHost string // if non-empty, FQDN of https ACE server to use ("ace.example.com") } func runTS2021(ctx context.Context, args []string) error { @@ -972,6 +975,13 @@ func runTS2021(ctx context.Context, args []string) error { keysURL := "https://" + ts2021Args.host + "/key?v=" + strconv.Itoa(ts2021Args.version) + keyTransport := http.DefaultTransport.(*http.Transport).Clone() + if ts2021Args.aceHost != "" { + log.Printf("using ACE server %q", ts2021Args.aceHost) + keyTransport.Proxy = nil + keyTransport.DialContext = (&ace.Dialer{ACEHost: ts2021Args.aceHost}).Dial + } + if ts2021Args.verbose { u, err := url.Parse(keysURL) if err != nil { @@ -997,7 +1007,7 @@ func runTS2021(ctx context.Context, args []string) error { if err != nil { return err } - res, err := http.DefaultClient.Do(req) + res, err := keyTransport.RoundTrip(req) if err != nil { log.Printf("Do: %v", err) return err @@ -1052,6 +1062,16 @@ func runTS2021(ctx context.Context, args []string) error { Logf: logf, NetMon: netMon, } + if ts2021Args.aceHost != "" { + noiseDialer.DialPlan = &tailcfg.ControlDialPlan{ + Candidates: []tailcfg.ControlIPCandidate{ + { + ACEHost: ts2021Args.aceHost, + DialTimeoutSec: 10, + }, + }, + } + } const tries = 2 for i := range tries { 
err := tryConnect(ctx, keys.PublicKey, noiseDialer) diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index e25eece594bcd..27d7864aec087 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -120,6 +120,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 4482ad125075b..e4405a689fef1 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -314,6 +314,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index df5476a602a7c..872dc8f81589e 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -273,6 +273,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots 
from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 1bb60d672980d..87061c310dd44 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -20,6 +20,7 @@ package controlhttp import ( + "cmp" "context" "crypto/tls" "encoding/base64" @@ -41,6 +42,7 @@ import ( "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/envknob" "tailscale.com/health" + "tailscale.com/net/ace" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" "tailscale.com/net/netutil" @@ -104,7 +106,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // host we know about. useDialPlan := envknob.BoolDefaultTrue("TS_USE_CONTROL_DIAL_PLAN") if !useDialPlan || a.DialPlan == nil || len(a.DialPlan.Candidates) == 0 { - return a.dialHost(ctx, netip.Addr{}) + return a.dialHost(ctx) } candidates := a.DialPlan.Candidates @@ -125,10 +127,9 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // Now, for each candidate, kick off a dial in parallel. type dialResult struct { - conn *ClientConn - err error - addr netip.Addr - priority int + conn *ClientConn + err error + cand tailcfg.ControlIPCandidate } resultsCh := make(chan dialResult, len(candidates)) @@ -143,7 +144,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // Always send results back to our channel. 
defer func() { - resultsCh <- dialResult{conn, err, c.IP, c.Priority} + resultsCh <- dialResult{conn, err, c} if pending.Add(-1) == 0 { close(resultsCh) } @@ -168,9 +169,13 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { ctx, cancel := context.WithTimeout(ctx, time.Duration(c.DialTimeoutSec*float64(time.Second))) defer cancel() + if c.IP.IsValid() { + a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP) + } else if c.ACEHost != "" { + a.logf("[v2] controlhttp: trying to dial %q via ACE %q", a.Hostname, c.ACEHost) + } // This will dial, and the defer above sends it back to our parent. - a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP) - conn, err = a.dialHost(ctx, c.IP) + conn, err = a.dialHostOpt(ctx, c.IP, c.ACEHost) }(ctx, c) } @@ -183,8 +188,8 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // TODO(andrew): we could make this better by keeping track of // the highest remaining priority dynamically, instead of just // checking for the highest total - if res.priority == highestPriority && res.conn != nil { - a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, res.addr) + if res.cand.Priority == highestPriority && res.conn != nil { + a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, cmp.Or(res.cand.ACEHost, res.cand.IP.String())) // Drain the channel and any existing connections in // the background. 
@@ -232,7 +237,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { sort.Slice(results, func(i, j int) bool { // NOTE: intentionally inverted so that the highest priority // item comes first - return results[i].priority > results[j].priority + return results[i].cand.Priority > results[j].cand.Priority }) var ( @@ -245,7 +250,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { continue } - a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, result.addr) + a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, cmp.Or(result.cand.ACEHost, result.cand.IP.String())) conn = result.conn results[i].conn = nil // so we don't close it in the defer return conn, nil @@ -259,7 +264,7 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", merr.Error()) - return a.dialHost(ctx, netip.Addr{}) + return a.dialHost(ctx) } // The TS_FORCE_NOISE_443 envknob forces the controlclient noise dialer to @@ -316,10 +321,19 @@ var debugNoiseDial = envknob.RegisterBool("TS_DEBUG_NOISE_DIAL") // dialHost connects to the configured Dialer.Hostname and upgrades the // connection into a controlbase.Conn. +func (a *Dialer) dialHost(ctx context.Context) (*ClientConn, error) { + return a.dialHostOpt(ctx, + netip.Addr{}, // no pre-resolved IP + "", // don't use ACE + ) +} + +// dialHostOpt connects to the configured Dialer.Hostname and upgrades the +// connection into a controlbase.Conn. // // If optAddr is valid, then no DNS is used and the connection will be made to the // provided address. 
-func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, error) { +func (a *Dialer) dialHostOpt(ctx context.Context, optAddr netip.Addr, optACEHost string) (*ClientConn, error) { // Create one shared context used by both port 80 and port 443 dials. // If port 80 is still in flight when 443 returns, this deferred cancel // will stop the port 80 dial. @@ -341,7 +355,7 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, Host: net.JoinHostPort(a.Hostname, strDef(a.HTTPSPort, "443")), Path: serverUpgradePath, } - if a.HTTPSPort == NoPort { + if a.HTTPSPort == NoPort || optACEHost != "" { u443 = nil } @@ -353,11 +367,11 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, ch := make(chan tryURLRes) // must be unbuffered try := func(u *url.URL) { if debugNoiseDial() { - a.logf("trying noise dial (%v, %v) ...", u, optAddr) + a.logf("trying noise dial (%v, %v) ...", u, cmp.Or(optACEHost, optAddr.String())) } - cbConn, err := a.dialURL(ctx, u, optAddr) + cbConn, err := a.dialURL(ctx, u, optAddr, optACEHost) if debugNoiseDial() { - a.logf("noise dial (%v, %v) = (%v, %v)", u, optAddr, cbConn, err) + a.logf("noise dial (%v, %v) = (%v, %v)", u, cmp.Or(optACEHost, optAddr.String()), cbConn, err) } select { case ch <- tryURLRes{u, cbConn, err}: @@ -423,12 +437,12 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, // // If optAddr is valid, then no DNS is used and the connection will be made to the // provided address. 
-func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr) (*ClientConn, error) { +func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr, optACEHost string) (*ClientConn, error) { init, cont, err := controlbase.ClientDeferred(a.MachineKey, a.ControlKey, a.ProtocolVersion) if err != nil { return nil, err } - netConn, err := a.tryURLUpgrade(ctx, u, optAddr, init) + netConn, err := a.tryURLUpgrade(ctx, u, optAddr, optACEHost, init) if err != nil { return nil, err } @@ -480,7 +494,7 @@ var macOSScreenTime = health.Register(&health.Warnable{ // the provided address. // // Only the provided ctx is used, not a.ctx. -func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, init []byte) (_ net.Conn, retErr error) { +func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, optACEHost string, init []byte) (_ net.Conn, retErr error) { var dns *dnscache.Resolver // If we were provided an address to dial, then create a resolver that just @@ -502,6 +516,14 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad dialer = stdDialer.DialContext } + if optACEHost != "" { + dialer = (&ace.Dialer{ + ACEHost: optACEHost, + ACEHostIP: optAddr, // may be zero + NetDialer: dialer, + }).Dial + } + // On macOS, see if Screen Time is blocking things. if runtime.GOOS == "darwin" { var proxydIntercepted atomic.Bool // intercepted by macOS webfilterproxyd @@ -528,9 +550,17 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad tr := http.DefaultTransport.(*http.Transport).Clone() defer tr.CloseIdleConnections() - tr.Proxy = a.getProxyFunc() - tshttpproxy.SetTransportGetProxyConnectHeader(tr) - tr.DialContext = dnscache.Dialer(dialer, dns) + if optACEHost != "" { + // If using ACE, we don't want to use any HTTP proxy. + // ACE is already a tunnel+proxy. + // TODO(tailscale/corp#32483): use system proxy too? 
+ tr.Proxy = nil + tr.DialContext = dialer + } else { + tr.Proxy = a.getProxyFunc() + tshttpproxy.SetTransportGetProxyConnectHeader(tr) + tr.DialContext = dnscache.Dialer(dialer, dns) + } // Disable HTTP2, since h2 can't do protocol switching. tr.TLSClientConfig.NextProtos = []string{} tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{} diff --git a/net/ace/ace.go b/net/ace/ace.go new file mode 100644 index 0000000000000..1bb64d64d19ab --- /dev/null +++ b/net/ace/ace.go @@ -0,0 +1,123 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package ace implements a Dialer that dials via a Tailscale ACE (CONNECT) +// proxy. +// +// TODO: document this more, when it's more done. As of 2025-09-17, it's in +// development. +package ace + +import ( + "bufio" + "cmp" + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/netip" + "sync/atomic" +) + +// Dialer is an HTTP CONNECT proxy dialer to dial the control plane via an ACE +// proxy. 
+type Dialer struct { + ACEHost string + ACEHostIP netip.Addr // optional; if non-zero, use this IP instead of DNS + ACEPort int // zero means 443 + + NetDialer func(ctx context.Context, network, address string) (net.Conn, error) +} + +func (d *Dialer) netDialer() func(ctx context.Context, network, address string) (net.Conn, error) { + if d.NetDialer != nil { + return d.NetDialer + } + var std net.Dialer + return std.DialContext +} + +func (d *Dialer) acePort() int { return cmp.Or(d.ACEPort, 443) } + +func (d *Dialer) Dial(ctx context.Context, network, address string) (_ net.Conn, err error) { + if network != "tcp" { + return nil, errors.New("only TCP is supported") + } + + var targetHost string + if d.ACEHostIP.IsValid() { + targetHost = d.ACEHostIP.String() + } else { + targetHost = d.ACEHost + } + + cc, err := d.netDialer()(ctx, "tcp", net.JoinHostPort(targetHost, fmt.Sprint(d.acePort()))) + if err != nil { + return nil, err + } + + // Now that we've dialed, we're about to do three potentially blocking + // operations: the TLS handshake, the CONNECT write, and the HTTP response + // read. To make our context work over all that, we use a context.AfterFunc + // to start a goroutine that'll tear down the underlying connection if the + // context expires. + // + // To prevent races, we use an atomic.Bool to guard access to the underlying + // connection being either good or bad. Only one goroutine (the success path + // in this goroutine after the ReadResponse or the AfterFunc's failure + // goroutine) will compare-and-swap it from false to true. + var done atomic.Bool + stop := context.AfterFunc(ctx, func() { + if done.CompareAndSwap(false, true) { + cc.Close() + } + }) + defer func() { + if err != nil { + if ctx.Err() != nil { + // Prefer the context error. 
The other error is likely a side + // effect of the context expiring and our tearing down of the + // underlying connection, and is thus probably something like + // "use of closed network connection", which isn't useful (and + // actually misleading) for the caller. + err = ctx.Err() + } + stop() + cc.Close() + } + }() + + tc := tls.Client(cc, &tls.Config{ServerName: d.ACEHost}) + if err := tc.Handshake(); err != nil { + return nil, err + } + + // TODO(tailscale/corp#32484): send proxy-auth header + if _, err := fmt.Fprintf(tc, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", address, d.ACEHost); err != nil { + return nil, err + } + + br := bufio.NewReader(tc) + connRes, err := http.ReadResponse(br, &http.Request{Method: "CONNECT"}) + if err != nil { + return nil, fmt.Errorf("reading CONNECT response: %w", err) + } + + // Now that we're done with blocking operations, mark the connection + // as good, to prevent the context's AfterFunc from closing it. + if !stop() || !done.CompareAndSwap(false, true) { + // We lost a race and the context expired. + return nil, ctx.Err() + } + + if connRes.StatusCode != http.StatusOK { + return nil, fmt.Errorf("ACE CONNECT response: %s", connRes.Status) + } + + if br.Buffered() > 0 { + return nil, fmt.Errorf("unexpected %d bytes of buffered data after ACE CONNECT", br.Buffered()) + } + return tc, nil +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 057e1a54b6522..88cda044f6d7f 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2264,7 +2264,14 @@ type ControlDialPlan struct { // connecting to the control server. type ControlIPCandidate struct { // IP is the address to attempt connecting to. - IP netip.Addr + IP netip.Addr `json:",omitzero"` + + // ACEHost, if non-empty, means that the client should connect to the + // control plane using an HTTPS CONNECT request to the provided hostname. 
If + // the IP field is also set, then the IP is the IP address of the ACEHost + // (and not the control plane) and DNS should not be used. The target (the + // argument to CONNECT) is always the control plane's hostname, not an IP. + ACEHost string `json:",omitempty"` // DialStartSec is the number of seconds after the beginning of the // connection process to wait before trying this candidate. diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 4fd9b7dbaacbb..5f7ca2e329190 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -269,6 +269,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ From 5e698a81b688c57a7241f69385a5461b53b5aa7f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 17 Sep 2025 09:44:50 -0700 Subject: [PATCH 0378/1093] cmd/tailscaled: make the outbound HTTP/SOCKS5 proxy modular Updates #12614 Change-Id: Icba6f1c0838dce6ee13aa2dc662fb551813262e4 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/deps_test.go | 13 +++ cmd/tailscaled/proxy.go | 94 ++++++++++++++++++- cmd/tailscaled/tailscaled.go | 90 ++++-------------- .../feature_netstack_disabled.go | 13 +++ .../buildfeatures/feature_netstack_enabled.go | 13 +++ .../feature_outboundproxy_disabled.go | 13 +++ .../feature_outboundproxy_enabled.go | 13 +++ feature/featuretags/featuretags.go | 36 ++++--- feature/featuretags/featuretags_test.go | 8 +- 9 files changed, 207 insertions(+), 86 deletions(-) create mode 100644 feature/buildfeatures/feature_netstack_disabled.go create mode 100644 
feature/buildfeatures/feature_netstack_enabled.go create mode 100644 feature/buildfeatures/feature_outboundproxy_disabled.go create mode 100644 feature/buildfeatures/feature_outboundproxy_enabled.go diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 538cdc115426b..50e584fe02444 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -135,3 +135,16 @@ func TestOmitOAuthKey(t *testing.T) { }, }.Check(t) } + +func TestOmitOutboundProxy(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_outboundproxy,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "socks5") || strings.Contains(dep, "proxymux") { + t.Errorf("unexpected dep with ts_omit_outboundproxy: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tailscaled/proxy.go b/cmd/tailscaled/proxy.go index a91c62bfa44ac..790b5e18ebe4d 100644 --- a/cmd/tailscaled/proxy.go +++ b/cmd/tailscaled/proxy.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.19 +//go:build !ts_omit_outboundproxy // HTTP proxy code @@ -9,13 +9,105 @@ package main import ( "context" + "flag" "io" + "log" "net" "net/http" "net/http/httputil" "strings" + + "tailscale.com/net/proxymux" + "tailscale.com/net/socks5" + "tailscale.com/net/tsdial" + "tailscale.com/net/tshttpproxy" + "tailscale.com/types/logger" ) +func init() { + hookRegisterOutboundProxyFlags.Set(registerOutboundProxyFlags) + hookOutboundProxyListen.Set(outboundProxyListen) +} + +func registerOutboundProxyFlags() { + flag.StringVar(&args.socksAddr, "socks5-server", "", `optional [ip]:port to run a SOCK5 server (e.g. "localhost:1080")`) + flag.StringVar(&args.httpProxyAddr, "outbound-http-proxy-listen", "", `optional [ip]:port to run an outbound HTTP proxy (e.g. "localhost:8080")`) +} + +// outboundProxyListen creates listeners for local SOCKS and HTTP proxies, if +// the respective addresses are not empty. 
args.socksAddr and args.httpProxyAddr +// can be the same, in which case the SOCKS5 Listener will receive connections +// that look like they're speaking SOCKS and httpListener will receive +// everything else. +// +// socksListener and httpListener can be nil, if their respective addrs are +// empty. +// +// The returned func closes over those two (possibly nil) listeners and +// starts the respective servers on the listener when called. +func outboundProxyListen() proxyStartFunc { + socksAddr, httpAddr := args.socksAddr, args.httpProxyAddr + + if socksAddr == httpAddr && socksAddr != "" && !strings.HasSuffix(socksAddr, ":0") { + ln, err := net.Listen("tcp", socksAddr) + if err != nil { + log.Fatalf("proxy listener: %v", err) + } + return mkProxyStartFunc(proxymux.SplitSOCKSAndHTTP(ln)) + } + + var socksListener, httpListener net.Listener + var err error + if socksAddr != "" { + socksListener, err = net.Listen("tcp", socksAddr) + if err != nil { + log.Fatalf("SOCKS5 listener: %v", err) + } + if strings.HasSuffix(socksAddr, ":0") { + // Log kernel-selected port number so integration tests + // can find it portably. + log.Printf("SOCKS5 listening on %v", socksListener.Addr()) + } + } + if httpAddr != "" { + httpListener, err = net.Listen("tcp", httpAddr) + if err != nil { + log.Fatalf("HTTP proxy listener: %v", err) + } + if strings.HasSuffix(httpAddr, ":0") { + // Log kernel-selected port number so integration tests + // can find it portably. 
+ log.Printf("HTTP proxy listening on %v", httpListener.Addr()) + } + } + + return mkProxyStartFunc(socksListener, httpListener) +} + +func mkProxyStartFunc(socksListener, httpListener net.Listener) proxyStartFunc { + return func(logf logger.Logf, dialer *tsdial.Dialer) { + var addrs []string + if httpListener != nil { + hs := &http.Server{Handler: httpProxyHandler(dialer.UserDial)} + go func() { + log.Fatalf("HTTP proxy exited: %v", hs.Serve(httpListener)) + }() + addrs = append(addrs, httpListener.Addr().String()) + } + if socksListener != nil { + ss := &socks5.Server{ + Logf: logger.WithPrefix(logf, "socks5: "), + Dialer: dialer.UserDial, + } + go func() { + log.Fatalf("SOCKS5 server exited: %v", ss.Serve(socksListener)) + }() + addrs = append(addrs, socksListener.Addr().String()) + } + tshttpproxy.SetSelfProxy(addrs...) + } +} + // httpProxyHandler returns an HTTP proxy http.Handler using the // provided backend dialer. func httpProxyHandler(dialer func(ctx context.Context, netw, addr string) (net.Conn, error)) http.Handler { diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 734c8e8e88342..9e099f9cba9d2 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -48,10 +48,7 @@ import ( "tailscale.com/net/dnsfallback" "tailscale.com/net/netmon" "tailscale.com/net/netns" - "tailscale.com/net/proxymux" - "tailscale.com/net/socks5" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/net/tstun" "tailscale.com/paths" "tailscale.com/safesocket" @@ -176,6 +173,17 @@ func shouldRunCLI() bool { return false } +// Outbound Proxy hooks +var ( + hookRegisterOutboundProxyFlags feature.Hook[func()] + hookOutboundProxyListen feature.Hook[func() proxyStartFunc] +) + +// proxyStartFunc is the type of the function returned by +// outboundProxyListen, to start the servers on the Listeners +// started by hookOutboundProxyListen. 
+type proxyStartFunc = func(logf logger.Logf, dialer *tsdial.Dialer) + func main() { envknob.PanicIfAnyEnvCheckedInInit() if shouldRunCLI() { @@ -190,8 +198,6 @@ func main() { flag.IntVar(&args.verbose, "verbose", defaultVerbosity(), "log verbosity level; 0 is default, 1 or higher are increasingly verbose") flag.BoolVar(&args.cleanUp, "cleanup", false, "clean up system state and exit") flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") - flag.StringVar(&args.socksAddr, "socks5-server", "", `optional [ip]:port to run a SOCK5 server (e.g. "localhost:1080")`) - flag.StringVar(&args.httpProxyAddr, "outbound-http-proxy-listen", "", `optional [ip]:port to run an outbound HTTP proxy (e.g. "localhost:8080")`) flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. 
Default: "+paths.DefaultTailscaledStateFile()) @@ -202,6 +208,9 @@ func main() { flag.BoolVar(&printVersion, "version", false, "print version information and exit") flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") + if f, ok := hookRegisterOutboundProxyFlags.GetOk(); ok { + f() + } if runtime.GOOS == "plan9" && os.Getenv("_NETSHELL_CHILD_") != "" { os.Args = []string{"tailscaled", "be-child", "plan9-netshell"} @@ -595,7 +604,10 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID logPol.Logtail.SetNetMon(sys.NetMon.Get()) } - socksListener, httpProxyListener := mustStartProxyListeners(args.socksAddr, args.httpProxyAddr) + var startProxy proxyStartFunc + if listen, ok := hookOutboundProxyListen.GetOk(); ok { + startProxy = listen() + } dialer := &tsdial.Dialer{Logf: logf} // mutated below (before used) sys.Set(dialer) @@ -646,26 +658,8 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID return udpConn, nil } } - if socksListener != nil || httpProxyListener != nil { - var addrs []string - if httpProxyListener != nil { - hs := &http.Server{Handler: httpProxyHandler(dialer.UserDial)} - go func() { - log.Fatalf("HTTP proxy exited: %v", hs.Serve(httpProxyListener)) - }() - addrs = append(addrs, httpProxyListener.Addr().String()) - } - if socksListener != nil { - ss := &socks5.Server{ - Logf: logger.WithPrefix(logf, "socks5: "), - Dialer: dialer.UserDial, - } - go func() { - log.Fatalf("SOCKS5 server exited: %v", ss.Serve(socksListener)) - }() - addrs = append(addrs, socksListener.Addr().String()) - } - tshttpproxy.SetSelfProxy(addrs...) 
+ if startProxy != nil { + go startProxy(logf, dialer) } opts := ipnServerOpts() @@ -893,50 +887,6 @@ func newNetstack(logf logger.Logf, sys *tsd.System) (*netstack.Impl, error) { return ret, nil } -// mustStartProxyListeners creates listeners for local SOCKS and HTTP -// proxies, if the respective addresses are not empty. socksAddr and -// httpAddr can be the same, in which case socksListener will receive -// connections that look like they're speaking SOCKS and httpListener -// will receive everything else. -// -// socksListener and httpListener can be nil, if their respective -// addrs are empty. -func mustStartProxyListeners(socksAddr, httpAddr string) (socksListener, httpListener net.Listener) { - if socksAddr == httpAddr && socksAddr != "" && !strings.HasSuffix(socksAddr, ":0") { - ln, err := net.Listen("tcp", socksAddr) - if err != nil { - log.Fatalf("proxy listener: %v", err) - } - return proxymux.SplitSOCKSAndHTTP(ln) - } - - var err error - if socksAddr != "" { - socksListener, err = net.Listen("tcp", socksAddr) - if err != nil { - log.Fatalf("SOCKS5 listener: %v", err) - } - if strings.HasSuffix(socksAddr, ":0") { - // Log kernel-selected port number so integration tests - // can find it portably. - log.Printf("SOCKS5 listening on %v", socksListener.Addr()) - } - } - if httpAddr != "" { - httpListener, err = net.Listen("tcp", httpAddr) - if err != nil { - log.Fatalf("HTTP proxy listener: %v", err) - } - if strings.HasSuffix(httpAddr, ":0") { - // Log kernel-selected port number so integration tests - // can find it portably. 
- log.Printf("HTTP proxy listening on %v", httpListener.Addr()) - } - } - - return socksListener, httpListener -} - var beChildFunc = beChild func beChild(args []string) error { diff --git a/feature/buildfeatures/feature_netstack_disabled.go b/feature/buildfeatures/feature_netstack_disabled.go new file mode 100644 index 0000000000000..7369645a0d0b1 --- /dev/null +++ b/feature/buildfeatures/feature_netstack_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_netstack + +package buildfeatures + +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support (TODO; not yet omittable)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetstack = false diff --git a/feature/buildfeatures/feature_netstack_enabled.go b/feature/buildfeatures/feature_netstack_enabled.go new file mode 100644 index 0000000000000..a7e57098b5c42 --- /dev/null +++ b/feature/buildfeatures/feature_netstack_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_netstack + +package buildfeatures + +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support (TODO; not yet omittable)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasNetstack = true diff --git a/feature/buildfeatures/feature_outboundproxy_disabled.go b/feature/buildfeatures/feature_outboundproxy_disabled.go new file mode 100644 index 0000000000000..a84c24e6d0e0d --- /dev/null +++ b/feature/buildfeatures/feature_outboundproxy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_outboundproxy + +package buildfeatures + +// HasOutboundProxy is whether the binary was built with support for modular feature "Outbound localhost HTTP/SOCK5 proxy support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. +// It's a const so it can be used for dead code elimination. +const HasOutboundProxy = false diff --git a/feature/buildfeatures/feature_outboundproxy_enabled.go b/feature/buildfeatures/feature_outboundproxy_enabled.go new file mode 100644 index 0000000000000..c306bbeb205bc --- /dev/null +++ b/feature/buildfeatures/feature_outboundproxy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_outboundproxy + +package buildfeatures + +// HasOutboundProxy is whether the binary was built with support for modular feature "Outbound localhost HTTP/SOCK5 proxy support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasOutboundProxy = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 325f46a44906e..ec21122db1cad 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -106,17 +106,31 @@ var Features = map[FeatureTag]FeatureMeta{ "drive": {"Drive", "Tailscale Drive (file server) support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, - "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, - "relayserver": {"RelayServer", "Relay server", nil}, - "serve": {"Serve", "Serve and Funnel support", nil}, - "ssh": {"SSH", "Tailscale SSH support", nil}, - "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, - "systray": {"SysTray", "Linux system tray", nil}, - "taildrop": {"Taildrop", "Taildrop (file sending) support", nil}, - "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, - "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, - "tpm": {"TPM", "TPM support", nil}, - "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, + "outboundproxy": { + Sym: "OutboundProxy", + Desc: "Outbound localhost HTTP/SOCK5 proxy support", + Deps: []FeatureTag{"netstack"}, + }, + "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, + "netstack": {"Netstack", "gVisor netstack (userspace networking) support (TODO; not yet omittable)", nil}, + "relayserver": {"RelayServer", "Relay server", nil}, + "serve": { + Sym: "Serve", + Desc: "Serve and Funnel support", + Deps: []FeatureTag{"netstack"}, + }, + "ssh": { + Sym: "SSH", + Desc: "Tailscale SSH support", + Deps: []FeatureTag{"netstack"}, + }, + "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, + "systray": {"SysTray", "Linux system tray", nil}, + "taildrop": {"Taildrop", "Taildrop (file sending) support", nil}, + "tailnetlock": {"TailnetLock", "Tailnet Lock 
support", nil}, + "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, + "tpm": {"TPM", "TPM support", nil}, + "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, "webclient": { Sym: "WebClient", Desc: "Web client support", Deps: []FeatureTag{"serve"}, diff --git a/feature/featuretags/featuretags_test.go b/feature/featuretags/featuretags_test.go index 4a268c90da311..b1524ce4f20f7 100644 --- a/feature/featuretags/featuretags_test.go +++ b/feature/featuretags/featuretags_test.go @@ -11,7 +11,7 @@ import ( "tailscale.com/util/set" ) -func TestRequires(t *testing.T) { +func TestKnownDeps(t *testing.T) { for tag, meta := range Features { for _, dep := range meta.Deps { if _, ok := Features[dep]; !ok { @@ -26,7 +26,7 @@ func TestRequires(t *testing.T) { } } -func TestDepSet(t *testing.T) { +func TestRequires(t *testing.T) { var setOf = set.Of[FeatureTag] tests := []struct { in FeatureTag @@ -38,11 +38,11 @@ func TestDepSet(t *testing.T) { }, { in: "serve", - want: setOf("serve"), + want: setOf("serve", "netstack"), }, { in: "webclient", - want: setOf("webclient", "serve"), + want: setOf("webclient", "serve", "netstack"), }, } for _, tt := range tests { From d559a214189d40a9493e2a2df3f46dc1b08928c0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 19 Sep 2025 10:34:55 -0700 Subject: [PATCH 0379/1093] util/eventbus/eventbustest: fix typo of test name And another case of the same typo in a comment elsewhere. 
Updates #cleanup Change-Id: Iaa9d865a1cf83318d4a30263c691451b5d708c9c Signed-off-by: Brad Fitzpatrick --- cmd/containerboot/egressservices.go | 2 +- util/eventbus/eventbustest/examples_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/containerboot/egressservices.go b/cmd/containerboot/egressservices.go index 64ca0a13a4ed7..fe835a69e0b82 100644 --- a/cmd/containerboot/egressservices.go +++ b/cmd/containerboot/egressservices.go @@ -570,7 +570,7 @@ func ensureRulesAdded(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner } // ensureRulesDeleted ensures that the given rules are deleted from the firewall -// configuration. For any rules that do not exist, calling this funcion is a +// configuration. For any rules that do not exist, calling this function is a // no-op. func ensureRulesDeleted(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error { for svc, rules := range rulesPerSvc { diff --git a/util/eventbus/eventbustest/examples_test.go b/util/eventbus/eventbustest/examples_test.go index 914e29933b2a2..bc06e60a9230b 100644 --- a/util/eventbus/eventbustest/examples_test.go +++ b/util/eventbus/eventbustest/examples_test.go @@ -157,7 +157,7 @@ func TestExample_Expect_WithMultipleFunctions(t *testing.T) { // OK } -func TestExample_ExpectExactly_WithMultipleFuncions(t *testing.T) { +func TestExample_ExpectExactly_WithMultipleFunctions(t *testing.T) { type eventOfInterest struct { value int } From 009d702adfa0fca9f0319f6767f6a3259e484092 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 19 Sep 2025 14:58:37 -0400 Subject: [PATCH 0380/1093] health: remove direct callback and replace with eventbus (#17199) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pulls out the last callback logic and ensures timers are still running. The eventbustest package is updated support the absence of events. 
Updates #15160 Signed-off-by: Claus Lensbøl --- control/controlclient/auto.go | 40 +++++- health/health.go | 120 +++--------------- health/health_test.go | 88 +++++++------ util/eventbus/eventbustest/eventbustest.go | 7 +- .../eventbustest/eventbustest_test.go | 2 +- 5 files changed, 110 insertions(+), 147 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 7bca6c8d8b316..bbc129c5e943e 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -23,6 +23,7 @@ import ( "tailscale.com/types/persist" "tailscale.com/types/structs" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/execqueue" ) @@ -122,7 +123,9 @@ type Auto struct { observerQueue execqueue.ExecQueue shutdownFn func() // to be called prior to shutdown or nil - unregisterHealthWatch func() + eventClient *eventbus.Client + healthChangeSub *eventbus.Subscriber[health.Change] + subsDoneCh chan struct{} // close-only channel when eventClient has closed mu sync.Mutex // mutex guards the following fields @@ -192,21 +195,42 @@ func NewNoStart(opts Options) (_ *Auto, err error) { updateDone: make(chan struct{}), observer: opts.Observer, shutdownFn: opts.Shutdown, + subsDoneCh: make(chan struct{}), } + + c.eventClient = opts.Bus.Client("controlClient.Auto") + c.healthChangeSub = eventbus.Subscribe[health.Change](c.eventClient) + c.authCtx, c.authCancel = context.WithCancel(context.Background()) c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf) c.mapCtx, c.mapCancel = context.WithCancel(context.Background()) c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, opts.Logf) - c.unregisterHealthWatch = opts.HealthTracker.RegisterWatcher(func(c health.Change) { - if c.WarnableChanged { - direct.ReportWarnableChange(c.Warnable, c.UnhealthyState) - } - }) + go c.consumeEventbusTopics() return c, nil } +// consumeEventbusTopics consumes events from all 
relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [eventbus.Client] is closed. +func (c *Auto) consumeEventbusTopics() { + defer close(c.subsDoneCh) + + for { + select { + case <-c.eventClient.Done(): + return + case change := <-c.healthChangeSub.Events(): + if change.WarnableChanged { + c.direct.ReportWarnableChange(change.Warnable, change.UnhealthyState) + } + } + } +} + // SetPaused controls whether HTTP activity should be paused. // // The client can be paused and unpaused repeatedly, unlike Start and Shutdown, which can only be used once. @@ -760,6 +784,9 @@ func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) { } func (c *Auto) Shutdown() { + c.eventClient.Close() + <-c.subsDoneCh + c.mu.Lock() if c.closed { c.mu.Unlock() @@ -783,7 +810,6 @@ func (c *Auto) Shutdown() { shutdownFn() } - c.unregisterHealthWatch() <-c.authDone <-c.mapDone <-c.updateDone diff --git a/health/health.go b/health/health.go index c456b53cbf174..3d1c46a3d945b 100644 --- a/health/health.go +++ b/health/health.go @@ -28,7 +28,6 @@ import ( "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/multierr" - "tailscale.com/util/set" "tailscale.com/util/usermetric" "tailscale.com/version" ) @@ -65,6 +64,21 @@ var receiveNames = []string{ // Tracker tracks the health of various Tailscale subsystems, // comparing each subsystems' state with each other to make sure // they're consistent based on the user's intended state. 
+// +// If a client [Warnable] becomes unhealthy or its unhealthy state is updated, +// an event will be emitted with WarnableChanged set to true and the Warnable +// and its UnhealthyState: +// +// Change{WarnableChanged: true, Warnable: w, UnhealthyState: us} +// +// If a Warnable becomes healthy, an event will be emitted with +// WarnableChanged set to true, the Warnable set, and UnhealthyState set to nil: +// +// Change{WarnableChanged: true, Warnable: w, UnhealthyState: nil} +// +// If the health messages from the control-plane change, an event will be +// emitted with ControlHealthChanged set to true. Recipients can fetch the set of +// control-plane health messages by calling [Tracker.CurrentState]: type Tracker struct { // MagicSockReceiveFuncs tracks the state of the three // magicsock receive functions: IPv4, IPv6, and DERP. @@ -91,9 +105,8 @@ type Tracker struct { // sysErr maps subsystems to their current error (or nil if the subsystem is healthy) // Deprecated: using Warnables should be preferred - sysErr map[Subsystem]error - watchers set.HandleSet[func(Change)] // opt func to run if error state changes - timer tstime.TimerController + sysErr map[Subsystem]error + timer tstime.TimerController latestVersion *tailcfg.ClientVersion // or nil checkForUpdates bool @@ -131,10 +144,12 @@ func NewTracker(bus *eventbus.Bus) *Tracker { } cli := bus.Client("health.Tracker") - return &Tracker{ + t := &Tracker{ eventClient: cli, changePub: eventbus.Publish[Change](cli), } + t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) + return t } func (t *Tracker) now() time.Time { @@ -455,33 +470,6 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { }) mak.Set(&t.pendingVisibleTimers, w, tc) } - - // Direct callbacks - // TODO(cmol): Remove once all watchers have been moved to events - for _, cb := range t.watchers { - // If the Warnable has been unhealthy for more than its TimeToVisible, the callback should be - // executed immediately. 
Otherwise, the callback should be enqueued to run once the Warnable - // becomes visible. - if w.IsVisible(ws, t.now) { - cb(change) - continue - } - - // The time remaining until the Warnable will be visible to the user is the TimeToVisible - // minus the time that has already passed since the Warnable became unhealthy. - visibleIn := w.TimeToVisible - t.now().Sub(brokenSince) - var tc tstime.TimerController = t.clock().AfterFunc(visibleIn, func() { - t.mu.Lock() - defer t.mu.Unlock() - // Check if the Warnable is still unhealthy, as it could have become healthy between the time - // the timer was set for and the time it was executed. - if t.warnableVal[w] != nil { - cb(change) - delete(t.pendingVisibleTimers, w) - } - }) - mak.Set(&t.pendingVisibleTimers, w, tc) - } } } @@ -514,10 +502,6 @@ func (t *Tracker) setHealthyLocked(w *Warnable) { Warnable: w, } t.changePub.Publish(change) - for _, cb := range t.watchers { - // TODO(cmol): Remove once all watchers have been moved to events - cb(change) - } } // notifyWatchersControlChangedLocked calls each watcher to signal that control @@ -526,13 +510,7 @@ func (t *Tracker) notifyWatchersControlChangedLocked() { change := Change{ ControlHealthChanged: true, } - if t.changePub != nil { - t.changePub.Publish(change) - } - for _, cb := range t.watchers { - // TODO(cmol): Remove once all watchers have been moved to events - cb(change) - } + t.changePub.Publish(change) } // AppendWarnableDebugFlags appends to base any health items that are currently in failed @@ -577,62 +555,6 @@ type Change struct { UnhealthyState *UnhealthyState } -// RegisterWatcher adds a function that will be called its own goroutine -// whenever the health state of any client [Warnable] or control-plane health -// messages changes. The returned function can be used to unregister the -// callback. 
-// -// If a client [Warnable] becomes unhealthy or its unhealthy state is updated, -// the callback will be called with WarnableChanged set to true and the Warnable -// and its UnhealthyState: -// -// go cb(Change{WarnableChanged: true, Warnable: w, UnhealthyState: us}) -// -// If a Warnable becomes healthy, the callback will be called with -// WarnableChanged set to true, the Warnable set, and UnhealthyState set to nil: -// -// go cb(Change{WarnableChanged: true, Warnable: w, UnhealthyState: nil}) -// -// If the health messages from the control-plane change, the callback will be -// called with ControlHealthChanged set to true. Recipients can fetch the set of -// control-plane health messages by calling [Tracker.CurrentState]: -// -// go cb(Change{ControlHealthChanged: true}) -func (t *Tracker) RegisterWatcher(cb func(Change)) (unregister func()) { - return t.registerSyncWatcher(func(c Change) { - go cb(c) - }) -} - -// registerSyncWatcher adds a function that will be called whenever the health -// state changes. The provided callback function will be executed synchronously. -// Call RegisterWatcher to register any callbacks that won't return from -// execution immediately. -func (t *Tracker) registerSyncWatcher(cb func(c Change)) (unregister func()) { - if t.nil() { - return func() {} - } - t.initOnce.Do(t.doOnceInit) - t.mu.Lock() - defer t.mu.Unlock() - if t.watchers == nil { - t.watchers = set.HandleSet[func(Change)]{} - } - handle := t.watchers.Add(cb) - if t.timer == nil { - t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) - } - return func() { - t.mu.Lock() - defer t.mu.Unlock() - delete(t.watchers, handle) - if len(t.watchers) == 0 && t.timer != nil { - t.timer.Stop() - t.timer = nil - } - } -} - // SetRouterHealth sets the state of the wgengine/router.Router. // // Deprecated: Warnables should be preferred over Subsystem errors. 
diff --git a/health/health_test.go b/health/health_test.go index c55b0e1f3b5a5..3ada377556909 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -4,6 +4,7 @@ package health import ( + "errors" "fmt" "maps" "reflect" @@ -158,15 +159,6 @@ func TestWatcher(t *testing.T) { name string preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) }{ - { - name: "with-callbacks", - preFunc: func(t *testing.T, tht *Tracker, _ *eventbus.Bus, fn func(c Change)) { - t.Cleanup(tht.RegisterWatcher(fn)) - if len(tht.watchers) != 1 { - t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(tht.watchers)) - } - }, - }, { name: "with-eventbus", preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { @@ -254,15 +246,6 @@ func TestSetUnhealthyWithTimeToVisible(t *testing.T) { name string preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) }{ - { - name: "with-callbacks", - preFunc: func(t *testing.T, tht *Tracker, _ *eventbus.Bus, fn func(c Change)) { - t.Cleanup(tht.RegisterWatcher(fn)) - if len(tht.watchers) != 1 { - t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(tht.watchers)) - } - }, - }, { name: "with-eventbus", preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { @@ -668,7 +651,7 @@ func TestControlHealthNotifies(t *testing.T) { name string initialState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage newState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage - wantNotify bool + wantEvents []any } tests := []test{ { @@ -679,7 +662,7 @@ func TestControlHealthNotifies(t *testing.T) { newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "test": {}, }, - wantNotify: false, + wantEvents: []any{}, }, { name: "on-set", @@ -687,7 +670,9 @@ func TestControlHealthNotifies(t *testing.T) { newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "test": {}, }, - wantNotify: true, + wantEvents: []any{ + 
eventbustest.Type[Change](), + }, }, { name: "details-change", @@ -701,7 +686,9 @@ func TestControlHealthNotifies(t *testing.T) { Title: "Updated title", }, }, - wantNotify: true, + wantEvents: []any{ + eventbustest.Type[Change](), + }, }, { name: "action-changes", @@ -721,42 +708,54 @@ func TestControlHealthNotifies(t *testing.T) { }, }, }, - wantNotify: true, + wantEvents: []any{ + eventbustest.Type[Change](), + }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ht := NewTracker(eventbustest.NewBus(t)) + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + tw.TimeOut = time.Second + + ht := NewTracker(bus) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() + // Expect events at starup, before doing anything else + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[Change](), // warming-up + eventbustest.Type[Change](), // is-using-unstable-version + eventbustest.Type[Change](), // not-in-map-poll + ); err != nil { + t.Errorf("startup error: %v", err) + } + + // Only set initial state if we need to if len(test.initialState) != 0 { ht.SetControlHealth(test.initialState) + if err := eventbustest.ExpectExactly(tw, eventbustest.Type[Change]()); err != nil { + t.Errorf("initial state error: %v", err) + } } - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) - ht.SetControlHealth(test.newState) - if gotNotified != test.wantNotify { - t.Errorf("notified: got %v, want %v", gotNotified, test.wantNotify) + if err := eventbustest.ExpectExactly(tw, test.wantEvents...); err != nil { + t.Errorf("event error: %v", err) } }) } } func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { - ht := NewTracker(eventbustest.NewBus(t)) + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + tw.TimeOut = 100 * time.Millisecond + ht := NewTracker(bus) ht.SetIPNState("NeedsLogin", true) - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = 
true - }) - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "control-health": {}, }) @@ -768,8 +767,19 @@ func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { t.Error("got a warning with code 'control-health', want none") } - if gotNotified { - t.Error("watcher got called, want it to not be called") + // An event is emitted when SetIPNState is run above, + // so only fail on the second event. + eventCounter := 0 + expectOne := func(c *Change) error { + eventCounter++ + if eventCounter == 1 { + return nil + } + return errors.New("saw more than 1 event") + } + + if err := eventbustest.Expect(tw, expectOne); err == nil { + t.Error("event got emitted, want it to not be called") } } diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index 0916ae52280cf..3f7bf45531db4 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -120,7 +120,12 @@ func Expect(tw *Watcher, filters ...any) error { // [Expect]. Use [Expect] if other events are allowed. 
func ExpectExactly(tw *Watcher, filters ...any) error { if len(filters) == 0 { - return errors.New("no event filters were provided") + select { + case event := <-tw.events: + return fmt.Errorf("saw event type %s, expected none", reflect.TypeOf(event)) + case <-time.After(tw.TimeOut): + return nil + } } eventCount := 0 for pos, next := range filters { diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index 7a6b511c7bae0..2d126767d13ce 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -250,7 +250,7 @@ func TestExpectEvents(t *testing.T) { tw := eventbustest.NewWatcher(t, bus) // TODO(cmol): When synctest is out of experimental, use that instead: // https://go.dev/blog/synctest - tw.TimeOut = 10 * time.Millisecond + tw.TimeOut = 100 * time.Millisecond client := bus.Client("testClient") defer client.Close() From ca9d79500615082dc46fffc4b1d93ad66fa6b8eb Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 19 Sep 2025 12:34:06 -0700 Subject: [PATCH 0381/1093] util/eventbus: add a Monitor type to manage subscriber goroutines (#17127) A common pattern in event bus usage is to run a goroutine to service a collection of subscribers on a single bus client. To have an orderly shutdown, however, we need a way to wait for such a goroutine to be finished. This commit adds a Monitor type that makes this pattern easier to wire up: rather than having to track all the subscribers and an extra channel, the component need only track the client and the monitor. 
For example: cli := bus.Client("example") m := cli.Monitor(func(c *eventbus.Client) { s1 := eventbus.Subscribe[T](cli) s2 := eventbus.Subscribe[U](cli) for { select { case <-c.Done(): return case t := <-s1.Events(): processT(t) case u := <-s2.Events(): processU(u) } } }) To shut down the client and wait for the goroutine, the caller can write: m.Close() which closes cli and waits for the goroutine to finish. Or, separately: cli.Close() // do other stuff m.Wait() While the goroutine management is not explicitly tied to subscriptions, it is a common enough pattern that this seems like a useful simplification in use. Updates #15160 Change-Id: I657afda1cfaf03465a9dce1336e9fd518a968bca Signed-off-by: M. J. Fromberger --- util/eventbus/bus_test.go | 73 +++++++++++++++++++++++++++++++++++++++ util/eventbus/monitor.go | 42 ++++++++++++++++++++++ 2 files changed, 115 insertions(+) create mode 100644 util/eventbus/monitor.go diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 9fd0e440948e1..7782634ae92ab 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -221,6 +221,79 @@ func TestClient_Done(t *testing.T) { } } +func TestMonitor(t *testing.T) { + t.Run("ZeroWait", func(t *testing.T) { + var zero eventbus.Monitor + + ready := make(chan struct{}) + go func() { zero.Wait(); close(ready) }() + + select { + case <-ready: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Wait to return") + } + }) + + t.Run("ZeroClose", func(t *testing.T) { + var zero eventbus.Monitor + + ready := make(chan struct{}) + go func() { zero.Close(); close(ready) }() + + select { + case <-ready: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Close to return") + } + }) + + testMon := func(t *testing.T, release func(*eventbus.Client, eventbus.Monitor)) func(t *testing.T) { + t.Helper() + return func(t *testing.T) { + bus := eventbus.New() + cli := bus.Client("test client") + + // The monitored goroutine runs until 
the client or test subscription ends. + m := cli.Monitor(func(c *eventbus.Client) { + sub := eventbus.Subscribe[string](cli) + select { + case <-c.Done(): + t.Log("client closed") + case <-sub.Done(): + t.Log("subscription closed") + } + }) + + done := make(chan struct{}) + go func() { + defer close(done) + m.Wait() + }() + + // While the goroutine is running, Wait does not complete. + select { + case <-done: + t.Error("monitor is ready before its goroutine is finished") + default: + // OK + } + + release(cli, m) + select { + case <-done: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for monitor to complete") + } + } + } + t.Run("Close", testMon(t, func(_ *eventbus.Client, m eventbus.Monitor) { m.Close() })) + t.Run("Wait", testMon(t, func(c *eventbus.Client, m eventbus.Monitor) { c.Close(); m.Wait() })) +} + type queueChecker struct { t *testing.T want []any diff --git a/util/eventbus/monitor.go b/util/eventbus/monitor.go new file mode 100644 index 0000000000000..18cc2a413ddef --- /dev/null +++ b/util/eventbus/monitor.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +// A Monitor monitors the execution of a goroutine processing events from a +// [Client], allowing the caller to block until it is complete. The zero value +// of m is valid and its Close and Wait methods return immediately. +type Monitor struct { + // These fields are immutable after initialization + cli *Client + done <-chan struct{} +} + +// Close closes the client associated with m and blocks until the processing +// goroutine is complete. +func (m Monitor) Close() { + if m.cli == nil { + return + } + m.cli.Close() + <-m.done +} + +// Wait blocks until the goroutine monitored by m has finished executing, but +// does not close the associated client. It is safe to call Wait repeatedly, +// and from multiple concurrent goroutines. 
+func (m Monitor) Wait() { + if m.done == nil { + return + } + <-m.done +} + +// Monitor executes f in a new goroutine attended by a [Monitor]. The caller +// is responsible for waiting for the goroutine to complete, by calling either +// [Monitor.Close] or [Monitor.Wait]. +func (c *Client) Monitor(f func(*Client)) Monitor { + done := make(chan struct{}) + go func() { defer close(done); f(c) }() + return Monitor{cli: c, done: done} +} From 2b6bc11586b65259ed737d3f77e3879647ac9df3 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 19 Sep 2025 13:20:50 -0700 Subject: [PATCH 0382/1093] wgengine: use eventbus.Client.Monitor to simplify subscriber maintenance (#17203) This commit does not change the order or meaning of any eventbus activity, it only updates the way the plumbing is set up. Updates #15160 Change-Id: I40c23b183c2a6a6ea3feec7767c8e5417019fc07 Signed-off-by: M. J. Fromberger --- wgengine/magicsock/magicsock.go | 88 +++++++++++++++------------------ wgengine/userspace.go | 39 ++++++--------- 2 files changed, 56 insertions(+), 71 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 6eb5660762d0a..39a7bb2e687ba 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -156,7 +156,7 @@ type Conn struct { // struct. Initialized once at construction, then constant. eventBus *eventbus.Bus - eventClient *eventbus.Client + eventSubs eventbus.Monitor logf logger.Logf epFunc func([]tailcfg.Endpoint) derpActiveFunc func() @@ -176,17 +176,10 @@ type Conn struct { connCtxCancel func() // closes connCtx donec <-chan struct{} // connCtx.Done()'s to avoid context.cancelCtx.Done()'s mutex per call - // These [eventbus.Subscriber] fields are solely accessed by - // consumeEventbusTopics once initialized. 
- pmSub *eventbus.Subscriber[portmappertype.Mapping] - filterSub *eventbus.Subscriber[FilterUpdate] - nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] - nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] - syncSub *eventbus.Subscriber[syncPoint] + // A publisher for synchronization points to ensure correct ordering of + // config changes between magicsock and wireguard. syncPub *eventbus.Publisher[syncPoint] allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] - allocRelayEndpointSub *eventbus.Subscriber[UDPRelayAllocResp] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns // pconn4 and pconn6 are the underlying UDP sockets used to // send/receive packets for wireguard and other magicsock @@ -643,26 +636,34 @@ func newConn(logf logger.Logf) *Conn { // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. -func (c *Conn) consumeEventbusTopics() { - defer close(c.subsDoneCh) - - for { - select { - case <-c.eventClient.Done(): - return - case <-c.pmSub.Events(): - c.onPortMapChanged() - case filterUpdate := <-c.filterSub.Events(): - c.onFilterUpdate(filterUpdate) - case nodeViews := <-c.nodeViewsSub.Events(): - c.onNodeViewsUpdate(nodeViews) - case nodeMuts := <-c.nodeMutsSub.Events(): - c.onNodeMutationsUpdate(nodeMuts) - case syncPoint := <-c.syncSub.Events(): - c.dlogf("magicsock: received sync point after reconfig") - syncPoint.Signal() - case allocResp := <-c.allocRelayEndpointSub.Events(): - c.onUDPRelayAllocResp(allocResp) +func (c *Conn) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { + // Subscribe calls must return before NewConn otherwise published + // events can be missed. 
+ pmSub := eventbus.Subscribe[portmappertype.Mapping](cli) + filterSub := eventbus.Subscribe[FilterUpdate](cli) + nodeViewsSub := eventbus.Subscribe[NodeViewsUpdate](cli) + nodeMutsSub := eventbus.Subscribe[NodeMutationsUpdate](cli) + syncSub := eventbus.Subscribe[syncPoint](cli) + allocRelayEndpointSub := eventbus.Subscribe[UDPRelayAllocResp](cli) + return func(cli *eventbus.Client) { + for { + select { + case <-cli.Done(): + return + case <-pmSub.Events(): + c.onPortMapChanged() + case filterUpdate := <-filterSub.Events(): + c.onFilterUpdate(filterUpdate) + case nodeViews := <-nodeViewsSub.Events(): + c.onNodeViewsUpdate(nodeViews) + case nodeMuts := <-nodeMutsSub.Events(): + c.onNodeMutationsUpdate(nodeMuts) + case syncPoint := <-syncSub.Events(): + c.dlogf("magicsock: received sync point after reconfig") + syncPoint.Signal() + case allocResp := <-allocRelayEndpointSub.Events(): + c.onUDPRelayAllocResp(allocResp) + } } } } @@ -729,20 +730,12 @@ func NewConn(opts Options) (*Conn, error) { c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity - c.eventClient = c.eventBus.Client("magicsock.Conn") - - // Subscribe calls must return before NewConn otherwise published - // events can be missed. - c.pmSub = eventbus.Subscribe[portmappertype.Mapping](c.eventClient) - c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) - c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) - c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) - c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) - c.syncPub = eventbus.Publish[syncPoint](c.eventClient) - c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](c.eventClient) - c.allocRelayEndpointSub = eventbus.Subscribe[UDPRelayAllocResp](c.eventClient) - c.subsDoneCh = make(chan struct{}) - go c.consumeEventbusTopics() + // Set up publishers and subscribers. 
Subscribe calls must return before + // NewConn otherwise published events can be missed. + cli := c.eventBus.Client("magicsock.Conn") + c.syncPub = eventbus.Publish[syncPoint](cli) + c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](cli) + c.eventSubs = cli.Monitor(c.consumeEventbusTopics(cli)) c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) c.donec = c.connCtx.Done() @@ -3313,14 +3306,13 @@ func (c *connBind) isClosed() bool { // // Only the first close does anything. Any later closes return nil. func (c *Conn) Close() error { - // Close the [eventbus.Client] and wait for Conn.consumeEventbusTopics to - // return. Do this before acquiring c.mu: + // Close the [eventbus.Client] and wait for c.consumeEventbusTopics to + // return before acquiring c.mu: // 1. Conn.consumeEventbusTopics event handlers also acquire c.mu, they can // deadlock with c.Close(). // 2. Conn.consumeEventbusTopics event handlers may not guard against // undesirable post/in-progress Conn.Close() behaviors. - c.eventClient.Close() - <-c.subsDoneCh + c.eventSubs.Close() c.mu.Lock() defer c.mu.Unlock() diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 42c12c008cffe..86136d977485a 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -93,10 +93,8 @@ const networkLoggerUploadTimeout = 5 * time.Second type userspaceEngine struct { // eventBus will eventually become required, but for now may be nil. // TODO(creachadair): Enforce that this is non-nil at construction. 
- eventBus *eventbus.Bus - eventClient *eventbus.Client - changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + eventBus *eventbus.Bus + eventSubs eventbus.Monitor logf logger.Logf wgLogger *wglog.Logger // a wireguard-go logging wrapper @@ -354,11 +352,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) controlKnobs: conf.ControlKnobs, reconfigureVPN: conf.ReconfigureVPN, health: conf.HealthTracker, - subsDoneCh: make(chan struct{}), } - e.eventClient = e.eventBus.Client("userspaceEngine") - e.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](e.eventClient) - closePool.addFunc(e.eventClient.Close) if e.birdClient != nil { // Disable the protocol at start time. @@ -545,8 +539,8 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } - go e.consumeEventbusTopics() - + cli := e.eventBus.Client("userspaceEngine") + e.eventSubs = cli.Monitor(e.consumeEventbusTopics(cli)) e.logf("Engine created.") return e, nil } @@ -556,16 +550,17 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. 
-func (e *userspaceEngine) consumeEventbusTopics() { - defer close(e.subsDoneCh) - - for { - select { - case <-e.eventClient.Done(): - return - case changeDelta := <-e.changeDeltaSub.Events(): - tshttpproxy.InvalidateCache() - e.linkChange(&changeDelta) +func (e *userspaceEngine) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { + changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](cli) + return func(cli *eventbus.Client) { + for { + select { + case <-cli.Done(): + return + case changeDelta := <-changeDeltaSub.Events(): + tshttpproxy.InvalidateCache() + e.linkChange(&changeDelta) + } } } } @@ -1228,9 +1223,7 @@ func (e *userspaceEngine) RequestStatus() { } func (e *userspaceEngine) Close() { - e.eventClient.Close() - <-e.subsDoneCh - + e.eventSubs.Close() e.mu.Lock() if e.closing { e.mu.Unlock() From f9c699812adaa286980aed97811217d884cf37fb Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 19 Sep 2025 14:31:55 -0700 Subject: [PATCH 0383/1093] ipn/ipnlocal: use eventbus.Monitor in expiryManager (#17204) This commit does not change the order or meaning of any eventbus activity, it only updates the way the plumbing is set up. Updates #15160 Change-Id: I0a175e67e867459daaedba0731bf68bd331e5ebc Signed-off-by: M. J. 
Fromberger --- ipn/ipnlocal/expiry.go | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index 9427f07382bd6..849e28610d33e 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -43,9 +43,7 @@ type expiryManager struct { logf logger.Logf clock tstime.Clock - eventClient *eventbus.Client - controlTimeSub *eventbus.Subscriber[controlclient.ControlTime] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + eventSubs eventbus.Monitor } func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { @@ -55,12 +53,8 @@ func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { clock: tstime.StdClock{}, } - em.eventClient = bus.Client("ipnlocal.expiryManager") - em.controlTimeSub = eventbus.Subscribe[controlclient.ControlTime](em.eventClient) - - em.subsDoneCh = make(chan struct{}) - go em.consumeEventbusTopics() - + cli := bus.Client("ipnlocal.expiryManager") + em.eventSubs = cli.Monitor(em.consumeEventbusTopics(cli)) return em } @@ -69,15 +63,16 @@ func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. 
-func (em *expiryManager) consumeEventbusTopics() { - defer close(em.subsDoneCh) - - for { - select { - case <-em.eventClient.Done(): - return - case time := <-em.controlTimeSub.Events(): - em.onControlTime(time.Value) +func (em *expiryManager) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { + controlTimeSub := eventbus.Subscribe[controlclient.ControlTime](cli) + return func(cli *eventbus.Client) { + for { + select { + case <-cli.Done(): + return + case time := <-controlTimeSub.Events(): + em.onControlTime(time.Value) + } } } } @@ -250,10 +245,7 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim return nextExpiry } -func (em *expiryManager) close() { - em.eventClient.Close() - <-em.subsDoneCh -} +func (em *expiryManager) close() { em.eventSubs.Close() } // ControlNow estimates the current time on the control server, calculated as // localNow + the delta between local and control server clocks as recorded From 798fddbe5cf21d6f87ee24ce630dfef9420afdb6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 19 Sep 2025 17:15:04 -0700 Subject: [PATCH 0384/1093] feature/linuxdnsfight: move inotify watching of /etc/resolv.conf out to a feature tsnet apps in particular never use the Linux DNS OSManagers, so they don't need DBus, etc. I started to pull that all out into separate features so tsnet doesn't need to bring in DBus, but hit this first. Here you can see that tsnet (and the k8s-operator) no longer pulls in inotify. 
Updates #17206 Change-Id: I7af0f391f60c5e7dbeed7a080346f83262346591 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 - cmd/tailscaled/depaware.txt | 3 +- cmd/tsidp/depaware.txt | 2 - .../feature_linuxdnsfight_disabled.go | 13 +++ .../feature_linuxdnsfight_enabled.go | 13 +++ feature/condregister/maybe_linuxdnsfight.go | 8 ++ feature/featuretags/featuretags.go | 1 + feature/linuxdnsfight/linuxdnsfight.go | 51 +++++++++ .../linuxdnsfight/linuxdnsfight_test.go | 4 +- net/dns/direct.go | 68 ++++++++++++ net/dns/direct_linux.go | 104 ------------------ net/dns/direct_notlinux.go | 10 -- tsnet/depaware.txt | 2 - 13 files changed, 159 insertions(+), 122 deletions(-) create mode 100644 feature/buildfeatures/feature_linuxdnsfight_disabled.go create mode 100644 feature/buildfeatures/feature_linuxdnsfight_enabled.go create mode 100644 feature/condregister/maybe_linuxdnsfight.go create mode 100644 feature/linuxdnsfight/linuxdnsfight.go rename net/dns/direct_linux_test.go => feature/linuxdnsfight/linuxdnsfight_test.go (96%) delete mode 100644 net/dns/direct_linux.go delete mode 100644 net/dns/direct_notlinux.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index b962f51f23d8b..442a9661101c4 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -145,8 +145,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from 
tailscale.com/net/netmon diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index e4405a689fef1..22f80d5d70347 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -124,7 +124,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns + L 💣 github.com/illarion/gonotify/v3 from tailscale.com/feature/linuxdnsfight L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 @@ -277,6 +277,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister + L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/relayserver from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 872dc8f81589e..d92a0b41a6341 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -114,8 +114,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from 
tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink diff --git a/feature/buildfeatures/feature_linuxdnsfight_disabled.go b/feature/buildfeatures/feature_linuxdnsfight_disabled.go new file mode 100644 index 0000000000000..2e5b50ea06af0 --- /dev/null +++ b/feature/buildfeatures/feature_linuxdnsfight_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_linuxdnsfight + +package buildfeatures + +// HasLinuxDNSFight is whether the binary was built with support for modular feature "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linuxdnsfight" build tag. +// It's a const so it can be used for dead code elimination. +const HasLinuxDNSFight = false diff --git a/feature/buildfeatures/feature_linuxdnsfight_enabled.go b/feature/buildfeatures/feature_linuxdnsfight_enabled.go new file mode 100644 index 0000000000000..b9419fccbfc09 --- /dev/null +++ b/feature/buildfeatures/feature_linuxdnsfight_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_linuxdnsfight + +package buildfeatures + +// HasLinuxDNSFight is whether the binary was built with support for modular feature "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linuxdnsfight" build tag. 
+// It's a const so it can be used for dead code elimination. +const HasLinuxDNSFight = true diff --git a/feature/condregister/maybe_linuxdnsfight.go b/feature/condregister/maybe_linuxdnsfight.go new file mode 100644 index 0000000000000..0dae62b00ab8a --- /dev/null +++ b/feature/condregister/maybe_linuxdnsfight.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_linuxdnsfight + +package condregister + +import _ "tailscale.com/feature/linuxdnsfight" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index ec21122db1cad..269ff1fc12955 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -105,6 +105,7 @@ var Features = map[FeatureTag]FeatureMeta{ "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, "drive": {"Drive", "Tailscale Drive (file server) support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, + "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, "outboundproxy": { Sym: "OutboundProxy", diff --git a/feature/linuxdnsfight/linuxdnsfight.go b/feature/linuxdnsfight/linuxdnsfight.go new file mode 100644 index 0000000000000..02d99a3144246 --- /dev/null +++ b/feature/linuxdnsfight/linuxdnsfight.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android + +// Package linuxdnsfight provides Linux support for detecting DNS fights +// (inotify watching of /etc/resolv.conf). 
+package linuxdnsfight + +import ( + "context" + "fmt" + + "github.com/illarion/gonotify/v3" + "tailscale.com/net/dns" +) + +func init() { + dns.HookWatchFile.Set(watchFile) +} + +// watchFile sets up an inotify watch for a given directory and +// calls the callback function every time a particular file is changed. +// The filename should be located in the provided directory. +func watchFile(ctx context.Context, dir, filename string, cb func()) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + const events = gonotify.IN_ATTRIB | + gonotify.IN_CLOSE_WRITE | + gonotify.IN_CREATE | + gonotify.IN_DELETE | + gonotify.IN_MODIFY | + gonotify.IN_MOVE + + watcher, err := gonotify.NewDirWatcher(ctx, events, dir) + if err != nil { + return fmt.Errorf("NewDirWatcher: %w", err) + } + + for { + select { + case event := <-watcher.C: + if event.Name == filename { + cb() + } + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/net/dns/direct_linux_test.go b/feature/linuxdnsfight/linuxdnsfight_test.go similarity index 96% rename from net/dns/direct_linux_test.go rename to feature/linuxdnsfight/linuxdnsfight_test.go index e8f917b907a80..bd3463666d46b 100644 --- a/net/dns/direct_linux_test.go +++ b/feature/linuxdnsfight/linuxdnsfight_test.go @@ -1,7 +1,9 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package dns +//go:build linux && !android + +package linuxdnsfight import ( "context" diff --git a/net/dns/direct.go b/net/dns/direct.go index f23723d9a1515..59eb0696498e8 100644 --- a/net/dns/direct.go +++ b/net/dns/direct.go @@ -23,6 +23,7 @@ import ( "sync" "time" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/tsaddr" @@ -415,6 +416,73 @@ func (m *directManager) GetBaseConfig() (OSConfig, error) { return oscfg, nil } +// HookWatchFile is a hook for watching file changes, for platforms that support it. 
+// The function is called with a directory and filename to watch, and a callback +// to call when the file changes. It returns an error if the watch could not be set up. +var HookWatchFile feature.Hook[func(ctx context.Context, dir, filename string, cb func()) error] + +func (m *directManager) runFileWatcher() { + watchFile, ok := HookWatchFile.GetOk() + if !ok { + return + } + if err := watchFile(m.ctx, "/etc/", resolvConf, m.checkForFileTrample); err != nil { + // This is all best effort for now, so surface warnings to users. + m.logf("dns: inotify: %s", err) + } +} + +var resolvTrampleWarnable = health.Register(&health.Warnable{ + Code: "resolv-conf-overwritten", + Severity: health.SeverityMedium, + Title: "DNS configuration issue", + Text: health.StaticMessage("System DNS config not ideal. /etc/resolv.conf overwritten. See https://tailscale.com/s/dns-fight"), +}) + +// checkForFileTrample checks whether /etc/resolv.conf has been trampled +// by another program on the system. (e.g. a DHCP client) +func (m *directManager) checkForFileTrample() { + m.mu.Lock() + want := m.wantResolvConf + lastWarn := m.lastWarnContents + m.mu.Unlock() + + if want == nil { + return + } + + cur, err := m.fs.ReadFile(resolvConf) + if err != nil { + m.logf("trample: read error: %v", err) + return + } + if bytes.Equal(cur, want) { + m.health.SetHealthy(resolvTrampleWarnable) + if lastWarn != nil { + m.mu.Lock() + m.lastWarnContents = nil + m.mu.Unlock() + m.logf("trample: resolv.conf again matches expected content") + } + return + } + if bytes.Equal(cur, lastWarn) { + // We already logged about this, so not worth doing it again. + return + } + + m.mu.Lock() + m.lastWarnContents = cur + m.mu.Unlock() + + show := cur + if len(show) > 1024 { + show = show[:1024] + } + m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? 
current contents: %q", show) + m.health.SetUnhealthy(resolvTrampleWarnable, nil) +} + func (m *directManager) Close() error { m.ctxClose() diff --git a/net/dns/direct_linux.go b/net/dns/direct_linux.go deleted file mode 100644 index 0558f0f51b253..0000000000000 --- a/net/dns/direct_linux.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux && !android - -package dns - -import ( - "bytes" - "context" - "fmt" - - "github.com/illarion/gonotify/v3" - "tailscale.com/health" -) - -func (m *directManager) runFileWatcher() { - if err := watchFile(m.ctx, "/etc/", resolvConf, m.checkForFileTrample); err != nil { - // This is all best effort for now, so surface warnings to users. - m.logf("dns: inotify: %s", err) - } -} - -// watchFile sets up an inotify watch for a given directory and -// calls the callback function every time a particular file is changed. -// The filename should be located in the provided directory. -func watchFile(ctx context.Context, dir, filename string, cb func()) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - const events = gonotify.IN_ATTRIB | - gonotify.IN_CLOSE_WRITE | - gonotify.IN_CREATE | - gonotify.IN_DELETE | - gonotify.IN_MODIFY | - gonotify.IN_MOVE - - watcher, err := gonotify.NewDirWatcher(ctx, events, dir) - if err != nil { - return fmt.Errorf("NewDirWatcher: %w", err) - } - - for { - select { - case event := <-watcher.C: - if event.Name == filename { - cb() - } - case <-ctx.Done(): - return ctx.Err() - } - } -} - -var resolvTrampleWarnable = health.Register(&health.Warnable{ - Code: "resolv-conf-overwritten", - Severity: health.SeverityMedium, - Title: "Linux DNS configuration issue", - Text: health.StaticMessage("Linux DNS config not ideal. /etc/resolv.conf overwritten. See https://tailscale.com/s/dns-fight"), -}) - -// checkForFileTrample checks whether /etc/resolv.conf has been trampled -// by another program on the system. (e.g. 
a DHCP client) -func (m *directManager) checkForFileTrample() { - m.mu.Lock() - want := m.wantResolvConf - lastWarn := m.lastWarnContents - m.mu.Unlock() - - if want == nil { - return - } - - cur, err := m.fs.ReadFile(resolvConf) - if err != nil { - m.logf("trample: read error: %v", err) - return - } - if bytes.Equal(cur, want) { - m.health.SetHealthy(resolvTrampleWarnable) - if lastWarn != nil { - m.mu.Lock() - m.lastWarnContents = nil - m.mu.Unlock() - m.logf("trample: resolv.conf again matches expected content") - } - return - } - if bytes.Equal(cur, lastWarn) { - // We already logged about this, so not worth doing it again. - return - } - - m.mu.Lock() - m.lastWarnContents = cur - m.mu.Unlock() - - show := cur - if len(show) > 1024 { - show = show[:1024] - } - m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? current contents: %q", show) - m.health.SetUnhealthy(resolvTrampleWarnable, nil) -} diff --git a/net/dns/direct_notlinux.go b/net/dns/direct_notlinux.go deleted file mode 100644 index a73a35e5ead2b..0000000000000 --- a/net/dns/direct_notlinux.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux && !android && !ios - -package dns - -func (m *directManager) runFileWatcher() { - // Not implemented on other platforms. Maybe it could resort to polling. 
-} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 5f7ca2e329190..de9e69f9cf787 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -114,8 +114,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L github.com/google/nftables/xt from github.com/google/nftables/expr+ DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink From d7ec043306ed128e5c5f540e944371a98474f36c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 20 Sep 2025 15:55:33 -0700 Subject: [PATCH 0385/1093] cmd/tailscale/cli: add ts2021 debug flag to set a dial plan Updates tailscale/corp#32534 Change-Id: Ief4ee0a263ea1edbf652b74d8c335c1e5ee209d7 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/debug.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 9e8fa0d7f82a6..b3170d000d924 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -289,6 +289,7 @@ func debugCmd() *ffcli.Command { fs.IntVar(&ts2021Args.version, "version", int(tailcfg.CurrentCapabilityVersion), "protocol version") fs.BoolVar(&ts2021Args.verbose, "verbose", false, "be extra verbose") fs.StringVar(&ts2021Args.aceHost, "ace", "", "if non-empty, use this ACE server IP/hostname as a candidate path") + fs.StringVar(&ts2021Args.dialPlanJSONFile, "dial-plan", "", "if non-empty, use this JSON file to configure the dial plan") return fs })(), }, @@ -967,6 +968,8 @@ var ts2021Args struct { version int // 27 or 
whatever verbose bool aceHost string // if non-empty, FQDN of https ACE server to use ("ace.example.com") + + dialPlanJSONFile string // if non-empty, path to JSON file [tailcfg.ControlDialPlan] JSON } func runTS2021(ctx context.Context, args []string) error { @@ -1051,6 +1054,18 @@ func runTS2021(ctx context.Context, args []string) error { return fmt.Errorf("creating netmon: %w", err) } + var dialPlan *tailcfg.ControlDialPlan + if ts2021Args.dialPlanJSONFile != "" { + b, err := os.ReadFile(ts2021Args.dialPlanJSONFile) + if err != nil { + return fmt.Errorf("reading dial plan JSON file: %w", err) + } + dialPlan = new(tailcfg.ControlDialPlan) + if err := json.Unmarshal(b, dialPlan); err != nil { + return fmt.Errorf("unmarshaling dial plan JSON file: %w", err) + } + } + noiseDialer := &controlhttp.Dialer{ Hostname: ts2021Args.host, HTTPPort: "80", @@ -1058,6 +1073,7 @@ func runTS2021(ctx context.Context, args []string) error { MachineKey: machinePrivate, ControlKey: keys.PublicKey, ProtocolVersion: uint16(ts2021Args.version), + DialPlan: dialPlan, Dialer: dialFunc, Logf: logf, NetMon: netMon, From 1b6bc37f2859007dc4ed949b14f1f8531990b3cf Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 20 Sep 2025 16:14:44 -0700 Subject: [PATCH 0386/1093] net/dnscache: fix case where Resolver could return zero IP with single IPv6 address The controlhttp dialer with a ControlDialPlan IPv6 entry was hitting a case where the dnscache Resolver was returning an netip.Addr zero value, where it should've been returning the IPv6 address. We then tried to dial "invalid IP:80", which would immediately fail, at least locally. Mostly this was causing spammy logs when debugging other stuff. 
Updates tailscale/corp#32534 Change-Id: If8b9a20f10c1a6aa8a662c324151d987fe9bd2f8 Signed-off-by: Brad Fitzpatrick --- net/dnscache/dnscache.go | 3 ++ net/dnscache/dnscache_test.go | 58 +++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) diff --git a/net/dnscache/dnscache.go b/net/dnscache/dnscache.go index d60e92f0b8bbc..94d4bbee7955f 100644 --- a/net/dnscache/dnscache.go +++ b/net/dnscache/dnscache.go @@ -205,6 +205,9 @@ func (r *Resolver) LookupIP(ctx context.Context, host string) (ip, v6 netip.Addr } allIPs = append(allIPs, naIP) } + if !ip.IsValid() && v6.IsValid() { + ip = v6 + } r.dlogf("returning %d static results", len(allIPs)) return } diff --git a/net/dnscache/dnscache_test.go b/net/dnscache/dnscache_test.go index ef4249b7401f3..58bb6cd7f594c 100644 --- a/net/dnscache/dnscache_test.go +++ b/net/dnscache/dnscache_test.go @@ -11,6 +11,7 @@ import ( "net" "net/netip" "reflect" + "slices" "testing" "time" @@ -240,3 +241,60 @@ func TestShouldTryBootstrap(t *testing.T) { }) } } + +func TestSingleHostStaticResult(t *testing.T) { + v4 := netip.MustParseAddr("0.0.0.1") + v6 := netip.MustParseAddr("2001::a") + + tests := []struct { + name string + static []netip.Addr + wantIP netip.Addr + wantIP6 netip.Addr + wantAll []netip.Addr + }{ + { + name: "just-v6", + static: []netip.Addr{v6}, + wantIP: v6, + wantIP6: v6, + wantAll: []netip.Addr{v6}, + }, + { + name: "just-v4", + static: []netip.Addr{v4}, + wantIP: v4, + wantIP6: netip.Addr{}, + wantAll: []netip.Addr{v4}, + }, + { + name: "v6-then-v4", + static: []netip.Addr{v6, v4}, + wantIP: v4, + wantIP6: v6, + wantAll: []netip.Addr{v6, v4}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &Resolver{ + SingleHost: "example.com", + SingleHostStaticResult: tt.static, + } + ip, ip6, all, err := r.LookupIP(context.Background(), "example.com") + if err != nil { + t.Fatal(err) + } + if ip != tt.wantIP { + t.Errorf("got ip %v; want %v", ip, tt.wantIP) + } + if ip6 != 
tt.wantIP6 { + t.Errorf("got ip6 %v; want %v", ip6, tt.wantIP6) + } + if !slices.Equal(all, tt.wantAll) { + t.Errorf("got all %v; want %v", all, tt.wantAll) + } + }) + } +} From db048e905d6636006d06c93da06fad3ff075e97b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 20 Sep 2025 16:48:18 -0700 Subject: [PATCH 0387/1093] control/controlhttp: simplify, fix race dialing, remove priority concept controlhttp has the responsibility of dialing a set of candidate control endpoints in a way that minimizes user facing latency. If one control endpoint is unavailable we promptly dial another, racing across the dimensions of: IPv6, IPv4, port 80, and port 443, over multiple server endpoints. In the case that the top priority endpoint was not available, the prior implementation would hang waiting for other results, so as to try to return the highest priority successful connection to the rest of the client code. This hang would take too long with a large dialplan and sufficient client to endpoint latency as to cause the server to timeout the connection due to inactivity in the intermediate state. Instead of trying to prioritize non-ideal candidate connections, the first successful connection is now used unconditionally, improving user facing latency and avoiding any delays that would encroach on the server-side timeout. The tests are converted to memnet and synctest, running on all platforms. 
Fixes #8442 Fixes tailscale/corp#32534 Co-authored-by: James Tucker Change-Id: I4eb57f046d8b40403220e40eb67a31c41adb3a38 Signed-off-by: Brad Fitzpatrick Signed-off-by: James Tucker --- cmd/tailscale/depaware.txt | 2 +- control/controlhttp/client.go | 180 +++---------- control/controlhttp/constants.go | 1 - control/controlhttp/http_test.go | 447 ++++++++++++++++++------------- 4 files changed, 306 insertions(+), 324 deletions(-) diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 27d7864aec087..b9b7db525a843 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -186,7 +186,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/cmd/tailscale/cli+ - tailscale.com/util/multierr from tailscale.com/control/controlhttp+ + tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 87061c310dd44..da9590c4809cf 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -27,14 +27,12 @@ import ( "errors" "fmt" "io" - "math" "net" "net/http" "net/http/httptrace" "net/netip" "net/url" "runtime" - "sort" "sync/atomic" "time" @@ -53,7 +51,6 @@ import ( "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" - "tailscale.com/util/multierr" ) var stdDialer net.Dialer @@ -110,18 +107,8 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { } candidates := a.DialPlan.Candidates - // Otherwise, we try dialing per the plan. 
Store the highest priority - // in the list, so that if we get a connection to one of those - // candidates we can return quickly. - var highestPriority int = math.MinInt - for _, c := range candidates { - if c.Priority > highestPriority { - highestPriority = c.Priority - } - } - - // This context allows us to cancel in-flight connections if we get a - // highest-priority connection before we're all done. + // Create a context to be canceled as we return, so once we get a good connection, + // we can drop all the other ones. ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -129,142 +116,58 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { type dialResult struct { conn *ClientConn err error - cand tailcfg.ControlIPCandidate - } - resultsCh := make(chan dialResult, len(candidates)) - - var pending atomic.Int32 - pending.Store(int32(len(candidates))) - for _, c := range candidates { - go func(ctx context.Context, c tailcfg.ControlIPCandidate) { - var ( - conn *ClientConn - err error - ) - - // Always send results back to our channel. - defer func() { - resultsCh <- dialResult{conn, err, c} - if pending.Add(-1) == 0 { - close(resultsCh) - } - }() - - // If non-zero, wait the configured start timeout - // before we do anything. - if c.DialStartDelaySec > 0 { - a.logf("[v2] controlhttp: waiting %.2f seconds before dialing %q @ %v", c.DialStartDelaySec, a.Hostname, c.IP) - tmr, tmrChannel := a.clock().NewTimer(time.Duration(c.DialStartDelaySec * float64(time.Second))) - defer tmr.Stop() - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-tmrChannel: - } - } + } + resultsCh := make(chan dialResult) // unbuffered, never closed - // Now, create a sub-context with the given timeout and - // try dialing the provided host. 
- ctx, cancel := context.WithTimeout(ctx, time.Duration(c.DialTimeoutSec*float64(time.Second))) - defer cancel() + dialCand := func(cand tailcfg.ControlIPCandidate) (*ClientConn, error) { + if cand.ACEHost != "" { + a.logf("[v2] controlhttp: waited %.2f seconds, dialing %q via ACE %s (%s)", cand.DialStartDelaySec, a.Hostname, cand.ACEHost, cmp.Or(cand.IP.String(), "dns")) + } else { + a.logf("[v2] controlhttp: waited %.2f seconds, dialing %q @ %s", cand.DialStartDelaySec, a.Hostname, cand.IP.String()) + } - if c.IP.IsValid() { - a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP) - } else if c.ACEHost != "" { - a.logf("[v2] controlhttp: trying to dial %q via ACE %q", a.Hostname, c.ACEHost) - } - // This will dial, and the defer above sends it back to our parent. - conn, err = a.dialHostOpt(ctx, c.IP, c.ACEHost) - }(ctx, c) + ctx, cancel := context.WithTimeout(ctx, time.Duration(cand.DialTimeoutSec*float64(time.Second))) + defer cancel() + return a.dialHostOpt(ctx, cand.IP, cand.ACEHost) } - var results []dialResult - for res := range resultsCh { - // If we get a response that has the highest priority, we don't - // need to wait for any of the other connections to finish; we - // can just return this connection. - // - // TODO(andrew): we could make this better by keeping track of - // the highest remaining priority dynamically, instead of just - // checking for the highest total - if res.cand.Priority == highestPriority && res.conn != nil { - a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, cmp.Or(res.cand.ACEHost, res.cand.IP.String())) - - // Drain the channel and any existing connections in - // the background. 
+ for _, cand := range candidates { + timer := time.AfterFunc(time.Duration(cand.DialStartDelaySec*float64(time.Second)), func() { go func() { - for _, res := range results { - if res.conn != nil { - res.conn.Close() + conn, err := dialCand(cand) + select { + case resultsCh <- dialResult{conn, err}: + if err == nil { + a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, cmp.Or(cand.ACEHost, cand.IP.String())) } - } - for res := range resultsCh { - if res.conn != nil { - res.conn.Close() + case <-ctx.Done(): + if conn != nil { + conn.Close() } } - if a.drainFinished != nil { - close(a.drainFinished) - } }() - return res.conn, nil - } - - // This isn't a highest-priority result, so just store it until - // we're done. - results = append(results, res) + }) + defer timer.Stop() } - // After we finish this function, close any remaining open connections. - defer func() { - for _, result := range results { - // Note: below, we nil out the returned connection (if - // any) in the slice so we don't close it. - if result.conn != nil { - result.conn.Close() + var errs []error + for { + select { + case res := <-resultsCh: + if res.err == nil { + return res.conn, nil } + errs = append(errs, res.err) + if len(errs) == len(candidates) { + // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. + a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", errors.Join(errs...)) + return a.dialHost(ctx) + } + case <-ctx.Done(): + a.logf("controlhttp: context aborted dialing") + return nil, ctx.Err() } - - // We don't drain asynchronously after this point, so notify our - // channel when we return. - if a.drainFinished != nil { - close(a.drainFinished) - } - }() - - // Sort by priority, then take the first non-error response. 
- sort.Slice(results, func(i, j int) bool { - // NOTE: intentionally inverted so that the highest priority - // item comes first - return results[i].cand.Priority > results[j].cand.Priority - }) - - var ( - conn *ClientConn - errs []error - ) - for i, result := range results { - if result.err != nil { - errs = append(errs, result.err) - continue - } - - a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, cmp.Or(result.cand.ACEHost, result.cand.IP.String())) - conn = result.conn - results[i].conn = nil // so we don't close it in the defer - return conn, nil } - if ctx.Err() != nil { - a.logf("controlhttp: context aborted dialing") - return nil, ctx.Err() - } - - merr := multierr.New(errs...) - - // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. - a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", merr.Error()) - return a.dialHost(ctx) } // The TS_FORCE_NOISE_443 envknob forces the controlclient noise dialer to @@ -402,6 +305,9 @@ func (a *Dialer) dialHostOpt(ctx context.Context, optAddr netip.Addr, optACEHost } var err80, err443 error + if forceTLS { + err80 = errors.New("TLS forced: no port 80 dialed") + } for { select { case <-ctx.Done(): diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 12038fae45b1c..58fed1b76ac3a 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -98,7 +98,6 @@ type Dialer struct { logPort80Failure atomic.Bool // For tests only - drainFinished chan struct{} omitCertErrorLogging bool testFallbackDelay time.Duration diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index 0b4e117f98928..6485761ac1eec 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -15,19 +15,20 @@ import ( "net/http/httputil" "net/netip" "net/url" - "runtime" "slices" "strconv" + "strings" "sync" "testing" + "testing/synctest" "time" 
"tailscale.com/control/controlbase" "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/health" + "tailscale.com/net/memnet" "tailscale.com/net/netmon" - "tailscale.com/net/netx" "tailscale.com/net/socks5" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" @@ -36,6 +37,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/eventbus/eventbustest" + "tailscale.com/util/must" ) type httpTestParam struct { @@ -532,6 +534,28 @@ EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== } } +// slowListener wraps a memnet listener to delay accept operations +type slowListener struct { + net.Listener + delay time.Duration +} + +func (sl *slowListener) Accept() (net.Conn, error) { + // Add delay before accepting connections + timer := time.NewTimer(sl.delay) + defer timer.Stop() + <-timer.C + + return sl.Listener.Accept() +} + +func newSlowListener(inner net.Listener, delay time.Duration) net.Listener { + return &slowListener{ + Listener: inner, + delay: delay, + } +} + func brokenMITMHandler(clock tstime.Clock) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Upgrade", controlhttpcommon.UpgradeHeaderValue) @@ -545,33 +569,102 @@ func brokenMITMHandler(clock tstime.Clock) http.HandlerFunc { } func TestDialPlan(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skip("only works on Linux due to multiple localhost addresses") + testCases := []struct { + name string + plan *tailcfg.ControlDialPlan + want []netip.Addr + allowFallback bool + maxDuration time.Duration + }{ + { + name: "single", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "broken-then-good", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, 
+ {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10, DialStartDelaySec: 1}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "multiple-candidates-with-broken", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + // Multiple good IPs plus a broken one + // Should succeed with any of the good ones + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.4"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.3"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2"), netip.MustParseAddr("10.0.0.4"), netip.MustParseAddr("10.0.0.3")}, + }, + { + name: "multiple-candidates-race", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.3"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.3"), netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "fallback", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 1}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + allowFallback: true, + }, + { + // In tailscale/corp#32534 we discovered that a prior implementation + // of the dial race was waiting for all dials to complete when the + // top priority dial was failing. This delay was long enough that in + // real scenarios the server will close the connection due to + // inactivity, because the client does not send the first inside of + // noise request soon enough. This test is a regression guard + // against that behavior - proving that the dial returns promptly + // even if there is some cause of a slow race. 
+ name: "slow-endpoint-doesnt-block", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.12"), Priority: 5, DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), Priority: 1, DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + maxDuration: 2 * time.Second, // Must complete quickly, not wait for slow endpoint + }, } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + runDialPlanTest(t, tt.plan, tt.want, tt.allowFallback, tt.maxDuration) + }) + }) + } +} + +func runDialPlanTest(t *testing.T, plan *tailcfg.ControlDialPlan, want []netip.Addr, allowFallback bool, maxDuration time.Duration) { client, server := key.NewMachine(), key.NewMachine() const ( testProtocolVersion = 1 + httpPort = "80" + httpsPort = "443" ) - getRandomPort := func() string { - ln, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatalf("net.Listen: %v", err) - } - defer ln.Close() - _, port, err := net.SplitHostPort(ln.Addr().String()) - if err != nil { - t.Fatal(err) - } - return port - } + memNetwork := &memnet.Network{} - // We need consistent ports for each address; these are chosen - // randomly and we hope that they won't conflict during this test. 
- httpPort := getRandomPort() - httpsPort := getRandomPort() + fallbackAddr := netip.MustParseAddr("10.0.0.1") + goodAddr := netip.MustParseAddr("10.0.0.2") + otherAddr := netip.MustParseAddr("10.0.0.3") + other2Addr := netip.MustParseAddr("10.0.0.4") + brokenAddr := netip.MustParseAddr("10.0.0.10") + slowAddr := netip.MustParseAddr("10.0.0.12") makeHandler := func(t *testing.T, name string, host netip.Addr, wrap func(http.Handler) http.Handler) { done := make(chan struct{}) @@ -592,17 +685,66 @@ func TestDialPlan(t *testing.T) { handler = wrap(handler) } - httpLn, err := net.Listen("tcp", host.String()+":"+httpPort) + httpLn := must.Get(memNetwork.Listen("tcp", host.String()+":"+httpPort)) + httpsLn := must.Get(memNetwork.Listen("tcp", host.String()+":"+httpsPort)) + + httpServer := &http.Server{Handler: handler} + go httpServer.Serve(httpLn) + t.Cleanup(func() { + httpServer.Close() + }) + + httpsServer := &http.Server{ + Handler: handler, + TLSConfig: tlsConfig(t), + ErrorLog: logger.StdLogger(logger.WithPrefix(t.Logf, "http.Server.ErrorLog: ")), + } + go httpsServer.ServeTLS(httpsLn, "", "") + t.Cleanup(func() { + httpsServer.Close() + }) + } + + // Use synctest's controlled time + clock := tstime.StdClock{} + makeHandler(t, "fallback", fallbackAddr, nil) + makeHandler(t, "good", goodAddr, nil) + makeHandler(t, "other", otherAddr, nil) + makeHandler(t, "other2", other2Addr, nil) + makeHandler(t, "broken", brokenAddr, func(h http.Handler) http.Handler { + return brokenMITMHandler(clock) + }) + // Create slow listener that delays accept by 5 seconds + makeSlowHandler := func(t *testing.T, name string, host netip.Addr, delay time.Duration) { + done := make(chan struct{}) + t.Cleanup(func() { + close(done) + }) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, err := controlhttpserver.AcceptHTTP(context.Background(), w, r, server, nil) + if err != nil { + log.Print(err) + } else { + defer conn.Close() + } + 
w.Header().Set("X-Handler-Name", name) + <-done + }) + + httpLn, err := memNetwork.Listen("tcp", host.String()+":"+httpPort) if err != nil { t.Fatalf("HTTP listen: %v", err) } - httpsLn, err := net.Listen("tcp", host.String()+":"+httpsPort) + httpsLn, err := memNetwork.Listen("tcp", host.String()+":"+httpsPort) if err != nil { t.Fatalf("HTTPS listen: %v", err) } + slowHttpLn := newSlowListener(httpLn, delay) + slowHttpsLn := newSlowListener(httpsLn, delay) + httpServer := &http.Server{Handler: handler} - go httpServer.Serve(httpLn) + go httpServer.Serve(slowHttpLn) t.Cleanup(func() { httpServer.Close() }) @@ -612,213 +754,148 @@ func TestDialPlan(t *testing.T) { TLSConfig: tlsConfig(t), ErrorLog: logger.StdLogger(logger.WithPrefix(t.Logf, "http.Server.ErrorLog: ")), } - go httpsServer.ServeTLS(httpsLn, "", "") + go httpsServer.ServeTLS(slowHttpsLn, "", "") t.Cleanup(func() { httpsServer.Close() }) - return } + makeSlowHandler(t, "slow", slowAddr, 5*time.Second) - fallbackAddr := netip.MustParseAddr("127.0.0.1") - goodAddr := netip.MustParseAddr("127.0.0.2") - otherAddr := netip.MustParseAddr("127.0.0.3") - other2Addr := netip.MustParseAddr("127.0.0.4") - brokenAddr := netip.MustParseAddr("127.0.0.10") + // memnetDialer with connection tracking, so we can catch connection leaks. 
+ dialer := &memnetDialer{ + inner: memNetwork.Dial, + t: t, + } + defer dialer.waitForAllClosedSynctest() - testCases := []struct { - name string - plan *tailcfg.ControlDialPlan - wrap func(http.Handler) http.Handler - want netip.Addr + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() - allowFallback bool - }{ - { - name: "single", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - }}, - want: goodAddr, - }, - { - name: "broken-then-good", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // Dials the broken one, which fails, and then - // eventually dials the good one and succeeds - {IP: brokenAddr, Priority: 2, DialTimeoutSec: 10}, - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10, DialStartDelaySec: 1}, - }}, - want: goodAddr, - }, - // TODO(#8442): fix this test - // { - // name: "multiple-priority-fast-path", - // plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // // Dials some good IPs and our bad one (which - // // hangs forever), which then hits the fast - // // path where we bail without waiting. - // {IP: brokenAddr, Priority: 1, DialTimeoutSec: 10}, - // {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - // {IP: other2Addr, Priority: 1, DialTimeoutSec: 10}, - // {IP: otherAddr, Priority: 2, DialTimeoutSec: 10}, - // }}, - // want: otherAddr, - // }, - { - name: "multiple-priority-slow-path", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // Our broken address is the highest priority, - // so we don't hit our fast path. 
- {IP: brokenAddr, Priority: 10, DialTimeoutSec: 10}, - {IP: otherAddr, Priority: 2, DialTimeoutSec: 10}, - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - }}, - want: otherAddr, - }, - { - name: "fallback", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - {IP: brokenAddr, Priority: 1, DialTimeoutSec: 1}, - }}, - want: fallbackAddr, - allowFallback: true, - }, + host := "example.com" + if allowFallback { + host = fallbackAddr.String() } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - // TODO(awly): replace this with tstest.NewClock and update the - // test to advance the clock correctly. - clock := tstime.StdClock{} - makeHandler(t, "fallback", fallbackAddr, nil) - makeHandler(t, "good", goodAddr, nil) - makeHandler(t, "other", otherAddr, nil) - makeHandler(t, "other2", other2Addr, nil) - makeHandler(t, "broken", brokenAddr, func(h http.Handler) http.Handler { - return brokenMITMHandler(clock) - }) - dialer := closeTrackDialer{ - t: t, - inner: tsdial.NewDialer(netmon.NewStatic()).SystemDial, - conns: make(map[*closeTrackConn]bool), - } - defer dialer.Done() + a := &Dialer{ + Hostname: host, + HTTPPort: httpPort, + HTTPSPort: httpsPort, + MachineKey: client, + ControlKey: server.Public(), + ProtocolVersion: testProtocolVersion, + Dialer: dialer.Dial, + Logf: t.Logf, + DialPlan: plan, + proxyFunc: func(*http.Request) (*url.URL, error) { return nil, nil }, + omitCertErrorLogging: true, + testFallbackDelay: 50 * time.Millisecond, + Clock: clock, + HealthTracker: health.NewTracker(eventbustest.NewBus(t)), + } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() + start := time.Now() + conn, err := a.dial(ctx) + duration := time.Since(start) - // By default, we intentionally point to something that - // we know won't connect, since we want a fallback to - // DNS to be an error. 
- host := "example.com" - if tt.allowFallback { - host = "localhost" - } + if err != nil { + t.Fatalf("dialing controlhttp: %v", err) + } + defer conn.Close() - drained := make(chan struct{}) - a := &Dialer{ - Hostname: host, - HTTPPort: httpPort, - HTTPSPort: httpsPort, - MachineKey: client, - ControlKey: server.Public(), - ProtocolVersion: testProtocolVersion, - Dialer: dialer.Dial, - Logf: t.Logf, - DialPlan: tt.plan, - proxyFunc: func(*http.Request) (*url.URL, error) { return nil, nil }, - drainFinished: drained, - omitCertErrorLogging: true, - testFallbackDelay: 50 * time.Millisecond, - Clock: clock, - HealthTracker: health.NewTracker(eventbustest.NewBus(t)), - } + if maxDuration > 0 && duration > maxDuration { + t.Errorf("dial took %v, expected < %v (should not wait for slow endpoints)", duration, maxDuration) + } - conn, err := a.dial(ctx) - if err != nil { - t.Fatalf("dialing controlhttp: %v", err) - } - defer conn.Close() + raddr := conn.RemoteAddr() + raddrStr := raddr.String() - raddr := conn.RemoteAddr().(*net.TCPAddr) + // split on "|" first to remove memnet pipe suffix + addrPart := raddrStr + if idx := strings.Index(raddrStr, "|"); idx >= 0 { + addrPart = raddrStr[:idx] + } - got, ok := netip.AddrFromSlice(raddr.IP) - if !ok { - t.Errorf("invalid remote IP: %v", raddr.IP) - } else if got != tt.want { - t.Errorf("got connection from %q; want %q", got, tt.want) - } else { - t.Logf("successfully connected to %q", raddr.String()) - } + host, _, err2 := net.SplitHostPort(addrPart) + if err2 != nil { + t.Fatalf("failed to parse remote address %q: %v", addrPart, err2) + } - // Wait until our dialer drains so we can verify that - // all connections are closed. 
- <-drained - }) + got, err3 := netip.ParseAddr(host) + if err3 != nil { + t.Errorf("invalid remote IP: %v", host) + } else { + found := slices.Contains(want, got) + if !found { + t.Errorf("got connection from %q; want one of %v", got, want) + } else { + t.Logf("successfully connected to %q", raddr.String()) + } } } -type closeTrackDialer struct { - t testing.TB - inner netx.DialFunc +// memnetDialer wraps memnet.Network.Dial to track connections for testing +type memnetDialer struct { + inner func(ctx context.Context, network, addr string) (net.Conn, error) + t *testing.T mu sync.Mutex - conns map[*closeTrackConn]bool + conns map[net.Conn]string // conn -> remote address for debugging } -func (d *closeTrackDialer) Dial(ctx context.Context, network, addr string) (net.Conn, error) { - c, err := d.inner(ctx, network, addr) +func (d *memnetDialer) Dial(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := d.inner(ctx, network, addr) if err != nil { return nil, err } - ct := &closeTrackConn{Conn: c, d: d} d.mu.Lock() - d.conns[ct] = true + if d.conns == nil { + d.conns = make(map[net.Conn]string) + } + d.conns[conn] = conn.RemoteAddr().String() + d.t.Logf("tracked connection opened to %s", conn.RemoteAddr()) d.mu.Unlock() - return ct, nil + + return &memnetTrackedConn{Conn: conn, dialer: d}, nil } -func (d *closeTrackDialer) Done() { - // Unfortunately, tsdial.Dialer.SystemDial closes connections - // asynchronously in a goroutine, so we can't assume that everything is - // closed by the time we get here. - // - // Sleep/wait a few times on the assumption that things will close - // "eventually". 
- const iters = 100 - for i := range iters { +func (d *memnetDialer) waitForAllClosedSynctest() { + const maxWait = 15 * time.Second + const checkInterval = 100 * time.Millisecond + + for range int(maxWait / checkInterval) { d.mu.Lock() - if len(d.conns) == 0 { + remaining := len(d.conns) + if remaining == 0 { d.mu.Unlock() return } + d.mu.Unlock() - // Only error on last iteration - if i != iters-1 { - d.mu.Unlock() - time.Sleep(100 * time.Millisecond) - continue - } + time.Sleep(checkInterval) + } - for conn := range d.conns { - d.t.Errorf("expected close of conn %p; RemoteAddr=%q", conn, conn.RemoteAddr().String()) - } - d.mu.Unlock() + d.mu.Lock() + defer d.mu.Unlock() + for _, addr := range d.conns { + d.t.Errorf("connection to %s was not closed after %v", addr, maxWait) } } -func (d *closeTrackDialer) noteClose(c *closeTrackConn) { +func (d *memnetDialer) noteClose(conn net.Conn) { d.mu.Lock() - delete(d.conns, c) // safe if already deleted + if addr, exists := d.conns[conn]; exists { + d.t.Logf("tracked connection closed to %s", addr) + delete(d.conns, conn) + } d.mu.Unlock() } -type closeTrackConn struct { +type memnetTrackedConn struct { net.Conn - d *closeTrackDialer + dialer *memnetDialer } -func (c *closeTrackConn) Close() error { - c.d.noteClose(c) +func (c *memnetTrackedConn) Close() error { + c.dialer.noteClose(c.Conn) return c.Conn.Close() } From 8ec07b5f7fc31e5d86aa9db4f0c7fe5498d3f9fa Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 21 Sep 2025 08:08:41 -0700 Subject: [PATCH 0388/1093] ipn/ipnauth: don't crash on OpenBSD trying to log username of unknown peer We never implemented the peercred package on OpenBSD (and I just tried again and failed), but we've always documented that the creds pointer can be nil for operating systems where we can't map the unix socket back to its UID. 
On those platforms, we set the default unix socket permissions such that only the admin can open it anyway and we don't have a read-only vs read-write distinction. OpenBSD was always in that camp, where any access to Tailscale's unix socket meant full access. But during some refactoring, we broke OpenBSD in that we started assuming during one logging path (during login) that Creds was non-nil when looking up an ipnauth.Actor's username, which wasn't relevant (it was called from a function "maybeUsernameOf" anyway, which threw away errors). Verified on an OpenBSD VM. We don't have any OpenBSD integration tests yet. Fixes #17209 Updates #17221 Change-Id: I473c5903dfaa645694bcc75e7f5d484f3dd6044d Signed-off-by: Brad Fitzpatrick --- ipn/ipnauth/ipnauth.go | 2 +- ipn/ipnauth/ipnauth_notwindows.go | 7 ++++++- ipn/ipnserver/actor.go | 6 +++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/ipn/ipnauth/ipnauth.go b/ipn/ipnauth/ipnauth.go index e6560570cd755..513daf5b3a7e6 100644 --- a/ipn/ipnauth/ipnauth.go +++ b/ipn/ipnauth/ipnauth.go @@ -64,7 +64,7 @@ type ConnIdentity struct { // Fields used when NotWindows: isUnixSock bool // Conn is a *net.UnixConn - creds *peercred.Creds // or nil + creds *peercred.Creds // or nil if peercred.Get was not implemented on this OS // Used on Windows: // TODO(bradfitz): merge these into the peercreds package and diff --git a/ipn/ipnauth/ipnauth_notwindows.go b/ipn/ipnauth/ipnauth_notwindows.go index d9d11bd0a17a1..f5dc07a8cbeb0 100644 --- a/ipn/ipnauth/ipnauth_notwindows.go +++ b/ipn/ipnauth/ipnauth_notwindows.go @@ -18,8 +18,13 @@ import ( func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { ci = &ConnIdentity{conn: c, notWindows: true} _, ci.isUnixSock = c.(*net.UnixConn) - if ci.creds, _ = peercred.Get(c); ci.creds != nil { + if ci.creds, err = peercred.Get(c); ci.creds != nil { ci.pid, _ = ci.creds.PID() + } else if err == peercred.ErrNotImplemented { + // peercred.Get is not implemented on 
this OS (such as OpenBSD) + // Just leave creds as nil, as documented. + } else if err != nil { + return nil, err } return ci, nil } diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 9d86d2c825fda..924417a33e54a 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -145,7 +145,11 @@ func (a *actor) Username() (string, error) { defer tok.Close() return tok.Username() case "darwin", "linux", "illumos", "solaris", "openbsd": - uid, ok := a.ci.Creds().UserID() + creds := a.ci.Creds() + if creds == nil { + return "", errors.New("peer credentials not implemented on this OS") + } + uid, ok := creds.UserID() if !ok { return "", errors.New("missing user ID") } From 986b4d1b0b22b71126b9fbc32c0563331eb4f4ea Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Mon, 8 Sep 2025 10:36:14 +0200 Subject: [PATCH 0389/1093] control/controlclient: fix tka godoc Updates #cleanup Signed-off-by: Kristoffer Dalby --- control/controlclient/direct.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index ea8661bff911f..991767e5deddc 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -398,7 +398,7 @@ func (c *Direct) SetNetInfo(ni *tailcfg.NetInfo) bool { return true } -// SetNetInfo stores a new TKA head value for next update. +// SetTKAHead stores a new TKA head value for next update. // It reports whether the TKA head changed. func (c *Direct) SetTKAHead(tkaHead string) bool { c.mu.Lock() From cc1761e8d272f5ddf326d35de8a647c6cbf6a8c7 Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 22 Sep 2025 13:55:16 +0100 Subject: [PATCH 0390/1093] cmd/k8s-operator: send operator logs to tailscale (#17110) This commit modifies the k8s operator to wrap its logger using the logtail logger provided via the tsnet server. This causes any logs written by the operator to make their way to Tailscale in the same fashion as wireguard logs to be used by support. 
This functionality can also be opted-out of entirely using the "TS_NO_LOGS_NO_SUPPORT" environment variable. Updates https://github.com/tailscale/corp/issues/32037 Signed-off-by: David Bond --- cmd/k8s-operator/logger.go | 26 ++++++++++++++++++++++++++ cmd/k8s-operator/operator.go | 9 +++++++++ cmd/k8s-operator/sts.go | 19 +++++++++---------- tsnet/tsnet.go | 8 ++++---- 4 files changed, 48 insertions(+), 14 deletions(-) create mode 100644 cmd/k8s-operator/logger.go diff --git a/cmd/k8s-operator/logger.go b/cmd/k8s-operator/logger.go new file mode 100644 index 0000000000000..46b1fc0c82d48 --- /dev/null +++ b/cmd/k8s-operator/logger.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "io" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// wrapZapCore returns a zapcore.Core implementation that splits the core chain using zapcore.NewTee. This causes +// logs to be simultaneously written to both the original core and the provided io.Writer implementation. +func wrapZapCore(core zapcore.Core, writer io.Writer) zapcore.Core { + encoder := &kzap.KubeAwareEncoder{ + Encoder: zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + } + + // We use a tee logger here so that logs are written to stdout/stderr normally while at the same time being + // sent upstream. 
+ return zapcore.NewTee(core, zapcore.NewCore(encoder, zapcore.AddSync(writer), zap.DebugLevel)) +} diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 76d2df51d47d2..1d988eb033078 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -44,6 +44,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/envknob" "tailscale.com/client/local" "tailscale.com/client/tailscale" @@ -133,6 +134,14 @@ func main() { } }() } + + // Operator log uploads can be opted-out using the "TS_NO_LOGS_NO_SUPPORT" environment variable. + if !envknob.NoLogsNoSupport() { + zlog = zlog.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { + return wrapZapCore(core, s.LogtailWriter()) + })) + } + rOpts := reconcilerOpts{ log: zlog, tsServer: s, diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 9a87d26438b8a..80c9ca806db10 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -439,12 +439,12 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *z } if orig != nil && !apiequality.Semantic.DeepEqual(latest, orig) { - logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) + logger.With("config", sanitizeConfig(latestConfig)).Debugf("patching the existing proxy Secret") if err = a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { return nil, err } } else { - logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) + logger.With("config", sanitizeConfig(latestConfig)).Debugf("creating a new Secret for the proxy") if err = a.Create(ctx, secret); err != nil { return nil, err } @@ -494,17 +494,16 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *z return secretNames, nil } -// sanitizeConfigBytes returns 
ipn.ConfigVAlpha in string form with redacted -// auth key. -func sanitizeConfigBytes(c ipn.ConfigVAlpha) string { +// sanitizeConfig returns an ipn.ConfigVAlpha with sensitive fields redacted. Since we pump everything +// into JSON-encoded logs it's easier to read this with a .With method than converting it to a string. +func sanitizeConfig(c ipn.ConfigVAlpha) ipn.ConfigVAlpha { + // Explicitly redact AuthKey because we never want it appearing in logs. Never populate this with the + // actual auth key. if c.AuthKey != nil { c.AuthKey = ptr.To("**redacted**") } - sanitizedBytes, err := json.Marshal(c) - if err != nil { - return "invalid config" - } - return string(sanitizedBytes) + + return c } // DeviceInfo returns the device ID, hostname, IPs and capver for the Tailscale device that acts as an operator proxy. diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 978819519d7dd..08f08281a28f0 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -495,14 +495,14 @@ func (s *Server) TailscaleIPs() (ip4, ip6 netip.Addr) { return ip4, ip6 } -// Logtailf returns a [logger.Logf] that outputs to Tailscale's logging service and will be only visible to Tailscale's +// LogtailWriter returns an [io.Writer] that writes to Tailscale's logging service and will be only visible to Tailscale's // support team. Logs written there cannot be retrieved by the user. This method always returns a non-nil value. 
-func (s *Server) Logtailf() logger.Logf { +func (s *Server) LogtailWriter() io.Writer { if s.logtail == nil { - return logger.Discard + return io.Discard } - return s.logtail.Logf + return s.logtail } func (s *Server) getAuthKey() string { From 6e128498a788e506921059a5c17acc9452195a5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Mon, 22 Sep 2025 09:16:13 -0400 Subject: [PATCH 0391/1093] controlclient/auto: switch eventbus to using a monitor (#17205) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only changes how the go routine consuming the events starts and stops, not what it does. Updates #15160 Signed-off-by: Claus Lensbøl --- control/controlclient/auto.go | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index bbc129c5e943e..9a654b679b57a 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -123,9 +123,7 @@ type Auto struct { observerQueue execqueue.ExecQueue shutdownFn func() // to be called prior to shutdown or nil - eventClient *eventbus.Client - healthChangeSub *eventbus.Subscriber[health.Change] - subsDoneCh chan struct{} // close-only channel when eventClient has closed + eventSubs eventbus.Monitor mu sync.Mutex // mutex guards the following fields @@ -195,11 +193,11 @@ func NewNoStart(opts Options) (_ *Auto, err error) { updateDone: make(chan struct{}), observer: opts.Observer, shutdownFn: opts.Shutdown, - subsDoneCh: make(chan struct{}), } - c.eventClient = opts.Bus.Client("controlClient.Auto") - c.healthChangeSub = eventbus.Subscribe[health.Change](c.eventClient) + // Set up eventbus client and subscriber + ec := opts.Bus.Client("controlClient.Auto") + c.eventSubs = ec.Monitor(c.consumeEventbusTopics(ec)) c.authCtx, c.authCancel = context.WithCancel(context.Background()) c.authCtx = sockstats.WithSockStats(c.authCtx, 
sockstats.LabelControlClientAuto, opts.Logf) @@ -207,7 +205,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { c.mapCtx, c.mapCancel = context.WithCancel(context.Background()) c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, opts.Logf) - go c.consumeEventbusTopics() return c, nil } @@ -216,16 +213,17 @@ func NewNoStart(opts Options) (_ *Auto, err error) { // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. -func (c *Auto) consumeEventbusTopics() { - defer close(c.subsDoneCh) - - for { - select { - case <-c.eventClient.Done(): - return - case change := <-c.healthChangeSub.Events(): - if change.WarnableChanged { - c.direct.ReportWarnableChange(change.Warnable, change.UnhealthyState) +func (c *Auto) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { + healthChangeSub := eventbus.Subscribe[health.Change](ec) + return func(cli *eventbus.Client) { + for { + select { + case <-cli.Done(): + return + case change := <-healthChangeSub.Events(): + if change.WarnableChanged { + c.direct.ReportWarnableChange(change.Warnable, change.UnhealthyState) + } } } } @@ -784,8 +782,7 @@ func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) { } func (c *Auto) Shutdown() { - c.eventClient.Close() - <-c.subsDoneCh + c.eventSubs.Close() c.mu.Lock() if c.closed { From e59fbaab64ea1ba4f19ba586ca39a77af51bcd83 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 22 Sep 2025 07:07:57 -0700 Subject: [PATCH 0392/1093] util/eventbus: give a nicer error when attempting to use a closed client (#17208) It is a programming error to Publish or Subscribe on a closed Client, but now the way you discover that is by getting a panic from down in the machinery of the bus after the client state has been cleaned up. 
To provide a more helpful error, let's panic explicitly when that happens and say what went wrong ("the client is closed"), by preventing subscriptions from interleaving with closure of the client. With this change, either an attachment fails outright (because the client is already closed) or completes and then shuts down in good order in the normal course. This does not change the semantics of the client, publishers, or subscribers, it's just making the failure more eager so we can attach explanatory text. Updates #15160 Change-Id: Ia492f4c1dea7535aec2cdcc2e5ea5410ed5218d2 Signed-off-by: M. J. Fromberger --- util/eventbus/bus_test.go | 39 ++++++++++++++++++++++++++++++++++++++- util/eventbus/client.go | 31 ++++++++++++++++++++++++++----- 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 7782634ae92ab..67f68cd4a14d1 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -257,8 +257,8 @@ func TestMonitor(t *testing.T) { cli := bus.Client("test client") // The monitored goroutine runs until the client or test subscription ends. 
+ sub := eventbus.Subscribe[string](cli) m := cli.Monitor(func(c *eventbus.Client) { - sub := eventbus.Subscribe[string](cli) select { case <-c.Done(): t.Log("client closed") @@ -294,6 +294,43 @@ func TestMonitor(t *testing.T) { t.Run("Wait", testMon(t, func(c *eventbus.Client, m eventbus.Monitor) { c.Close(); m.Wait() })) } +func TestRegression(t *testing.T) { + bus := eventbus.New() + t.Cleanup(bus.Close) + + t.Run("SubscribeClosed", func(t *testing.T) { + c := bus.Client("test sub client") + c.Close() + + var v any + func() { + defer func() { v = recover() }() + eventbus.Subscribe[string](c) + }() + if v == nil { + t.Fatal("Expected a panic from Subscribe on a closed client") + } else { + t.Logf("Got expected panic: %v", v) + } + }) + + t.Run("PublishClosed", func(t *testing.T) { + c := bus.Client("test pub client") + c.Close() + + var v any + func() { + defer func() { v = recover() }() + eventbus.Publish[string](c) + }() + if v == nil { + t.Fatal("expected a panic from Publish on a closed client") + } else { + t.Logf("Got expected panic: %v", v) + } + }) +} + type queueChecker struct { t *testing.T want []any diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 176b6f2bc8e60..9b4119865ebb9 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -51,6 +51,8 @@ func (c *Client) Close() { c.stop.Stop() } +func (c *Client) isClosed() bool { return c.pub == nil && c.sub == nil } + // Done returns a channel that is closed when [Client.Close] is called. // The channel is closed after all the publishers and subscribers governed by // the client have been closed. 
@@ -83,6 +85,10 @@ func (c *Client) subscribeTypes() []reflect.Type { func (c *Client) subscribeState() *subscribeState { c.mu.Lock() defer c.mu.Unlock() + return c.subscribeStateLocked() +} + +func (c *Client) subscribeStateLocked() *subscribeState { if c.sub == nil { c.sub = newSubscribeState(c) } @@ -92,6 +98,9 @@ func (c *Client) subscribeState() *subscribeState { func (c *Client) addPublisher(pub publisher) { c.mu.Lock() defer c.mu.Unlock() + if c.isClosed() { + panic("cannot Publish on a closed client") + } c.pub.Add(pub) } @@ -117,17 +126,29 @@ func (c *Client) shouldPublish(t reflect.Type) bool { return c.publishDebug.active() || c.bus.shouldPublish(t) } -// Subscribe requests delivery of events of type T through the given -// Queue. Panics if the queue already has a subscriber for T. +// Subscribe requests delivery of events of type T through the given client. +// It panics if c already has a subscriber for type T, or if c is closed. func Subscribe[T any](c *Client) *Subscriber[T] { - r := c.subscribeState() + // Hold the client lock throughout the subscription process so that a caller + // attempting to subscribe on a closed client will get a useful diagnostic + // instead of a random panic from inside the subscriber plumbing. + c.mu.Lock() + defer c.mu.Unlock() + + // The caller should not race subscriptions with close, give them a useful + // diagnostic at the call site. + if c.isClosed() { + panic("cannot Subscribe on a closed client") + } + + r := c.subscribeStateLocked() s := newSubscriber[T](r) r.addSubscriber(s) return s } -// Publish returns a publisher for event type T using the given -// client. +// Publish returns a publisher for event type T using the given client. +// It panics if c is closed. func Publish[T any](c *Client) *Publisher[T] { p := newPublisher[T](c) c.addPublisher(p) From 1b5201023fd2a07f9b4f30331daaf3ed39086844 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Mon, 22 Sep 2025 08:43:39 -0700 Subject: [PATCH 0393/1093] ipn/ipnlocal: use eventbus.Monitor in LocalBackend (#17225) This commit does not change the order or meaning of any eventbus activity, it only updates the way the plumbing is set up. Updates #15160 Change-Id: I06860ac4e43952a9bb4d85366138c9d9a17fd9cd Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/local.go | 68 ++++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 36 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7ac8f0ecbf8ba..4af0a3aa645be 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -197,18 +197,14 @@ var ( // state machine generates events back out to zero or more components. type LocalBackend struct { // Elements that are thread-safe or constant after construction. - ctx context.Context // canceled by [LocalBackend.Shutdown] - ctxCancel context.CancelCauseFunc // cancels ctx - logf logger.Logf // general logging - keyLogf logger.Logf // for printing list of peers on change - statsLogf logger.Logf // for printing peers stats on change - sys *tsd.System - eventClient *eventbus.Client - clientVersionSub *eventbus.Subscriber[tailcfg.ClientVersion] - autoUpdateSub *eventbus.Subscriber[controlclient.AutoUpdate] - healthChangeSub *eventbus.Subscriber[health.Change] - changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + ctx context.Context // canceled by [LocalBackend.Shutdown] + ctxCancel context.CancelCauseFunc // cancels ctx + logf logger.Logf // general logging + keyLogf logger.Logf // for printing list of peers on change + statsLogf logger.Logf // for printing peers stats on change + sys *tsd.System + eventSubs eventbus.Monitor + health *health.Tracker // always non-nil polc policyclient.Client // always non-nil metrics metrics @@ -538,13 +534,10 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo 
captiveCtx: captiveCtx, captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), - subsDoneCh: make(chan struct{}), } - b.eventClient = b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") - b.clientVersionSub = eventbus.Subscribe[tailcfg.ClientVersion](b.eventClient) - b.autoUpdateSub = eventbus.Subscribe[controlclient.AutoUpdate](b.eventClient) - b.healthChangeSub = eventbus.Subscribe[health.Change](b.eventClient) - b.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](b.eventClient) + ec := b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") + b.eventSubs = ec.Monitor(b.consumeEventbusTopics(ec)) + nb := newNodeBackend(ctx, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -611,7 +604,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } } - go b.consumeEventbusTopics() return b, nil } @@ -620,21 +612,26 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. 
-func (b *LocalBackend) consumeEventbusTopics() { - defer close(b.subsDoneCh) - - for { - select { - case <-b.eventClient.Done(): - return - case clientVersion := <-b.clientVersionSub.Events(): - b.onClientVersion(&clientVersion) - case au := <-b.autoUpdateSub.Events(): - b.onTailnetDefaultAutoUpdate(au.Value) - case change := <-b.healthChangeSub.Events(): - b.onHealthChange(change) - case changeDelta := <-b.changeDeltaSub.Events(): - b.linkChange(&changeDelta) +func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { + clientVersionSub := eventbus.Subscribe[tailcfg.ClientVersion](ec) + autoUpdateSub := eventbus.Subscribe[controlclient.AutoUpdate](ec) + healthChangeSub := eventbus.Subscribe[health.Change](ec) + changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + + return func(ec *eventbus.Client) { + for { + select { + case <-ec.Done(): + return + case clientVersion := <-clientVersionSub.Events(): + b.onClientVersion(&clientVersion) + case au := <-autoUpdateSub.Events(): + b.onTailnetDefaultAutoUpdate(au.Value) + case change := <-healthChangeSub.Events(): + b.onHealthChange(change) + case changeDelta := <-changeDeltaSub.Events(): + b.linkChange(&changeDelta) + } } } } @@ -1103,8 +1100,7 @@ func (b *LocalBackend) Shutdown() { // they can deadlock with c.Shutdown(). // 2. LocalBackend.consumeEventbusTopics event handlers may not guard against // undesirable post/in-progress LocalBackend.Shutdown() behaviors. - b.eventClient.Close() - <-b.subsDoneCh + b.eventSubs.Close() b.em.close() From f67ad67c6f0588ce001ee1034a776e384b1fd1f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Mon, 22 Sep 2025 13:14:55 -0400 Subject: [PATCH 0394/1093] control/controlclient: switch ID to be incrementing instead of random (#17230) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also cleans up a few comments.
Updates #15160 Signed-off-by: Claus Lensbøl --- control/controlclient/direct.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 991767e5deddc..ffac7e9471244 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -14,7 +14,6 @@ import ( "fmt" "io" "log" - "math/rand/v2" "net" "net/http" "net/netip" @@ -221,6 +220,8 @@ type NetmapDeltaUpdater interface { UpdateNetmapDelta([]netmap.NodeMutation) (ok bool) } +var nextControlClientID atomic.Int64 + // NewDirect returns a new Direct client. func NewDirect(opts Options) (*Direct, error) { if opts.ServerURL == "" { @@ -314,7 +315,7 @@ func NewDirect(opts Options) (*Direct, error) { } c.closedCtx, c.closeCtx = context.WithCancel(context.Background()) - c.controlClientID = rand.Int64() + c.controlClientID = nextControlClientID.Add(1) if opts.Hostinfo == nil { c.SetHostinfo(hostinfo.New()) @@ -835,21 +836,21 @@ func (c *Direct) SendUpdate(ctx context.Context) error { return c.sendMapRequest(ctx, false, nil) } -// ClientID returns the ControlClientID of the controlClient +// ClientID returns the controlClientID of the controlClient. func (c *Direct) ClientID() int64 { return c.controlClientID } -// AutoUpdate wraps a bool for naming on the eventbus +// AutoUpdate is an eventbus value, reporting the value of tailcfg.MapResponse.DefaultAutoUpdate. type AutoUpdate struct { - ClientID int64 // The ID field is used for consumers to differentiate instances of Direct - Value bool + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct. + Value bool // The Value represents DefaultAutoUpdate from [tailcfg.MapResponse]. } -// ControlTime wraps a [time.Time] for naming on the eventbus +// ControlTime is an eventbus value, reporting the value of tailcfg.MapResponse.ControlTime. 
type ControlTime struct { - ClientID int64 // The ID field is used for consumers to differentiate instances of Direct - Value time.Time + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct. + Value time.Time // The Value represents ControlTime from [tailcfg.MapResponse]. } // If we go more than watchdogTimeout without hearing from the server, From 5e79e497d3682741ce192d245fd193322c03b85a Mon Sep 17 00:00:00 2001 From: Mahyar Mirrashed <59240843+mahyarmirrashed@users.noreply.github.com> Date: Mon, 22 Sep 2025 12:37:27 -0500 Subject: [PATCH 0395/1093] cmd/tailscale/cli: show last seen time on status command (#16588) Add a last seen time on the cli's status command, similar to the web portal. Before: ``` 100.xxx.xxx.xxx tailscale-operator tagged-devices linux offline ``` After: ``` 100.xxx.xxx.xxx tailscale-operator tagged-devices linux offline, last seen 20d ago ``` Fixes #16584 Signed-off-by: Mahyar Mirrashed --- cmd/tailscale/cli/cli.go | 17 +++++++++++++++++ cmd/tailscale/cli/exitnode.go | 6 ++++-- cmd/tailscale/cli/status.go | 6 +++--- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index dfc8f3249577c..5206fdd588a1b 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -18,6 +18,7 @@ import ( "strings" "sync" "text/tabwriter" + "time" "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" @@ -538,3 +539,19 @@ func jsonDocsWalk(cmd *ffcli.Command) *commandDoc { } return res } + +func lastSeenFmt(t time.Time) string { + if t.IsZero() { + return "" + } + d := max(time.Since(t), time.Minute) // at least 1 minute + + switch { + case d < time.Hour: + return fmt.Sprintf(", last seen %dm ago", int(d.Minutes())) + case d < 24*time.Hour: + return fmt.Sprintf(", last seen %dh ago", int(d.Hours())) + default: + return fmt.Sprintf(", last seen %dd ago", int(d.Hours()/24)) + } +} diff --git a/cmd/tailscale/cli/exitnode.go 
b/cmd/tailscale/cli/exitnode.go index b153f096d6869..b47b9f0bd4949 100644 --- a/cmd/tailscale/cli/exitnode.go +++ b/cmd/tailscale/cli/exitnode.go @@ -173,11 +173,13 @@ func hasAnyExitNodeSuggestions(peers []*ipnstate.PeerStatus) bool { // a peer. If there is no notable state, a - is returned. func peerStatus(peer *ipnstate.PeerStatus) string { if !peer.Active { + lastseen := lastSeenFmt(peer.LastSeen) + if peer.ExitNode { - return "selected but offline" + return "selected but offline" + lastseen } if !peer.Online { - return "offline" + return "offline" + lastseen } } diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 97f6708db675a..94e0977fe57bf 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -164,7 +164,7 @@ func runStatus(ctx context.Context, args []string) error { anyTraffic := ps.TxBytes != 0 || ps.RxBytes != 0 var offline string if !ps.Online { - offline = "; offline" + offline = "; offline" + lastSeenFmt(ps.LastSeen) } if !ps.Active { if ps.ExitNode { @@ -174,7 +174,7 @@ func runStatus(ctx context.Context, args []string) error { } else if anyTraffic { f("idle" + offline) } else if !ps.Online { - f("offline") + f("offline" + lastSeenFmt(ps.LastSeen)) } else { f("-") } @@ -193,7 +193,7 @@ func runStatus(ctx context.Context, args []string) error { f("peer-relay %s", ps.PeerRelay) } if !ps.Online { - f("; offline") + f(offline) } } if anyTraffic { From daad5c2b5c6753dd1ffccffeb6e3adb4c4a36fe8 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 22 Sep 2025 10:49:28 -0700 Subject: [PATCH 0396/1093] wgengine/router: use eventbus.Monitor in linuxRouter (#17232) This commit does not change the order or meaning of any eventbus activity, it only updates the way the plumbing is set up. Updates #15160 Change-Id: I61b863f9c05459d530a4c34063a8bad9046c0e27 Signed-off-by: M. J. 
Fromberger --- wgengine/router/router_linux.go | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index a9edd7f9608b5..dc1425708d312 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -49,8 +49,7 @@ type linuxRouter struct { tunname string netMon *netmon.Monitor health *health.Tracker - eventClient *eventbus.Client - ruleDeletedSub *eventbus.Subscriber[netmon.RuleDeleted] + eventSubs eventbus.Monitor rulesAddedPub *eventbus.Publisher[AddIPRules] unregNetMon func() addrs map[netip.Prefix]bool @@ -100,7 +99,6 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon tunname: tunname, netfilterMode: netfilterOff, netMon: netMon, - eventClient: bus.Client("router-linux"), health: health, cmd: cmd, @@ -108,9 +106,9 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon ipRuleFixLimiter: rate.NewLimiter(rate.Every(5*time.Second), 10), ipPolicyPrefBase: 5200, } - r.ruleDeletedSub = eventbus.Subscribe[netmon.RuleDeleted](r.eventClient) - r.rulesAddedPub = eventbus.Publish[AddIPRules](r.eventClient) - go r.consumeEventbusTopics() + ec := bus.Client("router-linux") + r.rulesAddedPub = eventbus.Publish[AddIPRules](ec) + r.eventSubs = ec.Monitor(r.consumeEventbusTopics(ec)) if r.useIPCommand() { r.ipRuleAvailable = (cmd.run("ip", "rule") == nil) @@ -159,13 +157,16 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon // always handled in the order they are received, i.e. the next event is not // read until the previous event's handler has returned. It returns when the // [eventbus.Client] is closed. 
-func (r *linuxRouter) consumeEventbusTopics() { - for { - select { - case <-r.eventClient.Done(): - return - case rulesDeleted := <-r.ruleDeletedSub.Events(): - r.onIPRuleDeleted(rulesDeleted.Table, rulesDeleted.Priority) +func (r *linuxRouter) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { + ruleDeletedSub := eventbus.Subscribe[netmon.RuleDeleted](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ec.Done(): + return + case rs := <-ruleDeletedSub.Events(): + r.onIPRuleDeleted(rs.Table, rs.Priority) + } } } } @@ -362,7 +363,7 @@ func (r *linuxRouter) Close() error { if r.unregNetMon != nil { r.unregNetMon() } - r.eventClient.Close() + r.eventSubs.Close() if err := r.downInterface(); err != nil { return err } From 15b3876c2c4ac98d966a2cfafce3c3411a9ecd40 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 22 Sep 2025 10:50:38 -0700 Subject: [PATCH 0397/1093] client/systray: use new tailnet display name as profile title Updates tailscale/corp#30456 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index 536cfe1825cd5..4ac08058854e4 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -339,9 +339,9 @@ func profileTitle(profile ipn.LoginProfile) string { if profile.NetworkProfile.DomainName != "" { if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { // windows and mac don't support multi-line menu - title += " (" + profile.NetworkProfile.DomainName + ")" + title += " (" + profile.NetworkProfile.DisplayNameOrDefault() + ")" } else { - title += "\n" + profile.NetworkProfile.DomainName + title += "\n" + profile.NetworkProfile.DisplayNameOrDefault() } } return title From e582fb9b53e56c39353b665f92eb7a2aeacdbf1d Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 22 Sep 2025 10:48:45 -0700 Subject: [PATCH 0398/1093]
client/web: use network profile for displaying tailnet info Also update to use the new DisplayNameOrDefault. Updates tailscale/corp#30456 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/web/web.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/client/web/web.go b/client/web/web.go index 71a015daba465..d88239843e190 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -978,9 +978,18 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { data.ClientVersion = cv } - if st.CurrentTailnet != nil { - data.TailnetName = st.CurrentTailnet.MagicDNSSuffix - data.DomainName = st.CurrentTailnet.Name + profile, _, err := s.lc.ProfileStatus(r.Context()) + if err != nil { + s.logf("error fetching profiles: %v", err) + // If for some reason we can't fetch profiles, + // continue to use st.CurrentTailnet if set. + if st.CurrentTailnet != nil { + data.TailnetName = st.CurrentTailnet.MagicDNSSuffix + data.DomainName = st.CurrentTailnet.Name + } + } else { + data.TailnetName = profile.NetworkProfile.MagicDNSName + data.DomainName = profile.NetworkProfile.DisplayNameOrDefault() } if st.Self.Tags != nil { data.Tags = st.Self.Tags.AsSlice() From e3307fbce137853a0cf77b4feeeafee58f938a05 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Mon, 22 Sep 2025 16:21:21 -0500 Subject: [PATCH 0399/1093] cmd/tailscale: omit the `drive` subcommand in MacOS GUI apps In MacOS GUI apps, users have to select folders to share via the GUI. This is both because the GUI app keeps its own record of shares, and because the sandboxed version of the GUI app needs to gain access to the shared folders by having the user pick them in a file selector. The new build tag `ts_mac_gui` allows the MacOS GUI app build to signal that this is a MacOS GUI app, which causes the `drive` subcommand to be omitted so that people do not mistakenly attempt to use it. 
Updates tailscale/tailscale#17210 Signed-off-by: Percy Wegmann --- cmd/tailscale/cli/drive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/drive.go b/cmd/tailscale/cli/drive.go index 67536ace07367..131f468477314 100644 --- a/cmd/tailscale/cli/drive.go +++ b/cmd/tailscale/cli/drive.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ts_omit_drive +//go:build !ts_omit_drive && !ts_mac_gui package cli From 1791f878708ec31ef4622222a5858217e749e777 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 22 Sep 2025 15:02:38 +0000 Subject: [PATCH 0400/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/apple.md | 15 +++++++-------- licenses/tailscale.md | 2 -- licenses/windows.md | 17 ++++++++--------- 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index 91ba966981785..6b6d470457227 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -33,7 +33,7 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/cc2cfa0554c3/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) @@ -53,7 +53,6 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.65/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.22/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) @@ -68,13 +67,13 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.41.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.43.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.35.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.34.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.28.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) 
([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.29.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 6feb85aafcea6..b15b937440c9d 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -65,7 +65,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) - [github.com/mattn/go-isatty](https://pkg.go.dev/github.com/mattn/go-isatty) ([MIT](https://github.com/mattn/go-isatty/blob/v0.0.20/LICENSE)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/peterbourgon/ff/v3](https://pkg.go.dev/github.com/peterbourgon/ff/v3) ([Apache-2.0](https://github.com/peterbourgon/ff/blob/v3.4.0/LICENSE)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) @@ -101,7 +100,6 @@ Some packages may only be included on certain architectures or operating 
systems - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.32.0/LICENSE)) - [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE)) - [sigs.k8s.io/yaml/goyaml.v2](https://pkg.go.dev/sigs.k8s.io/yaml/goyaml.v2) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE)) - - [software.sslmate.com/src/go-pkcs12](https://pkg.go.dev/software.sslmate.com/src/go-pkcs12) ([BSD-3-Clause](https://github.com/SSLMate/go-pkcs12/blob/v0.4.0/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) - [tailscale.com/tempfork/gliderlabs/ssh](https://pkg.go.dev/tailscale.com/tempfork/gliderlabs/ssh) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/tempfork/gliderlabs/ssh/LICENSE)) - [tailscale.com/tempfork/spf13/cobra](https://pkg.go.dev/tailscale.com/tempfork/spf13/cobra) ([Apache-2.0](https://github.com/tailscale/tailscale/blob/HEAD/tempfork/spf13/cobra/LICENSE.txt)) diff --git a/licenses/windows.md b/licenses/windows.md index aff149d4d4ba4..37c41ca3fc05f 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -36,7 +36,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/cc2cfa0554c3/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/go-cmp/cmp](https://pkg.go.dev/github.com/google/go-cmp/cmp) ([BSD-3-Clause](https://github.com/google/go-cmp/blob/v0.7.0/LICENSE)) @@ -52,7 +52,6 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.65/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/munnerz/goautoneg](https://pkg.go.dev/github.com/munnerz/goautoneg) ([BSD-3-Clause](https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE)) - [github.com/nfnt/resize](https://pkg.go.dev/github.com/nfnt/resize) ([ISC](https://github.com/nfnt/resize/blob/83c6a9932646/LICENSE)) @@ -72,15 +71,15 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.41.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.26.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.43.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.35.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.34.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.28.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.27.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) 
([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.29.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.7/LICENSE)) From 4af15a11482d40caa4b4a2a7db244d385965ced8 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Tue, 23 Sep 2025 13:35:22 -0400 Subject: [PATCH 0401/1093] magicsock: fix deadlock in SetStaticEndpoints (#17247) updates tailscale/corp#32600 A localAPI/cli call to reload-config can end up leaving magicsock's mutex locked. We were missing an unlock for the early exit where there's no change in the static endpoints when the disk-based config is loaded. This is not likely the root cause of the linked issue - just noted during investigation. 
Signed-off-by: Jonathan Nobels --- wgengine/magicsock/magicsock.go | 1 + 1 file changed, 1 insertion(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 39a7bb2e687ba..72fff34110b7b 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1015,6 +1015,7 @@ func (c *Conn) setEndpoints(endpoints []tailcfg.Endpoint) (changed bool) { func (c *Conn) SetStaticEndpoints(ep views.Slice[netip.AddrPort]) { c.mu.Lock() if reflect.DeepEqual(c.staticEndpoints.AsSlice(), ep.AsSlice()) { + c.mu.Unlock() return } c.staticEndpoints = ep From 87ccfbd2500cb6078be43bf7fe08e596faa06201 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 23 Sep 2025 14:42:11 -0700 Subject: [PATCH 0402/1093] ipn/ipnlocal: fix eventbus data race Fixes #17252 Change-Id: Id969fca750a48fb43431c53f3e0631bd9bd496d1 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4af0a3aa645be..ce42ae75a0fd7 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -535,8 +535,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), } - ec := b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") - b.eventSubs = ec.Monitor(b.consumeEventbusTopics(ec)) nb := newNodeBackend(ctx, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) @@ -604,6 +602,12 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } } + + // Start the event bus late, once all the assignments above are done. 
+ // (See previous race in tailscale/tailscale#17252) + ec := b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") + b.eventSubs = ec.Monitor(b.consumeEventbusTopics(ec)) + return b, nil } From 4657cbdb11c632cc95fa35241a2d058665ce2f12 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 23 Sep 2025 15:26:10 -0700 Subject: [PATCH 0403/1093] client, cmd/tailscale/cli, feature/relayserver, net/udprelay: implement tailscale debug peer-relay-sessions (#17239) Fixes tailscale/corp#30035 Signed-off-by: Dylan Bargatze Signed-off-by: Jordan Whited Co-authored-by: Dylan Bargatze --- client/local/local.go | 11 ++++ cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/cli/debug-peer-relay.go | 77 ++++++++++++++++++++++++ cmd/tailscale/cli/debug.go | 6 +- cmd/tailscale/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + feature/relayserver/relayserver.go | 83 ++++++++++++++++++++++++-- net/udprelay/server.go | 45 ++++++++++++++ net/udprelay/status/status.go | 75 +++++++++++++++++++++++ tsnet/depaware.txt | 1 + tstest/integration/integration_test.go | 45 +++++++++++++- 13 files changed, 341 insertions(+), 7 deletions(-) create mode 100644 cmd/tailscale/cli/debug-peer-relay.go create mode 100644 net/udprelay/status/status.go diff --git a/client/local/local.go b/client/local/local.go index a606fbdf38341..1be1f2ca74440 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -33,6 +33,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netutil" + "tailscale.com/net/udprelay/status" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" @@ -1184,6 +1185,16 @@ func (lc *Client) DebugSetExpireIn(ctx context.Context, d time.Duration) error { return err } +// DebugPeerRelaySessions returns debug information about the current peer +// relay sessions running through this node. 
+func (lc *Client) DebugPeerRelaySessions(ctx context.Context) (*status.ServerStatus, error) { + body, err := lc.send(ctx, "GET", "/localapi/v0/debug-peer-relay-sessions", 200, nil) + if err != nil { + return nil, fmt.Errorf("error %w: %s", err, body) + } + return decodeJSON[*status.ServerStatus](body) +} + // StreamDebugCapture streams a pcap-formatted packet capture. // // The provided context does not determine the lifetime of the diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 61e42ede14a41..b0501b5885fee 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -122,6 +122,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+ + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/net/wsconn from tailscale.com/cmd/derper tailscale.com/paths from tailscale.com/client/local 💣 tailscale.com/safesocket from tailscale.com/client/local diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 442a9661101c4..e0fdc27bb2ea2 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -883,6 +883,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscale/cli/debug-peer-relay.go b/cmd/tailscale/cli/debug-peer-relay.go new file mode 100644 index 0000000000000..bef8b83693aca --- /dev/null 
+++ b/cmd/tailscale/cli/debug-peer-relay.go @@ -0,0 +1,77 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_relayserver + +package cli + +import ( + "bytes" + "cmp" + "context" + "fmt" + "net/netip" + "slices" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/net/udprelay/status" +) + +func init() { + debugPeerRelayCmd = mkDebugPeerRelaySessionsCmd +} + +func mkDebugPeerRelaySessionsCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "peer-relay-sessions", + ShortUsage: "tailscale debug peer-relay-sessions", + Exec: runPeerRelaySessions, + ShortHelp: "Print the current set of active peer relay sessions relayed through this node", + } +} + +func runPeerRelaySessions(ctx context.Context, args []string) error { + srv, err := localClient.DebugPeerRelaySessions(ctx) + if err != nil { + return err + } + + var buf bytes.Buffer + f := func(format string, a ...any) { fmt.Fprintf(&buf, format, a...) } + + f("Server port: ") + if srv.UDPPort == nil { + f("not configured (you can configure the port with 'tailscale set --relay-server-port=')") + } else { + f("%d", *srv.UDPPort) + } + f("\n") + f("Sessions count: %d\n", len(srv.Sessions)) + if len(srv.Sessions) == 0 { + Stdout.Write(buf.Bytes()) + return nil + } + + fmtSessionDirection := func(a, z status.ClientInfo) string { + fmtEndpoint := func(ap netip.AddrPort) string { + if ap.IsValid() { + return ap.String() + } + return "" + } + return fmt.Sprintf("%s(%s) --> %s(%s), Packets: %d Bytes: %d", + fmtEndpoint(a.Endpoint), a.ShortDisco, + fmtEndpoint(z.Endpoint), z.ShortDisco, + a.PacketsTx, a.BytesTx) + } + + f("\n") + slices.SortFunc(srv.Sessions, func(s1, s2 status.ServerSession) int { return cmp.Compare(s1.VNI, s2.VNI) }) + for _, s := range srv.Sessions { + f("VNI: %d\n", s.VNI) + f(" %s\n", fmtSessionDirection(s.Client1, s.Client2)) + f(" %s\n", fmtSessionDirection(s.Client2, s.Client1)) + } + Stdout.Write(buf.Bytes()) + return nil +} diff 
--git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index b3170d000d924..c8a0d57c125b6 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -49,8 +49,9 @@ import ( ) var ( - debugCaptureCmd func() *ffcli.Command // or nil - debugPortmapCmd func() *ffcli.Command // or nil + debugCaptureCmd func() *ffcli.Command // or nil + debugPortmapCmd func() *ffcli.Command // or nil + debugPeerRelayCmd func() *ffcli.Command // or nil ) func debugCmd() *ffcli.Command { @@ -374,6 +375,7 @@ func debugCmd() *ffcli.Command { return fs })(), }, + ccall(debugPeerRelayCmd), }...), } } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index b9b7db525a843..deeb9c3a3bd19 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -143,6 +143,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 22f80d5d70347..f85063ddb868f 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -358,6 +358,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay from tailscale.com/feature/relayserver tailscale.com/net/udprelay/endpoint from tailscale.com/feature/relayserver+ + tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 
tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index d92a0b41a6341..f6bab697848e5 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -314,6 +314,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index d77d7145ae59c..91d07484c1137 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -6,7 +6,10 @@ package relayserver import ( + "encoding/json" + "fmt" "log" + "net/http" "net/netip" "strings" "sync" @@ -16,8 +19,10 @@ import ( "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/localapi" "tailscale.com/net/udprelay" "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -33,6 +38,32 @@ const featureName = "relayserver" func init() { feature.Register(featureName) ipnext.RegisterExtension(featureName, newExtension) + localapi.Register("debug-peer-relay-sessions", servePeerRelayDebugSessions) +} + +// servePeerRelayDebugSessions is an HTTP handler for the Local API that +// returns debug/status information for peer relay sessions being relayed by +// this Tailscale node. It writes a JSON-encoded [status.ServerStatus] into the +// HTTP response, or returns an HTTP 405/500 with error text as the body. 
+func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + var e *extension + if ok := h.LocalBackend().FindMatchingExtension(&e); !ok { + http.Error(w, "peer relay server extension unavailable", http.StatusInternalServerError) + return + } + + st := e.serverStatus() + j, err := json.Marshal(st) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal json: %v", err), http.StatusInternalServerError) + return + } + w.Write(j) } // newExtension is an [ipnext.NewExtensionFn] that creates a new relay server @@ -53,16 +84,18 @@ type extension struct { mu sync.Mutex // guards the following fields shutdown bool - port *int // ipn.Prefs.RelayServerPort, nil if disabled - disconnectFromBusCh chan struct{} // non-nil if consumeEventbusTopics is running, closed to signal it to return - busDoneCh chan struct{} // non-nil if consumeEventbusTopics is running, closed when it returns - hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer + port *int // ipn.Prefs.RelayServerPort, nil if disabled + disconnectFromBusCh chan struct{} // non-nil if consumeEventbusTopics is running, closed to signal it to return + busDoneCh chan struct{} // non-nil if consumeEventbusTopics is running, closed when it returns + debugSessionsCh chan chan []status.ServerSession // non-nil if consumeEventbusTopics is running + hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer } // relayServer is the interface of [udprelay.Server]. type relayServer interface { AllocateEndpoint(discoA key.DiscoPublic, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) Close() error + GetSessions() []status.ServerSession } // Name implements [ipnext.Extension]. 
@@ -93,6 +126,7 @@ func (e *extension) handleBusLifetimeLocked() { port := *e.port e.disconnectFromBusCh = make(chan struct{}) e.busDoneCh = make(chan struct{}) + e.debugSessionsCh = make(chan chan []status.ServerSession) go e.consumeEventbusTopics(port) } @@ -139,6 +173,11 @@ var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { return }) +// consumeEventbusTopics serves endpoint allocation requests over the eventbus. +// It also serves [relayServer] debug information on a channel. +// consumeEventbusTopics must never acquire [extension.mu], which can be held by +// other goroutines while waiting to receive on [extension.busDoneCh] or the +// inner [extension.debugSessionsCh] channel. func (e *extension) consumeEventbusTopics(port int) { defer close(e.busDoneCh) @@ -159,6 +198,14 @@ func (e *extension) consumeEventbusTopics(port int) { return case <-eventClient.Done(): return + case respCh := <-e.debugSessionsCh: + if rs == nil { + // Don't initialize the server simply for a debug request. + respCh <- nil + continue + } + sessions := rs.GetSessions() + respCh <- sessions case req := <-reqSub.Events(): if rs == nil { var err error @@ -199,6 +246,7 @@ func (e *extension) disconnectFromBusLocked() { <-e.busDoneCh e.busDoneCh = nil e.disconnectFromBusCh = nil + e.debugSessionsCh = nil } } @@ -210,3 +258,30 @@ func (e *extension) Shutdown() error { e.shutdown = true return nil } + +// serverStatus gathers and returns current peer relay server status information +// for this Tailscale node, and status of each peer relay session this node is +// relaying (if any). 
+func (e *extension) serverStatus() status.ServerStatus { + e.mu.Lock() + defer e.mu.Unlock() + + st := status.ServerStatus{ + UDPPort: nil, + Sessions: nil, + } + if e.port == nil || e.busDoneCh == nil { + return st + } + st.UDPPort = ptr.To(*e.port) + + ch := make(chan []status.ServerSession) + select { + case e.debugSessionsCh <- ch: + resp := <-ch + st.Sessions = resp + return st + case <-e.busDoneCh: + return st + } +} diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 123813c165dfc..424c7a61731f1 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -31,6 +31,7 @@ import ( "tailscale.com/net/sockopts" "tailscale.com/net/stun" "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -95,6 +96,8 @@ type serverEndpoint struct { boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time challenge [2][disco.BindUDPRelayChallengeLen]byte + packetsRx [2]uint64 // num packets received from/sent by each client after they are bound + bytesRx [2]uint64 // num bytes received from/sent by each client after they are bound lamportID uint64 vni uint32 @@ -223,9 +226,13 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade switch { case from == e.boundAddrPorts[0]: e.lastSeen[0] = time.Now() + e.packetsRx[0]++ + e.bytesRx[0] += uint64(len(b)) return b, e.boundAddrPorts[1] case from == e.boundAddrPorts[1]: e.lastSeen[1] = time.Now() + e.packetsRx[1]++ + e.bytesRx[1] += uint64(len(b)) return b, e.boundAddrPorts[0] default: // unrecognized source @@ -782,3 +789,41 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv SteadyStateLifetime: tstime.GoDuration{Duration: s.steadyStateLifetime}, }, nil } + +// extractClientInfo constructs a [status.ClientInfo] for one of the two peer +// 
relay clients involved in this session. +func extractClientInfo(idx int, ep *serverEndpoint) status.ClientInfo { + if idx != 0 && idx != 1 { + panic(fmt.Sprintf("idx passed to extractClientInfo() must be 0 or 1; got %d", idx)) + } + + return status.ClientInfo{ + Endpoint: ep.boundAddrPorts[idx], + ShortDisco: ep.discoPubKeys.Get()[idx].ShortString(), + PacketsTx: ep.packetsRx[idx], + BytesTx: ep.bytesRx[idx], + } +} + +// GetSessions returns a slice of peer relay session statuses, with each +// entry containing detailed info about the server and clients involved in +// each session. This information is intended for debugging/status UX, and +// should not be relied on for any purpose outside of that. +func (s *Server) GetSessions() []status.ServerSession { + s.mu.Lock() + defer s.mu.Unlock() + if s.closed { + return nil + } + var sessions = make([]status.ServerSession, 0, len(s.byDisco)) + for _, se := range s.byDisco { + c1 := extractClientInfo(0, se) + c2 := extractClientInfo(1, se) + sessions = append(sessions, status.ServerSession{ + VNI: se.vni, + Client1: c1, + Client2: c2, + }) + } + return sessions +} diff --git a/net/udprelay/status/status.go b/net/udprelay/status/status.go new file mode 100644 index 0000000000000..3866efada2542 --- /dev/null +++ b/net/udprelay/status/status.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package status contains types relating to the status of peer relay sessions +// between peer relay client nodes via a peer relay server. +package status + +import ( + "net/netip" +) + +// ServerStatus contains the listening UDP port and active sessions (if any) for +// this node's peer relay server at a point in time. +type ServerStatus struct { + // UDPPort is the UDP port number that the peer relay server forwards over, + // as configured by the user with 'tailscale set --relay-server-port='. + // If the port has not been configured, UDPPort will be nil. 
+ UDPPort *int + // Sessions is a slice of detailed status information about each peer + // relay session that this node's peer relay server is involved with. It + // may be empty. + Sessions []ServerSession +} + +// ClientInfo contains status-related information about a single peer relay +// client involved in a single peer relay session. +type ClientInfo struct { + // Endpoint is the [netip.AddrPort] of this peer relay client's underlay + // endpoint participating in the session, or a zero value if the client + // has not completed a handshake. + Endpoint netip.AddrPort + // ShortDisco is a string representation of this peer relay client's disco + // public key. + // + // TODO: disco keys are pretty meaningless to end users, and they are also + // ephemeral. We really need node keys (or translation to first ts addr), + // but those are not fully plumbed into the [udprelay.Server]. Disco keys + // can also be ambiguous to a node key, but we could add node key into a + // [disco.AllocateUDPRelayEndpointRequest] in similar fashion to + // [disco.Ping]. There's also the problem of netmap trimming, where we + // can't verify a node key maps to a disco key. + ShortDisco string + // PacketsTx is the number of packets this peer relay client has sent to + // the other client via the relay server after completing a handshake. This + // is identical to the number of packets that the peer relay server has + // received from this client. + PacketsTx uint64 + // BytesTx is the total overlay bytes this peer relay client has sent to + // the other client via the relay server after completing a handshake. This + // is identical to the total overlay bytes that the peer relay server has + // received from this client. + BytesTx uint64 +} + +// ServerSession contains status information for a single session between two +// peer relay clients, which are relayed via one peer relay server. 
This is the +// status as seen by the peer relay server; each client node may have a +// different view of the session's current status based on connectivity and +// where the client is in the peer relay endpoint setup (allocation, binding, +// pinging, active). +type ServerSession struct { + // VNI is the Virtual Network Identifier for this peer relay session, which + // comes from the Geneve header and is unique to this session. + VNI uint32 + // Client1 contains status information about one of the two peer relay + // clients involved in this session. Note that 'Client1' does NOT mean this + // was/wasn't the allocating client, or the first client to bind, etc; this + // is just one client of two. + Client1 ClientInfo + // Client2 contains status information about one of the two peer relay + // clients involved in this session. Note that 'Client2' does NOT mean this + // was/wasn't the allocating client, or the second client to bind, etc; this + // is just one client of two. + Client2 ClientInfo +} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index de9e69f9cf787..619183a60b742 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -310,6 +310,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 6e0dc87eb4130..136004bc89ce8 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -38,6 +38,7 @@ import ( "tailscale.com/ipn" "tailscale.com/net/tsaddr" 
"tailscale.com/net/tstun" + "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstest/integration/testcontrol" @@ -1526,7 +1527,8 @@ func TestEncryptStateMigration(t *testing.T) { // TestPeerRelayPing creates three nodes with one acting as a peer relay. // The test succeeds when "tailscale ping" flows through the peer -// relay between all 3 nodes. +// relay between all 3 nodes, and "tailscale debug peer-relay-sessions" returns +// expected values. func TestPeerRelayPing(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) @@ -1624,6 +1626,47 @@ func TestPeerRelayPing(t *testing.T) { t.Fatal(err) } } + + allControlNodes := env.Control.AllNodes() + wantSessionsForDiscoShorts := make(set.Set[[2]string]) + for i, a := range allControlNodes { + if i == len(allControlNodes)-1 { + break + } + for _, z := range allControlNodes[i+1:] { + wantSessionsForDiscoShorts.Add([2]string{a.DiscoKey.ShortString(), z.DiscoKey.ShortString()}) + } + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + debugSessions, err := peerRelay.LocalClient().DebugPeerRelaySessions(ctx) + cancel() + if err != nil { + t.Fatalf("debug peer-relay-sessions failed: %v", err) + } + if len(debugSessions.Sessions) != len(wantSessionsForDiscoShorts) { + t.Errorf("got %d peer relay sessions, want %d", len(debugSessions.Sessions), len(wantSessionsForDiscoShorts)) + } + for _, session := range debugSessions.Sessions { + if !wantSessionsForDiscoShorts.Contains([2]string{session.Client1.ShortDisco, session.Client2.ShortDisco}) && + !wantSessionsForDiscoShorts.Contains([2]string{session.Client2.ShortDisco, session.Client1.ShortDisco}) { + t.Errorf("peer relay session for disco keys %s<->%s not found in debug peer-relay-sessions: %+v", session.Client1.ShortDisco, session.Client2.ShortDisco, debugSessions.Sessions) + } + for _, client := range []status.ClientInfo{session.Client1, session.Client2} { + if client.BytesTx == 0 { + 
t.Errorf("unexpected 0 bytes TX counter in peer relay session: %+v", session) + } + if client.PacketsTx == 0 { + t.Errorf("unexpected 0 packets TX counter in peer relay session: %+v", session) + } + if !client.Endpoint.IsValid() { + t.Errorf("unexpected endpoint zero value in peer relay session: %+v", session) + } + if len(client.ShortDisco) == 0 { + t.Errorf("unexpected zero len short disco in peer relay session: %+v", session) + } + } + } } func TestC2NDebugNetmap(t *testing.T) { From 8fe575409f4287880b485d5bfbd05e5ef573c4bb Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 23 Sep 2025 15:49:49 -0700 Subject: [PATCH 0404/1093] feature/featuretags: add build tag to remove captive portal detection This doesn't yet fully pull it out into a feature/captiveportal package. This is the usual first step, moving the code to its own files within the same packages. Updates #17254 Change-Id: Idfaec839debf7c96f51ca6520ce36ccf2f8eec92 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/deps_test.go | 13 ++ .../feature_captiveportal_disabled.go | 13 ++ .../feature_captiveportal_enabled.go | 13 ++ feature/featuretags/featuretags.go | 1 + ipn/ipnlocal/captiveportal.go | 186 ++++++++++++++++++ ipn/ipnlocal/local.go | 183 ++--------------- net/netcheck/captiveportal.go | 55 ++++++ net/netcheck/netcheck.go | 40 +--- 9 files changed, 304 insertions(+), 202 deletions(-) create mode 100644 feature/buildfeatures/feature_captiveportal_disabled.go create mode 100644 feature/buildfeatures/feature_captiveportal_enabled.go create mode 100644 ipn/ipnlocal/captiveportal.go create mode 100644 net/netcheck/captiveportal.go diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index deeb9c3a3bd19..abb3298064510 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -103,7 +103,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/envknob from tailscale.com/client/local+ 
tailscale.com/envknob/featureknob from tailscale.com/client/web tailscale.com/feature from tailscale.com/tsweb+ - tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli+ tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 50e584fe02444..818764b708039 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -123,6 +123,19 @@ func TestOmitACME(t *testing.T) { }.Check(t) } +func TestOmitCaptivePortal(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_captiveportal,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "captive") { + t.Errorf("unexpected dep with ts_omit_captiveportal: %q", dep) + } + }, + }.Check(t) +} + func TestOmitOAuthKey(t *testing.T) { deptest.DepChecker{ GOOS: "linux", diff --git a/feature/buildfeatures/feature_captiveportal_disabled.go b/feature/buildfeatures/feature_captiveportal_disabled.go new file mode 100644 index 0000000000000..367fef81bdc16 --- /dev/null +++ b/feature/buildfeatures/feature_captiveportal_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_captiveportal + +package buildfeatures + +// HasCaptivePortal is whether the binary was built with support for modular feature "Captive portal detection". +// Specifically, it's whether the binary was NOT built with the "ts_omit_captiveportal" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasCaptivePortal = false diff --git a/feature/buildfeatures/feature_captiveportal_enabled.go b/feature/buildfeatures/feature_captiveportal_enabled.go new file mode 100644 index 0000000000000..bd8e1f6a80ff1 --- /dev/null +++ b/feature/buildfeatures/feature_captiveportal_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_captiveportal + +package buildfeatures + +// HasCaptivePortal is whether the binary was built with support for modular feature "Captive portal detection". +// Specifically, it's whether the binary was NOT built with the "ts_omit_captiveportal" build tag. +// It's a const so it can be used for dead code elimination. +const HasCaptivePortal = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 269ff1fc12955..9e6de018c8636 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -93,6 +93,7 @@ var Features = map[FeatureTag]FeatureMeta{ "acme": {"ACME", "ACME TLS certificate management", nil}, "aws": {"AWS", "AWS integration", nil}, "bird": {"Bird", "Bird BGP integration", nil}, + "captiveportal": {"CaptivePortal", "Captive portal detection", nil}, "capture": {"Capture", "Packet capture", nil}, "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, "completion": {"Completion", "CLI shell completion", nil}, diff --git a/ipn/ipnlocal/captiveportal.go b/ipn/ipnlocal/captiveportal.go new file mode 100644 index 0000000000000..14f8b799eb6dd --- /dev/null +++ b/ipn/ipnlocal/captiveportal.go @@ -0,0 +1,186 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_captiveportal + +package ipnlocal + +import ( + "context" + "time" + + "tailscale.com/health" + "tailscale.com/net/captivedetection" + "tailscale.com/util/clientmetric" +) + +func init() { + 
hookCaptivePortalHealthChange.Set(captivePortalHealthChange) + hookCheckCaptivePortalLoop.Set(checkCaptivePortalLoop) +} + +var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected") + +// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken +// before running captive portal detection. +const captivePortalDetectionInterval = 2 * time.Second + +func captivePortalHealthChange(b *LocalBackend, state *health.State) { + isConnectivityImpacted := false + for _, w := range state.Warnings { + // Ignore the captive portal warnable itself. + if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code { + isConnectivityImpacted = true + break + } + } + + // captiveCtx can be changed, and is protected with 'mu'; grab that + // before we start our select, below. + // + // It is guaranteed to be non-nil. + b.mu.Lock() + ctx := b.captiveCtx + b.mu.Unlock() + + // If the context is canceled, we don't need to do anything. + if ctx.Err() != nil { + return + } + + if isConnectivityImpacted { + b.logf("health: connectivity impacted; triggering captive portal detection") + + // Ensure that we select on captiveCtx so that we can time out + // triggering captive portal detection if the backend is shutdown. + select { + case b.needsCaptiveDetection <- true: + case <-ctx.Done(): + } + } else { + // If connectivity is not impacted, we know for sure we're not behind a captive portal, + // so drop any warning, and signal that we don't need captive portal detection. + b.health.SetHealthy(captivePortalWarnable) + select { + case b.needsCaptiveDetection <- false: + case <-ctx.Done(): + } + } +} + +// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected. 
+var captivePortalWarnable = health.Register(&health.Warnable{ + Code: "captive-portal-detected", + Title: "Captive portal detected", + // High severity, because captive portals block all traffic and require user intervention. + Severity: health.SeverityHigh, + Text: health.StaticMessage("This network requires you to log in using your web browser."), + ImpactsConnectivity: true, +}) + +func checkCaptivePortalLoop(b *LocalBackend, ctx context.Context) { + var tmr *time.Timer + + maybeStartTimer := func() { + // If there's an existing timer, nothing to do; just continue + // waiting for it to expire. Otherwise, create a new timer. + if tmr == nil { + tmr = time.NewTimer(captivePortalDetectionInterval) + } + } + maybeStopTimer := func() { + if tmr == nil { + return + } + if !tmr.Stop() { + <-tmr.C + } + tmr = nil + } + + for { + if ctx.Err() != nil { + maybeStopTimer() + return + } + + // First, see if we have a signal on our "healthy" channel, which + // takes priority over an existing timer. Because a select is + // nondeterministic, we explicitly check this channel before + // entering the main select below, so that we're guaranteed to + // stop the timer before starting captive portal detection. + select { + case needsCaptiveDetection := <-b.needsCaptiveDetection: + if needsCaptiveDetection { + maybeStartTimer() + } else { + maybeStopTimer() + } + default: + } + + var timerChan <-chan time.Time + if tmr != nil { + timerChan = tmr.C + } + select { + case <-ctx.Done(): + // All done; stop the timer and then exit. + maybeStopTimer() + return + case <-timerChan: + // Kick off captive portal check + b.performCaptiveDetection() + // nil out timer to force recreation + tmr = nil + case needsCaptiveDetection := <-b.needsCaptiveDetection: + if needsCaptiveDetection { + maybeStartTimer() + } else { + // Healthy; cancel any existing timer + maybeStopTimer() + } + } + } +} + +// shouldRunCaptivePortalDetection reports whether captive portal detection +// should be run. 
It is enabled by default, but can be disabled via a control +// knob. It is also only run when the user explicitly wants the backend to be +// running. +func (b *LocalBackend) shouldRunCaptivePortalDetection() bool { + b.mu.Lock() + defer b.mu.Unlock() + return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning() +} + +// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs +// the detection and updates the Warnable accordingly. +func (b *LocalBackend) performCaptiveDetection() { + if !b.shouldRunCaptivePortalDetection() { + return + } + + d := captivedetection.NewDetector(b.logf) + b.mu.Lock() // for b.hostinfo + cn := b.currentNode() + dm := cn.DERPMap() + preferredDERP := 0 + if b.hostinfo != nil { + if b.hostinfo.NetInfo != nil { + preferredDERP = b.hostinfo.NetInfo.PreferredDERP + } + } + ctx := b.ctx + netMon := b.NetMon() + b.mu.Unlock() + found := d.Detect(ctx, netMon, dm, preferredDERP) + if found { + if !b.health.IsUnhealthy(captivePortalWarnable) { + metricCaptivePortalDetected.Add(1) + } + b.health.SetUnhealthy(captivePortalWarnable, health.Args{}) + } else { + b.health.SetHealthy(captivePortalWarnable) + } +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ce42ae75a0fd7..623a0a3a316ed 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -64,7 +64,6 @@ import ( "tailscale.com/ipn/policy" "tailscale.com/log/sockstatlog" "tailscale.com/logpolicy" - "tailscale.com/net/captivedetection" "tailscale.com/net/dns" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" @@ -168,8 +167,6 @@ type watchSession struct { cancel context.CancelFunc // to shut down the session } -var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected") - var ( // errShutdown indicates that the [LocalBackend.Shutdown] was called. 
errShutdown = errors.New("shutting down") @@ -943,10 +940,6 @@ func (b *LocalBackend) DisconnectControl() { cc.Shutdown() } -// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken -// before running captive portal detection. -const captivePortalDetectionInterval = 2 * time.Second - // linkChange is our network monitor callback, called whenever the network changes. func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { b.mu.Lock() @@ -1002,6 +995,12 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { } } +// Captive portal detection hooks. +var ( + hookCaptivePortalHealthChange feature.Hook[func(*LocalBackend, *health.State)] + hookCheckCaptivePortalLoop feature.Hook[func(*LocalBackend, context.Context)] +) + func (b *LocalBackend) onHealthChange(change health.Change) { if change.WarnableChanged { w := change.Warnable @@ -1019,45 +1018,8 @@ func (b *LocalBackend) onHealthChange(change health.Change) { Health: state, }) - isConnectivityImpacted := false - for _, w := range state.Warnings { - // Ignore the captive portal warnable itself. - if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code { - isConnectivityImpacted = true - break - } - } - - // captiveCtx can be changed, and is protected with 'mu'; grab that - // before we start our select, below. - // - // It is guaranteed to be non-nil. - b.mu.Lock() - ctx := b.captiveCtx - b.mu.Unlock() - - // If the context is canceled, we don't need to do anything. - if ctx.Err() != nil { - return - } - - if isConnectivityImpacted { - b.logf("health: connectivity impacted; triggering captive portal detection") - - // Ensure that we select on captiveCtx so that we can time out - // triggering captive portal detection if the backend is shutdown. 
- select { - case b.needsCaptiveDetection <- true: - case <-ctx.Done(): - } - } else { - // If connectivity is not impacted, we know for sure we're not behind a captive portal, - // so drop any warning, and signal that we don't need captive portal detection. - b.health.SetHealthy(captivePortalWarnable) - select { - case b.needsCaptiveDetection <- false: - case <-ctx.Done(): - } + if f, ok := hookCaptivePortalHealthChange.GetOk(); ok { + f(b, state) } } @@ -1115,7 +1077,7 @@ func (b *LocalBackend) Shutdown() { } b.shutdownCalled = true - if b.captiveCancel != nil { + if buildfeatures.HasCaptivePortal && b.captiveCancel != nil { b.logf("canceling captive portal context") b.captiveCancel() } @@ -2767,123 +2729,6 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { } } -// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected. -var captivePortalWarnable = health.Register(&health.Warnable{ - Code: "captive-portal-detected", - Title: "Captive portal detected", - // High severity, because captive portals block all traffic and require user intervention. - Severity: health.SeverityHigh, - Text: health.StaticMessage("This network requires you to log in using your web browser."), - ImpactsConnectivity: true, -}) - -func (b *LocalBackend) checkCaptivePortalLoop(ctx context.Context) { - var tmr *time.Timer - - maybeStartTimer := func() { - // If there's an existing timer, nothing to do; just continue - // waiting for it to expire. Otherwise, create a new timer. - if tmr == nil { - tmr = time.NewTimer(captivePortalDetectionInterval) - } - } - maybeStopTimer := func() { - if tmr == nil { - return - } - if !tmr.Stop() { - <-tmr.C - } - tmr = nil - } - - for { - if ctx.Err() != nil { - maybeStopTimer() - return - } - - // First, see if we have a signal on our "healthy" channel, which - // takes priority over an existing timer. 
Because a select is - // nondeterministic, we explicitly check this channel before - // entering the main select below, so that we're guaranteed to - // stop the timer before starting captive portal detection. - select { - case needsCaptiveDetection := <-b.needsCaptiveDetection: - if needsCaptiveDetection { - maybeStartTimer() - } else { - maybeStopTimer() - } - default: - } - - var timerChan <-chan time.Time - if tmr != nil { - timerChan = tmr.C - } - select { - case <-ctx.Done(): - // All done; stop the timer and then exit. - maybeStopTimer() - return - case <-timerChan: - // Kick off captive portal check - b.performCaptiveDetection() - // nil out timer to force recreation - tmr = nil - case needsCaptiveDetection := <-b.needsCaptiveDetection: - if needsCaptiveDetection { - maybeStartTimer() - } else { - // Healthy; cancel any existing timer - maybeStopTimer() - } - } - } -} - -// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs -// the detection and updates the Warnable accordingly. -func (b *LocalBackend) performCaptiveDetection() { - if !b.shouldRunCaptivePortalDetection() { - return - } - - d := captivedetection.NewDetector(b.logf) - b.mu.Lock() // for b.hostinfo - cn := b.currentNode() - dm := cn.DERPMap() - preferredDERP := 0 - if b.hostinfo != nil { - if b.hostinfo.NetInfo != nil { - preferredDERP = b.hostinfo.NetInfo.PreferredDERP - } - } - ctx := b.ctx - netMon := b.NetMon() - b.mu.Unlock() - found := d.Detect(ctx, netMon, dm, preferredDERP) - if found { - if !b.health.IsUnhealthy(captivePortalWarnable) { - metricCaptivePortalDetected.Add(1) - } - b.health.SetUnhealthy(captivePortalWarnable, health.Args{}) - } else { - b.health.SetHealthy(captivePortalWarnable) - } -} - -// shouldRunCaptivePortalDetection reports whether captive portal detection -// should be run. It is enabled by default, but can be disabled via a control -// knob. 
It is also only run when the user explicitly wants the backend to be -// running. -func (b *LocalBackend) shouldRunCaptivePortalDetection() bool { - b.mu.Lock() - defer b.mu.Unlock() - return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning() -} - // packetFilterPermitsUnlockedNodes reports any peer in peers with the // UnsignedPeerAPIOnly bool set true has any of its allowed IPs in the packet // filter. @@ -5715,16 +5560,18 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // Start a captive portal detection loop if none has been // started. Create a new context if none is present, since it // can be shut down if we transition away from Running. - if b.captiveCancel == nil { - b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx) - b.goTracker.Go(func() { b.checkCaptivePortalLoop(b.captiveCtx) }) + if buildfeatures.HasCaptivePortal { + if b.captiveCancel == nil { + b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx) + b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, b.captiveCtx) }) + } } } else if oldState == ipn.Running { // Transitioning away from running. b.closePeerAPIListenersLocked() // Stop any existing captive portal detection loop. 
- if b.captiveCancel != nil { + if buildfeatures.HasCaptivePortal && b.captiveCancel != nil { b.captiveCancel() b.captiveCancel = nil diff --git a/net/netcheck/captiveportal.go b/net/netcheck/captiveportal.go new file mode 100644 index 0000000000000..ad11f19a05b6b --- /dev/null +++ b/net/netcheck/captiveportal.go @@ -0,0 +1,55 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_captiveportal + +package netcheck + +import ( + "context" + "time" + + "tailscale.com/net/captivedetection" + "tailscale.com/tailcfg" +) + +func init() { + hookStartCaptivePortalDetection.Set(startCaptivePortalDetection) +} + +func startCaptivePortalDetection(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (done <-chan struct{}, stop func()) { + c := rs.c + + // NOTE(andrew): we can't simply add this goroutine to the + // `NewWaitGroupChan` below, since we don't wait for that + // waitgroup to finish when exiting this function and thus get + // a data race. + ch := make(chan struct{}) + + tmr := time.AfterFunc(c.captivePortalDelay(), func() { + defer close(ch) + d := captivedetection.NewDetector(c.logf) + found := d.Detect(ctx, c.NetMon, dm, preferredDERP) + rs.report.CaptivePortal.Set(found) + }) + + stop = func() { + // Don't cancel our captive portal check if we're + // explicitly doing a verbose netcheck. + if c.Verbose { + return + } + + if tmr.Stop() { + // Stopped successfully; need to close the + // signal channel ourselves. + close(ch) + return + } + + // Did not stop; do nothing and it'll finish by itself + // and close the signal channel. 
+ } + + return ch, stop +} diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index ba9a8cb0f45d5..169133ceb360b 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -26,8 +26,9 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" - "tailscale.com/net/captivedetection" "tailscale.com/net/dnscache" "tailscale.com/net/neterror" "tailscale.com/net/netmon" @@ -786,6 +787,8 @@ func (c *Client) SetForcePreferredDERP(region int) { c.ForcePreferredDERP = region } +var hookStartCaptivePortalDetection feature.Hook[func(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (<-chan struct{}, func())] + // GetReport gets a report. The 'opts' argument is optional and can be nil. // Callers are discouraged from passing a ctx with an arbitrary deadline as this // may cause GetReport to return prematurely before all reporting methods have @@ -910,38 +913,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe // it's unnecessary. captivePortalDone := syncs.ClosedChan() captivePortalStop := func() {} - if !rs.incremental && !onlySTUN { - // NOTE(andrew): we can't simply add this goroutine to the - // `NewWaitGroupChan` below, since we don't wait for that - // waitgroup to finish when exiting this function and thus get - // a data race. - ch := make(chan struct{}) - captivePortalDone = ch - - tmr := time.AfterFunc(c.captivePortalDelay(), func() { - defer close(ch) - d := captivedetection.NewDetector(c.logf) - found := d.Detect(ctx, c.NetMon, dm, preferredDERP) - rs.report.CaptivePortal.Set(found) - }) - - captivePortalStop = func() { - // Don't cancel our captive portal check if we're - // explicitly doing a verbose netcheck. - if c.Verbose { - return - } - - if tmr.Stop() { - // Stopped successfully; need to close the - // signal channel ourselves. 
- close(ch) - return - } - - // Did not stop; do nothing and it'll finish by itself - // and close the signal channel. - } + if buildfeatures.HasCaptivePortal && !rs.incremental && !onlySTUN { + start := hookStartCaptivePortalDetection.Get() + captivePortalDone, captivePortalStop = start(ctx, rs, dm, preferredDERP) } wg := syncs.NewWaitGroupChan() From b54cdf9f38b1476de2d519c25eb84b7bedebd613 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 23 Sep 2025 18:15:48 -0700 Subject: [PATCH 0405/1093] all: use buildfeatures.HasCapture const in a handful of places Help out the linker's dead code elimination. Updates #12614 Change-Id: I6c13cb44d3250bf1e3a01ad393c637da4613affb Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 6 ++++++ net/tstun/wrap.go | 4 ++++ wgengine/magicsock/magicsock.go | 3 +++ wgengine/userspace.go | 4 ++++ wgengine/watchdog.go | 4 ++++ 5 files changed, 21 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 623a0a3a316ed..5c5fb034bc091 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1026,6 +1026,9 @@ func (b *LocalBackend) onHealthChange(change health.Change) { // GetOrSetCaptureSink returns the current packet capture sink, creating it // with the provided newSink function if it does not already exist. func (b *LocalBackend) GetOrSetCaptureSink(newSink func() packet.CaptureSink) packet.CaptureSink { + if !buildfeatures.HasCapture { + return nil + } b.mu.Lock() defer b.mu.Unlock() @@ -1039,6 +1042,9 @@ func (b *LocalBackend) GetOrSetCaptureSink(newSink func() packet.CaptureSink) pa } func (b *LocalBackend) ClearCaptureSink() { + if !buildfeatures.HasCapture { + return + } // Shut down & uninstall the sink if there are no longer // any outputs on it. 
b.mu.Lock() diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 442184065aa92..4c88c7eefead3 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -24,6 +24,7 @@ import ( "go4.org/mem" "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" + "tailscale.com/feature/buildfeatures" tsmetrics "tailscale.com/metrics" "tailscale.com/net/connstats" "tailscale.com/net/packet" @@ -1491,5 +1492,8 @@ var ( ) func (t *Wrapper) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } t.captureHook.Store(cb) } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 72fff34110b7b..0d8a1e53a42e6 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -890,6 +890,9 @@ func deregisterMetrics(m *metrics) { // can be called with a nil argument to uninstall the capture // hook. func (c *Conn) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } c.captureHook.Store(cb) } diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 86136d977485a..7fb5805149791 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -23,6 +23,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" @@ -1652,6 +1653,9 @@ var ( ) func (e *userspaceEngine) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } e.tundev.InstallCaptureHook(cb) e.magicConn.InstallCaptureHook(cb) } diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go index 74a1917488dd8..13bc48fb09d3e 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -15,6 +15,7 @@ import ( "time" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" "tailscale.com/net/packet" @@ -163,6 +164,9 @@ func (e *watchdogEngine) Done() 
<-chan struct{} { } func (e *watchdogEngine) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } e.wrap.InstallCaptureHook(cb) } From b3e9a128afdbb8229a6b85eea8be4783d9224e47 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 23 Sep 2025 14:11:04 -0700 Subject: [PATCH 0406/1093] net/dns, feature/featuretags: make NetworkManager, systemd-resolved, and DBus modular Saves 360 KB (19951800 => 19591352 on linux/amd64 --extra-small --box binary) Updates #12614 Updates #17206 Change-Id: Iafd5b2536dd735111b447546cba335a7a64379ed Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/deps_test.go | 13 ++ .../buildfeatures/feature_dbus_disabled.go | 13 ++ feature/buildfeatures/feature_dbus_enabled.go | 13 ++ .../feature_networkmanager_disabled.go | 13 ++ .../feature_networkmanager_enabled.go | 13 ++ .../feature_resolved_disabled.go | 13 ++ .../buildfeatures/feature_resolved_enabled.go | 13 ++ feature/featuretags/featuretags.go | 25 ++- net/dns/dbus.go | 59 +++++++ net/dns/manager_linux.go | 151 +++++++----------- net/dns/nm.go | 63 ++++++-- net/dns/resolved.go | 8 +- 12 files changed, 284 insertions(+), 113 deletions(-) create mode 100644 feature/buildfeatures/feature_dbus_disabled.go create mode 100644 feature/buildfeatures/feature_dbus_enabled.go create mode 100644 feature/buildfeatures/feature_networkmanager_disabled.go create mode 100644 feature/buildfeatures/feature_networkmanager_enabled.go create mode 100644 feature/buildfeatures/feature_resolved_disabled.go create mode 100644 feature/buildfeatures/feature_resolved_enabled.go create mode 100644 net/dns/dbus.go diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 818764b708039..2e797e36695f7 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -161,3 +161,16 @@ func TestOmitOutboundProxy(t *testing.T) { }, }.Check(t) } + +func TestOmitDBus(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: 
"ts_omit_networkmanager,ts_omit_dbus,ts_omit_resolved,ts_omit_systray,ts_omit_ssh,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "dbus") { + t.Errorf("unexpected DBus dep: %q", dep) + } + }, + }.Check(t) +} diff --git a/feature/buildfeatures/feature_dbus_disabled.go b/feature/buildfeatures/feature_dbus_disabled.go new file mode 100644 index 0000000000000..e6ab896773fd1 --- /dev/null +++ b/feature/buildfeatures/feature_dbus_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_dbus + +package buildfeatures + +// HasDBus is whether the binary was built with support for modular feature "Linux DBus support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dbus" build tag. +// It's a const so it can be used for dead code elimination. +const HasDBus = false diff --git a/feature/buildfeatures/feature_dbus_enabled.go b/feature/buildfeatures/feature_dbus_enabled.go new file mode 100644 index 0000000000000..374331cdabe0c --- /dev/null +++ b/feature/buildfeatures/feature_dbus_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_dbus + +package buildfeatures + +// HasDBus is whether the binary was built with support for modular feature "Linux DBus support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dbus" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDBus = true diff --git a/feature/buildfeatures/feature_networkmanager_disabled.go b/feature/buildfeatures/feature_networkmanager_disabled.go new file mode 100644 index 0000000000000..d0ec6f01796ab --- /dev/null +++ b/feature/buildfeatures/feature_networkmanager_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_networkmanager + +package buildfeatures + +// HasNetworkManager is whether the binary was built with support for modular feature "Linux NetworkManager integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_networkmanager" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetworkManager = false diff --git a/feature/buildfeatures/feature_networkmanager_enabled.go b/feature/buildfeatures/feature_networkmanager_enabled.go new file mode 100644 index 0000000000000..ec284c3109f75 --- /dev/null +++ b/feature/buildfeatures/feature_networkmanager_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_networkmanager + +package buildfeatures + +// HasNetworkManager is whether the binary was built with support for modular feature "Linux NetworkManager integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_networkmanager" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetworkManager = true diff --git a/feature/buildfeatures/feature_resolved_disabled.go b/feature/buildfeatures/feature_resolved_disabled.go new file mode 100644 index 0000000000000..283dd20c76aaa --- /dev/null +++ b/feature/buildfeatures/feature_resolved_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_resolved + +package buildfeatures + +// HasResolved is whether the binary was built with support for modular feature "Linux systemd-resolved integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_resolved" build tag. +// It's a const so it can be used for dead code elimination. +const HasResolved = false diff --git a/feature/buildfeatures/feature_resolved_enabled.go b/feature/buildfeatures/feature_resolved_enabled.go new file mode 100644 index 0000000000000..af1b3b41e9358 --- /dev/null +++ b/feature/buildfeatures/feature_resolved_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_resolved + +package buildfeatures + +// HasResolved is whether the binary was built with support for modular feature "Linux systemd-resolved integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_resolved" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasResolved = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 9e6de018c8636..6f8c4ac170a3d 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -97,6 +97,7 @@ var Features = map[FeatureTag]FeatureMeta{ "capture": {"Capture", "Packet capture", nil}, "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, "completion": {"Completion", "CLI shell completion", nil}, + "dbus": {"DBus", "Linux DBus support", nil}, "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, "debugportmapper": { Sym: "DebugPortMapper", @@ -113,9 +114,19 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Outbound localhost HTTP/SOCK5 proxy support", Deps: []FeatureTag{"netstack"}, }, - "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, - "netstack": {"Netstack", "gVisor netstack (userspace networking) support (TODO; not yet omittable)", nil}, + "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, + "netstack": {"Netstack", "gVisor netstack (userspace networking) support (TODO; not yet omittable)", nil}, + "networkmanager": { + Sym: "NetworkManager", + Desc: "Linux NetworkManager integration", + Deps: []FeatureTag{"dbus"}, + }, "relayserver": {"RelayServer", "Relay server", nil}, + "resolved": { + Sym: "Resolved", + Desc: "Linux systemd-resolved integration", + Deps: []FeatureTag{"dbus"}, + }, "serve": { Sym: "Serve", Desc: "Serve and Funnel support", @@ -124,10 +135,14 @@ var Features = map[FeatureTag]FeatureMeta{ "ssh": { Sym: "SSH", Desc: "Tailscale SSH support", - Deps: []FeatureTag{"netstack"}, + Deps: []FeatureTag{"dbus", "netstack"}, + }, + "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, + "systray": { + Sym: "SysTray", + Desc: "Linux system tray", + Deps: []FeatureTag{"dbus"}, }, - "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, - "systray": {"SysTray", "Linux 
system tray", nil}, "taildrop": {"Taildrop", "Taildrop (file sending) support", nil}, "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, diff --git a/net/dns/dbus.go b/net/dns/dbus.go new file mode 100644 index 0000000000000..c53e8b7205949 --- /dev/null +++ b/net/dns/dbus.go @@ -0,0 +1,59 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_dbus + +package dns + +import ( + "context" + "time" + + "github.com/godbus/dbus/v5" +) + +func init() { + optDBusPing.Set(dbusPing) + optDBusReadString.Set(dbusReadString) +} + +func dbusPing(name, objectPath string) error { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. + return err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + obj := conn.Object(name, dbus.ObjectPath(objectPath)) + call := obj.CallWithContext(ctx, "org.freedesktop.DBus.Peer.Ping", 0) + return call.Err +} + +// dbusReadString reads a string property from the provided name and object +// path. property must be in "interface.member" notation. +func dbusReadString(name, objectPath, iface, member string) (string, error) { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. 
+ return "", err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + obj := conn.Object(name, dbus.ObjectPath(objectPath)) + + var result dbus.Variant + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, iface, member).Store(&result) + if err != nil { + return "", err + } + + if s, ok := result.Value().(string); ok { + return s, nil + } + return result.String(), nil +} diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 8b66ac3a685e3..b2f8197ae8ba9 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -7,7 +7,6 @@ package dns import ( "bytes" - "context" "errors" "fmt" "os" @@ -15,13 +14,12 @@ import ( "sync" "time" - "github.com/godbus/dbus/v5" "tailscale.com/control/controlknobs" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/net/netaddr" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" - "tailscale.com/util/cmpver" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version/distro" ) @@ -36,6 +34,31 @@ func (kv kv) String() string { var publishOnce sync.Once +// reconfigTimeout is the time interval within which Manager.{Up,Down} should complete. +// +// This is particularly useful because certain conditions can cause indefinite hangs +// (such as improper dbus auth followed by contextless dbus.Object.Call). +// Such operations should be wrapped in a timeout context. 
+const reconfigTimeout = time.Second + +// Set unless ts_omit_networkmanager +var ( + optNewNMManager feature.Hook[func(ifName string) (OSConfigurator, error)] + optNMIsUsingResolved feature.Hook[func() error] + optNMVersionBetween feature.Hook[func(v1, v2 string) (bool, error)] +) + +// Set unless ts_omit_resolved +var ( + optNewResolvedManager feature.Hook[func(logf logger.Logf, health *health.Tracker, interfaceName string) (OSConfigurator, error)] +) + +// Set unless ts_omit_dbus +var ( + optDBusPing feature.Hook[func(name, objectPath string) error] + optDBusReadString feature.Hook[func(name, objectPath, iface, member string) (string, error)] +) + // NewOSConfigurator created a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. @@ -45,13 +68,25 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient. } env := newOSConfigEnv{ - fs: directFS{}, - dbusPing: dbusPing, - dbusReadString: dbusReadString, - nmIsUsingResolved: nmIsUsingResolved, - nmVersionBetween: nmVersionBetween, - resolvconfStyle: resolvconfStyle, + fs: directFS{}, + resolvconfStyle: resolvconfStyle, } + if f, ok := optDBusPing.GetOk(); ok { + env.dbusPing = f + } else { + env.dbusPing = func(_, _ string) error { return errors.ErrUnsupported } + } + if f, ok := optDBusReadString.GetOk(); ok { + env.dbusReadString = f + } else { + env.dbusReadString = func(_, _, _, _ string) (string, error) { return "", errors.ErrUnsupported } + } + if f, ok := optNMIsUsingResolved.GetOk(); ok { + env.nmIsUsingResolved = f + } else { + env.nmIsUsingResolved = func() error { return errors.ErrUnsupported } + } + env.nmVersionBetween, _ = optNMVersionBetween.GetOk() // GetOk to not panic if nil; unused if optNMIsUsingResolved returns an error mode, err := dnsMode(logf, health, env) if err != nil { return nil, err @@ -66,17 +101,24 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient. 
case "direct": return newDirectManagerOnFS(logf, health, env.fs), nil case "systemd-resolved": - return newResolvedManager(logf, health, interfaceName) + if f, ok := optNewResolvedManager.GetOk(); ok { + return f(logf, health, interfaceName) + } + return nil, fmt.Errorf("tailscaled was built without DNS %q support", mode) case "network-manager": - return newNMManager(interfaceName) + if f, ok := optNewNMManager.GetOk(); ok { + return f(interfaceName) + } + return nil, fmt.Errorf("tailscaled was built without DNS %q support", mode) case "debian-resolvconf": return newDebianResolvconfManager(logf) case "openresolv": return newOpenresolvManager(logf) default: logf("[unexpected] detected unknown DNS mode %q, using direct manager as last resort", mode) - return newDirectManagerOnFS(logf, health, env.fs), nil } + + return newDirectManagerOnFS(logf, health, env.fs), nil } // newOSConfigEnv are the funcs newOSConfigurator needs, pulled out for testing. @@ -292,50 +334,6 @@ func dnsMode(logf logger.Logf, health *health.Tracker, env newOSConfigEnv) (ret } } -func nmVersionBetween(first, last string) (bool, error) { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. - return false, err - } - - nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager")) - v, err := nm.GetProperty("org.freedesktop.NetworkManager.Version") - if err != nil { - return false, err - } - - version, ok := v.Value().(string) - if !ok { - return false, fmt.Errorf("unexpected type %T for NM version", v.Value()) - } - - outside := cmpver.Compare(version, first) < 0 || cmpver.Compare(version, last) > 0 - return !outside, nil -} - -func nmIsUsingResolved() error { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. 
- return err - } - - nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager/DnsManager")) - v, err := nm.GetProperty("org.freedesktop.NetworkManager.DnsManager.Mode") - if err != nil { - return fmt.Errorf("getting NM mode: %w", err) - } - mode, ok := v.Value().(string) - if !ok { - return fmt.Errorf("unexpected type %T for NM DNS mode", v.Value()) - } - if mode != "systemd-resolved" { - return errors.New("NetworkManager is not using systemd-resolved for DNS") - } - return nil -} - // resolvedIsActuallyResolver reports whether the system is using // systemd-resolved as the resolver. There are two different ways to // use systemd-resolved: @@ -396,44 +394,3 @@ func isLibnssResolveUsed(env newOSConfigEnv) error { } return fmt.Errorf("libnss_resolve not used") } - -func dbusPing(name, objectPath string) error { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. - return err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - obj := conn.Object(name, dbus.ObjectPath(objectPath)) - call := obj.CallWithContext(ctx, "org.freedesktop.DBus.Peer.Ping", 0) - return call.Err -} - -// dbusReadString reads a string property from the provided name and object -// path. property must be in "interface.member" notation. -func dbusReadString(name, objectPath, iface, member string) (string, error) { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. 
- return "", err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - obj := conn.Object(name, dbus.ObjectPath(objectPath)) - - var result dbus.Variant - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, iface, member).Store(&result) - if err != nil { - return "", err - } - - if s, ok := result.Value().(string); ok { - return s, nil - } - return result.String(), nil -} diff --git a/net/dns/nm.go b/net/dns/nm.go index 97557e33aa9bf..a88d29b374ebb 100644 --- a/net/dns/nm.go +++ b/net/dns/nm.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !android +//go:build linux && !android && !ts_omit_networkmanager package dns import ( "context" "encoding/binary" + "errors" "fmt" "net" "net/netip" @@ -16,6 +17,7 @@ import ( "github.com/godbus/dbus/v5" "tailscale.com/net/tsaddr" + "tailscale.com/util/cmpver" "tailscale.com/util/dnsname" ) @@ -25,13 +27,6 @@ const ( lowerPriority = int32(200) // lower than all builtin auto priorities ) -// reconfigTimeout is the time interval within which Manager.{Up,Down} should complete. -// -// This is particularly useful because certain conditions can cause indefinite hangs -// (such as improper dbus auth followed by contextless dbus.Object.Call). -// Such operations should be wrapped in a timeout context. -const reconfigTimeout = time.Second - // nmManager uses the NetworkManager DBus API. 
type nmManager struct { interfaceName string @@ -39,7 +34,13 @@ type nmManager struct { dnsManager dbus.BusObject } -func newNMManager(interfaceName string) (*nmManager, error) { +func init() { + optNewNMManager.Set(newNMManager) + optNMIsUsingResolved.Set(nmIsUsingResolved) + optNMVersionBetween.Set(nmVersionBetween) +} + +func newNMManager(interfaceName string) (OSConfigurator, error) { conn, err := dbus.SystemBus() if err != nil { return nil, err @@ -389,3 +390,47 @@ func (m *nmManager) Close() error { // settings when the tailscale interface goes away. return nil } + +func nmVersionBetween(first, last string) (bool, error) { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. + return false, err + } + + nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager")) + v, err := nm.GetProperty("org.freedesktop.NetworkManager.Version") + if err != nil { + return false, err + } + + version, ok := v.Value().(string) + if !ok { + return false, fmt.Errorf("unexpected type %T for NM version", v.Value()) + } + + outside := cmpver.Compare(version, first) < 0 || cmpver.Compare(version, last) > 0 + return !outside, nil +} + +func nmIsUsingResolved() error { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. 
+ return err + } + + nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager/DnsManager")) + v, err := nm.GetProperty("org.freedesktop.NetworkManager.DnsManager.Mode") + if err != nil { + return fmt.Errorf("getting NM mode: %w", err) + } + mode, ok := v.Value().(string) + if !ok { + return fmt.Errorf("unexpected type %T for NM DNS mode", v.Value()) + } + if mode != "systemd-resolved" { + return errors.New("NetworkManager is not using systemd-resolved for DNS") + } + return nil +} diff --git a/net/dns/resolved.go b/net/dns/resolved.go index 4f58f3f9cc080..5d9130f05ecb5 100644 --- a/net/dns/resolved.go +++ b/net/dns/resolved.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !android +//go:build linux && !android && !ts_omit_resolved package dns @@ -70,7 +70,11 @@ type resolvedManager struct { configCR chan changeRequest // tracks OSConfigs changes and error responses } -func newResolvedManager(logf logger.Logf, health *health.Tracker, interfaceName string) (*resolvedManager, error) { +func init() { + optNewResolvedManager.Set(newResolvedManager) +} + +func newResolvedManager(logf logger.Logf, health *health.Tracker, interfaceName string) (OSConfigurator, error) { iface, err := net.InterfaceByName(interfaceName) if err != nil { return nil, err From df747f1c1b24057de03844ba0561e41123de7c27 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 24 Sep 2025 09:14:41 -0700 Subject: [PATCH 0407/1093] util/eventbus: add a Done method to the Monitor type (#17263) Some systems need to tell whether the monitored goroutine has finished alongside other channel operations (notably in this case the relay server, but there seem likely to be others similarly situated). Updates #15160 Change-Id: I5f0f3fae827b07f9b7102a3b08f60cda9737fe28 Signed-off-by: M. J. 
Fromberger --- util/eventbus/bus_test.go | 27 +++++++++++++++++++++++++-- util/eventbus/monitor.go | 14 +++++++++++++- 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 67f68cd4a14d1..f9e7ee3dd0459 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -236,6 +236,17 @@ func TestMonitor(t *testing.T) { } }) + t.Run("ZeroDone", func(t *testing.T) { + var zero eventbus.Monitor + + select { + case <-zero.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for zero monitor to be done") + } + }) + t.Run("ZeroClose", func(t *testing.T) { var zero eventbus.Monitor @@ -276,7 +287,13 @@ func TestMonitor(t *testing.T) { // While the goroutine is running, Wait does not complete. select { case <-done: - t.Error("monitor is ready before its goroutine is finished") + t.Error("monitor is ready before its goroutine is finished (Wait)") + default: + // OK + } + select { + case <-m.Done(): + t.Error("monitor is ready before its goroutine is finished (Done)") default: // OK } @@ -286,7 +303,13 @@ func TestMonitor(t *testing.T) { case <-done: // OK case <-time.After(time.Second): - t.Fatal("timeout waiting for monitor to complete") + t.Fatal("timeout waiting for monitor to complete (Wait)") + } + select { + case <-m.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for monitor to complete (Done)") } } } diff --git a/util/eventbus/monitor.go b/util/eventbus/monitor.go index 18cc2a413ddef..db6fe1be44737 100644 --- a/util/eventbus/monitor.go +++ b/util/eventbus/monitor.go @@ -3,9 +3,12 @@ package eventbus +import "tailscale.com/syncs" + // A Monitor monitors the execution of a goroutine processing events from a // [Client], allowing the caller to block until it is complete. The zero value -// of m is valid and its Close and Wait methods return immediately. 
+// of m is valid; its Close and Wait methods return immediately, and its Done +// method returns an already-closed channel. type Monitor struct { // These fields are immutable after initialization cli *Client @@ -32,6 +35,15 @@ func (m Monitor) Wait() { <-m.done } +// Done returns a channel that is closed when the monitored goroutine has +// finished executing. +func (m Monitor) Done() <-chan struct{} { + if m.done == nil { + return syncs.ClosedChan() + } + return m.done +} + // Monitor executes f in a new goroutine attended by a [Monitor]. The caller // is responsible for waiting for the goroutine to complete, by calling either // [Monitor.Close] or [Monitor.Wait]. From 21dc5f4e212e15f48f15fceb8ec487f8be54989f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 23 Sep 2025 17:07:48 -0700 Subject: [PATCH 0408/1093] derp/derpserver: split off derp.Server out of derp into its own package This exports a number of things from the derp (generic + client) package to be used by the new derpserver package, as now used by cmd/derper. And then enough other misc changes to lock in that cmd/tailscaled can be configured to not bring in tailscale.com/client/local. 
(The webclient in particular, even when disabled, was bringing it in, so that's now fixed) Fixes #17257 Change-Id: I88b6c7958643fb54f386dd900bddf73d2d4d96d5 Signed-off-by: Brad Fitzpatrick --- cmd/derper/ace.go | 4 +- cmd/derper/cert_test.go | 6 +- cmd/derper/depaware.txt | 15 +- cmd/derper/derper.go | 17 +- cmd/derper/derper_test.go | 10 +- cmd/derper/mesh.go | 5 +- cmd/derper/websocket.go | 4 +- cmd/k8s-operator/depaware.txt | 11 +- cmd/tailscale/depaware.txt | 14 +- cmd/tailscaled/depaware.txt | 13 +- cmd/tailscaled/deps_test.go | 11 + cmd/tailscaled/tailscaled.go | 10 +- cmd/tailscaled/webclient.go | 21 + cmd/tsidp/depaware.txt | 11 +- derp/client_test.go | 235 ++++ derp/derp.go | 105 +- derp/derp_client.go | 80 +- derp/derp_test.go | 1051 +---------------- derp/derphttp/derphttp_client.go | 2 +- derp/derphttp/derphttp_test.go | 94 +- derp/derphttp/export_test.go | 24 + derp/{ => derpserver}/derp_server.go | 159 ++- derp/{ => derpserver}/derp_server_default.go | 2 +- derp/{ => derpserver}/derp_server_linux.go | 2 +- derp/derpserver/derpserver_test.go | 782 ++++++++++++ .../handler.go} | 14 +- derp/{ => derpserver}/testdata/example_ss.txt | 0 derp/export_test.go | 10 + ipn/ipnlocal/web_client_stub.go | 4 +- net/captivedetection/captivedetection_test.go | 4 +- prober/derp_test.go | 5 +- tsnet/depaware.txt | 11 +- tstest/integration/integration.go | 7 +- tstest/natlab/vnet/vnet.go | 11 +- wgengine/magicsock/magicsock_test.go | 7 +- 35 files changed, 1442 insertions(+), 1319 deletions(-) create mode 100644 cmd/tailscaled/webclient.go create mode 100644 derp/client_test.go create mode 100644 derp/derphttp/export_test.go rename derp/{ => derpserver}/derp_server.go (94%) rename derp/{ => derpserver}/derp_server_default.go (91%) rename derp/{ => derpserver}/derp_server_linux.go (99%) create mode 100644 derp/derpserver/derpserver_test.go rename derp/{derphttp/derphttp_server.go => derpserver/handler.go} (86%) rename derp/{ => derpserver}/testdata/example_ss.txt (100%) 
create mode 100644 derp/export_test.go diff --git a/cmd/derper/ace.go b/cmd/derper/ace.go index a11539a6e19fb..56fb68c336cd3 100644 --- a/cmd/derper/ace.go +++ b/cmd/derper/ace.go @@ -12,12 +12,12 @@ import ( "net/http" "strings" - "tailscale.com/derp" + "tailscale.com/derp/derpserver" "tailscale.com/net/connectproxy" ) // serveConnect handles a CONNECT request for ACE support. -func serveConnect(s *derp.Server, w http.ResponseWriter, r *http.Request) { +func serveConnect(s *derpserver.Server, w http.ResponseWriter, r *http.Request) { if !*flagACEEnabled { http.Error(w, "CONNECT not enabled", http.StatusForbidden) return diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index 31fd4ea446949..1ef932e7fe56e 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -22,8 +22,8 @@ import ( "testing" "time" - "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -131,9 +131,9 @@ func TestPinnedCertRawIP(t *testing.T) { } defer ln.Close() - ds := derp.NewServer(key.NewNode(), t.Logf) + ds := derpserver.NewServer(key.NewNode(), t.Logf) - derpHandler := derphttp.Handler(ds) + derpHandler := derpserver.Handler(ds) mux := http.NewServeMux() mux.Handle("/derp", derpHandler) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index b0501b5885fee..d19ea1f17658b 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -89,12 +89,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ - tailscale.com/client/local from tailscale.com/derp + tailscale.com/client/local from tailscale.com/derp/derpserver tailscale.com/client/tailscale/apitype from tailscale.com/client/local tailscale.com/derp from 
tailscale.com/cmd/derper+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/derper - tailscale.com/disco from tailscale.com/derp + tailscale.com/derp/derpserver from tailscale.com/cmd/derper + tailscale.com/disco from tailscale.com/derp/derpserver tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/feature from tailscale.com/tsweb @@ -117,7 +118,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/sockstats from tailscale.com/derp/derphttp tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/derper - L tailscale.com/net/tcpinfo from tailscale.com/derp + L tailscale.com/net/tcpinfo from tailscale.com/derp/derpserver tailscale.com/net/tlsdial from tailscale.com/derp/derphttp tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ @@ -132,7 +133,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa W tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate - tailscale.com/tstime/rate from tailscale.com/derp + tailscale.com/tstime/rate from tailscale.com/derp/derpserver tailscale.com/tsweb from tailscale.com/cmd/derper+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/derper tailscale.com/tsweb/varz from tailscale.com/tsweb+ @@ -167,7 +168,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb - tailscale.com/util/set from tailscale.com/derp+ + tailscale.com/util/set from 
tailscale.com/derp/derpserver+ tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/cmd/derper+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting @@ -180,7 +181,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/vizerror from tailscale.com/tailcfg+ W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ - tailscale.com/version from tailscale.com/derp+ + tailscale.com/version from tailscale.com/cmd/derper+ tailscale.com/version/distro from tailscale.com/envknob+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap golang.org/x/crypto/acme from golang.org/x/crypto/acme/autocert diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index b25bf22de72d7..eed94bd68c712 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -40,8 +40,7 @@ import ( "github.com/tailscale/setec/client/setec" "golang.org/x/time/rate" "tailscale.com/atomicfile" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/metrics" "tailscale.com/net/ktimeout" "tailscale.com/net/stunserver" @@ -90,7 +89,7 @@ var ( // tcpUserTimeout is intentionally short, so that hung connections are cleaned up promptly. DERPs should be nearby users. tcpUserTimeout = flag.Duration("tcp-user-timeout", 15*time.Second, "TCP user timeout") // tcpWriteTimeout is the timeout for writing to client TCP connections. It does not apply to mesh connections. 
- tcpWriteTimeout = flag.Duration("tcp-write-timeout", derp.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") + tcpWriteTimeout = flag.Duration("tcp-write-timeout", derpserver.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") // ACE flagACEEnabled = flag.Bool("ace", false, "whether to enable embedded ACE server [experimental + in-development as of 2025-09-12; not yet documented]") @@ -189,7 +188,7 @@ func main() { serveTLS := tsweb.IsProd443(*addr) || *certMode == "manual" - s := derp.NewServer(cfg.PrivateKey, log.Printf) + s := derpserver.NewServer(cfg.PrivateKey, log.Printf) s.SetVerifyClient(*verifyClients) s.SetTailscaledSocketPath(*socket) s.SetVerifyClientURL(*verifyClientURL) @@ -256,7 +255,7 @@ func main() { mux := http.NewServeMux() if *runDERP { - derpHandler := derphttp.Handler(s) + derpHandler := derpserver.Handler(s) derpHandler = addWebSocketSupport(s, derpHandler) mux.Handle("/derp", derpHandler) } else { @@ -267,8 +266,8 @@ func main() { // These two endpoints are the same. Different versions of the clients // have assumes different paths over time so we support both. 
- mux.HandleFunc("/derp/probe", derphttp.ProbeHandler) - mux.HandleFunc("/derp/latency-check", derphttp.ProbeHandler) + mux.HandleFunc("/derp/probe", derpserver.ProbeHandler) + mux.HandleFunc("/derp/latency-check", derpserver.ProbeHandler) go refreshBootstrapDNSLoop() mux.HandleFunc("/bootstrap-dns", tsweb.BrowserHeaderHandlerFunc(handleBootstrapDNS)) @@ -280,7 +279,7 @@ func main() { tsweb.AddBrowserHeaders(w) io.WriteString(w, "User-agent: *\nDisallow: /\n") })) - mux.Handle("/generate_204", http.HandlerFunc(derphttp.ServeNoContent)) + mux.Handle("/generate_204", http.HandlerFunc(derpserver.ServeNoContent)) debug := tsweb.Debugger(mux) debug.KV("TLS hostname", *hostname) debug.KV("Mesh key", s.HasMeshKey()) @@ -388,7 +387,7 @@ func main() { if *httpPort > -1 { go func() { port80mux := http.NewServeMux() - port80mux.HandleFunc("/generate_204", derphttp.ServeNoContent) + port80mux.HandleFunc("/generate_204", derpserver.ServeNoContent) port80mux.Handle("/", certManager.HTTPHandler(tsweb.Port80Handler{Main: mux})) port80srv := &http.Server{ Addr: net.JoinHostPort(listenHost, fmt.Sprintf("%d", *httpPort)), diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index 6dce1fcdfebdd..d27f8cb20144d 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -11,7 +11,7 @@ import ( "strings" "testing" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/tstest/deptest" ) @@ -78,20 +78,20 @@ func TestNoContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "https://localhost/generate_204", nil) if tt.input != "" { - req.Header.Set(derphttp.NoContentChallengeHeader, tt.input) + req.Header.Set(derpserver.NoContentChallengeHeader, tt.input) } w := httptest.NewRecorder() - derphttp.ServeNoContent(w, req) + derpserver.ServeNoContent(w, req) resp := w.Result() if tt.want == "" { - if h, found := resp.Header[derphttp.NoContentResponseHeader]; found { + if h, found := 
resp.Header[derpserver.NoContentResponseHeader]; found { t.Errorf("got %+v; expected no response header", h) } return } - if got := resp.Header.Get(derphttp.NoContentResponseHeader); got != tt.want { + if got := resp.Header.Get(derpserver.NoContentResponseHeader); got != tt.want { t.Errorf("got %q; want %q", got, tt.want) } }) diff --git a/cmd/derper/mesh.go b/cmd/derper/mesh.go index cbb2fa59ac030..909b5f2ca18c4 100644 --- a/cmd/derper/mesh.go +++ b/cmd/derper/mesh.go @@ -13,11 +13,12 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/types/logger" ) -func startMesh(s *derp.Server) error { +func startMesh(s *derpserver.Server) error { if *meshWith == "" { return nil } @@ -32,7 +33,7 @@ func startMesh(s *derp.Server) error { return nil } -func startMeshWithHost(s *derp.Server, hostTuple string) error { +func startMeshWithHost(s *derpserver.Server, hostTuple string) error { var host string var dialHost string hostParts := strings.Split(hostTuple, "/") diff --git a/cmd/derper/websocket.go b/cmd/derper/websocket.go index 05f40deb816d5..82fd30bed165a 100644 --- a/cmd/derper/websocket.go +++ b/cmd/derper/websocket.go @@ -11,14 +11,14 @@ import ( "strings" "github.com/coder/websocket" - "tailscale.com/derp" + "tailscale.com/derp/derpserver" "tailscale.com/net/wsconn" ) var counterWebSocketAccepts = expvar.NewInt("derp_websocket_accepts") // addWebSocketSupport returns a Handle wrapping base that adds WebSocket server support. 
-func addWebSocketSupport(s *derp.Server, base http.Handler) http.Handler { +func addWebSocketSupport(s *derpserver.Server, base http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { up := strings.ToLower(r.Header.Get("Upgrade")) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e0fdc27bb2ea2..2281d38195309 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -784,9 +784,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal @@ -839,7 +839,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -875,7 +875,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/socks5 from tailscale.com/tsnet 
tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -902,7 +901,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tsnet from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal @@ -1217,7 +1216,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/google/go-cmp/cmp+ - math/rand/v2 from tailscale.com/derp+ + math/rand/v2 from crypto/ecdsa+ mime from github.com/prometheus/common/expfmt+ mime/multipart from github.com/go-openapi/swag+ mime/quotedprintable from mime/multipart diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index abb3298064510..4bd4e6bcabb22 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -96,9 +96,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from 
tailscale.com/net/netcheck - tailscale.com/disco from tailscale.com/derp tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web @@ -119,7 +118,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck @@ -138,7 +137,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/sockstats from tailscale.com/control/controlhttp+ tailscale.com/net/stun from tailscale.com/net/netcheck - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -153,7 +151,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/control/controlhttp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate - tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli+ + tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ tailscale.com/types/dnstype from tailscale.com/tailcfg+ @@ -193,7 +191,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by 
github.com/tailscale/dep tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli tailscale.com/util/rands from tailscale.com/tsweb - tailscale.com/util/set from tailscale.com/derp+ + tailscale.com/util/set from tailscale.com/ipn+ tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/client/systray+ L tailscale.com/util/stringsx from tailscale.com/client/systray @@ -358,7 +356,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep encoding/pem from crypto/tls+ encoding/xml from github.com/godbus/dbus/v5/introspect+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/health+ flag from github.com/peterbourgon/ff/v3+ fmt from archive/tar+ hash from compress/zlib+ @@ -431,7 +429,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/mdlayher/netlink+ - math/rand/v2 from tailscale.com/derp+ + math/rand/v2 from crypto/ecdsa+ mime from golang.org/x/oauth2/internal+ mime/multipart from net/http mime/quotedprintable from mime/multipart diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index f85063ddb868f..70be690ee9a20 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -256,9 +256,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ - tailscale.com/disco from tailscale.com/derp+ + 
tailscale.com/disco from tailscale.com/feature/relayserver+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal @@ -314,7 +314,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ @@ -349,7 +349,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/socks5 from tailscale.com/cmd/tailscaled tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -378,7 +377,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal @@ -432,7 +431,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock - tailscale.com/util/set from tailscale.com/derp+ + tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ tailscale.com/util/slicesx from tailscale.com/appc+ tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy @@ -613,7 +612,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/pem from crypto/tls+ encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/cmd/tailscaled+ flag from tailscale.com/cmd/tailscaled+ fmt from archive/tar+ hash from compress/zlib+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 2e797e36695f7..35975b57ce58f 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -44,6 +44,17 @@ func TestOmitSyspolicy(t *testing.T) { }.Check(t) } +func TestOmitLocalClient(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_webclient,ts_omit_relayserver,ts_omit_oauthkey,ts_omit_acme", + BadDeps: map[string]string{ + "tailscale.com/client/local": "unexpected", + }, + }.Check(t) +} + // Test that we can build a binary without reflect.MethodByName. 
// See https://github.com/tailscale/tailscale/issues/17063 func TestOmitReflectThings(t *testing.T) { diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 9e099f9cba9d2..0c6e6d22f4c7a 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -30,7 +30,6 @@ import ( "syscall" "time" - "tailscale.com/client/local" "tailscale.com/cmd/tailscaled/childproc" "tailscale.com/control/controlclient" "tailscale.com/envknob" @@ -685,16 +684,17 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if root := lb.TailscaleVarRoot(); root != "" { dnsfallback.SetCachePath(filepath.Join(root, "derpmap.cached.json"), logf) } - lb.ConfigureWebClient(&local.Client{ - Socket: args.socketpath, - UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(), - }) + if f, ok := hookConfigureWebClient.GetOk(); ok { + f(lb) + } if err := ns.Start(lb); err != nil { log.Fatalf("failed to start netstack: %v", err) } return lb, nil } +var hookConfigureWebClient feature.Hook[func(*ipnlocal.LocalBackend)] + // createEngine tries to the wgengine.Engine based on the order of tunnels // specified in the command line flags. 
// diff --git a/cmd/tailscaled/webclient.go b/cmd/tailscaled/webclient.go new file mode 100644 index 0000000000000..672ba7126d2a7 --- /dev/null +++ b/cmd/tailscaled/webclient.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_webclient + +package main + +import ( + "tailscale.com/client/local" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/paths" +) + +func init() { + hookConfigureWebClient.Set(func(lb *ipnlocal.LocalBackend) { + lb.ConfigureWebClient(&local.Client{ + Socket: args.socketpath, + UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(), + }) + }) +} diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index f6bab697848e5..4fd7c8020abb7 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -226,9 +226,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal @@ -270,7 +270,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from 
tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -306,7 +306,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -332,7 +331,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/tsnet from tailscale.com/cmd/tsidp tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal @@ -566,7 +565,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar encoding/pem from crypto/tls+ encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/health+ flag from tailscale.com/cmd/tsidp+ fmt from archive/tar+ hash from compress/zlib+ diff --git a/derp/client_test.go b/derp/client_test.go new file mode 100644 index 0000000000000..a731ad197f1e7 --- /dev/null +++ b/derp/client_test.go @@ -0,0 +1,235 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derp + +import ( + "bufio" + "bytes" + "io" + "net" + "reflect" + "sync" + 
"testing" + "time" + + "tailscale.com/tstest" + "tailscale.com/types/key" +) + +type dummyNetConn struct { + net.Conn +} + +func (dummyNetConn) SetReadDeadline(time.Time) error { return nil } + +func TestClientRecv(t *testing.T) { + tests := []struct { + name string + input []byte + want any + }{ + { + name: "ping", + input: []byte{ + byte(FramePing), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + }, + want: PingMessage{1, 2, 3, 4, 5, 6, 7, 8}, + }, + { + name: "pong", + input: []byte{ + byte(FramePong), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + }, + want: PongMessage{1, 2, 3, 4, 5, 6, 7, 8}, + }, + { + name: "health_bad", + input: []byte{ + byte(FrameHealth), 0, 0, 0, 3, + byte('B'), byte('A'), byte('D'), + }, + want: HealthMessage{Problem: "BAD"}, + }, + { + name: "health_ok", + input: []byte{ + byte(FrameHealth), 0, 0, 0, 0, + }, + want: HealthMessage{}, + }, + { + name: "server_restarting", + input: []byte{ + byte(FrameRestarting), 0, 0, 0, 8, + 0, 0, 0, 1, + 0, 0, 0, 2, + }, + want: ServerRestartingMessage{ + ReconnectIn: 1 * time.Millisecond, + TryFor: 2 * time.Millisecond, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + nc: dummyNetConn{}, + br: bufio.NewReader(bytes.NewReader(tt.input)), + logf: t.Logf, + clock: &tstest.Clock{}, + } + got, err := c.Recv() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %#v; want %#v", got, tt.want) + } + }) + } +} + +func TestClientSendPing(t *testing.T) { + var buf bytes.Buffer + c := &Client{ + bw: bufio.NewWriter(&buf), + } + if err := c.SendPing([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { + t.Fatal(err) + } + want := []byte{ + byte(FramePing), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + } + if !bytes.Equal(buf.Bytes(), want) { + t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) + } +} + +func TestClientSendPong(t *testing.T) { + var buf bytes.Buffer + c := &Client{ + bw: bufio.NewWriter(&buf), + } + if err := 
c.SendPong([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { + t.Fatal(err) + } + want := []byte{ + byte(FramePong), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + } + if !bytes.Equal(buf.Bytes(), want) { + t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) + } +} + +func BenchmarkWriteUint32(b *testing.B) { + w := bufio.NewWriter(io.Discard) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + writeUint32(w, 0x0ba3a) + } +} + +type nopRead struct{} + +func (r nopRead) Read(p []byte) (int, error) { + return len(p), nil +} + +var sinkU32 uint32 + +func BenchmarkReadUint32(b *testing.B) { + r := bufio.NewReader(nopRead{}) + var err error + b.ReportAllocs() + b.ResetTimer() + for range b.N { + sinkU32, err = readUint32(r) + if err != nil { + b.Fatal(err) + } + } +} + +type countWriter struct { + mu sync.Mutex + writes int + bytes int64 +} + +func (w *countWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + w.writes++ + w.bytes += int64(len(p)) + return len(p), nil +} + +func (w *countWriter) Stats() (writes int, bytes int64) { + w.mu.Lock() + defer w.mu.Unlock() + return w.writes, w.bytes +} + +func (w *countWriter) ResetStats() { + w.mu.Lock() + defer w.mu.Unlock() + w.writes, w.bytes = 0, 0 +} + +func TestClientSendRateLimiting(t *testing.T) { + cw := new(countWriter) + c := &Client{ + bw: bufio.NewWriter(cw), + clock: &tstest.Clock{}, + } + c.setSendRateLimiter(ServerInfoMessage{}) + + pkt := make([]byte, 1000) + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + writes1, bytes1 := cw.Stats() + if writes1 != 1 { + t.Errorf("writes = %v, want 1", writes1) + } + + // Flood should all succeed. 
+ cw.ResetStats() + for range 1000 { + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + } + writes1K, bytes1K := cw.Stats() + if writes1K != 1000 { + t.Logf("writes = %v; want 1000", writes1K) + } + if got, want := bytes1K, bytes1*1000; got != want { + t.Logf("bytes = %v; want %v", got, want) + } + + // Set a rate limiter + cw.ResetStats() + c.setSendRateLimiter(ServerInfoMessage{ + TokenBucketBytesPerSecond: 1, + TokenBucketBytesBurst: int(bytes1 * 2), + }) + for range 1000 { + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + } + writesLimited, bytesLimited := cw.Stats() + if writesLimited == 0 || writesLimited == writes1K { + t.Errorf("limited conn's write count = %v; want non-zero, less than 1k", writesLimited) + } + if bytesLimited < bytes1*2 || bytesLimited >= bytes1K { + t.Errorf("limited conn's bytes count = %v; want >=%v, <%v", bytesLimited, bytes1K*2, bytes1K) + } +} diff --git a/derp/derp.go b/derp/derp.go index 24c1ca65cfb3c..e19a99b0025ce 100644 --- a/derp/derp.go +++ b/derp/derp.go @@ -27,15 +27,15 @@ import ( // including its on-wire framing overhead) const MaxPacketSize = 64 << 10 -// magic is the DERP magic number, sent in the frameServerKey frame +// Magic is the DERP Magic number, sent in the frameServerKey frame // upon initial connection. 
-const magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91 +const Magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91 const ( - nonceLen = 24 - frameHeaderLen = 1 + 4 // frameType byte + 4 byte length - keyLen = 32 - maxInfoLen = 1 << 20 + NonceLen = 24 + FrameHeaderLen = 1 + 4 // frameType byte + 4 byte length + KeyLen = 32 + MaxInfoLen = 1 << 20 ) // KeepAlive is the minimum frequency at which the DERP server sends @@ -48,10 +48,10 @@ const KeepAlive = 60 * time.Second // - version 2: received packets have src addrs in frameRecvPacket at beginning const ProtocolVersion = 2 -// frameType is the one byte frame type at the beginning of the frame +// FrameType is the one byte frame type at the beginning of the frame // header. The second field is a big-endian uint32 describing the // length of the remaining frame (not including the initial 5 bytes). -type frameType byte +type FrameType byte /* Protocol flow: @@ -69,14 +69,14 @@ Steady state: * server then sends frameRecvPacket to recipient */ const ( - frameServerKey = frameType(0x01) // 8B magic + 32B public key + (0+ bytes future use) - frameClientInfo = frameType(0x02) // 32B pub key + 24B nonce + naclbox(json) - frameServerInfo = frameType(0x03) // 24B nonce + naclbox(json) - frameSendPacket = frameType(0x04) // 32B dest pub key + packet bytes - frameForwardPacket = frameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes - frameRecvPacket = frameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes - frameKeepAlive = frameType(0x06) // no payload, no-op (to be replaced with ping/pong) - frameNotePreferred = frameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node + FrameServerKey = FrameType(0x01) // 8B magic + 32B public key + (0+ bytes future use) + FrameClientInfo = FrameType(0x02) // 32B pub key + 24B nonce + naclbox(json) + FrameServerInfo = FrameType(0x03) // 24B nonce + naclbox(json) + FrameSendPacket = FrameType(0x04) // 32B dest pub key + 
packet bytes + FrameForwardPacket = FrameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes + FrameRecvPacket = FrameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes + FrameKeepAlive = FrameType(0x06) // no payload, no-op (to be replaced with ping/pong) + FrameNotePreferred = FrameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node // framePeerGone is sent from server to client to signal that // a previous sender is no longer connected. That is, if A @@ -85,7 +85,7 @@ const ( // exists on that connection to get back to A. It is also sent // if A tries to send a CallMeMaybe to B and the server has no // record of B - framePeerGone = frameType(0x08) // 32B pub key of peer that's gone + 1 byte reason + FramePeerGone = FrameType(0x08) // 32B pub key of peer that's gone + 1 byte reason // framePeerPresent is like framePeerGone, but for other members of the DERP // region when they're meshed up together. @@ -96,7 +96,7 @@ const ( // remaining after that, it's a PeerPresentFlags byte. // While current servers send 41 bytes, old servers will send fewer, and newer // servers might send more. - framePeerPresent = frameType(0x09) + FramePeerPresent = FrameType(0x09) // frameWatchConns is how one DERP node in a regional mesh // subscribes to the others in the region. @@ -104,30 +104,30 @@ const ( // is closed. Otherwise, the client is initially flooded with // framePeerPresent for all connected nodes, and then a stream of // framePeerPresent & framePeerGone has peers connect and disconnect. - frameWatchConns = frameType(0x10) + FrameWatchConns = FrameType(0x10) // frameClosePeer is a privileged frame type (requires the // mesh key for now) that closes the provided peer's // connection. (To be used for cluster load balancing // purposes, when clients end up on a non-ideal node) - frameClosePeer = frameType(0x11) // 32B pub key of peer to close. + FrameClosePeer = FrameType(0x11) // 32B pub key of peer to close. 
- framePing = frameType(0x12) // 8 byte ping payload, to be echoed back in framePong - framePong = frameType(0x13) // 8 byte payload, the contents of the ping being replied to + FramePing = FrameType(0x12) // 8 byte ping payload, to be echoed back in framePong + FramePong = FrameType(0x13) // 8 byte payload, the contents of the ping being replied to // frameHealth is sent from server to client to tell the client // if their connection is unhealthy somehow. Currently the only unhealthy state // is whether the connection is detected as a duplicate. // The entire frame body is the text of the error message. An empty message // clears the error state. - frameHealth = frameType(0x14) + FrameHealth = FrameType(0x14) // frameRestarting is sent from server to client for the // server to declare that it's restarting. Payload is two big // endian uint32 durations in milliseconds: when to reconnect, // and how long to try total. See ServerRestartingMessage docs for // more details on how the client should interpret them. - frameRestarting = frameType(0x15) + FrameRestarting = FrameType(0x15) ) // PeerGoneReasonType is a one byte reason code explaining why a @@ -154,6 +154,18 @@ const ( PeerPresentNotIdeal = 1 << 3 // client said derp server is not its Region.Nodes[0] ideal node ) +// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests +// to indicate that they're connecting to their ideal (Region.Nodes[0]) node. +// The HTTP header value is the name of the node they wish they were connected +// to. This is an optional header. +const IdealNodeHeader = "Ideal-Node" + +// FastStartHeader is the header (with value "1") that signals to the HTTP +// server that the DERP HTTP client does not want the HTTP 101 response +// headers and it will begin writing & reading the DERP protocol immediately +// following its HTTP request. 
+const FastStartHeader = "Derp-Fast-Start" + var bin = binary.BigEndian func writeUint32(bw *bufio.Writer, v uint32) error { @@ -186,15 +198,24 @@ func readUint32(br *bufio.Reader) (uint32, error) { return bin.Uint32(b[:]), nil } -func readFrameTypeHeader(br *bufio.Reader, wantType frameType) (frameLen uint32, err error) { - gotType, frameLen, err := readFrameHeader(br) +// ReadFrameTypeHeader reads a frame header from br and +// verifies that the frame type matches wantType. +// +// If it does, it returns the frame length (not including +// the 5 byte header) and a nil error. +// +// If it doesn't, it returns an error and a zero length. +func ReadFrameTypeHeader(br *bufio.Reader, wantType FrameType) (frameLen uint32, err error) { + gotType, frameLen, err := ReadFrameHeader(br) if err == nil && wantType != gotType { err = fmt.Errorf("bad frame type 0x%X, want 0x%X", gotType, wantType) } return frameLen, err } -func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) { +// ReadFrameHeader reads the header of a DERP frame, +// reading 5 bytes from br. +func ReadFrameHeader(br *bufio.Reader) (t FrameType, frameLen uint32, err error) { tb, err := br.ReadByte() if err != nil { return 0, 0, err @@ -203,7 +224,7 @@ func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) if err != nil { return 0, 0, err } - return frameType(tb), frameLen, nil + return FrameType(tb), frameLen, nil } // readFrame reads a frame header and then reads its payload into @@ -216,8 +237,8 @@ func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) // bytes are read, err will be io.ErrShortBuffer, and frameLen and t // will both be set. That is, callers need to explicitly handle when // they get more data than expected. 
-func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLen uint32, err error) { - t, frameLen, err = readFrameHeader(br) +func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t FrameType, frameLen uint32, err error) { + t, frameLen, err = ReadFrameHeader(br) if err != nil { return 0, 0, err } @@ -239,19 +260,26 @@ func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLe return t, frameLen, err } -func writeFrameHeader(bw *bufio.Writer, t frameType, frameLen uint32) error { +// WriteFrameHeader writes a frame header to bw. +// +// The frame header is 5 bytes: a one byte frame type +// followed by a big-endian uint32 length of the +// remaining frame (not including the 5 byte header). +// +// It does not flush bw. +func WriteFrameHeader(bw *bufio.Writer, t FrameType, frameLen uint32) error { if err := bw.WriteByte(byte(t)); err != nil { return err } return writeUint32(bw, frameLen) } -// writeFrame writes a complete frame & flushes it. -func writeFrame(bw *bufio.Writer, t frameType, b []byte) error { +// WriteFrame writes a complete frame & flushes it. +func WriteFrame(bw *bufio.Writer, t FrameType, b []byte) error { if len(b) > 10<<20 { return errors.New("unreasonably large frame write") } - if err := writeFrameHeader(bw, t, uint32(len(b))); err != nil { + if err := WriteFrameHeader(bw, t, uint32(len(b))); err != nil { return err } if _, err := bw.Write(b); err != nil { @@ -270,3 +298,12 @@ type Conn interface { SetReadDeadline(time.Time) error SetWriteDeadline(time.Time) error } + +// ServerInfo is the message sent from the server to clients during +// the connection setup. 
+type ServerInfo struct { + Version int `json:"version,omitempty"` + + TokenBucketBytesPerSecond int `json:",omitempty"` + TokenBucketBytesBurst int `json:",omitempty"` +} diff --git a/derp/derp_client.go b/derp/derp_client.go index 69f35db1e2791..d28905cd2c8b2 100644 --- a/derp/derp_client.go +++ b/derp/derp_client.go @@ -133,17 +133,17 @@ func (c *Client) recvServerKey() error { if err != nil { return err } - if flen < uint32(len(buf)) || t != frameServerKey || string(buf[:len(magic)]) != magic { + if flen < uint32(len(buf)) || t != FrameServerKey || string(buf[:len(Magic)]) != Magic { return errors.New("invalid server greeting") } - c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(magic):])) + c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(Magic):])) return nil } -func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) { - const maxLength = nonceLen + maxInfoLen +func (c *Client) parseServerInfo(b []byte) (*ServerInfo, error) { + const maxLength = NonceLen + MaxInfoLen fl := len(b) - if fl < nonceLen { + if fl < NonceLen { return nil, fmt.Errorf("short serverInfo frame") } if fl > maxLength { @@ -153,14 +153,16 @@ func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) { if !ok { return nil, fmt.Errorf("failed to open naclbox from server key %s", c.serverKey) } - info := new(serverInfo) + info := new(ServerInfo) if err := json.Unmarshal(msg, info); err != nil { return nil, fmt.Errorf("invalid JSON: %v", err) } return info, nil } -type clientInfo struct { +// ClientInfo is the information a DERP client sends to the server +// about itself when it connects. +type ClientInfo struct { // MeshKey optionally specifies a pre-shared key used by // trusted clients. It's required to subscribe to the // connection list & forward packets. It's empty for regular @@ -180,7 +182,7 @@ type clientInfo struct { } // Equal reports if two clientInfo values are equal. 
-func (c *clientInfo) Equal(other *clientInfo) bool { +func (c *ClientInfo) Equal(other *ClientInfo) bool { if c == nil || other == nil { return c == other } @@ -191,7 +193,7 @@ func (c *clientInfo) Equal(other *clientInfo) bool { } func (c *Client) sendClientKey() error { - msg, err := json.Marshal(clientInfo{ + msg, err := json.Marshal(ClientInfo{ Version: ProtocolVersion, MeshKey: c.meshKey, CanAckPings: c.canAckPings, @@ -202,10 +204,10 @@ func (c *Client) sendClientKey() error { } msgbox := c.privateKey.SealTo(c.serverKey, msg) - buf := make([]byte, 0, keyLen+len(msgbox)) + buf := make([]byte, 0, KeyLen+len(msgbox)) buf = c.publicKey.AppendTo(buf) buf = append(buf, msgbox...) - return writeFrame(c.bw, frameClientInfo, buf) + return WriteFrame(c.bw, FrameClientInfo, buf) } // ServerPublicKey returns the server's public key. @@ -230,12 +232,12 @@ func (c *Client) send(dstKey key.NodePublic, pkt []byte) (ret error) { c.wmu.Lock() defer c.wmu.Unlock() if c.rate != nil { - pktLen := frameHeaderLen + key.NodePublicRawLen + len(pkt) + pktLen := FrameHeaderLen + key.NodePublicRawLen + len(pkt) if !c.rate.AllowN(c.clock.Now(), pktLen) { return nil // drop } } - if err := writeFrameHeader(c.bw, frameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil { + if err := WriteFrameHeader(c.bw, FrameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil { return err } if _, err := c.bw.Write(dstKey.AppendTo(nil)); err != nil { @@ -264,7 +266,7 @@ func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt []byte) (err e timer := c.clock.AfterFunc(5*time.Second, c.writeTimeoutFired) defer timer.Stop() - if err := writeFrameHeader(c.bw, frameForwardPacket, uint32(keyLen*2+len(pkt))); err != nil { + if err := WriteFrameHeader(c.bw, FrameForwardPacket, uint32(KeyLen*2+len(pkt))); err != nil { return err } if _, err := c.bw.Write(srcKey.AppendTo(nil)); err != nil { @@ -282,17 +284,17 @@ func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt 
[]byte) (err e func (c *Client) writeTimeoutFired() { c.nc.Close() } func (c *Client) SendPing(data [8]byte) error { - return c.sendPingOrPong(framePing, data) + return c.sendPingOrPong(FramePing, data) } func (c *Client) SendPong(data [8]byte) error { - return c.sendPingOrPong(framePong, data) + return c.sendPingOrPong(FramePong, data) } -func (c *Client) sendPingOrPong(typ frameType, data [8]byte) error { +func (c *Client) sendPingOrPong(typ FrameType, data [8]byte) error { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, typ, 8); err != nil { + if err := WriteFrameHeader(c.bw, typ, 8); err != nil { return err } if _, err := c.bw.Write(data[:]); err != nil { @@ -314,7 +316,7 @@ func (c *Client) NotePreferred(preferred bool) (err error) { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, frameNotePreferred, 1); err != nil { + if err := WriteFrameHeader(c.bw, FrameNotePreferred, 1); err != nil { return err } var b byte = 0x00 @@ -332,7 +334,7 @@ func (c *Client) NotePreferred(preferred bool) (err error) { func (c *Client) WatchConnectionChanges() error { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, frameWatchConns, 0); err != nil { + if err := WriteFrameHeader(c.bw, FrameWatchConns, 0); err != nil { return err } return c.bw.Flush() @@ -343,7 +345,7 @@ func (c *Client) WatchConnectionChanges() error { func (c *Client) ClosePeer(target key.NodePublic) error { c.wmu.Lock() defer c.wmu.Unlock() - return writeFrame(c.bw, frameClosePeer, target.AppendTo(nil)) + return WriteFrame(c.bw, FrameClosePeer, target.AppendTo(nil)) } // ReceivedMessage represents a type returned by Client.Recv. 
Unless @@ -502,7 +504,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro c.peeked = 0 } - t, n, err := readFrameHeader(c.br) + t, n, err := ReadFrameHeader(c.br) if err != nil { return nil, err } @@ -533,7 +535,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro switch t { default: continue - case frameServerInfo: + case FrameServerInfo: // Server sends this at start-up. Currently unused. // Just has a JSON message saying "version: 2", // but the protocol seems extensible enough as-is without @@ -550,29 +552,29 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro } c.setSendRateLimiter(sm) return sm, nil - case frameKeepAlive: + case FrameKeepAlive: // A one-way keep-alive message that doesn't require an acknowledgement. // This predated framePing/framePong. return KeepAliveMessage{}, nil - case framePeerGone: - if n < keyLen { + case FramePeerGone: + if n < KeyLen { c.logf("[unexpected] dropping short peerGone frame from DERP server") continue } // Backward compatibility for the older peerGone without reason byte reason := PeerGoneReasonDisconnected - if n > keyLen { - reason = PeerGoneReasonType(b[keyLen]) + if n > KeyLen { + reason = PeerGoneReasonType(b[KeyLen]) } pg := PeerGoneMessage{ - Peer: key.NodePublicFromRaw32(mem.B(b[:keyLen])), + Peer: key.NodePublicFromRaw32(mem.B(b[:KeyLen])), Reason: reason, } return pg, nil - case framePeerPresent: + case FramePeerPresent: remain := b - chunk, remain, ok := cutLeadingN(remain, keyLen) + chunk, remain, ok := cutLeadingN(remain, KeyLen) if !ok { c.logf("[unexpected] dropping short peerPresent frame from DERP server") continue @@ -600,17 +602,17 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro msg.Flags = PeerPresentFlags(chunk[0]) return msg, nil - case frameRecvPacket: + case FrameRecvPacket: var rp ReceivedPacket - if n < keyLen { + if n < KeyLen { c.logf("[unexpected] dropping 
short packet from DERP server") continue } - rp.Source = key.NodePublicFromRaw32(mem.B(b[:keyLen])) - rp.Data = b[keyLen:n] + rp.Source = key.NodePublicFromRaw32(mem.B(b[:KeyLen])) + rp.Data = b[KeyLen:n] return rp, nil - case framePing: + case FramePing: var pm PingMessage if n < 8 { c.logf("[unexpected] dropping short ping frame") @@ -619,7 +621,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro copy(pm[:], b[:]) return pm, nil - case framePong: + case FramePong: var pm PongMessage if n < 8 { c.logf("[unexpected] dropping short ping frame") @@ -628,10 +630,10 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro copy(pm[:], b[:]) return pm, nil - case frameHealth: + case FrameHealth: return HealthMessage{Problem: string(b[:])}, nil - case frameRestarting: + case FrameRestarting: var m ServerRestartingMessage if n < 8 { c.logf("[unexpected] dropping short server restarting frame") diff --git a/derp/derp_test.go b/derp/derp_test.go index 9d07e159b4584..e765f7b54001a 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -1,59 +1,56 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derp +package derp_test import ( "bufio" "bytes" - "cmp" "context" - "crypto/x509" - "encoding/asn1" "encoding/json" "errors" "expvar" "fmt" "io" - "log" "net" - "os" - "reflect" - "strconv" "strings" "sync" "testing" "time" - qt "github.com/frankban/quicktest" - "go4.org/mem" - "golang.org/x/time/rate" - "tailscale.com/derp/derpconst" + "tailscale.com/derp" + "tailscale.com/derp/derpserver" "tailscale.com/disco" + "tailscale.com/metrics" "tailscale.com/net/memnet" - "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/must" ) +type ( + ClientInfo = derp.ClientInfo + Conn = derp.Conn + Client = derp.Client +) + func TestClientInfoUnmarshal(t *testing.T) { for i, in := range map[string]struct { json string - want *clientInfo + want 
*ClientInfo wantErr string }{ "empty": { json: `{}`, - want: &clientInfo{}, + want: &ClientInfo{}, }, "valid": { json: `{"Version":5,"MeshKey":"6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8"}`, - want: &clientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, + want: &ClientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, }, "validLowerMeshKey": { json: `{"version":5,"meshKey":"6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8"}`, - want: &clientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, + want: &ClientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, }, "invalidMeshKeyToShort": { json: `{"version":5,"meshKey":"abcdefg"}`, @@ -66,7 +63,7 @@ func TestClientInfoUnmarshal(t *testing.T) { } { t.Run(i, func(t *testing.T) { t.Parallel() - var got clientInfo + var got ClientInfo err := json.Unmarshal([]byte(in.json), &got) if in.wantErr != "" { if err == nil || !strings.Contains(err.Error(), in.wantErr) { @@ -86,7 +83,7 @@ func TestClientInfoUnmarshal(t *testing.T) { func TestSendRecv(t *testing.T) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, t.Logf) + s := derpserver.NewServer(serverPrivateKey, t.Logf) defer s.Close() const numClients = 3 @@ -132,7 +129,7 @@ func TestSendRecv(t *testing.T) { key := clientPrivateKeys[i] brw := bufio.NewReadWriter(bufio.NewReader(cout), bufio.NewWriter(cout)) - c, err := NewClient(key, cout, brw, t.Logf) + c, err := derp.NewClient(key, cout, brw, t.Logf) if err != nil { t.Fatalf("client %d: %v", i, err) } @@ -159,16 +156,16 @@ func TestSendRecv(t *testing.T) { default: t.Errorf("unexpected message type %T", m) continue - case PeerGoneMessage: + case derp.PeerGoneMessage: switch 
m.Reason { - case PeerGoneReasonDisconnected: + case derp.PeerGoneReasonDisconnected: peerGoneCountDisconnected.Add(1) - case PeerGoneReasonNotHere: + case derp.PeerGoneReasonNotHere: peerGoneCountNotHere.Add(1) default: t.Errorf("unexpected PeerGone reason %v", m.Reason) } - case ReceivedPacket: + case derp.ReceivedPacket: if m.Source.IsZero() { t.Errorf("zero Source address in ReceivedPacket") } @@ -198,12 +195,15 @@ func TestSendRecv(t *testing.T) { } } + serverMetrics := s.ExpVar().(*metrics.Set) + wantActive := func(total, home int64) { t.Helper() dl := time.Now().Add(5 * time.Second) var gotTotal, gotHome int64 for time.Now().Before(dl) { - gotTotal, gotHome = s.curClients.Value(), s.curHomeClients.Value() + gotTotal = serverMetrics.Get("gauge_current_connections").(*expvar.Int).Value() + gotHome = serverMetrics.Get("gauge_current_home_connections").(*expvar.Int).Value() if gotTotal == total && gotHome == home { return } @@ -305,7 +305,7 @@ func TestSendRecv(t *testing.T) { func TestSendFreeze(t *testing.T) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, t.Logf) + s := derpserver.NewServer(serverPrivateKey, t.Logf) defer s.Close() s.WriteTimeout = 100 * time.Millisecond @@ -323,7 +323,7 @@ func TestSendFreeze(t *testing.T) { go s.Accept(ctx, c1, bufio.NewReadWriter(bufio.NewReader(c1), bufio.NewWriter(c1)), name) brw := bufio.NewReadWriter(bufio.NewReader(c2), bufio.NewWriter(c2)) - c, err := NewClient(k, c2, brw, t.Logf) + c, err := derp.NewClient(k, c2, brw, t.Logf) if err != nil { t.Fatal(err) } @@ -374,7 +374,7 @@ func TestSendFreeze(t *testing.T) { default: errCh <- fmt.Errorf("%s: unexpected message type %T", name, m) return - case ReceivedPacket: + case derp.ReceivedPacket: if m.Source.IsZero() { errCh <- fmt.Errorf("%s: zero Source address in ReceivedPacket", name) return @@ -504,7 +504,7 @@ func TestSendFreeze(t *testing.T) { } type testServer struct { - s *Server + s *derpserver.Server ln net.Listener logf logger.Logf @@ 
-549,7 +549,7 @@ const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789a func newTestServer(t *testing.T, ctx context.Context) *testServer { t.Helper() logf := logger.WithPrefix(t.Logf, "derp-server: ") - s := NewServer(key.NewNode(), logf) + s := derpserver.NewServer(key.NewNode(), logf) s.SetMeshKey(testMeshKey) ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -614,7 +614,7 @@ func newTestClient(t *testing.T, ts *testServer, name string, newClient func(net func newRegularClient(t *testing.T, ts *testServer, name string) *testClient { return newTestClient(t, ts, name, func(nc net.Conn, priv key.NodePrivate, logf logger.Logf) (*Client, error) { brw := bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)) - c, err := NewClient(priv, nc, brw, logf) + c, err := derp.NewClient(priv, nc, brw, logf) if err != nil { return nil, err } @@ -631,7 +631,7 @@ func newTestWatcher(t *testing.T, ts *testServer, name string) *testClient { return nil, err } brw := bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)) - c, err := NewClient(priv, nc, brw, logf, MeshKey(mk)) + c, err := derp.NewClient(priv, nc, brw, logf, derp.MeshKey(mk)) if err != nil { return nil, err } @@ -651,12 +651,12 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { } for { - m, err := tc.c.recvTimeout(time.Second) + m, err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PeerPresentMessage: + case derp.PeerPresentMessage: got := m.Key if !want[got] { t.Fatalf("got peer present for %v; want present for %v", tc.ts.keyName(got), logger.ArgWriter(func(bw *bufio.Writer) { @@ -667,7 +667,7 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { } t.Logf("got present with IP %v, flags=%v", m.IPPort, m.Flags) switch m.Flags { - case PeerPresentIsMeshPeer, PeerPresentIsRegular: + case derp.PeerPresentIsMeshPeer, derp.PeerPresentIsRegular: // Okay default: 
t.Errorf("unexpected PeerPresentIsMeshPeer flags %v", m.Flags) @@ -684,19 +684,19 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { func (tc *testClient) wantGone(t *testing.T, peer key.NodePublic) { t.Helper() - m, err := tc.c.recvTimeout(time.Second) + m, err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PeerGoneMessage: + case derp.PeerGoneMessage: got := key.NodePublic(m.Peer) if peer != got { t.Errorf("got gone message for %v; want gone for %v", tc.ts.keyName(got), tc.ts.keyName(peer)) } reason := m.Reason - if reason != PeerGoneReasonDisconnected { - t.Errorf("got gone message for reason %v; wanted %v", reason, PeerGoneReasonDisconnected) + if reason != derp.PeerGoneReasonDisconnected { + t.Errorf("got gone message for reason %v; wanted %v", reason, derp.PeerGoneReasonDisconnected) } default: t.Fatalf("unexpected message type %T", m) @@ -754,863 +754,15 @@ func TestWatch(t *testing.T) { w3.wantGone(t, c1.pub) } -type testFwd int - -func (testFwd) ForwardPacket(key.NodePublic, key.NodePublic, []byte) error { - panic("not called in tests") -} -func (testFwd) String() string { - panic("not called in tests") -} - -func pubAll(b byte) (ret key.NodePublic) { - var bs [32]byte - for i := range bs { - bs[i] = b - } - return key.NodePublicFromRaw32(mem.B(bs[:])) -} - -func TestForwarderRegistration(t *testing.T) { - s := &Server{ - clients: make(map[key.NodePublic]*clientSet), - clientsMesh: map[key.NodePublic]PacketForwarder{}, - } - want := func(want map[key.NodePublic]PacketForwarder) { - t.Helper() - if got := s.clientsMesh; !reflect.DeepEqual(got, want) { - t.Fatalf("mismatch\n got: %v\nwant: %v\n", got, want) - } - } - wantCounter := func(c *expvar.Int, want int) { - t.Helper() - if got := c.Value(); got != int64(want) { - t.Errorf("counter = %v; want %v", got, want) - } - } - singleClient := func(c *sclient) *clientSet { - cs := &clientSet{} - cs.activeClient.Store(c) - return cs 
- } - - u1 := pubAll(1) - u2 := pubAll(2) - u3 := pubAll(3) - - s.AddPacketForwarder(u1, testFwd(1)) - s.AddPacketForwarder(u2, testFwd(2)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Verify a remove of non-registered forwarder is no-op. - s.RemovePacketForwarder(u2, testFwd(999)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Verify a remove of non-registered user is no-op. - s.RemovePacketForwarder(u3, testFwd(1)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Actual removal. - s.RemovePacketForwarder(u2, testFwd(2)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - }) - - // Adding a dup for a user. - wantCounter(&s.multiForwarderCreated, 0) - s.AddPacketForwarder(u1, testFwd(100)) - s.AddPacketForwarder(u1, testFwd(100)) // dup to trigger dup path - want(map[key.NodePublic]PacketForwarder{ - u1: newMultiForwarder(testFwd(1), testFwd(100)), - }) - wantCounter(&s.multiForwarderCreated, 1) - - // Removing a forwarder in a multi set that doesn't exist; does nothing. - s.RemovePacketForwarder(u1, testFwd(55)) - want(map[key.NodePublic]PacketForwarder{ - u1: newMultiForwarder(testFwd(1), testFwd(100)), - }) - - // Removing a forwarder in a multi set that does exist should collapse it away - // from being a multiForwarder. - wantCounter(&s.multiForwarderDeleted, 0) - s.RemovePacketForwarder(u1, testFwd(1)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(100), - }) - wantCounter(&s.multiForwarderDeleted, 1) - - // Removing an entry for a client that's still connected locally should result - // in a nil forwarder. - u1c := &sclient{ - key: u1, - logf: logger.Discard, - } - s.clients[u1] = singleClient(u1c) - s.RemovePacketForwarder(u1, testFwd(100)) - want(map[key.NodePublic]PacketForwarder{ - u1: nil, - }) - - // But once that client disconnects, it should go away. 
- s.unregisterClient(u1c) - want(map[key.NodePublic]PacketForwarder{}) - - // But if it already has a forwarder, it's not removed. - s.AddPacketForwarder(u1, testFwd(2)) - s.unregisterClient(u1c) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(2), - }) - - // Now pretend u1 was already connected locally (so clientsMesh[u1] is nil), and then we heard - // that they're also connected to a peer of ours. That shouldn't transition the forwarder - // from nil to the new one, not a multiForwarder. - s.clients[u1] = singleClient(u1c) - s.clientsMesh[u1] = nil - want(map[key.NodePublic]PacketForwarder{ - u1: nil, - }) - s.AddPacketForwarder(u1, testFwd(3)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(3), - }) -} - -type channelFwd struct { - // id is to ensure that different instances that reference the - // same channel are not equal, as they are used as keys in the - // multiForwarder map. - id int - c chan []byte -} - -func (f channelFwd) String() string { return "" } -func (f channelFwd) ForwardPacket(_ key.NodePublic, _ key.NodePublic, packet []byte) error { - f.c <- packet - return nil -} - -func TestMultiForwarder(t *testing.T) { - received := 0 - var wg sync.WaitGroup - ch := make(chan []byte) - ctx, cancel := context.WithCancel(context.Background()) - - s := &Server{ - clients: make(map[key.NodePublic]*clientSet), - clientsMesh: map[key.NodePublic]PacketForwarder{}, - } - u := pubAll(1) - s.AddPacketForwarder(u, channelFwd{1, ch}) - - wg.Add(2) - go func() { - defer wg.Done() - for { - select { - case <-ch: - received += 1 - case <-ctx.Done(): - return - } - } - }() - go func() { - defer wg.Done() - for { - s.AddPacketForwarder(u, channelFwd{2, ch}) - s.AddPacketForwarder(u, channelFwd{3, ch}) - s.RemovePacketForwarder(u, channelFwd{2, ch}) - s.RemovePacketForwarder(u, channelFwd{1, ch}) - s.AddPacketForwarder(u, channelFwd{1, ch}) - s.RemovePacketForwarder(u, channelFwd{3, ch}) - if ctx.Err() != nil { - return - } - } - }() - - // Number of 
messages is chosen arbitrarily, just for this loop to - // run long enough concurrently with {Add,Remove}PacketForwarder loop above. - numMsgs := 5000 - var fwd PacketForwarder - for i := range numMsgs { - s.mu.Lock() - fwd = s.clientsMesh[u] - s.mu.Unlock() - fwd.ForwardPacket(u, u, []byte(strconv.Itoa(i))) - } - - cancel() - wg.Wait() - if received != numMsgs { - t.Errorf("expected %d messages to be forwarded; got %d", numMsgs, received) - } -} -func TestMetaCert(t *testing.T) { - priv := key.NewNode() - pub := priv.Public() - s := NewServer(priv, t.Logf) - - certBytes := s.MetaCert() - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - log.Fatal(err) - } - if fmt.Sprint(cert.SerialNumber) != fmt.Sprint(ProtocolVersion) { - t.Errorf("serial = %v; want %v", cert.SerialNumber, ProtocolVersion) - } - if g, w := cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix+pub.UntypedHexString(); g != w { - t.Errorf("CommonName = %q; want %q", g, w) - } - if n := len(cert.Extensions); n != 1 { - t.Fatalf("got %d extensions; want 1", n) - } - - // oidExtensionBasicConstraints is the Basic Constraints ID copied - // from the x509 package. 
- oidExtensionBasicConstraints := asn1.ObjectIdentifier{2, 5, 29, 19} - - if id := cert.Extensions[0].Id; !id.Equal(oidExtensionBasicConstraints) { - t.Errorf("extension ID = %v; want %v", id, oidExtensionBasicConstraints) - } -} - -type dummyNetConn struct { - net.Conn -} - -func (dummyNetConn) SetReadDeadline(time.Time) error { return nil } - -func TestClientRecv(t *testing.T) { - tests := []struct { - name string - input []byte - want any - }{ - { - name: "ping", - input: []byte{ - byte(framePing), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - }, - want: PingMessage{1, 2, 3, 4, 5, 6, 7, 8}, - }, - { - name: "pong", - input: []byte{ - byte(framePong), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - }, - want: PongMessage{1, 2, 3, 4, 5, 6, 7, 8}, - }, - { - name: "health_bad", - input: []byte{ - byte(frameHealth), 0, 0, 0, 3, - byte('B'), byte('A'), byte('D'), - }, - want: HealthMessage{Problem: "BAD"}, - }, - { - name: "health_ok", - input: []byte{ - byte(frameHealth), 0, 0, 0, 0, - }, - want: HealthMessage{}, - }, - { - name: "server_restarting", - input: []byte{ - byte(frameRestarting), 0, 0, 0, 8, - 0, 0, 0, 1, - 0, 0, 0, 2, - }, - want: ServerRestartingMessage{ - ReconnectIn: 1 * time.Millisecond, - TryFor: 2 * time.Millisecond, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - nc: dummyNetConn{}, - br: bufio.NewReader(bytes.NewReader(tt.input)), - logf: t.Logf, - clock: &tstest.Clock{}, - } - got, err := c.Recv() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("got %#v; want %#v", got, tt.want) - } - }) - } -} - -func TestClientSendPing(t *testing.T) { - var buf bytes.Buffer - c := &Client{ - bw: bufio.NewWriter(&buf), - } - if err := c.SendPing([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { - t.Fatal(err) - } - want := []byte{ - byte(framePing), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - } - if !bytes.Equal(buf.Bytes(), want) { - t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", 
buf.Bytes(), want) - } -} - -func TestClientSendPong(t *testing.T) { - var buf bytes.Buffer - c := &Client{ - bw: bufio.NewWriter(&buf), - } - if err := c.SendPong([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { - t.Fatal(err) - } - want := []byte{ - byte(framePong), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - } - if !bytes.Equal(buf.Bytes(), want) { - t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) - } -} - -func TestServerDupClients(t *testing.T) { - serverPriv := key.NewNode() - var s *Server - - clientPriv := key.NewNode() - clientPub := clientPriv.Public() - - var c1, c2, c3 *sclient - var clientName map[*sclient]string - - // run starts a new test case and resets clients back to their zero values. - run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) { - s = NewServer(serverPriv, t.Logf) - s.dupPolicy = dupPolicy - c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")} - c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")} - c3 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c3: ")} - clientName = map[*sclient]string{ - c1: "c1", - c2: "c2", - c3: "c3", - } - t.Run(name, f) - } - runBothWays := func(name string, f func(t *testing.T)) { - run(name+"_disablefighters", disableFighters, f) - run(name+"_lastwriteractive", lastWriterIsActive, f) - } - wantSingleClient := func(t *testing.T, want *sclient) { - t.Helper() - got, ok := s.clients[want.key] - if !ok { - t.Error("no clients for key") - return - } - if got.dup != nil { - t.Errorf("unexpected dup set for single client") - } - cur := got.activeClient.Load() - if cur != want { - t.Errorf("active client = %q; want %q", clientName[cur], clientName[want]) - } - if cur != nil { - if cur.isDup.Load() { - t.Errorf("unexpected isDup on singleClient") - } - if cur.isDisabled.Load() { - t.Errorf("unexpected isDisabled on singleClient") - } - } - } - wantNoClient := func(t *testing.T) { - t.Helper() - _, ok := s.clients[clientPub] - 
if !ok { - // Good - return - } - t.Errorf("got client; want empty") - } - wantDupSet := func(t *testing.T) *dupClientSet { - t.Helper() - cs, ok := s.clients[clientPub] - if !ok { - t.Fatal("no set for key; want dup set") - return nil - } - if cs.dup != nil { - return cs.dup - } - t.Fatalf("no dup set for key; want dup set") - return nil - } - wantActive := func(t *testing.T, want *sclient) { - t.Helper() - set, ok := s.clients[clientPub] - if !ok { - t.Error("no set for key") - return - } - got := set.activeClient.Load() - if got != want { - t.Errorf("active client = %q; want %q", clientName[got], clientName[want]) - } - } - checkDup := func(t *testing.T, c *sclient, want bool) { - t.Helper() - if got := c.isDup.Load(); got != want { - t.Errorf("client %q isDup = %v; want %v", clientName[c], got, want) - } - } - checkDisabled := func(t *testing.T, c *sclient, want bool) { - t.Helper() - if got := c.isDisabled.Load(); got != want { - t.Errorf("client %q isDisabled = %v; want %v", clientName[c], got, want) - } - } - wantDupConns := func(t *testing.T, want int) { - t.Helper() - if got := s.dupClientConns.Value(); got != int64(want) { - t.Errorf("dupClientConns = %v; want %v", got, want) - } - } - wantDupKeys := func(t *testing.T, want int) { - t.Helper() - if got := s.dupClientKeys.Value(); got != int64(want) { - t.Errorf("dupClientKeys = %v; want %v", got, want) - } - } - - // Common case: a single client comes and goes, with no dups. - runBothWays("one_comes_and_goes", func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - s.unregisterClient(c1) - wantNoClient(t) - }) - - // A still somewhat common case: a single client was - // connected and then their wifi dies or laptop closes - // or they switch networks and connect from a - // different network. They have two connections but - // it's not very bad. Only their new one is - // active. The last one, being dead, doesn't send and - // thus the new one doesn't get disabled. 
- runBothWays("small_overlap_replacement", func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - wantActive(t, c1) - wantDupKeys(t, 0) - wantDupKeys(t, 0) - - s.registerClient(c2) // wifi dies; c2 replacement connects - wantDupSet(t) - wantDupConns(t, 2) - wantDupKeys(t, 1) - checkDup(t, c1, true) - checkDup(t, c2, true) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c2) // sends go to the replacement - - s.unregisterClient(c1) // c1 finally times out - wantSingleClient(t, c2) - checkDup(t, c2, false) // c2 is longer a dup - wantActive(t, c2) - wantDupConns(t, 0) - wantDupKeys(t, 0) - }) - - // Key cloning situation with concurrent clients, both trying - // to write. - run("concurrent_dups_get_disabled", disableFighters, func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - wantActive(t, c1) - s.registerClient(c2) - wantDupSet(t) - wantDupKeys(t, 1) - wantDupConns(t, 2) - wantActive(t, c2) - checkDup(t, c1, true) - checkDup(t, c2, true) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - - s.noteClientActivity(c2) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - s.noteClientActivity(c1) - checkDisabled(t, c1, true) - checkDisabled(t, c2, true) - wantActive(t, nil) - - s.registerClient(c3) - wantActive(t, c3) - checkDisabled(t, c3, false) - wantDupKeys(t, 1) - wantDupConns(t, 3) - - s.unregisterClient(c3) - wantActive(t, nil) - wantDupKeys(t, 1) - wantDupConns(t, 2) - - s.unregisterClient(c2) - wantSingleClient(t, c1) - wantDupKeys(t, 0) - wantDupConns(t, 0) - }) - - // Key cloning with an A->B->C->A series instead. 
- run("concurrent_dups_three_parties", disableFighters, func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - s.registerClient(c2) - s.registerClient(c3) - s.noteClientActivity(c1) - checkDisabled(t, c1, true) - checkDisabled(t, c2, true) - checkDisabled(t, c3, true) - wantActive(t, nil) - }) - - run("activity_promotes_primary_when_nil", disableFighters, func(t *testing.T) { - wantNoClient(t) - - // Last registered client is the active one... - s.registerClient(c1) - wantActive(t, c1) - s.registerClient(c2) - wantActive(t, c2) - s.registerClient(c3) - s.noteClientActivity(c2) - wantActive(t, c3) - - // But if the last one goes away, the one with the - // most recent activity wins. - s.unregisterClient(c3) - wantActive(t, c2) - }) - - run("concurrent_dups_three_parties_last_writer", lastWriterIsActive, func(t *testing.T) { - wantNoClient(t) - - s.registerClient(c1) - wantActive(t, c1) - s.registerClient(c2) - wantActive(t, c2) - - s.noteClientActivity(c1) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c1) - - s.noteClientActivity(c2) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c2) - - s.unregisterClient(c2) - checkDisabled(t, c1, false) - wantActive(t, c1) - }) -} - -func TestLimiter(t *testing.T) { - rl := rate.NewLimiter(rate.Every(time.Minute), 100) - for i := range 200 { - r := rl.Reserve() - d := r.Delay() - t.Logf("i=%d, allow=%v, d=%v", i, r.OK(), d) - } -} - -// BenchmarkConcurrentStreams exercises mutex contention on a -// single Server instance with multiple concurrent client flows. 
-func BenchmarkConcurrentStreams(b *testing.B) { - serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) - defer s.Close() - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - b.Fatal(err) - } - defer ln.Close() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - go func() { - for ctx.Err() == nil { - connIn, err := ln.Accept() - if err != nil { - if ctx.Err() != nil { - return - } - b.Error(err) - return - } - - brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) - go s.Accept(ctx, connIn, brwServer, "test-client") - } - }() - - newClient := func(t testing.TB) *Client { - t.Helper() - connOut, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - b.Fatal(err) - } - t.Cleanup(func() { connOut.Close() }) - - k := key.NewNode() - - brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) - client, err := NewClient(k, connOut, brw, logger.Discard) - if err != nil { - b.Fatalf("client: %v", err) - } - return client - } - - b.RunParallel(func(pb *testing.PB) { - c1, c2 := newClient(b), newClient(b) - const packetSize = 100 - msg := make([]byte, packetSize) - for pb.Next() { - if err := c1.Send(c2.PublicKey(), msg); err != nil { - b.Fatal(err) - } - _, err := c2.Recv() - if err != nil { - return - } - } - }) -} - -func BenchmarkSendRecv(b *testing.B) { - for _, size := range []int{10, 100, 1000, 10000} { - b.Run(fmt.Sprintf("msgsize=%d", size), func(b *testing.B) { benchmarkSendRecvSize(b, size) }) - } -} - -func benchmarkSendRecvSize(b *testing.B, packetSize int) { - serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) - defer s.Close() - - k := key.NewNode() - clientKey := k.Public() - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - b.Fatal(err) - } - defer ln.Close() - - connOut, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - b.Fatal(err) - } - defer connOut.Close() - 
- connIn, err := ln.Accept() - if err != nil { - b.Fatal(err) - } - defer connIn.Close() - - brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go s.Accept(ctx, connIn, brwServer, "test-client") - - brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) - client, err := NewClient(k, connOut, brw, logger.Discard) - if err != nil { - b.Fatalf("client: %v", err) - } - - go func() { - for { - _, err := client.Recv() - if err != nil { - return - } - } - }() - - msg := make([]byte, packetSize) - b.SetBytes(int64(len(msg))) - b.ReportAllocs() - b.ResetTimer() - for range b.N { - if err := client.Send(clientKey, msg); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkWriteUint32(b *testing.B) { - w := bufio.NewWriter(io.Discard) - b.ReportAllocs() - b.ResetTimer() - for range b.N { - writeUint32(w, 0x0ba3a) - } -} - -type nopRead struct{} - -func (r nopRead) Read(p []byte) (int, error) { - return len(p), nil -} - -var sinkU32 uint32 - -func BenchmarkReadUint32(b *testing.B) { - r := bufio.NewReader(nopRead{}) - var err error - b.ReportAllocs() - b.ResetTimer() - for range b.N { - sinkU32, err = readUint32(r) - if err != nil { - b.Fatal(err) - } - } -} - func waitConnect(t testing.TB, c *Client) { t.Helper() if m, err := c.Recv(); err != nil { t.Fatalf("client first Recv: %v", err) - } else if v, ok := m.(ServerInfoMessage); !ok { + } else if v, ok := m.(derp.ServerInfoMessage); !ok { t.Fatalf("client first Recv was unexpected type %T", v) } } -func TestParseSSOutput(t *testing.T) { - contents, err := os.ReadFile("testdata/example_ss.txt") - if err != nil { - t.Errorf("os.ReadFile(example_ss.txt) failed: %v", err) - } - seen := parseSSOutput(string(contents)) - if len(seen) == 0 { - t.Errorf("parseSSOutput expected non-empty map") - } -} - -type countWriter struct { - mu sync.Mutex - writes int - bytes int64 -} - -func (w *countWriter) 
Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - w.writes++ - w.bytes += int64(len(p)) - return len(p), nil -} - -func (w *countWriter) Stats() (writes int, bytes int64) { - w.mu.Lock() - defer w.mu.Unlock() - return w.writes, w.bytes -} - -func (w *countWriter) ResetStats() { - w.mu.Lock() - defer w.mu.Unlock() - w.writes, w.bytes = 0, 0 -} - -func TestClientSendRateLimiting(t *testing.T) { - cw := new(countWriter) - c := &Client{ - bw: bufio.NewWriter(cw), - clock: &tstest.Clock{}, - } - c.setSendRateLimiter(ServerInfoMessage{}) - - pkt := make([]byte, 1000) - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - writes1, bytes1 := cw.Stats() - if writes1 != 1 { - t.Errorf("writes = %v, want 1", writes1) - } - - // Flood should all succeed. - cw.ResetStats() - for range 1000 { - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - } - writes1K, bytes1K := cw.Stats() - if writes1K != 1000 { - t.Logf("writes = %v; want 1000", writes1K) - } - if got, want := bytes1K, bytes1*1000; got != want { - t.Logf("bytes = %v; want %v", got, want) - } - - // Set a rate limiter - cw.ResetStats() - c.setSendRateLimiter(ServerInfoMessage{ - TokenBucketBytesPerSecond: 1, - TokenBucketBytesBurst: int(bytes1 * 2), - }) - for range 1000 { - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - } - writesLimited, bytesLimited := cw.Stats() - if writesLimited == 0 || writesLimited == writes1K { - t.Errorf("limited conn's write count = %v; want non-zero, less than 1k", writesLimited) - } - if bytesLimited < bytes1*2 || bytesLimited >= bytes1K { - t.Errorf("limited conn's bytes count = %v; want >=%v, <%v", bytesLimited, bytes1K*2, bytes1K) - } -} - func TestServerRepliesToPing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1627,12 +779,12 @@ func TestServerRepliesToPing(t *testing.T) { } for { - m, err := tc.c.recvTimeout(time.Second) + m, err := 
tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PongMessage: + case derp.PongMessage: if ([8]byte(m)) != data { t.Fatalf("got pong %2x; want %2x", [8]byte(m), data) } @@ -1640,122 +792,3 @@ func TestServerRepliesToPing(t *testing.T) { } } } - -func TestGetPerClientSendQueueDepth(t *testing.T) { - c := qt.New(t) - envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH" - - testCases := []struct { - envVal string - want int - }{ - // Empty case, envknob treats empty as missing also. - { - "", defaultPerClientSendQueueDepth, - }, - { - "64", 64, - }, - } - - for _, tc := range testCases { - t.Run(cmp.Or(tc.envVal, "empty"), func(t *testing.T) { - t.Setenv(envKey, tc.envVal) - val := getPerClientSendQueueDepth() - c.Assert(val, qt.Equals, tc.want) - }) - } -} - -func TestSetMeshKey(t *testing.T) { - for name, tt := range map[string]struct { - key string - want key.DERPMesh - wantErr bool - }{ - "clobber": { - key: testMeshKey, - wantErr: false, - }, - "invalid": { - key: "badf00d", - wantErr: true, - }, - } { - t.Run(name, func(t *testing.T) { - s := &Server{} - - err := s.SetMeshKey(tt.key) - if tt.wantErr { - if err == nil { - t.Fatalf("expected err") - } - return - } - if err != nil { - t.Fatalf("unexpected err: %v", err) - } - - want, err := key.ParseDERPMesh(tt.key) - if err != nil { - t.Fatal(err) - } - if !s.meshKey.Equal(want) { - t.Fatalf("got %v, want %v", s.meshKey, want) - } - }) - } -} - -func TestIsMeshPeer(t *testing.T) { - s := &Server{} - err := s.SetMeshKey(testMeshKey) - if err != nil { - t.Fatal(err) - } - for name, tt := range map[string]struct { - want bool - meshKey string - wantAllocs float64 - }{ - "nil": { - want: false, - wantAllocs: 0, - }, - "mismatch": { - meshKey: "6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8", - want: false, - wantAllocs: 1, - }, - "match": { - meshKey: testMeshKey, - want: true, - wantAllocs: 0, - }, - } { - t.Run(name, func(t *testing.T) { - var got 
bool - var mKey key.DERPMesh - if tt.meshKey != "" { - mKey, err = key.ParseDERPMesh(tt.meshKey) - if err != nil { - t.Fatalf("ParseDERPMesh(%q) failed: %v", tt.meshKey, err) - } - } - - info := clientInfo{ - MeshKey: mKey, - } - allocs := testing.AllocsPerRun(1, func() { - got = s.isMeshPeer(&info) - }) - if got != tt.want { - t.Fatalf("got %t, want %t: info = %#v", got, tt.want, info) - } - - if allocs != tt.wantAllocs && tt.want { - t.Errorf("%f allocations, want %f", allocs, tt.wantAllocs) - } - }) - } -} diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 704b8175d07c6..57f008a1ae3fe 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -522,7 +522,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien // just to get routed into the server's HTTP Handler so it // can Hijack the request, but we signal with a special header // that we don't want to deal with its HTTP response. - req.Header.Set(fastStartHeader, "1") // suppresses the server's HTTP response + req.Header.Set(derp.FastStartHeader, "1") // suppresses the server's HTTP response if err := req.Write(brw); err != nil { return nil, 0, err } diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 6e8e0bd21c9e9..dd7cbcd247cb7 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derphttp +package derphttp_test import ( "bytes" @@ -21,9 +21,12 @@ import ( "time" "tailscale.com/derp" + "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/net/netx" "tailscale.com/tailcfg" + "tailscale.com/tstest" "tailscale.com/types/key" ) @@ -41,12 +44,12 @@ func TestSendRecv(t *testing.T) { clientKeys = append(clientKeys, priv.Public()) } - s := derp.NewServer(serverPrivateKey, t.Logf) + s := 
derpserver.NewServer(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") @@ -65,7 +68,7 @@ func TestSendRecv(t *testing.T) { } }() - var clients []*Client + var clients []*derphttp.Client var recvChs []chan []byte done := make(chan struct{}) var wg sync.WaitGroup @@ -78,7 +81,7 @@ func TestSendRecv(t *testing.T) { }() for i := range numClients { key := clientPrivateKeys[i] - c, err := NewClient(key, serverURL, t.Logf, netMon) + c, err := derphttp.NewClient(key, serverURL, t.Logf, netMon) if err != nil { t.Fatalf("client %d: %v", i, err) } @@ -158,7 +161,7 @@ func TestSendRecv(t *testing.T) { recvNothing(1) } -func waitConnect(t testing.TB, c *Client) { +func waitConnect(t testing.TB, c *derphttp.Client) { t.Helper() if m, err := c.Recv(); err != nil { t.Fatalf("client first Recv: %v", err) @@ -169,12 +172,12 @@ func waitConnect(t testing.TB, c *Client) { func TestPing(t *testing.T) { serverPrivateKey := key.NewNode() - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.NewServer(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") @@ -193,7 +196,7 @@ func TestPing(t *testing.T) { } }() - c, err := NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic()) + c, err := derphttp.NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic()) if err != nil { t.Fatalf("NewClient: %v", err) } @@ -221,11 +224,11 @@ func TestPing(t *testing.T) { const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" -func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.Server) { - s = derp.NewServer(k, t.Logf) +func newTestServer(t 
*testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server) { + s = derpserver.NewServer(k, t.Logf) httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") @@ -247,8 +250,8 @@ func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.S return } -func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *Client) { - c, err := NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic()) +func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *derphttp.Client) { + c, err := derphttp.NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic()) if err != nil { t.Fatal(err) } @@ -260,30 +263,16 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW return } -// breakConnection breaks the connection, which should trigger a reconnect. -func (c *Client) breakConnection(brokenClient *derp.Client) { - c.mu.Lock() - defer c.mu.Unlock() - if c.client != brokenClient { - return - } - if c.netConn != nil { - c.netConn.Close() - c.netConn = nil - } - c.client = nil -} - // Test that a watcher connection successfully reconnects and processes peer // updates after a different thread breaks and reconnects the connection, while // the watcher is waiting on recv(). func TestBreakWatcherConnRecv(t *testing.T) { + // TODO(bradfitz): use synctest + memnet instead + // Set the wait time before a retry after connection failure to be much lower. // This needs to be early in the test, for defer to run right at the end after // the DERP client has finished. 
- origRetryInterval := retryInterval - retryInterval = 50 * time.Millisecond - defer func() { retryInterval = origRetryInterval }() + tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) var wg sync.WaitGroup // Make the watcher server @@ -301,11 +290,11 @@ func TestBreakWatcherConnRecv(t *testing.T) { defer watcher.Close() ctx, cancel := context.WithCancel(context.Background()) + defer cancel() watcherChan := make(chan int, 1) defer close(watcherChan) errChan := make(chan error, 1) - defer close(errChan) // Start the watcher thread (which connects to the watched server) wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 @@ -320,7 +309,10 @@ func TestBreakWatcherConnRecv(t *testing.T) { } remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } notifyErr := func(err error) { - errChan <- err + select { + case errChan <- err: + case <-ctx.Done(): + } } watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) @@ -345,7 +337,7 @@ func TestBreakWatcherConnRecv(t *testing.T) { t.Fatalf("watcher did not process the peer update") } timer.Reset(5 * time.Second) - watcher.breakConnection(watcher.client) + watcher.BreakConnection(watcher) // re-establish connection by sending a packet watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) } @@ -357,12 +349,12 @@ func TestBreakWatcherConnRecv(t *testing.T) { // updates after a different thread breaks and reconnects the connection, while // the watcher is not waiting on recv(). func TestBreakWatcherConn(t *testing.T) { + // TODO(bradfitz): use synctest + memnet instead + // Set the wait time before a retry after connection failure to be much lower. // This needs to be early in the test, for defer to run right at the end after // the DERP client has finished. 
- origRetryInterval := retryInterval - retryInterval = 50 * time.Millisecond - defer func() { retryInterval = origRetryInterval }() + tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) var wg sync.WaitGroup // Make the watcher server @@ -428,7 +420,7 @@ func TestBreakWatcherConn(t *testing.T) { case <-timer.C: t.Fatalf("watcher did not process the peer update") } - watcher1.breakConnection(watcher1.client) + watcher1.BreakConnection(watcher1) // re-establish connection by sending a packet watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) // signal that the breaker is done @@ -446,7 +438,7 @@ func noopRemove(derp.PeerGoneMessage) {} func noopNotifyError(error) {} func TestRunWatchConnectionLoopServeConnect(t *testing.T) { - defer func() { testHookWatchLookConnectResult = nil }() + defer derphttp.SetTestHookWatchLookConnectResult(nil) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -461,7 +453,7 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { defer watcher.Close() // Test connecting to ourselves, and that we get hung up on. - testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err != nil { t.Fatalf("error connecting to server: %v", err) @@ -470,12 +462,12 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { t.Error("wanted self-connect; wasn't") } return false - } + }) watcher.RunWatchConnectionLoop(ctx, pub, t.Logf, noopAdd, noopRemove, noopNotifyError) // Test connecting to the server with a zero value for ignoreServerKey, // so we should always connect. 
- testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err != nil { t.Fatalf("error connecting to server: %v", err) @@ -484,16 +476,14 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { t.Error("wanted normal connect; got self connect") } return false - } + }) watcher.RunWatchConnectionLoop(ctx, key.NodePublic{}, t.Logf, noopAdd, noopRemove, noopNotifyError) } // verify that the LocalAddr method doesn't acquire the mutex. // See https://github.com/tailscale/tailscale/issues/11519 func TestLocalAddrNoMutex(t *testing.T) { - var c Client - c.mu.Lock() - defer c.mu.Unlock() // not needed in test but for symmetry + var c derphttp.Client _, err := c.LocalAddr() if got, want := fmt.Sprint(err), "client not connected"; got != want { @@ -502,7 +492,7 @@ func TestLocalAddrNoMutex(t *testing.T) { } func TestProbe(t *testing.T) { - h := Handler(nil) + h := derpserver.Handler(nil) tests := []struct { path string @@ -523,7 +513,7 @@ func TestProbe(t *testing.T) { } func TestNotifyError(t *testing.T) { - defer func() { testHookWatchLookConnectResult = nil }() + defer derphttp.SetTestHookWatchLookConnectResult(nil) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() @@ -541,7 +531,7 @@ func TestNotifyError(t *testing.T) { })) defer watcher.Close() - testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err == nil { t.Fatal("expected error connecting to server, got nil") @@ -550,7 +540,7 @@ func TestNotifyError(t *testing.T) { t.Error("wanted normal connect; got self connect") } return false - } + }) errChan := make(chan error, 1) notifyError := func(err error) { @@ -587,7 +577,7 @@ func TestManualDial(t *testing.T) { region := slices.Sorted(maps.Keys(dm.Regions))[0] netMon := 
netmon.NewStatic() - rc := NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { + rc := derphttp.NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { return dm.Regions[region] }) defer rc.Close() @@ -625,7 +615,7 @@ func TestURLDial(t *testing.T) { } } netMon := netmon.NewStatic() - c, err := NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) + c, err := derphttp.NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) defer c.Close() if err := c.Connect(context.Background()); err != nil { diff --git a/derp/derphttp/export_test.go b/derp/derphttp/export_test.go new file mode 100644 index 0000000000000..59d8324dcba3e --- /dev/null +++ b/derp/derphttp/export_test.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derphttp + +func SetTestHookWatchLookConnectResult(f func(connectError error, wasSelfConnect bool) (keepRunning bool)) { + testHookWatchLookConnectResult = f +} + +// breakConnection breaks the connection, which should trigger a reconnect. +func (c *Client) BreakConnection(brokenClient *Client) { + c.mu.Lock() + defer c.mu.Unlock() + if c.client != brokenClient.client { + return + } + if c.netConn != nil { + c.netConn.Close() + c.netConn = nil + } + c.client = nil +} + +var RetryInterval = &retryInterval diff --git a/derp/derp_server.go b/derp/derpserver/derp_server.go similarity index 94% rename from derp/derp_server.go rename to derp/derpserver/derp_server.go index f0c635a5aef50..917ef147c5112 100644 --- a/derp/derp_server.go +++ b/derp/derpserver/derp_server.go @@ -1,7 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derp +// Package derpserver implements a DERP server. 
+package derpserver // TODO(crawshaw): with predefined serverKey in clients and HMAC on packets we could skip TLS @@ -38,6 +39,7 @@ import ( "go4.org/mem" "golang.org/x/sync/errgroup" "tailscale.com/client/local" + "tailscale.com/derp" "tailscale.com/derp/derpconst" "tailscale.com/disco" "tailscale.com/envknob" @@ -55,19 +57,15 @@ import ( "tailscale.com/version" ) +type Conn = derp.Conn + // verboseDropKeys is the set of destination public keys that should // verbosely log whenever DERP drops a packet. var verboseDropKeys = map[key.NodePublic]bool{} -// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests -// to indicate that they're connecting to their ideal (Region.Nodes[0]) node. -// The HTTP header value is the name of the node they wish they were connected -// to. This is an optional header. -const IdealNodeHeader = "Ideal-Node" - // IdealNodeContextKey is the context key used to pass the IdealNodeHeader value // from the HTTP handler to the DERP server's Accept method. -var IdealNodeContextKey = ctxkey.New[string]("ideal-node", "") +var IdealNodeContextKey = ctxkey.New("ideal-node", "") func init() { keys := envknob.String("TS_DEBUG_VERBOSE_DROPS") @@ -620,7 +618,7 @@ func (s *Server) initMetacert() { log.Fatal(err) } tmpl := &x509.Certificate{ - SerialNumber: big.NewInt(ProtocolVersion), + SerialNumber: big.NewInt(derp.ProtocolVersion), Subject: pkix.Name{ CommonName: derpconst.MetaCertCommonNamePrefix + s.publicKey.UntypedHexString(), }, @@ -724,7 +722,7 @@ func (s *Server) registerClient(c *sclient) { // presence changed. // // s.mu must be held. 
-func (s *Server) broadcastPeerStateChangeLocked(peer key.NodePublic, ipPort netip.AddrPort, flags PeerPresentFlags, present bool) { +func (s *Server) broadcastPeerStateChangeLocked(peer key.NodePublic, ipPort netip.AddrPort, flags derp.PeerPresentFlags, present bool) { for w := range s.watchers { w.peerStateChange = append(w.peerStateChange, peerConnState{ peer: peer, @@ -868,7 +866,7 @@ func (s *Server) notePeerGoneFromRegionLocked(key key.NodePublic) { // requestPeerGoneWriteLimited sends a request to write a "peer gone" // frame, but only in reply to a disco packet, and only if we haven't // sent one recently. -func (c *sclient) requestPeerGoneWriteLimited(peer key.NodePublic, contents []byte, reason PeerGoneReasonType) { +func (c *sclient) requestPeerGoneWriteLimited(peer key.NodePublic, contents []byte, reason derp.PeerGoneReasonType) { if disco.LooksLikeDiscoWrapper(contents) != true { return } @@ -1010,7 +1008,7 @@ func (c *sclient) run(ctx context.Context) error { c.startStatsLoop(sendCtx) for { - ft, fl, err := readFrameHeader(c.br) + ft, fl, err := derp.ReadFrameHeader(c.br) c.debugLogf("read frame type %d len %d err %v", ft, fl, err) if err != nil { if errors.Is(err, io.EOF) { @@ -1025,17 +1023,17 @@ func (c *sclient) run(ctx context.Context) error { } c.s.noteClientActivity(c) switch ft { - case frameNotePreferred: + case derp.FrameNotePreferred: err = c.handleFrameNotePreferred(ft, fl) - case frameSendPacket: + case derp.FrameSendPacket: err = c.handleFrameSendPacket(ft, fl) - case frameForwardPacket: + case derp.FrameForwardPacket: err = c.handleFrameForwardPacket(ft, fl) - case frameWatchConns: + case derp.FrameWatchConns: err = c.handleFrameWatchConns(ft, fl) - case frameClosePeer: + case derp.FrameClosePeer: err = c.handleFrameClosePeer(ft, fl) - case framePing: + case derp.FramePing: err = c.handleFramePing(ft, fl) default: err = c.handleUnknownFrame(ft, fl) @@ -1046,12 +1044,12 @@ func (c *sclient) run(ctx context.Context) error { } } -func (c 
*sclient) handleUnknownFrame(ft frameType, fl uint32) error { +func (c *sclient) handleUnknownFrame(ft derp.FrameType, fl uint32) error { _, err := io.CopyN(io.Discard, c.br, int64(fl)) return err } -func (c *sclient) handleFrameNotePreferred(ft frameType, fl uint32) error { +func (c *sclient) handleFrameNotePreferred(ft derp.FrameType, fl uint32) error { if fl != 1 { return fmt.Errorf("frameNotePreferred wrong size") } @@ -1063,7 +1061,7 @@ func (c *sclient) handleFrameNotePreferred(ft frameType, fl uint32) error { return nil } -func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error { +func (c *sclient) handleFrameWatchConns(ft derp.FrameType, fl uint32) error { if fl != 0 { return fmt.Errorf("handleFrameWatchConns wrong size") } @@ -1074,9 +1072,9 @@ func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error { return nil } -func (c *sclient) handleFramePing(ft frameType, fl uint32) error { +func (c *sclient) handleFramePing(ft derp.FrameType, fl uint32) error { c.s.gotPing.Add(1) - var m PingMessage + var m derp.PingMessage if fl < uint32(len(m)) { return fmt.Errorf("short ping: %v", fl) } @@ -1101,8 +1099,8 @@ func (c *sclient) handleFramePing(ft frameType, fl uint32) error { return err } -func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error { - if fl != keyLen { +func (c *sclient) handleFrameClosePeer(ft derp.FrameType, fl uint32) error { + if fl != derp.KeyLen { return fmt.Errorf("handleFrameClosePeer wrong size") } if !c.canMesh { @@ -1135,7 +1133,7 @@ func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error { // handleFrameForwardPacket reads a "forward packet" frame from the client // (which must be a trusted client, a peer in our mesh). 
-func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { +func (c *sclient) handleFrameForwardPacket(ft derp.FrameType, fl uint32) error { if !c.canMesh { return fmt.Errorf("insufficient permissions") } @@ -1162,7 +1160,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { if dstLen > 1 { reason = dropReasonDupClient } else { - c.requestPeerGoneWriteLimited(dstKey, contents, PeerGoneReasonNotHere) + c.requestPeerGoneWriteLimited(dstKey, contents, derp.PeerGoneReasonNotHere) } s.recordDrop(contents, srcKey, dstKey, reason) return nil @@ -1178,7 +1176,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { } // handleFrameSendPacket reads a "send packet" frame from the client. -func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { +func (c *sclient) handleFrameSendPacket(ft derp.FrameType, fl uint32) error { s := c.s dstKey, contents, err := s.recvPacket(c.br, fl) @@ -1215,7 +1213,7 @@ func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { if dstLen > 1 { reason = dropReasonDupClient } else { - c.requestPeerGoneWriteLimited(dstKey, contents, PeerGoneReasonNotHere) + c.requestPeerGoneWriteLimited(dstKey, contents, derp.PeerGoneReasonNotHere) } s.recordDrop(contents, c.key, dstKey, reason) c.debugLogf("SendPacket for %s, dropping with reason=%s", dstKey.ShortString(), reason) @@ -1325,13 +1323,13 @@ func (c *sclient) sendPkt(dst *sclient, p pkt) error { // notified (in a new goroutine) whenever a peer has disconnected from all DERP // nodes in the current region. func (c *sclient) onPeerGoneFromRegion(peer key.NodePublic) { - c.requestPeerGoneWrite(peer, PeerGoneReasonDisconnected) + c.requestPeerGoneWrite(peer, derp.PeerGoneReasonDisconnected) } // requestPeerGoneWrite sends a request to write a "peer gone" frame // with an explanation of why it is gone. It blocks until either the // write request is scheduled, or the client has closed. 
-func (c *sclient) requestPeerGoneWrite(peer key.NodePublic, reason PeerGoneReasonType) { +func (c *sclient) requestPeerGoneWrite(peer key.NodePublic, reason derp.PeerGoneReasonType) { select { case c.peerGone <- peerGoneMsg{ peer: peer, @@ -1358,7 +1356,7 @@ func (c *sclient) requestMeshUpdate() { // isMeshPeer reports whether the client is a trusted mesh peer // node in the DERP region. -func (s *Server) isMeshPeer(info *clientInfo) bool { +func (s *Server) isMeshPeer(info *derp.ClientInfo) bool { // Compare mesh keys in constant time to prevent timing attacks. // Since mesh keys are a fixed length, we don’t need to be concerned // about timing attacks on client mesh keys that are the wrong length. @@ -1372,7 +1370,7 @@ func (s *Server) isMeshPeer(info *clientInfo) bool { // verifyClient checks whether the client is allowed to connect to the derper, // depending on how & whether the server's been configured to verify. -func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, info *clientInfo, clientIP netip.Addr) error { +func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, info *derp.ClientInfo, clientIP netip.Addr) error { if s.isMeshPeer(info) { // Trusted mesh peer. No need to verify further. In fact, verifying // further wouldn't work: it's not part of the tailnet so tailscaled and @@ -1436,10 +1434,10 @@ func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, inf } func (s *Server) sendServerKey(lw *lazyBufioWriter) error { - buf := make([]byte, 0, len(magic)+key.NodePublicRawLen) - buf = append(buf, magic...) + buf := make([]byte, 0, len(derp.Magic)+key.NodePublicRawLen) + buf = append(buf, derp.Magic...) 
buf = s.publicKey.AppendTo(buf) - err := writeFrame(lw.bw(), frameServerKey, buf) + err := derp.WriteFrame(lw.bw(), derp.FrameServerKey, buf) lw.Flush() // redundant (no-op) flush to release bufio.Writer return err } @@ -1504,21 +1502,16 @@ func (s *Server) noteClientActivity(c *sclient) { dup.sendHistory = append(dup.sendHistory, c) } -type serverInfo struct { - Version int `json:"version,omitempty"` - - TokenBucketBytesPerSecond int `json:",omitempty"` - TokenBucketBytesBurst int `json:",omitempty"` -} +type ServerInfo = derp.ServerInfo func (s *Server) sendServerInfo(bw *lazyBufioWriter, clientKey key.NodePublic) error { - msg, err := json.Marshal(serverInfo{Version: ProtocolVersion}) + msg, err := json.Marshal(ServerInfo{Version: derp.ProtocolVersion}) if err != nil { return err } msgbox := s.privateKey.SealTo(clientKey, msg) - if err := writeFrameHeader(bw.bw(), frameServerInfo, uint32(len(msgbox))); err != nil { + if err := derp.WriteFrameHeader(bw.bw(), derp.FrameServerInfo, uint32(len(msgbox))); err != nil { return err } if _, err := bw.Write(msgbox); err != nil { @@ -1530,12 +1523,12 @@ func (s *Server) sendServerInfo(bw *lazyBufioWriter, clientKey key.NodePublic) e // recvClientKey reads the frameClientInfo frame from the client (its // proof of identity) upon its initial connection. It should be // considered especially untrusted at this point. 
-func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info *clientInfo, err error) { - fl, err := readFrameTypeHeader(br, frameClientInfo) +func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info *derp.ClientInfo, err error) { + fl, err := derp.ReadFrameTypeHeader(br, derp.FrameClientInfo) if err != nil { return zpub, nil, err } - const minLen = keyLen + nonceLen + const minLen = derp.KeyLen + derp.NonceLen if fl < minLen { return zpub, nil, errors.New("short client info") } @@ -1547,7 +1540,7 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info if err := clientKey.ReadRawWithoutAllocating(br); err != nil { return zpub, nil, err } - msgLen := int(fl - keyLen) + msgLen := int(fl - derp.KeyLen) msgbox := make([]byte, msgLen) if _, err := io.ReadFull(br, msgbox); err != nil { return zpub, nil, fmt.Errorf("msgbox: %v", err) @@ -1556,7 +1549,7 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info if !ok { return zpub, nil, fmt.Errorf("msgbox: cannot open len=%d with client key %s", msgLen, clientKey) } - info = new(clientInfo) + info = new(derp.ClientInfo) if err := json.Unmarshal(msg, info); err != nil { return zpub, nil, fmt.Errorf("msg: %v", err) } @@ -1564,15 +1557,15 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info } func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.NodePublic, contents []byte, err error) { - if frameLen < keyLen { + if frameLen < derp.KeyLen { return zpub, nil, errors.New("short send packet frame") } if err := dstKey.ReadRawWithoutAllocating(br); err != nil { return zpub, nil, err } - packetLen := frameLen - keyLen - if packetLen > MaxPacketSize { - return zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize) + packetLen := frameLen - derp.KeyLen + if packetLen > derp.MaxPacketSize { + return zpub, nil, fmt.Errorf("data packet longer (%d) than 
max of %v", packetLen, derp.MaxPacketSize) } contents = make([]byte, packetLen) if _, err := io.ReadFull(br, contents); err != nil { @@ -1592,7 +1585,7 @@ func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.NodeP var zpub key.NodePublic func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, dstKey key.NodePublic, contents []byte, err error) { - if frameLen < keyLen*2 { + if frameLen < derp.KeyLen*2 { return zpub, zpub, nil, errors.New("short send packet frame") } if err := srcKey.ReadRawWithoutAllocating(br); err != nil { @@ -1601,9 +1594,9 @@ func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, d if err := dstKey.ReadRawWithoutAllocating(br); err != nil { return zpub, zpub, nil, err } - packetLen := frameLen - keyLen*2 - if packetLen > MaxPacketSize { - return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize) + packetLen := frameLen - derp.KeyLen*2 + if packetLen > derp.MaxPacketSize { + return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, derp.MaxPacketSize) } contents = make([]byte, packetLen) if _, err := io.ReadFull(br, contents); err != nil { @@ -1628,7 +1621,7 @@ type sclient struct { s *Server nc Conn key key.NodePublic - info clientInfo + info derp.ClientInfo logf logger.Logf done <-chan struct{} // closed when connection closes remoteIPPort netip.AddrPort // zero if remoteAddr is not ip:port. 
@@ -1666,19 +1659,19 @@ type sclient struct { peerGoneLim *rate.Limiter } -func (c *sclient) presentFlags() PeerPresentFlags { - var f PeerPresentFlags +func (c *sclient) presentFlags() derp.PeerPresentFlags { + var f derp.PeerPresentFlags if c.info.IsProber { - f |= PeerPresentIsProber + f |= derp.PeerPresentIsProber } if c.canMesh { - f |= PeerPresentIsMeshPeer + f |= derp.PeerPresentIsMeshPeer } if c.isNotIdealConn { - f |= PeerPresentNotIdeal + f |= derp.PeerPresentNotIdeal } if f == 0 { - return PeerPresentIsRegular + return derp.PeerPresentIsRegular } return f } @@ -1688,7 +1681,7 @@ func (c *sclient) presentFlags() PeerPresentFlags { type peerConnState struct { ipPort netip.AddrPort // if present, the peer's IP:port peer key.NodePublic - flags PeerPresentFlags + flags derp.PeerPresentFlags present bool } @@ -1709,7 +1702,7 @@ type pkt struct { // peerGoneMsg is a request to write a peerGone frame to an sclient type peerGoneMsg struct { peer key.NodePublic - reason PeerGoneReasonType + reason derp.PeerGoneReasonType } func (c *sclient) setPreferred(v bool) { @@ -1788,7 +1781,7 @@ func (c *sclient) sendLoop(ctx context.Context) error { defer c.onSendLoopDone() jitter := rand.N(5 * time.Second) - keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(KeepAlive + jitter) + keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(derp.KeepAlive + jitter) defer keepAliveTick.Stop() var werr error // last write error @@ -1887,14 +1880,14 @@ func (c *sclient) setWriteDeadline() { // sendKeepAlive sends a keep-alive frame, without flushing. func (c *sclient) sendKeepAlive() error { c.setWriteDeadline() - return writeFrameHeader(c.bw.bw(), frameKeepAlive, 0) + return derp.WriteFrameHeader(c.bw.bw(), derp.FrameKeepAlive, 0) } // sendPong sends a pong reply, without flushing. 
func (c *sclient) sendPong(data [8]byte) error { c.s.sentPong.Add(1) c.setWriteDeadline() - if err := writeFrameHeader(c.bw.bw(), framePong, uint32(len(data))); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePong, uint32(len(data))); err != nil { return err } _, err := c.bw.Write(data[:]) @@ -1902,23 +1895,23 @@ func (c *sclient) sendPong(data [8]byte) error { } const ( - peerGoneFrameLen = keyLen + 1 - peerPresentFrameLen = keyLen + 16 + 2 + 1 // 16 byte IP + 2 byte port + 1 byte flags + peerGoneFrameLen = derp.KeyLen + 1 + peerPresentFrameLen = derp.KeyLen + 16 + 2 + 1 // 16 byte IP + 2 byte port + 1 byte flags ) // sendPeerGone sends a peerGone frame, without flushing. -func (c *sclient) sendPeerGone(peer key.NodePublic, reason PeerGoneReasonType) error { +func (c *sclient) sendPeerGone(peer key.NodePublic, reason derp.PeerGoneReasonType) error { switch reason { - case PeerGoneReasonDisconnected: + case derp.PeerGoneReasonDisconnected: c.s.peerGoneDisconnectedFrames.Add(1) - case PeerGoneReasonNotHere: + case derp.PeerGoneReasonNotHere: c.s.peerGoneNotHereFrames.Add(1) } c.setWriteDeadline() data := make([]byte, 0, peerGoneFrameLen) data = peer.AppendTo(data) data = append(data, byte(reason)) - if err := writeFrameHeader(c.bw.bw(), framePeerGone, uint32(len(data))); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePeerGone, uint32(len(data))); err != nil { return err } @@ -1927,17 +1920,17 @@ func (c *sclient) sendPeerGone(peer key.NodePublic, reason PeerGoneReasonType) e } // sendPeerPresent sends a peerPresent frame, without flushing. 
-func (c *sclient) sendPeerPresent(peer key.NodePublic, ipPort netip.AddrPort, flags PeerPresentFlags) error { +func (c *sclient) sendPeerPresent(peer key.NodePublic, ipPort netip.AddrPort, flags derp.PeerPresentFlags) error { c.setWriteDeadline() - if err := writeFrameHeader(c.bw.bw(), framePeerPresent, peerPresentFrameLen); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePeerPresent, peerPresentFrameLen); err != nil { return err } payload := make([]byte, peerPresentFrameLen) _ = peer.AppendTo(payload[:0]) a16 := ipPort.Addr().As16() - copy(payload[keyLen:], a16[:]) - binary.BigEndian.PutUint16(payload[keyLen+16:], ipPort.Port()) - payload[keyLen+18] = byte(flags) + copy(payload[derp.KeyLen:], a16[:]) + binary.BigEndian.PutUint16(payload[derp.KeyLen+16:], ipPort.Port()) + payload[derp.KeyLen+18] = byte(flags) _, err := c.bw.Write(payload) return err } @@ -1975,7 +1968,7 @@ func (c *sclient) sendMeshUpdates() error { if pcs.present { err = c.sendPeerPresent(pcs.peer, pcs.ipPort, pcs.flags) } else { - err = c.sendPeerGone(pcs.peer, PeerGoneReasonDisconnected) + err = c.sendPeerGone(pcs.peer, derp.PeerGoneReasonDisconnected) } if err != nil { return err @@ -2010,7 +2003,7 @@ func (c *sclient) sendPacket(srcKey key.NodePublic, contents []byte) (err error) pktLen += key.NodePublicRawLen c.noteSendFromSrc(srcKey) } - if err = writeFrameHeader(c.bw.bw(), frameRecvPacket, uint32(pktLen)); err != nil { + if err = derp.WriteFrameHeader(c.bw.bw(), derp.FrameRecvPacket, uint32(pktLen)); err != nil { return err } if withKey { @@ -2286,7 +2279,7 @@ func (s *Server) checkVerifyClientsLocalTailscaled() error { if err != nil { return fmt.Errorf("localClient.Status: %w", err) } - info := &clientInfo{ + info := &derp.ClientInfo{ IsProber: true, } clientIP := netip.IPv6Loopback() diff --git a/derp/derp_server_default.go b/derp/derpserver/derp_server_default.go similarity index 91% rename from derp/derp_server_default.go rename to 
derp/derpserver/derp_server_default.go index 014cfffd642c2..874e590d3c812 100644 --- a/derp/derp_server_default.go +++ b/derp/derpserver/derp_server_default.go @@ -3,7 +3,7 @@ //go:build !linux || android -package derp +package derpserver import "context" diff --git a/derp/derp_server_linux.go b/derp/derpserver/derp_server_linux.go similarity index 99% rename from derp/derp_server_linux.go rename to derp/derpserver/derp_server_linux.go index 5a40e114eecd2..768e6a2ab6ab7 100644 --- a/derp/derp_server_linux.go +++ b/derp/derpserver/derp_server_linux.go @@ -3,7 +3,7 @@ //go:build linux && !android -package derp +package derpserver import ( "context" diff --git a/derp/derpserver/derpserver_test.go b/derp/derpserver/derpserver_test.go new file mode 100644 index 0000000000000..3f0ba2ec0cda3 --- /dev/null +++ b/derp/derpserver/derpserver_test.go @@ -0,0 +1,782 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derpserver + +import ( + "bufio" + "cmp" + "context" + "crypto/x509" + "encoding/asn1" + "expvar" + "fmt" + "log" + "net" + "os" + "reflect" + "strconv" + "sync" + "testing" + "time" + + qt "github.com/frankban/quicktest" + "go4.org/mem" + "golang.org/x/time/rate" + "tailscale.com/derp" + "tailscale.com/derp/derpconst" + "tailscale.com/types/key" + "tailscale.com/types/logger" +) + +const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + +func TestSetMeshKey(t *testing.T) { + for name, tt := range map[string]struct { + key string + want key.DERPMesh + wantErr bool + }{ + "clobber": { + key: testMeshKey, + wantErr: false, + }, + "invalid": { + key: "badf00d", + wantErr: true, + }, + } { + t.Run(name, func(t *testing.T) { + s := &Server{} + + err := s.SetMeshKey(tt.key) + if tt.wantErr { + if err == nil { + t.Fatalf("expected err") + } + return + } + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + + want, err := key.ParseDERPMesh(tt.key) + if err != nil { + t.Fatal(err) + } + 
if !s.meshKey.Equal(want) { + t.Fatalf("got %v, want %v", s.meshKey, want) + } + }) + } +} + +func TestIsMeshPeer(t *testing.T) { + s := &Server{} + err := s.SetMeshKey(testMeshKey) + if err != nil { + t.Fatal(err) + } + for name, tt := range map[string]struct { + want bool + meshKey string + wantAllocs float64 + }{ + "nil": { + want: false, + wantAllocs: 0, + }, + "mismatch": { + meshKey: "6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8", + want: false, + wantAllocs: 1, + }, + "match": { + meshKey: testMeshKey, + want: true, + wantAllocs: 0, + }, + } { + t.Run(name, func(t *testing.T) { + var got bool + var mKey key.DERPMesh + if tt.meshKey != "" { + mKey, err = key.ParseDERPMesh(tt.meshKey) + if err != nil { + t.Fatalf("ParseDERPMesh(%q) failed: %v", tt.meshKey, err) + } + } + + info := derp.ClientInfo{ + MeshKey: mKey, + } + allocs := testing.AllocsPerRun(1, func() { + got = s.isMeshPeer(&info) + }) + if got != tt.want { + t.Fatalf("got %t, want %t: info = %#v", got, tt.want, info) + } + + if allocs != tt.wantAllocs && tt.want { + t.Errorf("%f allocations, want %f", allocs, tt.wantAllocs) + } + }) + } +} + +type testFwd int + +func (testFwd) ForwardPacket(key.NodePublic, key.NodePublic, []byte) error { + panic("not called in tests") +} +func (testFwd) String() string { + panic("not called in tests") +} + +func pubAll(b byte) (ret key.NodePublic) { + var bs [32]byte + for i := range bs { + bs[i] = b + } + return key.NodePublicFromRaw32(mem.B(bs[:])) +} + +func TestForwarderRegistration(t *testing.T) { + s := &Server{ + clients: make(map[key.NodePublic]*clientSet), + clientsMesh: map[key.NodePublic]PacketForwarder{}, + } + want := func(want map[key.NodePublic]PacketForwarder) { + t.Helper() + if got := s.clientsMesh; !reflect.DeepEqual(got, want) { + t.Fatalf("mismatch\n got: %v\nwant: %v\n", got, want) + } + } + wantCounter := func(c *expvar.Int, want int) { + t.Helper() + if got := c.Value(); got != int64(want) { + t.Errorf("counter = %v; want 
%v", got, want) + } + } + singleClient := func(c *sclient) *clientSet { + cs := &clientSet{} + cs.activeClient.Store(c) + return cs + } + + u1 := pubAll(1) + u2 := pubAll(2) + u3 := pubAll(3) + + s.AddPacketForwarder(u1, testFwd(1)) + s.AddPacketForwarder(u2, testFwd(2)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Verify a remove of non-registered forwarder is no-op. + s.RemovePacketForwarder(u2, testFwd(999)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Verify a remove of non-registered user is no-op. + s.RemovePacketForwarder(u3, testFwd(1)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Actual removal. + s.RemovePacketForwarder(u2, testFwd(2)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + }) + + // Adding a dup for a user. + wantCounter(&s.multiForwarderCreated, 0) + s.AddPacketForwarder(u1, testFwd(100)) + s.AddPacketForwarder(u1, testFwd(100)) // dup to trigger dup path + want(map[key.NodePublic]PacketForwarder{ + u1: newMultiForwarder(testFwd(1), testFwd(100)), + }) + wantCounter(&s.multiForwarderCreated, 1) + + // Removing a forwarder in a multi set that doesn't exist; does nothing. + s.RemovePacketForwarder(u1, testFwd(55)) + want(map[key.NodePublic]PacketForwarder{ + u1: newMultiForwarder(testFwd(1), testFwd(100)), + }) + + // Removing a forwarder in a multi set that does exist should collapse it away + // from being a multiForwarder. + wantCounter(&s.multiForwarderDeleted, 0) + s.RemovePacketForwarder(u1, testFwd(1)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(100), + }) + wantCounter(&s.multiForwarderDeleted, 1) + + // Removing an entry for a client that's still connected locally should result + // in a nil forwarder. 
+ u1c := &sclient{ + key: u1, + logf: logger.Discard, + } + s.clients[u1] = singleClient(u1c) + s.RemovePacketForwarder(u1, testFwd(100)) + want(map[key.NodePublic]PacketForwarder{ + u1: nil, + }) + + // But once that client disconnects, it should go away. + s.unregisterClient(u1c) + want(map[key.NodePublic]PacketForwarder{}) + + // But if it already has a forwarder, it's not removed. + s.AddPacketForwarder(u1, testFwd(2)) + s.unregisterClient(u1c) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(2), + }) + + // Now pretend u1 was already connected locally (so clientsMesh[u1] is nil), and then we heard + // that they're also connected to a peer of ours. That should transition the forwarder + // from nil to the new one, not to a multiForwarder. + s.clients[u1] = singleClient(u1c) + s.clientsMesh[u1] = nil + want(map[key.NodePublic]PacketForwarder{ + u1: nil, + }) + s.AddPacketForwarder(u1, testFwd(3)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(3), + }) +} + +type channelFwd struct { + // id is to ensure that different instances that reference the + // same channel are not equal, as they are used as keys in the + // multiForwarder map. 
+ id int + c chan []byte +} + +func (f channelFwd) String() string { return "" } +func (f channelFwd) ForwardPacket(_ key.NodePublic, _ key.NodePublic, packet []byte) error { + f.c <- packet + return nil +} + +func TestMultiForwarder(t *testing.T) { + received := 0 + var wg sync.WaitGroup + ch := make(chan []byte) + ctx, cancel := context.WithCancel(context.Background()) + + s := &Server{ + clients: make(map[key.NodePublic]*clientSet), + clientsMesh: map[key.NodePublic]PacketForwarder{}, + } + u := pubAll(1) + s.AddPacketForwarder(u, channelFwd{1, ch}) + + wg.Add(2) + go func() { + defer wg.Done() + for { + select { + case <-ch: + received += 1 + case <-ctx.Done(): + return + } + } + }() + go func() { + defer wg.Done() + for { + s.AddPacketForwarder(u, channelFwd{2, ch}) + s.AddPacketForwarder(u, channelFwd{3, ch}) + s.RemovePacketForwarder(u, channelFwd{2, ch}) + s.RemovePacketForwarder(u, channelFwd{1, ch}) + s.AddPacketForwarder(u, channelFwd{1, ch}) + s.RemovePacketForwarder(u, channelFwd{3, ch}) + if ctx.Err() != nil { + return + } + } + }() + + // Number of messages is chosen arbitrarily, just for this loop to + // run long enough concurrently with {Add,Remove}PacketForwarder loop above. 
+ numMsgs := 5000 + var fwd PacketForwarder + for i := range numMsgs { + s.mu.Lock() + fwd = s.clientsMesh[u] + s.mu.Unlock() + fwd.ForwardPacket(u, u, []byte(strconv.Itoa(i))) + } + + cancel() + wg.Wait() + if received != numMsgs { + t.Errorf("expected %d messages to be forwarded; got %d", numMsgs, received) + } +} +func TestMetaCert(t *testing.T) { + priv := key.NewNode() + pub := priv.Public() + s := NewServer(priv, t.Logf) + + certBytes := s.MetaCert() + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + log.Fatal(err) + } + if fmt.Sprint(cert.SerialNumber) != fmt.Sprint(derp.ProtocolVersion) { + t.Errorf("serial = %v; want %v", cert.SerialNumber, derp.ProtocolVersion) + } + if g, w := cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix+pub.UntypedHexString(); g != w { + t.Errorf("CommonName = %q; want %q", g, w) + } + if n := len(cert.Extensions); n != 1 { + t.Fatalf("got %d extensions; want 1", n) + } + + // oidExtensionBasicConstraints is the Basic Constraints ID copied + // from the x509 package. + oidExtensionBasicConstraints := asn1.ObjectIdentifier{2, 5, 29, 19} + + if id := cert.Extensions[0].Id; !id.Equal(oidExtensionBasicConstraints) { + t.Errorf("extension ID = %v; want %v", id, oidExtensionBasicConstraints) + } +} + +func TestServerDupClients(t *testing.T) { + serverPriv := key.NewNode() + var s *Server + + clientPriv := key.NewNode() + clientPub := clientPriv.Public() + + var c1, c2, c3 *sclient + var clientName map[*sclient]string + + // run starts a new test case and resets clients back to their zero values. 
+ run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) { + s = NewServer(serverPriv, t.Logf) + s.dupPolicy = dupPolicy + c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")} + c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")} + c3 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c3: ")} + clientName = map[*sclient]string{ + c1: "c1", + c2: "c2", + c3: "c3", + } + t.Run(name, f) + } + runBothWays := func(name string, f func(t *testing.T)) { + run(name+"_disablefighters", disableFighters, f) + run(name+"_lastwriteractive", lastWriterIsActive, f) + } + wantSingleClient := func(t *testing.T, want *sclient) { + t.Helper() + got, ok := s.clients[want.key] + if !ok { + t.Error("no clients for key") + return + } + if got.dup != nil { + t.Errorf("unexpected dup set for single client") + } + cur := got.activeClient.Load() + if cur != want { + t.Errorf("active client = %q; want %q", clientName[cur], clientName[want]) + } + if cur != nil { + if cur.isDup.Load() { + t.Errorf("unexpected isDup on singleClient") + } + if cur.isDisabled.Load() { + t.Errorf("unexpected isDisabled on singleClient") + } + } + } + wantNoClient := func(t *testing.T) { + t.Helper() + _, ok := s.clients[clientPub] + if !ok { + // Good + return + } + t.Errorf("got client; want empty") + } + wantDupSet := func(t *testing.T) *dupClientSet { + t.Helper() + cs, ok := s.clients[clientPub] + if !ok { + t.Fatal("no set for key; want dup set") + return nil + } + if cs.dup != nil { + return cs.dup + } + t.Fatalf("no dup set for key; want dup set") + return nil + } + wantActive := func(t *testing.T, want *sclient) { + t.Helper() + set, ok := s.clients[clientPub] + if !ok { + t.Error("no set for key") + return + } + got := set.activeClient.Load() + if got != want { + t.Errorf("active client = %q; want %q", clientName[got], clientName[want]) + } + } + checkDup := func(t *testing.T, c *sclient, want bool) { + t.Helper() + if got := c.isDup.Load(); got != 
want { + t.Errorf("client %q isDup = %v; want %v", clientName[c], got, want) + } + } + checkDisabled := func(t *testing.T, c *sclient, want bool) { + t.Helper() + if got := c.isDisabled.Load(); got != want { + t.Errorf("client %q isDisabled = %v; want %v", clientName[c], got, want) + } + } + wantDupConns := func(t *testing.T, want int) { + t.Helper() + if got := s.dupClientConns.Value(); got != int64(want) { + t.Errorf("dupClientConns = %v; want %v", got, want) + } + } + wantDupKeys := func(t *testing.T, want int) { + t.Helper() + if got := s.dupClientKeys.Value(); got != int64(want) { + t.Errorf("dupClientKeys = %v; want %v", got, want) + } + } + + // Common case: a single client comes and goes, with no dups. + runBothWays("one_comes_and_goes", func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + wantSingleClient(t, c1) + s.unregisterClient(c1) + wantNoClient(t) + }) + + // A still somewhat common case: a single client was + // connected and then their wifi dies or laptop closes + // or they switch networks and connect from a + // different network. They have two connections but + // it's not very bad. Only their new one is + // active. The last one, being dead, doesn't send and + // thus the new one doesn't get disabled. + runBothWays("small_overlap_replacement", func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + wantSingleClient(t, c1) + wantActive(t, c1) + wantDupKeys(t, 0) + wantDupKeys(t, 0) + + s.registerClient(c2) // wifi dies; c2 replacement connects + wantDupSet(t) + wantDupConns(t, 2) + wantDupKeys(t, 1) + checkDup(t, c1, true) + checkDup(t, c2, true) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + wantActive(t, c2) // sends go to the replacement + + s.unregisterClient(c1) // c1 finally times out + wantSingleClient(t, c2) + checkDup(t, c2, false) // c2 is no longer a dup + wantActive(t, c2) + wantDupConns(t, 0) + wantDupKeys(t, 0) + }) + + // Key cloning situation with concurrent clients, both trying + // to write. 
+ run("concurrent_dups_get_disabled", disableFighters, func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + wantSingleClient(t, c1) + wantActive(t, c1) + s.registerClient(c2) + wantDupSet(t) + wantDupKeys(t, 1) + wantDupConns(t, 2) + wantActive(t, c2) + checkDup(t, c1, true) + checkDup(t, c2, true) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + + s.noteClientActivity(c2) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + s.noteClientActivity(c1) + checkDisabled(t, c1, true) + checkDisabled(t, c2, true) + wantActive(t, nil) + + s.registerClient(c3) + wantActive(t, c3) + checkDisabled(t, c3, false) + wantDupKeys(t, 1) + wantDupConns(t, 3) + + s.unregisterClient(c3) + wantActive(t, nil) + wantDupKeys(t, 1) + wantDupConns(t, 2) + + s.unregisterClient(c2) + wantSingleClient(t, c1) + wantDupKeys(t, 0) + wantDupConns(t, 0) + }) + + // Key cloning with an A->B->C->A series instead. + run("concurrent_dups_three_parties", disableFighters, func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + s.registerClient(c2) + s.registerClient(c3) + s.noteClientActivity(c1) + checkDisabled(t, c1, true) + checkDisabled(t, c2, true) + checkDisabled(t, c3, true) + wantActive(t, nil) + }) + + run("activity_promotes_primary_when_nil", disableFighters, func(t *testing.T) { + wantNoClient(t) + + // Last registered client is the active one... + s.registerClient(c1) + wantActive(t, c1) + s.registerClient(c2) + wantActive(t, c2) + s.registerClient(c3) + s.noteClientActivity(c2) + wantActive(t, c3) + + // But if the last one goes away, the one with the + // most recent activity wins. 
+ s.unregisterClient(c3) + wantActive(t, c2) + }) + + run("concurrent_dups_three_parties_last_writer", lastWriterIsActive, func(t *testing.T) { + wantNoClient(t) + + s.registerClient(c1) + wantActive(t, c1) + s.registerClient(c2) + wantActive(t, c2) + + s.noteClientActivity(c1) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + wantActive(t, c1) + + s.noteClientActivity(c2) + checkDisabled(t, c1, false) + checkDisabled(t, c2, false) + wantActive(t, c2) + + s.unregisterClient(c2) + checkDisabled(t, c1, false) + wantActive(t, c1) + }) +} + +func TestLimiter(t *testing.T) { + rl := rate.NewLimiter(rate.Every(time.Minute), 100) + for i := range 200 { + r := rl.Reserve() + d := r.Delay() + t.Logf("i=%d, allow=%v, d=%v", i, r.OK(), d) + } +} + +// BenchmarkConcurrentStreams exercises mutex contention on a +// single Server instance with multiple concurrent client flows. +func BenchmarkConcurrentStreams(b *testing.B) { + serverPrivateKey := key.NewNode() + s := NewServer(serverPrivateKey, logger.Discard) + defer s.Close() + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + b.Fatal(err) + } + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + for ctx.Err() == nil { + connIn, err := ln.Accept() + if err != nil { + if ctx.Err() != nil { + return + } + b.Error(err) + return + } + + brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) + go s.Accept(ctx, connIn, brwServer, "test-client") + } + }() + + newClient := func(t testing.TB) *derp.Client { + t.Helper() + connOut, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + t.Cleanup(func() { connOut.Close() }) + + k := key.NewNode() + + brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) + client, err := derp.NewClient(k, connOut, brw, logger.Discard) + if err != nil { + b.Fatalf("client: %v", err) + } + return client + } + + b.RunParallel(func(pb 
*testing.PB) { + c1, c2 := newClient(b), newClient(b) + const packetSize = 100 + msg := make([]byte, packetSize) + for pb.Next() { + if err := c1.Send(c2.PublicKey(), msg); err != nil { + b.Fatal(err) + } + _, err := c2.Recv() + if err != nil { + return + } + } + }) +} + +func BenchmarkSendRecv(b *testing.B) { + for _, size := range []int{10, 100, 1000, 10000} { + b.Run(fmt.Sprintf("msgsize=%d", size), func(b *testing.B) { benchmarkSendRecvSize(b, size) }) + } +} + +func benchmarkSendRecvSize(b *testing.B, packetSize int) { + serverPrivateKey := key.NewNode() + s := NewServer(serverPrivateKey, logger.Discard) + defer s.Close() + + k := key.NewNode() + clientKey := k.Public() + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + b.Fatal(err) + } + defer ln.Close() + + connOut, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + defer connOut.Close() + + connIn, err := ln.Accept() + if err != nil { + b.Fatal(err) + } + defer connIn.Close() + + brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go s.Accept(ctx, connIn, brwServer, "test-client") + + brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) + client, err := derp.NewClient(k, connOut, brw, logger.Discard) + if err != nil { + b.Fatalf("client: %v", err) + } + + go func() { + for { + _, err := client.Recv() + if err != nil { + return + } + } + }() + + msg := make([]byte, packetSize) + b.SetBytes(int64(len(msg))) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + if err := client.Send(clientKey, msg); err != nil { + b.Fatal(err) + } + } +} + +func TestParseSSOutput(t *testing.T) { + contents, err := os.ReadFile("testdata/example_ss.txt") + if err != nil { + t.Errorf("os.ReadFile(example_ss.txt) failed: %v", err) + } + seen := parseSSOutput(string(contents)) + if len(seen) == 0 { + t.Errorf("parseSSOutput expected non-empty map") 
+ } +} + +func TestGetPerClientSendQueueDepth(t *testing.T) { + c := qt.New(t) + envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH" + + testCases := []struct { + envVal string + want int + }{ + // Empty case, envknob treats empty as missing also. + { + "", defaultPerClientSendQueueDepth, + }, + { + "64", 64, + }, + } + + for _, tc := range testCases { + t.Run(cmp.Or(tc.envVal, "empty"), func(t *testing.T) { + t.Setenv(envKey, tc.envVal) + val := getPerClientSendQueueDepth() + c.Assert(val, qt.Equals, tc.want) + }) + } +} diff --git a/derp/derphttp/derphttp_server.go b/derp/derpserver/handler.go similarity index 86% rename from derp/derphttp/derphttp_server.go rename to derp/derpserver/handler.go index 50aba774a9f1c..7cd6aa2fd5b95 100644 --- a/derp/derphttp/derphttp_server.go +++ b/derp/derpserver/handler.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derphttp +package derpserver import ( "fmt" @@ -12,14 +12,8 @@ import ( "tailscale.com/derp" ) -// fastStartHeader is the header (with value "1") that signals to the HTTP -// server that the DERP HTTP client does not want the HTTP 101 response -// headers and it will begin writing & reading the DERP protocol immediately -// following its HTTP request. -const fastStartHeader = "Derp-Fast-Start" - // Handler returns an http.Handler to be mounted at /derp, serving s. 
-func Handler(s *derp.Server) http.Handler { +func Handler(s *Server) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -42,7 +36,7 @@ func Handler(s *derp.Server) http.Handler { return } - fastStart := r.Header.Get(fastStartHeader) == "1" + fastStart := r.Header.Get(derp.FastStartHeader) == "1" h, ok := w.(http.Hijacker) if !ok { @@ -69,7 +63,7 @@ func Handler(s *derp.Server) http.Handler { } if v := r.Header.Get(derp.IdealNodeHeader); v != "" { - ctx = derp.IdealNodeContextKey.WithValue(ctx, v) + ctx = IdealNodeContextKey.WithValue(ctx, v) } s.Accept(ctx, netConn, conn, netConn.RemoteAddr().String()) diff --git a/derp/testdata/example_ss.txt b/derp/derpserver/testdata/example_ss.txt similarity index 100% rename from derp/testdata/example_ss.txt rename to derp/derpserver/testdata/example_ss.txt diff --git a/derp/export_test.go b/derp/export_test.go new file mode 100644 index 0000000000000..677a4932d2657 --- /dev/null +++ b/derp/export_test.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derp + +import "time" + +func (c *Client) RecvTimeoutForTest(timeout time.Duration) (m ReceivedMessage, err error) { + return c.recvTimeout(timeout) +} diff --git a/ipn/ipnlocal/web_client_stub.go b/ipn/ipnlocal/web_client_stub.go index 5f37560cc6ddb..787867b4f450e 100644 --- a/ipn/ipnlocal/web_client_stub.go +++ b/ipn/ipnlocal/web_client_stub.go @@ -8,15 +8,13 @@ package ipnlocal import ( "errors" "net" - - "tailscale.com/client/local" ) const webClientPort = 5252 type webClient struct{} -func (b *LocalBackend) ConfigureWebClient(lc *local.Client) {} +func (b *LocalBackend) ConfigureWebClient(any) {} func (b *LocalBackend) webClientGetOrInit() error { return errors.New("not implemented") diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go index 064a86c8c35e5..0778e07df393a 100644 --- 
a/net/captivedetection/captivedetection_test.go +++ b/net/captivedetection/captivedetection_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/syncs" "tailscale.com/tstest/nettest" @@ -136,7 +136,7 @@ func TestAgainstDERPHandler(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - s := httptest.NewServer(http.HandlerFunc(derphttp.ServeNoContent)) + s := httptest.NewServer(http.HandlerFunc(derpserver.ServeNoContent)) defer s.Close() e := Endpoint{ URL: must.Get(url.Parse(s.URL + "/generate_204")), diff --git a/prober/derp_test.go b/prober/derp_test.go index 1ace9983c9ca4..92bcb0a617020 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -16,6 +16,7 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -145,12 +146,12 @@ func TestDerpProber(t *testing.T) { func TestRunDerpProbeNodePair(t *testing.T) { // os.Setenv("DERP_DEBUG_LOGS", "true") serverPrivateKey := key.NewNode() - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.NewServer(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: derphttp.Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") if err != nil { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 619183a60b742..795e4367fa3f7 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -222,9 +222,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ - 
tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/doctor from tailscale.com/ipn/ipnlocal tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal @@ -266,7 +266,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -302,7 +302,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ @@ -327,7 +326,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/tsd from tailscale.com/ipn/ipnext+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from 
tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal @@ -559,7 +558,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) encoding/pem from crypto/tls+ encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/health+ flag from tailscale.com/util/testenv fmt from archive/tar+ hash from compress/zlib+ diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index b28ebaba1fbdc..56643f5d47114 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -34,8 +34,7 @@ import ( "go4.org/mem" "tailscale.com/client/local" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/ipn" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" @@ -297,14 +296,14 @@ func exe() string { func RunDERPAndSTUN(t testing.TB, logf logger.Logf, ipAddress string) (derpMap *tailcfg.DERPMap) { t.Helper() - d := derp.NewServer(key.NewNode(), logf) + d := derpserver.NewServer(key.NewNode(), logf) ln, err := net.Listen("tcp", net.JoinHostPort(ipAddress, "0")) if err != nil { t.Fatal(err) } - httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d)) + httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv.Listener.Close() httpsrv.Listener = ln httpsrv.Config.ErrorLog = logger.StdLogger(logf) diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 1fa170d87df50..27ee517268597 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -51,8 +51,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" "gvisor.dev/gvisor/pkg/waiter" "tailscale.com/client/local" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netutil" "tailscale.com/net/netx" "tailscale.com/net/stun" @@ -601,7 
+600,7 @@ func (n *node) String() string { } type derpServer struct { - srv *derp.Server + srv *derpserver.Server handler http.Handler tlsConfig *tls.Config } @@ -612,12 +611,12 @@ func newDERPServer() *derpServer { ts.Close() ds := &derpServer{ - srv: derp.NewServer(key.NewNode(), logger.Discard), + srv: derpserver.NewServer(key.NewNode(), logger.Discard), tlsConfig: ts.TLS, // self-signed; test client configure to not check } var mux http.ServeMux - mux.Handle("/derp", derphttp.Handler(ds.srv)) - mux.HandleFunc("/generate_204", derphttp.ServeNoContent) + mux.Handle("/derp", derpserver.Handler(ds.srv)) + mux.HandleFunc("/generate_204", derpserver.ServeNoContent) ds.handler = &mux return ds diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 1b885c3f139a7..de24a5f60cd3b 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -39,8 +39,7 @@ import ( "golang.org/x/net/ipv4" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/control/controlknobs" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/disco" "tailscale.com/envknob" "tailscale.com/health" @@ -112,9 +111,9 @@ func (c *Conn) WaitReady(t testing.TB) { } func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { - d := derp.NewServer(key.NewNode(), logf) + d := derpserver.NewServer(key.NewNode(), logf) - httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d)) + httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv.Config.ErrorLog = logger.StdLogger(logf) httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) httpsrv.StartTLS() From db02a4664547a7d1d82650e28bd84e5eb4b243ef Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Wed, 24 Sep 2025 10:20:41 -0700 Subject: [PATCH 0409/1093] types/key: Update HardwareAttestationPublic 
representation (#17233) Sidestep cmd/viewer incompatibility hiccups with HardwareAttestationPublic type due to its *ecdsa.PublicKey inner member by serializing the key to a byte slice instead. Updates tailscale/corp#31269 Signed-off-by: Patrick O'Doherty --- types/key/hardware_attestation.go | 55 +++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 18 deletions(-) diff --git a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go index ead077a5d1fa4..ac3914ab20896 100644 --- a/types/key/hardware_attestation.go +++ b/types/key/hardware_attestation.go @@ -7,6 +7,7 @@ import ( "crypto" "crypto/ecdsa" "crypto/elliptic" + "crypto/subtle" "encoding/json" "fmt" "io" @@ -18,11 +19,13 @@ var ErrUnsupported = fmt.Errorf("key type not supported on this platform") const hardwareAttestPublicHexPrefix = "hwattestpub:" +const pubkeyLength = 65 // uncompressed P-256 + // HardwareAttestationKey describes a hardware-backed key that is used to // identify a node. Implementation details will // vary based on the platform in use (SecureEnclave for Apple, TPM for // Windows/Linux, Android Hardware-backed Keystore). -// This key can only be marshalled and unmarshalled on the same machine. +// This key can only be marshalled and unmarshaled on the same machine. type HardwareAttestationKey interface { crypto.Signer json.Marshaler @@ -43,25 +46,41 @@ func HardwareAttestationPublicFromPlatformKey(k HardwareAttestationKey) Hardware if !ok { panic("hardware attestation key is not ECDSA") } - return HardwareAttestationPublic{k: ecdsaPub} + bytes, err := ecdsaPub.Bytes() + if err != nil { + panic(err) + } + if len(bytes) != pubkeyLength { + panic("hardware attestation key is not uncompressed ECDSA P-256") + } + var ecdsaPubArr [pubkeyLength]byte + copy(ecdsaPubArr[:], bytes) + return HardwareAttestationPublic{k: ecdsaPubArr} } // HardwareAttestationPublic is the public key counterpart to // HardwareAttestationKey. 
type HardwareAttestationPublic struct { - k *ecdsa.PublicKey + k [pubkeyLength]byte } -func (k HardwareAttestationPublic) Equal(o HardwareAttestationPublic) bool { - if k.k == nil || o.k == nil { - return k.k == o.k +func (k *HardwareAttestationPublic) Clone() *HardwareAttestationPublic { + if k == nil { + return nil } - return k.k.X.Cmp(o.k.X) == 0 && k.k.Y.Cmp(o.k.Y) == 0 && k.k.Curve == o.k.Curve + var out HardwareAttestationPublic + copy(out.k[:], k.k[:]) + return &out +} + +func (k HardwareAttestationPublic) Equal(o HardwareAttestationPublic) bool { + return subtle.ConstantTimeCompare(k.k[:], o.k[:]) == 1 } // IsZero reports whether k is the zero value. func (k HardwareAttestationPublic) IsZero() bool { - return k.k == nil + var zero [pubkeyLength]byte + return k.k == zero } // String returns the hex-encoded public key with a type prefix. @@ -75,7 +94,7 @@ func (k HardwareAttestationPublic) String() string { // MarshalText implements encoding.TextMarshaler. func (k HardwareAttestationPublic) MarshalText() ([]byte, error) { - if k.k == nil { + if k.IsZero() { return nil, nil } return k.AppendText(nil) @@ -89,30 +108,30 @@ func (k *HardwareAttestationPublic) UnmarshalText(b []byte) error { return nil } - kb := make([]byte, 65) + kb := make([]byte, pubkeyLength) if err := parseHex(kb, mem.B(b), mem.S(hardwareAttestPublicHexPrefix)); err != nil { return err } - pk, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), kb) + _, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), kb) if err != nil { return err } - k.k = pk + copy(k.k[:], kb) return nil } func (k HardwareAttestationPublic) AppendText(dst []byte) ([]byte, error) { - b, err := k.k.Bytes() - if err != nil { - return nil, err - } - return appendHexKey(dst, hardwareAttestPublicHexPrefix, b), nil + return appendHexKey(dst, hardwareAttestPublicHexPrefix, k.k[:]), nil } // Verifier returns the ECDSA public key for verifying signatures made by k. 
func (k HardwareAttestationPublic) Verifier() *ecdsa.PublicKey { - return k.k + pk, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), k.k[:]) + if err != nil { + panic(err) + } + return pk } // emptyHardwareAttestationKey is a function that returns an empty From 34242df51b413351a1caec2213d7e9ca41dd75ed Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 24 Sep 2025 10:38:30 -0700 Subject: [PATCH 0410/1093] derp/derpserver: clean up extraction of derp.Server (#17264) PR #17258 extracted `derp.Server` into `derp/derpserver.Server`. This followup patch adds the following cleanups: 1. Rename `derp_server*.go` files to `derpserver*.go` to match the package name. 2. Rename the `derpserver.NewServer` constructor to `derpserver.New` to reduce stuttering. 3. Remove the unnecessary `derpserver.Conn` type alias. Updates #17257 Updates #cleanup Signed-off-by: Simon Law --- cmd/derper/cert_test.go | 2 +- cmd/derper/derper.go | 2 +- derp/derp_test.go | 6 +++--- derp/derphttp/derphttp_test.go | 6 +++--- .../derpserver/{derp_server.go => derpserver.go} | 16 +++++++--------- ...p_server_default.go => derpserver_default.go} | 0 ...{derp_server_linux.go => derpserver_linux.go} | 0 derp/derpserver/derpserver_test.go | 8 ++++---- prober/derp_test.go | 2 +- tstest/integration/integration.go | 2 +- tstest/natlab/vnet/vnet.go | 2 +- wgengine/magicsock/magicsock_test.go | 2 +- 12 files changed, 23 insertions(+), 25 deletions(-) rename derp/derpserver/{derp_server.go => derpserver.go} (99%) rename derp/derpserver/{derp_server_default.go => derpserver_default.go} (100%) rename derp/derpserver/{derp_server_linux.go => derpserver_linux.go} (100%) diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index 1ef932e7fe56e..c8a3229e9f41c 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -131,7 +131,7 @@ func TestPinnedCertRawIP(t *testing.T) { } defer ln.Close() - ds := derpserver.NewServer(key.NewNode(), t.Logf) + ds := derpserver.New(key.NewNode(), t.Logf) 
derpHandler := derpserver.Handler(ds) mux := http.NewServeMux() diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index eed94bd68c712..857d7def3b6ff 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -188,7 +188,7 @@ func main() { serveTLS := tsweb.IsProd443(*addr) || *certMode == "manual" - s := derpserver.NewServer(cfg.PrivateKey, log.Printf) + s := derpserver.New(cfg.PrivateKey, log.Printf) s.SetVerifyClient(*verifyClients) s.SetTailscaledSocketPath(*socket) s.SetVerifyClientURL(*verifyClientURL) diff --git a/derp/derp_test.go b/derp/derp_test.go index e765f7b54001a..52793f90fa9f5 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -83,7 +83,7 @@ func TestClientInfoUnmarshal(t *testing.T) { func TestSendRecv(t *testing.T) { serverPrivateKey := key.NewNode() - s := derpserver.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() const numClients = 3 @@ -305,7 +305,7 @@ func TestSendRecv(t *testing.T) { func TestSendFreeze(t *testing.T) { serverPrivateKey := key.NewNode() - s := derpserver.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() s.WriteTimeout = 100 * time.Millisecond @@ -549,7 +549,7 @@ const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789a func newTestServer(t *testing.T, ctx context.Context) *testServer { t.Helper() logf := logger.WithPrefix(t.Logf, "derp-server: ") - s := derpserver.NewServer(key.NewNode(), logf) + s := derpserver.New(key.NewNode(), logf) s.SetMeshKey(testMeshKey) ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index dd7cbcd247cb7..36c11f4fc25cc 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -44,7 +44,7 @@ func TestSendRecv(t *testing.T) { clientKeys = append(clientKeys, priv.Public()) } - s := derpserver.NewServer(serverPrivateKey, t.Logf) + s := 
derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ @@ -172,7 +172,7 @@ func waitConnect(t testing.TB, c *derphttp.Client) { func TestPing(t *testing.T) { serverPrivateKey := key.NewNode() - s := derpserver.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ @@ -225,7 +225,7 @@ func TestPing(t *testing.T) { const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server) { - s = derpserver.NewServer(k, t.Logf) + s = derpserver.New(k, t.Logf) httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), Handler: derpserver.Handler(s), diff --git a/derp/derpserver/derp_server.go b/derp/derpserver/derpserver.go similarity index 99% rename from derp/derpserver/derp_server.go rename to derp/derpserver/derpserver.go index 917ef147c5112..31cf9363a43bf 100644 --- a/derp/derpserver/derp_server.go +++ b/derp/derpserver/derpserver.go @@ -57,8 +57,6 @@ import ( "tailscale.com/version" ) -type Conn = derp.Conn - // verboseDropKeys is the set of destination public keys that should // verbosely log whenever DERP drops a packet. var verboseDropKeys = map[key.NodePublic]bool{} @@ -181,7 +179,7 @@ type Server struct { mu sync.Mutex closed bool - netConns map[Conn]chan struct{} // chan is closed when conn closes + netConns map[derp.Conn]chan struct{} // chan is closed when conn closes clients map[key.NodePublic]*clientSet watchers set.Set[*sclient] // mesh peers // clientsMesh tracks all clients in the cluster, both locally @@ -354,9 +352,9 @@ var bytesDropped = metrics.NewMultiLabelMap[dropReasonKindLabels]( "DERP bytes dropped by reason and by kind", ) -// NewServer returns a new DERP server. It doesn't listen on its own. +// New returns a new DERP server. It doesn't listen on its own. 
// Connections are given to it via Server.Accept. -func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { +func New(privateKey key.NodePrivate, logf logger.Logf) *Server { var ms runtime.MemStats runtime.ReadMemStats(&ms) @@ -369,7 +367,7 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { packetsRecvByKind: metrics.LabelMap{Label: "kind"}, clients: map[key.NodePublic]*clientSet{}, clientsMesh: map[key.NodePublic]PacketForwarder{}, - netConns: map[Conn]chan struct{}{}, + netConns: map[derp.Conn]chan struct{}{}, memSys0: ms.Sys, watchers: set.Set[*sclient]{}, peerGoneWatchers: map[key.NodePublic]set.HandleSet[func(key.NodePublic)]{}, @@ -570,7 +568,7 @@ func (s *Server) IsClientConnectedForTest(k key.NodePublic) bool { // on its own. // // Accept closes nc. -func (s *Server) Accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string) { +func (s *Server) Accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string) { closed := make(chan struct{}) s.mu.Lock() @@ -910,7 +908,7 @@ func (s *Server) addWatcher(c *sclient) { go c.requestMeshUpdate() } -func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error { +func (s *Server) accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error { br := brw.Reader nc.SetDeadline(time.Now().Add(10 * time.Second)) bw := &lazyBufioWriter{w: nc, lbw: brw.Writer} @@ -1619,7 +1617,7 @@ type sclient struct { // Static after construction. 
connNum int64 // process-wide unique counter, incremented each Accept s *Server - nc Conn + nc derp.Conn key key.NodePublic info derp.ClientInfo logf logger.Logf diff --git a/derp/derpserver/derp_server_default.go b/derp/derpserver/derpserver_default.go similarity index 100% rename from derp/derpserver/derp_server_default.go rename to derp/derpserver/derpserver_default.go diff --git a/derp/derpserver/derp_server_linux.go b/derp/derpserver/derpserver_linux.go similarity index 100% rename from derp/derpserver/derp_server_linux.go rename to derp/derpserver/derpserver_linux.go diff --git a/derp/derpserver/derpserver_test.go b/derp/derpserver/derpserver_test.go index 3f0ba2ec0cda3..2db5f25bc00b7 100644 --- a/derp/derpserver/derpserver_test.go +++ b/derp/derpserver/derpserver_test.go @@ -330,7 +330,7 @@ func TestMultiForwarder(t *testing.T) { func TestMetaCert(t *testing.T) { priv := key.NewNode() pub := priv.Public() - s := NewServer(priv, t.Logf) + s := New(priv, t.Logf) certBytes := s.MetaCert() cert, err := x509.ParseCertificate(certBytes) @@ -368,7 +368,7 @@ func TestServerDupClients(t *testing.T) { // run starts a new test case and resets clients back to their zero values. run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) { - s = NewServer(serverPriv, t.Logf) + s = New(serverPriv, t.Logf) s.dupPolicy = dupPolicy c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")} c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")} @@ -618,7 +618,7 @@ func TestLimiter(t *testing.T) { // single Server instance with multiple concurrent client flows. 
func BenchmarkConcurrentStreams(b *testing.B) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) + s := New(serverPrivateKey, logger.Discard) defer s.Close() ln, err := net.Listen("tcp", "127.0.0.1:0") @@ -688,7 +688,7 @@ func BenchmarkSendRecv(b *testing.B) { func benchmarkSendRecvSize(b *testing.B, packetSize int) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) + s := New(serverPrivateKey, logger.Discard) defer s.Close() k := key.NewNode() diff --git a/prober/derp_test.go b/prober/derp_test.go index 92bcb0a617020..08a65d6978f13 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -146,7 +146,7 @@ func TestDerpProber(t *testing.T) { func TestRunDerpProbeNodePair(t *testing.T) { // os.Setenv("DERP_DEBUG_LOGS", "true") serverPrivateKey := key.NewNode() - s := derpserver.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 56643f5d47114..3788f61495a08 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -296,7 +296,7 @@ func exe() string { func RunDERPAndSTUN(t testing.TB, logf logger.Logf, ipAddress string) (derpMap *tailcfg.DERPMap) { t.Helper() - d := derpserver.NewServer(key.NewNode(), logf) + d := derpserver.New(key.NewNode(), logf) ln, err := net.Listen("tcp", net.JoinHostPort(ipAddress, "0")) if err != nil { diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 27ee517268597..49d47f02937ae 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -611,7 +611,7 @@ func newDERPServer() *derpServer { ts.Close() ds := &derpServer{ - srv: derpserver.NewServer(key.NewNode(), logger.Discard), + srv: derpserver.New(key.NewNode(), logger.Discard), tlsConfig: ts.TLS, // self-signed; test client configure to not check } var mux http.ServeMux diff --git 
a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index de24a5f60cd3b..c6be9129db2cf 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -111,7 +111,7 @@ func (c *Conn) WaitReady(t testing.TB) { } func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { - d := derpserver.NewServer(key.NewNode(), logf) + d := derpserver.New(key.NewNode(), logf) httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv.Config.ErrorLog = logger.StdLogger(logf) From 0bd4f4729b150cbbca6364affb3073064d3d522a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 24 Sep 2025 15:14:21 -0700 Subject: [PATCH 0411/1093] ipn/ipnlocal: rename misnamed DisablePortMapperForTest to DisablePortPollerForTest I think this was originally a brain-o in 9380e2dfc61a720d. It's disabling the port _poller_, listing what open ports (i.e. services) are open, not PMP/PCP/UPnP port mapping. While there, drop in some more testenv.AssertInTest() in a few places. 
Updates #cleanup Change-Id: Ia6f755ad3544f855883b8a7bdcfc066e8649547b Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 11 ++++++----- ipn/ipnlocal/local_test.go | 2 +- ipn/ipnlocal/state_test.go | 2 +- ipn/lapitest/backend.go | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5c5fb034bc091..ef8fcab40d0e6 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1503,9 +1503,7 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { } func (b *LocalBackend) GetFilterForTest() *filter.Filter { - if !testenv.InTest() { - panic("GetFilterForTest called outside of test") - } + testenv.AssertInTest() nb := b.currentNode() return nb.filterAtomic.Load() } @@ -2304,9 +2302,10 @@ func (b *LocalBackend) SetControlClientGetterForTesting(newControlClient func(co b.ccGen = newControlClient } -// DisablePortMapperForTest disables the portmapper for tests. +// DisablePortPollerForTest disables the port list poller for tests. // It must be called before Start. -func (b *LocalBackend) DisablePortMapperForTest() { +func (b *LocalBackend) DisablePortPollerForTest() { + testenv.AssertInTest() b.mu.Lock() defer b.mu.Unlock() b.portpoll = nil @@ -2315,6 +2314,7 @@ func (b *LocalBackend) DisablePortMapperForTest() { // PeersForTest returns all the current peers, sorted by Node.ID, // for integration tests in another repo. func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { + testenv.AssertInTest() return b.currentNode().PeersForTest() } @@ -4030,6 +4030,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac // It is used for testing only, and will be removed along with the rest of the // "current user" functionality as we progress on the multi-user improvements (tailscale/corp#18342). 
func (b *LocalBackend) CurrentUserForTest() (ipn.WindowsUserID, ipnauth.Actor) { + testenv.AssertInTest() b.mu.Lock() defer b.mu.Unlock() return b.pm.CurrentUserID(), b.currentUser diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 0505e068b94d9..56d65767b4f66 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5816,7 +5816,7 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() + b.DisablePortPollerForTest() b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { return newControl(t, opts), nil diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 609a51c5bd657..1a32f31562f41 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -358,7 +358,7 @@ func TestStateMachine(t *testing.T) { t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() + b.DisablePortPollerForTest() var cc, previousCC *mockControl b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { diff --git a/ipn/lapitest/backend.go b/ipn/lapitest/backend.go index 6a83431f351b1..725ffa4de4cca 100644 --- a/ipn/lapitest/backend.go +++ b/ipn/lapitest/backend.go @@ -45,7 +45,7 @@ func newBackend(opts *options) *ipnlocal.LocalBackend { tb.Fatalf("NewLocalBackend: %v", err) } tb.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() + b.DisablePortPollerForTest() b.SetControlClientGetterForTesting(opts.MakeControlClient) return b } From 70400cb75f9738b7ee5bb260a8dddefbb929b4f4 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Wed, 24 Sep 2025 15:45:41 +0100 Subject: [PATCH 0412/1093] cmd/tailscale/cli: reduce strength of lose-ssh risk warning Ideally we would remove this warning entirely, as it is now possible to reauthenticate without losing connectivty. 
However, it is still possible to lose SSH connectivity if the user changes the ownership of the machine when they do a force-reauth, and we have no way of knowing if they are going to do that before they do it. For now, let's just reduce the strength of the warning to warn them that they "may" lose their connection, rather than they "will". Updates tailscale/corp#32429 Signed-off-by: James Sanderson --- cmd/tailscale/cli/up.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 12c26b21c5e2a..96b561bee8f79 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -385,7 +385,7 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus } if env.upArgs.forceReauth && isSSHOverTailscale() { - if err := presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action will result in your SSH session disconnecting.`, env.upArgs.acceptedRisks); err != nil { + if err := presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action may result in your SSH session disconnecting.`, env.upArgs.acceptedRisks); err != nil { return false, nil, err } } From e0a77cf41a52066dd42058828799c12320d4b9cf Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 15 Sep 2025 11:44:00 +0100 Subject: [PATCH 0413/1093] tstest/integration: expand the tests for `tailscale up` Expand the integration tests to cover a wider range of scenarios, including: * Before and after a successful initial login * Auth URLs and auth keys * With and without the `--force-reauth` flag * With and without seamless key renewal These tests expose a race condition when using `--force-reauth` on an already-logged in device. The command completes too quickly, preventing the auth URL from being displayed. This issue is identified and will be fixed in a separate commit. 
Updates #17108 Signed-off-by: Alex Chan --- tstest/integration/integration_test.go | 195 ++++++++++++++---- tstest/integration/testcontrol/testcontrol.go | 43 +++- 2 files changed, 193 insertions(+), 45 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 136004bc89ce8..f65ae1659ddbe 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -23,6 +23,7 @@ import ( "regexp" "runtime" "strconv" + "strings" "sync/atomic" "testing" "time" @@ -267,52 +268,168 @@ func TestStateSavedOnStart(t *testing.T) { } func TestOneNodeUpAuth(t *testing.T) { - tstest.Shard(t) - tstest.Parallel(t) - env := NewTestEnv(t, ConfigureControl(func(control *testcontrol.Server) { - control.RequireAuth = true - })) + for _, tt := range []struct { + name string + args []string + // + // What auth key should we use for control? + authKey string + // + // Is tailscaled already logged in before we run this `up` command? + alreadyLoggedIn bool + // + // Do we need to log in again with a new /auth/ URL? + needsNewAuthURL bool + }{ + { + name: "up", + args: []string{"up"}, + needsNewAuthURL: true, + }, + { + name: "up-with-force-reauth", + args: []string{"up", "--force-reauth"}, + needsNewAuthURL: true, + }, + { + name: "up-with-auth-key", + args: []string{"up", "--auth-key=opensesame"}, + authKey: "opensesame", + needsNewAuthURL: false, + }, + { + name: "up-with-force-reauth-and-auth-key", + args: []string{"up", "--force-reauth", "--auth-key=opensesame"}, + authKey: "opensesame", + needsNewAuthURL: false, + }, + { + name: "up-after-login", + args: []string{"up"}, + alreadyLoggedIn: true, + needsNewAuthURL: false, + }, + // TODO(alexc): This test is failing because of a bug in `tailscale up` where + // it waits for ipn to enter the "Running" state. If we're already logged in + // and running, this completes immediately, before we've had a chance to show + // the user the auth URL. 
+ // { + // name: "up-with-force-reauth-after-login", + // args: []string{"up", "--force-reauth"}, + // alreadyLoggedIn: true, + // needsNewAuthURL: true, + // }, + { + name: "up-with-auth-key-after-login", + args: []string{"up", "--auth-key=opensesame"}, + authKey: "opensesame", + alreadyLoggedIn: true, + needsNewAuthURL: false, + }, + { + name: "up-with-force-reauth-and-auth-key-after-login", + args: []string{"up", "--force-reauth", "--auth-key=opensesame"}, + authKey: "opensesame", + alreadyLoggedIn: true, + needsNewAuthURL: false, + }, + } { + tstest.Shard(t) + + for _, useSeamlessKeyRenewal := range []bool{true, false} { + tt := tt // subtests are run in parallel, rebind tt + t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) { + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + if tt.authKey != "" { + control.RequireAuthKey = tt.authKey + } else { + control.RequireAuth = true + } + + control.AllNodesSameUser = true + + if useSeamlessKeyRenewal { + control.DefaultNodeCapabilities = &tailcfg.NodeCapMap{ + tailcfg.NodeAttrSeamlessKeyRenewal: []tailcfg.RawMessage{}, + } + } + }, + )) + + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + defer d1.MustCleanShutdown(t) + + cmdArgs := append(tt.args, "--login-server="+env.ControlURL()) + + // This handler looks for /auth/ URLs in the stdout from "tailscale up", + // and if it sees them, completes the auth process. + // + // It counts how many auth URLs it's seen. 
+ var authCountAtomic atomic.Int32 + authURLHandler := &authURLParserWriter{fn: func(urlStr string) error { + t.Logf("saw auth URL %q", urlStr) + if env.Control.CompleteAuth(urlStr) { + if authCountAtomic.Add(1) > 1 { + err := errors.New("completed multiple auth URLs") + t.Error(err) + return err + } + t.Logf("completed login to %s", urlStr) + return nil + } else { + err := fmt.Errorf("Failed to complete initial login to %q", urlStr) + t.Fatal(err) + return err + } + }} + + // If we should be logged in at the start of the test case, go ahead + // and run the login command. + // + // Otherwise, just wait for tailscaled to be listening. + if tt.alreadyLoggedIn { + t.Logf("Running initial login: %s", strings.Join(cmdArgs, " ")) + cmd := n1.Tailscale(cmdArgs...) + cmd.Stdout = authURLHandler + cmd.Stderr = cmd.Stdout + if err := cmd.Run(); err != nil { + t.Fatalf("up: %v", err) + } + authCountAtomic.Store(0) + n1.AwaitRunning() + } else { + n1.AwaitListening() + } - n1 := NewTestNode(t, env) - d1 := n1.StartDaemon() + st := n1.MustStatus() + t.Logf("Status: %s", st.BackendState) - n1.AwaitListening() + t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) + cmd := n1.Tailscale(cmdArgs...) 
+ cmd.Stdout = authURLHandler + cmd.Stderr = cmd.Stdout - st := n1.MustStatus() - t.Logf("Status: %s", st.BackendState) + if err := cmd.Run(); err != nil { + t.Fatalf("up: %v", err) + } + t.Logf("Got IP: %v", n1.AwaitIP4()) - t.Logf("Running up --login-server=%s ...", env.ControlURL()) + n1.AwaitRunning() - cmd := n1.Tailscale("up", "--login-server="+env.ControlURL()) - var authCountAtomic atomic.Int32 - cmd.Stdout = &authURLParserWriter{fn: func(urlStr string) error { - t.Logf("saw auth URL %q", urlStr) - if env.Control.CompleteAuth(urlStr) { - if authCountAtomic.Add(1) > 1 { - err := errors.New("completed multple auth URLs") - t.Error(err) - return err - } - t.Logf("completed auth path %s", urlStr) - return nil + var expectedAuthUrls int32 + if tt.needsNewAuthURL { + expectedAuthUrls = 1 + } + if n := authCountAtomic.Load(); n != expectedAuthUrls { + t.Errorf("Auth URLs completed = %d; want %d", n, expectedAuthUrls) + } + }) } - err := fmt.Errorf("Failed to complete auth path to %q", urlStr) - t.Error(err) - return err - }} - cmd.Stderr = cmd.Stdout - if err := cmd.Run(); err != nil { - t.Fatalf("up: %v", err) - } - t.Logf("Got IP: %v", n1.AwaitIP4()) - - n1.AwaitRunning() - - if n := authCountAtomic.Load(); n != 1 { - t.Errorf("Auth URLs completed = %d; want 1", n) } - - d1.MustCleanShutdown(t) } func TestConfigFileAuthKey(t *testing.T) { diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 7ce7186e7426a..1d3b99f7a217d 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -66,6 +66,9 @@ type Server struct { // belong to the same user. AllNodesSameUser bool + // DefaultNodeCapabilities overrides the capability map sent to each client. + DefaultNodeCapabilities *tailcfg.NodeCapMap + // ExplicitBaseURL or HTTPTestServer must be set. ExplicitBaseURL string // e.g. 
"http://127.0.0.1:1234" with no trailing URL HTTPTestServer *httptest.Server // if non-nil, used to get BaseURL @@ -726,6 +729,25 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. // some follow-ups? For now all are successes. } + // The in-memory list of nodes, users, and logins is keyed by + // the node key. If the node key changes, update all the data stores + // to use the new node key. + s.mu.Lock() + if _, oldNodeKeyOk := s.nodes[req.OldNodeKey]; oldNodeKeyOk { + if _, newNodeKeyOk := s.nodes[req.NodeKey]; !newNodeKeyOk { + s.nodes[req.OldNodeKey].Key = req.NodeKey + s.nodes[req.NodeKey] = s.nodes[req.OldNodeKey] + + s.users[req.NodeKey] = s.users[req.OldNodeKey] + s.logins[req.NodeKey] = s.logins[req.OldNodeKey] + + delete(s.nodes, req.OldNodeKey) + delete(s.users, req.OldNodeKey) + delete(s.logins, req.OldNodeKey) + } + } + s.mu.Unlock() + nk := req.NodeKey user, login := s.getUser(nk) @@ -745,6 +767,19 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. v4Prefix, v6Prefix, } + + var capMap tailcfg.NodeCapMap + if s.DefaultNodeCapabilities != nil { + capMap = *s.DefaultNodeCapabilities + } else { + capMap = tailcfg.NodeCapMap{ + tailcfg.CapabilityHTTPS: []tailcfg.RawMessage{}, + tailcfg.NodeAttrFunnel: []tailcfg.RawMessage{}, + tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{}, + tailcfg.CapabilityFunnelPorts + "?ports=8080,443": []tailcfg.RawMessage{}, + } + } + node := &tailcfg.Node{ ID: tailcfg.NodeID(nodeID), StableID: tailcfg.StableNodeID(fmt.Sprintf("TESTCTRL%08x", int(nodeID))), @@ -757,12 +792,8 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. 
Hostinfo: req.Hostinfo.View(), Name: req.Hostinfo.Hostname, Cap: req.Version, - Capabilities: []tailcfg.NodeCapability{ - tailcfg.CapabilityHTTPS, - tailcfg.NodeAttrFunnel, - tailcfg.CapabilityFileSharing, - tailcfg.CapabilityFunnelPorts + "?ports=8080,443", - }, + CapMap: capMap, + Capabilities: slices.Collect(maps.Keys(capMap)), } s.nodes[nk] = node } From 0b27871860b1203e1c7c471bfecee6cb119c862f Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Thu, 25 Sep 2025 11:26:43 -0400 Subject: [PATCH 0414/1093] k8s-operator: add IPv6 support for DNS records (#16691) This change adds full IPv6 support to the Kubernetes operator's DNS functionality, enabling dual-stack and IPv6-only cluster support. Fixes #16633 Signed-off-by: Raj Singh --- cmd/k8s-nameserver/main.go | 122 +++++++++---- cmd/k8s-nameserver/main_test.go | 68 ++++++++ .../deploy/crds/tailscale.com_dnsconfigs.yaml | 1 - .../deploy/manifests/operator.yaml | 1 - cmd/k8s-operator/dnsrecords.go | 138 ++++++++++----- cmd/k8s-operator/dnsrecords_test.go | 165 +++++++++++++++++- k8s-operator/api.md | 1 - .../apis/v1alpha1/types_tsdnsconfig.go | 1 - k8s-operator/utils.go | 5 + 9 files changed, 408 insertions(+), 94 deletions(-) diff --git a/cmd/k8s-nameserver/main.go b/cmd/k8s-nameserver/main.go index ca4b449358083..84e65452d2334 100644 --- a/cmd/k8s-nameserver/main.go +++ b/cmd/k8s-nameserver/main.go @@ -31,6 +31,9 @@ const ( tsNetDomain = "ts.net" // addr is the the address that the UDP and TCP listeners will listen on. addr = ":1053" + // defaultTTL is the default TTL for DNS records in seconds. + // Set to 0 to disable caching. Can be increased when usage patterns are better understood. + defaultTTL = 0 // The following constants are specific to the nameserver configuration // provided by a mounted Kubernetes Configmap. 
The Configmap mounted at @@ -39,9 +42,9 @@ const ( kubeletMountedConfigLn = "..data" ) -// nameserver is a simple nameserver that responds to DNS queries for A records +// nameserver is a simple nameserver that responds to DNS queries for A and AAAA records // for ts.net domain names over UDP or TCP. It serves DNS responses from -// in-memory IPv4 host records. It is intended to be deployed on Kubernetes with +// in-memory IPv4 and IPv6 host records. It is intended to be deployed on Kubernetes with // a ConfigMap mounted at /config that should contain the host records. It // dynamically reconfigures its in-memory mappings as the contents of the // mounted ConfigMap changes. @@ -56,10 +59,13 @@ type nameserver struct { // in-memory records. configWatcher <-chan string - mu sync.Mutex // protects following + mu sync.RWMutex // protects following // ip4 are the in-memory hostname -> IP4 mappings that the nameserver // uses to respond to A record queries. ip4 map[dnsname.FQDN][]net.IP + // ip6 are the in-memory hostname -> IP6 mappings that the nameserver + // uses to respond to AAAA record queries. + ip6 map[dnsname.FQDN][]net.IP } func main() { @@ -98,16 +104,13 @@ func main() { tcpSig <- s // stop the TCP listener } -// handleFunc is a DNS query handler that can respond to A record queries from +// handleFunc is a DNS query handler that can respond to A and AAAA record queries from // the nameserver's in-memory records. -// - If an A record query is received and the -// nameserver's in-memory records contain records for the queried domain name, -// return a success response. -// - If an A record query is received, but the -// nameserver's in-memory records do not contain records for the queried domain name, -// return NXDOMAIN. -// - If an A record query is received, but the queried domain name is not valid, return Format Error. -// - If a query is received for any other record type than A, return Not Implemented. 
+// - For A queries: returns IPv4 addresses if available, NXDOMAIN if the name doesn't exist +// - For AAAA queries: returns IPv6 addresses if available, NOERROR with no data if only +// IPv4 exists (per RFC 4074), or NXDOMAIN if the name doesn't exist at all +// - For invalid domain names: returns Format Error +// - For other record types: returns Not Implemented func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { h := func(w dns.ResponseWriter, r *dns.Msg) { m := new(dns.Msg) @@ -135,35 +138,19 @@ func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { m.RecursionAvailable = false ips := n.lookupIP4(fqdn) - if ips == nil || len(ips) == 0 { + if len(ips) == 0 { // As we are the authoritative nameserver for MagicDNS // names, if we do not have a record for this MagicDNS // name, it does not exist. m = m.SetRcode(r, dns.RcodeNameError) return } - // TODO (irbekrm): TTL is currently set to 0, meaning - // that cluster workloads will not cache the DNS - // records. Revisit this in future when we understand - // the usage patterns better- is it putting too much - // load on kube DNS server or is this fine? for _, ip := range ips { - rr := &dns.A{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}, A: ip} + rr := &dns.A{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: defaultTTL}, A: ip} m.SetRcode(r, dns.RcodeSuccess) m.Answer = append(m.Answer, rr) } case dns.TypeAAAA: - // TODO (irbekrm): add IPv6 support. - // The nameserver currently does not support IPv6 - // (records are not being created for IPv6 Pod addresses). - // However, we can expect that some callers will - // nevertheless send AAAA queries. - // We have to return NOERROR if a query is received for - // an AAAA record for a DNS name that we have an A - // record for- else the caller might not follow with an - // A record query. 
- // https://github.com/tailscale/tailscale/issues/12321 - // https://datatracker.ietf.org/doc/html/rfc4074 q := r.Question[0].Name fqdn, err := dnsname.ToFQDN(q) if err != nil { @@ -174,14 +161,27 @@ func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { // single source of truth for MagicDNS names by // non-tailnet Kubernetes workloads. m.Authoritative = true - ips := n.lookupIP4(fqdn) - if len(ips) == 0 { + m.RecursionAvailable = false + + ips := n.lookupIP6(fqdn) + // Also check if we have IPv4 records to determine correct response code. + // If the name exists (has A records) but no AAAA records, we return NOERROR + // per RFC 4074. If the name doesn't exist at all, we return NXDOMAIN. + ip4s := n.lookupIP4(fqdn) + + if len(ips) == 0 && len(ip4s) == 0 { // As we are the authoritative nameserver for MagicDNS - // names, if we do not have a record for this MagicDNS + // names, if we do not have any record for this MagicDNS // name, it does not exist. m = m.SetRcode(r, dns.RcodeNameError) return } + + // Return IPv6 addresses if available + for _, ip := range ips { + rr := &dns.AAAA{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: defaultTTL}, AAAA: ip} + m.Answer = append(m.Answer, rr) + } m.SetRcode(r, dns.RcodeSuccess) default: log.Printf("[unexpected] nameserver received a query for an unsupported record type: %s", r.Question[0].String()) @@ -231,10 +231,11 @@ func (n *nameserver) resetRecords() error { log.Printf("error reading nameserver's configuration: %v", err) return err } - if dnsCfgBytes == nil || len(dnsCfgBytes) < 1 { + if len(dnsCfgBytes) == 0 { log.Print("nameserver's configuration is empty, any in-memory records will be unset") n.mu.Lock() n.ip4 = make(map[dnsname.FQDN][]net.IP) + n.ip6 = make(map[dnsname.FQDN][]net.IP) n.mu.Unlock() return nil } @@ -249,30 +250,63 @@ func (n *nameserver) resetRecords() error { } ip4 := make(map[dnsname.FQDN][]net.IP) + ip6 := make(map[dnsname.FQDN][]net.IP) defer 
func() { n.mu.Lock() defer n.mu.Unlock() n.ip4 = ip4 + n.ip6 = ip6 }() - if len(dnsCfg.IP4) == 0 { + if len(dnsCfg.IP4) == 0 && len(dnsCfg.IP6) == 0 { log.Print("nameserver's configuration contains no records, any in-memory records will be unset") return nil } + // Process IPv4 records for fqdn, ips := range dnsCfg.IP4 { fqdn, err := dnsname.ToFQDN(fqdn) if err != nil { log.Printf("invalid nameserver's configuration: %s is not a valid FQDN: %v; skipping this record", fqdn, err) continue // one invalid hostname should not break the whole nameserver } + var validIPs []net.IP for _, ipS := range ips { ip := net.ParseIP(ipS).To4() if ip == nil { // To4 returns nil if IP is not a IPv4 address log.Printf("invalid nameserver's configuration: %v does not appear to be an IPv4 address; skipping this record", ipS) continue // one invalid IP address should not break the whole nameserver } - ip4[fqdn] = []net.IP{ip} + validIPs = append(validIPs, ip) + } + if len(validIPs) > 0 { + ip4[fqdn] = validIPs + } + } + + // Process IPv6 records + for fqdn, ips := range dnsCfg.IP6 { + fqdn, err := dnsname.ToFQDN(fqdn) + if err != nil { + log.Printf("invalid nameserver's configuration: %s is not a valid FQDN: %v; skipping this record", fqdn, err) + continue // one invalid hostname should not break the whole nameserver + } + var validIPs []net.IP + for _, ipS := range ips { + ip := net.ParseIP(ipS) + if ip == nil { + log.Printf("invalid nameserver's configuration: %v does not appear to be a valid IP address; skipping this record", ipS) + continue + } + // Check if it's a valid IPv6 address + if ip.To4() != nil { + log.Printf("invalid nameserver's configuration: %v appears to be IPv4 but was in IPv6 records; skipping this record", ipS) + continue + } + validIPs = append(validIPs, ip.To16()) + } + if len(validIPs) > 0 { + ip6[fqdn] = validIPs } } return nil @@ -372,8 +406,20 @@ func (n *nameserver) lookupIP4(fqdn dnsname.FQDN) []net.IP { if n.ip4 == nil { return nil } - n.mu.Lock() - defer 
n.mu.Unlock() + n.mu.RLock() + defer n.mu.RUnlock() f := n.ip4[fqdn] return f } + +// lookupIP6 returns any IPv6 addresses for the given FQDN from nameserver's +// in-memory records. +func (n *nameserver) lookupIP6(fqdn dnsname.FQDN) []net.IP { + if n.ip6 == nil { + return nil + } + n.mu.RLock() + defer n.mu.RUnlock() + f := n.ip6[fqdn] + return f +} diff --git a/cmd/k8s-nameserver/main_test.go b/cmd/k8s-nameserver/main_test.go index d9a33c4faffe5..bca010048664a 100644 --- a/cmd/k8s-nameserver/main_test.go +++ b/cmd/k8s-nameserver/main_test.go @@ -19,6 +19,7 @@ func TestNameserver(t *testing.T) { tests := []struct { name string ip4 map[dnsname.FQDN][]net.IP + ip6 map[dnsname.FQDN][]net.IP query *dns.Msg wantResp *dns.Msg }{ @@ -112,6 +113,49 @@ func TestNameserver(t *testing.T) { Authoritative: true, }}, }, + { + name: "AAAA record query with IPv6 record", + ip6: map[dnsname.FQDN][]net.IP{dnsname.FQDN("foo.bar.com."): {net.ParseIP("2001:db8::1")}}, + query: &dns.Msg{ + Question: []dns.Question{{Name: "foo.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{Id: 1, RecursionDesired: true}, + }, + wantResp: &dns.Msg{ + Answer: []dns.RR{&dns.AAAA{Hdr: dns.RR_Header{ + Name: "foo.bar.com", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}, + AAAA: net.ParseIP("2001:db8::1")}}, + Question: []dns.Question{{Name: "foo.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{ + Id: 1, + Rcode: dns.RcodeSuccess, + RecursionAvailable: false, + RecursionDesired: true, + Response: true, + Opcode: dns.OpcodeQuery, + Authoritative: true, + }}, + }, + { + name: "Dual-stack: both A and AAAA records exist", + ip4: map[dnsname.FQDN][]net.IP{dnsname.FQDN("dual.bar.com."): {{10, 0, 0, 1}}}, + ip6: map[dnsname.FQDN][]net.IP{dnsname.FQDN("dual.bar.com."): {net.ParseIP("2001:db8::1")}}, + query: &dns.Msg{ + Question: []dns.Question{{Name: "dual.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{Id: 1}, + }, + wantResp: &dns.Msg{ + Answer: []dns.RR{&dns.AAAA{Hdr: dns.RR_Header{ + Name: 
"dual.bar.com", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}, + AAAA: net.ParseIP("2001:db8::1")}}, + Question: []dns.Question{{Name: "dual.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{ + Id: 1, + Rcode: dns.RcodeSuccess, + Response: true, + Opcode: dns.OpcodeQuery, + Authoritative: true, + }}, + }, { name: "CNAME record query", ip4: map[dnsname.FQDN][]net.IP{dnsname.FQDN("foo.bar.com."): {{1, 2, 3, 4}}}, @@ -133,6 +177,7 @@ func TestNameserver(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ns := &nameserver{ ip4: tt.ip4, + ip6: tt.ip6, } handler := ns.handleFunc() fakeRespW := &fakeResponseWriter{} @@ -149,43 +194,63 @@ func TestResetRecords(t *testing.T) { name string config []byte hasIp4 map[dnsname.FQDN][]net.IP + hasIp6 map[dnsname.FQDN][]net.IP wantsIp4 map[dnsname.FQDN][]net.IP + wantsIp6 map[dnsname.FQDN][]net.IP wantsErr bool }{ { name: "previously empty nameserver.ip4 gets set", config: []byte(`{"version": "v1alpha1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {{1, 2, 3, 4}}}, + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "nameserver.ip4 gets reset", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1alpha1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {{1, 2, 3, 4}}}, + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "configuration with incompatible version", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1beta1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, + wantsIp6: nil, wantsErr: true, }, { name: "nameserver.ip4 gets reset to empty config when no configuration is provided", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "nameserver.ip4 gets 
reset to empty config when the provided configuration is empty", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1alpha1", "ip4": {}}`), wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: make(map[dnsname.FQDN][]net.IP), + }, + { + name: "nameserver.ip6 gets set", + config: []byte(`{"version": "v1alpha1", "ip6": {"foo.bar.com": ["2001:db8::1"]}}`), + wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {net.ParseIP("2001:db8::1")}}, + }, + { + name: "dual-stack configuration", + config: []byte(`{"version": "v1alpha1", "ip4": {"dual.bar.com": ["10.0.0.1"]}, "ip6": {"dual.bar.com": ["2001:db8::1"]}}`), + wantsIp4: map[dnsname.FQDN][]net.IP{"dual.bar.com.": {{10, 0, 0, 1}}}, + wantsIp6: map[dnsname.FQDN][]net.IP{"dual.bar.com.": {net.ParseIP("2001:db8::1")}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ns := &nameserver{ ip4: tt.hasIp4, + ip6: tt.hasIp6, configReader: func() ([]byte, error) { return tt.config, nil }, } if err := ns.resetRecords(); err == nil == tt.wantsErr { @@ -194,6 +259,9 @@ func TestResetRecords(t *testing.T) { if diff := cmp.Diff(ns.ip4, tt.wantsIp4); diff != "" { t.Fatalf("unexpected nameserver.ip4 contents (-got +want): \n%s", diff) } + if diff := cmp.Diff(ns.ip6, tt.wantsIp6); diff != "" { + t.Fatalf("unexpected nameserver.ip6 contents (-got +want): \n%s", diff) + } }) } } diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index bffad47f97191..b047e11a7e017 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -52,7 +52,6 @@ spec: using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. 
- NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. type: object required: - spec diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 766d7f0d647a9..8b3c206c8a093 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -390,7 +390,6 @@ spec: using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. - NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. properties: apiVersion: description: |- diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go index 54c1584c6731e..1a9395aa00aa9 100644 --- a/cmd/k8s-operator/dnsrecords.go +++ b/cmd/k8s-operator/dnsrecords.go @@ -40,10 +40,10 @@ const ( // dnsRecordsReconciler knows how to update dnsrecords ConfigMap with DNS // records. // The records that it creates are: -// - For tailscale Ingress, a mapping of the Ingress's MagicDNSName to the IP address of -// the ingress proxy Pod. +// - For tailscale Ingress, a mapping of the Ingress's MagicDNSName to the IP addresses +// (both IPv4 and IPv6) of the ingress proxy Pod. // - For egress proxies configured via tailscale.com/tailnet-fqdn annotation, a -// mapping of the tailnet FQDN to the IP address of the egress proxy Pod. +// mapping of the tailnet FQDN to the IP addresses (both IPv4 and IPv6) of the egress proxy Pod. // // Records will only be created if there is exactly one ready // tailscale.com/v1alpha1.DNSConfig instance in the cluster (so that we know @@ -122,16 +122,16 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. 
// For Ingress, the record is a mapping between the MagicDNSName of the Ingress, retrieved from // ingress.status.loadBalancer.ingress.hostname field and the proxy Pod IP addresses // retrieved from the EndpointSlice associated with this Service, i.e -// Records{IP4: : <[IPs of the ingress proxy Pods]>} +// Records{IP4: {: <[IPv4 addresses]>}, IP6: {: <[IPv6 addresses]>}} // // For egress, the record is a mapping between tailscale.com/tailnet-fqdn // annotation and the proxy Pod IP addresses, retrieved from the EndpointSlice // associated with this Service, i.e -// Records{IP4: {: <[IPs of the egress proxy Pods]>} +// Records{IP4: {: <[IPv4 addresses]>}, IP6: {: <[IPv6 addresses]>}} // // For ProxyGroup egress, the record is a mapping between tailscale.com/magic-dnsname -// annotation and the ClusterIP Service IP (which provides portmapping), i.e -// Records{IP4: {: <[ClusterIP Service IP]>} +// annotation and the ClusterIP Service IPs (which provides portmapping), i.e +// Records{IP4: {: <[IPv4 ClusterIPs]>}, IP6: {: <[IPv6 ClusterIPs]>}} // // If records need to be created for this proxy, maybeProvision will also: // - update the Service with a tailscale.com/magic-dnsname annotation @@ -178,17 +178,22 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, proxySvc } // Get the IP addresses for the DNS record - ips, err := dnsRR.getTargetIPs(ctx, proxySvc, logger) + ip4s, ip6s, err := dnsRR.getTargetIPs(ctx, proxySvc, logger) if err != nil { return fmt.Errorf("error getting target IPs: %w", err) } - if len(ips) == 0 { + if len(ip4s) == 0 && len(ip6s) == 0 { logger.Debugf("No target IP addresses available yet. 
We will reconcile again once they are available.") return nil } updateFunc := func(rec *operatorutils.Records) { - mak.Set(&rec.IP4, fqdn, ips) + if len(ip4s) > 0 { + mak.Set(&rec.IP4, fqdn, ip4s) + } + if len(ip6s) > 0 { + mak.Set(&rec.IP6, fqdn, ip6s) + } } if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS records: %w", err) @@ -212,42 +217,45 @@ func epIsReady(ep *discoveryv1.Endpoint) bool { // has been removed from the Service. If the record is not found in the // ConfigMap, the ConfigMap does not exist, or the Service does not have // tailscale.com/magic-dnsname annotation, just remove the finalizer. -func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { +func (dnsRR *dnsRecordsReconciler) maybeCleanup(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { ix := slices.Index(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) if ix == -1 { logger.Debugf("no finalizer, nothing to do") return nil } cm := &corev1.ConfigMap{} - err := h.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: h.tsNamespace}, cm) + err := dnsRR.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: dnsRR.tsNamespace}, cm) if apierrors.IsNotFound(err) { logger.Debug("'dnsrecords' ConfigMap not found") - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } if err != nil { return fmt.Errorf("error retrieving 'dnsrecords' ConfigMap: %w", err) } if cm.Data == nil { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } _, ok := cm.Data[operatorutils.DNSRecordsCMKey] if !ok { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeProxySvcFinalizer(ctx, proxySvc) + return 
dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } - fqdn, _ := proxySvc.GetAnnotations()[annotationTSMagicDNSName] + fqdn := proxySvc.GetAnnotations()[annotationTSMagicDNSName] if fqdn == "" { - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } logger.Infof("removing DNS record for MagicDNS name %s", fqdn) updateFunc := func(rec *operatorutils.Records) { delete(rec.IP4, fqdn) + if rec.IP6 != nil { + delete(rec.IP6, fqdn) + } } - if err = h.updateDNSConfig(ctx, updateFunc); err != nil { + if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS config: %w", err) } - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } func (dnsRR *dnsRecordsReconciler) removeProxySvcFinalizer(ctx context.Context, proxySvc *corev1.Service) error { @@ -383,72 +391,106 @@ func (dnsRR *dnsRecordsReconciler) parentSvcTargetsFQDN(ctx context.Context, svc return parentSvc.Annotations[AnnotationTailnetTargetFQDN] != "" } -// getTargetIPs returns the IP addresses that should be used for DNS records +// getTargetIPs returns the IPv4 and IPv6 addresses that should be used for DNS records // for the given proxy Service. -func (dnsRR *dnsRecordsReconciler) getTargetIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { +func (dnsRR *dnsRecordsReconciler) getTargetIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { if dnsRR.isProxyGroupEgressService(proxySvc) { return dnsRR.getClusterIPServiceIPs(proxySvc, logger) } return dnsRR.getPodIPs(ctx, proxySvc, logger) } -// getClusterIPServiceIPs returns the ClusterIP of a ProxyGroup egress Service. -func (dnsRR *dnsRecordsReconciler) getClusterIPServiceIPs(proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { +// getClusterIPServiceIPs returns the ClusterIPs of a ProxyGroup egress Service. 
+// It separates IPv4 and IPv6 addresses for dual-stack services. +func (dnsRR *dnsRecordsReconciler) getClusterIPServiceIPs(proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { + // Handle services with no ClusterIP if proxySvc.Spec.ClusterIP == "" || proxySvc.Spec.ClusterIP == "None" { logger.Debugf("ProxyGroup egress ClusterIP Service does not have a ClusterIP yet.") - return nil, nil + return nil, nil, nil + } + + var ip4s, ip6s []string + + // Check all ClusterIPs for dual-stack support + clusterIPs := proxySvc.Spec.ClusterIPs + if len(clusterIPs) == 0 && proxySvc.Spec.ClusterIP != "" { + // Fallback to single ClusterIP for backward compatibility + clusterIPs = []string{proxySvc.Spec.ClusterIP} } - // Validate that ClusterIP is a valid IPv4 address - if !net.IsIPv4String(proxySvc.Spec.ClusterIP) { - logger.Debugf("ClusterIP %s is not a valid IPv4 address", proxySvc.Spec.ClusterIP) - return nil, fmt.Errorf("ClusterIP %s is not a valid IPv4 address", proxySvc.Spec.ClusterIP) + + for _, ip := range clusterIPs { + if net.IsIPv4String(ip) { + ip4s = append(ip4s, ip) + logger.Debugf("Using IPv4 ClusterIP %s for ProxyGroup egress DNS record", ip) + } else if net.IsIPv6String(ip) { + ip6s = append(ip6s, ip) + logger.Debugf("Using IPv6 ClusterIP %s for ProxyGroup egress DNS record", ip) + } else { + logger.Debugf("ClusterIP %s is not a valid IP address", ip) + } } - logger.Debugf("Using ClusterIP Service IP %s for ProxyGroup egress DNS record", proxySvc.Spec.ClusterIP) - return []string{proxySvc.Spec.ClusterIP}, nil + + if len(ip4s) == 0 && len(ip6s) == 0 { + return nil, nil, fmt.Errorf("no valid ClusterIPs found") + } + + return ip4s, ip6s, nil } -// getPodIPs returns Pod IP addresses from EndpointSlices for non-ProxyGroup Services. 
-func (dnsRR *dnsRecordsReconciler) getPodIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { +// getPodIPs returns Pod IPv4 and IPv6 addresses from EndpointSlices for non-ProxyGroup Services. +func (dnsRR *dnsRecordsReconciler) getPodIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { // Get the Pod IP addresses for the proxy from the EndpointSlices for // the headless Service. The Service can have multiple EndpointSlices // associated with it, for example in dual-stack clusters. labels := map[string]string{discoveryv1.LabelServiceName: proxySvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership var eps = new(discoveryv1.EndpointSliceList) if err := dnsRR.List(ctx, eps, client.InNamespace(dnsRR.tsNamespace), client.MatchingLabels(labels)); err != nil { - return nil, fmt.Errorf("error listing EndpointSlices for the proxy's Service: %w", err) + return nil, nil, fmt.Errorf("error listing EndpointSlices for the proxy's Service: %w", err) } if len(eps.Items) == 0 { logger.Debugf("proxy's Service EndpointSlice does not yet exist.") - return nil, nil + return nil, nil, nil } // Each EndpointSlice for a Service can have a list of endpoints that each // can have multiple addresses - these are the IP addresses of any Pods - // selected by that Service. Pick all the IPv4 addresses. + // selected by that Service. Separate IPv4 and IPv6 addresses. // It is also possible that multiple EndpointSlices have overlapping addresses. 
// https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#duplicate-endpoints - ips := make(set.Set[string], 0) + ip4s := make(set.Set[string], 0) + ip6s := make(set.Set[string], 0) for _, slice := range eps.Items { - if slice.AddressType != discoveryv1.AddressTypeIPv4 { - logger.Infof("EndpointSlice is for AddressType %s, currently only IPv4 address type is supported", slice.AddressType) - continue - } for _, ep := range slice.Endpoints { if !epIsReady(&ep) { logger.Debugf("Endpoint with addresses %v appears not ready to receive traffic %v", ep.Addresses, ep.Conditions.String()) continue } for _, ip := range ep.Addresses { - if !net.IsIPv4String(ip) { - logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. Currently only IPv4 is supported", ip) - } else { - ips.Add(ip) + switch slice.AddressType { + case discoveryv1.AddressTypeIPv4: + if net.IsIPv4String(ip) { + ip4s.Add(ip) + } else { + logger.Debugf("EndpointSlice with AddressType IPv4 contains non-IPv4 address %q, ignoring", ip) + } + case discoveryv1.AddressTypeIPv6: + if net.IsIPv6String(ip) { + // Strip zone ID if present (e.g., fe80::1%eth0 -> fe80::1) + if idx := strings.IndexByte(ip, '%'); idx != -1 { + ip = ip[:idx] + } + ip6s.Add(ip) + } else { + logger.Debugf("EndpointSlice with AddressType IPv6 contains non-IPv6 address %q, ignoring", ip) + } + default: + logger.Debugf("EndpointSlice is for unsupported AddressType %s, skipping", slice.AddressType) } } } } - if ips.Len() == 0 { - logger.Debugf("EndpointSlice for the Service contains no IPv4 addresses.") - return nil, nil + if ip4s.Len() == 0 && ip6s.Len() == 0 { + logger.Debugf("EndpointSlice for the Service contains no IP addresses.") + return nil, nil, nil } - return ips.Slice(), nil + return ip4s.Slice(), ip6s.Slice(), nil } diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 51dfb90497ff7..13898078fd4ba 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ 
b/cmd/k8s-operator/dnsrecords_test.go @@ -99,8 +99,9 @@ func TestDNSRecordsReconciler(t *testing.T) { mustCreate(t, fc, epv6) expectReconciled(t, dnsRR, "tailscale", "egress-fqdn") // dns-records-reconciler reconcile the headless Service // ConfigMap should now have a record for foo.bar.ts.net -> 10.8.8.7 - wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}} // IPv6 endpoint is currently ignored - expectHostsRecords(t, fc, wantHosts) + wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}} + wantHostsIPv6 := map[string][]string{"foo.bar.ts.net": {"2600:1900:4011:161:0:d:0:d"}} + expectHostsRecordsWithIPv6(t, fc, wantHosts, wantHostsIPv6) // 2. DNS record is updated if tailscale.com/tailnet-fqdn annotation's // value changes @@ -271,17 +272,148 @@ func TestDNSRecordsReconcilerErrorCases(t *testing.T) { // Test invalid IP format testSvc.Spec.ClusterIP = "invalid-ip" - _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + _, _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) if err == nil { t.Error("expected error for invalid IP format") } // Test valid IP testSvc.Spec.ClusterIP = "10.0.100.50" - _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + ip4s, ip6s, err := dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) if err != nil { t.Errorf("unexpected error for valid IP: %v", err) } + if len(ip4s) != 1 || ip4s[0] != "10.0.100.50" { + t.Errorf("expected IPv4 address 10.0.100.50, got %v", ip4s) + } + if len(ip6s) != 0 { + t.Errorf("expected no IPv6 addresses, got %v", ip6s) + } +} + +func TestDNSRecordsReconcilerDualStack(t *testing.T) { + // Test dual-stack (IPv4 and IPv6) scenarios + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + // Preconfigure cluster with DNSConfig + dnsCfg := &tsapi.DNSConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + TypeMeta: metav1.TypeMeta{Kind: "DNSConfig"}, + Spec: tsapi.DNSConfigSpec{Nameserver: &tsapi.Nameserver{}}, + } + dnsCfg.Status.Conditions = 
append(dnsCfg.Status.Conditions, metav1.Condition{ + Type: string(tsapi.NameserverReady), + Status: metav1.ConditionTrue, + }) + + // Create dual-stack ingress + ing := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dual-stack-ingress", + Namespace: "test", + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + }, + Status: networkingv1.IngressStatus{ + LoadBalancer: networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ + {Hostname: "dual-stack.example.ts.net"}, + }, + }, + }, + } + + headlessSvc := headlessSvcForParent(ing, "ingress") + headlessSvc.Name = "ts-dual-stack-ingress" + headlessSvc.SetLabels(map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: "dual-stack-ingress", + LabelParentNamespace: "test", + LabelParentType: "ingress", + }) + + // Create both IPv4 and IPv6 endpoints + epv4 := endpointSliceForService(headlessSvc, "10.1.2.3", discoveryv1.AddressTypeIPv4) + epv6 := endpointSliceForService(headlessSvc, "2001:db8::1", discoveryv1.AddressTypeIPv6) + + dnsRRDualStack := &dnsRecordsReconciler{ + tsNamespace: "tailscale", + logger: zl.Sugar(), + } + + // Create the dnsrecords ConfigMap + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: operatorutils.DNSRecordsCMName, + Namespace: "tailscale", + }, + } + + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(dnsCfg, ing, headlessSvc, epv4, epv6, cm). + WithStatusSubresource(dnsCfg). 
+ Build() + + dnsRRDualStack.Client = fc + + // Test dual-stack service records + expectReconciled(t, dnsRRDualStack, "tailscale", "ts-dual-stack-ingress") + + wantIPv4 := map[string][]string{"dual-stack.example.ts.net": {"10.1.2.3"}} + wantIPv6 := map[string][]string{"dual-stack.example.ts.net": {"2001:db8::1"}} + expectHostsRecordsWithIPv6(t, fc, wantIPv4, wantIPv6) + + // Test ProxyGroup with dual-stack ClusterIPs + // First create parent service + parentEgressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pg-service", + Namespace: "tailscale", + Annotations: map[string]string{ + AnnotationTailnetTargetFQDN: "pg-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "unused", + }, + } + + proxyGroupSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ts-proxygroup-dualstack", + Namespace: "tailscale", + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + labelProxyGroup: "test-pg", + labelSvcType: typeEgress, + LabelParentName: "pg-service", + LabelParentNamespace: "tailscale", + LabelParentType: "svc", + }, + Annotations: map[string]string{ + annotationTSMagicDNSName: "pg-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: "10.96.0.100", + ClusterIPs: []string{"10.96.0.100", "2001:db8::100"}, + }, + } + + mustCreate(t, fc, parentEgressSvc) + mustCreate(t, fc, proxyGroupSvc) + expectReconciled(t, dnsRRDualStack, "tailscale", "ts-proxygroup-dualstack") + + wantIPv4["pg-service.example.ts.net"] = []string{"10.96.0.100"} + wantIPv6["pg-service.example.ts.net"] = []string{"2001:db8::100"} + expectHostsRecordsWithIPv6(t, fc, wantIPv4, wantIPv6) } func headlessSvcForParent(o client.Object, typ string) *corev1.Service { @@ -344,3 +476,28 @@ func expectHostsRecords(t *testing.T, cl client.Client, wantsHosts map[string][] t.Fatalf("unexpected dns config (-got +want):\n%s", diff) } } + +func 
expectHostsRecordsWithIPv6(t *testing.T, cl client.Client, wantsHostsIPv4, wantsHostsIPv6 map[string][]string) { + t.Helper() + cm := new(corev1.ConfigMap) + if err := cl.Get(context.Background(), types.NamespacedName{Name: "dnsrecords", Namespace: "tailscale"}, cm); err != nil { + t.Fatalf("getting dnsconfig ConfigMap: %v", err) + } + if cm.Data == nil { + t.Fatal("dnsconfig ConfigMap has no data") + } + dnsConfigString, ok := cm.Data[operatorutils.DNSRecordsCMKey] + if !ok { + t.Fatal("dnsconfig ConfigMap does not contain dnsconfig") + } + dnsConfig := &operatorutils.Records{} + if err := json.Unmarshal([]byte(dnsConfigString), dnsConfig); err != nil { + t.Fatalf("unmarshaling dnsconfig: %v", err) + } + if diff := cmp.Diff(dnsConfig.IP4, wantsHostsIPv4); diff != "" { + t.Fatalf("unexpected IPv4 dns config (-got +want):\n%s", diff) + } + if diff := cmp.Diff(dnsConfig.IP6, wantsHostsIPv6); diff != "" { + t.Fatalf("unexpected IPv6 dns config (-got +want):\n%s", diff) + } +} diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 79c8469e11bbc..180231bfaf4a0 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -213,7 +213,6 @@ NB: if you want cluster workloads to be able to refer to Tailscale Ingress using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. -NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. 
diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 0e26ee6476d7a..0b0f1eb5ca137 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -45,7 +45,6 @@ var DNSConfigKind = "DNSConfig" // using its MagicDNS name, you must also annotate the Ingress resource with // tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to // ensure that the proxy created for the Ingress listens on its Pod IP address. -// NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. type DNSConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go index 420d7e49c7ec2..2acbf338dbdd3 100644 --- a/k8s-operator/utils.go +++ b/k8s-operator/utils.go @@ -27,6 +27,11 @@ type Records struct { Version string `json:"version"` // IP4 contains a mapping of DNS names to IPv4 address(es). IP4 map[string][]string `json:"ip4"` + // IP6 contains a mapping of DNS names to IPv6 address(es). + // This field is optional and will be omitted from JSON if empty. + // It enables dual-stack DNS support in Kubernetes clusters. 
+ // +optional + IP6 map[string][]string `json:"ip6,omitempty"` } // TailscaledConfigFileName returns a tailscaled config file name in From a40f23ad4a851d20abb6d339db3b82b8c6567a26 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 25 Sep 2025 09:39:27 -0700 Subject: [PATCH 0415/1093] util/eventbus: flesh out docs a bit Updates #cleanup Change-Id: Ia6b0e4b0426be1dd10a777aff0a81d4dd6b69b01 Signed-off-by: Brad Fitzpatrick --- util/eventbus/bus.go | 2 +- util/eventbus/client.go | 2 +- util/eventbus/publish.go | 4 ++++ util/eventbus/subscribe.go | 6 +++++- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index e5bf7329a67ee..d1507d8e67587 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -77,7 +77,7 @@ func (b *Bus) Debugger() *Debugger { return &Debugger{b} } -// Close closes the bus. Implicitly closes all clients, publishers and +// Close closes the bus. It implicitly closes all clients, publishers and // subscribers attached to the bus. // // Close blocks until the bus is fully shut down. The bus is diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 9b4119865ebb9..7c02688860861 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -29,7 +29,7 @@ type Client struct { func (c *Client) Name() string { return c.name } -// Close closes the client. Implicitly closes all publishers and +// Close closes the client. It implicitly closes all publishers and // subscribers obtained from this client. func (c *Client) Close() { var ( diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index 4a4bdfb7eda11..348bb9dff950c 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -27,6 +27,10 @@ func newPublisher[T any](c *Client) *Publisher[T] { // Close closes the publisher. // // Calls to Publish after Close silently do nothing. 
+// +// If the Bus or Client from which the Publisher was created is closed, +// the Publisher is implicitly closed and does not need to be closed +// separately. func (p *Publisher[T]) Close() { // Just unblocks any active calls to Publish, no other // synchronization needed. diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index ee534781a2cce..ef155e621ae1a 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -158,7 +158,7 @@ func (q *subscribeState) subscriberFor(val any) subscriber { return q.outputs[reflect.TypeOf(val)] } -// Close closes the subscribeState. Implicitly closes all Subscribers +// Close closes the subscribeState. It implicitly closes all Subscribers // linked to this state, and any pending events are discarded. func (s *subscribeState) close() { s.dispatcher.StopAndWait() @@ -244,6 +244,10 @@ func (s *Subscriber[T]) Done() <-chan struct{} { // Close closes the Subscriber, indicating the caller no longer wishes // to receive this event type. After Close, receives on // [Subscriber.Events] block for ever. +// +// If the Bus from which the Subscriber was created is closed, +// the Subscriber is implicitly closed and does not need to be closed +// separately. 
func (s *Subscriber[T]) Close() { s.stop.Stop() // unblock receivers s.unregister() From c49ed5dd5a1ec27aa04ee87731f3e69f7b7c77fe Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 25 Sep 2025 11:54:41 -0700 Subject: [PATCH 0416/1093] feature/tpm: implement key.HardwareAttestationKey (#17256) Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/attestation.go | 264 ++++++++++++++++++++++++++++++++ feature/tpm/attestation_test.go | 98 ++++++++++++ feature/tpm/tpm.go | 5 + 3 files changed, 367 insertions(+) create mode 100644 feature/tpm/attestation.go create mode 100644 feature/tpm/attestation_test.go diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go new file mode 100644 index 0000000000000..4b3018569b426 --- /dev/null +++ b/feature/tpm/attestation.go @@ -0,0 +1,264 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import ( + "crypto" + "encoding/json" + "errors" + "fmt" + "io" + "log" + + "github.com/google/go-tpm/tpm2" + "github.com/google/go-tpm/tpm2/transport" + "golang.org/x/crypto/cryptobyte" + "golang.org/x/crypto/cryptobyte/asn1" + "tailscale.com/types/key" +) + +type attestationKey struct { + tpm transport.TPMCloser + // private and public parts of the TPM key as returned from tpm2.Create. + // These are used for serialization. + tpmPrivate tpm2.TPM2BPrivate + tpmPublic tpm2.TPM2BPublic + // handle of the loaded TPM key. + handle *tpm2.NamedHandle + // pub is the parsed *ecdsa.PublicKey. + pub crypto.PublicKey +} + +func newAttestationKey() (ak *attestationKey, retErr error) { + tpm, err := open() + if err != nil { + return nil, key.ErrUnsupported + } + defer func() { + if retErr != nil { + tpm.Close() + } + }() + ak = &attestationKey{tpm: tpm} + + // Create a key under the storage hierarchy. 
+ if err := withSRK(log.Printf, ak.tpm, func(srk tpm2.AuthHandle) error { + resp, err := tpm2.Create{ + ParentHandle: tpm2.NamedHandle{ + Handle: srk.Handle, + Name: srk.Name, + }, + InPublic: tpm2.New2B( + tpm2.TPMTPublic{ + Type: tpm2.TPMAlgECC, + NameAlg: tpm2.TPMAlgSHA256, + ObjectAttributes: tpm2.TPMAObject{ + SensitiveDataOrigin: true, + UserWithAuth: true, + AdminWithPolicy: true, + NoDA: true, + FixedTPM: true, + FixedParent: true, + SignEncrypt: true, + }, + Parameters: tpm2.NewTPMUPublicParms( + tpm2.TPMAlgECC, + &tpm2.TPMSECCParms{ + CurveID: tpm2.TPMECCNistP256, + Scheme: tpm2.TPMTECCScheme{ + Scheme: tpm2.TPMAlgECDSA, + Details: tpm2.NewTPMUAsymScheme( + tpm2.TPMAlgECDSA, + &tpm2.TPMSSigSchemeECDSA{ + // Unfortunately, TPMs don't let us use + // TPMAlgNull here to make the hash + // algorithm dynamic higher in the + // stack. We have to hardcode it here. + HashAlg: tpm2.TPMAlgSHA256, + }, + ), + }, + }, + ), + }, + ), + }.Execute(ak.tpm) + if err != nil { + return fmt.Errorf("tpm2.Create: %w", err) + } + ak.tpmPrivate = resp.OutPrivate + ak.tpmPublic = resp.OutPublic + return nil + }); err != nil { + return nil, err + } + return ak, ak.load() +} + +func (ak *attestationKey) loaded() bool { + return ak.tpm != nil && ak.handle != nil && ak.pub != nil +} + +// load the key into the TPM from its public/private components. Must be called +// before Sign or Public. 
+func (ak *attestationKey) load() error { + if ak.loaded() { + return nil + } + if len(ak.tpmPrivate.Buffer) == 0 || len(ak.tpmPublic.Bytes()) == 0 { + return fmt.Errorf("attestationKey.load called without tpmPrivate or tpmPublic") + } + return withSRK(log.Printf, ak.tpm, func(srk tpm2.AuthHandle) error { + resp, err := tpm2.Load{ + ParentHandle: tpm2.NamedHandle{ + Handle: srk.Handle, + Name: srk.Name, + }, + InPrivate: ak.tpmPrivate, + InPublic: ak.tpmPublic, + }.Execute(ak.tpm) + if err != nil { + return fmt.Errorf("tpm2.Load: %w", err) + } + + ak.handle = &tpm2.NamedHandle{ + Handle: resp.ObjectHandle, + Name: resp.Name, + } + pub, err := ak.tpmPublic.Contents() + if err != nil { + return err + } + ak.pub, err = tpm2.Pub(*pub) + return err + }) +} + +// attestationKeySerialized is the JSON-serialized representation of +// attestationKey. +type attestationKeySerialized struct { + TPMPrivate []byte `json:"tpmPrivate"` + TPMPublic []byte `json:"tpmPublic"` +} + +func (ak *attestationKey) MarshalJSON() ([]byte, error) { + return json.Marshal(attestationKeySerialized{ + TPMPublic: ak.tpmPublic.Bytes(), + TPMPrivate: ak.tpmPrivate.Buffer, + }) +} + +func (ak *attestationKey) UnmarshalJSON(data []byte) (retErr error) { + var aks attestationKeySerialized + if err := json.Unmarshal(data, &aks); err != nil { + return err + } + + ak.tpmPrivate = tpm2.TPM2BPrivate{Buffer: aks.TPMPrivate} + ak.tpmPublic = tpm2.BytesAs2B[tpm2.TPMTPublic, *tpm2.TPMTPublic](aks.TPMPublic) + + tpm, err := open() + if err != nil { + return key.ErrUnsupported + } + defer func() { + if retErr != nil { + tpm.Close() + } + }() + ak.tpm = tpm + + return ak.load() +} + +func (ak *attestationKey) Public() crypto.PublicKey { + return ak.pub +} + +func (ak *attestationKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if !ak.loaded() { + return nil, errors.New("tpm2 attestation key is not loaded during Sign") + } + // Unfortunately, TPMs don't let us make 
keys with dynamic hash algorithms. + // The hash algorithm is fixed at key creation time (tpm2.Create). + if opts != crypto.SHA256 { + return nil, fmt.Errorf("tpm2 key is restricted to SHA256, have %q", opts) + } + resp, err := tpm2.Sign{ + KeyHandle: ak.handle, + Digest: tpm2.TPM2BDigest{ + Buffer: digest, + }, + InScheme: tpm2.TPMTSigScheme{ + Scheme: tpm2.TPMAlgECDSA, + Details: tpm2.NewTPMUSigScheme( + tpm2.TPMAlgECDSA, + &tpm2.TPMSSchemeHash{ + HashAlg: tpm2.TPMAlgSHA256, + }, + ), + }, + Validation: tpm2.TPMTTKHashCheck{ + Tag: tpm2.TPMSTHashCheck, + }, + }.Execute(ak.tpm) + if err != nil { + return nil, fmt.Errorf("tpm2.Sign: %w", err) + } + sig, err := resp.Signature.Signature.ECDSA() + if err != nil { + return nil, err + } + return encodeSignature(sig.SignatureR.Buffer, sig.SignatureS.Buffer) +} + +// Copied from crypto/ecdsa. +func encodeSignature(r, s []byte) ([]byte, error) { + var b cryptobyte.Builder + b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) { + addASN1IntBytes(b, r) + addASN1IntBytes(b, s) + }) + return b.Bytes() +} + +// addASN1IntBytes encodes in ASN.1 a positive integer represented as +// a big-endian byte slice with zero or more leading zeroes. +func addASN1IntBytes(b *cryptobyte.Builder, bytes []byte) { + for len(bytes) > 0 && bytes[0] == 0 { + bytes = bytes[1:] + } + if len(bytes) == 0 { + b.SetError(errors.New("invalid integer")) + return + } + b.AddASN1(asn1.INTEGER, func(c *cryptobyte.Builder) { + if bytes[0]&0x80 != 0 { + c.AddUint8(0) + } + c.AddBytes(bytes) + }) +} + +func (ak *attestationKey) Close() error { + var errs []error + if ak.handle != nil && ak.tpm != nil { + _, err := tpm2.FlushContext{FlushHandle: ak.handle.Handle}.Execute(ak.tpm) + errs = append(errs, err) + } + if ak.tpm != nil { + errs = append(errs, ak.tpm.Close()) + } + return errors.Join(errs...) 
+} + +func (ak *attestationKey) Clone() key.HardwareAttestationKey { + return &attestationKey{ + tpm: ak.tpm, + tpmPrivate: ak.tpmPrivate, + tpmPublic: ak.tpmPublic, + handle: ak.handle, + pub: ak.pub, + } +} diff --git a/feature/tpm/attestation_test.go b/feature/tpm/attestation_test.go new file mode 100644 index 0000000000000..ead88c955aeea --- /dev/null +++ b/feature/tpm/attestation_test.go @@ -0,0 +1,98 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/sha256" + "encoding/json" + "testing" +) + +func TestAttestationKeySign(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + data := []byte("secrets") + digest := sha256.Sum256(data) + + // Check signature/validation round trip. + sig, err := ak.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if !ecdsa.VerifyASN1(ak.Public().(*ecdsa.PublicKey), digest[:], sig) { + t.Errorf("ecdsa.VerifyASN1 failed") + } + + // Create a different key. + ak2, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + + // Make sure that the keys are distinct via their public keys and the + // signatures they produce. 
+ if ak.Public().(*ecdsa.PublicKey).Equal(ak2.Public()) { + t.Errorf("public keys of distinct attestation keys are the same") + } + sig2, err := ak2.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if bytes.Equal(sig, sig2) { + t.Errorf("signatures from distinct attestation keys are the same") + } +} + +func TestAttestationKeyUnmarshal(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + buf, err := ak.MarshalJSON() + if err != nil { + t.Fatal(err) + } + var ak2 attestationKey + if err := json.Unmarshal(buf, &ak2); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + + if !ak2.loaded() { + t.Error("unmarshalled key is not loaded") + } + + if !ak.Public().(*ecdsa.PublicKey).Equal(ak2.Public()) { + t.Error("unmarshalled public key is not the same as the original public key") + } +} diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 0260cca586e13..0192247388330 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -28,6 +28,7 @@ import ( "tailscale.com/ipn/store" "tailscale.com/paths" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" ) @@ -39,6 +40,10 @@ func init() { hi.TPM = infoOnce() }) store.Register(store.TPMPrefix, newStore) + key.RegisterHardwareAttestationKeyFns( + func() key.HardwareAttestationKey { return &attestationKey{} }, + func() (key.HardwareAttestationKey, error) { return newAttestationKey() }, + ) } func info() *tailcfg.TPMInfo { From bbc5107d7d68ec0a736a568a1d4229c08c4c8202 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 25 Sep 2025 14:07:09 -0500 Subject: [PATCH 0417/1093] ipn/ipnlocal: do not reset extHost on (*LocalBackend).Shutdown We made changes to ipnext callback registration/unregistration/invocation in #15780 that made 
resetting b.exthost to a nil, no-op host in (*LocalBackend).Shutdown() unnecessary. But resetting it is also racy: b.exthost must be safe for concurrent use with or without b.mu held, so it shouldn't be written after NewLocalBackend returns. This PR removes it. Fixes #17279 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ef8fcab40d0e6..b36f54705c18b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1118,8 +1118,6 @@ func (b *LocalBackend) Shutdown() { if b.notifyCancel != nil { b.notifyCancel() } - extHost := b.extHost - b.extHost = nil b.mu.Unlock() b.webClientShutdown() @@ -1136,7 +1134,7 @@ func (b *LocalBackend) Shutdown() { } b.ctxCancel(errShutdown) b.currentNode().shutdown(errShutdown) - extHost.Shutdown() + b.extHost.Shutdown() b.e.Close() <-b.e.Done() b.awaitNoGoroutinesInTest() From 45d635cc98d1ef89eb3bd2a79b2c21d9c0968198 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 24 Sep 2025 15:12:24 -0700 Subject: [PATCH 0418/1093] feature/portlist: pull portlist service porting into extension, use eventbus And yay: tsnet (and thus k8s-operator etc) no longer depends on portlist! And LocalBackend is smaller. Removes 50 KB from the minimal binary. 
Updates #12614 Change-Id: Iee04057053dc39305303e8bd1d9599db8368d926 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 5 +- cmd/tailscaled/depaware.txt | 5 +- cmd/tailscaled/deps_test.go | 13 ++ cmd/tsidp/depaware.txt | 5 +- .../feature_portlist_disabled.go | 13 ++ .../buildfeatures/feature_portlist_enabled.go | 13 ++ feature/condregister/maybe_portlist.go | 8 + feature/featuretags/featuretags.go | 1 + feature/portlist/portlist.go | 157 ++++++++++++++++++ feature/taildrop/ext.go | 1 + ipn/ipnext/ipnext.go | 9 + ipn/ipnlocal/local.go | 131 +++++---------- ipn/ipnlocal/local_test.go | 1 - ipn/ipnlocal/node_backend.go | 6 + ipn/ipnlocal/state_test.go | 1 - ipn/lapitest/backend.go | 1 - tsnet/depaware.txt | 5 +- tsnet/tsnet_test.go | 13 ++ 18 files changed, 277 insertions(+), 111 deletions(-) create mode 100644 feature/buildfeatures/feature_portlist_disabled.go create mode 100644 feature/buildfeatures/feature_portlist_enabled.go create mode 100644 feature/condregister/maybe_portlist.go create mode 100644 feature/portlist/portlist.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2281d38195309..ea0e08b191bab 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -813,7 +813,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator+ @@ -861,7 +860,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/netknob from tailscale.com/logpolicy+ 💣 tailscale.com/net/netmon from 
tailscale.com/control/controlclient+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ tailscale.com/net/packet from tailscale.com/net/connstats+ @@ -885,7 +883,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ @@ -931,7 +928,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/cmpver from tailscale.com/clientupdate+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/tsd+ tailscale.com/util/execqueue from tailscale.com/appc+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 70be690ee9a20..acd8e0459c0f5 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -278,6 +278,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister + tailscale.com/feature/portlist from tailscale.com/feature/condregister 
tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/relayserver from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ @@ -299,7 +300,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver+ - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/policy from tailscale.com/feature/portlist tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store @@ -360,7 +361,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/portlist from tailscale.com/feature/portlist tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 35975b57ce58f..24a39312433bd 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -185,3 +185,16 @@ func TestOmitDBus(t *testing.T) { }, }.Check(t) } + +func TestOmitPortlist(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_portlist,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "portlist") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 
4fd7c8020abb7..69904c9761f69 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -255,7 +255,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store @@ -292,7 +291,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/netknob from tailscale.com/logpolicy+ 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ @@ -316,7 +314,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ @@ -361,7 +358,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/cmpver from tailscale.com/clientupdate+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + L 💣 
tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ tailscale.com/util/execqueue from tailscale.com/appc+ diff --git a/feature/buildfeatures/feature_portlist_disabled.go b/feature/buildfeatures/feature_portlist_disabled.go new file mode 100644 index 0000000000000..934061fd8328f --- /dev/null +++ b/feature/buildfeatures/feature_portlist_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_portlist + +package buildfeatures + +// HasPortList is whether the binary was built with support for modular feature "Optionally advertise listening service ports". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portlist" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortList = false diff --git a/feature/buildfeatures/feature_portlist_enabled.go b/feature/buildfeatures/feature_portlist_enabled.go new file mode 100644 index 0000000000000..c1dc1c163b80e --- /dev/null +++ b/feature/buildfeatures/feature_portlist_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_portlist + +package buildfeatures + +// HasPortList is whether the binary was built with support for modular feature "Optionally advertise listening service ports". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portlist" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasPortList = true diff --git a/feature/condregister/maybe_portlist.go b/feature/condregister/maybe_portlist.go new file mode 100644 index 0000000000000..1be56f177daf8 --- /dev/null +++ b/feature/condregister/maybe_portlist.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_portlist + +package condregister + +import _ "tailscale.com/feature/portlist" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 6f8c4ac170a3d..d1752a80ca8ae 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -114,6 +114,7 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Outbound localhost HTTP/SOCK5 proxy support", Deps: []FeatureTag{"netstack"}, }, + "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, "netstack": {"Netstack", "gVisor netstack (userspace networking) support (TODO; not yet omittable)", nil}, "networkmanager": { diff --git a/feature/portlist/portlist.go b/feature/portlist/portlist.go new file mode 100644 index 0000000000000..7d69796ffd5d2 --- /dev/null +++ b/feature/portlist/portlist.go @@ -0,0 +1,157 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portlist contains code to poll the local system for open ports +// and report them to the control plane, if enabled on the tailnet. 
+package portlist + +import ( + "context" + "sync/atomic" + + "tailscale.com/envknob" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/policy" + "tailscale.com/portlist" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/version" +) + +func init() { + ipnext.RegisterExtension("portlist", newExtension) +} + +func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + busClient := sb.Sys().Bus.Get().Client("portlist") + e := &Extension{ + sb: sb, + busClient: busClient, + logf: logger.WithPrefix(logf, "portlist: "), + pub: eventbus.Publish[ipnlocal.PortlistServices](busClient), + pollerDone: make(chan struct{}), + wakePoller: make(chan struct{}), + } + e.ctx, e.ctxCancel = context.WithCancel(context.Background()) + return e, nil +} + +// Extension implements the portlist extension. +type Extension struct { + ctx context.Context + ctxCancel context.CancelFunc + pollerDone chan struct{} // close-only chan when poller goroutine exits + wakePoller chan struct{} // best effort chan to wake poller from sleep + busClient *eventbus.Client + pub *eventbus.Publisher[ipnlocal.PortlistServices] + logf logger.Logf + sb ipnext.SafeBackend + host ipnext.Host // from Init + + shieldsUp atomic.Bool + shouldUploadServicesAtomic atomic.Bool +} + +func (e *Extension) Name() string { return "portlist" } +func (e *Extension) Shutdown() error { + e.ctxCancel() + e.busClient.Close() + <-e.pollerDone + return nil +} + +func (e *Extension) Init(h ipnext.Host) error { + if !envknob.BoolDefaultTrue("TS_PORTLIST") { + return ipnext.SkipExtension + } + + e.host = h + h.Hooks().ShouldUploadServices.Set(e.shouldUploadServicesAtomic.Load) + h.Hooks().ProfileStateChange.Add(e.onChangeProfile) + h.Hooks().OnSelfChange.Add(e.onSelfChange) + + // TODO(nickkhyl): remove this after the profileManager refactoring. + // See tailscale/tailscale#15974. 
+	// This same workaround appears in feature/taildrop/ext.go.
+	profile, prefs := h.Profiles().CurrentProfileState()
+	e.onChangeProfile(profile, prefs, false)
+
+	go e.runPollLoop()
+	return nil
+}
+
+func (e *Extension) onSelfChange(tailcfg.NodeView) {
+	e.updateShouldUploadServices()
+}
+
+func (e *Extension) onChangeProfile(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) {
+	e.shieldsUp.Store(prefs.ShieldsUp())
+	e.updateShouldUploadServices()
+}
+
+func (e *Extension) updateShouldUploadServices() {
+	v := !e.shieldsUp.Load() && e.host.NodeBackend().CollectServices()
+	if e.shouldUploadServicesAtomic.CompareAndSwap(!v, v) && v {
+		// Upon transition from false to true (enabling service reporting), try
+		// to wake the poller to do an immediate poll if it's sleeping.
+		// It's not a big deal if we miss waking it. It'll get to it soon enough.
+		select {
+		case e.wakePoller <- struct{}{}:
+		default:
+		}
+	}
+}
+
+// runPollLoop is a goroutine that periodically checks the open
+// ports and publishes them if they've changed.
+func (e *Extension) runPollLoop() {
+	defer close(e.pollerDone)
+
+	var poller portlist.Poller
+
+	ticker, tickerChannel := e.sb.Clock().NewTicker(portlist.PollInterval())
+	defer ticker.Stop()
+	for {
+		select {
+		case <-tickerChannel:
+		case <-e.wakePoller:
+		case <-e.ctx.Done():
+			return
+		}
+
+		if !e.shouldUploadServicesAtomic.Load() {
+			continue
+		}
+
+		ports, changed, err := poller.Poll()
+		if err != nil {
+			e.logf("Poll: %v", err)
+			// TODO: this is kinda weird that we just return here and never try
+			// again. Maybe that was because all errors are assumed to be
+			// permission errors and thus permanent? Audit various OS
+			// implementations and check error types, and then make this check
+			// for permanent vs temporary errors and keep looping with a backoff
+			// for temporary errors? But for now we just give up, like we always
+			// have.
+ return + } + if !changed { + continue + } + sl := []tailcfg.Service{} + for _, p := range ports { + s := tailcfg.Service{ + Proto: tailcfg.ServiceProto(p.Proto), + Port: p.Port, + Description: p.Process, + } + if policy.IsInterestingService(s, version.OS()) { + sl = append(sl, s) + } + } + e.pub.Publish(ipnlocal.PortlistServices(sl)) + } +} diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index f8f45b53fae26..6bdb375ccfe63 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -105,6 +105,7 @@ func (e *Extension) Init(h ipnext.Host) error { // TODO(nickkhyl): remove this after the profileManager refactoring. // See tailscale/tailscale#15974. + // This same workaround appears in feature/portlist/portlist.go. profile, prefs := h.Profiles().CurrentProfileState() e.onChangeProfile(profile, prefs, false) return nil diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 066763ba4d2fa..4ff37dc8e3775 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -372,6 +372,10 @@ type Hooks struct { // SetPeerStatus is called to mutate PeerStatus. // Callers must only use NodeBackend to read data. SetPeerStatus feature.Hooks[func(*ipnstate.PeerStatus, tailcfg.NodeView, NodeBackend)] + + // ShouldUploadServices reports whether this node should include services + // in Hostinfo from the portlist extension. + ShouldUploadServices feature.Hook[func() bool] } // NodeBackend is an interface to query the current node and its peers. @@ -398,4 +402,9 @@ type NodeBackend interface { // It effectively just reports whether PeerAPIBase(node) is non-empty, but // potentially more efficiently. PeerHasPeerAPI(tailcfg.NodeView) bool + + // CollectServices reports whether the control plane is telling this + // node that the portlist service collection is desirable, should it + // choose to report them. 
+ CollectServices() bool } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index b36f54705c18b..62a3a213178b7 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -61,7 +61,6 @@ import ( "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnstate" - "tailscale.com/ipn/policy" "tailscale.com/log/sockstatlog" "tailscale.com/logpolicy" "tailscale.com/net/dns" @@ -77,7 +76,6 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/paths" - "tailscale.com/portlist" "tailscale.com/posture" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -211,12 +209,10 @@ type LocalBackend struct { pushDeviceToken syncs.AtomicValue[string] backendLogID logid.PublicID unregisterSysPolicyWatch func() - portpoll *portlist.Poller // may be nil - portpollOnce sync.Once // guards starting readPoller - varRoot string // or empty if SetVarRoot never called - logFlushFunc func() // or nil if SetLogFlusher wasn't called - em *expiryManager // non-nil; TODO(nickkhyl): move to nodeBackend - sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend + varRoot string // or empty if SetVarRoot never called + logFlushFunc func() // or nil if SetLogFlusher wasn't called + em *expiryManager // non-nil; TODO(nickkhyl): move to nodeBackend + sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend // webClientAtomicBool controls whether the web client is running. This should // be true unless the disable-web-client node attribute has been set. 
webClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend @@ -522,7 +518,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo pm: pm, backendLogID: logID, state: ipn.NoState, - portpoll: new(portlist.Poller), em: newExpiryManager(logf, sys.Bus.Get()), loginFlags: loginFlags, clock: clock, @@ -619,6 +614,12 @@ func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus healthChangeSub := eventbus.Subscribe[health.Change](ec) changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + var portlist <-chan PortlistServices + if buildfeatures.HasPortList { + portlistSub := eventbus.Subscribe[PortlistServices](ec) + portlist = portlistSub.Events() + } + return func(ec *eventbus.Client) { for { select { @@ -632,6 +633,10 @@ func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus b.onHealthChange(change) case changeDelta := <-changeDeltaSub.Events(): b.linkChange(&changeDelta) + case pl := <-portlist: + if buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans + b.setPortlistServices(pl) + } } } } @@ -2300,15 +2305,6 @@ func (b *LocalBackend) SetControlClientGetterForTesting(newControlClient func(co b.ccGen = newControlClient } -// DisablePortPollerForTest disables the port list poller for tests. -// It must be called before Start. -func (b *LocalBackend) DisablePortPollerForTest() { - testenv.AssertInTest() - b.mu.Lock() - defer b.mu.Unlock() - b.portpoll = nil -} - // PeersForTest returns all the current peers, sorted by Node.ID, // for integration tests in another repo. 
func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { @@ -2457,12 +2453,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error { persistv = new(persist.Persist) } - if b.portpoll != nil { - b.portpollOnce.Do(func() { - b.goTracker.Go(b.readPoller) - }) - } - discoPublic := b.MagicConn().DiscoPublicKey() var err error @@ -2906,57 +2896,6 @@ func shrinkDefaultRoute(route netip.Prefix, localInterfaceRoutes *netipx.IPSet, return b.IPSet() } -// readPoller is a goroutine that receives service lists from -// b.portpoll and propagates them into the controlclient's HostInfo. -func (b *LocalBackend) readPoller() { - if !envknob.BoolDefaultTrue("TS_PORTLIST") { - return - } - - ticker, tickerChannel := b.clock.NewTicker(portlist.PollInterval()) - defer ticker.Stop() - for { - select { - case <-tickerChannel: - case <-b.ctx.Done(): - return - } - - if !b.shouldUploadServices() { - continue - } - - ports, changed, err := b.portpoll.Poll() - if err != nil { - b.logf("error polling for open ports: %v", err) - return - } - if !changed { - continue - } - sl := []tailcfg.Service{} - for _, p := range ports { - s := tailcfg.Service{ - Proto: tailcfg.ServiceProto(p.Proto), - Port: p.Port, - Description: p.Process, - } - if policy.IsInterestingService(s, version.OS()) { - sl = append(sl, s) - } - } - - b.mu.Lock() - if b.hostinfo == nil { - b.hostinfo = new(tailcfg.Hostinfo) - } - b.hostinfo.Services = sl - b.mu.Unlock() - - b.doSetHostinfoFilterServices() - } -} - // GetPushDeviceToken returns the push notification device token. func (b *LocalBackend) GetPushDeviceToken() string { return b.pushDeviceToken.Load() @@ -3853,23 +3792,6 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineSt return ret } -// shouldUploadServices reports whether this node should include services -// in Hostinfo. When the user preferences currently request "shields up" -// mode, all inbound connections are refused, so services are not reported. 
-// Otherwise, shouldUploadServices respects NetMap.CollectServices. -// TODO(nickkhyl): move this into [nodeBackend]? -func (b *LocalBackend) shouldUploadServices() bool { - b.mu.Lock() - defer b.mu.Unlock() - - p := b.pm.CurrentPrefs() - nm := b.currentNode().NetMap() - if !p.Valid() || nm == nil { - return false // default to safest setting - } - return !p.ShieldsUp() && nm.CollectServices -} - // SetCurrentUser is used to implement support for multi-user systems (only // Windows 2022-11-25). On such systems, the actor is used to determine which // user's state should be used. The current user is maintained by active @@ -4812,6 +4734,25 @@ func (b *LocalBackend) peerAPIServicesLocked() (ret []tailcfg.Service) { return ret } +// PortlistServices is an eventbus topic for the portlist extension +// to advertise the running services on the host. +type PortlistServices []tailcfg.Service + +func (b *LocalBackend) setPortlistServices(sl []tailcfg.Service) { + if !buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans + return + } + + b.mu.Lock() + if b.hostinfo == nil { + b.hostinfo = new(tailcfg.Hostinfo) + } + b.hostinfo.Services = sl + b.mu.Unlock() + + b.doSetHostinfoFilterServices() +} + // doSetHostinfoFilterServices calls SetHostinfo on the controlclient, // possibly after mangling the given hostinfo. // @@ -4837,13 +4778,15 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { // TODO(maisem,bradfitz): store hostinfo as a view, not as a mutable struct. hi := *b.hostinfo // shallow copy - unlock.UnlockEarly() // Make a shallow copy of hostinfo so we can mutate // at the Service field. - if !b.shouldUploadServices() { + if f, ok := b.extHost.Hooks().ShouldUploadServices.GetOk(); !ok || !f() { hi.Services = []tailcfg.Service{} } + + unlock.UnlockEarly() + // Don't mutate hi.Service's underlying array. Append to // the slice with no free capacity. 
c := len(hi.Services) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 56d65767b4f66..fd78c341877c3 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5816,7 +5816,6 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortPollerForTest() b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { return newControl(t, opts), nil diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 4319ed372222f..a6e4b51f1bad5 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -258,6 +258,12 @@ func (nb *nodeBackend) PeersForTest() []tailcfg.NodeView { return ret } +func (nb *nodeBackend) CollectServices() bool { + nb.mu.Lock() + defer nb.mu.Unlock() + return nb.netMap != nil && nb.netMap.CollectServices +} + // AppendMatchingPeers returns base with all peers that match pred appended. // // It acquires b.mu to read the netmap but releases it before calling pred. 
diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 1a32f31562f41..9c0aa66a94282 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -358,7 +358,6 @@ func TestStateMachine(t *testing.T) { t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortPollerForTest() var cc, previousCC *mockControl b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { diff --git a/ipn/lapitest/backend.go b/ipn/lapitest/backend.go index 725ffa4de4cca..7a1c276a7b229 100644 --- a/ipn/lapitest/backend.go +++ b/ipn/lapitest/backend.go @@ -45,7 +45,6 @@ func newBackend(opts *options) *ipnlocal.LocalBackend { tb.Fatalf("NewLocalBackend: %v", err) } tb.Cleanup(b.Shutdown) - b.DisablePortPollerForTest() b.SetControlClientGetterForTesting(opts.MakeControlClient) return b } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 795e4367fa3f7..ece4345d531b1 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -251,7 +251,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store @@ -288,7 +287,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/netknob from tailscale.com/logpolicy+ 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ 
tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ @@ -312,7 +310,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ @@ -356,7 +353,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/cmpver from tailscale.com/clientupdate+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ tailscale.com/util/execqueue from tailscale.com/appc+ diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index d00628453260f..1e22681fcfe36 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -43,6 +43,7 @@ import ( "tailscale.com/net/netns" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/tstest/deptest" "tailscale.com/tstest/integration" "tailscale.com/tstest/integration/testcontrol" "tailscale.com/types/key" @@ -1302,3 +1303,15 @@ func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *local.Client) { } t.Error("magicsock did not find a direct path from lc1 to lc2") } + +func TestDeps(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + OnDep: func(dep string) { + if strings.Contains(dep, "portlist") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} From 892f8a9582156514a2bc6c3b447d3e972f4d94ff Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: 
Wed, 24 Sep 2025 18:37:42 -0500 Subject: [PATCH 0419/1093] various: allow tailscaled shutdown via LocalAPI A customer wants to allow their employees to restart tailscaled at will, when access rights and MDM policy allow it, as a way to fully reset client state and re-create the tunnel in case of connectivity issues. On Windows, the main tailscaled process runs as a child of a service process. The service restarts the child when it exits (or crashes) until the service itself is stopped. Regular (non-admin) users can't stop the service, and allowing them to do so isn't ideal, especially in managed or multi-user environments. In this PR, we add a LocalAPI endpoint that instructs ipnserver.Server, and by extension the tailscaled process, to shut down. The service then restarts the child tailscaled. Shutting down tailscaled requires LocalAPI write access and an enabled policy setting. Updates tailscale/corp#32674 Updates tailscale/corp#32675 Signed-off-by: Nick Khyl --- client/local/local.go | 6 ++++ cmd/tailscaled/tailscaled.go | 2 +- cmd/tsconnect/wasm/wasm_js.go | 2 +- ipn/ipnserver/server.go | 13 ++++++-- ipn/ipnserver/server_test.go | 60 +++++++++++++++++++++++++++++++++++ ipn/lapitest/server.go | 2 +- ipn/localapi/localapi.go | 37 +++++++++++++++++++++ util/syspolicy/pkey/pkey.go | 7 ++++ util/syspolicy/policy_keys.go | 1 + 9 files changed, 125 insertions(+), 5 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 1be1f2ca74440..246112c37b5c6 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -1368,3 +1368,9 @@ func (lc *Client) SuggestExitNode(ctx context.Context) (apitype.ExitNodeSuggesti } return decodeJSON[apitype.ExitNodeSuggestionResponse](body) } + +// ShutdownTailscaled requests a graceful shutdown of tailscaled. 
+func (lc *Client) ShutdownTailscaled(ctx context.Context) error { + _, err := lc.send(ctx, "POST", "/localapi/v0/shutdown", 200, nil) + return err +} diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 0c6e6d22f4c7a..636627539ef92 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -546,7 +546,7 @@ func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, } }() - srv := ipnserver.New(logf, logID, sys.NetMon.Get()) + srv := ipnserver.New(logf, logID, sys.Bus.Get(), sys.NetMon.Get()) if debugMux != nil { debugMux.HandleFunc("/debug/ipn", srv.ServeHTMLStatus) } diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index ea40dba9ccbb1..fbf7968a01f11 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -138,7 +138,7 @@ func newIPN(jsConfig js.Value) map[string]any { sys.Tun.Get().Start() logid := lpc.PublicID - srv := ipnserver.New(logf, logid, sys.NetMon.Get()) + srv := ipnserver.New(logf, logid, sys.Bus.Get(), sys.NetMon.Get()) lb, err := ipnlocal.NewLocalBackend(logf, logid, sys, controlclient.LoginEphemeral) if err != nil { log.Fatalf("ipnlocal.NewLocalBackend: %v", err) diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index fdbd82b0b9e33..7e864959b36fe 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -29,6 +29,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/systemd" @@ -40,6 +41,7 @@ import ( type Server struct { lb atomic.Pointer[ipnlocal.LocalBackend] logf logger.Logf + bus *eventbus.Bus netMon *netmon.Monitor // must be non-nil backendLogID logid.PublicID @@ -446,13 +448,14 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o // // At some point, either before or after Run, the Server's SetLocalBackend // method must also 
be called before Server can do anything useful. -func New(logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor) *Server { +func New(logf logger.Logf, logID logid.PublicID, bus *eventbus.Bus, netMon *netmon.Monitor) *Server { if netMon == nil { panic("nil netMon") } return &Server{ backendLogID: logID, logf: logf, + bus: bus, netMon: netMon, } } @@ -494,10 +497,16 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { runDone := make(chan struct{}) defer close(runDone) - // When the context is closed or when we return, whichever is first, close our listener + ec := s.bus.Client("ipnserver.Server") + defer ec.Close() + shutdownSub := eventbus.Subscribe[localapi.Shutdown](ec) + + // When the context is closed, a [localapi.Shutdown] event is received, + // or when we return, whichever is first, close our listener // and all open connections. go func() { select { + case <-shutdownSub.Events(): case <-ctx.Done(): case <-runDone: } diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index 903cb6b738331..713db9e50085e 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -5,6 +5,7 @@ package ipnserver_test import ( "context" + "errors" "runtime" "strconv" "sync" @@ -14,7 +15,10 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/ipn/lapitest" + "tailscale.com/tsd" "tailscale.com/types/ptr" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policytest" ) func TestUserConnectDisconnectNonWindows(t *testing.T) { @@ -253,6 +257,62 @@ func TestBlockWhileIdentityInUse(t *testing.T) { } } +func TestShutdownViaLocalAPI(t *testing.T) { + t.Parallel() + + errAccessDeniedByPolicy := errors.New("Access denied: shutdown access denied by policy") + + tests := []struct { + name string + allowTailscaledRestart *bool + wantErr error + }{ + { + name: "AllowTailscaledRestart/NotConfigured", + allowTailscaledRestart: nil, + wantErr: errAccessDeniedByPolicy, + }, + { + name: 
"AllowTailscaledRestart/False", + allowTailscaledRestart: ptr.To(false), + wantErr: errAccessDeniedByPolicy, + }, + { + name: "AllowTailscaledRestart/True", + allowTailscaledRestart: ptr.To(true), + wantErr: nil, // shutdown should be allowed + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + sys := tsd.NewSystem() + + var pol policytest.Config + if tt.allowTailscaledRestart != nil { + pol.Set(pkey.AllowTailscaledRestart, *tt.allowTailscaledRestart) + } + sys.Set(pol) + + server := lapitest.NewServer(t, lapitest.WithSys(sys)) + lc := server.ClientWithName("User") + + err := lc.ShutdownTailscaled(t.Context()) + checkError(t, err, tt.wantErr) + }) + } +} + +func checkError(tb testing.TB, got, want error) { + tb.Helper() + if (want == nil) != (got == nil) || + (want != nil && got != nil && want.Error() != got.Error() && !errors.Is(got, want)) { + tb.Fatalf("gotErr: %v; wantErr: %v", got, want) + } +} + func setGOOSForTest(tb testing.TB, goos string) { tb.Helper() envknob.Setenv("TS_DEBUG_FAKE_GOOS", goos) diff --git a/ipn/lapitest/server.go b/ipn/lapitest/server.go index d477dc1828549..457a338ab9f5a 100644 --- a/ipn/lapitest/server.go +++ b/ipn/lapitest/server.go @@ -236,7 +236,7 @@ func (s *Server) Close() { func newUnstartedIPNServer(opts *options) *ipnserver.Server { opts.TB().Helper() lb := opts.Backend() - server := ipnserver.New(opts.Logf(), logid.PublicID{}, lb.NetMon()) + server := ipnserver.New(opts.Logf(), logid.PublicID{}, lb.EventBus(), lb.NetMon()) server.SetLocalBackend(lb) return server } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 01966f84b3826..a83a2e17e4879 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -49,6 +49,7 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/osdiag" "tailscale.com/util/rands" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" "tailscale.com/wgengine/magicsock" ) @@ -112,6 +113,7 @@ var handler = 
map[string]LocalAPIHandler{ "set-push-device-token": (*Handler).serveSetPushDeviceToken, "set-udp-gro-forwarding": (*Handler).serveSetUDPGROForwarding, "set-use-exit-node-enabled": (*Handler).serveSetUseExitNodeEnabled, + "shutdown": (*Handler).serveShutdown, "start": (*Handler).serveStart, "status": (*Handler).serveStatus, "suggest-exit-node": (*Handler).serveSuggestExitNode, @@ -2026,3 +2028,38 @@ func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(res) } + +// Shutdown is an eventbus value published when tailscaled shutdown +// is requested via LocalAPI. Its only consumer is [ipnserver.Server]. +type Shutdown struct{} + +// serveShutdown shuts down tailscaled. It requires write access +// and the [pkey.AllowTailscaledRestart] policy to be enabled. +// See tailscale/corp#32674. +func (h *Handler) serveShutdown(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + + if !h.PermitWrite { + http.Error(w, "shutdown access denied", http.StatusForbidden) + return + } + + polc := h.b.Sys().PolicyClientOrDefault() + if permitShutdown, _ := polc.GetBoolean(pkey.AllowTailscaledRestart, false); !permitShutdown { + http.Error(w, "shutdown access denied by policy", http.StatusForbidden) + return + } + + ec := h.eventBus.Client("localapi.Handler") + defer ec.Close() + + w.WriteHeader(http.StatusOK) + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + + eventbus.Publish[Shutdown](ec).Publish(Shutdown{}) +} diff --git a/util/syspolicy/pkey/pkey.go b/util/syspolicy/pkey/pkey.go index cfef9e17a333a..1ef969d723aea 100644 --- a/util/syspolicy/pkey/pkey.go +++ b/util/syspolicy/pkey/pkey.go @@ -47,6 +47,13 @@ const ( // An empty string or a zero duration disables automatic reconnection. 
ReconnectAfter Key = "ReconnectAfter" + // AllowTailscaledRestart is a boolean key that controls whether users with write access + // to the LocalAPI are allowed to shutdown tailscaled with the intention of restarting it. + // On Windows, tailscaled will be restarted automatically by the service process + // (see babysitProc in cmd/tailscaled/tailscaled_windows.go). + // On other platforms, it is the client's responsibility to restart tailscaled. + AllowTailscaledRestart Key = "AllowTailscaledRestart" + // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. // Exit node ID takes precedence over exit node IP. // To find the node ID, go to /api.md#device. diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ef2ac430dbccc..ae902e8c40a49 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -17,6 +17,7 @@ var implicitDefinitions = []*setting.Definition{ // Device policy settings (can only be configured on a per-device basis): setting.NewDefinition(pkey.AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), setting.NewDefinition(pkey.AllowExitNodeOverride, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.AllowTailscaledRestart, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(pkey.AlwaysOn, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(pkey.AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(pkey.ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), From 9154bc10f09c0a72b6807fbb6b91e2e2690bac48 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 26 Sep 2025 07:31:35 +0100 Subject: [PATCH 0420/1093] tstest/integration: skip this test rather than commenting it out Updates #17108 Signed-off-by: Alex Chan --- tstest/integration/integration_test.go | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git 
a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index f65ae1659ddbe..6e5022edb29a4 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -309,16 +309,12 @@ func TestOneNodeUpAuth(t *testing.T) { alreadyLoggedIn: true, needsNewAuthURL: false, }, - // TODO(alexc): This test is failing because of a bug in `tailscale up` where - // it waits for ipn to enter the "Running" state. If we're already logged in - // and running, this completes immediately, before we've had a chance to show - // the user the auth URL. - // { - // name: "up-with-force-reauth-after-login", - // args: []string{"up", "--force-reauth"}, - // alreadyLoggedIn: true, - // needsNewAuthURL: true, - // }, + { + name: "up-with-force-reauth-after-login", + args: []string{"up", "--force-reauth"}, + alreadyLoggedIn: true, + needsNewAuthURL: true, + }, { name: "up-with-auth-key-after-login", args: []string{"up", "--auth-key=opensesame"}, @@ -341,6 +337,14 @@ func TestOneNodeUpAuth(t *testing.T) { t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) { tstest.Parallel(t) + // TODO(alexc): This test is failing because of a bug in `tailscale up` where + // it waits for ipn to enter the "Running" state. If we're already logged in + // and running, this completes immediately, before we've had a chance to show + // the user the auth URL. 
+ if tt.name == "up-with-force-reauth-after-login" { + t.Skip() + } + env := NewTestEnv(t, ConfigureControl( func(control *testcontrol.Server) { if tt.authKey != "" { From 260fe38ad8f7a0dfeb74872979e9e1729a211d65 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 25 Sep 2025 20:48:41 -0700 Subject: [PATCH 0421/1093] Makefile, cmd/tailscaled: add minimal tailscale+cli binary depaware Updates #12614 Change-Id: I593ed30f620556c6503d80c0ccbbe242567fd5cf Signed-off-by: Brad Fitzpatrick --- Makefile | 6 +- cmd/tailscaled/depaware-minbox.txt | 498 +++++++++++++++++++++++++++++ 2 files changed, 503 insertions(+), 1 deletion(-) create mode 100644 cmd/tailscaled/depaware-minbox.txt diff --git a/Makefile b/Makefile index 532bded9413b9..95959fcf0ba42 100644 --- a/Makefile +++ b/Makefile @@ -25,8 +25,10 @@ updatedeps: ## Update depaware deps tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/stund \ tailscale.com/cmd/tsidp - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update -goos=linux,darwin,windows,android,ios --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --goos=linux,darwin,windows,android,ios --internal \ tailscale.com/tsnet + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ + tailscale.com/cmd/tailscaled depaware: ## Run depaware checks # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" @@ -40,6 +42,8 @@ depaware: ## Run depaware checks tailscale.com/cmd/tsidp PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --internal \ tailscale.com/tsnet + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux 
--tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ + tailscale.com/cmd/tailscaled buildwindows: ## Build tailscale CLI for windows/amd64 GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt new file mode 100644 index 0000000000000..0c3e08c15cfe5 --- /dev/null +++ b/cmd/tailscaled/depaware-minbox.txt @@ -0,0 +1,498 @@ +tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + + filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus + filippo.io/edwards25519/field from filippo.io/edwards25519 + github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw + github.com/digitalocean/go-smbios/smbios from tailscale.com/posture + github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/go-json-experiment/json from tailscale.com/drive+ + github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + github.com/golang/groupcache/lru from tailscale.com/net/dnscache + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/nftables from tailscale.com/util/linuxfw + 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt + 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ + github.com/google/nftables/expr from github.com/google/nftables+ + 
github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ + github.com/google/nftables/xt from github.com/google/nftables/expr+ + github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon + github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/klauspost/compress from github.com/klauspost/compress/zstd + github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 + github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + github.com/mdlayher/genetlink from tailscale.com/net/tstun + 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ + github.com/mdlayher/netlink/nltest from github.com/google/nftables + github.com/mdlayher/sdnotify from tailscale.com/util/systemd + 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + github.com/mitchellh/go-ps from tailscale.com/safesocket + 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + github.com/tailscale/hujson from tailscale.com/ipn/conffile + 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink + github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ + 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ + github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device + 
github.com/tailscale/wireguard-go/ratelimiter from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/replay from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ + github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device + 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + github.com/vishvananda/netns from github.com/tailscale/netlink+ + 💣 go4.org/mem from tailscale.com/control/controlbase+ + go4.org/netipx from tailscale.com/ipn/ipnlocal+ + gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ + gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer + 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip+ + gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/refs + 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+ + gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log + gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context+ + gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip+ + gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer+ + 💣 gvisor.dev/gvisor/pkg/sleep from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ + gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state + 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ + 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack + 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ + gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+ + 
gvisor.dev/gvisor/pkg/tcpip/header/parse from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4 + gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ + gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ + 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack/gro + gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop from gvisor.dev/gvisor/pkg/tcpip/transport/raw + gvisor.dev/gvisor/pkg/tcpip/transport/packet from gvisor.dev/gvisor/pkg/tcpip/transport/raw + gvisor.dev/gvisor/pkg/tcpip/transport/raw from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ + 💣 gvisor.dev/gvisor/pkg/tcpip/transport/tcp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ + tailscale.com from 
tailscale.com/version + tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ + tailscale.com/clientupdate from tailscale.com/ipn/ipnlocal+ + tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled + tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ + tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ + tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ + tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ + tailscale.com/disco from tailscale.com/net/tstun+ + tailscale.com/doctor from tailscale.com/ipn/ipnlocal + tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal + tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/drive from tailscale.com/ipn+ + tailscale.com/envknob from tailscale.com/cmd/tailscaled+ + tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal + tailscale.com/feature from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/health from tailscale.com/cmd/tailscaled+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/hostinfo from tailscale.com/clientupdate+ + tailscale.com/internal/noiseconn from 
tailscale.com/control/controlclient + tailscale.com/ipn from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled + tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ + tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver + tailscale.com/ipn/store from tailscale.com/cmd/tailscaled + tailscale.com/ipn/store/mem from tailscale.com/ipn/store + tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/log/filelogger from tailscale.com/logpolicy + tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal + tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ + tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ + tailscale.com/metrics from tailscale.com/health+ + tailscale.com/net/ace from tailscale.com/control/controlhttp + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock + tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ + tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ + tailscale.com/net/dnscache from tailscale.com/control/controlclient+ + tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ + tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/netaddr from tailscale.com/ipn+ + tailscale.com/net/netcheck from 
tailscale.com/ipn/ipnlocal+ + tailscale.com/net/neterror from tailscale.com/net/batching+ + tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal + tailscale.com/net/netknob from tailscale.com/logpolicy+ + tailscale.com/net/netmon from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netutil from tailscale.com/control/controlclient+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/packet/checksum from tailscale.com/net/tstun + tailscale.com/net/ping from tailscale.com/net/netcheck+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ + tailscale.com/net/routetable from tailscale.com/doctor/routetable + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + tailscale.com/net/sockstats from tailscale.com/control/controlclient+ + tailscale.com/net/stun from tailscale.com/ipn/localapi+ + tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial + tailscale.com/net/tsaddr from tailscale.com/ipn+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ + tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ + tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/omit from tailscale.com/ipn/conffile + tailscale.com/paths from tailscale.com/cmd/tailscaled+ + tailscale.com/posture from tailscale.com/ipn/ipnlocal + tailscale.com/proxymap from tailscale.com/tsd+ + tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ + tailscale.com/syncs from tailscale.com/cmd/tailscaled+ + tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ + tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tempfork/httprec from 
tailscale.com/control/controlclient + tailscale.com/tka from tailscale.com/control/controlclient+ + tailscale.com/tsd from tailscale.com/cmd/tailscaled+ + tailscale.com/tstime from tailscale.com/control/controlclient+ + tailscale.com/tstime/mono from tailscale.com/net/tstun+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter + tailscale.com/tsweb from tailscale.com/util/eventbus + tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ + tailscale.com/types/empty from tailscale.com/ipn+ + tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled + tailscale.com/types/ipproto from tailscale.com/ipn+ + tailscale.com/types/key from tailscale.com/cmd/tailscaled+ + tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/logger from tailscale.com/appc+ + tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netmap from tailscale.com/control/controlclient+ + tailscale.com/types/nettype from tailscale.com/ipn/localapi+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ + tailscale.com/types/persist from tailscale.com/control/controlclient+ + tailscale.com/types/preftype from tailscale.com/ipn+ + tailscale.com/types/ptr from tailscale.com/control/controlclient+ + tailscale.com/types/result from tailscale.com/util/lineiter + tailscale.com/types/structs from tailscale.com/control/controlclient+ + tailscale.com/types/tkatype from tailscale.com/control/controlclient+ + tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/clientmetric from tailscale.com/appc+ + tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cmpver from 
tailscale.com/clientupdate + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ + 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/dirwalk from tailscale.com/metrics + tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/cmd/tailscaled+ + tailscale.com/util/execqueue from tailscale.com/appc+ + tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal + tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth + 💣 tailscale.com/util/hashx from tailscale.com/util/deephash + tailscale.com/util/httpm from tailscale.com/clientupdate/distsign+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ + tailscale.com/util/linuxfw from tailscale.com/net/netns+ + tailscale.com/util/mak from tailscale.com/appc+ + tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ + tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/osdiag from tailscale.com/ipn/localapi + tailscale.com/util/osshare from tailscale.com/cmd/tailscaled + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/race from tailscale.com/net/dns/resolver + tailscale.com/util/racebuild from tailscale.com/logpolicy + tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ + tailscale.com/util/singleflight from tailscale.com/control/controlclient+ + tailscale.com/util/slicesx from tailscale.com/appc+ + tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/systemd from tailscale.com/control/controlclient+ + tailscale.com/util/testenv from 
tailscale.com/control/controlclient+ + tailscale.com/util/truncate from tailscale.com/logtail + tailscale.com/util/usermetric from tailscale.com/health+ + tailscale.com/util/vizerror from tailscale.com/tailcfg+ + tailscale.com/util/winutil from tailscale.com/ipn/ipnauth + tailscale.com/util/zstdframe from tailscale.com/control/controlclient+ + tailscale.com/version from tailscale.com/clientupdate+ + tailscale.com/version/distro from tailscale.com/clientupdate+ + tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ + tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ + 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/netlog from tailscale.com/wgengine + tailscale.com/wgengine/netstack from tailscale.com/cmd/tailscaled + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ + tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ + tailscale.com/wgengine/wglog from tailscale.com/wgengine + golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box + golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/nacl/box from 
tailscale.com/types/key + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device + golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/exp/constraints from tailscale.com/doctor/permissions+ + golang.org/x/exp/maps from tailscale.com/ipn/store/mem + golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ + golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/icmp from tailscale.com/net/ping + golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/internal/socks from golang.org/x/net/proxy + golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/proxy from tailscale.com/net/netns + golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ + golang.org/x/sys/unix from github.com/google/nftables+ + golang.org/x/term from tailscale.com/logpolicy + golang.org/x/text/secure/bidirule from golang.org/x/net/idna + golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ + golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ + golang.org/x/text/unicode/norm from golang.org/x/net/idna + golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + archive/tar from tailscale.com/clientupdate + bufio from compress/flate+ + bytes from archive/tar+ + cmp from encoding/json+ + 
compress/flate from compress/gzip + compress/gzip from golang.org/x/net/http2+ + container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp + container/list from crypto/tls+ + context from crypto/tls+ + crypto from crypto/ecdh+ + crypto/aes from crypto/internal/hpke+ + crypto/cipher from crypto/aes+ + crypto/des from crypto/tls+ + crypto/dsa from crypto/x509 + crypto/ecdh from crypto/ecdsa+ + crypto/ecdsa from crypto/tls+ + crypto/ed25519 from crypto/tls+ + crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ + crypto/hmac from crypto/tls+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/fips140+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/fips140+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/hkdf+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/ecdsa+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + 
crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ + crypto/internal/hpke from crypto/tls + crypto/internal/impl from crypto/internal/fips140/aes+ + crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ + crypto/md5 from crypto/tls+ + crypto/rand from crypto/ed25519+ + crypto/rc4 from crypto/tls + crypto/rsa from crypto/tls+ + crypto/sha1 from crypto/tls+ + crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash + crypto/sha512 from crypto/ecdsa+ + crypto/subtle from crypto/cipher+ + crypto/tls from golang.org/x/net/http2+ + crypto/tls/internal/fips140tls from crypto/tls + crypto/x509 from crypto/tls+ + crypto/x509/pkix from crypto/x509 + embed from tailscale.com+ + encoding from encoding/json+ + encoding/asn1 from crypto/x509+ + encoding/base32 from github.com/go-json-experiment/json + encoding/base64 from encoding/json+ + encoding/binary from compress/gzip+ + encoding/hex from crypto/x509+ + encoding/json from expvar+ + encoding/pem from crypto/tls+ + errors from archive/tar+ + expvar from tailscale.com/cmd/tailscaled+ + flag from tailscale.com/cmd/tailscaled+ + fmt from archive/tar+ + hash from crypto+ + hash/crc32 from compress/gzip+ + hash/maphash from go4.org/mem + html from net/http/pprof+ + internal/abi from hash/maphash+ + internal/asan from internal/runtime/maps+ + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from 
crypto/cipher+ + internal/chacha8rand from math/rand/v2+ + internal/coverage/rtcov from runtime + internal/cpu from crypto/internal/fips140deps/cpu+ + internal/filepathlite from os+ + internal/fmtsort from fmt + internal/goarch from crypto/internal/fips140deps/cpu+ + internal/godebug from archive/tar+ + internal/godebugs from internal/godebug+ + internal/goexperiment from hash/maphash+ + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from internal/runtime/maps+ + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profile from net/http/pprof + internal/profilerecord from runtime+ + internal/race from internal/runtime/maps+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/cgroup from runtime + internal/runtime/exithook from runtime + internal/runtime/gc from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/sys from crypto/subtle+ + internal/runtime/syscall from internal/runtime/cgroup+ + internal/singleflight from net + internal/stringslite from embed+ + internal/sync from sync+ + internal/synctest from sync + internal/syscall/execenv from os+ + internal/syscall/unix from crypto/internal/sysrand+ + internal/testlog from os + internal/trace/tracev2 from runtime+ + internal/unsafeheader from internal/reflectlite+ + io from archive/tar+ + io/fs from archive/tar+ + io/ioutil from github.com/digitalocean/go-smbios/smbios+ + iter from bytes+ + log from expvar+ + log/internal from log + maps from archive/tar+ + math from archive/tar+ + math/big from crypto/dsa+ + math/bits from bytes+ + math/rand from github.com/mdlayher/netlink+ + math/rand/v2 from crypto/ecdsa+ + mime from mime/multipart+ + mime/multipart from net/http + mime/quotedprintable from mime/multipart + net from crypto/tls+ + net/http from 
expvar+ + net/http/httptrace from golang.org/x/net/http2+ + net/http/internal from net/http + net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http + net/http/pprof from tailscale.com/cmd/tailscaled+ + net/netip from crypto/x509+ + net/textproto from golang.org/x/net/http/httpguts+ + net/url from crypto/x509+ + os from crypto/internal/sysrand+ + os/exec from github.com/coreos/go-iptables/iptables+ + os/signal from tailscale.com/cmd/tailscaled + os/user from archive/tar+ + path from archive/tar+ + path/filepath from archive/tar+ + reflect from archive/tar+ + regexp from github.com/coreos/go-iptables/iptables+ + regexp/syntax from regexp + runtime from archive/tar+ + runtime/debug from github.com/klauspost/compress/zstd+ + runtime/pprof from net/http/pprof+ + runtime/trace from net/http/pprof + slices from archive/tar+ + sort from compress/flate+ + strconv from archive/tar+ + strings from archive/tar+ + sync from archive/tar+ + sync/atomic from context+ + syscall from archive/tar+ + text/tabwriter from runtime/pprof + time from archive/tar+ + unicode from bytes+ + unicode/utf16 from crypto/x509+ + unicode/utf8 from bufio+ + unique from net/netip + unsafe from bytes+ + weak from crypto/internal/fips140cache+ From c011369de2fb4b3cc2ce505402cba968b875f767 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 15 Sep 2025 23:20:38 +0100 Subject: [PATCH 0422/1093] cmd/tailscale/cli: start WatchIPNBus before initial Start This partially reverts f3d2fd2. When that patch was written, the goroutine that responds to IPN notifications could call `StartLoginInteractive`, creating a race condition that led to flaky integration tests. We no longer call `StartLoginInteractive` in that goroutine, so the race is now impossible. Moving the `WatchIPNBus` call earlier ensures the CLI gets all necessary IPN notifications, preventing a reauth from hanging. 
Updates tailscale/corp#31476 Signed-off-by: Alex Chan --- cmd/tailscale/cli/up.go | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 96b561bee8f79..0a15c8fb7b670 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -540,8 +540,18 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } }() - running := make(chan bool, 1) // gets value once in state ipn.Running - watchErr := make(chan error, 1) + // Start watching the IPN bus before we call Start() or StartLoginInteractive(), + // or we could miss IPN notifications. + // + // In particular, if we're doing a force-reauth, we could miss the + // notification with the auth URL we should print for the user. The + // initial state could contain the auth URL, but only if IPN is in the + // NeedsLogin state -- sometimes it's in Starting, and we don't get the URL. + watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState) + if err != nil { + return err + } + defer watcher.Close() // Special case: bare "tailscale up" means to just start // running, if there's ever been a login. @@ -587,11 +597,8 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } - watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState) - if err != nil { - return err - } - defer watcher.Close() + running := make(chan bool, 1) + watchErr := make(chan error, 1) go func() { var printed bool // whether we've yet printed anything to stdout or stderr From 41a2aaf1da9be6c939058bdd32e253ab35373c42 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 16 Sep 2025 11:22:47 +0100 Subject: [PATCH 0423/1093] cmd/tailscale/cli: fix race condition in `up --force-reauth` This commit fixes a race condition where `tailscale up --force-reauth` would exit prematurely on an already-logged in device. 
Previously, the CLI would wait for IPN to report the "Running" state and then exit. However, this could happen before the new auth URL was printed, leading to two distinct issues: * **Without seamless key renewal:** The CLI could exit immediately after the `StartLoginInteractive` call, before IPN has time to switch into the "Starting" state or send a new auth URL back to the CLI. * **With seamless key renewal:** IPN stays in the "Running" state throughout the process, so the CLI exits immediately without performing any reauthentication. The fix is to change the CLI's exit condition. Instead of waiting for the "Running" state, if we're doing a `--force-reauth` we now wait to see the node key change, which is a more reliable indicator that a successful authentication has occurred. Updates tailscale/corp#31476 Updates tailscale/tailscale#17108 Signed-off-by: Alex Chan --- cmd/tailscale/cli/up.go | 67 ++++++++++++++++---------- tstest/integration/integration_test.go | 8 --- 2 files changed, 42 insertions(+), 33 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 0a15c8fb7b670..3c0883ec8ee04 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -446,6 +446,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE return fixTailscaledConnectError(err) } origAuthURL := st.AuthURL + origNodeKey := st.Self.PublicKey // printAuthURL reports whether we should print out the // provided auth URL from an IPN notify. @@ -597,13 +598,24 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } - running := make(chan bool, 1) + upComplete := make(chan bool, 1) watchErr := make(chan error, 1) go func() { var printed bool // whether we've yet printed anything to stdout or stderr var lastURLPrinted string + // If we're doing a force-reauth, we need to get two notifications: + // + // 1. IPN is running + // 2. 
The node key has changed + // + // These two notifications arrive separately, and trying to combine them + // has caused unexpected issues elsewhere in `tailscale up`. For now, we + // track them separately. + ipnIsRunning := false + waitingForKeyChange := upArgs.forceReauth + for { n, err := watcher.Next() if err != nil { @@ -614,29 +626,34 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE msg := *n.ErrMessage fatalf("backend error: %v\n", msg) } + if s := n.State; s != nil && *s == ipn.NeedsMachineAuth { + printed = true + if env.upArgs.json { + printUpDoneJSON(ipn.NeedsMachineAuth, "") + } else { + fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL(policyclient.Get())) + } + } if s := n.State; s != nil { - switch *s { - case ipn.NeedsMachineAuth: - printed = true - if env.upArgs.json { - printUpDoneJSON(ipn.NeedsMachineAuth, "") - } else { - fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL(policyclient.Get())) - } - case ipn.Running: - // Done full authentication process - if env.upArgs.json { - printUpDoneJSON(ipn.Running, "") - } else if printed { - // Only need to print an update if we printed the "please click" message earlier. - fmt.Fprintf(Stderr, "Success.\n") - } - select { - case running <- true: - default: - } - cancelWatch() + ipnIsRunning = *s == ipn.Running + } + if n.NetMap != nil && n.NetMap.NodeKey != origNodeKey { + waitingForKeyChange = false + } + if ipnIsRunning && !waitingForKeyChange { + // Done full authentication process + if env.upArgs.json { + printUpDoneJSON(ipn.Running, "") + } else if printed { + // Only need to print an update if we printed the "please click" message earlier. 
+ fmt.Fprintf(Stderr, "Success.\n") + } + select { + case upComplete <- true: + default: } + cancelWatch() + return } if url := n.BrowseToURL; url != nil { authURL := *url @@ -698,18 +715,18 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE timeoutCh = timeoutTimer.C } select { - case <-running: + case <-upComplete: return nil case <-watchCtx.Done(): select { - case <-running: + case <-upComplete: return nil default: } return watchCtx.Err() case err := <-watchErr: select { - case <-running: + case <-upComplete: return nil default: } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 6e5022edb29a4..fde4ff35a05ed 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -337,14 +337,6 @@ func TestOneNodeUpAuth(t *testing.T) { t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) { tstest.Parallel(t) - // TODO(alexc): This test is failing because of a bug in `tailscale up` where - // it waits for ipn to enter the "Running" state. If we're already logged in - // and running, this completes immediately, before we've had a chance to show - // the user the auth URL. - if tt.name == "up-with-force-reauth-after-login" { - t.Skip() - } - env := NewTestEnv(t, ConfigureControl( func(control *testcontrol.Server) { if tt.authKey != "" { From 8b3e88cd094c745f6e57f8ca53edb16792d3fee2 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 26 Sep 2025 08:06:39 -0700 Subject: [PATCH 0424/1093] wgengine/magicsock: fix rebind debouncing (#17282) On platforms that are causing EPIPE at a high frequency this is resulting in non-working connections, for example when Apple decides to forcefully close UDP sockets due to an unsoliced packet rejection in the firewall. Too frequent rebinds cause a failure to solicit the endpoints triggering the rebinds, that would normally happen via CallMeMaybe. 
Updates #14551 Updates tailscale/corp#25648 Signed-off-by: James Tucker --- wgengine/magicsock/magicsock.go | 1 + wgengine/magicsock/magicsock_test.go | 40 ++++++++++++++++++++-------- 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 0d8a1e53a42e6..e3cf249c55ebc 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1563,6 +1563,7 @@ func (c *Conn) maybeRebindOnError(err error) { if c.lastErrRebind.Load().Before(time.Now().Add(-5 * time.Second)) { c.logf("magicsock: performing rebind due to %q", reason) + c.lastErrRebind.Store(time.Now()) c.Rebind() go c.ReSTUN(reason) } else { diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index c6be9129db2cf..1f533ddef4628 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -27,6 +27,7 @@ import ( "sync/atomic" "syscall" "testing" + "testing/synctest" "time" "unsafe" @@ -3114,18 +3115,35 @@ func TestMaybeRebindOnError(t *testing.T) { } t.Run("no-frequent-rebind", func(t *testing.T) { - if runtime.GOOS != "plan9" { - err := fmt.Errorf("outer err: %w", syscall.EPERM) - conn := newTestConn(t) - defer conn.Close() - conn.lastErrRebind.Store(time.Now().Add(-1 * time.Second)) - before := metricRebindCalls.Value() - conn.maybeRebindOnError(err) - after := metricRebindCalls.Value() - if before != after { - t.Errorf("should not rebind within 5 seconds of last") + synctest.Test(t, func(t *testing.T) { + if runtime.GOOS != "plan9" { + err := fmt.Errorf("outer err: %w", syscall.EPERM) + conn := newTestConn(t) + defer conn.Close() + lastRebindTime := time.Now().Add(-1 * time.Second) + conn.lastErrRebind.Store(lastRebindTime) + before := metricRebindCalls.Value() + conn.maybeRebindOnError(err) + after := metricRebindCalls.Value() + if before != after { + t.Errorf("should not rebind within 5 seconds of last") + } + + // ensure that rebinds are 
performed and store an updated last + // rebind time. + time.Sleep(6 * time.Second) + + conn.maybeRebindOnError(err) + newTime := conn.lastErrRebind.Load() + if newTime == lastRebindTime { + t.Errorf("expected a rebind to occur") + } + if newTime.Sub(lastRebindTime) < 5*time.Second { + t.Errorf("expected at least 5 seconds between %s and %s", lastRebindTime, newTime) + } } - } + + }) }) } From 002ecb78d0c76d2e25bd7fb0b773f37c7c19dcb4 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 26 Sep 2025 07:35:54 +0100 Subject: [PATCH 0425/1093] all: don't rebind variables in for loops See https://tip.golang.org/wiki/LoopvarExperiment#does-this-mean-i-dont-have-to-write-x--x-in-my-loops-anymore Updates https://github.com/tailscale/tailscale/issues/11058 Signed-off-by: Alex Chan --- cmd/tailscale/cli/ffcomplete/internal/complete_test.go | 1 - net/netcheck/netcheck.go | 1 - tstest/archtest/qemu_test.go | 1 - tstest/clock_test.go | 9 --------- tstest/integration/integration_test.go | 1 - wgengine/magicsock/derp.go | 1 - wgengine/netstack/netstack.go | 4 ---- 7 files changed, 18 deletions(-) diff --git a/cmd/tailscale/cli/ffcomplete/internal/complete_test.go b/cmd/tailscale/cli/ffcomplete/internal/complete_test.go index 7e36b1bcd1437..c216bdeec500d 100644 --- a/cmd/tailscale/cli/ffcomplete/internal/complete_test.go +++ b/cmd/tailscale/cli/ffcomplete/internal/complete_test.go @@ -196,7 +196,6 @@ func TestComplete(t *testing.T) { // Run the tests. 
for _, test := range tests { - test := test name := strings.Join(test.args, "␣") if test.showFlags { name += "+flags" diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 169133ceb360b..726221675fb03 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -1073,7 +1073,6 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report continue } wg.Add(1) - rg := rg go func() { defer wg.Done() node := rg.Nodes[0] diff --git a/tstest/archtest/qemu_test.go b/tstest/archtest/qemu_test.go index 8b59ae5d9fee1..68ec38851069e 100644 --- a/tstest/archtest/qemu_test.go +++ b/tstest/archtest/qemu_test.go @@ -33,7 +33,6 @@ func TestInQemu(t *testing.T) { } inCI := cibuild.On() for _, arch := range arches { - arch := arch t.Run(arch.Goarch, func(t *testing.T) { t.Parallel() qemuUser := "qemu-" + arch.Qarch diff --git a/tstest/clock_test.go b/tstest/clock_test.go index d5816564a07f1..2ebaf752a1963 100644 --- a/tstest/clock_test.go +++ b/tstest/clock_test.go @@ -56,7 +56,6 @@ func TestClockWithDefinedStartTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -118,7 +117,6 @@ func TestClockWithDefaultStartTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -277,7 +275,6 @@ func TestClockSetStep(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -426,7 +423,6 @@ func TestClockAdvance(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -876,7 +872,6 @@ func TestSingleTicker(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -1377,7 +1372,6 @@ func TestSingleTimer(t *testing.T) { } for _, 
tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -1911,7 +1905,6 @@ func TestClockFollowRealTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() realTimeClock := NewClock(tt.realTimeClockOpts) @@ -2364,7 +2357,6 @@ func TestAfterFunc(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -2468,7 +2460,6 @@ func TestSince(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index fde4ff35a05ed..5e9f15798426f 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -333,7 +333,6 @@ func TestOneNodeUpAuth(t *testing.T) { tstest.Shard(t) for _, useSeamlessKeyRenewal := range []bool{true, false} { - tt := tt // subtests are run in parallel, rebind tt t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) { tstest.Parallel(t) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index b5fc36bb8aa9c..0d419841cfe4c 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -836,7 +836,6 @@ func (c *Conn) maybeCloseDERPsOnRebind(okayLocalIPs []netip.Prefix) { c.closeOrReconnectDERPLocked(regionID, "rebind-default-route-change") continue } - regionID := regionID dc := ad.c go func() { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 7381c515aba3c..94dbb6359d715 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -1898,7 +1898,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"option_unknown_received", ipStats.OptionUnknownReceived}, } for _, 
metric := range ipMetrics { - metric := metric m.Set("counter_ip_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1925,7 +1924,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"errors", fwdStats.Errors}, } for _, metric := range fwdMetrics { - metric := metric m.Set("counter_ip_forward_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1969,7 +1967,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"forward_max_in_flight_drop", tcpStats.ForwardMaxInFlightDrop}, } for _, metric := range tcpMetrics { - metric := metric m.Set("counter_tcp_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1996,7 +1993,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"checksum_errors", udpStats.ChecksumErrors}, } for _, metric := range udpMetrics { - metric := metric m.Set("counter_udp_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) From f2b8d37436d047e444efa6d728961664f0d5009b Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 26 Sep 2025 08:39:02 -0700 Subject: [PATCH 0426/1093] feature/tpm: only register HardwareAttestationKey on linux/windows (#17293) We can only register one key implementation per process. When running on macOS or Android, trying to register a separate key implementation from feature/tpm causes a panic. 
Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/tpm.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 0192247388330..e4c2b29e95971 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -14,6 +14,7 @@ import ( "log" "os" "path/filepath" + "runtime" "slices" "strings" "sync" @@ -40,10 +41,12 @@ func init() { hi.TPM = infoOnce() }) store.Register(store.TPMPrefix, newStore) - key.RegisterHardwareAttestationKeyFns( - func() key.HardwareAttestationKey { return &attestationKey{} }, - func() (key.HardwareAttestationKey, error) { return newAttestationKey() }, - ) + if runtime.GOOS == "linux" || runtime.GOOS == "windows" { + key.RegisterHardwareAttestationKeyFns( + func() key.HardwareAttestationKey { return &attestationKey{} }, + func() (key.HardwareAttestationKey, error) { return newAttestationKey() }, + ) + } } func info() *tailcfg.TPMInfo { From b3ae1cb0ccb73a0951cccdf4096e417c2739d455 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 25 Sep 2025 13:19:11 -0700 Subject: [PATCH 0427/1093] wgengine/netstack/gro: permit building without GRO This only saves ~32KB in the minimal linux/amd64 binary, but it's a step towards permitting not depending on gvisor for small builds. 
Updates #17283 Change-Id: Iae8da5e9465127de354dbcaf25e794a6832d891b Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 1 - cmd/tailscaled/deps_test.go | 11 +++++++++++ feature/buildfeatures/feature_gro_disabled.go | 13 +++++++++++++ feature/buildfeatures/feature_gro_enabled.go | 13 +++++++++++++ feature/featuretags/featuretags.go | 1 + wgengine/netstack/gro/gro_default.go | 2 +- .../netstack/gro/{gro_ios.go => gro_disabled.go} | 13 +++++++++---- wgengine/netstack/link_endpoint.go | 3 ++- wgengine/netstack/netstack.go | 2 +- 9 files changed, 51 insertions(+), 8 deletions(-) create mode 100644 feature/buildfeatures/feature_gro_disabled.go create mode 100644 feature/buildfeatures/feature_gro_enabled.go rename wgengine/netstack/gro/{gro_ios.go => gro_disabled.go} (59%) diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 0c3e08c15cfe5..f5d2831b62421 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -84,7 +84,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ - gvisor.dev/gvisor/pkg/tcpip/stack/gro from tailscale.com/wgengine/netstack/gro gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 24a39312433bd..92c6a872cad68 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -198,3 +198,14 @@ func TestOmitPortlist(t *testing.T) { }, }.Check(t) } + +func TestOmitGRO(t *testing.T) { + 
deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_gro,ts_include_cli", + BadDeps: map[string]string{ + "gvisor.dev/gvisor/pkg/tcpip/stack/gro": "unexpected dep with ts_omit_gro", + }, + }.Check(t) +} diff --git a/feature/buildfeatures/feature_gro_disabled.go b/feature/buildfeatures/feature_gro_disabled.go new file mode 100644 index 0000000000000..ffbd0da2e3e4f --- /dev/null +++ b/feature/buildfeatures/feature_gro_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_gro + +package buildfeatures + +// HasGRO is whether the binary was built with support for modular feature "Generic Receive Offload support (performance)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_gro" build tag. +// It's a const so it can be used for dead code elimination. +const HasGRO = false diff --git a/feature/buildfeatures/feature_gro_enabled.go b/feature/buildfeatures/feature_gro_enabled.go new file mode 100644 index 0000000000000..e2c8024e07815 --- /dev/null +++ b/feature/buildfeatures/feature_gro_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_gro + +package buildfeatures + +// HasGRO is whether the binary was built with support for modular feature "Generic Receive Offload support (performance)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_gro" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasGRO = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index d1752a80ca8ae..1a1fcf2723d63 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -106,6 +106,7 @@ var Features = map[FeatureTag]FeatureMeta{ }, "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, "drive": {"Drive", "Tailscale Drive (file server) support", nil}, + "gro": {"GRO", "Generic Receive Offload support (performance)", nil}, "kube": {"Kube", "Kubernetes integration", nil}, "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, diff --git a/wgengine/netstack/gro/gro_default.go b/wgengine/netstack/gro/gro_default.go index f92ee15ecac15..c70e19f7c5861 100644 --- a/wgengine/netstack/gro/gro_default.go +++ b/wgengine/netstack/gro/gro_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios +//go:build !ios && !ts_omit_gro package gro diff --git a/wgengine/netstack/gro/gro_ios.go b/wgengine/netstack/gro/gro_disabled.go similarity index 59% rename from wgengine/netstack/gro/gro_ios.go rename to wgengine/netstack/gro/gro_disabled.go index 627b42d7e5cfd..d7ffbd9139d99 100644 --- a/wgengine/netstack/gro/gro_ios.go +++ b/wgengine/netstack/gro/gro_disabled.go @@ -1,22 +1,27 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios +//go:build ios || ts_omit_gro package gro import ( - "gvisor.dev/gvisor/pkg/tcpip/stack" + "runtime" + "tailscale.com/net/packet" ) type GRO struct{} func NewGRO() *GRO { - panic("unsupported on iOS") + if runtime.GOOS == "ios" { + panic("unsupported on iOS") + } + panic("GRO disabled in build") + } -func (g *GRO) SetDispatcher(_ stack.NetworkDispatcher) {} +func (g *GRO) SetDispatcher(any) {} func (g *GRO) Enqueue(_ 
*packet.Parsed) {} diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 39da64b5503cc..50e8d755aa5a9 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -10,6 +10,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/packet" "tailscale.com/types/ipproto" "tailscale.com/wgengine/netstack/gro" @@ -133,7 +134,7 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported // If gro allocates a *gro.GRO it will have l's stack.NetworkDispatcher set via // SetDispatcher(). func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { - if l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { + if !buildfeatures.HasGRO || l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { // IPv6 may have extension headers preceding a TCP header, but we trade // for a fast path and assume p cannot be coalesced in such a case. l.injectInbound(p) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 94dbb6359d715..0e2712c675657 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -344,7 +344,7 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi } supportedGSOKind := stack.GSONotSupported supportedGROKind := groNotSupported - if runtime.GOOS == "linux" { + if runtime.GOOS == "linux" && buildfeatures.HasGRO { // TODO(jwhited): add Windows support https://github.com/tailscale/corp/issues/21874 supportedGROKind = tcpGROSupported supportedGSOKind = stack.HostGSOSupported From f715ee2be97db4cbb976aaae5d8d9ea530be531b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 25 Sep 2025 18:48:14 -0700 Subject: [PATCH 0428/1093] cmd/tailscaled: start implementing ts_omit_netstack Baby steps. This permits building without much of gvisor, but not all of it. 
Updates #17283 Change-Id: I8433146e259918cc901fe86b4ea29be22075b32c Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 36 ++------- cmd/tailscaled/netstack.go | 75 ++++++++++++++++++ cmd/tailscaled/tailscaled.go | 76 +++++-------------- feature/featuretags/featuretags.go | 12 ++- tsd/tsd.go | 4 + .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + .../tailscaled_deps_test_windows.go | 1 + wgengine/netstack/gro/gro.go | 2 + wgengine/netstack/gro/netstack_disabled.go | 10 +++ wgengine/netstack/link_endpoint.go | 2 +- wgengine/netstack/netstack.go | 9 ++- 14 files changed, 140 insertions(+), 91 deletions(-) create mode 100644 cmd/tailscaled/netstack.go create mode 100644 wgengine/netstack/gro/netstack_disabled.go diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index f5d2831b62421..3699ac4e7d17a 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -14,7 +14,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header github.com/google/nftables from tailscale.com/util/linuxfw 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ @@ -63,36 +63,18 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context+ gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip+ 
gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer+ - 💣 gvisor.dev/gvisor/pkg/sleep from gvisor.dev/gvisor/pkg/tcpip/transport/tcp 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack - gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ - gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack + gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ - gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack+ - gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/header/parse+ - gvisor.dev/gvisor/pkg/tcpip/header/parse from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/internal/tcp from gvisor.dev/gvisor/pkg/tcpip/transport/tcp - gvisor.dev/gvisor/pkg/tcpip/network/hash from gvisor.dev/gvisor/pkg/tcpip/network/ipv4 - gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/network/internal/ip from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast from gvisor.dev/gvisor/pkg/tcpip/network/ipv4+ - gvisor.dev/gvisor/pkg/tcpip/network/ipv4 from tailscale.com/wgengine/netstack - gvisor.dev/gvisor/pkg/tcpip/network/ipv6 from tailscale.com/wgengine/netstack - gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack+ + gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/ports+ + gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack gvisor.dev/gvisor/pkg/tcpip/seqnum from 
gvisor.dev/gvisor/pkg/tcpip/header+ - 💣 gvisor.dev/gvisor/pkg/tcpip/stack from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ - gvisor.dev/gvisor/pkg/tcpip/transport from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ - gvisor.dev/gvisor/pkg/tcpip/transport/icmp from tailscale.com/wgengine/netstack - gvisor.dev/gvisor/pkg/tcpip/transport/internal/network from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ - gvisor.dev/gvisor/pkg/tcpip/transport/internal/noop from gvisor.dev/gvisor/pkg/tcpip/transport/raw - gvisor.dev/gvisor/pkg/tcpip/transport/packet from gvisor.dev/gvisor/pkg/tcpip/transport/raw - gvisor.dev/gvisor/pkg/tcpip/transport/raw from gvisor.dev/gvisor/pkg/tcpip/transport/icmp+ - 💣 gvisor.dev/gvisor/pkg/tcpip/transport/tcp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ + 💣 gvisor.dev/gvisor/pkg/tcpip/stack from tailscale.com/net/tstun gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack from gvisor.dev/gvisor/pkg/tcpip/stack - gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal @@ -182,7 +164,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/cmd/tailscaled+ tailscale.com/posture from tailscale.com/ipn/ipnlocal - tailscale.com/proxymap from tailscale.com/tsd+ + tailscale.com/proxymap from tailscale.com/tsd tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ @@ -263,7 +245,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ 
tailscale.com/wgengine/netlog from tailscale.com/wgengine - tailscale.com/wgengine/netstack from tailscale.com/cmd/tailscaled tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ @@ -317,7 +298,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de cmp from encoding/json+ compress/flate from compress/gzip compress/gzip from golang.org/x/net/http2+ - container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ @@ -393,7 +373,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/json from expvar+ encoding/pem from crypto/tls+ errors from archive/tar+ - expvar from tailscale.com/cmd/tailscaled+ + expvar from tailscale.com/health+ flag from tailscale.com/cmd/tailscaled+ fmt from archive/tar+ hash from crypto+ diff --git a/cmd/tailscaled/netstack.go b/cmd/tailscaled/netstack.go new file mode 100644 index 0000000000000..c0b34ed411c78 --- /dev/null +++ b/cmd/tailscaled/netstack.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package main + +import ( + "context" + "expvar" + "net" + "net/netip" + + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/wgengine/netstack" +) + +func init() { + hookNewNetstack.Set(newNetstack) +} + +func newNetstack(logf logger.Logf, sys *tsd.System, onlyNetstack bool) (tsd.NetstackImpl, error) { + ns, err := netstack.Create(logf, + sys.Tun.Get(), + sys.Engine.Get(), + sys.MagicSock.Get(), + sys.Dialer.Get(), + sys.DNSManager.Get(), + sys.ProxyMapper(), + ) + if err != nil { + return nil, err + } + // Only register debug info if we have a debug mux + if debugMux != nil { + expvar.Publish("netstack", ns.ExpVar()) + } + + sys.Set(ns) + 
ns.ProcessLocalIPs = onlyNetstack + ns.ProcessSubnets = onlyNetstack || handleSubnetsInNetstack() + + dialer := sys.Dialer.Get() // must be set by caller already + + if onlyNetstack { + e := sys.Engine.Get() + dialer.UseNetstackForIP = func(ip netip.Addr) bool { + _, ok := e.PeerForIP(ip) + return ok + } + dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { + // Note: don't just return ns.DialContextTCP or we'll return + // *gonet.TCPConn(nil) instead of a nil interface which trips up + // callers. + tcpConn, err := ns.DialContextTCP(ctx, dst) + if err != nil { + return nil, err + } + return tcpConn, nil + } + dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { + // Note: don't just return ns.DialContextUDP or we'll return + // *gonet.UDPConn(nil) instead of a nil interface which trips up + // callers. + udpConn, err := ns.DialContextUDP(ctx, dst) + if err != nil { + return nil, err + } + return udpConn, nil + } + } + + return ns, nil +} diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 636627539ef92..d01af199cfb08 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -13,14 +13,12 @@ package main // import "tailscale.com/cmd/tailscaled" import ( "context" "errors" - "expvar" "flag" "fmt" "log" "net" "net/http" "net/http/pprof" - "net/netip" "os" "os/signal" "path/filepath" @@ -34,6 +32,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/envknob" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -65,7 +64,6 @@ import ( "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" - "tailscale.com/wgengine/netstack" "tailscale.com/wgengine/router" ) @@ -598,6 +596,10 @@ func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, return nil } +var ( + hookNewNetstack 
feature.Hook[func(_ logger.Logf, _ *tsd.System, onlyNetstack bool) (tsd.NetstackImpl, error)] +) + func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) (_ *ipnlocal.LocalBackend, retErr error) { if logPol != nil { logPol.Logtail.SetNetMon(sys.NetMon.Get()) @@ -615,6 +617,9 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if err != nil { return nil, fmt.Errorf("createEngine: %w", err) } + if onlyNetstack && !buildfeatures.HasNetstack { + return nil, errors.New("userspace-networking support is not compiled in to this binary") + } if debugMux != nil { if ms, ok := sys.MagicSock.GetOK(); ok { debugMux.HandleFunc("/debug/magicsock", ms.ServeHTTPDebug) @@ -622,41 +627,14 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID go runDebugServer(logf, debugMux, args.debug) } - ns, err := newNetstack(logf, sys) - if err != nil { - return nil, fmt.Errorf("newNetstack: %w", err) - } - sys.Set(ns) - ns.ProcessLocalIPs = onlyNetstack - ns.ProcessSubnets = onlyNetstack || handleSubnetsInNetstack() - - if onlyNetstack { - e := sys.Engine.Get() - dialer.UseNetstackForIP = func(ip netip.Addr) bool { - _, ok := e.PeerForIP(ip) - return ok - } - dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { - // Note: don't just return ns.DialContextTCP or we'll return - // *gonet.TCPConn(nil) instead of a nil interface which trips up - // callers. - tcpConn, err := ns.DialContextTCP(ctx, dst) - if err != nil { - return nil, err - } - return tcpConn, nil - } - dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { - // Note: don't just return ns.DialContextUDP or we'll return - // *gonet.UDPConn(nil) instead of a nil interface which trips up - // callers. 
- udpConn, err := ns.DialContextUDP(ctx, dst) - if err != nil { - return nil, err - } - return udpConn, nil + var ns tsd.NetstackImpl // or nil if not linked in + if newNetstack, ok := hookNewNetstack.GetOk(); ok { + ns, err = newNetstack(logf, sys, onlyNetstack) + if err != nil { + return nil, fmt.Errorf("newNetstack: %w", err) } } + if startProxy != nil { go startProxy(logf, dialer) } @@ -687,8 +665,11 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if f, ok := hookConfigureWebClient.GetOk(); ok { f(lb) } - if err := ns.Start(lb); err != nil { - log.Fatalf("failed to start netstack: %v", err) + + if ns != nil { + if err := ns.Start(lb); err != nil { + log.Fatalf("failed to start netstack: %v", err) + } } return lb, nil } @@ -868,25 +849,6 @@ func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { } } -func newNetstack(logf logger.Logf, sys *tsd.System) (*netstack.Impl, error) { - ret, err := netstack.Create(logf, - sys.Tun.Get(), - sys.Engine.Get(), - sys.MagicSock.Get(), - sys.Dialer.Get(), - sys.DNSManager.Get(), - sys.ProxyMapper(), - ) - if err != nil { - return nil, err - } - // Only register debug info if we have a debug mux - if debugMux != nil { - expvar.Publish("netstack", ret.ExpVar()) - } - return ret, nil -} - var beChildFunc = beChild func beChild(args []string) error { diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 1a1fcf2723d63..1db377277085b 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -106,10 +106,14 @@ var Features = map[FeatureTag]FeatureMeta{ }, "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, "drive": {"Drive", "Tailscale Drive (file server) support", nil}, - "gro": {"GRO", "Generic Receive Offload support (performance)", nil}, - "kube": {"Kube", "Kubernetes integration", nil}, - "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of 
/etc/resolv.conf)", nil}, - "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, + "gro": { + Sym: "GRO", + Desc: "Generic Receive Offload support (performance)", + Deps: []FeatureTag{"netstack"}, + }, + "kube": {"Kube", "Kubernetes integration", nil}, + "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, + "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, "outboundproxy": { Sym: "OutboundProxy", Desc: "Outbound localhost HTTP/SOCK5 proxy support", diff --git a/tsd/tsd.go b/tsd/tsd.go index 263b8de704cbb..8223254dae942 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -98,10 +98,14 @@ func NewSystemWithBus(bus *eventbus.Bus) *System { return sys } +// LocalBackend is a fake name for *ipnlocal.LocalBackend to avoid an import cycle. +type LocalBackend = any + // NetstackImpl is the interface that *netstack.Impl implements. // It's an interface for circular dependency reasons: netstack.Impl // references LocalBackend, and LocalBackend has a tsd.System. 
type NetstackImpl interface { + Start(LocalBackend) error UpdateNetstackIPs(*netmap.NetworkMap) } diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index b025e3a4304bb..7a26300e56e0a 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index b025e3a4304bb..7a26300e56e0a 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index b025e3a4304bb..7a26300e56e0a 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index b025e3a4304bb..7a26300e56e0a 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -18,6 +18,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ 
"tailscale.com/envknob" _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 32f95357dc039..08c8c27fff6e4 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -26,6 +26,7 @@ import ( _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" diff --git a/wgengine/netstack/gro/gro.go b/wgengine/netstack/gro/gro.go index 654d170566f0d..c8e5e56e1acb5 100644 --- a/wgengine/netstack/gro/gro.go +++ b/wgengine/netstack/gro/gro.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_netstack + // Package gro implements GRO for the receive (write) path into gVisor. 
package gro diff --git a/wgengine/netstack/gro/netstack_disabled.go b/wgengine/netstack/gro/netstack_disabled.go new file mode 100644 index 0000000000000..a0f56fa4499cf --- /dev/null +++ b/wgengine/netstack/gro/netstack_disabled.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netstack + +package gro + +func RXChecksumOffload(any) any { + panic("unreachable") +} diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 50e8d755aa5a9..260b3196ab2fc 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -187,7 +187,7 @@ func (l *linkEndpoint) injectInbound(p *packet.Parsed) { l.mu.RLock() d := l.dispatcher l.mu.RUnlock() - if d == nil { + if d == nil || !buildfeatures.HasNetstack { return } pkt := gro.RXChecksumOffload(p) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 0e2712c675657..c2b5d8a3266c7 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -578,9 +578,16 @@ func (ns *Impl) decrementInFlightTCPForward(tei stack.TransportEndpointID, remot } } +// LocalBackend is a fake name for *ipnlocal.LocalBackend to avoid an import cycle. +type LocalBackend = any + // Start sets up all the handlers so netstack can start working. Implements // wgengine.FakeImpl. 
-func (ns *Impl) Start(lb *ipnlocal.LocalBackend) error { +func (ns *Impl) Start(b LocalBackend) error { + if b == nil { + panic("nil LocalBackend interface") + } + lb := b.(*ipnlocal.LocalBackend) if lb == nil { panic("nil LocalBackend") } From 0b994ef2fe398dd9c827a2418d48f224b5d63303 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 25 Sep 2025 18:15:58 -0500 Subject: [PATCH 0429/1093] docs/windows/policy: add ADMX policy definition for AllowTailscaledRestart Updates tailscale/corp#32675 Signed-off-by: Nick Khyl --- docs/windows/policy/en-US/tailscale.adml | 9 +++++++++ docs/windows/policy/tailscale.admx | 14 ++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 2e143d49c9c6c..58e13be19ca98 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -20,6 +20,7 @@ Tailscale version 1.82.0 and later Tailscale version 1.84.0 and later Tailscale version 1.86.0 and later + Tailscale version 1.90.0 and later Tailscale UI customization Settings @@ -121,6 +122,14 @@ If you enable this policy setting, you can specify how long Tailscale will wait If you disable or don't configure this policy setting, Tailscale will only reconnect if a user chooses to or if required by a different policy setting. 
Refer to https://pkg.go.dev/time#ParseDuration for information about the supported duration strings.]]> + Allow users to restart tailscaled + Allow Local Network Access when an Exit Node is in use + + + @@ -187,6 +191,16 @@ + + + + + + + + + + From 09a33b926292036c2bf4bb7754ac69fb727c1c15 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 11:15:05 -0700 Subject: [PATCH 0430/1093] net/tstun: support ts_omit_netstack Updates #17283 Change-Id: I1134bb15b3e39a3fa26c0621512aae9181de2210 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 13 ++---- net/tstun/netstack_disabled.go | 69 ++++++++++++++++++++++++++++++ net/tstun/netstack_enabled.go | 22 ++++++++++ net/tstun/wrap.go | 34 ++++++++++----- 4 files changed, 119 insertions(+), 19 deletions(-) create mode 100644 net/tstun/netstack_disabled.go create mode 100644 net/tstun/netstack_enabled.go diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 3699ac4e7d17a..0ec45d465554b 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -61,20 +61,15 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+ gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context+ - gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip+ - gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer+ + gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip + gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack gvisor.dev/gvisor/pkg/tcpip from 
gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ - gvisor.dev/gvisor/pkg/tcpip/hash/jenkins from gvisor.dev/gvisor/pkg/tcpip/stack - gvisor.dev/gvisor/pkg/tcpip/header from gvisor.dev/gvisor/pkg/tcpip/ports+ - gvisor.dev/gvisor/pkg/tcpip/ports from gvisor.dev/gvisor/pkg/tcpip/stack - gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header+ - 💣 gvisor.dev/gvisor/pkg/tcpip/stack from tailscale.com/net/tstun - gvisor.dev/gvisor/pkg/tcpip/transport/tcpconntrack from gvisor.dev/gvisor/pkg/tcpip/stack + gvisor.dev/gvisor/pkg/tcpip/header from tailscale.com/net/packet/checksum+ + gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal diff --git a/net/tstun/netstack_disabled.go b/net/tstun/netstack_disabled.go new file mode 100644 index 0000000000000..c1266b30559d4 --- /dev/null +++ b/net/tstun/netstack_disabled.go @@ -0,0 +1,69 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netstack + +package tstun + +type netstack_PacketBuffer struct { + GSOOptions netstack_GSO +} + +func (*netstack_PacketBuffer) DecRef() { panic("unreachable") } +func (*netstack_PacketBuffer) Size() int { panic("unreachable") } + +type netstack_GSOType int + +const ( + netstack_GSONone netstack_GSOType = iota + netstack_GSOTCPv4 + netstack_GSOTCPv6 + netstack_GSOGvisor +) + +type netstack_GSO struct { + // Type is one of GSONone, GSOTCPv4, etc. + Type netstack_GSOType + // NeedsCsum is set if the checksum offload is enabled. + NeedsCsum bool + // CsumOffset is offset after that to place checksum. + CsumOffset uint16 + + // Mss is maximum segment size. + MSS uint16 + // L3Len is L3 (IP) header length. + L3HdrLen uint16 + + // MaxSize is maximum GSO packet size. 
+ MaxSize uint32 +} + +func (p *netstack_PacketBuffer) NetworkHeader() slicer { + panic("unreachable") +} + +func (p *netstack_PacketBuffer) TransportHeader() slicer { + panic("unreachable") +} + +func (p *netstack_PacketBuffer) ToBuffer() netstack_Buffer { panic("unreachable") } + +func (p *netstack_PacketBuffer) Data() asRanger { + panic("unreachable") +} + +type asRanger struct{} + +func (asRanger) AsRange() toSlicer { panic("unreachable") } + +type toSlicer struct{} + +func (toSlicer) ToSlice() []byte { panic("unreachable") } + +type slicer struct{} + +func (s slicer) Slice() []byte { panic("unreachable") } + +type netstack_Buffer struct{} + +func (netstack_Buffer) Flatten() []byte { panic("unreachable") } diff --git a/net/tstun/netstack_enabled.go b/net/tstun/netstack_enabled.go new file mode 100644 index 0000000000000..8fc1a2e20e35a --- /dev/null +++ b/net/tstun/netstack_enabled.go @@ -0,0 +1,22 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package tstun + +import ( + "gvisor.dev/gvisor/pkg/tcpip/stack" +) + +type ( + netstack_PacketBuffer = stack.PacketBuffer + netstack_GSO = stack.GSO +) + +const ( + netstack_GSONone = stack.GSONone + netstack_GSOTCPv4 = stack.GSOTCPv4 + netstack_GSOTCPv6 = stack.GSOTCPv6 + netstack_GSOGvisor = stack.GSOGvisor +) diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 4c88c7eefead3..c94844c90a28e 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -22,7 +22,6 @@ import ( "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun" "go4.org/mem" - "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" "tailscale.com/feature/buildfeatures" tsmetrics "tailscale.com/metrics" @@ -229,7 +228,7 @@ func registerMetrics(reg *usermetric.Registry) *metrics { type tunInjectedRead struct { // Only one of packet or data should be set, and are read in that order of // precedence. 
- packet *stack.PacketBuffer + packet *netstack_PacketBuffer data []byte } @@ -999,7 +998,10 @@ const ( minTCPHeaderSize = 20 ) -func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { +func stackGSOToTunGSO(pkt []byte, gso netstack_GSO) (tun.GSOOptions, error) { + if !buildfeatures.HasNetstack { + panic("unreachable") + } options := tun.GSOOptions{ CsumStart: gso.L3HdrLen, CsumOffset: gso.CsumOffset, @@ -1007,12 +1009,12 @@ func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { NeedsCsum: gso.NeedsCsum, } switch gso.Type { - case stack.GSONone: + case netstack_GSONone: options.GSOType = tun.GSONone return options, nil - case stack.GSOTCPv4: + case netstack_GSOTCPv4: options.GSOType = tun.GSOTCPv4 - case stack.GSOTCPv6: + case netstack_GSOTCPv6: options.GSOType = tun.GSOTCPv6 default: return tun.GSOOptions{}, fmt.Errorf("unsupported gVisor GSOType: %v", gso.Type) @@ -1035,7 +1037,10 @@ func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { // both before and after partial checksum updates where later checksum // offloading still expects a partial checksum. // TODO(jwhited): plumb partial checksum awareness into net/packet/checksum. -func invertGSOChecksum(pkt []byte, gso stack.GSO) { +func invertGSOChecksum(pkt []byte, gso netstack_GSO) { + if !buildfeatures.HasNetstack { + panic("unreachable") + } if gso.NeedsCsum != true { return } @@ -1049,10 +1054,13 @@ func invertGSOChecksum(pkt []byte, gso stack.GSO) { // injectedRead handles injected reads, which bypass filters. 
func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []int, offset int) (n int, err error) { - var gso stack.GSO + var gso netstack_GSO pkt := outBuffs[0][offset:] if res.packet != nil { + if !buildfeatures.HasNetstack { + panic("unreachable") + } bufN := copy(pkt, res.packet.NetworkHeader().Slice()) bufN += copy(pkt[bufN:], res.packet.TransportHeader().Slice()) bufN += copy(pkt[bufN:], res.packet.Data().AsRange().ToSlice()) @@ -1298,7 +1306,10 @@ func (t *Wrapper) SetJailedFilter(filt *filter.Filter) { // // This path is typically used to deliver synthesized packets to the // host networking stack. -func (t *Wrapper) InjectInboundPacketBuffer(pkt *stack.PacketBuffer, buffs [][]byte, sizes []int) error { +func (t *Wrapper) InjectInboundPacketBuffer(pkt *netstack_PacketBuffer, buffs [][]byte, sizes []int) error { + if !buildfeatures.HasNetstack { + panic("unreachable") + } buf := buffs[0][PacketStartOffset:] bufN := copy(buf, pkt.NetworkHeader().Slice()) @@ -1437,7 +1448,10 @@ func (t *Wrapper) InjectOutbound(pkt []byte) error { // InjectOutboundPacketBuffer logically behaves as InjectOutbound. It takes ownership of one // reference count on the packet, and the packet may be mutated. The packet refcount will be // decremented after the injected buffer has been read. 
-func (t *Wrapper) InjectOutboundPacketBuffer(pkt *stack.PacketBuffer) error { +func (t *Wrapper) InjectOutboundPacketBuffer(pkt *netstack_PacketBuffer) error { + if !buildfeatures.HasNetstack { + panic("unreachable") + } size := pkt.Size() if size > MaxPacketSize { pkt.DecRef() From e7a79ef5f17a623bf804480b2a118a2487348560 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 12:29:36 -0700 Subject: [PATCH 0431/1093] tstest/integration: deflake TestC2NDebugNetmap, disable service collection Fixes #17298 Change-Id: I83459fa1dad583c32395a80548510bc7ec035c41 Signed-off-by: Brad Fitzpatrick --- tstest/integration/integration_test.go | 4 +++- tstest/integration/testcontrol/testcontrol.go | 7 ++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 5e9f15798426f..92f7441b08ceb 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1784,7 +1784,9 @@ func TestPeerRelayPing(t *testing.T) { func TestC2NDebugNetmap(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := NewTestEnv(t) + env := NewTestEnv(t, ConfigureControl(func(s *testcontrol.Server) { + s.CollectServices = "false" + })) var testNodes []*TestNode var nodes []*tailcfg.Node diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 1d3b99f7a217d..2c6ac1d6d283b 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -35,6 +35,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/httpm" "tailscale.com/util/mak" @@ -69,6 +70,10 @@ type Server struct { // DefaultNodeCapabilities overrides the capability map sent to each client. 
DefaultNodeCapabilities *tailcfg.NodeCapMap + // CollectServices, if non-empty, sets whether the control server asks + // for service updates. If empty, the default is "true". + CollectServices opt.Bool + // ExplicitBaseURL or HTTPTestServer must be set. ExplicitBaseURL string // e.g. "http://127.0.0.1:1234" with no trailing URL HTTPTestServer *httptest.Server // if non-nil, used to get BaseURL @@ -1096,7 +1101,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, Node: node, DERPMap: s.DERPMap, Domain: domain, - CollectServices: "true", + CollectServices: cmp.Or(s.CollectServices, "true"), PacketFilter: packetFilterWithIngress(s.PeerRelayGrants), DNSConfig: dns, ControlTime: &t, From e766adf71f86fcd31651a8e1f89272a0ca50bc01 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 12:15:02 -0700 Subject: [PATCH 0432/1093] net/tstun: use ts_omit_gro in another place I missed earlier I didn't notice this GRO code during b3ae1cb0ccb73a0951c. Updates #17283 Change-Id: I95c06c19e489097fc8d61180dc57ae4b8a69c58c Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 2 +- net/tstun/wrap_linux.go | 2 ++ net/tstun/wrap_noop.go | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 0ec45d465554b..c57d8a94bddd2 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -68,7 +68,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/header+ 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ - gvisor.dev/gvisor/pkg/tcpip/header from tailscale.com/net/packet/checksum+ + gvisor.dev/gvisor/pkg/tcpip/header from tailscale.com/net/packet/checksum gvisor.dev/gvisor/pkg/tcpip/seqnum from 
gvisor.dev/gvisor/pkg/tcpip/header gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version diff --git a/net/tstun/wrap_linux.go b/net/tstun/wrap_linux.go index 136ddfe1efb2d..7498f107b5fda 100644 --- a/net/tstun/wrap_linux.go +++ b/net/tstun/wrap_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_gro + package tstun import ( diff --git a/net/tstun/wrap_noop.go b/net/tstun/wrap_noop.go index c743072ca6ba2..8ad04bafe94c1 100644 --- a/net/tstun/wrap_noop.go +++ b/net/tstun/wrap_noop.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || ts_omit_gro package tstun From afe909664b0529a25760395feaaa7f3fc0a0cfd1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 12:38:49 -0700 Subject: [PATCH 0433/1093] types/opt: de-weird the API a bit with new True and False consts Updates #cleanup Change-Id: I15d8d840877d43e2b884d42354b4eb156094df7d Signed-off-by: Brad Fitzpatrick --- tstest/integration/integration_test.go | 2 +- tstest/integration/testcontrol/testcontrol.go | 2 +- types/opt/bool.go | 40 +++++++++++++------ 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 92f7441b08ceb..fa148abbec8a7 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1785,7 +1785,7 @@ func TestC2NDebugNetmap(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) env := NewTestEnv(t, ConfigureControl(func(s *testcontrol.Server) { - s.CollectServices = "false" + s.CollectServices = opt.False })) var testNodes []*TestNode diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 2c6ac1d6d283b..ac7804918f6cc 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ 
b/tstest/integration/testcontrol/testcontrol.go @@ -1101,7 +1101,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, Node: node, DERPMap: s.DERPMap, Domain: domain, - CollectServices: cmp.Or(s.CollectServices, "true"), + CollectServices: cmp.Or(s.CollectServices, opt.True), PacketFilter: packetFilterWithIngress(s.PeerRelayGrants), DNSConfig: dns, ControlTime: &t, diff --git a/types/opt/bool.go b/types/opt/bool.go index 0a3ee67ad2a6e..e2fd6a054ff0d 100644 --- a/types/opt/bool.go +++ b/types/opt/bool.go @@ -18,6 +18,22 @@ import ( // field without it being dropped. type Bool string +const ( + // True is the encoding of an explicit true. + True = Bool("true") + + // False is the encoding of an explicit false. + False = Bool("false") + + // ExplicitlyUnset is the encoding used by a null + // JSON value. It is a synonym for the empty string. + ExplicitlyUnset = Bool("unset") + + // Empty means the Bool is unset and it's neither + // true nor false. + Empty = Bool("") +) + // NewBool constructs a new Bool value equal to b. The returned Bool is set, // unless Set("") or Clear() methods are called. 
func NewBool(b bool) Bool { @@ -50,16 +66,16 @@ func (b *Bool) Scan(src any) error { switch src := src.(type) { case bool: if src { - *b = "true" + *b = True } else { - *b = "false" + *b = False } return nil case int64: if src == 0 { - *b = "false" + *b = False } else { - *b = "true" + *b = True } return nil default: @@ -75,18 +91,18 @@ func (b Bool) EqualBool(v bool) bool { } var ( - trueBytes = []byte("true") - falseBytes = []byte("false") + trueBytes = []byte(True) + falseBytes = []byte(False) nullBytes = []byte("null") ) func (b Bool) MarshalJSON() ([]byte, error) { switch b { - case "true": + case True: return trueBytes, nil - case "false": + case False: return falseBytes, nil - case "", "unset": + case Empty, ExplicitlyUnset: return nullBytes, nil } return nil, fmt.Errorf("invalid opt.Bool value %q", string(b)) @@ -95,11 +111,11 @@ func (b Bool) MarshalJSON() ([]byte, error) { func (b *Bool) UnmarshalJSON(j []byte) error { switch string(j) { case "true": - *b = "true" + *b = True case "false": - *b = "false" + *b = False case "null": - *b = "unset" + *b = ExplicitlyUnset default: return fmt.Errorf("invalid opt.Bool value %q", j) } From c95fdb0f8a94f53566637af6b0cdef2ef554b2d8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 11:57:33 -0700 Subject: [PATCH 0434/1093] net/packet/checksum: copy the gvisor checksum, remove the dep As part of making Tailscale's gvisor dependency optional for small builds, this was one of the last places left that depended on gvisor. Just copy the couple functions were were using. 
Updates #17283 Change-Id: Id2bc07ba12039afe4c8a3f0b68f4d76d1863bbfe Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 9 +-- net/packet/checksum/checksum.go | 122 ++++++++++++++++++++++++++--- 2 files changed, 113 insertions(+), 18 deletions(-) diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index c57d8a94bddd2..3a7469c0febeb 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -14,7 +14,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header github.com/google/nftables from tailscale.com/util/linuxfw 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ @@ -56,7 +55,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de go4.org/netipx from tailscale.com/ipn/ipnlocal+ gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer - 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip+ + 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/refs 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+ gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log @@ -66,10 +65,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state 💣 gvisor.dev/gvisor/pkg/sync from 
gvisor.dev/gvisor/pkg/atomicbitops+ - gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/header+ - 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ - gvisor.dev/gvisor/pkg/tcpip/header from tailscale.com/net/packet/checksum - gvisor.dev/gvisor/pkg/tcpip/seqnum from gvisor.dev/gvisor/pkg/tcpip/header + gvisor.dev/gvisor/pkg/tcpip from tailscale.com/ipn/ipnlocal + 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal diff --git a/net/packet/checksum/checksum.go b/net/packet/checksum/checksum.go index 547ea3a3577ed..4b5b82174a22f 100644 --- a/net/packet/checksum/checksum.go +++ b/net/packet/checksum/checksum.go @@ -8,8 +8,6 @@ import ( "encoding/binary" "net/netip" - "gvisor.dev/gvisor/pkg/tcpip" - "gvisor.dev/gvisor/pkg/tcpip/header" "tailscale.com/net/packet" "tailscale.com/types/ipproto" ) @@ -88,13 +86,13 @@ func updateV4PacketChecksums(p *packet.Parsed, old, new netip.Addr) { tr := p.Transport() switch p.IPProto { case ipproto.UDP, ipproto.DCCP: - if len(tr) < header.UDPMinimumSize { + if len(tr) < minUDPSize { // Not enough space for a UDP header. return } updateV4Checksum(tr[6:8], o4[:], n4[:]) case ipproto.TCP: - if len(tr) < header.TCPMinimumSize { + if len(tr) < minTCPSize { // Not enough space for a TCP header. return } @@ -112,34 +110,60 @@ func updateV4PacketChecksums(p *packet.Parsed, old, new netip.Addr) { } } +const ( + minUDPSize = 8 + minTCPSize = 20 + minICMPv6Size = 8 + minIPv6Header = 40 + + offsetICMPv6Checksum = 2 + offsetUDPChecksum = 6 + offsetTCPChecksum = 16 +) + // updateV6PacketChecksums updates the checksums in the packet buffer. // p is modified in place. // If p.IPProto is unknown, no checksums are updated. 
func updateV6PacketChecksums(p *packet.Parsed, old, new netip.Addr) { - if len(p.Buffer()) < 40 { + if len(p.Buffer()) < minIPv6Header { // Not enough space for an IPv6 header. return } - o6, n6 := tcpip.AddrFrom16Slice(old.AsSlice()), tcpip.AddrFrom16Slice(new.AsSlice()) + o6, n6 := old.As16(), new.As16() // Now update the transport layer checksums, where applicable. tr := p.Transport() switch p.IPProto { case ipproto.ICMPv6: - if len(tr) < header.ICMPv6MinimumSize { + if len(tr) < minICMPv6Size { return } - header.ICMPv6(tr).UpdateChecksumPseudoHeaderAddress(o6, n6) + + ss := tr[offsetICMPv6Checksum:] + xsum := binary.BigEndian.Uint16(ss) + binary.BigEndian.PutUint16(ss, + ^checksumUpdate2ByteAlignedAddress(^xsum, o6, n6)) + case ipproto.UDP, ipproto.DCCP: - if len(tr) < header.UDPMinimumSize { + if len(tr) < minUDPSize { return } - header.UDP(tr).UpdateChecksumPseudoHeaderAddress(o6, n6, true) + ss := tr[offsetUDPChecksum:] + xsum := binary.BigEndian.Uint16(ss) + xsum = ^xsum + xsum = checksumUpdate2ByteAlignedAddress(xsum, o6, n6) + xsum = ^xsum + binary.BigEndian.PutUint16(ss, xsum) case ipproto.TCP: - if len(tr) < header.TCPMinimumSize { + if len(tr) < minTCPSize { return } - header.TCP(tr).UpdateChecksumPseudoHeaderAddress(o6, n6, true) + ss := tr[offsetTCPChecksum:] + xsum := binary.BigEndian.Uint16(ss) + xsum = ^xsum + xsum = checksumUpdate2ByteAlignedAddress(xsum, o6, n6) + xsum = ^xsum + binary.BigEndian.PutUint16(ss, xsum) case ipproto.SCTP: // No transport layer update required. } @@ -195,3 +219,77 @@ func updateV4Checksum(oldSum, old, new []byte) { hcPrime := ^uint16(cPrime) binary.BigEndian.PutUint16(oldSum, hcPrime) } + +// checksumUpdate2ByteAlignedAddress updates an address in a calculated +// checksum. +// +// The addresses must have the same length and must contain an even number +// of bytes. The address MUST begin at a 2-byte boundary in the original buffer. +// +// This implementation is copied from gVisor, but updated to use [16]byte. 
+func checksumUpdate2ByteAlignedAddress(xsum uint16, old, new [16]byte) uint16 { + const uint16Bytes = 2 + + oldAddr := old[:] + newAddr := new[:] + + // As per RFC 1071 page 4, + // (4) Incremental Update + // + // ... + // + // To update the checksum, simply add the differences of the + // sixteen bit integers that have been changed. To see why this + // works, observe that every 16-bit integer has an additive inverse + // and that addition is associative. From this it follows that + // given the original value m, the new value m', and the old + // checksum C, the new checksum C' is: + // + // C' = C + (-m) + m' = C + (m' - m) + for len(oldAddr) != 0 { + // Convert the 2 byte sequences to uint16 values then apply the increment + // update. + xsum = checksumUpdate2ByteAlignedUint16(xsum, (uint16(oldAddr[0])<<8)+uint16(oldAddr[1]), (uint16(newAddr[0])<<8)+uint16(newAddr[1])) + oldAddr = oldAddr[uint16Bytes:] + newAddr = newAddr[uint16Bytes:] + } + + return xsum +} + +// checksumUpdate2ByteAlignedUint16 updates a uint16 value in a calculated +// checksum. +// +// The value MUST begin at a 2-byte boundary in the original buffer. +// +// This implementation is copied from gVisor. +func checksumUpdate2ByteAlignedUint16(xsum, old, new uint16) uint16 { + // As per RFC 1071 page 4, + // (4) Incremental Update + // + // ... + // + // To update the checksum, simply add the differences of the + // sixteen bit integers that have been changed. To see why this + // works, observe that every 16-bit integer has an additive inverse + // and that addition is associative. From this it follows that + // given the original value m, the new value m', and the old + // checksum C, the new checksum C' is: + // + // C' = C + (-m) + m' = C + (m' - m) + if old == new { + return xsum + } + return checksumCombine(xsum, checksumCombine(new, ^old)) +} + +// checksumCombine combines the two uint16 to form their checksum. This is done +// by adding them and the carry. 
+// +// Note that checksum a must have been computed on an even number of bytes. +// +// This implementation is copied from gVisor. +func checksumCombine(a, b uint16) uint16 { + v := uint32(a) + uint32(b) + return uint16(v + v>>16) +} From eaecc0be544a592473b55fd32d46dcae7fb68b19 Mon Sep 17 00:00:00 2001 From: Mahyar Mirrashed <59240843+mahyarmirrashed@users.noreply.github.com> Date: Fri, 26 Sep 2025 15:42:16 -0500 Subject: [PATCH 0435/1093] cmd/tailscale/cli: use tabwriter for tailscale status (#16596) Fixes #17238 Signed-off-by: Mahyar Mirrashed --- cmd/tailscale/cli/status.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 94e0977fe57bf..89b18335b4ee0 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -4,7 +4,6 @@ package cli import ( - "bytes" "cmp" "context" "encoding/json" @@ -16,6 +15,7 @@ import ( "net/netip" "os" "strings" + "text/tabwriter" "github.com/peterbourgon/ff/v3/ffcli" "github.com/toqueteos/webbrowser" @@ -56,6 +56,7 @@ https://github.com/tailscale/tailscale/blob/main/ipn/ipnstate/ipnstate.go fs.BoolVar(&statusArgs.peers, "peers", true, "show status of peers") fs.StringVar(&statusArgs.listen, "listen", "127.0.0.1:8384", "listen address for web mode; use port 0 for automatic") fs.BoolVar(&statusArgs.browser, "browser", true, "Open a browser in web mode") + fs.BoolVar(&statusArgs.header, "header", false, "show column headers in table format") return fs })(), } @@ -68,6 +69,7 @@ var statusArgs struct { active bool // in CLI mode, filter output to only peers with active sessions self bool // in CLI mode, show status of local machine peers bool // in CLI mode, show status of peer machines + header bool // in CLI mode, show column headers in table format } const mullvadTCD = "mullvad.ts.net." 
@@ -151,10 +153,15 @@ func runStatus(ctx context.Context, args []string) error { os.Exit(1) } - var buf bytes.Buffer - f := func(format string, a ...any) { fmt.Fprintf(&buf, format, a...) } + w := tabwriter.NewWriter(Stdout, 0, 0, 2, ' ', 0) + f := func(format string, a ...any) { fmt.Fprintf(w, format, a...) } + if statusArgs.header { + fmt.Fprintln(w, "IP\tHostname\tOwner\tOS\tStatus\t") + fmt.Fprintln(w, "--\t--------\t-----\t--\t------\t") + } + printPS := func(ps *ipnstate.PeerStatus) { - f("%-15s %-20s %-12s %-7s ", + f("%s\t%s\t%s\t%s\t", firstIPString(ps.TailscaleIPs), dnsOrQuoteHostname(st, ps), ownerLogin(st, ps), @@ -199,7 +206,7 @@ func runStatus(ctx context.Context, args []string) error { if anyTraffic { f(", tx %d rx %d", ps.TxBytes, ps.RxBytes) } - f("\n") + f("\t\n") } if statusArgs.self && st.Self != nil { @@ -229,7 +236,8 @@ func runStatus(ctx context.Context, args []string) error { printPS(ps) } } - Stdout.Write(buf.Bytes()) + w.Flush() + if locBasedExitNode { outln() printf("# To see the full list of exit nodes, including location-based exit nodes, run `tailscale exit-node list` \n") From 87ee0f4e982cbb252d03d31beec251dad9c8ba1c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 13:05:20 -0700 Subject: [PATCH 0436/1093] ipn/ipnlocal: move last unconditional gvisor import, complete ts_omit_netstack support Fixes #17283 Change-Id: Ia84d269683e4a68d7d10562561204934eeaf53bb Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 17 +---- cmd/tailscaled/deps_test.go | 13 ++++ .../feature_netstack_disabled.go | 2 +- .../buildfeatures/feature_netstack_enabled.go | 2 +- feature/featuretags/featuretags.go | 2 +- ipn/ipnlocal/local.go | 60 --------------- ipn/ipnlocal/netstack.go | 74 +++++++++++++++++++ 7 files changed, 91 insertions(+), 79 deletions(-) create mode 100644 ipn/ipnlocal/netstack.go diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 3a7469c0febeb..144871c9b0a69 
100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -53,21 +53,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/vishvananda/netns from github.com/tailscale/netlink+ 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ - gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ - gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer - 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip - gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/refs - 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/state/wire+ - gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log - gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context+ - gvisor.dev/gvisor/pkg/rand from gvisor.dev/gvisor/pkg/tcpip - gvisor.dev/gvisor/pkg/refs from gvisor.dev/gvisor/pkg/buffer - 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ - gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state - 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - gvisor.dev/gvisor/pkg/tcpip from tailscale.com/ipn/ipnlocal - 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer - gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal tailscale.com/atomicfile from tailscale.com/ipn+ @@ -283,7 +268,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna - golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + golang.org/x/time/rate from tailscale.com/derp+ archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ diff 
--git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 92c6a872cad68..a41a08f9df479 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -186,6 +186,19 @@ func TestOmitDBus(t *testing.T) { }.Check(t) } +func TestNetstack(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_gro,ts_omit_netstack,ts_omit_outboundproxy,ts_omit_serve,ts_omit_ssh,ts_omit_webclient,ts_omit_tap", + OnDep: func(dep string) { + if strings.Contains(dep, "gvisor") { + t.Errorf("unexpected gvisor dep: %q", dep) + } + }, + }.Check(t) +} + func TestOmitPortlist(t *testing.T) { deptest.DepChecker{ GOOS: "linux", diff --git a/feature/buildfeatures/feature_netstack_disabled.go b/feature/buildfeatures/feature_netstack_disabled.go index 7369645a0d0b1..acb6e8e76396e 100644 --- a/feature/buildfeatures/feature_netstack_disabled.go +++ b/feature/buildfeatures/feature_netstack_disabled.go @@ -7,7 +7,7 @@ package buildfeatures -// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support (TODO; not yet omittable)". +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. // It's a const so it can be used for dead code elimination. const HasNetstack = false diff --git a/feature/buildfeatures/feature_netstack_enabled.go b/feature/buildfeatures/feature_netstack_enabled.go index a7e57098b5c42..04f67118523a0 100644 --- a/feature/buildfeatures/feature_netstack_enabled.go +++ b/feature/buildfeatures/feature_netstack_enabled.go @@ -7,7 +7,7 @@ package buildfeatures -// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support (TODO; not yet omittable)". 
+// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support". // Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. // It's a const so it can be used for dead code elimination. const HasNetstack = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 1db377277085b..25426c9737021 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -121,7 +121,7 @@ var Features = map[FeatureTag]FeatureMeta{ }, "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, - "netstack": {"Netstack", "gVisor netstack (userspace networking) support (TODO; not yet omittable)", nil}, + "netstack": {"Netstack", "gVisor netstack (userspace networking) support", nil}, "networkmanager": { Sym: "NetworkManager", Desc: "Linux NetworkManager integration", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 62a3a213178b7..4b8032e9ce79f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -38,7 +38,6 @@ import ( "go4.org/mem" "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" - "gvisor.dev/gvisor/pkg/tcpip" "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" @@ -4643,65 +4642,6 @@ var ( hookServeClearVIPServicesTCPPortsInterceptedLocked feature.Hook[func(*LocalBackend)] ) -// TCPHandlerForDst returns a TCP handler for connections to dst, or nil if -// no handler is needed. It also returns a list of TCP socket options to -// apply to the socket before calling the handler. -// TCPHandlerForDst is called both for connections to our node's local IP -// as well as to the service IP (quad 100). 
-func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c net.Conn) error, opts []tcpip.SettableSocketOption) { - // First handle internal connections to the service IP - hittingServiceIP := dst.Addr() == magicDNSIP || dst.Addr() == magicDNSIPv6 - if hittingServiceIP { - switch dst.Port() { - case 80: - // TODO(mpminardi): do we want to show an error message if the web client - // has been disabled instead of the more "basic" web UI? - if b.ShouldRunWebClient() { - return b.handleWebClientConn, opts - } - return b.HandleQuad100Port80Conn, opts - case DriveLocalPort: - return b.handleDriveConn, opts - } - } - - if f, ok := hookServeTCPHandlerForVIPService.GetOk(); ok { - if handler := f(b, dst, src); handler != nil { - return handler, opts - } - } - // Then handle external connections to the local IP. - if !b.isLocalIP(dst.Addr()) { - return nil, nil - } - if dst.Port() == 22 && b.ShouldRunSSH() { - // Use a higher keepalive idle time for SSH connections, as they are - // typically long lived and idle connections are more likely to be - // intentional. Ideally we would turn this off entirely, but we can't - // tell the difference between a long lived connection that is idle - // vs a connection that is dead because the peer has gone away. - // We pick 72h as that is typically sufficient for a long weekend. - opts = append(opts, ptr.To(tcpip.KeepaliveIdleOption(72*time.Hour))) - return b.handleSSHConn, opts - } - // TODO(will,sonia): allow customizing web client port ? 
- if dst.Port() == webClientPort && b.ShouldExposeRemoteWebClient() { - return b.handleWebClientConn, opts - } - if port, ok := b.GetPeerAPIPort(dst.Addr()); ok && dst.Port() == port { - return func(c net.Conn) error { - b.handlePeerAPIConn(src, dst, c) - return nil - }, opts - } - if f, ok := hookTCPHandlerForServe.GetOk(); ok { - if handler := f(b, dst.Port(), src, nil); handler != nil { - return handler, opts - } - } - return nil, nil -} - func (b *LocalBackend) handleDriveConn(conn net.Conn) error { fs, ok := b.sys.DriveForLocal.GetOK() if !ok || !b.DriveAccessEnabled() { diff --git a/ipn/ipnlocal/netstack.go b/ipn/ipnlocal/netstack.go new file mode 100644 index 0000000000000..f7ffd03058879 --- /dev/null +++ b/ipn/ipnlocal/netstack.go @@ -0,0 +1,74 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package ipnlocal + +import ( + "net" + "net/netip" + "time" + + "gvisor.dev/gvisor/pkg/tcpip" + "tailscale.com/types/ptr" +) + +// TCPHandlerForDst returns a TCP handler for connections to dst, or nil if +// no handler is needed. It also returns a list of TCP socket options to +// apply to the socket before calling the handler. +// TCPHandlerForDst is called both for connections to our node's local IP +// as well as to the service IP (quad 100). +func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c net.Conn) error, opts []tcpip.SettableSocketOption) { + // First handle internal connections to the service IP + hittingServiceIP := dst.Addr() == magicDNSIP || dst.Addr() == magicDNSIPv6 + if hittingServiceIP { + switch dst.Port() { + case 80: + // TODO(mpminardi): do we want to show an error message if the web client + // has been disabled instead of the more "basic" web UI? 
+ if b.ShouldRunWebClient() { + return b.handleWebClientConn, opts + } + return b.HandleQuad100Port80Conn, opts + case DriveLocalPort: + return b.handleDriveConn, opts + } + } + + if f, ok := hookServeTCPHandlerForVIPService.GetOk(); ok { + if handler := f(b, dst, src); handler != nil { + return handler, opts + } + } + // Then handle external connections to the local IP. + if !b.isLocalIP(dst.Addr()) { + return nil, nil + } + if dst.Port() == 22 && b.ShouldRunSSH() { + // Use a higher keepalive idle time for SSH connections, as they are + // typically long lived and idle connections are more likely to be + // intentional. Ideally we would turn this off entirely, but we can't + // tell the difference between a long lived connection that is idle + // vs a connection that is dead because the peer has gone away. + // We pick 72h as that is typically sufficient for a long weekend. + opts = append(opts, ptr.To(tcpip.KeepaliveIdleOption(72*time.Hour))) + return b.handleSSHConn, opts + } + // TODO(will,sonia): allow customizing web client port ? 
+ if dst.Port() == webClientPort && b.ShouldExposeRemoteWebClient() { + return b.handleWebClientConn, opts + } + if port, ok := b.GetPeerAPIPort(dst.Addr()); ok && dst.Port() == port { + return func(c net.Conn) error { + b.handlePeerAPIConn(src, dst, c) + return nil + }, opts + } + if f, ok := hookTCPHandlerForServe.GetOk(); ok { + if handler := f(b, dst.Port(), src, nil); handler != nil { + return handler, opts + } + } + return nil, nil +} From 832e94607e47258d36c07d6786d4ac12b170e63b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 13:33:08 -0700 Subject: [PATCH 0437/1093] doctor: add ts_omit_doctor support Updates #12614 Change-Id: I84c166c4b99ca75d70abe4087e5ff3f7d90d4bcc Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 9 +- cmd/tailscaled/depaware-minbox.txt | 11 +-- cmd/tailscaled/depaware.txt | 9 +- cmd/tsidp/depaware.txt | 9 +- .../buildfeatures/feature_doctor_disabled.go | 13 +++ .../buildfeatures/feature_doctor_enabled.go | 13 +++ feature/condregister/maybe_doctor.go | 8 ++ feature/doctor/doctor.go | 95 +++++++++++++++++++ feature/featuretags/featuretags.go | 1 + feature/taildrop/peerapi_test.go | 2 + ipn/ipnlocal/local.go | 56 +---------- ipn/ipnlocal/peerapi.go | 24 +---- ipn/localapi/localapi.go | 4 +- tsnet/depaware.txt | 9 +- 14 files changed, 154 insertions(+), 109 deletions(-) create mode 100644 feature/buildfeatures/feature_doctor_disabled.go create mode 100644 feature/buildfeatures/feature_doctor_enabled.go create mode 100644 feature/condregister/maybe_doctor.go create mode 100644 feature/doctor/doctor.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index ea0e08b191bab..2adbd5f5dca9d 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -185,7 +185,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus LD 
github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs - L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket @@ -200,7 +200,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile+ - L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web @@ -787,10 +787,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ tailscale.com/disco from tailscale.com/net/tstun+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ @@ -868,7 +864,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/portmapper from 
tailscale.com/feature/portmapper tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 144871c9b0a69..08d7d59c6541a 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -37,9 +37,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/mdlayher/sdnotify from tailscale.com/util/systemd 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ github.com/mitchellh/go-ps from tailscale.com/safesocket - 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf github.com/tailscale/hujson from tailscale.com/ipn/conffile - 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -69,10 +69,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ tailscale.com/disco from tailscale.com/net/tstun+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal 
tailscale.com/drive from tailscale.com/ipn+ tailscale.com/envknob from tailscale.com/cmd/tailscaled+ tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal @@ -127,7 +123,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ - tailscale.com/net/routetable from tailscale.com/doctor/routetable tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ @@ -242,7 +237,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/exp/constraints from tailscale.com/doctor/permissions+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index acd8e0459c0f5..579af5c0d3b2b 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -259,10 +259,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ tailscale.com/disco from tailscale.com/feature/relayserver+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - 
tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/doctor from tailscale.com/feature/doctor + tailscale.com/doctor/ethtool from tailscale.com/feature/doctor + 💣 tailscale.com/doctor/permissions from tailscale.com/feature/doctor + tailscale.com/doctor/routetable from tailscale.com/feature/doctor tailscale.com/drive from tailscale.com/client/local+ tailscale.com/drive/driveimpl from tailscale.com/cmd/tailscaled tailscale.com/drive/driveimpl/compositedav from tailscale.com/drive/driveimpl @@ -276,6 +276,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister + tailscale.com/feature/doctor from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister tailscale.com/feature/portlist from tailscale.com/feature/condregister diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 69904c9761f69..270edd3719e8e 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -132,7 +132,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack - L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -146,7 +146,7 @@ 
tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile - L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web @@ -229,10 +229,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ tailscale.com/disco from tailscale.com/net/tstun+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ @@ -299,7 +295,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/portmapper from tailscale.com/feature/portmapper tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ diff --git a/feature/buildfeatures/feature_doctor_disabled.go b/feature/buildfeatures/feature_doctor_disabled.go new file mode 
100644 index 0000000000000..8c15e951e311f --- /dev/null +++ b/feature/buildfeatures/feature_doctor_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_doctor + +package buildfeatures + +// HasDoctor is whether the binary was built with support for modular feature "Diagnose possible issues with Tailscale and its host environment". +// Specifically, it's whether the binary was NOT built with the "ts_omit_doctor" build tag. +// It's a const so it can be used for dead code elimination. +const HasDoctor = false diff --git a/feature/buildfeatures/feature_doctor_enabled.go b/feature/buildfeatures/feature_doctor_enabled.go new file mode 100644 index 0000000000000..a8a0bb7d2056b --- /dev/null +++ b/feature/buildfeatures/feature_doctor_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_doctor + +package buildfeatures + +// HasDoctor is whether the binary was built with support for modular feature "Diagnose possible issues with Tailscale and its host environment". +// Specifically, it's whether the binary was NOT built with the "ts_omit_doctor" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDoctor = true diff --git a/feature/condregister/maybe_doctor.go b/feature/condregister/maybe_doctor.go new file mode 100644 index 0000000000000..3dc9ffa539312 --- /dev/null +++ b/feature/condregister/maybe_doctor.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_doctor + +package condregister + +import _ "tailscale.com/feature/doctor" diff --git a/feature/doctor/doctor.go b/feature/doctor/doctor.go new file mode 100644 index 0000000000000..875b57d14c4f0 --- /dev/null +++ b/feature/doctor/doctor.go @@ -0,0 +1,95 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The doctor package registers the "doctor" problem diagnosis support into the +// rest of Tailscale. +package doctor + +import ( + "context" + "fmt" + "html" + "net/http" + "time" + + "tailscale.com/doctor" + "tailscale.com/doctor/ethtool" + "tailscale.com/doctor/permissions" + "tailscale.com/doctor/routetable" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/net/tsaddr" + "tailscale.com/types/logger" +) + +func init() { + ipnlocal.HookDoctor.Set(visitDoctor) + ipnlocal.RegisterPeerAPIHandler("/v0/doctor", handleServeDoctor) +} + +func handleServeDoctor(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + if !h.CanDebug() { + http.Error(w, "denied; no debug access", http.StatusForbidden) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + fmt.Fprintln(w, "

        Doctor Output

        ") + + fmt.Fprintln(w, "
        ")
        +
        +	b := h.LocalBackend()
        +	visitDoctor(r.Context(), b, func(format string, args ...any) {
        +		line := fmt.Sprintf(format, args...)
        +		fmt.Fprintln(w, html.EscapeString(line))
        +	})
        +
        +	fmt.Fprintln(w, "
        ") +} + +func visitDoctor(ctx context.Context, b *ipnlocal.LocalBackend, logf logger.Logf) { + // We can write logs too fast for logtail to handle, even when + // opting-out of rate limits. Limit ourselves to at most one message + // per 20ms and a burst of 60 log lines, which should be fast enough to + // not block for too long but slow enough that we can upload all lines. + logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, b.Clock().Now) + + var checks []doctor.Check + checks = append(checks, + permissions.Check{}, + routetable.Check{}, + ethtool.Check{}, + ) + + // Print a log message if any of the global DNS resolvers are Tailscale + // IPs; this can interfere with our ability to connect to the Tailscale + // controlplane. + checks = append(checks, doctor.CheckFunc("dns-resolvers", func(_ context.Context, logf logger.Logf) error { + nm := b.NetMap() + if nm == nil { + return nil + } + + for i, resolver := range nm.DNS.Resolvers { + ipp, ok := resolver.IPPort() + if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { + logf("resolver %d is a Tailscale address: %v", i, resolver) + } + } + for i, resolver := range nm.DNS.FallbackResolvers { + ipp, ok := resolver.IPPort() + if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { + logf("fallback resolver %d is a Tailscale address: %v", i, resolver) + } + } + return nil + })) + + // TODO(andrew): more + + numChecks := len(checks) + checks = append(checks, doctor.CheckFunc("numchecks", func(_ context.Context, log logger.Logf) error { + log("%d checks", numChecks) + return nil + })) + + doctor.RunChecks(ctx, logf, checks...) 
+} diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 25426c9737021..2edecef581b6c 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -105,6 +105,7 @@ var Features = map[FeatureTag]FeatureMeta{ Deps: []FeatureTag{"portmapper"}, }, "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, + "doctor": {"Doctor", "Diagnose possible issues with Tailscale and its host environment", nil}, "drive": {"Drive", "Tailscale Drive (file server) support", nil}, "gro": { Sym: "GRO", diff --git a/feature/taildrop/peerapi_test.go b/feature/taildrop/peerapi_test.go index 6339973544453..254d8794e8273 100644 --- a/feature/taildrop/peerapi_test.go +++ b/feature/taildrop/peerapi_test.go @@ -33,11 +33,13 @@ type peerAPIHandler struct { isSelf bool // whether peerNode is owned by same user as this node selfNode tailcfg.NodeView // this node; always non-nil peerNode tailcfg.NodeView // peerNode is who's making the request + canDebug bool // whether peerNode can debug this node (goroutines, metrics, magicsock internal state, etc) } func (h *peerAPIHandler) IsSelfUntagged() bool { return !h.selfNode.IsTagged() && !h.peerNode.IsTagged() && h.isSelf } +func (h *peerAPIHandler) CanDebug() bool { return h.canDebug } func (h *peerAPIHandler) Peer() tailcfg.NodeView { return h.peerNode } func (h *peerAPIHandler) Self() tailcfg.NodeView { return h.selfNode } func (h *peerAPIHandler) RemoteAddr() netip.AddrPort { return h.remoteAddr } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4b8032e9ce79f..dd0a2f9f1e067 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -43,10 +43,6 @@ import ( "tailscale.com/clientupdate" "tailscale.com/control/controlclient" "tailscale.com/control/controlknobs" - "tailscale.com/doctor" - "tailscale.com/doctor/ethtool" - "tailscale.com/doctor/permissions" - "tailscale.com/doctor/routetable" "tailscale.com/drive" "tailscale.com/envknob" 
"tailscale.com/envknob/featureknob" @@ -6706,56 +6702,8 @@ func (b *LocalBackend) handleQuad100Port80Conn(w http.ResponseWriter, r *http.Re io.WriteString(w, "
      \n") } -func (b *LocalBackend) Doctor(ctx context.Context, logf logger.Logf) { - // We can write logs too fast for logtail to handle, even when - // opting-out of rate limits. Limit ourselves to at most one message - // per 20ms and a burst of 60 log lines, which should be fast enough to - // not block for too long but slow enough that we can upload all lines. - logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, b.clock.Now) - - var checks []doctor.Check - checks = append(checks, - permissions.Check{}, - routetable.Check{}, - ethtool.Check{}, - ) - - // Print a log message if any of the global DNS resolvers are Tailscale - // IPs; this can interfere with our ability to connect to the Tailscale - // controlplane. - checks = append(checks, doctor.CheckFunc("dns-resolvers", func(_ context.Context, logf logger.Logf) error { - b.mu.Lock() - nm := b.NetMap() - b.mu.Unlock() - if nm == nil { - return nil - } - - for i, resolver := range nm.DNS.Resolvers { - ipp, ok := resolver.IPPort() - if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { - logf("resolver %d is a Tailscale address: %v", i, resolver) - } - } - for i, resolver := range nm.DNS.FallbackResolvers { - ipp, ok := resolver.IPPort() - if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { - logf("fallback resolver %d is a Tailscale address: %v", i, resolver) - } - } - return nil - })) - - // TODO(andrew): more - - numChecks := len(checks) - checks = append(checks, doctor.CheckFunc("numchecks", func(_ context.Context, log logger.Logf) error { - log("%d checks", numChecks) - return nil - })) - - doctor.RunChecks(ctx, logf, checks...) -} +// HookDoctor is an optional hook for the "doctor" problem diagnosis feature. +var HookDoctor feature.Hook[func(context.Context, *LocalBackend, logger.Logf)] // SetDevStateStore updates the LocalBackend's state storage to the provided values. 
// diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 886a7129120b8..9d2b49a383810 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -217,6 +217,7 @@ type peerAPIHandler struct { type PeerAPIHandler interface { Peer() tailcfg.NodeView PeerCaps() tailcfg.PeerCapMap + CanDebug() bool // can remote node debug this node (internal state, etc) Self() tailcfg.NodeView LocalBackend() *LocalBackend IsSelfUntagged() bool // whether the peer is untagged and the same as this user @@ -380,9 +381,6 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { case "/v0/interfaces": h.handleServeInterfaces(w, r) return - case "/v0/doctor": - h.handleServeDoctor(w, r) - return case "/v0/sockstats": h.handleServeSockStats(w, r) return @@ -455,24 +453,6 @@ func (h *peerAPIHandler) handleServeInterfaces(w http.ResponseWriter, r *http.Re fmt.Fprintln(w, "") } -func (h *peerAPIHandler) handleServeDoctor(w http.ResponseWriter, r *http.Request) { - if !h.canDebug() { - http.Error(w, "denied; no debug access", http.StatusForbidden) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - fmt.Fprintln(w, "

      Doctor Output

      ") - - fmt.Fprintln(w, "
      ")
      -
      -	h.ps.b.Doctor(r.Context(), func(format string, args ...any) {
      -		line := fmt.Sprintf(format, args...)
      -		fmt.Fprintln(w, html.EscapeString(line))
      -	})
      -
      -	fmt.Fprintln(w, "
      ") -} - func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Request) { if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) @@ -571,6 +551,8 @@ func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Req fmt.Fprintln(w, "") } +func (h *peerAPIHandler) CanDebug() bool { return h.canDebug() } + // canDebug reports whether h can debug this node (goroutines, metrics, // magicsock internal state, etc). func (h *peerAPIHandler) canDebug() bool { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index a83a2e17e4879..e8801e1ba01a1 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -402,7 +402,9 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { } if defBool(r.URL.Query().Get("diagnose"), false) { - h.b.Doctor(r.Context(), logger.WithPrefix(h.logf, "diag: ")) + if f, ok := ipnlocal.HookDoctor.GetOk(); ok { + f(r.Context(), h.b, logger.WithPrefix(h.logf, "diag: ")) + } } w.Header().Set("Content-Type", "text/plain") fmt.Fprintln(w, startMarker) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index ece4345d531b1..c196cc14da9ad 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -132,7 +132,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack - L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -146,7 +146,7 @@ tailscale.com/tsnet dependencies: (generated by 
github.com/tailscale/depaware) github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp LDW github.com/tailscale/hujson from tailscale.com/ipn/conffile - L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ + L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth LDW github.com/tailscale/web-client-prebuilt from tailscale.com/client/web @@ -225,10 +225,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ tailscale.com/disco from tailscale.com/net/tstun+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ @@ -295,7 +291,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/portmapper from tailscale.com/feature/portmapper tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ From 9ae8155bab4e5bfafec0ebe90931704cda1d69c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 26 Sep 2025 17:30:24 -0400 Subject: [PATCH 0438/1093] 
cmol/pprof health (#17303) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit health: ensure timers are cleaned up Updates tailscale/corp#32696 Signed-off-by: Claus Lensbøl --- health/health.go | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/health/health.go b/health/health.go index 3d1c46a3d945b..d60762e3159c3 100644 --- a/health/health.go +++ b/health/health.go @@ -143,15 +143,30 @@ func NewTracker(bus *eventbus.Bus) *Tracker { panic("no eventbus set") } - cli := bus.Client("health.Tracker") + ec := bus.Client("health.Tracker") t := &Tracker{ - eventClient: cli, - changePub: eventbus.Publish[Change](cli), + eventClient: ec, + changePub: eventbus.Publish[Change](ec), } t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) + + ec.Monitor(t.awaitEventClientDone) + return t } +func (t *Tracker) awaitEventClientDone(ec *eventbus.Client) { + <-ec.Done() + t.mu.Lock() + defer t.mu.Unlock() + + for _, timer := range t.pendingVisibleTimers { + timer.Stop() + } + t.timer.Stop() + clear(t.pendingVisibleTimers) +} + func (t *Tracker) now() time.Time { if t.testClock != nil { return t.testClock.Now() From e9dae5441e4fb877554ecc8b274a5c008f736755 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 18:21:50 -0700 Subject: [PATCH 0439/1093] tka: use ts_omit_tailnetlock in another spot, for ed25519consensus I noticed this while modularizing clientupdate. With this in first, moving clientupdate to be modular removes a bunch more stuff from the minimal build + tsnet. 
Updates #17115 Change-Id: I44bd055fca65808633fd3a848b0bbc09b00ad4fa Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 2 +- tka/key.go | 22 ------------------ tka/tka.go | 2 +- tka/verify.go | 36 ++++++++++++++++++++++++++++++ tka/verify_disabled.go | 18 +++++++++++++++ 5 files changed, 56 insertions(+), 24 deletions(-) create mode 100644 tka/verify.go create mode 100644 tka/verify_disabled.go diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 08d7d59c6541a..cf4a9b039fca9 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -20,7 +20,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/google/nftables/expr from github.com/google/nftables+ github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ github.com/google/nftables/xt from github.com/google/nftables/expr+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd diff --git a/tka/key.go b/tka/key.go index 07736795d8e58..dca1b4416560b 100644 --- a/tka/key.go +++ b/tka/key.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" - "github.com/hdevalence/ed25519consensus" "tailscale.com/types/tkatype" ) @@ -136,24 +135,3 @@ func (k Key) StaticValidate() error { } return nil } - -// Verify returns a nil error if the signature is valid over the -// provided AUM BLAKE2s digest, using the given key. 
-func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { - // NOTE(tom): Even if we can compute the public from the KeyID, - // its possible for the KeyID to be attacker-controlled - // so we should use the public contained in the state machine. - switch key.Kind { - case Key25519: - if len(key.Public) != ed25519.PublicKeySize { - return fmt.Errorf("ed25519 key has wrong length: %d", len(key.Public)) - } - if ed25519consensus.Verify(ed25519.PublicKey(key.Public), aumDigest[:], s.Signature) { - return nil - } - return errors.New("invalid signature") - - default: - return fmt.Errorf("unhandled key type: %v", key.Kind) - } -} diff --git a/tka/tka.go b/tka/tka.go index 3929ff22a607e..234c87fe1b89c 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -3,7 +3,7 @@ //go:build !ts_omit_tailnetlock -// Package tka (WIP) implements the Tailnet Key Authority. +// Package tka implements the Tailnet Key Authority (TKA) for Tailnet Lock. package tka import ( diff --git a/tka/verify.go b/tka/verify.go new file mode 100644 index 0000000000000..e4e22e5518e8b --- /dev/null +++ b/tka/verify.go @@ -0,0 +1,36 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package tka + +import ( + "crypto/ed25519" + "errors" + "fmt" + + "github.com/hdevalence/ed25519consensus" + "tailscale.com/types/tkatype" +) + +// signatureVerify returns a nil error if the signature is valid over the +// provided AUM BLAKE2s digest, using the given key. +func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { + // NOTE(tom): Even if we can compute the public from the KeyID, + // its possible for the KeyID to be attacker-controlled + // so we should use the public contained in the state machine. 
+ switch key.Kind { + case Key25519: + if len(key.Public) != ed25519.PublicKeySize { + return fmt.Errorf("ed25519 key has wrong length: %d", len(key.Public)) + } + if ed25519consensus.Verify(ed25519.PublicKey(key.Public), aumDigest[:], s.Signature) { + return nil + } + return errors.New("invalid signature") + + default: + return fmt.Errorf("unhandled key type: %v", key.Kind) + } +} diff --git a/tka/verify_disabled.go b/tka/verify_disabled.go new file mode 100644 index 0000000000000..ba72f93e27d8f --- /dev/null +++ b/tka/verify_disabled.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package tka + +import ( + "errors" + + "tailscale.com/types/tkatype" +) + +// signatureVerify returns a nil error if the signature is valid over the +// provided AUM BLAKE2s digest, using the given key. +func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { + return errors.New("tailnetlock disabled in build") +} From d01a0adfa6c9bbf435bd8b5042e203c46fde6a18 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 21:17:07 -0700 Subject: [PATCH 0440/1093] types/dnstype: delete unused func, move other one to its sole caller The dnstype package is used by tailcfg, which tries to be light and leafy. But it brings in dnstype. So dnstype shouldn't bring in x/net/dns/dnsmessage. 
Updates #12614 Change-Id: I043637a7ce7fed097e648001f13ca1927a781def Signed-off-by: Brad Fitzpatrick --- cmd/stund/depaware.txt | 2 +- ipn/localapi/localapi.go | 40 ++++++++++++- types/dnstype/messagetypes-string.go | 84 ---------------------------- 3 files changed, 39 insertions(+), 87 deletions(-) delete mode 100644 types/dnstype/messagetypes-string.go diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index c8a18eb0752bc..97cf14cf0a6b1 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -97,7 +97,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/tsweb/varz - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from net golang.org/x/net/http/httpguts from net/http+ golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2/hpack from net/http+ diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index e8801e1ba01a1..e628e677b4a6f 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -38,7 +38,6 @@ import ( "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/tstime" - "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" @@ -1995,7 +1994,7 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { queryType := q.Get("type") qt := dnsmessage.TypeA if queryType != "" { - t, err := dnstype.DNSMessageTypeForString(queryType) + t, err := dnsMessageTypeForString(queryType) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return @@ -2016,6 +2015,43 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { }) } +// dnsMessageTypeForString returns the dnsmessage.Type for the given string. 
+// For example, DNSMessageTypeForString("A") returns dnsmessage.TypeA. +func dnsMessageTypeForString(s string) (t dnsmessage.Type, err error) { + s = strings.TrimSpace(strings.ToUpper(s)) + switch s { + case "AAAA": + return dnsmessage.TypeAAAA, nil + case "ALL": + return dnsmessage.TypeALL, nil + case "A": + return dnsmessage.TypeA, nil + case "CNAME": + return dnsmessage.TypeCNAME, nil + case "HINFO": + return dnsmessage.TypeHINFO, nil + case "MINFO": + return dnsmessage.TypeMINFO, nil + case "MX": + return dnsmessage.TypeMX, nil + case "NS": + return dnsmessage.TypeNS, nil + case "OPT": + return dnsmessage.TypeOPT, nil + case "PTR": + return dnsmessage.TypePTR, nil + case "SOA": + return dnsmessage.TypeSOA, nil + case "SRV": + return dnsmessage.TypeSRV, nil + case "TXT": + return dnsmessage.TypeTXT, nil + case "WKS": + return dnsmessage.TypeWKS, nil + } + return 0, errors.New("unknown DNS message type: " + s) +} + // serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { if r.Method != httpm.GET { diff --git a/types/dnstype/messagetypes-string.go b/types/dnstype/messagetypes-string.go deleted file mode 100644 index 34abea1ba947b..0000000000000 --- a/types/dnstype/messagetypes-string.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package dnstype - -import ( - "errors" - "strings" - - "golang.org/x/net/dns/dnsmessage" -) - -// StringForType returns the string representation of a dnsmessage.Type. -// For example, StringForType(dnsmessage.TypeA) returns "A". 
-func StringForDNSMessageType(t dnsmessage.Type) string { - switch t { - case dnsmessage.TypeAAAA: - return "AAAA" - case dnsmessage.TypeALL: - return "ALL" - case dnsmessage.TypeA: - return "A" - case dnsmessage.TypeCNAME: - return "CNAME" - case dnsmessage.TypeHINFO: - return "HINFO" - case dnsmessage.TypeMINFO: - return "MINFO" - case dnsmessage.TypeMX: - return "MX" - case dnsmessage.TypeNS: - return "NS" - case dnsmessage.TypeOPT: - return "OPT" - case dnsmessage.TypePTR: - return "PTR" - case dnsmessage.TypeSOA: - return "SOA" - case dnsmessage.TypeSRV: - return "SRV" - case dnsmessage.TypeTXT: - return "TXT" - case dnsmessage.TypeWKS: - return "WKS" - } - return "UNKNOWN" -} - -// DNSMessageTypeForString returns the dnsmessage.Type for the given string. -// For example, DNSMessageTypeForString("A") returns dnsmessage.TypeA. -func DNSMessageTypeForString(s string) (t dnsmessage.Type, err error) { - s = strings.TrimSpace(strings.ToUpper(s)) - switch s { - case "AAAA": - return dnsmessage.TypeAAAA, nil - case "ALL": - return dnsmessage.TypeALL, nil - case "A": - return dnsmessage.TypeA, nil - case "CNAME": - return dnsmessage.TypeCNAME, nil - case "HINFO": - return dnsmessage.TypeHINFO, nil - case "MINFO": - return dnsmessage.TypeMINFO, nil - case "MX": - return dnsmessage.TypeMX, nil - case "NS": - return dnsmessage.TypeNS, nil - case "OPT": - return dnsmessage.TypeOPT, nil - case "PTR": - return dnsmessage.TypePTR, nil - case "SOA": - return dnsmessage.TypeSOA, nil - case "SRV": - return dnsmessage.TypeSRV, nil - case "TXT": - return dnsmessage.TypeTXT, nil - case "WKS": - return dnsmessage.TypeWKS, nil - } - return 0, errors.New("unknown DNS message type: " + s) -} From 7df7e01d0f3b2015283a4a5045924c47fe0dd1dd Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Sat, 27 Sep 2025 10:23:58 +0300 Subject: [PATCH 0441/1093] tstest/integration/vms,.github/workflows: bump Ubuntu and NixOS for VM tests + cleanup (#16098) This PR cleans up a bunch of things in 
./tstest/integration/vms: - Bumps version of Ubuntu that's actually run from CI 20.04 -> 24.04 - Removes Ubuntu 18.04 test - Bumps NixOS 21.05 -> 25.05 Updates #cleanup Signed-off-by: Irbe Krumina --- .github/workflows/test.yml | 2 +- tstest/integration/vms/README.md | 23 +---- tstest/integration/vms/distros.hujson | 18 ++-- tstest/integration/vms/nixos_test.go | 5 +- .../vms/opensuse_leap_15_1_test.go | 85 ------------------- tstest/integration/vms/regex_flag.go | 29 ------- tstest/integration/vms/regex_flag_test.go | 21 ----- tstest/integration/vms/top_level_test.go | 18 +--- tstest/integration/vms/vms_test.go | 19 ----- 9 files changed, 15 insertions(+), 205 deletions(-) delete mode 100644 tstest/integration/vms/opensuse_leap_15_1_test.go delete mode 100644 tstest/integration/vms/regex_flag.go delete mode 100644 tstest/integration/vms/regex_flag_test.go diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 17e08ae9dc251..c3aa4f1bca1ff 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -328,7 +328,7 @@ jobs: enableCrossOsArchive: true - name: Run VM tests working-directory: src - run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004 + run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2404 env: HOME: "/var/lib/ghrunner/home" TMPDIR: "/tmp" diff --git a/tstest/integration/vms/README.md b/tstest/integration/vms/README.md index 519c3d000fb63..a68ed051428f8 100644 --- a/tstest/integration/vms/README.md +++ b/tstest/integration/vms/README.md @@ -1,7 +1,6 @@ # End-to-End VM-based Integration Testing -This test spins up a bunch of common linux distributions and then tries to get -them to connect to a +These tests spin up a Tailscale client in a Linux VM and try to connect it to [`testcontrol`](https://pkg.go.dev/tailscale.com/tstest/integration/testcontrol) server. 
@@ -55,26 +54,6 @@ If you pass the `-no-s3` flag to `go test`, the S3 step will be skipped in favor of downloading the images directly from upstream sources, which may cause the test to fail in odd places. -### Distribution Picking - -This test runs on a large number of distributions. By default it tries to run -everything, which may or may not be ideal for you. If you only want to test a -subset of distributions, you can use the `--distro-regex` flag to match a subset -of distributions using a [regular expression](https://golang.org/pkg/regexp/) -such as like this: - -```console -$ go test -run-vm-tests -distro-regex centos -``` - -This would run all tests on all versions of CentOS. - -```console -$ go test -run-vm-tests -distro-regex '(debian|ubuntu)' -``` - -This would run all tests on all versions of Debian and Ubuntu. - ### Ram Limiting This test uses a lot of memory. In order to avoid making machines run out of diff --git a/tstest/integration/vms/distros.hujson b/tstest/integration/vms/distros.hujson index 049091ed50e6e..2c90f9a2f82c1 100644 --- a/tstest/integration/vms/distros.hujson +++ b/tstest/integration/vms/distros.hujson @@ -12,24 +12,16 @@ // /var/log/cloud-init-output.log for what you messed up. 
[ { - "Name": "ubuntu-18-04", - "URL": "https://cloud-images.ubuntu.com/releases/bionic/release-20210817/ubuntu-18.04-server-cloudimg-amd64.img", - "SHA256Sum": "1ee1039f0b91c8367351413b5b5f56026aaf302fd5f66f17f8215132d6e946d2", + "Name": "ubuntu-24-04", + "URL": "https://cloud-images.ubuntu.com/noble/20250523/noble-server-cloudimg-amd64.img", + "SHA256Sum": "0e865619967706765cdc8179fb9929202417ab3a0719d77d8c8942d38aa9611b", "MemoryMegs": 512, "PackageManager": "apt", "InitSystem": "systemd" }, { - "Name": "ubuntu-20-04", - "URL": "https://cloud-images.ubuntu.com/releases/focal/release-20210819/ubuntu-20.04-server-cloudimg-amd64.img", - "SHA256Sum": "99e25e6e344e3a50a081235e825937238a3d51b099969e107ef66f0d3a1f955e", - "MemoryMegs": 512, - "PackageManager": "apt", - "InitSystem": "systemd" - }, - { - "Name": "nixos-21-11", - "URL": "channel:nixos-21.11", + "Name": "nixos-25-05", + "URL": "channel:nixos-25.05", "SHA256Sum": "lolfakesha", "MemoryMegs": 512, "PackageManager": "nix", diff --git a/tstest/integration/vms/nixos_test.go b/tstest/integration/vms/nixos_test.go index c2998ff3c087c..02b040fedfaff 100644 --- a/tstest/integration/vms/nixos_test.go +++ b/tstest/integration/vms/nixos_test.go @@ -97,7 +97,7 @@ let # Wrap tailscaled with the ip and iptables commands. wrapProgram $out/bin/tailscaled --prefix PATH : ${ - lib.makeBinPath [ iproute iptables ] + lib.makeBinPath [ iproute2 iptables ] } # Install systemd unit. @@ -127,6 +127,9 @@ in { # yolo, this vm can sudo freely. security.sudo.wheelNeedsPassword = false; + # nix considers squid insecure, but this is fine for a test. + nixpkgs.config.permittedInsecurePackages = [ "squid-7.0.1" ]; + # Enable cloud-init so we can set VM hostnames and the like the same as other # distros. This will also take care of SSH keys. It's pretty handy. 
services.cloud-init = { diff --git a/tstest/integration/vms/opensuse_leap_15_1_test.go b/tstest/integration/vms/opensuse_leap_15_1_test.go deleted file mode 100644 index 7d3ac579ec6d1..0000000000000 --- a/tstest/integration/vms/opensuse_leap_15_1_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !windows && !plan9 - -package vms - -import ( - "encoding/json" - "os" - "path/filepath" - "testing" - - "github.com/google/uuid" -) - -/* - The images that we use for OpenSUSE Leap 15.1 have an issue that makes the - nocloud backend[1] for cloud-init just not work. As a distro-specific - workaround, we're gonna pretend to be OpenStack. - - TODO(Xe): delete once we no longer need to support OpenSUSE Leap 15.1. - - [1]: https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html -*/ - -type openSUSELeap151MetaData struct { - Zone string `json:"availability_zone"` // nova - Hostname string `json:"hostname"` // opensuse-leap-15-1 - LaunchIndex string `json:"launch_index"` // 0 - Meta openSUSELeap151MetaDataMeta `json:"meta"` // some openstack metadata we don't need to care about - Name string `json:"name"` // opensuse-leap-15-1 - UUID string `json:"uuid"` // e9c664cd-b116-433b-aa61-7ff420163dcd -} - -type openSUSELeap151MetaDataMeta struct { - Role string `json:"role"` // server - DSMode string `json:"dsmode"` // local - Essential string `json:"essential"` // essential -} - -func hackOpenSUSE151UserData(t *testing.T, d Distro, dir string) bool { - if d.Name != "opensuse-leap-15-1" { - return false - } - - t.Log("doing OpenSUSE Leap 15.1 hack") - osDir := filepath.Join(dir, "openstack", "latest") - err := os.MkdirAll(osDir, 0755) - if err != nil { - t.Fatalf("can't make metadata home: %v", err) - } - - metadata, err := json.Marshal(openSUSELeap151MetaData{ - Zone: "nova", - Hostname: d.Name, - LaunchIndex: "0", - Meta: openSUSELeap151MetaDataMeta{ - Role: "server", - DSMode: 
"local", - Essential: "false", - }, - Name: d.Name, - UUID: uuid.New().String(), - }) - if err != nil { - t.Fatalf("can't encode metadata: %v", err) - } - err = os.WriteFile(filepath.Join(osDir, "meta_data.json"), metadata, 0666) - if err != nil { - t.Fatalf("can't write to meta_data.json: %v", err) - } - - data, err := os.ReadFile(filepath.Join(dir, "user-data")) - if err != nil { - t.Fatalf("can't read user_data: %v", err) - } - - err = os.WriteFile(filepath.Join(osDir, "user_data"), data, 0666) - if err != nil { - t.Fatalf("can't create output user_data: %v", err) - } - - return true -} diff --git a/tstest/integration/vms/regex_flag.go b/tstest/integration/vms/regex_flag.go deleted file mode 100644 index 02e399ecdfaad..0000000000000 --- a/tstest/integration/vms/regex_flag.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package vms - -import "regexp" - -type regexValue struct { - r *regexp.Regexp -} - -func (r *regexValue) String() string { - if r.r == nil { - return "" - } - - return r.r.String() -} - -func (r *regexValue) Set(val string) error { - if rex, err := regexp.Compile(val); err != nil { - return err - } else { - r.r = rex - return nil - } -} - -func (r regexValue) Unwrap() *regexp.Regexp { return r.r } diff --git a/tstest/integration/vms/regex_flag_test.go b/tstest/integration/vms/regex_flag_test.go deleted file mode 100644 index 0f4e5f8f7bdec..0000000000000 --- a/tstest/integration/vms/regex_flag_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package vms - -import ( - "flag" - "testing" -) - -func TestRegexFlag(t *testing.T) { - var v regexValue - fs := flag.NewFlagSet(t.Name(), flag.PanicOnError) - fs.Var(&v, "regex", "regex to parse") - - const want = `.*` - fs.Parse([]string{"-regex", want}) - if v.Unwrap().String() != want { - t.Fatalf("got wrong regex: %q, wanted: %q", v.Unwrap().String(), want) - 
} -} diff --git a/tstest/integration/vms/top_level_test.go b/tstest/integration/vms/top_level_test.go index c107fd89cc886..5db237b6e33b7 100644 --- a/tstest/integration/vms/top_level_test.go +++ b/tstest/integration/vms/top_level_test.go @@ -14,17 +14,13 @@ import ( expect "github.com/tailscale/goexpect" ) -func TestRunUbuntu1804(t *testing.T) { +func TestRunUbuntu2404(t *testing.T) { testOneDistribution(t, 0, Distros[0]) } -func TestRunUbuntu2004(t *testing.T) { - testOneDistribution(t, 1, Distros[1]) -} - -func TestRunNixos2111(t *testing.T) { +func TestRunNixos2505(t *testing.T) { t.Parallel() - testOneDistribution(t, 2, Distros[2]) + testOneDistribution(t, 1, Distros[1]) } // TestMITMProxy is a smoke test for derphttp through a MITM proxy. @@ -39,13 +35,7 @@ func TestRunNixos2111(t *testing.T) { func TestMITMProxy(t *testing.T) { t.Parallel() setupTests(t) - distro := Distros[2] // nixos-21.11 - - if distroRex.Unwrap().MatchString(distro.Name) { - t.Logf("%s matches %s", distro.Name, distroRex.Unwrap()) - } else { - t.Skip("regex not matched") - } + distro := Distros[1] // nixos-25.05 ctx, done := context.WithCancel(context.Background()) t.Cleanup(done) diff --git a/tstest/integration/vms/vms_test.go b/tstest/integration/vms/vms_test.go index f71f2bdbf2069..0bab3ba5d96d5 100644 --- a/tstest/integration/vms/vms_test.go +++ b/tstest/integration/vms/vms_test.go @@ -15,7 +15,6 @@ import ( "os" "os/exec" "path/filepath" - "regexp" "strconv" "strings" "sync" @@ -43,11 +42,6 @@ var ( useVNC = flag.Bool("use-vnc", false, "if set, display guest vms over VNC") verboseLogcatcher = flag.Bool("verbose-logcatcher", true, "if set, print logcatcher to t.Logf") verboseQemu = flag.Bool("verbose-qemu", true, "if set, print qemu console to t.Logf") - distroRex = func() *regexValue { - result := ®exValue{r: regexp.MustCompile(`.*`)} - flag.Var(result, "distro-regex", "The regex that matches what distros should be run") - return result - }() ) func TestDownloadImages(t *testing.T) { 
@@ -59,9 +53,6 @@ func TestDownloadImages(t *testing.T) { distro := d t.Run(distro.Name, func(t *testing.T) { t.Parallel() - if !distroRex.Unwrap().MatchString(distro.Name) { - t.Skipf("distro name %q doesn't match regex: %s", distro.Name, distroRex) - } if strings.HasPrefix(distro.Name, "nixos") { t.Skip("NixOS is built on the fly, no need to download it") } @@ -175,10 +166,6 @@ func mkSeed(t *testing.T, d Distro, sshKey, hostURL, tdir string, port int) { filepath.Join(dir, "user-data"), } - if hackOpenSUSE151UserData(t, d, dir) { - args = append(args, filepath.Join(dir, "openstack")) - } - run(t, tdir, "genisoimage", args...) } @@ -247,12 +234,6 @@ var ramsem struct { func testOneDistribution(t *testing.T, n int, distro Distro) { setupTests(t) - if distroRex.Unwrap().MatchString(distro.Name) { - t.Logf("%s matches %s", distro.Name, distroRex.Unwrap()) - } else { - t.Skip("regex not matched") - } - ctx, done := context.WithCancel(context.Background()) t.Cleanup(done) From dd615c8fdd6c225ae9da777a47dbbecf08478472 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 16:19:12 -0700 Subject: [PATCH 0442/1093] util/linuxfw, feature/buildfeatures: add ts_omit_iptables to make IPTables optional Updates #12614 Change-Id: Ic0eba982aa8468a55c63e1b763345f032a55b4e2 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 3 +- cmd/tailscaled/depaware-minbox.txt | 5 +- .../feature_iptables_disabled.go | 13 ++ .../buildfeatures/feature_iptables_enabled.go | 13 ++ feature/featuretags/featuretags.go | 1 + ipn/ipn_view.go | 2 + ipn/prefs.go | 2 + util/linuxfw/detector.go | 37 +++- util/linuxfw/fake.go | 2 +- util/linuxfw/iptables.go | 165 +++++++++++++++++- util/linuxfw/iptables_disabled.go | 20 +++ util/linuxfw/iptables_for_svcs_test.go | 14 +- util/linuxfw/iptables_runner.go | 157 +---------------- util/linuxfw/iptables_runner_test.go | 12 +- util/linuxfw/linuxfw.go | 11 ++ util/linuxfw/linuxfw_unsupported.go | 40 ----- util/linuxfw/nftables.go | 4 + 
wgengine/router/router.go | 2 +- 18 files changed, 282 insertions(+), 221 deletions(-) create mode 100644 feature/buildfeatures/feature_iptables_disabled.go create mode 100644 feature/buildfeatures/feature_iptables_enabled.go create mode 100644 util/linuxfw/iptables_disabled.go delete mode 100644 util/linuxfw/linuxfw_unsupported.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index d19ea1f17658b..a68d67b6d9f62 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -98,7 +98,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/disco from tailscale.com/derp/derpserver tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ - tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature from tailscale.com/tsweb+ + L tailscale.com/feature/buildfeatures from tailscale.com/util/linuxfw tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index cf4a9b039fca9..3b66435668afc 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -2,7 +2,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 - github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ @@ -420,13 +419,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ os from 
crypto/internal/sysrand+ - os/exec from github.com/coreos/go-iptables/iptables+ + os/exec from tailscale.com/clientupdate+ os/signal from tailscale.com/cmd/tailscaled os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/coreos/go-iptables/iptables+ + regexp from internal/profile+ regexp/syntax from regexp runtime from archive/tar+ runtime/debug from github.com/klauspost/compress/zstd+ diff --git a/feature/buildfeatures/feature_iptables_disabled.go b/feature/buildfeatures/feature_iptables_disabled.go new file mode 100644 index 0000000000000..8cda5be5d6ae6 --- /dev/null +++ b/feature/buildfeatures/feature_iptables_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_iptables + +package buildfeatures + +// HasIPTables is whether the binary was built with support for modular feature "Linux iptables support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_iptables" build tag. +// It's a const so it can be used for dead code elimination. +const HasIPTables = false diff --git a/feature/buildfeatures/feature_iptables_enabled.go b/feature/buildfeatures/feature_iptables_enabled.go new file mode 100644 index 0000000000000..44d98473f05f2 --- /dev/null +++ b/feature/buildfeatures/feature_iptables_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_iptables + +package buildfeatures + +// HasIPTables is whether the binary was built with support for modular feature "Linux iptables support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_iptables" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasIPTables = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 2edecef581b6c..40a5ac3f5f396 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -112,6 +112,7 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Generic Receive Offload support (performance)", Deps: []FeatureTag{"netstack"}, }, + "iptables": {"IPTables", "Linux iptables support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 170dc409b2095..1c7639f6ff932 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -421,6 +421,8 @@ func (v PrefsView) PostureChecking() bool { return v.ж.PostureChecking } // NetfilterKind specifies what netfilter implementation to use. // +// It can be "iptables", "nftables", or "" to auto-detect. +// // Linux-only. func (v PrefsView) NetfilterKind() string { return v.ж.NetfilterKind } diff --git a/ipn/prefs.go b/ipn/prefs.go index 1efb5d0feabd9..a2149950ddc1e 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -264,6 +264,8 @@ type Prefs struct { // NetfilterKind specifies what netfilter implementation to use. // + // It can be "iptables", "nftables", or "" to auto-detect. + // // Linux-only. 
NetfilterKind string diff --git a/util/linuxfw/detector.go b/util/linuxfw/detector.go index fffa523afdcf4..644126131bbba 100644 --- a/util/linuxfw/detector.go +++ b/util/linuxfw/detector.go @@ -10,6 +10,8 @@ import ( "os/exec" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/types/logger" "tailscale.com/version/distro" @@ -42,10 +44,12 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { var det linuxFWDetector if mode == "" { // We have no preference, so check if `iptables` is even available. - _, err := det.iptDetect() - if err != nil && errors.Is(err, exec.ErrNotFound) { - logf("iptables not found: %v; falling back to nftables", err) - mode = "nftables" + if buildfeatures.HasIPTables { + _, err := det.iptDetect() + if err != nil && errors.Is(err, exec.ErrNotFound) { + logf("iptables not found: %v; falling back to nftables", err) + mode = "nftables" + } } } @@ -59,11 +63,16 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { return FirewallModeNfTables case "iptables": hostinfo.SetFirewallMode("ipt-forced") - default: + return FirewallModeIPTables + } + if buildfeatures.HasIPTables { logf("default choosing iptables") hostinfo.SetFirewallMode("ipt-default") + return FirewallModeIPTables } - return FirewallModeIPTables + logf("default choosing nftables") + hostinfo.SetFirewallMode("nft-default") + return FirewallModeNfTables } // tableDetector abstracts helpers to detect the firewall mode. @@ -80,19 +89,33 @@ func (l linuxFWDetector) iptDetect() (int, error) { return detectIptables() } +var hookDetectNetfilter feature.Hook[func() (int, error)] + +// ErrUnsupported is the error returned from all functions on non-Linux +// platforms. +var ErrUnsupported = errors.New("linuxfw:unsupported") + // nftDetect returns the number of nftables rules in the current namespace. 
func (l linuxFWDetector) nftDetect() (int, error) { - return detectNetfilter() + if f, ok := hookDetectNetfilter.GetOk(); ok { + return f() + } + return 0, ErrUnsupported } // pickFirewallModeFromInstalledRules returns the firewall mode to use based on // the environment and the system's capabilities. func pickFirewallModeFromInstalledRules(logf logger.Logf, det tableDetector) FirewallMode { + if !buildfeatures.HasIPTables { + hostinfo.SetFirewallMode("nft-noipt") + return FirewallModeNfTables + } if distro.Get() == distro.Gokrazy { // Reduce startup logging on gokrazy. There's no way to do iptables on // gokrazy anyway. return FirewallModeNfTables } + iptAva, nftAva := true, true iptRuleCount, err := det.iptDetect() if err != nil { diff --git a/util/linuxfw/fake.go b/util/linuxfw/fake.go index 63a728d5566a5..d01849a2e5c9d 100644 --- a/util/linuxfw/fake.go +++ b/util/linuxfw/fake.go @@ -128,7 +128,7 @@ func (n *fakeIPTables) DeleteChain(table, chain string) error { } } -func NewFakeIPTablesRunner() *iptablesRunner { +func NewFakeIPTablesRunner() NetfilterRunner { ipt4 := newFakeIPTables() v6Available := false var ipt6 iptablesInterface diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index 234fa526ce17c..73da920863d96 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ -1,21 +1,34 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && (arm64 || amd64) && !ts_omit_iptables + // TODO(#8502): add support for more architectures -//go:build linux && (arm64 || amd64) package linuxfw import ( + "bytes" + "errors" "fmt" + "os" "os/exec" "strings" "unicode" + "github.com/coreos/go-iptables/iptables" "tailscale.com/types/logger" "tailscale.com/util/multierr" + "tailscale.com/version/distro" ) +func init() { + isNotExistError = func(err error) bool { + var e *iptables.Error + return errors.As(err, &e) && e.IsNotExist() + } +} + // DebugNetfilter prints debug information about iptables rules 
to the // provided log function. func DebugIptables(logf logger.Logf) error { @@ -71,3 +84,153 @@ func detectIptables() (int, error) { // return the count of non-default rules return count, nil } + +// newIPTablesRunner constructs a NetfilterRunner that programs iptables rules. +// If the underlying iptables library fails to initialize, that error is +// returned. The runner probes for IPv6 support once at initialization time and +// if not found, no IPv6 rules will be modified for the lifetime of the runner. +func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { + ipt4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) + if err != nil { + return nil, err + } + + supportsV6, supportsV6NAT, supportsV6Filter := false, false, false + v6err := CheckIPv6(logf) + ip6terr := checkIP6TablesExists() + var ipt6 *iptables.IPTables + switch { + case v6err != nil: + logf("disabling tunneled IPv6 due to system IPv6 config: %v", v6err) + case ip6terr != nil: + logf("disabling tunneled IPv6 due to missing ip6tables: %v", ip6terr) + default: + supportsV6 = true + ipt6, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + if err != nil { + return nil, err + } + supportsV6Filter = checkSupportsV6Filter(ipt6, logf) + supportsV6NAT = checkSupportsV6NAT(ipt6, logf) + logf("netfilter running in iptables mode v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT) + } + return &iptablesRunner{ + ipt4: ipt4, + ipt6: ipt6, + v6Available: supportsV6, + v6NATAvailable: supportsV6NAT, + v6FilterAvailable: supportsV6Filter}, nil +} + +// checkSupportsV6Filter returns whether the system has a "filter" table in the +// IPv6 tables. Some container environments such as GitHub codespaces have +// limited local IPv6 support, and containers containing ip6tables, but do not +// have kernel support for IPv6 filtering. +// We will not set ip6tables rules in these instances. 
+func checkSupportsV6Filter(ipt *iptables.IPTables, logf logger.Logf) bool { + if ipt == nil { + return false + } + _, filterListErr := ipt.ListChains("filter") + if filterListErr == nil { + return true + } + logf("ip6tables filtering is not supported on this host: %v", filterListErr) + return false +} + +// checkSupportsV6NAT returns whether the system has a "nat" table in the +// IPv6 netfilter stack. +// +// The nat table was added after the initial release of ipv6 +// netfilter, so some older distros ship a kernel that can't NAT IPv6 +// traffic. +// ipt must be initialized for IPv6. +func checkSupportsV6NAT(ipt *iptables.IPTables, logf logger.Logf) bool { + if ipt == nil || ipt.Proto() != iptables.ProtocolIPv6 { + return false + } + _, natListErr := ipt.ListChains("nat") + if natListErr == nil { + return true + } + + // TODO (irbekrm): the following two checks were added before the check + // above that verifies that nat chains can be listed. It is a + // container-friendly check (see + // https://github.com/tailscale/tailscale/issues/11344), but also should + // be good enough on its own in other environments. If we never observe + // it falsely succeed, let's remove the other two checks. + + bs, err := os.ReadFile("/proc/net/ip6_tables_names") + if err != nil { + return false + } + if bytes.Contains(bs, []byte("nat\n")) { + logf("[unexpected] listing nat chains failed, but /proc/net/ip6_tables_name reports a nat table existing") + return true + } + if exec.Command("modprobe", "ip6table_nat").Run() == nil { + logf("[unexpected] listing nat chains failed, but modprobe ip6table_nat succeeded") + return true + } + return false +} + +func init() { + hookIPTablesCleanup.Set(ipTablesCleanUp) +} + +// ipTablesCleanUp removes all Tailscale added iptables rules. +// Any errors that occur are logged to the provided logf. 
+func ipTablesCleanUp(logf logger.Logf) { + switch distro.Get() { + case distro.Gokrazy, distro.JetKVM: + // These use nftables and don't have the "iptables" command. + // Avoid log spam on cleanup. (#12277) + return + } + err := clearRules(iptables.ProtocolIPv4, logf) + if err != nil { + logf("linuxfw: clear iptables: %v", err) + } + + err = clearRules(iptables.ProtocolIPv6, logf) + if err != nil { + logf("linuxfw: clear ip6tables: %v", err) + } +} + +// clearRules clears all the iptables rules created by Tailscale +// for the given protocol. If error occurs, it's logged but not returned. +func clearRules(proto iptables.Protocol, logf logger.Logf) error { + ipt, err := iptables.NewWithProtocol(proto) + if err != nil { + return err + } + + var errs []error + + if err := delTSHook(ipt, "filter", "INPUT", logf); err != nil { + errs = append(errs, err) + } + if err := delTSHook(ipt, "filter", "FORWARD", logf); err != nil { + errs = append(errs, err) + } + if err := delTSHook(ipt, "nat", "POSTROUTING", logf); err != nil { + errs = append(errs, err) + } + + if err := delChain(ipt, "filter", "ts-input"); err != nil { + errs = append(errs, err) + } + if err := delChain(ipt, "filter", "ts-forward"); err != nil { + errs = append(errs, err) + } + + if err := delChain(ipt, "nat", "ts-postrouting"); err != nil { + errs = append(errs, err) + } + + return multierr.New(errs...) 
+} diff --git a/util/linuxfw/iptables_disabled.go b/util/linuxfw/iptables_disabled.go new file mode 100644 index 0000000000000..8736f83998fa3 --- /dev/null +++ b/util/linuxfw/iptables_disabled.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (linux && !(arm64 || amd64)) || ts_omit_iptables + +package linuxfw + +import ( + "errors" + + "tailscale.com/types/logger" +) + +func detectIptables() (int, error) { + return 0, nil +} + +func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { + return nil, errors.New("iptables disabled in build") +} diff --git a/util/linuxfw/iptables_for_svcs_test.go b/util/linuxfw/iptables_for_svcs_test.go index c3c1b1f65d6fe..0e56d70ba7078 100644 --- a/util/linuxfw/iptables_for_svcs_test.go +++ b/util/linuxfw/iptables_for_svcs_test.go @@ -10,6 +10,10 @@ import ( "testing" ) +func newFakeIPTablesRunner() *iptablesRunner { + return NewFakeIPTablesRunner().(*iptablesRunner) +} + func Test_iptablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { v4Addr := netip.MustParseAddr("10.0.0.4") v6Addr := netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") @@ -45,7 +49,7 @@ func Test_iptablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreatePortMapRule(t, ruleset, table) @@ -103,7 +107,7 @@ func Test_iptablesRunner_DeletePortMapRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreatePortMapRule(t, ruleset, table) @@ -127,7 +131,7 @@ func Test_iptablesRunner_DeleteSvc(t *testing.T) { v4Addr := netip.MustParseAddr("10.0.0.4") v6Addr := 
netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") testPM := PortMap{Protocol: "tcp", MatchPort: 4003, TargetPort: 80} - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // create two rules that will consitute svc1 s1R1 := argsForPortMapRule("svc1", "tailscale0", v4Addr, testPM) @@ -189,7 +193,7 @@ func Test_iptablesRunner_EnsureDNATRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreateDNATRule(t, ruleset, table) @@ -248,7 +252,7 @@ func Test_iptablesRunner_DeleteDNATRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreateDNATRule(t, ruleset, table) diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index 78844065a4edd..76b4cdd6fd142 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -6,31 +6,22 @@ package linuxfw import ( - "bytes" - "errors" "fmt" "log" "net/netip" - "os" "os/exec" "slices" "strconv" "strings" - "github.com/coreos/go-iptables/iptables" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" - "tailscale.com/util/multierr" - "tailscale.com/version/distro" ) // isNotExistError needs to be overridden in tests that rely on distinguishing // this error, because we don't have a good way how to create a new // iptables.Error of that type. 
-var isNotExistError = func(err error) bool { - var e *iptables.Error - return errors.As(err, &e) && e.IsNotExist() -} +var isNotExistError = func(err error) bool { return false } type iptablesInterface interface { // Adding this interface for testing purposes so we can mock out @@ -62,98 +53,6 @@ func checkIP6TablesExists() error { return nil } -// newIPTablesRunner constructs a NetfilterRunner that programs iptables rules. -// If the underlying iptables library fails to initialize, that error is -// returned. The runner probes for IPv6 support once at initialization time and -// if not found, no IPv6 rules will be modified for the lifetime of the runner. -func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { - ipt4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) - if err != nil { - return nil, err - } - - supportsV6, supportsV6NAT, supportsV6Filter := false, false, false - v6err := CheckIPv6(logf) - ip6terr := checkIP6TablesExists() - var ipt6 *iptables.IPTables - switch { - case v6err != nil: - logf("disabling tunneled IPv6 due to system IPv6 config: %v", v6err) - case ip6terr != nil: - logf("disabling tunneled IPv6 due to missing ip6tables: %v", ip6terr) - default: - supportsV6 = true - ipt6, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) - if err != nil { - return nil, err - } - supportsV6Filter = checkSupportsV6Filter(ipt6, logf) - supportsV6NAT = checkSupportsV6NAT(ipt6, logf) - logf("netfilter running in iptables mode v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT) - } - return &iptablesRunner{ - ipt4: ipt4, - ipt6: ipt6, - v6Available: supportsV6, - v6NATAvailable: supportsV6NAT, - v6FilterAvailable: supportsV6Filter}, nil -} - -// checkSupportsV6Filter returns whether the system has a "filter" table in the -// IPv6 tables. 
Some container environments such as GitHub codespaces have -// limited local IPv6 support, and containers containing ip6tables, but do not -// have kernel support for IPv6 filtering. -// We will not set ip6tables rules in these instances. -func checkSupportsV6Filter(ipt *iptables.IPTables, logf logger.Logf) bool { - if ipt == nil { - return false - } - _, filterListErr := ipt.ListChains("filter") - if filterListErr == nil { - return true - } - logf("ip6tables filtering is not supported on this host: %v", filterListErr) - return false -} - -// checkSupportsV6NAT returns whether the system has a "nat" table in the -// IPv6 netfilter stack. -// -// The nat table was added after the initial release of ipv6 -// netfilter, so some older distros ship a kernel that can't NAT IPv6 -// traffic. -// ipt must be initialized for IPv6. -func checkSupportsV6NAT(ipt *iptables.IPTables, logf logger.Logf) bool { - if ipt == nil || ipt.Proto() != iptables.ProtocolIPv6 { - return false - } - _, natListErr := ipt.ListChains("nat") - if natListErr == nil { - return true - } - - // TODO (irbekrm): the following two checks were added before the check - // above that verifies that nat chains can be listed. It is a - // container-friendly check (see - // https://github.com/tailscale/tailscale/issues/11344), but also should - // be good enough on its own in other environments. If we never observe - // it falsely succeed, let's remove the other two checks. - - bs, err := os.ReadFile("/proc/net/ip6_tables_names") - if err != nil { - return false - } - if bytes.Contains(bs, []byte("nat\n")) { - logf("[unexpected] listing nat chains failed, but /proc/net/ip6_tables_name reports a nat table existing") - return true - } - if exec.Command("modprobe", "ip6table_nat").Run() == nil { - logf("[unexpected] listing nat chains failed, but modprobe ip6table_nat succeeded") - return true - } - return false -} - // HasIPV6 reports true if the system supports IPv6. 
func (i *iptablesRunner) HasIPV6() bool { return i.v6Available @@ -685,26 +584,6 @@ func (i *iptablesRunner) DelMagicsockPortRule(port uint16, network string) error return nil } -// IPTablesCleanUp removes all Tailscale added iptables rules. -// Any errors that occur are logged to the provided logf. -func IPTablesCleanUp(logf logger.Logf) { - switch distro.Get() { - case distro.Gokrazy, distro.JetKVM: - // These use nftables and don't have the "iptables" command. - // Avoid log spam on cleanup. (#12277) - return - } - err := clearRules(iptables.ProtocolIPv4, logf) - if err != nil { - logf("linuxfw: clear iptables: %v", err) - } - - err = clearRules(iptables.ProtocolIPv6, logf) - if err != nil { - logf("linuxfw: clear ip6tables: %v", err) - } -} - // delTSHook deletes hook in a chain that jumps to a ts-chain. If the hook does not // exist, it's a no-op since the desired state is already achieved but we log the // error because error code from the iptables module resists unwrapping. @@ -733,40 +612,6 @@ func delChain(ipt iptablesInterface, table, chain string) error { return nil } -// clearRules clears all the iptables rules created by Tailscale -// for the given protocol. If error occurs, it's logged but not returned. 
-func clearRules(proto iptables.Protocol, logf logger.Logf) error { - ipt, err := iptables.NewWithProtocol(proto) - if err != nil { - return err - } - - var errs []error - - if err := delTSHook(ipt, "filter", "INPUT", logf); err != nil { - errs = append(errs, err) - } - if err := delTSHook(ipt, "filter", "FORWARD", logf); err != nil { - errs = append(errs, err) - } - if err := delTSHook(ipt, "nat", "POSTROUTING", logf); err != nil { - errs = append(errs, err) - } - - if err := delChain(ipt, "filter", "ts-input"); err != nil { - errs = append(errs, err) - } - if err := delChain(ipt, "filter", "ts-forward"); err != nil { - errs = append(errs, err) - } - - if err := delChain(ipt, "nat", "ts-postrouting"); err != nil { - errs = append(errs, err) - } - - return multierr.New(errs...) -} - // argsFromPostRoutingRule accepts a rule as returned by iptables.List and, if it is a rule from POSTROUTING chain, // returns the args part, else returns the original rule. func argsFromPostRoutingRule(r string) string { diff --git a/util/linuxfw/iptables_runner_test.go b/util/linuxfw/iptables_runner_test.go index 56f13c78a8010..451b8aab47529 100644 --- a/util/linuxfw/iptables_runner_test.go +++ b/util/linuxfw/iptables_runner_test.go @@ -20,7 +20,7 @@ func init() { } func TestAddAndDeleteChains(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() err := iptr.AddChains() if err != nil { t.Fatal(err) @@ -59,7 +59,7 @@ func TestAddAndDeleteChains(t *testing.T) { } func TestAddAndDeleteHooks(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // don't need to test what happens if the chains don't exist, because // this is handled by fake iptables, in realife iptables would return error. 
if err := iptr.AddChains(); err != nil { @@ -113,7 +113,7 @@ func TestAddAndDeleteHooks(t *testing.T) { } func TestAddAndDeleteBase(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() tunname := "tun0" if err := iptr.AddChains(); err != nil { t.Fatal(err) @@ -176,7 +176,7 @@ func TestAddAndDeleteBase(t *testing.T) { } func TestAddAndDelLoopbackRule(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // We don't need to test for malformed addresses, AddLoopbackRule // takes in a netip.Addr, which is already valid. fakeAddrV4 := netip.MustParseAddr("192.168.0.2") @@ -247,7 +247,7 @@ func TestAddAndDelLoopbackRule(t *testing.T) { } func TestAddAndDelSNATRule(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() if err := iptr.AddChains(); err != nil { t.Fatal(err) @@ -292,7 +292,7 @@ func TestAddAndDelSNATRule(t *testing.T) { func TestEnsureSNATForDst_ipt(t *testing.T) { ip1, ip2, ip3 := netip.MustParseAddr("100.99.99.99"), netip.MustParseAddr("100.88.88.88"), netip.MustParseAddr("100.77.77.77") - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // 1. A new rule gets added mustCreateSNATRule_ipt(t, iptr, ip1, ip2) diff --git a/util/linuxfw/linuxfw.go b/util/linuxfw/linuxfw.go index be520e7a4a074..4aa0f87829bd3 100644 --- a/util/linuxfw/linuxfw.go +++ b/util/linuxfw/linuxfw.go @@ -14,6 +14,7 @@ import ( "strings" "github.com/tailscale/netlink" + "tailscale.com/feature" "tailscale.com/types/logger" ) @@ -180,3 +181,13 @@ func CheckIPRuleSupportsV6(logf logger.Logf) error { defer netlink.RuleDel(rule) return netlink.RuleAdd(rule) } + +var hookIPTablesCleanup feature.Hook[func(logger.Logf)] + +// IPTablesCleanUp removes all Tailscale added iptables rules. +// Any errors that occur are logged to the provided logf. 
+func IPTablesCleanUp(logf logger.Logf) { + if f, ok := hookIPTablesCleanup.GetOk(); ok { + f(logf) + } +} diff --git a/util/linuxfw/linuxfw_unsupported.go b/util/linuxfw/linuxfw_unsupported.go deleted file mode 100644 index 7bfb4fd010302..0000000000000 --- a/util/linuxfw/linuxfw_unsupported.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// NOTE: linux_{arm64, amd64} are the only two currently supported archs due to missing -// support in upstream dependencies. - -// TODO(#8502): add support for more architectures -//go:build linux && !(arm64 || amd64) - -package linuxfw - -import ( - "errors" - - "tailscale.com/types/logger" -) - -// ErrUnsupported is the error returned from all functions on non-Linux -// platforms. -var ErrUnsupported = errors.New("linuxfw:unsupported") - -// DebugNetfilter is not supported on non-Linux platforms. -func DebugNetfilter(logf logger.Logf) error { - return ErrUnsupported -} - -// DetectNetfilter is not supported on non-Linux platforms. -func detectNetfilter() (int, error) { - return 0, ErrUnsupported -} - -// DebugIptables is not supported on non-Linux platforms. -func debugIptables(logf logger.Logf) error { - return ErrUnsupported -} - -// DetectIptables is not supported on non-Linux platforms. -func detectIptables() (int, error) { - return 0, ErrUnsupported -} diff --git a/util/linuxfw/nftables.go b/util/linuxfw/nftables.go index e8b267b5e42ae..94ce51a1405a4 100644 --- a/util/linuxfw/nftables.go +++ b/util/linuxfw/nftables.go @@ -103,6 +103,10 @@ func DebugNetfilter(logf logger.Logf) error { return nil } +func init() { + hookDetectNetfilter.Set(detectNetfilter) +} + // detectNetfilter returns the number of nftables rules present in the system. func detectNetfilter() (int, error) { // Frist try creating a dummy postrouting chain. 
Emperically, we have diff --git a/wgengine/router/router.go b/wgengine/router/router.go index 25d1c08a29f4d..edd7d14cbd4be 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -94,7 +94,7 @@ type Config struct { SNATSubnetRoutes bool // SNAT traffic to local subnets StatefulFiltering bool // Apply stateful filtering to inbound connections NetfilterMode preftype.NetfilterMode // how much to manage netfilter rules - NetfilterKind string // what kind of netfilter to use (nftables, iptables) + NetfilterKind string // what kind of netfilter to use ("nftables", "iptables", or "" to auto-detect) } func (a *Config) Equal(b *Config) bool { From f19409482d8c58f4b9478597aa09417289a79d71 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 23 Sep 2025 11:41:29 -0700 Subject: [PATCH 0443/1093] logtail: delete AppendTextOrJSONLocked This was accidentally added in #11671 for testing. Nothing uses it. Updates tailscale/corp#21363 Signed-off-by: Joe Tsai --- logtail/logtail.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/logtail/logtail.go b/logtail/logtail.go index b355addd20b82..6c4bbccc5a20e 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -708,11 +708,6 @@ func appendTruncatedString(dst, src []byte, n int) []byte { return dst } -func (l *Logger) AppendTextOrJSONLocked(dst, src []byte) []byte { - l.clock = tstime.StdClock{} - return l.appendTextOrJSONLocked(dst, src, 0) -} - // appendTextOrJSONLocked appends a raw text message or a raw JSON object // in the Tailscale JSON log format. 
func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { From 475b520aa2d1cced66f6134712991944068287c9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 27 Sep 2025 13:07:05 -0700 Subject: [PATCH 0444/1093] tsconst, util/linuxfw, wgengine/router: move Linux fw consts to tsconst Now cmd/derper doesn't depend on iptables, nftables, and netlink code :) But this is really just a cleanup step I noticed on the way to making tsnet applications able to not link all the OS router code which they don't use. Updates #17313 Change-Id: Ic7b4e04e3a9639fd198e9dbeb0f7bae22a4a47a9 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 27 +++++------------ cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/depaware.txt | 20 +++---------- cmd/tailscaled/depaware-minbox.txt | 3 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- net/netns/netns_linux.go | 4 +-- tsconst/linuxfw.go | 43 ++++++++++++++++++++++++++++ tsconst/{interface.go => tsconst.go} | 0 tsnet/depaware.txt | 2 +- util/linuxfw/iptables_runner.go | 12 ++++---- util/linuxfw/iptables_runner_test.go | 7 +++-- util/linuxfw/linuxfw.go | 26 ++++++----------- wgengine/router/router_linux.go | 13 +++++---- wgengine/router/router_linux_test.go | 13 +++++---- 15 files changed, 94 insertions(+), 82 deletions(-) create mode 100644 tsconst/linuxfw.go rename tsconst/{interface.go => tsconst.go} (100%) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index a68d67b6d9f62..7d322aa31e337 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -11,7 +11,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes 
from tailscale.com/util/winutil+ github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/go-json-experiment/json from tailscale.com/types/opt+ @@ -21,18 +20,11 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/hdevalence/ed25519consensus from tailscale.com/tka L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/munnerz/goautoneg from github.com/prometheus/common/expfmt @@ -49,11 +41,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - 
L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/setec/client/setec from tailscale.com/cmd/derper github.com/tailscale/setec/types/api from github.com/tailscale/setec/client/setec - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/net/tsaddr @@ -98,8 +87,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/disco from tailscale.com/derp/derpserver tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ - tailscale.com/feature from tailscale.com/tsweb+ - L tailscale.com/feature/buildfeatures from tailscale.com/util/linuxfw + tailscale.com/feature from tailscale.com/tsweb tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local @@ -131,7 +119,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/syncs from tailscale.com/cmd/derper+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tka from tailscale.com/client/local+ - W tailscale.com/tsconst from tailscale.com/net/netmon+ + LW tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate tailscale.com/tstime/rate from tailscale.com/derp/derpserver @@ -164,7 +152,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/eventbus from tailscale.com/net/netmon+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from 
tailscale.com/health+ tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto @@ -214,7 +201,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sync/singleflight from github.com/tailscale/setec/client/setec golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -363,7 +350,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - L io/ioutil from github.com/mitchellh/go-ps+ + L io/ioutil from github.com/mitchellh/go-ps iter from maps+ log from expvar+ log/internal from log @@ -387,13 +374,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/coreos/go-iptables/iptables+ + os/exec from golang.zx2c4.com/wireguard/windows/tunnel/winipcfg+ os/signal from tailscale.com/cmd/derper W os/user from tailscale.com/util/winutil path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from github.com/coreos/go-iptables/iptables+ + regexp from github.com/prometheus/client_golang/prometheus/internal+ regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/prometheus/client_golang/prometheus+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 
2adbd5f5dca9d..ba644eb03eb62 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -932,7 +932,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 4bd4e6bcabb22..47e5ca48e7669 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -14,7 +14,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+ W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode L github.com/fogleman/gg from tailscale.com/client/systray @@ -31,12 +30,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L github.com/golang/freetype/raster from github.com/fogleman/gg+ L github.com/golang/freetype/truetype from github.com/fogleman/gg github.com/golang/groupcache/lru from tailscale.com/net/dnscache - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L 
github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon @@ -44,9 +37,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli 💣 github.com/mattn/go-colorable from tailscale.com/cmd/tailscale/cli 💣 github.com/mattn/go-isatty from tailscale.com/cmd/tailscale/cli+ - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink 💣 github.com/mitchellh/go-ps from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ @@ -66,11 +58,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp - L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from 
tailscale.com/net/tsaddr @@ -183,7 +172,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -259,7 +247,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -446,13 +434,13 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/coreos/go-iptables/iptables+ + os/exec from github.com/atotto/clipboard+ os/signal from tailscale.com/cmd/tailscale/cli+ os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/coreos/go-iptables/iptables+ + regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp runtime from archive/tar+ runtime/debug from tailscale.com+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 3b66435668afc..b0cc9d9c1a8eb 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ 
b/cmd/tailscaled/depaware-minbox.txt @@ -142,6 +142,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tka from tailscale.com/control/controlclient+ + tailscale.com/tsconst from tailscale.com/net/netns+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ @@ -184,7 +185,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/clientupdate/distsign+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - tailscale.com/util/linuxfw from tailscale.com/net/netns+ + tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 579af5c0d3b2b..7fdac984c4b2a 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -419,7 +419,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/httphdr from tailscale.com/feature/taildrop tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/control/controlclient+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 
270edd3719e8e..bff8df411da7e 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -362,7 +362,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ diff --git a/net/netns/netns_linux.go b/net/netns/netns_linux.go index aaf6dab4a9d64..609f524b5cc01 100644 --- a/net/netns/netns_linux.go +++ b/net/netns/netns_linux.go @@ -15,8 +15,8 @@ import ( "golang.org/x/sys/unix" "tailscale.com/envknob" "tailscale.com/net/netmon" + "tailscale.com/tsconst" "tailscale.com/types/logger" - "tailscale.com/util/linuxfw" ) // socketMarkWorksOnce is the sync.Once & cached value for useSocketMark. @@ -111,7 +111,7 @@ func controlC(network, address string, c syscall.RawConn) error { } func setBypassMark(fd uintptr) error { - if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_MARK, linuxfw.TailscaleBypassMarkNum); err != nil { + if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_MARK, tsconst.LinuxBypassMarkNum); err != nil { return fmt.Errorf("setting SO_MARK bypass: %w", err) } return nil diff --git a/tsconst/linuxfw.go b/tsconst/linuxfw.go new file mode 100644 index 0000000000000..ce571e40239ed --- /dev/null +++ b/tsconst/linuxfw.go @@ -0,0 +1,43 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +// Linux firewall constants used by Tailscale. + +// The following bits are added to packet marks for Tailscale use. 
+// +// We tried to pick bits sufficiently out of the way that it's +// unlikely to collide with existing uses. We have 4 bytes of mark +// bits to play with. We leave the lower byte alone on the assumption +// that sysadmins would use those. Kubernetes uses a few bits in the +// second byte, so we steer clear of that too. +// +// Empirically, most of the documentation on packet marks on the +// internet gives the impression that the marks are 16 bits +// wide. Based on this, we theorize that the upper two bytes are +// relatively unused in the wild, and so we consume bits 16:23 (the +// third byte). +// +// The constants are in the iptables/iproute2 string format for +// matching and setting the bits, so they can be directly embedded in +// commands. +const ( + // The mask for reading/writing the 'firewall mask' bits on a packet. + // See the comment on the const block on why we only use the third byte. + // + // We claim bits 16:23 entirely. For now we only use the lower four + // bits, leaving the higher 4 bits for future use. + LinuxFwmarkMask = "0xff0000" + LinuxFwmarkMaskNum = 0xff0000 + + // Packet is from Tailscale and to a subnet route destination, so + // is allowed to be routed through this machine. + LinuxSubnetRouteMark = "0x40000" + LinuxSubnetRouteMarkNum = 0x40000 + + // Packet was originated by tailscaled itself, and must not be + // routed over the Tailscale network. 
+ LinuxBypassMark = "0x80000" + LinuxBypassMarkNum = 0x80000 +) diff --git a/tsconst/interface.go b/tsconst/tsconst.go similarity index 100% rename from tsconst/interface.go rename to tsconst/tsconst.go diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index c196cc14da9ad..71789b7b67213 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -357,7 +357,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index 76b4cdd6fd142..4443a907107d6 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -246,11 +246,11 @@ func (i *iptablesRunner) addBase4(tunname string) error { // POSTROUTING. So instead, we match on the inbound interface in // filter/FORWARD, and set a packet mark that nat/POSTROUTING can // use to effectively run that same test again. 
- args = []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask} + args = []string{"-i", tunname, "-j", "MARK", "--set-mark", subnetRouteMark + "/" + fwmarkMask} if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err) } - args = []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"} + args = []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "ACCEPT"} if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err) } @@ -352,11 +352,11 @@ func (i *iptablesRunner) addBase6(tunname string) error { return fmt.Errorf("adding %v in v6/filter/ts-input: %w", args, err) } - args = []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask} + args = []string{"-i", tunname, "-j", "MARK", "--set-mark", subnetRouteMark + "/" + fwmarkMask} if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err) } - args = []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"} + args = []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "ACCEPT"} if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err) } @@ -445,7 +445,7 @@ func (i *iptablesRunner) DelHooks(logf logger.Logf) error { // AddSNATRule adds a netfilter rule to SNAT traffic destined for // local subnets. 
func (i *iptablesRunner) AddSNATRule() error { - args := []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"} + args := []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "MASQUERADE"} for _, ipt := range i.getNATTables() { if err := ipt.Append("nat", "ts-postrouting", args...); err != nil { return fmt.Errorf("adding %v in nat/ts-postrouting: %w", args, err) @@ -457,7 +457,7 @@ func (i *iptablesRunner) AddSNATRule() error { // DelSNATRule removes the netfilter rule to SNAT traffic destined for // local subnets. An error is returned if the rule does not exist. func (i *iptablesRunner) DelSNATRule() error { - args := []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"} + args := []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "MASQUERADE"} for _, ipt := range i.getNATTables() { if err := ipt.Delete("nat", "ts-postrouting", args...); err != nil { return fmt.Errorf("deleting %v in nat/ts-postrouting: %w", args, err) diff --git a/util/linuxfw/iptables_runner_test.go b/util/linuxfw/iptables_runner_test.go index 451b8aab47529..ce905aef3f75b 100644 --- a/util/linuxfw/iptables_runner_test.go +++ b/util/linuxfw/iptables_runner_test.go @@ -11,6 +11,7 @@ import ( "testing" "tailscale.com/net/tsaddr" + "tailscale.com/tsconst" ) var testIsNotExistErr = "exitcode:1" @@ -132,8 +133,8 @@ func TestAddAndDeleteBase(t *testing.T) { tsRulesCommon := []fakeRule{ // table/chain/rule {"filter", "ts-input", []string{"-i", tunname, "-j", "ACCEPT"}}, - {"filter", "ts-forward", []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask}}, - {"filter", "ts-forward", []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"}}, + {"filter", "ts-forward", []string{"-i", tunname, "-j", "MARK", "--set-mark", tsconst.LinuxSubnetRouteMark + "/" + 
tsconst.LinuxFwmarkMask}}, + {"filter", "ts-forward", []string{"-m", "mark", "--mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask, "-j", "ACCEPT"}}, {"filter", "ts-forward", []string{"-o", tunname, "-j", "ACCEPT"}}, } @@ -254,7 +255,7 @@ func TestAddAndDelSNATRule(t *testing.T) { } rule := fakeRule{ // table/chain/rule - "nat", "ts-postrouting", []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"}, + "nat", "ts-postrouting", []string{"-m", "mark", "--mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask, "-j", "MASQUERADE"}, } // Add SNAT rule diff --git a/util/linuxfw/linuxfw.go b/util/linuxfw/linuxfw.go index 4aa0f87829bd3..ec73aaceea03a 100644 --- a/util/linuxfw/linuxfw.go +++ b/util/linuxfw/linuxfw.go @@ -15,6 +15,7 @@ import ( "github.com/tailscale/netlink" "tailscale.com/feature" + "tailscale.com/tsconst" "tailscale.com/types/logger" ) @@ -70,23 +71,12 @@ const ( // matching and setting the bits, so they can be directly embedded in // commands. const ( - // The mask for reading/writing the 'firewall mask' bits on a packet. - // See the comment on the const block on why we only use the third byte. - // - // We claim bits 16:23 entirely. For now we only use the lower four - // bits, leaving the higher 4 bits for future use. - TailscaleFwmarkMask = "0xff0000" - TailscaleFwmarkMaskNum = 0xff0000 - - // Packet is from Tailscale and to a subnet route destination, so - // is allowed to be routed through this machine. - TailscaleSubnetRouteMark = "0x40000" - TailscaleSubnetRouteMarkNum = 0x40000 - - // Packet was originated by tailscaled itself, and must not be - // routed over the Tailscale network. 
- TailscaleBypassMark = "0x80000" - TailscaleBypassMarkNum = 0x80000 + fwmarkMask = tsconst.LinuxFwmarkMask + fwmarkMaskNum = tsconst.LinuxFwmarkMaskNum + subnetRouteMark = tsconst.LinuxSubnetRouteMark + subnetRouteMarkNum = tsconst.LinuxSubnetRouteMarkNum + bypassMark = tsconst.LinuxBypassMark + bypassMarkNum = tsconst.LinuxBypassMarkNum ) // getTailscaleFwmarkMaskNeg returns the negation of TailscaleFwmarkMask in bytes. @@ -170,7 +160,7 @@ func CheckIPRuleSupportsV6(logf logger.Logf) error { // Try to actually create & delete one as a test. rule := netlink.NewRule() rule.Priority = 1234 - rule.Mark = TailscaleBypassMarkNum + rule.Mark = bypassMarkNum rule.Table = 52 rule.Family = netlink.FAMILY_V6 // First delete the rule unconditionally, and don't check for diff --git a/wgengine/router/router_linux.go b/wgengine/router/router_linux.go index dc1425708d312..75ff64f4037fd 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/router_linux.go @@ -26,6 +26,7 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/net/netmon" + "tailscale.com/tsconst" "tailscale.com/types/logger" "tailscale.com/types/opt" "tailscale.com/types/preftype" @@ -1238,14 +1239,14 @@ var baseIPRules = []netlink.Rule{ // main routing table. { Priority: 10, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: mainRouteTable.Num, }, // ...and then we try the 'default' table, for correctness, // even though it's been empty on every Linux system I've ever seen. { Priority: 30, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: defaultRouteTable.Num, }, // If neither of those matched (no default route on this system?) @@ -1253,7 +1254,7 @@ var baseIPRules = []netlink.Rule{ // to the tailscale routes, because that would create routing loops. 
{ Priority: 50, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Type: unix.RTN_UNREACHABLE, }, // If we get to this point, capture all packets and send them @@ -1283,7 +1284,7 @@ var ubntIPRules = []netlink.Rule{ { Priority: 70, Invert: true, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: tailscaleRouteTable.Num, }, } @@ -1311,7 +1312,7 @@ func (r *linuxRouter) justAddIPRules() error { // Note: r is a value type here; safe to mutate it. ru.Family = family.netlinkInt() if ru.Mark != 0 { - ru.Mask = linuxfw.TailscaleFwmarkMaskNum + ru.Mask = tsconst.LinuxFwmarkMaskNum } ru.Goto = -1 ru.SuppressIfgroup = -1 @@ -1344,7 +1345,7 @@ func (r *linuxRouter) addIPRulesWithIPCommand() error { } if rule.Mark != 0 { if r.fwmaskWorks() { - args = append(args, "fwmark", fmt.Sprintf("0x%x/%s", rule.Mark, linuxfw.TailscaleFwmarkMask)) + args = append(args, "fwmark", fmt.Sprintf("0x%x/%s", rule.Mark, tsconst.LinuxFwmarkMask)) } else { args = append(args, "fwmark", fmt.Sprintf("0x%x", rule.Mark)) } diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/router_linux_test.go index 3b1eb7db6044e..b7f3a8ba12309 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/router_linux_test.go @@ -25,6 +25,7 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" + "tailscale.com/tsconst" "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/util/eventbus" @@ -572,8 +573,8 @@ func (n *fakeIPTablesRunner) addBase4(tunname string) error { newRules := []struct{ chain, rule string }{ {"filter/ts-input", fmt.Sprintf("! -i %s -s %s -j RETURN", tunname, tsaddr.ChromeOSVMRange().String())}, {"filter/ts-input", fmt.Sprintf("! 
-i %s -s %s -j DROP", tunname, tsaddr.CGNATRange().String())}, - {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, - {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, {"filter/ts-forward", fmt.Sprintf("-o %s -s %s -j DROP", tunname, tsaddr.CGNATRange().String())}, {"filter/ts-forward", fmt.Sprintf("-o %s -j ACCEPT", tunname)}, } @@ -588,8 +589,8 @@ func (n *fakeIPTablesRunner) addBase4(tunname string) error { func (n *fakeIPTablesRunner) addBase6(tunname string) error { curIPT := n.ipt6 newRules := []struct{ chain, rule string }{ - {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, - {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, {"filter/ts-forward", fmt.Sprintf("-o %s -j ACCEPT", tunname)}, } for _, rule := range newRules { @@ -673,7 +674,7 @@ func (n *fakeIPTablesRunner) DelBase() error { } func (n *fakeIPTablesRunner) AddSNATRule() error { - newRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask) + newRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask) for _, ipt := range 
[]map[string][]string{n.ipt4, n.ipt6} { if err := appendRule(n, ipt, "nat/ts-postrouting", newRule); err != nil { return err @@ -683,7 +684,7 @@ func (n *fakeIPTablesRunner) AddSNATRule() error { } func (n *fakeIPTablesRunner) DelSNATRule() error { - delRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask) + delRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask) for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { if err := deleteRule(n, ipt, "nat/ts-postrouting", delRule); err != nil { return err From 01e645fae1d3e97d1b43a78ad9b6e5cf5d390c74 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 28 Sep 2025 11:03:46 -0700 Subject: [PATCH 0445/1093] util/backoff: rename logtail/backoff package to util/backoff It has nothing to do with logtail and is confusingly named like that. Updates #cleanup Updates #17323 Change-Id: Idd34587ba186a2416725f72ffc4c5778b0b9db4a Signed-off-by: Brad Fitzpatrick --- cmd/containerboot/kube.go | 2 +- cmd/k8s-operator/depaware.txt | 2 +- cmd/k8s-operator/egress-pod-readiness.go | 2 +- cmd/stunstamp/stunstamp.go | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/install_windows.go | 2 +- cmd/tailscaled/tailscaled_windows.go | 2 +- cmd/tsidp/depaware.txt | 2 +- control/controlclient/auto.go | 2 +- feature/taildrop/retrieve.go | 2 +- ipn/ipnlocal/serve.go | 2 +- ipn/ipnlocal/web_client.go | 2 +- net/dns/resolved.go | 2 +- prober/prober.go | 2 +- ssh/tailssh/tailssh.go | 2 +- tsnet/depaware.txt | 2 +- tstest/integration/tailscaled_deps_test_windows.go | 2 +- tstest/tstest.go | 2 +- {logtail => util}/backoff/backoff.go | 0 wgengine/magicsock/derp.go | 2 +- wgengine/router/router_windows.go | 2 +- 22 files changed, 21 insertions(+), 21 deletions(-) rename {logtail => util}/backoff/backoff.go (100%) diff --git a/cmd/containerboot/kube.go
b/cmd/containerboot/kube.go index 4873ae13f753a..e566fa483447c 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -23,9 +23,9 @@ import ( "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" - "tailscale.com/logtail/backoff" "tailscale.com/tailcfg" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/set" ) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index ba644eb03eb62..7140e57b19333 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -832,7 +832,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp @@ -917,6 +916,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index 05cf1aa1abfed..f3a812ecb9030 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -25,8 +25,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" tsapi 
"tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" - "tailscale.com/logtail/backoff" "tailscale.com/tstime" + "tailscale.com/util/backoff" "tailscale.com/util/httpm" ) diff --git a/cmd/stunstamp/stunstamp.go b/cmd/stunstamp/stunstamp.go index c3842e2e8b3be..71ed505690243 100644 --- a/cmd/stunstamp/stunstamp.go +++ b/cmd/stunstamp/stunstamp.go @@ -34,10 +34,10 @@ import ( "github.com/golang/snappy" "github.com/prometheus/prometheus/prompb" "github.com/tcnksm/go-httpstat" - "tailscale.com/logtail/backoff" "tailscale.com/net/stun" "tailscale.com/net/tcpinfo" "tailscale.com/tailcfg" + "tailscale.com/util/backoff" ) var ( diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index b0cc9d9c1a8eb..c6883496306cd 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -94,7 +94,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp @@ -170,6 +169,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/control/controlclient+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 
7fdac984c4b2a..6d17910526d89 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -314,7 +314,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ - tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp @@ -403,6 +402,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/tka+ tailscale.com/types/views from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/control/controlclient+ tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+ diff --git a/cmd/tailscaled/install_windows.go b/cmd/tailscaled/install_windows.go index e98a6461ea57b..6013660f5aa20 100644 --- a/cmd/tailscaled/install_windows.go +++ b/cmd/tailscaled/install_windows.go @@ -16,8 +16,8 @@ import ( "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" "tailscale.com/cmd/tailscaled/tailscaledhooks" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" + "tailscale.com/util/backoff" ) func init() { diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 3a2edcac51886..14f31968b6748 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -47,13 +47,13 @@ import ( _ "tailscale.com/ipn/auditlog" _ "tailscale.com/ipn/desktop" "tailscale.com/logpolicy" - "tailscale.com/logtail/backoff" 
"tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/net/tstun" "tailscale.com/tsd" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/backoff" "tailscale.com/util/osdiag" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index bff8df411da7e..21ea91b46d3a1 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -263,7 +263,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp @@ -347,6 +346,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 9a654b679b57a..f5495f8546218 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -13,7 +13,6 @@ import ( "time" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -22,6 +21,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/structs" + 
"tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/execqueue" diff --git a/feature/taildrop/retrieve.go b/feature/taildrop/retrieve.go index b048a1b3b5f9d..e767bac324684 100644 --- a/feature/taildrop/retrieve.go +++ b/feature/taildrop/retrieve.go @@ -14,7 +14,7 @@ import ( "time" "tailscale.com/client/tailscale/apitype" - "tailscale.com/logtail/backoff" + "tailscale.com/util/backoff" "tailscale.com/util/set" ) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index cbf84fb29a1d5..dc41424042ee9 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -36,12 +36,12 @@ import ( "go4.org/mem" "golang.org/x/net/http2" "tailscale.com/ipn" - "tailscale.com/logtail/backoff" "tailscale.com/net/netutil" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/ctxkey" "tailscale.com/util/mak" diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index 7cfb30ca4efeb..a3c9387e46fce 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -19,11 +19,11 @@ import ( "tailscale.com/client/local" "tailscale.com/client/web" - "tailscale.com/logtail/backoff" "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/tsconst" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/mak" ) diff --git a/net/dns/resolved.go b/net/dns/resolved.go index 5d9130f05ecb5..d8f63c9d66006 100644 --- a/net/dns/resolved.go +++ b/net/dns/resolved.go @@ -15,8 +15,8 @@ import ( "github.com/godbus/dbus/v5" "golang.org/x/sys/unix" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/dnsname" ) diff --git a/prober/prober.go b/prober/prober.go index af0e199343b2d..9073a95029163 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -317,7 +317,7 @@ func 
(p *Probe) loop() { p.run() // Wait and then retry if probe fails. We use the inverse of the // configured negative interval as our sleep period. - // TODO(percy):implement exponential backoff, possibly using logtail/backoff. + // TODO(percy):implement exponential backoff, possibly using util/backoff. select { case <-time.After(-1 * p.interval): p.run() diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index b249a10639c30..7d12ab45f8552 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -32,7 +32,6 @@ import ( gossh "golang.org/x/crypto/ssh" "tailscale.com/envknob" "tailscale.com/ipn/ipnlocal" - "tailscale.com/logtail/backoff" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/sessionrecording" @@ -41,6 +40,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/httpm" "tailscale.com/util/mak" diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 71789b7b67213..6c7dc6b5507d1 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -259,7 +259,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ tailscale.com/net/ace from tailscale.com/control/controlhttp @@ -342,6 +341,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from 
tailscale.com/control/controlclient+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 08c8c27fff6e4..a5a0a428ffd3b 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -39,7 +39,6 @@ import ( _ "tailscale.com/ipn/store" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" - _ "tailscale.com/logtail/backoff" _ "tailscale.com/net/dns" _ "tailscale.com/net/dnsfallback" _ "tailscale.com/net/netmon" @@ -59,6 +58,7 @@ import ( _ "tailscale.com/types/key" _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" + _ "tailscale.com/util/backoff" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" _ "tailscale.com/util/multierr" diff --git a/tstest/tstest.go b/tstest/tstest.go index 2d0d1351e293a..169450686966d 100644 --- a/tstest/tstest.go +++ b/tstest/tstest.go @@ -14,8 +14,8 @@ import ( "time" "tailscale.com/envknob" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/cibuild" ) diff --git a/logtail/backoff/backoff.go b/util/backoff/backoff.go similarity index 100% rename from logtail/backoff/backoff.go rename to util/backoff/backoff.go diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 0d419841cfe4c..d33745892b847 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -19,7 +19,6 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/dnscache" "tailscale.com/net/netcheck" "tailscale.com/net/tsaddr" @@ -28,6 +27,7 @@ import ( "tailscale.com/tstime/mono" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/backoff" 
"tailscale.com/util/mak" "tailscale.com/util/rands" "tailscale.com/util/testenv" diff --git a/wgengine/router/router_windows.go b/wgengine/router/router_windows.go index 32d05110dca45..edd258cb3f0a3 100644 --- a/wgengine/router/router_windows.go +++ b/wgengine/router/router_windows.go @@ -23,10 +23,10 @@ import ( "golang.org/x/sys/windows" "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/eventbus" ) From a32102f7412bc3fda4ac773c13b208c2743c2b54 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 28 Sep 2025 12:46:45 -0700 Subject: [PATCH 0446/1093] smallzstd: delete unused package As of the earlier 85febda86db1, our new preferred zstd API of choice is zstdframe. Updates #cleanup Updates tailscale/corp#18514 Change-Id: I5a6164d3162bf2513c3673b6d1e34cfae84cb104 Signed-off-by: Brad Fitzpatrick --- smallzstd/testdata | 14 ----- smallzstd/zstd.go | 78 ------------------------- smallzstd/zstd_test.go | 130 ----------------------------------------- 3 files changed, 222 deletions(-) delete mode 100644 smallzstd/testdata delete mode 100644 smallzstd/zstd.go delete mode 100644 smallzstd/zstd_test.go diff --git a/smallzstd/testdata b/smallzstd/testdata deleted file mode 100644 index 76640fdc57df0..0000000000000 --- a/smallzstd/testdata +++ /dev/null @@ -1,14 +0,0 @@ -{"logtail":{"client_time":"2020-07-01T14:49:40.196597018-07:00","server_time":"2020-07-01T21:49:40.198371511Z"},"text":"9.8M/25.6M magicsock: starting endpoint update (periodic)\n"} -{"logtail":{"client_time":"2020-07-01T14:49:40.345925455-07:00","server_time":"2020-07-01T21:49:40.347904717Z"},"text":"9.9M/25.6M netcheck: udp=true v6=false mapvarydest=false hair=false v4a=202.188.7.1:41641 derp=2 derpdist=1v4:7ms,2v4:3ms,4v4:18ms\n"} 
-{"logtail":{"client_time":"2020-07-01T14:49:43.347155742-07:00","server_time":"2020-07-01T21:49:43.34828658Z"},"text":"9.9M/25.6M control: map response long-poll timed out!\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347539333-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.9M/25.6M control: PollNetMap: context canceled\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347767812-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M control: sendStatus: mapRoutine1: state:authenticated\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347817165-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M blockEngineUpdates(false)\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347989028-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M wgcfg: [SViTM] skipping subnet route\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.349997554-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.3M/25.6M Received error: PollNetMap: context canceled\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.350072606-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.3M/25.6M control: mapRoutine: backoff: 30136 msec\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.998364646-07:00","server_time":"2020-07-01T21:49:47.999333754Z"},"text":"9.5M/25.6M [W1NbE] - [UcppE] Send handshake init [127.3.3.40:1, 6.1.1.6:37388*, 10.3.2.6:41641]\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.99881914-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M magicsock: adding connection to derp-1 for [W1NbE]\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.998904932-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M magicsock: 2 active derp conns: derp-1=cr0s,wr0s derp-2=cr16h0m0s,wr14h38m0s\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.999045606-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M 
derphttp.Client.Recv: connecting to derp-1 (nyc)\n"} -{"logtail":{"client_time":"2020-07-01T14:49:48.091104119-07:00","server_time":"2020-07-01T21:49:48.09280535Z"},"text":"9.6M/25.6M magicsock: rx [W1NbE] from 6.1.1.6:37388 (1/3), set as new priority\n"} diff --git a/smallzstd/zstd.go b/smallzstd/zstd.go deleted file mode 100644 index 1d80854224359..0000000000000 --- a/smallzstd/zstd.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package smallzstd produces zstd encoders and decoders optimized for -// low memory usage, at the expense of compression efficiency. -// -// This package is optimized primarily for the memory cost of -// compressing and decompressing data. We reduce this cost in two -// major ways: disable parallelism within the library (i.e. don't use -// multiple CPU cores to decompress), and drop the compression window -// down from the defaults of 4-16MiB, to 8kiB. -// -// Decompressors cost 2x the window size in RAM to run, so by using an -// 8kiB window, we can run ~1000 more decompressors per unit of memory -// than with the defaults. -// -// Depending on context, the benefit is either being able to run more -// decoders (e.g. in our logs processing system), or having a lower -// memory footprint when using compression in network protocols -// (e.g. in tailscaled, which should have a minimal RAM cost). -package smallzstd - -import ( - "io" - - "github.com/klauspost/compress/zstd" -) - -// WindowSize is the window size used for zstd compression. Decoder -// memory usage scales linearly with WindowSize. -const WindowSize = 8 << 10 // 8kiB - -// NewDecoder returns a zstd.Decoder configured for low memory usage, -// at the expense of decompression performance. -func NewDecoder(r io.Reader, options ...zstd.DOption) (*zstd.Decoder, error) { - defaults := []zstd.DOption{ - // Default is GOMAXPROCS, which costs many KiB in stacks. 
- zstd.WithDecoderConcurrency(1), - // Default is to allocate more upfront for performance. We - // prefer lower memory use and a bit of GC load. - zstd.WithDecoderLowmem(true), - // You might expect to see zstd.WithDecoderMaxMemory - // here. However, it's not terribly safe to use if you're - // doing stateless decoding, because it sets the maximum - // amount of memory the decompressed data can occupy, rather - // than the window size of the zstd stream. This means a very - // compressible piece of data might violate the max memory - // limit here, even if the window size (and thus total memory - // required to decompress the data) is small. - // - // As a result, we don't set a decoder limit here, and rely on - // the encoder below producing "cheap" streams. Callers are - // welcome to set their own max memory setting, if - // contextually there is a clearly correct value (e.g. it's - // known from the upper layer protocol that the decoded data - // can never be more than 1MiB). - } - - return zstd.NewReader(r, append(defaults, options...)...) -} - -// NewEncoder returns a zstd.Encoder configured for low memory usage, -// both during compression and at decompression time, at the expense -// of performance and compression efficiency. -func NewEncoder(w io.Writer, options ...zstd.EOption) (*zstd.Encoder, error) { - defaults := []zstd.EOption{ - // Default is GOMAXPROCS, which costs many KiB in stacks. - zstd.WithEncoderConcurrency(1), - // Default is several MiB, which bloats both encoders and - // their corresponding decoders. - zstd.WithWindowSize(WindowSize), - // Encode zero-length inputs in a way that the `zstd` utility - // can read, because interoperability is handy. - zstd.WithZeroFrames(true), - } - - return zstd.NewWriter(w, append(defaults, options...)...) 
-} diff --git a/smallzstd/zstd_test.go b/smallzstd/zstd_test.go deleted file mode 100644 index d1225bfac6058..0000000000000 --- a/smallzstd/zstd_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package smallzstd - -import ( - "os" - "testing" - - "github.com/klauspost/compress/zstd" -) - -func BenchmarkSmallEncoder(b *testing.B) { - benchEncoder(b, func() (*zstd.Encoder, error) { return NewEncoder(nil) }) -} - -func BenchmarkSmallEncoderWithBuild(b *testing.B) { - benchEncoderWithConstruction(b, func() (*zstd.Encoder, error) { return NewEncoder(nil) }) -} - -func BenchmarkStockEncoder(b *testing.B) { - benchEncoder(b, func() (*zstd.Encoder, error) { return zstd.NewWriter(nil) }) -} - -func BenchmarkStockEncoderWithBuild(b *testing.B) { - benchEncoderWithConstruction(b, func() (*zstd.Encoder, error) { return zstd.NewWriter(nil) }) -} - -func BenchmarkSmallDecoder(b *testing.B) { - benchDecoder(b, func() (*zstd.Decoder, error) { return NewDecoder(nil) }) -} - -func BenchmarkSmallDecoderWithBuild(b *testing.B) { - benchDecoderWithConstruction(b, func() (*zstd.Decoder, error) { return NewDecoder(nil) }) -} - -func BenchmarkStockDecoder(b *testing.B) { - benchDecoder(b, func() (*zstd.Decoder, error) { return zstd.NewReader(nil) }) -} - -func BenchmarkStockDecoderWithBuild(b *testing.B) { - benchDecoderWithConstruction(b, func() (*zstd.Decoder, error) { return zstd.NewReader(nil) }) -} - -func benchEncoder(b *testing.B, mk func() (*zstd.Encoder, error)) { - b.ReportAllocs() - - in := testdata(b) - out := make([]byte, 0, 10<<10) // 10kiB - - e, err := mk() - if err != nil { - b.Fatalf("making encoder: %v", err) - } - - b.ResetTimer() - for range b.N { - e.EncodeAll(in, out) - } -} - -func benchEncoderWithConstruction(b *testing.B, mk func() (*zstd.Encoder, error)) { - b.ReportAllocs() - - in := testdata(b) - out := make([]byte, 0, 10<<10) // 10kiB - - b.ResetTimer() - for range b.N { - e, err := 
mk() - if err != nil { - b.Fatalf("making encoder: %v", err) - } - - e.EncodeAll(in, out) - } -} - -func benchDecoder(b *testing.B, mk func() (*zstd.Decoder, error)) { - b.ReportAllocs() - - in := compressedTestdata(b) - out := make([]byte, 0, 10<<10) - - d, err := mk() - if err != nil { - b.Fatalf("creating decoder: %v", err) - } - - b.ResetTimer() - for range b.N { - d.DecodeAll(in, out) - } -} - -func benchDecoderWithConstruction(b *testing.B, mk func() (*zstd.Decoder, error)) { - b.ReportAllocs() - - in := compressedTestdata(b) - out := make([]byte, 0, 10<<10) - - b.ResetTimer() - for range b.N { - d, err := mk() - if err != nil { - b.Fatalf("creating decoder: %v", err) - } - - d.DecodeAll(in, out) - } -} - -func testdata(b *testing.B) []byte { - b.Helper() - in, err := os.ReadFile("testdata") - if err != nil { - b.Fatalf("reading testdata: %v", err) - } - return in -} - -func compressedTestdata(b *testing.B) []byte { - b.Helper() - uncomp := testdata(b) - e, err := NewEncoder(nil) - if err != nil { - b.Fatalf("creating encoder: %v", err) - } - return e.EncodeAll(uncomp, nil) -} From e466488a2a68176569a98f59e0ace8c9896b6b92 Mon Sep 17 00:00:00 2001 From: David Bond Date: Mon, 29 Sep 2025 12:38:15 +0100 Subject: [PATCH 0447/1093] cmd/k8s-operator: add replica support to nameserver (#17246) This commit modifies the `DNSConfig` custom resource to allow specifying a replica count when deploying a nameserver. This allows deploying nameservers in a HA configuration. 
Updates https://github.com/tailscale/corp/issues/32589 Signed-off-by: David Bond --- .../deploy/crds/tailscale.com_dnsconfigs.yaml | 5 +++++ cmd/k8s-operator/deploy/manifests/operator.yaml | 5 +++++ cmd/k8s-operator/nameserver.go | 12 ++++++++++-- cmd/k8s-operator/nameserver_test.go | 3 +++ k8s-operator/api.md | 1 + k8s-operator/apis/v1alpha1/types_tsdnsconfig.go | 4 ++++ k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go | 5 +++++ 7 files changed, 33 insertions(+), 2 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index b047e11a7e017..43ebaecec9161 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -100,6 +100,11 @@ spec: tag: description: Tag defaults to unstable. type: string + replicas: + description: Replicas specifies how many Pods to create. Defaults to 1. + type: integer + format: int32 + minimum: 0 service: description: Service configuration. type: object diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 8b3c206c8a093..9c19554aa351d 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -431,6 +431,11 @@ spec: description: Tag defaults to unstable. type: string type: object + replicas: + description: Replicas specifies how many Pods to create. Defaults to 1. + format: int32 + minimum: 0 + type: integer service: description: Service configuration. 
properties: diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 983a28c918276..3618642e1add1 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -30,6 +30,7 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstime" + "tailscale.com/types/ptr" "tailscale.com/util/clientmetric" "tailscale.com/util/set" ) @@ -130,7 +131,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ return setStatus(&dnsCfg, metav1.ConditionFalse, reasonNameserverCreationFailed, msg) } } - if err := a.maybeProvision(ctx, &dnsCfg, logger); err != nil { + if err = a.maybeProvision(ctx, &dnsCfg); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) return reconcile.Result{}, nil @@ -167,7 +168,7 @@ func nameserverResourceLabels(name, namespace string) map[string]string { return labels } -func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error { +func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig) error { labels := nameserverResourceLabels(tsDNSCfg.Name, a.tsNamespace) dCfg := &deployConfig{ ownerRefs: []metav1.OwnerReference{*metav1.NewControllerRef(tsDNSCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig"))}, @@ -175,6 +176,11 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa labels: labels, imageRepo: defaultNameserverImageRepo, imageTag: defaultNameserverImageTag, + replicas: 1, + } + + if tsDNSCfg.Spec.Nameserver.Replicas != nil { + dCfg.replicas = *tsDNSCfg.Spec.Nameserver.Replicas } if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Repo != "" { dCfg.imageRepo = tsDNSCfg.Spec.Nameserver.Image.Repo @@ -211,6 +217,7 @@ type deployable struct { } type deployConfig struct { + replicas int32 imageRepo 
string imageTag string labels map[string]string @@ -236,6 +243,7 @@ var ( if err := yaml.Unmarshal(deployYaml, &d); err != nil { return fmt.Errorf("error unmarshalling Deployment yaml: %w", err) } + d.Spec.Replicas = ptr.To(cfg.replicas) d.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%s:%s", cfg.imageRepo, cfg.imageTag) d.ObjectMeta.Namespace = cfg.namespace d.ObjectMeta.Labels = cfg.labels diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index 55a998ac31979..88e48b753126f 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -22,6 +22,7 @@ import ( operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstest" + "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -33,6 +34,7 @@ func TestNameserverReconciler(t *testing.T) { }, Spec: tsapi.DNSConfigSpec{ Nameserver: &tsapi.Nameserver{ + Replicas: ptr.To[int32](3), Image: &tsapi.NameserverImage{ Repo: "test", Tag: "v0.0.1", @@ -74,6 +76,7 @@ func TestNameserverReconciler(t *testing.T) { } wantsDeploy.OwnerReferences = []metav1.OwnerReference{*ownerReference} wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.1" + wantsDeploy.Spec.Replicas = ptr.To[int32](3) wantsDeploy.Namespace = tsNamespace wantsDeploy.ObjectMeta.Labels = nameserverLabels expectEqual(t, fc, wantsDeploy) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 180231bfaf4a0..b1c56c0687044 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -443,6 +443,7 @@ _Appears in:_ | --- | --- | --- | --- | | `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | | | `service` _[NameserverService](#nameserverservice)_ | Service configuration. | | | +| `replicas` _integer_ | Replicas specifies how many Pods to create. Defaults to 1. | | Minimum: 0
      | #### NameserverImage diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 0b0f1eb5ca137..4d8d569f68eba 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -84,6 +84,10 @@ type Nameserver struct { // Service configuration. // +optional Service *NameserverService `json:"service,omitempty"` + // Replicas specifies how many Pods to create. Defaults to 1. + // +optional + // +kubebuilder:validation:Minimum=0 + Replicas *int32 `json:"replicas,omitempty"` } type NameserverImage struct { diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index d7a90ad0fd895..3fd64c28e7a12 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -422,6 +422,11 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) { *out = new(NameserverService) **out = **in } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Nameserver. From 11b770fbc90c8b46d4b575ce5d087a3ee8d28fa9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 28 Sep 2025 10:57:22 -0700 Subject: [PATCH 0448/1093] feature/logtail: pull logtail + netlog out to modular features Removes 434 KB from the minimal Linux binary, or ~3%. Primarily this comes from not linking in the zstd encoding code. 
Fixes #17323 Change-Id: I0a90de307dfa1ad7422db7aa8b1b46c782bfaaf7 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-minbox.txt | 5 +- cmd/tailscaled/tailscaled.go | 30 +++++---- cmd/tailscaled/tailscaled_windows.go | 10 ++- .../buildfeatures/feature_logtail_disabled.go | 13 ++++ .../buildfeatures/feature_logtail_enabled.go | 13 ++++ .../buildfeatures/feature_netlog_disabled.go | 13 ++++ .../buildfeatures/feature_netlog_enabled.go | 13 ++++ feature/featuretags/featuretags.go | 13 +++- ipn/ipnlocal/local.go | 4 +- ipn/localapi/localapi.go | 10 +++ log/sockstatlog/logger.go | 3 +- logpolicy/logpolicy.go | 4 +- logtail/buffer.go | 2 + logtail/config.go | 65 +++++++++++++++++++ logtail/logtail.go | 54 +-------------- logtail/logtail_omit.go | 44 +++++++++++++ wgengine/netlog/{logger.go => netlog.go} | 2 + wgengine/netlog/netlog_omit.go | 13 ++++ wgengine/userspace.go | 6 +- 19 files changed, 240 insertions(+), 77 deletions(-) create mode 100644 feature/buildfeatures/feature_logtail_disabled.go create mode 100644 feature/buildfeatures/feature_logtail_enabled.go create mode 100644 feature/buildfeatures/feature_netlog_disabled.go create mode 100644 feature/buildfeatures/feature_netlog_enabled.go create mode 100644 logtail/config.go create mode 100644 logtail/logtail_omit.go rename wgengine/netlog/{logger.go => netlog.go} (99%) create mode 100644 wgengine/netlog/netlog_omit.go diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index c6883496306cd..ad2bedf664493 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -158,7 +158,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogtype from 
tailscale.com/net/connstats tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ @@ -205,11 +205,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ - tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/util/winutil from tailscale.com/ipn/ipnauth - tailscale.com/util/zstdframe from tailscale.com/control/controlclient+ + tailscale.com/util/zstdframe from tailscale.com/control/controlclient tailscale.com/version from tailscale.com/clientupdate+ tailscale.com/version/distro from tailscale.com/clientupdate+ tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index d01af199cfb08..2b0eec4826946 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -402,7 +402,7 @@ func ipnServerOpts() (o serverOptions) { return o } -var logPol *logpolicy.Policy +var logPol *logpolicy.Policy // or nil if not used var debugMux *http.ServeMux func run() (err error) { @@ -432,15 +432,19 @@ func run() (err error) { sys.Set(netMon) } - pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker.Get(), nil /* use log.Printf */) - pol.SetVerbosityLevel(args.verbose) - logPol = pol - defer func() { - // Finish uploading logs after closing everything else. 
- ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - pol.Shutdown(ctx) - }() + var publicLogID logid.PublicID + if buildfeatures.HasLogTail { + pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker.Get(), nil /* use log.Printf */) + pol.SetVerbosityLevel(args.verbose) + publicLogID = pol.PublicID + logPol = pol + defer func() { + // Finish uploading logs after closing everything else. + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + pol.Shutdown(ctx) + }() + } if err := envknob.ApplyDiskConfigError(); err != nil { log.Printf("Error reading environment config: %v", err) @@ -449,7 +453,7 @@ func run() (err error) { if isWinSvc { // Run the IPN server from the Windows service manager. log.Printf("Running service...") - if err := runWindowsService(pol); err != nil { + if err := runWindowsService(logPol); err != nil { log.Printf("runservice: %v", err) } log.Printf("Service ended.") @@ -493,7 +497,7 @@ func run() (err error) { hostinfo.SetApp(app) } - return startIPNServer(context.Background(), logf, pol.PublicID, sys) + return startIPNServer(context.Background(), logf, publicLogID, sys) } var ( @@ -503,6 +507,7 @@ var ( var sigPipe os.Signal // set by sigpipe.go +// logID may be the zero value if logging is not in use. func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) error { ln, err := safesocket.Listen(args.socketpath) if err != nil { @@ -600,6 +605,7 @@ var ( hookNewNetstack feature.Hook[func(_ logger.Logf, _ *tsd.System, onlyNetstack bool) (tsd.NetstackImpl, error)] ) +// logID may be the zero value if logging is not in use. 
func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) (_ *ipnlocal.LocalBackend, retErr error) { if logPol != nil { logPol.Logtail.SetNetMon(sys.NetMon.Get()) diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 14f31968b6748..3019bbaf9695b 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -149,6 +149,8 @@ var syslogf logger.Logf = logger.Discard // // At this point we're still the parent process that // Windows started. +// +// pol may be nil. func runWindowsService(pol *logpolicy.Policy) error { go func() { logger.Logf(log.Printf).JSON(1, "SupportInfo", osdiag.SupportInfo(osdiag.LogSupportInfoReasonStartup)) @@ -169,7 +171,7 @@ func runWindowsService(pol *logpolicy.Policy) error { } type ipnService struct { - Policy *logpolicy.Policy + Policy *logpolicy.Policy // or nil if logging not in use } // Called by Windows to execute the windows service. @@ -186,7 +188,11 @@ func (service *ipnService) Execute(args []string, r <-chan svc.ChangeRequest, ch doneCh := make(chan struct{}) go func() { defer close(doneCh) - args := []string{"/subproc", service.Policy.PublicID.String()} + publicID := "none" + if service.Policy != nil { + publicID = service.Policy.PublicID.String() + } + args := []string{"/subproc", publicID} // Make a logger without a date prefix, as filelogger // and logtail both already add their own. All we really want // from the log package is the automatic newline. diff --git a/feature/buildfeatures/feature_logtail_disabled.go b/feature/buildfeatures/feature_logtail_disabled.go new file mode 100644 index 0000000000000..140092a2eba5b --- /dev/null +++ b/feature/buildfeatures/feature_logtail_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_logtail + +package buildfeatures + +// HasLogTail is whether the binary was built with support for modular feature "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_logtail" build tag. +// It's a const so it can be used for dead code elimination. +const HasLogTail = false diff --git a/feature/buildfeatures/feature_logtail_enabled.go b/feature/buildfeatures/feature_logtail_enabled.go new file mode 100644 index 0000000000000..6e777216bf3cb --- /dev/null +++ b/feature/buildfeatures/feature_logtail_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_logtail + +package buildfeatures + +// HasLogTail is whether the binary was built with support for modular feature "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_logtail" build tag. +// It's a const so it can be used for dead code elimination. +const HasLogTail = true diff --git a/feature/buildfeatures/feature_netlog_disabled.go b/feature/buildfeatures/feature_netlog_disabled.go new file mode 100644 index 0000000000000..60367a12600f3 --- /dev/null +++ b/feature/buildfeatures/feature_netlog_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_netlog + +package buildfeatures + +// HasNetLog is whether the binary was built with support for modular feature "Network flow logging support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netlog" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasNetLog = false diff --git a/feature/buildfeatures/feature_netlog_enabled.go b/feature/buildfeatures/feature_netlog_enabled.go new file mode 100644 index 0000000000000..f9d2abad30553 --- /dev/null +++ b/feature/buildfeatures/feature_netlog_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_netlog + +package buildfeatures + +// HasNetLog is whether the binary was built with support for modular feature "Network flow logging support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netlog" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetLog = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 40a5ac3f5f396..cd0db6e173595 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -115,7 +115,11 @@ var Features = map[FeatureTag]FeatureMeta{ "iptables": {"IPTables", "Linux iptables support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, - "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, + "logtail": { + Sym: "LogTail", + Desc: "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)", + }, + "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, "outboundproxy": { Sym: "OutboundProxy", Desc: "Outbound localhost HTTP/SOCK5 proxy support", @@ -123,7 +127,12 @@ var Features = map[FeatureTag]FeatureMeta{ }, "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, - "netstack": {"Netstack", "gVisor netstack (userspace networking) support", nil}, + "netlog": { + Sym: "NetLog", + 
Desc: "Network flow logging support", + Deps: []FeatureTag{"logtail"}, + }, + "netstack": {"Netstack", "gVisor netstack (userspace networking) support", nil}, "networkmanager": { Sym: "NetworkManager", Desc: "Linux NetworkManager integration", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index dd0a2f9f1e067..e07f7041c94e9 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -202,7 +202,7 @@ type LocalBackend struct { store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys pushDeviceToken syncs.AtomicValue[string] - backendLogID logid.PublicID + backendLogID logid.PublicID // or zero value if logging not in use unregisterSysPolicyWatch func() varRoot string // or empty if SetVarRoot never called logFlushFunc func() // or nil if SetLogFlusher wasn't called @@ -456,6 +456,8 @@ type clientGen func(controlclient.Options) (controlclient.Client, error) // but is not actually running. // // If dialer is nil, a new one is made. +// +// The logID may be the zero value if logging is not in use. 
func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, loginFlags controlclient.LoginFlags) (_ *LocalBackend, err error) { e := sys.Engine.Get() store := sys.StateStore.Get() diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index e628e677b4a6f..e0c06b7dca558 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -28,6 +28,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -575,6 +576,15 @@ func (h *Handler) serveGoroutines(w http.ResponseWriter, r *http.Request) { func (h *Handler) serveLogTap(w http.ResponseWriter, r *http.Request) { ctx := r.Context() + if !buildfeatures.HasLogTail { + // TODO(bradfitz): separate out logtail tap functionality from upload + // functionality to make this possible? But seems unlikely people would + // want just this. They could "tail -f" or "journalctl -f" their logs + // themselves. + http.Error(w, "logtap not supported in this build", http.StatusNotImplemented) + return + } + // Require write access (~root) as the logs could contain something // sensitive. if !h.PermitWrite { diff --git a/log/sockstatlog/logger.go b/log/sockstatlog/logger.go index 3cc27c22d8af7..4f8909725d1f1 100644 --- a/log/sockstatlog/logger.go +++ b/log/sockstatlog/logger.go @@ -17,6 +17,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" @@ -97,7 +98,7 @@ func SockstatLogID(logID logid.PublicID) logid.PrivateID { // The netMon parameter is optional. It should be specified in environments where // Tailscaled is manipulating the routing table. 
func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor, health *health.Tracker) (*Logger, error) { - if !sockstats.IsAvailable { + if !sockstats.IsAvailable || !buildfeatures.HasLogTail { return nil, nil } if netMon == nil { diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 4c90378d025d3..c802d481f9046 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -31,6 +31,7 @@ import ( "golang.org/x/term" "tailscale.com/atomicfile" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/log/filelogger" @@ -106,6 +107,7 @@ type Policy struct { // Logtail is the logger. Logtail *logtail.Logger // PublicID is the logger's instance identifier. + // It may be the zero value if logging is not in use. PublicID logid.PublicID // Logf is where to write informational messages about this Logger. Logf logger.Logf @@ -682,7 +684,7 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { // New returns a new log policy (a logger and its instance ID). 
func (opts Options) New() *Policy { - disableLogging := envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" + disableLogging := envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" || !buildfeatures.HasLogTail _, policy := opts.init(disableLogging) return policy } diff --git a/logtail/buffer.go b/logtail/buffer.go index c9f2e1ad02e0a..d14d8fbf6ae51 100644 --- a/logtail/buffer.go +++ b/logtail/buffer.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail + package logtail import ( diff --git a/logtail/config.go b/logtail/config.go new file mode 100644 index 0000000000000..a6c068c0c86c6 --- /dev/null +++ b/logtail/config.go @@ -0,0 +1,65 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package logtail + +import ( + "io" + "net/http" + "time" + + "tailscale.com/tstime" + "tailscale.com/types/logid" +) + +// DefaultHost is the default host name to upload logs to when +// Config.BaseURL isn't provided. +const DefaultHost = "log.tailscale.com" + +const defaultFlushDelay = 2 * time.Second + +const ( + // CollectionNode is the name of a logtail Config.Collection + // for tailscaled (or equivalent: IPNExtension, Android app). 
+ CollectionNode = "tailnode.log.tailscale.io" +) + +type Config struct { + Collection string // collection name, a domain name + PrivateID logid.PrivateID // private ID for the primary log stream + CopyPrivateID logid.PrivateID // private ID for a log stream that is a superset of this log stream + BaseURL string // if empty defaults to "https://log.tailscale.com" + HTTPC *http.Client // if empty defaults to http.DefaultClient + SkipClientTime bool // if true, client_time is not written to logs + LowMemory bool // if true, logtail minimizes memory use + Clock tstime.Clock // if set, Clock.Now substitutes uses of time.Now + Stderr io.Writer // if set, logs are sent here instead of os.Stderr + StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only + Buffer Buffer // temp storage, if nil a MemoryBuffer + CompressLogs bool // whether to compress the log uploads + MaxUploadSize int // maximum upload size; 0 means using the default + + // MetricsDelta, if non-nil, is a func that returns an encoding + // delta in clientmetrics to upload alongside existing logs. + // It can return either an empty string (for nothing) or a string + // that's safe to embed in a JSON string literal without further escaping. + MetricsDelta func() string + + // FlushDelayFn, if non-nil is a func that returns how long to wait to + // accumulate logs before uploading them. 0 or negative means to upload + // immediately. + // + // If nil, a default value is used. (currently 2 seconds) + FlushDelayFn func() time.Duration + + // IncludeProcID, if true, results in an ephemeral process identifier being + // included in logs. The ID is random and not guaranteed to be globally + // unique, but it can be used to distinguish between different instances + // running with same PrivateID. + IncludeProcID bool + + // IncludeProcSequence, if true, results in an ephemeral sequence number + // being included in the logs. 
The sequence number is incremented for each + // log message sent, but is not persisted across process restarts. + IncludeProcSequence bool +} diff --git a/logtail/logtail.go b/logtail/logtail.go index 6c4bbccc5a20e..948c5a4605f05 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail + // Package logtail sends logs to log.tailscale.com. package logtail @@ -51,58 +53,6 @@ const lowMemRatio = 4 // but not too large to be a notable waste of memory if retained forever. const bufferSize = 4 << 10 -// DefaultHost is the default host name to upload logs to when -// Config.BaseURL isn't provided. -const DefaultHost = "log.tailscale.com" - -const defaultFlushDelay = 2 * time.Second - -const ( - // CollectionNode is the name of a logtail Config.Collection - // for tailscaled (or equivalent: IPNExtension, Android app). - CollectionNode = "tailnode.log.tailscale.io" -) - -type Config struct { - Collection string // collection name, a domain name - PrivateID logid.PrivateID // private ID for the primary log stream - CopyPrivateID logid.PrivateID // private ID for a log stream that is a superset of this log stream - BaseURL string // if empty defaults to "https://log.tailscale.com" - HTTPC *http.Client // if empty defaults to http.DefaultClient - SkipClientTime bool // if true, client_time is not written to logs - LowMemory bool // if true, logtail minimizes memory use - Clock tstime.Clock // if set, Clock.Now substitutes uses of time.Now - Stderr io.Writer // if set, logs are sent here instead of os.Stderr - StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only - Buffer Buffer // temp storage, if nil a MemoryBuffer - CompressLogs bool // whether to compress the log uploads - MaxUploadSize int // maximum upload size; 0 means using the default - - // MetricsDelta, if non-nil, is a func that returns an encoding 
- // delta in clientmetrics to upload alongside existing logs. - // It can return either an empty string (for nothing) or a string - // that's safe to embed in a JSON string literal without further escaping. - MetricsDelta func() string - - // FlushDelayFn, if non-nil is a func that returns how long to wait to - // accumulate logs before uploading them. 0 or negative means to upload - // immediately. - // - // If nil, a default value is used. (currently 2 seconds) - FlushDelayFn func() time.Duration - - // IncludeProcID, if true, results in an ephemeral process identifier being - // included in logs. The ID is random and not guaranteed to be globally - // unique, but it can be used to distinguish between different instances - // running with same PrivateID. - IncludeProcID bool - - // IncludeProcSequence, if true, results in an ephemeral sequence number - // being included in the logs. The sequence number is incremented for each - // log message sent, but is not persisted across process restarts. - IncludeProcSequence bool -} - func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if cfg.BaseURL == "" { cfg.BaseURL = "https://" + DefaultHost diff --git a/logtail/logtail_omit.go b/logtail/logtail_omit.go new file mode 100644 index 0000000000000..814fd3be90d8e --- /dev/null +++ b/logtail/logtail_omit.go @@ -0,0 +1,44 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_logtail + +package logtail + +import ( + "context" + + tslogger "tailscale.com/types/logger" + "tailscale.com/types/logid" +) + +// Noop implementations of everything when ts_omit_logtail is set. 
+ +type Logger struct{} + +type Buffer any + +func Disable() {} + +func NewLogger(cfg Config, logf tslogger.Logf) *Logger { + return &Logger{} +} + +func (*Logger) Write(p []byte) (n int, err error) { + return len(p), nil +} + +func (*Logger) Logf(format string, args ...any) {} +func (*Logger) Shutdown(ctx context.Context) error { return nil } +func (*Logger) SetVerbosityLevel(level int) {} + +func (l *Logger) SetSockstatsLabel(label any) {} + +func (l *Logger) PrivateID() logid.PrivateID { return logid.PrivateID{} } +func (l *Logger) StartFlush() {} + +func RegisterLogTap(dst chan<- string) (unregister func()) { + return func() {} +} + +func (*Logger) SetNetMon(any) {} diff --git a/wgengine/netlog/logger.go b/wgengine/netlog/netlog.go similarity index 99% rename from wgengine/netlog/logger.go rename to wgengine/netlog/netlog.go index 3a696b246df54..8fd225c90e862 100644 --- a/wgengine/netlog/logger.go +++ b/wgengine/netlog/netlog.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_netlog && !ts_omit_logtail + // Package netlog provides a logger that monitors a TUN device and // periodically records any traffic into a log stream. 
package netlog diff --git a/wgengine/netlog/netlog_omit.go b/wgengine/netlog/netlog_omit.go new file mode 100644 index 0000000000000..43209df919ace --- /dev/null +++ b/wgengine/netlog/netlog_omit.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netlog || ts_omit_logtail + +package netlog + +type Logger struct{} + +func (*Logger) Startup(...any) error { return nil } +func (*Logger) Running() bool { return false } +func (*Logger) Shutdown(any) error { return nil } +func (*Logger) ReconfigRoutes(any) {} diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 7fb5805149791..158a6d06f60d4 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -962,7 +962,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, netLogIDsWasValid := !oldLogIDs.NodeID.IsZero() && !oldLogIDs.DomainID.IsZero() netLogIDsChanged := netLogIDsNowValid && netLogIDsWasValid && newLogIDs != oldLogIDs netLogRunning := netLogIDsNowValid && !routerCfg.Equal(&router.Config{}) - if envknob.NoLogsNoSupport() { + if !buildfeatures.HasNetLog || envknob.NoLogsNoSupport() { netLogRunning = false } @@ -1017,7 +1017,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // Shutdown the network logger because the IDs changed. // Let it be started back up by subsequent logic. - if netLogIDsChanged && e.networkLogger.Running() { + if buildfeatures.HasNetLog && netLogIDsChanged && e.networkLogger.Running() { e.logf("wgengine: Reconfig: shutting down network logger") ctx, cancel := context.WithTimeout(context.Background(), networkLoggerUploadTimeout) defer cancel() @@ -1028,7 +1028,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // Startup the network logger. // Do this before configuring the router so that we capture initial packets. 
- if netLogRunning && !e.networkLogger.Running() { + if buildfeatures.HasNetLog && netLogRunning && !e.networkLogger.Running() { nid := cfg.NetworkLogging.NodeID tid := cfg.NetworkLogging.DomainID logExitFlowEnabled := cfg.NetworkLogging.LogExitFlowEnabled From 7bcab4ab2841883251edfbc4523704ef176ca3a6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 28 Sep 2025 19:03:26 -0700 Subject: [PATCH 0449/1093] feature/featuretags: make CLI connection error diagnostics modular Updates #12614 Change-Id: I09b8944166ee00910b402bcd5725cd7969e2c82c Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 2 +- cmd/tailscale/cli/cli.go | 10 ++++++++++ cmd/tailscale/cli/diag.go | 10 +++++++--- cmd/tailscale/cli/diag_other.go | 15 --------------- cmd/tailscaled/depaware-minbox.txt | 1 - .../buildfeatures/feature_cliconndiag_disabled.go | 13 +++++++++++++ .../buildfeatures/feature_cliconndiag_enabled.go | 13 +++++++++++++ feature/featuretags/featuretags.go | 1 + safesocket/safesocket.go | 8 ++++++-- safesocket/safesocket_ps.go | 6 +++--- 10 files changed, 54 insertions(+), 25 deletions(-) delete mode 100644 cmd/tailscale/cli/diag_other.go create mode 100644 feature/buildfeatures/feature_cliconndiag_disabled.go create mode 100644 feature/buildfeatures/feature_cliconndiag_enabled.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 7d322aa31e337..4a7a4b34d2d28 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -87,7 +87,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/disco from tailscale.com/derp/derpserver tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ - tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature from tailscale.com/tsweb+ tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local diff --git 
a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 5206fdd588a1b..389dc916ab723 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -26,6 +26,7 @@ import ( "tailscale.com/client/local" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/paths" "tailscale.com/util/slicesx" "tailscale.com/version/distro" @@ -555,3 +556,12 @@ func lastSeenFmt(t time.Time) string { return fmt.Sprintf(", last seen %dd ago", int(d.Hours()/24)) } } + +var hookFixTailscaledConnectError feature.Hook[func(error) error] // for cliconndiag + +func fixTailscaledConnectError(origErr error) error { + if f, ok := hookFixTailscaledConnectError.GetOk(); ok { + return f(origErr) + } + return origErr +} diff --git a/cmd/tailscale/cli/diag.go b/cmd/tailscale/cli/diag.go index ebf26985fe0bd..3b2aa504b9ea7 100644 --- a/cmd/tailscale/cli/diag.go +++ b/cmd/tailscale/cli/diag.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || windows || darwin +//go:build (linux || windows || darwin) && !ts_omit_cliconndiag package cli @@ -16,11 +16,15 @@ import ( "tailscale.com/version/distro" ) -// fixTailscaledConnectError is called when the local tailscaled has +func init() { + hookFixTailscaledConnectError.Set(fixTailscaledConnectErrorImpl) +} + +// fixTailscaledConnectErrorImpl is called when the local tailscaled has // been determined unreachable due to the provided origErr value. It // returns either the same error or a better one to help the user // understand why tailscaled isn't running for their platform. 
-func fixTailscaledConnectError(origErr error) error { +func fixTailscaledConnectErrorImpl(origErr error) error { procs, err := ps.Processes() if err != nil { return fmt.Errorf("failed to connect to local Tailscaled process and failed to enumerate processes while looking for it") diff --git a/cmd/tailscale/cli/diag_other.go b/cmd/tailscale/cli/diag_other.go deleted file mode 100644 index ece10cc79a822..0000000000000 --- a/cmd/tailscale/cli/diag_other.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux && !windows && !darwin - -package cli - -import "fmt" - -// The github.com/mitchellh/go-ps package doesn't work on all platforms, -// so just don't diagnose connect failures. - -func fixTailscaledConnectError(origErr error) error { - return fmt.Errorf("failed to connect to local tailscaled process (is it running?); got: %w", origErr) -} diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index ad2bedf664493..5c2cbefc2647d 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -35,7 +35,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/mdlayher/netlink/nltest from github.com/google/nftables github.com/mdlayher/sdnotify from tailscale.com/util/systemd 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/mitchellh/go-ps from tailscale.com/safesocket 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf github.com/tailscale/hujson from tailscale.com/ipn/conffile 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ diff --git a/feature/buildfeatures/feature_cliconndiag_disabled.go b/feature/buildfeatures/feature_cliconndiag_disabled.go new file mode 100644 index 0000000000000..06d8c7935fd4a --- /dev/null +++ b/feature/buildfeatures/feature_cliconndiag_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// 
SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_cliconndiag + +package buildfeatures + +// HasCLIConnDiag is whether the binary was built with support for modular feature "CLI connection error diagnostics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cliconndiag" build tag. +// It's a const so it can be used for dead code elimination. +const HasCLIConnDiag = false diff --git a/feature/buildfeatures/feature_cliconndiag_enabled.go b/feature/buildfeatures/feature_cliconndiag_enabled.go new file mode 100644 index 0000000000000..d6125ef08051c --- /dev/null +++ b/feature/buildfeatures/feature_cliconndiag_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_cliconndiag + +package buildfeatures + +// HasCLIConnDiag is whether the binary was built with support for modular feature "CLI connection error diagnostics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cliconndiag" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasCLIConnDiag = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index cd0db6e173595..3e4a6043ac69b 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -96,6 +96,7 @@ var Features = map[FeatureTag]FeatureMeta{ "captiveportal": {"CaptivePortal", "Captive portal detection", nil}, "capture": {"Capture", "Packet capture", nil}, "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, + "cliconndiag": {"CLIConnDiag", "CLI connection error diagnostics", nil}, "completion": {"Completion", "CLI shell completion", nil}, "dbus": {"DBus", "Linux DBus support", nil}, "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, diff --git a/safesocket/safesocket.go b/safesocket/safesocket.go index 721b694dcf86c..ea79edab044c1 100644 --- a/safesocket/safesocket.go +++ b/safesocket/safesocket.go @@ -11,6 +11,8 @@ import ( "net" "runtime" "time" + + "tailscale.com/feature" ) type closeable interface { @@ -31,7 +33,8 @@ func ConnCloseWrite(c net.Conn) error { } var processStartTime = time.Now() -var tailscaledProcExists = func() bool { return false } // set by safesocket_ps.go + +var tailscaledProcExists feature.Hook[func() bool] // tailscaledStillStarting reports whether tailscaled is probably // still starting up. That is, it reports whether the caller should @@ -50,7 +53,8 @@ func tailscaledStillStarting() bool { if d > 5*time.Second { return false } - return tailscaledProcExists() + f, ok := tailscaledProcExists.GetOk() + return ok && f() } // ConnectContext connects to tailscaled using a unix socket or named pipe. 
diff --git a/safesocket/safesocket_ps.go b/safesocket/safesocket_ps.go index 48a8dd483478b..d3f409df58d15 100644 --- a/safesocket/safesocket_ps.go +++ b/safesocket/safesocket_ps.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (linux && !android) || windows || (darwin && !ios) || freebsd +//go:build ((linux && !android) || windows || (darwin && !ios) || freebsd) && !ts_omit_cliconndiag package safesocket @@ -12,7 +12,7 @@ import ( ) func init() { - tailscaledProcExists = func() bool { + tailscaledProcExists.Set(func() bool { procs, err := ps.Processes() if err != nil { return false @@ -30,5 +30,5 @@ func init() { } } return false - } + }) } From 976389c0f73de5048191cca329bfef4886fc5f21 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 27 Sep 2025 19:28:35 -0700 Subject: [PATCH 0450/1093] feature/sdnotify: move util/systemd to a modular feature Updates #12614 Change-Id: I08e714c83b455df7f538cc99cafe940db936b480 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 2 -- cmd/stund/depaware.txt | 1 + cmd/tailscaled/depaware-minbox.txt | 2 -- cmd/tailscaled/depaware.txt | 4 +-- cmd/tsidp/depaware.txt | 2 -- control/controlclient/direct.go | 6 ++-- .../feature_sdnotify_disabled.go | 13 ++++++++ .../buildfeatures/feature_sdnotify_enabled.go | 13 ++++++++ feature/condregister/maybe_sdnotify.go | 8 +++++ feature/featuretags/featuretags.go | 4 +++ feature/sdnotify.go | 32 +++++++++++++++++++ .../doc.go => feature/sdnotify/sdnotify.go | 4 +-- .../sdnotify/sdnotify_linux.go | 16 +++++++--- ipn/ipnlocal/local.go | 7 ++-- ipn/ipnserver/server.go | 6 ++-- tsnet/depaware.txt | 2 -- util/systemd/systemd_nonlinux.go | 9 ------ 18 files changed, 98 insertions(+), 34 deletions(-) create mode 100644 feature/buildfeatures/feature_sdnotify_disabled.go create mode 100644 feature/buildfeatures/feature_sdnotify_enabled.go create mode 100644 
feature/condregister/maybe_sdnotify.go create mode 100644 feature/sdnotify.go rename util/systemd/doc.go => feature/sdnotify/sdnotify.go (81%) rename util/systemd/systemd_linux.go => feature/sdnotify/sdnotify_linux.go (84%) delete mode 100644 util/systemd/systemd_nonlinux.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 4a7a4b34d2d28..258ff46863cf5 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -88,6 +88,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/feature from tailscale.com/tsweb+ + tailscale.com/feature/buildfeatures from tailscale.com/feature tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 7140e57b19333..7a66f25e964af 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -164,7 +164,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/modern-go/concurrent from github.com/json-iterator/go @@ -957,7 +956,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - 
tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 97cf14cf0a6b1..20f58ef2543b7 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -51,6 +51,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com from tailscale.com/version tailscale.com/envknob from tailscale.com/tsweb+ tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature/buildfeatures from tailscale.com/feature tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/net/stunserver+ tailscale.com/net/netaddr from tailscale.com/net/tsaddr diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 5c2cbefc2647d..0498971b3118d 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -33,7 +33,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 github.com/mdlayher/netlink from github.com/google/nftables+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ github.com/mdlayher/netlink/nltest from github.com/google/nftables - github.com/mdlayher/sdnotify from tailscale.com/util/systemd 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf github.com/tailscale/hujson from tailscale.com/ipn/conffile @@ -202,7 +201,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/systemd from 
tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 6d17910526d89..68a29b46e474c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -147,7 +147,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd + L github.com/mdlayher/sdnotify from tailscale.com/feature/sdnotify L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio @@ -282,6 +282,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/portlist from tailscale.com/feature/condregister tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/relayserver from tailscale.com/feature/condregister + L tailscale.com/feature/sdnotify from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister @@ -446,7 +447,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from 
tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 21ea91b46d3a1..b68336d9d9aed 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -128,7 +128,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack @@ -387,7 +386,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index ffac7e9471244..6d18e306f5dd4 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -29,6 +29,7 @@ import ( "go4.org/mem" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" @@ -57,7 +58,6 @@ import ( "tailscale.com/util/singleflight" 
"tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" - "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/zstdframe" ) @@ -543,7 +543,9 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new } else { if expired { c.logf("Old key expired -> regen=true") - systemd.Status("key expired; run 'tailscale up' to authenticate") + if f, ok := feature.HookSystemdStatus.GetOk(); ok { + f("key expired; run 'tailscale up' to authenticate") + } regen = true } if (opt.Flags & LoginInteractive) != 0 { diff --git a/feature/buildfeatures/feature_sdnotify_disabled.go b/feature/buildfeatures/feature_sdnotify_disabled.go new file mode 100644 index 0000000000000..7efa2d22ff587 --- /dev/null +++ b/feature/buildfeatures/feature_sdnotify_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_sdnotify + +package buildfeatures + +// HasSDNotify is whether the binary was built with support for modular feature "systemd notification support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_sdnotify" build tag. +// It's a const so it can be used for dead code elimination. +const HasSDNotify = false diff --git a/feature/buildfeatures/feature_sdnotify_enabled.go b/feature/buildfeatures/feature_sdnotify_enabled.go new file mode 100644 index 0000000000000..40fec9755dd16 --- /dev/null +++ b/feature/buildfeatures/feature_sdnotify_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_sdnotify + +package buildfeatures + +// HasSDNotify is whether the binary was built with support for modular feature "systemd notification support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_sdnotify" build tag. 
+// It's a const so it can be used for dead code elimination. +const HasSDNotify = true diff --git a/feature/condregister/maybe_sdnotify.go b/feature/condregister/maybe_sdnotify.go new file mode 100644 index 0000000000000..647996f881d8f --- /dev/null +++ b/feature/condregister/maybe_sdnotify.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_sdnotify + +package condregister + +import _ "tailscale.com/feature/sdnotify" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 3e4a6043ac69b..c566eb9495b2e 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -145,6 +145,10 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Linux systemd-resolved integration", Deps: []FeatureTag{"dbus"}, }, + "sdnotify": { + Sym: "SDNotify", + Desc: "systemd notification support", + }, "serve": { Sym: "Serve", Desc: "Serve and Funnel support", diff --git a/feature/sdnotify.go b/feature/sdnotify.go new file mode 100644 index 0000000000000..e785dc1acc09a --- /dev/null +++ b/feature/sdnotify.go @@ -0,0 +1,32 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package feature + +import ( + "runtime" + + "tailscale.com/feature/buildfeatures" +) + +// HookSystemdReady sends a readiness to systemd. This will unblock service +// dependents from starting. +var HookSystemdReady Hook[func()] + +// HookSystemdStatus holds a func that will send a single line status update to +// systemd so that information shows up in systemctl output. +var HookSystemdStatus Hook[func(format string, args ...any)] + +// SystemdStatus sends a single line status update to systemd so that +// information shows up in systemctl output. +// +// It does nothing on non-Linux systems or if the binary was built without +// the sdnotify feature. 
+func SystemdStatus(format string, args ...any) { + if runtime.GOOS != "linux" || !buildfeatures.HasSDNotify { + return + } + if f, ok := HookSystemdStatus.GetOk(); ok { + f(format, args...) + } +} diff --git a/util/systemd/doc.go b/feature/sdnotify/sdnotify.go similarity index 81% rename from util/systemd/doc.go rename to feature/sdnotify/sdnotify.go index 0c28e182354ec..d13aa63f23c15 100644 --- a/util/systemd/doc.go +++ b/feature/sdnotify/sdnotify.go @@ -2,7 +2,7 @@ // SPDX-License-Identifier: BSD-3-Clause /* -Package systemd contains a minimal wrapper around systemd-notify to enable +Package sdnotify contains a minimal wrapper around systemd-notify to enable applications to signal readiness and status to systemd. This package will only have effect on Linux systems running Tailscale in a @@ -10,4 +10,4 @@ systemd unit with the Type=notify flag set. On other operating systems (or when running in a Linux distro without being run from inside systemd) this package will become a no-op. */ -package systemd +package sdnotify diff --git a/util/systemd/systemd_linux.go b/feature/sdnotify/sdnotify_linux.go similarity index 84% rename from util/systemd/systemd_linux.go rename to feature/sdnotify/sdnotify_linux.go index fdfd1bba05451..b005f1bdb2bb2 100644 --- a/util/systemd/systemd_linux.go +++ b/feature/sdnotify/sdnotify_linux.go @@ -3,7 +3,7 @@ //go:build linux && !android -package systemd +package sdnotify import ( "errors" @@ -12,8 +12,14 @@ import ( "sync" "github.com/mdlayher/sdnotify" + "tailscale.com/feature" ) +func init() { + feature.HookSystemdReady.Set(ready) + feature.HookSystemdStatus.Set(status) +} + var getNotifyOnce struct { sync.Once v *sdnotify.Notifier @@ -46,15 +52,15 @@ func notifier() *sdnotify.Notifier { return getNotifyOnce.v } -// Ready signals readiness to systemd. This will unblock service dependents from starting. -func Ready() { +// ready signals readiness to systemd. This will unblock service dependents from starting. 
+func ready() { err := notifier().Notify(sdnotify.Ready) if err != nil { readyOnce.logf("systemd: error notifying: %v", err) } } -// Status sends a single line status update to systemd so that information shows up +// status sends a single line status update to systemd so that information shows up // in systemctl output. For example: // // $ systemctl status tailscale @@ -69,7 +75,7 @@ func Ready() { // CPU: 2min 38.469s // CGroup: /system.slice/tailscale.service // └─26741 /nix/store/sv6cj4mw2jajm9xkbwj07k29dj30lh0n-tailscale-date.20200727/bin/tailscaled --port 41641 -func Status(format string, args ...any) { +func status(format string, args ...any) { err := notifier().Notify(sdnotify.Statusf(format, args...)) if err != nil { statusOnce.logf("systemd: error notifying: %v", err) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e07f7041c94e9..f84a023f83138 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -102,7 +102,6 @@ import ( "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" - "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" "tailscale.com/version" @@ -5488,7 +5487,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock switch newState { case ipn.NeedsLogin: - systemd.Status("Needs login: %s", authURL) + feature.SystemdStatus("Needs login: %s", authURL) // always block updates on NeedsLogin even if seamless renewal is enabled, // to prevent calls to authReconfig from reconfiguring the engine when our // key has expired and we're waiting to authenticate to use the new key. 
@@ -5503,7 +5502,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock } if newState == ipn.Stopped && authURL == "" { - systemd.Status("Stopped; run 'tailscale up' to log in") + feature.SystemdStatus("Stopped; run 'tailscale up' to log in") } case ipn.Starting, ipn.NeedsMachineAuth: b.authReconfig() @@ -5515,7 +5514,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock for _, p := range addrs.All() { addrStrs = append(addrStrs, p.Addr().String()) } - systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) + feature.SystemdStatus("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) default: b.logf("[unexpected] unknown newState %#v", newState) } diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 7e864959b36fe..6c382a57e9bd2 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -23,6 +23,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" @@ -32,7 +33,6 @@ import ( "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/set" - "tailscale.com/util/systemd" "tailscale.com/util/testenv" ) @@ -513,7 +513,9 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { ln.Close() }() - systemd.Ready() + if ready, ok := feature.HookSystemdReady.GetOk(); ok { + ready() + } hs := &http.Server{ Handler: http.HandlerFunc(s.serveHTTP), diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 6c7dc6b5507d1..97256508af82b 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -128,7 +128,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L 
github.com/mdlayher/sdnotify from tailscale.com/util/systemd LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack @@ -382,7 +381,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ diff --git a/util/systemd/systemd_nonlinux.go b/util/systemd/systemd_nonlinux.go deleted file mode 100644 index 5d7772bb3e61f..0000000000000 --- a/util/systemd/systemd_nonlinux.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || android - -package systemd - -func Ready() {} -func Status(string, ...any) {} From 65d6c80695b27b57a45572caad0f96d8f374f327 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 24 Sep 2025 15:02:57 -0700 Subject: [PATCH 0451/1093] cmd/tailscale/cli,client,ipn: add appc-routes cli command Allow the user to access information about routes an app connector has learned, such as how many routes for each domain. 
Fixes tailscale/corp#32624 Signed-off-by: Fran Bull --- client/local/local.go | 9 ++ cmd/derper/depaware.txt | 2 + cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/cli/appcroutes.go | 153 +++++++++++++++++++++++++++++ cmd/tailscale/cli/cli.go | 1 + cmd/tailscale/depaware.txt | 2 + cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- ipn/ipnlocal/local.go | 9 ++ ipn/localapi/localapi.go | 20 ++++ tsnet/depaware.txt | 2 +- 12 files changed, 201 insertions(+), 5 deletions(-) create mode 100644 cmd/tailscale/cli/appcroutes.go diff --git a/client/local/local.go b/client/local/local.go index 246112c37b5c6..a3717ad776a2e 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -27,6 +27,7 @@ import ( "sync" "time" + "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" "tailscale.com/drive" "tailscale.com/envknob" @@ -1374,3 +1375,11 @@ func (lc *Client) ShutdownTailscaled(ctx context.Context) error { _, err := lc.send(ctx, "POST", "/localapi/v0/shutdown", 200, nil) return err } + +func (lc *Client) GetAppConnectorRouteInfo(ctx context.Context) (appc.RouteInfo, error) { + body, err := lc.get200(ctx, "/localapi/v0/appc-route-info") + if err != nil { + return appc.RouteInfo{}, err + } + return decodeJSON[appc.RouteInfo](body) +} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 258ff46863cf5..08aa374d6f87a 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -77,6 +77,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/client/local 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ tailscale.com/client/local from tailscale.com/derp/derpserver 
tailscale.com/client/tailscale/apitype from tailscale.com/client/local @@ -151,6 +152,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/hostinfo+ tailscale.com/util/eventbus from tailscale.com/net/netmon+ + tailscale.com/util/execqueue from tailscale.com/appc 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/health+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 7a66f25e964af..d81abf550dbc9 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -769,7 +769,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sigs.k8s.io/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json+ sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+ diff --git a/cmd/tailscale/cli/appcroutes.go b/cmd/tailscale/cli/appcroutes.go new file mode 100644 index 0000000000000..83443f56c8dc0 --- /dev/null +++ b/cmd/tailscale/cli/appcroutes.go @@ -0,0 +1,153 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "slices" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/appc" +) + +var appcRoutesArgs struct { + all bool + domainMap bool + n bool +} + +var appcRoutesCmd = &ffcli.Command{ + Name: "appc-routes", + ShortUsage: "tailscale appc-routes", + Exec: runAppcRoutesInfo, + ShortHelp: "Print the current app 
connector routes", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("appc-routes") + fs.BoolVar(&appcRoutesArgs.all, "all", false, "Print learned domains and routes and extra policy configured routes.") + fs.BoolVar(&appcRoutesArgs.domainMap, "map", false, "Print the map of learned domains: [routes].") + fs.BoolVar(&appcRoutesArgs.n, "n", false, "Print the total number of routes this node advertises.") + return fs + })(), + LongHelp: strings.TrimSpace(` +The 'tailscale appc-routes' command prints the current App Connector route status. + +By default this command prints the domains configured in the app connector configuration and how many routes have been +learned for each domain. + +--all prints the routes learned from the domains configured in the app connector configuration; and any extra routes provided +in the the policy app connector 'routes' field. + +--map prints the routes learned from the domains configured in the app connector configuration. + +-n prints the total number of routes advertised by this device, whether learned, set in the policy, or set locally. 
+ +For more information about App Connectors, refer to +https://tailscale.com/kb/1281/app-connectors +`), +} + +func getAllOutput(ri *appc.RouteInfo) (string, error) { + domains, err := json.MarshalIndent(ri.Domains, " ", " ") + if err != nil { + return "", err + } + control, err := json.MarshalIndent(ri.Control, " ", " ") + if err != nil { + return "", err + } + s := fmt.Sprintf(`Learned Routes +============== +%s + +Routes from Policy +================== +%s +`, domains, control) + return s, nil +} + +type domainCount struct { + domain string + count int +} + +func getSummarizeLearnedOutput(ri *appc.RouteInfo) string { + x := make([]domainCount, len(ri.Domains)) + i := 0 + maxDomainWidth := 0 + for k, v := range ri.Domains { + if len(k) > maxDomainWidth { + maxDomainWidth = len(k) + } + x[i] = domainCount{domain: k, count: len(v)} + i++ + } + slices.SortFunc(x, func(i, j domainCount) int { + if i.count > j.count { + return -1 + } + if i.count < j.count { + return 1 + } + if i.domain > j.domain { + return 1 + } + if i.domain < j.domain { + return -1 + } + return 0 + }) + s := "" + fmtString := fmt.Sprintf("%%-%ds %%d\n", maxDomainWidth) // eg "%-10s %d\n" + for _, dc := range x { + s += fmt.Sprintf(fmtString, dc.domain, dc.count) + } + return s +} + +func runAppcRoutesInfo(ctx context.Context, args []string) error { + prefs, err := localClient.GetPrefs(ctx) + if err != nil { + return err + } + if !prefs.AppConnector.Advertise { + fmt.Println("not a connector") + return nil + } + + if appcRoutesArgs.n { + fmt.Println(len(prefs.AdvertiseRoutes)) + return nil + } + + routeInfo, err := localClient.GetAppConnectorRouteInfo(ctx) + if err != nil { + return err + } + + if appcRoutesArgs.domainMap { + domains, err := json.Marshal(routeInfo.Domains) + if err != nil { + return err + } + fmt.Println(string(domains)) + return nil + } + + if appcRoutesArgs.all { + s, err := getAllOutput(&routeInfo) + if err != nil { + return err + } + fmt.Println(s) + return nil + } + + 
fmt.Print(getSummarizeLearnedOutput(&routeInfo)) + return nil +} diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 389dc916ab723..5ebc23a5befea 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -276,6 +276,7 @@ change in the future. idTokenCmd, configureHostCmd(), systrayCmd, + appcRoutesCmd, ), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 47e5ca48e7669..2d724a9009bec 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -70,6 +70,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep software.sslmate.com/src/go-pkcs12 from tailscale.com/cmd/tailscale/cli software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12 tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/client/local+ 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/local from tailscale.com/client/tailscale+ L tailscale.com/client/systray from tailscale.com/cmd/tailscale/cli @@ -168,6 +169,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/eventbus from tailscale.com/client/local+ + tailscale.com/util/execqueue from tailscale.com/appc tailscale.com/util/groupmember from tailscale.com/client/web 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 0498971b3118d..aefa78c42a6cc 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -51,7 +51,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 go4.org/mem from 
tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ tailscale.com/clientupdate from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 68a29b46e474c..dde1e06810df7 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -240,7 +240,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled tailscale.com/client/local from tailscale.com/client/web+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index b68336d9d9aed..71c274794a9ed 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -211,7 +211,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 
f84a023f83138..a95aef0f20239 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7124,6 +7124,15 @@ func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { return ri, nil } +// ReadRouteInfo returns the app connector route information that is +// stored in prefs to be consistent across restarts. It should be up +// to date with the RouteInfo in memory being used by appc. +func (b *LocalBackend) ReadRouteInfo() (*appc.RouteInfo, error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.readRouteInfoLocked() +} + // seamlessRenewalEnabled reports whether seamless key renewals are enabled. // // As of 2025-09-11, this is the default behaviour unless nodes receive diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index e0c06b7dca558..caebbe0cc6730 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -25,6 +25,7 @@ import ( "time" "golang.org/x/net/dns/dnsmessage" + "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" "tailscale.com/clientupdate" "tailscale.com/envknob" @@ -73,6 +74,7 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 + "appc-route-info": (*Handler).serveGetAppcRouteInfo, "bugreport": (*Handler).serveBugReport, "check-ip-forwarding": (*Handler).serveCheckIPForwarding, "check-prefs": (*Handler).serveCheckPrefs, @@ -2111,3 +2113,21 @@ func (h *Handler) serveShutdown(w http.ResponseWriter, r *http.Request) { eventbus.Publish[Shutdown](ec).Publish(Shutdown{}) } + +func (h *Handler) serveGetAppcRouteInfo(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) + return + } + res, err := h.b.ReadRouteInfo() + if err != nil { + if errors.Is(err, ipn.ErrStateNotExist) { + res = &appc.RouteInfo{} + } else { + WriteErrorJSON(w, 
err) + return + } + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 97256508af82b..47c6b033c37a3 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -207,7 +207,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale From 39e35379d41fc78871362bf9dea2111a92744e21 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 27 Sep 2025 15:18:25 -0700 Subject: [PATCH 0452/1093] wgengine/router{,/osrouter}: split OS router implementations into subpackage So wgengine/router is just the docs + entrypoint + types, and then underscore importing wgengine/router/osrouter registers the constructors with the wgengine/router package. Then tsnet can not pull those in. 
Updates #17313 Change-Id: If313226f6987d709ea9193c8f16a909326ceefe7 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 17 +------ cmd/tailscaled/depaware-minbox.txt | 21 ++------ cmd/tailscaled/depaware.txt | 5 +- cmd/tsidp/depaware.txt | 19 +------ .../feature_osrouter_disabled.go | 13 +++++ .../buildfeatures/feature_osrouter_enabled.go | 13 +++++ feature/condregister/maybe_osrouter.go | 8 +++ feature/featuretags/featuretags.go | 10 ++++ license_test.go | 2 +- tsnet/depaware.txt | 19 +------ wgengine/router/consolidating_router_test.go | 3 +- .../router/{ => osrouter}/ifconfig_windows.go | 5 +- .../{ => osrouter}/ifconfig_windows_test.go | 2 +- wgengine/router/osrouter/osrouter.go | 15 ++++++ wgengine/router/osrouter/osrouter_test.go | 15 ++++++ .../router/{ => osrouter}/router_freebsd.go | 17 +++---- .../router/{ => osrouter}/router_linux.go | 20 ++++++-- .../{ => osrouter}/router_linux_test.go | 5 +- .../router/{ => osrouter}/router_openbsd.go | 18 +++++-- .../router/{ => osrouter}/router_plan9.go | 21 +++++--- .../{ => osrouter}/router_userspace_bsd.go | 13 +++-- .../router/{ => osrouter}/router_windows.go | 17 ++++--- .../{ => osrouter}/router_windows_test.go | 2 +- wgengine/router/{ => osrouter}/runner.go | 2 +- wgengine/router/router.go | 49 ++++++++++++++++--- wgengine/router/router_android.go | 30 ------------ wgengine/router/router_darwin.go | 20 -------- wgengine/router/router_default.go | 25 ---------- wgengine/router/router_test.go | 9 ---- 29 files changed, 208 insertions(+), 207 deletions(-) create mode 100644 feature/buildfeatures/feature_osrouter_disabled.go create mode 100644 feature/buildfeatures/feature_osrouter_enabled.go create mode 100644 feature/condregister/maybe_osrouter.go rename wgengine/router/{ => osrouter}/ifconfig_windows.go (99%) rename wgengine/router/{ => osrouter}/ifconfig_windows_test.go (99%) create mode 100644 wgengine/router/osrouter/osrouter.go create mode 100644 wgengine/router/osrouter/osrouter_test.go 
rename wgengine/router/{ => osrouter}/router_freebsd.go (54%) rename wgengine/router/{ => osrouter}/router_linux.go (98%) rename wgengine/router/{ => osrouter}/router_linux_test.go (99%) rename wgengine/router/{ => osrouter}/router_openbsd.go (93%) rename wgengine/router/{ => osrouter}/router_plan9.go (89%) rename wgengine/router/{ => osrouter}/router_userspace_bsd.go (93%) rename wgengine/router/{ => osrouter}/router_windows.go (97%) rename wgengine/router/{ => osrouter}/router_windows_test.go (95%) rename wgengine/router/{ => osrouter}/runner.go (99%) delete mode 100644 wgengine/router/router_android.go delete mode 100644 wgengine/router/router_darwin.go delete mode 100644 wgengine/router/router_default.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d81abf550dbc9..223baa43c0a3d 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -86,7 +86,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ @@ -113,8 +112,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/go-logr/logr from github.com/go-logr/logr/slogr+ github.com/go-logr/logr/slogr from github.com/go-logr/zapr github.com/go-logr/zapr from sigs.k8s.io/controller-runtime/pkg/log/zap+ - W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet github.com/go-openapi/jsonpointer from 
github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference from k8s.io/kube-openapi/pkg/internal+ github.com/go-openapi/jsonreference/internal from github.com/go-openapi/jsonreference @@ -137,12 +134,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/google/go-cmp/cmp/internal/value from github.com/google/go-cmp/cmp github.com/google/gofuzz from k8s.io/apimachinery/pkg/apis/meta/v1+ github.com/google/gofuzz/bytesource from github.com/google/gofuzz - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm @@ -161,9 +152,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/modern-go/concurrent from github.com/json-iterator/go @@ -199,8 +189,6 @@ 
tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile+ - L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -213,7 +201,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 go.uber.org/multierr from go.uber.org/zap+ go.uber.org/zap from github.com/go-logr/zapr+ @@ -931,7 +918,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -980,7 +966,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ 
tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index aefa78c42a6cc..6cc3733a98280 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -13,12 +13,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/nftables from tailscale.com/util/linuxfw - 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - github.com/google/nftables/expr from github.com/google/nftables+ - github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -30,14 +24,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/mdlayher/genetlink from tailscale.com/net/tstun - 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 
github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - github.com/mdlayher/netlink/nltest from github.com/google/nftables 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf github.com/tailscale/hujson from tailscale.com/ipn/conffile - 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ - 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ @@ -47,7 +38,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - github.com/vishvananda/netns from github.com/tailscale/netlink+ 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version @@ -139,7 +129,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tka from tailscale.com/control/controlclient+ - tailscale.com/tsconst from tailscale.com/net/netns+ + tailscale.com/tsconst from tailscale.com/net/netns tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ @@ -183,7 +173,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/hashx from tailscale.com/util/deephash 
tailscale.com/util/httpm from tailscale.com/clientupdate/distsign+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -253,13 +242,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/proxy from tailscale.com/net/netns golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ - golang.org/x/sys/unix from github.com/google/nftables+ + golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ golang.org/x/term from tailscale.com/logpolicy golang.org/x/text/secure/bidirule from golang.org/x/net/idna golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna - golang.org/x/time/rate from tailscale.com/derp+ + golang.org/x/time/rate from tailscale.com/derp archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ @@ -392,7 +381,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ - io/ioutil from github.com/digitalocean/go-smbios/smbios+ + io/ioutil from github.com/digitalocean/go-smbios/smbios iter from bytes+ log from expvar+ log/internal from log diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index dde1e06810df7..4051000a68a81 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -420,7 +420,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/httphdr from tailscale.com/feature/taildrop 
tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/wgengine/router + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router/osrouter tailscale.com/util/mak from tailscale.com/control/controlclient+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -468,11 +468,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/wgengine/netstack from tailscale.com/cmd/tailscaled tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/router/osrouter from tailscale.com/feature/condregister tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router + W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router/osrouter golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 71c274794a9ed..dfb6553bdc5ad 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -84,7 +84,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes 
from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc @@ -101,17 +100,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm @@ -125,9 +116,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 
github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack @@ -145,8 +135,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile - L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -159,7 +147,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ @@ -361,7 +348,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/wgengine/router 
tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -410,7 +396,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ @@ -455,7 +440,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ diff --git a/feature/buildfeatures/feature_osrouter_disabled.go b/feature/buildfeatures/feature_osrouter_disabled.go new file mode 100644 index 0000000000000..ccd7192bb8899 --- /dev/null +++ b/feature/buildfeatures/feature_osrouter_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_osrouter + +package buildfeatures + +// HasOSRouter is whether the binary was built with support for modular feature "Configure the operating system's network stack, IPs, and routing tables". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_osrouter" build tag. +// It's a const so it can be used for dead code elimination. +const HasOSRouter = false diff --git a/feature/buildfeatures/feature_osrouter_enabled.go b/feature/buildfeatures/feature_osrouter_enabled.go new file mode 100644 index 0000000000000..a5dacc596bfbc --- /dev/null +++ b/feature/buildfeatures/feature_osrouter_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_osrouter + +package buildfeatures + +// HasOSRouter is whether the binary was built with support for modular feature "Configure the operating system's network stack, IPs, and routing tables". +// Specifically, it's whether the binary was NOT built with the "ts_omit_osrouter" build tag. +// It's a const so it can be used for dead code elimination. +const HasOSRouter = true diff --git a/feature/condregister/maybe_osrouter.go b/feature/condregister/maybe_osrouter.go new file mode 100644 index 0000000000000..7ab85add22021 --- /dev/null +++ b/feature/condregister/maybe_osrouter.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_osrouter + +package condregister + +import _ "tailscale.com/wgengine/router/osrouter" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index c566eb9495b2e..c417647413952 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -126,6 +126,16 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Outbound localhost HTTP/SOCK5 proxy support", Deps: []FeatureTag{"netstack"}, }, + "osrouter": { + Sym: "OSRouter", + Desc: "Configure the operating system's network stack, IPs, and routing tables", + // TODO(bradfitz): if this is omitted, and netstack is too, then tailscaled needs + // external config to be useful. 
Some people may want that, and we should support it, + // but it's rare. Maybe there should be a way to declare here that this "Provides" + // another feature (and netstack can too), and then if those required features provided + // by some other feature are missing, then it's an error by default unless you accept + // that it's okay to proceed without that meta feature. + }, "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, "netlog": { diff --git a/license_test.go b/license_test.go index ec452a6e36be7..9b62c48ed218e 100644 --- a/license_test.go +++ b/license_test.go @@ -34,7 +34,7 @@ func TestLicenseHeaders(t *testing.T) { // WireGuard copyright "cmd/tailscale/cli/authenticode_windows.go", - "wgengine/router/ifconfig_windows.go", + "wgengine/router/osrouter/ifconfig_windows.go", // noiseexplorer.com copyright "control/controlbase/noiseexplorer_test.go", diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 47c6b033c37a3..bda491f37cfdb 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -84,7 +84,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket LDW github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc @@ -101,17 +100,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from 
github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm @@ -125,9 +116,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack @@ -145,8 +135,6 @@ tailscale.com/tsnet 
dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp LDW github.com/tailscale/hujson from tailscale.com/ipn/conffile - L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw+ - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/peercred from tailscale.com/ipn/ipnauth LDW github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -159,7 +147,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ @@ -356,7 +343,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/wgengine/router tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ @@ -405,7 +391,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from 
tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ @@ -448,7 +433,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ - LDAI golang.org/x/sys/unix from github.com/google/nftables+ + LDAI golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ diff --git a/wgengine/router/consolidating_router_test.go b/wgengine/router/consolidating_router_test.go index 871682d1346bc..ba2e4d07a746a 100644 --- a/wgengine/router/consolidating_router_test.go +++ b/wgengine/router/consolidating_router_test.go @@ -4,7 +4,6 @@ package router import ( - "log" "net/netip" "testing" @@ -56,7 +55,7 @@ func TestConsolidateRoutes(t *testing.T) { }, } - cr := &consolidatingRouter{logf: log.Printf} + cr := &consolidatingRouter{logf: t.Logf} for _, test := range tests { t.Run(test.name, func(t *testing.T) { got := cr.consolidateRoutes(test.cfg) diff --git a/wgengine/router/ifconfig_windows.go b/wgengine/router/osrouter/ifconfig_windows.go similarity index 99% rename from wgengine/router/ifconfig_windows.go rename to wgengine/router/osrouter/ifconfig_windows.go index 40e9dc6e0cdfd..78ac8d45fb59f 100644 --- a/wgengine/router/ifconfig_windows.go +++ b/wgengine/router/osrouter/ifconfig_windows.go @@ -3,7 +3,7 @@ * Copyright (C) 2019 WireGuard LLC. All Rights Reserved. 
*/ -package router +package osrouter import ( "errors" @@ -19,6 +19,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" "tailscale.com/util/multierr" + "tailscale.com/wgengine/router" "tailscale.com/wgengine/winnet" ole "github.com/go-ole/go-ole" @@ -246,7 +247,7 @@ var networkCategoryWarnable = health.Register(&health.Warnable{ MapDebugFlag: "warn-network-category-unhealthy", }) -func configureInterface(cfg *Config, tun *tun.NativeTun, ht *health.Tracker) (retErr error) { +func configureInterface(cfg *router.Config, tun *tun.NativeTun, ht *health.Tracker) (retErr error) { var mtu = tstun.DefaultTUNMTU() luid := winipcfg.LUID(tun.LUID()) iface, err := interfaceFromLUID(luid, diff --git a/wgengine/router/ifconfig_windows_test.go b/wgengine/router/osrouter/ifconfig_windows_test.go similarity index 99% rename from wgengine/router/ifconfig_windows_test.go rename to wgengine/router/osrouter/ifconfig_windows_test.go index 11b98d1d77d98..b858ef4f60d19 100644 --- a/wgengine/router/ifconfig_windows_test.go +++ b/wgengine/router/osrouter/ifconfig_windows_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "fmt" diff --git a/wgengine/router/osrouter/osrouter.go b/wgengine/router/osrouter/osrouter.go new file mode 100644 index 0000000000000..281454b069984 --- /dev/null +++ b/wgengine/router/osrouter/osrouter.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package osrouter contains OS-specific router implementations. +// This package has no API; it exists purely to import +// for the side effect of it registering itself with the wgengine/router +// package. +package osrouter + +import "tailscale.com/wgengine/router" + +// shutdownConfig is a routing configuration that removes all router +// state from the OS. It's the config used when callers pass in a nil +// Config. 
+var shutdownConfig router.Config diff --git a/wgengine/router/osrouter/osrouter_test.go b/wgengine/router/osrouter/osrouter_test.go new file mode 100644 index 0000000000000..d0cb3db6968c1 --- /dev/null +++ b/wgengine/router/osrouter/osrouter_test.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package osrouter + +import "net/netip" + +//lint:ignore U1000 used in Windows/Linux tests only +func mustCIDRs(ss ...string) []netip.Prefix { + var ret []netip.Prefix + for _, s := range ss { + ret = append(ret, netip.MustParsePrefix(s)) + } + return ret +} diff --git a/wgengine/router/router_freebsd.go b/wgengine/router/osrouter/router_freebsd.go similarity index 54% rename from wgengine/router/router_freebsd.go rename to wgengine/router/osrouter/router_freebsd.go index ce4753d7dc611..a142e7a84e14a 100644 --- a/wgengine/router/router_freebsd.go +++ b/wgengine/router/osrouter/router_freebsd.go @@ -1,23 +1,18 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" - "tailscale.com/util/eventbus" + "tailscale.com/wgengine/router" ) -// For now this router only supports the userspace WireGuard implementations. 
-// -// Work is currently underway for an in-kernel FreeBSD implementation of wireguard -// https://svnweb.freebsd.org/base?view=revision&revision=357986 - -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { - return newUserspaceBSDRouter(logf, tundev, netMon, health) +func init() { + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) } func cleanUp(logf logger.Logf, interfaceName string) { diff --git a/wgengine/router/router_linux.go b/wgengine/router/osrouter/router_linux.go similarity index 98% rename from wgengine/router/router_linux.go rename to wgengine/router/osrouter/router_linux.go index 75ff64f4037fd..478935483ade6 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -3,7 +3,7 @@ //go:build !android -package router +package osrouter import ( "errors" @@ -34,8 +34,18 @@ import ( "tailscale.com/util/linuxfw" "tailscale.com/util/multierr" "tailscale.com/version/distro" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) +} + var getDistroFunc = distro.Get const ( @@ -81,7 +91,7 @@ type linuxRouter struct { magicsockPortV6 uint16 } -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { tunname, err := tunDev.Name() if err != nil { return nil, err @@ -94,7 +104,7 @@ func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon 
*netmon.Moni return newUserspaceRouterAdvanced(logf, tunname, netMon, cmd, health, bus) } -func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { r := &linuxRouter{ logf: logf, tunname: tunname, @@ -401,7 +411,7 @@ func (r *linuxRouter) setupNetfilter(kind string) error { } // Set implements the Router interface. -func (r *linuxRouter) Set(cfg *Config) error { +func (r *linuxRouter) Set(cfg *router.Config) error { var errs []error if cfg == nil { cfg = &shutdownConfig @@ -488,7 +498,7 @@ var dockerStatefulFilteringWarnable = health.Register(&health.Warnable{ Text: health.StaticMessage("Stateful filtering is enabled and Docker was detected; this may prevent Docker containers on this host from resolving DNS and connecting to Tailscale nodes. See https://tailscale.com/s/stateful-docker"), }) -func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *Config) { +func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *router.Config) { // If stateful filtering is disabled, clear the warning. 
if !r.statefulFiltering { r.health.SetHealthy(dockerStatefulFilteringWarnable) diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go similarity index 99% rename from wgengine/router/router_linux_test.go rename to wgengine/router/osrouter/router_linux_test.go index b7f3a8ba12309..39210ddef14a2 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "errors" @@ -32,8 +32,11 @@ import ( "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/linuxfw" "tailscale.com/version/distro" + "tailscale.com/wgengine/router" ) +type Config = router.Config + func TestRouterStates(t *testing.T) { basic := ` ip rule add -4 pref 5210 fwmark 0x80000/0xff0000 table main diff --git a/wgengine/router/router_openbsd.go b/wgengine/router/osrouter/router_openbsd.go similarity index 93% rename from wgengine/router/router_openbsd.go rename to wgengine/router/osrouter/router_openbsd.go index f91878b4c993d..8f35993096858 100644 --- a/wgengine/router/router_openbsd.go +++ b/wgengine/router/osrouter/router_openbsd.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "errors" @@ -17,10 +17,18 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/eventbus" "tailscale.com/util/set" + "tailscale.com/wgengine/router" ) -// For now this router only supports the WireGuard userspace implementation. 
-// There is an experimental kernel version in the works for OpenBSD: +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) +} + // https://git.zx2c4.com/wireguard-openbsd. type openbsdRouter struct { @@ -32,7 +40,7 @@ type openbsdRouter struct { routes set.Set[netip.Prefix] } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { tunname, err := tundev.Name() if err != nil { return nil, err @@ -68,7 +76,7 @@ func inet(p netip.Prefix) string { return "inet" } -func (r *openbsdRouter) Set(cfg *Config) error { +func (r *openbsdRouter) Set(cfg *router.Config) error { if cfg == nil { cfg = &shutdownConfig } diff --git a/wgengine/router/router_plan9.go b/wgengine/router/osrouter/router_plan9.go similarity index 89% rename from wgengine/router/router_plan9.go rename to wgengine/router/osrouter/router_plan9.go index fd6850ade3762..5872aa7fc0e19 100644 --- a/wgengine/router/router_plan9.go +++ b/wgengine/router/osrouter/router_plan9.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "bufio" @@ -15,10 +15,19 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" - "tailscale.com/util/eventbus" + "tailscale.com/wgengine/router" ) -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func init() { + router.HookCleanUp.Set(func(logf logger.Logf, netMon 
*netmon.Monitor, ifName string) { + cleanAllTailscaleRoutes(logf) + }) + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon) + }) +} + +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor) (router.Router, error) { r := &plan9Router{ logf: logf, tundev: tundev, @@ -39,7 +48,7 @@ func (r *plan9Router) Up() error { return nil } -func (r *plan9Router) Set(cfg *Config) error { +func (r *plan9Router) Set(cfg *router.Config) error { if cfg == nil { cleanAllTailscaleRoutes(r.logf) return nil @@ -118,10 +127,6 @@ func (r *plan9Router) Close() error { return nil } -func cleanUp(logf logger.Logf, _ string) { - cleanAllTailscaleRoutes(logf) -} - func cleanAllTailscaleRoutes(logf logger.Logf) { routes, err := os.OpenFile("/net/iproute", os.O_RDWR, 0) if err != nil { diff --git a/wgengine/router/router_userspace_bsd.go b/wgengine/router/osrouter/router_userspace_bsd.go similarity index 93% rename from wgengine/router/router_userspace_bsd.go rename to wgengine/router/osrouter/router_userspace_bsd.go index 0b7e4f36aa6e5..cdaf3adeae1b2 100644 --- a/wgengine/router/router_userspace_bsd.go +++ b/wgengine/router/osrouter/router_userspace_bsd.go @@ -3,7 +3,7 @@ //go:build darwin || freebsd -package router +package osrouter import ( "fmt" @@ -19,8 +19,15 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/types/logger" "tailscale.com/version" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceBSDRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health) + }) +} + type userspaceBSDRouter struct { logf logger.Logf netMon *netmon.Monitor @@ -30,7 +37,7 @@ type userspaceBSDRouter struct { routes map[netip.Prefix]bool } -func newUserspaceBSDRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func 
newUserspaceBSDRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (router.Router, error) { tunname, err := tundev.Name() if err != nil { return nil, err @@ -99,7 +106,7 @@ func inet(p netip.Prefix) string { return "inet" } -func (r *userspaceBSDRouter) Set(cfg *Config) (reterr error) { +func (r *userspaceBSDRouter) Set(cfg *router.Config) (reterr error) { if cfg == nil { cfg = &shutdownConfig } diff --git a/wgengine/router/router_windows.go b/wgengine/router/osrouter/router_windows.go similarity index 97% rename from wgengine/router/router_windows.go rename to wgengine/router/osrouter/router_windows.go index edd258cb3f0a3..05bf210e82a7d 100644 --- a/wgengine/router/router_windows.go +++ b/wgengine/router/osrouter/router_windows.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "bufio" @@ -28,8 +28,15 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/backoff" "tailscale.com/util/eventbus" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) +} + type winRouter struct { logf func(fmt string, args ...any) netMon *netmon.Monitor // may be nil @@ -39,7 +46,7 @@ type winRouter struct { firewall *firewallTweaker } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { nativeTun := tundev.(*tun.NativeTun) luid := winipcfg.LUID(nativeTun.LUID()) guid, err := luid.GUID() @@ -73,7 +80,7 @@ func (r *winRouter) Up() error { return nil } -func (r *winRouter) Set(cfg *Config) error { +func (r *winRouter) Set(cfg 
*router.Config) error { if cfg == nil { cfg = &shutdownConfig } @@ -124,10 +131,6 @@ func (r *winRouter) Close() error { return nil } -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. -} - // firewallTweaker changes the Windows firewall. Normally this wouldn't be so complicated, // but it can be REALLY SLOW to change the Windows firewall for reasons not understood. // Like 4 minutes slow. But usually it's tens of milliseconds. diff --git a/wgengine/router/router_windows_test.go b/wgengine/router/osrouter/router_windows_test.go similarity index 95% rename from wgengine/router/router_windows_test.go rename to wgengine/router/osrouter/router_windows_test.go index 9989ddbc735a6..119b6a77867f9 100644 --- a/wgengine/router/router_windows_test.go +++ b/wgengine/router/osrouter/router_windows_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "path/filepath" diff --git a/wgengine/router/runner.go b/wgengine/router/osrouter/runner.go similarity index 99% rename from wgengine/router/runner.go rename to wgengine/router/osrouter/runner.go index 8fa068e335e66..7afb7fdc2088f 100644 --- a/wgengine/router/runner.go +++ b/wgengine/router/osrouter/runner.go @@ -3,7 +3,7 @@ //go:build linux -package router +package osrouter import ( "errors" diff --git a/wgengine/router/router.go b/wgengine/router/router.go index edd7d14cbd4be..7723138f4b587 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -6,10 +6,15 @@ package router import ( + "errors" + "fmt" "net/netip" "reflect" + "runtime" "github.com/tailscale/wireguard-go/tun" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" @@ -41,6 +46,22 @@ type Router interface { Close() error } +// NewOpts are the options passed to the NewUserspaceRouter hook. 
+type NewOpts struct { + Logf logger.Logf // required + Tun tun.Device // required + NetMon *netmon.Monitor // optional + Health *health.Tracker // required (but TODO: support optional later) + Bus *eventbus.Bus // required +} + +// HookNewUserspaceRouter is the registration point for router implementations +// to register a constructor for userspace routers. It's meant for implementations +// in wgengine/router/osrouter. +// +// If no implementation is registered, [New] will return an error. +var HookNewUserspaceRouter feature.Hook[func(NewOpts) (Router, error)] + // New returns a new Router for the current platform, using the // provided tun device. // @@ -50,14 +71,33 @@ func New(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, ) (Router, error) { logf = logger.WithPrefix(logf, "router: ") - return newUserspaceRouter(logf, tundev, netMon, health, bus) + if f, ok := HookNewUserspaceRouter.GetOk(); ok { + return f(NewOpts{ + Logf: logf, + Tun: tundev, + NetMon: netMon, + Health: health, + Bus: bus, + }) + } + if !buildfeatures.HasOSRouter { + return nil, errors.New("router: tailscaled was built without OSRouter support") + } + return nil, fmt.Errorf("unsupported OS %q", runtime.GOOS) } +// HookCleanUp is the optional registration point for router implementations +// to register a cleanup function for [CleanUp] to use. It's meant for +// implementations in wgengine/router/osrouter. +var HookCleanUp feature.Hook[func(_ logger.Logf, _ *netmon.Monitor, ifName string)] + // CleanUp restores the system network configuration to its original state // in case the Tailscale daemon terminated without closing the router. // No other state needs to be instantiated before this runs. 
func CleanUp(logf logger.Logf, netMon *netmon.Monitor, interfaceName string) { - cleanUp(logf, interfaceName) + if f, ok := HookCleanUp.GetOk(); ok { + f(logf, netMon, interfaceName) + } } // Config is the subset of Tailscale configuration that is relevant to @@ -106,8 +146,3 @@ func (a *Config) Equal(b *Config) bool { } return reflect.DeepEqual(a, b) } - -// shutdownConfig is a routing configuration that removes all router -// state from the OS. It's the config used when callers pass in a nil -// Config. -var shutdownConfig = Config{} diff --git a/wgengine/router/router_android.go b/wgengine/router/router_android.go deleted file mode 100644 index de680606f19cf..0000000000000 --- a/wgengine/router/router_android.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build android - -package router - -import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" - "tailscale.com/util/eventbus" -) - -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, _ *eventbus.Bus) (Router, error) { - // Note, this codepath is _not_ used when building the android app - // from github.com/tailscale/tailscale-android. The android app - // constructs its own wgengine with a custom router implementation - // that plugs into Android networking APIs. - // - // In practice, the only place this fake router gets used is when - // you build a tsnet app for android, in which case we don't want - // to touch the OS network stack and a no-op router is correct. - return NewFake(logf), nil -} - -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. 
-} diff --git a/wgengine/router/router_darwin.go b/wgengine/router/router_darwin.go deleted file mode 100644 index ebb2615a0ed1f..0000000000000 --- a/wgengine/router/router_darwin.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package router - -import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" - "tailscale.com/util/eventbus" -) - -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { - return newUserspaceBSDRouter(logf, tundev, netMon, health) -} - -func cleanUp(logger.Logf, string) { - // Nothing to do. -} diff --git a/wgengine/router/router_default.go b/wgengine/router/router_default.go deleted file mode 100644 index 190575973a4ee..0000000000000 --- a/wgengine/router/router_default.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !windows && !linux && !darwin && !openbsd && !freebsd && !plan9 - -package router - -import ( - "fmt" - "runtime" - - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" - "tailscale.com/util/eventbus" -) - -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, _ *eventbus.Bus) (Router, error) { - return nil, fmt.Errorf("unsupported OS %q", runtime.GOOS) -} - -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. 
-} diff --git a/wgengine/router/router_test.go b/wgengine/router/router_test.go index 8842173d7e4b4..fd17b8c5d5297 100644 --- a/wgengine/router/router_test.go +++ b/wgengine/router/router_test.go @@ -11,15 +11,6 @@ import ( "tailscale.com/types/preftype" ) -//lint:ignore U1000 used in Windows/Linux tests only -func mustCIDRs(ss ...string) []netip.Prefix { - var ret []netip.Prefix - for _, s := range ss { - ret = append(ret, netip.MustParsePrefix(s)) - } - return ret -} - func TestConfigEqual(t *testing.T) { testedFields := []string{ "LocalAddrs", "Routes", "LocalRoutes", "NewMTU", From 72bc7334fbcba0c03d3f926167f4c3149a4bb36d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 13:57:04 -0700 Subject: [PATCH 0453/1093] net/speedtest: mark flaky test, and skip it by default as it's slow Updates #17338 Change-Id: I1f3dbc154ba274f615cc77d2aa76f6ff9d40137c Signed-off-by: Brad Fitzpatrick --- net/speedtest/speedtest_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/net/speedtest/speedtest_test.go b/net/speedtest/speedtest_test.go index 55dcbeea1abdf..69fdb6b5685c0 100644 --- a/net/speedtest/speedtest_test.go +++ b/net/speedtest/speedtest_test.go @@ -4,12 +4,22 @@ package speedtest import ( + "flag" "net" "testing" "time" + + "tailscale.com/cmd/testwrapper/flakytest" ) +var manualTest = flag.Bool("do-speedtest", false, "if true, run the speedtest TestDownload test. Otherwise skip it because it's slow and flaky; see https://github.com/tailscale/tailscale/issues/17338") + func TestDownload(t *testing.T) { + if !*manualTest { + t.Skip("skipping slow test without --do-speedtest") + } + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17338") + // start a listener and find the port where the server will be listening. 
l, err := net.Listen("tcp", ":0") if err != nil { From 1aaa1648c4e7fd5a690c17d87cf056816ebe4553 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 29 Sep 2025 17:44:59 +0100 Subject: [PATCH 0454/1093] README: update the version of Go in the README Updates #17064 Signed-off-by: Alex Chan --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2c9713a6f339c..70b92d411b9de 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ not open source. ## Building -We always require the latest Go release, currently Go 1.23. (While we build +We always require the latest Go release, currently Go 1.25. (While we build releases with our [Go fork](https://github.com/tailscale/go/), its use is not required.) From bdb69d1b1fc4ee08cfb13b5d0b7bab79e162bd4e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 14:03:32 -0700 Subject: [PATCH 0455/1093] net/dns/resolver: fix data race in test Fixes #17339 Change-Id: I486d2a0e0931d701923c1e0f8efbda99510ab19b Signed-off-by: Brad Fitzpatrick --- net/dns/resolver/forwarder.go | 20 ++++++++++-------- net/dns/resolver/forwarder_test.go | 34 +++++++++--------------------- 2 files changed, 21 insertions(+), 33 deletions(-) diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index c87fbd5041a93..105229fb81880 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -217,11 +217,12 @@ type resolverAndDelay struct { // forwarder forwards DNS packets to a number of upstream nameservers. 
type forwarder struct { - logf logger.Logf - netMon *netmon.Monitor // always non-nil - linkSel ForwardLinkSelector // TODO(bradfitz): remove this when tsdial.Dialer absorbs it - dialer *tsdial.Dialer - health *health.Tracker // always non-nil + logf logger.Logf + netMon *netmon.Monitor // always non-nil + linkSel ForwardLinkSelector // TODO(bradfitz): remove this when tsdial.Dialer absorbs it + dialer *tsdial.Dialer + health *health.Tracker // always non-nil + verboseFwd bool // if true, log all DNS forwarding controlKnobs *controlknobs.Knobs // or nil @@ -258,6 +259,7 @@ func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkS dialer: dialer, health: health, controlKnobs: knobs, + verboseFwd: verboseDNSForward(), } f.ctx, f.ctxCancel = context.WithCancel(context.Background()) return f @@ -515,7 +517,7 @@ var ( // // send expects the reply to have the same txid as txidOut. func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDelay) (ret []byte, err error) { - if verboseDNSForward() { + if f.verboseFwd { id := forwarderCount.Add(1) domain, typ, _ := nameFromQuery(fq.packet) f.logf("forwarder.send(%q, %d, %v, %d) [%d] ...", rr.name.Addr, fq.txid, typ, len(domain), id) @@ -978,7 +980,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo } defer fq.closeOnCtxDone.Close() - if verboseDNSForward() { + if f.verboseFwd { domainSha256 := sha256.Sum256([]byte(domain)) domainSig := base64.RawStdEncoding.EncodeToString(domainSha256[:3]) f.logf("request(%d, %v, %d, %s) %d...", fq.txid, typ, len(domain), domainSig, len(fq.packet)) @@ -1023,7 +1025,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo metricDNSFwdErrorContext.Add(1) return fmt.Errorf("waiting to send response: %w", ctx.Err()) case responseChan <- packet{v, query.family, query.addr}: - if verboseDNSForward() { + if f.verboseFwd { f.logf("response(%d, %v, %d) = %d, nil", fq.txid, typ, len(domain), 
len(v)) } metricDNSFwdSuccess.Add(1) @@ -1053,7 +1055,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo } f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) case responseChan <- res: - if verboseDNSForward() { + if f.verboseFwd { f.logf("forwarder response(%d, %v, %d) = %d, %v", fq.txid, typ, len(domain), len(res.bs), firstErr) } return nil diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index f77388ca721da..b5cc7d018bb96 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -12,7 +12,6 @@ import ( "io" "net" "net/netip" - "os" "reflect" "slices" "strings" @@ -23,7 +22,6 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" - "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" @@ -400,13 +398,6 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on return } -func enableDebug(tb testing.TB) { - const debugKnob = "TS_DEBUG_DNS_FORWARD_SEND" - oldVal := os.Getenv(debugKnob) - envknob.Setenv(debugKnob, "true") - tb.Cleanup(func() { envknob.Setenv(debugKnob, oldVal) }) -} - func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) { name := dns.MustNewName(domain) @@ -554,9 +545,11 @@ func mustRunTestQuery(tb testing.TB, request []byte, modify func(*forwarder), po return resp } -func TestForwarderTCPFallback(t *testing.T) { - enableDebug(t) +func beVerbose(f *forwarder) { + f.verboseFwd = true +} +func TestForwarderTCPFallback(t *testing.T) { const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. 
@@ -576,7 +569,7 @@ func TestForwarderTCPFallback(t *testing.T) { } }) - resp := mustRunTestQuery(t, request, nil, port) + resp := mustRunTestQuery(t, request, beVerbose, port) if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -592,8 +585,6 @@ func TestForwarderTCPFallback(t *testing.T) { // Test to ensure that if the UDP listener is unresponsive, we always make a // TCP request even if we never get a response. func TestForwarderTCPFallbackTimeout(t *testing.T) { - enableDebug(t) - const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -614,7 +605,7 @@ func TestForwarderTCPFallbackTimeout(t *testing.T) { } }) - resp := mustRunTestQuery(t, request, nil, port) + resp := mustRunTestQuery(t, request, beVerbose, port) if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -624,8 +615,6 @@ func TestForwarderTCPFallbackTimeout(t *testing.T) { } func TestForwarderTCPFallbackDisabled(t *testing.T) { - enableDebug(t) - const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -646,6 +635,7 @@ func TestForwarderTCPFallbackDisabled(t *testing.T) { }) resp := mustRunTestQuery(t, request, func(fwd *forwarder) { + fwd.verboseFwd = true // Disable retries for this test. fwd.controlKnobs = &controlknobs.Knobs{} fwd.controlKnobs.DisableDNSForwarderTCPRetries.Store(true) @@ -668,8 +658,6 @@ func TestForwarderTCPFallbackDisabled(t *testing.T) { // Test to ensure that we propagate DNS errors func TestForwarderTCPFallbackError(t *testing.T) { - enableDebug(t) - const domain = "error-response.tailscale.com." 
// Our response is a SERVFAIL @@ -686,7 +674,7 @@ func TestForwarderTCPFallbackError(t *testing.T) { } }) - resp, err := runTestQuery(t, request, nil, port) + resp, err := runTestQuery(t, request, beVerbose, port) if !sawRequest.Load() { t.Error("did not see DNS request") } @@ -706,8 +694,6 @@ func TestForwarderTCPFallbackError(t *testing.T) { // Test to ensure that if we have more than one resolver, and at least one of them // returns a successful response, we propagate it. func TestForwarderWithManyResolvers(t *testing.T) { - enableDebug(t) - const domain = "example.com." request := makeTestRequest(t, domain) @@ -810,7 +796,7 @@ func TestForwarderWithManyResolvers(t *testing.T) { for i := range tt.responses { ports[i] = runDNSServer(t, nil, tt.responses[i], func(isTCP bool, gotRequest []byte) {}) } - gotResponse, err := runTestQuery(t, request, nil, ports...) + gotResponse, err := runTestQuery(t, request, beVerbose, ports...) if err != nil { t.Fatalf("wanted nil, got %v", err) } @@ -869,7 +855,7 @@ func TestNXDOMAINIncludesQuestion(t *testing.T) { port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { }) - res, err := runTestQuery(t, request, nil, port) + res, err := runTestQuery(t, request, beVerbose, port) if err != nil { t.Fatal(err) } From 54e50230a10dbbf1a251589b683291df780783d9 Mon Sep 17 00:00:00 2001 From: Brian Palmer Date: Mon, 29 Sep 2025 16:30:23 -0600 Subject: [PATCH 0456/1093] net/memnet: allow listener address reuse (#17342) Listen address reuse is allowed as soon as the previous listener is closed. There is no attempt made to emulate more complex address reuse logic. 
Updates tailscale/corp#28078 Change-Id: I56be1c4848e7b3f9fc97fd4ef13a2de9dcfab0f2 Signed-off-by: Brian Palmer --- net/memnet/listener.go | 6 ++++++ net/memnet/memnet.go | 5 +++++ net/memnet/memnet_test.go | 23 +++++++++++++++++++++++ 3 files changed, 34 insertions(+) create mode 100644 net/memnet/memnet_test.go diff --git a/net/memnet/listener.go b/net/memnet/listener.go index d84a2e443cbff..202026e160b27 100644 --- a/net/memnet/listener.go +++ b/net/memnet/listener.go @@ -22,6 +22,7 @@ type Listener struct { ch chan Conn closeOnce sync.Once closed chan struct{} + onClose func() // or nil // NewConn, if non-nil, is called to create a new pair of connections // when dialing. If nil, NewConn is used. @@ -44,9 +45,14 @@ func (l *Listener) Addr() net.Addr { // Close closes the pipe listener. func (l *Listener) Close() error { + var cleanup func() l.closeOnce.Do(func() { + cleanup = l.onClose close(l.closed) }) + if cleanup != nil { + cleanup() + } return nil } diff --git a/net/memnet/memnet.go b/net/memnet/memnet.go index 7c2435684059e..1e43df2daaaae 100644 --- a/net/memnet/memnet.go +++ b/net/memnet/memnet.go @@ -61,6 +61,11 @@ func (m *Network) Listen(network, address string) (net.Listener, error) { } ln := Listen(key) m.lns[key] = ln + ln.onClose = func() { + m.mu.Lock() + delete(m.lns, key) + m.mu.Unlock() + } return ln, nil } } diff --git a/net/memnet/memnet_test.go b/net/memnet/memnet_test.go new file mode 100644 index 0000000000000..38086cec05f3c --- /dev/null +++ b/net/memnet/memnet_test.go @@ -0,0 +1,23 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package memnet + +import "testing" + +func TestListenAddressReuse(t *testing.T) { + var nw Network + ln1, err := nw.Listen("tcp", "127.0.0.1:80") + if err != nil { + t.Fatalf("listen failed: %v", err) + } + if _, err := nw.Listen("tcp", "127.0.0.1:80"); err == nil { + t.Errorf("listen on in-use address succeeded") + } + if err := ln1.Close(); err != nil { + t.Fatalf("close 
failed: %v", err) + } + if _, err := nw.Listen("tcp", "127.0.0.1:80"); err != nil { + t.Errorf("listen on same address after close failed: %v", err) + } +} From 69c79cb9f3f9e2fe9ce4333c9a034591709e469b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 15:26:24 -0700 Subject: [PATCH 0457/1093] ipn/store, feature/condregister: move AWS + Kube store registration to condregister Otherwise they're uselessly imported by tsnet applications, even though they do nothing. tsnet applications wanting to use these already had to explicitly import them and use kubestore.New or awsstore.New and assign those to their tsnet.Server.Store fields. Updates #12614 Change-Id: I358e3923686ddf43a85e6923c3828ba2198991d4 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 97 ++--------------- cmd/tailscaled/depaware.txt | 4 +- cmd/tsidp/depaware.txt | 102 ++---------------- .../condregister/maybe_store_aws.go | 5 +- .../condregister/maybe_store_kube.go | 5 +- tsnet/depaware.txt | 102 ++---------------- 6 files changed, 40 insertions(+), 275 deletions(-) rename ipn/store/store_aws.go => feature/condregister/maybe_store_aws.go (76%) rename ipn/store/store_kube.go => feature/condregister/maybe_store_kube.go (74%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 223baa43c0a3d..3aa0a496ce764 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -5,81 +5,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from 
github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from 
github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L 
github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io 
from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus github.com/coder/websocket from tailscale.com/util/eventbus @@ -136,7 +61,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/google/gofuzz/bytesource from github.com/google/gofuzz github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -796,8 +720,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ 
tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator+ + tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ tailscale.com/k8s-operator from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/api-proxy from tailscale.com/cmd/k8s-operator @@ -1026,7 +949,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ bytes from archive/tar+ cmp from github.com/gaissmai/bart+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + compress/gzip from github.com/emicklei/go-restful/v3+ compress/zlib from debug/pe+ container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp+ container/list from crypto/tls+ @@ -1091,7 +1014,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 @@ -1110,7 +1033,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ + encoding/xml from github.com/emicklei/go-restful/v3+ errors from archive/tar+ expvar from github.com/prometheus/client_golang/prometheus+ flag from github.com/spf13/pflag+ @@ -1179,7 +1102,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/unsafeheader 
from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ - io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from go/ast+ log from expvar+ log/internal from log+ @@ -1198,25 +1121,25 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ net from crypto/tls+ net/http from expvar+ net/http/httptrace from github.com/prometheus-community/pro-bing+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from sigs.k8s.io/controller-runtime/pkg/manager+ net/netip from github.com/gaissmai/bart+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/exec from github.com/godbus/dbus/v5+ os/signal from sigs.k8s.io/controller-runtime/pkg/manager/signals os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + regexp from github.com/davecgh/go-spew/spew+ regexp/syntax from regexp runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 4051000a68a81..90cba0734d30c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -304,8 +304,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 
tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver+ tailscale.com/ipn/policy from tailscale.com/feature/portlist tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store + L tailscale.com/ipn/store/awsstore from tailscale.com/feature/condregister + L tailscale.com/ipn/store/kubestore from tailscale.com/feature/condregister tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index dfb6553bdc5ad..fb97296bcaf27 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -5,81 +5,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from 
github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - 
L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from 
github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -105,7 +30,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -238,12 +162,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ - L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore - tailscale.com/kube/kubetypes from tailscale.com/envknob+ + tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from 
tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal @@ -456,7 +376,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar bytes from archive/tar+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + compress/gzip from golang.org/x/net/http2+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ @@ -521,7 +441,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 @@ -538,7 +458,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ + encoding/xml from github.com/tailscale/goupnp+ errors from archive/tar+ expvar from tailscale.com/health+ flag from tailscale.com/cmd/tsidp+ @@ -598,7 +518,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ - io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from bytes+ log from expvar+ log/internal from log @@ -613,26 +533,26 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from 
expvar+ - net/http/httptrace from github.com/aws/smithy-go/transport/http+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httptrace from github.com/prometheus-community/pro-bing+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/exec from github.com/godbus/dbus/v5+ os/signal from tailscale.com/cmd/tsidp os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from archive/tar+ diff --git a/ipn/store/store_aws.go b/feature/condregister/maybe_store_aws.go similarity index 76% rename from ipn/store/store_aws.go rename to feature/condregister/maybe_store_aws.go index 834b657d34df0..48ef06ecf1234 100644 --- a/ipn/store/store_aws.go +++ b/feature/condregister/maybe_store_aws.go @@ -3,16 +3,17 @@ //go:build (ts_aws || (linux && (arm64 || amd64) && !android)) && !ts_omit_aws -package store +package condregister import ( "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/awsstore" "tailscale.com/types/logger" ) func init() { - Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { + store.Register("arn:", func(logf logger.Logf, arg string) 
(ipn.StateStore, error) { ssmARN, opts, err := awsstore.ParseARNAndOpts(arg) if err != nil { return nil, err diff --git a/ipn/store/store_kube.go b/feature/condregister/maybe_store_kube.go similarity index 74% rename from ipn/store/store_kube.go rename to feature/condregister/maybe_store_kube.go index 7eac75c196990..0aa2c1692ff6b 100644 --- a/ipn/store/store_kube.go +++ b/feature/condregister/maybe_store_kube.go @@ -3,18 +3,19 @@ //go:build (ts_kube || (linux && (arm64 || amd64) && !android)) && !ts_omit_kube -package store +package condregister import ( "strings" "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/kubestore" "tailscale.com/types/logger" ) func init() { - Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { + store.Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { secretName := strings.TrimPrefix(path, "kube:") return kubestore.New(logf, secretName) }) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index bda491f37cfdb..2e8ca0f0ae46b 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -5,81 +5,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from 
github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context 
from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types 
from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L 
github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm LDW github.com/coder/websocket from tailscale.com/util/eventbus LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -105,7 +30,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -234,12 +158,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - L 
tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ - L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore - tailscale.com/kube/kubetypes from tailscale.com/envknob+ + tailscale.com/kube/kubetypes from tailscale.com/envknob LDW tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal @@ -449,7 +369,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) bytes from archive/tar+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + compress/gzip from golang.org/x/net/http2+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ @@ -514,7 +434,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ DI crypto/x509/internal/macos from crypto/x509 @@ -531,7 +451,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ + encoding/xml from github.com/tailscale/goupnp+ errors from archive/tar+ expvar from tailscale.com/health+ flag from tailscale.com/util/testenv @@ -591,7 +511,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/unsafeheader from internal/reflectlite+ io from archive/tar+ io/fs from archive/tar+ - io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io/ioutil 
from github.com/digitalocean/go-smbios/smbios+ iter from bytes+ log from expvar+ log/internal from log @@ -606,25 +526,25 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from github.com/aws/smithy-go/transport/http+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httptrace from github.com/prometheus-community/pro-bing+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/exec from github.com/godbus/dbus/v5+ os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from archive/tar+ From 038cdb4640275e44fd8cf5a95f23d5d5b4987ba3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 26 Sep 2025 16:41:26 -0700 Subject: [PATCH 0458/1093] feature/clientupdate: move clientupdate to a modular feature, disabled for tsnet Updates #12614 Change-Id: I5f685dec84a5396b7c2b66f2788ae3d286e1ddc6 Signed-off-by: Brad Fitzpatrick --- client/web/web.go | 4 +- clientupdate/clientupdate.go | 9 +- cmd/k8s-operator/depaware.txt | 55 +- cmd/tailscale/cli/set.go | 23 +- 
cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 61 +- cmd/tailscaled/depaware.txt | 3 +- cmd/tsidp/depaware.txt | 67 ++- .../feature_clientupdate_disabled.go | 13 + .../feature_clientupdate_enabled.go | 13 + feature/clientupdate/clientupdate.go | 530 ++++++++++++++++++ feature/condregister/maybe_clientupdate.go | 8 + feature/featuretags/featuretags.go | 1 + feature/hooks.go | 17 + ipn/ipnlocal/autoupdate.go | 65 --- ipn/ipnlocal/autoupdate_disabled.go | 18 - ipn/ipnlocal/c2n.go | 190 ------- ipn/ipnlocal/local.go | 155 +---- ipn/ipnlocal/local_test.go | 6 +- ipn/ipnlocal/profiles.go | 4 +- ipn/ipnlocal/profiles_test.go | 5 +- ipn/localapi/localapi.go | 37 +- tsnet/depaware.txt | 67 ++- tstest/integration/integration_test.go | 5 +- 24 files changed, 749 insertions(+), 609 deletions(-) create mode 100644 feature/buildfeatures/feature_clientupdate_disabled.go create mode 100644 feature/buildfeatures/feature_clientupdate_enabled.go create mode 100644 feature/clientupdate/clientupdate.go create mode 100644 feature/condregister/maybe_clientupdate.go create mode 100644 feature/hooks.go delete mode 100644 ipn/ipnlocal/autoupdate.go delete mode 100644 ipn/ipnlocal/autoupdate_disabled.go diff --git a/client/web/web.go b/client/web/web.go index d88239843e190..2421403c16ab0 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -24,9 +24,9 @@ import ( "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" "tailscale.com/envknob" "tailscale.com/envknob/featureknob" + "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -1049,7 +1049,7 @@ func availableFeatures() map[string]bool { "advertise-routes": true, // available on all platforms "use-exit-node": featureknob.CanUseExitNode() == nil, "ssh": featureknob.CanRunTailscaleSSH() == nil, - "auto-update": version.IsUnstableBuild() && clientupdate.CanAutoUpdate(), + "auto-update": version.IsUnstableBuild() && 
feature.CanAutoUpdate(), } return features } diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index ffd3fb03bb80d..84b289615f911 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -27,6 +27,7 @@ import ( "strconv" "strings" + "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/types/lazy" "tailscale.com/types/logger" @@ -252,9 +253,13 @@ func (up *Updater) getUpdateFunction() (fn updateFunction, canAutoUpdate bool) { var canAutoUpdateCache lazy.SyncValue[bool] -// CanAutoUpdate reports whether auto-updating via the clientupdate package +func init() { + feature.HookCanAutoUpdate.Set(canAutoUpdate) +} + +// canAutoUpdate reports whether auto-updating via the clientupdate package // is supported for the current os/distro. -func CanAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) } +func canAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) } func canAutoUpdateUncached() bool { if version.IsMacSysExt() { diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 3aa0a496ce764..a85f5731b22c3 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -12,7 +12,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump - W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com @@ -60,7 +60,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by 
github.com/tailscale/ github.com/google/gofuzz from k8s.io/apimachinery/pkg/apis/meta/v1+ github.com/google/gofuzz/bytesource from github.com/google/gofuzz github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + github.com/hdevalence/ed25519consensus from tailscale.com/tka github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -686,8 +686,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient @@ -780,7 +778,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local @@ -829,7 +827,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 
tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics @@ -843,7 +841,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -869,8 +867,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ + W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -907,7 +905,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/salsa20/salsa from 
golang.org/x/crypto/nacl/box+ LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ @@ -944,13 +942,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from github.com/gaissmai/bart+ compress/flate from compress/gzip+ compress/gzip from github.com/emicklei/go-restful/v3+ - compress/zlib from debug/pe+ + compress/zlib from github.com/emicklei/go-restful/v3+ container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp+ container/list from crypto/tls+ context from crypto/tls+ @@ -1034,10 +1031,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ encoding/json from expvar+ encoding/pem from crypto/tls+ encoding/xml from github.com/emicklei/go-restful/v3+ - errors from archive/tar+ + errors from bufio+ expvar from github.com/prometheus/client_golang/prometheus+ flag from github.com/spf13/pflag+ - fmt from archive/tar+ + fmt from compress/flate+ go/ast from go/doc+ go/build/constraint from go/parser go/doc from k8s.io/apimachinery/pkg/runtime @@ -1063,7 +1060,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from 
crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -1100,8 +1097,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ + io from bufio+ + io/fs from crypto/x509+ io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from go/ast+ log from expvar+ @@ -1110,7 +1107,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ log/slog/internal from log/slog log/slog/internal/buffer from log/slog maps from sigs.k8s.io/controller-runtime/pkg/predicate+ - math from archive/tar+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/google/go-cmp/cmp+ @@ -1132,29 +1129,29 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ os from crypto/internal/sysrand+ os/exec from github.com/godbus/dbus/v5+ os/signal from sigs.k8s.io/controller-runtime/pkg/manager/signals - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ regexp from github.com/davecgh/go-spew/spew+ regexp/syntax from regexp - runtime from archive/tar+ + runtime from crypto/internal/fips140+ runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from encoding/base32+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ + strconv from compress/flate+ + strings from bufio+ W structs from internal/syscall/windows - sync from archive/tar+ + sync 
from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from k8s.io/apimachinery/pkg/util/diff+ text/template from html/template text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index c0ce0b1c137ac..1807ada1329c3 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -15,8 +15,8 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/clientupdate" "tailscale.com/cmd/tailscale/cli/ffcomplete" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/netutil" "tailscale.com/net/tsaddr" @@ -226,21 +226,14 @@ func runSet(ctx context.Context, args []string) (retErr error) { return err } } - if maskedPrefs.AutoUpdateSet.ApplySet { - if !clientupdate.CanAutoUpdate() { - return errors.New("automatic updates are not supported on this platform") + if maskedPrefs.AutoUpdateSet.ApplySet && buildfeatures.HasClientUpdate && version.IsMacSysExt() { + apply := "0" + if maskedPrefs.AutoUpdate.Apply.EqualBool(true) { + apply = "1" } - // On macsys, tailscaled will set the Sparkle auto-update setting. It - // does not use clientupdate. 
- if version.IsMacSysExt() { - apply := "0" - if maskedPrefs.AutoUpdate.Apply.EqualBool(true) { - apply = "1" - } - out, err := exec.Command("defaults", "write", "io.tailscale.ipn.macsys", "SUAutomaticallyUpdate", apply).CombinedOutput() - if err != nil { - return fmt.Errorf("failed to enable automatic updates: %v, %q", err, out) - } + out, err := exec.Command("defaults", "write", "io.tailscale.ipn.macsys", "SUAutomaticallyUpdate", apply).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to enable automatic updates: %v, %q", err, out) } } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 2d724a9009bec..2df6007025c5a 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -77,7 +77,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli - tailscale.com/clientupdate from tailscale.com/client/web+ + tailscale.com/clientupdate from tailscale.com/cmd/tailscale/cli LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscale tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 6cc3733a98280..42d8f9181936d 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -1,7 +1,5 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) - filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus - filippo.io/edwards25519/field from filippo.io/edwards25519 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset 
from github.com/gaissmai/bart+ @@ -13,7 +11,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -44,8 +41,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/appc from tailscale.com/ipn/ipnlocal+ tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ - tailscale.com/clientupdate from tailscale.com/ipn/ipnlocal+ - tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ @@ -65,7 +60,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/cmd/tailscaled+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ - tailscale.com/hostinfo from tailscale.com/clientupdate+ + tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ @@ -116,7 +111,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ - tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + tailscale.com/net/tshttpproxy from tailscale.com/cmd/tailscaled+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/omit from tailscale.com/ipn/conffile @@ -142,7 +137,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/cmd/tailscaled+ - tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext @@ -161,7 +156,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/util/dirwalk from tailscale.com/metrics @@ -171,11 +165,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth 💣 tailscale.com/util/hashx from tailscale.com/util/deephash - tailscale.com/util/httpm from tailscale.com/clientupdate/distsign+ + tailscale.com/util/httpm from 
tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/osdiag from tailscale.com/ipn/localapi tailscale.com/util/osshare from tailscale.com/cmd/tailscaled @@ -195,8 +189,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/util/winutil from tailscale.com/ipn/ipnauth tailscale.com/util/zstdframe from tailscale.com/control/controlclient - tailscale.com/version from tailscale.com/clientupdate+ - tailscale.com/version/distro from tailscale.com/clientupdate+ + tailscale.com/version from tailscale.com/cmd/tailscaled+ + tailscale.com/version/distro from tailscale.com/cmd/tailscaled+ tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ @@ -249,9 +243,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/derp - archive/tar from tailscale.com/clientupdate bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip compress/gzip from golang.org/x/net/http2+ @@ -329,10 +322,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - errors from archive/tar+ + errors from bufio+ expvar from tailscale.com/health+ flag 
from tailscale.com/cmd/tailscaled+ - fmt from archive/tar+ + fmt from compress/flate+ hash from crypto+ hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem @@ -348,7 +341,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/filepathlite from os+ internal/fmtsort from fmt internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -379,14 +372,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ + io from bufio+ + io/fs from crypto/x509+ io/ioutil from github.com/digitalocean/go-smbios/smbios iter from bytes+ log from expvar+ log/internal from log - maps from archive/tar+ - math from archive/tar+ + maps from crypto/x509+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from bytes+ math/rand from github.com/mdlayher/netlink+ @@ -405,27 +398,27 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from tailscale.com/clientupdate+ + os/exec from tailscale.com/hostinfo+ os/signal from tailscale.com/cmd/tailscaled - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ + os/user from tailscale.com/ipn/ipnauth+ + path from io/fs+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ regexp from internal/profile+ regexp/syntax from regexp - runtime from archive/tar+ + runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ runtime/pprof from net/http/pprof+ 
runtime/trace from net/http/pprof - slices from archive/tar+ + slices from crypto/tls+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ - sync from archive/tar+ + strconv from compress/flate+ + strings from bufio+ + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from runtime/pprof - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 90cba0734d30c..a3bac20aab30d 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -246,7 +246,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ + tailscale.com/clientupdate from tailscale.com/feature/clientupdate LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled+ tailscale.com/cmd/tailscaled/tailscaledhooks from tailscale.com/cmd/tailscaled+ @@ -273,6 +273,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature from tailscale.com/feature/wakeonlan+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/capture from tailscale.com/feature/condregister + tailscale.com/feature/clientupdate from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister diff --git 
a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index fb97296bcaf27..8a78af4932f2f 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -9,7 +9,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com @@ -28,8 +28,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + D github.com/google/uuid from github.com/prometheus-community/pro-bing + github.com/hdevalence/ed25519consensus from tailscale.com/tka L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -128,8 +128,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from 
tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient @@ -208,7 +206,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local @@ -237,7 +235,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/client/local+ - tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/lazy from tailscale.com/cmd/tsidp+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext @@ -256,12 +254,12 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from 
tailscale.com/ipn/ipnlocal+ L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ @@ -270,7 +268,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/cmd/tsidp+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -296,8 +294,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ + W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -336,7 +334,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ LD golang.org/x/crypto/ssh from 
tailscale.com/ipn/ipnlocal LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ @@ -371,9 +369,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ compress/gzip from golang.org/x/net/http2+ @@ -446,7 +443,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ - DW database/sql/driver from github.com/google/uuid + D database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ @@ -459,11 +456,11 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar encoding/json from expvar+ encoding/pem from crypto/tls+ encoding/xml from github.com/tailscale/goupnp+ - errors from archive/tar+ + errors from bufio+ expvar from tailscale.com/health+ flag from tailscale.com/cmd/tsidp+ - fmt from archive/tar+ - hash from compress/zlib+ + fmt from compress/flate+ + hash from crypto+ W hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem @@ -480,7 +477,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/filepathlite from os+ internal/fmtsort from 
fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -516,14 +513,14 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ + io from bufio+ + io/fs from crypto/x509+ io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from bytes+ log from expvar+ log/internal from log - maps from archive/tar+ - math from archive/tar+ + maps from crypto/x509+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from bytes+ math/rand from github.com/fxamacker/cbor/v2+ @@ -545,28 +542,28 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar os from crypto/internal/sysrand+ os/exec from github.com/godbus/dbus/v5+ os/signal from tailscale.com/cmd/tsidp - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp - runtime from archive/tar+ + runtime from crypto/internal/fips140+ runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof - slices from archive/tar+ + slices from crypto/tls+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ + strconv from compress/flate+ + strings from bufio+ W structs from internal/syscall/windows - sync from archive/tar+ + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ 
text/tabwriter from runtime/pprof text/template from html/template text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/feature/buildfeatures/feature_clientupdate_disabled.go b/feature/buildfeatures/feature_clientupdate_disabled.go new file mode 100644 index 0000000000000..165c9cc9a409d --- /dev/null +++ b/feature/buildfeatures/feature_clientupdate_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_clientupdate + +package buildfeatures + +// HasClientUpdate is whether the binary was built with support for modular feature "Client auto-update support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientupdate" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientUpdate = false diff --git a/feature/buildfeatures/feature_clientupdate_enabled.go b/feature/buildfeatures/feature_clientupdate_enabled.go new file mode 100644 index 0000000000000..3c3c7878c53a9 --- /dev/null +++ b/feature/buildfeatures/feature_clientupdate_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_clientupdate + +package buildfeatures + +// HasClientUpdate is whether the binary was built with support for modular feature "Client auto-update support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientupdate" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasClientUpdate = true diff --git a/feature/clientupdate/clientupdate.go b/feature/clientupdate/clientupdate.go new file mode 100644 index 0000000000000..45fd21129b4e7 --- /dev/null +++ b/feature/clientupdate/clientupdate.go @@ -0,0 +1,530 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package clientupdate enables the client update feature. +package clientupdate + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "tailscale.com/clientupdate" + "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/ipnstate" + "tailscale.com/ipn/localapi" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/httpm" + "tailscale.com/version" + "tailscale.com/version/distro" +) + +func init() { + ipnext.RegisterExtension("clientupdate", newExt) + + // C2N + ipnlocal.RegisterC2N("GET /update", handleC2NUpdateGet) + ipnlocal.RegisterC2N("POST /update", handleC2NUpdatePost) + + // LocalAPI: + localapi.Register("update/install", serveUpdateInstall) + localapi.Register("update/progress", serveUpdateProgress) +} + +func newExt(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + return &extension{ + logf: logf, + sb: sb, + + lastSelfUpdateState: ipnstate.UpdateFinished, + }, nil +} + +type extension struct { + logf logger.Logf + sb ipnext.SafeBackend + + mu sync.Mutex + + // c2nUpdateStatus is the status of c2n-triggered client update. + c2nUpdateStatus updateStatus + prefs ipn.PrefsView + state ipn.State + + lastSelfUpdateState ipnstate.SelfUpdateStatus + selfUpdateProgress []ipnstate.UpdateProgress + + // offlineAutoUpdateCancel stops offline auto-updates when called. It + // should be used via stopOfflineAutoUpdate and + // maybeStartOfflineAutoUpdate. 
It is nil when offline auto-updates are + // not running. + // + //lint:ignore U1000 only used in Linux and Windows builds in autoupdate.go + offlineAutoUpdateCancel func() +} + +func (e *extension) Name() string { return "clientupdate" } + +func (e *extension) Init(h ipnext.Host) error { + + h.Hooks().ProfileStateChange.Add(e.onChangeProfile) + h.Hooks().BackendStateChange.Add(e.onBackendStateChange) + + // TODO(nickkhyl): remove this after the profileManager refactoring. + // See tailscale/tailscale#15974. + // This same workaround appears in feature/portlist/portlist.go. + profile, prefs := h.Profiles().CurrentProfileState() + e.onChangeProfile(profile, prefs, false) + + return nil +} + +func (e *extension) Shutdown() error { + e.stopOfflineAutoUpdate() + return nil +} + +func (e *extension) onBackendStateChange(newState ipn.State) { + e.mu.Lock() + defer e.mu.Unlock() + e.state = newState + e.updateOfflineAutoUpdateLocked() +} + +func (e *extension) onChangeProfile(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.prefs = prefs + e.updateOfflineAutoUpdateLocked() +} + +func (e *extension) updateOfflineAutoUpdateLocked() { + want := e.prefs.Valid() && e.prefs.AutoUpdate().Apply.EqualBool(true) && + e.state != ipn.Running && e.state != ipn.Starting + + cur := e.offlineAutoUpdateCancel != nil + + if want && !cur { + e.maybeStartOfflineAutoUpdateLocked(e.prefs) + } else if !want && cur { + e.stopOfflineAutoUpdateLocked() + } +} + +type updateStatus struct { + started bool +} + +func (e *extension) clearSelfUpdateProgress() { + e.mu.Lock() + defer e.mu.Unlock() + e.selfUpdateProgress = make([]ipnstate.UpdateProgress, 0) + e.lastSelfUpdateState = ipnstate.UpdateFinished +} + +func (e *extension) GetSelfUpdateProgress() []ipnstate.UpdateProgress { + e.mu.Lock() + defer e.mu.Unlock() + res := make([]ipnstate.UpdateProgress, len(e.selfUpdateProgress)) + copy(res, e.selfUpdateProgress) + return res +} + +func (e 
*extension) DoSelfUpdate() { + e.mu.Lock() + updateState := e.lastSelfUpdateState + e.mu.Unlock() + // don't start an update if one is already in progress + if updateState == ipnstate.UpdateInProgress { + return + } + e.clearSelfUpdateProgress() + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, "")) + up, err := clientupdate.NewUpdater(clientupdate.Arguments{ + Logf: func(format string, args ...any) { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, fmt.Sprintf(format, args...))) + }, + }) + if err != nil { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) + } + err = up.Update() + if err != nil { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) + } else { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFinished, "tailscaled did not restart; please restart Tailscale manually.")) + } +} + +// serveUpdateInstall sends a request to the LocalBackend to start a Tailscale +// self-update. A successful response does not indicate whether the update +// succeeded, only that the request was accepted. Clients should use +// serveUpdateProgress after pinging this endpoint to check how the update is +// going. +func serveUpdateInstall(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + + b := h.LocalBackend() + ext, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusAccepted) + + go ext.DoSelfUpdate() +} + +// serveUpdateProgress returns the status of an in-progress Tailscale self-update. +// This is provided as a slice of ipnstate.UpdateProgress structs with various +// log messages in order from oldest to newest. 
If an update is not in progress, +// the returned slice will be empty. +func serveUpdateProgress(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) + return + } + + b := h.LocalBackend() + ext, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + ups := ext.GetSelfUpdateProgress() + + json.NewEncoder(w).Encode(ups) +} + +func (e *extension) pushSelfUpdateProgress(up ipnstate.UpdateProgress) { + e.mu.Lock() + defer e.mu.Unlock() + e.selfUpdateProgress = append(e.selfUpdateProgress, up) + e.lastSelfUpdateState = up.Status +} + +func handleC2NUpdateGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + e.logf("c2n: GET /update received") + + res := e.newC2NUpdateResponse() + res.Started = e.c2nUpdateStarted() + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +func handleC2NUpdatePost(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + e.logf("c2n: POST /update received") + res := e.newC2NUpdateResponse() + defer func() { + if res.Err != "" { + e.logf("c2n: POST /update failed: %s", res.Err) + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) + }() + + if !res.Enabled { + res.Err = "not enabled" + return + } + if !res.Supported { + res.Err = "not supported" + return + } + + // Do not update if we have active inbound SSH connections. Control can set + // force=true query parameter to override this. 
+ if r.FormValue("force") != "true" && b.ActiveSSHConns() > 0 { + res.Err = "not updating due to active SSH connections" + return + } + + if err := e.startAutoUpdate("c2n"); err != nil { + res.Err = err.Error() + return + } + res.Started = true +} + +func (e *extension) newC2NUpdateResponse() tailcfg.C2NUpdateResponse { + e.mu.Lock() + defer e.mu.Unlock() + + // If NewUpdater does not return an error, we can update the installation. + // + // Note that we create the Updater solely to check for errors; we do not + // invoke it here. For this purpose, it is ok to pass it a zero Arguments. + var upPref ipn.AutoUpdatePrefs + if e.prefs.Valid() { + upPref = e.prefs.AutoUpdate() + } + return tailcfg.C2NUpdateResponse{ + Enabled: envknob.AllowsRemoteUpdate() || upPref.Apply.EqualBool(true), + Supported: feature.CanAutoUpdate() && !version.IsMacSysExt(), + } +} + +func (e *extension) c2nUpdateStarted() bool { + e.mu.Lock() + defer e.mu.Unlock() + return e.c2nUpdateStatus.started +} + +func (e *extension) setC2NUpdateStarted(v bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.c2nUpdateStatus.started = v +} + +func (e *extension) trySetC2NUpdateStarted() bool { + e.mu.Lock() + defer e.mu.Unlock() + if e.c2nUpdateStatus.started { + return false + } + e.c2nUpdateStatus.started = true + return true +} + +// findCmdTailscale looks for the cmd/tailscale that corresponds to the +// currently running cmd/tailscaled. It's up to the caller to verify that the +// two match, but this function does its best to find the right one. Notably, it +// doesn't use $PATH for security reasons. 
+func findCmdTailscale() (string, error) { + self, err := os.Executable() + if err != nil { + return "", err + } + var ts string + switch runtime.GOOS { + case "linux": + if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" { + ts = "/usr/bin/tailscale" + } + if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" { + ts = "/usr/local/bin/tailscale" + } + switch distro.Get() { + case distro.QNAP: + // The volume under /share/ where qpkg are installed is not + // predictable. But the rest of the path is. + ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self) + if err == nil && ok { + ts = filepath.Join(filepath.Dir(self), "tailscale") + } + case distro.Unraid: + if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" { + ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale" + } + } + case "windows": + ts = filepath.Join(filepath.Dir(self), "tailscale.exe") + case "freebsd", "openbsd": + if self == "/usr/local/bin/tailscaled" { + ts = "/usr/local/bin/tailscale" + } + default: + return "", fmt.Errorf("unsupported OS %v", runtime.GOOS) + } + if ts != "" && regularFileExists(ts) { + return ts, nil + } + return "", errors.New("tailscale executable not found in expected place") +} + +func tailscaleUpdateCmd(cmdTS string) *exec.Cmd { + defaultCmd := exec.Command(cmdTS, "update", "--yes") + if runtime.GOOS != "linux" { + return defaultCmd + } + if _, err := exec.LookPath("systemd-run"); err != nil { + return defaultCmd + } + + // When systemd-run is available, use it to run the update command. This + // creates a new temporary unit separate from the tailscaled unit. When + // tailscaled is restarted during the update, systemd won't kill this + // temporary update unit, which could cause unexpected breakage. 
+ // + // We want to use a few optional flags: + // * --wait, to block the update command until completion (added in systemd 232) + // * --pipe, to collect stdout/stderr (added in systemd 235) + // * --collect, to clean up failed runs from memory (added in systemd 236) + // + // We need to check the version of systemd to figure out if those flags are + // available. + // + // The output will look like: + // + // systemd 255 (255.7-1-arch) + // +PAM +AUDIT ... other feature flags ... + systemdVerOut, err := exec.Command("systemd-run", "--version").Output() + if err != nil { + return defaultCmd + } + parts := strings.Fields(string(systemdVerOut)) + if len(parts) < 2 || parts[0] != "systemd" { + return defaultCmd + } + systemdVer, err := strconv.Atoi(parts[1]) + if err != nil { + return defaultCmd + } + if systemdVer >= 236 { + return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes") + } else if systemdVer >= 235 { + return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes") + } else if systemdVer >= 232 { + return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes") + } else { + return exec.Command("systemd-run", cmdTS, "update", "--yes") + } +} + +func regularFileExists(path string) bool { + fi, err := os.Stat(path) + return err == nil && fi.Mode().IsRegular() +} + +// startAutoUpdate triggers an auto-update attempt. The actual update happens +// asynchronously. If another update is in progress, an error is returned. +func (e *extension) startAutoUpdate(logPrefix string) (retErr error) { + // Check if update was already started, and mark as started. + if !e.trySetC2NUpdateStarted() { + return errors.New("update already started") + } + defer func() { + // Clear the started flag if something failed. 
+ if retErr != nil { + e.setC2NUpdateStarted(false) + } + }() + + cmdTS, err := findCmdTailscale() + if err != nil { + return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) + } + var ver struct { + Long string `json:"long"` + } + out, err := exec.Command(cmdTS, "version", "--json").Output() + if err != nil { + return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) + } + if err := json.Unmarshal(out, &ver); err != nil { + return fmt.Errorf("invalid JSON from cmd/tailscale version --json: %w", err) + } + if ver.Long != version.Long() { + return fmt.Errorf("cmd/tailscale version %q does not match tailscaled version %q", ver.Long, version.Long()) + } + + cmd := tailscaleUpdateCmd(cmdTS) + buf := new(bytes.Buffer) + cmd.Stdout = buf + cmd.Stderr = buf + e.logf("%s: running %q", logPrefix, strings.Join(cmd.Args, " ")) + if err := cmd.Start(); err != nil { + return fmt.Errorf("failed to start cmd/tailscale update: %w", err) + } + + go func() { + if err := cmd.Wait(); err != nil { + e.logf("%s: update command failed: %v, output: %s", logPrefix, err, buf) + } else { + e.logf("%s: update attempt complete", logPrefix) + } + e.setC2NUpdateStarted(false) + }() + return nil +} + +func (e *extension) stopOfflineAutoUpdate() { + e.mu.Lock() + defer e.mu.Unlock() + e.stopOfflineAutoUpdateLocked() +} + +func (e *extension) stopOfflineAutoUpdateLocked() { + if e.offlineAutoUpdateCancel == nil { + return + } + e.logf("offline auto-update: stopping update checks") + e.offlineAutoUpdateCancel() + e.offlineAutoUpdateCancel = nil +} + +// e.mu must be held +func (e *extension) maybeStartOfflineAutoUpdateLocked(prefs ipn.PrefsView) { + if !prefs.Valid() || !prefs.AutoUpdate().Apply.EqualBool(true) { + return + } + // AutoUpdate.Apply field in prefs can only be true for platforms that + // support auto-updates. But check it here again, just in case. + if !feature.CanAutoUpdate() { + return + } + // On macsys, auto-updates are managed by Sparkle. 
+ if version.IsMacSysExt() { + return + } + + if e.offlineAutoUpdateCancel != nil { + // Already running. + return + } + ctx, cancel := context.WithCancel(context.Background()) + e.offlineAutoUpdateCancel = cancel + + e.logf("offline auto-update: starting update checks") + go e.offlineAutoUpdate(ctx) +} + +const offlineAutoUpdateCheckPeriod = time.Hour + +func (e *extension) offlineAutoUpdate(ctx context.Context) { + t := time.NewTicker(offlineAutoUpdateCheckPeriod) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case <-t.C: + } + if err := e.startAutoUpdate("offline auto-update"); err != nil { + e.logf("offline auto-update: failed: %v", err) + } + } +} diff --git a/feature/condregister/maybe_clientupdate.go b/feature/condregister/maybe_clientupdate.go new file mode 100644 index 0000000000000..bc694f970c543 --- /dev/null +++ b/feature/condregister/maybe_clientupdate.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_clientupdate + +package condregister + +import _ "tailscale.com/feature/clientupdate" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index c417647413952..2895360994bc3 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -97,6 +97,7 @@ var Features = map[FeatureTag]FeatureMeta{ "capture": {"Capture", "Packet capture", nil}, "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, "cliconndiag": {"CLIConnDiag", "CLI connection error diagnostics", nil}, + "clientupdate": {"ClientUpdate", "Client auto-update support", nil}, "completion": {"Completion", "CLI shell completion", nil}, "dbus": {"DBus", "Linux DBus support", nil}, "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, diff --git a/feature/hooks.go b/feature/hooks.go new file mode 100644 index 0000000000000..fc3971dda9dea --- /dev/null +++ b/feature/hooks.go @@ -0,0 +1,17 @@ +// Copyright (c) Tailscale Inc 
& AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package feature + +// HookCanAutoUpdate is a hook for the clientupdate package +// to conditionally initialize. +var HookCanAutoUpdate Hook[func() bool] + +// CanAutoUpdate reports whether the current binary is built with auto-update +// support and, if so, whether the current platform supports it. +func CanAutoUpdate() bool { + if f, ok := HookCanAutoUpdate.GetOk(); ok { + return f() + } + return false +} diff --git a/ipn/ipnlocal/autoupdate.go b/ipn/ipnlocal/autoupdate.go deleted file mode 100644 index b7d217a10b5b0..0000000000000 --- a/ipn/ipnlocal/autoupdate.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux || windows - -package ipnlocal - -import ( - "context" - "time" - - "tailscale.com/clientupdate" - "tailscale.com/ipn" - "tailscale.com/version" -) - -func (b *LocalBackend) stopOfflineAutoUpdate() { - if b.offlineAutoUpdateCancel != nil { - b.logf("offline auto-update: stopping update checks") - b.offlineAutoUpdateCancel() - b.offlineAutoUpdateCancel = nil - } -} - -func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) { - if !prefs.AutoUpdate().Apply.EqualBool(true) { - return - } - // AutoUpdate.Apply field in prefs can only be true for platforms that - // support auto-updates. But check it here again, just in case. - if !clientupdate.CanAutoUpdate() { - return - } - // On macsys, auto-updates are managed by Sparkle. - if version.IsMacSysExt() { - return - } - - if b.offlineAutoUpdateCancel != nil { - // Already running. 
- return - } - ctx, cancel := context.WithCancel(context.Background()) - b.offlineAutoUpdateCancel = cancel - - b.logf("offline auto-update: starting update checks") - go b.offlineAutoUpdate(ctx) -} - -const offlineAutoUpdateCheckPeriod = time.Hour - -func (b *LocalBackend) offlineAutoUpdate(ctx context.Context) { - t := time.NewTicker(offlineAutoUpdateCheckPeriod) - defer t.Stop() - for { - select { - case <-ctx.Done(): - return - case <-t.C: - } - if err := b.startAutoUpdate("offline auto-update"); err != nil { - b.logf("offline auto-update: failed: %v", err) - } - } -} diff --git a/ipn/ipnlocal/autoupdate_disabled.go b/ipn/ipnlocal/autoupdate_disabled.go deleted file mode 100644 index 88ed68c95fd48..0000000000000 --- a/ipn/ipnlocal/autoupdate_disabled.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !(linux || windows) - -package ipnlocal - -import ( - "tailscale.com/ipn" -) - -func (b *LocalBackend) stopOfflineAutoUpdate() { - // Not supported on this platform. -} - -func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) { - // Not supported on this platform. 
-} diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 2b48b19fa90bf..cbc4cae788bb4 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -5,23 +5,16 @@ package ipnlocal import ( "encoding/json" - "errors" "fmt" "io" "net/http" - "os" - "os/exec" "path" - "path/filepath" "reflect" - "runtime" "strconv" "strings" "time" - "tailscale.com/clientupdate" "tailscale.com/control/controlclient" - "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/net/sockstats" "tailscale.com/posture" @@ -34,7 +27,6 @@ import ( "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/ptype" "tailscale.com/version" - "tailscale.com/version/distro" ) // c2nHandlers maps an HTTP method and URI path (without query parameters) to @@ -60,10 +52,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // SSH req("/ssh/usernames"): handleC2NSSHUsernames, - // Auto-updates. - req("GET /update"): handleC2NUpdateGet, - req("POST /update"): handleC2NUpdatePost, - // Device posture. req("GET /posture/identity"): handleC2NPostureIdentityGet, @@ -337,50 +325,6 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R w.WriteHeader(http.StatusNoContent) } -func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /update received") - - res := b.newC2NUpdateResponse() - res.Started = b.c2nUpdateStarted() - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func handleC2NUpdatePost(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: POST /update received") - res := b.newC2NUpdateResponse() - defer func() { - if res.Err != "" { - b.logf("c2n: POST /update failed: %s", res.Err) - } - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) - }() - - if !res.Enabled { - res.Err = "not enabled" - return - } - if !res.Supported { - res.Err = "not supported" - return - } - - // Do not update if we have active inbound SSH 
connections. Control can set - // force=true query parameter to override this. - if r.FormValue("force") != "true" && b.sshServer != nil && b.sshServer.NumActiveConns() > 0 { - res.Err = "not updating due to active SSH connections" - return - } - - if err := b.startAutoUpdate("c2n"); err != nil { - res.Err = err.Error() - return - } - res.Started = true -} - func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { b.logf("c2n: GET /posture/identity received") @@ -423,137 +367,3 @@ func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(res) } - -func (b *LocalBackend) newC2NUpdateResponse() tailcfg.C2NUpdateResponse { - // If NewUpdater does not return an error, we can update the installation. - // - // Note that we create the Updater solely to check for errors; we do not - // invoke it here. For this purpose, it is ok to pass it a zero Arguments. - prefs := b.Prefs().AutoUpdate() - return tailcfg.C2NUpdateResponse{ - Enabled: envknob.AllowsRemoteUpdate() || prefs.Apply.EqualBool(true), - Supported: clientupdate.CanAutoUpdate() && !version.IsMacSysExt(), - } -} - -func (b *LocalBackend) c2nUpdateStarted() bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.c2nUpdateStatus.started -} - -func (b *LocalBackend) setC2NUpdateStarted(v bool) { - b.mu.Lock() - defer b.mu.Unlock() - b.c2nUpdateStatus.started = v -} - -func (b *LocalBackend) trySetC2NUpdateStarted() bool { - b.mu.Lock() - defer b.mu.Unlock() - if b.c2nUpdateStatus.started { - return false - } - b.c2nUpdateStatus.started = true - return true -} - -// findCmdTailscale looks for the cmd/tailscale that corresponds to the -// currently running cmd/tailscaled. It's up to the caller to verify that the -// two match, but this function does its best to find the right one. Notably, it -// doesn't use $PATH for security reasons. 
-func findCmdTailscale() (string, error) { - self, err := os.Executable() - if err != nil { - return "", err - } - var ts string - switch runtime.GOOS { - case "linux": - if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" { - ts = "/usr/bin/tailscale" - } - if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" { - ts = "/usr/local/bin/tailscale" - } - switch distro.Get() { - case distro.QNAP: - // The volume under /share/ where qpkg are installed is not - // predictable. But the rest of the path is. - ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self) - if err == nil && ok { - ts = filepath.Join(filepath.Dir(self), "tailscale") - } - case distro.Unraid: - if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" { - ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale" - } - } - case "windows": - ts = filepath.Join(filepath.Dir(self), "tailscale.exe") - case "freebsd", "openbsd": - if self == "/usr/local/bin/tailscaled" { - ts = "/usr/local/bin/tailscale" - } - default: - return "", fmt.Errorf("unsupported OS %v", runtime.GOOS) - } - if ts != "" && regularFileExists(ts) { - return ts, nil - } - return "", errors.New("tailscale executable not found in expected place") -} - -func tailscaleUpdateCmd(cmdTS string) *exec.Cmd { - defaultCmd := exec.Command(cmdTS, "update", "--yes") - if runtime.GOOS != "linux" { - return defaultCmd - } - if _, err := exec.LookPath("systemd-run"); err != nil { - return defaultCmd - } - - // When systemd-run is available, use it to run the update command. This - // creates a new temporary unit separate from the tailscaled unit. When - // tailscaled is restarted during the update, systemd won't kill this - // temporary update unit, which could cause unexpected breakage. 
- // - // We want to use a few optional flags: - // * --wait, to block the update command until completion (added in systemd 232) - // * --pipe, to collect stdout/stderr (added in systemd 235) - // * --collect, to clean up failed runs from memory (added in systemd 236) - // - // We need to check the version of systemd to figure out if those flags are - // available. - // - // The output will look like: - // - // systemd 255 (255.7-1-arch) - // +PAM +AUDIT ... other feature flags ... - systemdVerOut, err := exec.Command("systemd-run", "--version").Output() - if err != nil { - return defaultCmd - } - parts := strings.Fields(string(systemdVerOut)) - if len(parts) < 2 || parts[0] != "systemd" { - return defaultCmd - } - systemdVer, err := strconv.Atoi(parts[1]) - if err != nil { - return defaultCmd - } - if systemdVer >= 236 { - return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes") - } else if systemdVer >= 235 { - return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes") - } else if systemdVer >= 232 { - return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes") - } else { - return exec.Command("systemd-run", cmdTS, "update", "--yes") - } -} - -func regularFileExists(path string) bool { - fi, err := os.Stat(path) - return err == nil && fi.Mode().IsRegular() -} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index a95aef0f20239..72fc8808ced73 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6,7 +6,6 @@ package ipnlocal import ( - "bytes" "cmp" "context" "crypto/sha256" @@ -25,7 +24,6 @@ import ( "net/netip" "net/url" "os" - "os/exec" "reflect" "runtime" "slices" @@ -40,7 +38,6 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" "tailscale.com/control/controlclient" "tailscale.com/control/controlknobs" "tailscale.com/drive" @@ -302,22 +299,11 @@ type LocalBackend struct { 
notifyWatchers map[string]*watchSession // by session ID lastStatusTime time.Time // status.AsOf value of the last processed status update componentLogUntil map[string]componentLogState - // c2nUpdateStatus is the status of c2n-triggered client update. - c2nUpdateStatus updateStatus - currentUser ipnauth.Actor + currentUser ipnauth.Actor - selfUpdateProgress []ipnstate.UpdateProgress - lastSelfUpdateState ipnstate.SelfUpdateStatus // capForcedNetfilter is the netfilter that control instructs Linux clients // to use, unless overridden locally. capForcedNetfilter string // TODO(nickkhyl): move to nodeBackend - // offlineAutoUpdateCancel stops offline auto-updates when called. It - // should be used via stopOfflineAutoUpdate and - // maybeStartOfflineAutoUpdate. It is nil when offline auto-updates are - // note running. - // - //lint:ignore U1000 only used in Linux and Windows builds in autoupdate.go - offlineAutoUpdateCancel func() // ServeConfig fields. (also guarded by mu) lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig @@ -433,10 +419,6 @@ func (b *LocalBackend) NetMon() *netmon.Monitor { return b.sys.NetMon.Get() } -type updateStatus struct { - started bool -} - type metrics struct { // advertisedRoutes is a metric that reports the number of network routes that are advertised by the local node. // This informs the user of how many routes are being advertised by the local node, excluding exit routes. 
@@ -517,8 +499,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo em: newExpiryManager(logf, sys.Bus.Get()), loginFlags: loginFlags, clock: clock, - selfUpdateProgress: make([]ipnstate.UpdateProgress, 0), - lastSelfUpdateState: ipnstate.UpdateFinished, captiveCtx: captiveCtx, captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), @@ -1127,7 +1107,6 @@ func (b *LocalBackend) Shutdown() { defer cancel() b.sockstatLogger.Shutdown(ctx) } - b.stopOfflineAutoUpdate() b.unregisterSysPolicyWatch() if cc != nil { @@ -3412,7 +3391,7 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { // can still manually enable auto-updates on this node. return } - if clientupdate.CanAutoUpdate() { + if buildfeatures.HasClientUpdate && feature.CanAutoUpdate() { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) @@ -4100,7 +4079,12 @@ func (b *LocalBackend) checkFunnelEnabledLocked(p *ipn.Prefs) error { } func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { - if p.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() { + if !buildfeatures.HasClientUpdate { + if p.AutoUpdate.Apply.EqualBool(true) { + return errors.New("Auto-update support is disabled in this build") + } + } + if p.AutoUpdate.Apply.EqualBool(true) && !feature.CanAutoUpdate() { return errors.New("Auto-updates are not supported on this platform.") } return nil @@ -4552,14 +4536,6 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.resetAlwaysOnOverrideLocked() } - if newp.AutoUpdate.Apply.EqualBool(true) { - if b.state != ipn.Running { - b.maybeStartOfflineAutoUpdate(newp.View()) - } - } else { - b.stopOfflineAutoUpdate() - } - unlock.UnlockEarly() if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { @@ -5467,12 +5443,6 @@ func (b *LocalBackend) 
enterStateLockedOnEntry(newState ipn.State, unlock unlock } b.pauseOrResumeControlClientLocked() - if newState == ipn.Running { - b.stopOfflineAutoUpdate() - } else { - b.maybeStartOfflineAutoUpdate(prefs) - } - unlock.UnlockEarly() // prefs may change irrespective of state; WantRunning should be explicitly @@ -6611,6 +6581,15 @@ func (b *LocalBackend) DoNoiseRequest(req *http.Request) (*http.Response, error) return cc.DoNoiseRequest(req) } +// ActiveSSHConns returns the number of active SSH connections, +// or 0 if SSH is not linked into the binary or available on the platform. +func (b *LocalBackend) ActiveSSHConns() int { + if b.sshServer == nil { + return 0 + } + return b.sshServer.NumActiveConns() +} + func (b *LocalBackend) sshServerOrInit() (_ SSHServer, err error) { b.mu.Lock() defer b.mu.Unlock() @@ -6941,54 +6920,6 @@ func (b *LocalBackend) DebugBreakDERPConns() error { return b.MagicConn().DebugBreakDERPConns() } -func (b *LocalBackend) pushSelfUpdateProgress(up ipnstate.UpdateProgress) { - b.mu.Lock() - defer b.mu.Unlock() - b.selfUpdateProgress = append(b.selfUpdateProgress, up) - b.lastSelfUpdateState = up.Status -} - -func (b *LocalBackend) clearSelfUpdateProgress() { - b.mu.Lock() - defer b.mu.Unlock() - b.selfUpdateProgress = make([]ipnstate.UpdateProgress, 0) - b.lastSelfUpdateState = ipnstate.UpdateFinished -} - -func (b *LocalBackend) GetSelfUpdateProgress() []ipnstate.UpdateProgress { - b.mu.Lock() - defer b.mu.Unlock() - res := make([]ipnstate.UpdateProgress, len(b.selfUpdateProgress)) - copy(res, b.selfUpdateProgress) - return res -} - -func (b *LocalBackend) DoSelfUpdate() { - b.mu.Lock() - updateState := b.lastSelfUpdateState - b.mu.Unlock() - // don't start an update if one is already in progress - if updateState == ipnstate.UpdateInProgress { - return - } - b.clearSelfUpdateProgress() - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, "")) - up, err := clientupdate.NewUpdater(clientupdate.Arguments{ - Logf: 
func(format string, args ...any) { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, fmt.Sprintf(format, args...))) - }, - }) - if err != nil { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) - } - err = up.Update() - if err != nil { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) - } else { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFinished, "tailscaled did not restart; please restart Tailscale manually.")) - } -} - // ObserveDNSResponse passes a DNS response from the PeerAPI DNS server to the // App Connector to enable route discovery. func (b *LocalBackend) ObserveDNSResponse(res []byte) error { @@ -7603,58 +7534,6 @@ func isAllowedAutoExitNodeID(polc policyclient.Client, exitNodeID tailcfg.Stable return true // no policy configured; allow all exit nodes } -// startAutoUpdate triggers an auto-update attempt. The actual update happens -// asynchronously. If another update is in progress, an error is returned. -func (b *LocalBackend) startAutoUpdate(logPrefix string) (retErr error) { - // Check if update was already started, and mark as started. - if !b.trySetC2NUpdateStarted() { - return errors.New("update already started") - } - defer func() { - // Clear the started flag if something failed. 
- if retErr != nil { - b.setC2NUpdateStarted(false) - } - }() - - cmdTS, err := findCmdTailscale() - if err != nil { - return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) - } - var ver struct { - Long string `json:"long"` - } - out, err := exec.Command(cmdTS, "version", "--json").Output() - if err != nil { - return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) - } - if err := json.Unmarshal(out, &ver); err != nil { - return fmt.Errorf("invalid JSON from cmd/tailscale version --json: %w", err) - } - if ver.Long != version.Long() { - return fmt.Errorf("cmd/tailscale version %q does not match tailscaled version %q", ver.Long, version.Long()) - } - - cmd := tailscaleUpdateCmd(cmdTS) - buf := new(bytes.Buffer) - cmd.Stdout = buf - cmd.Stderr = buf - b.logf("%s: running %q", logPrefix, strings.Join(cmd.Args, " ")) - if err := cmd.Start(); err != nil { - return fmt.Errorf("failed to start cmd/tailscale update: %w", err) - } - - go func() { - if err := cmd.Wait(); err != nil { - b.logf("%s: update command failed: %v, output: %s", logPrefix, err, buf) - } else { - b.logf("%s: update attempt complete", logPrefix) - } - b.setC2NUpdateStarted(false) - }() - return nil -} - // srcIPHasCapForFilter is called by the packet filter when evaluating firewall // rules that require a source IP to have a certain node capability. 
// diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index fd78c341877c3..70923efde13ee 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -30,10 +30,10 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc" "tailscale.com/appc/appctest" - "tailscale.com/clientupdate" "tailscale.com/control/controlclient" "tailscale.com/drive" "tailscale.com/drive/driveimpl" + "tailscale.com/feature" _ "tailscale.com/feature/condregister/portmapper" "tailscale.com/health" "tailscale.com/hostinfo" @@ -3710,7 +3710,7 @@ func TestOnTailnetDefaultAutoUpdate(t *testing.T) { // On platforms that don't support auto-update we can never // transition to auto-updates being enabled. The value should // remain unchanged after onTailnetDefaultAutoUpdate. - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { want = tt.before } if got := b.pm.CurrentPrefs().AutoUpdate().Apply; got != want { @@ -5455,7 +5455,7 @@ func TestEnableAutoUpdates(t *testing.T) { }) // Enabling may fail, depending on which environment we are running this // test in. - wantErr := !clientupdate.CanAutoUpdate() + wantErr := !feature.CanAutoUpdate() gotErr := err != nil if gotErr != wantErr { t.Fatalf("enabling auto-updates: got error: %v (%v); want error: %v", gotErr, err, wantErr) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 6e1db4ff25bbd..67e71aa70a098 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -13,8 +13,8 @@ import ( "slices" "strings" - "tailscale.com/clientupdate" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" @@ -674,7 +674,7 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error // cause any EditPrefs calls to fail (other than disabling auto-updates). // // Reset AutoUpdate.Apply if we detect such invalid prefs. 
- if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() { + if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !feature.CanAutoUpdate() { savedPrefs.AutoUpdate.Apply.Clear() } diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 8dce388bcd7aa..60c92ff8d3493 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -12,7 +12,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "tailscale.com/clientupdate" + _ "tailscale.com/clientupdate" // for feature registration side effects + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" @@ -464,7 +465,7 @@ func TestProfileManagement(t *testing.T) { wantCurProfile = "user@2.example.com" checkProfiles(t) - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { t.Logf("Save an invalid AutoUpdate pref value") prefs := pm.CurrentPrefs().AsStruct() prefs.AutoUpdate.Apply.Set(true) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index caebbe0cc6730..ab556702d72d3 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -27,8 +27,8 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -120,8 +120,6 @@ var handler = map[string]LocalAPIHandler{ "status": (*Handler).serveStatus, "suggest-exit-node": (*Handler).serveSuggestExitNode, "update/check": (*Handler).serveUpdateCheck, - "update/install": (*Handler).serveUpdateInstall, - "update/progress": (*Handler).serveUpdateProgress, "upload-client-metrics": (*Handler).serveUploadClientMetrics, "usermetrics": (*Handler).serveUserMetrics, "watch-ipn-bus": (*Handler).serveWatchIPNBus, @@ -1897,7 +1895,7 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r 
*http.Request) { return } - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { // if we don't support auto-update, just say that we're up to date json.NewEncoder(w).Encode(tailcfg.ClientVersion{RunningLatest: true}) return @@ -1915,37 +1913,6 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(cv) } -// serveUpdateInstall sends a request to the LocalBackend to start a Tailscale -// self-update. A successful response does not indicate whether the update -// succeeded, only that the request was accepted. Clients should use -// serveUpdateProgress after pinging this endpoint to check how the update is -// going. -func (h *Handler) serveUpdateInstall(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - - w.WriteHeader(http.StatusAccepted) - - go h.b.DoSelfUpdate() -} - -// serveUpdateProgress returns the status of an in-progress Tailscale self-update. -// This is provided as a slice of ipnstate.UpdateProgress structs with various -// log messages in order from oldest to newest. If an update is not in progress, -// the returned slice will be empty. -func (h *Handler) serveUpdateProgress(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) - return - } - - ups := h.b.GetSelfUpdateProgress() - - json.NewEncoder(w).Encode(ups) -} - // serveDNSOSConfig serves the current system DNS configuration as a JSON object, if // supported by the OS. 
func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 2e8ca0f0ae46b..ba509e268a593 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -9,7 +9,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket LDW github.com/coder/websocket/internal/xsync from github.com/coder/websocket - W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com @@ -28,8 +28,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + DI github.com/google/uuid from github.com/prometheus-community/pro-bing + github.com/hdevalence/ed25519consensus from tailscale.com/tka L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -124,8 +124,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ LDW tailscale.com/client/web 
from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient @@ -204,7 +202,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local @@ -232,7 +230,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/client/local+ - tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext @@ -251,12 +249,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from 
tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ @@ -265,7 +263,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ tailscale.com/util/multierr from tailscale.com/control/controlclient+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -291,8 +289,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ + W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -329,7 +327,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/salsa20/salsa from 
golang.org/x/crypto/nacl/box+ LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ @@ -364,9 +362,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ compress/gzip from golang.org/x/net/http2+ @@ -439,7 +436,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/x509 from crypto/tls+ DI crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ - DWI database/sql/driver from github.com/google/uuid + DI database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ @@ -452,11 +449,11 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) encoding/json from expvar+ encoding/pem from crypto/tls+ encoding/xml from github.com/tailscale/goupnp+ - errors from archive/tar+ + errors from bufio+ expvar from tailscale.com/health+ flag from tailscale.com/util/testenv - fmt from archive/tar+ - hash from compress/zlib+ + fmt from compress/flate+ + hash from crypto+ W hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem @@ -473,7 +470,7 @@ tailscale.com/tsnet dependencies: (generated by 
github.com/tailscale/depaware) internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -509,14 +506,14 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ + io from bufio+ + io/fs from crypto/x509+ io/ioutil from github.com/digitalocean/go-smbios/smbios+ iter from bytes+ log from expvar+ log/internal from log - maps from archive/tar+ - math from archive/tar+ + maps from crypto/x509+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from bytes+ math/rand from github.com/fxamacker/cbor/v2+ @@ -537,28 +534,28 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) net/url from crypto/x509+ os from crypto/internal/sysrand+ os/exec from github.com/godbus/dbus/v5+ - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp - runtime from archive/tar+ + runtime from crypto/internal/fips140+ runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof - slices from archive/tar+ + slices from crypto/tls+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ + strconv from compress/flate+ + strings from bufio+ W structs from internal/syscall/windows - sync from archive/tar+ + sync from compress/flate+ sync/atomic from context+ - 
syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from runtime/pprof LDW text/template from html/template LDW text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index fa148abbec8a7..c274c31a9060b 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -33,8 +33,9 @@ import ( "go4.org/mem" "tailscale.com/client/local" "tailscale.com/client/tailscale" - "tailscale.com/clientupdate" "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/feature" + _ "tailscale.com/feature/clientupdate" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/tsaddr" @@ -1125,7 +1126,7 @@ func TestLogoutRemovesAllPeers(t *testing.T) { } func TestAutoUpdateDefaults(t *testing.T) { - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { t.Skip("auto-updates not supported on this platform") } tstest.Shard(t) From ba76578447a033f0b8033a90405cf9e0643ff12a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 15:50:17 -0700 Subject: [PATCH 0459/1093] ipn/ipnlocal, feature/posture: pull posture out into a modular feature Updates #12614 Change-Id: I9d08a1330b9c55e1a23e7979a707e11d8e090d79 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 4 +- cmd/tailscaled/depaware-minbox.txt | 3 - cmd/tailscaled/depaware.txt | 3 +- cmd/tsidp/depaware.txt | 4 +- .../buildfeatures/feature_posture_disabled.go | 13 ++ .../buildfeatures/feature_posture_enabled.go | 13 ++ feature/condregister/maybe_posture.go | 8 ++ feature/featuretags/featuretags.go | 1 + feature/posture/posture.go | 114 ++++++++++++++++++ ipn/ipnlocal/c2n.go | 49 -------- ipn/ipnlocal/local.go | 29 +---- tsnet/depaware.txt | 4 +- 12 files changed, 157 insertions(+), 88 deletions(-) create mode 100644 
feature/buildfeatures/feature_posture_disabled.go create mode 100644 feature/buildfeatures/feature_posture_enabled.go create mode 100644 feature/condregister/maybe_posture.go create mode 100644 feature/posture/posture.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index a85f5731b22c3..1fd3c76304e09 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -17,7 +17,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/distribution/reference from tailscale.com/cmd/k8s-operator github.com/emicklei/go-restful/v3 from k8s.io/kube-openapi/pkg/common github.com/emicklei/go-restful/v3/log from github.com/emicklei/go-restful/v3 @@ -784,7 +783,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/sessionrecording from tailscale.com/k8s-operator/sessionrecording+ @@ -1099,7 +1097,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - io/ioutil from github.com/digitalocean/go-smbios/smbios+ + io/ioutil from github.com/godbus/dbus/v5+ iter from go/ast+ log from expvar+ log/internal from log+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 
42d8f9181936d..595296229b56a 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -1,6 +1,5 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) - github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart @@ -116,7 +115,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/cmd/tailscaled+ - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ tailscale.com/syncs from tailscale.com/cmd/tailscaled+ @@ -374,7 +372,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - io/ioutil from github.com/digitalocean/go-smbios/smbios iter from bytes+ log from expvar+ log/internal from log diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a3bac20aab30d..a5ae214a0f1fc 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -282,6 +282,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister tailscale.com/feature/portlist from tailscale.com/feature/condregister tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper + tailscale.com/feature/posture from tailscale.com/feature/condregister tailscale.com/feature/relayserver from tailscale.com/feature/condregister L tailscale.com/feature/sdnotify from 
tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ @@ -364,7 +365,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/portlist from tailscale.com/feature/portlist - tailscale.com/posture from tailscale.com/ipn/ipnlocal + tailscale.com/posture from tailscale.com/feature/posture tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ LD tailscale.com/sessionrecording from tailscale.com/ssh/tailssh diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 8a78af4932f2f..b6e794f8c2220 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -14,7 +14,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ @@ -212,7 +211,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ @@ -515,7 +513,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 
internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - io/ioutil from github.com/digitalocean/go-smbios/smbios+ + io/ioutil from github.com/godbus/dbus/v5+ iter from bytes+ log from expvar+ log/internal from log diff --git a/feature/buildfeatures/feature_posture_disabled.go b/feature/buildfeatures/feature_posture_disabled.go new file mode 100644 index 0000000000000..a78b1a95720cf --- /dev/null +++ b/feature/buildfeatures/feature_posture_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_posture + +package buildfeatures + +// HasPosture is whether the binary was built with support for modular feature "Device posture checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_posture" build tag. +// It's a const so it can be used for dead code elimination. +const HasPosture = false diff --git a/feature/buildfeatures/feature_posture_enabled.go b/feature/buildfeatures/feature_posture_enabled.go new file mode 100644 index 0000000000000..dcd9595f9ca96 --- /dev/null +++ b/feature/buildfeatures/feature_posture_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_posture + +package buildfeatures + +// HasPosture is whether the binary was built with support for modular feature "Device posture checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_posture" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasPosture = true diff --git a/feature/condregister/maybe_posture.go b/feature/condregister/maybe_posture.go new file mode 100644 index 0000000000000..6f14c27137127 --- /dev/null +++ b/feature/condregister/maybe_posture.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_posture + +package condregister + +import _ "tailscale.com/feature/posture" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 2895360994bc3..22b93e0a1a142 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -139,6 +139,7 @@ var Features = map[FeatureTag]FeatureMeta{ }, "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, + "posture": {"Posture", "Device posture checking support", nil}, "netlog": { Sym: "NetLog", Desc: "Network flow logging support", diff --git a/feature/posture/posture.go b/feature/posture/posture.go new file mode 100644 index 0000000000000..8e1945d7dbd0b --- /dev/null +++ b/feature/posture/posture.go @@ -0,0 +1,114 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package posture registers support for device posture checking, +// reporting machine-specific information to the control plane +// when enabled by the user and tailnet. 
+package posture + +import ( + "encoding/json" + "net/http" + + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/posture" + "tailscale.com/syncs" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" +) + +func init() { + ipnext.RegisterExtension("posture", newExtension) + ipnlocal.RegisterC2N("GET /posture/identity", handleC2NPostureIdentityGet) +} + +func newExtension(logf logger.Logf, b ipnext.SafeBackend) (ipnext.Extension, error) { + e := &extension{ + logf: logger.WithPrefix(logf, "posture: "), + } + return e, nil +} + +type extension struct { + logf logger.Logf + + // lastKnownHardwareAddrs is a list of the previous known hardware addrs. + // Previously known hwaddrs are kept to work around an issue on Windows + // where all addresses might disappear. + // http://go/corp/25168 + lastKnownHardwareAddrs syncs.AtomicValue[[]string] +} + +func (e *extension) Name() string { return "posture" } +func (e *extension) Init(h ipnext.Host) error { return nil } +func (e *extension) Shutdown() error { return nil } + +func handleC2NPostureIdentityGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "posture extension not available", http.StatusInternalServerError) + return + } + e.logf("c2n: GET /posture/identity received") + + res := tailcfg.C2NPostureIdentityResponse{} + + // Only collect posture identity if enabled on the client, + // this will first check syspolicy, MDM settings like Registry + // on Windows or defaults on macOS. If they are not set, it falls + // back to the cli-flag, `--posture-checking`. 
+ choice, err := b.PolicyClient().GetPreferenceOption(pkey.PostureChecking, ptype.ShowChoiceByPolicy) + if err != nil { + e.logf( + "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", + b.Prefs().PostureChecking(), + err, + ) + } + + if choice.ShouldEnable(b.Prefs().PostureChecking()) { + res.SerialNumbers, err = posture.GetSerialNumbers(b.PolicyClient(), e.logf) + if err != nil { + e.logf("c2n: GetSerialNumbers returned error: %v", err) + } + + // TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release + // and looks good in client metrics, remove this parameter and always report MAC + // addresses. + if r.FormValue("hwaddrs") == "true" { + res.IfaceHardwareAddrs, err = e.getHardwareAddrs() + if err != nil { + e.logf("c2n: GetHardwareAddrs returned error: %v", err) + } + } + } else { + res.PostureDisabled = true + } + + e.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs)) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +// getHardwareAddrs returns the hardware addresses for the machine. If the list +// of hardware addresses is empty, it will return the previously known hardware +// addresses. Both the current, and previously known hardware addresses might be +// empty. 
+func (e *extension) getHardwareAddrs() ([]string, error) { + addrs, err := posture.GetHardwareAddrs() + if err != nil { + return nil, err + } + + if len(addrs) == 0 { + e.logf("getHardwareAddrs: got empty list of hwaddrs, returning previous list") + return e.lastKnownHardwareAddrs.Load(), nil + } + + e.lastKnownHardwareAddrs.Store(addrs) + return addrs, nil +} diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index cbc4cae788bb4..38c65fee885dc 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -17,15 +17,12 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/ipn" "tailscale.com/net/sockstats" - "tailscale.com/posture" "tailscale.com/tailcfg" "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" "tailscale.com/util/goroutines" "tailscale.com/util/httpm" "tailscale.com/util/set" - "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/ptype" "tailscale.com/version" ) @@ -52,9 +49,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // SSH req("/ssh/usernames"): handleC2NSSHUsernames, - // Device posture. - req("GET /posture/identity"): handleC2NPostureIdentityGet, - // App Connectors. req("GET /appconnector/routes"): handleC2NAppConnectorDomainRoutesGet, @@ -324,46 +318,3 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R w.WriteHeader(http.StatusNoContent) } - -func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /posture/identity received") - - res := tailcfg.C2NPostureIdentityResponse{} - - // Only collect posture identity if enabled on the client, - // this will first check syspolicy, MDM settings like Registry - // on Windows or defaults on macOS. If they are not set, it falls - // back to the cli-flag, `--posture-checking`. 
- choice, err := b.polc.GetPreferenceOption(pkey.PostureChecking, ptype.ShowChoiceByPolicy) - if err != nil { - b.logf( - "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", - b.Prefs().PostureChecking(), - err, - ) - } - - if choice.ShouldEnable(b.Prefs().PostureChecking()) { - res.SerialNumbers, err = posture.GetSerialNumbers(b.polc, b.logf) - if err != nil { - b.logf("c2n: GetSerialNumbers returned error: %v", err) - } - - // TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release - // and looks good in client metrics, remove this parameter and always report MAC - // addresses. - if r.FormValue("hwaddrs") == "true" { - res.IfaceHardwareAddrs, err = b.getHardwareAddrs() - if err != nil { - b.logf("c2n: GetHardwareAddrs returned error: %v", err) - } - } - } else { - res.PostureDisabled = true - } - - b.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs)) - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 72fc8808ced73..c9fff50c3c971 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -68,7 +68,6 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/paths" - "tailscale.com/posture" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tsd" @@ -344,12 +343,6 @@ type LocalBackend struct { // notified about. lastNotifiedDriveShares *views.SliceView[*drive.Share, drive.ShareView] - // lastKnownHardwareAddrs is a list of the previous known hardware addrs. - // Previously known hwaddrs are kept to work around an issue on Windows - // where all addresses might disappear. 
- // http://go/corp/25168 - lastKnownHardwareAddrs syncs.AtomicValue[[]string] - // lastSuggestedExitNode stores the last suggested exit node suggestion to // avoid unnecessary churn between multiple equally-good options. lastSuggestedExitNode tailcfg.StableNodeID @@ -419,6 +412,9 @@ func (b *LocalBackend) NetMon() *netmon.Monitor { return b.sys.NetMon.Get() } +// PolicyClient returns the policy client for the backend. +func (b *LocalBackend) PolicyClient() policyclient.Client { return b.polc } + type metrics struct { // advertisedRoutes is a metric that reports the number of network routes that are advertised by the local node. // This informs the user of how many routes are being advertised by the local node, excluding exit routes. @@ -6757,25 +6753,6 @@ func (b *LocalBackend) resetDialPlan() { } } -// getHardwareAddrs returns the hardware addresses for the machine. If the list -// of hardware addresses is empty, it will return the previously known hardware -// addresses. Both the current, and previously known hardware addresses might be -// empty. -func (b *LocalBackend) getHardwareAddrs() ([]string, error) { - addrs, err := posture.GetHardwareAddrs() - if err != nil { - return nil, err - } - - if len(addrs) == 0 { - b.logf("getHardwareAddrs: got empty list of hwaddrs, returning previous list") - return b.lastKnownHardwareAddrs.Load(), nil - } - - b.lastKnownHardwareAddrs.Store(addrs) - return addrs, nil -} - // resetForProfileChangeLockedOnEntry resets the backend for a profile change. // // b.mu must held on entry. It is released on exit. 
diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index ba509e268a593..0644a0692c7f4 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -14,7 +14,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ @@ -208,7 +207,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ @@ -508,7 +506,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - io/ioutil from github.com/digitalocean/go-smbios/smbios+ + io/ioutil from github.com/godbus/dbus/v5+ iter from bytes+ log from expvar+ log/internal from log From 9aa16bf97b977e10b83900473bfd2dd8c3f043e8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 16:28:06 -0700 Subject: [PATCH 0460/1093] feature/featuretags, Makefile: fix bug with CLI build tag and depaware, add variant When I added dependency support to featuretag, I broke the handling of the non-omit build tags (as used by the "box" support for bundling the CLI into tailscaled). That then affected depaware. 
The depaware-minbox.txt this whole time recently has not included the CLI. So fix that, and also add a new depaware variant that's only the daemon, without the CLI. Updates #12614 Updates #17139 Change-Id: I4a4591942aa8c66ad8e3242052e3d9baa42902ca Signed-off-by: Brad Fitzpatrick --- Makefile | 4 + cmd/tailscaled/depaware-min.txt | 424 ++++++++++++++++++++++++ cmd/tailscaled/depaware-minbox.txt | 55 ++- feature/featuretags/featuretags.go | 3 - feature/featuretags/featuretags_test.go | 4 + 5 files changed, 477 insertions(+), 13 deletions(-) create mode 100644 cmd/tailscaled/depaware-min.txt diff --git a/Makefile b/Makefile index 95959fcf0ba42..05b984348d81c 100644 --- a/Makefile +++ b/Makefile @@ -29,6 +29,8 @@ updatedeps: ## Update depaware deps tailscale.com/tsnet PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ tailscale.com/cmd/tailscaled + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --internal \ + tailscale.com/cmd/tailscaled depaware: ## Run depaware checks # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" @@ -44,6 +46,8 @@ depaware: ## Run depaware checks tailscale.com/tsnet PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ tailscale.com/cmd/tailscaled + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --internal \ + tailscale.com/cmd/tailscaled buildwindows: ## Build tailscale CLI for windows/amd64 GOOS=windows GOARCH=amd64 ./tool/go install 
tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt new file mode 100644 index 0000000000000..ee66d77006f92 --- /dev/null +++ b/cmd/tailscaled/depaware-min.txt @@ -0,0 +1,424 @@ +tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + + github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/go-json-experiment/json from tailscale.com/drive+ + github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + github.com/golang/groupcache/lru from tailscale.com/net/dnscache + 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon + github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/klauspost/compress from github.com/klauspost/compress/zstd + github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 + github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + github.com/mdlayher/genetlink from tailscale.com/net/tstun + 💣 github.com/mdlayher/netlink from 
github.com/jsimonetti/rtnetlink+ + 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ + 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf + github.com/tailscale/hujson from tailscale.com/ipn/conffile + github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ + 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ + github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/ratelimiter from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/replay from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ + github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device + 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + 💣 go4.org/mem from tailscale.com/control/controlbase+ + go4.org/netipx from tailscale.com/ipn/ipnlocal+ + tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ + tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled + tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ + tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ + tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ + 
tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ + tailscale.com/disco from tailscale.com/net/tstun+ + tailscale.com/drive from tailscale.com/ipn+ + tailscale.com/envknob from tailscale.com/cmd/tailscaled+ + tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal + tailscale.com/feature from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/health from tailscale.com/cmd/tailscaled+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ + tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/ipn from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled + tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ + tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver + tailscale.com/ipn/store from tailscale.com/cmd/tailscaled + tailscale.com/ipn/store/mem from tailscale.com/ipn/store + tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/log/filelogger from tailscale.com/logpolicy + tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal + tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ + tailscale.com/metrics from tailscale.com/health+ + tailscale.com/net/ace from tailscale.com/control/controlhttp + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 
+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock + tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ + tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ + tailscale.com/net/dnscache from tailscale.com/control/controlclient+ + tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ + tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/netaddr from tailscale.com/ipn+ + tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/neterror from tailscale.com/net/batching+ + tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal + tailscale.com/net/netknob from tailscale.com/logpolicy+ + tailscale.com/net/netmon from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netutil from tailscale.com/control/controlclient+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/packet/checksum from tailscale.com/net/tstun + tailscale.com/net/ping from tailscale.com/net/netcheck+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + tailscale.com/net/sockstats from tailscale.com/control/controlclient+ + tailscale.com/net/stun from tailscale.com/ipn/localapi+ + tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial + tailscale.com/net/tsaddr from tailscale.com/ipn+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ + tailscale.com/net/tshttpproxy from tailscale.com/cmd/tailscaled+ + 
tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ + tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/omit from tailscale.com/ipn/conffile + tailscale.com/paths from tailscale.com/cmd/tailscaled+ + tailscale.com/proxymap from tailscale.com/tsd + tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ + tailscale.com/syncs from tailscale.com/cmd/tailscaled+ + tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ + tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tka from tailscale.com/control/controlclient+ + tailscale.com/tsconst from tailscale.com/net/netns + tailscale.com/tsd from tailscale.com/cmd/tailscaled+ + tailscale.com/tstime from tailscale.com/control/controlclient+ + tailscale.com/tstime/mono from tailscale.com/net/tstun+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter + tailscale.com/tsweb from tailscale.com/util/eventbus + tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ + tailscale.com/types/empty from tailscale.com/ipn+ + tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled + tailscale.com/types/ipproto from tailscale.com/ipn+ + tailscale.com/types/key from tailscale.com/cmd/tailscaled+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ + tailscale.com/types/logger from tailscale.com/appc+ + tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogtype from tailscale.com/net/connstats + tailscale.com/types/netmap from tailscale.com/control/controlclient+ + tailscale.com/types/nettype from tailscale.com/ipn/localapi+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ + tailscale.com/types/persist from 
tailscale.com/control/controlclient+ + tailscale.com/types/preftype from tailscale.com/ipn+ + tailscale.com/types/ptr from tailscale.com/control/controlclient+ + tailscale.com/types/result from tailscale.com/util/lineiter + tailscale.com/types/structs from tailscale.com/control/controlclient+ + tailscale.com/types/tkatype from tailscale.com/control/controlclient+ + tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/clientmetric from tailscale.com/appc+ + tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ + 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/dirwalk from tailscale.com/metrics + tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/cmd/tailscaled+ + tailscale.com/util/execqueue from tailscale.com/appc+ + tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal + tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth + 💣 tailscale.com/util/hashx from tailscale.com/util/deephash + tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ + tailscale.com/util/mak from tailscale.com/appc+ + tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ + tailscale.com/util/must from tailscale.com/logpolicy+ + tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/osdiag from tailscale.com/ipn/localapi + tailscale.com/util/osshare from tailscale.com/cmd/tailscaled + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/race from tailscale.com/net/dns/resolver + tailscale.com/util/racebuild from tailscale.com/logpolicy + tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/ringlog from 
tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ + tailscale.com/util/singleflight from tailscale.com/control/controlclient+ + tailscale.com/util/slicesx from tailscale.com/appc+ + tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/testenv from tailscale.com/control/controlclient+ + tailscale.com/util/usermetric from tailscale.com/health+ + tailscale.com/util/vizerror from tailscale.com/tailcfg+ + tailscale.com/util/winutil from tailscale.com/ipn/ipnauth + tailscale.com/util/zstdframe from tailscale.com/control/controlclient + tailscale.com/version from tailscale.com/cmd/tailscaled+ + tailscale.com/version/distro from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ + tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ + 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/netlog from tailscale.com/wgengine + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ + tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ + tailscale.com/wgengine/wglog from tailscale.com/wgengine + golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box + golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + golang.org/x/crypto/cryptobyte/asn1 from 
crypto/ecdsa+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/nacl/box from tailscale.com/types/key + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device + golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ + golang.org/x/exp/maps from tailscale.com/ipn/store/mem + golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ + golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/icmp from tailscale.com/net/ping + golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/internal/socks from golang.org/x/net/proxy + golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/proxy from tailscale.com/net/netns + golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ + golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ + golang.org/x/term from tailscale.com/logpolicy + golang.org/x/text/secure/bidirule from golang.org/x/net/idna + 
golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ + golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ + golang.org/x/text/unicode/norm from golang.org/x/net/idna + golang.org/x/time/rate from tailscale.com/derp + bufio from compress/flate+ + bytes from bufio+ + cmp from encoding/json+ + compress/flate from compress/gzip + compress/gzip from golang.org/x/net/http2+ + container/list from crypto/tls+ + context from crypto/tls+ + crypto from crypto/ecdh+ + crypto/aes from crypto/internal/hpke+ + crypto/cipher from crypto/aes+ + crypto/des from crypto/tls+ + crypto/dsa from crypto/x509 + crypto/ecdh from crypto/ecdsa+ + crypto/ecdsa from crypto/tls+ + crypto/ed25519 from crypto/tls+ + crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ + crypto/hmac from crypto/tls+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/fips140+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/fips140+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/hkdf+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/ecdsa+ + 
crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ + crypto/internal/hpke from crypto/tls + crypto/internal/impl from crypto/internal/fips140/aes+ + crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ + crypto/md5 from crypto/tls+ + crypto/rand from crypto/ed25519+ + crypto/rc4 from crypto/tls + crypto/rsa from crypto/tls+ + crypto/sha1 from crypto/tls+ + crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash + crypto/sha512 from crypto/ecdsa+ + crypto/subtle from crypto/cipher+ + crypto/tls from golang.org/x/net/http2+ + crypto/tls/internal/fips140tls from crypto/tls + crypto/x509 from crypto/tls+ + crypto/x509/pkix from crypto/x509 + embed from tailscale.com+ + encoding from encoding/json+ + encoding/asn1 from crypto/x509+ + encoding/base32 from github.com/go-json-experiment/json + encoding/base64 from encoding/json+ + encoding/binary from compress/gzip+ + encoding/hex from crypto/x509+ + encoding/json from expvar+ + encoding/pem from crypto/tls+ + errors from bufio+ + expvar from tailscale.com/health+ + flag from tailscale.com/cmd/tailscaled+ + fmt from compress/flate+ + hash from crypto+ + hash/crc32 from 
compress/gzip+ + hash/maphash from go4.org/mem + html from net/http/pprof+ + internal/abi from hash/maphash+ + internal/asan from internal/runtime/maps+ + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/cipher+ + internal/chacha8rand from math/rand/v2+ + internal/coverage/rtcov from runtime + internal/cpu from crypto/internal/fips140deps/cpu+ + internal/filepathlite from os+ + internal/fmtsort from fmt + internal/goarch from crypto/internal/fips140deps/cpu+ + internal/godebug from crypto/internal/fips140deps/godebug+ + internal/godebugs from internal/godebug+ + internal/goexperiment from hash/maphash+ + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from internal/runtime/maps+ + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profile from net/http/pprof + internal/profilerecord from runtime+ + internal/race from internal/runtime/maps+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/cgroup from runtime + internal/runtime/exithook from runtime + internal/runtime/gc from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/sys from crypto/subtle+ + internal/runtime/syscall from internal/runtime/cgroup+ + internal/singleflight from net + internal/stringslite from embed+ + internal/sync from sync+ + internal/synctest from sync + internal/syscall/execenv from os+ + internal/syscall/unix from crypto/internal/sysrand+ + internal/testlog from os + internal/trace/tracev2 from runtime+ + internal/unsafeheader from internal/reflectlite+ + io from bufio+ + io/fs from crypto/x509+ + iter from bytes+ + log from expvar+ + log/internal from log + maps from crypto/x509+ + math from compress/flate+ + math/big from crypto/dsa+ + math/bits from bytes+ + math/rand 
from github.com/mdlayher/netlink+ + math/rand/v2 from crypto/ecdsa+ + mime from mime/multipart+ + mime/multipart from net/http + mime/quotedprintable from mime/multipart + net from crypto/tls+ + net/http from expvar+ + net/http/httptrace from golang.org/x/net/http2+ + net/http/internal from net/http + net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http + net/http/pprof from tailscale.com/cmd/tailscaled+ + net/netip from crypto/x509+ + net/textproto from golang.org/x/net/http/httpguts+ + net/url from crypto/x509+ + os from crypto/internal/sysrand+ + os/exec from tailscale.com/hostinfo+ + os/signal from tailscale.com/cmd/tailscaled + os/user from tailscale.com/ipn/ipnauth+ + path from io/fs+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ + regexp from internal/profile+ + regexp/syntax from regexp + runtime from crypto/internal/fips140+ + runtime/debug from github.com/klauspost/compress/zstd+ + runtime/pprof from net/http/pprof+ + runtime/trace from net/http/pprof + slices from crypto/tls+ + sort from compress/flate+ + strconv from compress/flate+ + strings from bufio+ + sync from compress/flate+ + sync/atomic from context+ + syscall from crypto/internal/sysrand+ + text/tabwriter from runtime/pprof + time from compress/gzip+ + unicode from bytes+ + unicode/utf16 from crypto/x509+ + unicode/utf8 from bufio+ + unique from net/netip + unsafe from bytes+ + weak from crypto/internal/fips140cache+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 595296229b56a..86e75660ad8c7 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -1,5 +1,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus + filippo.io/edwards25519/field from filippo.io/edwards25519 github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset 
from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart @@ -10,8 +12,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache + github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli github.com/klauspost/compress from github.com/klauspost/compress/zstd github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd @@ -19,11 +23,19 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + github.com/mattn/go-colorable from tailscale.com/cmd/tailscale/cli + github.com/mattn/go-isatty from github.com/mattn/go-colorable+ github.com/mdlayher/genetlink from tailscale.com/net/tstun 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ + github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ + github.com/peterbourgon/ff/v3/internal from github.com/peterbourgon/ff/v3 💣 github.com/safchain/ethtool from 
tailscale.com/net/netkernelconf + github.com/skip2/go-qrcode from tailscale.com/cmd/tailscale/cli + github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ + github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -34,16 +46,24 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal+ tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/local from tailscale.com/client/tailscale+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ + tailscale.com/clientupdate from tailscale.com/cmd/tailscale/cli + tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscaled + tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli + tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from 
tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/controlclient+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ @@ -56,11 +76,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature from tailscale.com/cmd/tailscaled+ tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled - tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ tailscale.com/health from tailscale.com/cmd/tailscaled+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli + tailscale.com/internal/noiseconn from tailscale.com/control/controlclient+ tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ @@ -72,13 +94,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/store from tailscale.com/cmd/tailscaled tailscale.com/ipn/store/mem from tailscale.com/ipn/store tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/licenses from tailscale.com/cmd/tailscale/cli tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ 
tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/health+ - tailscale.com/net/ace from tailscale.com/control/controlhttp + tailscale.com/net/ace from tailscale.com/control/controlhttp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/connstats from tailscale.com/net/tstun+ @@ -113,6 +136,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tshttpproxy from tailscale.com/cmd/tailscaled+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/cmd/tailscaled+ tailscale.com/proxymap from tailscale.com/tsd @@ -121,8 +145,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+ tailscale.com/tka from tailscale.com/control/controlclient+ - tailscale.com/tsconst from tailscale.com/net/netns + tailscale.com/tsconst from tailscale.com/net/netns+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ @@ -154,6 +179,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ 
+ tailscale.com/util/cmpver from tailscale.com/clientupdate tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/util/dirwalk from tailscale.com/metrics @@ -172,6 +198,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/osdiag from tailscale.com/ipn/localapi tailscale.com/util/osshare from tailscale.com/cmd/tailscaled tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ @@ -241,11 +268,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/derp + archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ - compress/flate from compress/gzip + compress/flate from compress/gzip+ compress/gzip from golang.org/x/net/http2+ + compress/zlib from image/png container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ @@ -325,9 +354,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ + hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from net/http/pprof+ + image from github.com/skip2/go-qrcode+ + image/color from github.com/skip2/go-qrcode+ + image/png from github.com/skip2/go-qrcode internal/abi from hash/maphash+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -372,6 +405,7 @@ tailscale.com/cmd/tailscaled dependencies: 
(generated by github.com/tailscale/de internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ + io/ioutil from github.com/skip2/go-qrcode iter from bytes+ log from expvar+ log/internal from log @@ -387,8 +421,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net from crypto/tls+ net/http from expvar+ net/http/httptrace from golang.org/x/net/http2+ - net/http/internal from net/http - net/http/internal/ascii from net/http + net/http/httputil from tailscale.com/cmd/tailscale/cli + net/http/internal from net/http+ + net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from crypto/x509+ @@ -396,7 +431,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/url from crypto/x509+ os from crypto/internal/sysrand+ os/exec from tailscale.com/hostinfo+ - os/signal from tailscale.com/cmd/tailscaled + os/signal from tailscale.com/cmd/tailscaled+ os/user from tailscale.com/ipn/ipnauth+ path from io/fs+ path/filepath from crypto/x509+ @@ -414,7 +449,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ - text/tabwriter from runtime/pprof + text/tabwriter from runtime/pprof+ time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 22b93e0a1a142..709d96eddf2d8 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -40,9 +40,6 @@ func Requires(ft FeatureTag) set.Set[FeatureTag] { s := set.Set[FeatureTag]{} var add func(FeatureTag) add = func(ft FeatureTag) { - if !ft.IsOmittable() { - return - } s.Add(ft) for _, dep := range Features[ft].Deps { add(dep) diff --git a/feature/featuretags/featuretags_test.go 
b/feature/featuretags/featuretags_test.go index b1524ce4f20f7..893ab0e6a1c71 100644 --- a/feature/featuretags/featuretags_test.go +++ b/feature/featuretags/featuretags_test.go @@ -36,6 +36,10 @@ func TestRequires(t *testing.T) { in: "drive", want: setOf("drive"), }, + { + in: "cli", + want: setOf("cli"), + }, { in: "serve", want: setOf("serve", "netstack"), From a45473c4c58832073761c4619d1c912e2a49c7fa Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Tue, 30 Sep 2025 05:33:50 -0400 Subject: [PATCH 0461/1093] cmd/k8s-operator: add DNS policy and config support to ProxyClass (#16887) DNS configuration support to ProxyClass, allowing users to customize DNS resolution for Tailscale proxy pods. Fixes #16886 Signed-off-by: Raj Singh --- .../crds/tailscale.com_proxyclasses.yaml | 56 +++++++++++++++++++ .../deploy/manifests/operator.yaml | 56 +++++++++++++++++++ cmd/k8s-operator/sts.go | 6 ++ cmd/k8s-operator/sts_test.go | 13 +++++ k8s-operator/api.md | 2 + .../apis/v1alpha1/types_proxyclass.go | 11 ++++ .../apis/v1alpha1/zz_generated.deepcopy.go | 10 ++++ 7 files changed, 154 insertions(+) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index cb9e0b991a4eb..516e75f489129 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -1046,6 +1046,62 @@ spec: type: object additionalProperties: type: string + dnsConfig: + description: |- + DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + When DNSPolicy is set to "None", DNSConfig must be specified. + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + type: object + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. 
+ type: array + items: + type: string + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + type: array + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + type: object + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's value. + type: string + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + type: array + items: + type: string + x-kubernetes-list-type: atomic + dnsPolicy: + description: |- + DNSPolicy defines how DNS will be configured for the proxy Pod. + By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + type: string + enum: + - ClusterFirstWithHostNet + - ClusterFirst + - Default + - None imagePullSecrets: description: |- Proxy Pod's image pull Secrets. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 9c19554aa351d..520d17eae3d2f 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -1574,6 +1574,62 @@ spec: Annotations must be valid Kubernetes annotations. https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set type: object + dnsConfig: + description: |- + DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. 
+ When DNSPolicy is set to "None", DNSConfig must be specified. + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + DNSPolicy defines how DNS will be configured for the proxy Pod. + By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + enum: + - ClusterFirstWithHostNet + - ClusterFirst + - Default + - None + type: string imagePullSecrets: description: |- Proxy Pod's image pull Secrets. 
diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 80c9ca806db10..6300341b7e75e 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -906,6 +906,12 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, ss.Spec.Template.Spec.Tolerations = wantsPod.Tolerations ss.Spec.Template.Spec.PriorityClassName = wantsPod.PriorityClassName ss.Spec.Template.Spec.TopologySpreadConstraints = wantsPod.TopologySpreadConstraints + if wantsPod.DNSPolicy != nil { + ss.Spec.Template.Spec.DNSPolicy = *wantsPod.DNSPolicy + } + if wantsPod.DNSConfig != nil { + ss.Spec.Template.Spec.DNSConfig = wantsPod.DNSConfig + } // Update containers. updateContainer := func(overlay *tsapi.Container, base corev1.Container) corev1.Container { diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index e2cb2962fde48..ea28e77a14c36 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -87,6 +87,15 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { }, }, }, + DNSPolicy: ptr.To(corev1.DNSClusterFirstWithHostNet), + DNSConfig: &corev1.PodDNSConfig{ + Nameservers: []string{"1.1.1.1", "8.8.8.8"}, + Searches: []string{"example.com", "test.local"}, + Options: []corev1.PodDNSConfigOption{ + {Name: "ndots", Value: ptr.To("2")}, + {Name: "edns0"}, + }, + }, TailscaleContainer: &tsapi.Container{ SecurityContext: &corev1.SecurityContext{ Privileged: ptr.To(true), @@ -200,6 +209,8 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.InitContainers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" wantSS.Spec.Template.Spec.InitContainers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName + wantSS.Spec.Template.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet + wantSS.Spec.Template.Spec.DNSConfig = proxyClassAllOpts.Spec.StatefulSet.Pod.DNSConfig gotSS := 
applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { @@ -239,6 +250,8 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.Containers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.Containers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName + wantSS.Spec.Template.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet + wantSS.Spec.Template.Spec.DNSConfig = proxyClassAllOpts.Spec.StatefulSet.Pod.DNSConfig gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with all options to a StatefulSet for a userspace proxy (-got +want):\n%s", diff) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index b1c56c0687044..d75a21e37337a 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -537,6 +537,8 @@ _Appears in:_ | `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Proxy Pod's tolerations.
      By default Tailscale Kubernetes operator does not apply any
      tolerations.
      https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#topologyspreadconstraint-v1-core) array_ | Proxy Pod's topology spread constraints.
      By default Tailscale Kubernetes operator does not apply any topology spread constraints.
      https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | | | `priorityClassName` _string_ | PriorityClassName for the proxy Pod.
      By default Tailscale Kubernetes operator does not apply any priority class.
      https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | +| `dnsPolicy` _[DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#dnspolicy-v1-core)_ | DNSPolicy defines how DNS will be configured for the proxy Pod.
      By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default).
      https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy | | Enum: [ClusterFirstWithHostNet ClusterFirst Default None]
      | +| `dnsConfig` _[PodDNSConfig](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#poddnsconfig-v1-core)_ | DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy.
      When DNSPolicy is set to "None", DNSConfig must be specified.
      https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config | | | #### PortRange diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index ea4e6a27c49de..4026f90848ef1 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -303,6 +303,17 @@ type Pod struct { // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling // +optional PriorityClassName string `json:"priorityClassName,omitempty"` + // DNSPolicy defines how DNS will be configured for the proxy Pod. + // By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + // +kubebuilder:validation:Enum=ClusterFirstWithHostNet;ClusterFirst;Default;None + // +optional + DNSPolicy *corev1.DNSPolicy `json:"dnsPolicy,omitempty"` + // DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + // When DNSPolicy is set to "None", DNSConfig must be specified. 
+ // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + // +optional + DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"` } // +kubebuilder:validation:XValidation:rule="!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)",message="ServiceMonitor can only be enabled if metrics are enabled" diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 3fd64c28e7a12..5684fd5f82b4e 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -574,6 +574,16 @@ func (in *Pod) DeepCopyInto(out *Pod) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(corev1.DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(corev1.PodDNSConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. From bcd79b161acbf90dfcfe71cbde847a320a41b7fe Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 22:10:28 -0700 Subject: [PATCH 0462/1093] feature/featuretags: add option to turn off DNS Saves 328 KB (2.5%) off the minimal binary. For IoT devices that don't need MagicDNS (e.g. they don't make outbound connections), this provides a knob to disable all the DNS functionality. Rather than a massive refactor today, this uses constant false values as a deadcode sledgehammer, guided by shotizam to find the largest DNS functions which survived deadcode. A future refactor could make it so that the net/dns/resolver and publicdns packages don't even show up in the import graph (along with their imports) but really it's already pretty good looking with just these consts, so it's not at the top of my list to refactor it more soon. 
Also do the same in a few places with the ACME (cert) functionality, as I saw those while searching for DNS stuff. Updates #12614 Change-Id: I8e459f595c2fde68ca16503ff61c8ab339871f97 Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 4 +++ feature/buildfeatures/feature_dns_disabled.go | 13 +++++++++ feature/buildfeatures/feature_dns_enabled.go | 13 +++++++++ feature/feature.go | 7 ++++- feature/featuretags/featuretags.go | 4 +++ ipn/ipnlocal/local.go | 9 ++++++ ipn/ipnlocal/node_backend.go | 4 +++ ipn/ipnlocal/peerapi.go | 10 ++++++- ipn/localapi/localapi.go | 8 +++++ net/dns/manager.go | 29 ++++++++++++++++++- net/dns/manager_linux.go | 3 +- net/dns/osconfig.go | 5 ++++ net/dns/publicdns/publicdns.go | 5 ++++ net/dns/resolver/debug.go | 4 +++ net/dns/resolver/forwarder.go | 7 +++++ net/dns/resolver/tsdns.go | 23 +++++++++++++++ 16 files changed, 144 insertions(+), 4 deletions(-) create mode 100644 feature/buildfeatures/feature_dns_disabled.go create mode 100644 feature/buildfeatures/feature_dns_enabled.go diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 6d18e306f5dd4..31f41eac83d0c 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -30,6 +30,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/envknob" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" @@ -1580,6 +1581,9 @@ func (c *Direct) setDNSNoise(ctx context.Context, req *tailcfg.SetDNSRequest) er // SetDNS sends the SetDNSRequest request to the control plane server, // requesting a DNS record be created or updated. 
func (c *Direct) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) (err error) { + if !buildfeatures.HasACME { + return feature.ErrUnavailable + } metricSetDNS.Add(1) defer func() { if err != nil { diff --git a/feature/buildfeatures/feature_dns_disabled.go b/feature/buildfeatures/feature_dns_disabled.go new file mode 100644 index 0000000000000..30d7379cb9092 --- /dev/null +++ b/feature/buildfeatures/feature_dns_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_dns + +package buildfeatures + +// HasDNS is whether the binary was built with support for modular feature "MagicDNS and system DNS configuration support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dns" build tag. +// It's a const so it can be used for dead code elimination. +const HasDNS = false diff --git a/feature/buildfeatures/feature_dns_enabled.go b/feature/buildfeatures/feature_dns_enabled.go new file mode 100644 index 0000000000000..962f2596bf5c9 --- /dev/null +++ b/feature/buildfeatures/feature_dns_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_dns + +package buildfeatures + +// HasDNS is whether the binary was built with support for modular feature "MagicDNS and system DNS configuration support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dns" build tag. +// It's a const so it can be used for dead code elimination. +const HasDNS = true diff --git a/feature/feature.go b/feature/feature.go index 5976d7f5a5d0d..70f05d192ad3a 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -4,7 +4,12 @@ // Package feature tracks which features are linked into the binary. 
package feature -import "reflect" +import ( + "errors" + "reflect" +) + +var ErrUnavailable = errors.New("feature not included in this build") var in = map[string]bool{} diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 709d96eddf2d8..5c5352657b910 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -137,6 +137,10 @@ var Features = map[FeatureTag]FeatureMeta{ "portlist": {"PortList", "Optionally advertise listening service ports", nil}, "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, "posture": {"Posture", "Device posture checking support", nil}, + "dns": { + Sym: "DNS", + Desc: "MagicDNS and system DNS configuration support", + }, "netlog": { Sym: "NetLog", Desc: "Network flow logging support", diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c9fff50c3c971..3b55fd324b923 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -729,6 +729,9 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim // GetDNSOSConfig returns the base OS DNS configuration, as seen by the DNS manager. func (b *LocalBackend) GetDNSOSConfig() (dns.OSConfig, error) { + if !buildfeatures.HasDNS { + panic("unreachable") + } manager, ok := b.sys.DNSManager.GetOK() if !ok { return dns.OSConfig{}, errors.New("DNS manager not available") @@ -740,6 +743,9 @@ func (b *LocalBackend) GetDNSOSConfig() (dns.OSConfig, error) { // the raw DNS response and the resolvers that are were able to handle the query (the internal forwarder // may race multiple resolvers). 
func (b *LocalBackend) QueryDNS(name string, queryType dnsmessage.Type) (res []byte, resolvers []*dnstype.Resolver, err error) { + if !buildfeatures.HasDNS { + return nil, nil, feature.ErrUnavailable + } manager, ok := b.sys.DNSManager.GetOK() if !ok { return nil, nil, errors.New("DNS manager not available") @@ -6189,6 +6195,9 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK // This is the low-level interface. Other layers will provide more // friendly options to get HTTPS certs. func (b *LocalBackend) SetDNS(ctx context.Context, name, value string) error { + if !buildfeatures.HasACME { + return feature.ErrUnavailable + } req := &tailcfg.SetDNSRequest{ Version: 1, // TODO(bradfitz,maisem): use tailcfg.CurrentCapabilityVersion when using the Noise transport Type: "TXT", diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index a6e4b51f1bad5..b1ce9e07c404e 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -12,6 +12,7 @@ import ( "sync/atomic" "go4.org/netipx" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/dns" "tailscale.com/net/tsaddr" @@ -630,6 +631,9 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. if nm == nil { return nil } + if !buildfeatures.HasDNS { + return &dns.Config{} + } // If the current node's key is expired, then we don't program any DNS // configuration into the operating system. 
This ensures that if the diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 9d2b49a383810..fb0d80d188df1 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -26,6 +26,7 @@ import ( "golang.org/x/net/dns/dnsmessage" "golang.org/x/net/http/httpguts" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/net/netaddr" @@ -636,6 +637,10 @@ func (h *peerAPIHandler) handleServeMetrics(w http.ResponseWriter, r *http.Reque } func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.NotFound(w, r) + return + } if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) return @@ -649,6 +654,9 @@ func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Reques } func (h *peerAPIHandler) replyToDNSQueries() bool { + if !buildfeatures.HasDNS { + return false + } if h.isSelf { // If the peer is owned by the same user, just allow it // without further checks. @@ -700,7 +708,7 @@ func (h *peerAPIHandler) replyToDNSQueries() bool { // handleDNSQuery implements a DoH server (RFC 8484) over the peerapi. // It's not over HTTPS as the spec dictates, but rather HTTP-over-WireGuard. func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request) { - if h.ps.resolver == nil { + if !buildfeatures.HasDNS || h.ps.resolver == nil { http.Error(w, "DNS not wired up", http.StatusNotImplemented) return } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index ab556702d72d3..4045169423ac5 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1916,6 +1916,10 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { // serveDNSOSConfig serves the current system DNS configuration as a JSON object, if // supported by the OS. 
func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.NotFound(w, r) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return @@ -1959,6 +1963,10 @@ func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { // // The response if successful is a DNSQueryResponse JSON object. func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.NotFound(w, r) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return diff --git a/net/dns/manager.go b/net/dns/manager.go index 4a5c4925cf092..edf156eceebda 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -20,6 +20,7 @@ import ( "time" "tailscale.com/control/controlknobs" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/resolver" "tailscale.com/net/netmon" @@ -71,6 +72,9 @@ type Manager struct { // // knobs may be nil. func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, dialer *tsdial.Dialer, linkSel resolver.ForwardLinkSelector, knobs *controlknobs.Knobs, goos string) *Manager { + if !buildfeatures.HasDNS { + return nil + } if dialer == nil { panic("nil Dialer") } @@ -97,7 +101,12 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, } // Resolver returns the Manager's DNS Resolver. -func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } +func (m *Manager) Resolver() *resolver.Resolver { + if !buildfeatures.HasDNS { + return nil + } + return m.resolver +} // RecompileDNSConfig recompiles the last attempted DNS configuration, which has // the side effect of re-querying the OS's interface nameservers. This should be used @@ -111,6 +120,9 @@ func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } // // It returns [ErrNoDNSConfig] if [Manager.Set] has never been called. 
func (m *Manager) RecompileDNSConfig() error { + if !buildfeatures.HasDNS { + return nil + } m.mu.Lock() defer m.mu.Unlock() if m.config != nil { @@ -120,6 +132,9 @@ func (m *Manager) RecompileDNSConfig() error { } func (m *Manager) Set(cfg Config) error { + if !buildfeatures.HasDNS { + return nil + } m.mu.Lock() defer m.mu.Unlock() return m.setLocked(cfg) @@ -127,6 +142,9 @@ func (m *Manager) Set(cfg Config) error { // GetBaseConfig returns the current base OS DNS configuration as provided by the OSConfigurator. func (m *Manager) GetBaseConfig() (OSConfig, error) { + if !buildfeatures.HasDNS { + panic("unreachable") + } return m.os.GetBaseConfig() } @@ -559,6 +577,9 @@ func (m *Manager) HandleTCPConn(conn net.Conn, srcAddr netip.AddrPort) { } func (m *Manager) Down() error { + if !buildfeatures.HasDNS { + return nil + } m.ctxCancel() if err := m.os.Close(); err != nil { return err @@ -568,6 +589,9 @@ func (m *Manager) Down() error { } func (m *Manager) FlushCaches() error { + if !buildfeatures.HasDNS { + return nil + } return flushCaches() } @@ -577,6 +601,9 @@ func (m *Manager) FlushCaches() error { // // health must not be nil func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, interfaceName string) { + if !buildfeatures.HasDNS { + return + } oscfg, err := NewOSConfigurator(logf, health, policyclient.Get(), nil, interfaceName) if err != nil { logf("creating dns cleanup: %v", err) diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index b2f8197ae8ba9..4304df2616e98 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -16,6 +16,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/netaddr" "tailscale.com/types/logger" @@ -63,7 +64,7 @@ var ( // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. 
func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { - if distro.Get() == distro.JetKVM { + if !buildfeatures.HasDNS || distro.Get() == distro.JetKVM { return NewNoopManager() } diff --git a/net/dns/osconfig.go b/net/dns/osconfig.go index 842c5ac607853..af4c0f01fc75b 100644 --- a/net/dns/osconfig.go +++ b/net/dns/osconfig.go @@ -11,6 +11,7 @@ import ( "slices" "strings" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/dnsname" ) @@ -158,6 +159,10 @@ func (a OSConfig) Equal(b OSConfig) bool { // Fixes https://github.com/tailscale/tailscale/issues/5669 func (a OSConfig) Format(f fmt.State, verb rune) { logger.ArgWriter(func(w *bufio.Writer) { + if !buildfeatures.HasDNS { + w.WriteString(`{DNS-unlinked}`) + return + } w.WriteString(`{Nameservers:[`) for i, ns := range a.Nameservers { if i != 0 { diff --git a/net/dns/publicdns/publicdns.go b/net/dns/publicdns/publicdns.go index 0dbd3ab8200f1..b8a7f88091617 100644 --- a/net/dns/publicdns/publicdns.go +++ b/net/dns/publicdns/publicdns.go @@ -17,6 +17,8 @@ import ( "strconv" "strings" "sync" + + "tailscale.com/feature/buildfeatures" ) // dohOfIP maps from public DNS IPs to their DoH base URL. @@ -163,6 +165,9 @@ const ( // populate is called once to initialize the knownDoH and dohIPsOfBase maps. 
func populate() { + if !buildfeatures.HasDNS { + return + } // Cloudflare // https://developers.cloudflare.com/1.1.1.1/ip-addresses/ addDoH("1.1.1.1", "https://cloudflare-dns.com/dns-query") diff --git a/net/dns/resolver/debug.go b/net/dns/resolver/debug.go index da195d49d41e5..0f9b106bb2eb4 100644 --- a/net/dns/resolver/debug.go +++ b/net/dns/resolver/debug.go @@ -12,10 +12,14 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" ) func init() { + if !buildfeatures.HasDNS { + return + } health.RegisterDebugHandler("dnsfwd", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { n, _ := strconv.Atoi(r.FormValue("n")) if n <= 0 { diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 105229fb81880..a7a8932e812e9 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -27,6 +27,7 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/publicdns" "tailscale.com/net/dnscache" @@ -249,6 +250,9 @@ type forwarder struct { } func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, health *health.Tracker, knobs *controlknobs.Knobs) *forwarder { + if !buildfeatures.HasDNS { + return nil + } if netMon == nil { panic("nil netMon") } @@ -750,6 +754,9 @@ var optDNSForwardUseRoutes = envknob.RegisterOptBool("TS_DEBUG_DNS_FORWARD_USE_R // // See tailscale/tailscale#12027. 
func ShouldUseRoutes(knobs *controlknobs.Knobs) bool { + if !buildfeatures.HasDNS { + return false + } switch runtime.GOOS { case "android", "ios": // On mobile platforms with lower memory limits (e.g., 50MB on iOS), diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 33fa9c3c07d4c..93cbf3839c923 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -25,6 +25,8 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/netaddr" @@ -254,6 +256,9 @@ func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, h func (r *Resolver) TestOnlySetHook(hook func(Config)) { r.saveConfigForTests = hook } func (r *Resolver) SetConfig(cfg Config) error { + if !buildfeatures.HasDNS { + return nil + } if r.saveConfigForTests != nil { r.saveConfigForTests(cfg) } @@ -279,6 +284,9 @@ func (r *Resolver) SetConfig(cfg Config) error { // Close shuts down the resolver and ensures poll goroutines have exited. // The Resolver cannot be used again after Close is called. func (r *Resolver) Close() { + if !buildfeatures.HasDNS { + return + } select { case <-r.closed: return @@ -296,6 +304,9 @@ func (r *Resolver) Close() { const dnsQueryTimeout = 10 * time.Second func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from netip.AddrPort) ([]byte, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } metricDNSQueryLocal.Add(1) select { case <-r.closed: @@ -323,6 +334,9 @@ func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from net // GetUpstreamResolvers returns the resolvers that would be used to resolve // the given FQDN. 
func (r *Resolver) GetUpstreamResolvers(name dnsname.FQDN) []*dnstype.Resolver { + if !buildfeatures.HasDNS { + return nil + } return r.forwarder.GetUpstreamResolvers(name) } @@ -351,6 +365,9 @@ func parseExitNodeQuery(q []byte) *response { // and a nil error. // TODO: figure out if we even need an error result. func (r *Resolver) HandlePeerDNSQuery(ctx context.Context, q []byte, from netip.AddrPort, allowName func(name string) bool) (res []byte, err error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } metricDNSExitProxyQuery.Add(1) ch := make(chan packet, 1) @@ -427,6 +444,9 @@ var debugExitNodeDNSNetPkg = envknob.RegisterBool("TS_DEBUG_EXIT_NODE_DNS_NET_PK // response contains the pre-serialized response, which notably // includes the original question and its header. func handleExitNodeDNSQueryWithNetPkg(ctx context.Context, logf logger.Logf, resolver *net.Resolver, resp *response) (res []byte, err error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } logf = logger.WithPrefix(logf, "exitNodeDNSQueryWithNetPkg: ") if resp.Question.Class != dns.ClassINET { return nil, errors.New("unsupported class") @@ -1247,6 +1267,9 @@ func (r *Resolver) respondReverse(query []byte, name dnsname.FQDN, resp *respons // respond returns a DNS response to query if it can be resolved locally. // Otherwise, it returns errNotOurName. func (r *Resolver) respond(query []byte) ([]byte, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } parser := dnsParserPool.Get().(*dnsParser) defer dnsParserPool.Put(parser) From 3f5c560fd4566481379766ccf2d950c0c965b854 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 17:42:38 -0700 Subject: [PATCH 0463/1093] ipn/ipnlocal: drop h2c package, use net/http's support In Dec 2021 in d3d503d9977ed I had grand plans to make exit node DNS cheaper by using HTTP/2 over PeerAPI, at least on some platforms. 
I only did server-side support though and never made it to the client. In the ~4 years since, some things have happened: * Go 1.24 got support for http.Protocols (https://pkg.go.dev/net/http#Protocols) and doing UnencryptedHTTP2 ("HTTP2 with prior knowledge") * The old h2c upgrade mechanism was deprecated; see https://github.com/golang/go/issues/63565 and https://github.com/golang/go/issues/67816 * Go plans to deprecate x/net/http2 and move everything to the standard library. So this drops our use of the x/net/http2/h2c package and instead enables h2c (on all platforms now) using the standard library. This does mean we lose the deprecated h2c Upgrade support, but that's fine. If/when we do the h2c client support for ExitDNS, we'll have to probe the peer to see whether it supports it. Or have it reply with a header saying that future requests can use h2c. (It's tempting to use capver, but maybe people will disable that support anyway, so we should discover it at runtime instead.) Also do the same in the sessionrecording package.
Updates #17305 Change-Id: If323f5ef32486effb18ed836888aa05c0efb701e Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 3 +-- cmd/tailscaled/depaware-min.txt | 3 +-- cmd/tailscaled/depaware-minbox.txt | 3 +-- cmd/tailscaled/depaware.txt | 3 +-- cmd/tsidp/depaware.txt | 3 +-- ipn/ipnlocal/peerapi.go | 12 ++++-------- ipn/ipnlocal/peerapi_h2c.go | 20 -------------------- sessionrecording/connect.go | 11 +++++------ tsnet/depaware.txt | 3 +-- 9 files changed, 15 insertions(+), 46 deletions(-) delete mode 100644 ipn/ipnlocal/peerapi_h2c.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 1fd3c76304e09..b2fe54d6a0ff6 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -910,8 +910,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from k8s.io/apimachinery/pkg/util/net+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index ee66d77006f92..83fb32b21ecb0 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -220,8 +220,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from tailscale.com/control/controlclient+ 
golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 86e75660ad8c7..f9429c8608796 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -247,8 +247,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index a5ae214a0f1fc..24c619a2c967c 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -500,8 +500,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index b6e794f8c2220..d933f3249fc31 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -338,8 +338,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by 
github.com/tailscale/depawar golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index fb0d80d188df1..bd542e0f08b31 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -42,10 +42,6 @@ import ( var initListenConfig func(*net.ListenConfig, netip.Addr, *netmon.State, string) error -// addH2C is non-nil on platforms where we want to add H2C -// ("cleartext" HTTP/2) support to the peerAPI. -var addH2C func(*http.Server) - // peerDNSQueryHandler is implemented by tsdns.Resolver. 
type peerDNSQueryHandler interface { HandlePeerDNSQuery(context.Context, []byte, netip.AddrPort, func(name string) bool) (res []byte, err error) @@ -195,11 +191,11 @@ func (pln *peerAPIListener) ServeConn(src netip.AddrPort, c net.Conn) { peerUser: peerUser, } httpServer := &http.Server{ - Handler: h, - } - if addH2C != nil { - addH2C(httpServer) + Handler: h, + Protocols: new(http.Protocols), } + httpServer.Protocols.SetHTTP1(true) + httpServer.Protocols.SetUnencryptedHTTP2(true) // over WireGuard; "unencrypted" means no TLS go httpServer.Serve(netutil.NewOneConnListener(c, nil)) } diff --git a/ipn/ipnlocal/peerapi_h2c.go b/ipn/ipnlocal/peerapi_h2c.go deleted file mode 100644 index fbfa8639808ae..0000000000000 --- a/ipn/ipnlocal/peerapi_h2c.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ios && !android && !js - -package ipnlocal - -import ( - "net/http" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/h2c" -) - -func init() { - addH2C = func(s *http.Server) { - h2s := &http2.Server{} - s.Handler = h2c.NewHandler(s.Handler, h2s) - } -} diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index dc697d071dad2..ccb7e5fd95e4d 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -7,7 +7,6 @@ package sessionrecording import ( "context" - "crypto/tls" "encoding/json" "errors" "fmt" @@ -19,7 +18,6 @@ import ( "sync/atomic" "time" - "golang.org/x/net/http2" "tailscale.com/net/netx" "tailscale.com/tailcfg" "tailscale.com/util/httpm" @@ -312,14 +310,15 @@ func clientHTTP1(dialCtx context.Context, dial netx.DialFunc) *http.Client { // requests (HTTP/2 over plaintext). Unfortunately the same client does not // work for HTTP/1 so we need to split these up. 
func clientHTTP2(dialCtx context.Context, dial netx.DialFunc) *http.Client { + var p http.Protocols + p.SetUnencryptedHTTP2(true) return &http.Client{ - Transport: &http2.Transport{ - // Allow "http://" scheme in URLs. - AllowHTTP: true, + Transport: &http.Transport{ + Protocols: &p, // Pretend like we're using TLS, but actually use the provided // DialFunc underneath. This is necessary to convince the transport // to actually dial. - DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { + DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) { perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout) defer cancel() go func() { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 0644a0692c7f4..6e627f6f7fcfd 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -331,8 +331,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - LDW golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ From 2c956e30bea76678e7c2ec1204f2be398a64e94d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 17:57:04 -0700 Subject: [PATCH 0464/1093] ipn/ipnlocal: proxy h2c grpc using net/http.Transport instead of x/net/http2 (Kinda related: #17351) Updates #17305 Change-Id: I47df2612732a5713577164e74652bc9fa3cd14b3 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/serve.go | 22 +++++----- ipn/ipnlocal/serve_test.go | 88 +++++++++++++++++++++++++++++++++++++- 2 files changed, 98 
insertions(+), 12 deletions(-) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index dc41424042ee9..3c967fd1e6403 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -34,7 +34,6 @@ import ( "unicode/utf8" "go4.org/mem" - "golang.org/x/net/http2" "tailscale.com/ipn" "tailscale.com/net/netutil" "tailscale.com/syncs" @@ -761,8 +760,8 @@ type reverseProxy struct { insecure bool backend string lb *LocalBackend - httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends - h2cTransport lazy.SyncValue[*http2.Transport] // transport for h2c backends + httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends + h2cTransport lazy.SyncValue[*http.Transport] // transport for h2c backends // closed tracks whether proxy is closed/currently closing. closed atomic.Bool } @@ -770,9 +769,7 @@ type reverseProxy struct { // close ensures that any open backend connections get closed. func (rp *reverseProxy) close() { rp.closed.Store(true) - if h2cT := rp.h2cTransport.Get(func() *http2.Transport { - return nil - }); h2cT != nil { + if h2cT := rp.h2cTransport.Get(func() *http.Transport { return nil }); h2cT != nil { h2cT.CloseIdleConnections() } if httpTransport := rp.httpTransport.Get(func() *http.Transport { @@ -843,14 +840,17 @@ func (rp *reverseProxy) getTransport() *http.Transport { // getH2CTransport returns the Transport used for GRPC requests to the backend. // The Transport gets created lazily, at most once. 
-func (rp *reverseProxy) getH2CTransport() *http2.Transport { - return rp.h2cTransport.Get(func() *http2.Transport { - return &http2.Transport{ - AllowHTTP: true, - DialTLSContext: func(ctx context.Context, network string, addr string, _ *tls.Config) (net.Conn, error) { +func (rp *reverseProxy) getH2CTransport() http.RoundTripper { + return rp.h2cTransport.Get(func() *http.Transport { + var p http.Protocols + p.SetUnencryptedHTTP2(true) + tr := &http.Transport{ + Protocols: &p, + DialTLSContext: func(ctx context.Context, network string, addr string) (net.Conn, error) { return rp.lb.dialer.SystemDial(ctx, "tcp", rp.url.Host) }, } + return tr }) } diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index a081ed27bd3e4..b4461d12f2ad0 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -15,6 +15,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "net/http" "net/http/httptest" "net/netip" @@ -881,7 +882,7 @@ func mustCreateURL(t *testing.T, u string) url.URL { func newTestBackend(t *testing.T, opts ...any) *LocalBackend { var logf logger.Logf = logger.Discard - const debug = true + const debug = false if debug { logf = logger.WithPrefix(tstest.WhileTestRunningLogger(t), "... 
") } @@ -1085,3 +1086,88 @@ func TestEncTailscaleHeaderValue(t *testing.T) { } } } + +func TestServeGRPCProxy(t *testing.T) { + const msg = "some-response\n" + backend := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Path-Was", r.RequestURI) + w.Header().Set("Proto-Was", r.Proto) + io.WriteString(w, msg) + })) + backend.EnableHTTP2 = true + backend.Config.Protocols = new(http.Protocols) + backend.Config.Protocols.SetHTTP1(true) + backend.Config.Protocols.SetUnencryptedHTTP2(true) + backend.Start() + defer backend.Close() + + backendURL := must.Get(url.Parse(backend.URL)) + + lb := newTestBackend(t) + rp := &reverseProxy{ + logf: t.Logf, + url: backendURL, + backend: backend.URL, + lb: lb, + } + + req := func(method, urlStr string, opt ...any) *http.Request { + req := httptest.NewRequest(method, urlStr, nil) + for _, o := range opt { + switch v := o.(type) { + case int: + req.ProtoMajor = v + case string: + req.Header.Set("Content-Type", v) + default: + panic(fmt.Sprintf("unsupported option type %T", v)) + } + } + return req + } + + tests := []struct { + name string + req *http.Request + wantPath string + wantProto string + wantBody string + }{ + { + name: "non-gRPC", + req: req("GET", "http://foo/bar"), + wantPath: "/bar", + wantProto: "HTTP/1.1", + }, + { + name: "gRPC-but-not-http2", + req: req("GET", "http://foo/bar", "application/grpc"), + wantPath: "/bar", + wantProto: "HTTP/1.1", + }, + { + name: "gRPC--http2", + req: req("GET", "http://foo/bar", 2, "application/grpc"), + wantPath: "/bar", + wantProto: "HTTP/2.0", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rec := httptest.NewRecorder() + rp.ServeHTTP(rec, tt.req) + + res := rec.Result() + got := must.Get(io.ReadAll(res.Body)) + if got, want := res.Header.Get("Path-Was"), tt.wantPath; want != got { + t.Errorf("Path-Was %q, want %q", got, want) + } + if got, want := res.Header.Get("Proto-Was"), tt.wantProto; want 
!= got { + t.Errorf("Proto-Was %q, want %q", got, want) + } + if string(got) != msg { + t.Errorf("got body %q, want %q", got, msg) + } + }) + } +} From 1803226945e2503bdd446a5054fc920853328c27 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 08:30:13 -0700 Subject: [PATCH 0465/1093] net/tstun: fix typo in doc Updates #cleanup Change-Id: Icaca974237cf678f3e036b1dfdd2f2e5082483db Signed-off-by: Brad Fitzpatrick --- net/tstun/tun.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tstun/tun.go b/net/tstun/tun.go index bfdaddf58b283..2891e9af4abf8 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -21,7 +21,7 @@ import ( "tailscale.com/types/logger" ) -// CrateTAP is the hook set by feature/tap. +// CreateTAP is the hook set by feature/tap. var CreateTAP feature.Hook[func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error)] // modprobeTunHook is a Linux-specific hook to run "/sbin/modprobe tun". From 9b997c8f2f96454f5771c4ec4c835e2334f93bb6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 08:21:02 -0700 Subject: [PATCH 0466/1093] feature/tpm: don't log to stderr in tests Fixes #17336 Change-Id: I7d2be4e8acf59116c57ce26049a6a5baa8f32436 Signed-off-by: Brad Fitzpatrick --- feature/tpm/tpm.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index e4c2b29e95971..b700637e65a15 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -23,6 +23,7 @@ import ( "github.com/google/go-tpm/tpm2/transport" "golang.org/x/crypto/nacl/secretbox" "tailscale.com/atomicfile" + "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -31,6 +32,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/testenv" ) var infoOnce = sync.OnceValue(info) @@ -49,13 +51,20 @@ func init() { } } +var verboseTPM = 
envknob.RegisterBool("TS_DEBUG_TPM") + func info() *tailcfg.TPMInfo { + logf := logger.Discard + if !testenv.InTest() || verboseTPM() { + logf = log.New(log.Default().Writer(), "TPM: ", 0).Printf + } + tpm, err := open() if err != nil { - log.Printf("TPM: error opening: %v", err) + logf("error opening: %v", err) return nil } - log.Printf("TPM: successfully opened") + logf("successfully opened") defer tpm.Close() info := new(tailcfg.TPMInfo) @@ -84,12 +93,12 @@ func info() *tailcfg.TPMInfo { PropertyCount: 1, }.Execute(tpm) if err != nil { - log.Printf("TPM: GetCapability %v: %v", cap.prop, err) + logf("GetCapability %v: %v", cap.prop, err) continue } props, err := resp.CapabilityData.Data.TPMProperties() if err != nil { - log.Printf("TPM: GetCapability %v: %v", cap.prop, err) + logf("GetCapability %v: %v", cap.prop, err) continue } if len(props.TPMProperty) == 0 { @@ -97,6 +106,7 @@ func info() *tailcfg.TPMInfo { } cap.apply(info, props.TPMProperty[0].Value) } + logf("successfully read all properties") return info } From 442a3a779d29f78ba03cbd61509824f21c90cc59 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 09:12:42 -0700 Subject: [PATCH 0467/1093] feature, net/tshttpproxy: pull out support for using proxies as a feature Saves 139 KB. Also Synology support, which I saw had its own large-ish proxy parsing support on Linux, but support for proxies without Synology proxy support is reasonable, so I pulled that out as its own thing. 
Updates #12614 Change-Id: I22de285a3def7be77fdcf23e2bec7c83c9655593 Signed-off-by: Brad Fitzpatrick --- client/web/web.go | 5 ++++ clientupdate/distsign/distsign.go | 4 +-- cmd/derper/depaware.txt | 11 +++----- cmd/k8s-operator/depaware.txt | 4 ++- cmd/tailscale/cli/configure-synology-cert.go | 2 +- cmd/tailscale/cli/debug.go | 14 ++++------- cmd/tailscale/depaware.txt | 4 ++- cmd/tailscaled/debug.go | 19 ++++++++++---- cmd/tailscaled/depaware-min.txt | 4 +-- cmd/tailscaled/depaware-minbox.txt | 4 +-- cmd/tailscaled/depaware.txt | 4 ++- cmd/tailscaled/deps_test.go | 13 ++++++++++ cmd/tailscaled/proxy.go | 6 +++-- cmd/tsidp/depaware.txt | 4 ++- control/controlclient/direct.go | 9 ++++--- control/controlhttp/client.go | 13 +++++++--- derp/derphttp/derphttp_client.go | 23 +++++++++++------ .../feature_outboundproxy_disabled.go | 2 +- .../feature_outboundproxy_enabled.go | 2 +- .../feature_synology_disabled.go | 13 ++++++++++ .../buildfeatures/feature_synology_enabled.go | 13 ++++++++++ .../feature_useproxy_disabled.go | 13 ++++++++++ .../buildfeatures/feature_useproxy_enabled.go | 13 ++++++++++ feature/condregister/condregister.go | 10 +++++--- feature/condregister/useproxy/doc.go | 6 +++++ feature/condregister/useproxy/useproxy.go | 8 ++++++ feature/feature.go | 8 +++++- feature/featuretags/featuretags.go | 12 +++++++-- feature/hooks.go | 25 +++++++++++++++++++ feature/useproxy/useproxy.go | 18 +++++++++++++ ipn/ipnlocal/cert.go | 3 ++- logpolicy/logpolicy.go | 10 +++++--- net/dnsfallback/dnsfallback.go | 4 +-- net/netmon/interfaces_windows.go | 8 +++++- net/netmon/state.go | 11 +++++--- net/tshttpproxy/tshttpproxy_linux.go | 3 ++- tsnet/depaware.txt | 4 ++- tsnet/tsnet.go | 1 + .../tailscaled_deps_test_darwin.go | 1 - .../tailscaled_deps_test_freebsd.go | 1 - .../integration/tailscaled_deps_test_linux.go | 1 - .../tailscaled_deps_test_openbsd.go | 1 - .../tailscaled_deps_test_windows.go | 1 - wgengine/netstack/netstack_userping.go | 5 ++-- wgengine/userspace.go | 6 
+++-- 45 files changed, 267 insertions(+), 79 deletions(-) create mode 100644 feature/buildfeatures/feature_synology_disabled.go create mode 100644 feature/buildfeatures/feature_synology_enabled.go create mode 100644 feature/buildfeatures/feature_useproxy_disabled.go create mode 100644 feature/buildfeatures/feature_useproxy_enabled.go create mode 100644 feature/condregister/useproxy/doc.go create mode 100644 feature/condregister/useproxy/useproxy.go create mode 100644 feature/useproxy/useproxy.go diff --git a/client/web/web.go b/client/web/web.go index 2421403c16ab0..dbd3d5df0be86 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -27,6 +27,7 @@ import ( "tailscale.com/envknob" "tailscale.com/envknob/featureknob" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -496,6 +497,10 @@ func (s *Server) authorizeRequest(w http.ResponseWriter, r *http.Request) (ok bo // Client using system-specific auth. switch distro.Get() { case distro.Synology: + if !buildfeatures.HasSynology { + // Synology support not built in. + return false + } authorized, _ := authorizeSynology(r) return authorized case distro.QNAP: diff --git a/clientupdate/distsign/distsign.go b/clientupdate/distsign/distsign.go index eba4b9267b119..270ee4c1f9ace 100644 --- a/clientupdate/distsign/distsign.go +++ b/clientupdate/distsign/distsign.go @@ -55,7 +55,7 @@ import ( "github.com/hdevalence/ed25519consensus" "golang.org/x/crypto/blake2s" - "tailscale.com/net/tshttpproxy" + "tailscale.com/feature" "tailscale.com/types/logger" "tailscale.com/util/httpm" "tailscale.com/util/must" @@ -330,7 +330,7 @@ func fetch(url string, limit int64) ([]byte, error) { // limit bytes. On success, the returned value is a BLAKE2s hash of the file. 
func (c *Client) download(ctx context.Context, url, dst string, limit int64) ([]byte, int64, error) { tr := http.DefaultTransport.(*http.Transport).Clone() - tr.Proxy = tshttpproxy.ProxyFromEnvironment + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() defer tr.CloseIdleConnections() hc := &http.Client{Transport: tr} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 08aa374d6f87a..8c122105f114e 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -2,16 +2,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 - W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ - W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate - W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus github.com/coder/websocket from tailscale.com/cmd/derper+ github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ @@ -89,7 +86,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/feature from 
tailscale.com/tsweb+ - tailscale.com/feature/buildfeatures from tailscale.com/feature + tailscale.com/feature/buildfeatures from tailscale.com/feature+ tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local @@ -113,7 +110,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/tlsdial from tailscale.com/derp/derphttp tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+ tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/net/wsconn from tailscale.com/cmd/derper tailscale.com/paths from tailscale.com/client/local @@ -146,7 +142,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/net/netmon+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - W tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy tailscale.com/util/ctxkey from tailscale.com/tsweb+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics @@ -195,7 +190,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa L golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2/hpack from net/http+ golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+ golang.org/x/net/internal/socks from golang.org/x/net/proxy diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index b2fe54d6a0ff6..f8ae3d2616059 100644 --- 
a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -701,9 +701,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -777,7 +779,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local diff --git a/cmd/tailscale/cli/configure-synology-cert.go b/cmd/tailscale/cli/configure-synology-cert.go index 6ceef33ca2ae9..b5168ef92d11f 100644 --- a/cmd/tailscale/cli/configure-synology-cert.go +++ b/cmd/tailscale/cli/configure-synology-cert.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !ts_omit_acme +//go:build linux && !ts_omit_acme && 
!ts_omit_synology package cli diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index c8a0d57c125b6..8d0357716804e 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -28,17 +28,17 @@ import ( "time" "github.com/peterbourgon/ff/v3/ffcli" - "golang.org/x/net/http/httpproxy" "golang.org/x/net/http2" "tailscale.com/client/tailscale/apitype" "tailscale.com/control/controlhttp" + "tailscale.com/feature" + _ "tailscale.com/feature/condregister/useproxy" "tailscale.com/hostinfo" "tailscale.com/internal/noiseconn" "tailscale.com/ipn" "tailscale.com/net/ace" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" - "tailscale.com/net/tshttpproxy" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" @@ -992,14 +992,10 @@ func runTS2021(ctx context.Context, args []string) error { if err != nil { return err } - envConf := httpproxy.FromEnvironment() - if *envConf == (httpproxy.Config{}) { - log.Printf("HTTP proxy env: (none)") - } else { - log.Printf("HTTP proxy env: %+v", envConf) + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + proxy, err := proxyFromEnv(&http.Request{URL: u}) + log.Printf("tshttpproxy.ProxyFromEnvironment = (%v, %v)", proxy, err) } - proxy, err := tshttpproxy.ProxyFromEnvironment(&http.Request{URL: u}) - log.Printf("tshttpproxy.ProxyFromEnvironment = (%v, %v)", proxy, err) } machinePrivate := key.NewMachine() var dialer net.Dialer diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 2df6007025c5a..9fb7b63ed172d 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -96,9 +96,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli + 
tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ @@ -130,7 +132,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index 85dd787c1b128..ebcbe54e08509 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -21,10 +21,11 @@ import ( "time" "tailscale.com/derp/derphttp" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/net/netmon" - "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/util/eventbus" @@ -124,9 +125,14 @@ func getURL(ctx context.Context, urlStr string) error { if err != nil { return fmt.Errorf("http.NewRequestWithContext: %v", err) } - proxyURL, err := tshttpproxy.ProxyFromEnvironment(req) - if err != nil { - return fmt.Errorf("tshttpproxy.ProxyFromEnvironment: %v", err) + var proxyURL *url.URL + if 
buildfeatures.HasUseProxy { + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + proxyURL, err = proxyFromEnv(req) + if err != nil { + return fmt.Errorf("tshttpproxy.ProxyFromEnvironment: %v", err) + } + } } log.Printf("proxy: %v", proxyURL) tr := &http.Transport{ @@ -135,7 +141,10 @@ func getURL(ctx context.Context, urlStr string) error { DisableKeepAlives: true, } if proxyURL != nil { - auth, err := tshttpproxy.GetAuthHeader(proxyURL) + var auth string + if f, ok := feature.HookProxyGetAuthHeader.GetOk(); ok { + auth, err = f(proxyURL) + } if err == nil && auth != "" { tr.ProxyConnectHeader.Set("Proxy-Authorization", auth) } diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 83fb32b21ecb0..7e994300bd7ef 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -57,6 +57,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscaled+ tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/cmd/tailscaled+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ @@ -110,7 +111,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ - tailscale.com/net/tshttpproxy from tailscale.com/cmd/tailscaled+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/omit from tailscale.com/ipn/conffile @@ -219,7 +219,7 @@ 
tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index f9429c8608796..d7f88c32c73e0 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -78,6 +78,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ + tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli+ tailscale.com/health from tailscale.com/cmd/tailscaled+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ @@ -133,7 +134,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ - tailscale.com/net/tshttpproxy from tailscale.com/cmd/tailscaled+ tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local @@ -246,7 +246,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/bpf from 
github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from net+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 24c619a2c967c..b1bb83d92d9b0 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -276,6 +276,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/clientupdate from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/doctor from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister @@ -289,6 +290,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/tpm from tailscale.com/feature/condregister + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ @@ -357,7 +359,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from 
tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay from tailscale.com/feature/relayserver tailscale.com/net/udprelay/endpoint from tailscale.com/feature/relayserver+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a41a08f9df479..89d9db79690f3 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -222,3 +222,16 @@ func TestOmitGRO(t *testing.T) { }, }.Check(t) } + +func TestOmitUseProxy(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_useproxy,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "tshttpproxy") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tailscaled/proxy.go b/cmd/tailscaled/proxy.go index 790b5e18ebe4d..85c3d91f9de96 100644 --- a/cmd/tailscaled/proxy.go +++ b/cmd/tailscaled/proxy.go @@ -17,10 +17,10 @@ import ( "net/http/httputil" "strings" + "tailscale.com/feature" "tailscale.com/net/proxymux" "tailscale.com/net/socks5" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/types/logger" ) @@ -104,7 +104,9 @@ func mkProxyStartFunc(socksListener, httpListener net.Listener) proxyStartFunc { }() addrs = append(addrs, socksListener.Addr().String()) } - tshttpproxy.SetSelfProxy(addrs...) + if set, ok := feature.HookProxySetSelfProxy.GetOk(); ok { + set(addrs...)
+ } } } diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index d933f3249fc31..033ff6570ea78 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -143,9 +143,11 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -205,7 +207,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 31f41eac83d0c..3a40aa6fd24bb 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -42,7 +42,6 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/tlsdial" "tailscale.com/net/tsdial" - 
"tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" "tailscale.com/tempfork/httprec" "tailscale.com/tka" @@ -275,8 +274,12 @@ func NewDirect(opts Options) (*Direct, error) { var interceptedDial *atomic.Bool if httpc == nil { tr := http.DefaultTransport.(*http.Transport).Clone() - tr.Proxy = tshttpproxy.ProxyFromEnvironment - tshttpproxy.SetTransportGetProxyConnectHeader(tr) + if buildfeatures.HasUseProxy { + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() + if f, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + f(tr) + } + } tr.TLSClientConfig = tlsdial.Config(opts.HealthTracker, tr.TLSClientConfig) var dialFunc netx.DialFunc dialFunc, interceptedDial = makeScreenTimeDetectingDialFunc(opts.Dialer.SystemDial) diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index da9590c4809cf..f1ee7a6f94cb2 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -39,6 +39,8 @@ import ( "tailscale.com/control/controlbase" "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/ace" "tailscale.com/net/dnscache" @@ -47,7 +49,6 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -81,7 +82,7 @@ func (a *Dialer) getProxyFunc() func(*http.Request) (*url.URL, error) { if a.proxyFunc != nil { return a.proxyFunc } - return tshttpproxy.ProxyFromEnvironment + return feature.HookProxyFromEnvironment.GetOrNil() } // httpsFallbackDelay is how long we'll wait for a.HTTPPort to work before @@ -463,8 +464,12 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad tr.Proxy = nil tr.DialContext = dialer } else { - tr.Proxy = a.getProxyFunc() - tshttpproxy.SetTransportGetProxyConnectHeader(tr) + if 
buildfeatures.HasUseProxy { + tr.Proxy = a.getProxyFunc() + if set, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + set(tr) + } + } tr.DialContext = dnscache.Dialer(dialer, dns) } // Disable HTTP2, since h2 can't do protocol switching. diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 57f008a1ae3fe..db56c4a44c682 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -32,6 +32,8 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derpconst" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dnscache" "tailscale.com/net/netmon" @@ -39,7 +41,6 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -734,8 +735,12 @@ func (c *Client) dialNode(ctx context.Context, n *tailcfg.DERPNode) (net.Conn, e Path: "/", // unused }, } - if proxyURL, err := tshttpproxy.ProxyFromEnvironment(proxyReq); err == nil && proxyURL != nil { - return c.dialNodeUsingProxy(ctx, n, proxyURL) + if buildfeatures.HasUseProxy { + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + if proxyURL, err := proxyFromEnv(proxyReq); err == nil && proxyURL != nil { + return c.dialNodeUsingProxy(ctx, n, proxyURL) + } + } } type res struct { @@ -865,10 +870,14 @@ func (c *Client) dialNodeUsingProxy(ctx context.Context, n *tailcfg.DERPNode, pr target := net.JoinHostPort(n.HostName, "443") var authHeader string - if v, err := tshttpproxy.GetAuthHeader(pu); err != nil { - c.logf("derphttp: error getting proxy auth header for %v: %v", proxyURL, err) - } else if v != "" { - authHeader = fmt.Sprintf("Proxy-Authorization: %s\r\n", v) + if buildfeatures.HasUseProxy { + if getAuthHeader, ok := feature.HookProxyGetAuthHeader.GetOk(); ok { + if v, err := getAuthHeader(pu); err != nil { 
+ c.logf("derphttp: error getting proxy auth header for %v: %v", proxyURL, err) + } else if v != "" { + authHeader = fmt.Sprintf("Proxy-Authorization: %s\r\n", v) + } + } } if _, err := fmt.Fprintf(proxyConn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n%s\r\n", target, target, authHeader); err != nil { diff --git a/feature/buildfeatures/feature_outboundproxy_disabled.go b/feature/buildfeatures/feature_outboundproxy_disabled.go index a84c24e6d0e0d..bf74db0600927 100644 --- a/feature/buildfeatures/feature_outboundproxy_disabled.go +++ b/feature/buildfeatures/feature_outboundproxy_disabled.go @@ -7,7 +7,7 @@ package buildfeatures -// HasOutboundProxy is whether the binary was built with support for modular feature "Outbound localhost HTTP/SOCK5 proxy support". +// HasOutboundProxy is whether the binary was built with support for modular feature "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale". // Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. // It's a const so it can be used for dead code elimination. const HasOutboundProxy = false diff --git a/feature/buildfeatures/feature_outboundproxy_enabled.go b/feature/buildfeatures/feature_outboundproxy_enabled.go index c306bbeb205bc..53bb99d5c6a79 100644 --- a/feature/buildfeatures/feature_outboundproxy_enabled.go +++ b/feature/buildfeatures/feature_outboundproxy_enabled.go @@ -7,7 +7,7 @@ package buildfeatures -// HasOutboundProxy is whether the binary was built with support for modular feature "Outbound localhost HTTP/SOCK5 proxy support". +// HasOutboundProxy is whether the binary was built with support for modular feature "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale". // Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. // It's a const so it can be used for dead code elimination. 
const HasOutboundProxy = true diff --git a/feature/buildfeatures/feature_synology_disabled.go b/feature/buildfeatures/feature_synology_disabled.go new file mode 100644 index 0000000000000..0cdf084c32d8e --- /dev/null +++ b/feature/buildfeatures/feature_synology_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_synology + +package buildfeatures + +// HasSynology is whether the binary was built with support for modular feature "Synology NAS integration (applies to Linux builds only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_synology" build tag. +// It's a const so it can be used for dead code elimination. +const HasSynology = false diff --git a/feature/buildfeatures/feature_synology_enabled.go b/feature/buildfeatures/feature_synology_enabled.go new file mode 100644 index 0000000000000..dde4123b61eb0 --- /dev/null +++ b/feature/buildfeatures/feature_synology_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_synology + +package buildfeatures + +// HasSynology is whether the binary was built with support for modular feature "Synology NAS integration (applies to Linux builds only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_synology" build tag. +// It's a const so it can be used for dead code elimination. +const HasSynology = true diff --git a/feature/buildfeatures/feature_useproxy_disabled.go b/feature/buildfeatures/feature_useproxy_disabled.go new file mode 100644 index 0000000000000..9f29a9820eb99 --- /dev/null +++ b/feature/buildfeatures/feature_useproxy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_useproxy + +package buildfeatures + +// HasUseProxy is whether the binary was built with support for modular feature "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useproxy" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseProxy = false diff --git a/feature/buildfeatures/feature_useproxy_enabled.go b/feature/buildfeatures/feature_useproxy_enabled.go new file mode 100644 index 0000000000000..9195f2fdce784 --- /dev/null +++ b/feature/buildfeatures/feature_useproxy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useproxy + +package buildfeatures + +// HasUseProxy is whether the binary was built with support for modular feature "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useproxy" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseProxy = true diff --git a/feature/condregister/condregister.go b/feature/condregister/condregister.go index 69e2b071cc19f..654483d1d7745 100644 --- a/feature/condregister/condregister.go +++ b/feature/condregister/condregister.go @@ -6,9 +6,13 @@ // to ensure all conditional features are registered. package condregister -// Portmapper is special in that the CLI also needs to link it in, -// so it's pulled out into its own package, rather than using a maybe_*.go -// file in condregister. import ( + // Portmapper is special in that the CLI also needs to link it in, + // so it's pulled out into its own package, rather than using a maybe_*.go + // file in condregister. 
_ "tailscale.com/feature/condregister/portmapper" + + // HTTP proxy support is also needed by the CLI, and tsnet, so it's its + // own package too. + _ "tailscale.com/feature/condregister/useproxy" ) diff --git a/feature/condregister/useproxy/doc.go b/feature/condregister/useproxy/doc.go new file mode 100644 index 0000000000000..1e8abb358fa83 --- /dev/null +++ b/feature/condregister/useproxy/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package useproxy registers support for using proxies +// if it's not disabled via the ts_omit_useproxy build tag. +package useproxy diff --git a/feature/condregister/useproxy/useproxy.go b/feature/condregister/useproxy/useproxy.go new file mode 100644 index 0000000000000..bda6e49c0bb95 --- /dev/null +++ b/feature/condregister/useproxy/useproxy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_useproxy + +package useproxy + +import _ "tailscale.com/feature/useproxy" diff --git a/feature/feature.go b/feature/feature.go index 70f05d192ad3a..0d383b398ab60 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -50,7 +50,8 @@ func (h *Hook[Func]) Set(f Func) { } // Get returns the hook function, or panics if it hasn't been set. -// Use IsSet to check if it's been set. +// Use IsSet to check if it's been set, or use GetOrNil if you're +// okay with a nil return value. func (h *Hook[Func]) Get() Func { if !h.ok { panic("Get on unset feature hook, without IsSet") @@ -64,6 +65,11 @@ func (h *Hook[Func]) GetOk() (f Func, ok bool) { return h.f, h.ok } +// GetOrNil returns the hook function or nil if it hasn't been set. +func (h *Hook[Func]) GetOrNil() Func { + return h.f +} + // Hooks is a slice of funcs. 
// // As opposed to a single Hook, this is meant to be used when diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 5c5352657b910..b85d1b9dc621b 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -121,7 +121,7 @@ var Features = map[FeatureTag]FeatureMeta{ "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, "outboundproxy": { Sym: "OutboundProxy", - Desc: "Outbound localhost HTTP/SOCK5 proxy support", + Desc: "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale", Deps: []FeatureTag{"netstack"}, }, "osrouter": { @@ -172,6 +172,10 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Tailscale SSH support", Deps: []FeatureTag{"dbus", "netstack"}, }, + "synology": { + Sym: "Synology", + Desc: "Synology NAS integration (applies to Linux builds only)", + }, "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, "systray": { Sym: "SysTray", @@ -182,7 +186,11 @@ var Features = map[FeatureTag]FeatureMeta{ "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, "tpm": {"TPM", "TPM support", nil}, - "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, + "useproxy": { + Sym: "UseProxy", + Desc: "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.", + }, + "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, "webclient": { Sym: "WebClient", Desc: "Web client support", Deps: []FeatureTag{"serve"}, diff --git a/feature/hooks.go b/feature/hooks.go index fc3971dda9dea..bc42bd8d97ba1 100644 --- a/feature/hooks.go +++ b/feature/hooks.go @@ -3,6 +3,11 @@ package feature +import ( + "net/http" + "net/url" +) + // HookCanAutoUpdate is a hook for the clientupdate package // to conditionally initialize. 
var HookCanAutoUpdate Hook[func() bool] @@ -15,3 +20,23 @@ func CanAutoUpdate() bool { } return false } + +// HookProxyFromEnvironment is a hook for feature/useproxy to register +// a function to use as http.ProxyFromEnvironment. +var HookProxyFromEnvironment Hook[func(*http.Request) (*url.URL, error)] + +// HookProxyInvalidateCache is a hook for feature/useproxy to register +// [tshttpproxy.InvalidateCache]. +var HookProxyInvalidateCache Hook[func()] + +// HookProxyGetAuthHeader is a hook for feature/useproxy to register +// [tshttpproxy.GetAuthHeader]. +var HookProxyGetAuthHeader Hook[func(*url.URL) (string, error)] + +// HookProxySetSelfProxy is a hook for feature/useproxy to register +// [tshttpproxy.SetSelfProxy]. +var HookProxySetSelfProxy Hook[func(...string)] + +// HookProxySetTransportGetProxyConnectHeader is a hook for feature/useproxy to register +// [tshttpproxy.SetTransportGetProxyConnectHeader]. +var HookProxySetTransportGetProxyConnectHeader Hook[func(*http.Transport)] diff --git a/feature/useproxy/useproxy.go b/feature/useproxy/useproxy.go new file mode 100644 index 0000000000000..a18e60577af85 --- /dev/null +++ b/feature/useproxy/useproxy.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package useproxy registers support for using system proxies. 
+package useproxy + +import ( + "tailscale.com/feature" + "tailscale.com/net/tshttpproxy" +) + +func init() { + feature.HookProxyFromEnvironment.Set(tshttpproxy.ProxyFromEnvironment) + feature.HookProxyInvalidateCache.Set(tshttpproxy.InvalidateCache) + feature.HookProxyGetAuthHeader.Set(tshttpproxy.GetAuthHeader) + feature.HookProxySetSelfProxy.Set(tshttpproxy.SetSelfProxy) + feature.HookProxySetTransportGetProxyConnectHeader.Set(tshttpproxy.SetTransportGetProxyConnectHeader) +} diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index bf85affa637ef..ab49976c8aeea 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -35,6 +35,7 @@ import ( "tailscale.com/atomicfile" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -73,7 +74,7 @@ func (b *LocalBackend) certDir() (string, error) { // As a workaround for Synology DSM6 not having a "var" directory, use the // app's "etc" directory (on a small partition) to hold certs at least. 
// See https://github.com/tailscale/tailscale/issues/4060#issuecomment-1186592251 - if d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 { + if buildfeatures.HasSynology && d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 { d = "/var/packages/Tailscale/etc" // base; we append "certs" below } if d == "" { diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index c802d481f9046..c1f3e553a168a 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -31,6 +31,7 @@ import ( "golang.org/x/term" "tailscale.com/atomicfile" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" @@ -44,7 +45,6 @@ import ( "tailscale.com/net/netns" "tailscale.com/net/netx" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/types/logger" @@ -870,8 +870,12 @@ func (opts TransportOptions) New() http.RoundTripper { tr.TLSClientConfig = opts.TLSClientConfig.Clone() } - tr.Proxy = tshttpproxy.ProxyFromEnvironment - tshttpproxy.SetTransportGetProxyConnectHeader(tr) + if buildfeatures.HasUseProxy { + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() + if set, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + set(tr) + } + } // We do our own zstd compression on uploads, and responses never contain any payload, // so don't send "Accept-Encoding: gzip" to save a few bytes on the wire, since there diff --git a/net/dnsfallback/dnsfallback.go b/net/dnsfallback/dnsfallback.go index 9843d46f91ab0..74b625970302b 100644 --- a/net/dnsfallback/dnsfallback.go +++ b/net/dnsfallback/dnsfallback.go @@ -26,11 +26,11 @@ import ( "time" "tailscale.com/atomicfile" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" 
"tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/slicesx" @@ -135,7 +135,7 @@ func bootstrapDNSMap(ctx context.Context, serverName string, serverIP netip.Addr dialer := netns.NewDialer(logf, netMon) tr := http.DefaultTransport.(*http.Transport).Clone() tr.DisableKeepAlives = true // This transport is meant to be used once. - tr.Proxy = tshttpproxy.ProxyFromEnvironment + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() tr.DialContext = func(ctx context.Context, netw, addr string) (net.Conn, error) { return dialer.DialContext(ctx, "tcp", net.JoinHostPort(serverIP.String(), "443")) } diff --git a/net/netmon/interfaces_windows.go b/net/netmon/interfaces_windows.go index 00b686e593b1e..d6625ead3cd05 100644 --- a/net/netmon/interfaces_windows.go +++ b/net/netmon/interfaces_windows.go @@ -13,6 +13,7 @@ import ( "golang.org/x/sys/windows" "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" + "tailscale.com/feature/buildfeatures" "tailscale.com/tsconst" ) @@ -22,7 +23,9 @@ const ( func init() { likelyHomeRouterIP = likelyHomeRouterIPWindows - getPAC = getPACWindows + if buildfeatures.HasUseProxy { + getPAC = getPACWindows + } } func likelyHomeRouterIPWindows() (ret netip.Addr, _ netip.Addr, ok bool) { @@ -244,6 +247,9 @@ const ( ) func getPACWindows() string { + if !buildfeatures.HasUseProxy { + return "" + } var res *uint16 r, _, e := detectAutoProxyConfigURL.Call( winHTTP_AUTO_DETECT_TYPE_DHCP|winHTTP_AUTO_DETECT_TYPE_DNS_A, diff --git a/net/netmon/state.go b/net/netmon/state.go index bd09607682bb4..cdb427d47340a 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -15,10 +15,11 @@ import ( "strings" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" - "tailscale.com/net/tshttpproxy" "tailscale.com/util/mak" ) @@ -501,13 +502,15 @@ func getState(optTSInterfaceName string) (*State, error) { } } - if 
s.AnyInterfaceUp() { + if buildfeatures.HasUseProxy && s.AnyInterfaceUp() { req, err := http.NewRequest("GET", LoginEndpointForProxyDetermination, nil) if err != nil { return nil, err } - if u, err := tshttpproxy.ProxyFromEnvironment(req); err == nil && u != nil { - s.HTTPProxy = u.String() + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + if u, err := proxyFromEnv(req); err == nil && u != nil { + s.HTTPProxy = u.String() + } } if getPAC != nil { s.PAC = getPAC() diff --git a/net/tshttpproxy/tshttpproxy_linux.go b/net/tshttpproxy/tshttpproxy_linux.go index b241c256d4798..7e086e4929bc7 100644 --- a/net/tshttpproxy/tshttpproxy_linux.go +++ b/net/tshttpproxy/tshttpproxy_linux.go @@ -9,6 +9,7 @@ import ( "net/http" "net/url" + "tailscale.com/feature/buildfeatures" "tailscale.com/version/distro" ) @@ -17,7 +18,7 @@ func init() { } func linuxSysProxyFromEnv(req *http.Request) (*url.URL, error) { - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { return synologyProxyFromConfigCached(req) } return nil, nil diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 6e627f6f7fcfd..858bb6d648419 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -139,9 +139,11 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ 
tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ @@ -201,7 +203,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/control/controlclient+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock tailscale.com/net/udprelay/status from tailscale.com/client/local diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 08f08281a28f0..42e4198a0c5fd 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -31,6 +31,7 @@ import ( "tailscale.com/envknob" _ "tailscale.com/feature/condregister/oauthkey" _ "tailscale.com/feature/condregister/portmapper" + _ "tailscale.com/feature/condregister/useproxy" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/internal/client/tailscale" diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 7a26300e56e0a..72615330d8970 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -36,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 7a26300e56e0a..72615330d8970 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -36,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ 
"tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 7a26300e56e0a..72615330d8970 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -36,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 7a26300e56e0a..72615330d8970 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -36,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index a5a0a428ffd3b..c2761d01949fe 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -46,7 +46,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" diff --git a/wgengine/netstack/netstack_userping.go b/wgengine/netstack/netstack_userping.go index ee635bd877dca..b35a6eca9e11b 100644 --- a/wgengine/netstack/netstack_userping.go +++ b/wgengine/netstack/netstack_userping.go @@ -13,6 +13,7 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" 
"tailscale.com/version/distro" ) @@ -20,7 +21,7 @@ import ( // CAP_NET_RAW from tailscaled's binary. var setAmbientCapsRaw func(*exec.Cmd) -var isSynology = runtime.GOOS == "linux" && distro.Get() == distro.Synology +var isSynology = runtime.GOOS == "linux" && buildfeatures.HasSynology && distro.Get() == distro.Synology // sendOutboundUserPing sends a non-privileged ICMP (or ICMPv6) ping to dstIP with the given timeout. func (ns *Impl) sendOutboundUserPing(dstIP netip.Addr, timeout time.Duration) error { @@ -61,7 +62,7 @@ func (ns *Impl) sendOutboundUserPing(dstIP netip.Addr, timeout time.Duration) er ping = "/bin/ping" } cmd := exec.Command(ping, "-c", "1", "-W", "3", dstIP.String()) - if isSynology && os.Getuid() != 0 { + if buildfeatures.HasSynology && isSynology && os.Getuid() != 0 { // On DSM7 we run as non-root and need to pass // CAP_NET_RAW if our binary has it. setAmbientCapsRaw(cmd) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 158a6d06f60d4..049abcf1709e4 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -23,6 +23,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/ipn/ipnstate" @@ -35,7 +36,6 @@ import ( "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/net/tstun" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -559,7 +559,9 @@ func (e *userspaceEngine) consumeEventbusTopics(cli *eventbus.Client) func(*even case <-cli.Done(): return case changeDelta := <-changeDeltaSub.Events(): - tshttpproxy.InvalidateCache() + if f, ok := feature.HookProxyInvalidateCache.GetOk(); ok { + f() + } e.linkChange(&changeDelta) } } From b9cdef18c04b48a52235af4eadcd9a3193cafb3c Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 12 Sep 2025 12:33:46 -0700 Subject: [PATCH 0468/1093] util/prompt: add a default and 
take default in non-interactive cases The Tailscale CLI is the primary configuration interface and as such it is used in scripts, container setups, and many other places that do not have a terminal available and should not be made to respond to prompts. The default is set to false where the "risky" API is being used by the CLI and true otherwise, this means that the `--yes` flags are only required under interactive runs and scripts do not need to be concerned with prompts or extra flags. Updates #19445 Signed-off-by: James Tucker --- cmd/tailscale/cli/network-lock.go | 2 +- cmd/tailscale/cli/risks.go | 2 +- cmd/tailscale/cli/serve_v2.go | 2 +- cmd/tailscale/cli/update.go | 2 +- util/prompt/prompt.go | 19 +++++++++++++++++-- 5 files changed, 21 insertions(+), 6 deletions(-) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index ec3b01ad61291..9b2f6fbdb0738 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -384,7 +384,7 @@ Removal of a signing key(s) without resigning nodes (--re-sign=false) will cause any nodes signed by the the given key(s) to be locked out of the Tailscale network. Proceed with caution. 
`) - if !prompt.YesNo("Are you sure you want to remove the signing key(s)?") { + if !prompt.YesNo("Are you sure you want to remove the signing key(s)?", true) { fmt.Printf("aborting removal of signing key(s)\n") os.Exit(0) } diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index dfde87f640a16..d4572842bf758 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -66,7 +66,7 @@ func presentRiskToUser(riskType, riskMessage, acceptedRisks string) error { outln(riskMessage) printf("To skip this warning, use --accept-risk=%s\n", riskType) - if prompt.YesNo("Continue?") { + if prompt.YesNo("Continue?", false) { return nil } diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 058d80649fd3a..8831db2a9e135 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -1086,7 +1086,7 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort u if len(mounts) > 1 { msg := fmt.Sprintf("Are you sure you want to delete %d handlers under port %s?", len(mounts), portStr) - if !e.yes && !prompt.YesNo(msg) { + if !e.yes && !prompt.YesNo(msg, true) { return nil } } diff --git a/cmd/tailscale/cli/update.go b/cmd/tailscale/cli/update.go index 7c0269f6a7687..7eb0dccace7a8 100644 --- a/cmd/tailscale/cli/update.go +++ b/cmd/tailscale/cli/update.go @@ -87,5 +87,5 @@ func confirmUpdate(ver string) bool { } msg := fmt.Sprintf("This will update Tailscale from %v to %v. Continue?", version.Short(), ver) - return prompt.YesNo(msg) + return prompt.YesNo(msg, true) } diff --git a/util/prompt/prompt.go b/util/prompt/prompt.go index 4e589ceb32b52..a6d86fb481769 100644 --- a/util/prompt/prompt.go +++ b/util/prompt/prompt.go @@ -6,19 +6,34 @@ package prompt import ( "fmt" + "os" "strings" + + "github.com/mattn/go-isatty" ) // YesNo takes a question and prompts the user to answer the // question with a yes or no. It appends a [y/n] to the message. 
-func YesNo(msg string) bool { - fmt.Print(msg + " [y/n] ") +// +// If there is no TTY on both Stdin and Stdout, assume that we're in a script +// and return the dflt result. +func YesNo(msg string, dflt bool) bool { + if !(isatty.IsTerminal(os.Stdin.Fd()) && isatty.IsTerminal(os.Stdout.Fd())) { + return dflt + } + if dflt { + fmt.Print(msg + " [Y/n] ") + } else { + fmt.Print(msg + " [y/N] ") + } var resp string fmt.Scanln(&resp) resp = strings.ToLower(resp) switch resp { case "y", "yes", "sure": return true + case "": + return dflt } return false } From bbb16e4e72c58d43aa1ee356cb974d669a0a02fe Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 10:55:42 -0700 Subject: [PATCH 0469/1093] drive: don't use regexp package in leaf types package Even with ts_omit_drive, the drive package is currently still imported for some types. So it should be light. But it was depending on the "regexp" package, which I'd like to remove from our minimal builds. Updates #12614 Change-Id: I5bf85d8eb15a739793723b1da11c370d3fcd2f32 Signed-off-by: Brad Fitzpatrick --- drive/remote.go | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/drive/remote.go b/drive/remote.go index 9aeead710ad01..2c6fba894dbff 100644 --- a/drive/remote.go +++ b/drive/remote.go @@ -9,7 +9,6 @@ import ( "bytes" "errors" "net/http" - "regexp" "strings" ) @@ -21,10 +20,6 @@ var ( ErrInvalidShareName = errors.New("Share names may only contain the letters a-z, underscore _, parentheses (), or spaces") ) -var ( - shareNameRegex = regexp.MustCompile(`^[a-z0-9_\(\) ]+$`) -) - // AllowShareAs reports whether sharing files as a specific user is allowed.
func AllowShareAs() bool { return !DisallowShareAs && doAllowShareAs() @@ -125,9 +120,26 @@ func NormalizeShareName(name string) (string, error) { // Trim whitespace name = strings.TrimSpace(name) - if !shareNameRegex.MatchString(name) { + if !validShareName(name) { return "", ErrInvalidShareName } return name, nil } + +func validShareName(name string) bool { + if name == "" { + return false + } + for _, r := range name { + if 'a' <= r && r <= 'z' || '0' <= r && r <= '9' { + continue + } + switch r { + case '_', ' ', '(', ')': + continue + } + return false + } + return true +} From ee034d48fccbedf0fff24f065cf59e3410441f03 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 09:53:55 -0700 Subject: [PATCH 0470/1093] feature/featuretags: add a catch-all "Debug" feature flag Saves 168 KB. Updates #12614 Change-Id: Iaab3ae3efc6ddc7da39629ef13e5ec44976952ba Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 11 + cmd/tailscaled/depaware-min.txt | 4 +- cmd/tailscaled/depaware-minbox.txt | 4 +- control/controlclient/direct.go | 2 +- .../buildfeatures/feature_debug_disabled.go | 13 + .../buildfeatures/feature_debug_enabled.go | 13 + feature/featuretags/featuretags.go | 1 + ipn/ipnlocal/c2n.go | 22 + ipn/ipnlocal/local.go | 20 +- ipn/localapi/debug.go | 465 ++++++++++++++++++ ipn/localapi/debugderp.go | 2 + ipn/localapi/localapi.go | 436 +--------------- ipn/localapi/syspolicy_api.go | 2 +- ipn/localapi/tailnetlock.go | 26 +- wgengine/magicsock/debughttp.go | 7 + 15 files changed, 573 insertions(+), 455 deletions(-) create mode 100644 feature/buildfeatures/feature_debug_disabled.go create mode 100644 feature/buildfeatures/feature_debug_enabled.go create mode 100644 ipn/localapi/debug.go diff --git a/client/local/local.go b/client/local/local.go index a3717ad776a2e..8da8f57e5acf8 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -31,6 +31,8 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/drive" 
"tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netutil" @@ -608,6 +610,9 @@ func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) er // the provided duration. If the duration is in the past, the debug logging // is disabled. func (lc *Client) SetComponentDebugLogging(ctx context.Context, component string, d time.Duration) error { + if !buildfeatures.HasDebug { + return feature.ErrUnavailable + } body, err := lc.send(ctx, "POST", fmt.Sprintf("/localapi/v0/component-debug-logging?component=%s&secs=%d", url.QueryEscape(component), int64(d.Seconds())), 200, nil) @@ -862,6 +867,9 @@ func (lc *Client) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Pref // GetDNSOSConfig returns the system DNS configuration for the current device. // That is, it returns the DNS configuration that the system would use if Tailscale weren't being used. func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } body, err := lc.get200(ctx, "/localapi/v0/dns-osconfig") if err != nil { return nil, err @@ -877,6 +885,9 @@ func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, err // It returns the raw DNS response bytes and the resolvers that were used to answer the query // (often just one, but can be more if we raced multiple resolvers). 
func (lc *Client) QueryDNS(ctx context.Context, name string, queryType string) (bytes []byte, resolvers []*dnstype.Resolver, err error) { + if !buildfeatures.HasDNS { + return nil, nil, feature.ErrUnavailable + } body, err := lc.get200(ctx, fmt.Sprintf("/localapi/v0/dns-query?name=%s&type=%s", url.QueryEscape(name), queryType)) if err != nil { return nil, nil, err diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 7e994300bd7ef..0fe1538fdaabf 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -106,7 +106,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/sockstats from tailscale.com/control/controlclient+ - tailscale.com/net/stun from tailscale.com/ipn/localapi+ + tailscale.com/net/stun from tailscale.com/net/netcheck+ tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ @@ -141,7 +141,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogtype from tailscale.com/net/connstats tailscale.com/types/netmap from tailscale.com/control/controlclient+ - tailscale.com/types/nettype from tailscale.com/ipn/localapi+ + tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index d7f88c32c73e0..1932e9791283d 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -129,7 
+129,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/sockstats from tailscale.com/control/controlclient+ - tailscale.com/net/stun from tailscale.com/ipn/localapi+ + tailscale.com/net/stun from tailscale.com/net/netcheck+ tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ @@ -166,7 +166,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogtype from tailscale.com/net/connstats tailscale.com/types/netmap from tailscale.com/control/controlclient+ - tailscale.com/types/nettype from tailscale.com/ipn/localapi+ + tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 3a40aa6fd24bb..54f2de1c93318 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1193,7 +1193,7 @@ func (c *Direct) handleDebugMessage(ctx context.Context, debug *tailcfg.Debug) e c.logf("exiting process with status %v per controlplane", *code) os.Exit(*code) } - if debug.DisableLogTail { + if buildfeatures.HasLogTail && debug.DisableLogTail { logtail.Disable() envknob.SetNoLogsNoSupport() } diff --git a/feature/buildfeatures/feature_debug_disabled.go b/feature/buildfeatures/feature_debug_disabled.go new file mode 100644 index 0000000000000..eb048c0826eb9 --- /dev/null +++ b/feature/buildfeatures/feature_debug_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) 
Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_debug + +package buildfeatures + +// HasDebug is whether the binary was built with support for modular feature "various debug support, for things that don't have or need their own more specific feature". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debug" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebug = false diff --git a/feature/buildfeatures/feature_debug_enabled.go b/feature/buildfeatures/feature_debug_enabled.go new file mode 100644 index 0000000000000..12a2700a45761 --- /dev/null +++ b/feature/buildfeatures/feature_debug_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_debug + +package buildfeatures + +// HasDebug is whether the binary was built with support for modular feature "various debug support, for things that don't have or need their own more specific feature". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debug" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDebug = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index b85d1b9dc621b..7cfc79f655618 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -97,6 +97,7 @@ var Features = map[FeatureTag]FeatureMeta{ "clientupdate": {"ClientUpdate", "Client auto-update support", nil}, "completion": {"Completion", "CLI shell completion", nil}, "dbus": {"DBus", "Linux DBus support", nil}, + "debug": {"Debug", "various debug support, for things that don't have or need their own more specific feature", nil}, "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, "debugportmapper": { Sym: "DebugPortMapper", diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 38c65fee885dc..f064628fcdbab 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -15,6 +15,8 @@ import ( "time" "tailscale.com/control/controlclient" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" @@ -130,6 +132,10 @@ func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Reque } func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } ctx := r.Context() if r.Method != httpm.POST && r.Method != httpm.GET { http.Error(w, "method not allowed", http.StatusMethodNotAllowed) @@ -190,20 +196,36 @@ func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Reques } func handleC2NDebugGoroutines(_ *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } w.Header().Set("Content-Type", "text/plain") w.Write(goroutines.ScrubbedGoroutineDump(true)) } func handleC2NDebugPrefs(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + 
if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } writeJSON(w, b.Prefs()) } func handleC2NDebugMetrics(_ *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } w.Header().Set("Content-Type", "text/plain") clientmetric.WritePrometheusExpositionFormat(w) } func handleC2NDebugComponentLogging(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } component := r.FormValue("component") secs, _ := strconv.Atoi(r.FormValue("secs")) if secs == 0 { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 3b55fd324b923..c3d7d3fb869eb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -557,12 +557,14 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.logf("[unexpected] failed to wire up PeerAPI port for engine %T", e) } - for _, component := range ipn.DebuggableComponents { - key := componentStateKey(component) - if ut, err := ipn.ReadStoreInt(pm.Store(), key); err == nil { - if until := time.Unix(ut, 0); until.After(b.clock.Now()) { - // conditional to avoid log spam at start when off - b.SetComponentDebugLogging(component, until) + if buildfeatures.HasDebug { + for _, component := range ipn.DebuggableComponents { + key := componentStateKey(component) + if ut, err := ipn.ReadStoreInt(pm.Store(), key); err == nil { + if until := time.Unix(ut, 0); until.After(b.clock.Now()) { + // conditional to avoid log spam at start when off + b.SetComponentDebugLogging(component, until) + } } } } @@ -666,6 +668,9 @@ func componentStateKey(component string) ipn.StateKey { // - magicsock // - sockstats func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Time) error { + if !buildfeatures.HasDebug { + 
return feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() @@ -790,6 +795,9 @@ func (b *LocalBackend) QueryDNS(name string, queryType dnsmessage.Type) (res []b // enabled until, or the zero time if component's time is not currently // enabled. func (b *LocalBackend) GetComponentDebugLogging(component string) time.Time { + if !buildfeatures.HasDebug { + return time.Time{} + } b.mu.Lock() defer b.mu.Unlock() diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go new file mode 100644 index 0000000000000..b3b919d31ede2 --- /dev/null +++ b/ipn/localapi/debug.go @@ -0,0 +1,465 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debug + +package localapi + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/netip" + "reflect" + "slices" + "strconv" + "sync" + "time" + + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" + "tailscale.com/ipn" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/util/httpm" +) + +func init() { + Register("component-debug-logging", (*Handler).serveComponentDebugLogging) + Register("debug", (*Handler).serveDebug) + Register("dev-set-state-store", (*Handler).serveDevSetStateStore) + Register("debug-bus-events", (*Handler).serveDebugBusEvents) + Register("debug-bus-graph", (*Handler).serveEventBusGraph) + Register("debug-derp-region", (*Handler).serveDebugDERPRegion) + Register("debug-dial-types", (*Handler).serveDebugDialTypes) + Register("debug-log", (*Handler).serveDebugLog) + Register("debug-packet-filter-matches", (*Handler).serveDebugPacketFilterMatches) + Register("debug-packet-filter-rules", (*Handler).serveDebugPacketFilterRules) + Register("debug-peer-endpoint-changes", (*Handler).serveDebugPeerEndpointChanges) +} + +func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "status access denied", http.StatusForbidden) + return + 
} + + ipStr := r.FormValue("ip") + if ipStr == "" { + http.Error(w, "missing 'ip' parameter", http.StatusBadRequest) + return + } + ip, err := netip.ParseAddr(ipStr) + if err != nil { + http.Error(w, "invalid IP", http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + chs, err := h.b.GetPeerEndpointChanges(r.Context(), ip) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + e := json.NewEncoder(w) + e.SetIndent("", "\t") + e.Encode(chs) +} + +func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + component := r.FormValue("component") + secs, _ := strconv.Atoi(r.FormValue("secs")) + err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second)) + var res struct { + Error string + } + if err != nil { + res.Error = err.Error() + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +func (h *Handler) serveDebugDialTypes(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug-dial-types access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + + ip := r.FormValue("ip") + port := r.FormValue("port") + network := r.FormValue("network") + + addr := ip + ":" + port + if _, err := netip.ParseAddrPort(addr); err != nil { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "invalid address %q: %v", addr, err) + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + + var bareDialer net.Dialer + + dialer := h.b.Dialer() + + var peerDialer net.Dialer + peerDialer.Control = dialer.PeerDialControlFunc() + + // Kick off a dial with each available dialer in parallel. 
+ dialers := []struct { + name string + dial func(context.Context, string, string) (net.Conn, error) + }{ + {"SystemDial", dialer.SystemDial}, + {"UserDial", dialer.UserDial}, + {"PeerDial", peerDialer.DialContext}, + {"BareDial", bareDialer.DialContext}, + } + type result struct { + name string + conn net.Conn + err error + } + results := make(chan result, len(dialers)) + + var wg sync.WaitGroup + for _, dialer := range dialers { + dialer := dialer // loop capture + + wg.Add(1) + go func() { + defer wg.Done() + conn, err := dialer.dial(ctx, network, addr) + results <- result{dialer.name, conn, err} + }() + } + + wg.Wait() + for range len(dialers) { + res := <-results + fmt.Fprintf(w, "[%s] connected=%v err=%v\n", res.name, res.conn != nil, res.err) + if res.conn != nil { + res.conn.Close() + } + } +} + +func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, "debug not supported in this build", http.StatusNotImplemented) + return + } + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + // The action is normally in a POST form parameter, but + // some actions (like "notify") want a full JSON body, so + // permit some to have their action in a header. 
+ var action string + switch v := r.Header.Get("Debug-Action"); v { + case "notify": + action = v + default: + action = r.FormValue("action") + } + var err error + switch action { + case "derp-set-homeless": + h.b.MagicConn().SetHomeless(true) + case "derp-unset-homeless": + h.b.MagicConn().SetHomeless(false) + case "rebind": + err = h.b.DebugRebind() + case "restun": + err = h.b.DebugReSTUN() + case "notify": + var n ipn.Notify + err = json.NewDecoder(r.Body).Decode(&n) + if err != nil { + break + } + h.b.DebugNotify(n) + case "notify-last-netmap": + h.b.DebugNotifyLastNetMap() + case "break-tcp-conns": + err = h.b.DebugBreakTCPConns() + case "break-derp-conns": + err = h.b.DebugBreakDERPConns() + case "force-netmap-update": + h.b.DebugForceNetmapUpdate() + case "control-knobs": + k := h.b.ControlKnobs() + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(k.AsDebugJSON()) + if err == nil { + return + } + case "pick-new-derp": + err = h.b.DebugPickNewDERP() + case "force-prefer-derp": + var n int + err = json.NewDecoder(r.Body).Decode(&n) + if err != nil { + break + } + h.b.DebugForcePreferDERP(n) + case "peer-relay-servers": + servers := h.b.DebugPeerRelayServers().Slice() + slices.SortFunc(servers, func(a, b netip.Addr) int { + return a.Compare(b) + }) + err = json.NewEncoder(w).Encode(servers) + if err == nil { + return + } + case "": + err = fmt.Errorf("missing parameter 'action'") + default: + err = fmt.Errorf("unknown action %q", action) + } + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} + +func (h *Handler) serveDevSetStateStore(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + if err := 
h.b.SetDevStateStore(r.FormValue("key"), r.FormValue("value")); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} + +func (h *Handler) serveDebugPacketFilterRules(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + nm := h.b.NetMap() + if nm == nil { + http.Error(w, "no netmap", http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + enc.Encode(nm.PacketFilterRules) +} + +func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + nm := h.b.NetMap() + if nm == nil { + http.Error(w, "no netmap", http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + enc.Encode(nm.PacketFilter) +} + +// debugEventError provides the JSON encoding of internal errors from event processing. +type debugEventError struct { + Error string +} + +// serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams +// events to the client. +func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { + // Require write access (~root) as the logs could contain something + // sensitive. 
+ if !h.PermitWrite { + http.Error(w, "event bus access denied", http.StatusForbidden) + return + } + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusNoContent) + return + } + + f, ok := w.(http.Flusher) + if !ok { + http.Error(w, "streaming unsupported", http.StatusInternalServerError) + return + } + + io.WriteString(w, `{"Event":"[event listener connected]\n"}`+"\n") + f.Flush() + + mon := bus.Debugger().WatchBus() + defer mon.Close() + + i := 0 + for { + select { + case <-r.Context().Done(): + fmt.Fprintf(w, `{"Event":"[event listener closed]\n"}`) + return + case <-mon.Done(): + return + case event := <-mon.Events(): + data := eventbus.DebugEvent{ + Count: i, + Type: reflect.TypeOf(event.Event).String(), + Event: event.Event, + From: event.From.Name(), + } + for _, client := range event.To { + data.To = append(data.To, client.Name()) + } + + if msg, err := json.Marshal(data); err != nil { + data.Event = debugEventError{Error: fmt.Sprintf( + "failed to marshal JSON for %T", event.Event, + )} + if errMsg, err := json.Marshal(data); err != nil { + fmt.Fprintf(w, + `{"Count": %d, "Event":"[ERROR] failed to marshal JSON for %T\n"}`, + i, event.Event) + } else { + w.Write(errMsg) + } + } else { + w.Write(msg) + } + f.Flush() + i++ + } + } +} + +// serveEventBusGraph taps into the event bus and dumps out the active graph of +// publishers and subscribers. It does not represent anything about the messages +// exchanged. 
+func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusPreconditionFailed) + return + } + + debugger := bus.Debugger() + clients := debugger.Clients() + + graph := map[string]eventbus.DebugTopic{} + + for _, client := range clients { + for _, pub := range debugger.PublishTypes(client) { + topic, ok := graph[pub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: pub.Name()} + } + topic.Publisher = client.Name() + graph[pub.Name()] = topic + } + for _, sub := range debugger.SubscribeTypes(client) { + topic, ok := graph[sub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: sub.Name()} + } + topic.Subscribers = append(topic.Subscribers, client.Name()) + graph[sub.Name()] = topic + } + } + + // The top level map is not really needed for the client, convert to a list. 
+ topics := eventbus.DebugTopics{} + for _, v := range graph { + topics.Topics = append(topics.Topics, v) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(topics) +} + +func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasLogTail { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } + if !h.PermitRead { + http.Error(w, "debug-log access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + defer h.b.TryFlushLogs() // kick off upload after we're done logging + + type logRequestJSON struct { + Lines []string + Prefix string + } + + var logRequest logRequestJSON + if err := json.NewDecoder(r.Body).Decode(&logRequest); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + prefix := logRequest.Prefix + if prefix == "" { + prefix = "debug-log" + } + logf := logger.WithPrefix(h.logf, prefix+": ") + + // We can write logs too fast for logtail to handle, even when + // opting-out of rate limits. Limit ourselves to at most one message + // per 20ms and a burst of 60 log lines, which should be fast enough to + // not block for too long but slow enough that we can upload all lines. 
+ logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now) + + for _, line := range logRequest.Lines { + logf("%s", line) + } + + w.WriteHeader(http.StatusNoContent) +} diff --git a/ipn/localapi/debugderp.go b/ipn/localapi/debugderp.go index 017b906922835..3edbc0856c8a3 100644 --- a/ipn/localapi/debugderp.go +++ b/ipn/localapi/debugderp.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_debug + package localapi import ( diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 4045169423ac5..3948b4293086a 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -7,7 +7,6 @@ package localapi import ( "bytes" "cmp" - "context" "encoding/json" "errors" "fmt" @@ -16,7 +15,6 @@ import ( "net/http" "net/netip" "net/url" - "reflect" "runtime" "slices" "strconv" @@ -80,18 +78,7 @@ var handler = map[string]LocalAPIHandler{ "check-prefs": (*Handler).serveCheckPrefs, "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, - "component-debug-logging": (*Handler).serveComponentDebugLogging, - "debug": (*Handler).serveDebug, - "debug-bus-events": (*Handler).serveDebugBusEvents, - "debug-bus-graph": (*Handler).serveEventBusGraph, - "debug-derp-region": (*Handler).serveDebugDERPRegion, - "debug-dial-types": (*Handler).serveDebugDialTypes, - "debug-log": (*Handler).serveDebugLog, - "debug-packet-filter-matches": (*Handler).serveDebugPacketFilterMatches, - "debug-packet-filter-rules": (*Handler).serveDebugPacketFilterRules, - "debug-peer-endpoint-changes": (*Handler).serveDebugPeerEndpointChanges, "derpmap": (*Handler).serveDERPMap, - "dev-set-state-store": (*Handler).serveDevSetStateStore, "dial": (*Handler).serveDial, "disconnect-control": (*Handler).disconnectControl, "dns-osconfig": (*Handler).serveDNSOSConfig, @@ -638,352 +625,6 @@ func (h *Handler) 
serveUserMetrics(w http.ResponseWriter, r *http.Request) { h.b.UserMetricsRegistry().Handler(w, r) } -func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "POST required", http.StatusMethodNotAllowed) - return - } - // The action is normally in a POST form parameter, but - // some actions (like "notify") want a full JSON body, so - // permit some to have their action in a header. - var action string - switch v := r.Header.Get("Debug-Action"); v { - case "notify": - action = v - default: - action = r.FormValue("action") - } - var err error - switch action { - case "derp-set-homeless": - h.b.MagicConn().SetHomeless(true) - case "derp-unset-homeless": - h.b.MagicConn().SetHomeless(false) - case "rebind": - err = h.b.DebugRebind() - case "restun": - err = h.b.DebugReSTUN() - case "notify": - var n ipn.Notify - err = json.NewDecoder(r.Body).Decode(&n) - if err != nil { - break - } - h.b.DebugNotify(n) - case "notify-last-netmap": - h.b.DebugNotifyLastNetMap() - case "break-tcp-conns": - err = h.b.DebugBreakTCPConns() - case "break-derp-conns": - err = h.b.DebugBreakDERPConns() - case "force-netmap-update": - h.b.DebugForceNetmapUpdate() - case "control-knobs": - k := h.b.ControlKnobs() - w.Header().Set("Content-Type", "application/json") - err = json.NewEncoder(w).Encode(k.AsDebugJSON()) - if err == nil { - return - } - case "pick-new-derp": - err = h.b.DebugPickNewDERP() - case "force-prefer-derp": - var n int - err = json.NewDecoder(r.Body).Decode(&n) - if err != nil { - break - } - h.b.DebugForcePreferDERP(n) - case "peer-relay-servers": - servers := h.b.DebugPeerRelayServers().Slice() - slices.SortFunc(servers, func(a, b netip.Addr) int { - return a.Compare(b) - }) - err = json.NewEncoder(w).Encode(servers) - if err == nil { - return - } - case "": - err = fmt.Errorf("missing parameter 'action'") - default: - 
err = fmt.Errorf("unknown action %q", action) - } - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "text/plain") - io.WriteString(w, "done\n") -} - -func (h *Handler) serveDevSetStateStore(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "POST required", http.StatusMethodNotAllowed) - return - } - if err := h.b.SetDevStateStore(r.FormValue("key"), r.FormValue("value")); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "text/plain") - io.WriteString(w, "done\n") -} - -func (h *Handler) serveDebugPacketFilterRules(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - nm := h.b.NetMap() - if nm == nil { - http.Error(w, "no netmap", http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - enc.Encode(nm.PacketFilterRules) -} - -func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - nm := h.b.NetMap() - if nm == nil { - http.Error(w, "no netmap", http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - enc.Encode(nm.PacketFilter) -} - -// EventError provides the JSON encoding of internal errors from event processing. -type EventError struct { - Error string -} - -// serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams -// events to the client. 
-func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { - // Require write access (~root) as the logs could contain something - // sensitive. - if !h.PermitWrite { - http.Error(w, "event bus access denied", http.StatusForbidden) - return - } - if r.Method != httpm.GET { - http.Error(w, "GET required", http.StatusMethodNotAllowed) - return - } - - bus, ok := h.LocalBackend().Sys().Bus.GetOK() - if !ok { - http.Error(w, "event bus not running", http.StatusNoContent) - return - } - - f, ok := w.(http.Flusher) - if !ok { - http.Error(w, "streaming unsupported", http.StatusInternalServerError) - return - } - - io.WriteString(w, `{"Event":"[event listener connected]\n"}`+"\n") - f.Flush() - - mon := bus.Debugger().WatchBus() - defer mon.Close() - - i := 0 - for { - select { - case <-r.Context().Done(): - fmt.Fprintf(w, `{"Event":"[event listener closed]\n"}`) - return - case <-mon.Done(): - return - case event := <-mon.Events(): - data := eventbus.DebugEvent{ - Count: i, - Type: reflect.TypeOf(event.Event).String(), - Event: event.Event, - From: event.From.Name(), - } - for _, client := range event.To { - data.To = append(data.To, client.Name()) - } - - if msg, err := json.Marshal(data); err != nil { - data.Event = EventError{Error: fmt.Sprintf( - "failed to marshal JSON for %T", event.Event, - )} - if errMsg, err := json.Marshal(data); err != nil { - fmt.Fprintf(w, - `{"Count": %d, "Event":"[ERROR] failed to marshal JSON for %T\n"}`, - i, event.Event) - } else { - w.Write(errMsg) - } - } else { - w.Write(msg) - } - f.Flush() - i++ - } - } -} - -// serveEventBusGraph taps into the event bus and dumps out the active graph of -// publishers and subscribers. It does not represent anything about the messages -// exchanged. 
-func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "GET required", http.StatusMethodNotAllowed) - return - } - - bus, ok := h.LocalBackend().Sys().Bus.GetOK() - if !ok { - http.Error(w, "event bus not running", http.StatusPreconditionFailed) - return - } - - debugger := bus.Debugger() - clients := debugger.Clients() - - graph := map[string]eventbus.DebugTopic{} - - for _, client := range clients { - for _, pub := range debugger.PublishTypes(client) { - topic, ok := graph[pub.Name()] - if !ok { - topic = eventbus.DebugTopic{Name: pub.Name()} - } - topic.Publisher = client.Name() - graph[pub.Name()] = topic - } - for _, sub := range debugger.SubscribeTypes(client) { - topic, ok := graph[sub.Name()] - if !ok { - topic = eventbus.DebugTopic{Name: sub.Name()} - } - topic.Subscribers = append(topic.Subscribers, client.Name()) - graph[sub.Name()] = topic - } - } - - // The top level map is not really needed for the client, convert to a list. 
- topics := eventbus.DebugTopics{} - for _, v := range graph { - topics.Topics = append(topics.Topics, v) - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(topics) -} - -func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - component := r.FormValue("component") - secs, _ := strconv.Atoi(r.FormValue("secs")) - err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second)) - var res struct { - Error string - } - if err != nil { - res.Error = err.Error() - } - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func (h *Handler) serveDebugDialTypes(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug-dial-types access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - - ip := r.FormValue("ip") - port := r.FormValue("port") - network := r.FormValue("network") - - addr := ip + ":" + port - if _, err := netip.ParseAddrPort(addr); err != nil { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "invalid address %q: %v", addr, err) - return - } - - ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) - defer cancel() - - var bareDialer net.Dialer - - dialer := h.b.Dialer() - - var peerDialer net.Dialer - peerDialer.Control = dialer.PeerDialControlFunc() - - // Kick off a dial with each available dialer in parallel. 
- dialers := []struct { - name string - dial func(context.Context, string, string) (net.Conn, error) - }{ - {"SystemDial", dialer.SystemDial}, - {"UserDial", dialer.UserDial}, - {"PeerDial", peerDialer.DialContext}, - {"BareDial", bareDialer.DialContext}, - } - type result struct { - name string - conn net.Conn - err error - } - results := make(chan result, len(dialers)) - - var wg sync.WaitGroup - for _, dialer := range dialers { - dialer := dialer // loop capture - - wg.Add(1) - go func() { - defer wg.Done() - conn, err := dialer.dial(ctx, network, addr) - results <- result{dialer.name, conn, err} - }() - } - - wg.Wait() - for range len(dialers) { - res := <-results - fmt.Fprintf(w, "[%s] connected=%v err=%v\n", res.name, res.conn != nil, res.err) - if res.conn != nil { - res.conn.Close() - } - } -} - // servePprofFunc is the implementation of Handler.servePprof, after auth, // for platforms where we want to link it in. var servePprofFunc func(http.ResponseWriter, *http.Request) @@ -1116,6 +757,10 @@ func (h *Handler) serveCheckUDPGROForwarding(w http.ResponseWriter, r *http.Requ } func (h *Handler) serveSetUDPGROForwarding(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasGRO { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if !h.PermitWrite { http.Error(w, "UDP GRO forwarding set access denied", http.StatusForbidden) return @@ -1149,34 +794,6 @@ func (h *Handler) serveStatus(w http.ResponseWriter, r *http.Request) { e.Encode(st) } -func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "status access denied", http.StatusForbidden) - return - } - - ipStr := r.FormValue("ip") - if ipStr == "" { - http.Error(w, "missing 'ip' parameter", http.StatusBadRequest) - return - } - ip, err := netip.ParseAddr(ipStr) - if err != nil { - http.Error(w, "invalid IP", http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", 
"application/json") - chs, err := h.b.GetPeerEndpointChanges(r.Context(), ip) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - e := json.NewEncoder(w) - e.SetIndent("", "\t") - e.Encode(chs) -} - // InUseOtherUserIPNStream reports whether r is a request for the watch-ipn-bus // handler. If so, it writes an ipn.Notify InUseOtherUser message to the user // and returns true. Otherwise it returns false, in which case it doesn't write @@ -1842,47 +1459,6 @@ func defBool(a string, def bool) bool { return v } -func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "debug-log access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - defer h.b.TryFlushLogs() // kick off upload after we're done logging - - type logRequestJSON struct { - Lines []string - Prefix string - } - - var logRequest logRequestJSON - if err := json.NewDecoder(r.Body).Decode(&logRequest); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - prefix := logRequest.Prefix - if prefix == "" { - prefix = "debug-log" - } - logf := logger.WithPrefix(h.logf, prefix+": ") - - // We can write logs too fast for logtail to handle, even when - // opting-out of rate limits. Limit ourselves to at most one message - // per 20ms and a burst of 60 log lines, which should be fast enough to - // not block for too long but slow enough that we can upload all lines. - logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now) - - for _, line := range logRequest.Lines { - logf("%s", line) - } - - w.WriteHeader(http.StatusNoContent) -} - // serveUpdateCheck returns the ClientVersion from Status, which contains // information on whether an update is available, and if so, what version, // *if* we support auto-updates on this platform. 
If we don't, this endpoint @@ -1917,7 +1493,7 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { // supported by the OS. func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { if !buildfeatures.HasDNS { - http.NotFound(w, r) + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) return } if r.Method != httpm.GET { @@ -1964,7 +1540,7 @@ func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { // The response if successful is a DNSQueryResponse JSON object. func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { if !buildfeatures.HasDNS { - http.NotFound(w, r) + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) return } if r.Method != httpm.GET { diff --git a/ipn/localapi/syspolicy_api.go b/ipn/localapi/syspolicy_api.go index a438d352b52e1..edb82e042f2ce 100644 --- a/ipn/localapi/syspolicy_api.go +++ b/ipn/localapi/syspolicy_api.go @@ -17,7 +17,7 @@ import ( ) func init() { - handler["policy/"] = (*Handler).servePolicy + Register("policy/", (*Handler).servePolicy) } func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { diff --git a/ipn/localapi/tailnetlock.go b/ipn/localapi/tailnetlock.go index 7971509384cf0..4baadb7339871 100644 --- a/ipn/localapi/tailnetlock.go +++ b/ipn/localapi/tailnetlock.go @@ -18,19 +18,19 @@ import ( ) func init() { - handler["tka/affected-sigs"] = (*Handler).serveTKAAffectedSigs - handler["tka/cosign-recovery-aum"] = (*Handler).serveTKACosignRecoveryAUM - handler["tka/disable"] = (*Handler).serveTKADisable - handler["tka/force-local-disable"] = (*Handler).serveTKALocalDisable - handler["tka/generate-recovery-aum"] = (*Handler).serveTKAGenerateRecoveryAUM - handler["tka/init"] = (*Handler).serveTKAInit - handler["tka/log"] = (*Handler).serveTKALog - handler["tka/modify"] = (*Handler).serveTKAModify - handler["tka/sign"] = (*Handler).serveTKASign - handler["tka/status"] = 
(*Handler).serveTKAStatus - handler["tka/submit-recovery-aum"] = (*Handler).serveTKASubmitRecoveryAUM - handler["tka/verify-deeplink"] = (*Handler).serveTKAVerifySigningDeeplink - handler["tka/wrap-preauth-key"] = (*Handler).serveTKAWrapPreauthKey + Register("tka/affected-sigs", (*Handler).serveTKAAffectedSigs) + Register("tka/cosign-recovery-aum", (*Handler).serveTKACosignRecoveryAUM) + Register("tka/disable", (*Handler).serveTKADisable) + Register("tka/force-local-disable", (*Handler).serveTKALocalDisable) + Register("tka/generate-recovery-aum", (*Handler).serveTKAGenerateRecoveryAUM) + Register("tka/init", (*Handler).serveTKAInit) + Register("tka/log", (*Handler).serveTKALog) + Register("tka/modify", (*Handler).serveTKAModify) + Register("tka/sign", (*Handler).serveTKASign) + Register("tka/status", (*Handler).serveTKAStatus) + Register("tka/submit-recovery-aum", (*Handler).serveTKASubmitRecoveryAUM) + Register("tka/verify-deeplink", (*Handler).serveTKAVerifySigningDeeplink) + Register("tka/wrap-preauth-key", (*Handler).serveTKAWrapPreauthKey) } func (h *Handler) serveTKAStatus(w http.ResponseWriter, r *http.Request) { diff --git a/wgengine/magicsock/debughttp.go b/wgengine/magicsock/debughttp.go index a0159d21e592f..9aecab74b4278 100644 --- a/wgengine/magicsock/debughttp.go +++ b/wgengine/magicsock/debughttp.go @@ -13,6 +13,8 @@ import ( "strings" "time" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" "tailscale.com/tstime/mono" "tailscale.com/types/key" @@ -24,6 +26,11 @@ import ( // /debug/magicsock) or via peerapi to a peer that's owned by the same // user (so they can e.g. inspect their phones). 
func (c *Conn) ServeHTTPDebug(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } + c.mu.Lock() defer c.mu.Unlock() From be6cfa00cb5090c0922949bf9d543688a49131d6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 11:25:10 -0700 Subject: [PATCH 0471/1093] util/eventbus: when ts_omit_debugeventbus is set, don't import tsweb I'm trying to remove the "regexp" and "regexp/syntax" packages from our minimal builds. But tsweb pulls in regexp (via net/http/pprof etc) and util/eventbus was importing the tsweb for no reason. Updates #12614 Change-Id: Ifa8c371ece348f1dbf80d6b251381f3ed39d5fbd Signed-off-by: Brad Fitzpatrick --- util/eventbus/debughttp_off.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/util/eventbus/debughttp_off.go b/util/eventbus/debughttp_off.go index 7d9fb327c494f..ed491f1f27ef1 100644 --- a/util/eventbus/debughttp_off.go +++ b/util/eventbus/debughttp_off.go @@ -5,9 +5,7 @@ package eventbus -import "tailscale.com/tsweb" - -func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { +func registerHTTPDebugger(d *Debugger, tsWebDebugHandler any) { // The event bus debugging UI uses html/template, which uses // reflection for method lookups. This forces the compiler to // retain a lot more code and information to make dynamic method From 840c7668e2e5eb5d3fa72913afc56544a3038fdd Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 30 Sep 2025 11:53:07 -0700 Subject: [PATCH 0472/1093] types/key: add IsZero method to HardwareAttestationKey (#17370) We will need this for unmarshaling node prefs: use the zero HardwareAttestationKey implementation when parsing and later check `IsZero` to see if anything was loaded. 
Updates #15830 Signed-off-by: Andrew Lytvynov --- feature/tpm/attestation.go | 2 ++ types/key/hardware_attestation.go | 1 + 2 files changed, 3 insertions(+) diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 4b3018569b426..92617f9954616 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -262,3 +262,5 @@ func (ak *attestationKey) Clone() key.HardwareAttestationKey { pub: ak.pub, } } + +func (ak *attestationKey) IsZero() bool { return !ak.loaded() } diff --git a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go index ac3914ab20896..9d4a21ee42706 100644 --- a/types/key/hardware_attestation.go +++ b/types/key/hardware_attestation.go @@ -32,6 +32,7 @@ type HardwareAttestationKey interface { json.Unmarshaler io.Closer Clone() HardwareAttestationKey + IsZero() bool } // HardwareAttestationPublicFromPlatformKey creates a HardwareAttestationPublic From 9386a101d88521325c460c4e56b092a801c07d1d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 11:54:16 -0700 Subject: [PATCH 0473/1093] cmd/tailscaled, ipn/localapi, util/eventbus: don't link in regexp when debug is omitted Saves 442 KB. Lock it with a new min test. 
Updates #12614 Change-Id: Ia7bf6f797b6cbf08ea65419ade2f359d390f8e91 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/debug.go | 21 ++++++++++-- cmd/tailscaled/depaware-min.txt | 20 ++++------- cmd/tailscaled/depaware-minbox.txt | 20 +++++------ cmd/tailscaled/deps_test.go | 55 +++++++++++++++++++++++------- cmd/tailscaled/tailscaled.go | 28 ++++++--------- ipn/localapi/pprof.go | 2 +- tsnet/depaware.txt | 8 ++--- util/eventbus/debug-demo/main.go | 4 +++ util/eventbus/debug.go | 4 --- util/eventbus/debughttp.go | 2 +- util/eventbus/debughttp_off.go | 14 ++------ 11 files changed, 100 insertions(+), 78 deletions(-) diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index ebcbe54e08509..96f98d9d6fa54 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.19 +//go:build !ts_omit_debug package main @@ -16,6 +16,7 @@ import ( "log" "net/http" "net/http/httptrace" + "net/http/pprof" "net/url" "os" "time" @@ -39,7 +40,23 @@ var debugArgs struct { portmap bool } -var debugModeFunc = debugMode // so it can be addressable +func init() { + debugModeFunc := debugMode // to be addressable + subCommands["debug"] = &debugModeFunc + + hookNewDebugMux.Set(newDebugMux) +} + +func newDebugMux() *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/debug/metrics", servePrometheusMetrics) + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + return mux +} func debugMode(args []string) error { fs := flag.NewFlagSet("debug", flag.ExitOnError) diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 0fe1538fdaabf..b779e8c1b2bd5 100644 --- a/cmd/tailscaled/depaware-min.txt +++ 
b/cmd/tailscaled/depaware-min.txt @@ -48,7 +48,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ - tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ + tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/drive from tailscale.com/ipn+ tailscale.com/envknob from tailscale.com/cmd/tailscaled+ @@ -58,7 +58,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister - tailscale.com/health from tailscale.com/cmd/tailscaled+ + tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient @@ -127,14 +127,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter - tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled tailscale.com/types/ipproto from tailscale.com/ipn+ - tailscale.com/types/key from 
tailscale.com/cmd/tailscaled+ + tailscale.com/types/key from tailscale.com/control/controlbase+ tailscale.com/types/lazy from tailscale.com/hostinfo+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ @@ -158,7 +157,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/cmd/tailscaled+ + tailscale.com/util/eventbus from tailscale.com/control/controlclient+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth @@ -326,7 +325,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de hash from crypto+ hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem - html from net/http/pprof+ + html from tailscale.com/ipn/ipnlocal+ internal/abi from hash/maphash+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -347,7 +346,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profile from net/http/pprof internal/profilerecord from runtime+ internal/race from internal/runtime/maps+ internal/reflectlite from context+ @@ -367,7 +365,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/syscall/execenv from os+ internal/syscall/unix from crypto/internal/sysrand+ internal/testlog from os - internal/trace/tracev2 from runtime+ + internal/trace/tracev2 from runtime internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ @@ -389,7 +387,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de net/http/internal from net/http net/http/internal/ascii from net/http net/http/internal/httpcommon from net/http - net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from crypto/x509+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ @@ -400,12 +397,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de path from io/fs+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from internal/profile+ - regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ - runtime/pprof from net/http/pprof+ - runtime/trace from net/http/pprof + runtime/pprof from tailscale.com/ipn/ipnlocal+ slices from crypto/tls+ sort from compress/flate+ strconv from compress/flate+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 1932e9791283d..20e1c791b5432 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -68,7 +68,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ - tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ + tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/drive from tailscale.com/ipn+ tailscale.com/envknob from tailscale.com/cmd/tailscaled+ @@ -79,7 +79,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli+ - tailscale.com/health from 
tailscale.com/cmd/tailscaled+ + tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli @@ -152,14 +152,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter - tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled tailscale.com/types/ipproto from tailscale.com/ipn+ - tailscale.com/types/key from tailscale.com/cmd/tailscaled+ + tailscale.com/types/key from tailscale.com/client/local+ tailscale.com/types/lazy from tailscale.com/hostinfo+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ @@ -184,7 +183,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/cmd/tailscaled+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth @@ -356,7 +355,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de hash/adler32 from compress/zlib hash/crc32 
from compress/gzip+ hash/maphash from go4.org/mem - html from net/http/pprof+ + html from tailscale.com/ipn/ipnlocal+ image from github.com/skip2/go-qrcode+ image/color from github.com/skip2/go-qrcode+ image/png from github.com/skip2/go-qrcode @@ -380,7 +379,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profile from net/http/pprof internal/profilerecord from runtime+ internal/race from internal/runtime/maps+ internal/reflectlite from context+ @@ -400,7 +398,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/syscall/execenv from os+ internal/syscall/unix from crypto/internal/sysrand+ internal/testlog from os - internal/trace/tracev2 from runtime+ + internal/trace/tracev2 from runtime internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ @@ -424,7 +422,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http - net/http/pprof from tailscale.com/cmd/tailscaled+ net/netip from crypto/x509+ net/textproto from golang.org/x/net/http/httpguts+ net/url from crypto/x509+ @@ -435,12 +432,11 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de path from io/fs+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from internal/profile+ + regexp from tailscale.com/clientupdate regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ - runtime/pprof from net/http/pprof+ - runtime/trace from net/http/pprof + runtime/pprof from tailscale.com/ipn/ipnlocal+ slices from crypto/tls+ sort from compress/flate+ strconv from compress/flate+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 
89d9db79690f3..fd5d318360ebb 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -4,9 +4,12 @@ package main import ( + "maps" + "slices" "strings" "testing" + "tailscale.com/feature/featuretags" "tailscale.com/tstest/deptest" ) @@ -90,19 +93,6 @@ func TestOmitDrive(t *testing.T) { }.Check(t) } -func TestOmitTailnetLock(t *testing.T) { - deptest.DepChecker{ - GOOS: "linux", - GOARCH: "amd64", - Tags: "ts_omit_tailnetlock,ts_include_cli", - OnDep: func(dep string) { - if strings.Contains(dep, "cbor") { - t.Errorf("unexpected dep with ts_omit_tailnetlock: %q", dep) - } - }, - }.Check(t) -} - func TestOmitPortmapper(t *testing.T) { deptest.DepChecker{ GOOS: "linux", @@ -235,3 +225,42 @@ func TestOmitUseProxy(t *testing.T) { }, }.Check(t) } + +func minTags() string { + var tags []string + for _, f := range slices.Sorted(maps.Keys(featuretags.Features)) { + if f.IsOmittable() { + tags = append(tags, f.OmitTag()) + } + } + return strings.Join(tags, ",") +} + +func TestMinTailscaledNoCLI(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: minTags(), + OnDep: func(dep string) { + if strings.Contains(dep, "regexp") { + t.Errorf("unexpected dep: %q", dep) + } + if strings.Contains(dep, "cbor") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} + +func TestMinTailscaledWithCLI(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: minTags() + ",ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "cbor") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 2b0eec4826946..48eefbea741a0 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -18,7 +18,6 @@ import ( "log" "net" "net/http" - "net/http/pprof" "os" "os/signal" "path/filepath" @@ -145,7 +144,6 @@ var ( var subCommands = map[string]*func([]string) error{ "install-system-daemon": 
&installSystemDaemon, "uninstall-system-daemon": &uninstallSystemDaemon, - "debug": &debugModeFunc, "be-child": &beChildFunc, } @@ -194,7 +192,9 @@ func main() { printVersion := false flag.IntVar(&args.verbose, "verbose", defaultVerbosity(), "log verbosity level; 0 is default, 1 or higher are increasingly verbose") flag.BoolVar(&args.cleanUp, "cleanup", false, "clean up system state and exit") - flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") + if buildfeatures.HasDebug { + flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") + } flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. 
Default: "+paths.DefaultTailscaledStateFile()) @@ -485,8 +485,8 @@ func run() (err error) { log.Printf("error in synology migration: %v", err) } - if args.debug != "" { - debugMux = newDebugMux() + if buildfeatures.HasDebug && args.debug != "" { + debugMux = hookNewDebugMux.Get()() } if f, ok := hookSetSysDrive.GetOk(); ok { @@ -550,7 +550,7 @@ func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, }() srv := ipnserver.New(logf, logID, sys.Bus.Get(), sys.NetMon.Get()) - if debugMux != nil { + if buildfeatures.HasDebug && debugMux != nil { debugMux.HandleFunc("/debug/ipn", srv.ServeHTMLStatus) } var lbErr syncs.AtomicValue[error] @@ -626,7 +626,7 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if onlyNetstack && !buildfeatures.HasNetstack { return nil, errors.New("userspace-networking support is not compiled in to this binary") } - if debugMux != nil { + if buildfeatures.HasDebug && debugMux != nil { if ms, ok := sys.MagicSock.GetOK(); ok { debugMux.HandleFunc("/debug/magicsock", ms.ServeHTTPDebug) } @@ -820,16 +820,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo return onlyNetstack, nil } -func newDebugMux() *http.ServeMux { - mux := http.NewServeMux() - mux.HandleFunc("/debug/metrics", servePrometheusMetrics) - mux.HandleFunc("/debug/pprof/", pprof.Index) - mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - mux.HandleFunc("/debug/pprof/profile", pprof.Profile) - mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - return mux -} +var hookNewDebugMux feature.Hook[func() *http.ServeMux] func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") @@ -838,6 +829,9 @@ func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { } func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { + if !buildfeatures.HasDebug { + return + } ln, err 
:= net.Listen("tcp", addr) if err != nil { log.Fatalf("debug server: %v", err) diff --git a/ipn/localapi/pprof.go b/ipn/localapi/pprof.go index 8c9429b31385a..9476f721fb1ce 100644 --- a/ipn/localapi/pprof.go +++ b/ipn/localapi/pprof.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android && !js +//go:build !ios && !android && !js && !ts_omit_debug // We don't include it on mobile where we're more memory constrained and // there's no CLI to get at the results anyway. diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 858bb6d648419..037e6c264402c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -222,7 +222,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter - tailscale.com/tsweb from tailscale.com/util/eventbus + LDW tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal tailscale.com/types/bools from tailscale.com/tsnet @@ -478,7 +478,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profile from net/http/pprof + LDW internal/profile from net/http/pprof internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ @@ -527,7 +527,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http - net/http/pprof from tailscale.com/ipn/localapi+ + LDW net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ net/textproto from 
github.com/coder/websocket+ net/url from crypto/x509+ @@ -542,7 +542,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) runtime from crypto/internal/fips140+ runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ - runtime/trace from net/http/pprof + LDW runtime/trace from net/http/pprof slices from crypto/tls+ sort from compress/flate+ strconv from compress/flate+ diff --git a/util/eventbus/debug-demo/main.go b/util/eventbus/debug-demo/main.go index a6d232d882944..71894d2eab94e 100644 --- a/util/eventbus/debug-demo/main.go +++ b/util/eventbus/debug-demo/main.go @@ -14,12 +14,16 @@ import ( "net/netip" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/tsweb" "tailscale.com/types/key" "tailscale.com/util/eventbus" ) func main() { + if !buildfeatures.HasDebugEventBus { + log.Fatalf("debug-demo requires the \"debugeventbus\" feature enabled") + } b := eventbus.New() c := b.Client("RouteMonitor") go testPub[RouteAdded](c, 5*time.Second) diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index a055f078fc4f2..6d5463bece7b2 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -10,8 +10,6 @@ import ( "slices" "sync" "sync/atomic" - - "tailscale.com/tsweb" ) // A Debugger offers access to a bus's privileged introspection and @@ -137,8 +135,6 @@ func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type { return client.subscribeTypes() } -func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) { registerHTTPDebugger(d, td) } - // A hook collects hook functions that can be run as a group. 
type hook[T any] struct { sync.Mutex diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go index 617502b93752c..9e03676d07128 100644 --- a/util/eventbus/debughttp.go +++ b/util/eventbus/debughttp.go @@ -29,7 +29,7 @@ type httpDebugger struct { *Debugger } -func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { +func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) { dh := httpDebugger{d} td.Handle("bus", "Event bus", dh) td.HandleSilent("bus/monitor", http.HandlerFunc(dh.serveMonitor)) diff --git a/util/eventbus/debughttp_off.go b/util/eventbus/debughttp_off.go index ed491f1f27ef1..332525262aa29 100644 --- a/util/eventbus/debughttp_off.go +++ b/util/eventbus/debughttp_off.go @@ -5,14 +5,6 @@ package eventbus -func registerHTTPDebugger(d *Debugger, tsWebDebugHandler any) { - // The event bus debugging UI uses html/template, which uses - // reflection for method lookups. This forces the compiler to - // retain a lot more code and information to make dynamic method - // dispatch work, which is unacceptable bloat for the iOS build. - // We also disable it on Android while we're at it, as nobody - // is debugging Tailscale internals on Android. - // - // TODO: https://github.com/tailscale/tailscale/issues/15297 to - // bring the debug UI back to iOS somehow. -} +type tswebDebugHandler = any // actually *tsweb.DebugHandler; any to avoid import tsweb with ts_omit_debugeventbus + +func (*Debugger) RegisterHTTP(td tswebDebugHandler) {} From 6c6a1d834122b2fe54a3f781cff12698d70e71e8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 13:11:48 -0700 Subject: [PATCH 0474/1093] feature/appconnectors: start making it modular Saves 45 KB. 
Updates #12614 Change-Id: Iaeb73e69633878ce0a0f58c986024784bbe218f1 Signed-off-by: Brad Fitzpatrick --- appc/appconnector.go | 120 ---------------- appc/observe.go | 132 ++++++++++++++++++ appc/observe_disabled.go | 8 ++ cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 1 + feature/appconnectors/appconnectors.go | 39 ++++++ .../feature_appconnectors_disabled.go | 13 ++ .../feature_appconnectors_enabled.go | 13 ++ feature/condregister/maybe_appconnectors.go | 8 ++ feature/featuretags/featuretags.go | 1 + ipn/ipnlocal/c2n.go | 24 ---- ipn/ipnlocal/local.go | 32 ++++- ipn/ipnlocal/peerapi.go | 2 +- ipn/localapi/localapi.go | 23 ++- 15 files changed, 263 insertions(+), 157 deletions(-) create mode 100644 appc/observe.go create mode 100644 appc/observe_disabled.go create mode 100644 feature/appconnectors/appconnectors.go create mode 100644 feature/buildfeatures/feature_appconnectors_disabled.go create mode 100644 feature/buildfeatures/feature_appconnectors_enabled.go create mode 100644 feature/condregister/maybe_appconnectors.go diff --git a/appc/appconnector.go b/appc/appconnector.go index 89c6c9aeb9aa7..8d7dd54e8dc0e 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -18,13 +18,11 @@ import ( "sync" "time" - "golang.org/x/net/dns/dnsmessage" "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" "tailscale.com/util/execqueue" - "tailscale.com/util/mak" "tailscale.com/util/slicesx" ) @@ -372,124 +370,6 @@ func (e *AppConnector) DomainRoutes() map[string][]netip.Addr { return drCopy } -// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS -// response is being returned over the PeerAPI. The response is parsed and -// matched against the configured domains, if matched the routeAdvertiser is -// advised to advertise the discovered route. 
-func (e *AppConnector) ObserveDNSResponse(res []byte) error { - var p dnsmessage.Parser - if _, err := p.Start(res); err != nil { - return err - } - if err := p.SkipAllQuestions(); err != nil { - return err - } - - // cnameChain tracks a chain of CNAMEs for a given query in order to reverse - // a CNAME chain back to the original query for flattening. The keys are - // CNAME record targets, and the value is the name the record answers, so - // for www.example.com CNAME example.com, the map would contain - // ["example.com"] = "www.example.com". - var cnameChain map[string]string - - // addressRecords is a list of address records found in the response. - var addressRecords map[string][]netip.Addr - - for { - h, err := p.AnswerHeader() - if err == dnsmessage.ErrSectionDone { - break - } - if err != nil { - return err - } - - if h.Class != dnsmessage.ClassINET { - if err := p.SkipAnswer(); err != nil { - return err - } - continue - } - - switch h.Type { - case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA: - default: - if err := p.SkipAnswer(); err != nil { - return err - } - continue - - } - - domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".") - if len(domain) == 0 { - continue - } - - if h.Type == dnsmessage.TypeCNAME { - res, err := p.CNAMEResource() - if err != nil { - return err - } - cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".") - if len(cname) == 0 { - continue - } - mak.Set(&cnameChain, cname, domain) - continue - } - - switch h.Type { - case dnsmessage.TypeA: - r, err := p.AResource() - if err != nil { - return err - } - addr := netip.AddrFrom4(r.A) - mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) - case dnsmessage.TypeAAAA: - r, err := p.AAAAResource() - if err != nil { - return err - } - addr := netip.AddrFrom16(r.AAAA) - mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) - default: - if err := p.SkipAnswer(); err != nil { - return err - } - continue - } - } 
- - e.mu.Lock() - defer e.mu.Unlock() - - for domain, addrs := range addressRecords { - domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain) - - // domain and none of the CNAMEs in the chain are routed - if !isRouted { - continue - } - - // advertise each address we have learned for the routed domain, that - // was not already known. - var toAdvertise []netip.Prefix - for _, addr := range addrs { - if !e.isAddrKnownLocked(domain, addr) { - toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen())) - } - } - - if len(toAdvertise) > 0 { - e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise) - e.scheduleAdvertisement(domain, toAdvertise...) - } - } - return nil -} - // starting from the given domain that resolved to an address, find it, or any // of the domains in the CNAME chain toward resolving it, that are routed // domains, returning the routed domain name and a bool indicating whether a diff --git a/appc/observe.go b/appc/observe.go new file mode 100644 index 0000000000000..06dc04f9dcfdf --- /dev/null +++ b/appc/observe.go @@ -0,0 +1,132 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_appconnectors + +package appc + +import ( + "net/netip" + "strings" + + "golang.org/x/net/dns/dnsmessage" + "tailscale.com/util/mak" +) + +// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS +// response is being returned over the PeerAPI. The response is parsed and +// matched against the configured domains, if matched the routeAdvertiser is +// advised to advertise the discovered route. +func (e *AppConnector) ObserveDNSResponse(res []byte) error { + var p dnsmessage.Parser + if _, err := p.Start(res); err != nil { + return err + } + if err := p.SkipAllQuestions(); err != nil { + return err + } + + // cnameChain tracks a chain of CNAMEs for a given query in order to reverse + // a CNAME chain back to the original query for flattening. 
The keys are + // CNAME record targets, and the value is the name the record answers, so + // for www.example.com CNAME example.com, the map would contain + // ["example.com"] = "www.example.com". + var cnameChain map[string]string + + // addressRecords is a list of address records found in the response. + var addressRecords map[string][]netip.Addr + + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + return err + } + + if h.Class != dnsmessage.ClassINET { + if err := p.SkipAnswer(); err != nil { + return err + } + continue + } + + switch h.Type { + case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA: + default: + if err := p.SkipAnswer(); err != nil { + return err + } + continue + + } + + domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".") + if len(domain) == 0 { + continue + } + + if h.Type == dnsmessage.TypeCNAME { + res, err := p.CNAMEResource() + if err != nil { + return err + } + cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".") + if len(cname) == 0 { + continue + } + mak.Set(&cnameChain, cname, domain) + continue + } + + switch h.Type { + case dnsmessage.TypeA: + r, err := p.AResource() + if err != nil { + return err + } + addr := netip.AddrFrom4(r.A) + mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) + case dnsmessage.TypeAAAA: + r, err := p.AAAAResource() + if err != nil { + return err + } + addr := netip.AddrFrom16(r.AAAA) + mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) + default: + if err := p.SkipAnswer(); err != nil { + return err + } + continue + } + } + + e.mu.Lock() + defer e.mu.Unlock() + + for domain, addrs := range addressRecords { + domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain) + + // domain and none of the CNAMEs in the chain are routed + if !isRouted { + continue + } + + // advertise each address we have learned for the routed domain, that + // was not already known. 
+ var toAdvertise []netip.Prefix + for _, addr := range addrs { + if !e.isAddrKnownLocked(domain, addr) { + toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen())) + } + } + + if len(toAdvertise) > 0 { + e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise) + e.scheduleAdvertisement(domain, toAdvertise...) + } + } + return nil +} diff --git a/appc/observe_disabled.go b/appc/observe_disabled.go new file mode 100644 index 0000000000000..45aa285eaa758 --- /dev/null +++ b/appc/observe_disabled.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_appconnectors + +package appc + +func (e *AppConnector) ObserveDNSResponse(res []byte) error { return nil } diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index b779e8c1b2bd5..f37dde001600b 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -164,7 +164,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - tailscale.com/util/mak from tailscale.com/appc+ + tailscale.com/util/mak from tailscale.com/control/controlclient+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 20e1c791b5432..7e12a9c36e129 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -190,7 +190,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from 
tailscale.com/hostinfo+ - tailscale.com/util/mak from tailscale.com/appc+ + tailscale.com/util/mak from tailscale.com/control/controlclient+ tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index b1bb83d92d9b0..9dde241caecb3 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -271,6 +271,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ + tailscale.com/feature/appconnectors from tailscale.com/feature/condregister tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/clientupdate from tailscale.com/feature/condregister diff --git a/feature/appconnectors/appconnectors.go b/feature/appconnectors/appconnectors.go new file mode 100644 index 0000000000000..28f5ccde35acb --- /dev/null +++ b/feature/appconnectors/appconnectors.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package appconnectors registers support for Tailscale App Connectors. +package appconnectors + +import ( + "encoding/json" + "net/http" + + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tailcfg" +) + +func init() { + ipnlocal.RegisterC2N("GET /appconnector/routes", handleC2NAppConnectorDomainRoutesGet) +} + +// handleC2NAppConnectorDomainRoutesGet handles returning the domains +// that the app connector is responsible for, as well as the resolved +// IP addresses for each domain. If the node is not configured as +// an app connector, an empty map is returned. 
+func handleC2NAppConnectorDomainRoutesGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + logf := b.Logger() + logf("c2n: GET /appconnector/routes received") + + var res tailcfg.C2NAppConnectorDomainRoutesResponse + appConnector := b.AppConnector() + if appConnector == nil { + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) + return + } + + res.Domains = appConnector.DomainRoutes() + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} diff --git a/feature/buildfeatures/feature_appconnectors_disabled.go b/feature/buildfeatures/feature_appconnectors_disabled.go new file mode 100644 index 0000000000000..64ea8f86b4104 --- /dev/null +++ b/feature/buildfeatures/feature_appconnectors_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_appconnectors + +package buildfeatures + +// HasAppConnectors is whether the binary was built with support for modular feature "App Connectors support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_appconnectors" build tag. +// It's a const so it can be used for dead code elimination. +const HasAppConnectors = false diff --git a/feature/buildfeatures/feature_appconnectors_enabled.go b/feature/buildfeatures/feature_appconnectors_enabled.go new file mode 100644 index 0000000000000..e00eaffa3e6fc --- /dev/null +++ b/feature/buildfeatures/feature_appconnectors_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_appconnectors + +package buildfeatures + +// HasAppConnectors is whether the binary was built with support for modular feature "App Connectors support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_appconnectors" build tag. 
+// It's a const so it can be used for dead code elimination. +const HasAppConnectors = true diff --git a/feature/condregister/maybe_appconnectors.go b/feature/condregister/maybe_appconnectors.go new file mode 100644 index 0000000000000..70112d7810b10 --- /dev/null +++ b/feature/condregister/maybe_appconnectors.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_appconnectors + +package condregister + +import _ "tailscale.com/feature/appconnectors" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 7cfc79f655618..daf4c71ebfa9e 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -88,6 +88,7 @@ type FeatureMeta struct { // excluded via build tags, and a description of each. var Features = map[FeatureTag]FeatureMeta{ "acme": {"ACME", "ACME TLS certificate management", nil}, + "appconnectors": {"AppConnectors", "App Connectors support", nil}, "aws": {"AWS", "AWS integration", nil}, "bird": {"Bird", "Bird BGP integration", nil}, "captiveportal": {"CaptivePortal", "Captive portal detection", nil}, diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index f064628fcdbab..ae9e671263d6c 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -51,9 +51,6 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // SSH req("/ssh/usernames"): handleC2NSSHUsernames, - // App Connectors. - req("GET /appconnector/routes"): handleC2NAppConnectorDomainRoutesGet, - // Linux netfilter. req("POST /netfilter-kind"): handleC2NSetNetfilterKind, } @@ -294,27 +291,6 @@ func handleC2NSockStats(b *LocalBackend, w http.ResponseWriter, r *http.Request) fmt.Fprintf(w, "debug info: %v\n", sockstats.DebugInfo()) } -// handleC2NAppConnectorDomainRoutesGet handles returning the domains -// that the app connector is responsible for, as well as the resolved -// IP addresses for each domain. 
If the node is not configured as -// an app connector, an empty map is returned. -func handleC2NAppConnectorDomainRoutesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /appconnector/routes received") - - var res tailcfg.C2NAppConnectorDomainRoutesResponse - appConnector := b.AppConnector() - if appConnector == nil { - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) - return - } - - res.Domains = appConnector.DomainRoutes() - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.Request) { b.logf("c2n: POST /netfilter-kind received") diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c3d7d3fb869eb..5897614d0cced 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -398,9 +398,10 @@ type LocalBackend struct { } // HealthTracker returns the health tracker for the backend. -func (b *LocalBackend) HealthTracker() *health.Tracker { - return b.health -} +func (b *LocalBackend) HealthTracker() *health.Tracker { return b.health } + +// Logger returns the logger for the backend. +func (b *LocalBackend) Logger() logger.Logf { return b.logf } // UserMetricsRegistry returns the usermetrics registry for the backend func (b *LocalBackend) UserMetricsRegistry() *usermetric.Registry { @@ -4154,6 +4155,9 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P // MaybeClearAppConnector clears the routes from any AppConnector if // AdvertiseRoutes has been set in the MaskedPrefs. func (b *LocalBackend) MaybeClearAppConnector(mp *ipn.MaskedPrefs) error { + if !buildfeatures.HasAppConnectors { + return nil + } var err error if ac := b.AppConnector(); ac != nil && mp.AdvertiseRoutesSet { err = ac.ClearRoutes() @@ -4770,6 +4774,9 @@ func (b *LocalBackend) blockEngineUpdates(block bool) { // current network map and preferences. // b.mu must be held. 
func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs ipn.PrefsView) { + if !buildfeatures.HasAppConnectors { + return + } const appConnectorCapName = "tailscale.com/app-connectors" defer func() { if b.hostinfo != nil { @@ -4943,7 +4950,9 @@ func (b *LocalBackend) authReconfig() { b.logf("[v1] authReconfig: ra=%v dns=%v 0x%02x: %v", prefs.RouteAll(), prefs.CorpDNS(), flags, err) b.initPeerAPIListener() - b.readvertiseAppConnectorRoutes() + if buildfeatures.HasAppConnectors { + b.readvertiseAppConnectorRoutes() + } } // shouldUseOneCGNATRoute reports whether we should prefer to make one big @@ -6363,6 +6372,9 @@ func (b *LocalBackend) OfferingExitNode() bool { // OfferingAppConnector reports whether b is currently offering app // connector services. func (b *LocalBackend) OfferingAppConnector() bool { + if !buildfeatures.HasAppConnectors { + return false + } b.mu.Lock() defer b.mu.Unlock() return b.appConnector != nil @@ -6372,6 +6384,9 @@ func (b *LocalBackend) OfferingAppConnector() bool { // // TODO(nickkhyl): move app connectors to [nodeBackend], or perhaps a feature package? func (b *LocalBackend) AppConnector() *appc.AppConnector { + if !buildfeatures.HasAppConnectors { + return nil + } b.mu.Lock() defer b.mu.Unlock() return b.appConnector @@ -6917,6 +6932,9 @@ func (b *LocalBackend) DebugBreakDERPConns() error { // ObserveDNSResponse passes a DNS response from the PeerAPI DNS server to the // App Connector to enable route discovery. 
func (b *LocalBackend) ObserveDNSResponse(res []byte) error { + if !buildfeatures.HasAppConnectors { + return nil + } var appConnector *appc.AppConnector b.mu.Lock() if b.appConnector == nil { @@ -7020,6 +7038,9 @@ func namespaceKeyForCurrentProfile(pm *profileManager, key ipn.StateKey) ipn.Sta const routeInfoStateStoreKey ipn.StateKey = "_routeInfo" func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { + if !buildfeatures.HasAppConnectors { + return feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() if b.pm.CurrentProfile().ID() == "" { @@ -7034,6 +7055,9 @@ func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { } func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { + if !buildfeatures.HasAppConnectors { + return nil, feature.ErrUnavailable + } if b.pm.CurrentProfile().ID() == "" { return &appc.RouteInfo{}, nil } diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index bd542e0f08b31..4f99525f9e498 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -745,7 +745,7 @@ func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request) // TODO(raggi): consider pushing the integration down into the resolver // instead to avoid re-parsing the DNS response for improved performance in // the future. 
- if h.ps.b.OfferingAppConnector() { + if buildfeatures.HasAppConnectors && h.ps.b.OfferingAppConnector() { if err := h.ps.b.ObserveDNSResponse(res); err != nil { h.logf("ObserveDNSResponse error: %v", err) // This is not fatal, we probably just failed to parse the upstream diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 3948b4293086a..b07df8b02f4db 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -72,7 +72,6 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "appc-route-info": (*Handler).serveGetAppcRouteInfo, "bugreport": (*Handler).serveBugReport, "check-ip-forwarding": (*Handler).serveCheckIPForwarding, "check-prefs": (*Handler).serveCheckPrefs, @@ -113,6 +112,12 @@ var handler = map[string]LocalAPIHandler{ "whois": (*Handler).serveWhoIs, } +func init() { + if buildfeatures.HasAppConnectors { + Register("appc-route-info", (*Handler).serveGetAppcRouteInfo) + } +} + // Register registers a new LocalAPI handler for the given name. 
func Register(name string, fn LocalAPIHandler) { if _, ok := handler[name]; ok { @@ -934,11 +939,13 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } - if err := h.b.MaybeClearAppConnector(mp); err != nil { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusInternalServerError) - json.NewEncoder(w).Encode(resJSON{Error: err.Error()}) - return + if buildfeatures.HasAppConnectors { + if err := h.b.MaybeClearAppConnector(mp); err != nil { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + json.NewEncoder(w).Encode(resJSON{Error: err.Error()}) + return + } } var err error prefs, err = h.b.EditPrefsAs(mp, h.Actor) @@ -1666,6 +1673,10 @@ func (h *Handler) serveShutdown(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveGetAppcRouteInfo(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasAppConnectors { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return From f7afb9b6cadd6f8fbfe8243b20fd11e4f4e49c32 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 12:46:43 -0700 Subject: [PATCH 0475/1093] feature/featuretags, ipn/conffile: make HuJSON support in config files optional Saves 33 KB. 
Updates #12614 Change-Id: Ie701c230e0765281f409f29ed263910b9be9cc77 Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-min.txt | 1 - cmd/tailscaled/depaware-minbox.txt | 1 - cmd/tailscaled/deps_test.go | 10 ++++++++-- .../feature_hujsonconf_disabled.go | 13 +++++++++++++ .../feature_hujsonconf_enabled.go | 13 +++++++++++++ feature/featuretags/featuretags.go | 1 + ipn/conffile/cloudconf.go | 5 +++++ ipn/conffile/conffile.go | 19 +++++++++++-------- ipn/conffile/conffile_hujson.go | 2 +- 9 files changed, 52 insertions(+), 13 deletions(-) create mode 100644 feature/buildfeatures/feature_hujsonconf_disabled.go create mode 100644 feature/buildfeatures/feature_hujsonconf_enabled.go diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index f37dde001600b..bada798db04de 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -24,7 +24,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf - github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 7e12a9c36e129..ef0d2a8ee52b8 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -36,7 +36,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/skip2/go-qrcode from tailscale.com/cmd/tailscale/cli github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode - 
github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index fd5d318360ebb..a9f125e19169b 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -253,13 +253,19 @@ func TestMinTailscaledNoCLI(t *testing.T) { } func TestMinTailscaledWithCLI(t *testing.T) { + badSubstrs := []string{ + "cbor", + "hujson", + } deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", Tags: minTags() + ",ts_include_cli", OnDep: func(dep string) { - if strings.Contains(dep, "cbor") { - t.Errorf("unexpected dep: %q", dep) + for _, bad := range badSubstrs { + if strings.Contains(dep, bad) { + t.Errorf("unexpected dep: %q", dep) + } } }, }.Check(t) diff --git a/feature/buildfeatures/feature_hujsonconf_disabled.go b/feature/buildfeatures/feature_hujsonconf_disabled.go new file mode 100644 index 0000000000000..cee076bc24527 --- /dev/null +++ b/feature/buildfeatures/feature_hujsonconf_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_hujsonconf + +package buildfeatures + +// HasHuJSONConf is whether the binary was built with support for modular feature "HuJSON config file support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_hujsonconf" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasHuJSONConf = false diff --git a/feature/buildfeatures/feature_hujsonconf_enabled.go b/feature/buildfeatures/feature_hujsonconf_enabled.go new file mode 100644 index 0000000000000..aefeeace5f0b9 --- /dev/null +++ b/feature/buildfeatures/feature_hujsonconf_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_hujsonconf + +package buildfeatures + +// HasHuJSONConf is whether the binary was built with support for modular feature "HuJSON config file support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_hujsonconf" build tag. +// It's a const so it can be used for dead code elimination. +const HasHuJSONConf = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index daf4c71ebfa9e..347ccdec063b3 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -113,6 +113,7 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Generic Receive Offload support (performance)", Deps: []FeatureTag{"netstack"}, }, + "hujsonconf": {"HuJSONConf", "HuJSON config file support", nil}, "iptables": {"IPTables", "Linux iptables support", nil}, "kube": {"Kube", "Kubernetes integration", nil}, "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, diff --git a/ipn/conffile/cloudconf.go b/ipn/conffile/cloudconf.go index 650611cf161fc..4475a2d7b799e 100644 --- a/ipn/conffile/cloudconf.go +++ b/ipn/conffile/cloudconf.go @@ -10,6 +10,8 @@ import ( "net/http" "strings" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/omit" ) @@ -35,6 +37,9 @@ func getEC2MetadataToken() (string, error) { } func readVMUserData() ([]byte, error) { + if !buildfeatures.HasAWS { + return nil, feature.ErrUnavailable + } // TODO(bradfitz): support GCP, Azure, Proxmox/cloud-init // 
(NoCloud/ConfigDrive ISO), etc. diff --git a/ipn/conffile/conffile.go b/ipn/conffile/conffile.go index a2bafb8b7fd22..3a2aeffb3a0c6 100644 --- a/ipn/conffile/conffile.go +++ b/ipn/conffile/conffile.go @@ -8,11 +8,11 @@ package conffile import ( "bytes" "encoding/json" - "errors" "fmt" "os" "runtime" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" ) @@ -51,10 +51,6 @@ func Load(path string) (*Config, error) { // compile-time for deadcode elimination return nil, fmt.Errorf("config file loading not supported on %q", runtime.GOOS) } - if hujsonStandardize == nil { - // Build tags are wrong in conffile_hujson.go - return nil, errors.New("[unexpected] config file loading not wired up") - } var c Config c.Path = path var err error @@ -68,14 +64,21 @@ func Load(path string) (*Config, error) { if err != nil { return nil, err } - c.Std, err = hujsonStandardize(c.Raw) - if err != nil { - return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err) + if buildfeatures.HasHuJSONConf && hujsonStandardize != nil { + c.Std, err = hujsonStandardize(c.Raw) + if err != nil { + return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err) + } + } else { + c.Std = c.Raw // config file must be valid JSON with ts_omit_hujsonconf } var ver struct { Version string `json:"version"` } if err := json.Unmarshal(c.Std, &ver); err != nil { + if !buildfeatures.HasHuJSONConf { + return nil, fmt.Errorf("error parsing config file %s, which must be valid standard JSON: %w", path, err) + } return nil, fmt.Errorf("error parsing config file %s: %w", path, err) } switch ver.Version { diff --git a/ipn/conffile/conffile_hujson.go b/ipn/conffile/conffile_hujson.go index 6825a06386625..1e967f1bdcca2 100644 --- a/ipn/conffile/conffile_hujson.go +++ b/ipn/conffile/conffile_hujson.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android +//go:build !ios && !android && !ts_omit_hujsonconf 
package conffile From 5b09913d640c2ab31c2c9c82d32b04a2c83ff2f7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 13:31:38 -0700 Subject: [PATCH 0476/1093] ipn/ipnlocal, engine: avoid runtime/pprof with two usages of ts_omit_debug Saves 258 KB. Updates #12614 Change-Id: I37c2f7f916480e3534883f338de4c64d08f7ef2b Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-min.txt | 4 +--- cmd/tailscaled/depaware-minbox.txt | 5 ++--- cmd/tailscaled/deps_test.go | 1 + ipn/ipnlocal/c2n_pprof.go | 2 +- wgengine/watchdog.go | 2 +- wgengine/watchdog_js.go | 17 ----------------- wgengine/watchdog_omit.go | 8 ++++++++ 7 files changed, 14 insertions(+), 25 deletions(-) delete mode 100644 wgengine/watchdog_js.go create mode 100644 wgengine/watchdog_omit.go diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index bada798db04de..3a6d0e7fd8da1 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -345,7 +345,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profilerecord from runtime+ + internal/profilerecord from runtime internal/race from internal/runtime/maps+ internal/reflectlite from context+ internal/runtime/atomic from internal/runtime/exithook+ @@ -398,7 +398,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de reflect from crypto/x509+ runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ - runtime/pprof from tailscale.com/ipn/ipnlocal+ slices from crypto/tls+ sort from compress/flate+ strconv from compress/flate+ @@ -406,7 +405,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ - text/tabwriter from runtime/pprof time from compress/gzip+ unicode from bytes+ unicode/utf16 
from crypto/x509+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index ef0d2a8ee52b8..0dd36447f6903 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -378,7 +378,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profilerecord from runtime+ + internal/profilerecord from runtime internal/race from internal/runtime/maps+ internal/reflectlite from context+ internal/runtime/atomic from internal/runtime/exithook+ @@ -435,7 +435,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ - runtime/pprof from tailscale.com/ipn/ipnlocal+ slices from crypto/tls+ sort from compress/flate+ strconv from compress/flate+ @@ -443,7 +442,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de sync from compress/flate+ sync/atomic from context+ syscall from crypto/internal/sysrand+ - text/tabwriter from runtime/pprof+ + text/tabwriter from github.com/peterbourgon/ff/v3/ffcli+ time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a9f125e19169b..521eb3ceda569 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -256,6 +256,7 @@ func TestMinTailscaledWithCLI(t *testing.T) { badSubstrs := []string{ "cbor", "hujson", + "pprof", } deptest.DepChecker{ GOOS: "linux", diff --git a/ipn/ipnlocal/c2n_pprof.go b/ipn/ipnlocal/c2n_pprof.go index b4bc35790973a..13237cc4fad2f 100644 --- a/ipn/ipnlocal/c2n_pprof.go +++ b/ipn/ipnlocal/c2n_pprof.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !js && !wasm +//go:build !js && !wasm && 
!ts_omit_debug package ipnlocal diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go index 13bc48fb09d3e..0500e6f7fd4c7 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !js +//go:build !js && !ts_omit_debug package wgengine diff --git a/wgengine/watchdog_js.go b/wgengine/watchdog_js.go deleted file mode 100644 index 872ce36d5fd5d..0000000000000 --- a/wgengine/watchdog_js.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build js - -package wgengine - -import "tailscale.com/net/dns/resolver" - -type watchdogEngine struct { - Engine - wrap Engine -} - -func (e *watchdogEngine) GetResolver() (r *resolver.Resolver, ok bool) { - return nil, false -} diff --git a/wgengine/watchdog_omit.go b/wgengine/watchdog_omit.go new file mode 100644 index 0000000000000..1d175b41a87eb --- /dev/null +++ b/wgengine/watchdog_omit.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build js || ts_omit_debug + +package wgengine + +func NewWatchdog(e Engine) Engine { return e } From 9781b7c25cbaae314f3ca95741d20c6125a89531 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 30 Sep 2025 20:45:12 -0500 Subject: [PATCH 0477/1093] ipn/ipnlocal: plumb logf into nodeBackend Updates #cleanup Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 8 ++++---- ipn/ipnlocal/local_test.go | 4 ++-- ipn/ipnlocal/node_backend.go | 9 ++++++--- ipn/ipnlocal/node_backend_test.go | 11 ++++++----- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5897614d0cced..c091e0c61e177 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -501,7 +501,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo needsCaptiveDetection: make(chan bool), } - nb := 
newNodeBackend(ctx, b.sys.Bus.Get()) + nb := newNodeBackend(ctx, b.logf, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -629,7 +629,7 @@ func (b *LocalBackend) currentNode() *nodeBackend { if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { return v } - v := newNodeBackend(cmp.Or(b.ctx, context.Background()), b.sys.Bus.Get()) + v := newNodeBackend(cmp.Or(b.ctx, context.Background()), b.logf, b.sys.Bus.Get()) if b.currentNodeAtomic.CompareAndSwap(nil, v) { v.ready() } @@ -4890,7 +4890,7 @@ func (b *LocalBackend) authReconfig() { hasPAC := b.prevIfState.HasPAC() disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) - dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, b.logf, version.OS()) + dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, version.OS()) // If the current node is an app connector, ensure the app connector machine is started b.reconfigAppConnectorLocked(nm, prefs) closing := b.shutdownCalled @@ -6797,7 +6797,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // down, so no need to do any work. 
return nil } - newNode := newNodeBackend(b.ctx, b.sys.Bus.Get()) + newNode := newNodeBackend(b.ctx, b.logf, b.sys.Bus.Get()) if oldNode := b.currentNodeAtomic.Swap(newNode); oldNode != nil { oldNode.shutdown(errNodeContextChanged) } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 70923efde13ee..a984d66bff035 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4904,7 +4904,7 @@ func TestSuggestExitNode(t *testing.T) { allowList = set.SetOf(tt.allowPolicy) } - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) defer nb.shutdown(errShutdown) nb.SetNetMap(tt.netMap) @@ -5357,7 +5357,7 @@ func TestSuggestExitNodeTrafficSteering(t *testing.T) { tt.netMap.AllCaps = set.SetOf(slices.Collect(caps)) } - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) defer nb.shutdown(errShutdown) nb.SetNetMap(tt.netMap) diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index b1ce9e07c404e..95bf350ceeca0 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -65,6 +65,8 @@ import ( // Even if they're tied to the local node, instead of moving them here, we should extract the entire feature // into a separate package and have it install proper hooks. 
type nodeBackend struct { + logf logger.Logf + ctx context.Context // canceled by [nodeBackend.shutdown] ctxCancel context.CancelCauseFunc // cancels ctx @@ -104,9 +106,10 @@ type nodeBackend struct { nodeByAddr map[netip.Addr]tailcfg.NodeID } -func newNodeBackend(ctx context.Context, bus *eventbus.Bus) *nodeBackend { +func newNodeBackend(ctx context.Context, logf logger.Logf, bus *eventbus.Bus) *nodeBackend { ctx, ctxCancel := context.WithCancelCause(ctx) nb := &nodeBackend{ + logf: logf, ctx: ctx, ctxCancel: ctxCancel, eventClient: bus.Client("ipnlocal.nodeBackend"), @@ -520,10 +523,10 @@ func (nb *nodeBackend) setFilter(f *filter.Filter) { nb.filterPub.Publish(magicsock.FilterUpdate{Filter: f}) } -func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { +func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, versionOS string) *dns.Config { nb.mu.Lock() defer nb.mu.Unlock() - return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, logf, versionOS) + return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, nb.logf, versionOS) } func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { diff --git a/ipn/ipnlocal/node_backend_test.go b/ipn/ipnlocal/node_backend_test.go index dc67d327c8041..b305837fd46c2 100644 --- a/ipn/ipnlocal/node_backend_test.go +++ b/ipn/ipnlocal/node_backend_test.go @@ -9,11 +9,12 @@ import ( "testing" "time" + "tailscale.com/tstest" "tailscale.com/util/eventbus" ) func TestNodeBackendReadiness(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) // The node backend is not ready until [nodeBackend.ready] is called, // and [nodeBackend.Wait] should fail with [context.DeadlineExceeded]. 
@@ -44,7 +45,7 @@ func TestNodeBackendReadiness(t *testing.T) { } func TestNodeBackendShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) shutdownCause := errors.New("test shutdown") @@ -82,7 +83,7 @@ } func TestNodeBackendReadyAfterShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) shutdownCause := errors.New("test shutdown") nb.shutdown(shutdownCause) @@ -94,7 +95,7 @@ func TestNodeBackendParentContextCancellation(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) - nb := newNodeBackend(ctx, eventbus.New()) + nb := newNodeBackend(ctx, tstest.WhileTestRunningLogger(t), eventbus.New()) cancelCtx() @@ -111,7 +112,7 @@ } func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) // Calling [nodeBackend.ready] and [nodeBackend.shutdown] concurrently // should not cause issues, and [nodeBackend.Wait] should unblock, From af1114e896fd16378dbf8f0584b0d55ebd46930b Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Wed, 1 Oct 2025 12:24:21 +0100 Subject: [PATCH 0478/1093] cmd/k8s-proxy: importing feature/condregister on cmd/k8s-proxy (#17383) https://github.com/tailscale/tailscale/pull/17346 moved the kube and aws arn store initializations to feature/condregister, under the assumption that anything using it would use kubestore.New. Unfortunately, cmd/k8s-proxy makes use of store.New, which compares the `:` supplied in the provided `path string` argument against known stores. If it doesn't find it, it falls back to using a FileStore.
Since cmd/k8s-proxy uses store.New to try and initialize a kube store in some cases (without importing feature/condregister), it silently creates a FileStore and that leads to misleading errors further along in execution. This fixes this issue by importing condregister, and successfully initializes a kube store. Updates #12614 Signed-off-by: chaosinthecrd --- cmd/k8s-proxy/k8s-proxy.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 7a77072140568..57a2632e2080c 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -31,6 +31,7 @@ import ( "k8s.io/utils/strings/slices" "tailscale.com/client/local" "tailscale.com/cmd/k8s-proxy/internal/config" + _ "tailscale.com/feature/condregister" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store" From ebc370e517a4221a092c1c2a33cc7b749c651aa0 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Wed, 1 Oct 2025 14:44:15 +0100 Subject: [PATCH 0479/1093] ipn/ipnlocal: fail test if more notifies are put than expected The `put` callback runs on a different goroutine to the test, so calling t.Fatalf in put had no effect. `drain` is always called when checking what was put and is called from the test goroutine, so that's a good place to fail the test if the channel was too full. Updates #17363 Signed-off-by: James Sanderson --- ipn/ipnlocal/state_test.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 9c0aa66a94282..347aaf8b83c94 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -59,8 +59,9 @@ type notifyThrottler struct { // ch gets replaced frequently. Lock the mutex before getting or // setting it, but not while waiting on it. 
- mu sync.Mutex - ch chan ipn.Notify + mu sync.Mutex + ch chan ipn.Notify + putErr error // set by put if the channel is full } // expect tells the throttler to expect count upcoming notifications. @@ -81,7 +82,11 @@ func (nt *notifyThrottler) put(n ipn.Notify) { case ch <- n: return default: - nt.t.Fatalf("put: channel full: %v", n) + err := fmt.Errorf("put: channel full: %v", n) + nt.t.Log(err) + nt.mu.Lock() + nt.putErr = err + nt.mu.Unlock() } } @@ -91,8 +96,13 @@ func (nt *notifyThrottler) drain(count int) []ipn.Notify { nt.t.Helper() nt.mu.Lock() ch := nt.ch + putErr := nt.putErr nt.mu.Unlock() + if putErr != nil { + nt.t.Fatalf("drain: previous call to put errored: %s", putErr) + } + nn := []ipn.Notify{} for i := range count { select { From 91fa51ca153e39e0bfaf2cb580a2071065230b97 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 07:30:39 -0700 Subject: [PATCH 0480/1093] ipn/store, feature/condregister: permit callers to empty import optional ipn stores This permits other programs (in other repos) to conditionally import ipn/store/awsstore and/or ipn/store/kubestore and have them register themselves, rather than feature/condregister doing it.
Updates tailscale/corp#32922 Change-Id: I2936229ce37fd2acf9be5bf5254d4a262d090ec1 Signed-off-by: Brad Fitzpatrick --- feature/condregister/maybe_store_aws.go | 17 +---------------- feature/condregister/maybe_store_kube.go | 16 +--------------- ipn/store/awsstore/store_aws.go | 13 ++++++++++++- ipn/store/awsstore/store_aws_test.go | 2 +- ipn/store/kubestore/store_kube.go | 8 ++++++++ 5 files changed, 23 insertions(+), 33 deletions(-) diff --git a/feature/condregister/maybe_store_aws.go b/feature/condregister/maybe_store_aws.go index 48ef06ecf1234..8358b49f05843 100644 --- a/feature/condregister/maybe_store_aws.go +++ b/feature/condregister/maybe_store_aws.go @@ -5,19 +5,4 @@ package condregister -import ( - "tailscale.com/ipn" - "tailscale.com/ipn/store" - "tailscale.com/ipn/store/awsstore" - "tailscale.com/types/logger" -) - -func init() { - store.Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { - ssmARN, opts, err := awsstore.ParseARNAndOpts(arg) - if err != nil { - return nil, err - } - return awsstore.New(logf, ssmARN, opts...) 
- }) -} +import _ "tailscale.com/ipn/store/awsstore" diff --git a/feature/condregister/maybe_store_kube.go b/feature/condregister/maybe_store_kube.go index 0aa2c1692ff6b..bb795b05e2450 100644 --- a/feature/condregister/maybe_store_kube.go +++ b/feature/condregister/maybe_store_kube.go @@ -5,18 +5,4 @@ package condregister -import ( - "strings" - - "tailscale.com/ipn" - "tailscale.com/ipn/store" - "tailscale.com/ipn/store/kubestore" - "tailscale.com/types/logger" -) - -func init() { - store.Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { - secretName := strings.TrimPrefix(path, "kube:") - return kubestore.New(logf, secretName) - }) -} +import _ "tailscale.com/ipn/store/kubestore" diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 40bbbf0370822..78b72d0bc8f45 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !ts_omit_aws +//go:build !ts_omit_aws // Package awsstore contains an ipn.StateStore implementation using AWS SSM. package awsstore @@ -20,10 +20,21 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ssm" ssmTypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/types/logger" ) +func init() { + store.Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { + ssmARN, opts, err := ParseARNAndOpts(arg) + if err != nil { + return nil, err + } + return New(logf, ssmARN, opts...) 
+ }) +} + const ( parameterNameRxStr = `^parameter(/.*)` ) diff --git a/ipn/store/awsstore/store_aws_test.go b/ipn/store/awsstore/store_aws_test.go index 3382635a7d333..3cc23e48d4b12 100644 --- a/ipn/store/awsstore/store_aws_test.go +++ b/ipn/store/awsstore/store_aws_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !ts_omit_aws +//go:build !ts_omit_aws package awsstore diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 5b25471c75638..f48237c057142 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -16,6 +16,7 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" @@ -25,6 +26,13 @@ import ( "tailscale.com/util/mak" ) +func init() { + store.Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { + secretName := strings.TrimPrefix(path, "kube:") + return New(logf, secretName) + }) +} + const ( // timeout is the timeout for a single state update that includes calls to the API server to write or read a // state Secret and emit an Event. 
From c2f37c891c6c6c37c1320ad7edf77f94292c4fb5 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 19:47:50 -0700 Subject: [PATCH 0481/1093] all: use Go 1.20's errors.Join instead of our multierr package Updates #7123 Change-Id: Ie9be6814831f661ad5636afcd51d063a0d7a907d Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 - cmd/k8s-operator/depaware.txt | 1 - cmd/tailscale/depaware.txt | 1 - cmd/tailscaled/depaware-min.txt | 1 - cmd/tailscaled/depaware-minbox.txt | 1 - cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/deps_test.go | 1 + cmd/tailscaled/tailscaled.go | 3 +-- cmd/tsidp/depaware.txt | 1 - control/controlclient/direct.go | 3 +-- control/controlclient/noise.go | 7 +++---- derp/xdp/xdp_linux.go | 3 +-- feature/tap/tap_linux.go | 4 ++-- health/health.go | 7 +++---- ipn/ipnlocal/local.go | 5 ++--- k8s-operator/sessionrecording/hijacker.go | 7 +++---- k8s-operator/sessionrecording/ws/conn.go | 3 +-- kube/kubeclient/client.go | 4 ++-- net/netcheck/standalone.go | 3 +-- net/ping/ping.go | 8 ++++---- prober/tls.go | 5 ++--- release/dist/dist.go | 3 +-- sessionrecording/connect.go | 3 +-- tsnet/depaware.txt | 1 - tstest/integration/tailscaled_deps_test_darwin.go | 1 - tstest/integration/tailscaled_deps_test_freebsd.go | 1 - tstest/integration/tailscaled_deps_test_linux.go | 1 - tstest/integration/tailscaled_deps_test_openbsd.go | 1 - tstest/integration/tailscaled_deps_test_windows.go | 1 - util/linuxfw/iptables.go | 5 ++--- util/winutil/restartmgr_windows.go | 3 +-- wgengine/netlog/netlog.go | 4 ++-- wgengine/router/osrouter/ifconfig_windows.go | 3 +-- wgengine/router/osrouter/router_linux.go | 3 +-- wgengine/wgcfg/device.go | 6 +++--- 35 files changed, 40 insertions(+), 67 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 8c122105f114e..7f0252148a0e2 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -151,7 +151,6 @@ tailscale.com/cmd/derper dependencies: (generated by 
github.com/tailscale/depawa 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/health+ - tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/set from tailscale.com/derp/derpserver+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index f8ae3d2616059..e225cebf92bd0 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -840,7 +840,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 9fb7b63ed172d..cfa073a71d477 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -177,7 +177,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/cmd/tailscale/cli+ - tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 3a6d0e7fd8da1..22f360ac520eb 100644 --- a/cmd/tailscaled/depaware-min.txt +++ 
b/cmd/tailscaled/depaware-min.txt @@ -164,7 +164,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/control/controlclient+ - tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/osdiag from tailscale.com/ipn/localapi diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 0dd36447f6903..4b80f4a5637a4 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -190,7 +190,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/control/controlclient+ - tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/osdiag from tailscale.com/ipn/localapi diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 9dde241caecb3..5e92438e7933a 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -427,7 +427,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/lineiter from tailscale.com/hostinfo+ L tailscale.com/util/linuxfw from tailscale.com/wgengine/router/osrouter tailscale.com/util/mak from tailscale.com/control/controlclient+ - tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ + tailscale.com/util/multierr from tailscale.com/feature/taildrop tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from 
tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 521eb3ceda569..c364a93069e11 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -257,6 +257,7 @@ func TestMinTailscaledWithCLI(t *testing.T) { "cbor", "hujson", "pprof", + "multierr", // https://github.com/tailscale/tailscale/pull/17379 } deptest.DepChecker{ GOOS: "linux", diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 48eefbea741a0..8de473b7c718b 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -56,7 +56,6 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/clientmetric" - "tailscale.com/util/multierr" "tailscale.com/util/osshare" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" @@ -701,7 +700,7 @@ func createEngine(logf logger.Logf, sys *tsd.System) (onlyNetstack bool, err err logf("wgengine.NewUserspaceEngine(tun %q) error: %v", name, err) errs = append(errs, err) } - return false, multierr.New(errs...) + return false, errors.Join(errs...) 
} // handleSubnetsInNetstack reports whether netstack should handle subnet routers diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 033ff6570ea78..9ced6f966ccb6 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -267,7 +267,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/cmd/tsidp+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 54f2de1c93318..199e1479bcdb5 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -54,7 +54,6 @@ import ( "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" - "tailscale.com/util/multierr" "tailscale.com/util/singleflight" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" @@ -1307,7 +1306,7 @@ func loadServerPubKeys(ctx context.Context, httpc *http.Client, serverURL string out = tailcfg.OverTLSPublicKeyResponse{} k, err := key.ParseMachinePublicUntyped(mem.B(b)) if err != nil { - return nil, multierr.New(jsonErr, err) + return nil, errors.Join(jsonErr, err) } out.LegacyPublicKey = k return &out, nil diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go index 4bd8cfc25ee96..a0f344664ab80 100644 --- a/control/controlclient/noise.go +++ b/control/controlclient/noise.go @@ -28,7 +28,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/mak" - "tailscale.com/util/multierr" "tailscale.com/util/singleflight" ) @@ -295,13 +294,13 @@ func (nc *NoiseClient) Close() error { nc.connPool = nil 
nc.mu.Unlock() - var errors []error + var errs []error for _, c := range conns { if err := c.Close(); err != nil { - errors = append(errors, err) + errs = append(errs, err) } } - return multierr.New(errors...) + return errors.Join(errs...) } // dial opens a new connection to tailcontrol, fetching the server noise key diff --git a/derp/xdp/xdp_linux.go b/derp/xdp/xdp_linux.go index 3ebe0a0520efc..309d9ee9a92b4 100644 --- a/derp/xdp/xdp_linux.go +++ b/derp/xdp/xdp_linux.go @@ -14,7 +14,6 @@ import ( "github.com/cilium/ebpf" "github.com/cilium/ebpf/link" "github.com/prometheus/client_golang/prometheus" - "tailscale.com/util/multierr" ) //go:generate go run github.com/cilium/ebpf/cmd/bpf2go -type config -type counters_key -type counter_key_af -type counter_key_packets_bytes_action -type counter_key_prog_end bpf xdp.c -- -I headers @@ -110,7 +109,7 @@ func (s *STUNServer) Close() error { errs = append(errs, s.link.Close()) } errs = append(errs, s.objs.Close()) - return multierr.New(errs...) + return errors.Join(errs...) } type stunServerMetrics struct { diff --git a/feature/tap/tap_linux.go b/feature/tap/tap_linux.go index 58ac00593d3a8..53dcabc364d6b 100644 --- a/feature/tap/tap_linux.go +++ b/feature/tap/tap_linux.go @@ -6,6 +6,7 @@ package tap import ( "bytes" + "errors" "fmt" "net" "net/netip" @@ -29,7 +30,6 @@ import ( "tailscale.com/syncs" "tailscale.com/types/ipproto" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) // TODO: this was randomly generated once. Maybe do it per process start? But @@ -482,7 +482,7 @@ func (t *tapDevice) Write(buffs [][]byte, offset int) (int, error) { wrote++ } } - return wrote, multierr.New(errs...) + return wrote, errors.Join(errs...) 
} func (t *tapDevice) MTU() (int, error) { diff --git a/health/health.go b/health/health.go index d60762e3159c3..c41256614c5e4 100644 --- a/health/health.go +++ b/health/health.go @@ -27,7 +27,6 @@ import ( "tailscale.com/util/cibuild" "tailscale.com/util/eventbus" "tailscale.com/util/mak" - "tailscale.com/util/multierr" "tailscale.com/util/usermetric" "tailscale.com/version" ) @@ -992,8 +991,8 @@ func (t *Tracker) selfCheckLocked() { // OverallError returns a summary of the health state. // -// If there are multiple problems, the error will be of type -// multierr.Error. +// If there are multiple problems, the error will be joined using +// [errors.Join]. func (t *Tracker) OverallError() error { if t.nil() { return nil @@ -1071,7 +1070,7 @@ func (t *Tracker) errorsLocked() []error { // This function is here for legacy compatibility purposes and is deprecated. func (t *Tracker) multiErrLocked() error { errs := t.errorsLocked() - return multierr.New(errs...) + return errors.Join(errs...) } var fakeErrForTesting = envknob.RegisterString("TS_DEBUG_FAKE_HEALTH_ERROR") diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c091e0c61e177..f214c5def1039 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -90,7 +90,6 @@ import ( "tailscale.com/util/eventbus" "tailscale.com/util/goroutines" "tailscale.com/util/mak" - "tailscale.com/util/multierr" "tailscale.com/util/osuser" "tailscale.com/util/rands" "tailscale.com/util/set" @@ -3981,7 +3980,7 @@ func (b *LocalBackend) checkPrefsLocked(p *ipn.Prefs) error { if err := b.checkAutoUpdatePrefsLocked(p); err != nil { errs = append(errs, err) } - return multierr.New(errs...) + return errors.Join(errs...) } func (b *LocalBackend) checkSSHPrefsLocked(p *ipn.Prefs) error { @@ -4225,7 +4224,7 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn } } - return multierr.New(errs...) + return errors.Join(errs...) 
} // changeDisablesExitNodeLocked reports whether applying the change diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 789a9fdb9f6a3..ebd77641b9136 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -11,6 +11,7 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "io" "net" @@ -19,7 +20,6 @@ import ( "net/netip" "strings" - "github.com/pkg/errors" "go.uber.org/zap" "tailscale.com/client/tailscale/apitype" "tailscale.com/k8s-operator/sessionrecording/spdy" @@ -31,7 +31,6 @@ import ( "tailscale.com/tsnet" "tailscale.com/tstime" "tailscale.com/util/clientmetric" - "tailscale.com/util/multierr" ) const ( @@ -166,7 +165,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, } msg = msg + "; failure mode is 'fail closed'; closing connection." if err := closeConnWithWarning(conn, msg); err != nil { - return nil, multierr.New(errors.New(msg), err) + return nil, errors.Join(errors.New(msg), err) } return nil, errors.New(msg) } else { @@ -245,7 +244,7 @@ func closeConnWithWarning(conn net.Conn, msg string) error { b := io.NopCloser(bytes.NewBuffer([]byte(msg))) resp := http.Response{Status: http.StatusText(http.StatusForbidden), StatusCode: http.StatusForbidden, Body: b} if err := resp.Write(conn); err != nil { - return multierr.New(fmt.Errorf("error writing msg %q to conn: %v", msg, err), conn.Close()) + return errors.Join(fmt.Errorf("error writing msg %q to conn: %v", msg, err), conn.Close()) } return conn.Close() } diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go index a34379658caa2..a618f85fb7822 100644 --- a/k8s-operator/sessionrecording/ws/conn.go +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -21,7 +21,6 @@ import ( "k8s.io/apimachinery/pkg/util/remotecommand" "tailscale.com/k8s-operator/sessionrecording/tsrecorder" "tailscale.com/sessionrecording" - 
"tailscale.com/util/multierr" ) // New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. @@ -316,7 +315,7 @@ func (c *conn) Close() error { c.closed = true connCloseErr := c.Conn.Close() recCloseErr := c.rec.Close() - return multierr.New(connCloseErr, recCloseErr) + return errors.Join(connCloseErr, recCloseErr) } // writeBufHasIncompleteFragment returns true if the latest data message diff --git a/kube/kubeclient/client.go b/kube/kubeclient/client.go index 332b21106ecfb..0ed960f4ddcd4 100644 --- a/kube/kubeclient/client.go +++ b/kube/kubeclient/client.go @@ -15,6 +15,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" "io" "log" @@ -29,7 +30,6 @@ import ( "tailscale.com/kube/kubeapi" "tailscale.com/tstime" - "tailscale.com/util/multierr" ) const ( @@ -397,7 +397,7 @@ func (c *client) CheckSecretPermissions(ctx context.Context, secretName string) } } if len(errs) > 0 { - return false, false, multierr.New(errs...) + return false, false, errors.Join(errs...) } canPatch, err = c.checkPermission(ctx, "patch", TypeSecrets, secretName) if err != nil { diff --git a/net/netcheck/standalone.go b/net/netcheck/standalone.go index c72d7005f7c7e..b4523a832d463 100644 --- a/net/netcheck/standalone.go +++ b/net/netcheck/standalone.go @@ -13,7 +13,6 @@ import ( "tailscale.com/net/stun" "tailscale.com/types/logger" "tailscale.com/types/nettype" - "tailscale.com/util/multierr" ) // Standalone creates the necessary UDP sockets on the given bindAddr and starts @@ -62,7 +61,7 @@ func (c *Client) Standalone(ctx context.Context, bindAddr string) error { // If both v4 and v6 failed, report an error, otherwise let one succeed. if len(errs) == 2 { - return multierr.New(errs...) + return errors.Join(errs...) 
} return nil } diff --git a/net/ping/ping.go b/net/ping/ping.go index 01f3dcf2c4976..1ff3862dc65a1 100644 --- a/net/ping/ping.go +++ b/net/ping/ping.go @@ -10,6 +10,7 @@ import ( "context" "crypto/rand" "encoding/binary" + "errors" "fmt" "io" "log" @@ -24,7 +25,6 @@ import ( "golang.org/x/net/ipv6" "tailscale.com/types/logger" "tailscale.com/util/mak" - "tailscale.com/util/multierr" ) const ( @@ -157,17 +157,17 @@ func (p *Pinger) Close() error { p.conns = nil p.mu.Unlock() - var errors []error + var errs []error for _, c := range conns { if err := c.Close(); err != nil { - errors = append(errors, err) + errs = append(errs, err) } } p.wg.Wait() p.cleanupOutstanding() - return multierr.New(errors...) + return errors.Join(errs...) } func (p *Pinger) run(ctx context.Context, conn net.PacketConn, typ string) { diff --git a/prober/tls.go b/prober/tls.go index 777b2b5089d8f..3ce5354357d71 100644 --- a/prober/tls.go +++ b/prober/tls.go @@ -7,14 +7,13 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "io" "net/http" "net/netip" "slices" "time" - - "tailscale.com/util/multierr" ) const expiresSoon = 7 * 24 * time.Hour // 7 days from now @@ -69,7 +68,7 @@ func probeTLS(ctx context.Context, config *tls.Config, dialHostPort string) erro func validateConnState(ctx context.Context, cs *tls.ConnectionState) (returnerr error) { var errs []error defer func() { - returnerr = multierr.New(errs...) + returnerr = errors.Join(errs...) }() latestAllowedExpiration := time.Now().Add(expiresSoon) diff --git a/release/dist/dist.go b/release/dist/dist.go index 802d9041bab23..6fb0102993cbd 100644 --- a/release/dist/dist.go +++ b/release/dist/dist.go @@ -20,7 +20,6 @@ import ( "sync" "time" - "tailscale.com/util/multierr" "tailscale.com/version/mkversion" ) @@ -176,7 +175,7 @@ func (b *Build) Build(targets []Target) (files []string, err error) { } sort.Strings(files) - return files, multierr.New(errs...) + return files, errors.Join(errs...) 
} // Once runs fn if Once hasn't been called with name before. diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index ccb7e5fd95e4d..a470969d8c68b 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -21,7 +21,6 @@ import ( "tailscale.com/net/netx" "tailscale.com/tailcfg" "tailscale.com/util/httpm" - "tailscale.com/util/multierr" ) const ( @@ -91,7 +90,7 @@ func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial netx.Dia } return pw, attempts, errChan, nil } - return nil, attempts, nil, multierr.New(errs...) + return nil, attempts, nil, errors.Join(errs...) } // supportsV2 checks whether a recorder instance supports the /v2/record diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 037e6c264402c..b5f524088a552 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -262,7 +262,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 72615330d8970..217188f75f6c0 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -50,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go 
b/tstest/integration/tailscaled_deps_test_freebsd.go index 72615330d8970..217188f75f6c0 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -50,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 72615330d8970..217188f75f6c0 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -50,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 72615330d8970..217188f75f6c0 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -50,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index c2761d01949fe..f3cd5e75b9e36 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -60,7 +60,6 @@ import ( _ "tailscale.com/util/backoff" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ 
"tailscale.com/util/multierr" _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index 73da920863d96..5bd7c528b11b3 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ -18,7 +18,6 @@ import ( "github.com/coreos/go-iptables/iptables" "tailscale.com/types/logger" - "tailscale.com/util/multierr" "tailscale.com/version/distro" ) @@ -67,7 +66,7 @@ func detectIptables() (int, error) { default: return 0, FWModeNotSupportedError{ Mode: FirewallModeIPTables, - Err: fmt.Errorf("iptables command run fail: %w", multierr.New(err, ip6err)), + Err: fmt.Errorf("iptables command run fail: %w", errors.Join(err, ip6err)), } } @@ -232,5 +231,5 @@ func clearRules(proto iptables.Protocol, logf logger.Logf) error { errs = append(errs, err) } - return multierr.New(errs...) + return errors.Join(errs...) } diff --git a/util/winutil/restartmgr_windows.go b/util/winutil/restartmgr_windows.go index a52e2fee9f933..6f549de557653 100644 --- a/util/winutil/restartmgr_windows.go +++ b/util/winutil/restartmgr_windows.go @@ -19,7 +19,6 @@ import ( "github.com/dblohm7/wingoes" "golang.org/x/sys/windows" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) var ( @@ -538,7 +537,7 @@ func (rps RestartableProcesses) Terminate(logf logger.Logf, exitCode uint32, tim } if len(errs) != 0 { - return multierr.New(errs...) + return errors.Join(errs...) 
} return nil } diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 8fd225c90e862..34b78a2b595a6 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -10,6 +10,7 @@ package netlog import ( "context" "encoding/json" + "errors" "fmt" "io" "log" @@ -28,7 +29,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logid" "tailscale.com/types/netlogtype" - "tailscale.com/util/multierr" "tailscale.com/wgengine/router" ) @@ -272,5 +272,5 @@ func (nl *Logger) Shutdown(ctx context.Context) error { nl.addrs = nil nl.prefixes = nil - return multierr.New(err1, err2) + return errors.Join(err1, err2) } diff --git a/wgengine/router/osrouter/ifconfig_windows.go b/wgengine/router/osrouter/ifconfig_windows.go index 78ac8d45fb59f..cb87ad5f24114 100644 --- a/wgengine/router/osrouter/ifconfig_windows.go +++ b/wgengine/router/osrouter/ifconfig_windows.go @@ -18,7 +18,6 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" - "tailscale.com/util/multierr" "tailscale.com/wgengine/router" "tailscale.com/wgengine/winnet" @@ -831,5 +830,5 @@ func syncRoutes(ifc *winipcfg.IPAdapterAddresses, want []*routeData, dontDelete } } - return multierr.New(errs...) + return errors.Join(errs...) } diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 478935483ade6..1f825b917e44a 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -32,7 +32,6 @@ import ( "tailscale.com/types/preftype" "tailscale.com/util/eventbus" "tailscale.com/util/linuxfw" - "tailscale.com/util/multierr" "tailscale.com/version/distro" "tailscale.com/wgengine/router" ) @@ -488,7 +487,7 @@ func (r *linuxRouter) Set(cfg *router.Config) error { r.enableIPForwarding() } - return multierr.New(errs...) + return errors.Join(errs...) 
} var dockerStatefulFilteringWarnable = health.Register(&health.Warnable{ diff --git a/wgengine/wgcfg/device.go b/wgengine/wgcfg/device.go index 80fa159e38972..ee7eb91c93b66 100644 --- a/wgengine/wgcfg/device.go +++ b/wgengine/wgcfg/device.go @@ -4,6 +4,7 @@ package wgcfg import ( + "errors" "io" "sort" @@ -11,7 +12,6 @@ import ( "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) // NewDevice returns a wireguard-go Device configured for Tailscale use. @@ -31,7 +31,7 @@ func DeviceConfig(d *device.Device) (*Config, error) { cfg, fromErr := FromUAPI(r) r.Close() getErr := <-errc - err := multierr.New(getErr, fromErr) + err := errors.Join(getErr, fromErr) if err != nil { return nil, err } @@ -64,5 +64,5 @@ func ReconfigDevice(d *device.Device, cfg *Config, logf logger.Logf) (err error) toErr := cfg.ToUAPI(logf, w, prev) w.Close() setErr := <-errc - return multierr.New(setErr, toErr) + return errors.Join(setErr, toErr) } From 05a4c8e8392c216db5a4e951ffccc95e8a72d152 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 20:53:47 -0700 Subject: [PATCH 0482/1093] tsnet: remove AuthenticatedAPITransport (API-over-noise) support It never launched and I've lost hope of it launching and it's in my way now, so I guess it's time to say goodbye. 
Updates tailscale/corp#4383 Updates #17305 Change-Id: I2eb551d49f2fb062979cc307f284df4b3dfa5956 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/debug.go | 9 ------ control/controlclient/auto.go | 10 ------ control/controlclient/direct.go | 14 -------- control/controlclient/noise.go | 23 ------------- internal/noiseconn/conn.go | 16 --------- ipn/ipnlocal/local.go | 57 --------------------------------- tsnet/tsnet.go | 35 -------------------- 7 files changed, 164 deletions(-) diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 8d0357716804e..7e800dbc5c01c 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -1128,15 +1128,6 @@ func tryConnect(ctx context.Context, controlPublic key.MachinePublic, noiseDiale } defer nc.Close() - // Reserve a RoundTrip for the whoami request. - ok, _, err := nc.ReserveNewRequest(ctx) - if err != nil { - return fmt.Errorf("ReserveNewRequest: %w", err) - } - if !ok { - return errors.New("ReserveNewRequest failed") - } - // Make a /whoami request to the server to verify that we can actually // communicate over the newly-established connection. whoamiURL := "http://" + ts2021Args.host + "/machine/whoami" diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index f5495f8546218..224838d56909a 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -845,13 +845,3 @@ func (c *Auto) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) error { func (c *Auto) DoNoiseRequest(req *http.Request) (*http.Response, error) { return c.direct.DoNoiseRequest(req) } - -// GetSingleUseNoiseRoundTripper returns a RoundTripper that can be only be used -// once (and must be used once) to make a single HTTP request over the noise -// channel to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. 
-func (c *Auto) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - return c.direct.GetSingleUseNoiseRoundTripper(ctx) -} diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 199e1479bcdb5..ed84d63ff26af 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1606,20 +1606,6 @@ func (c *Direct) DoNoiseRequest(req *http.Request) (*http.Response, error) { return nc.Do(req) } -// GetSingleUseNoiseRoundTripper returns a RoundTripper that can be only be used -// once (and must be used once) to make a single HTTP request over the noise -// channel to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. -func (c *Direct) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - nc, err := c.getNoiseClient() - if err != nil { - return nil, nil, err - } - return nc.GetSingleUseRoundTripper(ctx) -} - // doPingerPing sends a Ping to pr.IP using pinger, and sends an http request back to // pr.URL with ping response data. func doPingerPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest, pinger Pinger, pingType tailcfg.PingType) { diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go index a0f344664ab80..c001de0cdf7dd 100644 --- a/control/controlclient/noise.go +++ b/control/controlclient/noise.go @@ -181,29 +181,6 @@ func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) { return np, nil } -// GetSingleUseRoundTripper returns a RoundTripper that can be only be used once -// (and must be used once) to make a single HTTP request over the noise channel -// to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. 
-func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - for tries := 0; tries < 3; tries++ { - conn, err := nc.getConn(ctx) - if err != nil { - return nil, nil, err - } - ok, earlyPayloadMaybeNil, err := conn.ReserveNewRequest(ctx) - if err != nil { - return nil, nil, err - } - if ok { - return conn, earlyPayloadMaybeNil, nil - } - } - return nil, nil, errors.New("[unexpected] failed to reserve a request on a connection") -} - // contextErr is an error that wraps another error and is used to indicate that // the error was because a context expired. type contextErr struct { diff --git a/internal/noiseconn/conn.go b/internal/noiseconn/conn.go index 7476b7ecc5a6a..29fd1a2832a2e 100644 --- a/internal/noiseconn/conn.go +++ b/internal/noiseconn/conn.go @@ -84,22 +84,6 @@ func (c *Conn) GetEarlyPayload(ctx context.Context) (*tailcfg.EarlyNoise, error) } } -// ReserveNewRequest will reserve a new concurrent request on the connection. -// -// It returns whether the reservation was successful, and any early Noise -// payload if present. If a reservation was not successful, it will return -// false and nil for the early payload. -func (c *Conn) ReserveNewRequest(ctx context.Context) (bool, *tailcfg.EarlyNoise, error) { - earlyPayloadMaybeNil, err := c.GetEarlyPayload(ctx) - if err != nil { - return false, nil, err - } - if c.h2cc.ReserveNewRequest() { - return true, earlyPayloadMaybeNil, nil - } - return false, nil, nil -} - // CanTakeNewRequest reports whether the underlying HTTP/2 connection can take // a new request, meaning it has not been closed or received or sent a GOAWAY. 
func (c *Conn) CanTakeNewRequest() bool { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index f214c5def1039..09f317f0f4ec9 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -9,7 +9,6 @@ import ( "cmp" "context" "crypto/sha256" - "encoding/base64" "encoding/binary" "encoding/hex" "encoding/json" @@ -6540,62 +6539,6 @@ func (b *LocalBackend) MagicConn() *magicsock.Conn { return b.sys.MagicSock.Get() } -type keyProvingNoiseRoundTripper struct { - b *LocalBackend -} - -func (n keyProvingNoiseRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - b := n.b - - var priv key.NodePrivate - - b.mu.Lock() - cc := b.ccAuto - if nm := b.NetMap(); nm != nil { - priv = nm.PrivateKey - } - b.mu.Unlock() - if cc == nil { - return nil, errors.New("no client") - } - if priv.IsZero() { - return nil, errors.New("no netmap or private key") - } - rt, ep, err := cc.GetSingleUseNoiseRoundTripper(req.Context()) - if err != nil { - return nil, err - } - if ep == nil || ep.NodeKeyChallenge.IsZero() { - go rt.RoundTrip(new(http.Request)) // return our reservation with a bogus request - return nil, errors.New("this coordination server does not support API calls over the Noise channel") - } - - // QueryEscape the node key since it has a colon in it. - nk := url.QueryEscape(priv.Public().String()) - req.SetBasicAuth(nk, "") - - // genNodeProofHeaderValue returns the Tailscale-Node-Proof header's value to prove - // to chalPub that we control claimedPrivate. - genNodeProofHeaderValue := func(claimedPrivate key.NodePrivate, chalPub key.ChallengePublic) string { - // TODO(bradfitz): cache this somewhere? - box := claimedPrivate.SealToChallenge(chalPub, []byte(chalPub.String())) - return claimedPrivate.Public().String() + " " + base64.StdEncoding.EncodeToString(box) - } - - // And prove we have the private key corresponding to the public key sent - // tin the basic auth username. 
- req.Header.Set("Tailscale-Node-Proof", genNodeProofHeaderValue(priv, ep.NodeKeyChallenge)) - - return rt.RoundTrip(req) -} - -// KeyProvingNoiseRoundTripper returns an http.RoundTripper that uses the LocalBackend's -// DoNoiseRequest method and mutates the request to add an authorization header -// to prove the client's nodekey. -func (b *LocalBackend) KeyProvingNoiseRoundTripper() http.RoundTripper { - return keyProvingNoiseRoundTripper{b} -} - // DoNoiseRequest sends a request to URL over the control plane // Noise connection. func (b *LocalBackend) DoNoiseRequest(req *http.Request) (*http.Response, error) { diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 42e4198a0c5fd..d14f1f16c24ae 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -931,41 +931,6 @@ func (s *Server) getUDPHandlerForFlow(src, dst netip.AddrPort) (handler func(net return func(c nettype.ConnPacketConn) { ln.handle(c) }, true } -// I_Acknowledge_This_API_Is_Experimental must be set true to use AuthenticatedAPITransport() -// for now. -var I_Acknowledge_This_API_Is_Experimental = false - -// AuthenticatedAPITransport provides an HTTP transport that can be used with -// the control server API without needing additional authentication details. It -// authenticates using the current client's nodekey. -// -// It requires the user to set I_Acknowledge_This_API_Is_Experimental. -// -// For example: -// -// import "net/http" -// import "tailscale.com/client/tailscale/v2" -// import "tailscale.com/tsnet" -// -// var s *tsnet.Server -// ... -// rt, err := s.AuthenticatedAPITransport() -// // handler err ... 
-// var client tailscale.Client{HTTP: http.Client{ -// Timeout: 1*time.Minute, -// UserAgent: "your-useragent-here", -// Transport: rt, -// }} -func (s *Server) AuthenticatedAPITransport() (http.RoundTripper, error) { - if !I_Acknowledge_This_API_Is_Experimental { - return nil, errors.New("use of AuthenticatedAPITransport without setting I_Acknowledge_This_API_Is_Experimental") - } - if err := s.Start(); err != nil { - return nil, err - } - return s.lb.KeyProvingNoiseRoundTripper(), nil -} - // Listen announces only on the Tailscale network. // It will start the server if it has not been started yet. // From 6f7ce5eb5dc8540b6430557e48107ff5b46b385c Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 1 Oct 2025 11:39:01 -0700 Subject: [PATCH 0483/1093] appc: factor app connector arguments into a Config type (#17389) Replace the positional arguments to NewAppConnector with a Config struct. Update the existing uses. Other than the API change, there are no functional changes in this commit. Updates #15160 Updates #17192 Change-Id: Ibf37f021372155a4db8aaf738f4b4f2c746bf623 Signed-off-by: M. J. Fromberger --- appc/appconnector.go | 35 +++++++++++---- appc/appconnector_test.go | 87 ++++++++++++++++++++++++++++-------- ipn/ipnlocal/local.go | 7 ++- ipn/ipnlocal/local_test.go | 15 +++++-- ipn/ipnlocal/peerapi_test.go | 27 ++++++++--- 5 files changed, 133 insertions(+), 38 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index 8d7dd54e8dc0e..8c1d49d22d671 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -162,17 +162,36 @@ type AppConnector struct { writeRateDay *rateLogger } +// Config carries the settings for an [AppConnector]. +type Config struct { + // Logf is the logger to which debug logs from the connector will be sent. + // It must be non-nil. + Logf logger.Logf + + // RouteAdvertiser allows the connector to update the set of advertised routes. + // It must be non-nil. 
+ RouteAdvertiser RouteAdvertiser + + // RouteInfo, if non-nil, use used as the initial set of routes for the + // connector. If nil, the connector starts empty. + RouteInfo *RouteInfo + + // StoreRoutesFunc, if non-nil, is called when the connector's routes + // change, to allow the routes to be persisted. + StoreRoutesFunc func(*RouteInfo) error +} + // NewAppConnector creates a new AppConnector. -func NewAppConnector(logf logger.Logf, routeAdvertiser RouteAdvertiser, routeInfo *RouteInfo, storeRoutesFunc func(*RouteInfo) error) *AppConnector { +func NewAppConnector(c Config) *AppConnector { ac := &AppConnector{ - logf: logger.WithPrefix(logf, "appc: "), - routeAdvertiser: routeAdvertiser, - storeRoutesFunc: storeRoutesFunc, + logf: logger.WithPrefix(c.Logf, "appc: "), + routeAdvertiser: c.RouteAdvertiser, + storeRoutesFunc: c.StoreRoutesFunc, } - if routeInfo != nil { - ac.domains = routeInfo.Domains - ac.wildcards = routeInfo.Wildcards - ac.controlRoutes = routeInfo.Control + if c.RouteInfo != nil { + ac.domains = c.RouteInfo.Domains + ac.wildcards = c.RouteInfo.Wildcards + ac.controlRoutes = c.RouteInfo.Control } ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) { ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l) diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index c13835f39ed9a..12a39f0401fdd 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -28,9 +28,14 @@ func TestUpdateDomains(t *testing.T) { ctx := context.Background() var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, &appctest.RouteCollector{}, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: &appctest.RouteCollector{}, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: 
&appctest.RouteCollector{}}) } a.UpdateDomains([]string{"example.com"}) @@ -63,9 +68,13 @@ func TestUpdateRoutes(t *testing.T) { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } a.updateDomains([]string{"*.example.com"}) @@ -112,9 +121,14 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) @@ -133,9 +147,14 @@ func TestDomainRoutes(t *testing.T) { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } a.updateDomains([]string{"example.com"}) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -159,9 +178,14 @@ func TestObserveDNSResponse(t *testing.T) { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ 
+ Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } // a has no domains configured, so it should not advertise any routes @@ -248,9 +272,14 @@ func TestWildcardDomains(t *testing.T) { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } a.updateDomains([]string{"*.example.com"}) @@ -408,9 +437,14 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } // nothing has yet been advertised assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -453,9 +487,14 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -508,9 +547,14 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { var a *AppConnector if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a = 
NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = NewAppConnector(t.Logf, rc, nil, nil) + a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) } assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -649,7 +693,12 @@ func TestMetricBucketsAreSorted(t *testing.T) { func TestUpdateRoutesDeadlock(t *testing.T) { ctx := context.Background() rc := &appctest.RouteCollector{} - a := NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a := NewAppConnector(Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) advertiseCalled := new(atomic.Bool) unadvertiseCalled := new(atomic.Bool) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 09f317f0f4ec9..5e738572fa8ee 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4802,7 +4802,12 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i } storeFunc = b.storeRouteInfo } - b.appConnector = appc.NewAppConnector(b.logf, b, ri, storeFunc) + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: b.logf, + RouteAdvertiser: b, + RouteInfo: ri, + StoreRoutesFunc: storeFunc, + }) } if nm == nil { return diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index a984d66bff035..571f472cca8a6 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2309,9 +2309,11 @@ func TestOfferingAppConnector(t *testing.T) { t.Fatal("unexpected offering app connector") } if shouldStore { - b.appConnector = appc.NewAppConnector(t.Logf, nil, &appc.RouteInfo{}, fakeStoreRoutes) + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, + }) } else { - b.appConnector = appc.NewAppConnector(t.Logf, nil, nil, nil) + b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf}) } if 
!b.OfferingAppConnector() { t.Fatal("unexpected not offering app connector") @@ -2370,9 +2372,14 @@ func TestObserveDNSResponse(t *testing.T) { rc := &appctest.RouteCollector{} if shouldStore { - b.appConnector = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &appc.RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - b.appConnector = appc.NewAppConnector(t.Logf, rc, nil, nil) + b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) } b.appConnector.UpdateDomains([]string{"example.com"}) b.appConnector.Wait(context.Background()) diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index db01dd608b2a7..a6a5f6ff5e117 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -257,9 +257,14 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { - a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, &appc.RouteInfo{}, fakeStoreRoutes) + a = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + RouteAdvertiser: &appctest.RouteCollector{}, + RouteInfo: &appc.RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) + a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: &appctest.RouteCollector{}}) } sys.Set(pm.Store()) sys.Set(eng) @@ -332,9 +337,14 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) var a *appc.AppConnector if shouldStore { - a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) + a = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &appc.RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) 
} else { - a = appc.NewAppConnector(t.Logf, rc, nil, nil) + a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) } sys.Set(pm.Store()) sys.Set(eng) @@ -399,9 +409,14 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) var a *appc.AppConnector if shouldStore { - a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) + a = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + RouteAdvertiser: rc, + RouteInfo: &appc.RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, + }) } else { - a = appc.NewAppConnector(t.Logf, rc, nil, nil) + a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) } sys.Set(pm.Store()) sys.Set(eng) From ce752b8a88214a2d45477aa8b77384175ebbdf18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 1 Oct 2025 14:59:38 -0400 Subject: [PATCH 0484/1093] net/netmon: remove usage of direct callbacks from netmon (#17292) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The callback itself is not removed as it is used in other repos, making it simpler for those to slowly transition to the eventbus. 
Updates #15160 Signed-off-by: Claus Lensbøl --- cmd/tailscaled/debug.go | 33 ++++++++++++---- cmd/tailscaled/tailscaled.go | 11 +++++- cmd/tsconnect/wasm/wasm_js.go | 2 +- control/controlclient/controlclient_test.go | 2 + control/controlclient/direct_test.go | 8 +++- control/controlclient/noise_test.go | 3 ++ control/controlhttp/http_test.go | 7 +++- ipn/ipnlocal/local.go | 2 +- ipn/ipnlocal/local_test.go | 8 +++- ipn/ipnlocal/network-lock_test.go | 4 +- ipn/ipnlocal/state_test.go | 1 + log/sockstatlog/logger.go | 4 +- log/sockstatlog/logger_test.go | 2 +- logpolicy/logpolicy.go | 7 ++++ logtail/config.go | 2 + logtail/logtail.go | 31 +++++++++++++++ logtail/logtail_test.go | 7 +++- net/dns/manager.go | 4 +- net/dns/manager_tcp_test.go | 10 ++++- net/dns/manager_test.go | 10 ++++- net/dns/resolver/forwarder_test.go | 2 +- net/dns/resolver/tsdns_test.go | 7 +++- net/netmon/loghelper.go | 22 +++++++++-- net/netmon/loghelper_test.go | 21 ++++------ net/tsdial/tsdial.go | 43 +++++++++++++++++++++ tsnet/tsnet.go | 2 + wgengine/netlog/netlog.go | 4 +- wgengine/userspace.go | 6 ++- 28 files changed, 217 insertions(+), 48 deletions(-) diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index 96f98d9d6fa54..bcc34fb0d3c5d 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -104,14 +104,10 @@ func runMonitor(ctx context.Context, loop bool) error { } defer mon.Close() - mon.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - if !delta.Major { - log.Printf("Network monitor fired; not a major change") - return - } - log.Printf("Network monitor fired. 
New state:") - dump(delta.New) - }) + eventClient := b.Client("debug.runMonitor") + m := eventClient.Monitor(changeDeltaWatcher(eventClient, ctx, dump)) + defer m.Close() + if loop { log.Printf("Starting link change monitor; initial state:") } @@ -124,6 +120,27 @@ func runMonitor(ctx context.Context, loop bool) error { select {} } +func changeDeltaWatcher(ec *eventbus.Client, ctx context.Context, dump func(st *netmon.State)) func(*eventbus.Client) { + changeSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ctx.Done(): + return + case <-ec.Done(): + return + case delta := <-changeSub.Events(): + if !delta.Major { + log.Printf("Network monitor fired; not a major change") + return + } + log.Printf("Network monitor fired. New state:") + dump(delta.New) + } + } + } +} + func getURL(ctx context.Context, urlStr string) error { if urlStr == "login" { urlStr = "https://login.tailscale.com" diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 8de473b7c718b..27fec05a3383b 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -433,7 +433,13 @@ func run() (err error) { var publicLogID logid.PublicID if buildfeatures.HasLogTail { - pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker.Get(), nil /* use log.Printf */) + + pol := logpolicy.Options{ + Collection: logtail.CollectionNode, + NetMon: netMon, + Health: sys.HealthTracker.Get(), + Bus: sys.Bus.Get(), + }.New() pol.SetVerbosityLevel(args.verbose) publicLogID = pol.PublicID logPol = pol @@ -470,7 +476,7 @@ func run() (err error) { // Always clean up, even if we're going to run the server. This covers cases // such as when a system was rebooted without shutting down, or tailscaled // crashed, and would for example restore system DNS configuration. 
- dns.CleanUp(logf, netMon, sys.HealthTracker.Get(), args.tunname) + dns.CleanUp(logf, netMon, sys.Bus.Get(), sys.HealthTracker.Get(), args.tunname) router.CleanUp(logf, netMon, args.tunname) // If the cleanUp flag was passed, then exit. if args.cleanUp { @@ -616,6 +622,7 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID } dialer := &tsdial.Dialer{Logf: logf} // mutated below (before used) + dialer.SetBus(sys.Bus.Get()) sys.Set(dialer) onlyNetstack, err := createEngine(logf, sys) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index fbf7968a01f11..2e81fa4a8a2e7 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -104,6 +104,7 @@ func newIPN(jsConfig js.Value) map[string]any { sys := tsd.NewSystem() sys.Set(store) dialer := &tsdial.Dialer{Logf: logf} + dialer.SetBus(sys.Bus.Get()) eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ Dialer: dialer, SetSubsystem: sys.Set, @@ -463,7 +464,6 @@ func (s *jsSSHSession) Run() { cols = s.pendingResizeCols } err = session.RequestPty("xterm", rows, cols, ssh.TerminalModes{}) - if err != nil { writeError("Pseudo Terminal", err) return diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 78646d76aca47..3914d10ef8310 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -223,6 +223,7 @@ func TestDirectProxyManual(t *testing.T) { dialer := &tsdial.Dialer{} dialer.SetNetMon(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ Persist: persist.Persist{}, @@ -300,6 +301,7 @@ func testHTTPS(t *testing.T, withProxy bool) { dialer := &tsdial.Dialer{} dialer.SetNetMon(netmon.NewStatic()) + dialer.SetBus(bus) dialer.SetSystemDialerForTest(func(ctx context.Context, network, addr string) (net.Conn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { diff --git a/control/controlclient/direct_test.go 
b/control/controlclient/direct_test.go index bba76d6f05c0d..dd93dc7b33d61 100644 --- a/control/controlclient/direct_test.go +++ b/control/controlclient/direct_test.go @@ -27,13 +27,15 @@ func TestNewDirect(t *testing.T) { bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ ServerURL: "https://example.com", Hostinfo: hi, GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, Bus: bus, } c, err := NewDirect(opts) @@ -105,13 +107,15 @@ func TestTsmpPing(t *testing.T) { bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ ServerURL: "https://example.com", Hostinfo: hi, GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, Bus: bus, } diff --git a/control/controlclient/noise_test.go b/control/controlclient/noise_test.go index 4904016f2f082..d9c71cf274e71 100644 --- a/control/controlclient/noise_test.go +++ b/control/controlclient/noise_test.go @@ -22,6 +22,7 @@ import ( "tailscale.com/tstest/nettest" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/eventbus/eventbustest" ) // maxAllowedNoiseVersion is the highest we expect the Tailscale @@ -175,6 +176,7 @@ func (tt noiseClientTest) run(t *testing.T) { serverPrivate := key.NewMachine() clientPrivate := key.NewMachine() chalPrivate := key.NewChallenge() + bus := eventbustest.NewBus(t) const msg = "Hello, client" h2 := &http2.Server{} @@ -194,6 +196,7 @@ func (tt noiseClientTest) run(t *testing.T) { defer hs.Close() dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) if nettest.PreferMemNetwork() { dialer.SetSystemDialerForTest(nw.Dial) } diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index 
6485761ac1eec..648b9e5ed88d5 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -149,6 +149,8 @@ func testControlHTTP(t *testing.T, param httpTestParam) { proxy := param.proxy client, server := key.NewMachine(), key.NewMachine() + bus := eventbustest.NewBus(t) + const testProtocolVersion = 1 const earlyWriteMsg = "Hello, world!" sch := make(chan serverResult, 1) @@ -218,6 +220,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { netMon := netmon.NewStatic() dialer := tsdial.NewDialer(netMon) + dialer.SetBus(bus) a := &Dialer{ Hostname: "localhost", HTTPPort: strconv.Itoa(httpLn.Addr().(*net.TCPAddr).Port), @@ -775,7 +778,7 @@ func runDialPlanTest(t *testing.T, plan *tailcfg.ControlDialPlan, want []netip.A if allowFallback { host = fallbackAddr.String() } - + bus := eventbustest.NewBus(t) a := &Dialer{ Hostname: host, HTTPPort: httpPort, @@ -790,7 +793,7 @@ func runDialPlanTest(t *testing.T, plan *tailcfg.ControlDialPlan, want []netip.A omitCertErrorLogging: true, testFallbackDelay: 50 * time.Millisecond, Clock: clock, - HealthTracker: health.NewTracker(eventbustest.NewBus(t)), + HealthTracker: health.NewTracker(bus), } start := time.Now() diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5e738572fa8ee..af5a4055024ca 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -526,7 +526,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo }() netMon := sys.NetMon.Get() - b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker.Get()) + b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker.Get(), sys.Bus.Get()) if err != nil { log.Printf("error setting up sockstat logger: %v", err) } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 571f472cca8a6..ec65c67ee385d 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ 
-480,7 +480,9 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { t.Log("Added fake userspace engine for testing") } if _, ok := sys.Dialer.GetOK(); !ok { - sys.Set(tsdial.NewDialer(netmon.NewStatic())) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(sys.Bus.Get()) + sys.Set(dialer) t.Log("Added static dialer for testing") } lb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) @@ -3108,12 +3110,14 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { b.hostinfo = hi k := key.NewMachine() var cc *mockControl + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(sys.Bus.Get()) opts := controlclient.Options{ ServerURL: "https://example.com", GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, Logf: b.logf, PolicyClient: polc, } diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 0d3f7db43ff0f..c7c4c905f5ca1 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -54,6 +54,8 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := controlclient.Options{ ServerURL: "https://example.com", Hostinfo: hi, @@ -63,7 +65,7 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even HTTPTestClient: c, NoiseTestClient: c, Observer: observerFunc(func(controlclient.Status) {}), - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, Bus: bus, } diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 347aaf8b83c94..a387af035bbbb 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1668,6 +1668,7 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( sys := tsd.NewSystemWithBus(bus) sys.Set(dialer) 
sys.Set(dialer.NetMon()) + dialer.SetBus(bus) magicConn, err := magicsock.NewConn(magicsock.Options{ Logf: logf, diff --git a/log/sockstatlog/logger.go b/log/sockstatlog/logger.go index 4f8909725d1f1..e0744de0f089a 100644 --- a/log/sockstatlog/logger.go +++ b/log/sockstatlog/logger.go @@ -26,6 +26,7 @@ import ( "tailscale.com/net/sockstats" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" ) @@ -97,7 +98,7 @@ func SockstatLogID(logID logid.PublicID) logid.PrivateID { // // The netMon parameter is optional. It should be specified in environments where // Tailscaled is manipulating the routing table. -func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor, health *health.Tracker) (*Logger, error) { +func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (*Logger, error) { if !sockstats.IsAvailable || !buildfeatures.HasLogTail { return nil, nil } @@ -127,6 +128,7 @@ func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *ne PrivateID: SockstatLogID(logID), Collection: "sockstats.log.tailscale.io", Buffer: filch, + Bus: bus, CompressLogs: true, FlushDelayFn: func() time.Duration { // set flush delay to 100 years so it never flushes automatically diff --git a/log/sockstatlog/logger_test.go b/log/sockstatlog/logger_test.go index 31fb17e460141..e5c2feb2986d8 100644 --- a/log/sockstatlog/logger_test.go +++ b/log/sockstatlog/logger_test.go @@ -24,7 +24,7 @@ func TestResourceCleanup(t *testing.T) { if err != nil { t.Fatal(err) } - lg, err := NewLogger(td, logger.Discard, id.Public(), nil, nil) + lg, err := NewLogger(td, logger.Discard, id.Public(), nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index c1f3e553a168a..9c7e62ab0da11 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -50,6 +50,7 @@ 
import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/syspolicy/pkey" @@ -489,6 +490,11 @@ type Options struct { // If non-nil, it's used to construct the default HTTP client. Health *health.Tracker + // Bus is an optional parameter for communication on the eventbus. + // If non-nil, it's passed to logtail for use in interface monitoring. + // TODO(cmol): Make this non-optional when it's plumbed in by the clients. + Bus *eventbus.Bus + // Logf is an optional logger to use. // If nil, [log.Printf] will be used instead. Logf logger.Logf @@ -615,6 +621,7 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { Stderr: logWriter{console}, CompressLogs: true, MaxUploadSize: opts.MaxUploadSize, + Bus: opts.Bus, } if opts.Collection == logtail.CollectionNode { conf.MetricsDelta = clientmetric.EncodeLogTailMetricsDelta diff --git a/logtail/config.go b/logtail/config.go index a6c068c0c86c6..bf47dd8aa7b52 100644 --- a/logtail/config.go +++ b/logtail/config.go @@ -10,6 +10,7 @@ import ( "tailscale.com/tstime" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" ) // DefaultHost is the default host name to upload logs to when @@ -34,6 +35,7 @@ type Config struct { LowMemory bool // if true, logtail minimizes memory use Clock tstime.Clock // if set, Clock.Now substitutes uses of time.Now Stderr io.Writer // if set, logs are sent here instead of os.Stderr + Bus *eventbus.Bus // if set, uses the eventbus for awaitInternetUp instead of callback StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only Buffer Buffer // temp storage, if nil a MemoryBuffer CompressLogs bool // whether to compress the log uploads diff --git a/logtail/logtail.go b/logtail/logtail.go index 948c5a4605f05..675422890149c 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -32,6 
+32,7 @@ import ( "tailscale.com/tstime" tslogger "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/set" "tailscale.com/util/truncate" "tailscale.com/util/zstdframe" @@ -120,6 +121,10 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { shutdownStart: make(chan struct{}), shutdownDone: make(chan struct{}), } + + if cfg.Bus != nil { + l.eventClient = cfg.Bus.Client("logtail.Logger") + } l.SetSockstatsLabel(sockstats.LabelLogtailLogger) l.compressLogs = cfg.CompressLogs @@ -156,6 +161,7 @@ type Logger struct { privateID logid.PrivateID httpDoCalls atomic.Int32 sockstatsLabel atomicSocktatsLabel + eventClient *eventbus.Client procID uint32 includeProcSequence bool @@ -221,6 +227,9 @@ func (l *Logger) Shutdown(ctx context.Context) error { l.httpc.CloseIdleConnections() }() + if l.eventClient != nil { + l.eventClient.Close() + } l.shutdownStartMu.Lock() select { case <-l.shutdownStart: @@ -417,6 +426,10 @@ func (l *Logger) internetUp() bool { } func (l *Logger) awaitInternetUp(ctx context.Context) { + if l.eventClient != nil { + l.awaitInternetUpBus(ctx) + return + } upc := make(chan bool, 1) defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { if delta.New.AnyInterfaceUp() { @@ -436,6 +449,24 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { } } +func (l *Logger) awaitInternetUpBus(ctx context.Context) { + if l.internetUp() { + return + } + sub := eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) + defer sub.Close() + select { + case delta := <-sub.Events(): + if delta.New.AnyInterfaceUp() { + fmt.Fprintf(l.stderr, "logtail: internet back up\n") + return + } + fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") + case <-ctx.Done(): + return + } +} + // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. 
diff --git a/logtail/logtail_test.go b/logtail/logtail_test.go index b8c46c44840bc..a92f88b4bb03e 100644 --- a/logtail/logtail_test.go +++ b/logtail/logtail_test.go @@ -17,6 +17,7 @@ import ( "github.com/go-json-experiment/json/jsontext" "tailscale.com/tstest" "tailscale.com/tstime" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) @@ -30,6 +31,7 @@ func TestFastShutdown(t *testing.T) { l := NewLogger(Config{ BaseURL: testServ.URL, + Bus: eventbustest.NewBus(t), }, t.Logf) err := l.Shutdown(ctx) if err != nil { @@ -62,7 +64,10 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) { t.Cleanup(ts.srv.Close) - l := NewLogger(Config{BaseURL: ts.srv.URL}, t.Logf) + l := NewLogger(Config{ + BaseURL: ts.srv.URL, + Bus: eventbustest.NewBus(t), + }, t.Logf) // There is always an initial "logtail started" message body := <-ts.uploaded diff --git a/net/dns/manager.go b/net/dns/manager.go index edf156eceebda..de99fe646f786 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -30,6 +30,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy/policyclient" ) @@ -600,7 +601,7 @@ func (m *Manager) FlushCaches() error { // No other state needs to be instantiated before this runs. 
// // health must not be nil -func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, interfaceName string) { +func CleanUp(logf logger.Logf, netMon *netmon.Monitor, bus *eventbus.Bus, health *health.Tracker, interfaceName string) { if !buildfeatures.HasDNS { return } @@ -611,6 +612,7 @@ func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, i } d := &tsdial.Dialer{Logf: logf} d.SetNetMon(netMon) + d.SetBus(bus) dns := NewManager(logf, oscfg, health, d, nil, nil, runtime.GOOS) if err := dns.Down(); err != nil { logf("dns down: %v", err) diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go index 46883a1e7db54..dcdc88c7a22bf 100644 --- a/net/dns/manager_tcp_test.go +++ b/net/dns/manager_tcp_test.go @@ -90,7 +90,10 @@ func TestDNSOverTCP(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(t.Logf, &f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts( @@ -175,7 +178,10 @@ func TestDNSOverTCP_TooLarge(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(log, &f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(log, &f, health.NewTracker(bus), dialer, nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts("andrew.ts.com.", "1.2.3.4"), diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index b5a510862580b..92b660007cdd2 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -933,7 +933,10 @@ func TestManager(t *testing.T) { goos = "linux" } knobs := 
&controlknobs.Knobs{} - m := NewManager(t.Logf, &f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, knobs, goos) + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, knobs, goos) m.resolver.TestOnlySetHook(f.SetResolver) if err := m.Set(test.in); err != nil { @@ -1039,7 +1042,10 @@ func TestConfigRecompilation(t *testing.T) { SearchDomains: fqdns("foo.ts.net"), } - m := NewManager(t.Logf, f, health.NewTracker(eventbustest.NewBus(t)), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "darwin") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, f, health.NewTracker(bus), dialer, nil, nil, "darwin") var managerConfig *resolver.Config m.resolver.TestOnlySetHook(func(cfg resolver.Config) { diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index b5cc7d018bb96..ec491c581af99 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -122,7 +122,6 @@ func TestResolversWithDelays(t *testing.T) { } }) } - } func TestGetRCode(t *testing.T) { @@ -454,6 +453,7 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports var dialer tsdial.Dialer dialer.SetNetMon(netMon) + dialer.SetBus(bus) fwd := newForwarder(logf, netMon, nil, &dialer, health.NewTracker(bus), nil) if modify != nil { diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index 0823ea139bc1a..f0dbb48b33f6e 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -353,10 +353,13 @@ func TestRDNSNameToIPv6(t *testing.T) { } func newResolver(t testing.TB) *Resolver { + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) return New(t.Logf, nil, // no link selector - tsdial.NewDialer(netmon.NewStatic()), - 
health.NewTracker(eventbustest.NewBus(t)), + dialer, + health.NewTracker(bus), nil, // no control knobs ) } diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go index 96991644c38b9..2e28e8cda7895 100644 --- a/net/netmon/loghelper.go +++ b/net/netmon/loghelper.go @@ -8,6 +8,7 @@ import ( "sync" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) // LinkChangeLogLimiter returns a new [logger.Logf] that logs each unique @@ -17,13 +18,12 @@ import ( // done. func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) logger.Logf { var formatSeen sync.Map // map[string]bool - unregister := nm.RegisterChangeCallback(func(cd *ChangeDelta) { + nm.b.Monitor(nm.changeDeltaWatcher(nm.b, ctx, func(cd ChangeDelta) { // If we're in a major change or a time jump, clear the seen map. if cd.Major || cd.TimeJumped { formatSeen.Clear() } - }) - context.AfterFunc(ctx, unregister) + })) return func(format string, args ...any) { // We only store 'true' in the map, so if it's present then it @@ -42,3 +42,19 @@ func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) lo logf(format, args...) 
} } + +func (nm *Monitor) changeDeltaWatcher(ec *eventbus.Client, ctx context.Context, fn func(ChangeDelta)) func(*eventbus.Client) { + sub := eventbus.Subscribe[ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ctx.Done(): + return + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + } +} diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go index aeac9f03191aa..ca3b1284cfa0e 100644 --- a/net/netmon/loghelper_test.go +++ b/net/netmon/loghelper_test.go @@ -11,6 +11,7 @@ import ( "testing/synctest" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) func TestLinkChangeLogLimiter(t *testing.T) { synctest.Test(t, syncTestLinkChangeLogLimiter) } @@ -61,21 +62,15 @@ func syncTestLinkChangeLogLimiter(t *testing.T) { // string cache and allow the next log to write to our log buffer. // // InjectEvent doesn't work because it's not a major event, so we - // instead reach into the netmon and grab the callback, and then call - // it ourselves. - mon.mu.Lock() - var cb func(*ChangeDelta) - for _, c := range mon.cbs { - cb = c - break - } - mon.mu.Unlock() - - cb(&ChangeDelta{Major: true}) + // instead inject the event ourselves. 
+ injector := eventbustest.NewInjector(t, bus) + eventbustest.Inject(injector, ChangeDelta{Major: true}) + synctest.Wait() logf("hello %s", "world") - if got := logBuffer.String(); got != "hello world\nother message\nhello world\n" { - t.Errorf("unexpected log buffer contents: %q", got) + want := "hello world\nother message\nhello world\n" + if got := logBuffer.String(); got != want { + t.Errorf("unexpected log buffer contents, got: %q, want, %q", got, want) } // Canceling the context we passed to LinkChangeLogLimiter should diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index e4e4e9e8b0f92..bec196a2e7378 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -28,6 +28,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/testenv" "tailscale.com/version" @@ -86,6 +87,8 @@ type Dialer struct { dnsCache *dnscache.MessageCache // nil until first non-empty SetExitDNSDoH nextSysConnID int activeSysConns map[int]net.Conn // active connections not yet closed + eventClient *eventbus.Client + eventBusSubs eventbus.Monitor } // sysConn wraps a net.Conn that was created using d.SystemDial. @@ -158,6 +161,9 @@ func (d *Dialer) SetRoutes(routes, localRoutes []netip.Prefix) { } func (d *Dialer) Close() error { + if d.eventClient != nil { + d.eventBusSubs.Close() + } d.mu.Lock() defer d.mu.Unlock() d.closed = true @@ -186,6 +192,14 @@ func (d *Dialer) SetNetMon(netMon *netmon.Monitor) { d.netMonUnregister = nil } d.netMon = netMon + // Having multiple watchers could lead to problems, + // so remove the eventClient if it exists. + // This should really not happen, but better checking for it than not. + // TODO(cmol): Should this just be a panic? 
+ if d.eventClient != nil { + d.eventBusSubs.Close() + d.eventClient = nil + } d.netMonUnregister = d.netMon.RegisterChangeCallback(d.linkChanged) } @@ -197,6 +211,35 @@ func (d *Dialer) NetMon() *netmon.Monitor { return d.netMon } +func (d *Dialer) SetBus(bus *eventbus.Bus) { + d.mu.Lock() + defer d.mu.Unlock() + if d.eventClient != nil { + panic("eventbus has already been set") + } + // Having multiple watchers could lead to problems, + // so unregister the callback if it exists. + if d.netMonUnregister != nil { + d.netMonUnregister() + } + d.eventClient = bus.Client("tsdial.Dialer") + d.eventBusSubs = d.eventClient.Monitor(d.linkChangeWatcher(d.eventClient)) +} + +func (d *Dialer) linkChangeWatcher(ec *eventbus.Client) func(*eventbus.Client) { + linkChangeSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ec.Done(): + return + case cd := <-linkChangeSub.Events(): + d.linkChanged(&cd) + } + } + } +} + var ( metricLinkChangeConnClosed = clientmetric.NewCounter("tsdial_linkchange_closes") metricChangeDeltaNoDefaultRoute = clientmetric.NewCounter("tsdial_changedelta_no_default_route") diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index d14f1f16c24ae..890193d0bbd16 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -592,6 +592,7 @@ func (s *Server) start() (reterr error) { closePool.add(s.netMon) s.dialer = &tsdial.Dialer{Logf: tsLogf} // mutated below (before used) + s.dialer.SetBus(sys.Bus.Get()) eng, err := wgengine.NewUserspaceEngine(tsLogf, wgengine.Config{ EventBus: sys.Bus.Get(), ListenPort: s.Port, @@ -767,6 +768,7 @@ func (s *Server) startLogger(closePool *closeOnErrorPool, health *health.Tracker Stderr: io.Discard, // log everything to Buffer Buffer: s.logbuffer, CompressLogs: true, + Bus: s.sys.Bus.Get(), HTTPC: &http.Client{Transport: logpolicy.NewLogtailTransport(logtail.DefaultHost, s.netMon, health, tsLogf)}, MetricsDelta: clientmetric.EncodeLogTailMetricsDelta, } diff --git 
a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 34b78a2b595a6..b7281e542859b 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -29,6 +29,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logid" "tailscale.com/types/netlogtype" + "tailscale.com/util/eventbus" "tailscale.com/wgengine/router" ) @@ -95,7 +96,7 @@ var testClient *http.Client // The IP protocol and source port are always zero. // The sock is used to populated the PhysicalTraffic field in Message. // The netMon parameter is optional; if non-nil it's used to do faster interface lookups. -func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, logExitFlowEnabledEnabled bool) error { +func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, logExitFlowEnabledEnabled bool) error { nl.mu.Lock() defer nl.mu.Unlock() if nl.logger != nil { @@ -112,6 +113,7 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo Collection: "tailtraffic.log.tailscale.io", PrivateID: nodeLogID, CopyPrivateID: domainLogID, + Bus: bus, Stderr: io.Discard, CompressLogs: true, HTTPC: httpc, diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 049abcf1709e4..30486f7a972f4 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -312,6 +312,9 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } if conf.Dialer == nil { conf.Dialer = &tsdial.Dialer{Logf: logf} + if conf.EventBus != nil { + conf.Dialer.SetBus(conf.EventBus) + } } var tsTUNDev *tstun.Wrapper @@ -379,6 +382,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) tunName, _ := conf.Tun.Name() conf.Dialer.SetTUNName(tunName) conf.Dialer.SetNetMon(e.netMon) + conf.Dialer.SetBus(e.eventBus) e.dns = 
dns.NewManager(logf, conf.DNS, e.health, conf.Dialer, fwdDNSLinkSelector{e, tunName}, conf.ControlKnobs, runtime.GOOS) // TODO: there's probably a better place for this @@ -1035,7 +1039,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, tid := cfg.NetworkLogging.DomainID logExitFlowEnabled := cfg.NetworkLogging.LogExitFlowEnabled e.logf("wgengine: Reconfig: starting up network logger (node:%s tailnet:%s)", nid.Public(), tid.Public()) - if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, logExitFlowEnabled); err != nil { + if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, e.eventBus, logExitFlowEnabled); err != nil { e.logf("wgengine: Reconfig: error starting up network logger: %v", err) } e.networkLogger.ReconfigRoutes(routerCfg) From 67f108126930a019e2318a43d0ddd30c0c80fd13 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 1 Oct 2025 12:00:32 -0700 Subject: [PATCH 0485/1093] appc,ipn/ipnlocal: add a required event bus to the AppConnector type (#17390) Require the presence of the bus, but do not use it yet. Check for required fields and update tests and production use to plumb the necessary arguments. Updates #15160 Updates #17192 Change-Id: I8cefd2fdb314ca9945317d3320bd5ea6a92e8dcb Signed-off-by: M. J. 
Fromberger --- appc/appconnector.go | 17 ++++++++++ appc/appconnector_test.go | 60 ++++++++++++++++++++++++------------ ipn/ipnlocal/local.go | 1 + ipn/ipnlocal/local_test.go | 10 ++++-- ipn/ipnlocal/peerapi_test.go | 13 ++++++-- 5 files changed, 75 insertions(+), 26 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index 8c1d49d22d671..c86bf2d0fef96 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -22,6 +22,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/execqueue" "tailscale.com/util/slicesx" ) @@ -136,7 +137,9 @@ type RouteInfo struct { // routes not yet served by the AppConnector the local node configuration is // updated to advertise the new route. type AppConnector struct { + // These fields are immutable after initialization. logf logger.Logf + eventBus *eventbus.Bus routeAdvertiser RouteAdvertiser // storeRoutesFunc will be called to persist routes if it is not nil. @@ -168,6 +171,10 @@ type Config struct { // It must be non-nil. Logf logger.Logf + // EventBus receives events when the collection of routes maintained by the + // connector is updated. It must be non-nil. + EventBus *eventbus.Bus + // RouteAdvertiser allows the connector to update the set of advertised routes. // It must be non-nil. RouteAdvertiser RouteAdvertiser @@ -183,8 +190,18 @@ type Config struct { // NewAppConnector creates a new AppConnector. 
func NewAppConnector(c Config) *AppConnector { + switch { + case c.Logf == nil: + panic("missing logger") + case c.EventBus == nil: + panic("missing event bus") + case c.RouteAdvertiser == nil: + panic("missing route advertiser") + } + ac := &AppConnector{ logf: logger.WithPrefix(c.Logf, "appc: "), + eventBus: c.EventBus, routeAdvertiser: c.RouteAdvertiser, storeRoutesFunc: c.StoreRoutesFunc, } diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index 12a39f0401fdd..c23908c28231a 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -4,7 +4,6 @@ package appc import ( - "context" "net/netip" "reflect" "slices" @@ -16,6 +15,7 @@ import ( "tailscale.com/appc/appctest" "tailscale.com/tstest" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/slicesx" @@ -24,18 +24,20 @@ import ( func fakeStoreRoutes(*RouteInfo) error { return nil } func TestUpdateDomains(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: &appctest.RouteCollector{}, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: &appctest.RouteCollector{}}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: &appctest.RouteCollector{}}) } a.UpdateDomains([]string{"example.com"}) @@ -63,18 +65,20 @@ func TestUpdateDomains(t *testing.T) { } func TestUpdateRoutes(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: 
&RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } a.updateDomains([]string{"*.example.com"}) @@ -116,19 +120,21 @@ func TestUpdateRoutes(t *testing.T) { } func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) @@ -143,24 +149,26 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { } func TestDomainRoutes(t *testing.T) { + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } a.updateDomains([]string{"example.com"}) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { t.Errorf("ObserveDNSResponse: %v", err) } - a.Wait(context.Background()) + a.Wait(t.Context()) want := map[string][]netip.Addr{ "example.com": {netip.MustParseAddr("192.0.0.8")}, @@ -173,19 +181,21 @@ func TestDomainRoutes(t *testing.T) { } func 
TestObserveDNSResponse(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } // a has no domains configured, so it should not advertise any routes @@ -267,19 +277,21 @@ func TestObserveDNSResponse(t *testing.T) { } func TestWildcardDomains(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } a.updateDomains([]string{"*.example.com"}) @@ -422,8 +434,9 @@ func prefixes(in ...string) []netip.Prefix { } func TestUpdateRouteRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -439,12 +452,13 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: 
rc}) } // nothing has yet been advertised assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -472,8 +486,9 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { } func TestUpdateDomainRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -489,12 +504,13 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -532,8 +548,9 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { } func TestUpdateWildcardRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -549,12 +566,13 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { if shouldStore { a = NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = NewAppConnector(Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -691,10 +709,12 @@ func TestMetricBucketsAreSorted(t *testing.T) { // back into AppConnector via authReconfig. If everything is called // synchronously, this results in a deadlock on AppConnector.mu. 
func TestUpdateRoutesDeadlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + bus := eventbustest.NewBus(t) rc := &appctest.RouteCollector{} a := NewAppConnector(Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index af5a4055024ca..e8952216b56fc 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4804,6 +4804,7 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i } b.appConnector = appc.NewAppConnector(appc.Config{ Logf: b.logf, + EventBus: b.sys.Bus.Get(), RouteAdvertiser: b, RouteInfo: ri, StoreRoutesFunc: storeFunc, diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index ec65c67ee385d..6737266be80d4 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2307,15 +2307,17 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { func TestOfferingAppConnector(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) + bus := b.sys.Bus.Get() if b.OfferingAppConnector() { t.Fatal("unexpected offering app connector") } + rc := &appctest.RouteCollector{} if shouldStore { b.appConnector = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, + Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf}) + b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } if !b.OfferingAppConnector() { t.Fatal("unexpected not offering app connector") @@ -2366,6 +2368,7 @@ func TestRouterAdvertiserIgnoresContainedRoutes(t *testing.T) { func TestObserveDNSResponse(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) + bus := b.sys.Bus.Get() // 
ensure no error when no app connector is configured if err := b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -2376,12 +2379,13 @@ func TestObserveDNSResponse(t *testing.T) { if shouldStore { b.appConnector = appc.NewAppConnector(appc.Config{ Logf: t.Logf, + EventBus: bus, RouteAdvertiser: rc, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) + b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } b.appConnector.UpdateDomains([]string{"example.com"}) b.appConnector.Wait(context.Background()) diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index a6a5f6ff5e117..43b3c49fc6520 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -259,12 +259,17 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { if shouldStore { a = appc.NewAppConnector(appc.Config{ Logf: t.Logf, + EventBus: sys.Bus.Get(), RouteAdvertiser: &appctest.RouteCollector{}, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: &appctest.RouteCollector{}}) + a = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + RouteAdvertiser: &appctest.RouteCollector{}, + }) } sys.Set(pm.Store()) sys.Set(eng) @@ -339,12 +344,13 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { if shouldStore { a = appc.NewAppConnector(appc.Config{ Logf: t.Logf, + EventBus: sys.Bus.Get(), RouteAdvertiser: rc, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc}) } sys.Set(pm.Store()) sys.Set(eng) @@ -411,12 +417,13 @@ func 
TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { if shouldStore { a = appc.NewAppConnector(appc.Config{ Logf: t.Logf, + EventBus: sys.Bus.Get(), RouteAdvertiser: rc, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { - a = appc.NewAppConnector(appc.Config{Logf: t.Logf, RouteAdvertiser: rc}) + a = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc}) } sys.Set(pm.Store()) sys.Set(eng) From 801aac59db732b7c6adafc882add0f3c71a8e48a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 11:41:42 -0700 Subject: [PATCH 0486/1093] Makefile, cmd/*/depaware.txt: split out vendor packages explicitly depaware was merging golang.org/x/foo and std's vendor/golang.org/x/foo packages (which could both be in the binary!), leading to confusing output, especially when I was working on eliminating duplicate packages imported under different names. This makes the depaware output longer and grosser, but doesn't hide reality from us. 
Updates #17305 Change-Id: I21cc3418014e127f6c1a81caf4e84213ce84ab57 Signed-off-by: Brad Fitzpatrick --- Makefile | 16 +++++++------- cmd/derper/depaware.txt | 33 ++++++++++++++++++---------- cmd/k8s-operator/depaware.txt | 24 +++++++++++++++----- cmd/stund/depaware.txt | 35 ++++++++++++++++-------------- cmd/tailscale/depaware.txt | 28 ++++++++++++++++++------ cmd/tailscaled/depaware-min.txt | 23 +++++++++++++++----- cmd/tailscaled/depaware-minbox.txt | 23 +++++++++++++++----- cmd/tailscaled/depaware.txt | 26 +++++++++++++++++----- cmd/tsidp/depaware.txt | 24 +++++++++++++++----- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- tsnet/depaware.txt | 24 +++++++++++++++----- 15 files changed, 189 insertions(+), 79 deletions(-) diff --git a/Makefile b/Makefile index 05b984348d81c..b78ef046913a7 100644 --- a/Makefile +++ b/Makefile @@ -18,35 +18,35 @@ lint: ## Run golangci-lint updatedeps: ## Update depaware deps # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # it finds in its $$PATH is the right one. 
- PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --vendor --internal \ tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/stund \ tailscale.com/cmd/tsidp - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --goos=linux,darwin,windows,android,ios --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --goos=linux,darwin,windows,android,ios --vendor --internal \ tailscale.com/tsnet - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --vendor --internal \ tailscale.com/cmd/tailscaled - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --vendor --internal \ tailscale.com/cmd/tailscaled depaware: ## Run depaware checks # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # it finds in its $$PATH is the right one. 
- PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --vendor --internal \ tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/stund \ tailscale.com/cmd/tsidp - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --vendor --internal \ tailscale.com/tsnet - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --vendor --internal \ tailscale.com/cmd/tailscaled - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --vendor --internal \ tailscale.com/cmd/tailscaled buildwindows: ## Build tailscale CLI for windows/amd64 diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 7f0252148a0e2..0628afd63eeca 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -174,24 +174,17 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/argon2 from tailscale.com/tka 
golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from tailscale.com/tka - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ - golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/nacl/secretbox + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/util/winutil+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting L golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http - golang.org/x/net/http2/hpack from net/http+ - golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/idna from golang.org/x/crypto/acme/autocert golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ @@ -208,6 +201,22 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/cmd/derper+ + vendor/golang.org/x/crypto/chacha20 from 
vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from slices+ @@ -368,7 +377,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ os/exec from golang.zx2c4.com/wireguard/windows/tunnel/winipcfg+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e225cebf92bd0..89b50edc21a34 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -891,9 +891,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from 
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/ssh+ - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -908,9 +906,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/http2 from k8s.io/apimachinery/pkg/util/net+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ @@ -940,6 +938,22 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 
from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from github.com/gaissmai/bart+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 20f58ef2543b7..a5e4b9ba36ecb 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -87,29 +87,32 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/version from tailscale.com/envknob+ tailscale.com/version/distro from tailscale.com/envknob golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ - golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/nacl/secretbox + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from 
golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/tsweb/varz - golang.org/x/net/dns/dnsmessage from net - golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http - golang.org/x/net/http2/hpack from net/http+ - golang.org/x/net/idna from golang.org/x/net/http/httpguts+ golang.org/x/sys/cpu from golang.org/x/crypto/blake2b+ LD golang.org/x/sys/unix from github.com/prometheus/procfs+ W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus - golang.org/x/text/secure/bidirule from golang.org/x/net/idna - golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ - golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ - golang.org/x/text/unicode/norm from golang.org/x/net/idna + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from slices+ @@ -268,7 +271,7 @@ 
tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from mime/multipart+ net/url from crypto/x509+ os from crypto/internal/sysrand+ os/signal from tailscale.com/cmd/stund diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index cfa073a71d477..80bb40c263673 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -211,9 +211,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from tailscale.com/clientupdate/distsign+ golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from tailscale.com/control/controlbase golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -230,11 +228,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L golang.org/x/image/math/f64 from github.com/fogleman/gg+ L golang.org/x/image/math/fixed from github.com/fogleman/gg+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ - 
golang.org/x/net/http2/hpack from net/http+ + golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 @@ -260,6 +258,22 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/cmd/tailscale/cli+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 22f360ac520eb..f3a6eb12c4be1 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -200,9 +200,7 @@ 
tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -214,9 +212,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping @@ -237,6 +234,22 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/derp + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + 
vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 4b80f4a5637a4..1b2fff01f8c8f 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -227,9 +227,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -241,9 +239,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 
golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping @@ -264,6 +261,22 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/derp + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna archive/tar from tailscale.com/clientupdate bufio from 
compress/flate+ bytes from bufio+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 5e92438e7933a..12c06f61195cb 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -484,9 +484,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/cryptobyte from tailscale.com/feature/tpm + golang.org/x/crypto/cryptobyte/asn1 from golang.org/x/crypto/cryptobyte+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -500,9 +500,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping+ @@ -530,6 +530,22 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi 
from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 9ced6f966ccb6..76254c6cd2161 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -318,9 +318,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 
from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/ed25519 from gopkg.in/square/go-jose.v2 golang.org/x/crypto/hkdf from tailscale.com/control/controlbase @@ -336,9 +334,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ @@ -367,6 +365,22 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from 
net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ diff --git a/flake.nix b/flake.nix index 8f1fe026d2d9c..e8ef03853badd 100644 --- a/flake.nix +++ b/flake.nix @@ -148,5 +148,5 @@ }); }; } -# nix-direnv cache busting line: sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= +# nix-direnv cache busting line: sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= diff --git a/go.mod b/go.mod index 6883d2552e447..bce634431cbd9 100644 --- a/go.mod +++ b/go.mod @@ -79,7 +79,7 @@ require ( github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/studio-b12/gowebdav v0.9.0 github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e - github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b + github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 diff --git a/go.mod.sri b/go.mod.sri index 781799de5eae1..a1d81c1a95dc7 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= +sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= diff --git a/go.sum b/go.sum index 72ddb730fdf84..5e2205575f416 100644 --- a/go.sum +++ b/go.sum @@ -972,8 +972,8 @@ github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplB github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod 
h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4= -github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b h1:ewWb4cA+YO9/3X+v5UhdV+eKFsNBOPcGRh39Glshx/4= -github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= +github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f h1:PDPGJtm9PFBLNudHGwkfUGp/FWvP+kXXJ0D1pB35F40= +github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 h1:/V2rCMMWcsjYaYO2MeovLw+ClP63OtXgCF2Y1eb8+Ns= diff --git a/shell.nix b/shell.nix index 883d71befe9d6..1891111b2d5f6 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= +# nix-direnv cache busting line: sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index b5f524088a552..4dffb5000841f 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -313,9 +313,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - 
golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -329,9 +327,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/http2 from tailscale.com/control/controlclient+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ @@ -360,6 +358,22 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + 
vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ From 78af49dd1acb287aa6a50c0ee8012c9f4b3d1783 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 15:07:55 -0700 Subject: [PATCH 0487/1093] control/ts2021: rename from internal/noiseconn in prep for controlclient split A following change will split out the controlclient.NoiseClient type out, away from the rest of the controlclient package which is relatively dependency heavy. A question was where to move it, and whether to make a new (a fifth!) package in the ts2021 dependency chain. @creachadair and I brainstormed and decided to merge internal/noiseconn and controlclient.NoiseClient into one package, with names ts2021.Conn and ts2021.Client. For ease of reviewing the subsequent PR, this is the first step that just renames the internal/noiseconn package to control/ts2021. 
Updates #17305 Change-Id: Ib5ea162dc1d336c1d805bdd9548d1702dd6e1468 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/cli/debug.go | 4 ++-- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- control/controlclient/noise.go | 18 +++++++++--------- control/controlclient/noise_test.go | 4 ++-- {internal/noiseconn => control/ts2021}/conn.go | 10 ++++------ tsnet/depaware.txt | 2 +- 11 files changed, 24 insertions(+), 26 deletions(-) rename {internal/noiseconn => control/ts2021}/conn.go (95%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 89b50edc21a34..41a6c39e3718d 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -690,6 +690,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/control/controlhttp from tailscale.com/control/controlclient tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ @@ -710,7 +711,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ diff --git 
a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 7e800dbc5c01c..224070842861f 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -31,10 +31,10 @@ import ( "golang.org/x/net/http2" "tailscale.com/client/tailscale/apitype" "tailscale.com/control/controlhttp" + "tailscale.com/control/ts2021" "tailscale.com/feature" _ "tailscale.com/feature/condregister/useproxy" "tailscale.com/hostinfo" - "tailscale.com/internal/noiseconn" "tailscale.com/ipn" "tailscale.com/net/ace" "tailscale.com/net/netmon" @@ -1122,7 +1122,7 @@ func tryConnect(ctx context.Context, controlPublic key.MachinePublic, noiseDiale } // Now, create a Noise conn over the existing conn. - nc, err := noiseconn.New(conn.Conn, h2Transport, 0, nil) + nc, err := ts2021.New(conn.Conn, h2Transport, 0, nil) if err != nil { return fmt.Errorf("noiseconn.New: %w", err) } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 80bb40c263673..3e100d4a76e7e 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -85,6 +85,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/ts2021 from tailscale.com/cmd/tailscale/cli tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck @@ -105,7 +106,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli+ - tailscale.com/internal/noiseconn from tailscale.com/cmd/tailscale/cli 
tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index f3a6eb12c4be1..87138e4dd8322 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -45,6 +45,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlhttp from tailscale.com/control/controlclient tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ @@ -60,7 +61,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 1b2fff01f8c8f..0fd7286e7712f 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -65,6 +65,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlhttp from tailscale.com/control/controlclient+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs 
from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/cmd/tailscale/cli+ tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ @@ -82,7 +83,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient+ tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 12c06f61195cb..26f27e986d743 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -255,6 +255,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/control/controlhttp from tailscale.com/control/controlclient tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ @@ -296,7 +297,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from 
tailscale.com/client/local+ W tailscale.com/ipn/auditlog from tailscale.com/cmd/tailscaled tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 76254c6cd2161..b423e0bb08f5c 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -132,6 +132,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/control/controlhttp from tailscale.com/control/controlclient tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ @@ -152,7 +153,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go index c001de0cdf7dd..1daa07620aa08 100644 --- a/control/controlclient/noise.go +++ b/control/controlclient/noise.go @@ -18,8 +18,8 @@ import ( "golang.org/x/net/http2" "tailscale.com/control/controlhttp" + "tailscale.com/control/ts2021" "tailscale.com/health" - "tailscale.com/internal/noiseconn" "tailscale.com/net/dnscache" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" @@ -50,7 +50,7 @@ type NoiseClient struct { // sfDial ensures that two concurrent requests for a noise 
connection only // produce one shared one between the two callers. - sfDial singleflight.Group[struct{}, *noiseconn.Conn] + sfDial singleflight.Group[struct{}, *ts2021.Conn] dialer *tsdial.Dialer dnsCache *dnscache.Resolver @@ -72,9 +72,9 @@ type NoiseClient struct { // mu only protects the following variables. mu sync.Mutex closed bool - last *noiseconn.Conn // or nil + last *ts2021.Conn // or nil nextID int - connPool map[int]*noiseconn.Conn // active connections not yet closed; see noiseconn.Conn.Close + connPool map[int]*ts2021.Conn // active connections not yet closed; see ts2021.Conn.Close } // NoiseOpts contains options for the NewNoiseClient function. All fields are @@ -195,12 +195,12 @@ func (e contextErr) Unwrap() error { return e.err } -// getConn returns a noiseconn.Conn that can be used to make requests to the +// getConn returns a ts2021.Conn that can be used to make requests to the // coordination server. It may return a cached connection or create a new one. // Dials are singleflighted, so concurrent calls to getConn may only dial once. // As such, context values may not be respected as there are no guarantees that // the context passed to getConn is the same as the context passed to dial. -func (nc *NoiseClient) getConn(ctx context.Context) (*noiseconn.Conn, error) { +func (nc *NoiseClient) getConn(ctx context.Context) (*ts2021.Conn, error) { nc.mu.Lock() if last := nc.last; last != nil && last.CanTakeNewRequest() { nc.mu.Unlock() @@ -214,7 +214,7 @@ func (nc *NoiseClient) getConn(ctx context.Context) (*noiseconn.Conn, error) { // canceled. Instead, we have to additionally check that the context // which was canceled is our context and retry if our context is still // valid. 
- conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*noiseconn.Conn, error) { + conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*ts2021.Conn, error) { c, err := nc.dial(ctx) if err != nil { if ctx.Err() != nil { @@ -282,7 +282,7 @@ func (nc *NoiseClient) Close() error { // dial opens a new connection to tailcontrol, fetching the server noise key // if not cached. -func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) { +func (nc *NoiseClient) dial(ctx context.Context) (*ts2021.Conn, error) { nc.mu.Lock() connID := nc.nextID nc.nextID++ @@ -352,7 +352,7 @@ func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) { return nil, err } - ncc, err := noiseconn.New(clientConn.Conn, nc.h2t, connID, nc.connClosed) + ncc, err := ts2021.New(clientConn.Conn, nc.h2t, connID, nc.connClosed) if err != nil { return nil, err } diff --git a/control/controlclient/noise_test.go b/control/controlclient/noise_test.go index d9c71cf274e71..0022bdf880653 100644 --- a/control/controlclient/noise_test.go +++ b/control/controlclient/noise_test.go @@ -15,7 +15,7 @@ import ( "golang.org/x/net/http2" "tailscale.com/control/controlhttp/controlhttpserver" - "tailscale.com/internal/noiseconn" + "tailscale.com/control/ts2021" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" @@ -310,7 +310,7 @@ func (up *Upgrader) ServeHTTP(w http.ResponseWriter, r *http.Request) { // https://httpwg.org/specs/rfc7540.html#rfc.section.4.1 (Especially not // an HTTP/2 settings frame, which isn't of type 'T') var notH2Frame [5]byte - copy(notH2Frame[:], noiseconn.EarlyPayloadMagic) + copy(notH2Frame[:], ts2021.EarlyPayloadMagic) var lenBuf [4]byte binary.BigEndian.PutUint32(lenBuf[:], uint32(len(earlyJSON))) // These writes are all buffered by caller, so fine to do them diff --git a/internal/noiseconn/conn.go b/control/ts2021/conn.go similarity index 95% rename from internal/noiseconn/conn.go rename to control/ts2021/conn.go index 
29fd1a2832a2e..99b1f24cbe7f8 100644 --- a/internal/noiseconn/conn.go +++ b/control/ts2021/conn.go @@ -1,12 +1,10 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package noiseconn contains an internal-only wrapper around controlbase.Conn -// that properly handles the early payload sent by the server before the HTTP/2 -// session begins. -// -// See the documentation on the Conn type for more details. -package noiseconn +// Package ts2021 handles the details of the Tailscale 2021 control protocol +// that are after (above) the Noise layer. In particular, the +// "tailcfg.EarlyNoise" message and the subsequent HTTP/2 connection. +package ts2021 import ( "bytes" diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 4dffb5000841f..3cf1d06e99084 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -128,6 +128,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/control/controlhttp from tailscale.com/control/controlclient tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ @@ -148,7 +149,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ From 
cca70ddbfc2727a2f38d9d178b52efcca842a256 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 1 Oct 2025 20:18:58 -0700 Subject: [PATCH 0488/1093] cmd/tailscaled: default --encrypt-state to true if TPM is available (#17376) Whenever running on a platform that has a TPM (and tailscaled can access it), default to encrypting the state. The user can still explicitly set this flag to disable encryption. Updates https://github.com/tailscale/corp/issues/32909 Signed-off-by: Andrew Lytvynov --- cmd/tailscaled/flag.go | 31 +++++++++++++++++++++++++++++++ cmd/tailscaled/tailscaled.go | 13 ++++++++----- feature/hooks.go | 12 ++++++++++++ feature/tpm/tpm.go | 10 ++++++++++ feature/tpm/tpm_test.go | 9 --------- ipn/ipnlocal/local.go | 6 +----- util/syspolicy/pkey/pkey.go | 4 +++- 7 files changed, 65 insertions(+), 20 deletions(-) create mode 100644 cmd/tailscaled/flag.go diff --git a/cmd/tailscaled/flag.go b/cmd/tailscaled/flag.go new file mode 100644 index 0000000000000..f640aceed45d8 --- /dev/null +++ b/cmd/tailscaled/flag.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import "strconv" + +// boolFlag is a flag.Value that tracks whether it was ever set. 
+type boolFlag struct { + set bool + v bool +} + +func (b *boolFlag) String() string { + if b == nil || !b.set { + return "unset" + } + return strconv.FormatBool(b.v) +} + +func (b *boolFlag) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + b.v = v + b.set = true + return nil +} + +func (b *boolFlag) IsBoolFlag() bool { return true } diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 27fec05a3383b..c3a4c8b054bdb 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -120,7 +120,7 @@ var args struct { debug string port uint16 statepath string - encryptState bool + encryptState boolFlag statedir string socketpath string birdSocketPath string @@ -197,7 +197,7 @@ func main() { flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. Default: "+paths.DefaultTailscaledStateFile()) - flag.BoolVar(&args.encryptState, "encrypt-state", defaultEncryptState(), "encrypt the state file on disk; uses TPM on Linux and Windows, on all other platforms this flag is not supported") + flag.Var(&args.encryptState, "encrypt-state", `encrypt the state file on disk; when not set encryption will be enabled if supported on this platform; uses TPM on Linux and Windows, on all other platforms this flag is not supported`) flag.StringVar(&args.statedir, "statedir", "", "path to directory for storage of config state, TLS certs, temporary incoming Taildrop files, etc. 
If empty, it's derived from --state when possible.") flag.StringVar(&args.socketpath, "socket", paths.DefaultTailscaledSocket(), "path of the service unix socket") flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") @@ -275,7 +275,10 @@ func main() { } } - if args.encryptState { + if !args.encryptState.set { + args.encryptState.v = defaultEncryptState() + } + if args.encryptState.v { if runtime.GOOS != "linux" && runtime.GOOS != "windows" { log.SetFlags(0) log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) @@ -351,7 +354,7 @@ func statePathOrDefault() string { if path == "" && args.statedir != "" { path = filepath.Join(args.statedir, "tailscaled.state") } - if path != "" && !store.HasKnownProviderPrefix(path) && args.encryptState { + if path != "" && !store.HasKnownProviderPrefix(path) && args.encryptState.v { path = store.TPMPrefix + path } return path @@ -909,6 +912,6 @@ func defaultEncryptState() bool { // (plan9/FreeBSD/etc). return false } - v, _ := policyclient.Get().GetBoolean(pkey.EncryptState, false) + v, _ := policyclient.Get().GetBoolean(pkey.EncryptState, feature.TPMAvailable()) return v } diff --git a/feature/hooks.go b/feature/hooks.go index bc42bd8d97ba1..2eade1eadc4f6 100644 --- a/feature/hooks.go +++ b/feature/hooks.go @@ -40,3 +40,15 @@ var HookProxySetSelfProxy Hook[func(...string)] // HookProxySetTransportGetProxyConnectHeader is a hook for feature/useproxy to register // [tshttpproxy.SetTransportGetProxyConnectHeader]. var HookProxySetTransportGetProxyConnectHeader Hook[func(*http.Transport)] + +// HookTPMAvailable is a hook that reports whether a TPM device is supported +// and available. +var HookTPMAvailable Hook[func() bool] + +// TPMAvailable reports whether a TPM device is supported and available. 
+func TPMAvailable() bool { + if f, ok := HookTPMAvailable.GetOk(); ok { + return f() + } + return false +} diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index b700637e65a15..b67cb4e3b23ff 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -39,6 +39,7 @@ var infoOnce = sync.OnceValue(info) func init() { feature.Register("tpm") + feature.HookTPMAvailable.Set(tpmSupported) hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { hi.TPM = infoOnce() }) @@ -51,6 +52,15 @@ func init() { } } +func tpmSupported() bool { + tpm, err := open() + if err != nil { + return false + } + tpm.Close() + return true +} + var verboseTPM = envknob.RegisterBool("TS_DEBUG_TPM") func info() *tailcfg.TPMInfo { diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index f4497f8c72732..5401fd5c38532 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -277,15 +277,6 @@ func TestMigrateStateToTPM(t *testing.T) { } } -func tpmSupported() bool { - tpm, err := open() - if err != nil { - return false - } - tpm.Close() - return true -} - type mockTPMSealProvider struct { path string data map[ipn.StateKey][]byte diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e8952216b56fc..9657686604761 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7559,11 +7559,7 @@ func (b *LocalBackend) stateEncrypted() opt.Bool { case version.IsMacAppStore(): return opt.NewBool(true) case version.IsMacSysExt(): - // MacSys still stores its state in plaintext on disk in addition to - // the Keychain. A future release will clean up the on-disk state - // files. - // TODO(#15830): always return true here once MacSys is fully migrated. 
- sp, _ := b.polc.GetBoolean(pkey.EncryptState, false) + sp, _ := b.polc.GetBoolean(pkey.EncryptState, true) return opt.NewBool(sp) default: // Probably self-compiled tailscaled, we don't use the Keychain diff --git a/util/syspolicy/pkey/pkey.go b/util/syspolicy/pkey/pkey.go index 1ef969d723aea..79b4af1e615a1 100644 --- a/util/syspolicy/pkey/pkey.go +++ b/util/syspolicy/pkey/pkey.go @@ -136,7 +136,9 @@ const ( FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" // EncryptState is a boolean setting that specifies whether to encrypt the - // tailscaled state file with a TPM device. + // tailscaled state file. + // Windows and Linux use a TPM device, Apple uses the Keychain. + // It's a noop on other platforms. EncryptState Key = "EncryptState" // PostureChecking indicates if posture checking is enabled and the client shall gather From 7dfa26778e7ca36a34e7d50c0f80fb60f6f54540 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 30 Sep 2025 09:02:56 +0100 Subject: [PATCH 0489/1093] derp/derphttp: de-flake DERP HTTP clients tests with memnet and synctest Using memnet and synctest removes flakiness caused by real networking and subtle timing differences. Additionally, remove the `t.Logf` call inside the server's shutdown goroutine that was causing a false positive data race detection. The race detector is flagging a double write during this `t.Logf` call. This is a common pattern, noted in golang/go#40343 and elsewhere in this file, where using `t.Logf` after a test has finished can interact poorly with the test runner. This is a long-standing issue which became more common after rewriting this test to use memnet and synctest.
Fixed #17355 Signed-off-by: Alex Chan --- derp/derphttp/derphttp_test.go | 312 +++++++++++++++++---------------- 1 file changed, 158 insertions(+), 154 deletions(-) diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 36c11f4fc25cc..76681d4984252 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -8,6 +8,7 @@ import ( "context" "crypto/tls" "encoding/json" + "errors" "flag" "fmt" "maps" @@ -18,11 +19,13 @@ import ( "strings" "sync" "testing" + "testing/synctest" "time" "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/derp/derpserver" + "tailscale.com/net/memnet" "tailscale.com/net/netmon" "tailscale.com/net/netx" "tailscale.com/tailcfg" @@ -224,24 +227,21 @@ func TestPing(t *testing.T) { const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" -func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server) { +func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server, ln *memnet.Listener) { s = derpserver.New(k, t.Logf) httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), Handler: derpserver.Handler(s), } - ln, err := net.Listen("tcp4", "localhost:0") - if err != nil { - t.Fatal(err) - } + ln = memnet.Listen("localhost:0") + serverURL = "http://" + ln.Addr().String() s.SetMeshKey(testMeshKey) go func() { if err := httpsrv.Serve(ln); err != nil { - if err == http.ErrServerClosed { - t.Logf("server closed") + if errors.Is(err, net.ErrClosed) { return } panic(err) @@ -250,7 +250,7 @@ func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpse return } -func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *derphttp.Client) { +func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string, ln *memnet.Listener) (c *derphttp.Client) { c, err := 
derphttp.NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic()) if err != nil { t.Fatal(err) @@ -260,6 +260,7 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW t.Fatal(err) } c.MeshKey = k + c.SetURLDialer(ln.Dial) return } @@ -267,170 +268,171 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW // updates after a different thread breaks and reconnects the connection, while // the watcher is waiting on recv(). func TestBreakWatcherConnRecv(t *testing.T) { - // TODO(bradfitz): use synctest + memnet instead - - // Set the wait time before a retry after connection failure to be much lower. - // This needs to be early in the test, for defer to run right at the end after - // the DERP client has finished. - tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) - - var wg sync.WaitGroup - // Make the watcher server - serverPrivateKey1 := key.NewNode() - _, s1 := newTestServer(t, serverPrivateKey1) - defer s1.Close() - - // Make the watched server - serverPrivateKey2 := key.NewNode() - serverURL2, s2 := newTestServer(t, serverPrivateKey2) - defer s2.Close() - - // Make the watcher (but it is not connected yet) - watcher := newWatcherClient(t, serverPrivateKey1, serverURL2) - defer watcher.Close() + synctest.Test(t, func(t *testing.T) { + // Set the wait time before a retry after connection failure to be much lower. + // This needs to be early in the test, for defer to run right at the end after + // the DERP client has finished. 
+ tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) + + var wg sync.WaitGroup + // Make the watcher server + serverPrivateKey1 := key.NewNode() + _, s1, ln1 := newTestServer(t, serverPrivateKey1) + defer s1.Close() + defer ln1.Close() + + // Make the watched server + serverPrivateKey2 := key.NewNode() + serverURL2, s2, ln2 := newTestServer(t, serverPrivateKey2) + defer s2.Close() + defer ln2.Close() + + // Make the watcher (but it is not connected yet) + watcher := newWatcherClient(t, serverPrivateKey1, serverURL2, ln2) + defer watcher.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + watcherChan := make(chan int, 1) + defer close(watcherChan) + errChan := make(chan error, 1) + + // Start the watcher thread (which connects to the watched server) + wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 + go func() { + defer wg.Done() + var peers int + add := func(m derp.PeerPresentMessage) { + t.Logf("add: %v", m.Key.ShortString()) + peers++ + // Signal that the watcher has run + watcherChan <- peers + } + remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } + notifyErr := func(err error) { + select { + case errChan <- err: + case <-ctx.Done(): + } + } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) + }() - watcherChan := make(chan int, 1) - defer close(watcherChan) - errChan := make(chan error, 1) + synctest.Wait() - // Start the watcher thread (which connects to the watched server) - wg.Add(1) // To avoid using t.Logf after the test ends. 
See https://golang.org/issue/40343 - go func() { - defer wg.Done() - var peers int - add := func(m derp.PeerPresentMessage) { - t.Logf("add: %v", m.Key.ShortString()) - peers++ - // Signal that the watcher has run - watcherChan <- peers - } - remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } - notifyErr := func(err error) { + // Wait for the watcher to run, then break the connection and check if it + // reconnected and received peer updates. + for range 10 { select { - case errChan <- err: - case <-ctx.Done(): + case peers := <-watcherChan: + if peers != 1 { + t.Fatalf("wrong number of peers added during watcher connection: have %d, want 1", peers) + } + case err := <-errChan: + if err.Error() != "derp.Recv: EOF" { + t.Fatalf("expected notifyError connection error to be EOF, got %v", err) + } } - } - - watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) - }() - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() + synctest.Wait() - // Wait for the watcher to run, then break the connection and check if it - // reconnected and received peer updates. 
- for range 10 { - select { - case peers := <-watcherChan: - if peers != 1 { - t.Fatalf("wrong number of peers added during watcher connection: have %d, want 1", peers) - } - case err := <-errChan: - if !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("expected notifyError connection error to contain 'use of closed network connection', got %v", err) - } - case <-timer.C: - t.Fatalf("watcher did not process the peer update") + watcher.BreakConnection(watcher) + // re-establish connection by sending a packet + watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) } - timer.Reset(5 * time.Second) - watcher.BreakConnection(watcher) - // re-establish connection by sending a packet - watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) - } - cancel() // Cancel the context to stop the watcher loop. - wg.Wait() + cancel() // Cancel the context to stop the watcher loop. + wg.Wait() + }) } // Test that a watcher connection successfully reconnects and processes peer // updates after a different thread breaks and reconnects the connection, while // the watcher is not waiting on recv(). func TestBreakWatcherConn(t *testing.T) { - // TODO(bradfitz): use synctest + memnet instead - - // Set the wait time before a retry after connection failure to be much lower. - // This needs to be early in the test, for defer to run right at the end after - // the DERP client has finished. 
- tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) - - var wg sync.WaitGroup - // Make the watcher server - serverPrivateKey1 := key.NewNode() - _, s1 := newTestServer(t, serverPrivateKey1) - defer s1.Close() - - // Make the watched server - serverPrivateKey2 := key.NewNode() - serverURL2, s2 := newTestServer(t, serverPrivateKey2) - defer s2.Close() - - // Make the watcher (but it is not connected yet) - watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2) - defer watcher1.Close() + synctest.Test(t, func(t *testing.T) { + // Set the wait time before a retry after connection failure to be much lower. + // This needs to be early in the test, for defer to run right at the end after + // the DERP client has finished. + tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) + + var wg sync.WaitGroup + // Make the watcher server + serverPrivateKey1 := key.NewNode() + _, s1, ln1 := newTestServer(t, serverPrivateKey1) + defer s1.Close() + defer ln1.Close() + + // Make the watched server + serverPrivateKey2 := key.NewNode() + serverURL2, s2, ln2 := newTestServer(t, serverPrivateKey2) + defer s2.Close() + defer ln2.Close() + + // Make the watcher (but it is not connected yet) + watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2, ln2) + defer watcher1.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + watcherChan := make(chan int, 1) + breakerChan := make(chan bool, 1) + errorChan := make(chan error, 1) + + // Start the watcher thread (which connects to the watched server) + wg.Add(1) // To avoid using t.Logf after the test ends. 
See https://golang.org/issue/40343 + go func() { + defer wg.Done() + var peers int + add := func(m derp.PeerPresentMessage) { + t.Logf("add: %v", m.Key.ShortString()) + peers++ + // Signal that the watcher has run + watcherChan <- peers + select { + case <-ctx.Done(): + return + // Wait for breaker to run + case <-breakerChan: + } + } + remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } + notifyError := func(err error) { + errorChan <- err + } - ctx, cancel := context.WithCancel(context.Background()) + watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyError) + }() - watcherChan := make(chan int, 1) - breakerChan := make(chan bool, 1) - errorChan := make(chan error, 1) + synctest.Wait() - // Start the watcher thread (which connects to the watched server) - wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 - go func() { - defer wg.Done() - var peers int - add := func(m derp.PeerPresentMessage) { - t.Logf("add: %v", m.Key.ShortString()) - peers++ - // Signal that the watcher has run - watcherChan <- peers + // Wait for the watcher to run, then break the connection and check if it + // reconnected and received peer updates. 
+ for range 10 { select { - case <-ctx.Done(): - return - // Wait for breaker to run - case <-breakerChan: + case peers := <-watcherChan: + if peers != 1 { + t.Fatalf("wrong number of peers added during watcher connection have %d, want 1", peers) + } + case err := <-errorChan: + if !errors.Is(err, net.ErrClosed) { + t.Fatalf("expected notifyError connection error to fail with ErrClosed, got %v", err) + } } - } - remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } - notifyError := func(err error) { - errorChan <- err - } - - watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyError) - }() - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() + synctest.Wait() - // Wait for the watcher to run, then break the connection and check if it - // reconnected and received peer updates. - for range 10 { - select { - case peers := <-watcherChan: - if peers != 1 { - t.Fatalf("wrong number of peers added during watcher connection have %d, want 1", peers) - } - case err := <-errorChan: - if !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("expected notifyError connection error to contain 'use of closed network connection', got %v", err) - } - case <-timer.C: - t.Fatalf("watcher did not process the peer update") + watcher1.BreakConnection(watcher1) + // re-establish connection by sending a packet + watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) + // signal that the breaker is done + breakerChan <- true } - watcher1.BreakConnection(watcher1) - // re-establish connection by sending a packet - watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) - // signal that the breaker is done - breakerChan <- true - - timer.Reset(5 * time.Second) - } - watcher1.Close() - cancel() - wg.Wait() + watcher1.Close() + cancel() + wg.Wait() + }) } func noopAdd(derp.PeerPresentMessage) {} @@ -444,12 +446,13 @@ func 
TestRunWatchConnectionLoopServeConnect(t *testing.T) { defer cancel() priv := key.NewNode() - serverURL, s := newTestServer(t, priv) + serverURL, s, ln := newTestServer(t, priv) defer s.Close() + defer ln.Close() pub := priv.Public() - watcher := newWatcherClient(t, priv, serverURL) + watcher := newWatcherClient(t, priv, serverURL, ln) defer watcher.Close() // Test connecting to ourselves, and that we get hung up on. @@ -518,13 +521,14 @@ func TestNotifyError(t *testing.T) { defer cancel() priv := key.NewNode() - serverURL, s := newTestServer(t, priv) + serverURL, s, ln := newTestServer(t, priv) defer s.Close() + defer ln.Close() pub := priv.Public() // Test early error notification when c.connect fails. - watcher := newWatcherClient(t, priv, serverURL) + watcher := newWatcherClient(t, priv, serverURL, ln) watcher.SetURLDialer(netx.DialFunc(func(ctx context.Context, network, addr string) (net.Conn, error) { t.Helper() return nil, fmt.Errorf("test error: %s", addr) From 16e0abe0311b8fe6417b5225c2d608951ebf1a85 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 2 Oct 2025 13:29:03 +0100 Subject: [PATCH 0490/1093] build_docker.sh: support including extra files (#17405) mkctr already has support for including extra files in the built container image. Wire up a new optional environment variable to thread that through to mkctr. The operator e2e tests will use this to bake additional trusted CAs into the test image without significantly departing from the normal build or deployment process for our containers. 
Updates tailscale/corp#32085 Change-Id: Ica94ed270da13782c4f5524fdc949f9218f79477 Signed-off-by: Tom Proctor --- build_docker.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/build_docker.sh b/build_docker.sh index 37f00bf53e3d9..4552f8d8ee0d3 100755 --- a/build_docker.sh +++ b/build_docker.sh @@ -38,6 +38,7 @@ TARGET="${TARGET:-${DEFAULT_TARGET}}" TAGS="${TAGS:-${DEFAULT_TAGS}}" BASE="${BASE:-${DEFAULT_BASE}}" PLATFORM="${PLATFORM:-}" # default to all platforms +FILES="${FILES:-}" # default to no extra files # OCI annotations that will be added to the image. # https://github.com/opencontainers/image-spec/blob/main/annotations.md ANNOTATIONS="${ANNOTATIONS:-${DEFAULT_ANNOTATIONS}}" @@ -62,6 +63,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/containerboot ;; k8s-operator) @@ -80,6 +82,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/operator ;; k8s-nameserver) @@ -98,6 +101,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/k8s-nameserver ;; tsidp) @@ -116,6 +120,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/tsidp ;; k8s-proxy) @@ -134,6 +139,7 @@ case "$TARGET" in --push="${PUSH}" \ --target="${PLATFORM}" \ --annotations="${ANNOTATIONS}" \ + --files="${FILES}" \ /usr/local/bin/k8s-proxy ;; *) From aa5b2ce83be402eca9fa3862d257072274261229 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 2 Oct 2025 13:30:00 +0100 Subject: [PATCH 0491/1093] cmd/k8s-operator: add .gitignore for generated chart CRDs (#17406) Add a .gitignore for the chart version of the CRDs that we never commit, because the static manifest CRD files are the canonical version. 
This makes it easier to deploy the CRDs via the helm chart in a way that reflects the production workflow without making the git checkout "dirty". Given that the chart CRDs are ignored, we can also now safely generate them for the kube-generate-all Makefile target without being a nuisance to the state of the git checkout. Added a slightly more robust repo root detection to the generation logic to make sure the command works from the context of both the Makefile and the image builder command we run for releases in corp. Updates tailscale/corp#32085 Change-Id: Id44a4707c183bfaf95a160911ec7a42ffb1a1287 Signed-off-by: Tom Proctor --- cmd/k8s-operator/deploy/chart/templates/.gitignore | 10 ++++++++++ cmd/k8s-operator/generate/main.go | 9 +++++++-- cmd/k8s-operator/operator.go | 3 +++ 3 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 cmd/k8s-operator/deploy/chart/templates/.gitignore diff --git a/cmd/k8s-operator/deploy/chart/templates/.gitignore b/cmd/k8s-operator/deploy/chart/templates/.gitignore new file mode 100644 index 0000000000000..ae7c682d9fd15 --- /dev/null +++ b/cmd/k8s-operator/deploy/chart/templates/.gitignore @@ -0,0 +1,10 @@ +# Don't add helm chart CRDs to git. Canonical CRD files live in +# cmd/k8s-operator/deploy/crds. 
+# +# Generate for local usage with: +# go run tailscale.com/cmd/k8s-operator/generate helmcrd +/connector.yaml +/dnsconfig.yaml +/proxyclass.yaml +/proxygroup.yaml +/recorder.yaml diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 25435a47cf14a..6904f1df02ec0 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -41,11 +41,16 @@ func main() { if len(os.Args) < 2 { log.Fatalf("usage ./generate [staticmanifests|helmcrd]") } - repoRoot := "../../" + gitOut, err := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput() + if err != nil { + log.Fatalf("error determining git root: %v: %s", err, gitOut) + } + + repoRoot := strings.TrimSpace(string(gitOut)) switch os.Args[1] { case "helmcrd": // insert CRDs to Helm templates behind a installCRDs=true conditional check log.Print("Adding CRDs to Helm templates") - if err := generate("./"); err != nil { + if err := generate(repoRoot); err != nil { log.Fatalf("error adding CRDs to Helm templates: %v", err) } return diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 1d988eb033078..89c8ff3e205bf 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -67,6 +67,9 @@ import ( // Generate static manifests for deploying Tailscale operator on Kubernetes from the operator's Helm chart. //go:generate go run tailscale.com/cmd/k8s-operator/generate staticmanifests +// Generate the helm chart's CRDs (which are ignored from git). +//go:generate go run tailscale.com/cmd/k8s-operator/generate helmcrd + // Generate CRD API docs. 
//go:generate go run github.com/elastic/crd-ref-docs --renderer=markdown --source-path=../../k8s-operator/apis/ --config=../../k8s-operator/api-docs-config.yaml --output-path=../../k8s-operator/api.md From c45f8813b4651f3486955104a9ea5bd1075733a2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 30 Sep 2025 14:47:40 -0700 Subject: [PATCH 0492/1093] feature/featuretags, all: add build features, use existing ones in more places Saves 270 KB. Updates #12614 Change-Id: I4c3fe06d32c49edb3a4bb0758a8617d83f291cf5 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 5 +- cmd/tailscaled/depaware-minbox.txt | 5 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/deps_test.go | 16 +++- cmd/tsidp/depaware.txt | 2 +- .../feature_bakedroots_disabled.go | 13 +++ .../feature_bakedroots_enabled.go | 13 +++ .../buildfeatures/feature_cloud_disabled.go | 13 +++ .../buildfeatures/feature_cloud_enabled.go | 13 +++ .../feature_listenrawdisco_disabled.go | 13 +++ .../feature_listenrawdisco_enabled.go | 13 +++ .../feature_unixsocketidentity_disabled.go | 13 +++ .../feature_unixsocketidentity_enabled.go | 13 +++ feature/featuretags/featuretags.go | 74 ++++++++------- ipn/ipnauth/ipnauth.go | 20 ++-- .../ipnauth_omit_unixsocketidentity.go | 25 +++++ ...th_notwindows.go => ipnauth_unix_creds.go} | 2 +- ipn/ipnlocal/c2n.go | 53 +++++++---- ipn/ipnlocal/local.go | 7 +- ipn/ipnlocal/peerapi.go | 48 +++++----- ipn/ipnserver/actor.go | 6 ++ ipn/ipnserver/proxyconnect.go | 6 ++ ipn/ipnserver/server.go | 17 ++++ net/netns/socks.go | 2 +- net/tlsdial/blockblame/blockblame.go | 94 +++++++++++-------- net/tlsdial/tlsdial.go | 37 ++++---- safesocket/safesocket.go | 8 +- tsnet/depaware.txt | 2 +- util/clientmetric/clientmetric.go | 22 +++-- util/cloudenv/cloudenv.go | 7 ++ wgengine/magicsock/cloudinfo.go | 7 ++ wgengine/magicsock/magicsock_default.go | 2 +- wgengine/magicsock/magicsock_linux.go | 2 + wgengine/userspace.go | 2 +- 35 files 
changed, 410 insertions(+), 169 deletions(-) create mode 100644 feature/buildfeatures/feature_bakedroots_disabled.go create mode 100644 feature/buildfeatures/feature_bakedroots_enabled.go create mode 100644 feature/buildfeatures/feature_cloud_disabled.go create mode 100644 feature/buildfeatures/feature_cloud_enabled.go create mode 100644 feature/buildfeatures/feature_listenrawdisco_disabled.go create mode 100644 feature/buildfeatures/feature_listenrawdisco_enabled.go create mode 100644 feature/buildfeatures/feature_unixsocketidentity_disabled.go create mode 100644 feature/buildfeatures/feature_unixsocketidentity_enabled.go create mode 100644 ipn/ipnauth/ipnauth_omit_unixsocketidentity.go rename ipn/ipnauth/{ipnauth_notwindows.go => ipnauth_unix_creds.go} (95%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 41a6c39e3718d..aac465a30fbc5 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -112,7 +112,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile+ - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W 💣 github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 87138e4dd8322..accaab8f0a653 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -22,9 +22,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/mdlayher/genetlink from tailscale.com/net/tstun 💣 
github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device @@ -221,10 +220,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ - golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ - golang.org/x/net/proxy from tailscale.com/net/netns golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 0fd7286e7712f..f558c4c0b368f 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -28,7 +28,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/mdlayher/genetlink from tailscale.com/net/tstun 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ + 💣 github.com/mdlayher/socket from 
github.com/mdlayher/netlink github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3/internal from github.com/peterbourgon/ff/v3 @@ -36,7 +36,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/skip2/go-qrcode from tailscale.com/cmd/tailscale/cli github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device @@ -248,10 +247,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ - golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ - golang.org/x/net/proxy from tailscale.com/net/netns golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 26f27e986d743..7e6dff7dfbd80 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -174,7 +174,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/tailscale/hujson from tailscale.com/ipn/conffile L 💣 github.com/tailscale/netlink from 
tailscale.com/net/routetable+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web W 💣 github.com/tailscale/wf from tailscale.com/wf 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index c364a93069e11..1ec1998d7034e 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -237,16 +237,22 @@ func minTags() string { } func TestMinTailscaledNoCLI(t *testing.T) { + badSubstrs := []string{ + "cbor", + "regexp", + "golang.org/x/net/proxy", + "internal/socks", + "github.com/tailscale/peercred", + } deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", Tags: minTags(), OnDep: func(dep string) { - if strings.Contains(dep, "regexp") { - t.Errorf("unexpected dep: %q", dep) - } - if strings.Contains(dep, "cbor") { - t.Errorf("unexpected dep: %q", dep) + for _, bad := range badSubstrs { + if strings.Contains(dep, bad) { + t.Errorf("unexpected dep: %q", dep) + } } }, }.Check(t) diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index b423e0bb08f5c..f39f4fbf0c756 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -58,7 +58,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W 💣 
github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn diff --git a/feature/buildfeatures/feature_bakedroots_disabled.go b/feature/buildfeatures/feature_bakedroots_disabled.go new file mode 100644 index 0000000000000..f203bc1b06d44 --- /dev/null +++ b/feature/buildfeatures/feature_bakedroots_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_bakedroots + +package buildfeatures + +// HasBakedRoots is whether the binary was built with support for modular feature "Embed CA (LetsEncrypt) x509 roots to use as fallback". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bakedroots" build tag. +// It's a const so it can be used for dead code elimination. +const HasBakedRoots = false diff --git a/feature/buildfeatures/feature_bakedroots_enabled.go b/feature/buildfeatures/feature_bakedroots_enabled.go new file mode 100644 index 0000000000000..69cf2c34ccf6a --- /dev/null +++ b/feature/buildfeatures/feature_bakedroots_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_bakedroots + +package buildfeatures + +// HasBakedRoots is whether the binary was built with support for modular feature "Embed CA (LetsEncrypt) x509 roots to use as fallback". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bakedroots" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasBakedRoots = true diff --git a/feature/buildfeatures/feature_cloud_disabled.go b/feature/buildfeatures/feature_cloud_disabled.go new file mode 100644 index 0000000000000..3b877a9c68d40 --- /dev/null +++ b/feature/buildfeatures/feature_cloud_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_cloud + +package buildfeatures + +// HasCloud is whether the binary was built with support for modular feature "detect cloud environment to learn instances IPs and DNS servers". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cloud" build tag. +// It's a const so it can be used for dead code elimination. +const HasCloud = false diff --git a/feature/buildfeatures/feature_cloud_enabled.go b/feature/buildfeatures/feature_cloud_enabled.go new file mode 100644 index 0000000000000..8fd748de56c7e --- /dev/null +++ b/feature/buildfeatures/feature_cloud_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_cloud + +package buildfeatures + +// HasCloud is whether the binary was built with support for modular feature "detect cloud environment to learn instances IPs and DNS servers". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cloud" build tag. +// It's a const so it can be used for dead code elimination. +const HasCloud = true diff --git a/feature/buildfeatures/feature_listenrawdisco_disabled.go b/feature/buildfeatures/feature_listenrawdisco_disabled.go new file mode 100644 index 0000000000000..2911780636cb7 --- /dev/null +++ b/feature/buildfeatures/feature_listenrawdisco_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_listenrawdisco + +package buildfeatures + +// HasListenRawDisco is whether the binary was built with support for modular feature "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_listenrawdisco" build tag. +// It's a const so it can be used for dead code elimination. +const HasListenRawDisco = false diff --git a/feature/buildfeatures/feature_listenrawdisco_enabled.go b/feature/buildfeatures/feature_listenrawdisco_enabled.go new file mode 100644 index 0000000000000..4a4f85ae37319 --- /dev/null +++ b/feature/buildfeatures/feature_listenrawdisco_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_listenrawdisco + +package buildfeatures + +// HasListenRawDisco is whether the binary was built with support for modular feature "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_listenrawdisco" build tag. +// It's a const so it can be used for dead code elimination. +const HasListenRawDisco = true diff --git a/feature/buildfeatures/feature_unixsocketidentity_disabled.go b/feature/buildfeatures/feature_unixsocketidentity_disabled.go new file mode 100644 index 0000000000000..d64e48b825eac --- /dev/null +++ b/feature/buildfeatures/feature_unixsocketidentity_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_unixsocketidentity + +package buildfeatures + +// HasUnixSocketIdentity is whether the binary was built with support for modular feature "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_unixsocketidentity" build tag. +// It's a const so it can be used for dead code elimination. +const HasUnixSocketIdentity = false diff --git a/feature/buildfeatures/feature_unixsocketidentity_enabled.go b/feature/buildfeatures/feature_unixsocketidentity_enabled.go new file mode 100644 index 0000000000000..463ac2ced3636 --- /dev/null +++ b/feature/buildfeatures/feature_unixsocketidentity_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_unixsocketidentity + +package buildfeatures + +// HasUnixSocketIdentity is whether the binary was built with support for modular feature "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_unixsocketidentity" build tag. +// It's a const so it can be used for dead code elimination. +const HasUnixSocketIdentity = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 347ccdec063b3..e9d566a861afc 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -87,41 +87,47 @@ type FeatureMeta struct { // Features are the known Tailscale features that can be selectively included or // excluded via build tags, and a description of each. 
var Features = map[FeatureTag]FeatureMeta{ - "acme": {"ACME", "ACME TLS certificate management", nil}, - "appconnectors": {"AppConnectors", "App Connectors support", nil}, - "aws": {"AWS", "AWS integration", nil}, - "bird": {"Bird", "Bird BGP integration", nil}, - "captiveportal": {"CaptivePortal", "Captive portal detection", nil}, - "capture": {"Capture", "Packet capture", nil}, - "cli": {"CLI", "embed the CLI into the tailscaled binary", nil}, - "cliconndiag": {"CLIConnDiag", "CLI connection error diagnostics", nil}, - "clientupdate": {"ClientUpdate", "Client auto-update support", nil}, - "completion": {"Completion", "CLI shell completion", nil}, - "dbus": {"DBus", "Linux DBus support", nil}, - "debug": {"Debug", "various debug support, for things that don't have or need their own more specific feature", nil}, - "debugeventbus": {"DebugEventBus", "eventbus debug support", nil}, + "acme": {Sym: "ACME", Desc: "ACME TLS certificate management"}, + "appconnectors": {Sym: "AppConnectors", Desc: "App Connectors support"}, + "aws": {Sym: "AWS", Desc: "AWS integration"}, + "bakedroots": {Sym: "BakedRoots", Desc: "Embed CA (LetsEncrypt) x509 roots to use as fallback"}, + "bird": {Sym: "Bird", Desc: "Bird BGP integration"}, + "captiveportal": {Sym: "CaptivePortal", Desc: "Captive portal detection"}, + "capture": {Sym: "Capture", Desc: "Packet capture"}, + "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, + "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, + "cliconndiag": {Sym: "CLIConnDiag", Desc: "CLI connection error diagnostics"}, + "clientupdate": {Sym: "ClientUpdate", Desc: "Client auto-update support"}, + "completion": {Sym: "Completion", Desc: "CLI shell completion"}, + "dbus": {Sym: "DBus", Desc: "Linux DBus support"}, + "debug": {Sym: "Debug", Desc: "various debug support, for things that don't have or need their own more specific feature"}, + "debugeventbus": {Sym: "DebugEventBus", Desc: 
"eventbus debug support"}, "debugportmapper": { Sym: "DebugPortMapper", Desc: "portmapper debug support", Deps: []FeatureTag{"portmapper"}, }, - "desktop_sessions": {"DesktopSessions", "Desktop sessions support", nil}, - "doctor": {"Doctor", "Diagnose possible issues with Tailscale and its host environment", nil}, - "drive": {"Drive", "Tailscale Drive (file server) support", nil}, + "desktop_sessions": {Sym: "DesktopSessions", Desc: "Desktop sessions support"}, + "doctor": {Sym: "Doctor", Desc: "Diagnose possible issues with Tailscale and its host environment"}, + "drive": {Sym: "Drive", Desc: "Tailscale Drive (file server) support"}, "gro": { Sym: "GRO", Desc: "Generic Receive Offload support (performance)", Deps: []FeatureTag{"netstack"}, }, - "hujsonconf": {"HuJSONConf", "HuJSON config file support", nil}, - "iptables": {"IPTables", "Linux iptables support", nil}, - "kube": {"Kube", "Kubernetes integration", nil}, - "linuxdnsfight": {"LinuxDNSFight", "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)", nil}, + "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, + "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, + "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, + "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, + "listenrawdisco": { + Sym: "ListenRawDisco", + Desc: "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)", + }, "logtail": { Sym: "LogTail", Desc: "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)", }, - "oauthkey": {"OAuthKey", "OAuth secret-to-authkey resolution support", nil}, + "oauthkey": {Sym: "OAuthKey", Desc: "OAuth secret-to-authkey resolution support"}, "outboundproxy": { Sym: "OutboundProxy", Desc: "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale", @@ -137,9 +143,9 
@@ var Features = map[FeatureTag]FeatureMeta{ // by some other feature are missing, then it's an error by default unless you accept // that it's okay to proceed without that meta feature. }, - "portlist": {"PortList", "Optionally advertise listening service ports", nil}, - "portmapper": {"PortMapper", "NAT-PMP/PCP/UPnP port mapping support", nil}, - "posture": {"Posture", "Device posture checking support", nil}, + "portlist": {Sym: "PortList", Desc: "Optionally advertise listening service ports"}, + "portmapper": {Sym: "PortMapper", Desc: "NAT-PMP/PCP/UPnP port mapping support"}, + "posture": {Sym: "Posture", Desc: "Device posture checking support"}, "dns": { Sym: "DNS", Desc: "MagicDNS and system DNS configuration support", @@ -149,13 +155,13 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Network flow logging support", Deps: []FeatureTag{"logtail"}, }, - "netstack": {"Netstack", "gVisor netstack (userspace networking) support", nil}, + "netstack": {Sym: "Netstack", Desc: "gVisor netstack (userspace networking) support"}, "networkmanager": { Sym: "NetworkManager", Desc: "Linux NetworkManager integration", Deps: []FeatureTag{"dbus"}, }, - "relayserver": {"RelayServer", "Relay server", nil}, + "relayserver": {Sym: "RelayServer", Desc: "Relay server"}, "resolved": { Sym: "Resolved", Desc: "Linux systemd-resolved integration", @@ -179,21 +185,25 @@ var Features = map[FeatureTag]FeatureMeta{ Sym: "Synology", Desc: "Synology NAS integration (applies to Linux builds only)", }, - "syspolicy": {"SystemPolicy", "System policy configuration (MDM) support", nil}, + "syspolicy": {Sym: "SystemPolicy", Desc: "System policy configuration (MDM) support"}, "systray": { Sym: "SysTray", Desc: "Linux system tray", Deps: []FeatureTag{"dbus"}, }, - "taildrop": {"Taildrop", "Taildrop (file sending) support", nil}, - "tailnetlock": {"TailnetLock", "Tailnet Lock support", nil}, - "tap": {"Tap", "Experimental Layer 2 (ethernet) support", nil}, - "tpm": {"TPM", "TPM support", nil}, + 
"taildrop": {Sym: "Taildrop", Desc: "Taildrop (file sending) support"}, + "tailnetlock": {Sym: "TailnetLock", Desc: "Tailnet Lock support"}, + "tap": {Sym: "Tap", Desc: "Experimental Layer 2 (ethernet) support"}, + "tpm": {Sym: "TPM", Desc: "TPM support"}, + "unixsocketidentity": { + Sym: "UnixSocketIdentity", + Desc: "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)", + }, "useproxy": { Sym: "UseProxy", Desc: "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.", }, - "wakeonlan": {"WakeOnLAN", "Wake-on-LAN support", nil}, + "wakeonlan": {Sym: "WakeOnLAN", Desc: "Wake-on-LAN support"}, "webclient": { Sym: "WebClient", Desc: "Web client support", Deps: []FeatureTag{"serve"}, diff --git a/ipn/ipnauth/ipnauth.go b/ipn/ipnauth/ipnauth.go index 513daf5b3a7e6..1395a39ae2fbd 100644 --- a/ipn/ipnauth/ipnauth.go +++ b/ipn/ipnauth/ipnauth.go @@ -14,7 +14,6 @@ import ( "runtime" "strconv" - "github.com/tailscale/peercred" "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/safesocket" @@ -63,8 +62,8 @@ type ConnIdentity struct { notWindows bool // runtime.GOOS != "windows" // Fields used when NotWindows: - isUnixSock bool // Conn is a *net.UnixConn - creds *peercred.Creds // or nil if peercred.Get was not implemented on this OS + isUnixSock bool // Conn is a *net.UnixConn + creds PeerCreds // or nil if peercred.Get was not implemented on this OS // Used on Windows: // TODO(bradfitz): merge these into the peercreds package and @@ -97,9 +96,18 @@ func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID { return "" } -func (ci *ConnIdentity) Pid() int { return ci.pid } -func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock } -func (ci *ConnIdentity) Creds() *peercred.Creds { return ci.creds } +func (ci *ConnIdentity) Pid() int { return ci.pid } +func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock } +func (ci *ConnIdentity) 
Creds() PeerCreds { return ci.creds } + +// PeerCreds is the interface for a github.com/tailscale/peercred.Creds, +// if linked into the binary. +// +// (It's not used on some platforms, or if ts_omit_unixsocketidentity is set.) +type PeerCreds interface { + UserID() (uid string, ok bool) + PID() (pid int, ok bool) +} var metricIssue869Workaround = clientmetric.NewCounter("issue_869_workaround") diff --git a/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go b/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go new file mode 100644 index 0000000000000..defe7d89c409b --- /dev/null +++ b/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows && ts_omit_unixsocketidentity + +package ipnauth + +import ( + "net" + + "tailscale.com/types/logger" +) + +// GetConnIdentity returns a ConnIdentity for the given connection. Unix +// socket identity support is omitted from this build, so no identity +// information is extracted. The returned ConnIdentity has NotWindows set to true. +func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { + return &ConnIdentity{conn: c, notWindows: true}, nil +} + +// WindowsToken is unsupported when GOOS != windows and always returns +// ErrNotImplemented.
+func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) { + return nil, ErrNotImplemented +} diff --git a/ipn/ipnauth/ipnauth_notwindows.go b/ipn/ipnauth/ipnauth_unix_creds.go similarity index 95% rename from ipn/ipnauth/ipnauth_notwindows.go rename to ipn/ipnauth/ipnauth_unix_creds.go index f5dc07a8cbeb0..8ce2ac8a4bc68 100644 --- a/ipn/ipnauth/ipnauth_notwindows.go +++ b/ipn/ipnauth/ipnauth_unix_creds.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !windows +//go:build !windows && !ts_omit_unixsocketidentity package ipnauth diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index ae9e671263d6c..e2dfecec2c930 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -10,6 +10,7 @@ import ( "net/http" "path" "reflect" + "runtime" "strconv" "strings" "time" @@ -33,26 +34,34 @@ import ( // exists for that, a map entry with an empty method is used as a fallback. var c2nHandlers = map[methodAndPath]c2nHandler{ // Debug. - req("/echo"): handleC2NEcho, - req("/debug/goroutines"): handleC2NDebugGoroutines, - req("/debug/prefs"): handleC2NDebugPrefs, - req("/debug/metrics"): handleC2NDebugMetrics, - req("/debug/component-logging"): handleC2NDebugComponentLogging, - req("/debug/logheap"): handleC2NDebugLogHeap, - req("/debug/netmap"): handleC2NDebugNetMap, - - // PPROF - We only expose a subset of typical pprof endpoints for security. - req("/debug/pprof/heap"): handleC2NPprof, - req("/debug/pprof/allocs"): handleC2NPprof, - - req("POST /logtail/flush"): handleC2NLogtailFlush, - req("POST /sockstats"): handleC2NSockStats, - - // SSH - req("/ssh/usernames"): handleC2NSSHUsernames, - - // Linux netfilter. 
- req("POST /netfilter-kind"): handleC2NSetNetfilterKind, + req("/echo"): handleC2NEcho, +} + +func init() { + if buildfeatures.HasSSH { + RegisterC2N("/ssh/usernames", handleC2NSSHUsernames) + } + if buildfeatures.HasLogTail { + RegisterC2N("POST /logtail/flush", handleC2NLogtailFlush) + } + if buildfeatures.HasDebug { + RegisterC2N("POST /sockstats", handleC2NSockStats) + + // pprof: + // we only expose a subset of typical pprof endpoints for security. + RegisterC2N("/debug/pprof/heap", handleC2NPprof) + RegisterC2N("/debug/pprof/allocs", handleC2NPprof) + + RegisterC2N("/debug/goroutines", handleC2NDebugGoroutines) + RegisterC2N("/debug/prefs", handleC2NDebugPrefs) + RegisterC2N("/debug/metrics", handleC2NDebugMetrics) + RegisterC2N("/debug/component-logging", handleC2NDebugComponentLogging) + RegisterC2N("/debug/logheap", handleC2NDebugLogHeap) + RegisterC2N("/debug/netmap", handleC2NDebugNetMap) + } + if runtime.GOOS == "linux" && buildfeatures.HasOSRouter { + RegisterC2N("POST /netfilter-kind", handleC2NSetNetfilterKind) + } } // RegisterC2N registers a new c2n handler for the given pattern. 
@@ -265,6 +274,10 @@ func handleC2NPprof(b *LocalBackend, w http.ResponseWriter, r *http.Request) { } func handleC2NSSHUsernames(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasSSH { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } var req tailcfg.C2NSSHUsernamesRequest if r.Method == "POST" { if err := json.NewDecoder(r.Body).Decode(&req); err != nil { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9657686604761..9e2fbb999fc5f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1442,7 +1442,7 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi cn := b.currentNode() nid, ok := cn.NodeByAddr(ipp.Addr()) - if !ok { + if !ok && buildfeatures.HasNetstack { var ip netip.Addr if ipp.Port() != 0 { var protos []string @@ -5015,6 +5015,9 @@ func (b *LocalBackend) SetVarRoot(dir string) { // // It should only be called before the LocalBackend is used. func (b *LocalBackend) SetLogFlusher(flushFunc func()) { + if !buildfeatures.HasLogTail { + return + } b.logFlushFunc = flushFunc } @@ -5023,7 +5026,7 @@ func (b *LocalBackend) SetLogFlusher(flushFunc func()) { // // TryFlushLogs should not block. 
func (b *LocalBackend) TryFlushLogs() bool { - if b.logFlushFunc == nil { + if !buildfeatures.HasLogTail || b.logFlushFunc == nil { return false } b.logFlushFunc() diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 4f99525f9e498..9ad3e3c362570 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -354,33 +354,35 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } } - if strings.HasPrefix(r.URL.Path, "/dns-query") { + if buildfeatures.HasDNS && strings.HasPrefix(r.URL.Path, "/dns-query") { metricDNSCalls.Add(1) h.handleDNSQuery(w, r) return } - switch r.URL.Path { - case "/v0/goroutines": - h.handleServeGoroutines(w, r) - return - case "/v0/env": - h.handleServeEnv(w, r) - return - case "/v0/metrics": - h.handleServeMetrics(w, r) - return - case "/v0/magicsock": - h.handleServeMagicsock(w, r) - return - case "/v0/dnsfwd": - h.handleServeDNSFwd(w, r) - return - case "/v0/interfaces": - h.handleServeInterfaces(w, r) - return - case "/v0/sockstats": - h.handleServeSockStats(w, r) - return + if buildfeatures.HasDebug { + switch r.URL.Path { + case "/v0/goroutines": + h.handleServeGoroutines(w, r) + return + case "/v0/env": + h.handleServeEnv(w, r) + return + case "/v0/metrics": + h.handleServeMetrics(w, r) + return + case "/v0/magicsock": + h.handleServeMagicsock(w, r) + return + case "/v0/dnsfwd": + h.handleServeDNSFwd(w, r) + return + case "/v0/interfaces": + h.handleServeInterfaces(w, r) + return + case "/v0/sockstats": + h.handleServeSockStats(w, r) + return + } } if ph, ok := peerAPIHandlers[r.URL.Path]; ok { ph(h, w, r) diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 924417a33e54a..628e3c37cfc0b 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -12,6 +12,7 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/types/logger" @@ -237,6 +238,11 @@ func connIsLocalAdmin(logf 
logger.Logf, ci *ipnauth.ConnIdentity, operatorUID st // Linux. fallthrough case "linux": + if !buildfeatures.HasUnixSocketIdentity { + // Everybody is an admin if support for unix socket identities + // is omitted for the build. + return true + } uid, ok := ci.Creds().UserID() if !ok { return false diff --git a/ipn/ipnserver/proxyconnect.go b/ipn/ipnserver/proxyconnect.go index 030c4efe4a6b0..7d41273bdc52a 100644 --- a/ipn/ipnserver/proxyconnect.go +++ b/ipn/ipnserver/proxyconnect.go @@ -10,6 +10,8 @@ import ( "net" "net/http" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/logpolicy" ) @@ -23,6 +25,10 @@ import ( // precludes that from working and instead the GUI fails to dial out. // So, go through tailscaled (with a CONNECT request) instead. func (s *Server) handleProxyConnectConn(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasOutboundProxy { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } ctx := r.Context() if r.Method != "CONNECT" { panic("[unexpected] miswired") diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 6c382a57e9bd2..d473252e134a8 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -15,6 +15,7 @@ import ( "net" "net/http" "os/user" + "runtime" "strconv" "strings" "sync" @@ -24,6 +25,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" @@ -120,6 +122,10 @@ func (s *Server) awaitBackend(ctx context.Context) (_ *ipnlocal.LocalBackend, ok // This is primarily for the Windows GUI, because wintun can take awhile to // come up. See https://github.com/tailscale/tailscale/issues/6522. 
func (s *Server) serveServerStatus(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug && runtime.GOOS != "windows" { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotFound) + return + } ctx := r.Context() w.Header().Set("Content-Type", "application/json") @@ -382,6 +388,9 @@ func isAllDigit(s string) bool { // connection. It's intended to give your non-root webserver access // (www-data, caddy, nginx, etc) to certs. func (a *actor) CanFetchCerts() bool { + if !buildfeatures.HasACME { + return false + } if a.ci.IsUnixSock() && a.ci.Creds() != nil { connUID, ok := a.ci.Creds().UserID() if ok && connUID == userIDFromString(envknob.String("TS_PERMIT_CERT_UID")) { @@ -398,6 +407,10 @@ func (a *actor) CanFetchCerts() bool { // // onDone must be called when the HTTP request is done. func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (onDone func(), err error) { + if runtime.GOOS != "windows" && !buildfeatures.HasUnixSocketIdentity { + return func() {}, nil + } + if actor == nil { return nil, errors.New("internal error: nil actor") } @@ -538,6 +551,10 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { // Windows and via $DEBUG_LISTENER/debug/ipn when tailscaled's --debug flag // is used to run a debug server. 
func (s *Server) ServeHTMLStatus(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotFound) + return + } lb := s.lb.Load() if lb == nil { http.Error(w, "no LocalBackend", http.StatusServiceUnavailable) diff --git a/net/netns/socks.go b/net/netns/socks.go index ee8dfa20eec7f..9a137db7f5b18 100644 --- a/net/netns/socks.go +++ b/net/netns/socks.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !js && !android +//go:build !ios && !js && !android && !ts_omit_useproxy package netns diff --git a/net/tlsdial/blockblame/blockblame.go b/net/tlsdial/blockblame/blockblame.go index 57dc7a6e6d885..5b48dc009b980 100644 --- a/net/tlsdial/blockblame/blockblame.go +++ b/net/tlsdial/blockblame/blockblame.go @@ -9,13 +9,19 @@ package blockblame import ( "crypto/x509" "strings" + "sync" + + "tailscale.com/feature/buildfeatures" ) // VerifyCertificate checks if the given certificate c is issued by a firewall manufacturer // that is known to block Tailscale connections. It returns true and the Manufacturer of // the equipment if it is, or false and nil if it is not. 
func VerifyCertificate(c *x509.Certificate) (m *Manufacturer, ok bool) { - for _, m := range Manufacturers { + if !buildfeatures.HasDebug { + return nil, false + } + for _, m := range manufacturers() { if m.match != nil && m.match(c) { return m, true } @@ -33,46 +39,56 @@ type Manufacturer struct { match matchFunc } -var Manufacturers = []*Manufacturer{ - { - Name: "Aruba Networks", - match: issuerContains("Aruba"), - }, - { - Name: "Cisco", - match: issuerContains("Cisco"), - }, - { - Name: "Fortinet", - match: matchAny( - issuerContains("Fortinet"), - certEmail("support@fortinet.com"), - ), - }, - { - Name: "Huawei", - match: certEmail("mobile@huawei.com"), - }, - { - Name: "Palo Alto Networks", - match: matchAny( - issuerContains("Palo Alto Networks"), - issuerContains("PAN-FW"), - ), - }, - { - Name: "Sophos", - match: issuerContains("Sophos"), - }, - { - Name: "Ubiquiti", - match: matchAny( - issuerContains("UniFi"), - issuerContains("Ubiquiti"), - ), - }, +func manufacturers() []*Manufacturer { + manufacturersOnce.Do(func() { + manufacturersList = []*Manufacturer{ + { + Name: "Aruba Networks", + match: issuerContains("Aruba"), + }, + { + Name: "Cisco", + match: issuerContains("Cisco"), + }, + { + Name: "Fortinet", + match: matchAny( + issuerContains("Fortinet"), + certEmail("support@fortinet.com"), + ), + }, + { + Name: "Huawei", + match: certEmail("mobile@huawei.com"), + }, + { + Name: "Palo Alto Networks", + match: matchAny( + issuerContains("Palo Alto Networks"), + issuerContains("PAN-FW"), + ), + }, + { + Name: "Sophos", + match: issuerContains("Sophos"), + }, + { + Name: "Ubiquiti", + match: matchAny( + issuerContains("UniFi"), + issuerContains("Ubiquiti"), + ), + }, + } + }) + return manufacturersList } +var ( + manufacturersOnce sync.Once + manufacturersList []*Manufacturer +) + type matchFunc func(*x509.Certificate) bool func issuerContains(s string) matchFunc { diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 
80f3bfc06c4e8..ee4771d8db613 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -28,6 +28,7 @@ import ( "tailscale.com/derp/derpconst" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/net/bakedroots" @@ -36,12 +37,6 @@ import ( var counterFallbackOK int32 // atomic -// If SSLKEYLOGFILE is set, it's a file to which we write our TLS private keys -// in a way that WireShark can read. -// -// See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format -var sslKeyLogFile = os.Getenv("SSLKEYLOGFILE") - var debug = envknob.RegisterBool("TS_DEBUG_TLS_DIAL") // tlsdialWarningPrinted tracks whether we've printed a warning about a given @@ -80,13 +75,19 @@ func Config(ht *health.Tracker, base *tls.Config) *tls.Config { // the real TCP connection) because host is the ultimate hostname, but this // tls.Config is used for both the proxy and the ultimate target. - if n := sslKeyLogFile; n != "" { - f, err := os.OpenFile(n, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - log.Fatal(err) + if buildfeatures.HasDebug { + // If SSLKEYLOGFILE is set, it's a file to which we write our TLS private keys + // in a way that WireShark can read. 
+ // + // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format + if n := os.Getenv("SSLKEYLOGFILE"); n != "" { + f, err := os.OpenFile(n, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + log.Fatal(err) + } + log.Printf("WARNING: writing to SSLKEYLOGFILE %v", n) + conf.KeyLogWriter = f } - log.Printf("WARNING: writing to SSLKEYLOGFILE %v", n) - conf.KeyLogWriter = f } if conf.InsecureSkipVerify { @@ -164,10 +165,12 @@ func Config(ht *health.Tracker, base *tls.Config) *tls.Config { if debug() { log.Printf("tlsdial(sys %q): %v", dialedHost, errSys) } + if !buildfeatures.HasBakedRoots || (errSys == nil && !debug()) { + return errSys + } - // Always verify with our baked-in Let's Encrypt certificate, - // so we can log an informational message. This is useful for - // detecting SSL MiTM. + // If we have baked-in LetsEncrypt roots and we either failed above, or + // debug logging is enabled, also verify with LetsEncrypt. opts.Roots = bakedroots.Get() _, bakedErr := cs.PeerCertificates[0].Verify(opts) if debug() { @@ -239,8 +242,8 @@ func SetConfigExpectedCert(c *tls.Config, certDNSName string) { if debug() { log.Printf("tlsdial(sys %q/%q): %v", c.ServerName, certDNSName, errSys) } - if errSys == nil { - return nil + if !buildfeatures.HasBakedRoots || errSys == nil { + return errSys } opts.Roots = bakedroots.Get() _, err := certs[0].Verify(opts) diff --git a/safesocket/safesocket.go b/safesocket/safesocket.go index ea79edab044c1..287cdca599f77 100644 --- a/safesocket/safesocket.go +++ b/safesocket/safesocket.go @@ -13,6 +13,7 @@ import ( "time" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" ) type closeable interface { @@ -108,7 +109,12 @@ func LocalTCPPortAndToken() (port int, token string, err error) { // PlatformUsesPeerCreds reports whether the current platform uses peer credentials // to authenticate connections. 
-func PlatformUsesPeerCreds() bool { return GOOSUsesPeerCreds(runtime.GOOS) } +func PlatformUsesPeerCreds() bool { + if !buildfeatures.HasUnixSocketIdentity { + return false + } + return GOOSUsesPeerCreds(runtime.GOOS) +} // GOOSUsesPeerCreds is like PlatformUsesPeerCreds but takes a // runtime.GOOS value instead of using the current one. diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 3cf1d06e99084..1f9609745dddd 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -58,7 +58,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp LDW github.com/tailscale/hujson from tailscale.com/ipn/conffile - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LDAI github.com/tailscale/peercred from tailscale.com/ipn/ipnauth LDW github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W 💣 github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 5c11160194fdc..2243ec3deaff9 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -18,6 +18,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/util/set" ) @@ -130,15 +131,17 @@ func (m *Metric) Publish() { metrics[m.name] = m sortedDirty = true - if m.f != nil { - lastLogVal = append(lastLogVal, scanEntry{f: m.f}) - } else { - if len(valFreeList) == 0 { - valFreeList = make([]int64, 256) + if buildfeatures.HasLogTail { + if m.f != nil { + lastLogVal = append(lastLogVal, scanEntry{f: m.f}) + } else { + if len(valFreeList) == 0 { + valFreeList = make([]int64, 256) + } + m.v = &valFreeList[0] + valFreeList = valFreeList[1:] + lastLogVal = append(lastLogVal, 
scanEntry{v: m.v}) } - m.v = &valFreeList[0] - valFreeList = valFreeList[1:] - lastLogVal = append(lastLogVal, scanEntry{v: m.v}) } m.regIdx = len(unsorted) @@ -319,6 +322,9 @@ const ( // - increment a metric: (decrements if negative) // 'I' + hex(varint(wireid)) + hex(varint(value)) func EncodeLogTailMetricsDelta() string { + if !buildfeatures.HasLogTail { + return "" + } mu.Lock() defer mu.Unlock() diff --git a/util/cloudenv/cloudenv.go b/util/cloudenv/cloudenv.go index be60ca0070e54..f55f7dfb0794a 100644 --- a/util/cloudenv/cloudenv.go +++ b/util/cloudenv/cloudenv.go @@ -16,6 +16,7 @@ import ( "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/syncs" "tailscale.com/types/lazy" ) @@ -51,6 +52,9 @@ const ( // ResolverIP returns the cloud host's recursive DNS server or the // empty string if not available. func (c Cloud) ResolverIP() string { + if !buildfeatures.HasCloud { + return "" + } switch c { case GCP: return GoogleMetadataAndDNSIP @@ -92,6 +96,9 @@ var cloudAtomic syncs.AtomicValue[Cloud] // Get returns the current cloud, or the empty string if unknown. func Get() Cloud { + if !buildfeatures.HasCloud { + return "" + } if c, ok := cloudAtomic.LoadOk(); ok { return c } diff --git a/wgengine/magicsock/cloudinfo.go b/wgengine/magicsock/cloudinfo.go index 1de369631314c..0db56b3f6c514 100644 --- a/wgengine/magicsock/cloudinfo.go +++ b/wgengine/magicsock/cloudinfo.go @@ -17,6 +17,7 @@ import ( "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/cloudenv" ) @@ -34,6 +35,9 @@ type cloudInfo struct { } func newCloudInfo(logf logger.Logf) *cloudInfo { + if !buildfeatures.HasCloud { + return nil + } tr := &http.Transport{ DisableKeepAlives: true, Dial: (&net.Dialer{ @@ -53,6 +57,9 @@ func newCloudInfo(logf logger.Logf) *cloudInfo { // if the tailscaled process is running in a known cloud and there are any such // IPs present. 
func (ci *cloudInfo) GetPublicIPs(ctx context.Context) ([]netip.Addr, error) { + if !buildfeatures.HasCloud { + return nil, nil + } switch ci.cloud { case cloudenv.AWS: ret, err := ci.getAWS(ctx) diff --git a/wgengine/magicsock/magicsock_default.go b/wgengine/magicsock/magicsock_default.go index 1c315034a6f75..88759d3acc2e3 100644 --- a/wgengine/magicsock/magicsock_default.go +++ b/wgengine/magicsock/magicsock_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || ts_omit_listenrawdisco package magicsock diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index cad0e9b5e3134..f37e19165141f 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_listenrawdisco + package magicsock import ( diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 30486f7a972f4..735181ec70f2f 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -435,7 +435,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } e.tundev.PreFilterPacketOutboundToWireGuardEngineIntercept = e.handleLocalPackets - if envknob.BoolDefaultTrue("TS_DEBUG_CONNECT_FAILURES") { + if buildfeatures.HasDebug && envknob.BoolDefaultTrue("TS_DEBUG_CONNECT_FAILURES") { if e.tundev.PreFilterPacketInboundFromWireGuard != nil { return nil, errors.New("unexpected PreFilterIn already set") } From 1d93bdce20ddd2887651e4c2324dd4e113cd864a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 08:53:48 -0700 Subject: [PATCH 0493/1093] control/controlclient: remove x/net/http2, use net/http Saves 352 KB, removing one of our two HTTP/2 implementations linked into the binary. 
Fixes #17305 Updates #15015 Change-Id: I53a04b1f2687dca73c8541949465038b69aa6ade Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/cli/debug.go | 87 ++-- cmd/tailscale/depaware.txt | 20 +- cmd/tailscale/tailscale_test.go | 1 - cmd/tailscaled/depaware-min.txt | 15 +- cmd/tailscaled/depaware-minbox.txt | 15 +- cmd/tailscaled/depaware.txt | 9 +- cmd/tailscaled/deps_test.go | 3 + cmd/tsidp/depaware.txt | 9 +- control/controlclient/direct.go | 37 +- control/controlclient/noise.go | 394 ------------------ control/controlhttp/constants.go | 4 +- control/ts2021/client.go | 289 +++++++++++++ .../noise_test.go => ts2021/client_test.go} | 93 +++-- control/ts2021/conn.go | 57 +-- net/tsdial/tsdial.go | 9 +- tsnet/depaware.txt | 9 +- 17 files changed, 467 insertions(+), 586 deletions(-) delete mode 100644 control/controlclient/noise.go create mode 100644 control/ts2021/client.go rename control/{controlclient/noise_test.go => ts2021/client_test.go} (80%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index aac465a30fbc5..eae1354a1ca68 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -687,7 +687,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 224070842861f..2836ae29814e7 100644 --- 
a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -17,6 +17,7 @@ import ( "log" "net" "net/http" + "net/http/httptrace" "net/http/httputil" "net/netip" "net/url" @@ -28,17 +29,18 @@ import ( "time" "github.com/peterbourgon/ff/v3/ffcli" - "golang.org/x/net/http2" "tailscale.com/client/tailscale/apitype" - "tailscale.com/control/controlhttp" "tailscale.com/control/ts2021" "tailscale.com/feature" _ "tailscale.com/feature/condregister/useproxy" + "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/ace" + "tailscale.com/net/dnscache" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" + "tailscale.com/net/tsdial" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" @@ -1062,22 +1064,8 @@ func runTS2021(ctx context.Context, args []string) error { if err := json.Unmarshal(b, dialPlan); err != nil { return fmt.Errorf("unmarshaling dial plan JSON file: %w", err) } - } - - noiseDialer := &controlhttp.Dialer{ - Hostname: ts2021Args.host, - HTTPPort: "80", - HTTPSPort: "443", - MachineKey: machinePrivate, - ControlKey: keys.PublicKey, - ProtocolVersion: uint16(ts2021Args.version), - DialPlan: dialPlan, - Dialer: dialFunc, - Logf: logf, - NetMon: netMon, - } - if ts2021Args.aceHost != "" { - noiseDialer.DialPlan = &tailcfg.ControlDialPlan{ + } else if ts2021Args.aceHost != "" { + dialPlan = &tailcfg.ControlDialPlan{ Candidates: []tailcfg.ControlIPCandidate{ { ACEHost: ts2021Args.aceHost, @@ -1086,9 +1074,25 @@ func runTS2021(ctx context.Context, args []string) error { }, } } + + opts := ts2021.ClientOpts{ + ServerURL: "https://" + ts2021Args.host, + DialPlan: func() *tailcfg.ControlDialPlan { + return dialPlan + }, + Logf: logf, + NetMon: netMon, + PrivKey: machinePrivate, + ServerPubKey: keys.PublicKey, + Dialer: tsdial.NewFromFuncForDebug(logf, dialFunc), + DNSCache: &dnscache.Resolver{}, + HealthTracker: &health.Tracker{}, + } + + // TODO: ProtocolVersion: uint16(ts2021Args.version), const tries = 2 for 
i := range tries { - err := tryConnect(ctx, keys.PublicKey, noiseDialer) + err := tryConnect(ctx, keys.PublicKey, opts) if err != nil { log.Printf("error on attempt %d/%d: %v", i+1, tries, err) continue @@ -1098,44 +1102,37 @@ func runTS2021(ctx context.Context, args []string) error { return nil } -func tryConnect(ctx context.Context, controlPublic key.MachinePublic, noiseDialer *controlhttp.Dialer) error { - conn, err := noiseDialer.Dial(ctx) - log.Printf("controlhttp.Dial = %p, %v", conn, err) - if err != nil { - return err - } - log.Printf("did noise handshake") - - gotPeer := conn.Peer() - if gotPeer != controlPublic { - log.Printf("peer = %v, want %v", gotPeer, controlPublic) - return errors.New("key mismatch") - } +func tryConnect(ctx context.Context, controlPublic key.MachinePublic, opts ts2021.ClientOpts) error { - log.Printf("final underlying conn: %v / %v", conn.LocalAddr(), conn.RemoteAddr()) - - h2Transport, err := http2.ConfigureTransports(&http.Transport{ - IdleConnTimeout: time.Second, + ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ + GotConn: func(ci httptrace.GotConnInfo) { + log.Printf("GotConn: %T", ci.Conn) + ncc, ok := ci.Conn.(*ts2021.Conn) + if !ok { + return + } + log.Printf("did noise handshake") + log.Printf("final underlying conn: %v / %v", ncc.LocalAddr(), ncc.RemoteAddr()) + gotPeer := ncc.Peer() + if gotPeer != controlPublic { + log.Fatalf("peer = %v, want %v", gotPeer, controlPublic) + } + }, }) - if err != nil { - return fmt.Errorf("http2.ConfigureTransports: %w", err) - } - // Now, create a Noise conn over the existing conn. - nc, err := ts2021.New(conn.Conn, h2Transport, 0, nil) + nc, err := ts2021.NewClient(opts) if err != nil { - return fmt.Errorf("noiseconn.New: %w", err) + return fmt.Errorf("NewNoiseClient: %w", err) } - defer nc.Close() // Make a /whoami request to the server to verify that we can actually // communicate over the newly-established connection. 
- whoamiURL := "http://" + ts2021Args.host + "/machine/whoami" + whoamiURL := "https://" + ts2021Args.host + "/machine/whoami" req, err := http.NewRequestWithContext(ctx, "GET", whoamiURL, nil) if err != nil { return err } - resp, err := nc.RoundTrip(req) + resp, err := nc.Do(req) if err != nil { return fmt.Errorf("RoundTrip whoami request: %w", err) } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 3e100d4a76e7e..6facd19f98531 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -18,6 +18,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode L github.com/fogleman/gg from tailscale.com/client/systray github.com/fxamacker/cbor/v2 from tailscale.com/tka + github.com/gaissmai/bart from tailscale.com/net/tsdial + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ @@ -83,7 +86,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ - tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/ts2021 from tailscale.com/cmd/tailscale/cli tailscale.com/derp from tailscale.com/derp/derphttp+ @@ -119,7 +122,7 @@ 
tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/cmd/tailscale/cli tailscale.com/net/neterror from tailscale.com/net/netcheck+ - tailscale.com/net/netknob from tailscale.com/net/netns + tailscale.com/net/netknob from tailscale.com/net/netns+ 💣 tailscale.com/net/netmon from tailscale.com/cmd/tailscale/cli+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ tailscale.com/net/netutil from tailscale.com/client/local+ @@ -132,6 +135,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscale/cli+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/paths from tailscale.com/client/local+ @@ -229,13 +233,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L golang.org/x/image/math/fixed from github.com/fogleman/gg+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy - golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping - golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/idna from golang.org/x/net/http/httpproxy+ golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ 
golang.org/x/net/internal/socks from golang.org/x/net/proxy @@ -342,7 +342,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from golang.org/x/net/http2+ + crypto/tls from net/http+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 @@ -441,14 +441,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep net from crypto/tls+ net/http from expvar+ net/http/cgi from tailscale.com/cmd/tailscale/cli - net/http/httptrace from golang.org/x/net/http2+ + net/http/httptrace from net/http+ net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ os/exec from github.com/atotto/clipboard+ diff --git a/cmd/tailscale/tailscale_test.go b/cmd/tailscale/tailscale_test.go index dc477fb6e4357..a7a3c2323cb8f 100644 --- a/cmd/tailscale/tailscale_test.go +++ b/cmd/tailscale/tailscale_test.go @@ -19,7 +19,6 @@ func TestDeps(t *testing.T) { "gvisor.dev/gvisor/pkg/tcpip/header": "https://github.com/tailscale/tailscale/issues/9756", "tailscale.com/wgengine/filter": "brings in bart, etc", "github.com/bits-and-blooms/bitset": "unneeded in CLI", - "github.com/gaissmai/bart": "unneeded in CLI", "tailscale.com/net/ipset": "unneeded in CLI", }, }.Check(t) diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index accaab8f0a653..be13c7b680959 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -41,7 +41,7 @@ tailscale.com/cmd/tailscaled 
dependencies: (generated by github.com/tailscale/de tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient @@ -212,12 +212,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http2 from tailscale.com/control/controlclient+ - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ golang.org/x/net/icmp from tailscale.com/net/ping - golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/idna from golang.org/x/net/http/httpguts golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ @@ -251,7 +248,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip - compress/gzip from golang.org/x/net/http2+ + compress/gzip from net/http container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ @@ -313,7 +310,7 @@ tailscale.com/cmd/tailscaled 
dependencies: (generated by github.com/tailscale/de crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from golang.org/x/net/http2+ + crypto/tls from net/http+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ crypto/x509/pkix from crypto/x509 @@ -391,7 +388,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from golang.org/x/net/http2+ + net/http/httptrace from net/http+ net/http/internal from net/http net/http/internal/ascii from net/http net/http/internal/httpcommon from net/http diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index f558c4c0b368f..a91aa8afdce07 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -61,10 +61,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient+ + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ - tailscale.com/control/ts2021 from tailscale.com/cmd/tailscale/cli+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient+ tailscale.com/derp from tailscale.com/derp/derphttp+ tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ @@ -239,12 +239,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 
golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ @@ -279,7 +276,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from golang.org/x/net/http2+ + compress/gzip from net/http+ compress/zlib from image/png container/list from crypto/tls+ context from crypto/tls+ @@ -342,7 +339,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from golang.org/x/net/http2+ + crypto/tls from net/http+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ crypto/x509/pkix from crypto/x509 @@ -425,7 +422,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from golang.org/x/net/http2+ + net/http/httptrace from net/http+ net/http/httputil from tailscale.com/cmd/tailscale/cli net/http/internal from net/http+ net/http/internal/ascii from net/http+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt 
index 7e6dff7dfbd80..00c1a0ac42200 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -252,7 +252,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/cmd/tailscaled/tailscaledhooks from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient @@ -501,13 +501,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy - golang.org/x/net/http2 from tailscale.com/control/controlclient+ - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from tailscale.com/net/ping+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy @@ -551,7 +548,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de bytes from archive/tar+ cmp from slices+ compress/flate from compress/gzip+ - compress/gzip from golang.org/x/net/http2+ + 
compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ W compress/zlib from debug/pe container/heap from github.com/jellydator/ttlcache/v3+ container/list from crypto/tls+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 1ec1998d7034e..c54f014f62830 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -276,5 +276,8 @@ func TestMinTailscaledWithCLI(t *testing.T) { } } }, + BadDeps: map[string]string{ + "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", + }, }.Check(t) } diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index f39f4fbf0c756..4ddc5eda1462f 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -129,7 +129,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient @@ -335,13 +335,10 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy - golang.org/x/net/http2 from tailscale.com/control/controlclient+ - golang.org/x/net/http2/hpack 
from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy @@ -385,7 +382,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from golang.org/x/net/http2+ + compress/gzip from internal/profile+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index ed84d63ff26af..a3f908da41652 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -28,6 +28,7 @@ import ( "go4.org/mem" "tailscale.com/control/controlknobs" + "tailscale.com/control/ts2021" "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/feature/buildfeatures" @@ -95,8 +96,8 @@ type Direct struct { serverLegacyKey key.MachinePublic // original ("legacy") nacl crypto_box-based public key; only used for signRegisterRequest on Windows now serverNoiseKey key.MachinePublic - sfGroup singleflight.Group[struct{}, *NoiseClient] // protects noiseClient creation. - noiseClient *NoiseClient + sfGroup singleflight.Group[struct{}, *ts2021.Client] // protects noiseClient creation. 
+ noiseClient *ts2021.Client persist persist.PersistView authKey string @@ -329,7 +330,7 @@ func NewDirect(opts Options) (*Direct, error) { } } if opts.NoiseTestClient != nil { - c.noiseClient = &NoiseClient{ + c.noiseClient = &ts2021.Client{ Client: opts.NoiseTestClient, } c.serverNoiseKey = key.NewMachine().Public() // prevent early error before hitting test client @@ -359,9 +360,7 @@ func (c *Direct) Close() error { } } c.noiseClient = nil - if tr, ok := c.httpc.Transport.(*http.Transport); ok { - tr.CloseIdleConnections() - } + c.httpc.CloseIdleConnections() return nil } @@ -703,8 +702,8 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new if err != nil { return regen, opt.URL, nil, err } - addLBHeader(req, request.OldNodeKey) - addLBHeader(req, request.NodeKey) + ts2021.AddLBHeader(req, request.OldNodeKey) + ts2021.AddLBHeader(req, request.NodeKey) res, err := httpc.Do(req) if err != nil { @@ -1012,7 +1011,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap if err != nil { return err } - addLBHeader(req, nodeKey) + ts2021.AddLBHeader(req, nodeKey) res, err := httpc.Do(req) if err != nil { @@ -1507,7 +1506,7 @@ func sleepAsRequested(ctx context.Context, logf logger.Logf, d time.Duration, cl } // getNoiseClient returns the noise client, creating one if one doesn't exist. 
-func (c *Direct) getNoiseClient() (*NoiseClient, error) { +func (c *Direct) getNoiseClient() (*ts2021.Client, error) { c.mu.Lock() serverNoiseKey := c.serverNoiseKey nc := c.noiseClient @@ -1522,13 +1521,13 @@ func (c *Direct) getNoiseClient() (*NoiseClient, error) { if c.dialPlan != nil { dp = c.dialPlan.Load } - nc, err, _ := c.sfGroup.Do(struct{}{}, func() (*NoiseClient, error) { + nc, err, _ := c.sfGroup.Do(struct{}{}, func() (*ts2021.Client, error) { k, err := c.getMachinePrivKey() if err != nil { return nil, err } c.logf("[v1] creating new noise client") - nc, err := NewNoiseClient(NoiseOpts{ + nc, err := ts2021.NewClient(ts2021.ClientOpts{ PrivKey: k, ServerPubKey: serverNoiseKey, ServerURL: c.serverURL, @@ -1562,7 +1561,7 @@ func (c *Direct) setDNSNoise(ctx context.Context, req *tailcfg.SetDNSRequest) er if err != nil { return err } - res, err := nc.post(ctx, "/machine/set-dns", newReq.NodeKey, &newReq) + res, err := nc.Post(ctx, "/machine/set-dns", newReq.NodeKey, &newReq) if err != nil { return err } @@ -1696,7 +1695,7 @@ func (c *Direct) ReportWarnableChange(w *health.Warnable, us *health.UnhealthySt // Best effort, no logging: ctx, cancel := context.WithTimeout(c.closedCtx, 5*time.Second) defer cancel() - res, err := np.post(ctx, "/machine/update-health", nodeKey, req) + res, err := np.Post(ctx, "/machine/update-health", nodeKey, req) if err != nil { return } @@ -1741,7 +1740,7 @@ func (c *Direct) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) e ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - res, err := nc.doWithBody(ctx, "PATCH", "/machine/set-device-attr", nodeKey, req) + res, err := nc.DoWithBody(ctx, "PATCH", "/machine/set-device-attr", nodeKey, req) if err != nil { return err } @@ -1782,7 +1781,7 @@ func (c *Direct) sendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequ panic("tainted client") } - res, err := nc.post(ctx, "/machine/audit-log", nodeKey, req) + res, err := nc.Post(ctx, 
"/machine/audit-log", nodeKey, req) if err != nil { return fmt.Errorf("%w: %w", errHTTPPostFailure, err) } @@ -1794,12 +1793,6 @@ func (c *Direct) sendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequ return nil } -func addLBHeader(req *http.Request, nodeKey key.NodePublic) { - if !nodeKey.IsZero() { - req.Header.Add(tailcfg.LBHeader, nodeKey.String()) - } -} - // makeScreenTimeDetectingDialFunc returns dialFunc, optionally wrapped (on // Apple systems) with a func that sets the returned atomic.Bool for whether // Screen Time seemed to intercept the connection. diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go deleted file mode 100644 index 1daa07620aa08..0000000000000 --- a/control/controlclient/noise.go +++ /dev/null @@ -1,394 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package controlclient - -import ( - "bytes" - "cmp" - "context" - "encoding/json" - "errors" - "math" - "net/http" - "net/netip" - "net/url" - "sync" - "time" - - "golang.org/x/net/http2" - "tailscale.com/control/controlhttp" - "tailscale.com/control/ts2021" - "tailscale.com/health" - "tailscale.com/net/dnscache" - "tailscale.com/net/netmon" - "tailscale.com/net/tsdial" - "tailscale.com/tailcfg" - "tailscale.com/tstime" - "tailscale.com/types/key" - "tailscale.com/types/logger" - "tailscale.com/util/mak" - "tailscale.com/util/singleflight" -) - -// NoiseClient provides a http.Client to connect to tailcontrol over -// the ts2021 protocol. -type NoiseClient struct { - // Client is an HTTP client to talk to the coordination server. - // It automatically makes a new Noise connection as needed. - // It does not support node key proofs. To do that, call - // noiseClient.getConn instead to make a connection. - *http.Client - - // h2t is the HTTP/2 transport we use a bit to create new - // *http2.ClientConns. We don't use its connection pool and we don't use its - // dialing. 
We use it for exactly one reason: its idle timeout that can only - // be configured via the HTTP/1 config. And then we call NewClientConn (with - // an existing Noise connection) on the http2.Transport which sets up an - // http2.ClientConn using that idle timeout from an http1.Transport. - h2t *http2.Transport - - // sfDial ensures that two concurrent requests for a noise connection only - // produce one shared one between the two callers. - sfDial singleflight.Group[struct{}, *ts2021.Conn] - - dialer *tsdial.Dialer - dnsCache *dnscache.Resolver - privKey key.MachinePrivate - serverPubKey key.MachinePublic - host string // the host part of serverURL - httpPort string // the default port to dial - httpsPort string // the fallback Noise-over-https port or empty if none - - // dialPlan optionally returns a ControlDialPlan previously received - // from the control server; either the function or the return value can - // be nil. - dialPlan func() *tailcfg.ControlDialPlan - - logf logger.Logf - netMon *netmon.Monitor - health *health.Tracker - - // mu only protects the following variables. - mu sync.Mutex - closed bool - last *ts2021.Conn // or nil - nextID int - connPool map[int]*ts2021.Conn // active connections not yet closed; see ts2021.Conn.Close -} - -// NoiseOpts contains options for the NewNoiseClient function. All fields are -// required unless otherwise specified. -type NoiseOpts struct { - // PrivKey is this node's private key. - PrivKey key.MachinePrivate - // ServerPubKey is the public key of the server. - ServerPubKey key.MachinePublic - // ServerURL is the URL of the server to connect to. - ServerURL string - // Dialer's SystemDial function is used to connect to the server. - Dialer *tsdial.Dialer - // DNSCache is the caching Resolver to use to connect to the server. - // - // This field can be nil. - DNSCache *dnscache.Resolver - // Logf is the log function to use. This field can be nil. 
- Logf logger.Logf - // NetMon is the network monitor that, if set, will be used to get the - // network interface state. This field can be nil; if so, the current - // state will be looked up dynamically. - NetMon *netmon.Monitor - // HealthTracker, if non-nil, is the health tracker to use. - HealthTracker *health.Tracker - // DialPlan, if set, is a function that should return an explicit plan - // on how to connect to the server. - DialPlan func() *tailcfg.ControlDialPlan -} - -// NewNoiseClient returns a new noiseClient for the provided server and machine key. -// serverURL is of the form https://: (no trailing slash). -// -// netMon may be nil, if non-nil it's used to do faster interface lookups. -// dialPlan may be nil -func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) { - logf := opts.Logf - u, err := url.Parse(opts.ServerURL) - if err != nil { - return nil, err - } - - if u.Scheme != "http" && u.Scheme != "https" { - return nil, errors.New("invalid ServerURL scheme, must be http or https") - } - - var httpPort string - var httpsPort string - addr, _ := netip.ParseAddr(u.Hostname()) - isPrivateHost := addr.IsPrivate() || addr.IsLoopback() || u.Hostname() == "localhost" - if port := u.Port(); port != "" { - // If there is an explicit port specified, entirely rely on the scheme, - // unless it's http with a private host in which case we never try using HTTPS. - if u.Scheme == "https" { - httpPort = "" - httpsPort = port - } else if u.Scheme == "http" { - httpPort = port - httpsPort = "443" - if isPrivateHost { - logf("setting empty HTTPS port with http scheme and private host %s", u.Hostname()) - httpsPort = "" - } - } - } else if u.Scheme == "http" && isPrivateHost { - // Whenever the scheme is http and the hostname is an IP address, do not set the HTTPS port, - // as there cannot be a TLS certificate issued for an IP, unless it's a public IP. 
- httpPort = "80" - httpsPort = "" - } else { - // Otherwise, use the standard ports - httpPort = "80" - httpsPort = "443" - } - - np := &NoiseClient{ - serverPubKey: opts.ServerPubKey, - privKey: opts.PrivKey, - host: u.Hostname(), - httpPort: httpPort, - httpsPort: httpsPort, - dialer: opts.Dialer, - dnsCache: opts.DNSCache, - dialPlan: opts.DialPlan, - logf: opts.Logf, - netMon: opts.NetMon, - health: opts.HealthTracker, - } - - // Create the HTTP/2 Transport using a net/http.Transport - // (which only does HTTP/1) because it's the only way to - // configure certain properties on the http2.Transport. But we - // never actually use the net/http.Transport for any HTTP/1 - // requests. - h2Transport, err := http2.ConfigureTransports(&http.Transport{ - IdleConnTimeout: time.Minute, - }) - if err != nil { - return nil, err - } - np.h2t = h2Transport - - np.Client = &http.Client{Transport: np} - return np, nil -} - -// contextErr is an error that wraps another error and is used to indicate that -// the error was because a context expired. -type contextErr struct { - err error -} - -func (e contextErr) Error() string { - return e.err.Error() -} - -func (e contextErr) Unwrap() error { - return e.err -} - -// getConn returns a ts2021.Conn that can be used to make requests to the -// coordination server. It may return a cached connection or create a new one. -// Dials are singleflighted, so concurrent calls to getConn may only dial once. -// As such, context values may not be respected as there are no guarantees that -// the context passed to getConn is the same as the context passed to dial. -func (nc *NoiseClient) getConn(ctx context.Context) (*ts2021.Conn, error) { - nc.mu.Lock() - if last := nc.last; last != nil && last.CanTakeNewRequest() { - nc.mu.Unlock() - return last, nil - } - nc.mu.Unlock() - - for { - // We singeflight the dial to avoid making multiple connections, however - // that means that we can't simply cancel the dial if the context is - // canceled. 
Instead, we have to additionally check that the context - // which was canceled is our context and retry if our context is still - // valid. - conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*ts2021.Conn, error) { - c, err := nc.dial(ctx) - if err != nil { - if ctx.Err() != nil { - return nil, contextErr{ctx.Err()} - } - return nil, err - } - return c, nil - }) - var ce contextErr - if err == nil || !errors.As(err, &ce) { - return conn, err - } - if ctx.Err() == nil { - // The dial failed because of a context error, but our context - // is still valid. Retry. - continue - } - // The dial failed because our context was canceled. Return the - // underlying error. - return nil, ce.Unwrap() - } -} - -func (nc *NoiseClient) RoundTrip(req *http.Request) (*http.Response, error) { - ctx := req.Context() - conn, err := nc.getConn(ctx) - if err != nil { - return nil, err - } - return conn.RoundTrip(req) -} - -// connClosed removes the connection with the provided ID from the pool -// of active connections. -func (nc *NoiseClient) connClosed(id int) { - nc.mu.Lock() - defer nc.mu.Unlock() - conn := nc.connPool[id] - if conn != nil { - delete(nc.connPool, id) - if nc.last == conn { - nc.last = nil - } - } -} - -// Close closes all the underlying noise connections. -// It is a no-op and returns nil if the connection is already closed. -func (nc *NoiseClient) Close() error { - nc.mu.Lock() - nc.closed = true - conns := nc.connPool - nc.connPool = nil - nc.mu.Unlock() - - var errs []error - for _, c := range conns { - if err := c.Close(); err != nil { - errs = append(errs, err) - } - } - return errors.Join(errs...) -} - -// dial opens a new connection to tailcontrol, fetching the server noise key -// if not cached. 
-func (nc *NoiseClient) dial(ctx context.Context) (*ts2021.Conn, error) { - nc.mu.Lock() - connID := nc.nextID - nc.nextID++ - nc.mu.Unlock() - - if tailcfg.CurrentCapabilityVersion > math.MaxUint16 { - // Panic, because a test should have started failing several - // thousand version numbers before getting to this point. - panic("capability version is too high to fit in the wire protocol") - } - - var dialPlan *tailcfg.ControlDialPlan - if nc.dialPlan != nil { - dialPlan = nc.dialPlan() - } - - // If we have a dial plan, then set our timeout as slightly longer than - // the maximum amount of time contained therein; we assume that - // explicit instructions on timeouts are more useful than a single - // hard-coded timeout. - // - // The default value of 5 is chosen so that, when there's no dial plan, - // we retain the previous behaviour of 10 seconds end-to-end timeout. - timeoutSec := 5.0 - if dialPlan != nil { - for _, c := range dialPlan.Candidates { - if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec { - timeoutSec = v - } - } - } - - // After we establish a connection, we need some time to actually - // upgrade it into a Noise connection. With a ballpark worst-case RTT - // of 1000ms, give ourselves an extra 5 seconds to complete the - // handshake. - timeoutSec += 5 - - // Be extremely defensive and ensure that the timeout is in the range - // [5, 60] seconds (e.g. if we accidentally get a negative number). 
- if timeoutSec > 60 { - timeoutSec = 60 - } else if timeoutSec < 5 { - timeoutSec = 5 - } - - timeout := time.Duration(timeoutSec * float64(time.Second)) - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - clientConn, err := (&controlhttp.Dialer{ - Hostname: nc.host, - HTTPPort: nc.httpPort, - HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort), - MachineKey: nc.privKey, - ControlKey: nc.serverPubKey, - ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion), - Dialer: nc.dialer.SystemDial, - DNSCache: nc.dnsCache, - DialPlan: dialPlan, - Logf: nc.logf, - NetMon: nc.netMon, - HealthTracker: nc.health, - Clock: tstime.StdClock{}, - }).Dial(ctx) - if err != nil { - return nil, err - } - - ncc, err := ts2021.New(clientConn.Conn, nc.h2t, connID, nc.connClosed) - if err != nil { - return nil, err - } - - nc.mu.Lock() - if nc.closed { - nc.mu.Unlock() - ncc.Close() // Needs to be called without holding the lock. - return nil, errors.New("noise client closed") - } - defer nc.mu.Unlock() - mak.Set(&nc.connPool, connID, ncc) - nc.last = ncc - return ncc, nil -} - -// post does a POST to the control server at the given path, JSON-encoding body. -// The provided nodeKey is an optional load balancing hint. 
-func (nc *NoiseClient) post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { - return nc.doWithBody(ctx, "POST", path, nodeKey, body) -} - -func (nc *NoiseClient) doWithBody(ctx context.Context, method, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { - jbody, err := json.Marshal(body) - if err != nil { - return nil, err - } - req, err := http.NewRequestWithContext(ctx, method, "https://"+nc.host+path, bytes.NewReader(jbody)) - if err != nil { - return nil, err - } - addLBHeader(req, nodeKey) - req.Header.Set("Content-Type", "application/json") - conn, err := nc.getConn(ctx) - if err != nil { - return nil, err - } - return conn.RoundTrip(req) -} diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 58fed1b76ac3a..359410ae9d29c 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -78,8 +78,8 @@ type Dialer struct { // dropped. Logf logger.Logf - // NetMon is the [netmon.Monitor] to use for this Dialer. It must be - // non-nil. + // NetMon is the [netmon.Monitor] to use for this Dialer. + // It is optional. NetMon *netmon.Monitor // HealthTracker, if non-nil, is the health tracker to use. 
diff --git a/control/ts2021/client.go b/control/ts2021/client.go new file mode 100644 index 0000000000000..9a9a3ded86944 --- /dev/null +++ b/control/ts2021/client.go @@ -0,0 +1,289 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ts2021 + +import ( + "bytes" + "cmp" + "context" + "encoding/json" + "errors" + "fmt" + "log" + "math" + "net" + "net/http" + "net/netip" + "net/url" + "sync" + "time" + + "tailscale.com/control/controlhttp" + "tailscale.com/health" + "tailscale.com/net/dnscache" + "tailscale.com/net/netmon" + "tailscale.com/net/tsdial" + "tailscale.com/tailcfg" + "tailscale.com/tstime" + "tailscale.com/types/key" + "tailscale.com/types/logger" +) + +// Client provides a http.Client to connect to tailcontrol over +// the ts2021 protocol. +type Client struct { + // Client is an HTTP client to talk to the coordination server. + // It automatically makes a new Noise connection as needed. + *http.Client + + logf logger.Logf // non-nil + opts ClientOpts + host string // the host part of serverURL + httpPort string // the default port to dial + httpsPort string // the fallback Noise-over-https port or empty if none + + // mu protects the following + mu sync.Mutex + closed bool +} + +// ClientOpts contains options for the [NewClient] function. All fields are +// required unless otherwise specified. +type ClientOpts struct { + // ServerURL is the URL of the server to connect to. + ServerURL string + + // PrivKey is this node's private key. + PrivKey key.MachinePrivate + + // ServerPubKey is the public key of the server. + // It is of the form https://: (no trailing slash). + ServerPubKey key.MachinePublic + + // Dialer's SystemDial function is used to connect to the server. + Dialer *tsdial.Dialer + + // Optional fields follow + + // Logf is the log function to use. + // If nil, log.Printf is used. + Logf logger.Logf + + // NetMon is the network monitor that will be used to get the + // network interface state. 
This field can be nil; if so, the current + // state will be looked up dynamically. + NetMon *netmon.Monitor + + // DNSCache is the caching Resolver to use to connect to the server. + // + // This field can be nil. + DNSCache *dnscache.Resolver + + // HealthTracker, if non-nil, is the health tracker to use. + HealthTracker *health.Tracker + + // DialPlan, if set, is a function that should return an explicit plan + // on how to connect to the server. + DialPlan func() *tailcfg.ControlDialPlan + + // ProtocolVersion, if non-zero, specifies an alternate + // protocol version to use instead of the default, + // of [tailcfg.CurrentCapabilityVersion]. + ProtocolVersion uint16 +} + +// NewClient returns a new noiseClient for the provided server and machine key. +// +// netMon may be nil, if non-nil it's used to do faster interface lookups. +// dialPlan may be nil +func NewClient(opts ClientOpts) (*Client, error) { + logf := opts.Logf + if logf == nil { + logf = log.Printf + } + if opts.ServerURL == "" { + return nil, errors.New("ServerURL is required") + } + if opts.PrivKey.IsZero() { + return nil, errors.New("PrivKey is required") + } + if opts.ServerPubKey.IsZero() { + return nil, errors.New("ServerPubKey is required") + } + if opts.Dialer == nil { + return nil, errors.New("Dialer is required") + } + + u, err := url.Parse(opts.ServerURL) + if err != nil { + return nil, fmt.Errorf("invalid ClientOpts.ServerURL: %w", err) + } + if u.Scheme != "http" && u.Scheme != "https" { + return nil, errors.New("invalid ServerURL scheme, must be http or https") + } + + httpPort, httpsPort := "80", "443" + addr, _ := netip.ParseAddr(u.Hostname()) + isPrivateHost := addr.IsPrivate() || addr.IsLoopback() || u.Hostname() == "localhost" + if port := u.Port(); port != "" { + // If there is an explicit port specified, entirely rely on the scheme, + // unless it's http with a private host in which case we never try using HTTPS. 
+ if u.Scheme == "https" { + httpPort = "" + httpsPort = port + } else if u.Scheme == "http" { + httpPort = port + httpsPort = "443" + if isPrivateHost { + logf("setting empty HTTPS port with http scheme and private host %s", u.Hostname()) + httpsPort = "" + } + } + } else if u.Scheme == "http" && isPrivateHost { + // Whenever the scheme is http and the hostname is an IP address, do not set the HTTPS port, + // as there cannot be a TLS certificate issued for an IP, unless it's a public IP. + httpPort = "80" + httpsPort = "" + } + + np := &Client{ + opts: opts, + host: u.Hostname(), + httpPort: httpPort, + httpsPort: httpsPort, + logf: logf, + } + + tr := &http.Transport{ + Protocols: new(http.Protocols), + MaxConnsPerHost: 1, + } + // We force only HTTP/2 for this transport, which is what the control server + // speaks inside the ts2021 Noise encryption. But Go doesn't know about that, + // so we use "SetUnencryptedHTTP2" even though it's actually encrypted. + tr.Protocols.SetUnencryptedHTTP2(true) + tr.DialTLSContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return np.dial(ctx) + } + + np.Client = &http.Client{Transport: tr} + return np, nil +} + +// Close closes all the underlying noise connections. +// It is a no-op and returns nil if the connection is already closed. +func (nc *Client) Close() error { + nc.mu.Lock() + defer nc.mu.Unlock() + nc.closed = true + nc.Client.CloseIdleConnections() + return nil +} + +// dial opens a new connection to tailcontrol, fetching the server noise key +// if not cached. +func (nc *Client) dial(ctx context.Context) (*Conn, error) { + if tailcfg.CurrentCapabilityVersion > math.MaxUint16 { + // Panic, because a test should have started failing several + // thousand version numbers before getting to this point. 
+ panic("capability version is too high to fit in the wire protocol") + } + + var dialPlan *tailcfg.ControlDialPlan + if nc.opts.DialPlan != nil { + dialPlan = nc.opts.DialPlan() + } + + // If we have a dial plan, then set our timeout as slightly longer than + // the maximum amount of time contained therein; we assume that + // explicit instructions on timeouts are more useful than a single + // hard-coded timeout. + // + // The default value of 5 is chosen so that, when there's no dial plan, + // we retain the previous behaviour of 10 seconds end-to-end timeout. + timeoutSec := 5.0 + if dialPlan != nil { + for _, c := range dialPlan.Candidates { + if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec { + timeoutSec = v + } + } + } + + // After we establish a connection, we need some time to actually + // upgrade it into a Noise connection. With a ballpark worst-case RTT + // of 1000ms, give ourselves an extra 5 seconds to complete the + // handshake. + timeoutSec += 5 + + // Be extremely defensive and ensure that the timeout is in the range + // [5, 60] seconds (e.g. if we accidentally get a negative number). 
+ if timeoutSec > 60 { + timeoutSec = 60 + } else if timeoutSec < 5 { + timeoutSec = 5 + } + + timeout := time.Duration(timeoutSec * float64(time.Second)) + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + chd := &controlhttp.Dialer{ + Hostname: nc.host, + HTTPPort: nc.httpPort, + HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort), + MachineKey: nc.opts.PrivKey, + ControlKey: nc.opts.ServerPubKey, + ProtocolVersion: cmp.Or(nc.opts.ProtocolVersion, uint16(tailcfg.CurrentCapabilityVersion)), + Dialer: nc.opts.Dialer.SystemDial, + DNSCache: nc.opts.DNSCache, + DialPlan: dialPlan, + Logf: nc.logf, + NetMon: nc.opts.NetMon, + HealthTracker: nc.opts.HealthTracker, + Clock: tstime.StdClock{}, + } + clientConn, err := chd.Dial(ctx) + if err != nil { + return nil, err + } + + ncc := NewConn(clientConn.Conn) + + nc.mu.Lock() + if nc.closed { + nc.mu.Unlock() + ncc.Close() // Needs to be called without holding the lock. + return nil, errors.New("noise client closed") + } + defer nc.mu.Unlock() + return ncc, nil +} + +// post does a POST to the control server at the given path, JSON-encoding body. +// The provided nodeKey is an optional load balancing hint. +func (nc *Client) Post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { + return nc.DoWithBody(ctx, "POST", path, nodeKey, body) +} + +func (nc *Client) DoWithBody(ctx context.Context, method, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { + jbody, err := json.Marshal(body) + if err != nil { + return nil, err + } + req, err := http.NewRequestWithContext(ctx, method, "https://"+nc.host+path, bytes.NewReader(jbody)) + if err != nil { + return nil, err + } + AddLBHeader(req, nodeKey) + req.Header.Set("Content-Type", "application/json") + return nc.Do(req) +} + +// AddLBHeader adds the load balancer header to req if nodeKey is non-zero. 
+func AddLBHeader(req *http.Request, nodeKey key.NodePublic) { + if !nodeKey.IsZero() { + req.Header.Add(tailcfg.LBHeader, nodeKey.String()) + } +} diff --git a/control/controlclient/noise_test.go b/control/ts2021/client_test.go similarity index 80% rename from control/controlclient/noise_test.go rename to control/ts2021/client_test.go index 0022bdf880653..72fa1f44264c3 100644 --- a/control/controlclient/noise_test.go +++ b/control/ts2021/client_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package controlclient +package ts2021 import ( "context" @@ -10,19 +10,20 @@ import ( "io" "math" "net/http" + "net/http/httptrace" + "sync/atomic" "testing" "time" "golang.org/x/net/http2" "tailscale.com/control/controlhttp/controlhttpserver" - "tailscale.com/control/ts2021" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/tstest/nettest" "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/util/eventbus/eventbustest" + "tailscale.com/util/must" ) // maxAllowedNoiseVersion is the highest we expect the Tailscale @@ -55,14 +56,23 @@ func TestNoiseClientHTTP2Upgrade_earlyPayload(t *testing.T) { }.run(t) } -func makeClientWithURL(t *testing.T, url string) *NoiseClient { - nc, err := NewNoiseClient(NoiseOpts{ - Logf: t.Logf, - ServerURL: url, +var ( + testPrivKey = key.NewMachine() + testServerPub = key.NewMachine().Public() +) + +func makeClientWithURL(t *testing.T, url string) *Client { + nc, err := NewClient(ClientOpts{ + Logf: t.Logf, + PrivKey: testPrivKey, + ServerPubKey: testServerPub, + ServerURL: url, + Dialer: tsdial.NewDialer(netmon.NewStatic()), }) if err != nil { t.Fatal(err) } + t.Cleanup(func() { nc.Close() }) return nc } @@ -176,7 +186,6 @@ func (tt noiseClientTest) run(t *testing.T) { serverPrivate := key.NewMachine() clientPrivate := key.NewMachine() chalPrivate := key.NewChallenge() - bus := eventbustest.NewBus(t) const msg = "Hello, client" 
h2 := &http2.Server{} @@ -196,12 +205,11 @@ func (tt noiseClientTest) run(t *testing.T) { defer hs.Close() dialer := tsdial.NewDialer(netmon.NewStatic()) - dialer.SetBus(bus) if nettest.PreferMemNetwork() { dialer.SetSystemDialerForTest(nw.Dial) } - nc, err := NewNoiseClient(NoiseOpts{ + nc, err := NewClient(ClientOpts{ PrivKey: clientPrivate, ServerPubKey: serverPrivate.Public(), ServerURL: hs.URL, @@ -212,28 +220,39 @@ func (tt noiseClientTest) run(t *testing.T) { t.Fatal(err) } - // Get a conn and verify it read its early payload before the http/2 - // handshake. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - c, err := nc.getConn(ctx) - if err != nil { - t.Fatal(err) - } - payload, err := c.GetEarlyPayload(ctx) - if err != nil { - t.Fatal("timed out waiting for didReadHeaderCh") - } + var sawConn atomic.Bool + trace := httptrace.WithClientTrace(t.Context(), &httptrace.ClientTrace{ + GotConn: func(ci httptrace.GotConnInfo) { + ncc, ok := ci.Conn.(*Conn) + if !ok { + // This trace hook sees two dials: the lower-level controlhttp upgrade's + // dial (a tsdial.sysConn), and then the *ts2021.Conn we want. + // Ignore the first one. 
+ return + } + sawConn.Store(true) - gotNonNil := payload != nil - if gotNonNil != tt.sendEarlyPayload { - t.Errorf("sendEarlyPayload = %v but got earlyPayload = %T", tt.sendEarlyPayload, payload) - } - if payload != nil { - if payload.NodeKeyChallenge != chalPrivate.Public() { - t.Errorf("earlyPayload.NodeKeyChallenge = %v; want %v", payload.NodeKeyChallenge, chalPrivate.Public()) - } - } + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + payload, err := ncc.GetEarlyPayload(ctx) + if err != nil { + t.Errorf("GetEarlyPayload: %v", err) + return + } + + gotNonNil := payload != nil + if gotNonNil != tt.sendEarlyPayload { + t.Errorf("sendEarlyPayload = %v but got earlyPayload = %T", tt.sendEarlyPayload, payload) + } + if payload != nil { + if payload.NodeKeyChallenge != chalPrivate.Public() { + t.Errorf("earlyPayload.NodeKeyChallenge = %v; want %v", payload.NodeKeyChallenge, chalPrivate.Public()) + } + } + }, + }) + req := must.Get(http.NewRequestWithContext(trace, "GET", "https://unused.example/", nil)) checkRes := func(t *testing.T, res *http.Response) { t.Helper() @@ -247,15 +266,19 @@ func (tt noiseClientTest) run(t *testing.T) { } } - // And verify we can do HTTP/2 against that conn. - res, err := (&http.Client{Transport: c}).Get("https://unused.example/") + // Verify we can do HTTP/2 against that conn. + res, err := nc.Do(req) if err != nil { t.Fatal(err) } checkRes(t, res) + if !sawConn.Load() { + t.Error("ClientTrace.GotConn never saw the *ts2021.Conn") + } + // And try using the high-level nc.post API as well. 
- res, err = nc.post(context.Background(), "/", key.NodePublic{}, nil) + res, err = nc.Post(context.Background(), "/", key.NodePublic{}, nil) if err != nil { t.Fatal(err) } @@ -310,7 +333,7 @@ func (up *Upgrader) ServeHTTP(w http.ResponseWriter, r *http.Request) { // https://httpwg.org/specs/rfc7540.html#rfc.section.4.1 (Especially not // an HTTP/2 settings frame, which isn't of type 'T') var notH2Frame [5]byte - copy(notH2Frame[:], ts2021.EarlyPayloadMagic) + copy(notH2Frame[:], EarlyPayloadMagic) var lenBuf [4]byte binary.BigEndian.PutUint32(lenBuf[:], uint32(len(earlyJSON))) // These writes are all buffered by caller, so fine to do them diff --git a/control/ts2021/conn.go b/control/ts2021/conn.go index 99b1f24cbe7f8..ecf184d3c3819 100644 --- a/control/ts2021/conn.go +++ b/control/ts2021/conn.go @@ -13,10 +13,8 @@ import ( "encoding/json" "errors" "io" - "net/http" "sync" - "golang.org/x/net/http2" "tailscale.com/control/controlbase" "tailscale.com/tailcfg" ) @@ -27,11 +25,11 @@ import ( // the pool when the connection is closed, properly handles an optional "early // payload" that's sent prior to beginning the HTTP/2 session, and provides a // way to return a connection to a pool when the connection is closed. +// +// Use [NewConn] to build a new Conn if you want [Conn.GetEarlyPayload] to work. +// Otherwise making a Conn directly, only setting Conn, is fine. type Conn struct { *controlbase.Conn - id int - onClose func(int) - h2cc *http2.ClientConn readHeaderOnce sync.Once // guards init of reader field reader io.Reader // (effectively Conn.Reader after header) @@ -40,31 +38,18 @@ type Conn struct { earlyPayloadErr error } -// New creates a new Conn that wraps the given controlbase.Conn. +// NewConn creates a new Conn that wraps the given controlbase.Conn. // // h2t is the HTTP/2 transport to use for the connection; a new // http2.ClientConn will be created that reads from the returned Conn. // // connID should be a unique ID for this connection. 
When the Conn is closed, // the onClose function will be called with the connID if it is non-nil. -func New(conn *controlbase.Conn, h2t *http2.Transport, connID int, onClose func(int)) (*Conn, error) { - ncc := &Conn{ +func NewConn(conn *controlbase.Conn) *Conn { + return &Conn{ Conn: conn, - id: connID, - onClose: onClose, earlyPayloadReady: make(chan struct{}), } - h2cc, err := h2t.NewClientConn(ncc) - if err != nil { - return nil, err - } - ncc.h2cc = h2cc - return ncc, nil -} - -// RoundTrip implements the http.RoundTripper interface. -func (c *Conn) RoundTrip(r *http.Request) (*http.Response, error) { - return c.h2cc.RoundTrip(r) } // GetEarlyPayload waits for the early Noise payload to arrive. @@ -74,6 +59,15 @@ func (c *Conn) RoundTrip(r *http.Request) (*http.Response, error) { // early Noise payload is ready (if any) and will return the same result for // the lifetime of the Conn. func (c *Conn) GetEarlyPayload(ctx context.Context) (*tailcfg.EarlyNoise, error) { + if c.earlyPayloadReady == nil { + return nil, errors.New("Conn was not created with NewConn; early payload not supported") + } + select { + case <-c.earlyPayloadReady: + return c.earlyPayload, c.earlyPayloadErr + default: + go c.readHeaderOnce.Do(c.readHeader) + } select { case <-c.earlyPayloadReady: return c.earlyPayload, c.earlyPayloadErr @@ -82,12 +76,6 @@ func (c *Conn) GetEarlyPayload(ctx context.Context) (*tailcfg.EarlyNoise, error) } } -// CanTakeNewRequest reports whether the underlying HTTP/2 connection can take -// a new request, meaning it has not been closed or received or sent a GOAWAY. 
-func (c *Conn) CanTakeNewRequest() bool { - return c.h2cc.CanTakeNewRequest() -} - // The first 9 bytes from the server to client over Noise are either an HTTP/2 // settings frame (a normal HTTP/2 setup) or, as we added later, an "early payload" // header that's also 9 bytes long: 5 bytes (EarlyPayloadMagic) followed by 4 bytes @@ -122,7 +110,9 @@ func (c *Conn) Read(p []byte) (n int, err error) { // c.earlyPayload, closing c.earlyPayloadReady, and initializing c.reader for // future reads. func (c *Conn) readHeader() { - defer close(c.earlyPayloadReady) + if c.earlyPayloadReady != nil { + defer close(c.earlyPayloadReady) + } setErr := func(err error) { c.reader = returnErrReader{err} @@ -156,14 +146,3 @@ func (c *Conn) readHeader() { } c.reader = c.Conn } - -// Close closes the connection. -func (c *Conn) Close() error { - if err := c.Conn.Close(); err != nil { - return err - } - if c.onClose != nil { - c.onClose(c.id) - } - return nil -} diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index bec196a2e7378..87b58f2a02e4d 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -45,6 +45,13 @@ func NewDialer(netMon *netmon.Monitor) *Dialer { return d } +// NewFromFuncForDebug is like NewDialer but takes a netx.DialFunc +// and no netMon. It's meant exclusively for the "tailscale debug ts2021" +// debug command, and perhaps tests. +func NewFromFuncForDebug(logf logger.Logf, dial netx.DialFunc) *Dialer { + return &Dialer{sysDialForTest: dial, Logf: logf} +} + // Dialer dials out of tailscaled, while taking care of details while // handling the dozens of edge cases depending on the server mode // (TUN, netstack), the OS network sandboxing style (macOS/iOS @@ -420,7 +427,7 @@ func (d *Dialer) SetSystemDialerForTest(fn netx.DialFunc) { // Control and (in the future, as of 2022-04-27) DERPs.. 
func (d *Dialer) SystemDial(ctx context.Context, network, addr string) (net.Conn, error) { d.mu.Lock() - if d.netMon == nil { + if d.netMon == nil && d.sysDialForTest == nil { d.mu.Unlock() if testenv.InTest() { panic("SystemDial requires a netmon.Monitor; call SetNetMon first") diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 1f9609745dddd..a0d9f9ebbeb87 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -125,7 +125,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient @@ -328,13 +328,10 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy - golang.org/x/net/http2 from tailscale.com/control/controlclient+ - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from 
golang.org/x/net/icmp+ LDW golang.org/x/net/internal/socks from golang.org/x/net/proxy @@ -378,7 +375,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from golang.org/x/net/http2+ + compress/gzip from internal/profile+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ From 3c32f87624ca2cbe384dc4b7a2e3b1925c672e5d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 2 Oct 2025 09:18:55 -0700 Subject: [PATCH 0494/1093] feature/relayserver: use eventbus.Monitor to simplify lifecycle management (#17234) Instead of using separate channels to manage the lifecycle of the eventbus client, use the recently-added eventbus.Monitor, which handles signaling the processing loop to stop and waiting for it to complete. This allows us to simplify some of the setup and cleanup code in the relay server. Updates #15160 Change-Id: Ia1a47ce2e5a31bc8f546dca4c56c3141a40d67af Signed-off-by: M. J. 
Fromberger --- feature/relayserver/relayserver.go | 135 +++++++++++------------- feature/relayserver/relayserver_test.go | 12 +-- 2 files changed, 70 insertions(+), 77 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 91d07484c1137..95bf29a111407 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -82,11 +82,11 @@ type extension struct { logf logger.Logf bus *eventbus.Bus - mu sync.Mutex // guards the following fields - shutdown bool + mu sync.Mutex // guards the following fields + shutdown bool + port *int // ipn.Prefs.RelayServerPort, nil if disabled - disconnectFromBusCh chan struct{} // non-nil if consumeEventbusTopics is running, closed to signal it to return - busDoneCh chan struct{} // non-nil if consumeEventbusTopics is running, closed when it returns + eventSubs *eventbus.Monitor // nil if not connected to eventbus debugSessionsCh chan chan []status.ServerSession // non-nil if consumeEventbusTopics is running hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer } @@ -119,15 +119,13 @@ func (e *extension) handleBusLifetimeLocked() { if !busShouldBeRunning { e.disconnectFromBusLocked() return - } - if e.busDoneCh != nil { + } else if e.eventSubs != nil { return // already running } - port := *e.port - e.disconnectFromBusCh = make(chan struct{}) - e.busDoneCh = make(chan struct{}) + + ec := e.bus.Client("relayserver.extension") e.debugSessionsCh = make(chan chan []status.ServerSession) - go e.consumeEventbusTopics(port) + e.eventSubs = ptr.To(ec.Monitor(e.consumeEventbusTopics(ec, *e.port))) } func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { @@ -175,77 +173,72 @@ var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { // consumeEventbusTopics serves endpoint allocation requests over the eventbus. // It also serves [relayServer] debug information on a channel. 
-// consumeEventbusTopics must never acquire [extension.mu], which can be held by -// other goroutines while waiting to receive on [extension.busDoneCh] or the +// consumeEventbusTopics must never acquire [extension.mu], which can be held +// by other goroutines while waiting to receive on [extension.eventSubs] or the // inner [extension.debugSessionsCh] channel. -func (e *extension) consumeEventbusTopics(port int) { - defer close(e.busDoneCh) +func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*eventbus.Client) { + reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](ec) + respPub := eventbus.Publish[magicsock.UDPRelayAllocResp](ec) + debugSessionsCh := e.debugSessionsCh - eventClient := e.bus.Client("relayserver.extension") - reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](eventClient) - respPub := eventbus.Publish[magicsock.UDPRelayAllocResp](eventClient) - defer eventClient.Close() - - var rs relayServer // lazily initialized - defer func() { - if rs != nil { - rs.Close() - } - }() - for { - select { - case <-e.disconnectFromBusCh: - return - case <-eventClient.Done(): - return - case respCh := <-e.debugSessionsCh: - if rs == nil { - // Don't initialize the server simply for a debug request. - respCh <- nil - continue + return func(ec *eventbus.Client) { + var rs relayServer // lazily initialized + defer func() { + if rs != nil { + rs.Close() } - sessions := rs.GetSessions() - respCh <- sessions - case req := <-reqSub.Events(): - if rs == nil { - var err error - rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) + }() + for { + select { + case <-ec.Done(): + return + case respCh := <-debugSessionsCh: + if rs == nil { + // Don't initialize the server simply for a debug request. 
+ respCh <- nil + continue + } + sessions := rs.GetSessions() + respCh <- sessions + case req := <-reqSub.Events(): + if rs == nil { + var err error + rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) + if err != nil { + e.logf("error initializing server: %v", err) + continue + } + } + se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) if err != nil { - e.logf("error initializing server: %v", err) + e.logf("error allocating endpoint: %v", err) continue } - } - se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) - if err != nil { - e.logf("error allocating endpoint: %v", err) - continue - } - respPub.Publish(magicsock.UDPRelayAllocResp{ - ReqRxFromNodeKey: req.RxFromNodeKey, - ReqRxFromDiscoKey: req.RxFromDiscoKey, - Message: &disco.AllocateUDPRelayEndpointResponse{ - Generation: req.Message.Generation, - UDPRelayEndpoint: disco.UDPRelayEndpoint{ - ServerDisco: se.ServerDisco, - ClientDisco: se.ClientDisco, - LamportID: se.LamportID, - VNI: se.VNI, - BindLifetime: se.BindLifetime.Duration, - SteadyStateLifetime: se.SteadyStateLifetime.Duration, - AddrPorts: se.AddrPorts, + respPub.Publish(magicsock.UDPRelayAllocResp{ + ReqRxFromNodeKey: req.RxFromNodeKey, + ReqRxFromDiscoKey: req.RxFromDiscoKey, + Message: &disco.AllocateUDPRelayEndpointResponse{ + Generation: req.Message.Generation, + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, }, - }, - }) + }) + } } } } func (e *extension) disconnectFromBusLocked() { - if e.busDoneCh != nil { - close(e.disconnectFromBusCh) - <-e.busDoneCh - e.busDoneCh = nil - e.disconnectFromBusCh = nil + if e.eventSubs != nil { + e.eventSubs.Close() + e.eventSubs = nil e.debugSessionsCh = nil } } @@ -270,7 +263,7 @@ func (e *extension) 
serverStatus() status.ServerStatus { UDPPort: nil, Sessions: nil, } - if e.port == nil || e.busDoneCh == nil { + if e.port == nil || e.eventSubs == nil { return st } st.UDPPort = ptr.To(*e.port) @@ -281,7 +274,7 @@ func (e *extension) serverStatus() status.ServerStatus { resp := <-ch st.Sessions = resp return st - case <-e.busDoneCh: + case <-e.eventSubs.Done(): return st } } diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index d3fc36a83674a..89c004dc7bbc8 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -101,8 +101,8 @@ func Test_extension_profileStateChanged(t *testing.T) { } defer e.disconnectFromBusLocked() e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) - if tt.wantBusRunning != (e.busDoneCh != nil) { - t.Errorf("wantBusRunning: %v != (e.busDoneCh != nil): %v", tt.wantBusRunning, e.busDoneCh != nil) + if tt.wantBusRunning != (e.eventSubs != nil) { + t.Errorf("wantBusRunning: %v != (e.eventSubs != nil): %v", tt.wantBusRunning, e.eventSubs != nil) } if (tt.wantPort == nil) != (e.port == nil) { t.Errorf("(tt.wantPort == nil): %v != (e.port == nil): %v", tt.wantPort == nil, e.port == nil) @@ -118,7 +118,7 @@ func Test_extension_handleBusLifetimeLocked(t *testing.T) { name string shutdown bool port *int - busDoneCh chan struct{} + eventSubs *eventbus.Monitor hasNodeAttrDisableRelayServer bool wantBusRunning bool }{ @@ -157,13 +157,13 @@ func Test_extension_handleBusLifetimeLocked(t *testing.T) { bus: eventbus.New(), shutdown: tt.shutdown, port: tt.port, - busDoneCh: tt.busDoneCh, + eventSubs: tt.eventSubs, hasNodeAttrDisableRelayServer: tt.hasNodeAttrDisableRelayServer, } e.handleBusLifetimeLocked() defer e.disconnectFromBusLocked() - if tt.wantBusRunning != (e.busDoneCh != nil) { - t.Errorf("wantBusRunning: %v != (e.busDoneCh != nil): %v", tt.wantBusRunning, e.busDoneCh != nil) + if tt.wantBusRunning != (e.eventSubs != nil) { + 
t.Errorf("wantBusRunning: %v != (e.eventSubs != nil): %v", tt.wantBusRunning, e.eventSubs != nil) } }) } From 127a9672079213bdcf8d4f92c53e3442e231745b Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 2 Oct 2025 09:31:42 -0700 Subject: [PATCH 0495/1093] appc,*: publish events for route updates and storage (#17392) Add and wire up event publishers for these two event types in the AppConnector. Nothing currently subscribes to them, so this is harmless. Subscribers for these events will be added in a near-future commit. As part of this, move the appc.RouteInfo type to the types/appctype package. It does not contain any package-specific details from appc. Beside it, add appctype.RouteUpdate to carry route update event state, likewise not specific to appc. Update all usage of the appc.* types throughout to use appctype.* instead, and update depaware files to reflect these changes. Add a Close method to the AppConnector to make sure the client gets cleaned up when the connector is dropped (we re-create connectors). Update the unit tests in the appc package to also check the events published alongside calls to the RouteAdvertiser. For now the tests still rely on the RouteAdvertiser for correctness; this is OK for now as the two methods are always performed together. In the near future, we need to rework the tests so not require that, but that will require building some more test fixtures that we can handle separately. Updates #15160 Updates #17192 Change-Id: I184670ba2fb920e0d2cb2be7c6816259bca77afe Signed-off-by: M. J. 
Fromberger --- appc/appconnector.go | 70 +++++++---- appc/appconnector_test.go | 189 +++++++++++++++++++++++++++-- client/local/local.go | 8 +- cmd/derper/depaware.txt | 7 +- cmd/k8s-operator/depaware.txt | 4 +- cmd/tailscale/cli/appcroutes.go | 6 +- cmd/tailscale/depaware.txt | 5 +- cmd/tailscaled/depaware-min.txt | 4 +- cmd/tailscaled/depaware-minbox.txt | 4 +- cmd/tailscaled/depaware.txt | 4 +- cmd/tsidp/depaware.txt | 4 +- ipn/ipnlocal/local.go | 20 +-- ipn/ipnlocal/local_test.go | 13 +- ipn/ipnlocal/peerapi_test.go | 7 +- ipn/localapi/localapi.go | 4 +- tsnet/depaware.txt | 4 +- types/appctype/appconnector.go | 20 +++ 17 files changed, 294 insertions(+), 79 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index c86bf2d0fef96..2918840656377 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -12,12 +12,14 @@ package appc import ( "context" "fmt" + "maps" "net/netip" "slices" "strings" "sync" "time" + "tailscale.com/types/appctype" "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/clientmetric" @@ -114,19 +116,6 @@ func metricStoreRoutes(rate, nRoutes int64) { recordMetric(nRoutes, metricStoreRoutesNBuckets, metricStoreRoutesN) } -// RouteInfo is a data structure used to persist the in memory state of an AppConnector -// so that we can know, even after a restart, which routes came from ACLs and which were -// learned from domains. -type RouteInfo struct { - // Control is the routes from the 'routes' section of an app connector acl. - Control []netip.Prefix `json:",omitempty"` - // Domains are the routes discovered by observing DNS lookups for configured domains. - Domains map[string][]netip.Addr `json:",omitempty"` - // Wildcards are the configured DNS lookup domains to observe. When a DNS query matches Wildcards, - // its result is added to Domains. 
- Wildcards []string `json:",omitempty"` -} - // AppConnector is an implementation of an AppConnector that performs // its function as a subsystem inside of a tailscale node. At the control plane // side App Connector routing is configured in terms of domains rather than IP @@ -141,9 +130,12 @@ type AppConnector struct { logf logger.Logf eventBus *eventbus.Bus routeAdvertiser RouteAdvertiser + pubClient *eventbus.Client + updatePub *eventbus.Publisher[appctype.RouteUpdate] + storePub *eventbus.Publisher[appctype.RouteInfo] // storeRoutesFunc will be called to persist routes if it is not nil. - storeRoutesFunc func(*RouteInfo) error + storeRoutesFunc func(*appctype.RouteInfo) error // mu guards the fields that follow mu sync.Mutex @@ -181,11 +173,11 @@ type Config struct { // RouteInfo, if non-nil, use used as the initial set of routes for the // connector. If nil, the connector starts empty. - RouteInfo *RouteInfo + RouteInfo *appctype.RouteInfo // StoreRoutesFunc, if non-nil, is called when the connector's routes // change, to allow the routes to be persisted. - StoreRoutesFunc func(*RouteInfo) error + StoreRoutesFunc func(*appctype.RouteInfo) error } // NewAppConnector creates a new AppConnector. 
@@ -198,10 +190,14 @@ func NewAppConnector(c Config) *AppConnector { case c.RouteAdvertiser == nil: panic("missing route advertiser") } + ec := c.EventBus.Client("appc.AppConnector") ac := &AppConnector{ logf: logger.WithPrefix(c.Logf, "appc: "), eventBus: c.EventBus, + pubClient: ec, + updatePub: eventbus.Publish[appctype.RouteUpdate](ec), + storePub: eventbus.Publish[appctype.RouteInfo](ec), routeAdvertiser: c.RouteAdvertiser, storeRoutesFunc: c.StoreRoutesFunc, } @@ -228,6 +224,14 @@ func (e *AppConnector) ShouldStoreRoutes() bool { // storeRoutesLocked takes the current state of the AppConnector and persists it func (e *AppConnector) storeRoutesLocked() error { + if e.storePub.ShouldPublish() { + e.storePub.Publish(appctype.RouteInfo{ + // Clone here, as the subscriber will handle these outside our lock. + Control: slices.Clone(e.controlRoutes), + Domains: maps.Clone(e.domains), + Wildcards: slices.Clone(e.wildcards), + }) + } if !e.ShouldStoreRoutes() { return nil } @@ -240,7 +244,8 @@ func (e *AppConnector) storeRoutesLocked() error { e.writeRateMinute.update(numRoutes) e.writeRateDay.update(numRoutes) - return e.storeRoutesFunc(&RouteInfo{ + // TODO(creachdair): Remove this once it's delivered over the event bus. + return e.storeRoutesFunc(&appctype.RouteInfo{ Control: e.controlRoutes, Domains: e.domains, Wildcards: e.wildcards, @@ -283,6 +288,18 @@ func (e *AppConnector) Wait(ctx context.Context) { e.queue.Wait(ctx) } +// Close closes the connector and cleans up resources associated with it. +// It is safe (and a noop) to call Close on nil. +func (e *AppConnector) Close() { + if e == nil { + return + } + e.mu.Lock() + defer e.mu.Unlock() + e.queue.Shutdown() // TODO(creachadair): Should we wait for it too? 
+ e.pubClient.Close() +} + func (e *AppConnector) updateDomains(domains []string) { e.mu.Lock() defer e.mu.Unlock() @@ -323,11 +340,15 @@ func (e *AppConnector) updateDomains(domains []string) { toRemove = append(toRemove, netip.PrefixFrom(a, a.BitLen())) } } - e.queue.Add(func() { - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) - } - }) + + if len(toRemove) != 0 { + e.queue.Add(func() { + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) + } + }) + e.updatePub.Publish(appctype.RouteUpdate{Unadvertise: toRemove}) + } } e.logf("handling domains: %v and wildcards: %v", slicesx.MapKeys(e.domains), e.wildcards) @@ -377,6 +398,10 @@ nextRoute: e.logf("failed to unadvertise routes: %v: %v", toRemove, err) } }) + e.updatePub.Publish(appctype.RouteUpdate{ + Advertise: routes, + Unadvertise: toRemove, + }) e.controlRoutes = routes if err := e.storeRoutesLocked(); err != nil { @@ -464,6 +489,7 @@ func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Pref e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) return } + e.updatePub.Publish(appctype.RouteUpdate{Advertise: routes}) e.mu.Lock() defer e.mu.Unlock() diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index c23908c28231a..91f0185d0b23d 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -4,6 +4,8 @@ package appc import ( + stdcmp "cmp" + "fmt" "net/netip" "reflect" "slices" @@ -11,9 +13,12 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc/appctest" "tailscale.com/tstest" + "tailscale.com/types/appctype" "tailscale.com/util/clientmetric" 
"tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" @@ -21,7 +26,7 @@ import ( "tailscale.com/util/slicesx" ) -func fakeStoreRoutes(*RouteInfo) error { return nil } +func fakeStoreRoutes(*appctype.RouteInfo) error { return nil } func TestUpdateDomains(t *testing.T) { ctx := t.Context() @@ -33,14 +38,15 @@ func TestUpdateDomains(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: &appctest.RouteCollector{}, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: &appctest.RouteCollector{}}) } - a.UpdateDomains([]string{"example.com"}) + t.Cleanup(a.Close) + a.UpdateDomains([]string{"example.com"}) a.Wait(ctx) if got, want := a.Domains().AsSlice(), []string{"example.com"}; !slices.Equal(got, want) { t.Errorf("got %v; want %v", got, want) @@ -68,6 +74,7 @@ func TestUpdateRoutes(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { @@ -75,11 +82,14 @@ func TestUpdateRoutes(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, + RouteInfo: &appctype.RouteInfo{}, + StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) + a.updateDomains([]string{"*.example.com"}) // This route should be collapsed into the range @@ -116,6 +126,20 @@ func TestUpdateRoutes(t *testing.T) { if !slices.EqualFunc(rc.RemovedRoutes(), wantRemoved, prefixEqual) { t.Fatalf("unexpected removed routes: %v", rc.RemovedRoutes()) } + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.2.1/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: 
prefixes("192.0.0.1/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{ + Advertise: prefixes("192.0.0.1/32", "192.0.2.0/24"), + Unadvertise: prefixes("192.0.2.1/32"), + }), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } @@ -123,6 +147,7 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { @@ -130,12 +155,14 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) + mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")} @@ -145,12 +172,23 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { if !slices.EqualFunc(routes, rc.Routes(), prefixEqual) { t.Fatalf("got %v, want %v", rc.Routes(), routes) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{ + Advertise: prefixes("192.0.2.0/24"), + Unadvertise: prefixes("192.0.2.1/32"), + }), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } func TestDomainRoutes(t *testing.T) { bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { @@ -158,12 +196,13 @@ func TestDomainRoutes(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, 
StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) a.updateDomains([]string{"example.com"}) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { t.Errorf("ObserveDNSResponse: %v", err) @@ -177,6 +216,13 @@ func TestDomainRoutes(t *testing.T) { if got := a.DomainRoutes(); !reflect.DeepEqual(got, want) { t.Fatalf("DomainRoutes: got %v, want %v", got, want) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } @@ -184,6 +230,7 @@ func TestObserveDNSResponse(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { @@ -191,12 +238,13 @@ func TestObserveDNSResponse(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) // a has no domains configured, so it should not advertise any routes if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -273,6 +321,22 @@ func TestObserveDNSResponse(t *testing.T) { if !slices.Contains(a.domains["example.com"], netip.MustParseAddr("192.0.2.1")) { t.Errorf("missing %v from %v", "192.0.2.1", a.domains["exmaple.com"]) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), // from initial DNS response, via example.com + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.9/32")}), // from CNAME response + 
eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.10/32")}), // from CNAME response, mid-chain + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("2001:db8::1/128")}), // v6 DNS response + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.2.0/24")}), // additional prefix + eventbustest.Type[appctype.RouteInfo](), + // N.B. no update for 192.0.2.1 as it is already covered + ); err != nil { + t.Error(err) + } } } @@ -280,6 +344,7 @@ func TestWildcardDomains(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} var a *AppConnector if shouldStore { @@ -287,12 +352,13 @@ func TestWildcardDomains(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) a.updateDomains([]string{"*.example.com"}) if err := a.ObserveDNSResponse(dnsResponse("foo.example.com.", "192.0.0.8")); err != nil { @@ -319,6 +385,13 @@ func TestWildcardDomains(t *testing.T) { if len(a.wildcards) != 1 { t.Errorf("expected only one wildcard domain, got %v", a.wildcards) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } @@ -437,6 +510,7 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -454,12 +528,14 @@ func 
TestUpdateRouteRouteRemoval(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) + // nothing has yet been advertised assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -482,6 +558,13 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.2/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32", "1.2.3.2/32")}), // no duplicates here + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } @@ -489,6 +572,7 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -506,12 +590,14 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) + assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) a.UpdateDomainsAndRoutes([]string{"a.example.com", "b.example.com"}, []netip.Prefix{}) @@ -544,6 +630,22 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + wantEvents := []any{ + // Each DNS record observed triggers an update. 
+ eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.2/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.3/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.4/32")}), + } + if shouldStore { + wantEvents = append(wantEvents, eqUpdate(appctype.RouteUpdate{ + Unadvertise: prefixes("1.2.3.3/32", "1.2.3.4/32"), + })) + } + if err := eventbustest.Expect(w, wantEvents...); err != nil { + t.Error(err) + } } } @@ -551,6 +653,7 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -568,12 +671,14 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) } + t.Cleanup(a.Close) + assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) a.UpdateDomainsAndRoutes([]string{"a.example.com", "*.b.example.com"}, []netip.Prefix{}) @@ -606,6 +711,22 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + wantEvents := []any{ + // Each DNS record observed triggers an update. 
+ eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.2/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.3/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.4/32")}), + } + if shouldStore { + wantEvents = append(wantEvents, eqUpdate(appctype.RouteUpdate{ + Unadvertise: prefixes("1.2.3.3/32", "1.2.3.4/32"), + })) + } + if err := eventbustest.Expect(w, wantEvents...); err != nil { + t.Error(err) + } } } @@ -708,17 +829,23 @@ func TestMetricBucketsAreSorted(t *testing.T) { // routeAdvertiser, calls to Advertise/UnadvertiseRoutes can end up calling // back into AppConnector via authReconfig. If everything is called // synchronously, this results in a deadlock on AppConnector.mu. +// +// TODO(creachadair, 2025-09-18): Remove this along with the advertiser +// interface once the LocalBackend is switched to use the event bus and the +// tests have been updated not to need it. func TestUpdateRoutesDeadlock(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} a := NewAppConnector(Config{ Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) + t.Cleanup(a.Close) advertiseCalled := new(atomic.Bool) unadvertiseCalled := new(atomic.Bool) @@ -762,4 +889,42 @@ func TestUpdateRoutesDeadlock(t *testing.T) { if want := []netip.Prefix{netip.MustParsePrefix("127.0.0.1/32")}; !slices.Equal(slices.Compact(rc.Routes()), want) { t.Fatalf("got %v, want %v", rc.Routes(), want) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("127.0.0.1/32", "127.0.0.2/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("127.0.0.1/32"), Unadvertise: prefixes("127.0.0.2/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); 
err != nil { + t.Error(err) + } +} + +type textUpdate struct { + Advertise []string + Unadvertise []string +} + +func routeUpdateToText(u appctype.RouteUpdate) textUpdate { + var out textUpdate + for _, p := range u.Advertise { + out.Advertise = append(out.Advertise, p.String()) + } + for _, p := range u.Unadvertise { + out.Unadvertise = append(out.Unadvertise, p.String()) + } + return out +} + +// eqUpdate generates an eventbus test filter that matches an appctype.RouteUpdate +// message equal to want, or reports an error giving a human-readable diff. +func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error { + return func(got appctype.RouteUpdate) error { + if diff := cmp.Diff(routeUpdateToText(got), routeUpdateToText(want), + cmpopts.SortSlices(stdcmp.Less[string]), + ); diff != "" { + return fmt.Errorf("wrong update (-got, +want):\n%s", diff) + } + return nil + } } diff --git a/client/local/local.go b/client/local/local.go index 8da8f57e5acf8..9faeadca33b38 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -27,7 +27,6 @@ import ( "sync" "time" - "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" "tailscale.com/drive" "tailscale.com/envknob" @@ -40,6 +39,7 @@ import ( "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" + "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/util/eventbus" @@ -1387,10 +1387,10 @@ func (lc *Client) ShutdownTailscaled(ctx context.Context) error { return err } -func (lc *Client) GetAppConnectorRouteInfo(ctx context.Context) (appc.RouteInfo, error) { +func (lc *Client) GetAppConnectorRouteInfo(ctx context.Context) (appctype.RouteInfo, error) { body, err := lc.get200(ctx, "/localapi/v0/appc-route-info") if err != nil { - return appc.RouteInfo{}, err + return appctype.RouteInfo{}, err } - return decodeJSON[appc.RouteInfo](body) + return decodeJSON[appctype.RouteInfo](body) } diff --git a/cmd/derper/depaware.txt
b/cmd/derper/depaware.txt index 0628afd63eeca..278d54b1fd6d9 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -74,7 +74,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/client/local 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ tailscale.com/client/local from tailscale.com/derp/derpserver tailscale.com/client/tailscale/apitype from tailscale.com/client/local @@ -124,6 +123,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/tsweb from tailscale.com/cmd/derper+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/derper tailscale.com/tsweb/varz from tailscale.com/tsweb+ + tailscale.com/types/appctype from tailscale.com/client/local tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/tailcfg+ @@ -140,14 +140,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/ipn+ tailscale.com/util/cibuild from tailscale.com/health - tailscale.com/util/clientmetric from tailscale.com/net/netmon+ + tailscale.com/util/clientmetric from tailscale.com/net/netmon tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/tsweb+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/hostinfo+ tailscale.com/util/eventbus from tailscale.com/net/netmon+ - tailscale.com/util/execqueue from tailscale.com/appc 💣 
tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/health+ @@ -183,7 +182,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/exp/constraints from tailscale.com/util/winutil+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting L golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/dns/dnsmessage from tailscale.com/net/dnscache golang.org/x/net/idna from golang.org/x/crypto/acme/autocert golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/proxy from tailscale.com/net/netns diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index eae1354a1ca68..be9ac3a089ee8 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -679,7 +679,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sigs.k8s.io/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json+ sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+ @@ -802,7 +802,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from 
tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ diff --git a/cmd/tailscale/cli/appcroutes.go b/cmd/tailscale/cli/appcroutes.go index 83443f56c8dc0..4a1ba87e35bcc 100644 --- a/cmd/tailscale/cli/appcroutes.go +++ b/cmd/tailscale/cli/appcroutes.go @@ -12,7 +12,7 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/appc" + "tailscale.com/types/appctype" ) var appcRoutesArgs struct { @@ -51,7 +51,7 @@ https://tailscale.com/kb/1281/app-connectors `), } -func getAllOutput(ri *appc.RouteInfo) (string, error) { +func getAllOutput(ri *appctype.RouteInfo) (string, error) { domains, err := json.MarshalIndent(ri.Domains, " ", " ") if err != nil { return "", err @@ -76,7 +76,7 @@ type domainCount struct { count int } -func getSummarizeLearnedOutput(ri *appc.RouteInfo) string { +func getSummarizeLearnedOutput(ri *appctype.RouteInfo) string { x := make([]domainCount, len(ri.Domains)) i := 0 maxDomainWidth := 0 diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 6facd19f98531..7b32fc2b45f52 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -73,7 +73,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep software.sslmate.com/src/go-pkcs12 from tailscale.com/cmd/tailscale/cli software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12 tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/client/local+ 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ tailscale.com/client/local from tailscale.com/client/tailscale+ L tailscale.com/client/systray from tailscale.com/cmd/tailscale/cli @@ -150,6 +149,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ + tailscale.com/types/appctype 
from tailscale.com/client/local+ tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/ipn+ @@ -175,7 +175,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ tailscale.com/util/eventbus from tailscale.com/client/local+ - tailscale.com/util/execqueue from tailscale.com/appc tailscale.com/util/groupmember from tailscale.com/client/web 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ @@ -232,7 +231,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L golang.org/x/image/math/f64 from github.com/fogleman/gg+ L golang.org/x/image/math/fixed from github.com/fogleman/gg+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpproxy+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index be13c7b680959..ba35ecd4a19ef 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -35,7 +35,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ 
tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled @@ -126,7 +126,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index a91aa8afdce07..e98c0da488e0b 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -48,7 +48,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale @@ -151,7 +151,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from 
tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 00c1a0ac42200..21e333af733d0 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -240,7 +240,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled tailscale.com/client/local from tailscale.com/client/web+ @@ -387,7 +387,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 4ddc5eda1462f..dfd338410eaa5 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -121,7 +121,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ 
tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale @@ -229,7 +229,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9e2fbb999fc5f..2af78b2befd24 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1108,6 +1108,7 @@ func (b *LocalBackend) Shutdown() { if b.notifyCancel != nil { b.notifyCancel() } + b.appConnector.Close() b.mu.Unlock() b.webClientShutdown() @@ -4783,25 +4784,28 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i }() if !prefs.AppConnector().Advertise { + b.appConnector.Close() // clean up a previous connector (safe on nil) b.appConnector = nil return } shouldAppCStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() if b.appConnector == nil || b.appConnector.ShouldStoreRoutes() != shouldAppCStoreRoutes { - var ri *appc.RouteInfo - var storeFunc func(*appc.RouteInfo) error + var ri *appctype.RouteInfo + var storeFunc func(*appctype.RouteInfo) error if shouldAppCStoreRoutes { var err error ri, err = b.readRouteInfoLocked() if err != nil { - ri = &appc.RouteInfo{} + ri = &appctype.RouteInfo{} if err != ipn.ErrStateNotExist { b.logf("Unsuccessful Read RouteInfo: ", err) } } storeFunc = b.storeRouteInfo } + + b.appConnector.Close() // clean up a previous connector (safe on nil) b.appConnector = appc.NewAppConnector(appc.Config{ Logf: b.logf, EventBus: b.sys.Bus.Get(), @@ -6988,7 
+6992,7 @@ func namespaceKeyForCurrentProfile(pm *profileManager, key ipn.StateKey) ipn.Sta const routeInfoStateStoreKey ipn.StateKey = "_routeInfo" -func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { +func (b *LocalBackend) storeRouteInfo(ri *appctype.RouteInfo) error { if !buildfeatures.HasAppConnectors { return feature.ErrUnavailable } @@ -7005,16 +7009,16 @@ func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { return b.pm.WriteState(key, bs) } -func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { +func (b *LocalBackend) readRouteInfoLocked() (*appctype.RouteInfo, error) { if !buildfeatures.HasAppConnectors { return nil, feature.ErrUnavailable } if b.pm.CurrentProfile().ID() == "" { - return &appc.RouteInfo{}, nil + return &appctype.RouteInfo{}, nil } key := namespaceKeyForCurrentProfile(b.pm, routeInfoStateStoreKey) bs, err := b.pm.Store().ReadState(key) - ri := &appc.RouteInfo{} + ri := &appctype.RouteInfo{} if err != nil { return nil, err } @@ -7027,7 +7031,7 @@ func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { // ReadRouteInfo returns the app connector route information that is // stored in prefs to be consistent across restarts. It should be up // to date with the RouteInfo in memory being used by appc. 
-func (b *LocalBackend) ReadRouteInfo() (*appc.RouteInfo, error) { +func (b *LocalBackend) ReadRouteInfo() (*appctype.RouteInfo, error) { b.mu.Lock() defer b.mu.Unlock() return b.readRouteInfoLocked() diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 6737266be80d4..a662793dbac20 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -49,6 +49,7 @@ import ( "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/tstest/deptest" + "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/ipproto" "tailscale.com/types/key" @@ -74,7 +75,7 @@ import ( "tailscale.com/wgengine/wgcfg" ) -func fakeStoreRoutes(*appc.RouteInfo) error { return nil } +func fakeStoreRoutes(*appctype.RouteInfo) error { return nil } func inRemove(ip netip.Addr) bool { for _, pfx := range removeFromDefaultRoute { @@ -2314,7 +2315,7 @@ func TestOfferingAppConnector(t *testing.T) { rc := &appctest.RouteCollector{} if shouldStore { b.appConnector = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, RouteInfo: &appc.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, + Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) @@ -2381,7 +2382,7 @@ func TestObserveDNSResponse(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &appc.RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { @@ -2548,7 +2549,7 @@ func TestBackfillAppConnectorRoutes(t *testing.T) { // Store the test IP in profile data, but not in Prefs.AdvertiseRoutes. 
b.ControlKnobs().AppCStoreRoutes.Store(true) - if err := b.storeRouteInfo(&appc.RouteInfo{ + if err := b.storeRouteInfo(&appctype.RouteInfo{ Domains: map[string][]netip.Addr{ "example.com": {ip}, }, @@ -5501,10 +5502,10 @@ func TestReadWriteRouteInfo(t *testing.T) { b.pm.currentProfile = prof1.View() // set up routeInfo - ri1 := &appc.RouteInfo{} + ri1 := &appctype.RouteInfo{} ri1.Wildcards = []string{"1"} - ri2 := &appc.RouteInfo{} + ri2 := &appctype.RouteInfo{} ri2.Wildcards = []string{"2"} // read before write diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 43b3c49fc6520..a16d55b8c2072 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -23,6 +23,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/tstest" + "tailscale.com/types/appctype" "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/util/eventbus/eventbustest" @@ -261,7 +262,7 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: &appctest.RouteCollector{}, - RouteInfo: &appc.RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { @@ -346,7 +347,7 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc, - RouteInfo: &appc.RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { @@ -419,7 +420,7 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc, - RouteInfo: &appc.RouteInfo{}, + RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, }) } else { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index b07df8b02f4db..723081e625452 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -23,7 +23,6 @@ import ( "time" "golang.org/x/net/dns/dnsmessage" - "tailscale.com/appc" 
"tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" "tailscale.com/feature" @@ -38,6 +37,7 @@ import ( "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/tstime" + "tailscale.com/types/appctype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" @@ -1684,7 +1684,7 @@ func (h *Handler) serveGetAppcRouteInfo(w http.ResponseWriter, r *http.Request) res, err := h.b.ReadRouteInfo() if err != nil { if errors.Is(err, ipn.ErrStateNotExist) { - res = &appc.RouteInfo{} + res = &appctype.RouteInfo{} } else { WriteErrorJSON(w, err) return diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index a0d9f9ebbeb87..739d0b33bf891 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -117,7 +117,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale @@ -224,7 +224,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/tstime/rate from tailscale.com/wgengine/filter LDW tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ diff --git a/types/appctype/appconnector.go b/types/appctype/appconnector.go index f4ced65a41b14..567ab755f0598 
100644 --- a/types/appctype/appconnector.go +++ b/types/appctype/appconnector.go @@ -73,3 +73,23 @@ type AppConnectorAttr struct { // tag of the form tag:. Connectors []string `json:"connectors,omitempty"` } + +// RouteInfo is a data structure used to persist the in memory state of an AppConnector +// so that we can know, even after a restart, which routes came from ACLs and which were +// learned from domains. +type RouteInfo struct { + // Control is the routes from the 'routes' section of an app connector acl. + Control []netip.Prefix `json:",omitempty"` + // Domains are the routes discovered by observing DNS lookups for configured domains. + Domains map[string][]netip.Addr `json:",omitempty"` + // Wildcards are the configured DNS lookup domains to observe. When a DNS query matches Wildcards, + // its result is added to Domains. + Wildcards []string `json:",omitempty"` +} + +// RouteUpdate records a set of routes that should be advertised and a set of +// routes that should be unadvertised in event bus updates. 
+type RouteUpdate struct { + Advertise []netip.Prefix + Unadvertise []netip.Prefix +} From 3ae7a351b4b2e9f33ca9f63dbc4128065de0e22d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 18:34:11 -0700 Subject: [PATCH 0496/1093] feature/featuretags: make clientmetrics optional Saves 57 KB Updates #12614 Change-Id: If7eebec12b3cb30ae6264171d36a258c04b05a70 Signed-off-by: Brad Fitzpatrick --- .../feature_clientmetrics_disabled.go | 13 ++++++++++ .../feature_clientmetrics_enabled.go | 13 ++++++++++ feature/featuretags/featuretags.go | 1 + ipn/localapi/localapi.go | 5 ++++ util/clientmetric/clientmetric.go | 2 ++ util/clientmetric/omit.go | 24 +++++++++++++++++++ 6 files changed, 58 insertions(+) create mode 100644 feature/buildfeatures/feature_clientmetrics_disabled.go create mode 100644 feature/buildfeatures/feature_clientmetrics_enabled.go create mode 100644 util/clientmetric/omit.go diff --git a/feature/buildfeatures/feature_clientmetrics_disabled.go b/feature/buildfeatures/feature_clientmetrics_disabled.go new file mode 100644 index 0000000000000..721908bb079a2 --- /dev/null +++ b/feature/buildfeatures/feature_clientmetrics_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_clientmetrics + +package buildfeatures + +// HasClientMetrics is whether the binary was built with support for modular feature "Client metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientmetrics" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasClientMetrics = false diff --git a/feature/buildfeatures/feature_clientmetrics_enabled.go b/feature/buildfeatures/feature_clientmetrics_enabled.go new file mode 100644 index 0000000000000..deaeb6e69b1c3 --- /dev/null +++ b/feature/buildfeatures/feature_clientmetrics_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_clientmetrics + +package buildfeatures + +// HasClientMetrics is whether the binary was built with support for modular feature "Client metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientmetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientMetrics = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index e9d566a861afc..5792a1927e70b 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -97,6 +97,7 @@ var Features = map[FeatureTag]FeatureMeta{ "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, "cliconndiag": {Sym: "CLIConnDiag", Desc: "CLI connection error diagnostics"}, + "clientmetrics": {Sym: "ClientMetrics", Desc: "Client metrics support"}, "clientupdate": {Sym: "ClientUpdate", Desc: "Client auto-update support"}, "completion": {Sym: "Completion", Desc: "CLI shell completion"}, "dbus": {Sym: "DBus", Desc: "Linux DBus support"}, diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 723081e625452..7f6452ad344b2 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1218,6 +1218,11 @@ func (h *Handler) serveHandlePushMessage(w http.ResponseWriter, r *http.Request) } func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasClientMetrics { + 
w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(struct{}{}) + return + } if r.Method != httpm.POST { http.Error(w, "unsupported method", http.StatusMethodNotAllowed) return diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 2243ec3deaff9..65223e6a9375a 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_clientmetrics + // Package clientmetric provides client-side metrics whose values // get occasionally logged. package clientmetric diff --git a/util/clientmetric/omit.go b/util/clientmetric/omit.go new file mode 100644 index 0000000000000..5349fc7244cd7 --- /dev/null +++ b/util/clientmetric/omit.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_clientmetrics + +package clientmetric + +type Metric struct{} + +func (*Metric) Add(int64) {} +func (*Metric) Set(int64) {} +func (*Metric) Value() int64 { return 0 } +func (*Metric) Register(expvarInt any) {} +func (*Metric) UnregisterAll() {} + +func HasPublished(string) bool { panic("unreachable") } +func EncodeLogTailMetricsDelta() string { return "" } +func WritePrometheusExpositionFormat(any) {} + +var zeroMetric Metric + +func NewCounter(string) *Metric { return &zeroMetric } +func NewGauge(string) *Metric { return &zeroMetric } +func NewAggregateCounter(string) *Metric { return &zeroMetric } From 2cd518a8b651b0018a7fed84df45cc82e62987db Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 15:19:27 -0700 Subject: [PATCH 0497/1093] control/controlclient: optimize zstd decode of KeepAlive messages Maybe it matters? At least globally across all nodes? 
Fixes #17343 Change-Id: I3f61758ea37de527e16602ec1a6e453d913b3195 Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 18 +++++++++++++-- control/controlclient/map.go | 3 +++ control/controlclient/map_test.go | 38 +++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 2 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index a3f908da41652..069affbd1a0f6 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1091,7 +1091,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap vlogf("netmap: read body after %v", time.Since(t0).Round(time.Millisecond)) var resp tailcfg.MapResponse - if err := c.decodeMsg(msg, &resp); err != nil { + if err := sess.decodeMsg(msg, &resp); err != nil { vlogf("netmap: decode error: %v", err) return err } @@ -1240,12 +1240,23 @@ func decode(res *http.Response, v any) error { var jsonEscapedZero = []byte(`\u0000`) +const justKeepAliveStr = `{"KeepAlive":true}` + // decodeMsg is responsible for uncompressing msg and unmarshaling into v. -func (c *Direct) decodeMsg(compressedMsg []byte, v any) error { +func (sess *mapSession) decodeMsg(compressedMsg []byte, v *tailcfg.MapResponse) error { + // Fast path for common case of keep-alive message. + // See tailscale/tailscale#17343. 
+ if sess.keepAliveZ != nil && bytes.Equal(compressedMsg, sess.keepAliveZ) { + v.KeepAlive = true + return nil + } + b, err := zstdframe.AppendDecode(nil, compressedMsg) if err != nil { return err } + sess.ztdDecodesForTest++ + if DevKnob.DumpNetMaps() { var buf bytes.Buffer json.Indent(&buf, b, "", " ") @@ -1258,6 +1269,9 @@ func (c *Direct) decodeMsg(compressedMsg []byte, v any) error { if err := json.Unmarshal(b, v); err != nil { return fmt.Errorf("response: %v", err) } + if v.KeepAlive && string(b) == justKeepAliveStr { + sess.keepAliveZ = compressedMsg + } return nil } diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 22cea5acaa2f7..eafdb2d565a76 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -57,6 +57,9 @@ type mapSession struct { altClock tstime.Clock // if nil, regular time is used cancel context.CancelFunc // always non-nil, shuts down caller's base long poll context + keepAliveZ []byte // if non-nil, the learned zstd encoding of the just-KeepAlive message for this session + ztdDecodesForTest int // for testing + // sessionAliveCtx is a Background-based context that's alive for the // duration of the mapSession that we own the lifetime of. It's closed by // sessionAliveCtxClose. 
diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 4e8c911e3d10e..2be4b6ad70b2d 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -4,6 +4,7 @@ package controlclient import ( + "bytes" "context" "encoding/json" "fmt" @@ -33,6 +34,7 @@ import ( "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" + "tailscale.com/util/zstdframe" ) func eps(s ...string) []netip.AddrPort { @@ -1445,3 +1447,39 @@ func TestNetmapForMapResponseForDebug(t *testing.T) { t.Errorf("mismatch\nnm1: %s\nnm2: %s\n", logger.AsJSON(nm1), logger.AsJSON(nm2)) } } + +func TestLearnZstdOfKeepAlive(t *testing.T) { + keepAliveMsgZstd := (func() []byte { + msg := must.Get(json.Marshal(tailcfg.MapResponse{ + KeepAlive: true, + })) + return zstdframe.AppendEncode(nil, msg, zstdframe.FastestCompression) + })() + + sess := newTestMapSession(t, nil) + + // The first time we see a zstd keep-alive message, we learn how + // the server encodes that. + var mr tailcfg.MapResponse + must.Do(sess.decodeMsg(keepAliveMsgZstd, &mr)) + if !mr.KeepAlive { + t.Fatal("mr.KeepAlive false; want true") + } + if !bytes.Equal(sess.keepAliveZ, keepAliveMsgZstd) { + t.Fatalf("sess.keepAlive = %q; want %q", sess.keepAliveZ, keepAliveMsgZstd) + } + if got, want := sess.ztdDecodesForTest, 1; got != want { + t.Fatalf("got %d zstd decodes; want %d", got, want) + } + + // The second time on the session where we see that message, we + // decode it without needing to decompress. 
+ var mr2 tailcfg.MapResponse + must.Do(sess.decodeMsg(keepAliveMsgZstd, &mr2)) + if !mr2.KeepAlive { + t.Fatal("mr2.KeepAlive false; want true") + } + if got, want := sess.ztdDecodesForTest, 1; got != want { + t.Fatalf("got %d zstd decodes; want %d", got, want) + } +} From a208cb9fd5ac7a3e8a7ca37daf0c1560ee84e35f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 1 Oct 2025 19:18:46 -0700 Subject: [PATCH 0498/1093] feature/featuretags: add features for c2n, peerapi, advertise/use routes/exit nodes Saves 262 KB so far. I'm sure I missed some places, but shotizam says these were the low hanging fruit. Updates #12614 Change-Id: Ia31c01b454f627e6d0470229aae4e19d615e45e3 Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 3 + .../feature_advertiseexitnode_disabled.go | 13 ++ .../feature_advertiseexitnode_enabled.go | 13 ++ .../feature_advertiseroutes_disabled.go | 13 ++ .../feature_advertiseroutes_enabled.go | 13 ++ feature/buildfeatures/feature_c2n_disabled.go | 13 ++ feature/buildfeatures/feature_c2n_enabled.go | 13 ++ .../feature_peerapiclient_disabled.go | 13 ++ .../feature_peerapiclient_enabled.go | 13 ++ .../feature_peerapiserver_disabled.go | 13 ++ .../feature_peerapiserver_enabled.go | 13 ++ .../feature_useexitnode_disabled.go | 13 ++ .../feature_useexitnode_enabled.go | 13 ++ .../feature_useroutes_disabled.go | 13 ++ .../feature_useroutes_enabled.go | 13 ++ feature/featuretags/featuretags.go | 75 ++++++++- ipn/ipnlocal/c2n.go | 16 +- ipn/ipnlocal/local.go | 157 +++++++++++++----- ipn/ipnlocal/node_backend.go | 29 ++-- ipn/ipnlocal/peerapi.go | 14 ++ ipn/ipnlocal/prefs_metrics.go | 4 + ipn/localapi/localapi.go | 24 ++- ipn/prefs.go | 4 + net/dns/resolver/forwarder.go | 4 + net/netmon/interfaces_linux.go | 4 + net/netmon/netmon.go | 4 + net/netmon/state.go | 3 + net/portmapper/portmapper.go | 12 +- net/tsdial/tsdial.go | 13 +- 29 files changed, 469 insertions(+), 79 deletions(-) create mode 100644 
feature/buildfeatures/feature_advertiseexitnode_disabled.go create mode 100644 feature/buildfeatures/feature_advertiseexitnode_enabled.go create mode 100644 feature/buildfeatures/feature_advertiseroutes_disabled.go create mode 100644 feature/buildfeatures/feature_advertiseroutes_enabled.go create mode 100644 feature/buildfeatures/feature_c2n_disabled.go create mode 100644 feature/buildfeatures/feature_c2n_enabled.go create mode 100644 feature/buildfeatures/feature_peerapiclient_disabled.go create mode 100644 feature/buildfeatures/feature_peerapiclient_enabled.go create mode 100644 feature/buildfeatures/feature_peerapiserver_disabled.go create mode 100644 feature/buildfeatures/feature_peerapiserver_enabled.go create mode 100644 feature/buildfeatures/feature_useexitnode_disabled.go create mode 100644 feature/buildfeatures/feature_useexitnode_enabled.go create mode 100644 feature/buildfeatures/feature_useroutes_disabled.go create mode 100644 feature/buildfeatures/feature_useroutes_enabled.go diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 069affbd1a0f6..c77e93e1caec7 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1409,6 +1409,9 @@ func (c *Direct) answerPing(pr *tailcfg.PingRequest) { answerHeadPing(c.logf, httpc, pr) return case "c2n": + if !buildfeatures.HasC2N { + return + } if !useNoise && !envknob.Bool("TS_DEBUG_PERMIT_HTTP_C2N") { c.logf("refusing to answer c2n ping without noise") return diff --git a/feature/buildfeatures/feature_advertiseexitnode_disabled.go b/feature/buildfeatures/feature_advertiseexitnode_disabled.go new file mode 100644 index 0000000000000..d4fdcec22db3c --- /dev/null +++ b/feature/buildfeatures/feature_advertiseexitnode_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_advertiseexitnode + +package buildfeatures + +// HasAdvertiseExitNode is whether the binary was built with support for modular feature "Run an exit node". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseExitNode = false diff --git a/feature/buildfeatures/feature_advertiseexitnode_enabled.go b/feature/buildfeatures/feature_advertiseexitnode_enabled.go new file mode 100644 index 0000000000000..28246143ecb3c --- /dev/null +++ b/feature/buildfeatures/feature_advertiseexitnode_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_advertiseexitnode + +package buildfeatures + +// HasAdvertiseExitNode is whether the binary was built with support for modular feature "Run an exit node". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseExitNode = true diff --git a/feature/buildfeatures/feature_advertiseroutes_disabled.go b/feature/buildfeatures/feature_advertiseroutes_disabled.go new file mode 100644 index 0000000000000..59042720f3870 --- /dev/null +++ b/feature/buildfeatures/feature_advertiseroutes_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_advertiseroutes + +package buildfeatures + +// HasAdvertiseRoutes is whether the binary was built with support for modular feature "Advertise routes for other nodes to use". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseroutes" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasAdvertiseRoutes = false diff --git a/feature/buildfeatures/feature_advertiseroutes_enabled.go b/feature/buildfeatures/feature_advertiseroutes_enabled.go new file mode 100644 index 0000000000000..118fcd55d64e4 --- /dev/null +++ b/feature/buildfeatures/feature_advertiseroutes_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_advertiseroutes + +package buildfeatures + +// HasAdvertiseRoutes is whether the binary was built with support for modular feature "Advertise routes for other nodes to use". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseRoutes = true diff --git a/feature/buildfeatures/feature_c2n_disabled.go b/feature/buildfeatures/feature_c2n_disabled.go new file mode 100644 index 0000000000000..bc37e9e7bfd23 --- /dev/null +++ b/feature/buildfeatures/feature_c2n_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_c2n + +package buildfeatures + +// HasC2N is whether the binary was built with support for modular feature "Control-to-node (C2N) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_c2n" build tag. +// It's a const so it can be used for dead code elimination. +const HasC2N = false diff --git a/feature/buildfeatures/feature_c2n_enabled.go b/feature/buildfeatures/feature_c2n_enabled.go new file mode 100644 index 0000000000000..5950e71571652 --- /dev/null +++ b/feature/buildfeatures/feature_c2n_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_c2n + +package buildfeatures + +// HasC2N is whether the binary was built with support for modular feature "Control-to-node (C2N) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_c2n" build tag. +// It's a const so it can be used for dead code elimination. +const HasC2N = true diff --git a/feature/buildfeatures/feature_peerapiclient_disabled.go b/feature/buildfeatures/feature_peerapiclient_disabled.go new file mode 100644 index 0000000000000..83cc2bdfeef5c --- /dev/null +++ b/feature/buildfeatures/feature_peerapiclient_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_peerapiclient + +package buildfeatures + +// HasPeerAPIClient is whether the binary was built with support for modular feature "PeerAPI client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIClient = false diff --git a/feature/buildfeatures/feature_peerapiclient_enabled.go b/feature/buildfeatures/feature_peerapiclient_enabled.go new file mode 100644 index 0000000000000..0bd3f50a869ca --- /dev/null +++ b/feature/buildfeatures/feature_peerapiclient_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_peerapiclient + +package buildfeatures + +// HasPeerAPIClient is whether the binary was built with support for modular feature "PeerAPI client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiclient" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasPeerAPIClient = true diff --git a/feature/buildfeatures/feature_peerapiserver_disabled.go b/feature/buildfeatures/feature_peerapiserver_disabled.go new file mode 100644 index 0000000000000..4a4f32b8a4065 --- /dev/null +++ b/feature/buildfeatures/feature_peerapiserver_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_peerapiserver + +package buildfeatures + +// HasPeerAPIServer is whether the binary was built with support for modular feature "PeerAPI server support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIServer = false diff --git a/feature/buildfeatures/feature_peerapiserver_enabled.go b/feature/buildfeatures/feature_peerapiserver_enabled.go new file mode 100644 index 0000000000000..17d0547b80946 --- /dev/null +++ b/feature/buildfeatures/feature_peerapiserver_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_peerapiserver + +package buildfeatures + +// HasPeerAPIServer is whether the binary was built with support for modular feature "PeerAPI server support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIServer = true diff --git a/feature/buildfeatures/feature_useexitnode_disabled.go b/feature/buildfeatures/feature_useexitnode_disabled.go new file mode 100644 index 0000000000000..51bec8046cb35 --- /dev/null +++ b/feature/buildfeatures/feature_useexitnode_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_useexitnode + +package buildfeatures + +// HasUseExitNode is whether the binary was built with support for modular feature "Use exit nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseExitNode = false diff --git a/feature/buildfeatures/feature_useexitnode_enabled.go b/feature/buildfeatures/feature_useexitnode_enabled.go new file mode 100644 index 0000000000000..f7ab414de9477 --- /dev/null +++ b/feature/buildfeatures/feature_useexitnode_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useexitnode + +package buildfeatures + +// HasUseExitNode is whether the binary was built with support for modular feature "Use exit nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseExitNode = true diff --git a/feature/buildfeatures/feature_useroutes_disabled.go b/feature/buildfeatures/feature_useroutes_disabled.go new file mode 100644 index 0000000000000..ecf9d022bed74 --- /dev/null +++ b/feature/buildfeatures/feature_useroutes_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_useroutes + +package buildfeatures + +// HasUseRoutes is whether the binary was built with support for modular feature "Use routes advertised by other nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useroutes" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasUseRoutes = false diff --git a/feature/buildfeatures/feature_useroutes_enabled.go b/feature/buildfeatures/feature_useroutes_enabled.go new file mode 100644 index 0000000000000..c0a59322ecdc1 --- /dev/null +++ b/feature/buildfeatures/feature_useroutes_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useroutes + +package buildfeatures + +// HasUseRoutes is whether the binary was built with support for modular feature "Use routes advertised by other nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseRoutes = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 5792a1927e70b..db7f2d2728cb8 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -82,6 +82,12 @@ type FeatureMeta struct { Sym string // exported Go symbol for boolean const Desc string // human-readable description Deps []FeatureTag // other features this feature requires + + // ImplementationDetail is whether the feature is an internal implementation + // detail. That is, it's not something a user would care about having or not + // having, but we'd like to be able to omit from builds if no other + // user-visible features depend on it.
+ ImplementationDetail bool } // Features are the known Tailscale features that can be selectively included or @@ -90,17 +96,45 @@ var Features = map[FeatureTag]FeatureMeta{ "acme": {Sym: "ACME", Desc: "ACME TLS certificate management"}, "appconnectors": {Sym: "AppConnectors", Desc: "App Connectors support"}, "aws": {Sym: "AWS", Desc: "AWS integration"}, - "bakedroots": {Sym: "BakedRoots", Desc: "Embed CA (LetsEncrypt) x509 roots to use as fallback"}, - "bird": {Sym: "Bird", Desc: "Bird BGP integration"}, + "advertiseexitnode": { + Sym: "AdvertiseExitNode", + Desc: "Run an exit node", + Deps: []FeatureTag{ + "peerapiserver", // to run the ExitDNS server + "advertiseroutes", + }, + }, + "advertiseroutes": { + Sym: "AdvertiseRoutes", + Desc: "Advertise routes for other nodes to use", + Deps: []FeatureTag{ + "c2n", // for control plane to probe health for HA subnet router leader election + }, + }, + "bakedroots": {Sym: "BakedRoots", Desc: "Embed CA (LetsEncrypt) x509 roots to use as fallback"}, + "bird": {Sym: "Bird", Desc: "Bird BGP integration"}, + "c2n": { + Sym: "C2N", + Desc: "Control-to-node (C2N) support", + ImplementationDetail: true, + }, "captiveportal": {Sym: "CaptivePortal", Desc: "Captive portal detection"}, "capture": {Sym: "Capture", Desc: "Packet capture"}, - "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, "cliconndiag": {Sym: "CLIConnDiag", Desc: "CLI connection error diagnostics"}, "clientmetrics": {Sym: "ClientMetrics", Desc: "Client metrics support"}, - "clientupdate": {Sym: "ClientUpdate", Desc: "Client auto-update support"}, - "completion": {Sym: "Completion", Desc: "CLI shell completion"}, - "dbus": {Sym: "DBus", Desc: "Linux DBus support"}, + "clientupdate": { + Sym: "ClientUpdate", + Desc: "Client auto-update support", + Deps: []FeatureTag{"c2n"}, + }, + "completion": {Sym: "Completion", Desc: "CLI shell completion"}, + 
"cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, + "dbus": { + Sym: "DBus", + Desc: "Linux DBus support", + ImplementationDetail: true, + }, "debug": {Sym: "Debug", Desc: "various debug support, for things that don't have or need their own more specific feature"}, "debugeventbus": {Sym: "DebugEventBus", Desc: "eventbus debug support"}, "debugportmapper": { @@ -144,6 +178,16 @@ var Features = map[FeatureTag]FeatureMeta{ // by some other feature are missing, then it's an error by default unless you accept // that it's okay to proceed without that meta feature. }, + "peerapiclient": { + Sym: "PeerAPIClient", + Desc: "PeerAPI client support", + ImplementationDetail: true, + }, + "peerapiserver": { + Sym: "PeerAPIServer", + Desc: "PeerAPI server support", + ImplementationDetail: true, + }, "portlist": {Sym: "PortList", Desc: "Optionally advertise listening service ports"}, "portmapper": {Sym: "PortMapper", Desc: "NAT-PMP/PCP/UPnP port mapping support"}, "posture": {Sym: "Posture", Desc: "Device posture checking support"}, @@ -180,7 +224,7 @@ var Features = map[FeatureTag]FeatureMeta{ "ssh": { Sym: "SSH", Desc: "Tailscale SSH support", - Deps: []FeatureTag{"dbus", "netstack"}, + Deps: []FeatureTag{"c2n", "dbus", "netstack"}, }, "synology": { Sym: "Synology", @@ -192,7 +236,13 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Linux system tray", Deps: []FeatureTag{"dbus"}, }, - "taildrop": {Sym: "Taildrop", Desc: "Taildrop (file sending) support"}, + "taildrop": { + Sym: "Taildrop", + Desc: "Taildrop (file sending) support", + Deps: []FeatureTag{ + "peerapiclient", "peerapiserver", // assume Taildrop is both sides for now + }, + }, "tailnetlock": {Sym: "TailnetLock", Desc: "Tailnet Lock support"}, "tap": {Sym: "Tap", Desc: "Experimental Layer 2 (ethernet) support"}, "tpm": {Sym: "TPM", Desc: "TPM support"}, @@ -200,6 +250,15 @@ var Features = map[FeatureTag]FeatureMeta{ Sym: "UnixSocketIdentity", Desc: "differentiate 
between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)", }, + "useroutes": { + Sym: "UseRoutes", + Desc: "Use routes advertised by other nodes", + }, + "useexitnode": { + Sym: "UseExitNode", + Desc: "Use exit nodes", + Deps: []FeatureTag{"peerapiclient", "useroutes"}, + }, "useproxy": { Sym: "UseProxy", Desc: "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.", diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index e2dfecec2c930..4b5b581aa9e44 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -32,12 +32,17 @@ import ( // c2nHandlers maps an HTTP method and URI path (without query parameters) to // its handler. The exact method+path match is preferred, but if no entry // exists for that, a map entry with an empty method is used as a fallback. -var c2nHandlers = map[methodAndPath]c2nHandler{ - // Debug. - req("/echo"): handleC2NEcho, -} +var c2nHandlers map[methodAndPath]c2nHandler func init() { + c2nHandlers = map[methodAndPath]c2nHandler{} + if buildfeatures.HasC2N { + // Echo is the basic "ping" handler as used by the control plane to probe + // whether a node is reachable. In particular, it's important for + // high-availability subnet routers for the control plane to probe which of + // several candidate nodes is reachable and actually alive. + RegisterC2N("/echo", handleC2NEcho) + } if buildfeatures.HasSSH { RegisterC2N("/ssh/usernames", handleC2NSSHUsernames) } @@ -69,6 +74,9 @@ func init() { // A pattern is like "GET /foo" (specific to an HTTP method) or "/foo" (all // methods). It panics if the pattern is already registered. 
func RegisterC2N(pattern string, h func(*LocalBackend, http.ResponseWriter, *http.Request)) { + if !buildfeatures.HasC2N { + return + } k := req(pattern) if _, ok := c2nHandlers[k]; ok { panic(fmt.Sprintf("c2n: duplicate handler for %q", pattern)) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 2af78b2befd24..38f98f8fbb6f9 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -550,10 +550,12 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo // Following changes are triggered via the eventbus. b.linkChange(&netmon.ChangeDelta{New: netMon.InterfaceState()}) - if tunWrap, ok := b.sys.Tun.GetOK(); ok { - tunWrap.PeerAPIPort = b.GetPeerAPIPort - } else { - b.logf("[unexpected] failed to wire up PeerAPI port for engine %T", e) + if buildfeatures.HasPeerAPIServer { + if tunWrap, ok := b.sys.Tun.GetOK(); ok { + tunWrap.PeerAPIPort = b.GetPeerAPIPort + } else { + b.logf("[unexpected] failed to wire up PeerAPI port for engine %T", e) + } } if buildfeatures.HasDebug { @@ -972,15 +974,17 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { b.updateFilterLocked(prefs) updateExitNodeUsageWarning(prefs, delta.New, b.health) - cn := b.currentNode() - nm := cn.NetMap() - if peerAPIListenAsync && nm != nil && b.state == ipn.Running { - want := nm.GetAddresses().Len() - have := len(b.peerAPIListeners) - b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) - if have < want { - b.logf("linkChange: peerAPIListeners too low; trying again") - b.goTracker.Go(b.initPeerAPIListener) + if buildfeatures.HasPeerAPIServer { + cn := b.currentNode() + nm := cn.NetMap() + if peerAPIListenAsync && nm != nil && b.state == ipn.Running { + want := nm.GetAddresses().Len() + have := len(b.peerAPIListeners) + b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) + if have < want { + b.logf("linkChange: peerAPIListeners too low; trying again") + b.goTracker.Go(b.initPeerAPIListener) + } } } } 
@@ -1368,7 +1372,7 @@ func peerStatusFromNode(ps *ipnstate.PeerStatus, n tailcfg.NodeView) { ps.PublicKey = n.Key() ps.ID = n.StableID() ps.Created = n.Created() - ps.ExitNodeOption = tsaddr.ContainsExitRoutes(n.AllowedIPs()) + ps.ExitNodeOption = buildfeatures.HasUseExitNode && tsaddr.ContainsExitRoutes(n.AllowedIPs()) if n.Tags().Len() != 0 { v := n.Tags() ps.Tags = &v @@ -1897,6 +1901,9 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { // // b.mu must be held. func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { + if !buildfeatures.HasUseExitNode { + return false + } if exitNodeIDStr, _ := b.polc.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) @@ -2002,7 +2009,7 @@ func (b *LocalBackend) sysPolicyChanged(policy policyclient.PolicyChange) { b.mu.Unlock() } - if policy.HasChanged(pkey.AllowedSuggestedExitNodes) { + if buildfeatures.HasUseExitNode && policy.HasChanged(pkey.AllowedSuggestedExitNodes) { b.refreshAllowedSuggestions() // Re-evaluate exit node suggestion now that the policy setting has changed. if _, err := b.SuggestExitNode(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { @@ -2073,6 +2080,9 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo // mustationsAreWorthyOfRecalculatingSuggestedExitNode reports whether any mutation type in muts is // worthy of recalculating the suggested exit node. func mutationsAreWorthyOfRecalculatingSuggestedExitNode(muts []netmap.NodeMutation, cn *nodeBackend, sid tailcfg.StableNodeID) bool { + if !buildfeatures.HasUseExitNode { + return false + } for _, m := range muts { n, ok := cn.NodeByID(m.NodeIDBeingMutated()) if !ok { @@ -2126,6 +2136,9 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { // // b.mu must be held. 
func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged bool) { + if !buildfeatures.HasUseExitNode { + return false + } // As of 2025-07-08, the only supported auto exit node expression is [ipn.AnyExitNode]. // // However, to maintain forward compatibility with future auto exit node expressions, @@ -2170,6 +2183,9 @@ func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged // // b.mu must be held. func (b *LocalBackend) resolveExitNodeIPLocked(prefs *ipn.Prefs) (prefsChanged bool) { + if !buildfeatures.HasUseExitNode { + return false + } // If we have a desired IP on file, try to find the corresponding node. if !prefs.ExitNodeIP.IsValid() { return false @@ -2455,6 +2471,11 @@ func (b *LocalBackend) Start(opts ipn.Options) error { } } + var c2nHandler http.Handler + if buildfeatures.HasC2N { + c2nHandler = http.HandlerFunc(b.handleC2N) + } + // TODO(apenwarr): The only way to change the ServerURL is to // re-run b.Start, because this is the only place we create a // new controlclient. EditPrefs allows you to overwrite ServerURL, @@ -2475,7 +2496,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { PopBrowserURL: b.tellClientToBrowseToURL, Dialer: b.Dialer(), Observer: b, - C2NHandler: http.HandlerFunc(b.handleC2N), + C2NHandler: c2nHandler, DialPlan: &b.dialPlan, // pointer because it can't be copied ControlKnobs: b.sys.ControlKnobs(), Shutdown: ccShutdown, @@ -2623,31 +2644,33 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { } } if prefs.Valid() { - for _, r := range prefs.AdvertiseRoutes().All() { - if r.Bits() == 0 { - // When offering a default route to the world, we - // filter out locally reachable LANs, so that the - // default route effectively appears to be a "guest - // wifi": you get internet access, but to additionally - // get LAN access the LAN(s) need to be offered - // explicitly as well. 
- localInterfaceRoutes, hostIPs, err := interfaceRoutes() - if err != nil { - b.logf("getting local interface routes: %v", err) - continue - } - s, err := shrinkDefaultRoute(r, localInterfaceRoutes, hostIPs) - if err != nil { - b.logf("computing default route filter: %v", err) - continue + if buildfeatures.HasAdvertiseRoutes { + for _, r := range prefs.AdvertiseRoutes().All() { + if r.Bits() == 0 { + // When offering a default route to the world, we + // filter out locally reachable LANs, so that the + // default route effectively appears to be a "guest + // wifi": you get internet access, but to additionally + // get LAN access the LAN(s) need to be offered + // explicitly as well. + localInterfaceRoutes, hostIPs, err := interfaceRoutes() + if err != nil { + b.logf("getting local interface routes: %v", err) + continue + } + s, err := shrinkDefaultRoute(r, localInterfaceRoutes, hostIPs) + if err != nil { + b.logf("computing default route filter: %v", err) + continue + } + localNetsB.AddSet(s) + } else { + localNetsB.AddPrefix(r) + // When advertising a non-default route, we assume + // this is a corporate subnet that should be present + // in the audit logs. + logNetsB.AddPrefix(r) } - localNetsB.AddSet(s) - } else { - localNetsB.AddPrefix(r) - // When advertising a non-default route, we assume - // this is a corporate subnet that should be present - // in the audit logs. - logNetsB.AddPrefix(r) } } @@ -2658,7 +2681,7 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { // The correct filter rules are synthesized by the coordination server // and sent down, but the address needs to be part of the 'local net' for the // filter package to even bother checking the filter rules, so we set them here. 
- if prefs.AppConnector().Advertise { + if buildfeatures.HasAppConnectors && prefs.AppConnector().Advertise { localNetsB.Add(netip.MustParseAddr("0.0.0.0")) localNetsB.Add(netip.MustParseAddr("::0")) } @@ -3712,6 +3735,9 @@ func (b *LocalBackend) Ping(ctx context.Context, ip netip.Addr, pingType tailcfg } func (b *LocalBackend) pingPeerAPI(ctx context.Context, ip netip.Addr) (peer tailcfg.NodeView, peerBase string, err error) { + if !buildfeatures.HasPeerAPIClient { + return peer, peerBase, feature.ErrUnavailable + } var zero tailcfg.NodeView ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() @@ -4051,6 +4077,9 @@ var exitNodeMisconfigurationWarnable = health.Register(&health.Warnable{ // updateExitNodeUsageWarning updates a warnable meant to notify users of // configuration issues that could break exit node usage. func updateExitNodeUsageWarning(p ipn.PrefsView, state *netmon.State, healthTracker *health.Tracker) { + if !buildfeatures.HasUseExitNode { + return + } var msg string if p.ExitNodeIP().IsValid() || p.ExitNodeID() != "" { warn, _ := netutil.CheckReversePathFiltering(state) @@ -4070,6 +4099,9 @@ func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error { if !tryingToUseExitNode { return nil } + if !buildfeatures.HasUseExitNode { + return feature.ErrUnavailable + } if err := featureknob.CanUseExitNode(); err != nil { return err @@ -4110,6 +4142,9 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P defer unlock() p0 := b.pm.CurrentPrefs() + if !buildfeatures.HasUseExitNode { + return p0, nil + } if v && p0.ExitNodeID() != "" { // Already on. return p0, nil @@ -4240,6 +4275,9 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn // // b.mu must be held. 
func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change *ipn.MaskedPrefs) bool { + if !buildfeatures.HasUseExitNode { + return false + } if !change.AutoExitNodeSet && !change.ExitNodeIDSet && !change.ExitNodeIPSet { // The change does not affect exit node usage. return false @@ -4577,6 +4615,9 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) // GetPeerAPIPort returns the port number for the peerapi server // running on the provided IP. func (b *LocalBackend) GetPeerAPIPort(ip netip.Addr) (port uint16, ok bool) { + if !buildfeatures.HasPeerAPIServer { + return 0, false + } b.mu.Lock() defer b.mu.Unlock() for _, pln := range b.peerAPIListeners { @@ -4936,10 +4977,12 @@ func (b *LocalBackend) authReconfig() { // Keep the dialer updated about whether we're supposed to use // an exit node's DNS server (so SOCKS5/HTTP outgoing dials // can use it for name resolution) - if dohURLOK { - b.dialer.SetExitDNSDoH(dohURL) - } else { - b.dialer.SetExitDNSDoH("") + if buildfeatures.HasUseExitNode { + if dohURLOK { + b.dialer.SetExitDNSDoH(dohURL) + } else { + b.dialer.SetExitDNSDoH("") + } } cfg, err := nmcfg.WGCfg(nm, b.logf, flags, prefs.ExitNodeID()) @@ -5064,6 +5107,9 @@ func (b *LocalBackend) TailscaleVarRoot() string { // // b.mu must be held. 
func (b *LocalBackend) closePeerAPIListenersLocked() { + if !buildfeatures.HasPeerAPIServer { + return + } b.peerAPIServer = nil for _, pln := range b.peerAPIListeners { pln.Close() @@ -5079,6 +5125,9 @@ func (b *LocalBackend) closePeerAPIListenersLocked() { const peerAPIListenAsync = runtime.GOOS == "windows" || runtime.GOOS == "android" func (b *LocalBackend) initPeerAPIListener() { + if !buildfeatures.HasPeerAPIServer { + return + } b.logf("[v1] initPeerAPIListener: entered") b.mu.Lock() defer b.mu.Unlock() @@ -5903,6 +5952,9 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) { // RefreshExitNode determines which exit node to use based on the current // prefs and netmap and switches to it if needed. func (b *LocalBackend) RefreshExitNode() { + if !buildfeatures.HasUseExitNode { + return + } if b.resolveExitNode() { b.authReconfig() } @@ -5918,6 +5970,9 @@ func (b *LocalBackend) RefreshExitNode() { // // b.mu must not be held. func (b *LocalBackend) resolveExitNode() (changed bool) { + if !buildfeatures.HasUseExitNode { + return false + } b.mu.Lock() defer b.mu.Unlock() @@ -6468,6 +6523,9 @@ func (b *LocalBackend) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpd // // If exitNodeID is the zero valid, it returns "", false. func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + if !buildfeatures.HasUseExitNode { + return "", false + } if exitNodeID.IsZero() { return "", false } @@ -7084,6 +7142,9 @@ var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") // // b.mu.lock() must be held. 
func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggestionResponse, err error) { + if !buildfeatures.HasUseExitNode { + return response, feature.ErrUnavailable + } lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx) prevSuggestion := b.lastSuggestedExitNode @@ -7101,6 +7162,9 @@ func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggest } func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionResponse, err error) { + if !buildfeatures.HasUseExitNode { + return response, feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() return b.suggestExitNodeLocked() @@ -7117,6 +7181,9 @@ func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] { // refreshAllowedSuggestions rebuilds the set of permitted exit nodes // from the current [pkey.AllowedSuggestedExitNodes] value. func (b *LocalBackend) refreshAllowedSuggestions() { + if !buildfeatures.HasUseExitNode { + return + } b.allowedSuggestedExitNodesMu.Lock() defer b.allowedSuggestedExitNodesMu.Unlock() b.allowedSuggestedExitNodes = fillAllowedSuggestions(b.polc) diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 95bf350ceeca0..22e965fa62c9d 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -530,6 +530,9 @@ func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, } func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + if !buildfeatures.HasUseExitNode { + return "", false + } nb.mu.Lock() defer nb.mu.Unlock() return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID) @@ -769,18 +772,20 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. // If we're using an exit node and that exit node is new enough (1.19.x+) // to run a DoH DNS proxy, then send all our DNS traffic through it, // unless we find resolvers with UseWithExitNode set, in which case we use that. 
- if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { - filtered := useWithExitNodeResolvers(nm.DNS.Resolvers) - if len(filtered) > 0 { - addDefault(filtered) - } else { - // If no default global resolvers with the override - // are configured, configure the exit node's resolver. - addDefault([]*dnstype.Resolver{{Addr: dohURL}}) - } + if buildfeatures.HasUseExitNode { + if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { + filtered := useWithExitNodeResolvers(nm.DNS.Resolvers) + if len(filtered) > 0 { + addDefault(filtered) + } else { + // If no default global resolvers with the override + // are configured, configure the exit node's resolver. + addDefault([]*dnstype.Resolver{{Addr: dohURL}}) + } - addSplitDNSRoutes(useWithExitNodeRoutes(nm.DNS.Routes)) - return dcfg + addSplitDNSRoutes(useWithExitNodeRoutes(nm.DNS.Routes)) + return dcfg + } } // If the user has set default resolvers ("override local DNS"), prefer to @@ -788,7 +793,7 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. // node resolvers, use those as the default. 
if len(nm.DNS.Resolvers) > 0 { addDefault(nm.DNS.Resolvers) - } else { + } else if buildfeatures.HasUseExitNode { if resolvers, ok := wireguardExitNodeDNSResolvers(nm, peers, prefs.ExitNodeID()); ok { addDefault(resolvers) } diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 9ad3e3c362570..a045086d468fa 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -26,6 +26,7 @@ import ( "golang.org/x/net/dns/dnsmessage" "golang.org/x/net/http/httpguts" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" @@ -131,6 +132,9 @@ type peerAPIListener struct { } func (pln *peerAPIListener) Close() error { + if !buildfeatures.HasPeerAPIServer { + return nil + } if pln.ln != nil { return pln.ln.Close() } @@ -138,6 +142,9 @@ func (pln *peerAPIListener) Close() error { } func (pln *peerAPIListener) serve() { + if !buildfeatures.HasPeerAPIServer { + return + } if pln.ln == nil { return } @@ -319,6 +326,9 @@ func peerAPIRequestShouldGetSecurityHeaders(r *http.Request) bool { // // It panics if the path is already registered. 
func RegisterPeerAPIHandler(path string, f func(PeerAPIHandler, http.ResponseWriter, *http.Request)) { + if !buildfeatures.HasPeerAPIServer { + return + } if _, ok := peerAPIHandlers[path]; ok { panic(fmt.Sprintf("duplicate PeerAPI handler %q", path)) } @@ -337,6 +347,10 @@ var ( ) func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasPeerAPIServer { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if err := h.validatePeerAPIRequest(r); err != nil { metricInvalidRequests.Add(1) h.logf("invalid request from %v: %v", h.remoteAddr, err) diff --git a/ipn/ipnlocal/prefs_metrics.go b/ipn/ipnlocal/prefs_metrics.go index fa768ba3ce238..34c5f5504fac4 100644 --- a/ipn/ipnlocal/prefs_metrics.go +++ b/ipn/ipnlocal/prefs_metrics.go @@ -6,6 +6,7 @@ package ipnlocal import ( "errors" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/util/clientmetric" @@ -85,6 +86,9 @@ func (e *prefsMetricsEditEvent) record() error { // false otherwise. The caller is responsible for ensuring that the id belongs to // an exit node. 
func (e *prefsMetricsEditEvent) exitNodeType(id tailcfg.StableNodeID) (props []exitNodeProperty, isNode bool) { + if !buildfeatures.HasUseExitNode { + return nil, false + } var peer tailcfg.NodeView if peer, isNode = e.node.PeerByStableID(id); isNode { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 7f6452ad344b2..d7cd42c755fd1 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -72,7 +72,6 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "bugreport": (*Handler).serveBugReport, "check-ip-forwarding": (*Handler).serveCheckIPForwarding, "check-prefs": (*Handler).serveCheckPrefs, "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, @@ -90,21 +89,17 @@ var handler = map[string]LocalAPIHandler{ "logtap": (*Handler).serveLogTap, "metrics": (*Handler).serveMetrics, "ping": (*Handler).servePing, - "pprof": (*Handler).servePprof, "prefs": (*Handler).servePrefs, "query-feature": (*Handler).serveQueryFeature, "reload-config": (*Handler).reloadConfig, "reset-auth": (*Handler).serveResetAuth, - "set-dns": (*Handler).serveSetDNS, "set-expiry-sooner": (*Handler).serveSetExpirySooner, "set-gui-visible": (*Handler).serveSetGUIVisible, "set-push-device-token": (*Handler).serveSetPushDeviceToken, "set-udp-gro-forwarding": (*Handler).serveSetUDPGROForwarding, - "set-use-exit-node-enabled": (*Handler).serveSetUseExitNodeEnabled, "shutdown": (*Handler).serveShutdown, "start": (*Handler).serveStart, "status": (*Handler).serveStatus, - "suggest-exit-node": (*Handler).serveSuggestExitNode, "update/check": (*Handler).serveUpdateCheck, "upload-client-metrics": (*Handler).serveUploadClientMetrics, "usermetrics": (*Handler).serveUserMetrics, @@ -116,6 +111,17 @@ func init() { if buildfeatures.HasAppConnectors { Register("appc-route-info", 
(*Handler).serveGetAppcRouteInfo) } + if buildfeatures.HasUseExitNode { + Register("suggest-exit-node", (*Handler).serveSuggestExitNode) + Register("set-use-exit-node-enabled", (*Handler).serveSetUseExitNodeEnabled) + } + if buildfeatures.HasACME { + Register("set-dns", (*Handler).serveSetDNS) + } + if buildfeatures.HasDebug { + Register("bugreport", (*Handler).serveBugReport) + Register("pprof", (*Handler).servePprof) + } } // Register registers a new LocalAPI handler for the given name. @@ -1291,6 +1297,10 @@ func (h *Handler) serveSetGUIVisible(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasUseExitNode { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.POST { http.Error(w, "use POST", http.StatusMethodNotAllowed) return @@ -1629,6 +1639,10 @@ func dnsMessageTypeForString(s string) (t dnsmessage.Type, err error) { // serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasUseExitNode { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return diff --git a/ipn/prefs.go b/ipn/prefs.go index a2149950ddc1e..8a5b17af6ac16 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -20,6 +20,7 @@ import ( "tailscale.com/atomicfile" "tailscale.com/drive" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" @@ -787,6 +788,9 @@ func (p *Prefs) AdvertisesExitNode() bool { // SetAdvertiseExitNode mutates p (if non-nil) to add or remove the two // /0 exit node routes. 
func (p *Prefs) SetAdvertiseExitNode(runExit bool) { + if !buildfeatures.HasAdvertiseExitNode { + return + } if p == nil { return } diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index a7a8932e812e9..86f0f5b8c48c4 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -27,6 +27,7 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/publicdns" @@ -530,6 +531,9 @@ func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDe }() } if strings.HasPrefix(rr.name.Addr, "http://") { + if !buildfeatures.HasPeerAPIClient { + return nil, feature.ErrUnavailable + } return f.sendDoH(ctx, rr.name.Addr, f.dialer.PeerAPIHTTPClient(), fq.packet) } if strings.HasPrefix(rr.name.Addr, "https://") { diff --git a/net/netmon/interfaces_linux.go b/net/netmon/interfaces_linux.go index d0fb15ababe9e..a9b93c0a1ff49 100644 --- a/net/netmon/interfaces_linux.go +++ b/net/netmon/interfaces_linux.go @@ -22,6 +22,7 @@ import ( "github.com/mdlayher/netlink" "go4.org/mem" "golang.org/x/sys/unix" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/netaddr" "tailscale.com/util/lineiter" ) @@ -41,6 +42,9 @@ ens18 00000000 0100000A 0003 0 0 0 00000000 ens18 0000000A 00000000 0001 0 0 0 0000FFFF 0 0 0 */ func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } if procNetRouteErr.Load() { // If we failed to read /proc/net/route previously, don't keep trying. 
return ret, myIP, false diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index fcac9c4ee2bee..f7d1b1107e379 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -14,6 +14,7 @@ import ( "sync" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -181,6 +182,9 @@ func (m *Monitor) SetTailscaleInterfaceName(ifName string) { // It's the same as interfaces.LikelyHomeRouterIP, but it caches the // result until the monitor detects a network change. func (m *Monitor) GatewayAndSelfIP() (gw, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } if m.static { return } diff --git a/net/netmon/state.go b/net/netmon/state.go index cdb427d47340a..73497e93f73be 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -573,6 +573,9 @@ var disableLikelyHomeRouterIPSelf = envknob.RegisterBool("TS_DEBUG_DISABLE_LIKEL // the LAN using that gateway. // This is used as the destination for UPnP, NAT-PMP, PCP, etc queries. func LikelyHomeRouterIP() (gateway, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } // If we don't have a way to get the home router IP, then we can't do // anything; just return. 
if likelyHomeRouterIP == nil { diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 024c6dc784d67..9368d1c4ee05b 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -20,6 +20,7 @@ import ( "go4.org/mem" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/netaddr" "tailscale.com/net/neterror" "tailscale.com/net/netmon" @@ -262,10 +263,13 @@ func NewClient(c Config) *Client { panic("nil EventBus") } ret := &Client{ - logf: c.Logf, - netMon: c.NetMon, - ipAndGateway: netmon.LikelyHomeRouterIP, // TODO(bradfitz): move this to method on netMon - onChange: c.OnChange, + logf: c.Logf, + netMon: c.NetMon, + onChange: c.OnChange, + } + if buildfeatures.HasPortMapper { + // TODO(bradfitz): move this to method on netMon + ret.ipAndGateway = netmon.LikelyHomeRouterIP } ret.pubClient = c.EventBus.Client("portmapper") ret.updates = eventbus.Publish[portmappertype.Mapping](ret.pubClient) diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index 87b58f2a02e4d..a0e2a11a472f0 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -19,6 +19,8 @@ import ( "time" "github.com/gaissmai/bart" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/dnscache" "tailscale.com/net/netknob" "tailscale.com/net/netmon" @@ -135,6 +137,9 @@ func (d *Dialer) TUNName() string { // // For example, "http://100.68.82.120:47830/dns-query". 
func (d *Dialer) SetExitDNSDoH(doh string) { + if !buildfeatures.HasUseExitNode { + return + } d.mu.Lock() defer d.mu.Unlock() if d.exitDNSDoHBase == doh { @@ -372,7 +377,7 @@ func (d *Dialer) userDialResolve(ctx context.Context, network, addr string) (net } var r net.Resolver - if exitDNSDoH != "" { + if buildfeatures.HasUseExitNode && buildfeatures.HasPeerAPIClient && exitDNSDoH != "" { r.PreferGo = true r.Dial = func(ctx context.Context, network, address string) (net.Conn, error) { return &dohConn{ @@ -509,6 +514,9 @@ func (d *Dialer) UserDial(ctx context.Context, network, addr string) (net.Conn, // network must a "tcp" type, and addr must be an ip:port. Name resolution // is not supported. func (d *Dialer) dialPeerAPI(ctx context.Context, network, addr string) (net.Conn, error) { + if !buildfeatures.HasPeerAPIClient { + return nil, feature.ErrUnavailable + } switch network { case "tcp", "tcp6", "tcp4": default: @@ -551,6 +559,9 @@ func (d *Dialer) getPeerDialer() *net.Dialer { // The returned Client must not be mutated; it's owned by the Dialer // and shared by callers. func (d *Dialer) PeerAPIHTTPClient() *http.Client { + if !buildfeatures.HasPeerAPIClient { + panic("unreachable") + } d.peerClientOnce.Do(func() { t := http.DefaultTransport.(*http.Transport).Clone() t.Dial = nil From 24e38eb7294a057776a9942185460456ca1ebf95 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 2 Oct 2025 12:01:59 -0700 Subject: [PATCH 0499/1093] control/controlclient,health,ipn/ipnlocal,health: fix deadlock by deleting health reporting A recent change (009d702adfa0fc) introduced a deadlock where the /machine/update-health network request to report the client's health status update to the control plane was moved to being synchronous within the eventbus's pump machinery. I started to instead make the health reporting be async, but then we realized in the three years since we added that, it's barely been used and doesn't pay for itself, for how many HTTP requests it makes. 
Instead, delete it all and replace it with a c2n handler, which provides much more helpful information. Fixes tailscale/corp#32952 Change-Id: I9e8a5458269ebfdda1c752d7bbb8af2780d71b04 Signed-off-by: Brad Fitzpatrick --- control/controlclient/auto.go | 31 ------------------------- control/controlclient/direct.go | 41 --------------------------------- health/state.go | 3 +++ ipn/ipnlocal/c2n.go | 10 ++++++++ tailcfg/tailcfg.go | 6 ++++- 5 files changed, 18 insertions(+), 73 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 224838d56909a..9f5bf38aeecc6 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -12,7 +12,6 @@ import ( "sync/atomic" "time" - "tailscale.com/health" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -23,7 +22,6 @@ import ( "tailscale.com/types/structs" "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" - "tailscale.com/util/eventbus" "tailscale.com/util/execqueue" ) @@ -123,8 +121,6 @@ type Auto struct { observerQueue execqueue.ExecQueue shutdownFn func() // to be called prior to shutdown or nil - eventSubs eventbus.Monitor - mu sync.Mutex // mutex guards the following fields wantLoggedIn bool // whether the user wants to be logged in per last method call @@ -195,10 +191,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { shutdownFn: opts.Shutdown, } - // Set up eventbus client and subscriber - ec := opts.Bus.Client("controlClient.Auto") - c.eventSubs = ec.Monitor(c.consumeEventbusTopics(ec)) - c.authCtx, c.authCancel = context.WithCancel(context.Background()) c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf) @@ -208,27 +200,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { return c, nil } -// consumeEventbusTopics consumes events from all relevant -// [eventbus.Subscriber]'s and passes them to their related handler. 
Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. -func (c *Auto) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { - healthChangeSub := eventbus.Subscribe[health.Change](ec) - return func(cli *eventbus.Client) { - for { - select { - case <-cli.Done(): - return - case change := <-healthChangeSub.Events(): - if change.WarnableChanged { - c.direct.ReportWarnableChange(change.Warnable, change.UnhealthyState) - } - } - } - } -} - // SetPaused controls whether HTTP activity should be paused. // // The client can be paused and unpaused repeatedly, unlike Start and Shutdown, which can only be used once. @@ -782,8 +753,6 @@ func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) { } func (c *Auto) Shutdown() { - c.eventSubs.Close() - c.mu.Lock() if c.closed { c.mu.Unlock() diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index c77e93e1caec7..de577bea42b9a 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1678,47 +1678,6 @@ func postPingResult(start time.Time, logf logger.Logf, c *http.Client, pr *tailc return nil } -// ReportWarnableChange reports to the control plane a change to this node's -// health. w must be non-nil. us can be nil to indicate a healthy state for w. -func (c *Direct) ReportWarnableChange(w *health.Warnable, us *health.UnhealthyState) { - if w == health.NetworkStatusWarnable || w == health.IPNStateWarnable || w == health.LoginStateWarnable { - // We don't report these. These include things like the network is down - // (in which case we can't report anyway) or the user wanted things - // stopped, as opposed to the more unexpected failure types in the other - // subsystems. - return - } - np, err := c.getNoiseClient() - if err != nil { - // Don't report errors to control if the server doesn't support noise. 
- return - } - nodeKey, ok := c.GetPersist().PublicNodeKeyOK() - if !ok { - return - } - if c.panicOnUse { - panic("tainted client") - } - // TODO(angott): at some point, update `Subsys` in the request to be `Warnable` - req := &tailcfg.HealthChangeRequest{ - Subsys: string(w.Code), - NodeKey: nodeKey, - } - if us != nil { - req.Error = us.Text - } - - // Best effort, no logging: - ctx, cancel := context.WithTimeout(c.closedCtx, 5*time.Second) - defer cancel() - res, err := np.Post(ctx, "/machine/update-health", nodeKey, req) - if err != nil { - return - } - res.Body.Close() -} - // SetDeviceAttrs does a synchronous call to the control plane to update // the node's attributes. // diff --git a/health/state.go b/health/state.go index 116518629f27e..2efff92b14774 100644 --- a/health/state.go +++ b/health/state.go @@ -14,6 +14,9 @@ import ( // State contains the health status of the backend, and is // provided to the client UI via LocalAPI through ipn.Notify. +// +// It is also exposed via c2n for debugging purposes, so try +// not to change its structure too gratuitously. type State struct { // Each key-value pair in Warnings represents a Warnable that is currently // unhealthy. If a Warnable is healthy, it will not be present in this map. 
diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 4b5b581aa9e44..0c228060faf63 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -18,6 +18,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/feature" "tailscale.com/feature/buildfeatures" + "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" @@ -63,6 +64,7 @@ func init() { RegisterC2N("/debug/component-logging", handleC2NDebugComponentLogging) RegisterC2N("/debug/logheap", handleC2NDebugLogHeap) RegisterC2N("/debug/netmap", handleC2NDebugNetMap) + RegisterC2N("/debug/health", handleC2NDebugHealth) } if runtime.GOOS == "linux" && buildfeatures.HasOSRouter { RegisterC2N("POST /netfilter-kind", handleC2NSetNetfilterKind) @@ -145,6 +147,14 @@ func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Reque } } +func handleC2NDebugHealth(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + var st *health.State + if buildfeatures.HasDebug && b.health != nil { + st = b.health.CurrentState() + } + writeJSON(w, st) +} + func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Request) { if !buildfeatures.HasDebug { http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 88cda044f6d7f..01ecc96b37c35 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -172,7 +172,8 @@ type CapabilityVersion int // - 125: 2025-08-11: dnstype.Resolver adds UseWithExitNode field. // - 126: 2025-09-17: Client uses seamless key renewal unless disabled by control (tailscale/corp#31479) // - 127: 2025-09-19: can handle C2N /debug/netmap. -const CurrentCapabilityVersion CapabilityVersion = 127 +// - 128: 2025-10-02: can handle C2N /debug/health. +const CurrentCapabilityVersion CapabilityVersion = 128 // ID is an integer ID for a user, node, or login allocated by the // control plane. 
@@ -2734,6 +2735,9 @@ type SetDNSResponse struct{} // node health changes to: // // POST https:///machine/update-health. +// +// As of 2025-10-02, we stopped sending this to the control plane proactively. +// It was never useful enough with its current design and needs more thought. type HealthChangeRequest struct { Subsys string // a health.Subsystem value in string form Error string // or empty if cleared From cd523eae52c220ed8731cee349efd77c1aa4a5fe Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 2 Oct 2025 16:01:55 -0700 Subject: [PATCH 0500/1093] ipn/ipnlocal: introduce the concept of client-side-reachability (#17367) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The control plane will sometimes determine that a node is not online, while the node is still able to connect to its peers. This patch doesn’t solve this problem, but it does mitigate it. This PR introduces the `client-side-reachability` node attribute that switches the node to completely ignore the online signal from control. In the future, the client itself should collect reachability data from active Wireguard flows and Tailscale pings. Updates #17366 Updates tailscale/corp#30379 Updates tailscale/corp#32686 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 12 +++++- ipn/ipnlocal/node_backend.go | 34 ++++++++++++++++ ipn/ipnlocal/node_backend_test.go | 68 +++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 6 +++ 4 files changed, 118 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 38f98f8fbb6f9..199ee724853cd 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7240,6 +7240,10 @@ func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion ta // the lowest latency to this device. For peers without a DERP home, we look for // geographic proximity to this device's DERP home. 
func suggestExitNodeUsingDERP(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { + // TODO(sfllaw): Context needs to be plumbed down here to support + // reachability testing. + ctx := context.TODO() + netMap := nb.NetMap() if report == nil || report.PreferredDERP == 0 || netMap == nil || netMap.DERPMap == nil { return res, ErrNoPreferredDERP @@ -7248,7 +7252,7 @@ func suggestExitNodeUsingDERP(report *netcheck.Report, nb *nodeBackend, prevSugg // since the netmap doesn't include delta updates (e.g., home DERP or Online // status changes) from the control plane since the last full update. candidates := nb.AppendMatchingPeers(nil, func(peer tailcfg.NodeView) bool { - if !peer.Valid() || !peer.Online().Get() { + if !peer.Valid() || !nb.PeerIsReachable(ctx, peer) { return false } if allowList != nil && !allowList.Contains(peer.StableID()) { @@ -7367,6 +7371,10 @@ var ErrNoNetMap = errors.New("no network map, try again later") // the node’s [tailcfg.Location]. To be eligible for consideration, the node // must have NodeAttrSuggestExitNode in its CapMap. func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcfg.StableNodeID]) (apitype.ExitNodeSuggestionResponse, error) { + // TODO(sfllaw): Context needs to be plumbed down here to support + // reachability testing. 
+ ctx := context.TODO() + nm := nb.NetMap() if nm == nil { return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap @@ -7386,7 +7394,7 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcf if !p.Valid() { return false } - if !p.Online().Get() { + if !nb.PeerIsReachable(ctx, p) { return false } if allowed != nil && !allowed.Contains(p.StableID()) { diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 22e965fa62c9d..3408d4cbb325d 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -362,6 +362,40 @@ func (nb *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string { return peerAPIBase(nm, p) } +// PeerIsReachable reports whether the current node can reach p. If the ctx is +// done, this function may return a result based on stale reachability data. +func (nb *nodeBackend) PeerIsReachable(ctx context.Context, p tailcfg.NodeView) bool { + if !nb.SelfHasCap(tailcfg.NodeAttrClientSideReachability) { + // Legacy behavior is to always trust the control plane, which + // isn’t always correct because the peer could be slow to check + // in so that control marks it as offline. + // See tailscale/corp#32686. + return p.Online().Get() + } + + nb.mu.Lock() + nm := nb.netMap + nb.mu.Unlock() + + if self := nm.SelfNode; self.Valid() && self.ID() == p.ID() { + // This node can always reach itself. + return true + } + return nb.peerIsReachable(ctx, p) +} + +func (nb *nodeBackend) peerIsReachable(ctx context.Context, p tailcfg.NodeView) bool { + // TODO(sfllaw): The following does not actually test for client-side + // reachability. This would require a mechanism that tracks whether the + // current node can actually reach this peer, either because they are + // already communicating or because they can ping each other. + // + // Instead, it makes the client ignore p.Online completely. + // + // See tailscale/corp#32686. 
+ return true +} + func nodeIP(n tailcfg.NodeView, pred func(netip.Addr) bool) netip.Addr { for _, pfx := range n.Addresses().All() { if pfx.IsSingleIP() && pred(pfx.Addr()) { diff --git a/ipn/ipnlocal/node_backend_test.go b/ipn/ipnlocal/node_backend_test.go index b305837fd46c2..f6698bd4bc920 100644 --- a/ipn/ipnlocal/node_backend_test.go +++ b/ipn/ipnlocal/node_backend_test.go @@ -9,7 +9,10 @@ import ( "testing" "time" + "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/types/netmap" + "tailscale.com/types/ptr" "tailscale.com/util/eventbus" ) @@ -122,3 +125,68 @@ func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) { nb.Wait(context.Background()) } + +func TestNodeBackendReachability(t *testing.T) { + for _, tc := range []struct { + name string + + // Cap sets [tailcfg.NodeAttrClientSideReachability] on the self + // node. + // + // When disabled, the client relies on the control plane sending + // an accurate peer.Online flag. When enabled, the client + // ignores peer.Online and determines whether it can reach the + // peer node. 
+ cap bool + + peer tailcfg.Node + want bool + }{ + { + name: "disabled/offline", + cap: false, + peer: tailcfg.Node{ + Online: ptr.To(false), + }, + want: false, + }, + { + name: "disabled/online", + cap: false, + peer: tailcfg.Node{ + Online: ptr.To(true), + }, + want: true, + }, + { + name: "enabled/offline", + cap: true, + peer: tailcfg.Node{ + Online: ptr.To(false), + }, + want: true, + }, + { + name: "enabled/online", + cap: true, + peer: tailcfg.Node{ + Online: ptr.To(true), + }, + want: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) + nb.netMap = &netmap.NetworkMap{} + if tc.cap { + nb.netMap.AllCaps.Make() + nb.netMap.AllCaps.Add(tailcfg.NodeAttrClientSideReachability) + } + + got := nb.PeerIsReachable(t.Context(), tc.peer.View()) + if got != tc.want { + t.Errorf("got %v, want %v", got, tc.want) + } + }) + } +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 01ecc96b37c35..96e7fbbd907c6 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2692,6 +2692,12 @@ const ( // numbers, apostrophe, spaces, and hyphens. This may not be true for the default. // Values can look like "foo.com" or "Foo's Test Tailnet - Staging". NodeAttrTailnetDisplayName NodeCapability = "tailnet-display-name" + + // NodeAttrClientSideReachability configures the node to determine + // reachability itself when choosing connectors. When absent, the + // default behavior is to trust the control plane when it claims that a + // node is no longer online, but that is not a reliable signal. + NodeAttrClientSideReachability = "client-side-reachability" ) // SetDNSRequest is a request to add a DNS record. 
From 206d98e84be6cc309f3fbe9eb34844f0c7883a28 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 2 Oct 2025 18:29:54 -0700 Subject: [PATCH 0501/1093] control/controlclient: restore aggressive Direct.Close teardown In the earlier http2 package migration (1d93bdce20ddd2, #17394) I had removed Direct.Close's tracking of the connPool, thinking it wasn't necessary. Some tests (in another repo) are strict and like it to tear down the world and wait, to check for leaked goroutines. And they caught this letting some goroutines idle past Close, even if they'd eventually close down on their own. This restores the connPool accounting and the aggressife close. Updates #17305 Updates #17394 Change-Id: I5fed283a179ff7c3e2be104836bbe58b05130cc7 Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 4 ++-- control/ts2021/client.go | 32 +++++++++++++++++++++++++++----- control/ts2021/conn.go | 14 ++++++++++++-- util/set/handle.go | 16 ++++++++++++---- 4 files changed, 53 insertions(+), 13 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index de577bea42b9a..482affe33095d 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -64,7 +64,7 @@ import ( // Direct is the client that connects to a tailcontrol server for a node. type Direct struct { - httpc *http.Client // HTTP client used to talk to tailcontrol + httpc *http.Client // HTTP client used to do TLS requests to control (just https://controlplane.tailscale.com/key?v=123) interceptedDial *atomic.Bool // if non-nil, pointer to bool whether ScreenTime intercepted our dial dialer *tsdial.Dialer dnsCache *dnscache.Resolver @@ -97,7 +97,7 @@ type Direct struct { serverNoiseKey key.MachinePublic sfGroup singleflight.Group[struct{}, *ts2021.Client] // protects noiseClient creation. 
- noiseClient *ts2021.Client + noiseClient *ts2021.Client // also protected by mu persist persist.PersistView authKey string diff --git a/control/ts2021/client.go b/control/ts2021/client.go index 9a9a3ded86944..e0b82b89c9a6e 100644 --- a/control/ts2021/client.go +++ b/control/ts2021/client.go @@ -28,6 +28,8 @@ import ( "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/mak" + "tailscale.com/util/set" ) // Client provides a http.Client to connect to tailcontrol over @@ -44,8 +46,9 @@ type Client struct { httpsPort string // the fallback Noise-over-https port or empty if none // mu protects the following - mu sync.Mutex - closed bool + mu sync.Mutex + closed bool + connPool set.HandleSet[*Conn] // all live connections } // ClientOpts contains options for the [NewClient] function. All fields are @@ -175,9 +178,15 @@ func NewClient(opts ClientOpts) (*Client, error) { // It is a no-op and returns nil if the connection is already closed. func (nc *Client) Close() error { nc.mu.Lock() - defer nc.mu.Unlock() + live := nc.connPool nc.closed = true + nc.mu.Unlock() + + for _, c := range live { + c.Close() + } nc.Client.CloseIdleConnections() + return nil } @@ -249,18 +258,31 @@ func (nc *Client) dial(ctx context.Context) (*Conn, error) { return nil, err } - ncc := NewConn(clientConn.Conn) - nc.mu.Lock() + + handle := set.NewHandle() + ncc := NewConn(clientConn.Conn, func() { nc.noteConnClosed(handle) }) + mak.Set(&nc.connPool, handle, ncc) + if nc.closed { nc.mu.Unlock() ncc.Close() // Needs to be called without holding the lock. return nil, errors.New("noise client closed") } + defer nc.mu.Unlock() return ncc, nil } +// noteConnClosed notes that the *Conn with the given handle has closed and +// should be removed from the live connPool (which is usually of size 0 or 1, +// except perhaps briefly 2 during a network failure and reconnect). 
+func (nc *Client) noteConnClosed(handle set.Handle) { + nc.mu.Lock() + defer nc.mu.Unlock() + nc.connPool.Delete(handle) +} + // post does a POST to the control server at the given path, JSON-encoding body. // The provided nodeKey is an optional load balancing hint. func (nc *Client) Post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { diff --git a/control/ts2021/conn.go b/control/ts2021/conn.go index ecf184d3c3819..52d663272a8c6 100644 --- a/control/ts2021/conn.go +++ b/control/ts2021/conn.go @@ -31,6 +31,7 @@ import ( type Conn struct { *controlbase.Conn + onClose func() // or nil readHeaderOnce sync.Once // guards init of reader field reader io.Reader // (effectively Conn.Reader after header) earlyPayloadReady chan struct{} // closed after earlyPayload is set (including set to nil) @@ -44,11 +45,12 @@ type Conn struct { // http2.ClientConn will be created that reads from the returned Conn. // // connID should be a unique ID for this connection. When the Conn is closed, -// the onClose function will be called with the connID if it is non-nil. -func NewConn(conn *controlbase.Conn) *Conn { +// the onClose function will be called if it is non-nil. +func NewConn(conn *controlbase.Conn, onClose func()) *Conn { return &Conn{ Conn: conn, earlyPayloadReady: make(chan struct{}), + onClose: sync.OnceFunc(onClose), } } @@ -103,6 +105,14 @@ func (c *Conn) Read(p []byte) (n int, err error) { return c.reader.Read(p) } +// Close closes the connection. +func (c *Conn) Close() error { + if c.onClose != nil { + defer c.onClose() + } + return c.Conn.Close() +} + // readHeader reads the optional "early payload" from the server that arrives // after the Noise handshake but before the HTTP/2 session begins. 
// diff --git a/util/set/handle.go b/util/set/handle.go index 471ceeba2d523..9c6b6dab0549b 100644 --- a/util/set/handle.go +++ b/util/set/handle.go @@ -9,20 +9,28 @@ package set type HandleSet[T any] map[Handle]T // Handle is an opaque comparable value that's used as the map key in a -// HandleSet. The only way to get one is to call HandleSet.Add. +// HandleSet. type Handle struct { v *byte } +// NewHandle returns a new handle value. +func NewHandle() Handle { + return Handle{new(byte)} +} + // Add adds the element (map value) e to the set. // -// It returns the handle (map key) with which e can be removed, using a map -// delete. +// It returns a new handle (map key) with which e can be removed, using a map +// delete or the [HandleSet.Delete] method. func (s *HandleSet[T]) Add(e T) Handle { - h := Handle{new(byte)} + h := NewHandle() if *s == nil { *s = make(HandleSet[T]) } (*s)[h] = e return h } + +// Delete removes the element with handle h from the set. +func (s HandleSet[T]) Delete(h Handle) { delete(s, h) } From 304dabce17cbde7698568c8144159c2b4f8ad9b1 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 3 Oct 2025 14:08:50 +0100 Subject: [PATCH 0502/1093] ipn/ipnauth: fix a null pointer panic in GetConnIdentity When running integration tests on macOS, we get a panic from a nil pointer dereference when calling `ci.creds.PID()`. This panic occurs because the `ci.creds != nil` check is insufficient after a recent refactoring (c45f881) that changed `ci.creds` from a pointer to the `PeerCreds` interface. Now `ci.creds` always compares as non-nil, so we enter this block even when the underlying value is nil. The integration tests fail on macOS when `peercred.Get()` returns the error `unix.GetsockoptInt: socket is not connected`. This error isn't new, and the previous code was ignoring it correctly. 
Since we trust that `peercred` returns either a usable value or an error, checking for a nil error is a sufficient and correct gate to prevent the method call and avoid the panic. Fixes #17421 Signed-off-by: Alex Chan --- ipn/ipnauth/ipnauth_unix_creds.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ipn/ipnauth/ipnauth_unix_creds.go b/ipn/ipnauth/ipnauth_unix_creds.go index 8ce2ac8a4bc68..89a9ceaa99388 100644 --- a/ipn/ipnauth/ipnauth_unix_creds.go +++ b/ipn/ipnauth/ipnauth_unix_creds.go @@ -18,12 +18,13 @@ import ( func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { ci = &ConnIdentity{conn: c, notWindows: true} _, ci.isUnixSock = c.(*net.UnixConn) - if ci.creds, err = peercred.Get(c); ci.creds != nil { + if creds, err := peercred.Get(c); err == nil { + ci.creds = creds ci.pid, _ = ci.creds.PID() } else if err == peercred.ErrNotImplemented { // peercred.Get is not implemented on this OS (such as OpenBSD) // Just leave creds as nil, as documented. - } else if err != nil { + } else { return nil, err } return ci, nil From 8d4ea55cc126a0ca3f7aacb78bc9cdd1b3924d48 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Fri, 3 Oct 2025 17:19:38 +0100 Subject: [PATCH 0503/1093] cmd/k8s-proxy: switching to using ipn/store/kubestore (#17402) kubestore init function has now been moved to a more explicit path of ipn/store/kubestore meaning we can now avoid the generic import of feature/condregister. 
Updates #12614 Signed-off-by: chaosinthecrd --- cmd/k8s-proxy/k8s-proxy.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 57a2632e2080c..9b2bb67494659 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -31,10 +31,12 @@ import ( "k8s.io/utils/strings/slices" "tailscale.com/client/local" "tailscale.com/cmd/k8s-proxy/internal/config" - _ "tailscale.com/feature/condregister" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store" + + // we need to import this package so that the `kube:` ipn store gets registered + _ "tailscale.com/ipn/store/kubestore" apiproxy "tailscale.com/k8s-operator/api-proxy" "tailscale.com/kube/certs" healthz "tailscale.com/kube/health" From 59a39841c371ff03f8a52b7d7a6b0b2207b83d4f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 3 Oct 2025 12:48:22 +0100 Subject: [PATCH 0504/1093] tstest/integration: mark TestClientSideJailing as flaky Updates #17419 Signed-off-by: Alex Chan --- tstest/integration/integration_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index c274c31a9060b..481de57fd124b 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -828,6 +828,7 @@ func TestOneNodeUpWindowsStyle(t *testing.T) { // jailed node cannot initiate connections to the other node however the other // node can initiate connections to the jailed node. func TestClientSideJailing(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17419") tstest.Shard(t) tstest.Parallel(t) env := NewTestEnv(t) From f42be719de9ef38d1dc22ea48f590a01a227bfe5 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 2 Oct 2025 22:04:12 -0700 Subject: [PATCH 0505/1093] all: use buildfeature constants in a few more places Saves 21 KB. 
Updates #12614 Change-Id: I0cd3e735937b0f5c0fcc9f09a24476b1c4ac9a15 Signed-off-by: Brad Fitzpatrick --- cmd/stund/depaware.txt | 2 +- envknob/envknob.go | 18 +++++++++++-- ipn/ipnauth/ipnauth.go | 8 ++++++ ipn/ipnauth/policy.go | 4 +++ ipn/ipnlocal/local.go | 60 ++++++++++++++++++++++++------------------ 5 files changed, 63 insertions(+), 29 deletions(-) diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index a5e4b9ba36ecb..5eadfc0d15bd5 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -51,7 +51,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com from tailscale.com/version tailscale.com/envknob from tailscale.com/tsweb+ tailscale.com/feature from tailscale.com/tsweb - tailscale.com/feature/buildfeatures from tailscale.com/feature + tailscale.com/feature/buildfeatures from tailscale.com/feature+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/net/stunserver+ tailscale.com/net/netaddr from tailscale.com/net/tsaddr diff --git a/envknob/envknob.go b/envknob/envknob.go index e581eb27e11cb..9dea8f74d15df 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -32,6 +32,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/kube/kubetypes" "tailscale.com/types/opt" "tailscale.com/version" @@ -463,7 +464,12 @@ var allowRemoteUpdate = RegisterBool("TS_ALLOW_ADMIN_CONSOLE_REMOTE_UPDATE") // AllowsRemoteUpdate reports whether this node has opted-in to letting the // Tailscale control plane initiate a Tailscale update (e.g. on behalf of an // admin on the admin console). -func AllowsRemoteUpdate() bool { return allowRemoteUpdate() } +func AllowsRemoteUpdate() bool { + if !buildfeatures.HasClientUpdate { + return false + } + return allowRemoteUpdate() +} // SetNoLogsNoSupport enables no-logs-no-support mode. 
func SetNoLogsNoSupport() { @@ -474,6 +480,9 @@ func SetNoLogsNoSupport() { var notInInit atomic.Bool func assertNotInInit() { + if !buildfeatures.HasDebug { + return + } if notInInit.Load() { return } @@ -533,6 +542,11 @@ func ApplyDiskConfigError() error { return applyDiskConfigErr } // for App Store builds // - /etc/tailscale/tailscaled-env.txt for tailscaled-on-macOS (homebrew, etc) func ApplyDiskConfig() (err error) { + if runtime.GOOS == "linux" && !(buildfeatures.HasDebug || buildfeatures.HasSynology) { + // This function does nothing on Linux, unless you're + // using TS_DEBUG_ENV_FILE or are on Synology. + return nil + } var f *os.File defer func() { if err != nil { @@ -593,7 +607,7 @@ func getPlatformEnvFiles() []string { filepath.Join(os.Getenv("ProgramData"), "Tailscale", "tailscaled-env.txt"), } case "linux": - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { return []string{"/etc/tailscale/tailscaled-env.txt"} } case "darwin": diff --git a/ipn/ipnauth/ipnauth.go b/ipn/ipnauth/ipnauth.go index 1395a39ae2fbd..497f30f8c198e 100644 --- a/ipn/ipnauth/ipnauth.go +++ b/ipn/ipnauth/ipnauth.go @@ -15,6 +15,7 @@ import ( "strconv" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/safesocket" "tailscale.com/types/logger" @@ -77,6 +78,13 @@ type ConnIdentity struct { // It's suitable for passing to LookupUserFromID (os/user.LookupId) on any // operating system. func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID { + if !buildfeatures.HasDebug && runtime.GOOS != "windows" { + // This function is only implemented on non-Windows for simulating + // Windows in tests. But that test (per comments below) is broken + // anyway. So disable this testing path in non-debug builds + // and just do the thing that optimizes away. 
+ return "" + } if envknob.GOOS() != "windows" { return "" } diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index 42366dbd94990..eeee324352387 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -8,6 +8,7 @@ import ( "fmt" "tailscale.com/client/tailscale/apitype" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/util/syspolicy/pkey" @@ -51,6 +52,9 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, // TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] // and corp to this package. func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error { + if !buildfeatures.HasSystemPolicy { + return nil + } if alwaysOn, _ := policyclient.Get().GetBoolean(pkey.AlwaysOn, false); !alwaysOn { return nil } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 199ee724853cd..7488a06a9f783 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1135,7 +1135,7 @@ func (b *LocalBackend) Shutdown() { } func (b *LocalBackend) awaitNoGoroutinesInTest() { - if !testenv.InTest() { + if !buildfeatures.HasDebug || !testenv.InTest() { return } ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second) @@ -1836,6 +1836,9 @@ var preferencePolicies = []preferencePolicyInfo{ // // b.mu must be held. 
func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { + if !buildfeatures.HasSystemPolicy { + return false + } if controlURL, err := b.polc.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true @@ -5328,7 +5331,7 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC NetfilterKind: netfilterKind, } - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { // Issue 1995: we don't use iptables on Synology. rs.NetfilterMode = preftype.NetfilterOff } @@ -5339,7 +5342,7 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC // likely to break some functionality, but if the user expressed a // preference for routing remotely, we want to avoid leaking // traffic at the expense of functionality. - if prefs.ExitNodeID() != "" || prefs.ExitNodeIP().IsValid() { + if buildfeatures.HasUseExitNode && (prefs.ExitNodeID() != "" || prefs.ExitNodeIP().IsValid()) { var default4, default6 bool for _, route := range rs.Routes { switch route { @@ -5411,7 +5414,7 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.RoutableIPs = prefs.AdvertiseRoutes().AsSlice() hi.RequestTags = prefs.AdvertiseTags().AsSlice() hi.ShieldsUp = prefs.ShieldsUp() - hi.AllowsUpdate = envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true) + hi.AllowsUpdate = buildfeatures.HasClientUpdate && (envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true)) b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) @@ -6076,18 +6079,22 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.health.SetControlHealth(nil) } - if nm.HasCap(tailcfg.NodeAttrLinuxMustUseIPTables) { - b.capForcedNetfilter = "iptables" - } else if nm.HasCap(tailcfg.NodeAttrLinuxMustUseNfTables) { - 
b.capForcedNetfilter = "nftables" - } else { - b.capForcedNetfilter = "" // empty string means client can auto-detect + if runtime.GOOS == "linux" && buildfeatures.HasOSRouter { + if nm.HasCap(tailcfg.NodeAttrLinuxMustUseIPTables) { + b.capForcedNetfilter = "iptables" + } else if nm.HasCap(tailcfg.NodeAttrLinuxMustUseNfTables) { + b.capForcedNetfilter = "nftables" + } else { + b.capForcedNetfilter = "" // empty string means client can auto-detect + } } b.MagicConn().SetSilentDisco(b.ControlKnobs().SilentDisco.Load()) b.MagicConn().SetProbeUDPLifetime(b.ControlKnobs().ProbeUDPLifetime.Load()) - b.setDebugLogsByCapabilityLocked(nm) + if buildfeatures.HasDebug { + b.setDebugLogsByCapabilityLocked(nm) + } // See the netns package for documentation on what this capability does. netns.SetBindToInterfaceByRoute(nm.HasCap(tailcfg.CapabilityBindToInterfaceByRoute)) @@ -6104,25 +6111,26 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { } } - if nm == nil { - // If there is no netmap, the client is going into a "turned off" - // state so reset the metrics. - b.metrics.approvedRoutes.Set(0) - return - } - - if nm.SelfNode.Valid() { - var approved float64 - for _, route := range nm.SelfNode.AllowedIPs().All() { - if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { - approved++ + if buildfeatures.HasAdvertiseRoutes { + if nm == nil { + // If there is no netmap, the client is going into a "turned off" + // state so reset the metrics. 
+ b.metrics.approvedRoutes.Set(0) + } else if nm.SelfNode.Valid() { + var approved float64 + for _, route := range nm.SelfNode.AllowedIPs().All() { + if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { + approved++ + } } + b.metrics.approvedRoutes.Set(approved) } - b.metrics.approvedRoutes.Set(approved) } - if f, ok := hookSetNetMapLockedDrive.GetOk(); ok { - f(b, nm) + if buildfeatures.HasDrive && nm != nil { + if f, ok := hookSetNetMapLockedDrive.GetOk(); ok { + f(b, nm) + } } } From 9c3aec58badd142c2f8442aaaf38a7ae167ecae0 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Fri, 3 Oct 2025 16:29:50 -0700 Subject: [PATCH 0506/1093] ipn/ipnlocal: remove junk from suggestExitNodeUsingTrafficSteering (#17436) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch removes some code that didn’t get removed before merging the changes in #16580. Updates #cleanup Updates #16551 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7488a06a9f783..8cdb498766909 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7397,7 +7397,6 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcf panic("missing traffic-steering capability") } - var force tailcfg.NodeView nodes := nb.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool { if !p.Valid() { return false @@ -7416,9 +7415,6 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcf } return true }) - if force.Valid() { - nodes = append(nodes[:0], force) - } scores := make(map[tailcfg.NodeID]int, len(nodes)) score := func(n tailcfg.NodeView) int { From 447cbdd1d0515858acf2fee0c20e8bbc7ac6359e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Oct 2025 11:05:37 -0700 Subject: [PATCH 0507/1093] health: make it omittable Saves 86 KB. 
And stop depending on expvar and usermetrics when disabled, in prep to removing all the expvar/metrics/tsweb stuff. Updates #12614 Change-Id: I35d2479ddd1d39b615bab32b1fa940ae8cbf9b11 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 6 +- cmd/tailscaled/depaware-minbox.txt | 6 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- .../buildfeatures/feature_health_disabled.go | 13 + .../buildfeatures/feature_health_enabled.go | 13 + feature/featuretags/featuretags.go | 1 + health/health.go | 86 ++-- health/health_test.go | 13 +- health/state.go | 3 +- health/usermetrics.go | 52 +++ health/usermetrics_omit.go | 8 + health/warnings.go | 388 ++++++++++-------- tsnet/depaware.txt | 2 +- wgengine/magicsock/magicsock.go | 2 +- 17 files changed, 370 insertions(+), 231 deletions(-) create mode 100644 feature/buildfeatures/feature_health_disabled.go create mode 100644 feature/buildfeatures/feature_health_enabled.go create mode 100644 health/usermetrics.go create mode 100644 health/usermetrics_omit.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index be9ac3a089ee8..97eebf1d5023e 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -741,7 +741,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 7b32fc2b45f52..81d5f3e0d8d02 100644 --- 
a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -112,7 +112,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index ba35ecd4a19ef..25594b124bab7 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -76,7 +76,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -179,7 +179,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ - tailscale.com/util/usermetric from tailscale.com/health+ + tailscale.com/util/usermetric from tailscale.com/ipn/ipnlocal+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/util/winutil from tailscale.com/ipn/ipnauth tailscale.com/util/zstdframe from 
tailscale.com/control/controlclient @@ -324,7 +324,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/json from expvar+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/health+ + expvar from tailscale.com/metrics+ flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index e98c0da488e0b..3829737e634bc 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -99,7 +99,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -206,7 +206,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ - tailscale.com/util/usermetric from tailscale.com/health+ + tailscale.com/util/usermetric from tailscale.com/ipn/ipnlocal+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/util/winutil from tailscale.com/ipn/ipnauth tailscale.com/util/zstdframe from tailscale.com/control/controlclient @@ -353,7 +353,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/json from expvar+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/health+ + expvar 
from tailscale.com/metrics+ flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 21e333af733d0..5f40d9417d6fc 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -321,7 +321,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index dfd338410eaa5..2563cb2fa9b38 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -169,7 +169,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock diff --git a/feature/buildfeatures/feature_health_disabled.go b/feature/buildfeatures/feature_health_disabled.go new file mode 100644 index 0000000000000..2f2bcf240a455 --- /dev/null +++ b/feature/buildfeatures/feature_health_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_health + +package buildfeatures + +// HasHealth is whether the binary was built with support for modular feature "Health checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_health" build tag. +// It's a const so it can be used for dead code elimination. +const HasHealth = false diff --git a/feature/buildfeatures/feature_health_enabled.go b/feature/buildfeatures/feature_health_enabled.go new file mode 100644 index 0000000000000..00ce3684eb6db --- /dev/null +++ b/feature/buildfeatures/feature_health_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_health + +package buildfeatures + +// HasHealth is whether the binary was built with support for modular feature "Health checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_health" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasHealth = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index db7f2d2728cb8..041b68ec5610a 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -150,6 +150,7 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Generic Receive Offload support (performance)", Deps: []FeatureTag{"netstack"}, }, + "health": {Sym: "Health", Desc: "Health checking support"}, "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, diff --git a/health/health.go b/health/health.go index c41256614c5e4..cbfa599c56eaf 100644 --- a/health/health.go +++ b/health/health.go @@ -8,7 +8,6 @@ package health import ( "context" "errors" - "expvar" "fmt" "maps" "net/http" @@ -20,14 +19,13 @@ import ( "time" "tailscale.com/envknob" - "tailscale.com/metrics" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/opt" "tailscale.com/util/cibuild" "tailscale.com/util/eventbus" "tailscale.com/util/mak" - "tailscale.com/util/usermetric" "tailscale.com/version" ) @@ -132,12 +130,15 @@ type Tracker struct { lastLoginErr error localLogConfigErr error tlsConnectionErrors map[string]error // map[ServerName]error - metricHealthMessage *metrics.MultiLabelMap[metricHealthMessageLabel] + metricHealthMessage any // nil or *metrics.MultiLabelMap[metricHealthMessageLabel] } // NewTracker contructs a new [Tracker] and attaches the given eventbus. // NewTracker will panic is no eventbus is given. func NewTracker(bus *eventbus.Bus) *Tracker { + if !buildfeatures.HasHealth { + return &Tracker{} + } if bus == nil { panic("no eventbus set") } @@ -221,6 +222,9 @@ const legacyErrorArgKey = "LegacyError" // temporarily (2024-06-14) while we migrate the old health infrastructure based // on Subsystems to the new Warnables architecture. 
func (s Subsystem) Warnable() *Warnable { + if !buildfeatures.HasHealth { + return &noopWarnable + } w, ok := subsystemsWarnables[s] if !ok { panic(fmt.Sprintf("health: no Warnable for Subsystem %q", s)) @@ -230,10 +234,15 @@ func (s Subsystem) Warnable() *Warnable { var registeredWarnables = map[WarnableCode]*Warnable{} +var noopWarnable Warnable + // Register registers a new Warnable with the health package and returns it. // Register panics if the Warnable was already registered, because Warnables // should be unique across the program. func Register(w *Warnable) *Warnable { + if !buildfeatures.HasHealth { + return &noopWarnable + } if registeredWarnables[w.Code] != nil { panic(fmt.Sprintf("health: a Warnable with code %q was already registered", w.Code)) } @@ -245,6 +254,9 @@ func Register(w *Warnable) *Warnable { // unregister removes a Warnable from the health package. It should only be used // for testing purposes. func unregister(w *Warnable) { + if !buildfeatures.HasHealth { + return + } if registeredWarnables[w.Code] == nil { panic(fmt.Sprintf("health: attempting to unregister Warnable %q that was not registered", w.Code)) } @@ -317,6 +329,9 @@ func StaticMessage(s string) func(Args) string { // some lost Tracker plumbing, we want to capture stack trace // samples when it occurs. func (t *Tracker) nil() bool { + if !buildfeatures.HasHealth { + return true + } if t != nil { return false } @@ -385,37 +400,10 @@ func (w *Warnable) IsVisible(ws *warningState, clockNow func() time.Time) bool { return clockNow().Sub(ws.BrokenSince) >= w.TimeToVisible } -// SetMetricsRegistry sets up the metrics for the Tracker. It takes -// a usermetric.Registry and registers the metrics there. 
-func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { - if reg == nil || t.metricHealthMessage != nil { - return - } - - t.metricHealthMessage = usermetric.NewMultiLabelMapWithRegistry[metricHealthMessageLabel]( - reg, - "tailscaled_health_messages", - "gauge", - "Number of health messages broken down by type.", - ) - - t.metricHealthMessage.Set(metricHealthMessageLabel{ - Type: MetricLabelWarning, - }, expvar.Func(func() any { - if t.nil() { - return 0 - } - t.mu.Lock() - defer t.mu.Unlock() - t.updateBuiltinWarnablesLocked() - return int64(len(t.stringsLocked())) - })) -} - // IsUnhealthy reports whether the current state is unhealthy because the given // warnable is set. func (t *Tracker) IsUnhealthy(w *Warnable) bool { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return false } t.mu.Lock() @@ -429,7 +417,7 @@ func (t *Tracker) IsUnhealthy(w *Warnable) bool { // SetUnhealthy takes ownership of args. The args can be nil if no additional information is // needed for the unhealthy state. func (t *Tracker) SetUnhealthy(w *Warnable, args Args) { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return } t.mu.Lock() @@ -438,7 +426,7 @@ func (t *Tracker) SetUnhealthy(w *Warnable, args Args) { } func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { - if w == nil { + if !buildfeatures.HasHealth || w == nil { return } @@ -489,7 +477,7 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { // SetHealthy removes any warningState for the given Warnable. 
func (t *Tracker) SetHealthy(w *Warnable) { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return } t.mu.Lock() @@ -498,7 +486,7 @@ func (t *Tracker) SetHealthy(w *Warnable) { } func (t *Tracker) setHealthyLocked(w *Warnable) { - if t.warnableVal[w] == nil { + if !buildfeatures.HasHealth || t.warnableVal[w] == nil { // Nothing to remove return } @@ -1009,7 +997,7 @@ func (t *Tracker) OverallError() error { // each Warning to show a localized version of them instead. This function is // here for legacy compatibility purposes and is deprecated. func (t *Tracker) Strings() []string { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return nil } t.mu.Lock() @@ -1018,6 +1006,9 @@ func (t *Tracker) Strings() []string { } func (t *Tracker) stringsLocked() []string { + if !buildfeatures.HasHealth { + return nil + } result := []string{} for w, ws := range t.warnableVal { if !w.IsVisible(ws, t.now) { @@ -1078,6 +1069,9 @@ var fakeErrForTesting = envknob.RegisterString("TS_DEBUG_FAKE_HEALTH_ERROR") // updateBuiltinWarnablesLocked performs a number of checks on the state of the backend, // and adds/removes Warnings from the Tracker as needed. func (t *Tracker) updateBuiltinWarnablesLocked() { + if !buildfeatures.HasHealth { + return + } t.updateWarmingUpWarnableLocked() if w, show := t.showUpdateWarnable(); show { @@ -1316,11 +1310,17 @@ func (s *ReceiveFuncStats) Name() string { } func (s *ReceiveFuncStats) Enter() { + if !buildfeatures.HasHealth { + return + } s.numCalls.Add(1) s.inCall.Store(true) } func (s *ReceiveFuncStats) Exit() { + if !buildfeatures.HasHealth { + return + } s.inCall.Store(false) } @@ -1329,7 +1329,7 @@ func (s *ReceiveFuncStats) Exit() { // // If t is nil, it returns nil. 
func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { - if t == nil { + if !buildfeatures.HasHealth || t == nil { return nil } t.initOnce.Do(t.doOnceInit) @@ -1337,6 +1337,9 @@ func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { } func (t *Tracker) doOnceInit() { + if !buildfeatures.HasHealth { + return + } for i := range t.MagicSockReceiveFuncs { f := &t.MagicSockReceiveFuncs[i] f.name = (ReceiveFunc(i)).String() @@ -1385,10 +1388,3 @@ func (t *Tracker) LastNoiseDialWasRecent() bool { t.lastNoiseDial = now return dur < 2*time.Minute } - -const MetricLabelWarning = "warning" - -type metricHealthMessageLabel struct { - // TODO: break down by warnable.severity as well? - Type string -} diff --git a/health/health_test.go b/health/health_test.go index 3ada377556909..3b5ebbb38d22a 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/metrics" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstime" @@ -497,7 +498,11 @@ func TestHealthMetric(t *testing.T) { tr.applyUpdates = tt.apply tr.latestVersion = tt.cv tr.SetMetricsRegistry(&usermetric.Registry{}) - if val := tr.metricHealthMessage.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { + m, ok := tr.metricHealthMessage.(*metrics.MultiLabelMap[metricHealthMessageLabel]) + if !ok { + t.Fatal("metricHealthMessage has wrong type or is nil") + } + if val := m.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { t.Fatalf("metric value: %q, want: %q", val, strconv.Itoa(tt.wantMetricCount)) } for _, w := range tr.CurrentState().Warnings { @@ -634,7 +639,11 @@ func TestControlHealth(t *testing.T) { var r usermetric.Registry ht.SetMetricsRegistry(&r) - got := ht.metricHealthMessage.Get(metricHealthMessageLabel{ + m, ok := 
ht.metricHealthMessage.(*metrics.MultiLabelMap[metricHealthMessageLabel]) + if !ok { + t.Fatal("metricHealthMessage has wrong type or is nil") + } + got := m.Get(metricHealthMessageLabel{ Type: MetricLabelWarning, }).String() want := strconv.Itoa( diff --git a/health/state.go b/health/state.go index 2efff92b14774..e6d937b6a8f02 100644 --- a/health/state.go +++ b/health/state.go @@ -9,6 +9,7 @@ import ( "encoding/json" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" ) @@ -120,7 +121,7 @@ func (w *Warnable) unhealthyState(ws *warningState) *UnhealthyState { // The returned State is a snapshot of shared memory, and the caller should not // mutate the returned value. func (t *Tracker) CurrentState() *State { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return &State{} } diff --git a/health/usermetrics.go b/health/usermetrics.go new file mode 100644 index 0000000000000..110c57b57971c --- /dev/null +++ b/health/usermetrics.go @@ -0,0 +1,52 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_health && !ts_omit_usermetrics + +package health + +import ( + "expvar" + + "tailscale.com/feature/buildfeatures" + "tailscale.com/util/usermetric" +) + +const MetricLabelWarning = "warning" + +type metricHealthMessageLabel struct { + // TODO: break down by warnable.severity as well? + Type string +} + +// SetMetricsRegistry sets up the metrics for the Tracker. It takes +// a usermetric.Registry and registers the metrics there. 
+func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { + if !buildfeatures.HasHealth { + return + } + + if reg == nil || t.metricHealthMessage != nil { + return + } + + m := usermetric.NewMultiLabelMapWithRegistry[metricHealthMessageLabel]( + reg, + "tailscaled_health_messages", + "gauge", + "Number of health messages broken down by type.", + ) + + m.Set(metricHealthMessageLabel{ + Type: MetricLabelWarning, + }, expvar.Func(func() any { + if t.nil() { + return 0 + } + t.mu.Lock() + defer t.mu.Unlock() + t.updateBuiltinWarnablesLocked() + return int64(len(t.stringsLocked())) + })) + t.metricHealthMessage = m +} diff --git a/health/usermetrics_omit.go b/health/usermetrics_omit.go new file mode 100644 index 0000000000000..9d5e35b861681 --- /dev/null +++ b/health/usermetrics_omit.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_health || ts_omit_usermetrics + +package health + +func (t *Tracker) SetMetricsRegistry(any) {} diff --git a/health/warnings.go b/health/warnings.go index 3997e66b39ad0..26577130d9f1c 100644 --- a/health/warnings.go +++ b/health/warnings.go @@ -8,234 +8,278 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/version" ) +func condRegister(f func() *Warnable) *Warnable { + if !buildfeatures.HasHealth { + return nil + } + return f() +} + /** This file contains definitions for the Warnables maintained within this `health` package. */ // updateAvailableWarnable is a Warnable that warns the user that an update is available. 
-var updateAvailableWarnable = Register(&Warnable{ - Code: "update-available", - Title: "Update available", - Severity: SeverityLow, - Text: func(args Args) string { - if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { - return fmt.Sprintf("An update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } else { - return fmt.Sprintf("An update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } - }, +var updateAvailableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "update-available", + Title: "Update available", + Severity: SeverityLow, + Text: func(args Args) string { + if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { + return fmt.Sprintf("An update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } else { + return fmt.Sprintf("An update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } + }, + } }) // securityUpdateAvailableWarnable is a Warnable that warns the user that an important security update is available. -var securityUpdateAvailableWarnable = Register(&Warnable{ - Code: "security-update-available", - Title: "Security update available", - Severity: SeverityMedium, - Text: func(args Args) string { - if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { - return fmt.Sprintf("A security update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } else { - return fmt.Sprintf("A security update from version %s to %s is available. 
Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } - }, +var securityUpdateAvailableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "security-update-available", + Title: "Security update available", + Severity: SeverityMedium, + Text: func(args Args) string { + if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { + return fmt.Sprintf("A security update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } else { + return fmt.Sprintf("A security update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } + }, + } }) // unstableWarnable is a Warnable that warns the user that they are using an unstable version of Tailscale // so they won't be surprised by all the issues that may arise. -var unstableWarnable = Register(&Warnable{ - Code: "is-using-unstable-version", - Title: "Using an unstable version", - Severity: SeverityLow, - Text: StaticMessage("This is an unstable version of Tailscale meant for testing and development purposes. Please report any issues to Tailscale."), +var unstableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "is-using-unstable-version", + Title: "Using an unstable version", + Severity: SeverityLow, + Text: StaticMessage("This is an unstable version of Tailscale meant for testing and development purposes. Please report any issues to Tailscale."), + } }) // NetworkStatusWarnable is a Warnable that warns the user that the network is down. -var NetworkStatusWarnable = Register(&Warnable{ - Code: "network-status", - Title: "Network down", - Severity: SeverityMedium, - Text: StaticMessage("Tailscale cannot connect because the network is down. 
Check your Internet connection."), - ImpactsConnectivity: true, - TimeToVisible: 5 * time.Second, +var NetworkStatusWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "network-status", + Title: "Network down", + Severity: SeverityMedium, + Text: StaticMessage("Tailscale cannot connect because the network is down. Check your Internet connection."), + ImpactsConnectivity: true, + TimeToVisible: 5 * time.Second, + } }) // IPNStateWarnable is a Warnable that warns the user that Tailscale is stopped. -var IPNStateWarnable = Register(&Warnable{ - Code: "wantrunning-false", - Title: "Tailscale off", - Severity: SeverityLow, - Text: StaticMessage("Tailscale is stopped."), +var IPNStateWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "wantrunning-false", + Title: "Tailscale off", + Severity: SeverityLow, + Text: StaticMessage("Tailscale is stopped."), + } }) // localLogWarnable is a Warnable that warns the user that the local log is misconfigured. -var localLogWarnable = Register(&Warnable{ - Code: "local-log-config-error", - Title: "Local log misconfiguration", - Severity: SeverityLow, - Text: func(args Args) string { - return fmt.Sprintf("The local log is misconfigured: %v", args[ArgError]) - }, +var localLogWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "local-log-config-error", + Title: "Local log misconfiguration", + Severity: SeverityLow, + Text: func(args Args) string { + return fmt.Sprintf("The local log is misconfigured: %v", args[ArgError]) + }, + } }) // LoginStateWarnable is a Warnable that warns the user that they are logged out, // and provides the last login error if available. -var LoginStateWarnable = Register(&Warnable{ - Code: "login-state", - Title: "Logged out", - Severity: SeverityMedium, - Text: func(args Args) string { - if args[ArgError] != "" { - return fmt.Sprintf("You are logged out. The last login error was: %v", args[ArgError]) - } else { - return "You are logged out." 
- } - }, - DependsOn: []*Warnable{IPNStateWarnable}, +var LoginStateWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "login-state", + Title: "Logged out", + Severity: SeverityMedium, + Text: func(args Args) string { + if args[ArgError] != "" { + return fmt.Sprintf("You are logged out. The last login error was: %v", args[ArgError]) + } else { + return "You are logged out." + } + }, + DependsOn: []*Warnable{IPNStateWarnable}, + } }) // notInMapPollWarnable is a Warnable that warns the user that we are using a stale network map. -var notInMapPollWarnable = Register(&Warnable{ - Code: "not-in-map-poll", - Title: "Out of sync", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: StaticMessage("Unable to connect to the Tailscale coordination server to synchronize the state of your tailnet. Peer reachability might degrade over time."), - // 8 minutes reflects a maximum maintenance window for the coordination server. - TimeToVisible: 8 * time.Minute, +var notInMapPollWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "not-in-map-poll", + Title: "Out of sync", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: StaticMessage("Unable to connect to the Tailscale coordination server to synchronize the state of your tailnet. Peer reachability might degrade over time."), + // 8 minutes reflects a maximum maintenance window for the coordination server. + TimeToVisible: 8 * time.Minute, + } }) // noDERPHomeWarnable is a Warnable that warns the user that Tailscale doesn't have a home DERP. -var noDERPHomeWarnable = Register(&Warnable{ - Code: "no-derp-home", - Title: "No home relay server", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: StaticMessage("Tailscale could not connect to any relay server. 
Check your Internet connection."), - ImpactsConnectivity: true, - TimeToVisible: 10 * time.Second, +var noDERPHomeWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "no-derp-home", + Title: "No home relay server", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: StaticMessage("Tailscale could not connect to any relay server. Check your Internet connection."), + ImpactsConnectivity: true, + TimeToVisible: 10 * time.Second, + } }) // noDERPConnectionWarnable is a Warnable that warns the user that Tailscale couldn't connect to a specific DERP server. -var noDERPConnectionWarnable = Register(&Warnable{ - Code: "no-derp-connection", - Title: "Relay server unavailable", - Severity: SeverityMedium, - DependsOn: []*Warnable{ - NetworkStatusWarnable, - - // Technically noDERPConnectionWarnable could be used to warn about - // failure to connect to a specific DERP server (e.g. your home is derp1 - // but you're trying to connect to a peer's derp4 and are unable) but as - // of 2024-09-25 we only use this for connecting to your home DERP, so - // we depend on noDERPHomeWarnable which is the ability to figure out - // what your DERP home even is. - noDERPHomeWarnable, - }, - Text: func(args Args) string { - if n := args[ArgDERPRegionName]; n != "" { - return fmt.Sprintf("Tailscale could not connect to the '%s' relay server. Your Internet connection might be down, or the server might be temporarily unavailable.", n) - } else { - return fmt.Sprintf("Tailscale could not connect to the relay server with ID '%s'. 
Your Internet connection might be down, or the server might be temporarily unavailable.", args[ArgDERPRegionID]) - } - }, - ImpactsConnectivity: true, - TimeToVisible: 10 * time.Second, +var noDERPConnectionWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "no-derp-connection", + Title: "Relay server unavailable", + Severity: SeverityMedium, + DependsOn: []*Warnable{ + NetworkStatusWarnable, + + // Technically noDERPConnectionWarnable could be used to warn about + // failure to connect to a specific DERP server (e.g. your home is derp1 + // but you're trying to connect to a peer's derp4 and are unable) but as + // of 2024-09-25 we only use this for connecting to your home DERP, so + // we depend on noDERPHomeWarnable which is the ability to figure out + // what your DERP home even is. + noDERPHomeWarnable, + }, + Text: func(args Args) string { + if n := args[ArgDERPRegionName]; n != "" { + return fmt.Sprintf("Tailscale could not connect to the '%s' relay server. Your Internet connection might be down, or the server might be temporarily unavailable.", n) + } else { + return fmt.Sprintf("Tailscale could not connect to the relay server with ID '%s'. Your Internet connection might be down, or the server might be temporarily unavailable.", args[ArgDERPRegionID]) + } + }, + ImpactsConnectivity: true, + TimeToVisible: 10 * time.Second, + } }) // derpTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't // heard from the home DERP region for a while. 
-var derpTimeoutWarnable = Register(&Warnable{ - Code: "derp-timed-out", - Title: "Relay server timed out", - Severity: SeverityMedium, - DependsOn: []*Warnable{ - NetworkStatusWarnable, - noDERPConnectionWarnable, // don't warn about it being stalled if we're not connected - noDERPHomeWarnable, // same reason as noDERPConnectionWarnable's dependency - }, - Text: func(args Args) string { - if n := args[ArgDERPRegionName]; n != "" { - return fmt.Sprintf("Tailscale hasn't heard from the '%s' relay server in %v. The server might be temporarily unavailable, or your Internet connection might be down.", n, args[ArgDuration]) - } else { - return fmt.Sprintf("Tailscale hasn't heard from the home relay server (region ID '%v') in %v. The server might be temporarily unavailable, or your Internet connection might be down.", args[ArgDERPRegionID], args[ArgDuration]) - } - }, +var derpTimeoutWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "derp-timed-out", + Title: "Relay server timed out", + Severity: SeverityMedium, + DependsOn: []*Warnable{ + NetworkStatusWarnable, + noDERPConnectionWarnable, // don't warn about it being stalled if we're not connected + noDERPHomeWarnable, // same reason as noDERPConnectionWarnable's dependency + }, + Text: func(args Args) string { + if n := args[ArgDERPRegionName]; n != "" { + return fmt.Sprintf("Tailscale hasn't heard from the '%s' relay server in %v. The server might be temporarily unavailable, or your Internet connection might be down.", n, args[ArgDuration]) + } else { + return fmt.Sprintf("Tailscale hasn't heard from the home relay server (region ID '%v') in %v. The server might be temporarily unavailable, or your Internet connection might be down.", args[ArgDERPRegionID], args[ArgDuration]) + } + }, + } }) // derpRegionErrorWarnable is a Warnable that warns the user that a DERP region is reporting an issue. 
-var derpRegionErrorWarnable = Register(&Warnable{ - Code: "derp-region-error", - Title: "Relay server error", - Severity: SeverityLow, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("The relay server #%v is reporting an issue: %v", args[ArgDERPRegionID], args[ArgError]) - }, +var derpRegionErrorWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "derp-region-error", + Title: "Relay server error", + Severity: SeverityLow, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("The relay server #%v is reporting an issue: %v", args[ArgDERPRegionID], args[ArgError]) + }, + } }) // noUDP4BindWarnable is a Warnable that warns the user that Tailscale couldn't listen for incoming UDP connections. -var noUDP4BindWarnable = Register(&Warnable{ - Code: "no-udp4-bind", - Title: "NAT traversal setup failure", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: StaticMessage("Tailscale couldn't listen for incoming UDP connections."), - ImpactsConnectivity: true, +var noUDP4BindWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "no-udp4-bind", + Title: "NAT traversal setup failure", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: StaticMessage("Tailscale couldn't listen for incoming UDP connections."), + ImpactsConnectivity: true, + } }) // mapResponseTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't received a network map from the coordination server in a while. 
-var mapResponseTimeoutWarnable = Register(&Warnable{ - Code: "mapresponse-timeout", - Title: "Network map response timeout", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("Tailscale hasn't received a network map from the coordination server in %s.", args[ArgDuration]) - }, +var mapResponseTimeoutWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "mapresponse-timeout", + Title: "Network map response timeout", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("Tailscale hasn't received a network map from the coordination server in %s.", args[ArgDuration]) + }, + } }) // tlsConnectionFailedWarnable is a Warnable that warns the user that Tailscale could not establish an encrypted connection with a server. -var tlsConnectionFailedWarnable = Register(&Warnable{ - Code: "tls-connection-failed", - Title: "Encrypted connection failed", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("Tailscale could not establish an encrypted connection with '%q': %v", args[ArgServerName], args[ArgError]) - }, +var tlsConnectionFailedWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "tls-connection-failed", + Title: "Encrypted connection failed", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("Tailscale could not establish an encrypted connection with '%q': %v", args[ArgServerName], args[ArgError]) + }, + } }) // magicsockReceiveFuncWarnable is a Warnable that warns the user that one of the Magicsock functions is not running. 
-var magicsockReceiveFuncWarnable = Register(&Warnable{ - Code: "magicsock-receive-func-error", - Title: "MagicSock function not running", - Severity: SeverityMedium, - Text: func(args Args) string { - return fmt.Sprintf("The MagicSock function %s is not running. You might experience connectivity issues.", args[ArgMagicsockFunctionName]) - }, +var magicsockReceiveFuncWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "magicsock-receive-func-error", + Title: "MagicSock function not running", + Severity: SeverityMedium, + Text: func(args Args) string { + return fmt.Sprintf("The MagicSock function %s is not running. You might experience connectivity issues.", args[ArgMagicsockFunctionName]) + }, + } }) // testWarnable is a Warnable that is used within this package for testing purposes only. -var testWarnable = Register(&Warnable{ - Code: "test-warnable", - Title: "Test warnable", - Severity: SeverityLow, - Text: func(args Args) string { - return args[ArgError] - }, +var testWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "test-warnable", + Title: "Test warnable", + Severity: SeverityLow, + Text: func(args Args) string { + return args[ArgError] + }, + } }) // applyDiskConfigWarnable is a Warnable that warns the user that there was an error applying the envknob config stored on disk. 
-var applyDiskConfigWarnable = Register(&Warnable{ - Code: "apply-disk-config", - Title: "Could not apply configuration", - Severity: SeverityMedium, - Text: func(args Args) string { - return fmt.Sprintf("An error occurred applying the Tailscale envknob configuration stored on disk: %v", args[ArgError]) - }, +var applyDiskConfigWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "apply-disk-config", + Title: "Could not apply configuration", + Severity: SeverityMedium, + Text: func(args Args) string { + return fmt.Sprintf("An error occurred applying the Tailscale envknob configuration stored on disk: %v", args[ArgError]) + }, + } }) // warmingUpWarnableDuration is the duration for which the warmingUpWarnable is reported by the backend after the user @@ -245,9 +289,11 @@ const warmingUpWarnableDuration = 5 * time.Second // warmingUpWarnable is a Warnable that is reported by the backend when it is starting up, for a maximum time of // warmingUpWarnableDuration. The GUIs use the presence of this Warnable to prevent showing any other warnings until // the backend is fully started. -var warmingUpWarnable = Register(&Warnable{ - Code: "warming-up", - Title: "Tailscale is starting", - Severity: SeverityLow, - Text: StaticMessage("Tailscale is starting. Please wait."), +var warmingUpWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: "warming-up", + Title: "Tailscale is starting", + Severity: SeverityLow, + Text: StaticMessage("Tailscale is starting. 
Please wait."), + } }) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 739d0b33bf891..f8d7bf7a88c0f 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -165,7 +165,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/health+ + tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index e3cf249c55ebc..b6cb7b336636a 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1710,7 +1710,7 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu var epCache epAddrEndpointCache return func(buffs [][]byte, sizes []int, eps []conn.Endpoint) (_ int, retErr error) { - if healthItem != nil { + if buildfeatures.HasHealth && healthItem != nil { healthItem.Enter() defer healthItem.Exit() defer func() { From 141eb64d3fe2d00c47ca6a77372e84d265e15edd Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Oct 2025 13:31:49 -0700 Subject: [PATCH 0508/1093] wgengine/router/osrouter: fix data race in magicsock port update callback As found by @cmol in #17423. 
Updates #17423 Change-Id: I1492501f74ca7b57a8c5278ea6cb87a56a4086b9 Signed-off-by: Brad Fitzpatrick --- wgengine/router/osrouter/router_linux.go | 36 +++++++++++++----------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 1f825b917e44a..cf1a9f02716a5 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -86,8 +86,8 @@ type linuxRouter struct { cmd commandRunner nfr linuxfw.NetfilterRunner - magicsockPortV4 uint16 - magicsockPortV6 uint16 + magicsockPortV4 atomic.Uint32 // actually a uint16 + magicsockPortV6 atomic.Uint32 // actually a uint16 } func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { @@ -546,7 +546,7 @@ func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { } } - var magicsockPort *uint16 + var magicsockPort *atomic.Uint32 switch network { case "udp4": magicsockPort = &r.magicsockPortV4 @@ -566,27 +566,29 @@ func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { // set the port, we'll make the firewall rule when netfilter turns back on if r.netfilterMode == netfilterOff { - *magicsockPort = port + magicsockPort.Store(uint32(port)) return nil } - if *magicsockPort == port { + cur := magicsockPort.Load() + + if cur == uint32(port) { return nil } - if *magicsockPort != 0 { - if err := r.nfr.DelMagicsockPortRule(*magicsockPort, network); err != nil { + if cur != 0 { + if err := r.nfr.DelMagicsockPortRule(uint16(cur), network); err != nil { return fmt.Errorf("del magicsock port rule: %w", err) } } if port != 0 { - if err := r.nfr.AddMagicsockPortRule(*magicsockPort, network); err != nil { + if err := r.nfr.AddMagicsockPortRule(uint16(port), network); err != nil { return fmt.Errorf("add magicsock port rule: %w", err) } } - *magicsockPort = port + 
magicsockPort.Store(uint32(port)) return nil } @@ -658,13 +660,13 @@ func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { if err := r.nfr.AddBase(r.tunname); err != nil { return err } - if r.magicsockPortV4 != 0 { - if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV4, "udp4"); err != nil { + if mport := uint16(r.magicsockPortV4.Load()); mport != 0 { + if err := r.nfr.AddMagicsockPortRule(mport, "udp4"); err != nil { return fmt.Errorf("could not add magicsock port rule v4: %w", err) } } - if r.magicsockPortV6 != 0 && r.getV6FilteringAvailable() { - if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV6, "udp6"); err != nil { + if mport := uint16(r.magicsockPortV6.Load()); mport != 0 && r.getV6FilteringAvailable() { + if err := r.nfr.AddMagicsockPortRule(mport, "udp6"); err != nil { return fmt.Errorf("could not add magicsock port rule v6: %w", err) } } @@ -698,13 +700,13 @@ func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { if err := r.nfr.AddBase(r.tunname); err != nil { return err } - if r.magicsockPortV4 != 0 { - if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV4, "udp4"); err != nil { + if mport := uint16(r.magicsockPortV4.Load()); mport != 0 { + if err := r.nfr.AddMagicsockPortRule(mport, "udp4"); err != nil { return fmt.Errorf("could not add magicsock port rule v4: %w", err) } } - if r.magicsockPortV6 != 0 && r.getV6FilteringAvailable() { - if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV6, "udp6"); err != nil { + if mport := uint16(r.magicsockPortV6.Load()); mport != 0 && r.getV6FilteringAvailable() { + if err := r.nfr.AddMagicsockPortRule(mport, "udp6"); err != nil { return fmt.Errorf("could not add magicsock port rule v6: %w", err) } } From 223ced84b571df1e2045d3977459374bc43f5515 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Oct 2025 17:32:17 -0700 Subject: [PATCH 0509/1093] feature/ace: make ACE modular Updates #12614 Change-Id: Iaee75d8831c4ba5c9705d7877bb78044424c6da1 
Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 - cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 1 - cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 5 ++-- cmd/tsidp/depaware.txt | 1 - control/controlhttp/client.go | 16 +++++++----- feature/ace/ace.go | 25 +++++++++++++++++++ feature/buildfeatures/feature_ace_disabled.go | 13 ++++++++++ feature/buildfeatures/feature_ace_enabled.go | 13 ++++++++++ feature/condregister/maybe_ace.go | 8 ++++++ feature/featuretags/featuretags.go | 1 + net/ace/ace.go | 2 ++ tsnet/depaware.txt | 1 - 14 files changed, 77 insertions(+), 14 deletions(-) create mode 100644 feature/ace/ace.go create mode 100644 feature/buildfeatures/feature_ace_disabled.go create mode 100644 feature/buildfeatures/feature_ace_enabled.go create mode 100644 feature/condregister/maybe_ace.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 97eebf1d5023e..85d912fab45bb 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -742,7 +742,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 81d5f3e0d8d02..b0b4359e48de3 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -113,7 +113,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ 
tailscale.com/metrics from tailscale.com/tsweb+ - tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli+ + tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 25594b124bab7..626fe5acd7572 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -77,7 +77,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/connstats from tailscale.com/net/tstun+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 3829737e634bc..37909089df344 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -100,7 +100,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp+ + tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/connstats from tailscale.com/net/tstun+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 5f40d9417d6fc..ff3cc5c1009ff 100644 --- 
a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -252,7 +252,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/cmd/tailscaled/tailscaledhooks from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/ts2021 + tailscale.com/control/controlhttp from tailscale.com/control/ts2021+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ tailscale.com/control/ts2021 from tailscale.com/control/controlclient @@ -272,6 +272,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ + tailscale.com/feature/ace from tailscale.com/feature/condregister tailscale.com/feature/appconnectors from tailscale.com/feature/condregister tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/capture from tailscale.com/feature/condregister @@ -322,7 +323,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp + tailscale.com/net/ace from tailscale.com/feature/ace tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 
2563cb2fa9b38..47c8086c548fa 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -170,7 +170,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index f1ee7a6f94cb2..06a2131fdcb2b 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -42,7 +42,6 @@ import ( "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/health" - "tailscale.com/net/ace" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" "tailscale.com/net/netutil" @@ -395,6 +394,8 @@ var macOSScreenTime = health.Register(&health.Warnable{ ImpactsConnectivity: true, }) +var HookMakeACEDialer feature.Hook[func(dialer netx.DialFunc, aceHost string, optIP netip.Addr) netx.DialFunc] + // tryURLUpgrade connects to u, and tries to upgrade it to a net.Conn. // // If optAddr is valid, then no DNS is used and the connection will be made to @@ -424,11 +425,14 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad } if optACEHost != "" { - dialer = (&ace.Dialer{ - ACEHost: optACEHost, - ACEHostIP: optAddr, // may be zero - NetDialer: dialer, - }).Dial + if !buildfeatures.HasACE { + return nil, feature.ErrUnavailable + } + f, ok := HookMakeACEDialer.GetOk() + if !ok { + return nil, feature.ErrUnavailable + } + dialer = f(dialer, optACEHost, optAddr) } // On macOS, see if Screen Time is blocking things. 
diff --git a/feature/ace/ace.go b/feature/ace/ace.go new file mode 100644 index 0000000000000..b6d36543c5281 --- /dev/null +++ b/feature/ace/ace.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package ace registers support for Alternate Connectivity Endpoints (ACE). +package ace + +import ( + "net/netip" + + "tailscale.com/control/controlhttp" + "tailscale.com/net/ace" + "tailscale.com/net/netx" +) + +func init() { + controlhttp.HookMakeACEDialer.Set(mkDialer) +} + +func mkDialer(dialer netx.DialFunc, aceHost string, optIP netip.Addr) netx.DialFunc { + return (&ace.Dialer{ + ACEHost: aceHost, + ACEHostIP: optIP, // may be zero + NetDialer: dialer, + }).Dial +} diff --git a/feature/buildfeatures/feature_ace_disabled.go b/feature/buildfeatures/feature_ace_disabled.go new file mode 100644 index 0000000000000..b4808d4976b02 --- /dev/null +++ b/feature/buildfeatures/feature_ace_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_ace + +package buildfeatures + +// HasACE is whether the binary was built with support for modular feature "Alternate Connectivity Endpoints". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ace" build tag. +// It's a const so it can be used for dead code elimination. +const HasACE = false diff --git a/feature/buildfeatures/feature_ace_enabled.go b/feature/buildfeatures/feature_ace_enabled.go new file mode 100644 index 0000000000000..4812f9a61cd4c --- /dev/null +++ b/feature/buildfeatures/feature_ace_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_ace + +package buildfeatures + +// HasACE is whether the binary was built with support for modular feature "Alternate Connectivity Endpoints". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_ace" build tag. +// It's a const so it can be used for dead code elimination. +const HasACE = true diff --git a/feature/condregister/maybe_ace.go b/feature/condregister/maybe_ace.go new file mode 100644 index 0000000000000..07023171144a5 --- /dev/null +++ b/feature/condregister/maybe_ace.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_ace + +package condregister + +import _ "tailscale.com/feature/ace" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 041b68ec5610a..5884d48d50959 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -93,6 +93,7 @@ type FeatureMeta struct { // Features are the known Tailscale features that can be selectively included or // excluded via build tags, and a description of each. var Features = map[FeatureTag]FeatureMeta{ + "ace": {Sym: "ACE", Desc: "Alternate Connectivity Endpoints"}, "acme": {Sym: "ACME", Desc: "ACME TLS certificate management"}, "appconnectors": {Sym: "AppConnectors", Desc: "App Connectors support"}, "aws": {Sym: "AWS", Desc: "AWS integration"}, diff --git a/net/ace/ace.go b/net/ace/ace.go index 1bb64d64d19ab..47e780313cadd 100644 --- a/net/ace/ace.go +++ b/net/ace/ace.go @@ -28,6 +28,8 @@ type Dialer struct { ACEHostIP netip.Addr // optional; if non-zero, use this IP instead of DNS ACEPort int // zero means 443 + // NetDialer optionally specifies the underlying dialer to use to reach the + // ACEHost. If nil, net.Dialer.DialContext is used. 
NetDialer func(ctx context.Context, network, address string) (net.Conn, error) } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index f8d7bf7a88c0f..5df2c41b9455f 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -166,7 +166,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/metrics from tailscale.com/net/tstun+ - tailscale.com/net/ace from tailscale.com/control/controlhttp tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ From db65f3fcf87035c64b810a01bba60745e48b2444 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 3 Oct 2025 17:42:40 -0700 Subject: [PATCH 0510/1093] ipn/ipnlocal: use buildfeature consts in a few more places Updates #12614 Change-Id: I561d434d9829172a3d7f6933399237924ff80490 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 15 +++++++++++---- tailcfg/tailcfg.go | 4 ++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8cdb498766909..9a47b7cb13964 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5416,7 +5416,9 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.ShieldsUp = prefs.ShieldsUp() hi.AllowsUpdate = buildfeatures.HasClientUpdate && (envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true)) - b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) + if buildfeatures.HasAdvertiseRoutes { + b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) + } var sshHostKeys []string if buildfeatures.HasSSH && prefs.RunSSH() && envknob.CanSSHD() { @@ -5445,7 +5447,10 @@ func (b 
*LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // DNS records are needed, so we can save bandwidth and not send // WireIngress. hi.WireIngress = b.shouldWireInactiveIngressLocked() - hi.AppConnector.Set(prefs.AppConnector().Advertise) + + if buildfeatures.HasAppConnectors { + hi.AppConnector.Set(prefs.AppConnector().Advertise) + } // The [tailcfg.Hostinfo.ExitNodeID] field tells control which exit node // was selected, if any. @@ -5461,8 +5466,10 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // ExitNodeID here; [LocalBackend.ResolveExitNode] will be called once // the netmap and/or net report have been received to both pick the exit // node and notify control of the change. - if sid := prefs.ExitNodeID(); sid != unresolvedExitNodeID { - hi.ExitNodeID = prefs.ExitNodeID() + if buildfeatures.HasUseExitNode { + if sid := prefs.ExitNodeID(); sid != unresolvedExitNodeID { + hi.ExitNodeID = prefs.ExitNodeID() + } } } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 96e7fbbd907c6..da53b777e1cd6 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -20,6 +20,7 @@ import ( "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/opt" @@ -1088,6 +1089,9 @@ func (ni *NetInfo) String() string { } func (ni *NetInfo) portMapSummary() string { + if !buildfeatures.HasPortMapper { + return "x" + } if !ni.HavePortMap && ni.UPnP == "" && ni.PMP == "" && ni.PCP == "" { return "?" } From 2e381557b856f4a8969e6a4b3f1104b77830c3e7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Oct 2025 09:18:57 -0700 Subject: [PATCH 0511/1093] feature/c2n: move answerC2N code + deps out of control/controlclient c2n was already a conditional feature, but it didn't have a feature/c2n directory before (rather, it was using consts + DCE). This adds it, and moves some code, which removes the httprec dependency. 
Also, remove some unnecessary code from our httprec fork. Updates #12614 Change-Id: I2fbe538e09794c517038e35a694a363312c426a2 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscaled/depaware-min.txt | 3 +- cmd/tailscaled/depaware-minbox.txt | 3 +- cmd/tailscaled/depaware.txt | 5 ++- cmd/tsidp/depaware.txt | 5 ++- control/controlclient/direct.go | 58 +++---------------------- feature/c2n/c2n.go | 70 ++++++++++++++++++++++++++++++ feature/condregister/maybe_c2n.go | 8 ++++ tempfork/httprec/httprec.go | 38 ---------------- tsnet/depaware.txt | 5 ++- tsnet/tsnet.go | 1 + 11 files changed, 99 insertions(+), 100 deletions(-) create mode 100644 feature/c2n/c2n.go create mode 100644 feature/condregister/maybe_c2n.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 85d912fab45bb..6e2a83e3c40d0 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -700,6 +700,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet @@ -791,7 +792,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from 
tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 626fe5acd7572..9210b4377293b 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -117,7 +117,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tka from tailscale.com/control/controlclient+ tailscale.com/tsconst from tailscale.com/net/netns tailscale.com/tsd from tailscale.com/cmd/tailscaled+ @@ -211,7 +210,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ - golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts golang.org/x/net/internal/iana from golang.org/x/net/icmp+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 37909089df344..b183609f37f2c 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -142,7 +142,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient tailscale.com/tempfork/spf13/cobra from 
tailscale.com/cmd/tailscale/cli/ffcomplete+ tailscale.com/tka from tailscale.com/control/controlclient+ tailscale.com/tsconst from tailscale.com/net/netns+ @@ -239,7 +238,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ - golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/icmp from tailscale.com/net/ping golang.org/x/net/idna from golang.org/x/net/http/httpguts+ golang.org/x/net/internal/iana from golang.org/x/net/icmp+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index ff3cc5c1009ff..3e0930fcb0b9f 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -275,6 +275,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/ace from tailscale.com/feature/condregister tailscale.com/feature/appconnectors from tailscale.com/feature/condregister tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/feature/condregister tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/clientupdate from tailscale.com/feature/condregister tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled @@ -379,7 +380,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from 
tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ @@ -502,7 +503,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from tailscale.com/net/ping+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 47c8086c548fa..2a87061e4f8c9 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -142,6 +142,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet @@ -218,7 +219,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/ipn/ipnlocal+ tailscale.com/tsd from 
tailscale.com/ipn/ipnext+ @@ -334,7 +335,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 482affe33095d..9223553693aba 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -4,7 +4,6 @@ package controlclient import ( - "bufio" "bytes" "cmp" "context" @@ -44,7 +43,6 @@ import ( "tailscale.com/net/tlsdial" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" - "tailscale.com/tempfork/httprec" "tailscale.com/tka" "tailscale.com/tstime" "tailscale.com/types/key" @@ -1389,6 +1387,10 @@ func (c *Direct) isUniquePingRequest(pr *tailcfg.PingRequest) bool { return true } +// HookAnswerC2NPing is where feature/c2n conditionally registers support +// for handling C2N (control-to-node) HTTP requests. 
+var HookAnswerC2NPing feature.Hook[func(logger.Logf, http.Handler, *http.Client, *tailcfg.PingRequest)] + func (c *Direct) answerPing(pr *tailcfg.PingRequest) { httpc := c.httpc useNoise := pr.URLIsNoise || pr.Types == "c2n" @@ -1416,7 +1418,9 @@ func (c *Direct) answerPing(pr *tailcfg.PingRequest) { c.logf("refusing to answer c2n ping without noise") return } - answerC2NPing(c.logf, c.c2nHandler, httpc, pr) + if f, ok := HookAnswerC2NPing.GetOk(); ok { + f(c.logf, c.c2nHandler, httpc, pr) + } return } for _, t := range strings.Split(pr.Types, ",") { @@ -1451,54 +1455,6 @@ func answerHeadPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest) { } } -func answerC2NPing(logf logger.Logf, c2nHandler http.Handler, c *http.Client, pr *tailcfg.PingRequest) { - if c2nHandler == nil { - logf("answerC2NPing: c2nHandler not defined") - return - } - hreq, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(pr.Payload))) - if err != nil { - logf("answerC2NPing: ReadRequest: %v", err) - return - } - if pr.Log { - logf("answerC2NPing: got c2n request for %v ...", hreq.RequestURI) - } - handlerTimeout := time.Minute - if v := hreq.Header.Get("C2n-Handler-Timeout"); v != "" { - handlerTimeout, _ = time.ParseDuration(v) - } - handlerCtx, cancel := context.WithTimeout(context.Background(), handlerTimeout) - defer cancel() - hreq = hreq.WithContext(handlerCtx) - rec := httprec.NewRecorder() - c2nHandler.ServeHTTP(rec, hreq) - cancel() - - c2nResBuf := new(bytes.Buffer) - rec.Result().Write(c2nResBuf) - - replyCtx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - req, err := http.NewRequestWithContext(replyCtx, "POST", pr.URL, c2nResBuf) - if err != nil { - logf("answerC2NPing: NewRequestWithContext: %v", err) - return - } - if pr.Log { - logf("answerC2NPing: sending POST ping to %v ...", pr.URL) - } - t0 := clock.Now() - _, err = c.Do(req) - d := time.Since(t0).Round(time.Millisecond) - if err != nil { - logf("answerC2NPing error: %v 
to %v (after %v)", err, pr.URL, d) - } else if pr.Log { - logf("answerC2NPing complete to %v (after %v)", pr.URL, d) - } -} - // sleepAsRequest implements the sleep for a tailcfg.Debug message requesting // that the client sleep. The complication is that while we're sleeping (if for // a long time), we need to periodically reset the watchdog timer before it diff --git a/feature/c2n/c2n.go b/feature/c2n/c2n.go new file mode 100644 index 0000000000000..ae942e31d0d95 --- /dev/null +++ b/feature/c2n/c2n.go @@ -0,0 +1,70 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package c2n registers support for C2N (Control-to-Node) communications. +package c2n + +import ( + "bufio" + "bytes" + "context" + "net/http" + "time" + + "tailscale.com/control/controlclient" + "tailscale.com/tailcfg" + "tailscale.com/tempfork/httprec" + "tailscale.com/types/logger" +) + +func init() { + controlclient.HookAnswerC2NPing.Set(answerC2NPing) +} + +func answerC2NPing(logf logger.Logf, c2nHandler http.Handler, c *http.Client, pr *tailcfg.PingRequest) { + if c2nHandler == nil { + logf("answerC2NPing: c2nHandler not defined") + return + } + hreq, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(pr.Payload))) + if err != nil { + logf("answerC2NPing: ReadRequest: %v", err) + return + } + if pr.Log { + logf("answerC2NPing: got c2n request for %v ...", hreq.RequestURI) + } + handlerTimeout := time.Minute + if v := hreq.Header.Get("C2n-Handler-Timeout"); v != "" { + handlerTimeout, _ = time.ParseDuration(v) + } + handlerCtx, cancel := context.WithTimeout(context.Background(), handlerTimeout) + defer cancel() + hreq = hreq.WithContext(handlerCtx) + rec := httprec.NewRecorder() + c2nHandler.ServeHTTP(rec, hreq) + cancel() + + c2nResBuf := new(bytes.Buffer) + rec.Result().Write(c2nResBuf) + + replyCtx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + req, err := http.NewRequestWithContext(replyCtx, "POST", pr.URL, 
c2nResBuf) + if err != nil { + logf("answerC2NPing: NewRequestWithContext: %v", err) + return + } + if pr.Log { + logf("answerC2NPing: sending POST ping to %v ...", pr.URL) + } + t0 := time.Now() + _, err = c.Do(req) + d := time.Since(t0).Round(time.Millisecond) + if err != nil { + logf("answerC2NPing error: %v to %v (after %v)", err, pr.URL, d) + } else if pr.Log { + logf("answerC2NPing complete to %v (after %v)", pr.URL, d) + } +} diff --git a/feature/condregister/maybe_c2n.go b/feature/condregister/maybe_c2n.go new file mode 100644 index 0000000000000..c222af533a37d --- /dev/null +++ b/feature/condregister/maybe_c2n.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_c2n + +package condregister + +import _ "tailscale.com/feature/c2n" diff --git a/tempfork/httprec/httprec.go b/tempfork/httprec/httprec.go index 13786aaf60e05..07ca673fea885 100644 --- a/tempfork/httprec/httprec.go +++ b/tempfork/httprec/httprec.go @@ -14,9 +14,6 @@ import ( "net/http" "net/textproto" "strconv" - "strings" - - "golang.org/x/net/http/httpguts" ) // ResponseRecorder is an implementation of [http.ResponseWriter] that @@ -59,10 +56,6 @@ func NewRecorder() *ResponseRecorder { } } -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on [ResponseRecorder]. -const DefaultRemoteAddr = "1.2.3.4" - // Header implements [http.ResponseWriter]. It returns the response // headers to mutate within a handler. 
To test the headers that were // written after a handler completes, use the [ResponseRecorder.Result] method and see @@ -206,37 +199,6 @@ func (rw *ResponseRecorder) Result() *http.Response { res.Body = http.NoBody } res.ContentLength = parseContentLength(res.Header.Get("Content-Length")) - - if trailers, ok := rw.snapHeader["Trailer"]; ok { - res.Trailer = make(http.Header, len(trailers)) - for _, k := range trailers { - for _, k := range strings.Split(k, ",") { - k = http.CanonicalHeaderKey(textproto.TrimString(k)) - if !httpguts.ValidTrailerHeader(k) { - // Ignore since forbidden by RFC 7230, section 4.1.2. - continue - } - vv, ok := rw.HeaderMap[k] - if !ok { - continue - } - vv2 := make([]string, len(vv)) - copy(vv2, vv) - res.Trailer[k] = vv2 - } - } - } - for k, vv := range rw.HeaderMap { - if !strings.HasPrefix(k, http.TrailerPrefix) { - continue - } - if res.Trailer == nil { - res.Trailer = make(http.Header) - } - for _, v := range vv { - res.Trailer.Add(strings.TrimPrefix(k, http.TrailerPrefix), v) - } - } return res } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 5df2c41b9455f..6e6ea40509a12 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -138,6 +138,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet @@ -214,7 +215,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from 
tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/ipn/ipnlocal+ tailscale.com/tsd from tailscale.com/ipn/ipnext+ @@ -327,7 +328,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ - golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 890193d0bbd16..2944f63595a48 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -29,6 +29,7 @@ import ( "tailscale.com/client/local" "tailscale.com/control/controlclient" "tailscale.com/envknob" + _ "tailscale.com/feature/c2n" _ "tailscale.com/feature/condregister/oauthkey" _ "tailscale.com/feature/condregister/portmapper" _ "tailscale.com/feature/condregister/useproxy" From 3c7e35167133003531d217e9597fd9e6477fc3d3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Oct 2025 11:43:17 -0700 Subject: [PATCH 0512/1093] net/connstats: make it modular (omittable) Saves only 12 KB, but notably removes some deps on packages that future changes can then eliminate entirely. 
Updates #12614 Change-Id: Ibf830d3ee08f621d0a2011b1d4cd175427ef50df Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/depaware-min.txt | 3 +-- cmd/tailscaled/depaware-minbox.txt | 3 +-- cmd/tailscaled/deps_test.go | 1 + .../feature_connstats_disabled.go | 13 ++++++++++ .../feature_connstats_enabled.go | 13 ++++++++++ feature/featuretags/featuretags.go | 6 ++++- net/connstats/stats.go | 2 ++ net/connstats/stats_omit.go | 24 +++++++++++++++++ net/tstun/wrap.go | 26 ++++++++++++------- wgengine/magicsock/magicsock.go | 10 ++++--- wgengine/netlog/netlog.go | 21 +++++++++------ 11 files changed, 97 insertions(+), 25 deletions(-) create mode 100644 feature/buildfeatures/feature_connstats_disabled.go create mode 100644 feature/buildfeatures/feature_connstats_enabled.go create mode 100644 net/connstats/stats_omit.go diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 9210b4377293b..6ed602dc1d0d1 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -134,7 +134,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ @@ -217,7 +216,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ - golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sync/errgroup from github.com/mdlayher/socket golang.org/x/sys/cpu from 
github.com/tailscale/wireguard-go/tun+ golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ golang.org/x/term from tailscale.com/logpolicy diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index b183609f37f2c..70fed796fb1e3 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -160,7 +160,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ @@ -245,7 +244,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ - golang.org/x/sync/errgroup from github.com/mdlayher/socket+ + golang.org/x/sync/errgroup from github.com/mdlayher/socket golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ golang.org/x/term from tailscale.com/logpolicy diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index c54f014f62830..2dd140f2315f2 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -243,6 +243,7 @@ func TestMinTailscaledNoCLI(t *testing.T) { "golang.org/x/net/proxy", "internal/socks", "github.com/tailscale/peercred", + "tailscale.com/types/netlogtype", } deptest.DepChecker{ GOOS: "linux", diff --git a/feature/buildfeatures/feature_connstats_disabled.go 
b/feature/buildfeatures/feature_connstats_disabled.go new file mode 100644 index 0000000000000..d9aac0e80961d --- /dev/null +++ b/feature/buildfeatures/feature_connstats_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_connstats + +package buildfeatures + +// HasConnStats is whether the binary was built with support for modular feature "Track per-packet connection statistics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_connstats" build tag. +// It's a const so it can be used for dead code elimination. +const HasConnStats = false diff --git a/feature/buildfeatures/feature_connstats_enabled.go b/feature/buildfeatures/feature_connstats_enabled.go new file mode 100644 index 0000000000000..c0451ce1e7f74 --- /dev/null +++ b/feature/buildfeatures/feature_connstats_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_connstats + +package buildfeatures + +// HasConnStats is whether the binary was built with support for modular feature "Track per-packet connection statistics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_connstats" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasConnStats = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 5884d48d50959..4ae4e1b863a3a 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -130,7 +130,11 @@ var Features = map[FeatureTag]FeatureMeta{ Deps: []FeatureTag{"c2n"}, }, "completion": {Sym: "Completion", Desc: "CLI shell completion"}, - "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, + "connstats": { + Sym: "ConnStats", + Desc: "Track per-packet connection statistics", + }, + "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, "dbus": { Sym: "DBus", Desc: "Linux DBus support", diff --git a/net/connstats/stats.go b/net/connstats/stats.go index 4e6d8e109aaad..44b2762547f85 100644 --- a/net/connstats/stats.go +++ b/net/connstats/stats.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_connstats + // Package connstats maintains statistics about connections // flowing through a TUN device (which operate at the IP layer). 
package connstats diff --git a/net/connstats/stats_omit.go b/net/connstats/stats_omit.go new file mode 100644 index 0000000000000..15d16c9e449e3 --- /dev/null +++ b/net/connstats/stats_omit.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_connstats + +package connstats + +import ( + "context" + "net/netip" + "time" +) + +type Statistics struct{} + +func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical any)) *Statistics { + return &Statistics{} +} + +func (s *Statistics) UpdateTxVirtual(b []byte) {} +func (s *Statistics) UpdateRxVirtual(b []byte) {} +func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) {} +func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) {} +func (s *Statistics) Shutdown(context.Context) error { return nil } diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index c94844c90a28e..a6d88075dbbc7 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -973,8 +973,10 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { panic(fmt.Sprintf("short copy: %d != %d", n, len(data)-res.dataOffset)) } sizes[buffsPos] = n - if stats := t.stats.Load(); stats != nil { - stats.UpdateTxVirtual(p.Buffer()) + if buildfeatures.HasConnStats { + if stats := t.stats.Load(); stats != nil { + stats.UpdateTxVirtual(p.Buffer()) + } } buffsPos++ } @@ -1098,9 +1100,11 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i n, err = tun.GSOSplit(pkt, gsoOptions, outBuffs, sizes, offset) } - if stats := t.stats.Load(); stats != nil { - for i := 0; i < n; i++ { - stats.UpdateTxVirtual(outBuffs[i][offset : offset+sizes[i]]) + if buildfeatures.HasConnStats { + if stats := t.stats.Load(); stats != nil { + for i := 0; i < n; i++ { + stats.UpdateTxVirtual(outBuffs[i][offset : offset+sizes[i]]) + } } } @@ -1266,9 
+1270,11 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { } func (t *Wrapper) tdevWrite(buffs [][]byte, offset int) (int, error) { - if stats := t.stats.Load(); stats != nil { - for i := range buffs { - stats.UpdateRxVirtual((buffs)[i][offset:]) + if buildfeatures.HasConnStats { + if stats := t.stats.Load(); stats != nil { + for i := range buffs { + stats.UpdateRxVirtual((buffs)[i][offset:]) + } } } return t.tdev.Write(buffs, offset) @@ -1490,7 +1496,9 @@ func (t *Wrapper) Unwrap() tun.Device { // SetStatistics specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. func (t *Wrapper) SetStatistics(stats *connstats.Statistics) { - t.stats.Store(stats) + if buildfeatures.HasConnStats { + t.stats.Store(stats) + } } var ( diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index b6cb7b336636a..76fbfb3b450b9 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1865,8 +1865,10 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) connNoted := ep.noteRecvActivity(src, now) - if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) + if buildfeatures.HasConnStats { + if stats := c.stats.Load(); stats != nil { + stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) + } } if src.vni.IsSet() && (connNoted || looksLikeInitiationMsg(b)) { // connNoted is periodic, but we also want to verify if the peer is who @@ -3743,7 +3745,9 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) { // SetStatistics specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. 
func (c *Conn) SetStatistics(stats *connstats.Statistics) { - c.stats.Store(stats) + if buildfeatures.HasConnStats { + c.stats.Store(stats) + } } // SetHomeless sets whether magicsock should idle harder and not have a DERP diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index b7281e542859b..7e1938d27ac3c 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -19,6 +19,7 @@ import ( "sync" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" @@ -130,20 +131,24 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo // can upload to the Tailscale log service, so stay below this limit. const maxLogSize = 256 << 10 const maxConns = (maxLogSize - netlogtype.MaxMessageJSONSize) / netlogtype.MaxConnectionCountsJSONSize - nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - nl.mu.Lock() - addrs := nl.addrs - prefixes := nl.prefixes - nl.mu.Unlock() - recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) - }) + if buildfeatures.HasConnStats { + nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { + nl.mu.Lock() + addrs := nl.addrs + prefixes := nl.prefixes + nl.mu.Unlock() + recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) + }) + } // Register the connection tracker into the TUN device. if tun == nil { tun = noopDevice{} } nl.tun = tun - nl.tun.SetStatistics(nl.stats) + if buildfeatures.HasConnStats { + nl.tun.SetStatistics(nl.stats) + } // Register the connection tracker into magicsock. 
if sock == nil { From 3aa8b6d683bdf59a383719a8fff2adbcc85d0fb1 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Sat, 4 Oct 2025 15:05:41 -0700 Subject: [PATCH 0513/1093] wgengine/magicsock: remove misleading unexpected log message (#17445) Switching to a Geneve-encapsulated (peer relay) path in endpoint.handlePongConnLocked is expected around port rebinds, which end up clearing endpoint.bestAddr. Fixes tailscale/corp#33036 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 1f36aabd3baf8..f4c8b14694058 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1768,11 +1768,6 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd // we don't clear direct UDP paths on disco ping timeout (see // discoPingTimeout). if betterAddr(thisPong, de.bestAddr) { - if src.vni.IsSet() { - // This would be unexpected. Switching to a Geneve-encapsulated - // path should only happen in de.relayEndpointReady(). 
- de.c.logf("[unexpected] switching to Geneve-encapsulated path %v from %v", thisPong, de.bestAddr) - } de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v tx=%x", de.publicKey.ShortString(), de.discoShort(), sp.to, thisPong.wireMTU, m.TxID[:6]) de.debugUpdates.Add(EndpointChange{ When: time.Now(), From e44e28efcd95596c0a86270c177ef912119bf851 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Sat, 4 Oct 2025 20:27:57 -0700 Subject: [PATCH 0514/1093] wgengine/magicsock: fix relayManager deadlock (#17449) Updates tailscale/corp#32978 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager.go | 5 +++- wgengine/magicsock/relaymanager_test.go | 39 +++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 4680832d96bb8..a9dca70ae2228 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -758,7 +758,10 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay ctx: ctx, cancel: cancel, } - if byServerDisco == nil { + // We must look up byServerDisco again. The previous value may have been + // deleted from the outer map when cleaning up duplicate work. 
+ byServerDisco, ok = r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] + if !ok { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] = byServerDisco } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index e4891f5678a24..6ae21b8fbfe85 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -7,6 +7,7 @@ import ( "testing" "tailscale.com/disco" + udprelay "tailscale.com/net/udprelay/endpoint" "tailscale.com/types/key" "tailscale.com/util/set" ) @@ -78,3 +79,41 @@ func TestRelayManagerGetServers(t *testing.T) { t.Errorf("got %v != want %v", got, servers) } } + +// Test for http://go/corp/32978 +func TestRelayManager_handleNewServerEndpointRunLoop(t *testing.T) { + rm := relayManager{} + rm.init() + <-rm.runLoopStoppedCh // prevent runLoop() from starting, we will inject/handle events in the test + ep := &endpoint{} + conn := newConn(t.Logf) + ep.c = conn + serverDisco := key.NewDisco().Public() + rm.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ + ep: ep, + }, + se: udprelay.ServerEndpoint{ + ServerDisco: serverDisco, + LamportID: 1, + VNI: 1, + }, + }) + rm.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ + ep: ep, + }, + se: udprelay.ServerEndpoint{ + ServerDisco: serverDisco, + LamportID: 2, + VNI: 2, + }, + }) + rm.stopWorkRunLoop(ep) + if len(rm.handshakeWorkByServerDiscoByEndpoint) != 0 || + len(rm.handshakeWorkByServerDiscoVNI) != 0 || + len(rm.handshakeWorkAwaitingPong) != 0 || + len(rm.addrPortVNIToHandshakeWork) != 0 { + t.Fatal("stranded relayHandshakeWork state") + } +} From 6820ec5bbb3e1b1c3a3fa77324d2fb55e6efee95 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Oct 2025 14:17:19 -0700 Subject: [PATCH 0515/1093] wgengine: stop importing flowtrack when unused Updates #12614 
Change-Id: I42b5c4d623d356af4bee5bbdabaaf0f6822f2bf4 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- net/packet/tsmp.go | 5 ----- tsnet/depaware.txt | 2 +- wgengine/pendopen.go | 12 ++++++++++-- wgengine/pendopen_omit.go | 24 ++++++++++++++++++++++++ wgengine/userspace.go | 3 +-- 10 files changed, 41 insertions(+), 15 deletions(-) create mode 100644 wgengine/pendopen_omit.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 6e2a83e3c40d0..6ecbd3df8c31a 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -753,7 +753,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 6ed602dc1d0d1..6d7a11623e243 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -86,7 +86,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine/filter tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ 
tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 70fed796fb1e3..5a71cebd42a05 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -110,7 +110,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine/filter tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 3e0930fcb0b9f..1bd165c171045 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -335,7 +335,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/wgengine/magicsock+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 2a87061e4f8c9..f348a394f8be1 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -181,7 +181,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/dns/resolver from tailscale.com/net/dns+ 
tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ diff --git a/net/packet/tsmp.go b/net/packet/tsmp.go index d78d10d36d3bb..0ea321e84eb2a 100644 --- a/net/packet/tsmp.go +++ b/net/packet/tsmp.go @@ -15,7 +15,6 @@ import ( "fmt" "net/netip" - "tailscale.com/net/flowtrack" "tailscale.com/types/ipproto" ) @@ -58,10 +57,6 @@ type TailscaleRejectedHeader struct { const rejectFlagBitMaybeBroken = 0x1 -func (rh TailscaleRejectedHeader) Flow() flowtrack.Tuple { - return flowtrack.MakeTuple(rh.Proto, rh.Src, rh.Dst) -} - func (rh TailscaleRejectedHeader) String() string { return fmt.Sprintf("TSMP-reject-flow{%s %s > %s}: %s", rh.Proto, rh.Src, rh.Dst, rh.Reason) } diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 6e6ea40509a12..014ea109c2d68 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -177,7 +177,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 28d1f4f9d59e4..7eaf43e52a816 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: 
BSD-3-Clause +//go:build !ts_omit_debug + package wgengine import ( @@ -20,6 +22,8 @@ import ( "tailscale.com/wgengine/filter" ) +type flowtrackTuple = flowtrack.Tuple + const tcpTimeoutBeforeDebug = 5 * time.Second type pendingOpenFlow struct { @@ -56,6 +60,10 @@ func (e *userspaceEngine) noteFlowProblemFromPeer(f flowtrack.Tuple, problem pac of.problem = problem } +func tsRejectFlow(rh packet.TailscaleRejectedHeader) flowtrack.Tuple { + return flowtrack.MakeTuple(rh.Proto, rh.Src, rh.Dst) +} + func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { res = filter.Accept // always @@ -66,8 +74,8 @@ func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapp return } if rh.MaybeBroken { - e.noteFlowProblemFromPeer(rh.Flow(), rh.Reason) - } else if f := rh.Flow(); e.removeFlow(f) { + e.noteFlowProblemFromPeer(tsRejectFlow(rh), rh.Reason) + } else if f := tsRejectFlow(rh); e.removeFlow(f) { e.logf("open-conn-track: flow %v %v > %v rejected due to %v", rh.Proto, rh.Src, rh.Dst, rh.Reason) } return diff --git a/wgengine/pendopen_omit.go b/wgengine/pendopen_omit.go new file mode 100644 index 0000000000000..013425d357f26 --- /dev/null +++ b/wgengine/pendopen_omit.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_debug + +package wgengine + +import ( + "tailscale.com/net/packet" + "tailscale.com/net/tstun" + "tailscale.com/wgengine/filter" +) + +type flowtrackTuple = struct{} + +type pendingOpenFlow struct{} + +func (*userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { + panic("unreachable") +} + +func (*userspaceEngine) trackOpenPostFilterOut(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { + panic("unreachable") +} diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 735181ec70f2f..be0fe50318b40 100644 --- a/wgengine/userspace.go +++ 
b/wgengine/userspace.go @@ -29,7 +29,6 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" "tailscale.com/net/dns/resolver" - "tailscale.com/net/flowtrack" "tailscale.com/net/ipset" "tailscale.com/net/netmon" "tailscale.com/net/packet" @@ -147,7 +146,7 @@ type userspaceEngine struct { statusCallback StatusCallback peerSequence []key.NodePublic endpoints []tailcfg.Endpoint - pendOpen map[flowtrack.Tuple]*pendingOpenFlow // see pendopen.go + pendOpen map[flowtrackTuple]*pendingOpenFlow // see pendopen.go // pongCallback is the map of response handlers waiting for disco or TSMP // pong callbacks. The map key is a random slice of bytes. From f80c7e7c23e3201c62c3bb132ba66e87d9f06e6c Mon Sep 17 00:00:00 2001 From: kscooo Date: Thu, 2 Oct 2025 11:25:17 +0800 Subject: [PATCH 0516/1093] net/wsconn: clarify package comment Explain that this file stays forked from coder/websocket until we can depend on an upstream release for the helper. Updates #cleanup Signed-off-by: kscooo --- net/wsconn/wsconn.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/wsconn/wsconn.go b/net/wsconn/wsconn.go index 22b511ea81273..3c83ffd8c320f 100644 --- a/net/wsconn/wsconn.go +++ b/net/wsconn/wsconn.go @@ -2,9 +2,7 @@ // SPDX-License-Identifier: BSD-3-Clause // Package wsconn contains an adapter type that turns -// a websocket connection into a net.Conn. It a temporary fork of the -// netconn.go file from the github.com/coder/websocket package while we wait for -// https://github.com/nhooyr/websocket/pull/350 to be merged. +// a websocket connection into a net.Conn. 
package wsconn import ( From cf520a33713e96b6ab9d2dfd50eee84e2f63d8b4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Oct 2025 14:04:36 -0700 Subject: [PATCH 0517/1093] feature/featuretags: add LazyWG modular feature Due to iOS memory limitations in 2020 (see https://tailscale.com/blog/go-linker, etc) and wireguard-go using multiple goroutines per peer, commit 16a9cfe2f4ce7d introduced some convoluted pathsways through Tailscale to look at packets before they're delivered to wireguard-go and lazily reconfigure wireguard on the fly before delivering a packet, only telling wireguard about peers that are active. We eventually want to remove that code and integrate wireguard-go's configuration with Tailscale's existing netmap tracking. To make it easier to find that code later, this makes it modular. It saves 12 KB (of disk) to turn it off (at the expense of lots of RAM), but that's not really the point. The point is rather making it obvious (via the new constants) where this code even is. Updates #12614 Change-Id: I113b040f3e35f7d861c457eaa710d35f47cee1cb Signed-off-by: Brad Fitzpatrick --- .../buildfeatures/feature_lazywg_disabled.go | 13 +++++ .../buildfeatures/feature_lazywg_enabled.go | 13 +++++ feature/featuretags/featuretags.go | 1 + net/tstun/wrap.go | 22 +++++--- wgengine/magicsock/magicsock.go | 3 +- wgengine/userspace.go | 52 ++++++++++++------- 6 files changed, 76 insertions(+), 28 deletions(-) create mode 100644 feature/buildfeatures/feature_lazywg_disabled.go create mode 100644 feature/buildfeatures/feature_lazywg_enabled.go diff --git a/feature/buildfeatures/feature_lazywg_disabled.go b/feature/buildfeatures/feature_lazywg_disabled.go new file mode 100644 index 0000000000000..ce81d80bab6a1 --- /dev/null +++ b/feature/buildfeatures/feature_lazywg_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_lazywg + +package buildfeatures + +// HasLazyWG is whether the binary was built with support for modular feature "Lazy WireGuard configuration for memory-constrained devices with large netmaps". +// Specifically, it's whether the binary was NOT built with the "ts_omit_lazywg" build tag. +// It's a const so it can be used for dead code elimination. +const HasLazyWG = false diff --git a/feature/buildfeatures/feature_lazywg_enabled.go b/feature/buildfeatures/feature_lazywg_enabled.go new file mode 100644 index 0000000000000..259357f7f86ef --- /dev/null +++ b/feature/buildfeatures/feature_lazywg_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_lazywg + +package buildfeatures + +// HasLazyWG is whether the binary was built with support for modular feature "Lazy WireGuard configuration for memory-constrained devices with large netmaps". +// Specifically, it's whether the binary was NOT built with the "ts_omit_lazywg" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasLazyWG = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 4ae4e1b863a3a..429431ec69e38 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -159,6 +159,7 @@ var Features = map[FeatureTag]FeatureMeta{ "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, + "lazywg": {Sym: "LazyWG", Desc: "Lazy WireGuard configuration for memory-constrained devices with large netmaps"}, "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, "listenrawdisco": { Sym: "ListenRawDisco", diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index a6d88075dbbc7..7f25784a0c8bc 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -312,7 +312,9 @@ func (t *Wrapper) now() time.Time { // // The map ownership passes to the Wrapper. It must be non-nil. func (t *Wrapper) SetDestIPActivityFuncs(m map[netip.Addr]func()) { - t.destIPActivity.Store(m) + if buildfeatures.HasLazyWG { + t.destIPActivity.Store(m) + } } // SetDiscoKey sets the current discovery key. 
@@ -948,12 +950,14 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { for _, data := range res.data { p.Decode(data[res.dataOffset:]) - if m := t.destIPActivity.Load(); m != nil { - if fn := m[p.Dst.Addr()]; fn != nil { - fn() + if buildfeatures.HasLazyWG { + if m := t.destIPActivity.Load(); m != nil { + if fn := m[p.Dst.Addr()]; fn != nil { + fn() + } } } - if captHook != nil { + if buildfeatures.HasCapture && captHook != nil { captHook(packet.FromLocal, t.now(), p.Buffer(), p.CaptureMeta) } if !t.disableFilter { @@ -1085,9 +1089,11 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i pc.snat(p) invertGSOChecksum(pkt, gso) - if m := t.destIPActivity.Load(); m != nil { - if fn := m[p.Dst.Addr()]; fn != nil { - fn() + if buildfeatures.HasLazyWG { + if m := t.destIPActivity.Load(); m != nil { + if fn := m[p.Dst.Addr()]; fn != nil { + fn() + } } } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 76fbfb3b450b9..81ca49d3d6fb3 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -468,7 +468,8 @@ type Options struct { // NoteRecvActivity, if provided, is a func for magicsock to call // whenever it receives a packet from a a peer if it's been more // than ~10 seconds since the last one. (10 seconds is somewhat - // arbitrary; the sole user just doesn't need or want it called on + // arbitrary; the sole user, lazy WireGuard configuration, + // just doesn't need or want it called on // every packet, just every minute or two for WireGuard timeouts, // and 10 seconds seems like a good trade-off between often enough // and not too often.) 
diff --git a/wgengine/userspace.go b/wgengine/userspace.go index be0fe50318b40..c88ab78a1334a 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -404,19 +404,21 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } magicsockOpts := magicsock.Options{ - EventBus: e.eventBus, - Logf: logf, - Port: conf.ListenPort, - EndpointsFunc: endpointsFn, - DERPActiveFunc: e.RequestStatus, - IdleFunc: e.tundev.IdleDuration, - NoteRecvActivity: e.noteRecvActivity, - NetMon: e.netMon, - HealthTracker: e.health, - Metrics: conf.Metrics, - ControlKnobs: conf.ControlKnobs, - OnPortUpdate: onPortUpdate, - PeerByKeyFunc: e.PeerByKey, + EventBus: e.eventBus, + Logf: logf, + Port: conf.ListenPort, + EndpointsFunc: endpointsFn, + DERPActiveFunc: e.RequestStatus, + IdleFunc: e.tundev.IdleDuration, + NetMon: e.netMon, + HealthTracker: e.health, + Metrics: conf.Metrics, + ControlKnobs: conf.ControlKnobs, + OnPortUpdate: onPortUpdate, + PeerByKeyFunc: e.PeerByKey, + } + if buildfeatures.HasLazyWG { + magicsockOpts.NoteRecvActivity = e.noteRecvActivity } var err error @@ -748,15 +750,22 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node // the past 5 minutes. That's more than WireGuard's key // rotation time anyway so it's no harm if we remove it // later if it's been inactive. - activeCutoff := e.timeNow().Add(-lazyPeerIdleThreshold) + var activeCutoff mono.Time + if buildfeatures.HasLazyWG { + activeCutoff = e.timeNow().Add(-lazyPeerIdleThreshold) + } // Not all peers can be trimmed from the network map (see // isTrimmablePeer). For those that are trimmable, keep track of // their NodeKey and Tailscale IPs. These are the ones we'll need // to install tracking hooks for to watch their send/receive // activity. 
- trackNodes := make([]key.NodePublic, 0, len(full.Peers)) - trackIPs := make([]netip.Addr, 0, len(full.Peers)) + var trackNodes []key.NodePublic + var trackIPs []netip.Addr + if buildfeatures.HasLazyWG { + trackNodes = make([]key.NodePublic, 0, len(full.Peers)) + trackIPs = make([]netip.Addr, 0, len(full.Peers)) + } // Don't re-alloc the map; the Go compiler optimizes map clears as of // Go 1.11, so we can re-use the existing + allocated map. @@ -770,7 +779,7 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node for i := range full.Peers { p := &full.Peers[i] nk := p.PublicKey - if !e.isTrimmablePeer(p, len(full.Peers)) { + if !buildfeatures.HasLazyWG || !e.isTrimmablePeer(p, len(full.Peers)) { min.Peers = append(min.Peers, *p) if discoChanged[nk] { needRemoveStep = true @@ -803,7 +812,9 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node return nil } - e.updateActivityMapsLocked(trackNodes, trackIPs) + if buildfeatures.HasLazyWG { + e.updateActivityMapsLocked(trackNodes, trackIPs) + } if needRemoveStep { minner := min @@ -839,6 +850,9 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node // // e.wgLock must be held. func (e *userspaceEngine) updateActivityMapsLocked(trackNodes []key.NodePublic, trackIPs []netip.Addr) { + if !buildfeatures.HasLazyWG { + return + } // Generate the new map of which nodekeys we want to track // receive times for. 
mr := map[key.NodePublic]mono.Time{} // TODO: only recreate this if set of keys changed @@ -943,7 +957,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, peerMTUEnable := e.magicConn.ShouldPMTUD() isSubnetRouter := false - if e.birdClient != nil && nm != nil && nm.SelfNode.Valid() { + if buildfeatures.HasBird && e.birdClient != nil && nm != nil && nm.SelfNode.Valid() { isSubnetRouter = hasOverlap(nm.SelfNode.PrimaryRoutes(), nm.SelfNode.Hostinfo().RoutableIPs()) e.logf("[v1] Reconfig: hasOverlap(%v, %v) = %v; isSubnetRouter=%v lastIsSubnetRouter=%v", nm.SelfNode.PrimaryRoutes(), nm.SelfNode.Hostinfo().RoutableIPs(), From f208bf8cb11e792e6c8411990995939117265016 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 08:23:11 -0700 Subject: [PATCH 0518/1093] types/lazy: document difference from sync.OnceValue Updates #8419 Updates github.com/golang#62202 Change-Id: I0c082c4258fb7a95a17054f270dc32019bcc7581 Signed-off-by: Brad Fitzpatrick --- types/lazy/lazy.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/types/lazy/lazy.go b/types/lazy/lazy.go index f5d7be4940a11..f537758fa6415 100644 --- a/types/lazy/lazy.go +++ b/types/lazy/lazy.go @@ -23,6 +23,9 @@ var nilErrPtr = ptr.To[error](nil) // Recursive use of a SyncValue from its own fill function will deadlock. // // SyncValue is safe for concurrent use. +// +// Unlike [sync.OnceValue], the linker can do better dead code elimination +// with SyncValue. See https://github.com/golang/go/issues/62202. 
type SyncValue[T any] struct { once sync.Once v T From 6db895774426688c1d11c6d2d6365970532ad2c8 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 6 Oct 2025 09:13:03 +0100 Subject: [PATCH 0519/1093] tstest/integration: mark TestPeerRelayPing as flaky Updates #17251 Signed-off-by: Alex Chan --- tstest/integration/integration_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 481de57fd124b..f7c133f5c5871 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1644,6 +1644,7 @@ func TestEncryptStateMigration(t *testing.T) { // relay between all 3 nodes, and "tailscale debug peer-relay-sessions" returns // expected values. func TestPeerRelayPing(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17251") tstest.Shard(t) tstest.Parallel(t) From 44e1d735c32f20eeba4573db65f16d009feb63c5 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 6 Oct 2025 09:41:52 -0700 Subject: [PATCH 0520/1093] tailcfg: bump CapVer for magicsock deadlock fix (#17450) The fix that was applied in e44e28efcd95596c0a86270c177ef912119bf851. Updates tailscale/corp#32978 Signed-off-by: Jordan Whited --- tailcfg/tailcfg.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index da53b777e1cd6..7484c74664948 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -174,7 +174,8 @@ type CapabilityVersion int // - 126: 2025-09-17: Client uses seamless key renewal unless disabled by control (tailscale/corp#31479) // - 127: 2025-09-19: can handle C2N /debug/netmap. // - 128: 2025-10-02: can handle C2N /debug/health. 
-const CurrentCapabilityVersion CapabilityVersion = 128 +// - 129: 2025-10-04: Fixed sleep/wake deadlock in magicsock when using peer relay (PR #17449) +const CurrentCapabilityVersion CapabilityVersion = 129 // ID is an integer ID for a user, node, or login allocated by the // control plane. From 541a4ed5b483087c11fb190c443ff1510fb8932f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 09:03:10 -0700 Subject: [PATCH 0521/1093] all: use buildfeatures consts in a few more places Saves ~25 KB. Updates #12614 Change-Id: I7b976e57819a0d2692824d779c8cc98033df0d30 Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 3 ++ cmd/tailscale/cli/up.go | 4 ++- control/controlclient/direct.go | 2 +- ipn/ipnlocal/local.go | 22 +++++++++--- ipn/localapi/localapi.go | 10 ++++-- ipn/prefs.go | 64 +++++++++++++++++++-------------- 6 files changed, 70 insertions(+), 35 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 9faeadca33b38..a4a871dd8b2db 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -752,6 +752,9 @@ func (lc *Client) PushFile(ctx context.Context, target tailcfg.StableNodeID, siz // machine is properly configured to forward IP packets as a subnet router // or exit node. 
func (lc *Client) CheckIPForwarding(ctx context.Context) error { + if !buildfeatures.HasAdvertiseRoutes { + return nil + } body, err := lc.get200(ctx, "/localapi/v0/check-ip-forwarding") if err != nil { return err diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 3c0883ec8ee04..90c9c23af7c37 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -24,6 +24,7 @@ import ( shellquote "github.com/kballard/go-shellquote" "github.com/peterbourgon/ff/v3/ffcli" qrcode "github.com/skip2/go-qrcode" + "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister/oauthkey" "tailscale.com/health/healthmsg" "tailscale.com/internal/client/tailscale" @@ -1136,7 +1137,8 @@ func exitNodeIP(p *ipn.Prefs, st *ipnstate.Status) (ip netip.Addr) { } func warnOnAdvertiseRoutes(ctx context.Context, prefs *ipn.Prefs) { - if len(prefs.AdvertiseRoutes) > 0 || prefs.AppConnector.Advertise { + if buildfeatures.HasAdvertiseRoutes && len(prefs.AdvertiseRoutes) > 0 || + buildfeatures.HasAppConnectors && prefs.AppConnector.Advertise { // TODO(jwhited): compress CheckIPForwarding and CheckUDPGROForwarding // into a single HTTP req. 
if err := localClient.CheckIPForwarding(ctx); err != nil { diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 9223553693aba..5f26e2ba13760 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -945,7 +945,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap ConnectionHandleForTest: connectionHandleForTest, } var extraDebugFlags []string - if hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && + if buildfeatures.HasAdvertiseRoutes && hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && ipForwardingBroken(hi.RoutableIPs, c.netMon.InterfaceState()) { extraDebugFlags = append(extraDebugFlags, "warn-ip-forwarding-off") } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9a47b7cb13964..b0a8d99851874 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -586,7 +586,12 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { clientVersionSub := eventbus.Subscribe[tailcfg.ClientVersion](ec) autoUpdateSub := eventbus.Subscribe[controlclient.AutoUpdate](ec) - healthChangeSub := eventbus.Subscribe[health.Change](ec) + + var healthChange <-chan health.Change + if buildfeatures.HasHealth { + healthChangeSub := eventbus.Subscribe[health.Change](ec) + healthChange = healthChangeSub.Events() + } changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) var portlist <-chan PortlistServices @@ -604,7 +609,7 @@ func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus b.onClientVersion(&clientVersion) case au := <-autoUpdateSub.Events(): b.onTailnetDefaultAutoUpdate(au.Value) - case change := <-healthChangeSub.Events(): + case change := <-healthChange: b.onHealthChange(change) case changeDelta := <-changeDeltaSub.Events(): b.linkChange(&changeDelta) @@ -996,6 +1001,9 @@ var ( ) func (b *LocalBackend) 
onHealthChange(change health.Change) { + if !buildfeatures.HasHealth { + return + } if change.WarnableChanged { w := change.Warnable us := change.UnhealthyState @@ -6025,10 +6033,10 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { // // b.mu must be held. func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { - if b.applySysPolicyLocked(prefs) { + if buildfeatures.HasSystemPolicy && b.applySysPolicyLocked(prefs) { changed = true } - if b.resolveExitNodeInPrefsLocked(prefs) { + if buildfeatures.HasUseExitNode && b.resolveExitNodeInPrefsLocked(prefs) { changed = true } if changed { @@ -6043,6 +6051,9 @@ func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { // // b.mu must be held. func (b *LocalBackend) resolveExitNodeInPrefsLocked(prefs *ipn.Prefs) (changed bool) { + if !buildfeatures.HasUseExitNode { + return false + } if b.resolveAutoExitNodeLocked(prefs) { changed = true } @@ -6338,6 +6349,9 @@ func peerAPIPorts(peer tailcfg.NodeView) (p4, p6 uint16) { } func (b *LocalBackend) CheckIPForwarding() error { + if !buildfeatures.HasAdvertiseRoutes { + return nil + } if b.sys.IsNetstackRouter() { return nil } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index d7cd42c755fd1..74ff96f9ff5c0 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -72,15 +72,12 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "check-ip-forwarding": (*Handler).serveCheckIPForwarding, "check-prefs": (*Handler).serveCheckPrefs, "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, "derpmap": (*Handler).serveDERPMap, "dial": (*Handler).serveDial, "disconnect-control": (*Handler).disconnectControl, - "dns-osconfig": 
(*Handler).serveDNSOSConfig, - "dns-query": (*Handler).serveDNSQuery, "goroutines": (*Handler).serveGoroutines, "handle-push-message": (*Handler).serveHandlePushMessage, "id-token": (*Handler).serveIDToken, @@ -111,6 +108,9 @@ func init() { if buildfeatures.HasAppConnectors { Register("appc-route-info", (*Handler).serveGetAppcRouteInfo) } + if buildfeatures.HasAdvertiseRoutes { + Register("check-ip-forwarding", (*Handler).serveCheckIPForwarding) + } if buildfeatures.HasUseExitNode { Register("suggest-exit-node", (*Handler).serveSuggestExitNode) Register("set-use-exit-node-enabled", (*Handler).serveSetUseExitNodeEnabled) @@ -122,6 +122,10 @@ func init() { Register("bugreport", (*Handler).serveBugReport) Register("pprof", (*Handler).servePprof) } + if buildfeatures.HasDNS { + Register("dns-osconfig", (*Handler).serveDNSOSConfig) + Register("dns-query", (*Handler).serveDNSQuery) + } } // Register registers a new LocalAPI handler for the given name. diff --git a/ipn/prefs.go b/ipn/prefs.go index 8a5b17af6ac16..4a0680bbab536 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -532,12 +532,16 @@ func (p *Prefs) Pretty() string { return p.pretty(runtime.GOOS) } func (p *Prefs) pretty(goos string) string { var sb strings.Builder sb.WriteString("Prefs{") - fmt.Fprintf(&sb, "ra=%v ", p.RouteAll) - fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning) - if p.RunSSH { + if buildfeatures.HasUseRoutes { + fmt.Fprintf(&sb, "ra=%v ", p.RouteAll) + } + if buildfeatures.HasDNS { + fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning) + } + if buildfeatures.HasSSH && p.RunSSH { sb.WriteString("ssh=true ") } - if p.RunWebClient { + if buildfeatures.HasWebClient && p.RunWebClient { sb.WriteString("webclient=true ") } if p.LoggedOut { @@ -552,26 +556,30 @@ func (p *Prefs) pretty(goos string) string { if p.ShieldsUp { sb.WriteString("shields=true ") } - if p.ExitNodeIP.IsValid() { - fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess) - } else if 
!p.ExitNodeID.IsZero() { - fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess) - } - if p.AutoExitNode.IsSet() { - fmt.Fprintf(&sb, "auto=%v ", p.AutoExitNode) - } - if len(p.AdvertiseRoutes) > 0 || goos == "linux" { - fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes) - } - if len(p.AdvertiseRoutes) > 0 || p.NoSNAT { - fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT) + if buildfeatures.HasUseExitNode { + if p.ExitNodeIP.IsValid() { + fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess) + } else if !p.ExitNodeID.IsZero() { + fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess) + } + if p.AutoExitNode.IsSet() { + fmt.Fprintf(&sb, "auto=%v ", p.AutoExitNode) + } } - if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) { - // Only print if we're advertising any routes, or the user has - // turned off stateful filtering (NoStatefulFiltering=true ⇒ - // StatefulFiltering=false). - bb, _ := p.NoStatefulFiltering.Get() - fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb) + if buildfeatures.HasAdvertiseRoutes { + if len(p.AdvertiseRoutes) > 0 || goos == "linux" { + fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes) + } + if len(p.AdvertiseRoutes) > 0 || p.NoSNAT { + fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT) + } + if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) { + // Only print if we're advertising any routes, or the user has + // turned off stateful filtering (NoStatefulFiltering=true ⇒ + // StatefulFiltering=false). 
+ bb, _ := p.NoStatefulFiltering.Get() + fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb) + } } if len(p.AdvertiseTags) > 0 { fmt.Fprintf(&sb, "tags=%s ", strings.Join(p.AdvertiseTags, ",")) @@ -594,9 +602,13 @@ func (p *Prefs) pretty(goos string) string { if p.NetfilterKind != "" { fmt.Fprintf(&sb, "netfilterKind=%s ", p.NetfilterKind) } - sb.WriteString(p.AutoUpdate.Pretty()) - sb.WriteString(p.AppConnector.Pretty()) - if p.RelayServerPort != nil { + if buildfeatures.HasClientUpdate { + sb.WriteString(p.AutoUpdate.Pretty()) + } + if buildfeatures.HasAppConnectors { + sb.WriteString(p.AppConnector.Pretty()) + } + if buildfeatures.HasRelayServer && p.RelayServerPort != nil { fmt.Fprintf(&sb, "relayServerPort=%d ", *p.RelayServerPort) } if p.Persist != nil { From 525f9921fe680f52c67a4d2c5b51c332d77bfe51 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 08:05:35 -0700 Subject: [PATCH 0522/1093] cmd/testwrapper/flakytest: use t.Attr annotation on flaky tests Updates #17460 Change-Id: I7381e9a6dd73514c73deb6b863749eef1a87efdc Signed-off-by: Brad Fitzpatrick --- cmd/testwrapper/flakytest/flakytest.go | 11 ++++++++++- cmd/testwrapper/flakytest/flakytest_test.go | 3 ++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/cmd/testwrapper/flakytest/flakytest.go b/cmd/testwrapper/flakytest/flakytest.go index 6302900cbd3ab..856cb28ef275a 100644 --- a/cmd/testwrapper/flakytest/flakytest.go +++ b/cmd/testwrapper/flakytest/flakytest.go @@ -27,7 +27,7 @@ const FlakyTestLogMessage = "flakytest: this is a known flaky test" // starting at 1. 
const FlakeAttemptEnv = "TS_TESTWRAPPER_ATTEMPT" -var issueRegexp = regexp.MustCompile(`\Ahttps://github\.com/tailscale/[a-zA-Z0-9_.-]+/issues/\d+\z`) +var issueRegexp = regexp.MustCompile(`\Ahttps://github\.com/[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+/issues/\d+\z`) var ( rootFlakesMu sync.Mutex @@ -49,6 +49,15 @@ func Mark(t testing.TB, issue string) { // spamming people running tests without the wrapper) fmt.Fprintf(os.Stderr, "%s: %s\n", FlakyTestLogMessage, issue) } + t.Attr("flaky-test-issue-url", issue) + + // The Attr method above also emits human-readable output, so this t.Logf + // is somewhat redundant, but we keep it for compatibility with + // old test runs, so cmd/testwrapper doesn't need to be modified. + // TODO(bradfitz): switch testwrapper to look for Action "attr" + // instead: + // "Action":"attr","Package":"tailscale.com/cmd/testwrapper/flakytest","Test":"TestMarked_Root","Key":"flaky-test-issue-url","Value":"https://github.com/tailscale/tailscale/issues/0"} + // And then remove this Logf a month or so after that. t.Logf("flakytest: issue tracking this flaky test: %s", issue) // Record the root test name as flakey. 
diff --git a/cmd/testwrapper/flakytest/flakytest_test.go b/cmd/testwrapper/flakytest/flakytest_test.go index 64cbfd9a3cd1f..9b744de13d446 100644 --- a/cmd/testwrapper/flakytest/flakytest_test.go +++ b/cmd/testwrapper/flakytest/flakytest_test.go @@ -14,7 +14,8 @@ func TestIssueFormat(t *testing.T) { want bool }{ {"https://github.com/tailscale/cOrp/issues/1234", true}, - {"https://github.com/otherproject/corp/issues/1234", false}, + {"https://github.com/otherproject/corp/issues/1234", true}, + {"https://not.huyb/tailscale/corp/issues/1234", false}, {"https://github.com/tailscale/corp/issues/", false}, } for _, testCase := range testCases { From ea8e991d69c02ce8c9b65fda70ac56a4707416bf Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 6 Oct 2025 16:43:27 +0000 Subject: [PATCH 0523/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 1 - licenses/apple.md | 2 +- licenses/tailscale.md | 1 - licenses/windows.md | 27 ++------------------------- 4 files changed, 3 insertions(+), 28 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index 881f3ed3df9ea..f578c17cb19e8 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -24,7 +24,6 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 6b6d470457227..4c50e95595742 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -68,7 +68,7 @@ See also the dependencies in the [Tailscale CLI][]. 
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index b15b937440c9d..0ef5bcf61d5f8 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -73,7 +73,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - 
[github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/d4cd19a26976/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 37c41ca3fc05f..f6704cf32bb5a 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -10,29 +10,11 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE)) - [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - - 
[github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - - 
[github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) - [github.com/cespare/xxhash/v2](https://pkg.go.dev/github.com/cespare/xxhash/v2) ([MIT](https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - 
[github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) @@ -40,12 +22,10 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/go-cmp/cmp](https://pkg.go.dev/github.com/google/go-cmp/cmp) ([BSD-3-Clause](https://github.com/google/go-cmp/blob/v0.7.0/LICENSE)) - - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/gregjones/httpcache](https://pkg.go.dev/github.com/gregjones/httpcache) ([MIT](https://github.com/gregjones/httpcache/blob/901d90724c79/LICENSE.txt)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) - 
[github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) @@ -62,24 +42,21 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/6376defdac3f/LICENSE)) - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/f4da2b8ee071/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.5/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) 
([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.27.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.28.0:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.29.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) 
([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.7/LICENSE)) From d816454a88f3f0276294c8f5150ba7b7e0471552 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 12:02:16 -0700 Subject: [PATCH 0524/1093] feature/featuretags: make usermetrics modular Saves ~102 KB from the min build. Updates #12614 Change-Id: Ie1d4f439321267b9f98046593cb289ee3c4d6249 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/debug.go | 8 +++++ cmd/tailscaled/depaware-min.txt | 7 ++--- cmd/tailscaled/depaware-minbox.txt | 7 ++--- cmd/tailscaled/depaware.txt | 2 +- cmd/tailscaled/deps_test.go | 3 ++ cmd/tailscaled/tailscaled.go | 8 ----- cmd/tsidp/depaware.txt | 2 +- .../feature_usermetrics_disabled.go | 13 +++++++++ .../feature_usermetrics_enabled.go | 13 +++++++++ feature/featuretags/featuretags.go | 4 +++ ipn/localapi/localapi.go | 4 ++- net/tstun/wrap.go | 5 ++-- tsnet/depaware.txt | 2 +- util/usermetric/metrics.go | 13 +++++---- util/usermetric/omit.go | 29 +++++++++++++++++++ util/usermetric/usermetric.go | 6 ++++ 17 files changed, 97 insertions(+), 31 deletions(-) create mode 100644 feature/buildfeatures/feature_usermetrics_disabled.go create mode 100644 feature/buildfeatures/feature_usermetrics_enabled.go create mode 100644 util/usermetric/omit.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 6ecbd3df8c31a..2c4cd9e85c1b5 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -742,7 +742,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from 
tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index bcc34fb0d3c5d..b16cb28e0df54 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -28,7 +28,9 @@ import ( "tailscale.com/ipn" "tailscale.com/net/netmon" "tailscale.com/tailcfg" + "tailscale.com/tsweb/varz" "tailscale.com/types/key" + "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) @@ -58,6 +60,12 @@ func newDebugMux() *http.ServeMux { return mux } +func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + varz.Handler(w, r) + clientmetric.WritePrometheusExpositionFormat(w) +} + func debugMode(args []string) error { fs := flag.NewFlagSet("debug", flag.ExitOnError) fs.BoolVar(&debugArgs.ifconfig, "ifconfig", false, "If true, print network interface state") diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 6d7a11623e243..ed7ddee2a0ded 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -76,7 +76,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/connstats from tailscale.com/net/tstun+ @@ -123,7 +122,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter - tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ 
tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ @@ -150,7 +148,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/control/controlclient+ tailscale.com/util/execqueue from tailscale.com/appc+ @@ -205,7 +202,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ + golang.org/x/exp/constraints from tailscale.com/util/set golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ @@ -321,7 +318,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/json from expvar+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/metrics+ + expvar from tailscale.com/wgengine/magicsock flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 5a71cebd42a05..93a884c1ec2ee 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -99,7 +99,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy 
from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock @@ -149,7 +148,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ tailscale.com/tstime/rate from tailscale.com/wgengine/filter - tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ tailscale.com/types/empty from tailscale.com/ipn+ @@ -177,7 +175,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cmpver from tailscale.com/clientupdate tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ @@ -233,7 +230,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ + golang.org/x/exp/constraints from tailscale.com/util/set golang.org/x/exp/maps from tailscale.com/ipn/store/mem golang.org/x/net/bpf from github.com/mdlayher/genetlink+ 
golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ @@ -351,7 +348,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/json from expvar+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/metrics+ + expvar from tailscale.com/wgengine/magicsock flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 1bd165c171045..7ef5c2ede1b5d 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -323,7 +323,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/ace from tailscale.com/feature/ace tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 2dd140f2315f2..a66706db29a80 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -265,6 +265,9 @@ func TestMinTailscaledWithCLI(t *testing.T) { "hujson", "pprof", "multierr", // https://github.com/tailscale/tailscale/pull/17379 + "tailscale.com/metrics", + "tailscale.com/tsweb/varz", + "dirwalk", } deptest.DepChecker{ GOOS: "linux", diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index c3a4c8b054bdb..62df4067d0a24 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -51,11 +51,9 @@ import ( "tailscale.com/safesocket" "tailscale.com/syncs" "tailscale.com/tsd" - "tailscale.com/tsweb/varz" "tailscale.com/types/flagtype" "tailscale.com/types/logger" "tailscale.com/types/logid" 
- "tailscale.com/util/clientmetric" "tailscale.com/util/osshare" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" @@ -831,12 +829,6 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo var hookNewDebugMux feature.Hook[func() *http.ServeMux] -func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - varz.Handler(w, r) - clientmetric.WritePrometheusExpositionFormat(w) -} - func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { if !buildfeatures.HasDebug { return diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index f348a394f8be1..fb7c59ebcca92 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -170,7 +170,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git a/feature/buildfeatures/feature_usermetrics_disabled.go b/feature/buildfeatures/feature_usermetrics_disabled.go new file mode 100644 index 0000000000000..092c89c3b543f --- /dev/null +++ b/feature/buildfeatures/feature_usermetrics_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_usermetrics + +package buildfeatures + +// HasUserMetrics is whether the binary was built with support for modular feature "Usermetrics (documented, stable) metrics support". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_usermetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasUserMetrics = false diff --git a/feature/buildfeatures/feature_usermetrics_enabled.go b/feature/buildfeatures/feature_usermetrics_enabled.go new file mode 100644 index 0000000000000..813e3c3477b66 --- /dev/null +++ b/feature/buildfeatures/feature_usermetrics_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_usermetrics + +package buildfeatures + +// HasUserMetrics is whether the binary was built with support for modular feature "Usermetrics (documented, stable) metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_usermetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasUserMetrics = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 429431ec69e38..a751f65fbb05f 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -270,6 +270,10 @@ var Features = map[FeatureTag]FeatureMeta{ Sym: "UseProxy", Desc: "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.", }, + "usermetrics": { + Sym: "UserMetrics", + Desc: "Usermetrics (documented, stable) metrics support", + }, "wakeonlan": {Sym: "WakeOnLAN", Desc: "Wake-on-LAN support"}, "webclient": { Sym: "WebClient", Desc: "Web client support", diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 74ff96f9ff5c0..fb2c964e7a471 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -99,7 +99,6 @@ var handler = map[string]LocalAPIHandler{ "status": (*Handler).serveStatus, "update/check": (*Handler).serveUpdateCheck, "upload-client-metrics": (*Handler).serveUploadClientMetrics, - 
"usermetrics": (*Handler).serveUserMetrics, "watch-ipn-bus": (*Handler).serveWatchIPNBus, "whois": (*Handler).serveWhoIs, } @@ -126,6 +125,9 @@ func init() { Register("dns-osconfig", (*Handler).serveDNSOSConfig) Register("dns-query", (*Handler).serveDNSQuery) } + if buildfeatures.HasUserMetrics { + Register("usermetrics", (*Handler).serveUserMetrics) + } } // Register registers a new LocalAPI handler for the given name. diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 7f25784a0c8bc..fb93ca21eaaba 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -24,7 +24,6 @@ import ( "go4.org/mem" "tailscale.com/disco" "tailscale.com/feature/buildfeatures" - tsmetrics "tailscale.com/metrics" "tailscale.com/net/connstats" "tailscale.com/net/packet" "tailscale.com/net/packet/checksum" @@ -213,8 +212,8 @@ type Wrapper struct { } type metrics struct { - inboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels] - outboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels] + inboundDroppedPacketsTotal *usermetric.MultiLabelMap[usermetric.DropLabels] + outboundDroppedPacketsTotal *usermetric.MultiLabelMap[usermetric.DropLabels] } func registerMetrics(reg *usermetric.Registry) *metrics { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 014ea109c2d68..4c3d8018fbf5b 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -166,7 +166,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/net/tstun+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ diff --git 
a/util/usermetric/metrics.go b/util/usermetric/metrics.go index 044b4d65f7120..be425fb87fd6c 100644 --- a/util/usermetric/metrics.go +++ b/util/usermetric/metrics.go @@ -10,15 +10,15 @@ package usermetric import ( "sync" - "tailscale.com/metrics" + "tailscale.com/feature/buildfeatures" ) // Metrics contains user-facing metrics that are used by multiple packages. type Metrics struct { initOnce sync.Once - droppedPacketsInbound *metrics.MultiLabelMap[DropLabels] - droppedPacketsOutbound *metrics.MultiLabelMap[DropLabels] + droppedPacketsInbound *MultiLabelMap[DropLabels] + droppedPacketsOutbound *MultiLabelMap[DropLabels] } // DropReason is the reason why a packet was dropped. @@ -55,6 +55,9 @@ type DropLabels struct { // initOnce initializes the common metrics. func (r *Registry) initOnce() { + if !buildfeatures.HasUserMetrics { + return + } r.m.initOnce.Do(func() { r.m.droppedPacketsInbound = NewMultiLabelMapWithRegistry[DropLabels]( r, @@ -73,13 +76,13 @@ func (r *Registry) initOnce() { // DroppedPacketsOutbound returns the outbound dropped packet metric, creating it // if necessary. -func (r *Registry) DroppedPacketsOutbound() *metrics.MultiLabelMap[DropLabels] { +func (r *Registry) DroppedPacketsOutbound() *MultiLabelMap[DropLabels] { r.initOnce() return r.m.droppedPacketsOutbound } // DroppedPacketsInbound returns the inbound dropped packet metric. 
-func (r *Registry) DroppedPacketsInbound() *metrics.MultiLabelMap[DropLabels] { +func (r *Registry) DroppedPacketsInbound() *MultiLabelMap[DropLabels] { r.initOnce() return r.m.droppedPacketsInbound } diff --git a/util/usermetric/omit.go b/util/usermetric/omit.go new file mode 100644 index 0000000000000..0611990abe89e --- /dev/null +++ b/util/usermetric/omit.go @@ -0,0 +1,29 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_usermetrics + +package usermetric + +type Registry struct { + m Metrics +} + +func (*Registry) NewGauge(name, help string) *Gauge { return nil } + +type MultiLabelMap[T comparable] = noopMap[T] + +type noopMap[T comparable] struct{} + +type Gauge struct{} + +func (*Gauge) Set(float64) {} + +func NewMultiLabelMapWithRegistry[T comparable](m *Registry, name string, promType, helpText string) *MultiLabelMap[T] { + return nil +} + +func (*noopMap[T]) Add(T, int64) {} +func (*noopMap[T]) Set(T, any) {} + +func (r *Registry) Handler(any, any) {} // no-op HTTP handler diff --git a/util/usermetric/usermetric.go b/util/usermetric/usermetric.go index 74e9447a64bbb..1805a5dbee626 100644 --- a/util/usermetric/usermetric.go +++ b/util/usermetric/usermetric.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_usermetrics + // Package usermetric provides a container and handler // for user-facing metrics. package usermetric @@ -25,6 +27,10 @@ type Registry struct { m Metrics } +// MultiLabelMap is an alias for metrics.MultiLabelMap in the common case, +// or an alias to a lighter type when usermetrics are omitted from the build. +type MultiLabelMap[T comparable] = metrics.MultiLabelMap[T] + // NewMultiLabelMapWithRegistry creates and register a new // MultiLabelMap[T] variable with the given name and returns it. // The variable is registered with the userfacing metrics package. 
From 7407f404d996594de43a546cfabbd40d776a4e22 Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Mon, 6 Oct 2025 22:48:43 +0100 Subject: [PATCH 0525/1093] ipn/ipnlocal: fix setAuthURL / setWgengineStatus race condition (#17408) If we received a wg engine status while processing an auth URL, there was a race condition where the authURL could be reset to "" immediately after we set it. To fix this we need to check that we are moving from a non-Running state to a Running state rather than always resetting the URL when we "move" into a Running state even if that is the current state. We also need to make sure that we do not return from stopEngineAndWait until the engine is stopped: before, we would return as soon as we received any engine status update, but that might have been an update already in-flight before we asked the engine to stop. Now we wait until we see an update that is indicative of a stopped engine, or we see that the engine is unblocked again, which indicates that the engine stopped and then started again while we were waiting before we checked the state. Updates #17388 Signed-off-by: James Sanderson Co-authored-by: Nick Khyl --- ipn/ipnlocal/local.go | 64 +++++++---- ipn/ipnlocal/local_test.go | 9 ++ ipn/ipnlocal/state_test.go | 229 +++++++++++++++++++++++++++++++++++++ 3 files changed, 278 insertions(+), 24 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index b0a8d99851874..c560fdae160cb 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -313,9 +313,8 @@ type LocalBackend struct { serveListeners map[netip.AddrPort]*localListener // listeners for local serve traffic serveProxyHandlers sync.Map // string (HTTPHandler.Proxy) => *reverseProxy - // statusLock must be held before calling statusChanged.Wait() or + // mu must be held before calling statusChanged.Wait() or // statusChanged.Broadcast(). 
- statusLock sync.Mutex statusChanged *sync.Cond // dialPlan is any dial plan that we've received from the control @@ -542,7 +541,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.setTCPPortsIntercepted(nil) - b.statusChanged = sync.NewCond(&b.statusLock) + b.statusChanged = sync.NewCond(&b.mu) b.e.SetStatusCallback(b.setWgengineStatus) b.prevIfState = netMon.InterfaceState() @@ -2265,14 +2264,15 @@ func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) { b.send(ipn.Notify{Engine: &es}) } +// broadcastStatusChanged must not be called with b.mu held. func (b *LocalBackend) broadcastStatusChanged() { // The sync.Cond docs say: "It is allowed but not required for the caller to hold c.L during the call." - // In this particular case, we must acquire b.statusLock. Otherwise we might broadcast before + // In this particular case, we must acquire b.mu. Otherwise we might broadcast before // the waiter (in requestEngineStatusAndWait) starts to wait, in which case // the waiter can get stuck indefinitely. See PR 2865. - b.statusLock.Lock() + b.mu.Lock() b.statusChanged.Broadcast() - b.statusLock.Unlock() + b.mu.Unlock() } // SetNotifyCallback sets the function to call when the backend has something to @@ -3343,11 +3343,12 @@ func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool, recipient if !b.seamlessRenewalEnabled() || keyExpired { b.blockEngineUpdates(true) b.stopEngineAndWait() + + if b.State() == ipn.Running { + b.enterState(ipn.Starting) + } } b.tellRecipientToBrowseToURL(url, toNotificationTarget(recipient)) - if b.State() == ipn.Running { - b.enterState(ipn.Starting) - } } // validPopBrowserURL reports whether urlStr is a valid value for a @@ -5513,7 +5514,13 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock activeLogin := b.activeLogin authURL := b.authURL if newState == ipn.Running { - b.resetAuthURLLocked() + // TODO(zofrex): Is this needed? 
As of 2025-10-03 it doesn't seem to be + // necessary when logging in or authenticating. When do we need to reset it + // here, rather than the other places it is reset? We should test if it is + // necessary and add unit tests to cover those cases, or remove it. + if oldState != ipn.Running { + b.resetAuthURLLocked() + } // Start a captive portal detection loop if none has been // started. Create a new context if none is present, since it @@ -5750,29 +5757,38 @@ func (u unlockOnce) UnlockEarly() { } // stopEngineAndWait deconfigures the local network data plane, and -// waits for it to deliver a status update before returning. -// -// TODO(danderson): this may be racy. We could unblock upon receiving -// a status update that predates the "I've shut down" update. +// waits for it to deliver a status update indicating it has stopped +// before returning. func (b *LocalBackend) stopEngineAndWait() { b.logf("stopEngineAndWait...") b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) - b.requestEngineStatusAndWait() + b.requestEngineStatusAndWaitForStopped() b.logf("stopEngineAndWait: done.") } -// Requests the wgengine status, and does not return until the status -// was delivered (to the usual callback). -func (b *LocalBackend) requestEngineStatusAndWait() { - b.logf("requestEngineStatusAndWait") +// Requests the wgengine status, and does not return until a status was +// delivered (to the usual callback) that indicates the engine is stopped. 
+func (b *LocalBackend) requestEngineStatusAndWaitForStopped() { + b.logf("requestEngineStatusAndWaitForStopped") - b.statusLock.Lock() - defer b.statusLock.Unlock() + b.mu.Lock() + defer b.mu.Unlock() b.goTracker.Go(b.e.RequestStatus) - b.logf("requestEngineStatusAndWait: waiting...") - b.statusChanged.Wait() // temporarily releases lock while waiting - b.logf("requestEngineStatusAndWait: got status update.") + b.logf("requestEngineStatusAndWaitForStopped: waiting...") + for { + b.statusChanged.Wait() // temporarily releases lock while waiting + + if !b.blocked { + b.logf("requestEngineStatusAndWaitForStopped: engine is no longer blocked, must have stopped and started again, not safe to wait.") + break + } + if b.engineStatus.NumLive == 0 && b.engineStatus.LiveDERPs == 0 { + b.logf("requestEngineStatusAndWaitForStopped: engine is stopped.") + break + } + b.logf("requestEngineStatusAndWaitForStopped: engine is still running. Waiting...") + } } // setControlClientLocked sets the control client to cc, diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index a662793dbac20..bc8bd2a67cff0 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1505,6 +1505,15 @@ func wantExitNodeIDNotify(want tailcfg.StableNodeID) wantedNotification { } } +func wantStateNotify(want ipn.State) wantedNotification { + return wantedNotification{ + name: "State=" + want.String(), + cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + return n.State != nil && *n.State == want + }, + } +} + func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index a387af035bbbb..d773f722762c2 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1561,6 +1561,235 @@ func TestEngineReconfigOnStateChange(t *testing.T) { } } +// TestStateMachineURLRace tests that wgengine updates arriving in the middle of +// processing 
an auth URL doesn't result in the auth URL being cleared. +func TestStateMachineURLRace(t *testing.T) { + runTestStateMachineURLRace(t, false) +} + +func TestStateMachineURLRaceSeamless(t *testing.T) { + runTestStateMachineURLRace(t, true) +} + +func runTestStateMachineURLRace(t *testing.T, seamless bool) { + var cc *mockControl + b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + cc = newClient(t, opts) + return cc + }) + + nw := newNotificationWatcher(t, b, &ipnauth.TestActor{}) + + t.Logf("Start") + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.NeedsLogin)}) + b.Start(ipn.Options{ + UpdatePrefs: &ipn.Prefs{ + WantRunning: true, + ControlURL: "https://localhost:1/", + }, + }) + nw.check() + + t.Logf("LoginFinished") + cc.persist.UserProfile.LoginName = "user1" + cc.persist.NodeID = "node1" + + if seamless { + b.sys.ControlKnobs().SeamlessKeyRenewal.Store(true) + } + + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Starting)}) + cc.send(nil, "", true, &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + }) + nw.check() + + t.Logf("Running") + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Running)}) + b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) + nw.check() + + t.Logf("Re-auth (StartLoginInteractive)") + b.StartLoginInteractive(t.Context()) + + stop := make(chan struct{}) + stopSpamming := sync.OnceFunc(func() { + stop <- struct{}{} + }) + // if seamless renewal is enabled, the engine won't be disabled, and we won't + // ever call stopSpamming, so make sure it does get called + defer stopSpamming() + + // Intercept updates between the engine and localBackend, so that we can see + // when the "stopped" update comes in and ensure we stop sending our "we're + // up" updates after that point. 
+ b.e.SetStatusCallback(func(s *wgengine.Status, err error) { + // This is not one of our fake status updates, this is generated from the + // engine in response to LocalBackend calling RequestStatus. Stop spamming + // our fake statuses. + // + // TODO(zofrex): This is fragile, it works right now but would break if the + // calling pattern of RequestStatus changes. We should ensure that we keep + // sending "we're up" statuses right until Reconfig is called with + // zero-valued configs, and after that point only send "stopped" statuses. + stopSpamming() + + // Once stopSpamming returns we are guaranteed to not send any more updates, + // so we can now send the real update (indicating shutdown) and be certain + // it will be received after any fake updates we sent. This is possibly a + // stronger guarantee than we get from the real engine? + b.setWgengineStatus(s, err) + }) + + // time needs to be >= last time for the status to be accepted, send all our + // spam with the same stale time so that when a real update comes in it will + // definitely be accepted. + time := b.lastStatusTime + + // Flood localBackend with a lot of wgengine status updates, so if there are + // any race conditions in the multiple locks/unlocks that happen as we process + // the received auth URL, we will hit them. + go func() { + t.Logf("sending lots of fake wgengine status updates") + for { + select { + case <-stop: + t.Logf("stopping fake wgengine status updates") + return + default: + b.setWgengineStatus(&wgengine.Status{AsOf: time, DERPs: 1}, nil) + } + } + }() + + t.Logf("Re-auth (receive URL)") + url1 := "https://localhost:1/1" + cc.send(nil, url1, false, nil) + + // Don't need to wait on anything else - once .send completes, authURL should + // be set, and once .send has completed, any opportunities for a WG engine + // status update to trample it have ended as well. 
+ if b.authURL == "" { + t.Fatalf("expected authURL to be set") + } +} + +func TestWGEngineDownThenUpRace(t *testing.T) { + var cc *mockControl + b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + cc = newClient(t, opts) + return cc + }) + + nw := newNotificationWatcher(t, b, &ipnauth.TestActor{}) + + t.Logf("Start") + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.NeedsLogin)}) + b.Start(ipn.Options{ + UpdatePrefs: &ipn.Prefs{ + WantRunning: true, + ControlURL: "https://localhost:1/", + }, + }) + nw.check() + + t.Logf("LoginFinished") + cc.persist.UserProfile.LoginName = "user1" + cc.persist.NodeID = "node1" + + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Starting)}) + cc.send(nil, "", true, &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + }) + nw.check() + + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Running)}) + b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) + nw.check() + + t.Logf("Re-auth (StartLoginInteractive)") + b.StartLoginInteractive(t.Context()) + + var timeLock sync.RWMutex + timestamp := b.lastStatusTime + + engineShutdown := make(chan struct{}) + gotShutdown := sync.OnceFunc(func() { + t.Logf("engineShutdown") + engineShutdown <- struct{}{} + }) + + b.e.SetStatusCallback(func(s *wgengine.Status, err error) { + timeLock.Lock() + if s.AsOf.After(timestamp) { + timestamp = s.AsOf + } + timeLock.Unlock() + + if err != nil || (s.DERPs == 0 && len(s.Peers) == 0) { + gotShutdown() + } else { + b.setWgengineStatus(s, err) + } + }) + + t.Logf("Re-auth (receive URL)") + url1 := "https://localhost:1/1" + + done := make(chan struct{}) + var wg sync.WaitGroup + + wg.Go(func() { + t.Log("cc.send starting") + cc.send(nil, url1, false, nil) // will block until engine stops + t.Log("cc.send returned") + }) + + <-engineShutdown // will get called once cc.send is blocked + gotShutdown = sync.OnceFunc(func() { + 
t.Logf("engineShutdown") + engineShutdown <- struct{}{} + }) + + wg.Go(func() { + t.Log("StartLoginInteractive starting") + b.StartLoginInteractive(t.Context()) // will also block until engine stops + t.Log("StartLoginInteractive returned") + }) + + <-engineShutdown // will get called once StartLoginInteractive is blocked + + st := controlclient.Status{} + st.SetStateForTest(controlclient.StateAuthenticated) + b.SetControlClientStatus(cc, st) + + timeLock.RLock() + b.setWgengineStatus(&wgengine.Status{AsOf: timestamp}, nil) // engine is down event finally arrives + b.setWgengineStatus(&wgengine.Status{AsOf: timestamp, DERPs: 1}, nil) // engine is back up + timeLock.RUnlock() + + go func() { + wg.Wait() + done <- struct{}{} + }() + + t.Log("waiting for .send and .StartLoginInteractive to return") + + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timed out waiting") + } + + t.Log("both returned") +} + func buildNetmapWithPeers(self tailcfg.NodeView, peers ...tailcfg.NodeView) *netmap.NetworkMap { const ( firstAutoUserID = tailcfg.UserID(10000) From e0f222b686ca4e542c6d83075f08a7e34dd69d34 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 6 Oct 2025 15:04:17 -0700 Subject: [PATCH 0526/1093] appc,ipn/ipnlocal: receive AppConnector updates via the event bus (#17411) Add subscribers for AppConnector events Make the RouteAdvertiser interface optional We cannot yet remove it because the tests still depend on it to verify correctness. We will need to separately update the test fixtures to remove that dependency. Publish RouteInfo via the event bus, so we do not need a callback to do that. Replace it with a flag that indicates whether to treat the route info the connector has as "definitive" for filtering purposes. Update the tests to simplify the construction of AppConnector values now that a store callback is no longer required. 
Also fix a couple of pre-existing racy tests that were hidden by not being concurrent in the same way production is. Updates #15160 Updates #17192 Change-Id: Id39525c0f02184e88feaf0d8a3c05504850e47ee Signed-off-by: M. J. Fromberger --- appc/appconnector.go | 111 +++++++++++------------ appc/appconnector_test.go | 166 ++++++++++++----------------------- ipn/ipnlocal/local.go | 61 ++++++++----- ipn/ipnlocal/local_test.go | 88 +++++++++++++------ ipn/ipnlocal/peerapi_test.go | 79 ++++++++--------- 5 files changed, 238 insertions(+), 267 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index 2918840656377..e7b5032f0edc4 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -134,8 +134,9 @@ type AppConnector struct { updatePub *eventbus.Publisher[appctype.RouteUpdate] storePub *eventbus.Publisher[appctype.RouteInfo] - // storeRoutesFunc will be called to persist routes if it is not nil. - storeRoutesFunc func(*appctype.RouteInfo) error + // hasStoredRoutes records whether the connector was initialized with + // persisted route information. + hasStoredRoutes bool // mu guards the fields that follow mu sync.Mutex @@ -168,16 +169,14 @@ type Config struct { EventBus *eventbus.Bus // RouteAdvertiser allows the connector to update the set of advertised routes. - // It must be non-nil. RouteAdvertiser RouteAdvertiser // RouteInfo, if non-nil, use used as the initial set of routes for the // connector. If nil, the connector starts empty. RouteInfo *appctype.RouteInfo - // StoreRoutesFunc, if non-nil, is called when the connector's routes - // change, to allow the routes to be persisted. - StoreRoutesFunc func(*appctype.RouteInfo) error + // HasStoredRoutes indicates that the connector should assume stored routes. + HasStoredRoutes bool } // NewAppConnector creates a new AppConnector. 
@@ -187,8 +186,6 @@ func NewAppConnector(c Config) *AppConnector { panic("missing logger") case c.EventBus == nil: panic("missing event bus") - case c.RouteAdvertiser == nil: - panic("missing route advertiser") } ec := c.EventBus.Client("appc.AppConnector") @@ -199,7 +196,7 @@ func NewAppConnector(c Config) *AppConnector { updatePub: eventbus.Publish[appctype.RouteUpdate](ec), storePub: eventbus.Publish[appctype.RouteInfo](ec), routeAdvertiser: c.RouteAdvertiser, - storeRoutesFunc: c.StoreRoutesFunc, + hasStoredRoutes: c.HasStoredRoutes, } if c.RouteInfo != nil { ac.domains = c.RouteInfo.Domains @@ -218,13 +215,19 @@ func NewAppConnector(c Config) *AppConnector { // ShouldStoreRoutes returns true if the appconnector was created with the controlknob on // and is storing its discovered routes persistently. -func (e *AppConnector) ShouldStoreRoutes() bool { - return e.storeRoutesFunc != nil -} +func (e *AppConnector) ShouldStoreRoutes() bool { return e.hasStoredRoutes } // storeRoutesLocked takes the current state of the AppConnector and persists it -func (e *AppConnector) storeRoutesLocked() error { +func (e *AppConnector) storeRoutesLocked() { if e.storePub.ShouldPublish() { + // log write rate and write size + numRoutes := int64(len(e.controlRoutes)) + for _, rs := range e.domains { + numRoutes += int64(len(rs)) + } + e.writeRateMinute.update(numRoutes) + e.writeRateDay.update(numRoutes) + e.storePub.Publish(appctype.RouteInfo{ // Clone here, as the subscriber will handle these outside our lock. 
Control: slices.Clone(e.controlRoutes), @@ -232,24 +235,6 @@ func (e *AppConnector) storeRoutesLocked() error { Wildcards: slices.Clone(e.wildcards), }) } - if !e.ShouldStoreRoutes() { - return nil - } - - // log write rate and write size - numRoutes := int64(len(e.controlRoutes)) - for _, rs := range e.domains { - numRoutes += int64(len(rs)) - } - e.writeRateMinute.update(numRoutes) - e.writeRateDay.update(numRoutes) - - // TODO(creachdair): Remove this once it's delivered over the event bus. - return e.storeRoutesFunc(&appctype.RouteInfo{ - Control: e.controlRoutes, - Domains: e.domains, - Wildcards: e.wildcards, - }) } // ClearRoutes removes all route state from the AppConnector. @@ -259,7 +244,8 @@ func (e *AppConnector) ClearRoutes() error { e.controlRoutes = nil e.domains = nil e.wildcards = nil - return e.storeRoutesLocked() + e.storeRoutesLocked() + return nil } // UpdateDomainsAndRoutes starts an asynchronous update of the configuration @@ -331,9 +317,9 @@ func (e *AppConnector) updateDomains(domains []string) { } } - // Everything left in oldDomains is a domain we're no longer tracking - // and if we are storing route info we can unadvertise the routes - if e.ShouldStoreRoutes() { + // Everything left in oldDomains is a domain we're no longer tracking and we + // can unadvertise the routes. 
+ if e.hasStoredRoutes { toRemove := []netip.Prefix{} for _, addrs := range oldDomains { for _, a := range addrs { @@ -342,11 +328,13 @@ func (e *AppConnector) updateDomains(domains []string) { } if len(toRemove) != 0 { - e.queue.Add(func() { - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) - } - }) + if ra := e.routeAdvertiser; ra != nil { + e.queue.Add(func() { + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) + } + }) + } e.updatePub.Publish(appctype.RouteUpdate{Unadvertise: toRemove}) } } @@ -369,11 +357,10 @@ func (e *AppConnector) updateRoutes(routes []netip.Prefix) { var toRemove []netip.Prefix - // If we're storing routes and know e.controlRoutes is a good - // representation of what should be in AdvertisedRoutes we can stop - // advertising routes that used to be in e.controlRoutes but are not - // in routes. - if e.ShouldStoreRoutes() { + // If we know e.controlRoutes is a good representation of what should be in + // AdvertisedRoutes we can stop advertising routes that used to be in + // e.controlRoutes but are not in routes. 
+ if e.hasStoredRoutes { toRemove = routesWithout(e.controlRoutes, routes) } @@ -390,23 +377,23 @@ nextRoute: } } - e.queue.Add(func() { - if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { - e.logf("failed to advertise routes: %v: %v", routes, err) - } - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes: %v: %v", toRemove, err) - } - }) + if e.routeAdvertiser != nil { + e.queue.Add(func() { + if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { + e.logf("failed to advertise routes: %v: %v", routes, err) + } + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes: %v: %v", toRemove, err) + } + }) + } e.updatePub.Publish(appctype.RouteUpdate{ Advertise: routes, Unadvertise: toRemove, }) e.controlRoutes = routes - if err := e.storeRoutesLocked(); err != nil { - e.logf("failed to store route info: %v", err) - } + e.storeRoutesLocked() } // Domains returns the currently configured domain list. @@ -485,9 +472,11 @@ func (e *AppConnector) isAddrKnownLocked(domain string, addr netip.Addr) bool { // associated with the given domain. 
func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Prefix) { e.queue.Add(func() { - if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { - e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) - return + if e.routeAdvertiser != nil { + if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { + e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) + return + } } e.updatePub.Publish(appctype.RouteUpdate{Advertise: routes}) e.mu.Lock() @@ -503,9 +492,7 @@ func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Pref e.logf("[v2] advertised route for %v: %v", domain, addr) } } - if err := e.storeRoutesLocked(); err != nil { - e.logf("failed to store route info: %v", err) - } + e.storeRoutesLocked() }) } diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index 91f0185d0b23d..5c362d6fd1217 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -26,24 +26,15 @@ import ( "tailscale.com/util/slicesx" ) -func fakeStoreRoutes(*appctype.RouteInfo) error { return nil } - func TestUpdateDomains(t *testing.T) { ctx := t.Context() bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: &appctest.RouteCollector{}, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: &appctest.RouteCollector{}}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) a.UpdateDomains([]string{"example.com"}) @@ -76,18 +67,12 @@ func TestUpdateRoutes(t *testing.T) { for _, shouldStore := range []bool{false, true} { w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = 
NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) a.updateDomains([]string{"*.example.com"}) @@ -149,18 +134,12 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { for _, shouldStore := range []bool{false, true} { w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) @@ -190,18 +169,12 @@ func TestDomainRoutes(t *testing.T) { for _, shouldStore := range []bool{false, true} { w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) a.updateDomains([]string{"example.com"}) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -232,18 +205,12 @@ func TestObserveDNSResponse(t *testing.T) { for _, 
shouldStore := range []bool{false, true} { w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) // a has no domains configured, so it should not advertise any routes @@ -346,18 +313,12 @@ func TestWildcardDomains(t *testing.T) { for _, shouldStore := range []bool{false, true} { w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) a.updateDomains([]string{"*.example.com"}) @@ -522,18 +483,12 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) // nothing has yet been advertised @@ -584,18 +539,12 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: 
t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -665,18 +614,12 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = NewAppConnector(Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) t.Cleanup(a.Close) assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -842,8 +785,7 @@ func TestUpdateRoutesDeadlock(t *testing.T) { Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, + HasStoredRoutes: true, }) t.Cleanup(a.Close) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c560fdae160cb..bf6fab8ce108e 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -592,6 +592,8 @@ func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus healthChange = healthChangeSub.Events() } changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + routeUpdateSub := eventbus.Subscribe[appctype.RouteUpdate](ec) + storeRoutesSub := eventbus.Subscribe[appctype.RouteInfo](ec) var portlist <-chan PortlistServices if buildfeatures.HasPortList { @@ -612,10 +614,31 @@ func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus b.onHealthChange(change) case changeDelta := <-changeDeltaSub.Events(): 
b.linkChange(&changeDelta) + case pl := <-portlist: if buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans b.setPortlistServices(pl) } + case ru := <-routeUpdateSub.Events(): + // TODO(creachadair, 2025-10-02): It is currently possible for updates produced under + // one profile to arrive and be applied after a switch to another profile. + // We need to find a way to ensure that changes to the backend state are applied + // consistently in the presnce of profile changes, which currently may not happen in + // a single atomic step. See: https://github.com/tailscale/tailscale/issues/17414 + if err := b.AdvertiseRoute(ru.Advertise...); err != nil { + b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) + } + if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { + b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) + } + case ri := <-storeRoutesSub.Events(): + // Whether or not routes should be stored can change over time. + shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() + if shouldStoreRoutes { + if err := b.storeRouteInfo(ri); err != nil { + b.logf("appc: failed to store route info: %v", err) + } + } } } } @@ -4836,35 +4859,27 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i } }() + // App connectors have been disabled. 
if !prefs.AppConnector().Advertise { b.appConnector.Close() // clean up a previous connector (safe on nil) b.appConnector = nil return } - shouldAppCStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() - if b.appConnector == nil || b.appConnector.ShouldStoreRoutes() != shouldAppCStoreRoutes { - var ri *appctype.RouteInfo - var storeFunc func(*appctype.RouteInfo) error - if shouldAppCStoreRoutes { - var err error - ri, err = b.readRouteInfoLocked() - if err != nil { - ri = &appctype.RouteInfo{} - if err != ipn.ErrStateNotExist { - b.logf("Unsuccessful Read RouteInfo: ", err) - } - } - storeFunc = b.storeRouteInfo + // We don't (yet) have an app connector configured, or the configured + // connector has a different route persistence setting. + shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() + if b.appConnector == nil || (shouldStoreRoutes != b.appConnector.ShouldStoreRoutes()) { + ri, err := b.readRouteInfoLocked() + if err != nil && err != ipn.ErrStateNotExist { + b.logf("Unsuccessful Read RouteInfo: %v", err) } - b.appConnector.Close() // clean up a previous connector (safe on nil) b.appConnector = appc.NewAppConnector(appc.Config{ Logf: b.logf, EventBus: b.sys.Bus.Get(), - RouteAdvertiser: b, RouteInfo: ri, - StoreRoutesFunc: storeFunc, + HasStoredRoutes: shouldStoreRoutes, }) } if nm == nil { @@ -7008,9 +7023,9 @@ func (b *LocalBackend) ObserveDNSResponse(res []byte) error { // ErrDisallowedAutoRoute is returned by AdvertiseRoute when a route that is not allowed is requested. var ErrDisallowedAutoRoute = errors.New("route is not allowed") -// AdvertiseRoute implements the appc.RouteAdvertiser interface. It sets a new -// route advertisement if one is not already present in the existing routes. -// If the route is disallowed, ErrDisallowedAutoRoute is returned. +// AdvertiseRoute implements the appctype.RouteAdvertiser interface. It sets a +// new route advertisement if one is not already present in the existing +// routes. 
If the route is disallowed, ErrDisallowedAutoRoute is returned. func (b *LocalBackend) AdvertiseRoute(ipps ...netip.Prefix) error { finalRoutes := b.Prefs().AdvertiseRoutes().AsSlice() var newRoutes []netip.Prefix @@ -7066,8 +7081,8 @@ func coveredRouteRangeNoDefault(finalRoutes []netip.Prefix, ipp netip.Prefix) bo return false } -// UnadvertiseRoute implements the appc.RouteAdvertiser interface. It removes -// a route advertisement if one is present in the existing routes. +// UnadvertiseRoute implements the appctype.RouteAdvertiser interface. It +// removes a route advertisement if one is present in the existing routes. func (b *LocalBackend) UnadvertiseRoute(toRemove ...netip.Prefix) error { currentRoutes := b.Prefs().AdvertiseRoutes().AsSlice() finalRoutes := currentRoutes[:0] @@ -7095,7 +7110,7 @@ func namespaceKeyForCurrentProfile(pm *profileManager, key ipn.StateKey) ipn.Sta const routeInfoStateStoreKey ipn.StateKey = "_routeInfo" -func (b *LocalBackend) storeRouteInfo(ri *appctype.RouteInfo) error { +func (b *LocalBackend) storeRouteInfo(ri appctype.RouteInfo) error { if !buildfeatures.HasAppConnectors { return feature.ErrUnavailable } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index bc8bd2a67cff0..168f76268afb7 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -75,8 +75,6 @@ import ( "tailscale.com/wgengine/wgcfg" ) -func fakeStoreRoutes(*appctype.RouteInfo) error { return nil } - func inRemove(ip netip.Addr) bool { for _, pfx := range removeFromDefaultRoute { if pfx.Contains(ip) { @@ -2321,14 +2319,9 @@ func TestOfferingAppConnector(t *testing.T) { if b.OfferingAppConnector() { t.Fatal("unexpected offering app connector") } - rc := &appctest.RouteCollector{} - if shouldStore { - b.appConnector = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc, RouteInfo: &appctype.RouteInfo{}, StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - b.appConnector = 
appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, EventBus: bus, HasStoredRoutes: shouldStore, + }) if !b.OfferingAppConnector() { t.Fatal("unexpected not offering app connector") } @@ -2379,6 +2372,7 @@ func TestObserveDNSResponse(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) bus := b.sys.Bus.Get() + w := eventbustest.NewWatcher(t, bus) // ensure no error when no app connector is configured if err := b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -2386,28 +2380,30 @@ func TestObserveDNSResponse(t *testing.T) { } rc := &appctest.RouteCollector{} - if shouldStore { - b.appConnector = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, - EventBus: bus, - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - b.appConnector = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: bus, RouteAdvertiser: rc}) - } - b.appConnector.UpdateDomains([]string{"example.com"}) - b.appConnector.Wait(context.Background()) + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + a.UpdateDomains([]string{"example.com"}) + a.Wait(t.Context()) + b.appConnector = a if err := b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { t.Errorf("ObserveDNSResponse: %v", err) } - b.appConnector.Wait(context.Background()) + a.Wait(t.Context()) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { t.Fatalf("got routes %v, want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } @@ -2558,7 +2554,7 @@ func TestBackfillAppConnectorRoutes(t *testing.T) { // Store the test IP in 
profile data, but not in Prefs.AdvertiseRoutes. b.ControlKnobs().AppCStoreRoutes.Store(true) - if err := b.storeRouteInfo(&appctype.RouteInfo{ + if err := b.storeRouteInfo(appctype.RouteInfo{ Domains: map[string][]netip.Addr{ "example.com": {ip}, }, @@ -5511,10 +5507,10 @@ func TestReadWriteRouteInfo(t *testing.T) { b.pm.currentProfile = prof1.View() // set up routeInfo - ri1 := &appctype.RouteInfo{} + ri1 := appctype.RouteInfo{} ri1.Wildcards = []string{"1"} - ri2 := &appctype.RouteInfo{} + ri2 := appctype.RouteInfo{} ri2.Wildcards = []string{"2"} // read before write @@ -7066,3 +7062,41 @@ func toStrings[T ~string](in []T) []string { } return out } + +type textUpdate struct { + Advertise []string + Unadvertise []string +} + +func routeUpdateToText(u appctype.RouteUpdate) textUpdate { + var out textUpdate + for _, p := range u.Advertise { + out.Advertise = append(out.Advertise, p.String()) + } + for _, p := range u.Unadvertise { + out.Unadvertise = append(out.Unadvertise, p.String()) + } + return out +} + +func mustPrefix(ss ...string) (out []netip.Prefix) { + for _, s := range ss { + out = append(out, netip.MustParsePrefix(s)) + } + return +} + +// eqUpdate generates an eventbus test filter that matches an appctype.RouteUpdate +// message equal to want, or reports an error giving a human-readable diff. +// +// TODO(creachadair): This is copied from the appc test package, but we can't +// put it into the appctest package because the appc tests depend on it and +// that makes a cycle. Clean up those tests and put this somewhere common. 
+func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error { + return func(got appctype.RouteUpdate) error { + if diff := cmp.Diff(routeUpdateToText(got), routeUpdateToText(want)); diff != "" { + return fmt.Errorf("wrong update (-got, +want):\n%s", diff) + } + return nil + } +} diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index a16d55b8c2072..7c2e677a4f2f4 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -256,22 +256,12 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, - EventBus: sys.Bus.Get(), - RouteAdvertiser: &appctest.RouteCollector{}, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, - EventBus: sys.Bus.Get(), - RouteAdvertiser: &appctest.RouteCollector{}, - }) - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) sys.Set(pm.Store()) sys.Set(eng) @@ -329,11 +319,11 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { for _, shouldStore := range []bool{false, true} { - ctx := context.Background() var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) + bw := eventbustest.NewWatcher(t, sys.Bus.Get()) rc := &appctest.RouteCollector{} ht := health.NewTracker(sys.Bus.Get()) @@ -341,18 +331,13 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) - 
var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(appc.Config{ - Logf: t.Logf, - EventBus: sys.Bus.Get(), - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc}) - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) sys.Set(pm.Store()) sys.Set(eng) @@ -362,7 +347,7 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"example.com"}) - h.ps.b.appConnector.Wait(ctx) + a.Wait(t.Context()) h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.AResource( @@ -392,12 +377,18 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { if w.Code != http.StatusOK { t.Errorf("unexpected status code: %v", w.Code) } - h.ps.b.appConnector.Wait(ctx) + a.Wait(t.Context()) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { t.Errorf("got %v; want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(bw, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } @@ -408,24 +399,20 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) + bw := eventbustest.NewWatcher(t, sys.Bus.Get()) ht := health.NewTracker(sys.Bus.Get()) reg := new(usermetric.Registry) rc := &appctest.RouteCollector{} eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(appc.Config{ - Logf: 
t.Logf, - EventBus: sys.Bus.Get(), - RouteAdvertiser: rc, - RouteInfo: &appctype.RouteInfo{}, - StoreRoutesFunc: fakeStoreRoutes, - }) - } else { - a = appc.NewAppConnector(appc.Config{Logf: t.Logf, EventBus: sys.Bus.Get(), RouteAdvertiser: rc}) - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) sys.Set(pm.Store()) sys.Set(eng) @@ -482,6 +469,12 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { if !slices.Equal(rc.Routes(), wantRoutes) { t.Errorf("got %v; want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(bw, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } From 192f8d28042d69634ab17e2a7f9bab0fc5c13688 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 6 Oct 2025 15:43:42 -0700 Subject: [PATCH 0527/1093] wgengine/magicsock: add more handleNewServerEndpointRunLoop tests (#17469) Updates tailscale/corp#32978 Signed-off-by: Jordan Whited --- wgengine/magicsock/relaymanager_test.go | 195 ++++++++++++++++++++---- 1 file changed, 166 insertions(+), 29 deletions(-) diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index 6ae21b8fbfe85..d400818394c47 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -80,40 +80,177 @@ func TestRelayManagerGetServers(t *testing.T) { } } -// Test for http://go/corp/32978 func TestRelayManager_handleNewServerEndpointRunLoop(t *testing.T) { - rm := relayManager{} - rm.init() - <-rm.runLoopStoppedCh // prevent runLoop() from starting, we will inject/handle events in the test - ep := &endpoint{} + wantHandshakeWorkCount := func(t *testing.T, rm *relayManager, n int) { + t.Helper() + byServerDiscoByEndpoint := 0 + for _, v := range rm.handshakeWorkByServerDiscoByEndpoint { + byServerDiscoByEndpoint += len(v) + } + 
byServerDiscoVNI := len(rm.handshakeWorkByServerDiscoVNI) + if byServerDiscoByEndpoint != n || + byServerDiscoVNI != n || + byServerDiscoByEndpoint != byServerDiscoVNI { + t.Fatalf("want handshake work count %d byServerDiscoByEndpoint=%d byServerDiscoVNI=%d", + n, + byServerDiscoByEndpoint, + byServerDiscoVNI, + ) + } + } + conn := newConn(t.Logf) - ep.c = conn - serverDisco := key.NewDisco().Public() - rm.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ - wlb: endpointWithLastBest{ - ep: ep, + epA := &endpoint{c: conn} + epB := &endpoint{c: conn} + serverDiscoA := key.NewDisco().Public() + serverDiscoB := key.NewDisco().Public() + + serverAendpointALamport1VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 1}, + } + serverAendpointALamport1VNI1LastBestMatching := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA, lastBestIsTrusted: true, lastBest: addrQuality{relayServerDisco: serverDiscoA}}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 1}, + } + serverAendpointALamport2VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 2, VNI: 1}, + } + serverAendpointALamport2VNI2 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 2, VNI: 2}, + } + serverAendpointBLamport1VNI2 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epB}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 2}, + } + serverBendpointALamport1VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoB, LamportID: 1, VNI: 1}, + } + + tests := []struct { + name string + events []newRelayServerEndpointEvent + want []newRelayServerEndpointEvent + }{ + { + // Test for 
http://go/corp/32978 + name: "eq server+ep neq VNI higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverAendpointALamport2VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+ep neq VNI lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + serverAendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+vni neq ep lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + serverAendpointBLamport1VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+vni neq ep higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointBLamport1VNI2, + serverAendpointALamport2VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+endpoint+vni higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverAendpointALamport2VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + }, }, - se: udprelay.ServerEndpoint{ - ServerDisco: serverDisco, - LamportID: 1, - VNI: 1, + { + name: "eq server+endpoint+vni lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + serverAendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + }, }, - }) - rm.handleNewServerEndpointRunLoop(newRelayServerEndpointEvent{ - wlb: endpointWithLastBest{ - ep: ep, + { + name: "eq endpoint+vni+lamport neq server", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverBendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverBendpointALamport1VNI1, + }, }, - se: udprelay.ServerEndpoint{ - ServerDisco: serverDisco, - 
LamportID: 2, - VNI: 2, + { + name: "trusted last best with matching server", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1LastBestMatching, + }, + want: []newRelayServerEndpointEvent{}, }, - }) - rm.stopWorkRunLoop(ep) - if len(rm.handshakeWorkByServerDiscoByEndpoint) != 0 || - len(rm.handshakeWorkByServerDiscoVNI) != 0 || - len(rm.handshakeWorkAwaitingPong) != 0 || - len(rm.addrPortVNIToHandshakeWork) != 0 { - t.Fatal("stranded relayHandshakeWork state") + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rm := &relayManager{} + rm.init() + <-rm.runLoopStoppedCh // prevent runLoop() from starting + + // feed events + for _, event := range tt.events { + rm.handleNewServerEndpointRunLoop(event) + } + + // validate state + wantHandshakeWorkCount(t, rm, len(tt.want)) + for _, want := range tt.want { + byServerDisco, ok := rm.handshakeWorkByServerDiscoByEndpoint[want.wlb.ep] + if !ok { + t.Fatal("work not found by endpoint") + } + workByServerDiscoByEndpoint, ok := byServerDisco[want.se.ServerDisco] + if !ok { + t.Fatal("work not found by server disco by endpoint") + } + workByServerDiscoVNI, ok := rm.handshakeWorkByServerDiscoVNI[serverDiscoVNI{want.se.ServerDisco, want.se.VNI}] + if !ok { + t.Fatal("work not found by server disco + VNI") + } + if workByServerDiscoByEndpoint != workByServerDiscoVNI { + t.Fatal("workByServerDiscoByEndpoint != workByServerDiscoVNI") + } + } + + // cleanup + for _, event := range tt.events { + rm.stopWorkRunLoop(event.wlb.ep) + } + wantHandshakeWorkCount(t, rm, 0) + }) } } From 059f53e67a3fbee151da70638a517ed4d511a749 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 13:10:58 -0700 Subject: [PATCH 0528/1093] feature/condlite/expvar: add expvar stub package when metrics not needed Saves ~53 KB from the min build. 
Updates #12614 Change-Id: I73f9544a9feea06027c6ebdd222d712ada851299 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware-min.txt | 8 ++++---- cmd/tailscaled/depaware-minbox.txt | 8 ++++---- cmd/tailscaled/depaware.txt | 1 + cmd/tailscaled/deps_test.go | 1 + cmd/tsidp/depaware.txt | 1 + feature/condlite/expvar/expvar.go | 12 ++++++++++++ feature/condlite/expvar/omit.go | 11 +++++++++++ tsnet/depaware.txt | 1 + wgengine/magicsock/magicsock.go | 3 +-- 10 files changed, 37 insertions(+), 10 deletions(-) create mode 100644 feature/condlite/expvar/expvar.go create mode 100644 feature/condlite/expvar/omit.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2c4cd9e85c1b5..e0678267476b2 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -701,6 +701,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index ed7ddee2a0ded..2cf0f156180ce 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -54,6 +54,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal tailscale.com/feature from tailscale.com/cmd/tailscaled+ tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock 
tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister @@ -315,10 +316,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/base64 from encoding/json+ encoding/binary from compress/gzip+ encoding/hex from crypto/x509+ - encoding/json from expvar+ + encoding/json from github.com/gaissmai/bart+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/wgengine/magicsock flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ @@ -369,7 +369,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de io from bufio+ io/fs from crypto/x509+ iter from bytes+ - log from expvar+ + log from github.com/klauspost/compress/zstd+ log/internal from log maps from crypto/x509+ math from compress/flate+ @@ -381,7 +381,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de mime/multipart from net/http mime/quotedprintable from mime/multipart net from crypto/tls+ - net/http from expvar+ + net/http from tailscale.com/cmd/tailscaled+ net/http/httptrace from net/http+ net/http/internal from net/http net/http/internal/ascii from net/http diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 93a884c1ec2ee..483a32c712724 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -74,6 +74,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal tailscale.com/feature from tailscale.com/cmd/tailscaled+ tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister from 
tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ @@ -345,10 +346,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/base64 from encoding/json+ encoding/binary from compress/gzip+ encoding/hex from crypto/x509+ - encoding/json from expvar+ + encoding/json from github.com/gaissmai/bart+ encoding/pem from crypto/tls+ errors from bufio+ - expvar from tailscale.com/wgengine/magicsock flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ @@ -404,7 +404,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de io/fs from crypto/x509+ io/ioutil from github.com/skip2/go-qrcode iter from bytes+ - log from expvar+ + log from github.com/klauspost/compress/zstd+ log/internal from log maps from crypto/x509+ math from compress/flate+ @@ -416,7 +416,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de mime/multipart from net/http mime/quotedprintable from mime/multipart net from crypto/tls+ - net/http from expvar+ + net/http from net/http/httputil+ net/http/httptrace from net/http+ net/http/httputil from tailscale.com/cmd/tailscale/cli net/http/internal from net/http+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 7ef5c2ede1b5d..d58cebec2b4a0 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -278,6 +278,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/c2n from tailscale.com/feature/condregister tailscale.com/feature/capture from tailscale.com/feature/condregister tailscale.com/feature/clientupdate from tailscale.com/feature/condregister + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled 
tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a66706db29a80..3c3115f4210ad 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -282,6 +282,7 @@ func TestMinTailscaledWithCLI(t *testing.T) { }, BadDeps: map[string]string{ "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", + "expvar": "unexpected expvar dep", }, }.Check(t) } diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index fb7c59ebcca92..ba7bc46cd1a9f 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -143,6 +143,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet diff --git a/feature/condlite/expvar/expvar.go b/feature/condlite/expvar/expvar.go new file mode 100644 index 0000000000000..edc16ac771b13 --- /dev/null +++ b/feature/condlite/expvar/expvar.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !(ts_omit_debug && ts_omit_clientmetrics && ts_omit_usermetrics) + +// Package expvar contains type aliases for expvar types, to allow conditionally +// excluding the package from builds. 
+package expvar + +import "expvar" + +type Int = expvar.Int diff --git a/feature/condlite/expvar/omit.go b/feature/condlite/expvar/omit.go new file mode 100644 index 0000000000000..a21d94deb48eb --- /dev/null +++ b/feature/condlite/expvar/omit.go @@ -0,0 +1,11 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_debug && ts_omit_clientmetrics && ts_omit_usermetrics + +// excluding the package from builds. +package expvar + +type Int int64 + +func (*Int) Add(int64) {} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 4c3d8018fbf5b..e6e986f9222e5 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -139,6 +139,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/feature from tailscale.com/ipn/ipnext+ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 81ca49d3d6fb3..112085053bc00 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -11,7 +11,6 @@ import ( "context" "encoding/binary" "errors" - "expvar" "fmt" "io" "net" @@ -29,11 +28,11 @@ import ( "github.com/tailscale/wireguard-go/device" "go4.org/mem" "golang.org/x/net/ipv6" - "tailscale.com/control/controlknobs" "tailscale.com/disco" "tailscale.com/envknob" "tailscale.com/feature/buildfeatures" + "tailscale.com/feature/condlite/expvar" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" From 0415a56b6c91435eeeef83cc2d6bea91990ac861 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Mon, 6 Oct 2025 20:59:47 -0700 Subject: [PATCH 0529/1093] ipn/ipnlocal: fix another racy test (#17472) Some of the test cases access fields of the backend that are supposed to be locked while the test is running, which can trigger the race detector. I fixed a few of these in #17411, but I missed these two cases. Updates #15160 Updates #17192 Change-Id: I45664d5e34320ecdccd2844e0f8b228145aaf603 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/peerapi_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 7c2e677a4f2f4..3c9f57f1fcf6a 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -422,7 +422,7 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"www.example.com"}) - h.ps.b.appConnector.Wait(ctx) + a.Wait(ctx) h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.CNAMEResource( @@ -463,7 +463,7 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { if w.Code != http.StatusOK { t.Errorf("unexpected status code: %v", w.Code) } - h.ps.b.appConnector.Wait(ctx) + a.Wait(ctx) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { From 10cb59fa879b1e21daf30f8809efe774a27418fa Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 16:55:18 -0700 Subject: [PATCH 0530/1093] build_dist.sh: keep --extra-small making a usable build, add --min Historically, and until recently, --extra-small produced a usable build. When I recently made osrouter be modular in 39e35379d41fc788 (which is useful in, say, tsnet builds) after also making netstack modular, that meant --min now lacked both netstack support for routing and system support for routing, making no way to get packets into wireguard. That's not a nice default to users. 
(we've documented build_dist.sh in our KB) Restore --extra-small to making a usable build, and add --min for benchmarking purposes. Updates #12614 Change-Id: I649e41e324a36a0ca94953229c9914046b5dc497 Signed-off-by: Brad Fitzpatrick --- build_dist.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/build_dist.sh b/build_dist.sh index 564e30221db1c..c05644711cfa3 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -41,6 +41,14 @@ while [ "$#" -gt 1 ]; do fi shift ldflags="$ldflags -w -s" + tags="${tags:+$tags,},$(GOOS= GOARCH= $go run ./cmd/featuretags --min --add=osrouter)" + ;; + --min) + # --min is like --extra-small but even smaller, removing all features, + # even if it results in a useless binary (e.g. removing both netstack + + # osrouter). It exists for benchmarking purposes only. + shift + ldflags="$ldflags -w -s" tags="${tags:+$tags,},$(GOOS= GOARCH= $go run ./cmd/featuretags --min)" ;; --box) From 28b1b4c3c19225dcda6e44fda964c96a9fe1f9b2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 16:48:41 -0700 Subject: [PATCH 0531/1093] cmd/tailscaled: guard some flag work with buildfeatures checks Updates #12614 Change-Id: Iec6f15d33a6500e7b0b7e8f5c098f7c00334460f Signed-off-by: Brad Fitzpatrick --- cmd/tailscaled/tailscaled.go | 64 ++++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 62df4067d0a24..a46457face6df 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -79,13 +79,11 @@ func defaultTunName() string { case "aix", "solaris", "illumos": return "userspace-networking" case "linux": - switch distro.Get() { - case distro.Synology: + if buildfeatures.HasSynology && buildfeatures.HasNetstack && distro.Get() == distro.Synology { // Try TUN, but fall back to userspace networking if needed. 
// See https://github.com/tailscale/tailscale-synology/issues/35 return "tailscale0,userspace-networking" } - } return "tailscale0" } @@ -195,10 +193,14 @@ func main() { flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. Default: "+paths.DefaultTailscaledStateFile()) - flag.Var(&args.encryptState, "encrypt-state", `encrypt the state file on disk; when not set encryption will be enabled if supported on this platform; uses TPM on Linux and Windows, on all other platforms this flag is not supported`) + if buildfeatures.HasTPM { + flag.Var(&args.encryptState, "encrypt-state", `encrypt the state file on disk; when not set encryption will be enabled if supported on this platform; uses TPM on Linux and Windows, on all other platforms this flag is not supported`) + } flag.StringVar(&args.statedir, "statedir", "", "path to directory for storage of config state, TLS certs, temporary incoming Taildrop files, etc. 
If empty, it's derived from --state when possible.") flag.StringVar(&args.socketpath, "socket", paths.DefaultTailscaledSocket(), "path of the service unix socket") - flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") + if buildfeatures.HasBird { + flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") + } flag.BoolVar(&printVersion, "version", false, "print version information and exit") flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") @@ -252,7 +254,7 @@ func main() { log.Fatalf("--socket is required") } - if args.birdSocketPath != "" && createBIRDClient == nil { + if buildfeatures.HasBird && args.birdSocketPath != "" && createBIRDClient == nil { log.SetFlags(0) log.Fatalf("--bird-socket is not supported on %s", runtime.GOOS) } @@ -273,28 +275,30 @@ func main() { } } - if !args.encryptState.set { - args.encryptState.v = defaultEncryptState() - } - if args.encryptState.v { - if runtime.GOOS != "linux" && runtime.GOOS != "windows" { - log.SetFlags(0) - log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) + if buildfeatures.HasTPM { + if !args.encryptState.set { + args.encryptState.v = defaultEncryptState() } - // Check if we have TPM support in this build. - if !store.HasKnownProviderPrefix(store.TPMPrefix + "/") { - log.SetFlags(0) - log.Fatal("--encrypt-state is not supported in this build of tailscaled") - } - // Check if we have TPM access. - if !hostinfo.New().TPM.Present() { - log.SetFlags(0) - log.Fatal("--encrypt-state is not supported on this device or a TPM is not accessible") - } - // Check for conflicting prefix in --state, like arn: or kube:. 
- if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { - log.SetFlags(0) - log.Fatal("--encrypt-state can only be used with --state set to a local file path") + if args.encryptState.v { + if runtime.GOOS != "linux" && runtime.GOOS != "windows" { + log.SetFlags(0) + log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) + } + // Check if we have TPM support in this build. + if !store.HasKnownProviderPrefix(store.TPMPrefix + "/") { + log.SetFlags(0) + log.Fatal("--encrypt-state is not supported in this build of tailscaled") + } + // Check if we have TPM access. + if !hostinfo.New().TPM.Present() { + log.SetFlags(0) + log.Fatal("--encrypt-state is not supported on this device or a TPM is not accessible") + } + // Check for conflicting prefix in --state, like arn: or kube:. + if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { + log.SetFlags(0) + log.Fatal("--encrypt-state can only be used with --state set to a local file path") + } } } @@ -308,8 +312,10 @@ func main() { err := run() - // Remove file sharing from Windows shell (noop in non-windows) - osshare.SetFileSharingEnabled(false, logger.Discard) + if buildfeatures.HasTaildrop { + // Remove file sharing from Windows shell (noop in non-windows) + osshare.SetFileSharingEnabled(false, logger.Discard) + } if err != nil { log.Fatal(err) From 316afe7d02babc24001b23ccfefd28eaa26adb7c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 4 Oct 2025 17:40:09 -0700 Subject: [PATCH 0532/1093] util/checkchange: stop using deephash everywhere Saves 45 KB from the min build, no longer pulling in deephash or util/hashx, both with unsafe code. It can actually be more efficient to not use deephash, as you don't have to walk all bytes of all fields recursively to answer that two things are not equal. Instead, you can just return false at the first difference you see. 
And then with views (as we use ~everywhere nowadays), the cloning the old value isn't expensive, as it's just a pointer under the hood. Updates #12614 Change-Id: I7b08616b8a09b3ade454bb5e0ac5672086fe8aec Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscaled/depaware-min.txt | 3 +- cmd/tailscaled/depaware-minbox.txt | 3 +- cmd/tailscaled/depaware.txt | 3 +- cmd/tailscaled/deps_test.go | 4 ++ cmd/tsidp/depaware.txt | 3 +- ipn/ipnlocal/local.go | 74 +++++++++++++++++------- net/dns/config.go | 21 +++++++ tailcfg/tailcfg.go | 2 +- tailcfg/tailcfg_clone.go | 37 +++++++++++- tailcfg/tailcfg_view.go | 93 +++++++++++++++++++++++++++++- tsnet/depaware.txt | 3 +- util/checkchange/checkchange.go | 25 ++++++++ wgengine/router/router.go | 13 +++++ wgengine/userspace.go | 56 ++++++++++++------ wgengine/wgcfg/config.go | 33 +++++++++++ wgengine/wgcfg/config_test.go | 41 +++++++++++++ 17 files changed, 367 insertions(+), 50 deletions(-) create mode 100644 util/checkchange/checkchange.go create mode 100644 wgengine/wgcfg/config_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index e0678267476b2..d1a63a188091c 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -825,12 +825,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/cmd/k8s-operator+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 
tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/tsd+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 2cf0f156180ce..1ef3568d1d1fe 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -144,17 +144,16 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/tkatype from tailscale.com/control/controlclient+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/control/controlclient+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth - 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 483a32c712724..a7f5d2e0edab6 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -170,18 +170,17 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/tkatype from 
tailscale.com/control/controlclient+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/cmpver from tailscale.com/clientupdate tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth - 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ tailscale.com/util/mak from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index d58cebec2b4a0..541e9f3fc1972 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -412,12 +412,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/tkatype from tailscale.com/tka+ tailscale.com/types/views from tailscale.com/ipn/ipnlocal+ tailscale.com/util/backoff from tailscale.com/cmd/tailscaled+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/control/controlclient+ tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+ tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/ipn/ipnlocal+ - 💣 tailscale.com/util/deephash from 
tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/tsd+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 3c3115f4210ad..0711bafba729e 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -244,6 +244,8 @@ func TestMinTailscaledNoCLI(t *testing.T) { "internal/socks", "github.com/tailscale/peercred", "tailscale.com/types/netlogtype", + "deephash", + "util/hashx", } deptest.DepChecker{ GOOS: "linux", @@ -268,6 +270,8 @@ func TestMinTailscaledWithCLI(t *testing.T) { "tailscale.com/metrics", "tailscale.com/tsweb/varz", "dirwalk", + "deephash", + "util/hashx", } deptest.DepChecker{ GOOS: "linux", diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index ba7bc46cd1a9f..eb20869474689 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -252,12 +252,13 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/client/local+ diff --git 
a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bf6fab8ce108e..c8b49de75bff0 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -83,8 +83,8 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/ptr" "tailscale.com/types/views" + "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" - "tailscale.com/util/deephash" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" "tailscale.com/util/goroutines" @@ -262,13 +262,13 @@ type LocalBackend struct { // of [LocalBackend]'s own state that is not tied to the node context. currentNodeAtomic atomic.Pointer[nodeBackend] - conf *conffile.Config // latest parsed config, or nil if not in declarative mode - pm *profileManager // mu guards access - filterHash deephash.Sum // TODO(nickkhyl): move to nodeBackend - httpTestClient *http.Client // for controlclient. nil by default, used by tests. - ccGen clientGen // function for producing controlclient; lazily populated - sshServer SSHServer // or nil, initialized lazily. - appConnector *appc.AppConnector // or nil, initialized when configured. + conf *conffile.Config // latest parsed config, or nil if not in declarative mode + pm *profileManager // mu guards access + lastFilterInputs *filterInputs + httpTestClient *http.Client // for controlclient. nil by default, used by tests. + ccGen clientGen // function for producing controlclient; lazily populated + sshServer SSHServer // or nil, initialized lazily. + appConnector *appc.AppConnector // or nil, initialized when configured. // notifyCancel cancels notifications to the current SetNotifyCallback. 
notifyCancel context.CancelFunc cc controlclient.Client // TODO(nickkhyl): move to nodeBackend @@ -2626,6 +2626,36 @@ var invalidPacketFilterWarnable = health.Register(&health.Warnable{ Text: health.StaticMessage("The coordination server sent an invalid packet filter permitting traffic to unlocked nodes; rejecting all packets for safety"), }) +// filterInputs holds the inputs to the packet filter. +// +// Any field changes or additions here should be accompanied by a change to +// [filterInputs.Equal] and [filterInputs.Clone] if necessary. (e.g. non-view +// and non-value fields) +type filterInputs struct { + HaveNetmap bool + Addrs views.Slice[netip.Prefix] + FilterMatch views.Slice[filter.Match] + LocalNets views.Slice[netipx.IPRange] + LogNets views.Slice[netipx.IPRange] + ShieldsUp bool + SSHPolicy tailcfg.SSHPolicyView +} + +func (fi *filterInputs) Equal(o *filterInputs) bool { + if fi == nil || o == nil { + return fi == o + } + return reflect.DeepEqual(fi, o) +} + +func (fi *filterInputs) Clone() *filterInputs { + if fi == nil { + return nil + } + v := *fi // all fields are shallow copyable + return &v +} + // updateFilterLocked updates the packet filter in wgengine based on the // given netMap and user preferences. 
// @@ -2722,20 +2752,20 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { } localNets, _ := localNetsB.IPSet() logNets, _ := logNetsB.IPSet() - var sshPol tailcfg.SSHPolicy - if haveNetmap && netMap.SSHPolicy != nil { - sshPol = *netMap.SSHPolicy - } - - changed := deephash.Update(&b.filterHash, &struct { - HaveNetmap bool - Addrs views.Slice[netip.Prefix] - FilterMatch []filter.Match - LocalNets []netipx.IPRange - LogNets []netipx.IPRange - ShieldsUp bool - SSHPolicy tailcfg.SSHPolicy - }{haveNetmap, addrs, packetFilter, localNets.Ranges(), logNets.Ranges(), shieldsUp, sshPol}) + var sshPol tailcfg.SSHPolicyView + if buildfeatures.HasSSH && haveNetmap && netMap.SSHPolicy != nil { + sshPol = netMap.SSHPolicy.View() + } + + changed := checkchange.Update(&b.lastFilterInputs, &filterInputs{ + HaveNetmap: haveNetmap, + Addrs: addrs, + FilterMatch: views.SliceOf(packetFilter), + LocalNets: views.SliceOf(localNets.Ranges()), + LogNets: views.SliceOf(logNets.Ranges()), + ShieldsUp: shieldsUp, + SSHPolicy: sshPol, + }) if !changed { return } diff --git a/net/dns/config.go b/net/dns/config.go index b2c7c428593ff..22caf6ef54909 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -8,6 +8,7 @@ import ( "bufio" "fmt" "net/netip" + "reflect" "slices" "sort" @@ -188,3 +189,23 @@ func sameResolverNames(a, b []*dnstype.Resolver) bool { } return true } + +func (c *Config) Clone() *Config { + if c == nil { + return nil + } + return &Config{ + DefaultResolvers: slices.Clone(c.DefaultResolvers), + Routes: make(map[dnsname.FQDN][]*dnstype.Resolver, len(c.Routes)), + SearchDomains: slices.Clone(c.SearchDomains), + Hosts: make(map[dnsname.FQDN][]netip.Addr, len(c.Hosts)), + OnlyIPv6: c.OnlyIPv6, + } +} + +func (c *Config) Equal(o *Config) bool { + if c == nil || o == nil { + return c == o + } + return reflect.DeepEqual(c, o) +} diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 7484c74664948..3edc9aef0254e 100644 --- a/tailcfg/tailcfg.go +++ 
b/tailcfg/tailcfg.go @@ -5,7 +5,7 @@ // the node and the coordination server. package tailcfg -//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService --clonefunc +//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy --clonefunc import ( "bytes" diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 95f8905b84e69..9aa7673886bc6 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -651,9 +651,35 @@ var _VIPServiceCloneNeedsRegeneration = VIPService(struct { Active bool }{}) +// Clone makes a deep copy of SSHPolicy. +// The result aliases no memory with the original. +func (src *SSHPolicy) Clone() *SSHPolicy { + if src == nil { + return nil + } + dst := new(SSHPolicy) + *dst = *src + if src.Rules != nil { + dst.Rules = make([]*SSHRule, len(src.Rules)) + for i := range dst.Rules { + if src.Rules[i] == nil { + dst.Rules[i] = nil + } else { + dst.Rules[i] = src.Rules[i].Clone() + } + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _SSHPolicyCloneNeedsRegeneration = SSHPolicy(struct { + Rules []*SSHRule +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService. 
+// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy. func Clone(dst, src any) bool { switch src := src.(type) { case *User: @@ -836,6 +862,15 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *SSHPolicy: + switch dst := dst.(type) { + case *SSHPolicy: + *dst = *src.Clone() + return true + case **SSHPolicy: + *dst = src.Clone() + return true + } } return false } diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index e44d0bbef326b..88dd90096ab55 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -21,7 +21,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy // View returns a read-only view of User. func (p *User) View() UserView { @@ -2604,3 +2604,94 @@ var _VIPServiceViewNeedsRegeneration = VIPService(struct { Ports []ProtoPortRange Active bool }{}) + +// View returns a read-only view of SSHPolicy. +func (p *SSHPolicy) View() SSHPolicyView { + return SSHPolicyView{ж: p} +} + +// SSHPolicyView provides a read-only view over SSHPolicy. +// +// Its methods should only be called if `Valid()` returns true. +type SSHPolicyView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. 
+ // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *SSHPolicy +} + +// Valid reports whether v's underlying value is non-nil. +func (v SSHPolicyView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v SSHPolicyView) AsStruct() *SSHPolicy { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHPolicyView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHPolicyView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (v *SSHPolicyView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x SSHPolicy + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHPolicyView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x SSHPolicy + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// Rules are the rules to process for an incoming SSH connection. The first +// matching rule takes its action and stops processing further rules. +// +// When an incoming connection first starts, all rules are evaluated in +// "none" auth mode, where the client hasn't even been asked to send a +// public key. All SSHRule.Principals requiring a public key won't match. If +// a rule matches on the first pass and its Action is reject, the +// authentication fails with that action's rejection message, if any. 
+// +// If the first pass rule evaluation matches nothing without matching an +// Action with Reject set, the rules are considered to see whether public +// keys might still result in a match. If not, "none" auth is terminated +// before proceeding to public key mode. If so, the client is asked to try +// public key authentication and the rules are evaluated again for each of +// the client's present keys. +func (v SSHPolicyView) Rules() views.SliceView[*SSHRule, SSHRuleView] { + return views.SliceOfViews[*SSHRule, SSHRuleView](v.ж.Rules) +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _SSHPolicyViewNeedsRegeneration = SSHPolicy(struct { + Rules []*SSHRule +}{}) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index e6e986f9222e5..9dd8f0d656c6c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -247,12 +247,13 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/client/local+ diff --git a/util/checkchange/checkchange.go b/util/checkchange/checkchange.go new file mode 100644 index 0000000000000..4d18730f16e0f --- /dev/null +++ 
b/util/checkchange/checkchange.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package checkchange defines a utility for determining whether a value +// has changed since the last time it was checked. +package checkchange + +// EqualCloner is an interface for types that can be compared for equality +// and can be cloned. +type EqualCloner[T any] interface { + Equal(T) bool + Clone() T +} + +// Update sets *old to a clone of new if they are not equal, returning whether +// they were different. +// +// It only modifies *old if they are different. old must be non-nil. +func Update[T EqualCloner[T]](old *T, new T) (changed bool) { + if new.Equal(*old) { + return false + } + *old = new.Clone() + return true +} diff --git a/wgengine/router/router.go b/wgengine/router/router.go index 7723138f4b587..df65e697d10c5 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -11,6 +11,7 @@ import ( "net/netip" "reflect" "runtime" + "slices" "github.com/tailscale/wireguard-go/tun" "tailscale.com/feature" @@ -146,3 +147,15 @@ func (a *Config) Equal(b *Config) bool { } return reflect.DeepEqual(a, b) } + +func (c *Config) Clone() *Config { + if c == nil { + return nil + } + c2 := *c + c2.LocalAddrs = slices.Clone(c.LocalAddrs) + c2.Routes = slices.Clone(c.Routes) + c2.LocalRoutes = slices.Clone(c.LocalRoutes) + c2.SubnetRoutes = slices.Clone(c.SubnetRoutes) + return &c2 +} diff --git a/wgengine/userspace.go b/wgengine/userspace.go index c88ab78a1334a..e971f0e39e1a7 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -10,8 +10,10 @@ import ( "errors" "fmt" "io" + "maps" "math" "net/netip" + "reflect" "runtime" "slices" "strings" @@ -45,8 +47,8 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" + "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" - "tailscale.com/util/deephash" "tailscale.com/util/eventbus" "tailscale.com/util/mak" 
"tailscale.com/util/set" @@ -128,9 +130,9 @@ type userspaceEngine struct { wgLock sync.Mutex // serializes all wgdev operations; see lock order comment below lastCfgFull wgcfg.Config lastNMinPeers int - lastRouterSig deephash.Sum // of router.Config - lastEngineSigFull deephash.Sum // of full wireguard config - lastEngineSigTrim deephash.Sum // of trimmed wireguard config + lastRouter *router.Config + lastEngineFull *wgcfg.Config // of full wireguard config, not trimmed + lastEngineInputs *maybeReconfigInputs lastDNSConfig *dns.Config lastIsSubnetRouter bool // was the node a primary subnet router in the last run. recvActivityAt map[key.NodePublic]mono.Time @@ -725,6 +727,29 @@ func (e *userspaceEngine) isActiveSinceLocked(nk key.NodePublic, ip netip.Addr, return timePtr.LoadAtomic().After(t) } +// maybeReconfigInputs holds the inputs to the maybeReconfigWireguardLocked +// function. If these things don't change between calls, there's nothing to do. +type maybeReconfigInputs struct { + WGConfig *wgcfg.Config + TrimmedNodes map[key.NodePublic]bool + TrackNodes views.Slice[key.NodePublic] + TrackIPs views.Slice[netip.Addr] +} + +func (i *maybeReconfigInputs) Equal(o *maybeReconfigInputs) bool { + return reflect.DeepEqual(i, o) +} + +func (i *maybeReconfigInputs) Clone() *maybeReconfigInputs { + if i == nil { + return nil + } + v := *i + v.WGConfig = i.WGConfig.Clone() + v.TrimmedNodes = maps.Clone(i.TrimmedNodes) + return &v +} + // discoChanged are the set of peers whose disco keys have changed, implying they've restarted. // If a peer is in this set and was previously in the live wireguard config, // it needs to be first removed and then re-added to flush out its wireguard session key. 
@@ -803,12 +828,12 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node } e.lastNMinPeers = len(min.Peers) - if changed := deephash.Update(&e.lastEngineSigTrim, &struct { - WGConfig *wgcfg.Config - TrimmedNodes map[key.NodePublic]bool - TrackNodes []key.NodePublic - TrackIPs []netip.Addr - }{&min, e.trimmedNodes, trackNodes, trackIPs}); !changed { + if changed := checkchange.Update(&e.lastEngineInputs, &maybeReconfigInputs{ + WGConfig: &min, + TrimmedNodes: e.trimmedNodes, + TrackNodes: views.SliceOf(trackNodes), + TrackIPs: views.SliceOf(trackIPs), + }); !changed { return nil } @@ -937,7 +962,6 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, e.wgLock.Lock() defer e.wgLock.Unlock() e.tundev.SetWGConfig(cfg) - e.lastDNSConfig = dnsCfg peerSet := make(set.Set[key.NodePublic], len(cfg.Peers)) e.mu.Lock() @@ -965,14 +989,12 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, } isSubnetRouterChanged := isSubnetRouter != e.lastIsSubnetRouter - engineChanged := deephash.Update(&e.lastEngineSigFull, cfg) - routerChanged := deephash.Update(&e.lastRouterSig, &struct { - RouterConfig *router.Config - DNSConfig *dns.Config - }{routerCfg, dnsCfg}) + engineChanged := checkchange.Update(&e.lastEngineFull, cfg) + dnsChanged := checkchange.Update(&e.lastDNSConfig, dnsCfg) + routerChanged := checkchange.Update(&e.lastRouter, routerCfg) listenPortChanged := listenPort != e.magicConn.LocalPort() peerMTUChanged := peerMTUEnable != e.magicConn.PeerMTUEnabled() - if !engineChanged && !routerChanged && !listenPortChanged && !isSubnetRouterChanged && !peerMTUChanged { + if !engineChanged && !routerChanged && !dnsChanged && !listenPortChanged && !isSubnetRouterChanged && !peerMTUChanged { return ErrNoChanges } newLogIDs := cfg.NetworkLogging diff --git a/wgengine/wgcfg/config.go b/wgengine/wgcfg/config.go index 154dc0a304773..926964a4bdc20 100644 --- a/wgengine/wgcfg/config.go +++ 
b/wgengine/wgcfg/config.go @@ -6,6 +6,7 @@ package wgcfg import ( "net/netip" + "slices" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -35,6 +36,20 @@ type Config struct { } } +func (c *Config) Equal(o *Config) bool { + if c == nil || o == nil { + return c == o + } + return c.Name == o.Name && + c.NodeID == o.NodeID && + c.PrivateKey.Equal(o.PrivateKey) && + c.MTU == o.MTU && + c.NetworkLogging == o.NetworkLogging && + slices.Equal(c.Addresses, o.Addresses) && + slices.Equal(c.DNS, o.DNS) && + slices.EqualFunc(c.Peers, o.Peers, Peer.Equal) +} + type Peer struct { PublicKey key.NodePublic DiscoKey key.DiscoPublic // present only so we can handle restarts within wgengine, not passed to WireGuard @@ -50,6 +65,24 @@ type Peer struct { WGEndpoint key.NodePublic } +func addrPtrEq(a, b *netip.Addr) bool { + if a == nil || b == nil { + return a == b + } + return *a == *b +} + +func (p Peer) Equal(o Peer) bool { + return p.PublicKey == o.PublicKey && + p.DiscoKey == o.DiscoKey && + slices.Equal(p.AllowedIPs, o.AllowedIPs) && + p.IsJailed == o.IsJailed && + p.PersistentKeepalive == o.PersistentKeepalive && + addrPtrEq(p.V4MasqAddr, o.V4MasqAddr) && + addrPtrEq(p.V6MasqAddr, o.V6MasqAddr) && + p.WGEndpoint == o.WGEndpoint +} + // PeerWithKey returns the Peer with key k and reports whether it was found. func (config Config) PeerWithKey(k key.NodePublic) (Peer, bool) { for _, p := range config.Peers { diff --git a/wgengine/wgcfg/config_test.go b/wgengine/wgcfg/config_test.go new file mode 100644 index 0000000000000..5ac3b7cd56376 --- /dev/null +++ b/wgengine/wgcfg/config_test.go @@ -0,0 +1,41 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package wgcfg + +import ( + "reflect" + "testing" +) + +// Tests that [Config.Equal] tests all fields of [Config], even ones +// that might get added in the future. 
+func TestConfigEqual(t *testing.T) { + rt := reflect.TypeFor[Config]() + for i := range rt.NumField() { + sf := rt.Field(i) + switch sf.Name { + case "Name", "NodeID", "PrivateKey", "MTU", "Addresses", "DNS", "Peers", + "NetworkLogging": + // These are compared in [Config.Equal]. + default: + t.Errorf("Have you added field %q to Config.Equal? Do so if not, and then update TestConfigEqual", sf.Name) + } + } +} + +// Tests that [Peer.Equal] tests all fields of [Peer], even ones +// that might get added in the future. +func TestPeerEqual(t *testing.T) { + rt := reflect.TypeFor[Peer]() + for i := range rt.NumField() { + sf := rt.Field(i) + switch sf.Name { + case "PublicKey", "DiscoKey", "AllowedIPs", "IsJailed", + "PersistentKeepalive", "V4MasqAddr", "V6MasqAddr", "WGEndpoint": + // These are compared in [Peer.Equal]. + default: + t.Errorf("Have you added field %q to Peer.Equal? Do so if not, and then update TestPeerEqual", sf.Name) + } + } +} From eabc62a9ddc45646bf55f20928832b6c4e4ad2d8 Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Tue, 7 Oct 2025 11:52:41 +0100 Subject: [PATCH 0533/1093] ipn/ipnlocal: don't send LoginFinished unless auth was in progress (#17266) Before we introduced seamless, the "blocked" state was used to track: * Whether a login was required for connectivity, and therefore we should keep the engine deconfigured until that happened * Whether authentication was in progress "blocked" would stop authReconfig from running. We want this when a login is required: if your key has expired we want to deconfigure the engine and keep it down, so that you don't keep using exit nodes (which won't work because your key has expired). Taking the engine down while auth was in progress was undesirable, so we don't do that with seamless renewal. However, not entering the "blocked" state meant that we needed to change the logic for when to send LoginFinished on the IPN bus after seeing StateAuthenticated from the controlclient. 
Initially we changed the "if blocked" check to "if blocked or seamless is enabled" which was correct in other places. In this place however, it introduced a bug: we are sending LoginFinished every time we see StateAuthenticated, which happens even on a down & up, or a profile switch. This in turn made it harder for UI clients to track when authentication is complete. Instead we should only send it out if we were blocked (i.e. seamless is disabled, or our key expired) or an auth was in progress. Updates tailscale/corp#31476 Updates tailscale/corp#32645 Fixes #17363 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 3 ++- ipn/ipnlocal/state_test.go | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c8b49de75bff0..c07cc42a1b8dd 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1600,6 +1600,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control } wasBlocked := b.blocked + authWasInProgress := b.authURL != "" keyExpiryExtended := false if st.NetMap != nil { wasExpired := b.keyExpired @@ -1617,7 +1618,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.blockEngineUpdates(false) } - if st.LoginFinished() && (wasBlocked || b.seamlessRenewalEnabled()) { + if st.LoginFinished() && (wasBlocked || authWasInProgress) { if wasBlocked { // Auth completed, unblock the engine b.blockEngineUpdates(false) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index d773f722762c2..a4b9ba1f452c0 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -348,6 +348,14 @@ func (b *LocalBackend) nonInteractiveLoginForStateTest() { // predictable, but maybe a bit less thorough. This is more of an overall // state machine test than a test of the wgengine+magicsock integration. 
func TestStateMachine(t *testing.T) { + runTestStateMachine(t, false) +} + +func TestStateMachineSeamless(t *testing.T) { + runTestStateMachine(t, true) +} + +func runTestStateMachine(t *testing.T, seamless bool) { envknob.Setenv("TAILSCALE_USE_WIP_CODE", "1") defer envknob.Setenv("TAILSCALE_USE_WIP_CODE", "") c := qt.New(t) @@ -545,6 +553,13 @@ func TestStateMachine(t *testing.T) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user1" cc.persist.NodeID = "node1" + + // even if seamless is being enabled by default rather than by policy, this is + // the point where it will first get enabled. + if seamless { + sys.ControlKnobs().SeamlessKeyRenewal.Store(true) + } + cc.send(nil, "", true, &netmap.NetworkMap{}) { nn := notifies.drain(3) From 63f7a400a8fbe89eaa9b2ba559a4300df842fcc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Tue, 7 Oct 2025 09:30:27 -0400 Subject: [PATCH 0534/1093] wgengine/{magicsock,userspace,router}: move portupdates to the eventbus (#17423) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also pull out interface method only needed in Linux. Instead of having userspace do the call into the router, just let the router pick up the change itself. 
Updates #15160 Signed-off-by: Claus Lensbøl --- wgengine/magicsock/magicsock.go | 19 ++- wgengine/router/callback.go | 7 - wgengine/router/osrouter/router_linux.go | 122 ++++++++++-------- wgengine/router/osrouter/router_openbsd.go | 7 - wgengine/router/osrouter/router_plan9.go | 7 - .../router/osrouter/router_userspace_bsd.go | 7 - wgengine/router/osrouter/router_windows.go | 7 - wgengine/router/router.go | 16 +-- wgengine/router/router_fake.go | 5 - wgengine/userspace.go | 8 -- 10 files changed, 83 insertions(+), 122 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 112085053bc00..c7d07c27708f7 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -67,6 +67,7 @@ import ( "tailscale.com/util/testenv" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgint" ) @@ -179,6 +180,7 @@ type Conn struct { // config changes between magicsock and wireguard. syncPub *eventbus.Publisher[syncPoint] allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] + portUpdatePub *eventbus.Publisher[router.PortUpdate] // pconn4 and pconn6 are the underlying UDP sockets used to // send/receive packets for wireguard and other magicsock @@ -393,10 +395,6 @@ type Conn struct { // wgPinger is the WireGuard only pinger used for latency measurements. wgPinger lazy.SyncValue[*ping.Pinger] - // onPortUpdate is called with the new port when magicsock rebinds to - // a new port. - onPortUpdate func(port uint16, network string) - // getPeerByKey optionally specifies a function to look up a peer's // wireguard state by its public key. If nil, it's not used. getPeerByKey func(key.NodePublic) (_ wgint.Peer, ok bool) @@ -492,10 +490,6 @@ type Options struct { // If nil, they're ignored and not updated. ControlKnobs *controlknobs.Knobs - // OnPortUpdate is called with the new port when magicsock rebinds to - // a new port. 
- OnPortUpdate func(port uint16, network string) - // PeerByKeyFunc optionally specifies a function to look up a peer's // WireGuard state by its public key. If nil, it's not used. // In regular use, this will be wgengine.(*userspaceEngine).PeerByKey. @@ -735,6 +729,7 @@ func NewConn(opts Options) (*Conn, error) { cli := c.eventBus.Client("magicsock.Conn") c.syncPub = eventbus.Publish[syncPoint](cli) c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](cli) + c.portUpdatePub = eventbus.Publish[router.PortUpdate](cli) c.eventSubs = cli.Monitor(c.consumeEventbusTopics(cli)) c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) @@ -759,7 +754,6 @@ func NewConn(opts Options) (*Conn, error) { c.netMon = opts.NetMon c.health = opts.HealthTracker - c.onPortUpdate = opts.OnPortUpdate c.getPeerByKey = opts.PeerByKeyFunc if err := c.rebind(keepCurrentPort); err != nil { @@ -3533,7 +3527,7 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur c.logf("magicsock: unable to bind %v port %d: %v", network, port, err) continue } - if c.onPortUpdate != nil { + if c.portUpdatePub.ShouldPublish() { _, gotPortStr, err := net.SplitHostPort(pconn.LocalAddr().String()) if err != nil { c.logf("could not parse port from %s: %w", pconn.LocalAddr().String(), err) @@ -3542,7 +3536,10 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur if err != nil { c.logf("could not parse port from %s: %w", gotPort, err) } else { - c.onPortUpdate(uint16(gotPort), network) + c.portUpdatePub.Publish(router.PortUpdate{ + UDPPort: uint16(gotPort), + EndpointNetwork: network, + }) } } } diff --git a/wgengine/router/callback.go b/wgengine/router/callback.go index 1d90912778226..c1838539ba2a3 100644 --- a/wgengine/router/callback.go +++ b/wgengine/router/callback.go @@ -56,13 +56,6 @@ func (r *CallbackRouter) Set(rcfg *Config) error { return r.SetBoth(r.rcfg, r.dcfg) } -// UpdateMagicsockPort implements the Router interface. 
This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *CallbackRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - // SetDNS implements dns.OSConfigurator. func (r *CallbackRouter) SetDNS(dcfg dns.OSConfig) error { r.mu.Lock() diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index cf1a9f02716a5..835a9050f9565 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -14,6 +14,7 @@ import ( "os/exec" "strconv" "strings" + "sync" "sync/atomic" "syscall" "time" @@ -54,21 +55,14 @@ const ( ) type linuxRouter struct { - closed atomic.Bool - logf func(fmt string, args ...any) - tunname string - netMon *netmon.Monitor - health *health.Tracker - eventSubs eventbus.Monitor - rulesAddedPub *eventbus.Publisher[AddIPRules] - unregNetMon func() - addrs map[netip.Prefix]bool - routes map[netip.Prefix]bool - localRoutes map[netip.Prefix]bool - snatSubnetRoutes bool - statefulFiltering bool - netfilterMode preftype.NetfilterMode - netfilterKind string + closed atomic.Bool + logf func(fmt string, args ...any) + tunname string + netMon *netmon.Monitor + health *health.Tracker + eventSubs eventbus.Monitor + rulesAddedPub *eventbus.Publisher[AddIPRules] + unregNetMon func() // ruleRestorePending is whether a timer has been started to // restore deleted ip rules. 
@@ -86,8 +80,16 @@ type linuxRouter struct { cmd commandRunner nfr linuxfw.NetfilterRunner - magicsockPortV4 atomic.Uint32 // actually a uint16 - magicsockPortV6 atomic.Uint32 // actually a uint16 + mu sync.Mutex + addrs map[netip.Prefix]bool + routes map[netip.Prefix]bool + localRoutes map[netip.Prefix]bool + snatSubnetRoutes bool + statefulFiltering bool + netfilterMode preftype.NetfilterMode + netfilterKind string + magicsockPortV4 uint16 + magicsockPortV6 uint16 } func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { @@ -169,6 +171,7 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon // [eventbus.Client] is closed. func (r *linuxRouter) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { ruleDeletedSub := eventbus.Subscribe[netmon.RuleDeleted](ec) + portUpdateSub := eventbus.Subscribe[router.PortUpdate](ec) return func(ec *eventbus.Client) { for { select { @@ -176,6 +179,11 @@ func (r *linuxRouter) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus. 
return case rs := <-ruleDeletedSub.Events(): r.onIPRuleDeleted(rs.Table, rs.Priority) + case pu := <-portUpdateSub.Events(): + r.logf("portUpdate(port=%v, network=%s)", pu.UDPPort, pu.EndpointNetwork) + if err := r.updateMagicsockPort(pu.UDPPort, pu.EndpointNetwork); err != nil { + r.logf("updateMagicsockPort(port=%v, network=%s) failed: %v", pu.UDPPort, pu.EndpointNetwork, err) + } } } } @@ -355,7 +363,9 @@ func (r *linuxRouter) onIPRuleDeleted(table uint8, priority uint32) { } func (r *linuxRouter) Up() error { - if err := r.setNetfilterMode(netfilterOff); err != nil { + r.mu.Lock() + defer r.mu.Unlock() + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { return fmt.Errorf("setting netfilter mode: %w", err) } if err := r.addIPRules(); err != nil { @@ -369,6 +379,8 @@ func (r *linuxRouter) Up() error { } func (r *linuxRouter) Close() error { + r.mu.Lock() + defer r.mu.Unlock() r.closed.Store(true) if r.unregNetMon != nil { r.unregNetMon() @@ -380,7 +392,7 @@ func (r *linuxRouter) Close() error { if err := r.delIPRules(); err != nil { return err } - if err := r.setNetfilterMode(netfilterOff); err != nil { + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { return err } if err := r.delRoutes(); err != nil { @@ -394,10 +406,10 @@ func (r *linuxRouter) Close() error { return nil } -// setupNetfilter initializes the NetfilterRunner in r.nfr. It expects r.nfr +// setupNetfilterLocked initializes the NetfilterRunner in r.nfr. It expects r.nfr // to be nil, or the current netfilter to be set to netfilterOff. // kind should be either a linuxfw.FirewallMode, or the empty string for auto. -func (r *linuxRouter) setupNetfilter(kind string) error { +func (r *linuxRouter) setupNetfilterLocked(kind string) error { r.netfilterKind = kind var err error @@ -411,24 +423,26 @@ func (r *linuxRouter) setupNetfilter(kind string) error { // Set implements the Router interface. 
func (r *linuxRouter) Set(cfg *router.Config) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
 	var errs []error
 	if cfg == nil {
 		cfg = &shutdownConfig
 	}
 
 	if cfg.NetfilterKind != r.netfilterKind {
-		if err := r.setNetfilterMode(netfilterOff); err != nil {
+		if err := r.setNetfilterModeLocked(netfilterOff); err != nil {
 			err = fmt.Errorf("could not disable existing netfilter: %w", err)
 			errs = append(errs, err)
 		} else {
 			r.nfr = nil
-			if err := r.setupNetfilter(cfg.NetfilterKind); err != nil {
+			if err := r.setupNetfilterLocked(cfg.NetfilterKind); err != nil {
 				errs = append(errs, err)
 			}
 		}
 	}
 
-	if err := r.setNetfilterMode(cfg.NetfilterMode); err != nil {
+	if err := r.setNetfilterModeLocked(cfg.NetfilterMode); err != nil {
 		errs = append(errs, err)
 	}
 
@@ -470,11 +484,11 @@ func (r *linuxRouter) Set(cfg *router.Config) error {
 	case cfg.StatefulFiltering == r.statefulFiltering:
 		// state already correct, nothing to do.
 	case cfg.StatefulFiltering:
-		if err := r.addStatefulRule(); err != nil {
+		if err := r.addStatefulRuleLocked(); err != nil {
 			errs = append(errs, err)
 		}
 	default:
-		if err := r.delStatefulRule(); err != nil {
+		if err := r.delStatefulRuleLocked(); err != nil {
 			errs = append(errs, err)
 		}
 	}
 
@@ -538,15 +552,17 @@ func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *router.Confi
 	r.health.SetHealthy(dockerStatefulFilteringWarnable)
 }
 
-// UpdateMagicsockPort implements the Router interface.
-func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error {
+// updateMagicsockPort tells netfilter what port magicsock is listening on.
+func (r *linuxRouter) updateMagicsockPort(port uint16, network string) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
 	if r.nfr == nil {
-		if err := r.setupNetfilter(r.netfilterKind); err != nil {
+		if err := r.setupNetfilterLocked(r.netfilterKind); err != nil {
 			return fmt.Errorf("could not setup netfilter: %w", err)
 		}
 	}
 
-	var magicsockPort *atomic.Uint32
+	var magicsockPort *uint16
 	switch network {
 	case "udp4":
 		magicsockPort = &r.magicsockPortV4
@@ -566,45 +582,41 @@ func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error {
 
 	// set the port, we'll make the firewall rule when netfilter turns back on
 	if r.netfilterMode == netfilterOff {
-		magicsockPort.Store(uint32(port))
+		*magicsockPort = port
 		return nil
 	}
 
-	cur := magicsockPort.Load()
-
-	if cur == uint32(port) {
+	if *magicsockPort == port {
 		return nil
 	}
 
-	if cur != 0 {
-		if err := r.nfr.DelMagicsockPortRule(uint16(cur), network); err != nil {
+	if *magicsockPort != 0 {
+		if err := r.nfr.DelMagicsockPortRule(*magicsockPort, network); err != nil {
 			return fmt.Errorf("del magicsock port rule: %w", err)
 		}
 	}
 
 	if port != 0 {
-		if err := r.nfr.AddMagicsockPortRule(uint16(port), network); err != nil {
+		if err := r.nfr.AddMagicsockPortRule(port, network); err != nil {
 			return fmt.Errorf("add magicsock port rule: %w", err)
 		}
 	}
 
-	magicsockPort.Store(uint32(port))
+	*magicsockPort = port
 
 	return nil
 }
 
-// setNetfilterMode switches the router to the given netfilter
+// setNetfilterModeLocked switches the router to the given netfilter
// mode. Netfilter state is created or deleted appropriately to
// reflect the new mode, and r.snatSubnetRoutes is updated to reflect
// the current state of subnet SNATing.
-func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { +func (r *linuxRouter) setNetfilterModeLocked(mode preftype.NetfilterMode) error { if !platformCanNetfilter() { mode = netfilterOff } if r.nfr == nil { - var err error - r.nfr, err = linuxfw.New(r.logf, r.netfilterKind) - if err != nil { + if err := r.setupNetfilterLocked(r.netfilterKind); err != nil { return err } } @@ -660,13 +672,13 @@ func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { if err := r.nfr.AddBase(r.tunname); err != nil { return err } - if mport := uint16(r.magicsockPortV4.Load()); mport != 0 { - if err := r.nfr.AddMagicsockPortRule(mport, "udp4"); err != nil { + if r.magicsockPortV4 != 0 { + if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV4, "udp4"); err != nil { return fmt.Errorf("could not add magicsock port rule v4: %w", err) } } - if mport := uint16(r.magicsockPortV6.Load()); mport != 0 && r.getV6FilteringAvailable() { - if err := r.nfr.AddMagicsockPortRule(mport, "udp6"); err != nil { + if r.magicsockPortV6 != 0 && r.getV6FilteringAvailable() { + if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV6, "udp6"); err != nil { return fmt.Errorf("could not add magicsock port rule v6: %w", err) } } @@ -700,13 +712,13 @@ func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { if err := r.nfr.AddBase(r.tunname); err != nil { return err } - if mport := uint16(r.magicsockPortV4.Load()); mport != 0 { - if err := r.nfr.AddMagicsockPortRule(mport, "udp4"); err != nil { + if r.magicsockPortV4 != 0 { + if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV4, "udp4"); err != nil { return fmt.Errorf("could not add magicsock port rule v4: %w", err) } } - if mport := uint16(r.magicsockPortV6.Load()); mport != 0 && r.getV6FilteringAvailable() { - if err := r.nfr.AddMagicsockPortRule(mport, "udp6"); err != nil { + if r.magicsockPortV6 != 0 && r.getV6FilteringAvailable() { + if err := r.nfr.AddMagicsockPortRule(r.magicsockPortV6, "udp6"); err 
!= nil { return fmt.Errorf("could not add magicsock port rule v6: %w", err) } } @@ -1483,9 +1495,9 @@ func (r *linuxRouter) delSNATRule() error { return nil } -// addStatefulRule adds a netfilter rule to perform stateful filtering from +// addStatefulRuleLocked adds a netfilter rule to perform stateful filtering from // subnets onto the tailnet. -func (r *linuxRouter) addStatefulRule() error { +func (r *linuxRouter) addStatefulRuleLocked() error { if r.netfilterMode == netfilterOff { return nil } @@ -1493,9 +1505,9 @@ func (r *linuxRouter) addStatefulRule() error { return r.nfr.AddStatefulRule(r.tunname) } -// delStatefulRule removes the netfilter rule to perform stateful filtering +// delStatefulRuleLocked removes the netfilter rule to perform stateful filtering // from subnets onto the tailnet. -func (r *linuxRouter) delStatefulRule() error { +func (r *linuxRouter) delStatefulRuleLocked() error { if r.netfilterMode == netfilterOff { return nil } diff --git a/wgengine/router/osrouter/router_openbsd.go b/wgengine/router/osrouter/router_openbsd.go index 8f35993096858..55b485f0e7a9e 100644 --- a/wgengine/router/osrouter/router_openbsd.go +++ b/wgengine/router/osrouter/router_openbsd.go @@ -238,13 +238,6 @@ func (r *openbsdRouter) Set(cfg *router.Config) error { return errq } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. 
-func (r *openbsdRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *openbsdRouter) Close() error { cleanUp(r.logf, r.tunname) return nil diff --git a/wgengine/router/osrouter/router_plan9.go b/wgengine/router/osrouter/router_plan9.go index 5872aa7fc0e19..a5b461a6fff67 100644 --- a/wgengine/router/osrouter/router_plan9.go +++ b/wgengine/router/osrouter/router_plan9.go @@ -115,13 +115,6 @@ func (r *plan9Router) Set(cfg *router.Config) error { return nil } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *plan9Router) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *plan9Router) Close() error { // TODO(bradfitz): unbind return nil diff --git a/wgengine/router/osrouter/router_userspace_bsd.go b/wgengine/router/osrouter/router_userspace_bsd.go index cdaf3adeae1b2..70ef2b6bf3ca9 100644 --- a/wgengine/router/osrouter/router_userspace_bsd.go +++ b/wgengine/router/osrouter/router_userspace_bsd.go @@ -206,13 +206,6 @@ func (r *userspaceBSDRouter) Set(cfg *router.Config) (reterr error) { return reterr } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *userspaceBSDRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *userspaceBSDRouter) Close() error { return nil } diff --git a/wgengine/router/osrouter/router_windows.go b/wgengine/router/osrouter/router_windows.go index 05bf210e82a7d..a1acbe3b67287 100644 --- a/wgengine/router/osrouter/router_windows.go +++ b/wgengine/router/osrouter/router_windows.go @@ -114,13 +114,6 @@ func hasDefaultRoute(routes []netip.Prefix) bool { return false } -// UpdateMagicsockPort implements the Router interface. 
This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *winRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *winRouter) Close() error { r.firewall.clear() diff --git a/wgengine/router/router.go b/wgengine/router/router.go index df65e697d10c5..04cc898876557 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -35,14 +35,6 @@ type Router interface { // implementation should handle gracefully. Set(*Config) error - // UpdateMagicsockPort tells the OS network stack what port magicsock - // is currently listening on, so it can be threaded through firewalls - // and such. This is distinct from Set() since magicsock may rebind - // ports independently from the Config changing. - // - // network should be either "udp4" or "udp6". - UpdateMagicsockPort(port uint16, network string) error - // Close closes the router. Close() error } @@ -56,6 +48,14 @@ type NewOpts struct { Bus *eventbus.Bus // required } +// PortUpdate is an eventbus value, reporting the port and address family +// magicsock is currently listening on, so it can be threaded through firewalls +// and such. +type PortUpdate struct { + UDPPort uint16 + EndpointNetwork string // either "udp4" or "udp6". +} + // HookNewUserspaceRouter is the registration point for router implementations // to register a constructor for userspace routers. It's meant for implementations // in wgengine/router/osrouter. 
diff --git a/wgengine/router/router_fake.go b/wgengine/router/router_fake.go index 549867ecaa342..db35fc9eebe15 100644 --- a/wgengine/router/router_fake.go +++ b/wgengine/router/router_fake.go @@ -27,11 +27,6 @@ func (r fakeRouter) Set(cfg *Config) error { return nil } -func (r fakeRouter) UpdateMagicsockPort(_ uint16, _ string) error { - r.logf("[v1] warning: fakeRouter.UpdateMagicsockPort: not implemented.") - return nil -} - func (r fakeRouter) Close() error { r.logf("[v1] warning: fakeRouter.Close: not implemented.") return nil diff --git a/wgengine/userspace.go b/wgengine/userspace.go index e971f0e39e1a7..b8a136da78675 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -398,13 +398,6 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) e.RequestStatus() } - onPortUpdate := func(port uint16, network string) { - e.logf("onPortUpdate(port=%v, network=%s)", port, network) - - if err := e.router.UpdateMagicsockPort(port, network); err != nil { - e.logf("UpdateMagicsockPort(port=%v, network=%s) failed: %v", port, network, err) - } - } magicsockOpts := magicsock.Options{ EventBus: e.eventBus, Logf: logf, @@ -416,7 +409,6 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) HealthTracker: e.health, Metrics: conf.Metrics, ControlKnobs: conf.ControlKnobs, - OnPortUpdate: onPortUpdate, PeerByKeyFunc: e.PeerByKey, } if buildfeatures.HasLazyWG { From 232b928974500c3b5617a47f3a8a7cf911d9e194 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 6 Oct 2025 22:08:38 -0700 Subject: [PATCH 0535/1093] feature/linkspeed: move cosmetic tstun netlink code out to modular feature Part of making all netlink monitoring code optional. 
Updates #17311 (how I got started down this path) Updates #12614 Change-Id: Ic80d8a7a44dc261c4b8678b3c2241c3b3778370d Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 3 +-- cmd/tailscaled/depaware-min.txt | 3 +-- cmd/tailscaled/depaware-minbox.txt | 3 +-- cmd/tailscaled/depaware.txt | 3 ++- cmd/tailscaled/deps_test.go | 5 +++-- cmd/tsidp/depaware.txt | 3 +-- .../buildfeatures/feature_linkspeed_disabled.go | 13 +++++++++++++ feature/buildfeatures/feature_linkspeed_enabled.go | 13 +++++++++++++ feature/condregister/maybe_linkspeed.go | 8 ++++++++ feature/featuretags/featuretags.go | 4 ++++ feature/linkspeed/doc.go | 6 ++++++ .../linkspeed/linkspeed_linux.go | 9 +++++++-- net/tstun/linkattrs_notlinux.go | 12 ------------ net/tstun/tun.go | 14 +++++++++++--- tsnet/depaware.txt | 3 +-- 15 files changed, 72 insertions(+), 30 deletions(-) create mode 100644 feature/buildfeatures/feature_linkspeed_disabled.go create mode 100644 feature/buildfeatures/feature_linkspeed_enabled.go create mode 100644 feature/condregister/maybe_linkspeed.go create mode 100644 feature/linkspeed/doc.go rename net/tstun/linkattrs_linux.go => feature/linkspeed/linkspeed_linux.go (91%) delete mode 100644 net/tstun/linkattrs_notlinux.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d1a63a188091c..9851cf9af037c 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -74,7 +74,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag - L github.com/mdlayher/genetlink from tailscale.com/net/tstun L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/socket from 
github.com/mdlayher/netlink+ @@ -907,7 +906,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 1ef3568d1d1fe..30974287c7022 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -19,7 +19,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - github.com/mdlayher/genetlink from tailscale.com/net/tstun 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink @@ -204,7 +203,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/util/set golang.org/x/exp/maps from tailscale.com/ipn/store/mem - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/icmp from 
tailscale.com/net/ping diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index a7f5d2e0edab6..32c84d7440d23 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -25,7 +25,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/mattn/go-colorable from tailscale.com/cmd/tailscale/cli github.com/mattn/go-isatty from github.com/mattn/go-colorable+ - github.com/mdlayher/genetlink from tailscale.com/net/tstun 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink @@ -232,7 +231,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/util/set golang.org/x/exp/maps from tailscale.com/ipn/store/mem - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/icmp from tailscale.com/net/ping diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 541e9f3fc1972..60bf623e24bc9 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -143,7 +143,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/kortschak/wol from tailscale.com/feature/wakeonlan LD github.com/kr/fs from github.com/pkg/sftp - L github.com/mdlayher/genetlink from tailscale.com/net/tstun + L github.com/mdlayher/genetlink from 
tailscale.com/feature/linkspeed L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables @@ -285,6 +285,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/doctor from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister + L tailscale.com/feature/linkspeed from tailscale.com/feature/condregister L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister tailscale.com/feature/portlist from tailscale.com/feature/condregister tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 0711bafba729e..b98c53eb55cf5 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -285,8 +285,9 @@ func TestMinTailscaledWithCLI(t *testing.T) { } }, BadDeps: map[string]string{ - "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", - "expvar": "unexpected expvar dep", + "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", + "expvar": "unexpected expvar dep", + "github.com/mdlayher/genetlink": "unexpected genetlink dep", }, }.Check(t) } diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index eb20869474689..0ae8761e5b297 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -38,7 +38,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - L 
github.com/mdlayher/genetlink from tailscale.com/net/tstun L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ @@ -335,7 +334,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy diff --git a/feature/buildfeatures/feature_linkspeed_disabled.go b/feature/buildfeatures/feature_linkspeed_disabled.go new file mode 100644 index 0000000000000..19e254a740ff7 --- /dev/null +++ b/feature/buildfeatures/feature_linkspeed_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_linkspeed + +package buildfeatures + +// HasLinkSpeed is whether the binary was built with support for modular feature "Set link speed on TUN device for better OS integration (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linkspeed" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasLinkSpeed = false diff --git a/feature/buildfeatures/feature_linkspeed_enabled.go b/feature/buildfeatures/feature_linkspeed_enabled.go new file mode 100644 index 0000000000000..939858a162910 --- /dev/null +++ b/feature/buildfeatures/feature_linkspeed_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_linkspeed + +package buildfeatures + +// HasLinkSpeed is whether the binary was built with support for modular feature "Set link speed on TUN device for better OS integration (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linkspeed" build tag. +// It's a const so it can be used for dead code elimination. +const HasLinkSpeed = true diff --git a/feature/condregister/maybe_linkspeed.go b/feature/condregister/maybe_linkspeed.go new file mode 100644 index 0000000000000..46064b39a5935 --- /dev/null +++ b/feature/condregister/maybe_linkspeed.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_linkspeed + +package condregister + +import _ "tailscale.com/feature/linkspeed" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index a751f65fbb05f..9c87586dbcbd7 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -161,6 +161,10 @@ var Features = map[FeatureTag]FeatureMeta{ "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, "lazywg": {Sym: "LazyWG", Desc: "Lazy WireGuard configuration for memory-constrained devices with large netmaps"}, "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, + "linkspeed": { + Sym: "LinkSpeed", + Desc: "Set link speed on TUN device for better OS integration (Linux only)", + }, "listenrawdisco": { Sym: "ListenRawDisco", Desc: "Use 
raw sockets for more robust disco (NAT traversal) message receiving (Linux only)", diff --git a/feature/linkspeed/doc.go b/feature/linkspeed/doc.go new file mode 100644 index 0000000000000..2d2fcf0929808 --- /dev/null +++ b/feature/linkspeed/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package linkspeed registers support for setting the TUN link speed on Linux, +// to better integrate with system monitoring tools. +package linkspeed diff --git a/net/tstun/linkattrs_linux.go b/feature/linkspeed/linkspeed_linux.go similarity index 91% rename from net/tstun/linkattrs_linux.go rename to feature/linkspeed/linkspeed_linux.go index 320385ba694dc..90e33d4c9fea4 100644 --- a/net/tstun/linkattrs_linux.go +++ b/feature/linkspeed/linkspeed_linux.go @@ -1,17 +1,22 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !android +//go:build linux && !android -package tstun +package linkspeed import ( "github.com/mdlayher/genetlink" "github.com/mdlayher/netlink" "github.com/tailscale/wireguard-go/tun" "golang.org/x/sys/unix" + "tailscale.com/net/tstun" ) +func init() { + tstun.HookSetLinkAttrs.Set(setLinkAttrs) +} + // setLinkSpeed sets the advertised link speed of the TUN interface. 
func setLinkSpeed(iface tun.Device, mbps int) error { name, err := iface.Name() diff --git a/net/tstun/linkattrs_notlinux.go b/net/tstun/linkattrs_notlinux.go deleted file mode 100644 index 77d227934083e..0000000000000 --- a/net/tstun/linkattrs_notlinux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || android - -package tstun - -import "github.com/tailscale/wireguard-go/tun" - -func setLinkAttrs(iface tun.Device) error { - return nil -} diff --git a/net/tstun/tun.go b/net/tstun/tun.go index 2891e9af4abf8..19b0a53f5be6c 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -18,12 +18,16 @@ import ( "github.com/tailscale/wireguard-go/tun" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" ) -// CreateTAP is the hook set by feature/tap. +// CreateTAP is the hook maybe set by feature/tap. var CreateTAP feature.Hook[func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error)] +// HookSetLinkAttrs is the hook maybe set by feature/linkspeed. +var HookSetLinkAttrs feature.Hook[func(tun.Device) error] + // modprobeTunHook is a Linux-specific hook to run "/sbin/modprobe tun". 
var modprobeTunHook feature.Hook[func() error] @@ -78,8 +82,12 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { dev.Close() return nil, "", err } - if err := setLinkAttrs(dev); err != nil { - logf("setting link attributes: %v", err) + if buildfeatures.HasLinkSpeed { + if f, ok := HookSetLinkAttrs.GetOk(); ok { + if err := f(dev); err != nil { + logf("setting link attributes: %v", err) + } + } } name, err := interfaceName(dev) if err != nil { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 9dd8f0d656c6c..339d188776252 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -38,7 +38,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - L github.com/mdlayher/genetlink from tailscale.com/net/tstun L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ @@ -328,7 +327,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy From a9334576ea233d873938bf8240e8373642efd488 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 7 Oct 2025 12:24:58 +0100 Subject: [PATCH 0536/1093] ipn/ipnlocal: 
use named arguments for `mockControl.send()` Updates #cleanup Signed-off-by: Alex Chan --- ipn/ipnlocal/local_test.go | 10 +++--- ipn/ipnlocal/state_test.go | 74 +++++++++++++++++++++----------------- 2 files changed, 46 insertions(+), 38 deletions(-) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 168f76268afb7..c8367d14d3fb4 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -6145,7 +6145,7 @@ func TestLoginNotifications(t *testing.T) { t.Fatal(err) } - lb.cc.(*mockControl).send(nil, loginURL, false, nil) + lb.cc.(*mockControl).send(sendOpt{url: loginURL}) var wg sync.WaitGroup wg.Add(len(sessions)) @@ -6810,7 +6810,7 @@ func TestSrcCapPacketFilter(t *testing.T) { must.Do(k.UnmarshalText([]byte("nodekey:5c8f86d5fc70d924e55f02446165a5dae8f822994ad26bcf4b08fd841f9bf261"))) controlClient := lb.cc.(*mockControl) - controlClient.send(nil, "", false, &netmap.NetworkMap{ + controlClient.send(sendOpt{nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")}, }).View(), @@ -6839,7 +6839,7 @@ func TestSrcCapPacketFilter(t *testing.T) { }, }}, }}, - }) + }}) f := lb.GetFilterForTest() res := f.Check(netip.MustParseAddr("2.2.2.2"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) @@ -7015,10 +7015,10 @@ func TestDisplayMessageIPNBus(t *testing.T) { cc := lb.cc.(*mockControl) // Assert that we are logged in and authorized, and also send our DisplayMessages - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), DisplayMessages: msgs, - }) + }}) // Tell the health tracker that we are in a map poll because // mockControl doesn't tell it diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index a4b9ba1f452c0..fca01f1056fcb 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -182,9 +182,17 @@ func (cc *mockControl) 
populateKeys() (newKeys bool) { return newKeys } +type sendOpt struct { + err error + url string + loginFinished bool + nm *netmap.NetworkMap +} + // send publishes a controlclient.Status notification upstream. // (In our tests here, upstream is the ipnlocal.Local instance.) -func (cc *mockControl) send(err error, url string, loginFinished bool, nm *netmap.NetworkMap) { +func (cc *mockControl) send(opts sendOpt) { + err, url, loginFinished, nm := opts.err, opts.url, opts.loginFinished, opts.nm if loginFinished { cc.mu.Lock() cc.authBlocked = false @@ -211,7 +219,7 @@ func (cc *mockControl) authenticated(nm *netmap.NetworkMap) { cc.persist.UserProfile = *selfUser.AsStruct() } cc.persist.NodeID = nm.SelfNode.StableID() - cc.send(nil, "", true, nm) + cc.send(sendOpt{loginFinished: true, nm: nm}) } func (cc *mockControl) sendAuthURL(nm *netmap.NetworkMap) { @@ -480,7 +488,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { }, }) url1 := "https://localhost:1/1" - cc.send(nil, url1, false, nil) + cc.send(sendOpt{url: url1}) { cc.assertCalls() @@ -533,7 +541,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { t.Logf("\n\nLogin2 (url response)") notifies.expect(1) url2 := "https://localhost:1/2" - cc.send(nil, url2, false, nil) + cc.send(sendOpt{url: url2}) { cc.assertCalls() @@ -560,7 +568,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { sys.ControlKnobs().SeamlessKeyRenewal.Store(true) } - cc.send(nil, "", true, &netmap.NetworkMap{}) + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{}}) { nn := notifies.drain(3) // Arguably it makes sense to unpause now, since the machine @@ -589,9 +597,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { // but the current code is brittle. // (ie. I suspect it would be better to change false->true in send() // below, and do the same in the real controlclient.) 
- cc.send(nil, "", false, &netmap.NetworkMap{ + cc.send(sendOpt{nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(1) cc.assertCalls() @@ -752,7 +760,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { // an interactive login URL to visit. notifies.expect(2) url3 := "https://localhost:1/3" - cc.send(nil, url3, false, nil) + cc.send(sendOpt{url: url3}) { nn := notifies.drain(2) cc.assertCalls("Login") @@ -763,9 +771,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user2" cc.persist.NodeID = "node2" - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) t.Logf("\n\nLoginFinished3") { nn := notifies.drain(3) @@ -833,9 +841,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { // the control server at all when stopped). t.Logf("\n\nStart4 -> netmap") notifies.expect(0) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { notifies.drain(0) cc.assertCalls("pause") @@ -880,7 +888,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { notifies.expect(1) b.StartLoginInteractive(context.Background()) url4 := "https://localhost:1/4" - cc.send(nil, url4, false, nil) + cc.send(sendOpt{url: url4}) { nn := notifies.drain(1) // It might seem like WantRunning should switch to true here, @@ -902,9 +910,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user3" cc.persist.NodeID = "node3" - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(3) // BUG: pause() being called here 
is a bad sign. @@ -950,9 +958,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { // Control server accepts our valid key from before. t.Logf("\n\nLoginFinished5") notifies.expect(0) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { notifies.drain(0) cc.assertCalls() @@ -965,10 +973,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { } t.Logf("\n\nExpireKey") notifies.expect(1) - cc.send(nil, "", false, &netmap.NetworkMap{ + cc.send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(-time.Minute), SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(1) cc.assertCalls() @@ -980,10 +988,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { t.Logf("\n\nExtendKey") notifies.expect(1) - cc.send(nil, "", false, &netmap.NetworkMap{ + cc.send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(time.Minute), SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(1) cc.assertCalls() @@ -1118,9 +1126,9 @@ func TestWGEngineStatusRace(t *testing.T) { wantState(ipn.NeedsLogin) // Assert that we are logged in and authorized. - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) wantState(ipn.Starting) // Simulate multiple concurrent callbacks from wgengine. 
@@ -1397,9 +1405,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { mustDo(t)(lb.Start(ipn.Options{})) mustDo2(t)(lb.EditPrefs(connect)) cc().authenticated(node1) - cc().send(nil, "", false, &netmap.NetworkMap{ + cc().send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(-time.Minute), - }) + }}) }, wantState: ipn.NeedsLogin, wantCfg: &wgcfg.Config{}, @@ -1526,9 +1534,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { mustDo(t)(lb.Start(ipn.Options{})) mustDo2(t)(lb.EditPrefs(connect)) cc().authenticated(node1) - cc().send(nil, "", false, &netmap.NetworkMap{ + cc().send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(-time.Minute), - }) + }}) }, // Even with seamless, if the key we are using expires, we want to disconnect: wantState: ipn.NeedsLogin, @@ -1616,9 +1624,9 @@ func runTestStateMachineURLRace(t *testing.T, seamless bool) { nw.watch(0, []wantedNotification{ wantStateNotify(ipn.Starting)}) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) nw.check() t.Logf("Running") @@ -1682,7 +1690,7 @@ func runTestStateMachineURLRace(t *testing.T, seamless bool) { t.Logf("Re-auth (receive URL)") url1 := "https://localhost:1/1" - cc.send(nil, url1, false, nil) + cc.send(sendOpt{url: url1}) // Don't need to wait on anything else - once .send completes, authURL should // be set, and once .send has completed, any opportunities for a WG engine @@ -1718,9 +1726,9 @@ func TestWGEngineDownThenUpRace(t *testing.T) { nw.watch(0, []wantedNotification{ wantStateNotify(ipn.Starting)}) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) nw.check() nw.watch(0, []wantedNotification{ @@ -1762,7 +1770,7 @@ func TestWGEngineDownThenUpRace(t *testing.T) { wg.Go(func() { t.Log("cc.send starting") - cc.send(nil, 
url1, false, nil) // will block until engine stops + cc.send(sendOpt{url: url1}) // will block until engine stops t.Log("cc.send returned") }) From 5c1e26b42fa60db7eb7b87ce50d9b7e0befce008 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 7 Oct 2025 07:34:29 -0700 Subject: [PATCH 0537/1093] ipn/localapi: dead code eliminate unreachable/useless LocalAPI handlers when disabled Saves ~94 KB from the min build. Updates #12614 Change-Id: I3b0b8a47f80b9fd3b1038c2834b60afa55bf02c2 Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 3 ++ ipn/ipnlocal/local.go | 5 +- ipn/localapi/localapi.go | 111 +++++++++++++++++++++------------------ 3 files changed, 66 insertions(+), 53 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index a4a871dd8b2db..582c7b8487957 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -381,6 +381,9 @@ func (lc *Client) UserMetrics(ctx context.Context) ([]byte, error) { // // IncrementCounter does not support gauge metrics or negative delta values. func (lc *Client) IncrementCounter(ctx context.Context, name string, delta int) error { + if !buildfeatures.HasClientMetrics { + return nil + } type metricUpdate struct { Name string `json:"name"` Type string `json:"type"` diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index c07cc42a1b8dd..6f991ffae945a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4621,7 +4621,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.updateFilterLocked(newp.View()) - if oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { + if buildfeatures.HasSSH && oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { if b.sshServer != nil { b.goTracker.Go(b.sshServer.Shutdown) b.sshServer = nil @@ -5917,6 +5917,9 @@ func (b *LocalBackend) setWebClientAtomicBoolLocked(nm *netmap.NetworkMap) { // // b.mu must be held. 
func (b *LocalBackend) setExposeRemoteWebClientAtomicBoolLocked(prefs ipn.PrefsView) { + if !buildfeatures.HasWebClient { + return + } shouldExpose := prefs.Valid() && prefs.RunWebClient() b.exposeRemoteWebClientAtomicBool.Store(shouldExpose) } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index fb2c964e7a471..32dc2963feb44 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -71,36 +71,20 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: - "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "check-prefs": (*Handler).serveCheckPrefs, - "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, - "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, - "derpmap": (*Handler).serveDERPMap, - "dial": (*Handler).serveDial, - "disconnect-control": (*Handler).disconnectControl, - "goroutines": (*Handler).serveGoroutines, - "handle-push-message": (*Handler).serveHandlePushMessage, - "id-token": (*Handler).serveIDToken, - "login-interactive": (*Handler).serveLoginInteractive, - "logout": (*Handler).serveLogout, - "logtap": (*Handler).serveLogTap, - "metrics": (*Handler).serveMetrics, - "ping": (*Handler).servePing, - "prefs": (*Handler).servePrefs, - "query-feature": (*Handler).serveQueryFeature, - "reload-config": (*Handler).reloadConfig, - "reset-auth": (*Handler).serveResetAuth, - "set-expiry-sooner": (*Handler).serveSetExpirySooner, - "set-gui-visible": (*Handler).serveSetGUIVisible, - "set-push-device-token": (*Handler).serveSetPushDeviceToken, - "set-udp-gro-forwarding": (*Handler).serveSetUDPGROForwarding, - "shutdown": (*Handler).serveShutdown, - "start": (*Handler).serveStart, - "status": (*Handler).serveStatus, - "update/check": (*Handler).serveUpdateCheck, - "upload-client-metrics": (*Handler).serveUploadClientMetrics, - "watch-ipn-bus": 
(*Handler).serveWatchIPNBus, - "whois": (*Handler).serveWhoIs, + "check-prefs": (*Handler).serveCheckPrefs, + "derpmap": (*Handler).serveDERPMap, + "goroutines": (*Handler).serveGoroutines, + "login-interactive": (*Handler).serveLoginInteractive, + "logout": (*Handler).serveLogout, + "ping": (*Handler).servePing, + "prefs": (*Handler).servePrefs, + "reload-config": (*Handler).reloadConfig, + "reset-auth": (*Handler).serveResetAuth, + "set-expiry-sooner": (*Handler).serveSetExpirySooner, + "shutdown": (*Handler).serveShutdown, + "start": (*Handler).serveStart, + "status": (*Handler).serveStatus, + "whois": (*Handler).serveWhoIs, } func init() { @@ -109,6 +93,17 @@ func init() { } if buildfeatures.HasAdvertiseRoutes { Register("check-ip-forwarding", (*Handler).serveCheckIPForwarding) + Register("check-udp-gro-forwarding", (*Handler).serveCheckUDPGROForwarding) + Register("set-udp-gro-forwarding", (*Handler).serveSetUDPGROForwarding) + } + if buildfeatures.HasUseExitNode && runtime.GOOS == "linux" { + Register("check-reverse-path-filtering", (*Handler).serveCheckReversePathFiltering) + } + if buildfeatures.HasClientMetrics { + Register("upload-client-metrics", (*Handler).serveUploadClientMetrics) + } + if buildfeatures.HasClientUpdate { + Register("update/check", (*Handler).serveUpdateCheck) } if buildfeatures.HasUseExitNode { Register("suggest-exit-node", (*Handler).serveSuggestExitNode) @@ -121,6 +116,9 @@ func init() { Register("bugreport", (*Handler).serveBugReport) Register("pprof", (*Handler).servePprof) } + if buildfeatures.HasDebug || buildfeatures.HasServe { + Register("watch-ipn-bus", (*Handler).serveWatchIPNBus) + } if buildfeatures.HasDNS { Register("dns-osconfig", (*Handler).serveDNSOSConfig) Register("dns-query", (*Handler).serveDNSQuery) @@ -128,6 +126,36 @@ func init() { if buildfeatures.HasUserMetrics { Register("usermetrics", (*Handler).serveUserMetrics) } + if buildfeatures.HasServe { + Register("query-feature", (*Handler).serveQueryFeature) + } + 
if buildfeatures.HasOutboundProxy || buildfeatures.HasSSH { + Register("dial", (*Handler).serveDial) + } + if buildfeatures.HasClientMetrics || buildfeatures.HasDebug { + Register("metrics", (*Handler).serveMetrics) + } + if buildfeatures.HasDebug || buildfeatures.HasAdvertiseRoutes { + Register("disconnect-control", (*Handler).disconnectControl) + } + // Alpha/experimental/debug features. These should be moved to + // their own features if/when they graduate. + if buildfeatures.HasDebug { + Register("id-token", (*Handler).serveIDToken) + Register("alpha-set-device-attrs", (*Handler).serveSetDeviceAttrs) // see tailscale/corp#24690 + Register("handle-push-message", (*Handler).serveHandlePushMessage) + Register("set-push-device-token", (*Handler).serveSetPushDeviceToken) + } + if buildfeatures.HasDebug || runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + Register("set-gui-visible", (*Handler).serveSetGUIVisible) + } + if buildfeatures.HasLogTail { + // TODO(bradfitz): separate out logtail tap functionality from upload + // functionality to make this possible? But seems unlikely people would + // want just this. They could "tail -f" or "journalctl -f" their logs + // themselves. + Register("logtap", (*Handler).serveLogTap) + } } // Register registers a new LocalAPI handler for the given name. @@ -580,15 +608,6 @@ func (h *Handler) serveGoroutines(w http.ResponseWriter, r *http.Request) { func (h *Handler) serveLogTap(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - if !buildfeatures.HasLogTail { - // TODO(bradfitz): separate out logtail tap functionality from upload - // functionality to make this possible? But seems unlikely people would - // want just this. They could "tail -f" or "journalctl -f" their logs - // themselves. - http.Error(w, "logtap not supported in this build", http.StatusNotImplemented) - return - } - // Require write access (~root) as the logs could contain something // sensitive. 
if !h.PermitWrite { @@ -662,7 +681,7 @@ func (h *Handler) servePprof(w http.ResponseWriter, r *http.Request) { // disconnectControl is the handler for local API /disconnect-control endpoint that shuts down control client, so that // node no longer communicates with control. Doing this makes control consider this node inactive. This can be used -// before shutting down a replica of HA subnet router or app connector deployments to ensure that control tells the +// before shutting down a replica of HA subnet router or app connector deployments to ensure that control tells the // peers to switch over to another replica whilst still maintaining th existing peer connections. func (h *Handler) disconnectControl(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite { @@ -1230,11 +1249,6 @@ func (h *Handler) serveHandlePushMessage(w http.ResponseWriter, r *http.Request) } func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Request) { - if !buildfeatures.HasClientMetrics { - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(struct{}{}) - return - } if r.Method != httpm.POST { http.Error(w, "unsupported method", http.StatusMethodNotAllowed) return @@ -1498,13 +1512,6 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return } - - if !feature.CanAutoUpdate() { - // if we don't support auto-update, just say that we're up to date - json.NewEncoder(w).Encode(tailcfg.ClientVersion{RunningLatest: true}) - return - } - cv := h.b.StatusWithoutPeers().ClientVersion // ipnstate.Status documentation notes that ClientVersion may be nil on some // platforms where this information is unavailable. 
In that case, return a From 98a0ccc18aa3e5894b1219f6f4322d400f37fa8d Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 7 Oct 2025 19:32:22 +0100 Subject: [PATCH 0538/1093] cmd/tailscaled: default state encryption off for incompatible args (#17480) Since #17376, containerboot crashes on startup in k8s because state encryption is enabled by default without first checking that it's compatible with the selected state store. Make sure we only default state encryption to enabled if it's not going to immediately clash with other bits of tailscaled config. Updates tailscale/corp#32909 Change-Id: I76c586772750d6da188cc97b647c6e0c1a8734f0 Signed-off-by: Tom Proctor --- cmd/tailscaled/tailscaled.go | 66 +++++++++++++++++++++--------------- 1 file changed, 38 insertions(+), 28 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index a46457face6df..92c44f4c13ff3 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -276,30 +276,7 @@ func main() { } if buildfeatures.HasTPM { - if !args.encryptState.set { - args.encryptState.v = defaultEncryptState() - } - if args.encryptState.v { - if runtime.GOOS != "linux" && runtime.GOOS != "windows" { - log.SetFlags(0) - log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) - } - // Check if we have TPM support in this build. - if !store.HasKnownProviderPrefix(store.TPMPrefix + "/") { - log.SetFlags(0) - log.Fatal("--encrypt-state is not supported in this build of tailscaled") - } - // Check if we have TPM access. - if !hostinfo.New().TPM.Present() { - log.SetFlags(0) - log.Fatal("--encrypt-state is not supported on this device or a TPM is not accessible") - } - // Check for conflicting prefix in --state, like arn: or kube:. 
- if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { - log.SetFlags(0) - log.Fatal("--encrypt-state can only be used with --state set to a local file path") - } - } + handleTPMFlags() } if args.disableLogs { @@ -902,14 +879,47 @@ func applyIntegrationTestEnvKnob() { } } -func defaultEncryptState() bool { +// handleTPMFlags validates the --encrypt-state flag if set, and defaults +// state encryption on if it's supported and compatible with other settings. +func handleTPMFlags() { + switch { + case args.encryptState.v: + // Explicitly enabled, validate. + if err := canEncryptState(); err != nil { + log.SetFlags(0) + log.Fatal(err) + } + case !args.encryptState.set: + policyEncrypt, _ := policyclient.Get().GetBoolean(pkey.EncryptState, feature.TPMAvailable()) + if !policyEncrypt { + // Default disabled, no need to validate. + return + } + // Default enabled if available. + if err := canEncryptState(); err == nil { + args.encryptState.v = true + } + } +} + +// canEncryptState returns an error if state encryption can't be enabled, +// either due to availability or compatibility with other settings. +func canEncryptState() error { if runtime.GOOS != "windows" && runtime.GOOS != "linux" { // TPM encryption is only configurable on Windows and Linux. Other // platforms either use system APIs and are not configurable // (Android/Apple), or don't support any form of encryption yet // (plan9/FreeBSD/etc). - return false + return fmt.Errorf("--encrypt-state is not supported on %s", runtime.GOOS) + } + // Check if we have TPM access. + if !feature.TPMAvailable() { + return errors.New("--encrypt-state is not supported on this device or a TPM is not accessible") + } + // Check for conflicting prefix in --state, like arn: or kube:. 
+ if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { + return errors.New("--encrypt-state can only be used with --state set to a local file path") } - v, _ := policyclient.Get().GetBoolean(pkey.EncryptState, feature.TPMAvailable()) - return v + + return nil } From ad6cf2f8f369ae54652a0808cda872ca558ab429 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Oct 2025 16:43:22 -0700 Subject: [PATCH 0539/1093] util/eventbus: add a function-based subscriber type (#17432) Originally proposed by @bradfitz in #17413. In practice, a lot of subscribers have only one event type of interest, or a small number of mostly independent ones. In that case, the overhead of running and maintaining a goroutine to select on multiple channels winds up being more noisy than we'd like for the user of the API. For this common case, add a new SubscriberFunc[T] type that delivers events to a callback owned by the subscriber, directly on the goroutine belonging to the client itself. This frees the consumer from the need to maintain their own goroutine to pull events from the channel, and to watch for closure of the subscriber. Before: s := eventbus.Subscribe[T](eventClient) go func() { for { select { case <-s.Done(): return case e := <-s.Events(): doSomethingWith(e) } } }() // ... s.Close() After: func doSomethingWithT(e T) { ... } s := eventbus.SubscribeFunc(eventClient, doSomethingWithT) // ... s.Close() Moreover, unless the caller wants to explicitly stop the subscriber separately from its governing client, it need not capture the SubscriberFunc value at all. One downside of this approach is that a slow or deadlocked callback could block client's service routine and thus stall all other subscriptions on that client, However, this can already happen more broadly if a subscriber fails to service its delivery channel in a timely manner, it just feeds back more immediately. Updates #17487 Change-Id: I64592d786005177aa9fd445c263178ed415784d5 Signed-off-by: M. J. 
Fromberger --- util/eventbus/bus_test.go | 247 +++++++++++++++++++++++++++---------- util/eventbus/client.go | 23 ++++ util/eventbus/subscribe.go | 79 +++++++++--- 3 files changed, 270 insertions(+), 79 deletions(-) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index f9e7ee3dd0459..de292cf1adb5b 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "testing" + "testing/synctest" "time" "github.com/creachadair/taskgroup" @@ -64,6 +65,55 @@ func TestBus(t *testing.T) { } } +func TestSubscriberFunc(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + exp := expectEvents(t, EventA{12345}) + eventbus.SubscribeFunc[EventA](c, func(e EventA) { exp.Got(e) }) + + p := eventbus.Publish[EventA](c) + p.Publish(EventA{12345}) + + synctest.Wait() + c.Close() + + if !exp.Empty() { + t.Errorf("unexpected extra events: %+v", exp.want) + } + }) + + t.Run("SubscriberPublishes", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + pa := eventbus.Publish[EventA](c) + pb := eventbus.Publish[EventB](c) + exp := expectEvents(t, EventA{127}, EventB{128}) + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + exp.Got(e) + pb.Publish(EventB{Counter: e.Counter + 1}) + }) + eventbus.SubscribeFunc[EventB](c, func(e EventB) { + exp.Got(e) + }) + + pa.Publish(EventA{127}) + + synctest.Wait() + c.Close() + if !exp.Empty() { + t.Errorf("unepxected extra events: %+v", exp.want) + } + }) + }) +} + func TestBusMultipleConsumers(t *testing.T) { b := eventbus.New() defer b.Close() @@ -111,80 +161,149 @@ func TestBusMultipleConsumers(t *testing.T) { } } -func TestSpam(t *testing.T) { - b := eventbus.New() - defer b.Close() +func TestClientMixedSubscribers(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := 
b.Client("TestClient") + + var gotA EventA + s1 := eventbus.Subscribe[EventA](c) - const ( - publishers = 100 - eventsPerPublisher = 20 - wantEvents = publishers * eventsPerPublisher - subscribers = 100 - ) - - var g taskgroup.Group - - received := make([][]EventA, subscribers) - for i := range subscribers { - c := b.Client(fmt.Sprintf("Subscriber%d", i)) - defer c.Close() - s := eventbus.Subscribe[EventA](c) - g.Go(func() error { - for range wantEvents { + var gotB EventB + eventbus.SubscribeFunc[EventB](c, func(e EventB) { + t.Logf("func sub received %[1]T %+[1]v", e) + gotB = e + }) + + go func() { + for { select { - case evt := <-s.Events(): - received[i] = append(received[i], evt) - case <-s.Done(): - t.Errorf("queue done before expected number of events received") - return errors.New("queue prematurely closed") - case <-time.After(5 * time.Second): - t.Errorf("timed out waiting for expected bus event after %d events", len(received[i])) - return errors.New("timeout") + case <-s1.Done(): + return + case e := <-s1.Events(): + t.Logf("chan sub received %[1]T %+[1]v", e) + gotA = e } } - return nil - }) - } + }() + + p1 := eventbus.Publish[EventA](c) + p2 := eventbus.Publish[EventB](c) + + go p1.Publish(EventA{12345}) + go p2.Publish(EventB{67890}) - published := make([][]EventA, publishers) - for i := range publishers { - g.Run(func() { + synctest.Wait() + c.Close() + synctest.Wait() + + if diff := cmp.Diff(gotB, EventB{67890}); diff != "" { + t.Errorf("Chan sub (-got, +want):\n%s", diff) + } + if diff := cmp.Diff(gotA, EventA{12345}); diff != "" { + t.Errorf("Func sub (-got, +want):\n%s", diff) + } + }) +} + +func TestSpam(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + const ( + publishers = 100 + eventsPerPublisher = 20 + wantEvents = publishers * eventsPerPublisher + subscribers = 100 + ) + + var g taskgroup.Group + + // A bunch of subscribers receiving on channels. 
+ chanReceived := make([][]EventA, subscribers) + for i := range subscribers { + c := b.Client(fmt.Sprintf("Subscriber%d", i)) + defer c.Close() + + s := eventbus.Subscribe[EventA](c) + g.Go(func() error { + for range wantEvents { + select { + case evt := <-s.Events(): + chanReceived[i] = append(chanReceived[i], evt) + case <-s.Done(): + t.Errorf("queue done before expected number of events received") + return errors.New("queue prematurely closed") + case <-time.After(5 * time.Second): + t.Logf("timed out waiting for expected bus event after %d events", len(chanReceived[i])) + return errors.New("timeout") + } + } + return nil + }) + } + + // A bunch of subscribers receiving via a func. + funcReceived := make([][]EventA, subscribers) + for i := range subscribers { + c := b.Client(fmt.Sprintf("SubscriberFunc%d", i)) + defer c.Close() + eventbus.SubscribeFunc(c, func(e EventA) { + funcReceived[i] = append(funcReceived[i], e) + }) + } + + published := make([][]EventA, publishers) + for i := range publishers { c := b.Client(fmt.Sprintf("Publisher%d", i)) p := eventbus.Publish[EventA](c) - for j := range eventsPerPublisher { - evt := EventA{i*eventsPerPublisher + j} - p.Publish(evt) - published[i] = append(published[i], evt) - } - }) - } + g.Run(func() { + defer c.Close() + for j := range eventsPerPublisher { + evt := EventA{i*eventsPerPublisher + j} + p.Publish(evt) + published[i] = append(published[i], evt) + } + }) + } - if err := g.Wait(); err != nil { - t.Fatal(err) - } - var last []EventA - for i, got := range received { - if len(got) != wantEvents { - // Receiving goroutine already reported an error, we just need - // to fail early within the main test goroutine. 
- t.FailNow() + if err := g.Wait(); err != nil { + t.Fatal(err) } - if last == nil { - continue + synctest.Wait() + + tests := []struct { + name string + recv [][]EventA + }{ + {"Subscriber", chanReceived}, + {"SubscriberFunc", funcReceived}, } - if diff := cmp.Diff(got, last); diff != "" { - t.Errorf("Subscriber %d did not see the same events as %d (-got+want):\n%s", i, i-1, diff) + for _, tc := range tests { + for i, got := range tc.recv { + if len(got) != wantEvents { + t.Errorf("%s %d: got %d events, want %d", tc.name, i, len(got), wantEvents) + } + if i == 0 { + continue + } + if diff := cmp.Diff(got, tc.recv[i-1]); diff != "" { + t.Errorf("%s %d did not see the same events as %d (-got+want):\n%s", tc.name, i, i-1, diff) + } + } } - last = got - } - for i, sent := range published { - if got := len(sent); got != eventsPerPublisher { - t.Fatalf("Publisher %d sent %d events, want %d", i, got, eventsPerPublisher) + for i, sent := range published { + if got := len(sent); got != eventsPerPublisher { + t.Fatalf("Publisher %d sent %d events, want %d", i, got, eventsPerPublisher) + } } - } - // TODO: check that the published sequences are proper - // subsequences of the received slices. + // TODO: check that the published sequences are proper + // subsequences of the received slices. 
+ }) } func TestClient_Done(t *testing.T) { @@ -366,10 +485,12 @@ func expectEvents(t *testing.T, want ...any) *queueChecker { func (q *queueChecker) Got(v any) { q.t.Helper() if q.Empty() { - q.t.Fatalf("queue got unexpected %v", v) + q.t.Errorf("queue got unexpected %v", v) + return } if v != q.want[0] { - q.t.Fatalf("queue got %#v, want %#v", v, q.want[0]) + q.t.Errorf("queue got %#v, want %#v", v, q.want[0]) + return } q.want = q.want[1:] } diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 7c02688860861..9e3f3ee76cc31 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -147,6 +147,29 @@ func Subscribe[T any](c *Client) *Subscriber[T] { return s } +// SubscribeFunc is like [Subscribe], but calls the provided func for each +// event of type T. +// +// A SubscriberFunc calls f synchronously from the client's goroutine. +// This means the callback must not block for an extended period of time, +// as this will block the subscriber and slow event processing for all +// subscriptions on c. +func SubscribeFunc[T any](c *Client, f func(T)) *SubscriberFunc[T] { + c.mu.Lock() + defer c.mu.Unlock() + + // The caller should not race subscriptions with close, give them a useful + // diagnostic at the call site. + if c.isClosed() { + panic("cannot SubscribeFunc on a closed client") + } + + r := c.subscribeStateLocked() + s := newSubscriberFunc[T](r, f) + r.addSubscriber(s) + return s +} + // Publish returns a publisher for event type T using the given client. // It panics if c is closed. 
func Publish[T any](c *Client) *Publisher[T] { diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index ef155e621ae1a..56da413efa5e4 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -61,45 +61,45 @@ func newSubscribeState(c *Client) *subscribeState { return ret } -func (q *subscribeState) pump(ctx context.Context) { +func (s *subscribeState) pump(ctx context.Context) { var vals queue[DeliveredEvent] acceptCh := func() chan DeliveredEvent { if vals.Full() { return nil } - return q.write + return s.write } for { if !vals.Empty() { val := vals.Peek() - sub := q.subscriberFor(val.Event) + sub := s.subscriberFor(val.Event) if sub == nil { // Raced with unsubscribe. vals.Drop() continue } - if !sub.dispatch(ctx, &vals, acceptCh, q.snapshot) { + if !sub.dispatch(ctx, &vals, acceptCh, s.snapshot) { return } - if q.debug.active() { - q.debug.run(DeliveredEvent{ + if s.debug.active() { + s.debug.run(DeliveredEvent{ Event: val.Event, From: val.From, - To: q.client, + To: s.client, }) } } else { // Keep the cases in this select in sync with - // Subscriber.dispatch below. The only difference should be - // that this select doesn't deliver queued values to - // anyone, and unconditionally accepts new values. + // Subscriber.dispatch and SubscriberFunc.dispatch below. + // The only difference should be that this select doesn't deliver + // queued values to anyone, and unconditionally accepts new values. 
select { - case val := <-q.write: + case val := <-s.write: vals.Add(val) case <-ctx.Done(): return - case ch := <-q.snapshot: + case ch := <-s.snapshot: ch <- vals.Snapshot() } } @@ -152,10 +152,10 @@ func (s *subscribeState) deleteSubscriber(t reflect.Type) { s.client.deleteSubscriber(t, s) } -func (q *subscribeState) subscriberFor(val any) subscriber { - q.outputsMu.Lock() - defer q.outputsMu.Unlock() - return q.outputs[reflect.TypeOf(val)] +func (s *subscribeState) subscriberFor(val any) subscriber { + s.outputsMu.Lock() + defer s.outputsMu.Unlock() + return s.outputs[reflect.TypeOf(val)] } // Close closes the subscribeState. It implicitly closes all Subscribers @@ -177,6 +177,7 @@ func (s *subscribeState) closed() <-chan struct{} { } // A Subscriber delivers one type of event from a [Client]. +// Events are sent to the [Subscriber.Events] channel. type Subscriber[T any] struct { stop stopFlag read chan T @@ -252,3 +253,49 @@ func (s *Subscriber[T]) Close() { s.stop.Stop() // unblock receivers s.unregister() } + +// A SubscriberFunc delivers one type of event from a [Client]. +// Events are forwarded synchronously to a function provided at construction. +type SubscriberFunc[T any] struct { + stop stopFlag + read func(T) + unregister func() +} + +func newSubscriberFunc[T any](r *subscribeState, f func(T)) *SubscriberFunc[T] { + return &SubscriberFunc[T]{ + read: f, + unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) }, + } +} + +// Close closes the SubscriberFunc, indicating the caller no longer wishes to +// receive this event type. After Close, no further events will be passed to +// the callback. +// +// If the [Bus] from which s was created is closed, s is implicitly closed and +// does not need to be closed separately. +func (s *SubscriberFunc[T]) Close() { s.stop.Stop(); s.unregister() } + +// subscribeType implements part of the subscriber interface. 
+func (s *SubscriberFunc[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } + +// dispatch implements part of the subscriber interface. +func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { + // Keep the cases in this select in sync with subscribeState.pump + // above. The only different should be that this select + // delivers a value by calling s.read. + select { + case val := <-acceptCh(): + vals.Add(val) + case <-ctx.Done(): + return false + case ch := <-snapshot: + ch <- vals.Snapshot() + default: + } + t := vals.Peek().Event.(T) + s.read(t) + vals.Drop() + return true +} From f25e47cdeb61cfb7c4f1187aafd33add6d1c31a4 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 8 Oct 2025 10:01:25 -0400 Subject: [PATCH 0540/1093] flake.nix: use tailscale go fork (#17486) Move our nix flake to use Tailscale's go toolchain instead of upstream go. Fixes #17494 Signed-off-by: Mike O'Driscoll --- flake.nix | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/flake.nix b/flake.nix index e8ef03853badd..7b97c8a13bd78 100644 --- a/flake.nix +++ b/flake.nix @@ -46,8 +46,9 @@ systems, flake-compat, }: let - go125Version = "1.25.1"; - goHash = "sha256-0BDBCc7pTYDv5oHqtGvepJGskGv0ZYPDLp8NuwvRpZQ="; + goVersion = "1.25.1"; + toolChainRev = nixpkgs.lib.fileContents ./go.toolchain.rev; + gitHash = "sha256-1OCmJ7sZL6G/6wO2+lnW4uYPCIdbXhscD5qSTIPoxDk="; eachSystem = f: nixpkgs.lib.genAttrs (import systems) (system: f (import nixpkgs { @@ -55,10 +56,12 @@ overlays = [ (final: prev: { go_1_25 = prev.go_1_25.overrideAttrs { - version = go125Version; - src = prev.fetchurl { - url = "https://go.dev/dl/go${go125Version}.src.tar.gz"; - hash = goHash; + version = goVersion; + src = prev.fetchFromGitHub { + owner = "tailscale"; + repo = "go"; + rev = toolChainRev; + hash = gitHash; }; }; }) From cd2a3425cb54a66f1531229d99c9af840e0a5807 
Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Wed, 8 Oct 2025 15:15:12 +0100 Subject: [PATCH 0541/1093] cmd/tsrecorder: adds sending api level logging to tsrecorder (#16960) Updates #17141 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/depaware.txt | 32 +- cmd/k8s-operator/sts.go | 2 +- flake.nix | 2 +- go.mod | 2 + go.mod.sri | 2 +- go.sum | 4 + k8s-operator/api-proxy/proxy.go | 143 ++++- k8s-operator/api-proxy/proxy_events_test.go | 548 ++++++++++++++++++++ sessionrecording/connect.go | 91 ++++ sessionrecording/connect_test.go | 102 +++- sessionrecording/event.go | 104 ++++ sessionrecording/header.go | 1 - shell.nix | 2 +- 13 files changed, 1014 insertions(+), 21 deletions(-) create mode 100644 k8s-operator/api-proxy/proxy_events_test.go create mode 100644 sessionrecording/event.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 9851cf9af037c..da43ac1772629 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -6,6 +6,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus + github.com/blang/semver/v4 from k8s.io/component-base/metrics 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket @@ -60,6 +61,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/google/gofuzz/bytesource from github.com/google/gofuzz github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/tka + W 💣 github.com/inconshreveable/mousetrap from github.com/spf13/cobra 
github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -87,17 +89,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header from github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil 💣 github.com/prometheus/client_golang/prometheus from github.com/prometheus/client_golang/prometheus/collectors+ - github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics + github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics+ github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/client_golang/prometheus/promhttp from sigs.k8s.io/controller-runtime/pkg/metrics/server+ github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ - LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus + LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus+ LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf - github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd + github.com/spf13/cobra from 
k8s.io/component-base/cli/flag + github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd+ W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -124,6 +127,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 + go.opentelemetry.io/otel/attribute from go.opentelemetry.io/otel/trace + go.opentelemetry.io/otel/codes from go.opentelemetry.io/otel/trace + 💣 go.opentelemetry.io/otel/internal from go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/internal/attribute from go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/trace from k8s.io/component-base/metrics + go.opentelemetry.io/otel/trace/embedded from go.opentelemetry.io/otel/trace go.uber.org/multierr from go.uber.org/zap+ go.uber.org/zap from github.com/go-logr/zapr+ go.uber.org/zap/buffer from go.uber.org/zap/internal/bufferpool+ @@ -283,8 +292,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/api/meta/testrestmapper from k8s.io/client-go/testing k8s.io/apimachinery/pkg/api/resource from k8s.io/api/autoscaling/v1+ k8s.io/apimachinery/pkg/api/validation from k8s.io/apimachinery/pkg/util/managedfields/internal+ + k8s.io/apimachinery/pkg/api/validation/path from k8s.io/apiserver/pkg/endpoints/request 💣 k8s.io/apimachinery/pkg/apis/meta/internalversion from k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme+ - k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme from k8s.io/client-go/metadata + k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme from k8s.io/client-go/metadata+ 
k8s.io/apimachinery/pkg/apis/meta/internalversion/validation from k8s.io/client-go/util/watchlist 💣 k8s.io/apimachinery/pkg/apis/meta/v1 from k8s.io/api/admission/v1+ k8s.io/apimachinery/pkg/apis/meta/v1/unstructured from k8s.io/apimachinery/pkg/runtime/serializer/versioning+ @@ -327,13 +337,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/util/uuid from sigs.k8s.io/controller-runtime/pkg/internal/controller+ k8s.io/apimachinery/pkg/util/validation from k8s.io/apimachinery/pkg/api/validation+ k8s.io/apimachinery/pkg/util/validation/field from k8s.io/apimachinery/pkg/api/errors+ + k8s.io/apimachinery/pkg/util/version from k8s.io/apiserver/pkg/features+ k8s.io/apimachinery/pkg/util/wait from k8s.io/client-go/tools/cache+ k8s.io/apimachinery/pkg/util/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/version from k8s.io/client-go/discovery+ k8s.io/apimachinery/pkg/watch from k8s.io/apimachinery/pkg/apis/meta/v1+ k8s.io/apimachinery/third_party/forked/golang/json from k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/third_party/forked/golang/reflect from k8s.io/apimachinery/pkg/conversion + k8s.io/apiserver/pkg/authentication/user from k8s.io/apiserver/pkg/endpoints/request + k8s.io/apiserver/pkg/endpoints/request from tailscale.com/k8s-operator/api-proxy + k8s.io/apiserver/pkg/features from k8s.io/apiserver/pkg/endpoints/request k8s.io/apiserver/pkg/storage/names from tailscale.com/cmd/k8s-operator + k8s.io/apiserver/pkg/util/feature from k8s.io/apiserver/pkg/endpoints/request+ k8s.io/client-go/applyconfigurations/admissionregistration/v1 from k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1+ k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 from k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 from 
k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 @@ -603,6 +618,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/util/keyutil from k8s.io/client-go/util/cert k8s.io/client-go/util/watchlist from k8s.io/client-go/dynamic+ k8s.io/client-go/util/workqueue from k8s.io/client-go/transport+ + k8s.io/component-base/cli/flag from k8s.io/component-base/featuregate + k8s.io/component-base/featuregate from k8s.io/apiserver/pkg/features+ + k8s.io/component-base/metrics from k8s.io/component-base/metrics/legacyregistry+ + k8s.io/component-base/metrics/legacyregistry from k8s.io/component-base/metrics/prometheus/feature + k8s.io/component-base/metrics/prometheus/feature from k8s.io/component-base/featuregate + k8s.io/component-base/metrics/prometheusextension from k8s.io/component-base/metrics + k8s.io/component-base/version from k8s.io/component-base/featuregate+ k8s.io/klog/v2 from k8s.io/apimachinery/pkg/api/meta+ k8s.io/klog/v2/internal/buffer from k8s.io/klog/v2 k8s.io/klog/v2/internal/clock from k8s.io/klog/v2 @@ -1162,7 +1184,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sync/atomic from context+ syscall from crypto/internal/sysrand+ text/tabwriter from k8s.io/apimachinery/pkg/util/diff+ - text/template from html/template + text/template from html/template+ text/template/parse from html/template+ time from compress/gzip+ unicode from bytes+ diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 6300341b7e75e..c52ffce85495b 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -63,7 +63,7 @@ const ( AnnotationHostname = "tailscale.com/hostname" annotationTailnetTargetIPOld = "tailscale.com/ts-tailnet-target-ip" AnnotationTailnetTargetIP = "tailscale.com/tailnet-ip" - //MagicDNS name of tailnet node. + // MagicDNS name of tailnet node. 
AnnotationTailnetTargetFQDN = "tailscale.com/tailnet-fqdn" AnnotationProxyGroup = "tailscale.com/proxy-group" diff --git a/flake.nix b/flake.nix index 7b97c8a13bd78..9481248f0596c 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= +# nix-direnv cache busting line: sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= diff --git a/go.mod b/go.mod index bce634431cbd9..965a447b95886 100644 --- a/go.mod +++ b/go.mod @@ -136,6 +136,7 @@ require ( github.com/alecthomas/go-check-sumtype v0.1.4 // indirect github.com/alexkohler/nakedret/v2 v2.0.4 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/boltdb/bolt v1.3.1 // indirect github.com/bombsimon/wsl/v4 v4.2.1 // indirect github.com/butuzov/mirror v1.1.0 // indirect @@ -186,6 +187,7 @@ require ( go.uber.org/automaxprocs v1.5.3 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + k8s.io/component-base v0.32.0 // indirect ) require ( diff --git a/go.mod.sri b/go.mod.sri index a1d81c1a95dc7..f94054422c6d7 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= +sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= diff --git a/go.sum b/go.sum index 5e2205575f416..bc386d1fdb37f 100644 --- a/go.sum +++ b/go.sum @@ -178,6 +178,8 @@ github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJ github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 
v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= @@ -1546,6 +1548,8 @@ k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs= k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag= k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= +k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU= +k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index a0f2f930b8067..fdb79815222d7 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -6,10 +6,13 @@ package apiproxy import ( + "bytes" "context" "crypto/tls" + "encoding/json" "errors" "fmt" + "io" "net" "net/http" "net/http/httputil" @@ -19,13 +22,16 @@ import ( "time" "go.uber.org/zap" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/client-go/rest" "k8s.io/client-go/transport" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" - "tailscale.com/k8s-operator/sessionrecording" ksr "tailscale.com/k8s-operator/sessionrecording" "tailscale.com/kube/kubetypes" + "tailscale.com/net/netx" + "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/util/clientmetric" @@ -83,12 +89,13 @@ func NewAPIServerProxy(zlog 
*zap.SugaredLogger, restConfig *rest.Config, ts *tsn } ap := &APIServerProxy{ - log: zlog, - lc: lc, - authMode: mode == kubetypes.APIServerProxyModeAuth, - https: https, - upstreamURL: u, - ts: ts, + log: zlog, + lc: lc, + authMode: mode == kubetypes.APIServerProxyModeAuth, + https: https, + upstreamURL: u, + ts: ts, + sendEventFunc: sessionrecording.SendEvent, } ap.rp = &httputil.ReverseProxy{ Rewrite: func(pr *httputil.ProxyRequest) { @@ -183,6 +190,8 @@ type APIServerProxy struct { ts *tsnet.Server hs *http.Server upstreamURL *url.URL + + sendEventFunc func(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error } // serveDefault is the default handler for Kubernetes API server requests. @@ -192,7 +201,16 @@ func (ap *APIServerProxy) serveDefault(w http.ResponseWriter, r *http.Request) { ap.authError(w, err) return } + + if err = ap.recordRequestAsEvent(r, who); err != nil { + msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) + ap.log.Errorf(msg) + http.Error(w, msg, http.StatusBadGateway) + return + } + counterNumRequestsProxied.Add(1) + ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } @@ -220,7 +238,7 @@ func (ap *APIServerProxy) serveAttachWS(w http.ResponseWriter, r *http.Request) ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.WSProtocol) } -func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request, sessionType sessionrecording.SessionType, proto ksr.Protocol) { +func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request, sessionType ksr.SessionType, proto ksr.Protocol) { const ( podNameKey = "pod" namespaceNameKey = "namespace" @@ -232,6 +250,14 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request ap.authError(w, err) return } + + if err = ap.recordRequestAsEvent(r, who); err != nil { + msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) + ap.log.Errorf(msg) + http.Error(w, msg, http.StatusBadGateway) + 
return + } + counterNumRequestsProxied.Add(1) failOpen, addrs, err := determineRecorderConfig(who) if err != nil { @@ -283,6 +309,107 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request ap.rp.ServeHTTP(h, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } +func (ap *APIServerProxy) recordRequestAsEvent(req *http.Request, who *apitype.WhoIsResponse) error { + failOpen, addrs, err := determineRecorderConfig(who) + if err != nil { + return fmt.Errorf("error trying to determine whether the kubernetes api request needs to be recorded: %w", err) + } + if len(addrs) == 0 { + if failOpen { + return nil + } else { + return fmt.Errorf("forbidden: kubernetes api request must be recorded, but no recorders are available") + } + } + + factory := &request.RequestInfoFactory{ + APIPrefixes: sets.NewString("api", "apis"), + GrouplessAPIPrefixes: sets.NewString("api"), + } + + reqInfo, err := factory.NewRequestInfo(req) + if err != nil { + return fmt.Errorf("error parsing request %s %s: %w", req.Method, req.URL.Path, err) + } + + kubeReqInfo := sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: reqInfo.IsResourceRequest, + Path: reqInfo.Path, + Verb: reqInfo.Verb, + APIPrefix: reqInfo.APIPrefix, + APIGroup: reqInfo.APIGroup, + APIVersion: reqInfo.APIVersion, + Namespace: reqInfo.Namespace, + Resource: reqInfo.Resource, + Subresource: reqInfo.Subresource, + Name: reqInfo.Name, + Parts: reqInfo.Parts, + FieldSelector: reqInfo.FieldSelector, + LabelSelector: reqInfo.LabelSelector, + } + event := &sessionrecording.Event{ + Timestamp: time.Now().Unix(), + Kubernetes: kubeReqInfo, + Type: sessionrecording.KubernetesAPIEventType, + UserAgent: req.UserAgent(), + Request: sessionrecording.Request{ + Method: req.Method, + Path: req.URL.String(), + QueryParameters: req.URL.Query(), + }, + Source: sessionrecording.Source{ + NodeID: who.Node.StableID, + Node: strings.TrimSuffix(who.Node.Name, "."), + }, + } + + if !who.Node.IsTagged() { + 
event.Source.NodeUser = who.UserProfile.LoginName + event.Source.NodeUserID = who.UserProfile.ID + } else { + event.Source.NodeTags = who.Node.Tags + } + + bodyBytes, err := io.ReadAll(req.Body) + if err != nil { + return fmt.Errorf("failed to read body: %w", err) + } + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + event.Request.Body = bodyBytes + + var errs []error + // TODO: ChaosInTheCRD ensure that if there are multiple addrs timing out we don't experience slowdown on client waiting for response. + fail := true + for _, addr := range addrs { + data := new(bytes.Buffer) + if err := json.NewEncoder(data).Encode(event); err != nil { + return fmt.Errorf("error marshaling request event: %w", err) + } + + if err := ap.sendEventFunc(addr, data, ap.ts.Dial); err != nil { + if apiSupportErr, ok := err.(sessionrecording.EventAPINotSupportedErr); ok { + ap.log.Warnf(apiSupportErr.Error()) + fail = false + } else { + err := fmt.Errorf("error sending event to recorder with address %q: %v", addr.String(), err) + errs = append(errs, err) + } + } else { + return nil + } + } + + merr := errors.Join(errs...) + if fail && failOpen { + msg := fmt.Sprintf("[unexpected] failed to send event to recorders with errors: %s", merr.Error()) + msg = msg + "; failure mode is 'fail open'; continuing request without recording." 
+ ap.log.Warn(msg) + return nil + } + + return merr +} + func (ap *APIServerProxy) addImpersonationHeadersAsRequired(r *http.Request) { r.URL.Scheme = ap.upstreamURL.Scheme r.URL.Host = ap.upstreamURL.Host diff --git a/k8s-operator/api-proxy/proxy_events_test.go b/k8s-operator/api-proxy/proxy_events_test.go new file mode 100644 index 0000000000000..230927dc07cb6 --- /dev/null +++ b/k8s-operator/api-proxy/proxy_events_test.go @@ -0,0 +1,548 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package apiproxy + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "net/http" + "net/http/httptest" + "net/netip" + "net/url" + "reflect" + "testing" + + "go.uber.org/zap" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/net/netx" + "tailscale.com/sessionrecording" + "tailscale.com/tailcfg" + "tailscale.com/tsnet" +) + +type fakeSender struct { + sent map[netip.AddrPort][]byte + err error + calls int +} + +func (s *fakeSender) Send(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error { + s.calls++ + if s.err != nil { + return s.err + } + if s.sent == nil { + s.sent = make(map[netip.AddrPort][]byte) + } + data, _ := io.ReadAll(event) + s.sent[ap] = data + return nil +} + +func (s *fakeSender) Reset() { + s.sent = nil + s.err = nil + s.calls = 0 +} + +func TestRecordRequestAsEvent(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + sender := &fakeSender{} + ap := &APIServerProxy{ + log: zl.Sugar(), + ts: &tsnet.Server{}, + sendEventFunc: sender.Send, + } + + defaultWho := &apitype.WhoIsResponse{ + Node: &tailcfg.Node{ + StableID: "stable-id", + Name: "node.ts.net.", + }, + UserProfile: &tailcfg.UserProfile{ + ID: 1, + LoginName: "user@example.com", + }, + CapMap: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityKubernetes: []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"recorderAddrs":["127.0.0.1:1234"]}`), + tailcfg.RawMessage(`{"enforceRecorder": true}`), + }, 
+ }, + } + + defaultSource := sessionrecording.Source{ + Node: "node.ts.net", + NodeID: "stable-id", + NodeUser: "user@example.com", + NodeUserID: 1, + } + + tests := []struct { + name string + req func() *http.Request + who *apitype.WhoIsResponse + setupSender func() + wantErr bool + wantEvent *sessionrecording.Event + wantNumCalls int + }{ + { + name: "request-with-dot-in-name", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo.bar", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo.bar", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo.bar", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Name: "foo.bar", + Parts: []string{"pods", "foo.bar"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-dash-in-name", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo-bar", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo-bar", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo-bar", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Name: "foo-bar", + Parts: []string{"pods", "foo-bar"}, + }, + Source: defaultSource, + }, + }, + { + name: 
"request-with-query-parameter", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?watch=true", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?watch=true", + Body: nil, + QueryParameters: url.Values{"watch": []string{"true"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "watch", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-label-selector", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?labelSelector=app%3Dfoo", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?labelSelector=app%3Dfoo", + Body: nil, + QueryParameters: url.Values{"labelSelector": []string{"app=foo"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + LabelSelector: "app=foo", + }, + Source: defaultSource, + }, + }, + { + name: "request-with-field-selector", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?fieldSelector=status.phase%3DRunning", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?fieldSelector=status.phase%3DRunning", + Body: nil, + QueryParameters: 
url.Values{"fieldSelector": []string{"status.phase=Running"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + FieldSelector: "status.phase=Running", + }, + Source: defaultSource, + }, + }, + { + name: "request-for-non-existent-resource", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/foo", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/foo", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/foo", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "foo", + Parts: []string{"foo"}, + }, + Source: defaultSource, + }, + }, + { + name: "basic-request", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "multiple-recorders", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: defaultWho.Node, + UserProfile: defaultWho.UserProfile, + CapMap: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityKubernetes: 
[]tailcfg.RawMessage{ + tailcfg.RawMessage(`{"recorderAddrs":["127.0.0.1:1234", "127.0.0.1:5678"]}`), + }, + }, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + }, + { + name: "request-with-body", + req: func() *http.Request { + req := httptest.NewRequest("POST", "/api/v1/pods", bytes.NewBufferString(`{"foo":"bar"}`)) + req.Header.Set("Content-Type", "application/json") + return req + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "POST", + Path: "/api/v1/pods", + Body: json.RawMessage(`{"foo":"bar"}`), + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "create", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "tagged-node", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: &tailcfg.Node{ + StableID: "stable-id", + Name: "node.ts.net.", + Tags: []string{"tag:foo"}, + }, + UserProfile: &tailcfg.UserProfile{}, + CapMap: defaultWho.CapMap, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: sessionrecording.Source{ + Node: "node.ts.net", + NodeID: "stable-id", + NodeTags: []string{"tag:foo"}, + }, + }, + }, + { + name: "no-recorders", + req: func() *http.Request { + 
return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: defaultWho.Node, + UserProfile: defaultWho.UserProfile, + CapMap: tailcfg.PeerCapMap{}, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 0, + }, + { + name: "error-sending", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: defaultWho, + setupSender: func() { + sender.Reset() + sender.err = errors.New("send error") + }, + wantErr: true, + wantNumCalls: 1, + }, + { + name: "request-for-crd", + req: func() *http.Request { + return httptest.NewRequest("GET", "/apis/custom.example.com/v1/myresources", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/apis/custom.example.com/v1/myresources", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/apis/custom.example.com/v1/myresources", + Verb: "list", + APIPrefix: "apis", + APIGroup: "custom.example.com", + APIVersion: "v1", + Resource: "myresources", + Parts: []string{"myresources"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-proxy-verb", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo/proxy", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo/proxy", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo/proxy", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + 
Namespace: "default", + Resource: "pods", + Subresource: "proxy", + Name: "foo", + Parts: []string{"pods", "foo", "proxy"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-complex-path", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "services", + Subresource: "proxy-subpath", + Name: "foo:8080", + Parts: []string{"services", "foo:8080", "proxy-subpath", "more", "segments"}, + }, + Source: defaultSource, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.setupSender() + + req := tt.req() + err := ap.recordRequestAsEvent(req, tt.who) + + if (err != nil) != tt.wantErr { + t.Fatalf("recordRequestAsEvent() error = %v, wantErr %v", err, tt.wantErr) + } + + if sender.calls != tt.wantNumCalls { + t.Fatalf("expected %d calls to sender, got %d", tt.wantNumCalls, sender.calls) + } + + if tt.wantEvent != nil { + for _, sentData := range sender.sent { + var got sessionrecording.Event + if err := json.Unmarshal(sentData, &got); err != nil { + t.Fatalf("failed to unmarshal sent event: %v", err) + } + + got.Timestamp = 0 + tt.wantEvent.Timestamp = got.Timestamp + + got.UserAgent = "" + tt.wantEvent.UserAgent = "" + + if !bytes.Equal(got.Request.Body, tt.wantEvent.Request.Body) { + t.Errorf("sent event body does not match 
wanted event body.\nGot: %s\nWant: %s", string(got.Request.Body), string(tt.wantEvent.Request.Body)) + } + got.Request.Body = nil + tt.wantEvent.Request.Body = nil + + if !reflect.DeepEqual(&got, tt.wantEvent) { + t.Errorf("sent event does not match wanted event.\nGot: %#v\nWant: %#v", &got, tt.wantEvent) + } + } + } + }) + } +} diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index a470969d8c68b..8abf9dd7e9142 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -110,6 +110,97 @@ func supportsV2(ctx context.Context, hc *http.Client, ap netip.AddrPort) bool { return resp.StatusCode == http.StatusOK && resp.ProtoMajor > 1 } +// supportsEvent checks whether a recorder instance supports the /v2/event +// endpoint. +func supportsEvent(ctx context.Context, hc *http.Client, ap netip.AddrPort) (bool, error) { + ctx, cancel := context.WithTimeout(ctx, http2ProbeTimeout) + defer cancel() + req, err := http.NewRequestWithContext(ctx, httpm.HEAD, fmt.Sprintf("http://%s/v2/event", ap), nil) + if err != nil { + return false, err + } + resp, err := hc.Do(req) + if err != nil { + return false, err + } + + defer resp.Body.Close() + + if resp.StatusCode == http.StatusOK { + return true, nil + } + + if resp.StatusCode != http.StatusNotFound { + body, err := io.ReadAll(resp.Body) + if err != nil { + // Handle the case where reading the body itself fails + return false, fmt.Errorf("server returned non-OK status: %s, and failed to read body: %w", resp.Status, err) + } + + return false, fmt.Errorf("server returned non-OK status: %d: %s", resp.StatusCode, string(body)) + } + + return false, nil +} + +const addressNotSupportEventv2 = `recorder at address %q does not support "/v2/event" endpoint` + +type EventAPINotSupportedErr struct { + ap netip.AddrPort +} + +func (e EventAPINotSupportedErr) Error() string { + return fmt.Sprintf(addressNotSupportEventv2, e.ap) +} + +// SendEvent sends an event the tsrecorders /v2/event endpoint. 
+func SendEvent(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) (retErr error) { + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + if retErr != nil { + cancel() + } + }() + + client := clientHTTP1(ctx, dial) + + supported, err := supportsEvent(ctx, client, ap) + if err != nil { + return fmt.Errorf("error checking support for `/v2/event` endpoint: %w", err) + } + + if !supported { + return EventAPINotSupportedErr{ + ap: ap, + } + } + + req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s/v2/event", ap.String()), event) + if err != nil { + return fmt.Errorf("error creating request: %w", err) + } + + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + // Handle the case where reading the body itself fails + return fmt.Errorf("server returned non-OK status: %s, and failed to read body: %w", resp.Status, err) + } + + return fmt.Errorf("server returned non-OK status: %d: %s", resp.StatusCode, string(body)) + } + + return nil +} + // connectV1 connects to the legacy /record endpoint on the recorder. It is // used for backwards-compatibility with older tsrecorder instances. // diff --git a/sessionrecording/connect_test.go b/sessionrecording/connect_test.go index c0fcf6d40c617..cacf061d79b79 100644 --- a/sessionrecording/connect_test.go +++ b/sessionrecording/connect_test.go @@ -9,11 +9,13 @@ import ( "crypto/rand" "crypto/sha256" "encoding/json" + "fmt" "io" "net" "net/http" "net/http/httptest" "net/netip" + "strings" "testing" "time" @@ -148,9 +150,9 @@ func TestConnectToRecorder(t *testing.T) { // Wire up h2c-compatible HTTP/2 server. This is optional // because the v1 recorder didn't support HTTP/2 and we try to // mimic that. 
- h2s := &http2.Server{} - srv.Config.Handler = h2c.NewHandler(mux, h2s) - if err := http2.ConfigureServer(srv.Config, h2s); err != nil { + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { t.Errorf("configuring HTTP/2 support in server: %v", err) } } @@ -187,3 +189,97 @@ func TestConnectToRecorder(t *testing.T) { }) } } + +func TestSendEvent(t *testing.T) { + t.Run("supported", func(t *testing.T) { + eventBody := `{"foo":"bar"}` + eventRecieved := make(chan []byte, 1) + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + mux.HandleFunc("POST /v2/event", func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + t.Error(err) + } + eventRecieved <- body + w.WriteHeader(http.StatusOK) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, bytes.NewBufferString(eventBody), d.DialContext) + if err != nil { + t.Fatalf("SendEvent: %v", err) + } + + if recv := string(<-eventRecieved); recv != eventBody { + t.Errorf("mismatch in event body, sent %q, received %q", eventBody, recv) + } + }) + + t.Run("not_supported", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + 
srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, nil, d.DialContext) + if err == nil { + t.Fatal("expected an error, got nil") + } + if !strings.Contains(err.Error(), fmt.Sprintf(addressNotSupportEventv2, srv.Listener.Addr().String())) { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("server_error", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + mux.HandleFunc("POST /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, nil, d.DialContext) + if err == nil { + t.Fatal("expected an error, got nil") + } + if !strings.Contains(err.Error(), "server returned non-OK status") { + t.Fatalf("unexpected error: %v", err) + } + }) +} diff --git a/sessionrecording/event.go b/sessionrecording/event.go new file mode 100644 index 0000000000000..41d8f2d5806b4 --- /dev/null +++ b/sessionrecording/event.go @@ -0,0 +1,104 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package sessionrecording + +import ( + "net/url" + + "tailscale.com/tailcfg" +) + +const ( + KubernetesAPIEventType = "kubernetes-api-request" +) + +// Event represents the top-level structure of a tsrecorder event. +type Event struct { + // Type specifies the kind of event being recorded (e.g., "kubernetes-api-request"). 
+ Type string `json:"type"` + + // ID is a reference of the path that this event is stored at in tsrecorder + ID string `json:"id"` + + // Timestamp is the time when the event was recorded represented as a unix timestamp. + Timestamp int64 `json:"timestamp"` + + // UserAgent is the UerAgent specified in the request, which helps identify + // the client software that initiated the request. + UserAgent string `json:"userAgent"` + + // Request holds details of the HTTP request. + Request Request `json:"request"` + + // Kubernetes contains Kubernetes-specific information about the request (if + // the type is `kubernetes-api-request`) + Kubernetes KubernetesRequestInfo `json:"kubernetes"` + + // Source provides details about the client that initiated the request. + Source Source `json:"source"` +} + +// copied from https://github.com/kubernetes/kubernetes/blob/11ade2f7dd264c2f52a4a1342458abbbaa3cb2b1/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go#L44 +// KubernetesRequestInfo contains Kubernetes specific information in the request (if the type is `kubernetes-api-request`) +type KubernetesRequestInfo struct { + // IsResourceRequest indicates whether or not the request is for an API resource or subresource + IsResourceRequest bool + // Path is the URL path of the request + Path string + // Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch. + // for non-resource requests, this is the lowercase http verb + Verb string + + APIPrefix string + APIGroup string + APIVersion string + + Namespace string + // Resource is the name of the resource being requested. This is not the kind. For example: pods + Resource string + // Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. 
+ // For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" + // (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding". + Subresource string + // Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in. + Name string + // Parts are the path parts for the request, always starting with /{resource}/{name} + Parts []string + + // FieldSelector contains the unparsed field selector from a request. It is only present if the apiserver + // honors field selectors for the verb this request is associated with. + FieldSelector string + // LabelSelector contains the unparsed field selector from a request. It is only present if the apiserver + // honors field selectors for the verb this request is associated with. + LabelSelector string +} + +type Source struct { + // Node is the FQDN of the node originating the connection. + // It is also the MagicDNS name for the node. + // It does not have a trailing dot. + // e.g. "host.tail-scale.ts.net" + Node string `json:"node"` + + // NodeID is the node ID of the node originating the connection. + NodeID tailcfg.StableNodeID `json:"nodeID"` + + // Tailscale-specific fields: + // NodeTags is the list of tags on the node originating the connection (if any). + NodeTags []string `json:"nodeTags,omitempty"` + + // NodeUserID is the user ID of the node originating the connection (if not tagged). + NodeUserID tailcfg.UserID `json:"nodeUserID,omitempty"` // if not tagged + + // NodeUser is the LoginName of the node originating the connection (if not tagged). + NodeUser string `json:"nodeUser,omitempty"` +} + +// Request holds information about a request. 
+type Request struct { + Method string `json:"method"` + Path string `json:"path"` + Body []byte `json:"body"` + QueryParameters url.Values `json:"queryParameters"` +} diff --git a/sessionrecording/header.go b/sessionrecording/header.go index 545bf06bd5984..2208522168dec 100644 --- a/sessionrecording/header.go +++ b/sessionrecording/header.go @@ -62,7 +62,6 @@ type CastHeader struct { ConnectionID string `json:"connectionID"` // Fields that are only set for Kubernetes API server proxy session recordings: - Kubernetes *Kubernetes `json:"kubernetes,omitempty"` } diff --git a/shell.nix b/shell.nix index 1891111b2d5f6..ec345998afe30 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-jsmQ0S1Uh1cU/kr0onYLJY9VYcFx297QZjQALM3wX10= +# nix-direnv cache busting line: sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= From 0586d5d40d0f3804a94a0a074b539fa81e547118 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Wed, 8 Oct 2025 15:15:42 +0100 Subject: [PATCH 0542/1093] k8s-operator/sessionrecording: gives the connection to the recorder from the hijacker a dedicated context (#17403) The hijacker on k8s-proxy's reverse proxy is used to stream recordings to tsrecorder as they pass through the proxy to the kubernetes api server. The connection to the recorder was using the client's (e.g., kubectl) context, rather than a dedicated one. This was causing the recording stream to get cut off in scenarios where the client cancelled the context before streaming could be completed. By using a dedicated context, we can continue streaming even if the client cancels the context (for example if the client request completes). 
Fixes #17404 Signed-off-by: chaosinthecrd --- k8s-operator/sessionrecording/hijacker.go | 13 +++++++++++-- k8s-operator/sessionrecording/hijacker_test.go | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index ebd77641b9136..2d6c94710e866 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -122,7 +122,7 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { return nil, nil, fmt.Errorf("error hijacking connection: %w", err) } - conn, err := h.setUpRecording(h.req.Context(), reqConn) + conn, err := h.setUpRecording(reqConn) if err != nil { return nil, nil, fmt.Errorf("error setting up session recording: %w", err) } @@ -133,7 +133,7 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { // spdyHijacker.addrs. Returns conn from provided opts, wrapped in recording // logic. If connecting to the recorder fails or an error is received during the // session and spdyHijacker.failOpen is false, connection will be closed. -func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) { +func (h *Hijacker) setUpRecording(conn net.Conn) (_ net.Conn, retErr error) { const ( // https://docs.asciinema.org/manual/asciicast/v2/ asciicastv2 = 2 @@ -147,6 +147,14 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, errChan <-chan error ) h.log.Infof("kubectl %s session will be recorded, recorders: %v, fail open policy: %t", h.sessionType, h.addrs, h.failOpen) + // NOTE: (ChaosInTheCRD) we want to use a dedicated context here, rather than the context from the request, + // otherwise the context can be cancelled by the client (kubectl) while we are still streaming to tsrecorder. 
+ ctx, cancel := context.WithCancel(context.Background()) + defer func() { + if retErr != nil { + cancel() + } + }() qp := h.req.URL.Query() container := strings.Join(qp[containerKey], "") var recorderAddr net.Addr @@ -213,6 +221,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, } go func() { + defer cancel() var err error select { case <-ctx.Done(): diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go index cac6f55c7c7d7..fb45820a71b86 100644 --- a/k8s-operator/sessionrecording/hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -95,7 +95,7 @@ func Test_Hijacker(t *testing.T) { proto: tt.proto, } ctx := context.Background() - _, err := h.setUpRecording(ctx, tc) + _, err := h.setUpRecording(tc) if (err != nil) != tt.wantsSetupErr { t.Errorf("spdyHijacker.setupRecording() error = %v, wantErr %v", err, tt.wantsSetupErr) return From 2d1014ead197a25350ab6e45efeaab3077244776 Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Wed, 8 Oct 2025 15:34:50 +0100 Subject: [PATCH 0543/1093] ipn/ipnlocal: fix data race on captiveCtx in enterStateLockedOnEntry (#17495) Updates #17491 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 6f991ffae945a..e04ef9e6c02f7 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5573,8 +5573,9 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // can be shut down if we transition away from Running. 
if buildfeatures.HasCaptivePortal { if b.captiveCancel == nil { - b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx) - b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, b.captiveCtx) }) + captiveCtx, captiveCancel := context.WithCancel(b.ctx) + b.captiveCtx, b.captiveCancel = captiveCtx, captiveCancel + b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, captiveCtx) }) } } } else if oldState == ipn.Running { From 2a3d67e9b78a7f8d9a2f20ebcc8658f409fe4d1a Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Oct 2025 16:50:34 -0700 Subject: [PATCH 0544/1093] wgengine: use eventbus.SubscribeFunc in userspaceEngine Updates #15160 Updates #17487 Change-Id: Id852098c4f9c2fdeab9151b0b8c14dceff73b99d Signed-off-by: M. J. Fromberger --- wgengine/userspace.go | 39 +++++++++++---------------------------- 1 file changed, 11 insertions(+), 28 deletions(-) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index b8a136da78675..fa2379288d0ee 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -94,9 +94,8 @@ const networkLoggerUploadTimeout = 5 * time.Second type userspaceEngine struct { // eventBus will eventually become required, but for now may be nil. - // TODO(creachadair): Enforce that this is non-nil at construction. 
- eventBus *eventbus.Bus - eventSubs eventbus.Monitor + eventBus *eventbus.Bus + eventClient *eventbus.Client logf logger.Logf wgLogger *wglog.Logger // a wireguard-go logging wrapper @@ -539,34 +538,18 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } - cli := e.eventBus.Client("userspaceEngine") - e.eventSubs = cli.Monitor(e.consumeEventbusTopics(cli)) + ec := e.eventBus.Client("userspaceEngine") + eventbus.SubscribeFunc(ec, func(cd netmon.ChangeDelta) { + if f, ok := feature.HookProxyInvalidateCache.GetOk(); ok { + f() + } + e.linkChange(&cd) + }) + e.eventClient = ec e.logf("Engine created.") return e, nil } -// consumeEventbusTopics consumes events from all relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. -func (e *userspaceEngine) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { - changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](cli) - return func(cli *eventbus.Client) { - for { - select { - case <-cli.Done(): - return - case changeDelta := <-changeDeltaSub.Events(): - if f, ok := feature.HookProxyInvalidateCache.GetOk(); ok { - f() - } - e.linkChange(&changeDelta) - } - } - } -} - // echoRespondToAll is an inbound post-filter responding to all echo requests. func echoRespondToAll(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { if p.IsEchoRequest() { @@ -1257,7 +1240,7 @@ func (e *userspaceEngine) RequestStatus() { } func (e *userspaceEngine) Close() { - e.eventSubs.Close() + e.eventClient.Close() e.mu.Lock() if e.closing { e.mu.Unlock() From 583373057741016248bc0ce21adab2e48b1b7391 Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Tue, 7 Oct 2025 16:55:07 -0700 Subject: [PATCH 0545/1093] wgengine/router: use eventbus.SubscribeFunc in linuxRouter Updates #15160 Updates #17487 Change-Id: Ib798e2321e55a078c8bd37f366fe4e73054e4520 Signed-off-by: M. J. Fromberger --- wgengine/router/osrouter/router_linux.go | 40 +++++++----------------- 1 file changed, 12 insertions(+), 28 deletions(-) diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 835a9050f9565..58bd0513ab768 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -60,7 +60,7 @@ type linuxRouter struct { tunname string netMon *netmon.Monitor health *health.Tracker - eventSubs eventbus.Monitor + eventClient *eventbus.Client rulesAddedPub *eventbus.Publisher[AddIPRules] unregNetMon func() @@ -120,7 +120,16 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon } ec := bus.Client("router-linux") r.rulesAddedPub = eventbus.Publish[AddIPRules](ec) - r.eventSubs = ec.Monitor(r.consumeEventbusTopics(ec)) + eventbus.SubscribeFunc(ec, func(rs netmon.RuleDeleted) { + r.onIPRuleDeleted(rs.Table, rs.Priority) + }) + eventbus.SubscribeFunc(ec, func(pu router.PortUpdate) { + r.logf("portUpdate(port=%v, network=%s)", pu.UDPPort, pu.EndpointNetwork) + if err := r.updateMagicsockPort(pu.UDPPort, pu.EndpointNetwork); err != nil { + r.logf("updateMagicsockPort(port=%v, network=%s) failed: %v", pu.UDPPort, pu.EndpointNetwork, err) + } + }) + r.eventClient = ec if r.useIPCommand() { r.ipRuleAvailable = (cmd.run("ip", "rule") == nil) @@ -164,31 +173,6 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon return r, nil } -// consumeEventbusTopics consumes events from all [Conn]-relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. 
the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. -func (r *linuxRouter) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { - ruleDeletedSub := eventbus.Subscribe[netmon.RuleDeleted](ec) - portUpdateSub := eventbus.Subscribe[router.PortUpdate](ec) - return func(ec *eventbus.Client) { - for { - select { - case <-ec.Done(): - return - case rs := <-ruleDeletedSub.Events(): - r.onIPRuleDeleted(rs.Table, rs.Priority) - case pu := <-portUpdateSub.Events(): - r.logf("portUpdate(port=%v, network=%s)", pu.UDPPort, pu.EndpointNetwork) - if err := r.updateMagicsockPort(pu.UDPPort, pu.EndpointNetwork); err != nil { - r.logf("updateMagicsockPort(port=%v, network=%s) failed: %v", pu.UDPPort, pu.EndpointNetwork, err) - } - } - } - } -} - // ipCmdSupportsFwmask returns true if the system 'ip' binary supports using a // fwmark stanza with a mask specified. To our knowledge, everything except busybox // pre-1.33 supports this. @@ -385,7 +369,7 @@ func (r *linuxRouter) Close() error { if r.unregNetMon != nil { r.unregNetMon() } - r.eventSubs.Close() + r.eventClient.Close() if err := r.downInterface(); err != nil { return err } From 241ea1c98bdfc6e28497340aa57ff46b7604ed68 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Oct 2025 17:03:39 -0700 Subject: [PATCH 0546/1093] wgengine/magicsock: use eventbus.SubscribeFunc in Conn Updates #15160 Updates #17487 Change-Id: Ic9eb8d82b21d9dc38cb3c681b87101dfbc95af16 Signed-off-by: M. J. Fromberger --- wgengine/magicsock/magicsock.go | 71 ++++++++++----------------------- 1 file changed, 21 insertions(+), 50 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index c7d07c27708f7..492dff2ce39fb 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -156,7 +156,7 @@ type Conn struct { // struct. Initialized once at construction, then constant. 
eventBus *eventbus.Bus - eventSubs eventbus.Monitor + eventClient *eventbus.Client logf logger.Logf epFunc func([]tailcfg.Endpoint) derpActiveFunc func() @@ -625,43 +625,6 @@ func newConn(logf logger.Logf) *Conn { return c } -// consumeEventbusTopics consumes events from all [Conn]-relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. -func (c *Conn) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { - // Subscribe calls must return before NewConn otherwise published - // events can be missed. - pmSub := eventbus.Subscribe[portmappertype.Mapping](cli) - filterSub := eventbus.Subscribe[FilterUpdate](cli) - nodeViewsSub := eventbus.Subscribe[NodeViewsUpdate](cli) - nodeMutsSub := eventbus.Subscribe[NodeMutationsUpdate](cli) - syncSub := eventbus.Subscribe[syncPoint](cli) - allocRelayEndpointSub := eventbus.Subscribe[UDPRelayAllocResp](cli) - return func(cli *eventbus.Client) { - for { - select { - case <-cli.Done(): - return - case <-pmSub.Events(): - c.onPortMapChanged() - case filterUpdate := <-filterSub.Events(): - c.onFilterUpdate(filterUpdate) - case nodeViews := <-nodeViewsSub.Events(): - c.onNodeViewsUpdate(nodeViews) - case nodeMuts := <-nodeMutsSub.Events(): - c.onNodeMutationsUpdate(nodeMuts) - case syncPoint := <-syncSub.Events(): - c.dlogf("magicsock: received sync point after reconfig") - syncPoint.Signal() - case allocResp := <-allocRelayEndpointSub.Events(): - c.onUDPRelayAllocResp(allocResp) - } - } - } -} - func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { c.mu.Lock() defer c.mu.Unlock() @@ -726,11 +689,20 @@ func NewConn(opts Options) (*Conn, error) { // Set up publishers and subscribers. Subscribe calls must return before // NewConn otherwise published events can be missed. 
- cli := c.eventBus.Client("magicsock.Conn") - c.syncPub = eventbus.Publish[syncPoint](cli) - c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](cli) - c.portUpdatePub = eventbus.Publish[router.PortUpdate](cli) - c.eventSubs = cli.Monitor(c.consumeEventbusTopics(cli)) + ec := c.eventBus.Client("magicsock.Conn") + c.eventClient = ec + c.syncPub = eventbus.Publish[syncPoint](ec) + c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](ec) + c.portUpdatePub = eventbus.Publish[router.PortUpdate](ec) + eventbus.SubscribeFunc(ec, c.onPortMapChanged) + eventbus.SubscribeFunc(ec, c.onFilterUpdate) + eventbus.SubscribeFunc(ec, c.onNodeViewsUpdate) + eventbus.SubscribeFunc(ec, c.onNodeMutationsUpdate) + eventbus.SubscribeFunc(ec, func(sp syncPoint) { + c.dlogf("magicsock: received sync point after reconfig") + sp.Signal() + }) + eventbus.SubscribeFunc(ec, c.onUDPRelayAllocResp) c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) c.donec = c.connCtx.Done() @@ -3307,13 +3279,12 @@ func (c *connBind) isClosed() bool { // // Only the first close does anything. Any later closes return nil. func (c *Conn) Close() error { - // Close the [eventbus.Client] and wait for c.consumeEventbusTopics to + // Close the [eventbus.Client] to wait for subscribers to // return before acquiring c.mu: - // 1. Conn.consumeEventbusTopics event handlers also acquire c.mu, they can - // deadlock with c.Close(). - // 2. Conn.consumeEventbusTopics event handlers may not guard against - // undesirable post/in-progress Conn.Close() behaviors. - c.eventSubs.Close() + // 1. Event handlers also acquire c.mu, they can deadlock with c.Close(). + // 2. Event handlers may not guard against undesirable post/in-progress + // Conn.Close() behaviors. 
+ c.eventClient.Close() c.mu.Lock() defer c.mu.Unlock() @@ -3410,7 +3381,7 @@ func (c *Conn) shouldDoPeriodicReSTUNLocked() bool { return true } -func (c *Conn) onPortMapChanged() { c.ReSTUN("portmap-changed") } +func (c *Conn) onPortMapChanged(portmappertype.Mapping) { c.ReSTUN("portmap-changed") } // ReSTUN triggers an address discovery. // The provided why string is for debug logging only. From 109cb50d5fd50127652349abe997347bfad52c32 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 7 Oct 2025 17:10:53 -0700 Subject: [PATCH 0547/1093] ipn/ipnlocal: use eventbus.SubscribeFunc in expiryManager Updates #15160 Updates #17487 Change-Id: I8721e3ac1af505630edca7c5cb50695b0aad832a Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/expiry.go | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index 849e28610d33e..8ea63d21a4fb0 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -43,7 +43,7 @@ type expiryManager struct { logf logger.Logf clock tstime.Clock - eventSubs eventbus.Monitor + eventClient *eventbus.Client } func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { @@ -53,30 +53,13 @@ func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { clock: tstime.StdClock{}, } - cli := bus.Client("ipnlocal.expiryManager") - em.eventSubs = cli.Monitor(em.consumeEventbusTopics(cli)) + em.eventClient = bus.Client("ipnlocal.expiryManager") + eventbus.SubscribeFunc(em.eventClient, func(ct controlclient.ControlTime) { + em.onControlTime(ct.Value) + }) return em } -// consumeEventbusTopics consumes events from all relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. 
-func (em *expiryManager) consumeEventbusTopics(cli *eventbus.Client) func(*eventbus.Client) { - controlTimeSub := eventbus.Subscribe[controlclient.ControlTime](cli) - return func(cli *eventbus.Client) { - for { - select { - case <-cli.Done(): - return - case time := <-controlTimeSub.Events(): - em.onControlTime(time.Value) - } - } - } -} - // onControlTime is called whenever we receive a new timestamp from the control // server to store the delta. func (em *expiryManager) onControlTime(t time.Time) { @@ -245,7 +228,7 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim return nextExpiry } -func (em *expiryManager) close() { em.eventSubs.Close() } +func (em *expiryManager) close() { em.eventClient.Close() } // ControlNow estimates the current time on the control server, calculated as // localNow + the delta between local and control server clocks as recorded From 9556a0c6da5b5e8186477711c2003a07e5831fda Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Oct 2025 07:50:56 -0700 Subject: [PATCH 0548/1093] control/ts2021: fix data race during concurrent Close and conn ending Fixes tailscale/corp#33125 Change-Id: I9911f5059d5ebe42ecf7db9becb2326cca240765 Signed-off-by: Brad Fitzpatrick --- control/ts2021/client.go | 1 + 1 file changed, 1 insertion(+) diff --git a/control/ts2021/client.go b/control/ts2021/client.go index e0b82b89c9a6e..ca10b1d1b5bc6 100644 --- a/control/ts2021/client.go +++ b/control/ts2021/client.go @@ -180,6 +180,7 @@ func (nc *Client) Close() error { nc.mu.Lock() live := nc.connPool nc.closed = true + nc.connPool = nil // stop noteConnClosed from mutating it as we loop over it (in live) below nc.mu.Unlock() for _, c := range live { From 57bd875856652e1cc6a6c2ab63ee252dfd6b4980 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 8 Oct 2025 11:36:38 -0400 Subject: [PATCH 0549/1093] control/controlclient: add missing comment (#17498) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Updates #cleanup Signed-off-by: Claus Lensbøl --- control/controlclient/direct.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 5f26e2ba13760..61886482d8ef2 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -139,7 +139,7 @@ type Options struct { Dialer *tsdial.Dialer // non-nil C2NHandler http.Handler // or nil ControlKnobs *controlknobs.Knobs // or nil to ignore - Bus *eventbus.Bus + Bus *eventbus.Bus // non-nil, for setting up publishers // Observer is called when there's a change in status to report // from the control client. From 9a72513fa49b98c906b6d3e1935a12bffd3f53a4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Oct 2025 07:38:10 -0700 Subject: [PATCH 0550/1093] go.toolchain.rev: bump Go to 1.25.2 Updates tailscale/go#135 Change-Id: I89cfb49b998b2fd0264f8d5f4a61af839cd06626 Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 1 + cmd/k8s-operator/depaware.txt | 2 +- cmd/stund/depaware.txt | 1 + cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 1 + cmd/tailscaled/depaware-minbox.txt | 1 + cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- go.mod | 2 +- go.toolchain.rev | 2 +- tsnet/depaware.txt | 2 +- 11 files changed, 11 insertions(+), 7 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 278d54b1fd6d9..2fa1fed45dd90 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -341,6 +341,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ + internal/saferio from encoding/asn1 internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt 
index da43ac1772629..d4fdb87fc5766 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1119,7 +1119,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 5eadfc0d15bd5..8cd2e49beb052 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -237,6 +237,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ + internal/saferio from encoding/asn1 internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index b0b4359e48de3..8c2fb0e9221eb 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -409,7 +409,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 30974287c7022..fe50dface5e57 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -355,6 +355,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from 
crypto/subtle+ internal/runtime/syscall from internal/runtime/cgroup+ + internal/saferio from encoding/asn1 internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 32c84d7440d23..a4999825e38b3 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -389,6 +389,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ internal/runtime/syscall from internal/runtime/cgroup+ + internal/saferio from encoding/asn1 internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 60bf623e24bc9..c7d571f1e016e 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -680,7 +680,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 0ae8761e5b297..894b4a07821b1 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -510,7 +510,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/go.mod b/go.mod index 965a447b95886..0c6d33fa09fe0 100644 --- 
a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.1 +go 1.25.2 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index 1fd4f3df25747..d5de7955850fd 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -aa85d1541af0921f830f053f29d91971fa5838f6 +a80a86e575c5b7b23b78540e947335d22f74d274 diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 339d188776252..d602c7b2f4733 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -503,7 +503,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ LA internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ From 4543ea5c8a2f9c9e45ddc2beb4d0635bd99cd079 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 8 Oct 2025 09:53:32 -0700 Subject: [PATCH 0551/1093] wgengine/magicsock: start peer relay path discovery sooner (#17485) This commit also shuffles the hasPeerRelayServers atomic load to happen sooner, reducing the cost for clients with no peer relay servers. Updates tailscale/corp#33099 Signed-off-by: Jordan Whited --- wgengine/magicsock/endpoint.go | 38 +++++++++++++++++----------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index f4c8b14694058..7deafb7528ca2 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -879,14 +879,6 @@ func (de *endpoint) setHeartbeatDisabled(v bool) { // discoverUDPRelayPathsLocked starts UDP relay path discovery. 
func (de *endpoint) discoverUDPRelayPathsLocked(now mono.Time) { - if !de.c.hasPeerRelayServers.Load() { - // Changes in this value between its access and the logic following - // are fine, we will eventually do the "right" thing during future path - // discovery. The worst case is we suppress path discovery for the - // current cycle, or we unnecessarily call into [relayManager] and do - // some wasted work. - return - } de.lastUDPRelayPathDiscovery = now lastBest := de.bestAddr lastBestIsTrusted := mono.Now().Before(de.trustBestAddrUntil) @@ -899,6 +891,14 @@ func (de *endpoint) wantUDPRelayPathDiscoveryLocked(now mono.Time) bool { if runtime.GOOS == "js" { return false } + if !de.c.hasPeerRelayServers.Load() { + // Changes in this value between its access and a call to + // [endpoint.discoverUDPRelayPathsLocked] are fine, we will eventually + // do the "right" thing during future path discovery. The worst case is + // we suppress path discovery for the current cycle, or we unnecessarily + // call into [relayManager] and do some wasted work. + return false + } if !de.relayCapable { return false } @@ -1013,14 +1013,18 @@ func (de *endpoint) discoPing(res *ipnstate.PingResult, size int, cb func(*ipnst // order to also try all candidate direct paths. fallthrough default: - // Ping all candidate direct paths. This work overlaps with what - // [de.heartbeat] will periodically fire when it calls - // [de.sendDiscoPingsLocked], but a user-initiated [pingCLI] is a - // "do it now" operation that should not be subject to + // Ping all candidate direct paths and start peer relay path discovery, + // if appropriate. This work overlaps with what [de.heartbeat] will + // periodically fire when it calls [de.sendDiscoPingsLocked] and + // [de.discoverUDPRelayPathsLocked], but a user-initiated [pingCLI] is + // a "do it now" operation that should not be subject to + // [heartbeatInterval] tick or [discoPingInterval] rate-limiting.
for ep := range de.endpointState { de.startDiscoPingLocked(epAddr{ap: ep}, now, pingCLI, size, resCB) } + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } } } @@ -1046,14 +1050,10 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } } else if !udpAddr.isDirect() || now.After(de.trustBestAddrUntil) { de.sendDiscoPingsLocked(now, true) + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } } - // TODO(jwhited): consider triggering UDP relay path discovery here under - // certain conditions. We currently only trigger it in heartbeat(), which - // is both good and bad. It's good because the first heartbeat() tick is 3s - // after the first packet, which gives us time to discover a UDP direct - // path and potentially avoid what would be wasted UDP relay path discovery - // work. It's bad because we might not discover a UDP direct path, and we - // incur a 3s delay before we try to discover a UDP relay path. de.noteTxActivityExtTriggerLocked(now) de.lastSendAny = now de.mu.Unlock() From 06f12186d9f4672ac0a0a493e29a260ca47afda6 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 6 Oct 2025 17:17:52 +0100 Subject: [PATCH 0552/1093] tstest/integration: test `tailscale up` when device approval is required This patch extends the integration tests for `tailscale up` to include tailnets where new devices need to be approved. It doesn't change the CLI, because it's mostly working correctly already -- these tests are just to prevent future regressions. I've added support for `MachineAuthorized` to mock control, and I've refactored `TestOneNodeUpAuth` to be more flexible. It now takes a sequence of steps to run and asserts whether we got a login URL and/or machine approval URL after each step. 
Updates tailscale/corp#31476 Updates #17361 Signed-off-by: Alex Chan --- tstest/integration/integration.go | 26 +- tstest/integration/integration_test.go | 267 ++++++++++++------ tstest/integration/testcontrol/testcontrol.go | 34 ++- 3 files changed, 224 insertions(+), 103 deletions(-) diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 3788f61495a08..374dffebe7734 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -1099,20 +1099,40 @@ func (tt *trafficTrap) ServeHTTP(w http.ResponseWriter, r *http.Request) { type authURLParserWriter struct { buf bytes.Buffer - fn func(urlStr string) error + // Handle login URLs, and count how many times they were seen + authURLFn func(urlStr string) error + // Handle machine approval URLs, and count how many times they were seen. + deviceApprovalURLFn func(urlStr string) error } +// Note: auth URLs from testcontrol look slightly different to real auth URLs, +// e.g. http://127.0.0.1:60456/auth/96af2ff7e04ae1499a9a var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`) +// Looks for any device approval URL, which is any URL ending with `/admin` +// e.g. 
http://127.0.0.1:60456/admin +var deviceApprovalURLRx = regexp.MustCompile(`(https?://\S+/admin)[^\S]`) + func (w *authURLParserWriter) Write(p []byte) (n int, err error) { n, err = w.buf.Write(p) + + defer w.buf.Reset() // so it's not matched again + m := authURLRx.FindSubmatch(w.buf.Bytes()) if m != nil { urlStr := string(m[1]) - w.buf.Reset() // so it's not matched again - if err := w.fn(urlStr); err != nil { + if err := w.authURLFn(urlStr); err != nil { return 0, err } } + + m = deviceApprovalURLRx.FindSubmatch(w.buf.Bytes()) + if m != nil && w.deviceApprovalURLFn != nil { + urlStr := string(m[1]) + if err := w.deviceApprovalURLFn(urlStr); err != nil { + return 0, err + } + } + return n, err } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index f7c133f5c5871..46b5c4fc7af2d 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -268,7 +268,65 @@ func TestStateSavedOnStart(t *testing.T) { d1.MustCleanShutdown(t) } +// This handler receives auth URLs, and logs into control. +// +// It counts how many URLs it sees, and will fail the test if it +// sees multiple login URLs. +func completeLogin(t *testing.T, control *testcontrol.Server, counter *atomic.Int32) func(string) error { + return func(urlStr string) error { + t.Logf("saw auth URL %q", urlStr) + if control.CompleteAuth(urlStr) { + if counter.Add(1) > 1 { + err := errors.New("completed multiple auth URLs") + t.Error(err) + return err + } + t.Logf("completed login to %s", urlStr) + return nil + } else { + err := fmt.Errorf("failed to complete initial login to %q", urlStr) + t.Fatal(err) + return err + } + } +} + +// This handler receives device approval URLs, and approves the device. +// +// It counts how many URLs it sees, and will fail the test if it +// sees multiple device approval URLs. 
+func completeDeviceApproval(t *testing.T, node *TestNode, counter *atomic.Int32) func(string) error { + return func(urlStr string) error { + control := node.env.Control + nodeKey := node.MustStatus().Self.PublicKey + t.Logf("saw device approval URL %q", urlStr) + if control.CompleteDeviceApproval(&nodeKey) { + if counter.Add(1) > 1 { + err := errors.New("completed multiple device approval URLs") + t.Error(err) + return err + } + t.Log("completed device approval") + return nil + } else { + err := errors.New("failed to complete device approval") + t.Fatal(err) + return err + } + } +} + func TestOneNodeUpAuth(t *testing.T) { + type step struct { + args []string + // + // Do we expect to log in again with a new /auth/ URL? + wantAuthURL bool + // + // Do we expect to need a device approval URL? + wantDeviceApprovalURL bool + } + for _, tt := range []struct { name string args []string @@ -276,65 +334,112 @@ func TestOneNodeUpAuth(t *testing.T) { // What auth key should we use for control? authKey string // - // Is tailscaled already logged in before we run this `up` command? - alreadyLoggedIn bool + // Do we require device approval in the tailnet? + requireDeviceApproval bool // - // Do we need to log in again with a new /auth/ URL? - needsNewAuthURL bool + // What CLI commands should we run in this test? 
+ steps []step }{ { - name: "up", - args: []string{"up"}, - needsNewAuthURL: true, + name: "up", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + }, + }, + { + name: "up-with-machine-auth", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + }, + requireDeviceApproval: true, + }, + { + name: "up-with-force-reauth", + steps: []step{ + {args: []string{"up", "--force-reauth"}, wantAuthURL: true}, + }, + }, + { + name: "up-with-auth-key", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + }, }, { - name: "up-with-force-reauth", - args: []string{"up", "--force-reauth"}, - needsNewAuthURL: true, + name: "up-with-auth-key-with-machine-auth", + authKey: "opensesame", + steps: []step{ + { + args: []string{"up", "--auth-key=opensesame"}, + wantAuthURL: false, + wantDeviceApprovalURL: true, + }, + }, + requireDeviceApproval: true, }, { - name: "up-with-auth-key", - args: []string{"up", "--auth-key=opensesame"}, - authKey: "opensesame", - needsNewAuthURL: false, + name: "up-with-force-reauth-and-auth-key", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--force-reauth", "--auth-key=opensesame"}}, + }, }, { - name: "up-with-force-reauth-and-auth-key", - args: []string{"up", "--force-reauth", "--auth-key=opensesame"}, - authKey: "opensesame", - needsNewAuthURL: false, + name: "up-after-login", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + {args: []string{"up"}, wantAuthURL: false}, + }, }, { - name: "up-after-login", - args: []string{"up"}, - alreadyLoggedIn: true, - needsNewAuthURL: false, + name: "up-after-login-with-machine-approval", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + {args: []string{"up"}, wantAuthURL: false, wantDeviceApprovalURL: false}, + }, + requireDeviceApproval: true, }, { - name: "up-with-force-reauth-after-login", - args: []string{"up", "--force-reauth"}, - alreadyLoggedIn: 
true, - needsNewAuthURL: true, + name: "up-with-force-reauth-after-login", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + {args: []string{"up", "--force-reauth"}, wantAuthURL: true}, + }, }, { - name: "up-with-auth-key-after-login", - args: []string{"up", "--auth-key=opensesame"}, - authKey: "opensesame", - alreadyLoggedIn: true, - needsNewAuthURL: false, + name: "up-with-force-reauth-after-login-with-machine-approval", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + {args: []string{"up", "--force-reauth"}, wantAuthURL: true, wantDeviceApprovalURL: false}, + }, + requireDeviceApproval: true, }, { - name: "up-with-force-reauth-and-auth-key-after-login", - args: []string{"up", "--force-reauth", "--auth-key=opensesame"}, - authKey: "opensesame", - alreadyLoggedIn: true, - needsNewAuthURL: false, + name: "up-with-auth-key-after-login", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + {args: []string{"up", "--auth-key=opensesame"}}, + }, + }, + { + name: "up-with-force-reauth-and-auth-key-after-login", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + {args: []string{"up", "--force-reauth", "--auth-key=opensesame"}}, + }, }, } { tstest.Shard(t) for _, useSeamlessKeyRenewal := range []bool{true, false} { - t.Run(fmt.Sprintf("%s-seamless-%t", tt.name, useSeamlessKeyRenewal), func(t *testing.T) { + name := tt.name + if useSeamlessKeyRenewal { + name += "-with-seamless" + } + t.Run(name, func(t *testing.T) { tstest.Parallel(t) env := NewTestEnv(t, ConfigureControl( @@ -345,6 +450,10 @@ func TestOneNodeUpAuth(t *testing.T) { control.RequireAuth = true } + if tt.requireDeviceApproval { + control.RequireMachineAuth = true + } + control.AllNodesSameUser = true if useSeamlessKeyRenewal { @@ -359,69 +468,45 @@ func TestOneNodeUpAuth(t *testing.T) { d1 := n1.StartDaemon() defer d1.MustCleanShutdown(t) - cmdArgs := append(tt.args, 
"--login-server="+env.ControlURL()) - - // This handler looks for /auth/ URLs in the stdout from "tailscale up", - // and if it sees them, completes the auth process. - // - // It counts how many auth URLs it's seen. - var authCountAtomic atomic.Int32 - authURLHandler := &authURLParserWriter{fn: func(urlStr string) error { - t.Logf("saw auth URL %q", urlStr) - if env.Control.CompleteAuth(urlStr) { - if authCountAtomic.Add(1) > 1 { - err := errors.New("completed multiple auth URLs") - t.Error(err) - return err - } - t.Logf("completed login to %s", urlStr) - return nil - } else { - err := fmt.Errorf("Failed to complete initial login to %q", urlStr) - t.Fatal(err) - return err + for i, step := range tt.steps { + t.Logf("Running step %d", i) + cmdArgs := append(step.args, "--login-server="+env.ControlURL()) + + t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) + + var authURLCount atomic.Int32 + var deviceApprovalURLCount atomic.Int32 + + handler := &authURLParserWriter{ + authURLFn: completeLogin(t, env.Control, &authURLCount), + deviceApprovalURLFn: completeDeviceApproval(t, n1, &deviceApprovalURLCount), } - }} - - // If we should be logged in at the start of the test case, go ahead - // and run the login command. - // - // Otherwise, just wait for tailscaled to be listening. - if tt.alreadyLoggedIn { - t.Logf("Running initial login: %s", strings.Join(cmdArgs, " ")) + cmd := n1.Tailscale(cmdArgs...) - cmd.Stdout = authURLHandler + cmd.Stdout = handler + cmd.Stdout = handler cmd.Stderr = cmd.Stdout if err := cmd.Run(); err != nil { t.Fatalf("up: %v", err) } - authCountAtomic.Store(0) - n1.AwaitRunning() - } else { - n1.AwaitListening() - } - - st := n1.MustStatus() - t.Logf("Status: %s", st.BackendState) - t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) - cmd := n1.Tailscale(cmdArgs...) 
- cmd.Stdout = authURLHandler - cmd.Stderr = cmd.Stdout - - if err := cmd.Run(); err != nil { - t.Fatalf("up: %v", err) - } - t.Logf("Got IP: %v", n1.AwaitIP4()) + n1.AwaitRunning() - n1.AwaitRunning() + var wantAuthURLCount int32 + if step.wantAuthURL { + wantAuthURLCount = 1 + } + if n := authURLCount.Load(); n != wantAuthURLCount { + t.Errorf("Auth URLs completed = %d; want %d", n, wantAuthURLCount) + } - var expectedAuthUrls int32 - if tt.needsNewAuthURL { - expectedAuthUrls = 1 - } - if n := authCountAtomic.Load(); n != expectedAuthUrls { - t.Errorf("Auth URLs completed = %d; want %d", n, expectedAuthUrls) + var wantDeviceApprovalURLCount int32 + if step.wantDeviceApprovalURL { + wantDeviceApprovalURLCount = 1 + } + if n := deviceApprovalURLCount.Load(); n != wantDeviceApprovalURLCount { + t.Errorf("Device approval URLs completed = %d; want %d", n, wantDeviceApprovalURLCount) + } } }) } diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index ac7804918f6cc..58ca956ce4024 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -50,14 +50,15 @@ const msgLimit = 1 << 20 // encrypted message length limit // Server is a control plane server. Its zero value is ready for use. // Everything is stored in-memory in one tailnet. 
type Server struct { - Logf logger.Logf // nil means to use the log package - DERPMap *tailcfg.DERPMap // nil means to use prod DERP map - RequireAuth bool - RequireAuthKey string // required authkey for all nodes - Verbose bool - DNSConfig *tailcfg.DNSConfig // nil means no DNS config - MagicDNSDomain string - C2NResponses syncs.Map[string, func(*http.Response)] // token => onResponse func + Logf logger.Logf // nil means to use the log package + DERPMap *tailcfg.DERPMap // nil means to use prod DERP map + RequireAuth bool + RequireAuthKey string // required authkey for all nodes + RequireMachineAuth bool + Verbose bool + DNSConfig *tailcfg.DNSConfig // nil means no DNS config + MagicDNSDomain string + C2NResponses syncs.Map[string, func(*http.Response)] // token => onResponse func // PeerRelayGrants, if true, inserts relay capabilities into the wildcard // grants rules. @@ -686,6 +687,21 @@ func (s *Server) CompleteAuth(authPathOrURL string) bool { return true } +func (s *Server) CompleteDeviceApproval(nodeKey *key.NodePublic) bool { + s.mu.Lock() + defer s.mu.Unlock() + + node, ok := s.nodes[*nodeKey] + if !ok { + return false + } + + sendUpdate(s.updates[node.ID], updateSelfChanged) + + node.MachineAuthorized = true + return true +} + func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key.MachinePublic) { msg, err := io.ReadAll(io.LimitReader(r.Body, msgLimit)) r.Body.Close() @@ -761,7 +777,7 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. 
s.nodes = map[key.NodePublic]*tailcfg.Node{} } _, ok := s.nodes[nk] - machineAuthorized := true // TODO: add Server.RequireMachineAuth + machineAuthorized := !s.RequireMachineAuth if !ok { nodeID := len(s.nodes) + 1 From bb6bd465702d930af0a86acac1a38d1e9c669d97 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 6 Oct 2025 18:36:52 +0100 Subject: [PATCH 0553/1093] tstest/integration: log all the output printed by `tailscale up` Updates tailscale/corp#31476 Updates #17361 Signed-off-by: Alex Chan --- tstest/integration/integration.go | 3 +++ tstest/integration/integration_test.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 374dffebe7734..6700205cf8f55 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -1098,6 +1098,7 @@ func (tt *trafficTrap) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type authURLParserWriter struct { + t *testing.T buf bytes.Buffer // Handle login URLs, and count how many times they were seen authURLFn func(urlStr string) error @@ -1114,6 +1115,8 @@ var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`) var deviceApprovalURLRx = regexp.MustCompile(`(https?://\S+/admin)[^\S]`) func (w *authURLParserWriter) Write(p []byte) (n int, err error) { + w.t.Helper() + w.t.Logf("received bytes: %s", string(p)) n, err = w.buf.Write(p) defer w.buf.Reset() // so it's not matched again diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 46b5c4fc7af2d..29a036cd60082 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -477,7 +477,7 @@ func TestOneNodeUpAuth(t *testing.T) { var authURLCount atomic.Int32 var deviceApprovalURLCount atomic.Int32 - handler := &authURLParserWriter{ + handler := &authURLParserWriter{t: t, authURLFn: completeLogin(t, env.Control, &authURLCount), deviceApprovalURLFn: completeDeviceApproval(t, 
n1, &deviceApprovalURLCount), } From b7fe1cea9f17a05d5076c17b95c967013aa1c3d6 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 6 Oct 2025 17:17:52 +0100 Subject: [PATCH 0554/1093] cmd/tailscale/cli: only print authURLs and device approval URLs once This patch fixes several issues related to printing login and device approval URLs, especially when `tailscale up` is interrupted: 1. Only print a login URL that will cause `tailscale up` to complete. Don't print expired URLs or URLs from previous login attempts. 2. Print the device approval URL if you run `tailscale up` after previously completing a login, but before approving the device. 3. Use the correct control URL for device approval if you run a bare `tailscale up` after previously completing a login, but before approving the device. 4. Don't print the device approval URL more than once (or at least, not consecutively). Updates tailscale/corp#31476 Updates #17361 ## How these fixes work This patch went through a lot of trial and error, and there may still be bugs! These notes capture the different scenarios and considerations as we wrote it, which are also captured by integration tests. 1. We were getting stale login URLs from the initial IPN state notification. When the IPN watcher was moved to before Start() in c011369, we mistakenly continued to request the initial state. This is only necessary if you start watching after you call Start(), because you may have missed some notifications. By getting the initial state before calling Start(), we'd get a stale login URL. If you clicked that URL, you could complete the login in the control server (if it wasn't expired), but your instance of `tailscale up` would hang, because it's listening for login updates from a different login URL. In this patch, we no longer request the initial state, and so we don't print a stale URL. 2. 
Once you skip the initial state from IPN, the following sequence: * Run `tailscale up` * Log into a tailnet with device approval * ^C after the device approval URL is printed, but without approving * Run `tailscale up` again means that nothing would ever be printed. `tailscale up` would send tailscaled the pref `WantRunning: true`, but that was already the case so nothing changes. You never get any IPN notifications, and in particular you never get a state change to `NeedsMachineAuth`. This means we'd never print the device approval URL. In this patch, we add a hard-coded rule that if you're doing a simple up (which won't trigger any other IPN notifications) and you start in the `NeedsMachineAuth` state, we print the device approval message without waiting for an IPN notification. 3. Consider the following sequence: * Run `tailscale up --login-server=` * Log into a tailnet with device approval * ^C after the device approval URL is printed, but without approving * Run `tailscale up` again We'd print the device approval URL for the default control server, rather than the real control server, because we were using the `prefs` from the CLI arguments (which are all the defaults) rather than the `curPrefs` (which contain the custom login server). In this patch, we use the `prefs` if the user has specified any settings (and other code will ensure this is a complete set of settings) or `curPrefs` if it's a simple `tailscale up`. 4. Consider the following sequence: you've logged in, but not completed device approval, and you run `down` and `up` in quick succession. * `up`: sees state=NeedsMachineAuth * `up`: sends `{wantRunning: true}`, prints out the device approval URL * `down`: changes state to Stopped * `up`: changes state to Starting * tailscaled: changes state to NeedsMachineAuth * `up`: gets an IPN notification with the state change, and prints a second device approval URL Either URL works, but this is annoying for the user. 
In this patch, we track whether the last printed URL was the device approval URL, and if so, we skip printing it a second time. Signed-off-by: Alex Chan --- cmd/tailscale/cli/up.go | 48 ++++- tstest/integration/integration_test.go | 181 +++++++++++++++++- tstest/integration/testcontrol/testcontrol.go | 10 +- 3 files changed, 226 insertions(+), 13 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 90c9c23af7c37..07e008aab69c7 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -357,6 +357,13 @@ func netfilterModeFromFlag(v string) (_ preftype.NetfilterMode, warning string, // It returns simpleUp if we're running a simple "tailscale up" to // transition to running from a previously-logged-in but down state, // without changing any settings. +// +// Note this can also mutate prefs to add implicit preferences for the +// user operator. +// +// TODO(alexc): the name of this function is confusing, and perhaps a +// sign that it's doing too much. Consider refactoring this so it's just +// telling the caller what to do next, but not changing anything itself. func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, justEditMP *ipn.MaskedPrefs, err error) { if !env.upArgs.reset { applyImplicitPrefs(prefs, curPrefs, env) @@ -497,6 +504,8 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } + effectivePrefs := curPrefs + if cmd == "up" { // "tailscale up" should not be able to change the // profile name. @@ -546,10 +555,8 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE // or we could miss IPN notifications. // // In particular, if we're doing a force-reauth, we could miss the - // notification with the auth URL we should print for the user. The - // initial state could contain the auth URL, but only if IPN is in the - // NeedsLogin state -- sometimes it's in Starting, and we don't get the URL. 
- watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState) + // notification with the auth URL we should print for the user. + watcher, err := localClient.WatchIPNBus(watchCtx, 0) if err != nil { return err } @@ -591,6 +598,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } + effectivePrefs = prefs if upArgs.forceReauth || !st.HaveNodeKey { err := localClient.StartLoginInteractive(ctx) if err != nil { @@ -604,7 +612,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE go func() { var printed bool // whether we've yet printed anything to stdout or stderr - var lastURLPrinted string + lastURLPrinted := "" // If we're doing a force-reauth, we need to get two notifications: // @@ -617,6 +625,15 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE ipnIsRunning := false waitingForKeyChange := upArgs.forceReauth + // If we're doing a simple up (i.e. `tailscale up`, no flags) and + // the initial state is NeedsMachineAuth, then we never receive a + // state notification from ipn, so we print the device approval URL + // immediately. 
+ if simpleUp && st.BackendState == ipn.NeedsMachineAuth.String() { + printed = true + printDeviceApprovalInfo(env.upArgs.json, effectivePrefs, &lastURLPrinted) + } + for { n, err := watcher.Next() if err != nil { @@ -629,11 +646,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } if s := n.State; s != nil && *s == ipn.NeedsMachineAuth { printed = true - if env.upArgs.json { - printUpDoneJSON(ipn.NeedsMachineAuth, "") - } else { - fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL(policyclient.Get())) - } + printDeviceApprovalInfo(env.upArgs.json, effectivePrefs, &lastURLPrinted) } if s := n.State; s != nil { ipnIsRunning = *s == ipn.Running @@ -737,6 +750,21 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } +func printDeviceApprovalInfo(printJson bool, prefs *ipn.Prefs, lastURLPrinted *string) { + if printJson { + printUpDoneJSON(ipn.NeedsMachineAuth, "") + } else { + deviceApprovalURL := prefs.AdminPageURL(policyclient.Get()) + + if lastURLPrinted != nil && deviceApprovalURL == *lastURLPrinted { + return + } + + *lastURLPrinted = deviceApprovalURL + errf("\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", deviceApprovalURL) + } +} + // upWorthWarning reports whether the health check message s is worth warning // about during "tailscale up". Many of the health checks are noisy or confusing // or very ephemeral and happen especially briefly at startup. diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 29a036cd60082..2e85bc8be2bb9 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -294,13 +294,14 @@ func completeLogin(t *testing.T, control *testcontrol.Server, counter *atomic.In // This handler receives device approval URLs, and approves the device. 
// // It counts how many URLs it sees, and will fail the test if it -// sees multiple device approval URLs. +// sees multiple device approval URLs, or if you try to approve a device +// with the wrong control server. func completeDeviceApproval(t *testing.T, node *TestNode, counter *atomic.Int32) func(string) error { return func(urlStr string) error { control := node.env.Control nodeKey := node.MustStatus().Self.PublicKey t.Logf("saw device approval URL %q", urlStr) - if control.CompleteDeviceApproval(&nodeKey) { + if control.CompleteDeviceApproval(node.env.ControlURL(), urlStr, &nodeKey) { if counter.Add(1) > 1 { err := errors.New("completed multiple device approval URLs") t.Error(err) @@ -513,6 +514,182 @@ func TestOneNodeUpAuth(t *testing.T) { } } +// Returns true if the error returned by [exec.Run] fails with a non-zero +// exit code, false otherwise. +func isNonZeroExitCode(err error) bool { + if err == nil { + return false + } + + exitError, ok := err.(*exec.ExitError) + if !ok { + return false + } + + return exitError.ExitCode() != 0 +} + +// If we interrupt `tailscale up` and then run it again, we should only +// print a single auth URL. +func TestOneNodeUpInterruptedAuth(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + control.RequireAuth = true + control.AllNodesSameUser = true + }, + )) + + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + cmdArgs := []string{"up", "--login-server=" + env.ControlURL()} + + // The first time we run the command, we wait for an auth URL to be + // printed, and then we cancel the command -- equivalent to ^C. + // + // At this point, we've connected to control to get an auth URL, + // and printed it in the CLI, but not clicked it. + t.Logf("Running command for the first time: %s", strings.Join(cmdArgs, " ")) + cmd1 := n.Tailscale(cmdArgs...) 
+ + // This handler watches for auth URLs in stdout, then cancels the + // running `tailscale up` CLI command. + cmd1.Stdout = &authURLParserWriter{t: t, authURLFn: func(urlStr string) error { + t.Logf("saw auth URL %q", urlStr) + cmd1.Process.Kill() + return nil + }} + cmd1.Stderr = cmd1.Stdout + + if err := cmd1.Run(); !isNonZeroExitCode(err) { + t.Fatalf("Command did not fail with non-zero exit code: %q", err) + } + + // Because we didn't click the auth URL, we should still be in NeedsLogin. + n.AwaitBackendState("NeedsLogin") + + // The second time we run the command, we click the first auth URL we see + // and check that we log in correctly. + // + // In #17361, there was a bug where we'd print two auth URLs, and you could + // click either auth URL and log in to control, but logging in through the + // first URL would leave `tailscale up` hanging. + // + // Using `authURLHandler` ensures we only print the new, correct auth URL. + // + // If we print both URLs, it will throw an error because it only expects + // to log in with one auth URL. + // + // If we only print the stale auth URL, the test will timeout because + // `tailscale up` will never return. + t.Logf("Running command for the second time: %s", strings.Join(cmdArgs, " ")) + + var authURLCount atomic.Int32 + + cmd2 := n.Tailscale(cmdArgs...) + cmd2.Stdout = &authURLParserWriter{ + t: t, authURLFn: completeLogin(t, env.Control, &authURLCount), + } + cmd2.Stderr = cmd2.Stdout + + if err := cmd2.Run(); err != nil { + t.Fatalf("up: %v", err) + } + + if urls := authURLCount.Load(); urls != 1 { + t.Errorf("Auth URLs completed = %d; want %d", urls, 1) + } + + n.AwaitRunning() +} + +// If we interrupt `tailscale up` and login successfully, but don't +// complete the device approval, we should see the device approval URL +// when we run `tailscale up` a second time. 
+func TestOneNodeUpInterruptedDeviceApproval(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + control.RequireAuth = true + control.RequireMachineAuth = true + control.AllNodesSameUser = true + }, + )) + + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + // The first time we run the command, we: + // + // * set a custom login URL + // * wait for an auth URL to be printed + // * click it to complete the login process + // * wait for a device approval URL to be printed + // * cancel the command, equivalent to ^C + // + // At this point, we've logged in to control, but our node isn't + // approved to connect to the tailnet. + cmd1Args := []string{"up", "--login-server=" + env.ControlURL()} + t.Logf("Running command: %s", strings.Join(cmd1Args, " ")) + cmd1 := n.Tailscale(cmd1Args...) + + handler1 := &authURLParserWriter{t: t, + authURLFn: completeLogin(t, env.Control, &atomic.Int32{}), + deviceApprovalURLFn: func(urlStr string) error { + t.Logf("saw device approval URL %q", urlStr) + cmd1.Process.Kill() + return nil + }, + } + cmd1.Stdout = handler1 + cmd1.Stderr = cmd1.Stdout + + if err := cmd1.Run(); !isNonZeroExitCode(err) { + t.Fatalf("Command did not fail with non-zero exit code: %q", err) + } + + // Because we logged in but we didn't complete the device approval, we + // should be in state NeedsMachineAuth. + n.AwaitBackendState("NeedsMachineAuth") + + // The second time we run the command, we expect not to get an auth URL + // and go straight to the device approval URL. We don't need to pass the + // login server, because `tailscale up` should remember our control URL. + cmd2Args := []string{"up"} + t.Logf("Running command: %s", strings.Join(cmd2Args, " ")) + + var deviceApprovalURLCount atomic.Int32 + + cmd2 := n.Tailscale(cmd2Args...) 
+ cmd2.Stdout = &authURLParserWriter{t: t, + authURLFn: func(urlStr string) error { + t.Fatalf("got unexpected auth URL: %q", urlStr) + cmd2.Process.Kill() + return nil + }, + deviceApprovalURLFn: completeDeviceApproval(t, n, &deviceApprovalURLCount), + } + cmd2.Stderr = cmd2.Stdout + + if err := cmd2.Run(); err != nil { + t.Fatalf("up: %v", err) + } + + wantDeviceApprovalURLCount := int32(1) + if n := deviceApprovalURLCount.Load(); n != wantDeviceApprovalURLCount { + t.Errorf("Device approval URLs completed = %d; want %d", n, wantDeviceApprovalURLCount) + } + + n.AwaitRunning() +} + func TestConfigFileAuthKey(t *testing.T) { tstest.SkipOnUnshardedCI(t) tstest.Shard(t) diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 58ca956ce4024..f9a33705b7f56 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -687,7 +687,11 @@ func (s *Server) CompleteAuth(authPathOrURL string) bool { return true } -func (s *Server) CompleteDeviceApproval(nodeKey *key.NodePublic) bool { +// Complete the device approval for this node. +// +// This function returns false if the node does not exist, or you try to +// approve a device against a different control server. 
+func (s *Server) CompleteDeviceApproval(controlUrl string, urlStr string, nodeKey *key.NodePublic) bool { s.mu.Lock() defer s.mu.Unlock() @@ -696,6 +700,10 @@ func (s *Server) CompleteDeviceApproval(nodeKey *key.NodePublic) bool { return false } + if urlStr != controlUrl+"/admin" { + return false + } + sendUpdate(s.updates[node.ID], updateSelfChanged) node.MachineAuthorized = true From 7edb5b7d4394e322298b7c6f86ce73215224b5bc Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 8 Oct 2025 14:37:47 -0400 Subject: [PATCH 0555/1093] flake.nix: update Nix to use tailscale/go 1.25.2 (#17500) Update Nix flake to use go 1.25.2 Create the hash from the toolchain rev file automatically from update-flake.sh Updates tailscale/go#135 Signed-off-by: Mike O'Driscoll --- flake.nix | 6 +++--- go.toolchain.rev.sri | 1 + go.toolchain.version | 1 + pull-toolchain.sh | 6 +++++- update-flake.sh | 8 ++++++++ 5 files changed, 18 insertions(+), 4 deletions(-) create mode 100644 go.toolchain.rev.sri create mode 100644 go.toolchain.version diff --git a/flake.nix b/flake.nix index 9481248f0596c..726757f7a76b7 100644 --- a/flake.nix +++ b/flake.nix @@ -46,9 +46,9 @@ systems, flake-compat, }: let - goVersion = "1.25.1"; + goVersion = nixpkgs.lib.fileContents ./go.toolchain.version; toolChainRev = nixpkgs.lib.fileContents ./go.toolchain.rev; - gitHash = "sha256-1OCmJ7sZL6G/6wO2+lnW4uYPCIdbXhscD5qSTIPoxDk="; + gitHash = nixpkgs.lib.fileContents ./go.toolchain.rev.sri; eachSystem = f: nixpkgs.lib.genAttrs (import systems) (system: f (import nixpkgs { @@ -61,7 +61,7 @@ owner = "tailscale"; repo = "go"; rev = toolChainRev; - hash = gitHash; + sha256 = gitHash; }; }; }) diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri new file mode 100644 index 0000000000000..9cbf36b930e46 --- /dev/null +++ b/go.toolchain.rev.sri @@ -0,0 +1 @@ +sha256-1OCmJ7sZL6G/6wO2+lnW4uYPCIdbXhscD5qSTIPoxDk= diff --git a/go.toolchain.version b/go.toolchain.version new file mode 100644 index 
0000000000000..61b813d5e6327 --- /dev/null +++ b/go.toolchain.version @@ -0,0 +1 @@ +1.25.2 diff --git a/pull-toolchain.sh b/pull-toolchain.sh index f5a19e7d75de1..eb8febf6bb32d 100755 --- a/pull-toolchain.sh +++ b/pull-toolchain.sh @@ -11,6 +11,10 @@ if [ "$upstream" != "$current" ]; then echo "$upstream" >go.toolchain.rev fi -if [ -n "$(git diff-index --name-only HEAD -- go.toolchain.rev)" ]; then +./tool/go version 2>/dev/null | awk '{print $3}' | sed 's/^go//' > go.toolchain.version + +./update-flake.sh + +if [ -n "$(git diff-index --name-only HEAD -- go.toolchain.rev go.toolchain.rev.sri go.toolchain.version)" ]; then echo "pull-toolchain.sh: changes imported. Use git commit to make them permanent." >&2 fi diff --git a/update-flake.sh b/update-flake.sh index 4561183b89f3f..c22572b860248 100755 --- a/update-flake.sh +++ b/update-flake.sh @@ -10,6 +10,14 @@ rm -rf "$OUT" ./tool/go run tailscale.com/cmd/nardump --sri "$OUT" >go.mod.sri rm -rf "$OUT" +GOOUT=$(mktemp -d -t gocross-XXXXXX) +GOREV=$(xargs < ./go.toolchain.rev) +TARBALL="$GOOUT/go-$GOREV.tar.gz" +curl -Ls -o "$TARBALL" "https://github.com/tailscale/go/archive/$GOREV.tar.gz" +tar -xzf "$TARBALL" -C "$GOOUT" +./tool/go run tailscale.com/cmd/nardump --sri "$GOOUT/go-$GOREV" > go.toolchain.rev.sri +rm -rf "$GOOUT" + # nix-direnv only watches the top-level nix file for changes. As a # result, when we change a referenced SRI file, we have to cause some # change to shell.nix and flake.nix as well, so that nix-direnv From f270c3158a3d568ffbe5387b3cf0cbed042b67d3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Oct 2025 09:27:41 -0700 Subject: [PATCH 0556/1093] net/dns, ipn/ipnlocal: fix regressions from change moving away from deephash I got sidetracked apparently and never finished writing this Clone code in 316afe7d02babc (#17448). (It really should use views instead.) And then I missed one of the users of "routerChanged" that was broken up into "routerChanged" vs "dnsChanged". 
This broke integration tests elsewhere. Fixes #17506 Change-Id: I533bf0fcf3da9ac6eb4a6cdef03b8df2c1fb4c8e Signed-off-by: Brad Fitzpatrick --- net/dns/config.go | 11 +++++- net/dns/config_test.go | 66 +++++++++++++++++++++++++++++++++ util/checkchange/checkchange.go | 2 +- wgengine/userspace.go | 22 +++++++++-- 4 files changed, 94 insertions(+), 7 deletions(-) create mode 100644 net/dns/config_test.go diff --git a/net/dns/config.go b/net/dns/config.go index 22caf6ef54909..6c170f19baaa1 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -7,6 +7,7 @@ package dns import ( "bufio" "fmt" + "maps" "net/netip" "reflect" "slices" @@ -190,15 +191,21 @@ func sameResolverNames(a, b []*dnstype.Resolver) bool { return true } +// Clone makes a shallow clone of c. +// +// The returned Config still references slices and maps from c. +// +// TODO(bradfitz): use cmd/{viewer,cloner} for these and make the +// caller use views instead. func (c *Config) Clone() *Config { if c == nil { return nil } return &Config{ DefaultResolvers: slices.Clone(c.DefaultResolvers), - Routes: make(map[dnsname.FQDN][]*dnstype.Resolver, len(c.Routes)), + Routes: maps.Clone(c.Routes), SearchDomains: slices.Clone(c.SearchDomains), - Hosts: make(map[dnsname.FQDN][]netip.Addr, len(c.Hosts)), + Hosts: maps.Clone(c.Hosts), OnlyIPv6: c.OnlyIPv6, } } diff --git a/net/dns/config_test.go b/net/dns/config_test.go new file mode 100644 index 0000000000000..684dea6bc60d2 --- /dev/null +++ b/net/dns/config_test.go @@ -0,0 +1,66 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package dns + +import ( + "net/netip" + "reflect" + "testing" + + "tailscale.com/types/dnstype" + "tailscale.com/util/dnsname" +) + +func TestConfigClone(t *testing.T) { + tests := []struct { + name string + conf *Config + }{ + { + name: "nil", + conf: nil, + }, + { + name: "empty", + conf: &Config{}, + }, + { + name: "full", + conf: &Config{ + DefaultResolvers: []*dnstype.Resolver{ + { + Addr: "abc", + 
BootstrapResolution: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + UseWithExitNode: true, + }, + }, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{ + "foo.bar.": { + { + Addr: "abc", + BootstrapResolution: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, + UseWithExitNode: true, + }, + }, + }, + SearchDomains: []dnsname.FQDN{"bar.baz."}, + Hosts: map[dnsname.FQDN][]netip.Addr{ + "host.bar.": {netip.MustParseAddr("5.6.7.8")}, + }, + OnlyIPv6: true, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.conf.Clone() + if !reflect.DeepEqual(got, tt.conf) { + t.Error("Cloned result is not reflect.DeepEqual") + } + if !got.Equal(tt.conf) { + t.Error("Cloned result is not Equal") + } + }) + } +} diff --git a/util/checkchange/checkchange.go b/util/checkchange/checkchange.go index 4d18730f16e0f..8ba64720d7e14 100644 --- a/util/checkchange/checkchange.go +++ b/util/checkchange/checkchange.go @@ -17,7 +17,7 @@ type EqualCloner[T any] interface { // // It only modifies *old if they are different. old must be non-nil. 
func Update[T EqualCloner[T]](old *T, new T) (changed bool) { - if new.Equal(*old) { + if (*old).Equal(new) { return false } *old = new.Clone() diff --git a/wgengine/userspace.go b/wgengine/userspace.go index fa2379288d0ee..9f42dae2a8676 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -965,8 +965,9 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, isSubnetRouterChanged := isSubnetRouter != e.lastIsSubnetRouter engineChanged := checkchange.Update(&e.lastEngineFull, cfg) - dnsChanged := checkchange.Update(&e.lastDNSConfig, dnsCfg) + dnsChanged := buildfeatures.HasDNS && checkchange.Update(&e.lastDNSConfig, dnsCfg) routerChanged := checkchange.Update(&e.lastRouter, routerCfg) + listenPortChanged := listenPort != e.magicConn.LocalPort() peerMTUChanged := peerMTUEnable != e.magicConn.PeerMTUEnabled() if !engineChanged && !routerChanged && !dnsChanged && !listenPortChanged && !isSubnetRouterChanged && !peerMTUChanged { @@ -987,7 +988,9 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // instead have ipnlocal populate a map of DNS IP => linkName and // put that in the *dns.Config instead, and plumb it down to the // dns.Manager. Maybe also with isLocalAddr above. - e.isDNSIPOverTailscale.Store(ipset.NewContainsIPFunc(views.SliceOf(dnsIPsOverTailscale(dnsCfg, routerCfg)))) + if buildfeatures.HasDNS { + e.isDNSIPOverTailscale.Store(ipset.NewContainsIPFunc(views.SliceOf(dnsIPsOverTailscale(dnsCfg, routerCfg)))) + } // See if any peers have changed disco keys, which means they've restarted. // If so, we need to update the wireguard-go/device.Device in two phases: @@ -1063,7 +1066,18 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, if err != nil { return err } + } + // We've historically re-set DNS even after just a router change. 
While + // refactoring in tailscale/tailscale#17448 and and + // tailscale/tailscale#17499, I'm erring on the side of keeping that + // historical quirk for now (2025-10-08), lest it's load bearing in + // unexpected ways + // + // TODO(bradfitz): try to do the "configuring DNS" part below only if + // dnsChanged, not routerChanged. The "resolver.ShouldUseRoutes" part + // probably needs to keep happening for both. + if buildfeatures.HasDNS && (routerChanged || dnsChanged) { if resolver.ShouldUseRoutes(e.controlKnobs) { e.logf("wgengine: Reconfig: user dialer") e.dialer.SetRoutes(routerCfg.Routes, routerCfg.LocalRoutes) @@ -1075,7 +1089,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // DNS managers refuse to apply settings if the device has no // assigned address. e.logf("wgengine: Reconfig: configuring DNS") - err = e.dns.Set(*dnsCfg) + err := e.dns.Set(*dnsCfg) e.health.SetDNSHealth(err) if err != nil { return err @@ -1097,7 +1111,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, } } - if isSubnetRouterChanged && e.birdClient != nil { + if buildfeatures.HasBird && isSubnetRouterChanged && e.birdClient != nil { e.logf("wgengine: Reconfig: configuring BIRD") var err error if isSubnetRouter { From 91239327100db0bc588530d5a44172add767f195 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Oct 2025 18:16:15 -0700 Subject: [PATCH 0557/1093] net/dns, wgengine: use viewer/cloner for Config Per earlier TODO. 
Updates #17506 Change-Id: I21fe851c4bcced98fcee844cb428ca9c2f6b0588 Signed-off-by: Brad Fitzpatrick --- net/dns/config.go | 22 +------ net/dns/config_test.go | 66 -------------------- net/dns/dns_clone.go | 74 ++++++++++++++++++++++ net/dns/dns_view.go | 138 +++++++++++++++++++++++++++++++++++++++++ wgengine/userspace.go | 13 ++-- 5 files changed, 222 insertions(+), 91 deletions(-) delete mode 100644 net/dns/config_test.go create mode 100644 net/dns/dns_clone.go create mode 100644 net/dns/dns_view.go diff --git a/net/dns/config.go b/net/dns/config.go index 6c170f19baaa1..2425b304dffd8 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:generate go run tailscale.com/cmd/viewer --type=Config --clonefunc + // Package dns contains code to configure and manage DNS settings. package dns import ( "bufio" "fmt" - "maps" "net/netip" "reflect" "slices" @@ -191,25 +192,6 @@ func sameResolverNames(a, b []*dnstype.Resolver) bool { return true } -// Clone makes a shallow clone of c. -// -// The returned Config still references slices and maps from c. -// -// TODO(bradfitz): use cmd/{viewer,cloner} for these and make the -// caller use views instead. 
-func (c *Config) Clone() *Config { - if c == nil { - return nil - } - return &Config{ - DefaultResolvers: slices.Clone(c.DefaultResolvers), - Routes: maps.Clone(c.Routes), - SearchDomains: slices.Clone(c.SearchDomains), - Hosts: maps.Clone(c.Hosts), - OnlyIPv6: c.OnlyIPv6, - } -} - func (c *Config) Equal(o *Config) bool { if c == nil || o == nil { return c == o diff --git a/net/dns/config_test.go b/net/dns/config_test.go deleted file mode 100644 index 684dea6bc60d2..0000000000000 --- a/net/dns/config_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package dns - -import ( - "net/netip" - "reflect" - "testing" - - "tailscale.com/types/dnstype" - "tailscale.com/util/dnsname" -) - -func TestConfigClone(t *testing.T) { - tests := []struct { - name string - conf *Config - }{ - { - name: "nil", - conf: nil, - }, - { - name: "empty", - conf: &Config{}, - }, - { - name: "full", - conf: &Config{ - DefaultResolvers: []*dnstype.Resolver{ - { - Addr: "abc", - BootstrapResolution: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, - UseWithExitNode: true, - }, - }, - Routes: map[dnsname.FQDN][]*dnstype.Resolver{ - "foo.bar.": { - { - Addr: "abc", - BootstrapResolution: []netip.Addr{netip.MustParseAddr("1.2.3.4")}, - UseWithExitNode: true, - }, - }, - }, - SearchDomains: []dnsname.FQDN{"bar.baz."}, - Hosts: map[dnsname.FQDN][]netip.Addr{ - "host.bar.": {netip.MustParseAddr("5.6.7.8")}, - }, - OnlyIPv6: true, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := tt.conf.Clone() - if !reflect.DeepEqual(got, tt.conf) { - t.Error("Cloned result is not reflect.DeepEqual") - } - if !got.Equal(tt.conf) { - t.Error("Cloned result is not Equal") - } - }) - } -} diff --git a/net/dns/dns_clone.go b/net/dns/dns_clone.go new file mode 100644 index 0000000000000..807bfce23df8b --- /dev/null +++ b/net/dns/dns_clone.go @@ -0,0 +1,74 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// 
SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. + +package dns + +import ( + "net/netip" + + "tailscale.com/types/dnstype" + "tailscale.com/util/dnsname" +) + +// Clone makes a deep copy of Config. +// The result aliases no memory with the original. +func (src *Config) Clone() *Config { + if src == nil { + return nil + } + dst := new(Config) + *dst = *src + if src.DefaultResolvers != nil { + dst.DefaultResolvers = make([]*dnstype.Resolver, len(src.DefaultResolvers)) + for i := range dst.DefaultResolvers { + if src.DefaultResolvers[i] == nil { + dst.DefaultResolvers[i] = nil + } else { + dst.DefaultResolvers[i] = src.DefaultResolvers[i].Clone() + } + } + } + if dst.Routes != nil { + dst.Routes = map[dnsname.FQDN][]*dnstype.Resolver{} + for k := range src.Routes { + dst.Routes[k] = append([]*dnstype.Resolver{}, src.Routes[k]...) + } + } + dst.SearchDomains = append(src.SearchDomains[:0:0], src.SearchDomains...) + if dst.Hosts != nil { + dst.Hosts = map[dnsname.FQDN][]netip.Addr{} + for k := range src.Hosts { + dst.Hosts[k] = append([]netip.Addr{}, src.Hosts[k]...) + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _ConfigCloneNeedsRegeneration = Config(struct { + DefaultResolvers []*dnstype.Resolver + Routes map[dnsname.FQDN][]*dnstype.Resolver + SearchDomains []dnsname.FQDN + Hosts map[dnsname.FQDN][]netip.Addr + OnlyIPv6 bool +}{}) + +// Clone duplicates src into dst and reports whether it succeeded. +// To succeed, must be of types <*T, *T> or <*T, **T>, +// where T is one of Config. 
+func Clone(dst, src any) bool { + switch src := src.(type) { + case *Config: + switch dst := dst.(type) { + case *Config: + *dst = *src.Clone() + return true + case **Config: + *dst = src.Clone() + return true + } + } + return false +} diff --git a/net/dns/dns_view.go b/net/dns/dns_view.go new file mode 100644 index 0000000000000..c7ce376cba8db --- /dev/null +++ b/net/dns/dns_view.go @@ -0,0 +1,138 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale/cmd/viewer; DO NOT EDIT. + +package dns + +import ( + jsonv1 "encoding/json" + "errors" + "net/netip" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/dnstype" + "tailscale.com/types/views" + "tailscale.com/util/dnsname" +) + +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=Config + +// View returns a read-only view of Config. +func (p *Config) View() ConfigView { + return ConfigView{ж: p} +} + +// ConfigView provides a read-only view over Config. +// +// Its methods should only be called if `Valid()` returns true. +type ConfigView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *Config +} + +// Valid reports whether v's underlying value is non-nil. +func (v ConfigView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v ConfigView) AsStruct() *Config { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. 
+func (v ConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (v *ConfigView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x Config + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Config + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// DefaultResolvers are the DNS resolvers to use for DNS names +// which aren't covered by more specific per-domain routes below. +// If empty, the OS's default resolvers (the ones that predate +// Tailscale altering the configuration) are used. +func (v ConfigView) DefaultResolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { + return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.ж.DefaultResolvers) +} + +// Routes maps a DNS suffix to the resolvers that should be used +// for queries that fall within that suffix. +// If a query doesn't match any entry in Routes, the +// DefaultResolvers are used. +// A Routes entry with no resolvers means the route should be +// authoritatively answered using the contents of Hosts. +func (v ConfigView) Routes() views.MapFn[dnsname.FQDN, []*dnstype.Resolver, views.SliceView[*dnstype.Resolver, dnstype.ResolverView]] { + return views.MapFnOf(v.ж.Routes, func(t []*dnstype.Resolver) views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { + return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](t) + }) +} + +// SearchDomains are DNS suffixes to try when expanding +// single-label queries. 
+func (v ConfigView) SearchDomains() views.Slice[dnsname.FQDN] { + return views.SliceOf(v.ж.SearchDomains) +} + +// Hosts maps DNS FQDNs to their IPs, which can be a mix of IPv4 +// and IPv6. +// Queries matching entries in Hosts are resolved locally by +// 100.100.100.100 without leaving the machine. +// Adding an entry to Hosts merely creates the record. If you want +// it to resolve, you also need to add appropriate routes to +// Routes. +func (v ConfigView) Hosts() views.MapSlice[dnsname.FQDN, netip.Addr] { + return views.MapSliceOf(v.ж.Hosts) +} + +// OnlyIPv6, if true, uses the IPv6 service IP (for MagicDNS) +// instead of the IPv4 version (100.100.100.100). +func (v ConfigView) OnlyIPv6() bool { return v.ж.OnlyIPv6 } +func (v ConfigView) Equal(v2 ConfigView) bool { return v.ж.Equal(v2.ж) } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _ConfigViewNeedsRegeneration = Config(struct { + DefaultResolvers []*dnstype.Resolver + Routes map[dnsname.FQDN][]*dnstype.Resolver + SearchDomains []dnsname.FQDN + Hosts map[dnsname.FQDN][]netip.Addr + OnlyIPv6 bool +}{}) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 9f42dae2a8676..d1ca21f4d672b 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -132,8 +132,8 @@ type userspaceEngine struct { lastRouter *router.Config lastEngineFull *wgcfg.Config // of full wireguard config, not trimmed lastEngineInputs *maybeReconfigInputs - lastDNSConfig *dns.Config - lastIsSubnetRouter bool // was the node a primary subnet router in the last run. + lastDNSConfig dns.ConfigView // or invalid if none + lastIsSubnetRouter bool // was the node a primary subnet router in the last run. 
recvActivityAt map[key.NodePublic]mono.Time trimmedNodes map[key.NodePublic]bool // set of node keys of peers currently excluded from wireguard config sentActivityAt map[netip.Addr]*mono.Time // value is accessed atomically @@ -965,8 +965,11 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, isSubnetRouterChanged := isSubnetRouter != e.lastIsSubnetRouter engineChanged := checkchange.Update(&e.lastEngineFull, cfg) - dnsChanged := buildfeatures.HasDNS && checkchange.Update(&e.lastDNSConfig, dnsCfg) routerChanged := checkchange.Update(&e.lastRouter, routerCfg) + dnsChanged := buildfeatures.HasDNS && !e.lastDNSConfig.Equal(dnsCfg.View()) + if dnsChanged { + e.lastDNSConfig = dnsCfg.View() + } listenPortChanged := listenPort != e.magicConn.LocalPort() peerMTUChanged := peerMTUEnable != e.magicConn.PeerMTUEnabled() @@ -1322,8 +1325,8 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { e.wgLock.Lock() dnsCfg := e.lastDNSConfig e.wgLock.Unlock() - if dnsCfg != nil { - if err := e.dns.Set(*dnsCfg); err != nil { + if dnsCfg.Valid() { + if err := e.dns.Set(*dnsCfg.AsStruct()); err != nil { e.logf("wgengine: error setting DNS config after major link change: %v", err) } else if err := e.reconfigureVPNIfNecessary(); err != nil { e.logf("wgengine: error reconfiguring VPN after major link change: %v", err) From 0f4dec928e8f690a8cd36e7bd399228e129a2e7d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 8 Oct 2025 18:42:13 -0700 Subject: [PATCH 0558/1093] feature/featuretags: make bird depend on advertiseroutes Updates #cleanup Change-Id: I87082919064a5652c0d976cadd6d159787bb224a Signed-off-by: Brad Fitzpatrick --- feature/featuretags/featuretags.go | 6 +++++- wgengine/userspace.go | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 9c87586dbcbd7..c944d65ebcdbb 100644 --- a/feature/featuretags/featuretags.go +++ 
b/feature/featuretags/featuretags.go @@ -113,7 +113,11 @@ var Features = map[FeatureTag]FeatureMeta{ }, }, "bakedroots": {Sym: "BakedRoots", Desc: "Embed CA (LetsEncrypt) x509 roots to use as fallback"}, - "bird": {Sym: "Bird", Desc: "Bird BGP integration"}, + "bird": { + Sym: "Bird", + Desc: "Bird BGP integration", + Deps: []FeatureTag{"advertiseroutes"}, + }, "c2n": { Sym: "C2N", Desc: "Control-to-node (C2N) support", diff --git a/wgengine/userspace.go b/wgengine/userspace.go index d1ca21f4d672b..8856a3eaf4d11 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -962,7 +962,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, nm.SelfNode.PrimaryRoutes(), nm.SelfNode.Hostinfo().RoutableIPs(), isSubnetRouter, isSubnetRouter, e.lastIsSubnetRouter) } - isSubnetRouterChanged := isSubnetRouter != e.lastIsSubnetRouter + isSubnetRouterChanged := buildfeatures.HasAdvertiseRoutes && isSubnetRouter != e.lastIsSubnetRouter engineChanged := checkchange.Update(&e.lastEngineFull, cfg) routerChanged := checkchange.Update(&e.lastRouter, routerCfg) From e2233b794247bf20d022d0ebefa99ad39bbad591 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 9 Oct 2025 11:45:03 -0700 Subject: [PATCH 0559/1093] feature/relayserver: init server at config time instead of request time (#17484) The lazy init led to confusion and a belief that was something was wrong. It's reasonable to expect the daemon to listen on the port at the time it's configured. 
Updates tailscale/corp#33094 Signed-off-by: Jordan Whited --- cmd/tailscaled/depaware.txt | 2 +- feature/relayserver/relayserver.go | 19 +++++++------------ feature/relayserver/relayserver_test.go | 3 +++ net/udprelay/server.go | 1 + 4 files changed, 12 insertions(+), 13 deletions(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index c7d571f1e016e..6ca10f80cf0bf 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -367,7 +367,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay from tailscale.com/feature/relayserver - tailscale.com/net/udprelay/endpoint from tailscale.com/feature/relayserver+ + tailscale.com/net/udprelay/endpoint from tailscale.com/net/udprelay+ tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 95bf29a111407..df2fb4cb7c165 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -21,10 +21,8 @@ import ( "tailscale.com/ipn/ipnext" "tailscale.com/ipn/localapi" "tailscale.com/net/udprelay" - "tailscale.com/net/udprelay/endpoint" "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" - "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" "tailscale.com/util/eventbus" @@ -91,13 +89,6 @@ type extension struct { hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer } -// relayServer is the interface of [udprelay.Server]. 
-type relayServer interface { - AllocateEndpoint(discoA key.DiscoPublic, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) - Close() error - GetSessions() []status.ServerSession -} - // Name implements [ipnext.Extension]. func (e *extension) Name() string { return featureName @@ -182,7 +173,11 @@ func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*e debugSessionsCh := e.debugSessionsCh return func(ec *eventbus.Client) { - var rs relayServer // lazily initialized + rs, err := udprelay.NewServer(e.logf, port, overrideAddrs()) + if err != nil { + e.logf("error initializing server: %v", err) + } + defer func() { if rs != nil { rs.Close() @@ -194,7 +189,6 @@ func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*e return case respCh := <-debugSessionsCh: if rs == nil { - // Don't initialize the server simply for a debug request. respCh <- nil continue } @@ -202,7 +196,8 @@ func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*e respCh <- sessions case req := <-reqSub.Events(): if rs == nil { - var err error + // The server may have previously failed to initialize if + // the configured port was in use, try again. 
rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) if err != nil { e.logf("error initializing server: %v", err) diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 89c004dc7bbc8..65c503524c5de 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -8,6 +8,7 @@ import ( "tailscale.com/ipn" "tailscale.com/tsd" + "tailscale.com/types/logger" "tailscale.com/types/ptr" "tailscale.com/util/eventbus" ) @@ -96,6 +97,7 @@ func Test_extension_profileStateChanged(t *testing.T) { sys := tsd.NewSystem() bus := sys.Bus.Get() e := &extension{ + logf: logger.Discard, port: tt.fields.port, bus: bus, } @@ -154,6 +156,7 @@ func Test_extension_handleBusLifetimeLocked(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := &extension{ + logf: logger.Discard, bus: eventbus.New(), shutdown: tt.shutdown, port: tt.port, diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 424c7a61731f1..83831dd698164 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -536,6 +536,7 @@ func (s *Server) listenOn(port int) error { s.uc6 = bc s.uc6Port = uint16(portUint) } + s.logf("listening on %s:%d", network, portUint) } return nil } From d72370a6eb6e9d78b56a84a8f59d9e6f276ab85c Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 9 Oct 2025 15:09:07 -0700 Subject: [PATCH 0560/1093] wgengine/magicsock: remove unused arg in deregisterMetrics (#17513) Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 492dff2ce39fb..082639866c1e0 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -840,7 +840,7 @@ func registerMetrics(reg *usermetric.Registry) *metrics { // deregisterMetrics unregisters the underlying usermetrics expvar counters // from 
clientmetrics. -func deregisterMetrics(m *metrics) { +func deregisterMetrics() { metricRecvDataPacketsIPv4.UnregisterAll() metricRecvDataPacketsIPv6.UnregisterAll() metricRecvDataPacketsDERP.UnregisterAll() @@ -3329,7 +3329,7 @@ func (c *Conn) Close() error { pinger.Close() } - deregisterMetrics(c.metrics) + deregisterMetrics() return nil } From adf308a06407754c94fd71f7497c63178294ba6d Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 9 Oct 2025 15:18:31 -0700 Subject: [PATCH 0561/1093] wgengine/magicsock: add clientmetrics for RX bytes by af & conn type (#17512) Updates tailscale/corp#33206 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 17 +++++++++++++++++ wgengine/magicsock/magicsock_test.go | 2 ++ 2 files changed, 19 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 082639866c1e0..873c76a0989a3 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -803,6 +803,11 @@ func registerMetrics(reg *usermetric.Registry) *metrics { metricRecvDataPacketsDERP.Register(&m.inboundPacketsDERPTotal) metricRecvDataPacketsPeerRelayIPv4.Register(&m.inboundPacketsPeerRelayIPv4Total) metricRecvDataPacketsPeerRelayIPv6.Register(&m.inboundPacketsPeerRelayIPv6Total) + metricRecvDataBytesIPv4.Register(&m.inboundBytesIPv4Total) + metricRecvDataBytesIPv6.Register(&m.inboundBytesIPv6Total) + metricRecvDataBytesDERP.Register(&m.inboundBytesDERPTotal) + metricRecvDataBytesPeerRelayIPv4.Register(&m.inboundBytesPeerRelayIPv4Total) + metricRecvDataBytesPeerRelayIPv6.Register(&m.inboundBytesPeerRelayIPv6Total) metricSendUDP.Register(&m.outboundPacketsIPv4Total) metricSendUDP.Register(&m.outboundPacketsIPv6Total) metricSendDERP.Register(&m.outboundPacketsDERPTotal) @@ -846,6 +851,11 @@ func deregisterMetrics() { metricRecvDataPacketsDERP.UnregisterAll() metricRecvDataPacketsPeerRelayIPv4.UnregisterAll() metricRecvDataPacketsPeerRelayIPv6.UnregisterAll() + 
metricRecvDataBytesIPv4.UnregisterAll() + metricRecvDataBytesIPv6.UnregisterAll() + metricRecvDataBytesDERP.UnregisterAll() + metricRecvDataBytesPeerRelayIPv4.UnregisterAll() + metricRecvDataBytesPeerRelayIPv6.UnregisterAll() metricSendUDP.UnregisterAll() metricSendDERP.UnregisterAll() metricSendPeerRelay.UnregisterAll() @@ -3935,6 +3945,13 @@ var ( metricRecvDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv4") metricRecvDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv6") + // Data bytes (non-disco) + metricRecvDataBytesDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_derp") + metricRecvDataBytesIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_ipv4") + metricRecvDataBytesIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_ipv6") + metricRecvDataBytesPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv4") + metricRecvDataBytesPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv6") + // Disco packets metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") metricSendDiscoDERP = clientmetric.NewCounter("magicsock_disco_send_derp") diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 1f533ddef4628..3468798c18d92 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -1302,6 +1302,8 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { c.Assert(metricSendUDP.Value(), qt.Equals, metricIPv4TxPackets*2) c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, metricIPv4RxPackets*2) c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2) + c.Assert(metricRecvDataBytesIPv4.Value(), qt.Equals, metricIPv4RxBytes*2) + c.Assert(metricRecvDataBytesDERP.Value(), qt.Equals, metricDERPRxBytes*2) } // tests that having a 
endpoint.String prevents wireguard-go's From 16a05c76803e0b7d72c555812209e73480fc1582 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 9 Oct 2025 16:03:37 -0700 Subject: [PATCH 0562/1093] wgengine/magicsock: fix docs for send clientmetrics (#17514) Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 873c76a0989a3..844a607cfecb1 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3929,13 +3929,20 @@ var ( metricSendDERPErrorClosed = clientmetric.NewCounter("magicsock_send_derp_error_closed") metricSendDERPErrorQueue = clientmetric.NewCounter("magicsock_send_derp_error_queue") metricSendDERPDropped = clientmetric.NewCounter("magicsock_send_derp_dropped") - metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") metricSendUDPError = clientmetric.NewCounter("magicsock_send_udp_error") - metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") metricSendPeerRelayError = clientmetric.NewCounter("magicsock_send_peer_relay_error") - metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") metricSendDERPError = clientmetric.NewCounter("magicsock_send_derp_error") + // Sends (data) + // + // Note: Prior to v1.78 metricSendUDP & metricSendDERP counted sends of data + // AND disco packets. They were updated in v1.78 to only count data packets. + // metricSendPeerRelay was added in v1.86 and has always counted only data + // packets. 
+ metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") + metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") + metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") + // Data packets (non-disco) metricSendData = clientmetric.NewCounter("magicsock_send_data") metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down") From 154d36f73d305e147b2410263a2899fb54646909 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 9 Oct 2025 23:58:03 -0700 Subject: [PATCH 0563/1093] wgengine/magicsock: do not apply node view updates to a closed Conn (#17517) Fixes #17516 Change-Id: Iae2dab42d6f7bc618478d360a1005537c1fa1bbd Signed-off-by: M. J. Fromberger --- wgengine/magicsock/magicsock.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 844a607cfecb1..b17aa11ae1285 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2958,8 +2958,13 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { filt := c.filt self := c.self peers := c.peers + isClosed := c.closed c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) + if isClosed { + return // nothing to do here, the conn is closed and the update is no longer relevant + } + if peersChanged || relayClientChanged { if !relayClientEnabled { c.relayManager.handleRelayServersSet(nil) From 072e6a39f49faa4d209fcbb328fe2fb8d38f9e7f Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Fri, 10 Oct 2025 11:22:33 +0200 Subject: [PATCH 0564/1093] tsweb/varz: add support for ShardedInt metrics Fixes tailscale/corp#33236 Signed-off-by: Anton Tolchanov --- cmd/stund/depaware.txt | 2 +- tsweb/varz/varz.go | 4 ++++ tsweb/varz/varz_test.go | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 
8cd2e49beb052..be3e0e0cf8f13 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -58,7 +58,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/stund tailscale.com/net/tsaddr from tailscale.com/tsweb - tailscale.com/syncs from tailscale.com/metrics + tailscale.com/syncs from tailscale.com/metrics+ tailscale.com/tailcfg from tailscale.com/version tailscale.com/tsweb from tailscale.com/cmd/stund+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/stund diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index aca2878b74f29..b1c66b859e8cf 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -25,6 +25,7 @@ import ( "golang.org/x/exp/constraints" "tailscale.com/metrics" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/version" ) @@ -136,6 +137,9 @@ func writePromExpVar(w io.Writer, prefix string, kv expvar.KeyValue) { case *expvar.Int: fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "counter"), name, v.Value()) return + case *syncs.ShardedInt: + fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "counter"), name, v.Value()) + return case *expvar.Float: fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "gauge"), name, v.Value()) return diff --git a/tsweb/varz/varz_test.go b/tsweb/varz/varz_test.go index f7a9d880199e2..5bbacbe356940 100644 --- a/tsweb/varz/varz_test.go +++ b/tsweb/varz/varz_test.go @@ -13,6 +13,7 @@ import ( "testing" "tailscale.com/metrics" + "tailscale.com/syncs" "tailscale.com/tstest" "tailscale.com/util/racebuild" "tailscale.com/version" @@ -283,6 +284,20 @@ foo_foo_a 1 foo_foo_b 1 `) + "\n", }, + { + "metrics_sharded_int", + "counter_api_status_code", + func() *syncs.ShardedInt { + m := syncs.NewShardedInt() + m.Add(40) + m.Add(2) + return m + }(), + strings.TrimSpace(` +# TYPE api_status_code counter +api_status_code 42 + `) + "\n", + }, } 
for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 08eae9affda8ca75993e216bf5da9fe80ce0d358 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Fri, 10 Oct 2025 11:27:55 +0100 Subject: [PATCH 0565/1093] sessionrecording: add destination to struct for tsrecorder (#17520) when tsrecorder receives events, it populates this field with information about the node the request was sent to. Updates #17141 Signed-off-by: chaosinthecrd --- sessionrecording/event.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/sessionrecording/event.go b/sessionrecording/event.go index 41d8f2d5806b4..8f8172cc4b303 100644 --- a/sessionrecording/event.go +++ b/sessionrecording/event.go @@ -37,6 +37,9 @@ type Event struct { // Source provides details about the client that initiated the request. Source Source `json:"source"` + + // Destination provides details about the node receiving the request. + Destination Destination `json:"destination"` } // copied from https://github.com/kubernetes/kubernetes/blob/11ade2f7dd264c2f52a4a1342458abbbaa3cb2b1/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go#L44 @@ -95,6 +98,17 @@ type Source struct { NodeUser string `json:"nodeUser,omitempty"` } +type Destination struct { + // Node is the FQDN of the node receiving the connection. + // It is also the MagicDNS name for the node. + // It does not have a trailing dot. + // e.g. "host.tail-scale.ts.net" + Node string `json:"node"` + + // NodeID is the node ID of the node receiving the connection. + NodeID tailcfg.StableNodeID `json:"nodeID"` +} + // Request holds information about a request. 
type Request struct { Method string `json:"method"` From f157f3288d3f35ac348c237b514d7e7b092393ac Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Fri, 10 Oct 2025 11:02:35 -0400 Subject: [PATCH 0566/1093] cmd/tailscale/cli,ipn/conffile: add declarative config mode for Services (#17435) This commit adds the subcommands `get-config` and `set-config` to Serve, which can be used to read the current Tailscale Services configuration in a standard syntax and provide a configuration to declaratively apply with that same syntax. Both commands must be provided with either `--service=svc:service` for one service, or `--all` for all services. When writing a config, `--set-config --all` will overwrite all existing Services configuration, and `--set-config --service=svc:service` will overwrite all configuration for that particular Service. Incremental changes are not supported. Fixes tailscale/corp#30983. cmd/tailscale/cli: hide serve "get-config"/"set-config" commands for now tailscale/corp#33152 tracks unhiding them when docs exist. 
Signed-off-by: Naman Sood --- cmd/tailscale/cli/serve_legacy.go | 1 + cmd/tailscale/cli/serve_v2.go | 300 +++++++++++++++++++++++++++++- cmd/tailscale/depaware.txt | 3 + ipn/conffile/serveconf.go | 239 ++++++++++++++++++++++++ tailcfg/proto_port_range.go | 16 +- tailcfg/tailcfg.go | 10 + 6 files changed, 556 insertions(+), 13 deletions(-) create mode 100644 ipn/conffile/serveconf.go diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index b60e9833bc86f..95808fdf2eb34 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -172,6 +172,7 @@ type serveEnv struct { yes bool // update without prompt service tailcfg.ServiceName // service name tun bool // redirect traffic to OS for service + allServices bool // apply config file to all services lc localServeClient // localClient interface, specific to serve diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 8831db2a9e135..9b0af2cad7a0c 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -28,10 +28,13 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" "tailscale.com/util/mak" "tailscale.com/util/prompt" + "tailscale.com/util/set" "tailscale.com/util/slicesx" "tailscale.com/version" ) @@ -128,6 +131,22 @@ const ( serveTypeTUN ) +func serveTypeFromConfString(sp conffile.ServiceProtocol) (st serveType, ok bool) { + switch sp { + case conffile.ProtoHTTP: + return serveTypeHTTP, true + case conffile.ProtoHTTPS, conffile.ProtoHTTPSInsecure, conffile.ProtoFile: + return serveTypeHTTPS, true + case conffile.ProtoTCP: + return serveTypeTCP, true + case conffile.ProtoTLSTerminatedTCP: + return serveTypeTLSTerminatedTCP, true + case conffile.ProtoTUN: + return serveTypeTUN, true + } + return -1, false +} + const noService tailcfg.ServiceName = "" 
var infoMap = map[serveMode]commandInfo{ @@ -232,6 +251,33 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { "`tailscale serve drain `). This is not needed if you are using `tailscale serve` to initialize a service.", Exec: e.runServeAdvertise, }, + { + Name: "get-config", + ShortUsage: fmt.Sprintf("tailscale %s get-config [--service=] [--all]", info.Name), + ShortHelp: "Get service configuration to save to a file", + LongHelp: hidden + "Get the configuration for services that this node is currently hosting in a\n" + + "format that can later be provided to set-config. This can be used to declaratively set\n" + + "configuration for a service host.", + Exec: e.runServeGetConfig, + FlagSet: e.newFlags("serve-get-config", func(fs *flag.FlagSet) { + fs.BoolVar(&e.allServices, "all", false, "read config from all services") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "read config from a particular service") + }), + }, + { + Name: "set-config", + ShortUsage: fmt.Sprintf("tailscale %s set-config [--service=] [--all]", info.Name), + ShortHelp: "Define service configuration from a file", + LongHelp: hidden + "Read the provided configuration file and use it to declaratively set the configuration\n" + + "for either a single service, or for all services that this node is hosting. If --service is specified,\n" + + "all endpoint handlers for that service are overwritten. 
If --all is specified, all endpoint handlers for\n" + + "all services are overwritten.", + Exec: e.runServeSetConfig, + FlagSet: e.newFlags("serve-set-config", func(fs *flag.FlagSet) { + fs.BoolVar(&e.allServices, "all", false, "apply config to all services") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "apply config to a particular service") + }), + }, }, } } @@ -540,7 +586,7 @@ func (e *serveEnv) runServeClear(ctx context.Context, args []string) error { func (e *serveEnv) runServeAdvertise(ctx context.Context, args []string) error { if len(args) == 0 { - return fmt.Errorf("error: missing service name argument") + return errors.New("error: missing service name argument") } if len(args) != 1 { fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") @@ -553,6 +599,258 @@ func (e *serveEnv) runServeAdvertise(ctx context.Context, args []string) error { return e.addServiceToPrefs(ctx, svc) } +func (e *serveEnv) runServeGetConfig(ctx context.Context, args []string) (err error) { + forSingleService := e.service.Validate() == nil + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return err + } + + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return err + } + advertised := set.SetOf(prefs.AdvertiseServices) + + st, err := e.getLocalClientStatusWithoutPeers(ctx) + if err != nil { + return err + } + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix + + handleService := func(svcName tailcfg.ServiceName, serviceConfig *ipn.ServiceConfig) (*conffile.ServiceDetailsFile, error) { + var sdf conffile.ServiceDetailsFile + // Leave unset for true case since that's the default. 
+ if !advertised.Contains(svcName.String()) { + sdf.Advertised.Set(false) + } + + if serviceConfig.Tun { + mak.Set(&sdf.Endpoints, &tailcfg.ProtoPortRange{Ports: tailcfg.PortRangeAny}, &conffile.Target{ + Protocol: conffile.ProtoTUN, + Destination: "", + DestinationPorts: tailcfg.PortRange{}, + }) + } + + for port, config := range serviceConfig.TCP { + sniName := fmt.Sprintf("%s.%s", svcName.WithoutPrefix(), magicDNSSuffix) + ppr := tailcfg.ProtoPortRange{Proto: int(ipproto.TCP), Ports: tailcfg.PortRange{First: port, Last: port}} + if config.TCPForward != "" { + var proto conffile.ServiceProtocol + if config.TerminateTLS != "" { + proto = conffile.ProtoTLSTerminatedTCP + } else { + proto = conffile.ProtoTCP + } + destHost, destPortStr, err := net.SplitHostPort(config.TCPForward) + if err != nil { + return nil, fmt.Errorf("parse TCPForward=%q: %w", config.TCPForward, err) + } + destPort, err := strconv.ParseUint(destPortStr, 10, 16) + if err != nil { + return nil, fmt.Errorf("parse port %q: %w", destPortStr, err) + } + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: proto, + Destination: destHost, + DestinationPorts: tailcfg.PortRange{First: uint16(destPort), Last: uint16(destPort)}, + }) + } else if config.HTTP || config.HTTPS { + webKey := ipn.HostPort(net.JoinHostPort(sniName, strconv.FormatUint(uint64(port), 10))) + handlers, ok := serviceConfig.Web[webKey] + if !ok { + return nil, fmt.Errorf("service %q: HTTP/HTTPS is set but no handlers in config", svcName) + } + defaultHandler, ok := handlers.Handlers["/"] + if !ok { + return nil, fmt.Errorf("service %q: root handler not set", svcName) + } + if defaultHandler.Path != "" { + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: conffile.ProtoFile, + Destination: defaultHandler.Path, + DestinationPorts: tailcfg.PortRange{}, + }) + } else if defaultHandler.Proxy != "" { + proto, rest, ok := strings.Cut(defaultHandler.Proxy, "://") + if !ok { + return nil, fmt.Errorf("service %q: invalid proxy 
handler %q", svcName, defaultHandler.Proxy) + } + host, portStr, err := net.SplitHostPort(rest) + if err != nil { + return nil, fmt.Errorf("service %q: invalid proxy handler %q: %w", svcName, defaultHandler.Proxy, err) + } + + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + return nil, fmt.Errorf("service %q: parse port %q: %w", svcName, portStr, err) + } + + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: conffile.ServiceProtocol(proto), + Destination: host, + DestinationPorts: tailcfg.PortRange{First: uint16(port), Last: uint16(port)}, + }) + } + } + } + + return &sdf, nil + } + + var j []byte + + if e.allServices && forSingleService { + return errors.New("cannot specify both --all and --service") + } else if e.allServices { + var scf conffile.ServicesConfigFile + scf.Version = "0.0.1" + for svcName, serviceConfig := range sc.Services { + sdf, err := handleService(svcName, serviceConfig) + if err != nil { + return err + } + mak.Set(&scf.Services, svcName, sdf) + } + j, err = json.MarshalIndent(scf, "", " ") + if err != nil { + return err + } + } else if forSingleService { + serviceConfig, ok := sc.Services[e.service] + if !ok { + j = []byte("{}") + } else { + sdf, err := handleService(e.service, serviceConfig) + if err != nil { + return err + } + sdf.Version = "0.0.1" + j, err = json.MarshalIndent(sdf, "", " ") + if err != nil { + return err + } + } + } else { + return errors.New("must specify either --service=svc: or --all") + } + + j = append(j, '\n') + _, err = e.stdout().Write(j) + return err +} + +func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err error) { + if len(args) != 1 { + return errors.New("must specify filename") + } + forSingleService := e.service.Validate() == nil + + var scf *conffile.ServicesConfigFile + if e.allServices && forSingleService { + return errors.New("cannot specify both --all and --service") + } else if e.allServices { + scf, err = conffile.LoadServicesConfig(args[0], "") + 
} else if forSingleService { + scf, err = conffile.LoadServicesConfig(args[0], e.service.String()) + } else { + return errors.New("must specify either --service=svc: or --all") + } + if err != nil { + return fmt.Errorf("could not read config from file %q: %w", args[0], err) + } + + st, err := e.getLocalClientStatusWithoutPeers(ctx) + if err != nil { + return fmt.Errorf("getting client status: %w", err) + } + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("getting current serve config: %w", err) + } + + // Clear all existing config. + if forSingleService { + if sc.Services != nil { + if sc.Services[e.service] != nil { + delete(sc.Services, e.service) + } + } + } else { + sc.Services = map[tailcfg.ServiceName]*ipn.ServiceConfig{} + } + advertisedServices := set.Set[string]{} + + for name, details := range scf.Services { + for ppr, ep := range details.Endpoints { + if ep.Protocol == conffile.ProtoTUN { + err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix) + if err != nil { + return err + } + // TUN mode is exclusive. 
+ break + } + + if ppr.Proto != int(ipproto.TCP) { + return fmt.Errorf("service %q: source ports must be TCP", name) + } + serveType, _ := serveTypeFromConfString(ep.Protocol) + for port := ppr.Ports.First; port <= ppr.Ports.Last; port++ { + var target string + if ep.Protocol == conffile.ProtoFile { + target = ep.Destination + } else { + // map source port range 1-1 to destination port range + destPort := ep.DestinationPorts.First + (port - ppr.Ports.First) + portStr := fmt.Sprint(destPort) + target = fmt.Sprintf("%s://%s", ep.Protocol, net.JoinHostPort(ep.Destination, portStr)) + } + err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix) + if err != nil { + return fmt.Errorf("service %q: %w", name, err) + } + } + } + if v, set := details.Advertised.Get(); !set || v { + advertisedServices.Add(name.String()) + } + } + + var changed bool + var servicesList []string + if e.allServices { + servicesList = advertisedServices.Slice() + changed = true + } else if advertisedServices.Contains(e.service.String()) { + // If allServices wasn't set, the only service that could have been + // advertised is the one that was provided as a flag. + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return err + } + if !slices.Contains(prefs.AdvertiseServices, e.service.String()) { + servicesList = append(prefs.AdvertiseServices, e.service.String()) + changed = true + } + } + if changed { + _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: servicesList, + }, + }) + if err != nil { + return err + } + } + + return e.lc.SetServeConfig(ctx, sc) +} + const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" // validateConfig checks if the serve config is valid to serve the type wanted on the port. 
diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 8c2fb0e9221eb..0d3a006a19afd 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -61,6 +61,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp + github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 @@ -109,6 +110,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli+ tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscale/cli tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ @@ -137,6 +139,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/net/tsdial from tailscale.com/cmd/tailscale/cli+ 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/udprelay/status from tailscale.com/client/local+ + tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ diff --git a/ipn/conffile/serveconf.go b/ipn/conffile/serveconf.go new file mode 100644 index 0000000000000..bb63c1ac5571a --- /dev/null +++ b/ipn/conffile/serveconf.go @@ -0,0 +1,239 @@ +// Copyright (c) Tailscale Inc 
& AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package conffile + +import ( + "errors" + "fmt" + "net" + "os" + "path" + "strings" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/tailcfg" + "tailscale.com/types/opt" + "tailscale.com/util/mak" +) + +// ServicesConfigFile is the config file format for services configuration. +type ServicesConfigFile struct { + // Version is always "0.0.1" and always present. + Version string `json:"version"` + + Services map[tailcfg.ServiceName]*ServiceDetailsFile `json:"services,omitzero"` +} + +// ServiceDetailsFile is the config syntax for an individual Tailscale Service. +type ServiceDetailsFile struct { + // Version is always "0.0.1", set if and only if this is not inside a + // [ServiceConfigFile]. + Version string `json:"version,omitzero"` + + // Endpoints are sets of reverse proxy mappings from ProtoPortRanges on a + // Service to Targets (proto+destination+port) on remote destinations (or + // localhost). + // For example, "tcp:443" -> "tcp://localhost:8000" is an endpoint definition + // mapping traffic on the TCP port 443 of the Service to port 8080 on localhost. + // The Proto in the key must be populated. + // As a special case, if the only mapping provided is "*" -> "TUN", that + // enables TUN/L3 mode, where packets are delivered to the Tailscale network + // interface with the understanding that the user will deal with them manually. + Endpoints map[*tailcfg.ProtoPortRange]*Target `json:"endpoints"` + + // Advertised is a flag that tells control whether or not the client thinks + // it is ready to host a particular Tailscale Service. If unset, it is + // assumed to be true. + Advertised opt.Bool `json:"advertised,omitzero"` +} + +// ServiceProtocol is the protocol of a Target. 
+type ServiceProtocol string + +const ( + ProtoHTTP ServiceProtocol = "http" + ProtoHTTPS ServiceProtocol = "https" + ProtoHTTPSInsecure ServiceProtocol = "https+insecure" + ProtoTCP ServiceProtocol = "tcp" + ProtoTLSTerminatedTCP ServiceProtocol = "tls-terminated-tcp" + ProtoFile ServiceProtocol = "file" + ProtoTUN ServiceProtocol = "TUN" +) + +// Target is a destination for traffic to go to when it arrives at a Tailscale +// Service host. +type Target struct { + // The protocol over which to communicate with the Destination. + // Protocol == ProtoTUN is a special case, activating "TUN mode" where + // packets are delivered to the Tailscale TUN interface and then manually + // handled by the user. + Protocol ServiceProtocol + + // If Protocol is ProtoFile, then Destination is a file path. + // If Protocol is ProtoTUN, then Destination is empty. + // Otherwise, it is a host. + Destination string + + // If Protocol is not ProtoFile or ProtoTUN, then DestinationPorts is the + // set of ports on which to connect to the host referred to by Destination. + DestinationPorts tailcfg.PortRange +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (t *Target) UnmarshalJSON(buf []byte) error { + return jsonv2.Unmarshal(buf, t) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (t *Target) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + var str string + if err := jsonv2.UnmarshalDecode(dec, &str); err != nil { + return err + } + + // The TUN case does not look like a standard :// arrangement, + // so handled separately. 
+ if str == "TUN" { + t.Protocol = ProtoTUN + t.Destination = "" + t.DestinationPorts = tailcfg.PortRangeAny + return nil + } + + proto, rest, found := strings.Cut(str, "://") + if !found { + return errors.New("handler not of form ://") + } + + switch ServiceProtocol(proto) { + case ProtoFile: + target := path.Clean(rest) + t.Protocol = ProtoFile + t.Destination = target + t.DestinationPorts = tailcfg.PortRange{} + case ProtoHTTP, ProtoHTTPS, ProtoHTTPSInsecure, ProtoTCP, ProtoTLSTerminatedTCP: + host, portRange, err := tailcfg.ParseHostPortRange(rest) + if err != nil { + return err + } + t.Protocol = ServiceProtocol(proto) + t.Destination = host + t.DestinationPorts = portRange + default: + return errors.New("unsupported protocol") + } + + return nil +} + +func (t *Target) MarshalText() ([]byte, error) { + var out string + switch t.Protocol { + case ProtoFile: + out = fmt.Sprintf("%s://%s", t.Protocol, t.Destination) + case ProtoTUN: + out = "TUN" + case ProtoHTTP, ProtoHTTPS, ProtoHTTPSInsecure, ProtoTCP, ProtoTLSTerminatedTCP: + out = fmt.Sprintf("%s://%s", t.Protocol, net.JoinHostPort(t.Destination, t.DestinationPorts.String())) + default: + return nil, errors.New("unsupported protocol") + } + return []byte(out), nil +} + +func LoadServicesConfig(filename string, forService string) (*ServicesConfigFile, error) { + data, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + var json []byte + if hujsonStandardize != nil { + json, err = hujsonStandardize(data) + if err != nil { + return nil, err + } + } else { + json = data + } + var ver struct { + Version string `json:"version"` + } + if err = jsonv2.Unmarshal(json, &ver); err != nil { + return nil, fmt.Errorf("could not parse config file version: %w", err) + } + switch ver.Version { + case "": + return nil, errors.New("config file must have \"version\" field") + case "0.0.1": + return loadConfigV0(json, forService) + } + return nil, fmt.Errorf("unsupported config file version %q", ver.Version) 
+} + +func loadConfigV0(json []byte, forService string) (*ServicesConfigFile, error) { + var scf ServicesConfigFile + if svcName := tailcfg.AsServiceName(forService); svcName != "" { + var sdf ServiceDetailsFile + err := jsonv2.Unmarshal(json, &sdf, jsonv2.RejectUnknownMembers(true)) + if err != nil { + return nil, err + } + mak.Set(&scf.Services, svcName, &sdf) + + } else { + err := jsonv2.Unmarshal(json, &scf, jsonv2.RejectUnknownMembers(true)) + if err != nil { + return nil, err + } + } + for svcName, svc := range scf.Services { + if forService == "" && svc.Version != "" { + return nil, errors.New("services cannot be versioned separately from config file") + } + if err := svcName.Validate(); err != nil { + return nil, err + } + if svc.Endpoints == nil { + return nil, fmt.Errorf("service %q: missing \"endpoints\" field", svcName) + } + var sourcePorts []tailcfg.PortRange + foundTUN := false + foundNonTUN := false + for ppr, target := range svc.Endpoints { + if target.Protocol == "TUN" { + if ppr.Proto != 0 || ppr.Ports != tailcfg.PortRangeAny { + return nil, fmt.Errorf("service %q: destination \"TUN\" can only be used with source \"*\"", svcName) + } + foundTUN = true + } else { + if ppr.Ports.Last-ppr.Ports.First != target.DestinationPorts.Last-target.DestinationPorts.First { + return nil, fmt.Errorf("service %q: source and destination port ranges must be of equal size", svcName.String()) + } + foundNonTUN = true + } + if foundTUN && foundNonTUN { + return nil, fmt.Errorf("service %q: cannot mix TUN mode with non-TUN mode", svcName) + } + if pr := findOverlappingRange(sourcePorts, ppr.Ports); pr != nil { + return nil, fmt.Errorf("service %q: source port ranges %q and %q overlap", svcName, pr.String(), ppr.Ports.String()) + } + sourcePorts = append(sourcePorts, ppr.Ports) + } + } + return &scf, nil +} + +// findOverlappingRange finds and returns a reference to a [tailcfg.PortRange] +// in haystack that overlaps with needle. It returns nil if it doesn't find one. 
+func findOverlappingRange(haystack []tailcfg.PortRange, needle tailcfg.PortRange) *tailcfg.PortRange { + for _, pr := range haystack { + if pr.Contains(needle.First) || pr.Contains(needle.Last) || needle.Contains(pr.First) || needle.Contains(pr.Last) { + return &pr + } + } + return nil +} diff --git a/tailcfg/proto_port_range.go b/tailcfg/proto_port_range.go index f65c58804d44d..03505dbd131e7 100644 --- a/tailcfg/proto_port_range.go +++ b/tailcfg/proto_port_range.go @@ -5,7 +5,6 @@ package tailcfg import ( "errors" - "fmt" "strconv" "strings" @@ -70,14 +69,7 @@ func (ppr ProtoPortRange) String() string { buf.Write(text) buf.Write([]byte(":")) } - pr := ppr.Ports - if pr.First == pr.Last { - fmt.Fprintf(&buf, "%d", pr.First) - } else if pr == PortRangeAny { - buf.WriteByte('*') - } else { - fmt.Fprintf(&buf, "%d-%d", pr.First, pr.Last) - } + buf.WriteString(ppr.Ports.String()) return buf.String() } @@ -104,7 +96,7 @@ func parseProtoPortRange(ipProtoPort string) (*ProtoPortRange, error) { if !strings.Contains(ipProtoPort, ":") { ipProtoPort = "*:" + ipProtoPort } - protoStr, portRange, err := parseHostPortRange(ipProtoPort) + protoStr, portRange, err := ParseHostPortRange(ipProtoPort) if err != nil { return nil, err } @@ -126,9 +118,9 @@ func parseProtoPortRange(ipProtoPort string) (*ProtoPortRange, error) { return ppr, nil } -// parseHostPortRange parses hostport as HOST:PORTS where HOST is +// ParseHostPortRange parses hostport as HOST:PORTS where HOST is // returned unchanged and PORTS is is either "*" or PORTLOW-PORTHIGH ranges. 
-func parseHostPortRange(hostport string) (host string, ports PortRange, err error) { +func ParseHostPortRange(hostport string) (host string, ports PortRange, err error) { hostport = strings.ToLower(hostport) colon := strings.LastIndexByte(hostport, ':') if colon < 0 { diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 3edc9aef0254e..b2c1a402c18f3 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -17,6 +17,7 @@ import ( "net/netip" "reflect" "slices" + "strconv" "strings" "time" @@ -1478,6 +1479,15 @@ func (pr PortRange) Contains(port uint16) bool { var PortRangeAny = PortRange{0, 65535} +func (pr PortRange) String() string { + if pr.First == pr.Last { + return strconv.FormatUint(uint64(pr.First), 10) + } else if pr == PortRangeAny { + return "*" + } + return fmt.Sprintf("%d-%d", pr.First, pr.Last) +} + // NetPortRange represents a range of ports that's allowed for one or more IPs. type NetPortRange struct { _ structs.Incomparable From 0a33aae823eb5604f7698ce1dad99605eaed97c2 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 10 Oct 2025 09:03:38 -0700 Subject: [PATCH 0567/1093] util/eventbus: run subscriber functions in a goroutine (#17510) With a channel subscriber, the subscription processing always occurs on another goroutine. The SubscriberFunc (prior to this commit) runs its callbacks on the client's own goroutine. This changes the semantics, though: In addition to more directly pushing back on the publisher, a publisher and subscriber can deadlock in a SubscriberFunc but succeed on a Subscriber. They should behave equivalently regardless which interface they use. Arguably the caller should deal with this by creating its own goroutine if it needs to. However, that loses much of the benefit of the SubscriberFunc API, as it will need to manage the lifecycle of that goroutine. So, for practical ergonomics, let's make the SubscriberFunc do this management on the user's behalf. 
(We discussed doing this in #17432, but decided not to do it yet). We can optimize this approach further, if we need to, without changing the API. Updates #17487 Change-Id: I19ea9e8f246f7b406711f5a16518ef7ff21a1ac9 Signed-off-by: M. J. Fromberger --- util/eventbus/subscribe.go | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 56da413efa5e4..c35c7e7f05682 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -214,7 +214,7 @@ func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent t := vals.Peek().Event.(T) for { // Keep the cases in this select in sync with subscribeState.pump - // above. The only different should be that this select + // above. The only difference should be that this select // delivers a value on s.read. select { case s.read <- t: @@ -282,20 +282,30 @@ func (s *SubscriberFunc[T]) subscribeType() reflect.Type { return reflect.TypeFo // dispatch implements part of the subscriber interface. func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { + t := vals.Peek().Event.(T) + callDone := make(chan struct{}) + go s.runCallback(t, callDone) // Keep the cases in this select in sync with subscribeState.pump - // above. The only different should be that this select + // above. The only difference should be that this select // delivers a value by calling s.read. 
- select { - case val := <-acceptCh(): - vals.Add(val) - case <-ctx.Done(): - return false - case ch := <-snapshot: - ch <- vals.Snapshot() - default: + for { + select { + case <-callDone: + vals.Drop() + return true + case val := <-acceptCh(): + vals.Add(val) + case <-ctx.Done(): + return false + case ch := <-snapshot: + ch <- vals.Snapshot() + } } - t := vals.Peek().Event.(T) - s.read(t) - vals.Drop() - return true +} + +// runCallback invokes the callback on v and closes ch when it returns. +// This should be run in a goroutine. +func (s *SubscriberFunc[T]) runCallback(v T, ch chan struct{}) { + defer close(ch) + s.read(v) } From af15ee9c5f1018a202d4c38043f2686ae3233a91 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 10 Oct 2025 09:28:27 -0700 Subject: [PATCH 0568/1093] wgengine/magicsock: add clientmetrics for TX bytes/packets by af & conn type (#17515) Updates tailscale/corp#33206 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 30 ++++++++++++++++++++++++++++ wgengine/magicsock/magicsock_test.go | 4 ++++ 2 files changed, 34 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index b17aa11ae1285..f855936ce3385 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -808,6 +808,16 @@ func registerMetrics(reg *usermetric.Registry) *metrics { metricRecvDataBytesDERP.Register(&m.inboundBytesDERPTotal) metricRecvDataBytesPeerRelayIPv4.Register(&m.inboundBytesPeerRelayIPv4Total) metricRecvDataBytesPeerRelayIPv6.Register(&m.inboundBytesPeerRelayIPv6Total) + metricSendDataPacketsIPv4.Register(&m.outboundPacketsIPv4Total) + metricSendDataPacketsIPv6.Register(&m.outboundPacketsIPv6Total) + metricSendDataPacketsDERP.Register(&m.outboundPacketsDERPTotal) + metricSendDataPacketsPeerRelayIPv4.Register(&m.outboundPacketsPeerRelayIPv4Total) + metricSendDataPacketsPeerRelayIPv6.Register(&m.outboundPacketsPeerRelayIPv6Total) + 
metricSendDataBytesIPv4.Register(&m.outboundBytesIPv4Total) + metricSendDataBytesIPv6.Register(&m.outboundBytesIPv6Total) + metricSendDataBytesDERP.Register(&m.outboundBytesDERPTotal) + metricSendDataBytesPeerRelayIPv4.Register(&m.outboundBytesPeerRelayIPv4Total) + metricSendDataBytesPeerRelayIPv6.Register(&m.outboundBytesPeerRelayIPv6Total) metricSendUDP.Register(&m.outboundPacketsIPv4Total) metricSendUDP.Register(&m.outboundPacketsIPv6Total) metricSendDERP.Register(&m.outboundPacketsDERPTotal) @@ -856,6 +866,16 @@ func deregisterMetrics() { metricRecvDataBytesDERP.UnregisterAll() metricRecvDataBytesPeerRelayIPv4.UnregisterAll() metricRecvDataBytesPeerRelayIPv6.UnregisterAll() + metricSendDataPacketsIPv4.UnregisterAll() + metricSendDataPacketsIPv6.UnregisterAll() + metricSendDataPacketsDERP.UnregisterAll() + metricSendDataPacketsPeerRelayIPv4.UnregisterAll() + metricSendDataPacketsPeerRelayIPv6.UnregisterAll() + metricSendDataBytesIPv4.UnregisterAll() + metricSendDataBytesIPv6.UnregisterAll() + metricSendDataBytesDERP.UnregisterAll() + metricSendDataBytesPeerRelayIPv4.UnregisterAll() + metricSendDataBytesPeerRelayIPv6.UnregisterAll() metricSendUDP.UnregisterAll() metricSendDERP.UnregisterAll() metricSendPeerRelay.UnregisterAll() @@ -3956,6 +3976,11 @@ var ( metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6") metricRecvDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv4") metricRecvDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv6") + metricSendDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_send_data_derp") + metricSendDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_ipv4") + metricSendDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_ipv6") + metricSendDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_peer_relay_ipv4") + 
metricSendDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_peer_relay_ipv6") // Data bytes (non-disco) metricRecvDataBytesDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_derp") @@ -3963,6 +3988,11 @@ var ( metricRecvDataBytesIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_ipv6") metricRecvDataBytesPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv4") metricRecvDataBytesPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv6") + metricSendDataBytesDERP = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_derp") + metricSendDataBytesIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_ipv4") + metricSendDataBytesIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_ipv6") + metricSendDataBytesPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_peer_relay_ipv4") + metricSendDataBytesPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_peer_relay_ipv6") // Disco packets metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 3468798c18d92..d1d62a26e0e65 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -1300,6 +1300,10 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { // the metrics by 2 to get the expected value. 
// TODO(kradalby): https://github.com/tailscale/tailscale/issues/13420 c.Assert(metricSendUDP.Value(), qt.Equals, metricIPv4TxPackets*2) + c.Assert(metricSendDataPacketsIPv4.Value(), qt.Equals, metricIPv4TxPackets*2) + c.Assert(metricSendDataPacketsDERP.Value(), qt.Equals, metricDERPTxPackets*2) + c.Assert(metricSendDataBytesIPv4.Value(), qt.Equals, metricIPv4TxBytes*2) + c.Assert(metricSendDataBytesDERP.Value(), qt.Equals, metricDERPTxBytes*2) c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, metricIPv4RxPackets*2) c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2) c.Assert(metricRecvDataBytesIPv4.Value(), qt.Equals, metricIPv4RxBytes*2) From 8e98ecb5f7cf2dc8c36b482030bea0c45ab1d123 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Fri, 10 Oct 2025 12:34:27 -0400 Subject: [PATCH 0569/1093] net/netmon: handle net.IPAddr types during interface address parsing (#17523) updates tailscale/tailscale#16836 Android's altNetInterfaces implementation now returns net.IPAddr types which netmon wasn't handling. 
Signed-off-by: Jonathan Nobels --- net/netmon/netmon_test.go | 40 +++++++++++++++++++++++++++++++++++++++ net/netmon/state.go | 8 ++++++++ 2 files changed, 48 insertions(+) diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index 5fcdcc6ccd64e..358dc0373ec14 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -7,6 +7,7 @@ import ( "flag" "net" "net/netip" + "reflect" "sync/atomic" "testing" "time" @@ -267,6 +268,45 @@ func TestIsMajorChangeFrom(t *testing.T) { }) } } +func TestForeachInterface(t *testing.T) { + tests := []struct { + name string + addrs []net.Addr + want []string + }{ + { + name: "Mixed_IPv4_and_IPv6", + addrs: []net.Addr{ + &net.IPNet{IP: net.IPv4(1, 2, 3, 4), Mask: net.CIDRMask(24, 32)}, + &net.IPAddr{IP: net.IP{5, 6, 7, 8}, Zone: ""}, + &net.IPNet{IP: net.ParseIP("2001:db8::1"), Mask: net.CIDRMask(64, 128)}, + &net.IPAddr{IP: net.ParseIP("2001:db8::2"), Zone: ""}, + }, + want: []string{"1.2.3.4", "5.6.7.8", "2001:db8::1", "2001:db8::2"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got []string + ifaces := InterfaceList{ + { + Interface: &net.Interface{Name: "eth0"}, + AltAddrs: tt.addrs, + }, + } + ifaces.ForeachInterface(func(iface Interface, prefixes []netip.Prefix) { + for _, prefix := range prefixes { + ip := prefix.Addr() + got = append(got, ip.String()) + } + }) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + } +} type testOSMon struct { osMon diff --git a/net/netmon/state.go b/net/netmon/state.go index 73497e93f73be..27e3524e8d7c9 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -183,6 +183,10 @@ func (ifaces InterfaceList) ForeachInterfaceAddress(fn func(Interface, netip.Pre if pfx, ok := netaddr.FromStdIPNet(v); ok { fn(iface, pfx) } + case *net.IPAddr: + if ip, ok := netip.AddrFromSlice(v.IP); ok { + fn(iface, netip.PrefixFrom(ip, ip.BitLen())) + } } } } @@ -215,6 +219,10 @@ func (ifaces 
InterfaceList) ForeachInterface(fn func(Interface, []netip.Prefix)) if pfx, ok := netaddr.FromStdIPNet(v); ok { pfxs = append(pfxs, pfx) } + case *net.IPAddr: + if ip, ok := netip.AddrFromSlice(v.IP); ok { + pfxs = append(pfxs, netip.PrefixFrom(ip, ip.BitLen())) + } } } sort.Slice(pfxs, func(i, j int) bool { From a2dc517d7d4f571a5fe67c906d5ab885baf62f49 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Fri, 10 Oct 2025 10:08:24 -0700 Subject: [PATCH 0570/1093] all: specify explicit JSON format for time.Duration (#17307) The default representation of time.Duration has different JSON representation between v1 and v2. Apply an explicit format flag that uses the v1 representation so that this behavior does not change if serialized with v2. Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- net/speedtest/speedtest.go | 2 +- tailcfg/tailcfg.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/speedtest/speedtest.go b/net/speedtest/speedtest.go index 7ab0881cc22f9..a462dbeece42b 100644 --- a/net/speedtest/speedtest.go +++ b/net/speedtest/speedtest.go @@ -24,7 +24,7 @@ const ( // conduct the test. type config struct { Version int `json:"version"` - TestDuration time.Duration `json:"time"` + TestDuration time.Duration `json:"time,format:nano"` Direction Direction `json:"direction"` } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index b2c1a402c18f3..e9f97bdc47054 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2903,7 +2903,7 @@ type SSHAction struct { // SessionDuration, if non-zero, is how long the session can stay open // before being forcefully terminated. - SessionDuration time.Duration `json:"sessionDuration,omitempty"` + SessionDuration time.Duration `json:"sessionDuration,omitempty,format:nano"` // AllowAgentForwarding, if true, allows accepted connections to forward // the ssh agent if requested. 
From e45557afc0e46c9148a4e509e639b4024cf6f197 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Fri, 10 Oct 2025 10:28:36 -0700 Subject: [PATCH 0571/1093] types/persist: add AttestationKey (#17281) Extend Persist with AttestationKey to record a hardware-backed attestation key for the node's identity. Add a flag to tailscaled to allow users to control the use of hardware-backed keys to bind node identity to individual machines. Updates tailscale/corp#31269 Change-Id: Idcf40d730a448d85f07f1bebf387f086d4c58be3 Signed-off-by: Patrick O'Doherty --- cmd/cloner/cloner.go | 7 +++- cmd/cloner/cloner_test.go | 49 ++++++++++++++++++++++++ cmd/cloner/clonerex/clonerex.go | 25 +++++++++++- cmd/cloner/clonerex/clonerex_clone.go | 30 ++++++++++++++- cmd/derper/depaware.txt | 2 +- cmd/stund/depaware.txt | 5 ++- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/tailscaled.go | 55 +++++++++++++++++++-------- control/controlclient/direct.go | 24 ++++++++++++ feature/hooks.go | 19 +++++++++ feature/tpm/attestation.go | 15 +++++++- feature/tpm/tpm.go | 2 + ipn/ipnlocal/hwattest.go | 48 +++++++++++++++++++++++ ipn/ipnlocal/local.go | 38 +++++++++++++++--- ipn/ipnlocal/local_test.go | 21 ++++++++++ ipn/ipnlocal/profiles.go | 16 ++++++-- ipn/ipnlocal/profiles_test.go | 1 + ipn/prefs.go | 1 + ipn/prefs_test.go | 2 +- tailcfg/tailcfg.go | 11 ++++-- types/persist/persist.go | 18 ++++++++- types/persist/persist_clone.go | 4 ++ types/persist/persist_test.go | 2 +- types/persist/persist_view.go | 10 +++-- util/syspolicy/pkey/pkey.go | 4 ++ util/syspolicy/policy_keys.go | 1 + 26 files changed, 370 insertions(+), 42 deletions(-) create mode 100644 ipn/ipnlocal/hwattest.go diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index 15a808141e626..544d00518e113 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -121,7 +121,12 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { continue } if !hasBasicUnderlying(ft) { - writef("dst.%s = *src.%s.Clone()", 
fname, fname) + // don't dereference if the underlying type is an interface + if _, isInterface := ft.Underlying().(*types.Interface); isInterface { + writef("if src.%s != nil { dst.%s = src.%s.Clone() }", fname, fname, fname) + } else { + writef("dst.%s = *src.%s.Clone()", fname, fname) + } continue } } diff --git a/cmd/cloner/cloner_test.go b/cmd/cloner/cloner_test.go index cf1063714afda..3556c14bc109e 100644 --- a/cmd/cloner/cloner_test.go +++ b/cmd/cloner/cloner_test.go @@ -59,3 +59,52 @@ func TestSliceContainer(t *testing.T) { }) } } + +func TestInterfaceContainer(t *testing.T) { + examples := []struct { + name string + in *clonerex.InterfaceContainer + }{ + { + name: "nil", + in: nil, + }, + { + name: "zero", + in: &clonerex.InterfaceContainer{}, + }, + { + name: "with_interface", + in: &clonerex.InterfaceContainer{ + Interface: &clonerex.CloneableImpl{Value: 42}, + }, + }, + { + name: "with_nil_interface", + in: &clonerex.InterfaceContainer{ + Interface: nil, + }, + }, + } + + for _, ex := range examples { + t.Run(ex.name, func(t *testing.T) { + out := ex.in.Clone() + if !reflect.DeepEqual(ex.in, out) { + t.Errorf("Clone() = %v, want %v", out, ex.in) + } + + // Verify no aliasing: modifying the clone should not affect the original + if ex.in != nil && ex.in.Interface != nil { + if impl, ok := out.Interface.(*clonerex.CloneableImpl); ok { + impl.Value = 999 + if origImpl, ok := ex.in.Interface.(*clonerex.CloneableImpl); ok { + if origImpl.Value == 999 { + t.Errorf("Clone() aliased memory with original") + } + } + } + } + }) + } +} diff --git a/cmd/cloner/clonerex/clonerex.go b/cmd/cloner/clonerex/clonerex.go index 96bf8a0bd6e9d..6463f91442a32 100644 --- a/cmd/cloner/clonerex/clonerex.go +++ b/cmd/cloner/clonerex/clonerex.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer +//go:generate go run tailscale.com/cmd/cloner 
-clonefunc=true -type SliceContainer,InterfaceContainer // Package clonerex is an example package for the cloner tool. package clonerex @@ -9,3 +9,26 @@ package clonerex type SliceContainer struct { Slice []*int } + +// Cloneable is an interface with a Clone method. +type Cloneable interface { + Clone() Cloneable +} + +// CloneableImpl is a concrete type that implements Cloneable. +type CloneableImpl struct { + Value int +} + +func (c *CloneableImpl) Clone() Cloneable { + if c == nil { + return nil + } + return &CloneableImpl{Value: c.Value} +} + +// InterfaceContainer has a pointer to an interface field, which tests +// the special handling for interface types in the cloner. +type InterfaceContainer struct { + Interface Cloneable +} diff --git a/cmd/cloner/clonerex/clonerex_clone.go b/cmd/cloner/clonerex/clonerex_clone.go index e334a4e3a1bf4..533d7e723d3ea 100644 --- a/cmd/cloner/clonerex/clonerex_clone.go +++ b/cmd/cloner/clonerex/clonerex_clone.go @@ -35,9 +35,28 @@ var _SliceContainerCloneNeedsRegeneration = SliceContainer(struct { Slice []*int }{}) +// Clone makes a deep copy of InterfaceContainer. +// The result aliases no memory with the original. +func (src *InterfaceContainer) Clone() *InterfaceContainer { + if src == nil { + return nil + } + dst := new(InterfaceContainer) + *dst = *src + if src.Interface != nil { + dst.Interface = src.Interface.Clone() + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _InterfaceContainerCloneNeedsRegeneration = InterfaceContainer(struct { + Interface Cloneable +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of SliceContainer. +// where T is one of SliceContainer,InterfaceContainer. 
func Clone(dst, src any) bool { switch src := src.(type) { case *SliceContainer: @@ -49,6 +68,15 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *InterfaceContainer: + switch dst := dst.(type) { + case *InterfaceContainer: + *dst = *src.Clone() + return true + case **InterfaceContainer: + *dst = src.Clone() + return true + } } return false } diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 2fa1fed45dd90..b8dd28e6bf435 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -132,7 +132,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/logger from tailscale.com/cmd/derper+ tailscale.com/types/netmap from tailscale.com/ipn tailscale.com/types/opt from tailscale.com/envknob+ - tailscale.com/types/persist from tailscale.com/ipn + tailscale.com/types/persist from tailscale.com/ipn+ tailscale.com/types/preftype from tailscale.com/ipn tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index be3e0e0cf8f13..bd8eebb7b1d27 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -59,16 +59,17 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/stunserver from tailscale.com/cmd/stund tailscale.com/net/tsaddr from tailscale.com/tsweb tailscale.com/syncs from tailscale.com/metrics+ - tailscale.com/tailcfg from tailscale.com/version + tailscale.com/tailcfg from tailscale.com/version+ tailscale.com/tsweb from tailscale.com/cmd/stund+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/stund tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/dnstype from tailscale.com/tailcfg tailscale.com/types/ipproto from tailscale.com/tailcfg - tailscale.com/types/key from tailscale.com/tailcfg + tailscale.com/types/key from tailscale.com/tailcfg+ tailscale.com/types/lazy from 
tailscale.com/version+ tailscale.com/types/logger from tailscale.com/tsweb+ tailscale.com/types/opt from tailscale.com/envknob+ + tailscale.com/types/persist from tailscale.com/feature tailscale.com/types/ptr from tailscale.com/tailcfg+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/tailcfg+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 0d3a006a19afd..d5b7b059f8381 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -162,7 +162,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/types/netmap from tailscale.com/ipn+ tailscale.com/types/nettype from tailscale.com/net/netcheck+ tailscale.com/types/opt from tailscale.com/client/tailscale+ - tailscale.com/types/persist from tailscale.com/ipn + tailscale.com/types/persist from tailscale.com/ipn+ tailscale.com/types/preftype from tailscale.com/cmd/tailscale/cli+ tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 92c44f4c13ff3..f14cdcff072b1 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -52,6 +52,7 @@ import ( "tailscale.com/syncs" "tailscale.com/tsd" "tailscale.com/types/flagtype" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/osshare" @@ -111,19 +112,20 @@ var args struct { // or comma-separated list thereof. 
tunname string - cleanUp bool - confFile string // empty, file path, or "vm:user-data" - debug string - port uint16 - statepath string - encryptState boolFlag - statedir string - socketpath string - birdSocketPath string - verbose int - socksAddr string // listen address for SOCKS5 server - httpProxyAddr string // listen address for HTTP proxy server - disableLogs bool + cleanUp bool + confFile string // empty, file path, or "vm:user-data" + debug string + port uint16 + statepath string + encryptState boolFlag + statedir string + socketpath string + birdSocketPath string + verbose int + socksAddr string // listen address for SOCKS5 server + httpProxyAddr string // listen address for HTTP proxy server + disableLogs bool + hardwareAttestation boolFlag } var ( @@ -204,6 +206,9 @@ func main() { flag.BoolVar(&printVersion, "version", false, "print version information and exit") flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") + if buildfeatures.HasTPM { + flag.Var(&args.hardwareAttestation, "hardware-attestation", "use hardware-backed keys to bind node identity to this device when supported by the OS and hardware. Uses TPM 2.0 on Linux and Windows; SecureEnclave on macOS and iOS; and Keystore on Android") + } if f, ok := hookRegisterOutboundProxyFlags.GetOk(); ok { f() } @@ -667,6 +672,9 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID log.Fatalf("failed to start netstack: %v", err) } } + if buildfeatures.HasTPM && args.hardwareAttestation.v { + lb.SetHardwareAttested() + } return lb, nil } @@ -879,9 +887,26 @@ func applyIntegrationTestEnvKnob() { } } -// handleTPMFlags validates the --encrypt-state flag if set, and defaults -// state encryption on if it's supported and compatible with other settings. 
+// handleTPMFlags validates the --encrypt-state and --hardware-attestation flags +// if set, and defaults both to on if supported and compatible with other +// settings. func handleTPMFlags() { + switch { + case args.hardwareAttestation.v: + if _, err := key.NewEmptyHardwareAttestationKey(); err == key.ErrUnsupported { + log.SetFlags(0) + log.Fatalf("--hardware-attestation is not supported on this platform or in this build of tailscaled") + } + case !args.hardwareAttestation.set: + policyHWAttestation, _ := policyclient.Get().GetBoolean(pkey.HardwareAttestation, feature.HardwareAttestationAvailable()) + if !policyHWAttestation { + break + } + if feature.TPMAvailable() { + args.hardwareAttestation.v = true + } + } + switch { case args.encryptState.v: // Explicitly enabled, validate. diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 61886482d8ef2..63a12b2495fd8 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -7,6 +7,8 @@ import ( "bytes" "cmp" "context" + "crypto" + "crypto/sha256" "encoding/binary" "encoding/json" "errors" @@ -604,6 +606,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new if persist.NetworkLockKey.IsZero() { persist.NetworkLockKey = key.NewNLPrivate() } + nlPub := persist.NetworkLockKey.Public() if tryingNewKey.IsZero() { @@ -944,6 +947,27 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap TKAHead: tkaHead, ConnectionHandleForTest: connectionHandleForTest, } + + // If we have a hardware attestation key, sign the node key with it and send + // the key & signature in the map request. 
+ if buildfeatures.HasTPM { + if k := persist.AsStruct().AttestationKey; k != nil && !k.IsZero() { + hwPub := key.HardwareAttestationPublicFromPlatformKey(k) + request.HardwareAttestationKey = hwPub + + t := c.clock.Now() + msg := fmt.Sprintf("%d|%s", t.Unix(), nodeKey.String()) + digest := sha256.Sum256([]byte(msg)) + sig, err := k.Sign(nil, digest[:], crypto.SHA256) + if err != nil { + c.logf("failed to sign node key with hardware attestation key: %v", err) + } else { + request.HardwareAttestationKeySignature = sig + request.HardwareAttestationKeySignatureTimestamp = t + } + } + } + var extraDebugFlags []string if buildfeatures.HasAdvertiseRoutes && hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && ipForwardingBroken(hi.RoutableIPs, c.netMon.InterfaceState()) { diff --git a/feature/hooks.go b/feature/hooks.go index 2eade1eadc4f6..a3c6c0395ee81 100644 --- a/feature/hooks.go +++ b/feature/hooks.go @@ -6,6 +6,9 @@ package feature import ( "net/http" "net/url" + + "tailscale.com/types/logger" + "tailscale.com/types/persist" ) // HookCanAutoUpdate is a hook for the clientupdate package @@ -45,6 +48,8 @@ var HookProxySetTransportGetProxyConnectHeader Hook[func(*http.Transport)] // and available. var HookTPMAvailable Hook[func() bool] +var HookGenerateAttestationKeyIfEmpty Hook[func(p *persist.Persist, logf logger.Logf) (bool, error)] + // TPMAvailable reports whether a TPM device is supported and available. func TPMAvailable() bool { if f, ok := HookTPMAvailable.GetOk(); ok { @@ -52,3 +57,17 @@ func TPMAvailable() bool { } return false } + +// HookHardwareAttestationAvailable is a hook that reports whether hardware +// attestation is supported and available. 
+var HookHardwareAttestationAvailable Hook[func() bool] + +// HardwareAttestationAvailable reports whether hardware attestation is +// supported and available (TPM on Windows/Linux, Secure Enclave on macOS|iOS, +// KeyStore on Android) +func HardwareAttestationAvailable() bool { + if f, ok := HookHardwareAttestationAvailable.GetOk(); ok { + return f() + } + return false +} diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 92617f9954616..5fbda3b17bab3 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -142,13 +142,18 @@ type attestationKeySerialized struct { TPMPublic []byte `json:"tpmPublic"` } +// MarshalJSON implements json.Marshaler. func (ak *attestationKey) MarshalJSON() ([]byte, error) { + if ak == nil || ak.IsZero() { + return []byte("null"), nil + } return json.Marshal(attestationKeySerialized{ TPMPublic: ak.tpmPublic.Bytes(), TPMPrivate: ak.tpmPrivate.Buffer, }) } +// UnmarshalJSON implements json.Unmarshaler. func (ak *attestationKey) UnmarshalJSON(data []byte) (retErr error) { var aks attestationKeySerialized if err := json.Unmarshal(data, &aks); err != nil { @@ -254,6 +259,9 @@ func (ak *attestationKey) Close() error { } func (ak *attestationKey) Clone() key.HardwareAttestationKey { + if ak == nil { + return nil + } return &attestationKey{ tpm: ak.tpm, tpmPrivate: ak.tpmPrivate, @@ -263,4 +271,9 @@ func (ak *attestationKey) Clone() key.HardwareAttestationKey { } } -func (ak *attestationKey) IsZero() bool { return !ak.loaded() } +func (ak *attestationKey) IsZero() bool { + if ak == nil { + return true + } + return !ak.loaded() +} diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index b67cb4e3b23ff..dd37b0506f197 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -40,6 +40,8 @@ var infoOnce = sync.OnceValue(info) func init() { feature.Register("tpm") feature.HookTPMAvailable.Set(tpmSupported) + feature.HookHardwareAttestationAvailable.Set(tpmSupported) + 
hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { hi.TPM = infoOnce() }) diff --git a/ipn/ipnlocal/hwattest.go b/ipn/ipnlocal/hwattest.go new file mode 100644 index 0000000000000..2c93cad4c97ff --- /dev/null +++ b/ipn/ipnlocal/hwattest.go @@ -0,0 +1,48 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tpm + +package ipnlocal + +import ( + "errors" + + "tailscale.com/feature" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/persist" +) + +func init() { + feature.HookGenerateAttestationKeyIfEmpty.Set(generateAttestationKeyIfEmpty) +} + +// generateAttestationKeyIfEmpty generates a new hardware attestation key if +// none exists. It returns true if a new key was generated and stored in +// p.AttestationKey. +func generateAttestationKeyIfEmpty(p *persist.Persist, logf logger.Logf) (bool, error) { + // attempt to generate a new hardware attestation key if none exists + var ak key.HardwareAttestationKey + if p != nil { + ak = p.AttestationKey + } + + if ak == nil || ak.IsZero() { + var err error + ak, err = key.NewHardwareAttestationKey() + if err != nil { + if !errors.Is(err, key.ErrUnsupported) { + logf("failed to create hardware attestation key: %v", err) + } + } else if ak != nil { + logf("using new hardware attestation key: %v", ak.Public()) + if p == nil { + p = &persist.Persist{} + } + p.AttestationKey = ak + return true, nil + } + } + return false, nil +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e04ef9e6c02f7..8cc74c41ebc60 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -392,6 +392,23 @@ type LocalBackend struct { // // See tailscale/corp#29969. overrideExitNodePolicy bool + + // hardwareAttested is whether backend should use a hardware-backed key to + // bind the node identity to this device. 
+ hardwareAttested atomic.Bool +} + +// SetHardwareAttested enables hardware attestation key signatures in map +// requests, if supported on this platform. SetHardwareAttested should be called +// before Start. +func (b *LocalBackend) SetHardwareAttested() { + b.hardwareAttested.Store(true) +} + +// HardwareAttested reports whether hardware-backed attestation keys should be +// used to bind the node's identity to this device. +func (b *LocalBackend) HardwareAttested() bool { + return b.hardwareAttested.Load() } // HealthTracker returns the health tracker for the backend. @@ -2455,10 +2472,23 @@ func (b *LocalBackend) Start(opts ipn.Options) error { if b.reconcilePrefsLocked(newPrefs) { prefsChanged = true } + + // neither UpdatePrefs or reconciliation should change Persist + newPrefs.Persist = b.pm.CurrentPrefs().Persist().AsStruct() + + if buildfeatures.HasTPM { + if genKey, ok := feature.HookGenerateAttestationKeyIfEmpty.GetOk(); ok { + newKey, err := genKey(newPrefs.Persist, b.logf) + if err != nil { + b.logf("failed to populate attestation key from TPM: %v", err) + } + if newKey { + prefsChanged = true + } + } + } + if prefsChanged { - // Neither opts.UpdatePrefs nor prefs reconciliation - // is allowed to modify Persist; retain the old value. 
- newPrefs.Persist = b.pm.CurrentPrefs().Persist().AsStruct() if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil { b.logf("failed to save updated and reconciled prefs: %v", err) } @@ -2491,8 +2521,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error { discoPublic := b.MagicConn().DiscoPublicKey() - var err error - isNetstack := b.sys.IsNetstackRouter() debugFlags := controlDebugFlags if isNetstack { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index c8367d14d3fb4..33ecb688c52a3 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -7030,6 +7030,27 @@ func TestDisplayMessageIPNBus(t *testing.T) { } } +func TestHardwareAttested(t *testing.T) { + b := new(LocalBackend) + + // default false + if got := b.HardwareAttested(); got != false { + t.Errorf("HardwareAttested() = %v, want false", got) + } + + // set true + b.SetHardwareAttested() + if got := b.HardwareAttested(); got != true { + t.Errorf("HardwareAttested() = %v, want true after SetHardwareAttested()", got) + } + + // repeat calls are safe; still true + b.SetHardwareAttested() + if got := b.HardwareAttested(); got != true { + t.Errorf("HardwareAttested() = %v, want true after second SetHardwareAttested()", got) + } +} + func TestDeps(t *testing.T) { deptest.DepChecker{ OnImport: func(pkg string) { diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 67e71aa70a098..9c217637890cc 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -19,7 +19,9 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/persist" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) @@ -645,8 +647,8 @@ func (pm *profileManager) setProfileAsUserDefault(profile ipn.LoginProfileView) return pm.WriteState(k, []byte(profile.Key())) } -func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error) 
{ - bs, err := pm.store.ReadState(key) +func (pm *profileManager) loadSavedPrefs(k ipn.StateKey) (ipn.PrefsView, error) { + bs, err := pm.store.ReadState(k) if err == ipn.ErrStateNotExist || len(bs) == 0 { return defaultPrefs, nil } @@ -654,10 +656,18 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error return ipn.PrefsView{}, err } savedPrefs := ipn.NewPrefs() + + // if supported by the platform, create an empty hardware attestation key to use when deserializing + // to avoid type exceptions from json.Unmarshaling into an interface{}. + hw, _ := key.NewEmptyHardwareAttestationKey() + savedPrefs.Persist = &persist.Persist{ + AttestationKey: hw, + } + if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err) } - pm.logf("using backend prefs for %q: %v", key, savedPrefs.Pretty()) + pm.logf("using backend prefs for %q: %v", k, savedPrefs.Pretty()) // Ignore any old stored preferences for https://login.tailscale.com // as the control server that would override the new default of diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 60c92ff8d3493..deeab2ade9b15 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -151,6 +151,7 @@ func TestProfileDupe(t *testing.T) { ID: tailcfg.UserID(user), LoginName: fmt.Sprintf("user%d@example.com", user), }, + AttestationKey: nil, } } user1Node1 := newPersist(1, 1) diff --git a/ipn/prefs.go b/ipn/prefs.go index 4a0680bbab536..81dd1c1c3dc49 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -709,6 +709,7 @@ func NewPrefs() *Prefs { // Provide default values for options which might be missing // from the json data for any reason. The json can still // override them to false. 
+ p := &Prefs{ // ControlURL is explicitly not set to signal that // it's not yet configured, which relaxes the CLI "up" diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 3339a631ce827..2336164096c14 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -501,7 +501,7 @@ func TestPrefsPretty(t *testing.T) { }, }, "linux", - `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u=""}}`, + `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u="" ak=-}}`, }, { Prefs{ diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index e9f97bdc47054..ea4a9d1fa1748 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -176,7 +176,8 @@ type CapabilityVersion int // - 127: 2025-09-19: can handle C2N /debug/netmap. // - 128: 2025-10-02: can handle C2N /debug/health. // - 129: 2025-10-04: Fixed sleep/wake deadlock in magicsock when using peer relay (PR #17449) -const CurrentCapabilityVersion CapabilityVersion = 129 +// - 130: 2025-10-06: client can send key.HardwareAttestationPublic and key.HardwareAttestationKeySignature in MapRequest +const CurrentCapabilityVersion CapabilityVersion = 130 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -1372,9 +1373,13 @@ type MapRequest struct { // HardwareAttestationKey is the public key of the node's hardware-backed // identity attestation key, if any. HardwareAttestationKey key.HardwareAttestationPublic `json:",omitzero"` - // HardwareAttestationKeySignature is the signature of the NodeKey - // serialized using MarshalText using its hardware attestation key, if any. + // HardwareAttestationKeySignature is the signature of + // "$UNIX_TIMESTAMP|$NODE_KEY" using its hardware attestation key, if any. HardwareAttestationKeySignature []byte `json:",omitempty"` + // HardwareAttestationKeySignatureTimestamp is the time at which the + // HardwareAttestationKeySignature was created, if any. 
This UNIX timestamp + // value is prepended to the node key when signing. + HardwareAttestationKeySignatureTimestamp time.Time `json:",omitzero"` // Stream is whether the client wants to receive multiple MapResponses over // the same HTTP connection. diff --git a/types/persist/persist.go b/types/persist/persist.go index d888a6afb6af5..4b62c79ddd186 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -26,6 +26,7 @@ type Persist struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey `json:",omitempty"` // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. This is used to @@ -84,11 +85,20 @@ func (p *Persist) Equals(p2 *Persist) bool { return false } + var pub, p2Pub key.HardwareAttestationPublic + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + pub = key.HardwareAttestationPublicFromPlatformKey(p.AttestationKey) + } + if p2.AttestationKey != nil && !p2.AttestationKey.IsZero() { + p2Pub = key.HardwareAttestationPublicFromPlatformKey(p2.AttestationKey) + } + return p.PrivateNodeKey.Equal(p2.PrivateNodeKey) && p.OldPrivateNodeKey.Equal(p2.OldPrivateNodeKey) && p.UserProfile.Equal(&p2.UserProfile) && p.NetworkLockKey.Equal(p2.NetworkLockKey) && p.NodeID == p2.NodeID && + pub.Equal(p2Pub) && reflect.DeepEqual(nilIfEmpty(p.DisallowedTKAStateIDs), nilIfEmpty(p2.DisallowedTKAStateIDs)) } @@ -96,12 +106,16 @@ func (p *Persist) Pretty() string { var ( ok, nk key.NodePublic ) + akString := "-" if !p.OldPrivateNodeKey.IsZero() { ok = p.OldPrivateNodeKey.Public() } if !p.PrivateNodeKey.IsZero() { nk = p.PublicNodeKey() } - return fmt.Sprintf("Persist{o=%v, n=%v u=%#v}", - ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName) + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + akString = fmt.Sprintf("%v", p.AttestationKey.Public()) + } + return fmt.Sprintf("Persist{o=%v, n=%v u=%#v 
ak=%s}", + ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName, akString) } diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index 680419ff2f30b..9dbe7e0f6fa6d 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -19,6 +19,9 @@ func (src *Persist) Clone() *Persist { } dst := new(Persist) *dst = *src + if src.AttestationKey != nil { + dst.AttestationKey = src.AttestationKey.Clone() + } dst.DisallowedTKAStateIDs = append(src.DisallowedTKAStateIDs[:0:0], src.DisallowedTKAStateIDs...) return dst } @@ -31,5 +34,6 @@ var _PersistCloneNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index dbf2a6d8c7662..713114b74dcd5 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -21,7 +21,7 @@ func fieldsOf(t reflect.Type) (fields []string) { } func TestPersistEqual(t *testing.T) { - persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "DisallowedTKAStateIDs"} + persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "AttestationKey", "DisallowedTKAStateIDs"} if have := fieldsOf(reflect.TypeFor[Persist]()); !reflect.DeepEqual(have, persistHandles) { t.Errorf("Persist.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, persistHandles) diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 7d1507468fc65..dbf8294ef5a7a 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -89,10 +89,11 @@ func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } // needed to request key rotation -func (v 
PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } -func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } -func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } -func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } +func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } +func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } +func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v PersistView) AttestationKey() tailcfg.StableNodeID { panic("unsupported") } // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. This is used to @@ -110,5 +111,6 @@ var _PersistViewNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/util/syspolicy/pkey/pkey.go b/util/syspolicy/pkey/pkey.go index 79b4af1e615a1..e450625cd1710 100644 --- a/util/syspolicy/pkey/pkey.go +++ b/util/syspolicy/pkey/pkey.go @@ -141,6 +141,10 @@ const ( // It's a noop on other platforms. EncryptState Key = "EncryptState" + // HardwareAttestation is a boolean key that controls whether to use a + // hardware-backed key to bind the node identity to this device. + HardwareAttestation Key = "HardwareAttestation" + // PostureChecking indicates if posture checking is enabled and the client shall gather // posture data. // Key is a string value that specifies an option: "always", "never", "user-decides". 
diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ae902e8c40a49..3a54f9dde5dd7 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -43,6 +43,7 @@ var implicitDefinitions = []*setting.Definition{ setting.NewDefinition(pkey.PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), setting.NewDefinition(pkey.ReconnectAfter, setting.DeviceSetting, setting.DurationValue), setting.NewDefinition(pkey.Tailnet, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.HardwareAttestation, setting.DeviceSetting, setting.BooleanValue), // User policy settings (can be configured on a user- or device-basis): setting.NewDefinition(pkey.AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue), From 7c49cab1a6e6b2d05ab7133c07f6154d6b87f9ca Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 29 Sep 2025 11:44:23 -0600 Subject: [PATCH 0572/1093] clientupdate, util/osshare, util/winutil, version: improve Windows GUI filename resolution and WinUI build awareness On Windows arm64 we are going to need to ship two different GUI builds; one for Win10 (GOARCH=386) and one for Win11 (GOARCH=amd64, tags += winui). Due to quirks in MSI packaging, they cannot both share the same filename. This requires some fixes in places where we have hardcoded "tailscale-ipn" as the GUI filename. We also do some cleanup in clientupdate to ensure that autoupdates will continue to work correctly with the temporary "-winui" package variant. 
Fixes #17480 Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- clientupdate/clientupdate_windows.go | 32 +++++++++++----- util/osshare/filesharingstatus_windows.go | 46 ++++++++++++++--------- util/winutil/winutil_windows.go | 25 ++++++++++++ version/cmdname.go | 12 +++--- version/exename.go | 25 ++++++++++++ version/prop.go | 4 +- version/version_internal_test.go | 35 +++++++++++++++++ 7 files changed, 146 insertions(+), 33 deletions(-) create mode 100644 version/exename.go diff --git a/clientupdate/clientupdate_windows.go b/clientupdate/clientupdate_windows.go index b79d447ad4d30..5faeda6dd70e3 100644 --- a/clientupdate/clientupdate_windows.go +++ b/clientupdate/clientupdate_windows.go @@ -30,11 +30,6 @@ const ( // tailscale.exe process from running before the msiexec process runs and // tries to overwrite ourselves. winMSIEnv = "TS_UPDATE_WIN_MSI" - // winExePathEnv is the environment variable that is set along with - // winMSIEnv and carries the full path of the calling tailscale.exe binary. - // It is used to re-launch the GUI process (tailscale-ipn.exe) after - // install is complete. - winExePathEnv = "TS_UPDATE_WIN_EXE_PATH" // winVersionEnv is the environment variable that is set along with // winMSIEnv and carries the version of tailscale that is being installed. // It is used for logging purposes. 
@@ -78,6 +73,17 @@ func verifyAuthenticode(path string) error { return authenticode.Verify(path, certSubjectTailscale) } +func isTSGUIPresent() bool { + us, err := os.Executable() + if err != nil { + return false + } + + tsgui := filepath.Join(filepath.Dir(us), "tsgui.dll") + _, err = os.Stat(tsgui) + return err == nil +} + func (up *Updater) updateWindows() error { if msi := os.Getenv(winMSIEnv); msi != "" { // stdout/stderr from this part of the install could be lost since the @@ -131,7 +137,15 @@ you can run the command prompt as Administrator one of these ways: return err } up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi")) - pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s-%s.msi", up.Track, ver, arch) + + qualifiers := []string{ver, arch} + // TODO(aaron): Temporary hack so autoupdate still works on winui builds; + // remove when we enable winui by default on the unstable track. + if isTSGUIPresent() { + qualifiers = append(qualifiers, "winui") + } + + pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s.msi", up.Track, strings.Join(qualifiers, "-")) msiTarget := filepath.Join(msiDir, path.Base(pkgsPath)) if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil { return err @@ -145,7 +159,7 @@ you can run the command prompt as Administrator one of these ways: up.Logf("making tailscale.exe copy to switch to...") up.cleanupOldDownloads(filepath.Join(os.TempDir(), updaterPrefix+"-*.exe")) - selfOrig, selfCopy, err := makeSelfCopy() + _, selfCopy, err := makeSelfCopy() if err != nil { return err } @@ -153,7 +167,7 @@ you can run the command prompt as Administrator one of these ways: up.Logf("running tailscale.exe copy for final install...") cmd := exec.Command(selfCopy, "update") - cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig, winVersionEnv+"="+ver) + cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winVersionEnv+"="+ver) cmd.Stdout = up.Stderr cmd.Stderr = up.Stderr cmd.Stdin = os.Stdin @@ -189,7 +203,7 @@ func (up 
*Updater) installMSI(msi string) error { case windows.ERROR_SUCCESS_REBOOT_REQUIRED: // In most cases, updating Tailscale should not require a reboot. // If it does, it might be because we failed to close the GUI - // and the installer couldn't replace tailscale-ipn.exe. + // and the installer couldn't replace its executable. // The old GUI will continue to run until the next reboot. // Not ideal, but also not a retryable error. up.Logf("[unexpected] reboot required") diff --git a/util/osshare/filesharingstatus_windows.go b/util/osshare/filesharingstatus_windows.go index 999fc1cf77372..c125de15990c3 100644 --- a/util/osshare/filesharingstatus_windows.go +++ b/util/osshare/filesharingstatus_windows.go @@ -9,30 +9,31 @@ import ( "fmt" "os" "path/filepath" - "sync" + "runtime" "golang.org/x/sys/windows/registry" + "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/util/winutil" ) const ( sendFileShellKey = `*\shell\tailscale` ) -var ipnExePath struct { - sync.Mutex - cache string // absolute path of tailscale-ipn.exe, populated lazily on first use -} +var ipnExePath lazy.SyncValue[string] // absolute path of the GUI executable func getIpnExePath(logf logger.Logf) string { - ipnExePath.Lock() - defer ipnExePath.Unlock() - - if ipnExePath.cache != "" { - return ipnExePath.cache + exe, err := winutil.GUIPathFromReg() + if err == nil { + return exe } - // Find the absolute path of tailscale-ipn.exe assuming that it's in the same + return findGUIInSameDirAsThisExe(logf) +} + +func findGUIInSameDirAsThisExe(logf logger.Logf) string { + // Find the absolute path of the GUI, assuming that it's in the same // directory as this executable (tailscaled.exe). 
p, err := os.Executable() if err != nil { @@ -43,14 +44,23 @@ func getIpnExePath(logf logger.Logf) string { logf("filepath.EvalSymlinks error: %v", err) return "" } - p = filepath.Join(filepath.Dir(p), "tailscale-ipn.exe") if p, err = filepath.Abs(p); err != nil { logf("filepath.Abs error: %v", err) return "" } - ipnExePath.cache = p - - return p + d := filepath.Dir(p) + candidates := []string{"tailscale-ipn.exe"} + if runtime.GOARCH == "arm64" { + // This name may be used on Windows 10 ARM64. + candidates = append(candidates, "tailscale-gui-386.exe") + } + for _, c := range candidates { + testPath := filepath.Join(d, c) + if _, err := os.Stat(testPath); err == nil { + return testPath + } + } + return "" } // SetFileSharingEnabled adds/removes "Send with Tailscale" from the Windows shell menu. @@ -64,7 +74,9 @@ func SetFileSharingEnabled(enabled bool, logf logger.Logf) { } func enableFileSharing(logf logger.Logf) { - path := getIpnExePath(logf) + path := ipnExePath.Get(func() string { + return getIpnExePath(logf) + }) if path == "" { return } @@ -79,7 +91,7 @@ func enableFileSharing(logf logger.Logf) { logf("k.SetStringValue error: %v", err) return } - if err := k.SetStringValue("Icon", path+",0"); err != nil { + if err := k.SetStringValue("Icon", path+",1"); err != nil { logf("k.SetStringValue error: %v", err) return } diff --git a/util/winutil/winutil_windows.go b/util/winutil/winutil_windows.go index 5dde9a347d7f7..c935b210e9e6a 100644 --- a/util/winutil/winutil_windows.go +++ b/util/winutil/winutil_windows.go @@ -8,8 +8,10 @@ import ( "fmt" "log" "math" + "os" "os/exec" "os/user" + "path/filepath" "reflect" "runtime" "strings" @@ -33,6 +35,10 @@ var ErrNoShell = errors.New("no Shell process is present") // ErrNoValue is returned when the value doesn't exist in the registry. var ErrNoValue = registry.ErrNotExist +// ErrBadRegValueFormat is returned when a string value does not match the +// expected format. 
+var ErrBadRegValueFormat = errors.New("registry value formatted incorrectly") + // GetDesktopPID searches the PID of the process that's running the // currently active desktop. Returns ErrNoShell if the shell is not present. // Usually the PID will be for explorer.exe. @@ -947,3 +953,22 @@ func IsDomainName(name string) (bool, error) { return isDomainName(name16) } + +// GUIPathFromReg obtains the path to the client GUI executable from the +// registry value that was written during installation. +func GUIPathFromReg() (string, error) { + regPath, err := GetRegString("GUIPath") + if err != nil { + return "", err + } + + if !filepath.IsAbs(regPath) { + return "", ErrBadRegValueFormat + } + + if _, err := os.Stat(regPath); err != nil { + return "", err + } + + return regPath, nil +} diff --git a/version/cmdname.go b/version/cmdname.go index 51e065438e3a5..c38544ce1642c 100644 --- a/version/cmdname.go +++ b/version/cmdname.go @@ -12,7 +12,7 @@ import ( "io" "os" "path" - "path/filepath" + "runtime" "strings" ) @@ -30,7 +30,7 @@ func CmdName() string { func cmdName(exe string) string { // fallbackName, the lowercase basename of the executable, is what we return if // we can't find the Go module metadata embedded in the file. - fallbackName := filepath.Base(strings.TrimSuffix(strings.ToLower(exe), ".exe")) + fallbackName := prepExeNameForCmp(exe, runtime.GOARCH) var ret string info, err := findModuleInfo(exe) @@ -45,10 +45,10 @@ func cmdName(exe string) string { break } } - if strings.HasPrefix(ret, "wg") && fallbackName == "tailscale-ipn" { - // The tailscale-ipn.exe binary for internal build system packaging reasons - // has a path of "tailscale.io/win/wg64", "tailscale.io/win/wg32", etc. - // Ignore that name and use "tailscale-ipn" instead. + if runtime.GOOS == "windows" && strings.HasPrefix(ret, "gui") && checkPreppedExeNameForGUI(fallbackName) { + // The GUI binary for internal build system packaging reasons + // has a path of "tailscale.io/win/gui". 
+ // Ignore that name and use fallbackName instead. return fallbackName } if ret == "" { diff --git a/version/exename.go b/version/exename.go new file mode 100644 index 0000000000000..d5047c2038ffe --- /dev/null +++ b/version/exename.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package version + +import ( + "path/filepath" + "strings" +) + +// prepExeNameForCmp strips any extension and arch suffix from exe, and +// lowercases it. +func prepExeNameForCmp(exe, arch string) string { + baseNoExt := strings.ToLower(strings.TrimSuffix(filepath.Base(exe), filepath.Ext(exe))) + archSuffix := "-" + arch + return strings.TrimSuffix(baseNoExt, archSuffix) +} + +func checkPreppedExeNameForGUI(preppedExeName string) bool { + return preppedExeName == "tailscale-ipn" || preppedExeName == "tailscale-gui" +} + +func isGUIExeName(exe, arch string) bool { + return checkPreppedExeNameForGUI(prepExeNameForCmp(exe, arch)) +} diff --git a/version/prop.go b/version/prop.go index 9327e6fe6d0f4..0d6a5c00df375 100644 --- a/version/prop.go +++ b/version/prop.go @@ -159,7 +159,9 @@ func IsWindowsGUI() bool { if err != nil { return false } - return strings.EqualFold(exe, "tailscale-ipn.exe") || strings.EqualFold(exe, "tailscale-ipn") + // It is okay to use GOARCH here because we're checking whether our + // _own_ process is the GUI. 
+ return isGUIExeName(exe, runtime.GOARCH) }) } diff --git a/version/version_internal_test.go b/version/version_internal_test.go index 19aeab44228bd..b3b848276e820 100644 --- a/version/version_internal_test.go +++ b/version/version_internal_test.go @@ -25,3 +25,38 @@ func TestIsValidLongWithTwoRepos(t *testing.T) { } } } + +func TestPrepExeNameForCmp(t *testing.T) { + cases := []struct { + exe string + want string + }{ + { + "tailscale-ipn.exe", + "tailscale-ipn", + }, + { + "tailscale-gui-amd64.exe", + "tailscale-gui", + }, + { + "tailscale-gui-amd64", + "tailscale-gui", + }, + { + "tailscale-ipn", + "tailscale-ipn", + }, + { + "TaIlScAlE-iPn.ExE", + "tailscale-ipn", + }, + } + + for _, c := range cases { + got := prepExeNameForCmp(c.exe, "amd64") + if got != c.want { + t.Errorf("prepExeNameForCmp(%q) = %q; want %q", c.exe, got, c.want) + } + } +} From d8a6d0183c35db1b8e7bf35d887772244c71e806 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Fri, 10 Oct 2025 12:24:52 -0700 Subject: [PATCH 0573/1093] ipn/ipnlocal: strip AttestationKey in redacted prefs view (#17527) Updates tailscale/corp#31269 Signed-off-by: Patrick O'Doherty --- ipn/ipnlocal/local.go | 1 + 1 file changed, 1 insertion(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8cc74c41ebc60..36e4ad8a589e9 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1216,6 +1216,7 @@ func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView { p2.Persist.PrivateNodeKey = key.NodePrivate{} p2.Persist.OldPrivateNodeKey = key.NodePrivate{} p2.Persist.NetworkLockKey = key.NLPrivate{} + p2.Persist.AttestationKey = nil return p2.View() } From 005e264b5456f90d52920c2d396f307c645e1cbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 10 Oct 2025 15:33:30 -0400 Subject: [PATCH 0574/1093] util/eventbus/eventbustest: add support for synctest instead of timers (#17522) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before 
synctest, timers was needed to allow the events to flow into the test bus. There is still a timer, but this one is not derived from the test deadline and it is mostly arbitrary as synctest will render it practically non-existent. With this approach, tests that do not need to test for the absence of events do not rely on synctest. Updates #15160 Signed-off-by: Claus Lensbøl --- health/health_test.go | 117 ++++++++------ net/netmon/netmon_test.go | 2 +- util/eventbus/eventbustest/doc.go | 14 ++ util/eventbus/eventbustest/eventbustest.go | 35 ++-- .../eventbustest/eventbustest_test.go | 150 +++++++++--------- util/eventbus/eventbustest/examples_test.go | 59 +++++++ 6 files changed, 231 insertions(+), 146 deletions(-) diff --git a/health/health_test.go b/health/health_test.go index 3b5ebbb38d22a..60707177603e9 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -5,12 +5,14 @@ package health import ( "errors" + "flag" "fmt" "maps" "reflect" "slices" "strconv" "testing" + "testing/synctest" "time" "github.com/google/go-cmp/cmp" @@ -26,6 +28,8 @@ import ( "tailscale.com/version" ) +var doDebug = flag.Bool("debug", false, "Enable debug logging") + func wantChange(c Change) func(c Change) (bool, error) { return func(cEv Change) (bool, error) { if cEv.ControlHealthChanged != c.ControlHealthChanged { @@ -724,72 +728,83 @@ func TestControlHealthNotifies(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - bus := eventbustest.NewBus(t) - tw := eventbustest.NewWatcher(t, bus) - tw.TimeOut = time.Second - - ht := NewTracker(bus) - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() - - // Expect events at starup, before doing anything else - if err := eventbustest.ExpectExactly(tw, - eventbustest.Type[Change](), // warming-up - eventbustest.Type[Change](), // is-using-unstable-version - eventbustest.Type[Change](), // not-in-map-poll - ); err != nil { - t.Errorf("startup error: %v", err) - } + synctest.Test(t, func(t 
*testing.T) { + bus := eventbustest.NewBus(t) + if *doDebug { + eventbustest.LogAllEvents(t, bus) + } + tw := eventbustest.NewWatcher(t, bus) + + ht := NewTracker(bus) + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + // Expect events at starup, before doing anything else + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[Change](), // warming-up + eventbustest.Type[Change](), // is-using-unstable-version + eventbustest.Type[Change](), // not-in-map-poll + ); err != nil { + t.Errorf("startup error: %v", err) + } - // Only set initial state if we need to - if len(test.initialState) != 0 { - ht.SetControlHealth(test.initialState) - if err := eventbustest.ExpectExactly(tw, eventbustest.Type[Change]()); err != nil { - t.Errorf("initial state error: %v", err) + // Only set initial state if we need to + if len(test.initialState) != 0 { + ht.SetControlHealth(test.initialState) + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, eventbustest.Type[Change]()); err != nil { + t.Errorf("initial state error: %v", err) + } } - } - ht.SetControlHealth(test.newState) + ht.SetControlHealth(test.newState) + // Close the bus early to avoid timers triggering more events. 
+ bus.Close() - if err := eventbustest.ExpectExactly(tw, test.wantEvents...); err != nil { - t.Errorf("event error: %v", err) - } + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, test.wantEvents...); err != nil { + t.Errorf("event error: %v", err) + } + }) }) } } func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { - bus := eventbustest.NewBus(t) - tw := eventbustest.NewWatcher(t, bus) - tw.TimeOut = 100 * time.Millisecond - ht := NewTracker(bus) - ht.SetIPNState("NeedsLogin", true) + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) + ht.SetIPNState("NeedsLogin", true) - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "control-health": {}, - }) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "control-health": {}, + }) - state := ht.CurrentState() - _, ok := state.Warnings["control-health"] + state := ht.CurrentState() + _, ok := state.Warnings["control-health"] - if ok { - t.Error("got a warning with code 'control-health', want none") - } + if ok { + t.Error("got a warning with code 'control-health', want none") + } - // An event is emitted when SetIPNState is run above, - // so only fail on the second event. - eventCounter := 0 - expectOne := func(c *Change) error { - eventCounter++ - if eventCounter == 1 { - return nil + // An event is emitted when SetIPNState is run above, + // so only fail on the second event. 
+ eventCounter := 0 + expectOne := func(c *Change) error { + eventCounter++ + if eventCounter == 1 { + return nil + } + return errors.New("saw more than 1 event") } - return errors.New("saw more than 1 event") - } - if err := eventbustest.Expect(tw, expectOne); err == nil { - t.Error("event got emitted, want it to not be called") - } + synctest.Wait() + if err := eventbustest.Expect(tw, expectOne); err == nil { + t.Error("event got emitted, want it to not be called") + } + }) } // TestCurrentStateETagControlHealth tests that the ETag on an [UnhealthyState] diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index 358dc0373ec14..6a87cedb8e7ea 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -144,7 +144,7 @@ func TestMonitorMode(t *testing.T) { <-done t.Logf("%v callbacks", n) case "eventbus": - tw.TimeOut = *monitorDuration + time.AfterFunc(*monitorDuration, bus.Close) n := 0 mon.Start() eventbustest.Expect(tw, func(event *ChangeDelta) (bool, error) { diff --git a/util/eventbus/eventbustest/doc.go b/util/eventbus/eventbustest/doc.go index 9e39504a83521..1e9928b9d7cf9 100644 --- a/util/eventbus/eventbustest/doc.go +++ b/util/eventbus/eventbustest/doc.go @@ -39,6 +39,20 @@ // checks that the stream contains exactly the given events in the given order, // and no others. // +// To test for the absence of events, use [ExpectExactly] without any +// expected events, along side [testing/synctest] to avoid waiting for timers +// to ensure that no events are produced. This will look like: +// +// synctest.Test(t, func(t *testing.T) { +// bus := eventbustest.NewBus(t) +// tw := eventbustest.NewWatcher(t, bus) +// somethingThatShouldNotEmitsSomeEvent() +// synctest.Wait() +// if err := eventbustest.ExpectExactly(tw); err != nil { +// t.Errorf("Expected no events or errors, got %v", err) +// } +// }) +// // See the [usage examples]. 
// // [usage examples]: https://github.com/tailscale/tailscale/blob/main/util/eventbus/eventbustest/examples_test.go diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index 3f7bf45531db4..fd8a150812e0d 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -27,13 +27,9 @@ func NewBus(t testing.TB) *eventbus.Bus { // [Expect] and [ExpectExactly], to verify that the desired events were captured. func NewWatcher(t *testing.T, bus *eventbus.Bus) *Watcher { tw := &Watcher{ - mon: bus.Debugger().WatchBus(), - TimeOut: 5 * time.Second, - chDone: make(chan bool, 1), - events: make(chan any, 100), - } - if deadline, ok := t.Deadline(); ok { - tw.TimeOut = deadline.Sub(time.Now()) + mon: bus.Debugger().WatchBus(), + chDone: make(chan bool, 1), + events: make(chan any, 100), } t.Cleanup(tw.done) go tw.watch() @@ -41,16 +37,15 @@ func NewWatcher(t *testing.T, bus *eventbus.Bus) *Watcher { } // Watcher monitors and holds events for test expectations. +// The Watcher works with [synctest], and some scenarios does require the use of +// [synctest]. This is amongst others true if you are testing for the absence of +// events. +// +// For usage examples, see the documentation in the top of the package. type Watcher struct { mon *eventbus.Subscriber[eventbus.RoutedEvent] events chan any chDone chan bool - // TimeOut defines when the Expect* functions should stop looking for events - // coming from the Watcher. The value is set by [NewWatcher] and defaults to - // the deadline passed in by [testing.T]. If looking to verify the absence - // of an event, the TimeOut can be set to a lower value after creating the - // Watcher. 
- TimeOut time.Duration } // Type is a helper representing the expectation to see an event of type T, without @@ -103,7 +98,8 @@ func Expect(tw *Watcher, filters ...any) error { } else if ok { head++ } - case <-time.After(tw.TimeOut): + // Use synctest when you want an error here. + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock return fmt.Errorf( "timed out waiting for event, saw %d events, %d was expected", eventCount, len(filters)) @@ -118,12 +114,16 @@ func Expect(tw *Watcher, filters ...any) error { // in a given order, returning an error if the events does not match the given list // exactly. The given events are represented by a function as described in // [Expect]. Use [Expect] if other events are allowed. +// +// If you are expecting ExpectExactly to fail because of a missing event, or if +// you are testing for the absence of events, call [synctest.Wait] after +// actions that would publish an event, but before calling ExpectExactly. func ExpectExactly(tw *Watcher, filters ...any) error { if len(filters) == 0 { select { case event := <-tw.events: return fmt.Errorf("saw event type %s, expected none", reflect.TypeOf(event)) - case <-time.After(tw.TimeOut): + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock return nil } } @@ -146,7 +146,7 @@ func ExpectExactly(tw *Watcher, filters ...any) error { return fmt.Errorf( "expected test ok for type %s, at index %d", argType, pos) } - case <-time.After(tw.TimeOut): + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock return fmt.Errorf( "timed out waiting for event, saw %d events, %d was expected", eventCount, len(filters)) @@ -162,6 +162,9 @@ func (tw *Watcher) watch() { select { case event := <-tw.mon.Events(): tw.events <- event.Event + case <-tw.mon.Done(): + tw.done() + return case <-tw.chDone: tw.mon.Close() return diff --git a/util/eventbus/eventbustest/eventbustest_test.go 
b/util/eventbus/eventbustest/eventbustest_test.go index 2d126767d13ce..ac454023c9c47 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -8,7 +8,7 @@ import ( "fmt" "strings" "testing" - "time" + "testing/synctest" "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" @@ -110,37 +110,35 @@ func TestExpectFilter(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - bus := eventbustest.NewBus(t) - t.Cleanup(bus.Close) + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) - if *doDebug { - eventbustest.LogAllEvents(t, bus) - } - tw := eventbustest.NewWatcher(t, bus) + if *doDebug { + eventbustest.LogAllEvents(t, bus) + } + tw := eventbustest.NewWatcher(t, bus) - // TODO(cmol): When synctest is out of experimental, use that instead: - // https://go.dev/blog/synctest - tw.TimeOut = 10 * time.Millisecond + client := bus.Client("testClient") + updater := eventbus.Publish[EventFoo](client) - client := bus.Client("testClient") - defer client.Close() - updater := eventbus.Publish[EventFoo](client) + for _, i := range tt.events { + updater.Publish(EventFoo{i}) + } - for _, i := range tt.events { - updater.Publish(EventFoo{i}) - } + synctest.Wait() - if err := eventbustest.Expect(tw, tt.expectFunc); err != nil { - if tt.wantErr == "" { - t.Errorf("Expect[EventFoo]: unexpected error: %v", err) - } else if !strings.Contains(err.Error(), tt.wantErr) { - t.Errorf("Expect[EventFoo]: err = %v, want %q", err, tt.wantErr) - } else { - t.Logf("Got expected error: %v (OK)", err) + if err := eventbustest.Expect(tw, tt.expectFunc); err != nil { + if tt.wantErr == "" { + t.Errorf("Expect[EventFoo]: unexpected error: %v", err) + } else if !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("Expect[EventFoo]: err = %v, want %q", err, tt.wantErr) + } else { + t.Logf("Got expected error: %v (OK)", err) + } + } else if tt.wantErr != "" { + 
t.Errorf("Expect[EventFoo]: unexpectedly succeeded, want error %q", tt.wantErr) } - } else if tt.wantErr != "" { - t.Errorf("Expect[EventFoo]: unexpectedly succeeded, want error %q", tt.wantErr) - } + }) }) } } @@ -244,37 +242,35 @@ func TestExpectEvents(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - bus := eventbustest.NewBus(t) - t.Cleanup(bus.Close) + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) - tw := eventbustest.NewWatcher(t, bus) - // TODO(cmol): When synctest is out of experimental, use that instead: - // https://go.dev/blog/synctest - tw.TimeOut = 100 * time.Millisecond + tw := eventbustest.NewWatcher(t, bus) - client := bus.Client("testClient") - defer client.Close() - updaterFoo := eventbus.Publish[EventFoo](client) - updaterBar := eventbus.Publish[EventBar](client) - updaterBaz := eventbus.Publish[EventBaz](client) + client := bus.Client("testClient") + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) - for _, ev := range tt.events { - switch ev.(type) { - case EventFoo: - evCast := ev.(EventFoo) - updaterFoo.Publish(evCast) - case EventBar: - evCast := ev.(EventBar) - updaterBar.Publish(evCast) - case EventBaz: - evCast := ev.(EventBaz) - updaterBaz.Publish(evCast) + for _, ev := range tt.events { + switch ev := ev.(type) { + case EventFoo: + evCast := ev + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev + updaterBaz.Publish(evCast) + } } - } - if err := eventbustest.Expect(tw, tt.expectEvents...); (err != nil) != tt.wantErr { - t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) - } + synctest.Wait() + if err := eventbustest.Expect(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) }) } } @@ -378,37 +374,35 @@ func 
TestExpectExactlyEventsFilter(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - bus := eventbustest.NewBus(t) - t.Cleanup(bus.Close) + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) - tw := eventbustest.NewWatcher(t, bus) - // TODO(cmol): When synctest is out of experimental, use that instead: - // https://go.dev/blog/synctest - tw.TimeOut = 10 * time.Millisecond + tw := eventbustest.NewWatcher(t, bus) - client := bus.Client("testClient") - defer client.Close() - updaterFoo := eventbus.Publish[EventFoo](client) - updaterBar := eventbus.Publish[EventBar](client) - updaterBaz := eventbus.Publish[EventBaz](client) + client := bus.Client("testClient") + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) - for _, ev := range tt.events { - switch ev.(type) { - case EventFoo: - evCast := ev.(EventFoo) - updaterFoo.Publish(evCast) - case EventBar: - evCast := ev.(EventBar) - updaterBar.Publish(evCast) - case EventBaz: - evCast := ev.(EventBaz) - updaterBaz.Publish(evCast) + for _, ev := range tt.events { + switch ev := ev.(type) { + case EventFoo: + evCast := ev + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev + updaterBaz.Publish(evCast) + } } - } - if err := eventbustest.ExpectExactly(tw, tt.expectEvents...); (err != nil) != tt.wantErr { - t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) - } + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) }) } } diff --git a/util/eventbus/eventbustest/examples_test.go b/util/eventbus/eventbustest/examples_test.go index bc06e60a9230b..c848113173bc6 100644 --- a/util/eventbus/eventbustest/examples_test.go +++ b/util/eventbus/eventbustest/examples_test.go @@ -5,6 
+5,8 @@ package eventbustest_test import ( "testing" + "testing/synctest" + "time" "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" @@ -199,3 +201,60 @@ func TestExample_ExpectExactly_WithMultipleFunctions(t *testing.T) { // Output: // expected event type eventbustest.eventOfCuriosity, saw eventbustest.eventOfNoConcern, at index 1 } + +func TestExample_ExpectExactly_NoEvents(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + go func() { + // Do some work that does not produce an event + time.Sleep(10 * time.Second) + t.Log("Not producing events") + }() + + // Wait for all other routines to be stale before continuing to ensure that + // there is nothing running that would produce an event at a later time. + synctest.Wait() + + if err := eventbustest.ExpectExactly(tw); err != nil { + t.Error(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK + }) +} + +func TestExample_ExpectExactly_OneEventExpectingTwo(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + type eventOfInterest struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + client := bus.Client("testClient") + updater := eventbus.Publish[eventOfInterest](client) + + go func() { + // Do some work that does not produce an event + time.Sleep(10 * time.Second) + updater.Publish(eventOfInterest{}) + }() + + // Wait for all other routines to be stale before continuing to ensure that + // there is nothing running that would produce an event at a later time. 
+ synctest.Wait() + + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[eventOfInterest](), + eventbustest.Type[eventOfInterest](), + ); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // timed out waiting for event, saw 1 events, 2 was expected + }) +} From 1a93a8a704b4f07a66d5086bfc1b7dfb1a3c6406 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 5 Oct 2025 12:43:54 -0700 Subject: [PATCH 0575/1093] feature/tpm: quiet log output a bit I was debugging a customer issue and saw in their 1.88.3 logs: TPM: error opening: stat /dev/tpm0: no such file or directory That's unnecessary output. The lack of TPM will be reported by them having a nil Hostinfo.TPM, which is plenty elsewhere in logs. Let's only write out an "error opening" line if it's an interesting error. (perhaps permissions, or EIO, etc) Updates #cleanup Change-Id: I3f987f6bf1d3ada03473ca3eef555e9cfafc7677 Signed-off-by: Brad Fitzpatrick --- feature/tpm/tpm.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index dd37b0506f197..6acb600ecd56e 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -73,10 +73,16 @@ func info() *tailcfg.TPMInfo { tpm, err := open() if err != nil { - logf("error opening: %v", err) + if !os.IsNotExist(err) || verboseTPM() { + // Only log if it's an interesting error, not just "no TPM", + // as is very common, especially in VMs. 
+ logf("error opening: %v", err) + } return nil } - logf("successfully opened") + if verboseTPM() { + logf("successfully opened") + } defer tpm.Close() info := new(tailcfg.TPMInfo) From 743e5ac6960ef331e93d901faf58b7f4fea296f0 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 15 Oct 2025 09:13:06 -0700 Subject: [PATCH 0576/1093] cmd/tailscale: surface relay-server-port set flag (#17528) Fixes tailscale/corp#31186 Signed-off-by: Jordan Whited --- cmd/tailscale/cli/set.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 1807ada1329c3..43f8bbbc34afd 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -85,7 +85,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version") setf.BoolVar(&setArgs.reportPosture, "report-posture", false, "allow management plane to gather device posture information") setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") - setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", hidden+"UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") + setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", "UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") ffcomplete.Flag(setf, "exit-node", func(args []string) ([]string, ffcomplete.ShellCompDirective, error) { st, err := localClient.Status(context.Background()) From 6d897c4ab4de855d33a57745d392146886c1e60f Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 15 Oct 2025 14:04:45 -0700 Subject: [PATCH 0577/1093] types/netlogtype: remove CBOR representation (#17545) Remove CBOR 
representation since it was never used. We should support CBOR in the future, but remove it for now so that it is less work to add more fields. Also, rely on just omitzero for JSON now that it is supported in Go 1.24. Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- types/netlogtype/netlogtype.go | 43 ++++++++++------------------- types/netlogtype/netlogtype_test.go | 7 ----- 2 files changed, 14 insertions(+), 36 deletions(-) diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index f2fa2bda92366..0f552611e85e2 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -12,20 +12,17 @@ import ( "tailscale.com/types/ipproto" ) -// TODO(joetsai): Remove "omitempty" if "omitzero" is ever supported in both -// the v1 and v2 "json" packages. - // Message is the log message that captures network traffic. type Message struct { - NodeID tailcfg.StableNodeID `json:"nodeId" cbor:"0,keyasint"` // e.g., "n123456CNTRL" + NodeID tailcfg.StableNodeID `json:"nodeId"` // e.g., "n123456CNTRL" - Start time.Time `json:"start" cbor:"12,keyasint"` // inclusive - End time.Time `json:"end" cbor:"13,keyasint"` // inclusive + Start time.Time `json:"start"` // inclusive + End time.Time `json:"end"` // inclusive - VirtualTraffic []ConnectionCounts `json:"virtualTraffic,omitempty" cbor:"14,keyasint,omitempty"` - SubnetTraffic []ConnectionCounts `json:"subnetTraffic,omitempty" cbor:"15,keyasint,omitempty"` - ExitTraffic []ConnectionCounts `json:"exitTraffic,omitempty" cbor:"16,keyasint,omitempty"` - PhysicalTraffic []ConnectionCounts `json:"physicalTraffic,omitempty" cbor:"17,keyasint,omitempty"` + VirtualTraffic []ConnectionCounts `json:"virtualTraffic,omitempty"` + SubnetTraffic []ConnectionCounts `json:"subnetTraffic,omitempty"` + ExitTraffic []ConnectionCounts `json:"exitTraffic,omitempty"` + PhysicalTraffic []ConnectionCounts `json:"physicalTraffic,omitempty"` } const ( @@ -51,18 +48,6 @@ const ( // this object is nested within
an array. // It assumes that netip.Addr never has IPv6 zones. MaxConnectionCountsJSONSize = len(maxJSONConnCounts) - - maxCBORConnCounts = "\xbf" + maxCBORConn + maxCBORCounts + "\xff" - maxCBORConn = "\x00" + maxCBORProto + "\x01" + maxCBORAddrPort + "\x02" + maxCBORAddrPort - maxCBORProto = "\x18\xff" - maxCBORAddrPort = "\x52\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" - maxCBORCounts = "\x0c" + maxCBORCount + "\x0d" + maxCBORCount + "\x0e" + maxCBORCount + "\x0f" + maxCBORCount - maxCBORCount = "\x1b\xff\xff\xff\xff\xff\xff\xff\xff" - - // MaxConnectionCountsCBORSize is the maximum size of a ConnectionCounts - // when it is serialized as CBOR. - // It assumes that netip.Addr never has IPv6 zones. - MaxConnectionCountsCBORSize = len(maxCBORConnCounts) ) // ConnectionCounts is a flattened struct of both a connection and counts. @@ -73,19 +58,19 @@ type ConnectionCounts struct { // Connection is a 5-tuple of proto, source and destination IP and port. type Connection struct { - Proto ipproto.Proto `json:"proto,omitzero,omitempty" cbor:"0,keyasint,omitempty"` - Src netip.AddrPort `json:"src,omitzero,omitempty" cbor:"1,keyasint,omitempty"` - Dst netip.AddrPort `json:"dst,omitzero,omitempty" cbor:"2,keyasint,omitempty"` + Proto ipproto.Proto `json:"proto,omitzero"` + Src netip.AddrPort `json:"src,omitzero"` + Dst netip.AddrPort `json:"dst,omitzero"` } func (c Connection) IsZero() bool { return c == Connection{} } // Counts are statistics about a particular connection. 
type Counts struct { - TxPackets uint64 `json:"txPkts,omitzero,omitempty" cbor:"12,keyasint,omitempty"` - TxBytes uint64 `json:"txBytes,omitzero,omitempty" cbor:"13,keyasint,omitempty"` - RxPackets uint64 `json:"rxPkts,omitzero,omitempty" cbor:"14,keyasint,omitempty"` - RxBytes uint64 `json:"rxBytes,omitzero,omitempty" cbor:"15,keyasint,omitempty"` + TxPackets uint64 `json:"txPkts,omitzero"` + TxBytes uint64 `json:"txBytes,omitzero"` + RxPackets uint64 `json:"rxPkts,omitzero"` + RxBytes uint64 `json:"rxBytes,omitzero"` } func (c Counts) IsZero() bool { return c == Counts{} } diff --git a/types/netlogtype/netlogtype_test.go b/types/netlogtype/netlogtype_test.go index 403cb950883c7..00f89b228aa96 100644 --- a/types/netlogtype/netlogtype_test.go +++ b/types/netlogtype/netlogtype_test.go @@ -11,7 +11,6 @@ import ( "net/netip" "testing" - "github.com/fxamacker/cbor/v2" "github.com/google/go-cmp/cmp" "tailscale.com/util/must" ) @@ -32,10 +31,4 @@ func TestMaxSize(t *testing.T) { if string(outJSON) != maxJSONConnCounts { t.Errorf("JSON mismatch (-got +want):\n%s", cmp.Diff(string(outJSON), maxJSONConnCounts)) } - - outCBOR := must.Get(cbor.Marshal(cc)) - maxCBORConnCountsAlt := "\xa7" + maxCBORConnCounts[1:len(maxCBORConnCounts)-1] // may use a definite encoding of map - if string(outCBOR) != maxCBORConnCounts && string(outCBOR) != maxCBORConnCountsAlt { - t.Errorf("CBOR mismatch (-got +want):\n%s", cmp.Diff(string(outCBOR), maxCBORConnCounts)) - } } From e75f13bd93bd154e4e3e6c62c69ccae68863f2b7 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 15 Oct 2025 14:57:32 -0700 Subject: [PATCH 0578/1093] net/connstats: prepare to remove package (#17554) The connstats package was an unnecessary layer of indirection. It was separated out of wgengine/netlog so that net/tstun and wgengine/magicsock wouldn't need a dependency on the concrete implementation of network flow logging. Instead, we simply register a callback for counting connections.
This PR does the bare minimum work to prepare tstun and magicsock to only care about that callback. A future PR will delete connstats and merge it into netlog. Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- cmd/k8s-operator/depaware.txt | 3 +- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 3 +- cmd/tsidp/depaware.txt | 3 +- net/connstats/stats.go | 38 ++++++++++---------- net/tstun/wrap.go | 34 +++++++++++------- net/tstun/wrap_test.go | 15 ++++---- tsnet/depaware.txt | 3 +- types/netlogfunc/netlogfunc.go | 15 ++++++++ types/netlogtype/netlogtype.go | 42 ++++++++++++++++++++++ wgengine/magicsock/derp.go | 4 +-- wgengine/magicsock/endpoint.go | 8 ++--- wgengine/magicsock/magicsock.go | 16 ++++----- wgengine/magicsock/magicsock_test.go | 52 +++++++++++++--------------- wgengine/netlog/netlog.go | 43 +++++++++-------------- 16 files changed, 170 insertions(+), 113 deletions(-) create mode 100644 types/netlogfunc/netlogfunc.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d4fdb87fc5766..8a8397f28c137 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -768,7 +768,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/cmd/k8s-operator+ @@ -834,6 +834,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from 
tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index fe50dface5e57..96e18db43db19 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -78,7 +78,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -132,6 +131,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index a4999825e38b3..d46180e2d135e 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -102,7 +102,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli tailscale.com/net/bakedroots from 
tailscale.com/net/tlsdial 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -158,6 +157,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/net/batching+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 6ca10f80cf0bf..eed40845c61fd 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -330,7 +330,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -401,6 +401,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ 
tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 894b4a07821b1..1b6bb6d63c802 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -174,7 +174,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -239,6 +239,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ diff --git a/net/connstats/stats.go b/net/connstats/stats.go index 44b2762547f85..206181b27459c 100644 --- a/net/connstats/stats.go +++ b/net/connstats/stats.go @@ -16,6 +16,7 @@ import ( "golang.org/x/sync/errgroup" "tailscale.com/net/packet" "tailscale.com/net/tsaddr" + "tailscale.com/types/ipproto" "tailscale.com/types/netlogtype" ) @@ -85,14 +86,18 @@ func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end t // The source and destination of the packet directly correspond with // the 
source and destination in netlogtype.Connection. func (s *Statistics) UpdateTxVirtual(b []byte) { - s.updateVirtual(b, false) + var p packet.Parsed + p.Decode(b) + s.UpdateVirtual(p.IPProto, p.Src, p.Dst, 1, len(b), false) } // UpdateRxVirtual updates the counters for a received IP packet. // The source and destination of the packet are inverted with respect to // the source and destination in netlogtype.Connection. func (s *Statistics) UpdateRxVirtual(b []byte) { - s.updateVirtual(b, true) + var p packet.Parsed + p.Decode(b) + s.UpdateVirtual(p.IPProto, p.Dst, p.Src, 1, len(b), true) } var ( @@ -100,23 +105,18 @@ var ( tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() ) -func (s *Statistics) updateVirtual(b []byte, receive bool) { - var p packet.Parsed - p.Decode(b) - conn := netlogtype.Connection{Proto: p.IPProto, Src: p.Src, Dst: p.Dst} - if receive { - conn.Src, conn.Dst = conn.Dst, conn.Src - } - +func (s *Statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { // Network logging is defined as traffic between two Tailscale nodes. // Traffic with the internal Tailscale service is not with another node // and should not be logged. It also happens to be a high volume // amount of discrete traffic flows (e.g., DNS lookups). 
- switch conn.Dst.Addr() { + switch dst.Addr() { case tailscaleServiceIPv4, tailscaleServiceIPv6: return } + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} + s.mu.Lock() defer s.mu.Unlock() cnts, found := s.virtual[conn] @@ -124,11 +124,11 @@ func (s *Statistics) updateVirtual(b []byte, receive bool) { return } if receive { - cnts.RxPackets++ - cnts.RxBytes += uint64(len(b)) + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) } else { - cnts.TxPackets++ - cnts.TxBytes += uint64(len(b)) + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) } s.virtual[conn] = cnts } @@ -138,7 +138,7 @@ func (s *Statistics) updateVirtual(b []byte, receive bool) { // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.updatePhysical(src, dst, packets, bytes, false) + s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, false) } // UpdateRxPhysical updates the counters for zero or more received wireguard packets. @@ -146,11 +146,11 @@ func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packet // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. 
func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.updatePhysical(src, dst, packets, bytes, true) + s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, true) } -func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int, receive bool) { - conn := netlogtype.Connection{Src: netip.AddrPortFrom(src, 0), Dst: dst} +func (s *Statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} s.mu.Lock() defer s.mu.Unlock() diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index fb93ca21eaaba..dfbab7812928c 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -24,7 +24,6 @@ import ( "go4.org/mem" "tailscale.com/disco" "tailscale.com/feature/buildfeatures" - "tailscale.com/net/connstats" "tailscale.com/net/packet" "tailscale.com/net/packet/checksum" "tailscale.com/net/tsaddr" @@ -33,6 +32,7 @@ import ( "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/netlogfunc" "tailscale.com/util/clientmetric" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" @@ -203,8 +203,8 @@ type Wrapper struct { // disableTSMPRejected disables TSMP rejected responses. For tests. disableTSMPRejected bool - // stats maintains per-connection counters. - stats atomic.Pointer[connstats.Statistics] + // connCounter maintains per-connection counters. 
+ connCounter syncs.AtomicValue[netlogfunc.ConnectionCounter] captureHook syncs.AtomicValue[packet.CaptureCallback] @@ -977,8 +977,8 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { } sizes[buffsPos] = n if buildfeatures.HasConnStats { - if stats := t.stats.Load(); stats != nil { - stats.UpdateTxVirtual(p.Buffer()) + if update := t.connCounter.Load(); update != nil { + updateConnCounter(update, p.Buffer(), false) } } buffsPos++ @@ -1106,9 +1106,9 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i } if buildfeatures.HasConnStats { - if stats := t.stats.Load(); stats != nil { + if update := t.connCounter.Load(); update != nil { for i := 0; i < n; i++ { - stats.UpdateTxVirtual(outBuffs[i][offset : offset+sizes[i]]) + updateConnCounter(update, outBuffs[i][offset:offset+sizes[i]], false) } } } @@ -1276,9 +1276,9 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { func (t *Wrapper) tdevWrite(buffs [][]byte, offset int) (int, error) { if buildfeatures.HasConnStats { - if stats := t.stats.Load(); stats != nil { + if update := t.connCounter.Load(); update != nil { for i := range buffs { - stats.UpdateRxVirtual((buffs)[i][offset:]) + updateConnCounter(update, buffs[i][offset:], true) } } } @@ -1498,11 +1498,11 @@ func (t *Wrapper) Unwrap() tun.Device { return t.tdev } -// SetStatistics specifies a per-connection statistics aggregator. +// SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. 
-func (t *Wrapper) SetStatistics(stats *connstats.Statistics) { +func (t *Wrapper) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { if buildfeatures.HasConnStats { - t.stats.Store(stats) + t.connCounter.Store(fn) } } @@ -1524,3 +1524,13 @@ func (t *Wrapper) InstallCaptureHook(cb packet.CaptureCallback) { } t.captureHook.Store(cb) } + +func updateConnCounter(update netlogfunc.ConnectionCounter, b []byte, receive bool) { + var p packet.Parsed + p.Decode(b) + if receive { + update(p.IPProto, p.Dst, p.Src, 1, len(b), true) + } else { + update(p.IPProto, p.Src, p.Dst, 1, len(b), false) + } +} diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index 223ee34f4336a..a668881919e1e 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -5,7 +5,6 @@ package tstun import ( "bytes" - "context" "encoding/binary" "encoding/hex" "expvar" @@ -27,7 +26,6 @@ import ( "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" - "tailscale.com/net/connstats" "tailscale.com/net/netaddr" "tailscale.com/net/packet" "tailscale.com/tstest" @@ -370,9 +368,8 @@ func TestFilter(t *testing.T) { }() var buf [MaxPacketSize]byte - stats := connstats.NewStatistics(0, 0, nil) - defer stats.Shutdown(context.Background()) - tun.SetStatistics(stats) + var stats netlogtype.CountsByConnection + tun.SetConnectionCounter(stats.Add) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var n int @@ -380,9 +377,10 @@ func TestFilter(t *testing.T) { var filtered bool sizes := make([]int, 1) - tunStats, _ := stats.TestExtract() + tunStats := stats.Clone() + stats.Reset() if len(tunStats) > 0 { - t.Errorf("connstats.Statistics.Extract = %v, want {}", stats) + t.Errorf("connstats.Statistics.Extract = %v, want {}", tunStats) } if tt.dir == in { @@ -415,7 +413,8 @@ func TestFilter(t *testing.T) { } } - got, _ := stats.TestExtract() + got := stats.Clone() + stats.Reset() want := map[netlogtype.Connection]netlogtype.Counts{} var wasUDP bool if 
!tt.drop { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index d602c7b2f4733..893e52f2ce0cf 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -170,7 +170,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ + tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -234,6 +234,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ tailscale.com/types/netlogtype from tailscale.com/net/connstats+ tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ diff --git a/types/netlogfunc/netlogfunc.go b/types/netlogfunc/netlogfunc.go new file mode 100644 index 0000000000000..6185fcb715c65 --- /dev/null +++ b/types/netlogfunc/netlogfunc.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package netlogfunc defines types for network logging. +package netlogfunc + +import ( + "net/netip" + + "tailscale.com/types/ipproto" +) + +// ConnectionCounter is a function for counting packets and bytes +// for a particular connection. 
+type ConnectionCounter func(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index 0f552611e85e2..a29ea6f03dffa 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -5,7 +5,9 @@ package netlogtype import ( + "maps" "net/netip" + "sync" "time" "tailscale.com/tailcfg" @@ -83,3 +85,43 @@ func (c1 Counts) Add(c2 Counts) Counts { c1.RxBytes += c2.RxBytes return c1 } + +// CountsByConnection is a count of packets and bytes for each connection. +// All methods are safe for concurrent calls. +type CountsByConnection struct { + mu sync.Mutex + m map[Connection]Counts +} + +// Add adds packets and bytes for the specified connection. +func (c *CountsByConnection) Add(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) { + conn := Connection{Proto: proto, Src: src, Dst: dst} + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[Connection]Counts) + } + cnts := c.m[conn] + if recv { + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) + } else { + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) + } + c.m[conn] = cnts +} + +// Clone deep copies the map. +func (c *CountsByConnection) Clone() map[Connection]Counts { + c.mu.Lock() + defer c.mu.Unlock() + return maps.Clone(c.m) +} + +// Reset clear the map. 
+func (c *CountsByConnection) Reset() { + c.mu.Lock() + defer c.mu.Unlock() + clear(c.m) +} diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index d33745892b847..37a4f1a64ee02 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -717,8 +717,8 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en } ep.noteRecvActivity(srcAddr, mono.Now()) - if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, srcAddr.ap, 1, dm.n) + if update := c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(ep.nodeAddr, 0), srcAddr.ap, 1, dm.n, true) } c.metrics.inboundPacketsDERPTotal.Add(1) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 7deafb7528ca2..2010775a10d6e 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1105,8 +1105,8 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends. 
- if stats := de.c.stats.Load(); err == nil && stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, udpAddr.ap, len(buffs), txBytes) + if update := de.c.connCounter.Load(); err == nil && update != nil { + update(0, netip.AddrPortFrom(de.nodeAddr, 0), udpAddr.ap, len(buffs), txBytes, false) } } if derpAddr.IsValid() { @@ -1123,8 +1123,8 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } } - if stats := de.c.stats.Load(); stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buffs), txBytes) + if update := de.c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(de.nodeAddr, 0), derpAddr, len(buffs), txBytes, false) } if allOk { return nil diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index f855936ce3385..61fc50d1214fe 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -37,7 +37,6 @@ import ( "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" "tailscale.com/net/batching" - "tailscale.com/net/connstats" "tailscale.com/net/netcheck" "tailscale.com/net/neterror" "tailscale.com/net/netmon" @@ -56,6 +55,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/types/netlogfunc" "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/types/views" @@ -261,8 +261,8 @@ type Conn struct { //lint:ignore U1000 used on Linux/Darwin only peerMTUEnabled atomic.Bool - // stats maintains per-connection counters. - stats atomic.Pointer[connstats.Statistics] + // connCounter maintains per-connection counters. + connCounter syncs.AtomicValue[netlogfunc.ConnectionCounter] // captureHook, if non-nil, is the pcap logging callback when capturing. 
captureHook syncs.AtomicValue[packet.CaptureCallback] @@ -1862,8 +1862,8 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach ep.lastRecvUDPAny.StoreAtomic(now) connNoted := ep.noteRecvActivity(src, now) if buildfeatures.HasConnStats { - if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) + if update := c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(ep.nodeAddr, 0), ipp, 1, geneveInclusivePacketLen, true) } } if src.vni.IsSet() && (connNoted || looksLikeInitiationMsg(b)) { @@ -3745,11 +3745,11 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) { }) } -// SetStatistics specifies a per-connection statistics aggregator. +// SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. -func (c *Conn) SetStatistics(stats *connstats.Statistics) { +func (c *Conn) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { if buildfeatures.HasConnStats { - c.stats.Store(stats) + c.connCounter.Store(fn) } } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index d1d62a26e0e65..60620b14100f1 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -32,6 +32,7 @@ import ( "unsafe" qt "github.com/frankban/quicktest" + "github.com/google/go-cmp/cmp" wgconn "github.com/tailscale/wireguard-go/conn" "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun/tuntest" @@ -45,7 +46,6 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/ipn/ipnstate" - "tailscale.com/net/connstats" "tailscale.com/net/netaddr" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" @@ -158,14 +158,14 @@ func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, st // happiness. 
type magicStack struct { privateKey key.NodePrivate - epCh chan []tailcfg.Endpoint // endpoint updates produced by this peer - stats *connstats.Statistics // per-connection statistics - conn *Conn // the magicsock itself - tun *tuntest.ChannelTUN // TUN device to send/receive packets - tsTun *tstun.Wrapper // wrapped tun that implements filtering and wgengine hooks - dev *device.Device // the wireguard-go Device that connects the previous things - wgLogger *wglog.Logger // wireguard-go log wrapper - netMon *netmon.Monitor // always non-nil + epCh chan []tailcfg.Endpoint // endpoint updates produced by this peer + counts netlogtype.CountsByConnection // per-connection statistics + conn *Conn // the magicsock itself + tun *tuntest.ChannelTUN // TUN device to send/receive packets + tsTun *tstun.Wrapper // wrapped tun that implements filtering and wgengine hooks + dev *device.Device // the wireguard-go Device that connects the previous things + wgLogger *wglog.Logger // wireguard-go log wrapper + netMon *netmon.Monitor // always non-nil metrics *usermetric.Registry } @@ -1143,22 +1143,19 @@ func testTwoDevicePing(t *testing.T, d *devices) { } } - m1.stats = connstats.NewStatistics(0, 0, nil) - defer m1.stats.Shutdown(context.Background()) - m1.conn.SetStatistics(m1.stats) - m2.stats = connstats.NewStatistics(0, 0, nil) - defer m2.stats.Shutdown(context.Background()) - m2.conn.SetStatistics(m2.stats) + m1.conn.SetConnectionCounter(m1.counts.Add) + m2.conn.SetConnectionCounter(m2.counts.Add) checkStats := func(t *testing.T, m *magicStack, wantConns []netlogtype.Connection) { - _, stats := m.stats.TestExtract() + defer m.counts.Reset() + counts := m.counts.Clone() for _, conn := range wantConns { - if _, ok := stats[conn]; ok { + if _, ok := counts[conn]; ok { return } } t.Helper() - t.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(stats)) + t.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(counts)) } addrPort := 
netip.MustParseAddrPort @@ -1221,9 +1218,9 @@ func testTwoDevicePing(t *testing.T, d *devices) { setT(t) defer setT(outerT) m1.conn.resetMetricsForTest() - m1.stats.TestExtract() + m1.counts.Reset() m2.conn.resetMetricsForTest() - m2.stats.TestExtract() + m2.counts.Reset() t.Logf("Metrics before: %s\n", m1.metrics.String()) ping1(t) ping2(t) @@ -1249,8 +1246,6 @@ func (c *Conn) resetMetricsForTest() { } func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { - _, phys := ms.stats.TestExtract() - physIPv4RxBytes := int64(0) physIPv4TxBytes := int64(0) physDERPRxBytes := int64(0) @@ -1259,7 +1254,7 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { physIPv4TxPackets := int64(0) physDERPRxPackets := int64(0) physDERPTxPackets := int64(0) - for conn, count := range phys { + for conn, count := range ms.counts.Clone() { t.Logf("physconn src: %s, dst: %s", conn.Src.String(), conn.Dst.String()) if conn.Dst.String() == "127.3.3.40:1" { physDERPRxBytes += int64(count.RxBytes) @@ -1273,6 +1268,7 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { physIPv4TxPackets += int64(count.TxPackets) } } + ms.counts.Reset() metricIPv4RxBytes := ms.conn.metrics.inboundBytesIPv4Total.Value() metricIPv4RxPackets := ms.conn.metrics.inboundPacketsIPv4Total.Value() @@ -3986,7 +3982,8 @@ func TestConn_receiveIP(t *testing.T) { c.noteRecvActivity = func(public key.NodePublic) { noteRecvActivityCalled = true } - c.SetStatistics(connstats.NewStatistics(0, 0, nil)) + var counts netlogtype.CountsByConnection + c.SetConnectionCounter(counts.Add) if tt.insertWantEndpointTypeInPeerMap { var insertEPIntoPeerMap *endpoint @@ -4059,9 +4056,8 @@ func TestConn_receiveIP(t *testing.T) { } // Verify physical rx stats - stats := c.stats.Load() - _, gotPhy := stats.TestExtract() wantNonzeroRxStats := false + gotPhy := counts.Clone() switch ep := tt.wantEndpointType.(type) { case *lazyEndpoint: if ep.maybeEP != nil { @@ -4081,8 +4077,8 @@ func 
TestConn_receiveIP(t *testing.T) { RxBytes: wantRxBytes, }, } - if !reflect.DeepEqual(gotPhy, wantPhy) { - t.Errorf("receiveIP() got physical conn stats = %v, want %v", gotPhy, wantPhy) + if d := cmp.Diff(gotPhy, wantPhy); d != "" { + t.Errorf("receiveIP() stats mismatch (-got +want):\n%s", d) } } else { if len(gotPhy) != 0 { diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 7e1938d27ac3c..a04fd2126330c 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -8,6 +8,7 @@ package netlog import ( + "cmp" "context" "encoding/json" "errors" @@ -19,7 +20,6 @@ import ( "sync" "time" - "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" @@ -29,6 +29,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/logid" + "tailscale.com/types/netlogfunc" "tailscale.com/types/netlogtype" "tailscale.com/util/eventbus" "tailscale.com/wgengine/router" @@ -40,12 +41,12 @@ const pollPeriod = 5 * time.Second // Device is an abstraction over a tunnel device or a magic socket. // Both *tstun.Wrapper and *magicsock.Conn implement this interface. type Device interface { - SetStatistics(*connstats.Statistics) + SetConnectionCounter(netlogfunc.ConnectionCounter) } type noopDevice struct{} -func (noopDevice) SetStatistics(*connstats.Statistics) {} +func (noopDevice) SetConnectionCounter(netlogfunc.ConnectionCounter) {} // Logger logs statistics about every connection. // At present, it only logs connections within a tailscale network. @@ -131,31 +132,21 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo // can upload to the Tailscale log service, so stay below this limit. 
const maxLogSize = 256 << 10 const maxConns = (maxLogSize - netlogtype.MaxMessageJSONSize) / netlogtype.MaxConnectionCountsJSONSize - if buildfeatures.HasConnStats { - nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - nl.mu.Lock() - addrs := nl.addrs - prefixes := nl.prefixes - nl.mu.Unlock() - recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) - }) - } + nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { + nl.mu.Lock() + addrs := nl.addrs + prefixes := nl.prefixes + nl.mu.Unlock() + recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) + }) // Register the connection tracker into the TUN device. - if tun == nil { - tun = noopDevice{} - } - nl.tun = tun - if buildfeatures.HasConnStats { - nl.tun.SetStatistics(nl.stats) - } + nl.tun = cmp.Or[Device](tun, noopDevice{}) + nl.tun.SetConnectionCounter(nl.stats.UpdateVirtual) // Register the connection tracker into magicsock. - if sock == nil { - sock = noopDevice{} - } - nl.sock = sock - nl.sock.SetStatistics(nl.stats) + nl.sock = cmp.Or[Device](sock, noopDevice{}) + nl.sock.SetConnectionCounter(nl.stats.UpdatePhysical) return nil } @@ -265,8 +256,8 @@ func (nl *Logger) Shutdown(ctx context.Context) error { // Shutdown in reverse order of Startup. // Do not hold lock while shutting down since this may flush one last time. 
nl.mu.Unlock() - nl.sock.SetStatistics(nil) - nl.tun.SetStatistics(nil) + nl.sock.SetConnectionCounter(nil) + nl.tun.SetConnectionCounter(nil) err1 := nl.stats.Shutdown(ctx) err2 := nl.logger.Shutdown(ctx) nl.mu.Lock() From e804b6435818527884112870f17ad32a673b2f2d Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 16 Oct 2025 00:07:29 -0700 Subject: [PATCH 0579/1093] wgengine/netlog: merge connstats into package (#17557) Merge the connstats package into the netlog package and unexport all of its declarations. Remove the buildfeatures.HasConnStats and use HasNetLog instead. Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- cmd/k8s-operator/depaware.txt | 5 +-- cmd/tailscaled/depaware.txt | 5 +-- cmd/tsidp/depaware.txt | 3 +- .../feature_connstats_disabled.go | 13 ------- .../feature_connstats_enabled.go | 13 ------- feature/featuretags/featuretags.go | 6 +-- net/connstats/stats_omit.go | 24 ------------ net/tstun/wrap.go | 8 ++-- net/tstun/wrap_test.go | 2 +- tsnet/depaware.txt | 3 +- wgengine/magicsock/magicsock.go | 4 +- wgengine/netlog/netlog.go | 9 ++--- {net/connstats => wgengine/netlog}/stats.go | 38 +++++++++---------- .../netlog}/stats_test.go | 14 +++---- 14 files changed, 43 insertions(+), 104 deletions(-) delete mode 100644 feature/buildfeatures/feature_connstats_disabled.go delete mode 100644 feature/buildfeatures/feature_connstats_enabled.go delete mode 100644 net/connstats/stats_omit.go rename {net/connstats => wgengine/netlog}/stats.go (85%) rename {net/connstats => wgengine/netlog}/stats_test.go (95%) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 8a8397f28c137..6cffda2ddb2c8 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -768,7 +768,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock 
tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/cmd/k8s-operator+ @@ -787,7 +786,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ - tailscale.com/net/packet from tailscale.com/net/connstats+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ tailscale.com/net/portmapper from tailscale.com/feature/portmapper @@ -835,7 +834,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/client/tailscale+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index eed40845c61fd..e92d41b9855df 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -330,7 +330,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - 
tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -349,7 +348,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ - tailscale.com/net/packet from tailscale.com/net/connstats+ + tailscale.com/net/packet from tailscale.com/feature/capture+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ tailscale.com/net/portmapper from tailscale.com/feature/portmapper+ @@ -402,7 +401,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 1b6bb6d63c802..a2a473a5068ec 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -174,7 +174,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from 
tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -240,7 +239,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/cmd/tsidp+ diff --git a/feature/buildfeatures/feature_connstats_disabled.go b/feature/buildfeatures/feature_connstats_disabled.go deleted file mode 100644 index d9aac0e80961d..0000000000000 --- a/feature/buildfeatures/feature_connstats_disabled.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Code generated by gen.go; DO NOT EDIT. - -//go:build ts_omit_connstats - -package buildfeatures - -// HasConnStats is whether the binary was built with support for modular feature "Track per-packet connection statistics". -// Specifically, it's whether the binary was NOT built with the "ts_omit_connstats" build tag. -// It's a const so it can be used for dead code elimination. -const HasConnStats = false diff --git a/feature/buildfeatures/feature_connstats_enabled.go b/feature/buildfeatures/feature_connstats_enabled.go deleted file mode 100644 index c0451ce1e7f74..0000000000000 --- a/feature/buildfeatures/feature_connstats_enabled.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Code generated by gen.go; DO NOT EDIT. 
- -//go:build !ts_omit_connstats - -package buildfeatures - -// HasConnStats is whether the binary was built with support for modular feature "Track per-packet connection statistics". -// Specifically, it's whether the binary was NOT built with the "ts_omit_connstats" build tag. -// It's a const so it can be used for dead code elimination. -const HasConnStats = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index c944d65ebcdbb..9c85dbaa0d923 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -134,11 +134,7 @@ var Features = map[FeatureTag]FeatureMeta{ Deps: []FeatureTag{"c2n"}, }, "completion": {Sym: "Completion", Desc: "CLI shell completion"}, - "connstats": { - Sym: "ConnStats", - Desc: "Track per-packet connection statistics", - }, - "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, + "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, "dbus": { Sym: "DBus", Desc: "Linux DBus support", diff --git a/net/connstats/stats_omit.go b/net/connstats/stats_omit.go deleted file mode 100644 index 15d16c9e449e3..0000000000000 --- a/net/connstats/stats_omit.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ts_omit_connstats - -package connstats - -import ( - "context" - "net/netip" - "time" -) - -type Statistics struct{} - -func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical any)) *Statistics { - return &Statistics{} -} - -func (s *Statistics) UpdateTxVirtual(b []byte) {} -func (s *Statistics) UpdateRxVirtual(b []byte) {} -func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) {} -func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) {} -func (s *Statistics) Shutdown(context.Context) 
error { return nil } diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index dfbab7812928c..70cc7118ac208 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -976,7 +976,7 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { panic(fmt.Sprintf("short copy: %d != %d", n, len(data)-res.dataOffset)) } sizes[buffsPos] = n - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { if update := t.connCounter.Load(); update != nil { updateConnCounter(update, p.Buffer(), false) } @@ -1105,7 +1105,7 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i n, err = tun.GSOSplit(pkt, gsoOptions, outBuffs, sizes, offset) } - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { if update := t.connCounter.Load(); update != nil { for i := 0; i < n; i++ { updateConnCounter(update, outBuffs[i][offset:offset+sizes[i]], false) @@ -1275,7 +1275,7 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { } func (t *Wrapper) tdevWrite(buffs [][]byte, offset int) (int, error) { - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { if update := t.connCounter.Load(); update != nil { for i := range buffs { updateConnCounter(update, buffs[i][offset:], true) @@ -1501,7 +1501,7 @@ func (t *Wrapper) Unwrap() tun.Device { // SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. 
func (t *Wrapper) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { t.connCounter.Store(fn) } } diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index a668881919e1e..75cf5afb21f8f 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -380,7 +380,7 @@ func TestFilter(t *testing.T) { tunStats := stats.Clone() stats.Reset() if len(tunStats) > 0 { - t.Errorf("connstats.Statistics.Extract = %v, want {}", tunStats) + t.Errorf("netlogtype.CountsByConnection = %v, want {}", tunStats) } if tt.dir == in { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 893e52f2ce0cf..cd734e9959041 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -170,7 +170,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/wgengine/netlog tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ @@ -235,7 +234,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 
61fc50d1214fe..e3c2d478e9882 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1861,7 +1861,7 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) connNoted := ep.noteRecvActivity(src, now) - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { if update := c.connCounter.Load(); update != nil { update(0, netip.AddrPortFrom(ep.nodeAddr, 0), ipp, 1, geneveInclusivePacketLen, true) } @@ -3748,7 +3748,7 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) { // SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. func (c *Conn) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { - if buildfeatures.HasConnStats { + if buildfeatures.HasNetLog { c.connCounter.Store(fn) } } diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index a04fd2126330c..2984df99471b6 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -23,7 +23,6 @@ import ( "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" - "tailscale.com/net/connstats" "tailscale.com/net/netmon" "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" @@ -56,7 +55,7 @@ type Logger struct { mu sync.Mutex // protects all fields below logger *logtail.Logger - stats *connstats.Statistics + stats *statistics tun Device sock Device @@ -132,7 +131,7 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo // can upload to the Tailscale log service, so stay below this limit. 
const maxLogSize = 256 << 10 const maxConns = (maxLogSize - netlogtype.MaxMessageJSONSize) / netlogtype.MaxConnectionCountsJSONSize - nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { + nl.stats = newStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { nl.mu.Lock() addrs := nl.addrs prefixes := nl.prefixes @@ -151,7 +150,7 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo return nil } -func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connstats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool, logExitFlowEnabled bool) { +func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connStats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool, logExitFlowEnabled bool) { m := netlogtype.Message{NodeID: nodeID, Start: start.UTC(), End: end.UTC()} classifyAddr := func(a netip.Addr) (isTailscale, withinRoute bool) { @@ -170,7 +169,7 @@ func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start } exitTraffic := make(map[netlogtype.Connection]netlogtype.Counts) - for conn, cnts := range connstats { + for conn, cnts := range connStats { srcIsTailscaleIP, srcWithinSubnet := classifyAddr(conn.Src.Addr()) dstIsTailscaleIP, dstWithinSubnet := classifyAddr(conn.Dst.Addr()) switch { diff --git a/net/connstats/stats.go b/wgengine/netlog/stats.go similarity index 85% rename from net/connstats/stats.go rename to wgengine/netlog/stats.go index 206181b27459c..c06068803f125 100644 --- a/net/connstats/stats.go +++ b/wgengine/netlog/stats.go @@ -1,11 +1,9 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build 
!ts_omit_connstats +//go:build !ts_omit_netlog && !ts_omit_logtail -// Package connstats maintains statistics about connections -// flowing through a TUN device (which operate at the IP layer). -package connstats +package netlog import ( "context" @@ -20,10 +18,10 @@ import ( "tailscale.com/types/netlogtype" ) -// Statistics maintains counters for every connection. +// statistics maintains counters for every connection. // All methods are safe for concurrent use. // The zero value is ready for use. -type Statistics struct { +type statistics struct { maxConns int // immutable once set mu sync.Mutex @@ -42,13 +40,13 @@ type connCnts struct { physical map[netlogtype.Connection]netlogtype.Counts } -// NewStatistics creates a data structure for tracking connection statistics +// newStatistics creates a data structure for tracking connection statistics // that periodically dumps the virtual and physical connection counts // depending on whether the maxPeriod or maxConns is exceeded. // The dump function is called from a single goroutine. // Shutdown must be called to cleanup resources. -func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts)) *Statistics { - s := &Statistics{maxConns: maxConns} +func newStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts)) *statistics { + s := &statistics{maxConns: maxConns} s.connCntsCh = make(chan connCnts, 256) s.shutdownCtx, s.shutdown = context.WithCancel(context.Background()) s.group.Go(func() error { @@ -85,7 +83,7 @@ func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end t // UpdateTxVirtual updates the counters for a transmitted IP packet // The source and destination of the packet directly correspond with // the source and destination in netlogtype.Connection. 
-func (s *Statistics) UpdateTxVirtual(b []byte) { +func (s *statistics) UpdateTxVirtual(b []byte) { var p packet.Parsed p.Decode(b) s.UpdateVirtual(p.IPProto, p.Src, p.Dst, 1, len(b), false) @@ -94,7 +92,7 @@ func (s *Statistics) UpdateTxVirtual(b []byte) { // UpdateRxVirtual updates the counters for a received IP packet. // The source and destination of the packet are inverted with respect to // the source and destination in netlogtype.Connection. -func (s *Statistics) UpdateRxVirtual(b []byte) { +func (s *statistics) UpdateRxVirtual(b []byte) { var p packet.Parsed p.Decode(b) s.UpdateVirtual(p.IPProto, p.Dst, p.Src, 1, len(b), true) @@ -105,7 +103,7 @@ var ( tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() ) -func (s *Statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { +func (s *statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { // Network logging is defined as traffic between two Tailscale nodes. // Traffic with the internal Tailscale service is not with another node // and should not be logged. It also happens to be a high volume @@ -137,7 +135,7 @@ func (s *Statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, // The src is always a Tailscale IP address, representing some remote peer. // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. -func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { +func (s *statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, false) } @@ -145,11 +143,11 @@ func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packet // The src is always a Tailscale IP address, representing some remote peer. 
// The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. -func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { +func (s *statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, true) } -func (s *Statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { +func (s *statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} s.mu.Lock() @@ -170,7 +168,7 @@ func (s *Statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort // preInsertConn updates the maps to handle insertion of a new connection. // It reports false if insertion is not allowed (i.e., after shutdown). -func (s *Statistics) preInsertConn() bool { +func (s *statistics) preInsertConn() bool { // Check whether insertion of a new connection will exceed maxConns. if len(s.virtual)+len(s.physical) == s.maxConns && s.maxConns > 0 { // Extract the current statistics and send it to the serializer. @@ -192,13 +190,13 @@ func (s *Statistics) preInsertConn() bool { return s.shutdownCtx.Err() == nil } -func (s *Statistics) extract() connCnts { +func (s *statistics) extract() connCnts { s.mu.Lock() defer s.mu.Unlock() return s.extractLocked() } -func (s *Statistics) extractLocked() connCnts { +func (s *statistics) extractLocked() connCnts { if len(s.virtual)+len(s.physical) == 0 { return connCnts{} } @@ -210,7 +208,7 @@ func (s *Statistics) extractLocked() connCnts { // TestExtract synchronously extracts the current network statistics map // and resets the counters. This should only be used for testing purposes. 
-func (s *Statistics) TestExtract() (virtual, physical map[netlogtype.Connection]netlogtype.Counts) { +func (s *statistics) TestExtract() (virtual, physical map[netlogtype.Connection]netlogtype.Counts) { cc := s.extract() return cc.virtual, cc.physical } @@ -218,7 +216,7 @@ func (s *Statistics) TestExtract() (virtual, physical map[netlogtype.Connection] // Shutdown performs a final flush of statistics. // Statistics for any subsequent calls to Update will be dropped. // It is safe to call Shutdown concurrently and repeatedly. -func (s *Statistics) Shutdown(context.Context) error { +func (s *statistics) Shutdown(context.Context) error { s.shutdown() return s.group.Wait() } diff --git a/net/connstats/stats_test.go b/wgengine/netlog/stats_test.go similarity index 95% rename from net/connstats/stats_test.go rename to wgengine/netlog/stats_test.go index ae0bca8a5f008..6cf7eb9983817 100644 --- a/net/connstats/stats_test.go +++ b/wgengine/netlog/stats_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package connstats +package netlog import ( "context" @@ -54,7 +54,7 @@ func TestInterval(t *testing.T) { const maxConns = 2048 gotDump := make(chan struct{}, 1) - stats := NewStatistics(maxPeriod, maxConns, func(_, _ time.Time, _, _ map[netlogtype.Connection]netlogtype.Counts) { + stats := newStatistics(maxPeriod, maxConns, func(_, _ time.Time, _, _ map[netlogtype.Connection]netlogtype.Counts) { select { case gotDump <- struct{}{}: default: @@ -86,7 +86,7 @@ func TestConcurrent(t *testing.T) { const maxPeriod = 10 * time.Millisecond const maxConns = 10 virtualAggregate := make(map[netlogtype.Connection]netlogtype.Counts) - stats := NewStatistics(maxPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { + stats := newStatistics(maxPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { c.Assert(start.IsZero(), 
qt.IsFalse) c.Assert(end.IsZero(), qt.IsFalse) c.Assert(end.Before(start), qt.IsFalse) @@ -170,7 +170,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) for j := 0; j < 1e3; j++ { s.UpdateTxVirtual(p) } @@ -181,7 +181,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) for j := 0; j < 1e3; j++ { binary.BigEndian.PutUint32(p[20:], uint32(j)) // unique port combination s.UpdateTxVirtual(p) @@ -193,7 +193,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) var group sync.WaitGroup for j := 0; j < runtime.NumCPU(); j++ { group.Add(1) @@ -215,7 +215,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) var group sync.WaitGroup for j := 0; j < runtime.NumCPU(); j++ { group.Add(1) From 419fba40e02c693cc02c0416d4d837a47d69e7a8 Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 16 Oct 2025 10:11:34 +0100 Subject: [PATCH 0580/1093] k8s-operator/api-proxy: put kube api server events behind environment variable (#17550) This commit modifies the k8s-operator's api proxy implementation to only enable forwarding of api requests to tsrecorder when an environment variable is set. This new environment variable is named `TS_EXPERIMENTAL_KUBE_API_EVENTS`. 
Updates https://github.com/tailscale/corp/issues/32448 Signed-off-by: David Bond --- k8s-operator/api-proxy/proxy.go | 9 +++++++++ k8s-operator/api-proxy/proxy_events_test.go | 1 + 2 files changed, 10 insertions(+) diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index fdb79815222d7..762a52f1fdbfc 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -28,6 +28,7 @@ import ( "k8s.io/client-go/transport" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" + "tailscale.com/envknob" ksr "tailscale.com/k8s-operator/sessionrecording" "tailscale.com/kube/kubetypes" "tailscale.com/net/netx" @@ -96,6 +97,7 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn upstreamURL: u, ts: ts, sendEventFunc: sessionrecording.SendEvent, + eventsEnabled: envknob.Bool("TS_EXPERIMENTAL_KUBE_API_EVENTS"), } ap.rp = &httputil.ReverseProxy{ Rewrite: func(pr *httputil.ProxyRequest) { @@ -192,6 +194,9 @@ type APIServerProxy struct { upstreamURL *url.URL sendEventFunc func(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error + + // Flag used to enable sending API requests as events to tsrecorder. + eventsEnabled bool } // serveDefault is the default handler for Kubernetes API server requests. 
@@ -310,6 +315,10 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request } func (ap *APIServerProxy) recordRequestAsEvent(req *http.Request, who *apitype.WhoIsResponse) error { + if !ap.eventsEnabled { + return nil + } + failOpen, addrs, err := determineRecorderConfig(who) if err != nil { return fmt.Errorf("error trying to determine whether the kubernetes api request needs to be recorded: %w", err) diff --git a/k8s-operator/api-proxy/proxy_events_test.go b/k8s-operator/api-proxy/proxy_events_test.go index 230927dc07cb6..8bcf484368a35 100644 --- a/k8s-operator/api-proxy/proxy_events_test.go +++ b/k8s-operator/api-proxy/proxy_events_test.go @@ -61,6 +61,7 @@ func TestRecordRequestAsEvent(t *testing.T) { log: zl.Sugar(), ts: &tsnet.Server{}, sendEventFunc: sender.Send, + eventsEnabled: true, } defaultWho := &apitype.WhoIsResponse{ From 0ce88aa3433022bb96f3c2a97f5bfd7d2940d205 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Thu, 16 Oct 2025 11:13:41 +0100 Subject: [PATCH 0581/1093] all: use a consistent capitalisation for "Tailnet Lock" Updates https://github.com/tailscale/corp/issues/13108 Signed-off-by: Alex Chan --- cmd/tailscale/cli/network-lock.go | 8 ++++---- docs/windows/policy/en-US/tailscale.adml | 2 +- ipn/localapi/localapi.go | 2 +- tka/sig_test.go | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index 9b2f6fbdb0738..f355f99b97ac5 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -225,18 +225,18 @@ func runNetworkLockStatus(ctx context.Context, args []string) error { } if st.Enabled { - fmt.Println("Tailnet lock is ENABLED.") + fmt.Println("Tailnet Lock is ENABLED.") } else { - fmt.Println("Tailnet lock is NOT enabled.") + fmt.Println("Tailnet Lock is NOT enabled.") } fmt.Println() if st.Enabled && st.NodeKey != nil && !st.PublicKey.IsZero() { if st.NodeKeySigned { - fmt.Println("This node is accessible 
under tailnet lock. Node signature:") + fmt.Println("This node is accessible under Tailnet Lock. Node signature:") fmt.Println(st.NodeKeySignature.String()) } else { - fmt.Println("This node is LOCKED OUT by tailnet-lock, and action is required to establish connectivity.") + fmt.Println("This node is LOCKED OUT by Tailnet Lock, and action is required to establish connectivity.") fmt.Printf("Run the following command on a node with a trusted key:\n\ttailscale lock sign %v %s\n", st.NodeKey, st.PublicKey.CLIString()) } fmt.Println() diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 58e13be19ca98..a0be5e8314a2b 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -61,7 +61,7 @@ Managing authentication keys via Group Policy and MDM solutions poses significan While MDM solutions tend to offer better control over who can access the policy setting values, they can still be compromised. Additionally, with both Group Policy and MDM solutions, the auth key is always readable by all users who have access to the device where this policy setting applies, as well as by all applications running on the device. A compromised auth key can potentially be used by a malicious actor to gain or elevate access to the target network. -Only consider this option after carefully reviewing the organization's security posture. For example, ensure you configure the auth keys specifically for the tag of the device and that access control policies only grant necessary access between the tailnet and the tagged device. Additionally, consider using short-lived auth keys, one-time auth keys (with one GPO/MDM configuration per device), Device Approval, and/or Tailnet lock to minimize risk. If you suspect an auth key has been compromised, revoke the auth key immediately. +Only consider this option after carefully reviewing the organization's security posture. 
For example, ensure you configure the auth keys specifically for the tag of the device and that access control policies only grant necessary access between the tailnet and the tagged device. Additionally, consider using short-lived auth keys, one-time auth keys (with one GPO/MDM configuration per device), Device Approval, and/or Tailnet Lock to minimize risk. If you suspect an auth key has been compromised, revoke the auth key immediately. If you enable this policy setting and specify an auth key, it will be used to authenticate the device unless the device is already logged in or an auth key is explicitly specified via the CLI. diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 32dc2963feb44..9e7c16891fc20 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -424,7 +424,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { // OS-specific details h.logf.JSON(1, "UserBugReportOS", osdiag.SupportInfo(osdiag.LogSupportInfoReasonBugReport)) - // Tailnet lock details + // Tailnet Lock details st := h.b.NetworkLockStatus() if st.Enabled { h.logf.JSON(1, "UserBugReportTailnetLockStatus", st) diff --git a/tka/sig_test.go b/tka/sig_test.go index d64575e7c7b45..99c25f8e57ae6 100644 --- a/tka/sig_test.go +++ b/tka/sig_test.go @@ -507,7 +507,7 @@ func TestDecodeWrappedAuthkey(t *testing.T) { } func TestResignNKS(t *testing.T) { - // Tailnet lock keypair of a signing node. + // Tailnet Lock keypair of a signing node. authPub, authPriv := testingKey25519(t, 1) authKey := Key{Kind: Key25519, Public: authPub, Votes: 2} From c3acf25d6217f6cb7b1eb74afaf2860293abf377 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 17 Oct 2025 11:27:57 +0100 Subject: [PATCH 0582/1093] tka: remove an unused Mem.Orphans() method This method was added in cca25f6 in the initial in-memory implementation of Chonk, but it's not part of the Chonk interface and isn't implemented or used anywhere else. Let's get rid of it. 
Updates https://github.com/tailscale/corp/issues/33465 Signed-off-by: Alex Chan --- tka/tailchonk.go | 13 ------------- tka/tailchonk_test.go | 27 --------------------------- 2 files changed, 40 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 6c441669a6853..bebc6cec9c972 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -127,19 +127,6 @@ func (c *Mem) AUM(hash AUMHash) (AUM, error) { return aum, nil } -// Orphans returns all AUMs which do not have a parent. -func (c *Mem) Orphans() ([]AUM, error) { - c.l.RLock() - defer c.l.RUnlock() - out := make([]AUM, 0, 6) - for _, a := range c.aums { - if _, ok := a.Parent(); !ok { - out = append(out, a) - } - } - return out, nil -} - // ChildAUMs returns all AUMs with a specified previous // AUM hash. func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 86d5642a3bd10..376de323cf4af 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -73,33 +73,6 @@ func TestTailchonk_AUMMissing(t *testing.T) { } } -func TestTailchonkMem_Orphans(t *testing.T) { - chonk := Mem{} - - parentHash := randHash(t, 1) - orphan := AUM{MessageKind: AUMNoOp} - aums := []AUM{ - orphan, - // A parent is specified, so we shouldnt see it in GetOrphans() - { - MessageKind: AUMRemoveKey, - KeyID: []byte{3, 4}, - PrevAUMHash: parentHash[:], - }, - } - if err := chonk.CommitVerifiedAUMs(aums); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - - stored, err := chonk.Orphans() - if err != nil { - t.Fatalf("Orphans failed: %v", err) - } - if diff := cmp.Diff([]AUM{orphan}, stored); diff != "" { - t.Errorf("stored AUM differs (-want, +got):\n%s", diff) - } -} - func TestTailchonk_ReadChainFromHead(t *testing.T) { for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { From 55a43c3736a7a7029eec214da8b2ab5788679906 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 17 Oct 2025 10:53:12 +0100 Subject: [PATCH 0583/1093] tka: don't 
look up parent/child information from purged AUMs We soft-delete AUMs when they're purged, but when we call `ChildAUMs()`, we look up soft-deleted AUMs to find the `Children` field. This patch changes the behaviour of `ChildAUMs()` so it only looks at not-deleted AUMs. This means we don't need to record child information on AUMs any more, which is a minor space saving for any newly-recorded AUMs. Updates https://github.com/tailscale/tailscale/issues/17566 Updates https://github.com/tailscale/corp/issues/27166 Signed-off-by: Alex Chan --- tka/tailchonk.go | 105 ++++++++++++++++++++++-------------------- tka/tailchonk_test.go | 63 +++++++++++++++++++++---- 2 files changed, 108 insertions(+), 60 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index bebc6cec9c972..cb683c273d033 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -11,6 +11,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "sync" "time" @@ -206,10 +207,14 @@ func ChonkDir(dir string) (*FS, error) { // CBOR was chosen because we are already using it and it serializes // much smaller than JSON for AUMs. The 'keyasint' thing isn't essential // but again it saves a bunch of bytes. +// +// We have removed the following fields from fsHashInfo, but they may be +// present in data stored in existing deployments. Do not reuse these values, +// to avoid getting unexpected values from legacy data: +// - cbor:1, Children type fsHashInfo struct { - Children []AUMHash `cbor:"1,keyasint"` - AUM *AUM `cbor:"2,keyasint"` - CreatedUnix int64 `cbor:"3,keyasint,omitempty"` + AUM *AUM `cbor:"2,keyasint"` + CreatedUnix int64 `cbor:"3,keyasint,omitempty"` // PurgedUnix is set when the AUM is deleted. The value is // the unix epoch at the time it was deleted. 
@@ -285,32 +290,15 @@ func (c *FS) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { c.mu.RLock() defer c.mu.RUnlock() - info, err := c.get(prevAUMHash) - if err != nil { - if os.IsNotExist(err) { - // not knowing about this hash is not an error - return nil, nil - } - return nil, err - } - // NOTE(tom): We don't check PurgedUnix here because 'purged' - // only applies to that specific AUM (i.e. info.AUM) and not to - // any information about children stored against that hash. + var out []AUM - out := make([]AUM, len(info.Children)) - for i, h := range info.Children { - c, err := c.get(h) - if err != nil { - // We expect any AUM recorded as a child on its parent to exist. - return nil, fmt.Errorf("reading child %d of %x: %v", i, h, err) - } - if c.AUM == nil || c.PurgedUnix > 0 { - return nil, fmt.Errorf("child %d of %x: AUM not stored", i, h) + err := c.scanHashes(func(info *fsHashInfo) { + if info.AUM != nil && bytes.Equal(info.AUM.PrevAUMHash, prevAUMHash[:]) { + out = append(out, *info.AUM) } - out[i] = *c.AUM - } + }) - return out, nil + return out, err } func (c *FS) get(h AUMHash) (*fsHashInfo, error) { @@ -346,13 +334,45 @@ func (c *FS) Heads() ([]AUM, error) { c.mu.RLock() defer c.mu.RUnlock() + // Scan the complete list of AUMs, and build a list of all parent hashes. + // This tells us which AUMs have children. + var parentHashes []AUMHash + + allAUMs, err := c.AllAUMs() + if err != nil { + return nil, err + } + + for _, h := range allAUMs { + aum, err := c.AUM(h) + if err != nil { + return nil, err + } + parent, hasParent := aum.Parent() + if !hasParent { + continue + } + if !slices.Contains(parentHashes, parent) { + parentHashes = append(parentHashes, parent) + } + } + + // Now scan a second time, and only include AUMs which weren't marked as + // the parent of any other AUM. out := make([]AUM, 0, 6) // 6 is arbitrary. 
- err := c.scanHashes(func(info *fsHashInfo) { - if len(info.Children) == 0 && info.AUM != nil && info.PurgedUnix == 0 { - out = append(out, *info.AUM) + + for _, h := range allAUMs { + if slices.Contains(parentHashes, h) { + continue } - }) - return out, err + aum, err := c.AUM(h) + if err != nil { + return nil, err + } + out = append(out, aum) + } + + return out, nil } // AllAUMs returns all AUMs stored in the chonk. @@ -362,7 +382,7 @@ func (c *FS) AllAUMs() ([]AUMHash, error) { out := make([]AUMHash, 0, 6) // 6 is arbitrary. err := c.scanHashes(func(info *fsHashInfo) { - if info.AUM != nil && info.PurgedUnix == 0 { + if info.AUM != nil { out = append(out, info.AUM.Hash()) } }) @@ -391,6 +411,9 @@ func (c *FS) scanHashes(eachHashInfo func(*fsHashInfo)) error { if err != nil { return fmt.Errorf("reading %x: %v", h, err) } + if info.PurgedUnix > 0 { + continue + } eachHashInfo(info) } @@ -445,24 +468,6 @@ func (c *FS) CommitVerifiedAUMs(updates []AUM) error { for i, aum := range updates { h := aum.Hash() - // We keep track of children against their parent so that - // ChildAUMs() do not need to scan all AUMs. - parent, hasParent := aum.Parent() - if hasParent { - err := c.commit(parent, func(info *fsHashInfo) { - // Only add it if its not already there. 
- for i := range info.Children { - if info.Children[i] == h { - return - } - } - info.Children = append(info.Children, h) - }) - if err != nil { - return fmt.Errorf("committing update[%d] to parent %x: %v", i, parent, err) - } - } - err := c.commit(h, func(info *fsHashInfo) { info.PurgedUnix = 0 // just in-case it was set for some reason info.AUM = &aum diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 376de323cf4af..cf6ea203bf86d 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/crypto/blake2s" + "tailscale.com/util/must" ) // randHash derives a fake blake2s hash from the test name @@ -144,9 +145,6 @@ func TestTailchonkFS_Commit(t *testing.T) { if _, err := os.Stat(filepath.Join(dir, base)); err != nil { t.Errorf("stat of AUM file failed: %v", err) } - if _, err := os.Stat(filepath.Join(chonk.base, "M7", "M7LL2NDB4NKCZIUPVS6RDM2GUOIMW6EEAFVBWMVCPUANQJPHT3SQ")); err != nil { - t.Errorf("stat of AUM parent failed: %v", err) - } info, err := chonk.get(aum.Hash()) if err != nil { @@ -199,6 +197,14 @@ func TestTailchonkFS_PurgeAUMs(t *testing.T) { } } +func hashesLess(x, y AUMHash) bool { + return bytes.Compare(x[:], y[:]) < 0 +} + +func aumHashesLess(x, y AUM) bool { + return hashesLess(x.Hash(), y.Hash()) +} + func TestTailchonkFS_AllAUMs(t *testing.T) { chonk := &FS{base: t.TempDir()} genesis := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{1, 2}} @@ -220,14 +226,54 @@ func TestTailchonkFS_AllAUMs(t *testing.T) { if err != nil { t.Fatal(err) } - hashesLess := func(a, b AUMHash) bool { - return bytes.Compare(a[:], b[:]) < 0 - } if diff := cmp.Diff([]AUMHash{genesis.Hash(), intermediate.Hash(), leaf.Hash()}, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { t.Fatalf("AllAUMs() output differs (-want, +got):\n%s", diff) } } +func TestTailchonkFS_ChildAUMsOfPurgedAUM(t *testing.T) { + chonk := &FS{base: t.TempDir()} + parent := 
AUM{MessageKind: AUMRemoveKey, KeyID: []byte{0, 0}} + + parentHash := parent.Hash() + + child1 := AUM{MessageKind: AUMAddKey, KeyID: []byte{1, 1}, PrevAUMHash: parentHash[:]} + child2 := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2}, PrevAUMHash: parentHash[:]} + child3 := AUM{MessageKind: AUMAddKey, KeyID: []byte{3, 3}, PrevAUMHash: parentHash[:]} + + child2Hash := child2.Hash() + grandchild2A := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + grandchild2B := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + + commitSet := []AUM{parent, child1, child2, child3, grandchild2A, grandchild2B} + + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + // Check the set of hashes is correct + childHashes := must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Purge the parent AUM, and check the set of child AUMs is unchanged + chonk.PurgeAUMs([]AUMHash{parent.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Now purge one of the child AUMs, and check it no longer appears as a child of the parent + chonk.PurgeAUMs([]AUMHash{child3.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]AUM{child1, child2}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } +} + func TestMarkActiveChain(t *testing.T) { type aumTemplate struct { AUM AUM @@ -585,10 +631,7 @@ func (c *compactingChonkFake) CommitTime(hash AUMHash) (time.Time, error) { } func (c 
*compactingChonkFake) PurgeAUMs(hashes []AUMHash) error { - lessHashes := func(a, b AUMHash) bool { - return bytes.Compare(a[:], b[:]) < 0 - } - if diff := cmp.Diff(c.wantDelete, hashes, cmpopts.SortSlices(lessHashes)); diff != "" { + if diff := cmp.Diff(c.wantDelete, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { c.t.Errorf("deletion set differs (-want, +got):\n%s", diff) } return nil From 8d119f62eebd6c3782f366d225df8b5f352f3daa Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 17 Oct 2025 11:13:14 +0100 Subject: [PATCH 0584/1093] wgengine/magicsock: minor tidies in Test_endpoint_maybeProbeUDPLifetimeLocked * Remove a couple of single-letter `l` variables * Use named struct parameters in the test cases for readability * Delete `wantAfterInactivityForFn` parameter when it returns the default zero Updates #cleanup Signed-off-by: Alex Chan --- wgengine/magicsock/endpoint_test.go | 171 +++++++++++++--------------- 1 file changed, 77 insertions(+), 94 deletions(-) diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index 666d862310c44..df1c9340657e4 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -173,130 +173,110 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { wantMaybe bool }{ { - "nil probeUDPLifetime", - higher, - &lower, - func() *probeUDPLifetime { + name: "nil probeUDPLifetime", + localDisco: higher, + remoteDisco: &lower, + probeUDPLifetimeFn: func() *probeUDPLifetime { return nil }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + bestAddr: addr, }, { - "local higher disco key", - higher, - &lower, - newProbeUDPLifetime, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "local higher disco key", + localDisco: higher, + remoteDisco: &lower, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addr, }, { - "remote no disco key", - higher, - nil, - newProbeUDPLifetime, - addr, 
- func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "remote no disco key", + localDisco: higher, + remoteDisco: nil, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addr, }, { - "invalid bestAddr", - lower, - &higher, - newProbeUDPLifetime, - addrQuality{}, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "invalid bestAddr", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addrQuality{}, }, { - "cycle started too recently", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = false - l.cycleStartedAt = time.Now() - return l - }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 + name: "cycle started too recently", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = false + lt.cycleStartedAt = time.Now() + return lt }, - false, + bestAddr: addr, }, { - "maybe cliff 0 cycle not active", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = false - l.cycleStartedAt = time.Now().Add(-l.config.CycleCanStartEvery).Add(-time.Second) - return l + name: "maybe cliff 0 cycle not active", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = false + lt.cycleStartedAt = time.Now().Add(-lt.config.CycleCanStartEvery).Add(-time.Second) + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[0] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 0", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 0 - return l + name: "maybe cliff 0", + 
localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 0 + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[0] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 1", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 1 - return l + name: "maybe cliff 1", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 1 + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[1] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 2", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 2 - return l + name: "maybe cliff 2", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 2 + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[2] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, } for _, tt := range tests { @@ -316,7 +296,10 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { p := tt.probeUDPLifetimeFn() de.probeUDPLifetime = p gotAfterInactivityFor, gotMaybe := de.maybeProbeUDPLifetimeLocked() - wantAfterInactivityFor := tt.wantAfterInactivityForFn(p) + var wantAfterInactivityFor time.Duration + if 
tt.wantAfterInactivityForFn != nil { + wantAfterInactivityFor = tt.wantAfterInactivityForFn(p) + } if gotAfterInactivityFor != wantAfterInactivityFor { t.Errorf("maybeProbeUDPLifetimeLocked() gotAfterInactivityFor = %v, want %v", gotAfterInactivityFor, wantAfterInactivityFor) } From 6493206ac7f67ef4261018a3fb64122571fb5297 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 17 Oct 2025 10:00:42 -0700 Subject: [PATCH 0585/1093] .github/workflows: pin nix-related github actions (#17574) Updates #cleanup Signed-off-by: Andrew Lytvynov --- .github/workflows/flakehub-publish-tagged.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/flakehub-publish-tagged.yml b/.github/workflows/flakehub-publish-tagged.yml index 9ff12c6a3fd14..50bb8b9f74de5 100644 --- a/.github/workflows/flakehub-publish-tagged.yml +++ b/.github/workflows/flakehub-publish-tagged.yml @@ -20,8 +20,8 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}" - - uses: "DeterminateSystems/nix-installer-action@main" - - uses: "DeterminateSystems/flakehub-push@main" + - uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 # v20 + - uses: DeterminateSystems/flakehub-push@71f57208810a5d299fc6545350981de98fdbc860 # v6 with: visibility: "public" tag: "${{ inputs.tag }}" From 9083ef1ac4ca9de0d17a5da1c6a4cb5a22dc5b8e Mon Sep 17 00:00:00 2001 From: David Bond Date: Fri, 17 Oct 2025 18:32:30 +0100 Subject: [PATCH 0586/1093] cmd/k8s-operator: allow pod tolerations on nameservers (#17260) This commit modifies the `DNSConfig` custom resource to allow specifying [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) on the nameserver pods. This will allow users to dictate where their nameserver pods are located within their clusters. 
Fixes: https://github.com/tailscale/tailscale/issues/17092 Signed-off-by: David Bond --- .../deploy/crds/tailscale.com_dnsconfigs.yaml | 43 +++++++++++++++++++ .../deploy/manifests/operator.yaml | 43 +++++++++++++++++++ cmd/k8s-operator/nameserver.go | 19 +++++--- cmd/k8s-operator/nameserver_test.go | 19 ++++++++ k8s-operator/api.md | 17 ++++++++ .../apis/v1alpha1/types_tsdnsconfig.go | 10 +++++ .../apis/v1alpha1/zz_generated.deepcopy.go | 27 ++++++++++++ 7 files changed, 171 insertions(+), 7 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index 43ebaecec9161..a819aa6518684 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -100,6 +100,49 @@ spec: tag: description: Tag defaults to unstable. type: string + pod: + description: Pod configuration. + type: object + properties: + tolerations: + description: If specified, applies tolerations to the pods deployed by the DNSConfig resource. + type: array + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + type: object + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. 
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string replicas: description: Replicas specifies how many Pods to create. Defaults to 1. type: integer diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 520d17eae3d2f..c7c5ef0a7d3b2 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -431,6 +431,49 @@ spec: description: Tag defaults to unstable. type: string type: object + pod: + description: Pod configuration. + properties: + tolerations: + description: If specified, applies tolerations to the pods deployed by the DNSConfig resource. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. 
+ Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object replicas: description: Replicas specifies how many Pods to create. Defaults to 1. format: int32 diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 3618642e1add1..5de1c47ba2b7e 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -191,6 +191,9 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa if tsDNSCfg.Spec.Nameserver.Service != nil { dCfg.clusterIP = tsDNSCfg.Spec.Nameserver.Service.ClusterIP } + if tsDNSCfg.Spec.Nameserver.Pod != nil { + dCfg.tolerations = tsDNSCfg.Spec.Nameserver.Pod.Tolerations + } for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} { if err := deployable.updateObj(ctx, dCfg, a.Client); err != nil { @@ -217,13 +220,14 @@ type deployable struct { } type deployConfig struct { - replicas int32 - imageRepo string - imageTag string - labels map[string]string - ownerRefs []metav1.OwnerReference - namespace string - clusterIP string + replicas int32 + imageRepo string + imageTag string + labels map[string]string + ownerRefs []metav1.OwnerReference + namespace string + clusterIP string + tolerations []corev1.Toleration } var ( @@ -248,6 +252,7 @@ var ( 
d.ObjectMeta.Namespace = cfg.namespace d.ObjectMeta.Labels = cfg.labels d.ObjectMeta.OwnerReferences = cfg.ownerRefs + d.Spec.Template.Spec.Tolerations = cfg.tolerations updateF := func(oldD *appsv1.Deployment) { oldD.Spec = d.Spec } diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index 88e48b753126f..6da52d8a21490 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -42,6 +42,16 @@ func TestNameserverReconciler(t *testing.T) { Service: &tsapi.NameserverService{ ClusterIP: "5.4.3.2", }, + Pod: &tsapi.NameserverPod{ + Tolerations: []corev1.Toleration{ + { + Key: "some-key", + Operator: corev1.TolerationOpEqual, + Value: "some-value", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, }, }, } @@ -79,6 +89,15 @@ func TestNameserverReconciler(t *testing.T) { wantsDeploy.Spec.Replicas = ptr.To[int32](3) wantsDeploy.Namespace = tsNamespace wantsDeploy.ObjectMeta.Labels = nameserverLabels + wantsDeploy.Spec.Template.Spec.Tolerations = []corev1.Toleration{ + { + Key: "some-key", + Operator: corev1.TolerationOpEqual, + Value: "some-value", + Effect: corev1.TaintEffectNoSchedule, + }, + } + expectEqual(t, fc, wantsDeploy) }) diff --git a/k8s-operator/api.md b/k8s-operator/api.md index d75a21e37337a..979d199cb0783 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -443,6 +443,7 @@ _Appears in:_ | --- | --- | --- | --- | | `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | | | `service` _[NameserverService](#nameserverservice)_ | Service configuration. | | | +| `pod` _[NameserverPod](#nameserverpod)_ | Pod configuration. | | | | `replicas` _integer_ | Replicas specifies how many Pods to create. Defaults to 1. | | Minimum: 0
      | @@ -463,6 +464,22 @@ _Appears in:_ | `tag` _string_ | Tag defaults to unstable. | | | +#### NameserverPod + + + + + + + +_Appears in:_ +- [Nameserver](#nameserver) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | If specified, applies tolerations to the pods deployed by the DNSConfig resource. | | | + + #### NameserverService diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 4d8d569f68eba..7991003b82dff 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -6,6 +6,7 @@ package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -84,6 +85,9 @@ type Nameserver struct { // Service configuration. // +optional Service *NameserverService `json:"service,omitempty"` + // Pod configuration. + // +optional + Pod *NameserverPod `json:"pod,omitempty"` // Replicas specifies how many Pods to create. Defaults to 1. // +optional // +kubebuilder:validation:Minimum=0 @@ -105,6 +109,12 @@ type NameserverService struct { ClusterIP string `json:"clusterIP,omitempty"` } +type NameserverPod struct { + // If specified, applies tolerations to the pods deployed by the DNSConfig resource. 
+ // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + type DNSConfigStatus struct { // +listType=map // +listMapKey=type diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 5684fd5f82b4e..7492f1e547395 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -422,6 +422,11 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) { *out = new(NameserverService) **out = **in } + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(NameserverPod) + (*in).DeepCopyInto(*out) + } if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) @@ -454,6 +459,28 @@ func (in *NameserverImage) DeepCopy() *NameserverImage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameserverPod) DeepCopyInto(out *NameserverPod) { + *out = *in + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameserverPod. +func (in *NameserverPod) DeepCopy() *NameserverPod { + if in == nil { + return nil + } + out := new(NameserverPod) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NameserverService) DeepCopyInto(out *NameserverService) { *out = *in From 54cee33baec6a2beeaa4aee2c771a8d9312fd8ac Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 17 Oct 2025 09:25:10 -0700 Subject: [PATCH 0587/1093] go.toolchain.rev: update to Go 1.25.3 Updates tailscale/go#140 Updates tailscale/go#142 Updates tailscale/go#138 Change-Id: Id25b6fa4e31eee243fec17667f14cdc48243c59e Signed-off-by: Brad Fitzpatrick --- go.mod | 2 +- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- go.toolchain.version | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 0c6d33fa09fe0..3c281fa7a34bf 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.2 +go 1.25.3 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index d5de7955850fd..9ea6b37dcbc32 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -a80a86e575c5b7b23b78540e947335d22f74d274 +5c01b77ad0d27a8bd4ef89ef7e713fd7043c5a91 diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index 9cbf36b930e46..a62a525998ac7 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-1OCmJ7sZL6G/6wO2+lnW4uYPCIdbXhscD5qSTIPoxDk= +sha256-2TYziJLJrFOW2FehhahKficnDACJEwjuvVYyeQZbrcc= diff --git a/go.toolchain.version b/go.toolchain.version index 61b813d5e6327..5bb76b575e1f5 100644 --- a/go.toolchain.version +++ b/go.toolchain.version @@ -1 +1 @@ -1.25.2 +1.25.3 From 6a73c0bdf5539971840e19c75113d8414f22a9c8 Mon Sep 17 00:00:00 2001 From: Max Coulombe Date: Fri, 17 Oct 2025 18:05:32 -0400 Subject: [PATCH 0588/1093] cmd/tailscale/cli,feature: add support for identity federation (#17529) Add new arguments to `tailscale up` so authkeys can be generated dynamically via identity federation. 
Updates #9192 Signed-off-by: mcoulombe --- cmd/tailscale/cli/up.go | 50 +++++- cmd/tailscale/cli/up_test.go | 3 + cmd/tailscale/depaware.txt | 4 +- cmd/tailscaled/depaware-minbox.txt | 1 + cmd/tailscaled/deps_test.go | 6 +- .../feature_identity_federation_disabled.go | 13 ++ .../feature_identity_federation_enabled.go | 13 ++ .../condregister/identityfederation/doc.go | 7 + .../maybe_identityfederation.go | 8 + feature/featuretags/featuretags.go | 13 +- .../identityfederation/identityfederation.go | 127 +++++++++++++ .../identityfederation_test.go | 167 ++++++++++++++++++ .../client/tailscale/identityfederation.go | 19 ++ internal/client/tailscale/tailscale.go | 3 + 14 files changed, 420 insertions(+), 14 deletions(-) create mode 100644 feature/buildfeatures/feature_identity_federation_disabled.go create mode 100644 feature/buildfeatures/feature_identity_federation_enabled.go create mode 100644 feature/condregister/identityfederation/doc.go create mode 100644 feature/condregister/identityfederation/maybe_identityfederation.go create mode 100644 feature/identityfederation/identityfederation.go create mode 100644 feature/identityfederation/identityfederation_test.go create mode 100644 internal/client/tailscale/identityfederation.go diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 07e008aab69c7..91a6b60878a93 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -25,6 +25,7 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" qrcode "github.com/skip2/go-qrcode" "tailscale.com/feature/buildfeatures" + _ "tailscale.com/feature/condregister/identityfederation" _ "tailscale.com/feature/condregister/oauthkey" "tailscale.com/health/healthmsg" "tailscale.com/internal/client/tailscale" @@ -96,6 +97,9 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs") upf.StringVar(&upArgs.qrFormat, "qr-format", "small", "QR code formatting (small or large)") 
upf.StringVar(&upArgs.authKeyOrFile, "auth-key", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`) + upf.StringVar(&upArgs.clientID, "client-id", "", "Client ID used to generate authkeys via workload identity federation") + upf.StringVar(&upArgs.clientSecretOrFile, "client-secret", "", `Client Secret used to generate authkeys via OAuth; if it begins with "file:", then it's a path to a file containing the secret`) + upf.StringVar(&upArgs.idTokenOrFile, "id-token", "", `ID token from the identity provider to exchange with the control server for workload identity federation; if it begins with "file:", then it's a path to a file containing the token`) upf.StringVar(&upArgs.server, "login-server", ipn.DefaultControlURL, "base URL of control server") upf.BoolVar(&upArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes") @@ -184,6 +188,9 @@ type upArgsT struct { statefulFiltering bool netfilterMode string authKeyOrFile string // "secret" or "file:/path/to/secret" + clientID string + clientSecretOrFile string // "secret" or "file:/path/to/secret" + idTokenOrFile string // "secret" or "file:/path/to/secret" hostname string opUser string json bool @@ -193,8 +200,9 @@ type upArgsT struct { postureChecking bool } -func (a upArgsT) getAuthKey() (string, error) { - v := a.authKeyOrFile +// resolveValueFromFile returns the value as-is, or if it starts with "file:", +// reads and returns the trimmed contents of the file. 
+func resolveValueFromFile(v string) (string, error) { if file, ok := strings.CutPrefix(v, "file:"); ok { b, err := os.ReadFile(file) if err != nil { @@ -205,6 +213,18 @@ func (a upArgsT) getAuthKey() (string, error) { return v, nil } +func (a upArgsT) getAuthKey() (string, error) { + return resolveValueFromFile(a.authKeyOrFile) +} + +func (a upArgsT) getClientSecret() (string, error) { + return resolveValueFromFile(a.clientSecretOrFile) +} + +func (a upArgsT) getIDToken() (string, error) { + return resolveValueFromFile(a.idTokenOrFile) +} + var upArgsGlobal upArgsT // Fields output when `tailscale up --json` is used. Two JSON blocks will be output. @@ -586,11 +606,33 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE // Try to use an OAuth secret to generate an auth key if that functionality // is available. if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { - authKey, err = f(ctx, authKey, strings.Split(upArgs.advertiseTags, ",")) + clientSecret := authKey // the authkey argument accepts client secrets, if both arguments are provided authkey has precedence + if clientSecret == "" { + clientSecret, err = upArgs.getClientSecret() + if err != nil { + return err + } + } + + authKey, err = f(ctx, clientSecret, strings.Split(upArgs.advertiseTags, ",")) + if err != nil { + return err + } + } + // Try to resolve the auth key via workload identity federation if that functionality + // is available and no auth key is yet determined. + if f, ok := tailscale.HookResolveAuthKeyViaWIF.GetOk(); ok && authKey == "" { + idToken, err := upArgs.getIDToken() + if err != nil { + return err + } + + authKey, err = f(ctx, prefs.ControlURL, upArgs.clientID, idToken, strings.Split(upArgs.advertiseTags, ",")) if err != nil { return err } } + err = localClient.Start(ctx, ipn.Options{ AuthKey: authKey, UpdatePrefs: prefs, @@ -869,7 +911,7 @@ func addPrefFlagMapping(flagName string, prefNames ...string) { // correspond to an ipn.Pref. 
func preflessFlag(flagName string) bool { switch flagName { - case "auth-key", "force-reauth", "reset", "qr", "qr-format", "json", "timeout", "accept-risk", "host-routes": + case "auth-key", "force-reauth", "reset", "qr", "qr-format", "json", "timeout", "accept-risk", "host-routes", "client-id", "client-secret", "id-token": return true } return false diff --git a/cmd/tailscale/cli/up_test.go b/cmd/tailscale/cli/up_test.go index efddb53249b55..fe2f1b555a2bc 100644 --- a/cmd/tailscale/cli/up_test.go +++ b/cmd/tailscale/cli/up_test.go @@ -43,6 +43,9 @@ var validUpFlags = set.Of( "stateful-filtering", "timeout", "unattended", + "client-id", + "client-secret", + "id-token", ) // TestUpFlagSetIsFrozen complains when new flags are added to tailscale up. diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index d5b7b059f8381..b249639bc80bc 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -98,9 +98,11 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/feature from tailscale.com/tsweb+ tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli+ tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/identityfederation from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/identityfederation from tailscale.com/feature/condregister/identityfederation tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli @@ -245,7 +247,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by 
github.com/tailscale/dep golang.org/x/net/ipv6 from golang.org/x/net/icmp+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ - golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index d46180e2d135e..9633e73989046 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -75,6 +75,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/identityfederation from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index b98c53eb55cf5..64d1beca7cd75 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -137,14 +137,14 @@ func TestOmitCaptivePortal(t *testing.T) { }.Check(t) } -func TestOmitOAuthKey(t *testing.T) { +func TestOmitAuth(t *testing.T) { deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", - Tags: "ts_omit_oauthkey,ts_include_cli", + Tags: "ts_omit_oauthkey,ts_omit_identityfederation,ts_include_cli", OnDep: func(dep string) { if strings.HasPrefix(dep, "golang.org/x/oauth2") { - t.Errorf("unexpected dep with ts_omit_oauthkey: %q", 
dep) + t.Errorf("unexpected oauth2 dep: %q", dep) } }, }.Check(t) diff --git a/feature/buildfeatures/feature_identity_federation_disabled.go b/feature/buildfeatures/feature_identity_federation_disabled.go new file mode 100644 index 0000000000000..c7b16f729cbc5 --- /dev/null +++ b/feature/buildfeatures/feature_identity_federation_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_identity_federation + +package buildfeatures + +// HasIdentityFederation is whether the binary was built with support for modular feature "Identity token exchange for auth key support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identity_federation" build tag. +// It's a const so it can be used for dead code elimination. +const HasIdentityFederation = false diff --git a/feature/buildfeatures/feature_identity_federation_enabled.go b/feature/buildfeatures/feature_identity_federation_enabled.go new file mode 100644 index 0000000000000..1f7cf17423c96 --- /dev/null +++ b/feature/buildfeatures/feature_identity_federation_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_identity_federation + +package buildfeatures + +// HasIdentityFederation is whether the binary was built with support for modular feature "Identity token exchange for auth key support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identity_federation" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasIdentityFederation = true diff --git a/feature/condregister/identityfederation/doc.go b/feature/condregister/identityfederation/doc.go new file mode 100644 index 0000000000000..503b2c8f127d5 --- /dev/null +++ b/feature/condregister/identityfederation/doc.go @@ -0,0 +1,7 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package identityfederation registers support for authkey resolution +// via identity federation if it's not disabled by the +// ts_omit_identityfederation build tag. +package identityfederation diff --git a/feature/condregister/identityfederation/maybe_identityfederation.go b/feature/condregister/identityfederation/maybe_identityfederation.go new file mode 100644 index 0000000000000..b1db42fc3c77a --- /dev/null +++ b/feature/condregister/identityfederation/maybe_identityfederation.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_identityfederation + +package identityfederation + +import _ "tailscale.com/feature/identityfederation" diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 9c85dbaa0d923..c93e8b15b1001 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -155,12 +155,13 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Generic Receive Offload support (performance)", Deps: []FeatureTag{"netstack"}, }, - "health": {Sym: "Health", Desc: "Health checking support"}, - "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, - "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, - "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, - "lazywg": {Sym: "LazyWG", Desc: "Lazy WireGuard configuration for memory-constrained devices with large netmaps"}, - "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, + "health": {Sym: "Health", Desc: "Health 
checking support"}, + "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, + "identityfederation": {Sym: "IdentityFederation", Desc: "Auth key generation via identity federation support"}, + "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, + "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, + "lazywg": {Sym: "LazyWG", Desc: "Lazy WireGuard configuration for memory-constrained devices with large netmaps"}, + "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, "linkspeed": { Sym: "LinkSpeed", Desc: "Set link speed on TUN device for better OS integration (Linux only)", diff --git a/feature/identityfederation/identityfederation.go b/feature/identityfederation/identityfederation.go new file mode 100644 index 0000000000000..a4470fc27eaea --- /dev/null +++ b/feature/identityfederation/identityfederation.go @@ -0,0 +1,127 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package identityfederation registers support for using ID tokens to +// automatically request authkeys for logging in. +package identityfederation + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2" + "tailscale.com/feature" + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" +) + +func init() { + feature.Register("identityfederation") + tailscale.HookResolveAuthKeyViaWIF.Set(resolveAuthKey) +} + +// resolveAuthKey uses OIDC identity federation to exchange the provided ID token and client ID for an authkey. 
+func resolveAuthKey(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + if clientID == "" { + return "", nil // Short-circuit, no client ID means not using identity federation + } + + if idToken == "" { + return "", errors.New("federated identity authkeys require --id-token") + } + if len(tags) == 0 { + return "", errors.New("federated identity authkeys require --advertise-tags") + } + if baseURL == "" { + baseURL = ipn.DefaultControlURL + } + + ephemeral, preauth, err := parseOptionalAttributes(clientID) + if err != nil { + return "", fmt.Errorf("failed to parse optional config attributes: %w", err) + } + + accessToken, err := exchangeJWTForToken(ctx, baseURL, clientID, idToken) + if err != nil { + return "", fmt.Errorf("failed to exchange JWT for access token: %w", err) + } + if accessToken == "" { + return "", errors.New("received empty access token from Tailscale") + } + + tsClient := tailscale.NewClient("-", tailscale.APIKey(accessToken)) + tsClient.UserAgent = "tailscale-cli-identity-federation" + tsClient.BaseURL = baseURL + + authkey, _, err := tsClient.CreateKey(ctx, tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Ephemeral: ephemeral, + Preauthorized: preauth, + Tags: tags, + }, + }, + }) + if err != nil { + return "", fmt.Errorf("unexpected error while creating authkey: %w", err) + } + if authkey == "" { + return "", errors.New("received empty authkey from control server") + } + + return authkey, nil +} + +func parseOptionalAttributes(clientID string) (ephemeral bool, preauthorized bool, err error) { + _, attrs, found := strings.Cut(clientID, "?") + if !found { + return true, false, nil + } + + parsed, err := url.ParseQuery(attrs) + if err != nil { + return false, false, fmt.Errorf("failed to parse optional config attributes: %w", err) + } + + for k := range parsed { + switch k { + case "ephemeral": + ephemeral, err = 
strconv.ParseBool(parsed.Get(k)) + case "preauthorized": + preauthorized, err = strconv.ParseBool(parsed.Get(k)) + default: + return false, false, fmt.Errorf("unknown optional config attribute %q", k) + } + } + + return ephemeral, preauthorized, err +} + +// exchangeJWTForToken exchanges a JWT for a Tailscale access token. +func exchangeJWTForToken(ctx context.Context, baseURL, clientID, idToken string) (string, error) { + httpClient := &http.Client{Timeout: 10 * time.Second} + ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) + + token, err := (&oauth2.Config{ + Endpoint: oauth2.Endpoint{ + TokenURL: fmt.Sprintf("%s/api/v2/oauth/token-exchange", baseURL), + }, + }).Exchange(ctx, "", oauth2.SetAuthURLParam("client_id", clientID), oauth2.SetAuthURLParam("jwt", idToken)) + if err != nil { + // Try to extract more detailed error message + var retrieveErr *oauth2.RetrieveError + if errors.As(err, &retrieveErr) { + return "", fmt.Errorf("token exchange failed with status %d: %s", retrieveErr.Response.StatusCode, string(retrieveErr.Body)) + } + return "", fmt.Errorf("unexpected token exchange request error: %w", err) + } + + return token.AccessToken, nil +} diff --git a/feature/identityfederation/identityfederation_test.go b/feature/identityfederation/identityfederation_test.go new file mode 100644 index 0000000000000..7b75852a819a1 --- /dev/null +++ b/feature/identityfederation/identityfederation_test.go @@ -0,0 +1,167 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package identityfederation + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResolveAuthKey(t *testing.T) { + tests := []struct { + name string + clientID string + idToken string + tags []string + wantAuthKey string + wantErr string + }{ + { + name: "success", + clientID: "client-123", + idToken: "token", + tags: []string{"tag:test"}, + wantAuthKey: "tskey-auth-xyz", + wantErr: "", + }, + { + name: "missing 
client id short-circuits without error", + clientID: "", + idToken: "token", + tags: []string{"tag:test"}, + wantAuthKey: "", + wantErr: "", + }, + { + name: "missing id token", + clientID: "client-123", + idToken: "", + tags: []string{"tag:test"}, + wantErr: "federated identity authkeys require --id-token", + }, + { + name: "missing tags", + clientID: "client-123", + idToken: "token", + tags: []string{}, + wantErr: "federated identity authkeys require --advertise-tags", + }, + { + name: "invalid client id attributes", + clientID: "client-123?invalid=value", + idToken: "token", + tags: []string{"tag:test"}, + wantErr: `failed to parse optional config attributes: unknown optional config attribute "invalid"`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := mockedControlServer(t) + defer srv.Close() + + authKey, err := resolveAuthKey(context.Background(), srv.URL, tt.clientID, tt.idToken, tt.tags) + if tt.wantErr != "" { + if err == nil { + t.Errorf("resolveAuthKey() error = nil, want %q", tt.wantErr) + return + } + if err.Error() != tt.wantErr { + t.Errorf("resolveAuthKey() error = %q, want %q", err.Error(), tt.wantErr) + } + } else if err != nil { + t.Fatalf("resolveAuthKey() unexpected error = %v", err) + } + if authKey != tt.wantAuthKey { + t.Errorf("resolveAuthKey() = %q, want %q", authKey, tt.wantAuthKey) + } + }) + } +} + +func TestParseOptionalAttributes(t *testing.T) { + tests := []struct { + name string + clientID string + wantEphemeral bool + wantPreauth bool + wantErr string + }{ + { + name: "default values", + clientID: "client-123", + wantEphemeral: true, + wantPreauth: false, + wantErr: "", + }, + { + name: "custom values", + clientID: "client-123?ephemeral=false&preauthorized=true", + wantEphemeral: false, + wantPreauth: true, + wantErr: "", + }, + { + name: "unknown attribute", + clientID: "client-123?unknown=value", + wantEphemeral: false, + wantPreauth: false, + wantErr: `unknown optional config attribute 
"unknown"`, + }, + { + name: "invalid value", + clientID: "client-123?ephemeral=invalid", + wantEphemeral: false, + wantPreauth: false, + wantErr: `strconv.ParseBool: parsing "invalid": invalid syntax`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ephemeral, preauth, err := parseOptionalAttributes(tt.clientID) + if tt.wantErr != "" { + if err == nil { + t.Errorf("parseOptionalAttributes() error = nil, want %q", tt.wantErr) + return + } + if err.Error() != tt.wantErr { + t.Errorf("parseOptionalAttributes() error = %q, want %q", err.Error(), tt.wantErr) + } + } else { + if err != nil { + t.Errorf("parseOptionalAttributes() error = %v, want nil", err) + return + } + } + if ephemeral != tt.wantEphemeral { + t.Errorf("parseOptionalAttributes() ephemeral = %v, want %v", ephemeral, tt.wantEphemeral) + } + if preauth != tt.wantPreauth { + t.Errorf("parseOptionalAttributes() preauth = %v, want %v", preauth, tt.wantPreauth) + } + }) + } +} + +func mockedControlServer(t *testing.T) *httptest.Server { + t.Helper() + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.Contains(r.URL.Path, "/oauth/token-exchange"): + // OAuth2 library sends the token exchange request + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"access-123","token_type":"Bearer","expires_in":3600}`)) + case strings.Contains(r.URL.Path, "/api/v2/tailnet") && strings.Contains(r.URL.Path, "/keys"): + // Tailscale client creates the authkey + w.Write([]byte(`{"key":"tskey-auth-xyz","created":"2024-01-01T00:00:00Z"}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} diff --git a/internal/client/tailscale/identityfederation.go b/internal/client/tailscale/identityfederation.go new file mode 100644 index 0000000000000..e1fe3559c7b44 --- /dev/null +++ b/internal/client/tailscale/identityfederation.go @@ -0,0 +1,19 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// 
SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "context" + + "tailscale.com/feature" +) + +// HookResolveAuthKeyViaWIF resolves to [identityfederation.ResolveAuthKey] when the +// corresponding feature tag is enabled in the build process. +// +// baseURL is the URL of the control server used for token exchange and authkey generation. +// clientID is the federated client ID used for token exchange, the format is / +// idToken is the Identity token from the identity provider +// tags is the list of tags to be associated with the auth key +var HookResolveAuthKeyViaWIF feature.Hook[func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error)] diff --git a/internal/client/tailscale/tailscale.go b/internal/client/tailscale/tailscale.go index cba7228bbc8b3..0e603bf792562 100644 --- a/internal/client/tailscale/tailscale.go +++ b/internal/client/tailscale/tailscale.go @@ -25,6 +25,9 @@ func init() { // AuthMethod is an alias to tailscale.com/client/tailscale. type AuthMethod = tsclient.AuthMethod +// APIKey is an alias to tailscale.com/client/tailscale. +type APIKey = tsclient.APIKey + // Device is an alias to tailscale.com/client/tailscale. type Device = tsclient.Device From c961d580912d25f48f1b916b9b2bc08f394b994d Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 20 Oct 2025 11:23:35 +0100 Subject: [PATCH 0589/1093] cmd/tailscale: improve the error message for `lock log` with no lock Previously, running `tailscale lock log` in a tailnet without Tailnet Lock enabled would return a potentially confusing error: $ tailscale lock log 2025/10/20 11:07:09 failed to connect to local Tailscale service; is Tailscale running? It would return this error even if Tailscale was running. 
This patch fixes the error to be: $ tailscale lock log Tailnet Lock is not enabled Fixes #17586 Signed-off-by: Alex Chan --- cmd/tailscale/cli/network-lock.go | 8 ++++++ tstest/integration/integration_test.go | 40 ++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index f355f99b97ac5..a15d9ab88b596 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -690,6 +690,14 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er } func runNetworkLockLog(ctx context.Context, args []string) error { + st, err := localClient.NetworkLockStatus(ctx) + if err != nil { + return fixTailscaledConnectError(err) + } + if !st.Enabled { + return errors.New("Tailnet Lock is not enabled") + } + updates, err := localClient.NetworkLockLog(ctx, nlLogArgs.limit) if err != nil { return fixTailscaledConnectError(err) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 2e85bc8be2bb9..234bb8c6ec11a 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -2190,3 +2190,43 @@ func TestC2NDebugNetmap(t *testing.T) { t.Errorf("expected peer to be online; got %+v", nm.Peers[0].AsStruct()) } } + +func TestNetworkLock(t *testing.T) { + + // If you run `tailscale lock log` on a node where Tailnet Lock isn't + // enabled, you get an error explaining that. + t.Run("log-when-not-enabled", func(t *testing.T) { + tstest.Shard(t) + t.Parallel() + + env := NewTestEnv(t) + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + defer d1.MustCleanShutdown(t) + + n1.MustUp() + n1.AwaitRunning() + + cmdArgs := []string{"lock", "log"} + t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) + + var outBuf, errBuf bytes.Buffer + + cmd := n1.Tailscale(cmdArgs...) 
+ cmd.Stdout = &outBuf + cmd.Stderr = &errBuf + + if err := cmd.Run(); !isNonZeroExitCode(err) { + t.Fatalf("command did not fail with non-zero exit code: %q", err) + } + + if outBuf.String() != "" { + t.Fatalf("stdout: want '', got %q", outBuf.String()) + } + + wantErr := "Tailnet Lock is not enabled\n" + if errBuf.String() != wantErr { + t.Fatalf("stderr: want %q, got %q", wantErr, errBuf.String()) + } + }) +} From 4673992b96603fbc1de370af7a6b3a1a68205d0b Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 17 Oct 2025 15:06:55 +0100 Subject: [PATCH 0590/1093] tka: created a shared testing library for Chonk This patch creates a set of tests that should be true for all implementations of Chonk and CompactableChonk, which we can share with the SQLite implementation in corp. It includes all the existing tests, plus a test for LastActiveAncestor which was in corp but not in oss. Updates https://github.com/tailscale/corp/issues/33465 Signed-off-by: Alex Chan --- tka/tailchonk_test.go | 213 ++---------------------- tstest/chonktest/chonktest.go | 256 +++++++++++++++++++++++++++++ tstest/chonktest/tailchonk_test.go | 53 ++++++ 3 files changed, 322 insertions(+), 200 deletions(-) create mode 100644 tstest/chonktest/chonktest.go create mode 100644 tstest/chonktest/tailchonk_test.go diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index cf6ea203bf86d..08686598033b8 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -5,7 +5,6 @@ package tka import ( "bytes" - "fmt" "os" "path/filepath" "sync" @@ -18,6 +17,13 @@ import ( "tailscale.com/util/must" ) +// This package has implementation-specific tests for Mem and FS. +// +// We also have tests for the Chonk interface in `chonktest`, which exercises +// both Mem and FS. Those tests are in a separate package so they can be shared +// with other repos; we don't call the shared test helpers from this package +// to avoid creating a circular dependency. 
+ // randHash derives a fake blake2s hash from the test name // and the given seed. func randHash(t *testing.T, seed int64) [blake2s.Size]byte { @@ -31,103 +37,8 @@ func TestImplementsChonk(t *testing.T) { t.Logf("chonks: %v", impls) } -func TestTailchonk_ChildAUMs(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - parentHash := randHash(t, 1) - data := []AUM{ - { - MessageKind: AUMRemoveKey, - KeyID: []byte{1, 2}, - PrevAUMHash: parentHash[:], - }, - { - MessageKind: AUMRemoveKey, - KeyID: []byte{3, 4}, - PrevAUMHash: parentHash[:], - }, - } - - if err := chonk.CommitVerifiedAUMs(data); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - stored, err := chonk.ChildAUMs(parentHash) - if err != nil { - t.Fatalf("ChildAUMs failed: %v", err) - } - if diff := cmp.Diff(data, stored); diff != "" { - t.Errorf("stored AUM differs (-want, +got):\n%s", diff) - } - }) - } -} - -func TestTailchonk_AUMMissing(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - var notExists AUMHash - notExists[:][0] = 42 - if _, err := chonk.AUM(notExists); err != os.ErrNotExist { - t.Errorf("chonk.AUM(notExists).err = %v, want %v", err, os.ErrNotExist) - } - }) - } -} - -func TestTailchonk_ReadChainFromHead(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - genesis := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{1, 2}} - gHash := genesis.Hash() - intermediate := AUM{PrevAUMHash: gHash[:]} - iHash := intermediate.Hash() - leaf := AUM{PrevAUMHash: iHash[:]} - - commitSet := []AUM{ - genesis, - intermediate, - leaf, - } - if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - // t.Logf("genesis hash = %X", genesis.Hash()) - // t.Logf("intermediate hash = %X", 
intermediate.Hash()) - // t.Logf("leaf hash = %X", leaf.Hash()) - - // Read the chain from the leaf backwards. - gotLeafs, err := chonk.Heads() - if err != nil { - t.Fatalf("Heads failed: %v", err) - } - if diff := cmp.Diff([]AUM{leaf}, gotLeafs); diff != "" { - t.Fatalf("leaf AUM differs (-want, +got):\n%s", diff) - } - - parent, _ := gotLeafs[0].Parent() - gotIntermediate, err := chonk.AUM(parent) - if err != nil { - t.Fatalf("AUM() failed: %v", err) - } - if diff := cmp.Diff(intermediate, gotIntermediate); diff != "" { - t.Errorf("intermediate AUM differs (-want, +got):\n%s", diff) - } - - parent, _ = gotIntermediate.Parent() - gotGenesis, err := chonk.AUM(parent) - if err != nil { - t.Fatalf("AUM() failed: %v", err) - } - if diff := cmp.Diff(genesis, gotGenesis); diff != "" { - t.Errorf("genesis AUM differs (-want, +got):\n%s", diff) - } - }) - } -} - func TestTailchonkFS_Commit(t *testing.T) { - chonk := &FS{base: t.TempDir()} + chonk := must.Get(ChonkDir(t.TempDir())) parentHash := randHash(t, 1) aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} @@ -156,7 +67,7 @@ func TestTailchonkFS_Commit(t *testing.T) { } func TestTailchonkFS_CommitTime(t *testing.T) { - chonk := &FS{base: t.TempDir()} + chonk := must.Get(ChonkDir(t.TempDir())) parentHash := randHash(t, 1) aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} @@ -172,108 +83,6 @@ func TestTailchonkFS_CommitTime(t *testing.T) { } } -func TestTailchonkFS_PurgeAUMs(t *testing.T) { - chonk := &FS{base: t.TempDir()} - parentHash := randHash(t, 1) - aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} - - if err := chonk.CommitVerifiedAUMs([]AUM{aum}); err != nil { - t.Fatal(err) - } - if err := chonk.PurgeAUMs([]AUMHash{aum.Hash()}); err != nil { - t.Fatal(err) - } - - if _, err := chonk.AUM(aum.Hash()); err != os.ErrNotExist { - t.Errorf("AUM() on purged AUM returned err = %v, want ErrNotExist", err) - } - - info, err := chonk.get(aum.Hash()) - if err != nil { - t.Fatal(err) - } - if 
info.PurgedUnix == 0 { - t.Errorf("recently-created AUM PurgedUnix = %d, want non-zero", info.PurgedUnix) - } -} - -func hashesLess(x, y AUMHash) bool { - return bytes.Compare(x[:], y[:]) < 0 -} - -func aumHashesLess(x, y AUM) bool { - return hashesLess(x.Hash(), y.Hash()) -} - -func TestTailchonkFS_AllAUMs(t *testing.T) { - chonk := &FS{base: t.TempDir()} - genesis := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{1, 2}} - gHash := genesis.Hash() - intermediate := AUM{PrevAUMHash: gHash[:]} - iHash := intermediate.Hash() - leaf := AUM{PrevAUMHash: iHash[:]} - - commitSet := []AUM{ - genesis, - intermediate, - leaf, - } - if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - - hashes, err := chonk.AllAUMs() - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff([]AUMHash{genesis.Hash(), intermediate.Hash(), leaf.Hash()}, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { - t.Fatalf("AllAUMs() output differs (-want, +got):\n%s", diff) - } -} - -func TestTailchonkFS_ChildAUMsOfPurgedAUM(t *testing.T) { - chonk := &FS{base: t.TempDir()} - parent := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{0, 0}} - - parentHash := parent.Hash() - - child1 := AUM{MessageKind: AUMAddKey, KeyID: []byte{1, 1}, PrevAUMHash: parentHash[:]} - child2 := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2}, PrevAUMHash: parentHash[:]} - child3 := AUM{MessageKind: AUMAddKey, KeyID: []byte{3, 3}, PrevAUMHash: parentHash[:]} - - child2Hash := child2.Hash() - grandchild2A := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} - grandchild2B := AUM{MessageKind: AUMAddKey, KeyID: []byte{2, 2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} - - commitSet := []AUM{parent, child1, child2, child3, grandchild2A, grandchild2B} - - if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - - // Check the set of hashes is correct - childHashes := 
must.Get(chonk.ChildAUMs(parentHash)) - if diff := cmp.Diff([]AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { - t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) - } - - // Purge the parent AUM, and check the set of child AUMs is unchanged - chonk.PurgeAUMs([]AUMHash{parent.Hash()}) - - childHashes = must.Get(chonk.ChildAUMs(parentHash)) - if diff := cmp.Diff([]AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { - t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) - } - - // Now purge one of the child AUMs, and check it no longer appears as a child of the parent - chonk.PurgeAUMs([]AUMHash{child3.Hash()}) - - childHashes = must.Get(chonk.ChildAUMs(parentHash)) - if diff := cmp.Diff([]AUM{child1, child2}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { - t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) - } -} - func TestMarkActiveChain(t *testing.T) { type aumTemplate struct { AUM AUM @@ -630,6 +439,10 @@ func (c *compactingChonkFake) CommitTime(hash AUMHash) (time.Time, error) { return c.aumAge[hash], nil } +func hashesLess(x, y AUMHash) bool { + return bytes.Compare(x[:], y[:]) < 0 +} + func (c *compactingChonkFake) PurgeAUMs(hashes []AUMHash) error { if diff := cmp.Diff(c.wantDelete, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { c.t.Errorf("deletion set differs (-want, +got):\n%s", diff) diff --git a/tstest/chonktest/chonktest.go b/tstest/chonktest/chonktest.go new file mode 100644 index 0000000000000..bfe394b28fcaf --- /dev/null +++ b/tstest/chonktest/chonktest.go @@ -0,0 +1,256 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package chonktest contains a shared set of tests for the Chonk +// interface used to store AUM messages in Tailnet Lock, which we can +// share between different implementations. 
+package chonktest + +import ( + "bytes" + "encoding/binary" + "math/rand" + "os" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "golang.org/x/crypto/blake2s" + "tailscale.com/tka" + "tailscale.com/util/must" +) + +// returns a random source based on the test name + extraSeed. +func testingRand(t *testing.T, extraSeed int64) *rand.Rand { + var seed int64 + if err := binary.Read(bytes.NewBuffer([]byte(t.Name())), binary.LittleEndian, &seed); err != nil { + panic(err) + } + return rand.New(rand.NewSource(seed + extraSeed)) +} + +// randHash derives a fake blake2s hash from the test name +// and the given seed. +func randHash(t *testing.T, seed int64) [blake2s.Size]byte { + var out [blake2s.Size]byte + testingRand(t, seed).Read(out[:]) + return out +} + +func hashesLess(x, y tka.AUMHash) bool { + return bytes.Compare(x[:], y[:]) < 0 +} + +func aumHashesLess(x, y tka.AUM) bool { + return hashesLess(x.Hash(), y.Hash()) +} + +// RunChonkTests is a set of tests for the behaviour of a Chonk. +// +// Any implementation of Chonk should pass these tests, so we know all +// Chonks behave in the same way. If you want to test behaviour that's +// specific to one implementation, write a separate test. 
+func RunChonkTests(t *testing.T, newChonk func(*testing.T) tka.Chonk) { + t.Run("ChildAUMs", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + data := []tka.AUM{ + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{1, 2}, + PrevAUMHash: parentHash[:], + }, + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{3, 4}, + PrevAUMHash: parentHash[:], + }, + } + + if err := chonk.CommitVerifiedAUMs(data); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + stored, err := chonk.ChildAUMs(parentHash) + if err != nil { + t.Fatalf("ChildAUMs failed: %v", err) + } + if diff := cmp.Diff(data, stored, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Errorf("stored AUM differs (-want, +got):\n%s", diff) + } + }) + + t.Run("AUMMissing", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + var notExists tka.AUMHash + notExists[:][0] = 42 + if _, err := chonk.AUM(notExists); err != os.ErrNotExist { + t.Errorf("chonk.AUM(notExists).err = %v, want %v", err, os.ErrNotExist) + } + }) + + t.Run("ReadChainFromHead", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + genesis := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + gHash := genesis.Hash() + intermediate := tka.AUM{PrevAUMHash: gHash[:]} + iHash := intermediate.Hash() + leaf := tka.AUM{PrevAUMHash: iHash[:]} + + commitSet := []tka.AUM{ + genesis, + intermediate, + leaf, + } + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + t.Logf("genesis hash = %X", genesis.Hash()) + t.Logf("intermediate hash = %X", intermediate.Hash()) + t.Logf("leaf hash = %X", leaf.Hash()) + + // Read the chain from the leaf backwards. 
+ gotLeafs, err := chonk.Heads() + if err != nil { + t.Fatalf("Heads failed: %v", err) + } + if diff := cmp.Diff([]tka.AUM{leaf}, gotLeafs); diff != "" { + t.Fatalf("leaf AUM differs (-want, +got):\n%s", diff) + } + + parent, _ := gotLeafs[0].Parent() + gotIntermediate, err := chonk.AUM(parent) + if err != nil { + t.Fatalf("AUM() failed: %v", err) + } + if diff := cmp.Diff(intermediate, gotIntermediate); diff != "" { + t.Errorf("intermediate AUM differs (-want, +got):\n%s", diff) + } + + parent, _ = gotIntermediate.Parent() + gotGenesis, err := chonk.AUM(parent) + if err != nil { + t.Fatalf("AUM() failed: %v", err) + } + if diff := cmp.Diff(genesis, gotGenesis); diff != "" { + t.Errorf("genesis AUM differs (-want, +got):\n%s", diff) + } + }) + + t.Run("LastActiveAncestor", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + + aum := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + hash := aum.Hash() + + if err := chonk.SetLastActiveAncestor(hash); err != nil { + t.Fatal(err) + } + got, err := chonk.LastActiveAncestor() + if err != nil { + t.Fatal(err) + } + if got == nil || hash.String() != got.String() { + t.Errorf("LastActiveAncestor=%s, want %s", got, hash) + } + }) +} + +// RunCompactableChonkTests is a set of tests for the behaviour of a +// CompactableChonk. +// +// Any implementation of CompactableChonk should pass these tests, so we +// know all CompactableChonk behave in the same way. If you want to test +// behaviour that's specific to one implementation, write a separate test. 
+func RunCompactableChonkTests(t *testing.T, newChonk func(t *testing.T) tka.CompactableChonk) { + t.Run("PurgeAUMs", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + aum := tka.AUM{MessageKind: tka.AUMNoOp, PrevAUMHash: parentHash[:]} + + if err := chonk.CommitVerifiedAUMs([]tka.AUM{aum}); err != nil { + t.Fatal(err) + } + if err := chonk.PurgeAUMs([]tka.AUMHash{aum.Hash()}); err != nil { + t.Fatal(err) + } + + if _, err := chonk.AUM(aum.Hash()); err != os.ErrNotExist { + t.Errorf("AUM() on purged AUM returned err = %v, want ErrNotExist", err) + } + }) + + t.Run("AllAUMs", func(t *testing.T) { + chonk := newChonk(t) + genesis := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + gHash := genesis.Hash() + intermediate := tka.AUM{PrevAUMHash: gHash[:]} + iHash := intermediate.Hash() + leaf := tka.AUM{PrevAUMHash: iHash[:]} + + commitSet := []tka.AUM{ + genesis, + intermediate, + leaf, + } + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + hashes, err := chonk.AllAUMs() + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff([]tka.AUMHash{genesis.Hash(), intermediate.Hash(), leaf.Hash()}, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { + t.Fatalf("AllAUMs() output differs (-want, +got):\n%s", diff) + } + }) + + t.Run("ChildAUMsOfPurgedAUM", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parent := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{0, 0}} + + parentHash := parent.Hash() + + child1 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{1, 1}, PrevAUMHash: parentHash[:]} + child2 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2}, PrevAUMHash: parentHash[:]} + child3 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{3, 3}, PrevAUMHash: parentHash[:]} + + child2Hash := child2.Hash() + grandchild2A := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + 
grandchild2B := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + + commitSet := []tka.AUM{parent, child1, child2, child3, grandchild2A, grandchild2B} + + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + // Check the set of hashes is correct + childHashes := must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Purge the parent AUM, and check the set of child AUMs is unchanged + chonk.PurgeAUMs([]tka.AUMHash{parent.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Now purge one of the child AUMs, and check it no longer appears as a child of the parent + chonk.PurgeAUMs([]tka.AUMHash{child3.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + }) +} diff --git a/tstest/chonktest/tailchonk_test.go b/tstest/chonktest/tailchonk_test.go new file mode 100644 index 0000000000000..ce6b043248de1 --- /dev/null +++ b/tstest/chonktest/tailchonk_test.go @@ -0,0 +1,53 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package chonktest + +import ( + "testing" + + "tailscale.com/tka" + "tailscale.com/util/must" +) + +func TestImplementsChonk(t *testing.T) { + for _, tt := range []struct { + name string + newChonk func(t *testing.T) tka.Chonk + }{ + { + name: "Mem", + newChonk: func(t *testing.T) tka.Chonk { + return &tka.Mem{} + }, + }, + { + name: "FS", + 
newChonk: func(t *testing.T) tka.Chonk { + return must.Get(tka.ChonkDir(t.TempDir())) + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + RunChonkTests(t, tt.newChonk) + }) + } +} + +func TestImplementsCompactableChonk(t *testing.T) { + for _, tt := range []struct { + name string + newChonk func(t *testing.T) tka.CompactableChonk + }{ + { + name: "FS", + newChonk: func(t *testing.T) tka.CompactableChonk { + return must.Get(tka.ChonkDir(t.TempDir())) + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + RunCompactableChonkTests(t, tt.newChonk) + }) + } +} From 4e1c270f9016040da064d474db4fca299cdea7ea Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 20 Oct 2025 15:03:03 +0000 Subject: [PATCH 0591/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/windows.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/licenses/windows.md b/licenses/windows.md index f6704cf32bb5a..b284aa1361f5d 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -42,7 +42,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/6376defdac3f/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/963e260a8227/LICENSE)) - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/f4da2b8ee071/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) From bf47d8e72ba672fd0f2bcc5888b01876be80e138 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 20 Oct 2025 11:04:07 -0500 Subject: [PATCH 0592/1093] VERSION.txt: this is v1.91.0 Signed-off-by: Nick Khyl --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 636ea711ad968..6979a6c0661bf 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.89.0 +1.91.0 From 3dde233cd3aed75f610b63ea33ab1baa9198c81b Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Mon, 20 Oct 2025 12:22:16 -0700 Subject: [PATCH 0593/1093] ipn/ipnlocal: use eventbus.SubscribeFunc in LocalBackend (#17524) This does not change which subscriptions are made, it only swaps them to use the SubscribeFunc API instead of Subscribe. Updates #15160 Updates #17487 Change-Id: Id56027836c96942206200567a118f8bcf9c07f64 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/local.go | 119 ++++++++++++++++-------------------------- 1 file changed, 44 insertions(+), 75 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 36e4ad8a589e9..ee3059de437c7 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -180,13 +180,13 @@ var ( // state machine generates events back out to zero or more components. type LocalBackend struct { // Elements that are thread-safe or constant after construction. - ctx context.Context // canceled by [LocalBackend.Shutdown] - ctxCancel context.CancelCauseFunc // cancels ctx - logf logger.Logf // general logging - keyLogf logger.Logf // for printing list of peers on change - statsLogf logger.Logf // for printing peers stats on change - sys *tsd.System - eventSubs eventbus.Monitor + ctx context.Context // canceled by [LocalBackend.Shutdown] + ctxCancel context.CancelCauseFunc // cancels ctx + logf logger.Logf // general logging + keyLogf logger.Logf // for printing list of peers on change + statsLogf logger.Logf // for printing peers stats on change + sys *tsd.System + eventClient *eventbus.Client health *health.Tracker // always non-nil polc policyclient.Client // always non-nil @@ -589,74 +589,44 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo // Start the event bus late, once all the assignments above are done. 
// (See previous race in tailscale/tailscale#17252) ec := b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") - b.eventSubs = ec.Monitor(b.consumeEventbusTopics(ec)) + b.eventClient = ec + eventbus.SubscribeFunc(ec, b.onClientVersion) + eventbus.SubscribeFunc(ec, func(au controlclient.AutoUpdate) { + b.onTailnetDefaultAutoUpdate(au.Value) + }) + eventbus.SubscribeFunc(ec, func(cd netmon.ChangeDelta) { b.linkChange(&cd) }) + if buildfeatures.HasHealth { + eventbus.SubscribeFunc(ec, b.onHealthChange) + } + if buildfeatures.HasPortList { + eventbus.SubscribeFunc(ec, b.setPortlistServices) + } + eventbus.SubscribeFunc(ec, b.onAppConnectorRouteUpdate) + eventbus.SubscribeFunc(ec, b.onAppConnectorStoreRoutes) return b, nil } -// consumeEventbusTopics consumes events from all relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [eventbus.Client] is closed. -func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { - clientVersionSub := eventbus.Subscribe[tailcfg.ClientVersion](ec) - autoUpdateSub := eventbus.Subscribe[controlclient.AutoUpdate](ec) - - var healthChange <-chan health.Change - if buildfeatures.HasHealth { - healthChangeSub := eventbus.Subscribe[health.Change](ec) - healthChange = healthChangeSub.Events() +func (b *LocalBackend) onAppConnectorRouteUpdate(ru appctype.RouteUpdate) { + // TODO(creachadair, 2025-10-02): It is currently possible for updates produced under + // one profile to arrive and be applied after a switch to another profile. + // We need to find a way to ensure that changes to the backend state are applied + // consistently in the presnce of profile changes, which currently may not happen in + // a single atomic step. 
See: https://github.com/tailscale/tailscale/issues/17414 + if err := b.AdvertiseRoute(ru.Advertise...); err != nil { + b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) } - changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) - routeUpdateSub := eventbus.Subscribe[appctype.RouteUpdate](ec) - storeRoutesSub := eventbus.Subscribe[appctype.RouteInfo](ec) - - var portlist <-chan PortlistServices - if buildfeatures.HasPortList { - portlistSub := eventbus.Subscribe[PortlistServices](ec) - portlist = portlistSub.Events() + if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { + b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) } +} - return func(ec *eventbus.Client) { - for { - select { - case <-ec.Done(): - return - case clientVersion := <-clientVersionSub.Events(): - b.onClientVersion(&clientVersion) - case au := <-autoUpdateSub.Events(): - b.onTailnetDefaultAutoUpdate(au.Value) - case change := <-healthChange: - b.onHealthChange(change) - case changeDelta := <-changeDeltaSub.Events(): - b.linkChange(&changeDelta) - - case pl := <-portlist: - if buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans - b.setPortlistServices(pl) - } - case ru := <-routeUpdateSub.Events(): - // TODO(creachadair, 2025-10-02): It is currently possible for updates produced under - // one profile to arrive and be applied after a switch to another profile. - // We need to find a way to ensure that changes to the backend state are applied - // consistently in the presnce of profile changes, which currently may not happen in - // a single atomic step. 
See: https://github.com/tailscale/tailscale/issues/17414 - if err := b.AdvertiseRoute(ru.Advertise...); err != nil { - b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) - } - if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { - b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) - } - case ri := <-storeRoutesSub.Events(): - // Whether or not routes should be stored can change over time. - shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() - if shouldStoreRoutes { - if err := b.storeRouteInfo(ri); err != nil { - b.logf("appc: failed to store route info: %v", err) - } - } - } +func (b *LocalBackend) onAppConnectorStoreRoutes(ri appctype.RouteInfo) { + // Whether or not routes should be stored can change over time. + shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() + if shouldStoreRoutes { + if err := b.storeRouteInfo(ri); err != nil { + b.logf("appc: failed to store route info: %v", err) } } } @@ -1107,13 +1077,12 @@ func (b *LocalBackend) ClearCaptureSink() { // Shutdown halts the backend and all its sub-components. The backend // can no longer be used after Shutdown returns. func (b *LocalBackend) Shutdown() { - // Close the [eventbus.Client] and wait for LocalBackend.consumeEventbusTopics - // to return. Do this before acquiring b.mu: - // 1. LocalBackend.consumeEventbusTopics event handlers also acquire b.mu, - // they can deadlock with c.Shutdown(). - // 2. LocalBackend.consumeEventbusTopics event handlers may not guard against - // undesirable post/in-progress LocalBackend.Shutdown() behaviors. - b.eventSubs.Close() + // Close the [eventbus.Client] to wait for subscribers to + // return before acquiring b.mu: + // 1. Event handlers also acquire b.mu, they can deadlock with c.Shutdown(). + // 2. Event handlers may not guard against undesirable post/in-progress + // LocalBackend.Shutdown() behaviors. 
+ b.eventClient.Close() b.em.close() From ab435ce3a6164033d976a6b8ab7d4bc1b83d3acb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Mon, 20 Oct 2025 15:24:39 -0400 Subject: [PATCH 0594/1093] client/systray: warn users launching the application with sudo (#17595) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If users start the application with sudo, DBUS is likely not available or will not have the correct endpoints. We want to warn users when doing this. Closes #17593 Signed-off-by: Claus Lensbøl --- client/systray/systray.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/client/systray/systray.go b/client/systray/systray.go index 4ac08058854e4..518b2e989a86f 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -158,6 +158,18 @@ func init() { // onReady is called by the systray package when the menu is ready to be built. func (menu *Menu) onReady() { log.Printf("starting") + if os.Getuid() == 0 || os.Getuid() != os.Geteuid() || os.Getenv("SUDO_USER") != "" || os.Getenv("DOAS_USER") != "" { + fmt.Fprintln(os.Stderr, ` +It appears that you might be running the systray with sudo/doas. +This can lead to issues with D-Bus, and should be avoided. + +The systray application should be run with the same user as your desktop session. +This usually means that you should run the application like: + +tailscale systray + +See https://tailscale.com/kb/1597/linux-systray for more information.`) + } setAppIcon(disconnected) menu.rebuild() From 675b1c6d542f71eee5dd20808a7e1aebce945580 Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Mon, 20 Oct 2025 15:36:31 -0500 Subject: [PATCH 0595/1093] cmd/tailscale/cli: error when advertising a Service from an untagged node (#17577) Service hosts must be tagged nodes, meaning it is only valid to advertise a Service from a machine which has at least one ACL tag. 
Fixes tailscale/corp#33197 Signed-off-by: Harry Harpham --- cmd/tailscale/cli/serve_legacy_test.go | 6 ++- cmd/tailscale/cli/serve_v2.go | 4 ++ cmd/tailscale/cli/serve_v2_test.go | 58 +++++++++++++++++++++++--- 3 files changed, 62 insertions(+), 6 deletions(-) diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index c509508dfb1f0..1d3854b0b0f74 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -860,6 +860,7 @@ type fakeLocalServeClient struct { setCount int // counts calls to SetServeConfig queryFeatureResponse *mockQueryFeatureResponse // mock response to QueryFeature calls prefs *ipn.Prefs // fake preferences, used to test GetPrefs and SetPrefs + statusWithoutPeers *ipnstate.Status // nil for fakeStatus } // fakeStatus is a fake ipnstate.Status value for tests. @@ -880,7 +881,10 @@ var fakeStatus = &ipnstate.Status{ } func (lc *fakeLocalServeClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { - return fakeStatus, nil + if lc.statusWithoutPeers == nil { + return fakeStatus, nil + } + return lc.statusWithoutPeers, nil } func (lc *fakeLocalServeClient) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 9b0af2cad7a0c..ca0497f8d0369 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -420,6 +420,10 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { svcName = e.service dnsName = e.service.String() } + tagged := st.Self.Tags != nil && st.Self.Tags.Len() > 0 + if forService && !tagged && !turnOff { + return errors.New("service hosts must be tagged nodes") + } if !forService && srvType == serveTypeTUN { return errors.New("tun mode is only supported for services") } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 1deeaf3eaa9b5..f9653253a7cad 100644 --- 
a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -22,6 +22,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/types/views" ) func TestServeDevConfigMutations(t *testing.T) { @@ -33,10 +34,11 @@ func TestServeDevConfigMutations(t *testing.T) { } // group is a group of steps that share the same - // config mutation, but always starts from an empty config + // config mutation type group struct { - name string - steps []step + name string + steps []step + initialState fakeLocalServeClient // use the zero value for empty config } // creaet a temporary directory for path-based destinations @@ -814,17 +816,58 @@ func TestServeDevConfigMutations(t *testing.T) { }, }, }, + { + name: "advertise_service", + initialState: fakeLocalServeClient{ + statusWithoutPeers: &ipnstate.Status{ + BackendState: ipn.Running.String(), + Self: &ipnstate.PeerStatus{ + DNSName: "foo.test.ts.net", + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrFunnel: nil, + tailcfg.CapabilityFunnelPorts + "?ports=443,8443": nil, + }, + Tags: ptrToReadOnlySlice([]string{"some-tag"}), + }, + CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, + }, + }, + steps: []step{{ + command: cmd("serve --service=svc:foo --http=80 text:foo"), + want: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:80": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Text: "foo"}, + }}, + }, + }, + }, + }, + }}, + }, + { + name: "advertise_service_from_untagged_node", + steps: []step{{ + command: cmd("serve --service=svc:foo --http=80 text:foo"), + wantErr: anyErr(), + }}, + }, } for _, group := range groups { t.Run(group.name, func(t *testing.T) { - lc := &fakeLocalServeClient{} + lc := group.initialState for i, st := range group.steps { var stderr bytes.Buffer var 
stdout bytes.Buffer var flagOut bytes.Buffer e := &serveEnv{ - lc: lc, + lc: &lc, testFlagOut: &flagOut, testStdout: &stdout, testStderr: &stderr, @@ -2249,3 +2292,8 @@ func exactErrMsg(want error) func(error) string { return fmt.Sprintf("\ngot: %v\nwant: %v\n", got, want) } } + +func ptrToReadOnlySlice[T any](s []T) *views.Slice[T] { + vs := views.SliceOf(s) + return &vs +} From 3944809a118153b83aa0a606e515e20b6fe6190b Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 21 Oct 2025 09:52:23 +0100 Subject: [PATCH 0596/1093] .github/workflows: pin the google/oss-fuzz GitHub Actions Updates https://github.com/tailscale/corp/issues/31017 Signed-off-by: Alex Chan --- .github/workflows/test.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c3aa4f1bca1ff..b6d41e937c2db 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -613,7 +613,9 @@ jobs: steps: - name: build fuzzers id: build - uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master + # As of 21 October 2025, this repo doesn't tag releases, so this commit + # hash is just the tip of master. + uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@1242ccb5b6352601e73c00f189ac2ae397242264 # continue-on-error makes steps.build.conclusion be 'success' even if # steps.build.outcome is 'failure'. This means this step does not # contribute to the job's overall pass/fail evaluation. @@ -643,7 +645,9 @@ jobs: # report a failure because TS_FUZZ_CURRENTLY_BROKEN is set to the wrong # value. if: steps.build.outcome == 'success' - uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master + # As of 21 October 2025, this repo doesn't tag releases, so this commit + # hash is just the tip of master. 
+ uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@1242ccb5b6352601e73c00f189ac2ae397242264 with: oss-fuzz-project-name: 'tailscale' fuzz-seconds: 150 From 2b448f0696006b76d1d4cfa227472d0153782445 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 20 Oct 2025 18:12:25 +0100 Subject: [PATCH 0597/1093] ipn, tka: improve the logging around TKA sync and AUM errors * When we do the TKA sync, log whether TKA is enabled and whether we want it to be enabled. This would help us see if a node is making bootstrap errors. * When we fail to look up an AUM locally, log the ID of the AUM rather than a generic "file does not exist" error. These AUM IDs are cryptographic hashes of the TKA state, which itself just contains public keys and signatures. These IDs aren't sensitive and logging them is safe. Signed-off-by: Alex Chan Updates https://github.com/tailscale/corp/issues/33594 --- ipn/ipnlocal/network-lock.go | 11 ++++++----- tka/builder.go | 2 +- tka/tailchonk.go | 10 +++++----- tka/tka.go | 10 +++++----- 4 files changed, 17 insertions(+), 16 deletions(-) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 4990824453c47..6acb9fe1d400e 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -288,8 +288,11 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return nil } - if b.tka != nil || nm.TKAEnabled { - b.logf("tkaSyncIfNeeded: enabled=%v, head=%v", nm.TKAEnabled, nm.TKAHead) + isEnabled := b.tka != nil + wantEnabled := nm.TKAEnabled + + if isEnabled || wantEnabled { + b.logf("tkaSyncIfNeeded: isEnabled=%t, wantEnabled=%t, head=%v", isEnabled, wantEnabled, nm.TKAHead) } ourNodeKey, ok := prefs.Persist().PublicNodeKeyOK() @@ -297,8 +300,6 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return errors.New("tkaSyncIfNeeded: no node key in prefs") } - isEnabled := b.tka != nil - wantEnabled := nm.TKAEnabled didJustEnable := false if isEnabled != 
wantEnabled { var ourHead tka.AUMHash @@ -948,7 +949,7 @@ func (b *LocalBackend) NetworkLockLog(maxEntries int) ([]ipnstate.NetworkLockUpd if err == os.ErrNotExist { break } - return out, fmt.Errorf("reading AUM: %w", err) + return out, fmt.Errorf("reading AUM (%v): %w", cursor, err) } update := ipnstate.NetworkLockUpdate{ diff --git a/tka/builder.go b/tka/builder.go index 642f39d77422d..199cec06d8b64 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -136,7 +136,7 @@ func (b *UpdateBuilder) Finalize(storage Chonk) ([]AUM, error) { needCheckpoint = false break } - return nil, fmt.Errorf("reading AUM: %v", err) + return nil, fmt.Errorf("reading AUM (%v): %v", cursor, err) } if aum.MessageKind == AUMCheckpoint { diff --git a/tka/tailchonk.go b/tka/tailchonk.go index cb683c273d033..d01c5826e24a2 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -596,7 +596,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in // We've reached the end of the chain we have stored. return h, nil } - return AUMHash{}, fmt.Errorf("reading active chain (retainStateActive) (%d): %w", i, err) + return AUMHash{}, fmt.Errorf("reading active chain (retainStateActive) (%d, %v): %w", i, parent, err) } } @@ -616,7 +616,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate lastActiveAncestor") } if next, err = storage.AUM(parent); err != nil { - return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err) + return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", parent, err) } } @@ -632,7 +632,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in // We've reached the end of the chain we have stored. 
break } - return AUMHash{}, fmt.Errorf("reading active chain (retainStateCandidate): %w", err) + return AUMHash{}, fmt.Errorf("reading active chain (retainStateCandidate, %v): %w", parent, err) } } @@ -744,7 +744,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState if didAdjustCandidateAncestor { var next AUM if next, err = storage.AUM(candidateAncestor); err != nil { - return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err) + return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", candidateAncestor, err) } for { @@ -760,7 +760,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate candidateAncestor") } if next, err = storage.AUM(parent); err != nil { - return AUMHash{}, fmt.Errorf("searching for compaction target: %w", err) + return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", parent, err) } } } diff --git a/tka/tka.go b/tka/tka.go index 234c87fe1b89c..c37c39754661c 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -102,14 +102,14 @@ func computeChainCandidates(storage Chonk, lastKnownOldest *AUMHash, maxIter int iterAgain = false for j := range candidates { - parent, hasParent := candidates[j].Oldest.Parent() + parentHash, hasParent := candidates[j].Oldest.Parent() if hasParent { - parent, err := storage.AUM(parent) + parent, err := storage.AUM(parentHash) if err != nil { if err == os.ErrNotExist { continue } - return nil, fmt.Errorf("reading parent: %v", err) + return nil, fmt.Errorf("reading parent %s: %v", parentHash, err) } candidates[j].Oldest = parent if lastKnownOldest != nil && *lastKnownOldest == parent.Hash() { @@ -210,7 +210,7 @@ func fastForwardWithAdvancer( } nextAUM, err := storage.AUM(*startState.LastAUMHash) if err != nil { - return AUM{}, State{}, fmt.Errorf("reading next: %v", err) + return AUM{}, State{}, fmt.Errorf("reading next (%v): %v", 
*startState.LastAUMHash, err) } curs := nextAUM @@ -297,7 +297,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) // If we got here, the current state is dependent on the previous. // Keep iterating backwards till thats not the case. if curs, err = storage.AUM(parent); err != nil { - return State{}, fmt.Errorf("reading parent: %v", err) + return State{}, fmt.Errorf("reading parent (%v): %v", parent, err) } } From 23359dc72706b7d9f32dcd428f22f5e4fdbfc4b7 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 21 Oct 2025 11:07:33 +0100 Subject: [PATCH 0598/1093] tka: don't try to read AUMs which are partway through being written Fixes https://github.com/tailscale/tailscale/issues/17600 Signed-off-by: Alex Chan --- tka/tailchonk.go | 10 +++++++++- tka/tailchonk_test.go | 44 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index d01c5826e24a2..7750b062201ac 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -9,6 +9,7 @@ import ( "bytes" "errors" "fmt" + "log" "os" "path/filepath" "slices" @@ -403,9 +404,16 @@ func (c *FS) scanHashes(eachHashInfo func(*fsHashInfo)) error { return fmt.Errorf("reading prefix dir: %v", err) } for _, file := range files { + // Ignore files whose names aren't valid AUM hashes, which may be + // temporary files which are partway through being written, or other + // files added by the OS (like .DS_Store) which we can ignore. + // TODO(alexc): it might be useful to append a suffix like `.aum` to + // filenames, so we can more easily distinguish between AUMs and + // arbitrary other files. 
var h AUMHash if err := h.UnmarshalText([]byte(file.Name())); err != nil { - return fmt.Errorf("invalid aum file: %s: %w", file.Name(), err) + log.Printf("ignoring unexpected non-AUM: %s: %v", file.Name(), err) + continue } info, err := c.get(h) if err != nil { diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 08686598033b8..1a6bad4592053 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -7,6 +7,7 @@ import ( "bytes" "os" "path/filepath" + "slices" "sync" "testing" "time" @@ -83,6 +84,49 @@ func TestTailchonkFS_CommitTime(t *testing.T) { } } +// If we were interrupted while writing a temporary file, AllAUMs() +// should ignore it when scanning the AUM directory. +func TestTailchonkFS_IgnoreTempFile(t *testing.T) { + base := t.TempDir() + chonk := must.Get(ChonkDir(base)) + parentHash := randHash(t, 1) + aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} + must.Do(chonk.CommitVerifiedAUMs([]AUM{aum})) + + writeAUMFile := func(filename, contents string) { + t.Helper() + if err := os.MkdirAll(filepath.Join(base, filename[0:2]), os.ModePerm); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(base, filename[0:2], filename), []byte(contents), 0600); err != nil { + t.Fatal(err) + } + } + + // Check that calling AllAUMs() returns the single committed AUM + got, err := chonk.AllAUMs() + if err != nil { + t.Fatalf("AllAUMs() failed: %v", err) + } + want := []AUMHash{aum.Hash()} + if !slices.Equal(got, want) { + t.Fatalf("AllAUMs() is wrong: got %v, want %v", got, want) + } + + // Write some temporary files which are named like partially-committed AUMs, + // then check that AllAUMs() only returns the single committed AUM. 
+ writeAUMFile("AUM1234.tmp", "incomplete AUM\n") + writeAUMFile("AUM1234.tmp_123", "second incomplete AUM\n") + + got, err = chonk.AllAUMs() + if err != nil { + t.Fatalf("AllAUMs() failed: %v", err) + } + if !slices.Equal(got, want) { + t.Fatalf("AllAUMs() is wrong: got %v, want %v", got, want) + } +} + func TestMarkActiveChain(t *testing.T) { type aumTemplate struct { AUM AUM From c59c859f7d3fa1ace1427421026f5f1b6efb9b6f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 21 Oct 2025 14:01:40 +0100 Subject: [PATCH 0599/1093] tsconsensus: mark several of these tests as known flaky Updates https://github.com/tailscale/tailscale/issues/15627 Signed-off-by: Alex Chan --- tsconsensus/tsconsensus_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index 3b51a093f12ad..17f3d881f8687 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -251,6 +251,7 @@ func warnLogConfig() Config { } func TestStart(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) control, controlURL := startControl(t) ctx := context.Background() @@ -371,6 +372,7 @@ func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string } func TestApply(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -435,6 +437,7 @@ func assertCommandsWorkOnAnyNode(t testing.TB, participants []*participant) { } func TestConfig(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -474,6 +477,7 @@ func TestConfig(t *testing.T) { } func TestFollowerFailover(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -545,6 
+549,7 @@ func TestFollowerFailover(t *testing.T) { } func TestRejoin(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" From c2d62d25c657c62785a0e8d06a598932fe48e6c6 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Tue, 21 Oct 2025 16:31:54 -0700 Subject: [PATCH 0600/1093] CODE_OF_CONDUCT: convert to semantic line breaks This reformats the existing text to have line breaks at sentences. This commit contains no textual changes to the code of conduct, but is done to make any subsequent changes easier to review. (sembr.org) Also apply prettier formatting for consistency. Updates #cleanup Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- CODE_OF_CONDUCT.md | 149 ++++++++++++++++----------------------------- 1 file changed, 51 insertions(+), 98 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index a5877cb112eff..51ffb60ab0754 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -2,146 +2,99 @@ ## Our Pledge -We are committed to creating an open, welcoming, diverse, inclusive, -healthy and respectful community. +We are committed to creating an open, welcoming, diverse, inclusive, healthy and respectful community. ## Our Standards -Examples of behavior that contributes to a positive environment for our -community include: -* Demonstrating empathy and kindness toward other people. -* Being respectful of differing opinions, viewpoints, and experiences. -* Giving and gracefully accepting constructive feedback. -* Accepting responsibility and apologizing to those affected by our - mistakes, and learning from the experience. -* Focusing on what is best not just for us as individuals, but for the - overall community. +Examples of behavior that contributes to a positive environment for our community include: + +- Demonstrating empathy and kindness toward other people. 
+- Being respectful of differing opinions, viewpoints, and experiences. +- Giving and gracefully accepting constructive feedback. +- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience. +- Focusing on what is best not just for us as individuals, but for the overall community. Examples of unacceptable behavior include without limitation: -* The use of sexualized language or imagery, and sexual attention or - advances of any kind. -* The use of violent, intimidating or bullying language or imagery. -* Trolling, insulting or derogatory comments, and personal or - political attacks. -* Public or private harassment. -* Publishing others' private information, such as a physical or email - address, without their explicit permission. -* Spamming community channels and members, such as sending repeat messages, - low-effort content, or automated messages. -* Phishing or any similar activity; -* Distributing or promoting malware; -* Other conduct which could reasonably be considered inappropriate in a - professional setting. - -Please also see the Tailscale Acceptable Use Policy, available at -[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). - -# Reporting Incidents - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported to Tailscale directly via info@tailscale.com, or to -the community leaders or moderators via DM or similar. + +- The use of sexualized language or imagery, and sexual attention or advances of any kind. +- The use of violent, intimidating or bullying language or imagery. +- Trolling, insulting or derogatory comments, and personal or political attacks. +- Public or private harassment. +- Publishing others' private information, such as a physical or email address, without their explicit permission. +- Spamming community channels and members, such as sending repeat messages, low-effort content, or automated messages. 
+- Phishing or any similar activity; +- Distributing or promoting malware; +- Other conduct which could reasonably be considered inappropriate in a professional setting. + +Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). + +## Reporting Incidents + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to Tailscale directly via , or to the community leaders or moderators via DM or similar. All complaints will be reviewed and investigated promptly and fairly. We will respect the privacy and safety of the reporter of any issues. -Please note that this community is not moderated by staff 24/7, and we -do not have, and do not undertake, any obligation to prescreen, monitor, -edit, or remove any content or data, or to actively seek facts or -circumstances indicating illegal activity. While we strive to keep the -community safe and welcoming, moderation may not be immediate at all hours. +Please note that this community is not moderated by staff 24/7, and we do not have, and do not undertake, any obligation to prescreen, monitor, edit, or remove any content or data, or to actively seek facts or circumstances indicating illegal activity. +While we strive to keep the community safe and welcoming, moderation may not be immediate at all hours. If you encounter any issues, report them using the appropriate channels. ## Enforcement -Community leaders and moderators are responsible for clarifying and -enforcing our standards of acceptable behavior and will take appropriate -and fair corrective action in response to any behavior that they deem -inappropriate, threatening, offensive, or harmful. +Community leaders and moderators are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. 
-Community leaders and moderators have the right and responsibility to remove, -edit, or reject comments, commits, code, wiki edits, issues, and other -contributions that are not aligned to this Community Code of Conduct. -Tailscale retains full discretion to take action (or not) in response -to a violation of these guidelines with or without notice or liability -to you. We will interpret our policies and resolve disputes in favor of -protecting users, customers, the public, our community and our company, -as a whole. +Community leaders and moderators have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Community Code of Conduct. +Tailscale retains full discretion to take action (or not) in response to a violation of these guidelines with or without notice or liability to you. +We will interpret our policies and resolve disputes in favor of protecting users, customers, the public, our community and our company, as a whole. ## Enforcement Guidelines -Community leaders will follow these Community Impact Guidelines in -determining the consequences for any action they deem in violation of -this Code of Conduct: +Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction -Community Impact: Use of inappropriate language or other behavior -deemed unprofessional or unwelcome in the community. +Community Impact: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. -Consequence: A private, written warning from community leaders, -providing clarity around the nature of the violation and an -explanation of why the behavior was inappropriate. A public apology -may be requested. 
+Consequence: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. +A public apology may be requested. ### 2. Warning -Community Impact: A violation through a single incident or series -of actions. +Community Impact: A violation through a single incident or series of actions. -Consequence: A warning with consequences for continued -behavior. No interaction with the people involved, including -unsolicited interaction with those enforcing this Community Code of Conduct, -for a specified period of time. This includes avoiding interactions in -community spaces as well as external channels like social -media. Violating these terms may lead to a temporary or permanent ban. +Consequence: A warning with consequences for continued behavior. +No interaction with the people involved, including unsolicited interaction with those enforcing this Community Code of Conduct, for a specified period of time. +This includes avoiding interactions in community spaces as well as external channels like social media. +Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban -Community Impact: A serious violation of community standards, -including sustained inappropriate behavior. +Community Impact: A serious violation of community standards, including sustained inappropriate behavior. -Consequence: A temporary ban from any sort of interaction or -public communication with the community for a specified period of -time. No public or private interaction with the people involved, -including unsolicited interaction with those enforcing the Code of Conduct, -is allowed during this period. Violating these terms may lead to a permanent ban. +Consequence: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. 
+No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban -Community Impact: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of -an individual, or aggression toward or disparagement of -classes of individuals. +Community Impact: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. -Consequence: A permanent ban from any sort of public interaction -within the community. +Consequence: A permanent ban from any sort of public interaction within the community. ## Acceptable Use Policy -Violation of this Community Code of Conduct may also violate the -Tailscale Acceptable Use Policy, which may result in suspension or -termination of your Tailscale account. For more information, please -see the Tailscale Acceptable Use Policy, available at -[tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). +Violation of this Community Code of Conduct may also violate the Tailscale Acceptable Use Policy, which may result in suspension or termination of your Tailscale account. +For more information, please see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). ## Privacy -Please see the Tailscale [Privacy Policy](http://tailscale.com/privacy-policy) -for more information about how Tailscale collects, uses, discloses and protects -information. +Please see the Tailscale [Privacy Policy](http://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information. 
## Attribution -This Code of Conduct is adapted from the [Contributor -Covenant][homepage], version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at . -Community Impact Guidelines were inspired by [Mozilla's code of -conduct enforcement ladder](https://github.com/mozilla/diversity). +Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). [homepage]: https://www.contributor-covenant.org -For answers to common questions about this code of conduct, see the -FAQ at https://www.contributor-covenant.org/faq. Translations are -available at https://www.contributor-covenant.org/translations. - +For answers to common questions about this code of conduct, see the FAQ at . +Translations are available at . From afaa23c3b4c5fcbb7a62d42831a5b7e55e30eeac Mon Sep 17 00:00:00 2001 From: Will Norris Date: Tue, 21 Oct 2025 16:44:22 -0700 Subject: [PATCH 0601/1093] CODE_OF_CONDUCT: update document title Updates #cleanup Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- CODE_OF_CONDUCT.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 51ffb60ab0754..ef68d676879a1 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,4 @@ -# Contributor Covenant Code of Conduct +# Tailscale Community Code of Conduct ## Our Pledge @@ -86,7 +86,7 @@ For more information, please see the Tailscale Acceptable Use Policy, available ## Privacy -Please see the Tailscale [Privacy Policy](http://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information. 
+Please see the Tailscale [Privacy Policy](https://tailscale.com/privacy-policy) for more information about how Tailscale collects, uses, discloses and protects information. ## Attribution From 36ad24b20fcfa0b625516e6d5501972e640193bf Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 23 Oct 2025 14:56:56 -0700 Subject: [PATCH 0602/1093] feature/tpm: check TPM family data for compatibility (#17624) Check that the TPM we have opened is advertised as a 2.0 family device before using it for state sealing / hardware attestation. Updates #17622 Signed-off-by: Patrick O'Doherty --- feature/tpm/tpm.go | 8 ++++---- feature/tpm/tpm_test.go | 13 +++++++++++++ ipn/ipnlocal/c2n_test.go | 1 + tailcfg/tailcfg.go | 4 ++++ 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 6acb600ecd56e..64a702bd998d5 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -55,12 +55,11 @@ func init() { } func tpmSupported() bool { - tpm, err := open() - if err != nil { + hi := infoOnce() + if hi == nil { return false } - tpm.Close() - return true + return hi.FamilyIndicator == "2.0" } var verboseTPM = envknob.RegisterBool("TS_DEBUG_TPM") @@ -104,6 +103,7 @@ func info() *tailcfg.TPMInfo { {tpm2.TPMPTVendorTPMType, func(info *tailcfg.TPMInfo, value uint32) { info.Model = int(value) }}, {tpm2.TPMPTFirmwareVersion1, func(info *tailcfg.TPMInfo, value uint32) { info.FirmwareVersion += uint64(value) << 32 }}, {tpm2.TPMPTFirmwareVersion2, func(info *tailcfg.TPMInfo, value uint32) { info.FirmwareVersion += uint64(value) }}, + {tpm2.TPMPTFamilyIndicator, toStr(&info.FamilyIndicator)}, } { resp, err := tpm2.GetCapability{ Capability: tpm2.TPMCapTPMProperties, diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index 5401fd5c38532..5c0fbafb65072 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -133,6 +133,19 @@ func TestStore(t *testing.T) { }) } +func BenchmarkInfo(b *testing.B) { + b.StopTimer() + 
skipWithoutTPM(b) + b.StartTimer() + for i := 0; i < b.N; i++ { + hi := info() + if hi == nil { + b.Fatalf("tpm info error") + } + } + b.StopTimer() +} + func BenchmarkStore(b *testing.B) { skipWithoutTPM(b) b.StopTimer() diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 75a57dee5b79b..95cd5fa6995bc 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -384,6 +384,7 @@ func TestRedactNetmapPrivateKeys(t *testing.T) { f(tailcfg.Service{}, "Port"): false, f(tailcfg.Service{}, "Proto"): false, f(tailcfg.Service{}, "_"): false, + f(tailcfg.TPMInfo{}, "FamilyIndicator"): false, f(tailcfg.TPMInfo{}, "FirmwareVersion"): false, f(tailcfg.TPMInfo{}, "Manufacturer"): false, f(tailcfg.TPMInfo{}, "Model"): false, diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index ea4a9d1fa1748..a95d0559c2bec 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -928,6 +928,10 @@ type TPMInfo struct { // https://trustedcomputinggroup.org/resource/tpm-library-specification/. // Before revision 184, TCG used the "01.83" format for revision 183. SpecRevision int `json:",omitempty"` + + // FamilyIndicator is the TPM spec family, like "2.0". + // Read from TPM_PT_FAMILY_INDICATOR. + FamilyIndicator string `json:",omitempty"` } // Present reports whether a TPM device is present on this machine. From 672b1f0e76c074fbf922bc409f8bd1fdfc8057f3 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 23 Oct 2025 16:48:58 -0700 Subject: [PATCH 0603/1093] feature/tpm: use withSRK to probe TPM availability (#17627) On some platforms e.g. ChromeOS the owner hierarchy might not always be available to us. To avoid stale sealing exceptions later we probe to confirm it's working rather than rely solely on family indicator status. 
Updates #17622 Signed-off-by: Patrick O'Doherty --- feature/tpm/tpm.go | 17 ++++++++++++++++- feature/tpm/tpm_test.go | 12 ++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 64a702bd998d5..4b27a241fa255 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -59,7 +59,22 @@ func tpmSupported() bool { if hi == nil { return false } - return hi.FamilyIndicator == "2.0" + if hi.FamilyIndicator != "2.0" { + return false + } + + tpm, err := open() + if err != nil { + return false + } + defer tpm.Close() + + if err := withSRK(logger.Discard, tpm, func(srk tpm2.AuthHandle) error { + return nil + }); err != nil { + return false + } + return true } var verboseTPM = envknob.RegisterBool("TS_DEBUG_TPM") diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index 5c0fbafb65072..afce570fc250d 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -146,6 +146,18 @@ func BenchmarkInfo(b *testing.B) { b.StopTimer() } +func BenchmarkTPMSupported(b *testing.B) { + b.StopTimer() + skipWithoutTPM(b) + b.StartTimer() + for i := 0; i < b.N; i++ { + if !tpmSupported() { + b.Fatalf("tpmSupported returned false") + } + } + b.StopTimer() +} + func BenchmarkStore(b *testing.B) { skipWithoutTPM(b) b.StopTimer() From 8576a802caabffd5c5e94d614acc8dc954f0a443 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 23 Oct 2025 17:59:25 -0700 Subject: [PATCH 0604/1093] util/linuxfw: fix 32-bit arm regression with iptables This fixes a regression from dd615c8fdd that moved the newIPTablesRunner constructor from a any-Linux-GOARCH file to one that was only amd64 and arm64, thus breaking iptables on other platforms (notably 32-bit "arm", as seen on older Pis running Buster with iptables) Tested by hand on a Raspberry Pi 2 w/ Buster + iptables for now, for lack of automated 32-bit arm tests at the moment. But filed #17629. 
Fixes #17623 Updates #17629 Change-Id: Iac1a3d78f35d8428821b46f0fed3f3717891c1bd Signed-off-by: Brad Fitzpatrick --- util/linuxfw/iptables.go | 4 +--- util/linuxfw/iptables_disabled.go | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index 5bd7c528b11b3..76c5400becff8 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ -1,9 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && (arm64 || amd64) && !ts_omit_iptables - -// TODO(#8502): add support for more architectures +//go:build linux && !ts_omit_iptables package linuxfw diff --git a/util/linuxfw/iptables_disabled.go b/util/linuxfw/iptables_disabled.go index 8736f83998fa3..538e33647381a 100644 --- a/util/linuxfw/iptables_disabled.go +++ b/util/linuxfw/iptables_disabled.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (linux && !(arm64 || amd64)) || ts_omit_iptables +//go:build linux && ts_omit_iptables package linuxfw From d47c697748ec2cf0d3ca663811b094ec617529cd Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 21 Oct 2025 12:45:03 +0100 Subject: [PATCH 0605/1093] ipn/ipnlocal: skip TKA bootstrap request if Tailnet Lock is unavailable If you run tailscaled without passing a `--statedir`, Tailnet Lock is unavailable -- we don't have a folder to store the AUMs in. This causes a lot of unnecessary requests to bootstrap TKA, because every time the node receives a NetMap with some TKA state, it tries to bootstrap, fetches the bootstrap TKA state from the control plane, then fails with the error: TKA sync error: bootstrap: network-lock is not supported in this configuration, try setting --statedir We can't prevent the error, but we can skip the control plane request that immediately gets dropped on the floor. 
In local testing, a new node joining a tailnet caused *three* control plane requests which were unused. Updates tailscale/corp#19441 Signed-off-by: Alex Chan --- ipn/ipnlocal/network-lock.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 6acb9fe1d400e..f26c81011e824 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -288,6 +288,10 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return nil } + if err := b.CanSupportNetworkLock(); err != nil { + return err + } + isEnabled := b.tka != nil wantEnabled := nm.TKAEnabled From 7418583e4735ac31bce0d4ba657e488a09ca488a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 24 Oct 2025 12:08:35 -0400 Subject: [PATCH 0606/1093] health: compare warnable codes to avoid errors on release branch (#17637) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This compares the warnings we actually care about and skips the unstable warnings and the changes with no warnings. 
Fixes #17635 Signed-off-by: Claus Lensbøl --- cmd/derper/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 2 +- health/health_test.go | 35 ++++++++++++++++++++++++----- health/warnings.go | 39 +++++++++++++++++---------------- tsconst/health.go | 26 ++++++++++++++++++++++ 5 files changed, 77 insertions(+), 27 deletions(-) create mode 100644 tsconst/health.go diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index b8dd28e6bf435..01c278fbd1691 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -116,7 +116,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/syncs from tailscale.com/cmd/derper+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tka from tailscale.com/client/local+ - LW tailscale.com/tsconst from tailscale.com/net/netmon+ + tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate tailscale.com/tstime/rate from tailscale.com/derp/derpserver diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 96e18db43db19..224026f25368d 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -116,7 +116,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tka from tailscale.com/control/controlclient+ - tailscale.com/tsconst from tailscale.com/net/netns + tailscale.com/tsconst from tailscale.com/net/netns+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ diff --git a/health/health_test.go b/health/health_test.go index 60707177603e9..af7d06c8fe258 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ 
-19,6 +19,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "tailscale.com/metrics" "tailscale.com/tailcfg" + "tailscale.com/tsconst" "tailscale.com/tstest" "tailscale.com/tstime" "tailscale.com/types/opt" @@ -739,21 +740,27 @@ func TestControlHealthNotifies(t *testing.T) { ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() - // Expect events at starup, before doing anything else + // Expect events at starup, before doing anything else, skip unstable + // event and no warning event as they show up at different times. synctest.Wait() - if err := eventbustest.ExpectExactly(tw, - eventbustest.Type[Change](), // warming-up - eventbustest.Type[Change](), // is-using-unstable-version - eventbustest.Type[Change](), // not-in-map-poll + if err := eventbustest.Expect(tw, + CompareWarnableCode(t, tsconst.HealthWarnableWarmingUp), + CompareWarnableCode(t, tsconst.HealthWarnableNotInMapPoll), + CompareWarnableCode(t, tsconst.HealthWarnableWarmingUp), ); err != nil { t.Errorf("startup error: %v", err) } // Only set initial state if we need to if len(test.initialState) != 0 { + t.Log("Setting initial state") ht.SetControlHealth(test.initialState) synctest.Wait() - if err := eventbustest.ExpectExactly(tw, eventbustest.Type[Change]()); err != nil { + if err := eventbustest.Expect(tw, + CompareWarnableCode(t, tsconst.HealthWarnableMagicsockReceiveFuncError), + // Skip event with no warnable + CompareWarnableCode(t, tsconst.HealthWarnableNoDERPHome), + ); err != nil { t.Errorf("initial state error: %v", err) } } @@ -771,6 +778,22 @@ func TestControlHealthNotifies(t *testing.T) { } } +func CompareWarnableCode(t *testing.T, code string) func(Change) bool { + t.Helper() + return func(c Change) bool { + t.Helper() + if c.Warnable != nil { + t.Logf("Warnable code: %s", c.Warnable.Code) + if string(c.Warnable.Code) == code { + return true + } + } else { + t.Log("No Warnable") + } + return false + } +} + func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { 
synctest.Test(t, func(t *testing.T) { bus := eventbustest.NewBus(t) diff --git a/health/warnings.go b/health/warnings.go index 26577130d9f1c..a9c4b34a0f849 100644 --- a/health/warnings.go +++ b/health/warnings.go @@ -9,6 +9,7 @@ import ( "time" "tailscale.com/feature/buildfeatures" + "tailscale.com/tsconst" "tailscale.com/version" ) @@ -26,7 +27,7 @@ This file contains definitions for the Warnables maintained within this `health` // updateAvailableWarnable is a Warnable that warns the user that an update is available. var updateAvailableWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "update-available", + Code: tsconst.HealthWarnableUpdateAvailable, Title: "Update available", Severity: SeverityLow, Text: func(args Args) string { @@ -42,7 +43,7 @@ var updateAvailableWarnable = condRegister(func() *Warnable { // securityUpdateAvailableWarnable is a Warnable that warns the user that an important security update is available. var securityUpdateAvailableWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "security-update-available", + Code: tsconst.HealthWarnableSecurityUpdateAvailable, Title: "Security update available", Severity: SeverityMedium, Text: func(args Args) string { @@ -59,7 +60,7 @@ var securityUpdateAvailableWarnable = condRegister(func() *Warnable { // so they won't be surprised by all the issues that may arise. var unstableWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "is-using-unstable-version", + Code: tsconst.HealthWarnableIsUsingUnstableVersion, Title: "Using an unstable version", Severity: SeverityLow, Text: StaticMessage("This is an unstable version of Tailscale meant for testing and development purposes. Please report any issues to Tailscale."), @@ -69,7 +70,7 @@ var unstableWarnable = condRegister(func() *Warnable { // NetworkStatusWarnable is a Warnable that warns the user that the network is down. 
var NetworkStatusWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "network-status", + Code: tsconst.HealthWarnableNetworkStatus, Title: "Network down", Severity: SeverityMedium, Text: StaticMessage("Tailscale cannot connect because the network is down. Check your Internet connection."), @@ -81,7 +82,7 @@ var NetworkStatusWarnable = condRegister(func() *Warnable { // IPNStateWarnable is a Warnable that warns the user that Tailscale is stopped. var IPNStateWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "wantrunning-false", + Code: tsconst.HealthWarnableWantRunningFalse, Title: "Tailscale off", Severity: SeverityLow, Text: StaticMessage("Tailscale is stopped."), @@ -91,7 +92,7 @@ var IPNStateWarnable = condRegister(func() *Warnable { // localLogWarnable is a Warnable that warns the user that the local log is misconfigured. var localLogWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "local-log-config-error", + Code: tsconst.HealthWarnableLocalLogConfigError, Title: "Local log misconfiguration", Severity: SeverityLow, Text: func(args Args) string { @@ -104,7 +105,7 @@ var localLogWarnable = condRegister(func() *Warnable { // and provides the last login error if available. var LoginStateWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "login-state", + Code: tsconst.HealthWarnableLoginState, Title: "Logged out", Severity: SeverityMedium, Text: func(args Args) string { @@ -121,7 +122,7 @@ var LoginStateWarnable = condRegister(func() *Warnable { // notInMapPollWarnable is a Warnable that warns the user that we are using a stale network map. 
var notInMapPollWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "not-in-map-poll", + Code: tsconst.HealthWarnableNotInMapPoll, Title: "Out of sync", Severity: SeverityMedium, DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, @@ -134,7 +135,7 @@ var notInMapPollWarnable = condRegister(func() *Warnable { // noDERPHomeWarnable is a Warnable that warns the user that Tailscale doesn't have a home DERP. var noDERPHomeWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "no-derp-home", + Code: tsconst.HealthWarnableNoDERPHome, Title: "No home relay server", Severity: SeverityMedium, DependsOn: []*Warnable{NetworkStatusWarnable}, @@ -147,7 +148,7 @@ var noDERPHomeWarnable = condRegister(func() *Warnable { // noDERPConnectionWarnable is a Warnable that warns the user that Tailscale couldn't connect to a specific DERP server. var noDERPConnectionWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "no-derp-connection", + Code: tsconst.HealthWarnableNoDERPConnection, Title: "Relay server unavailable", Severity: SeverityMedium, DependsOn: []*Warnable{ @@ -177,7 +178,7 @@ var noDERPConnectionWarnable = condRegister(func() *Warnable { // heard from the home DERP region for a while. var derpTimeoutWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "derp-timed-out", + Code: tsconst.HealthWarnableDERPTimedOut, Title: "Relay server timed out", Severity: SeverityMedium, DependsOn: []*Warnable{ @@ -198,7 +199,7 @@ var derpTimeoutWarnable = condRegister(func() *Warnable { // derpRegionErrorWarnable is a Warnable that warns the user that a DERP region is reporting an issue. 
var derpRegionErrorWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "derp-region-error", + Code: tsconst.HealthWarnableDERPRegionError, Title: "Relay server error", Severity: SeverityLow, DependsOn: []*Warnable{NetworkStatusWarnable}, @@ -211,7 +212,7 @@ var derpRegionErrorWarnable = condRegister(func() *Warnable { // noUDP4BindWarnable is a Warnable that warns the user that Tailscale couldn't listen for incoming UDP connections. var noUDP4BindWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "no-udp4-bind", + Code: tsconst.HealthWarnableNoUDP4Bind, Title: "NAT traversal setup failure", Severity: SeverityMedium, DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, @@ -223,7 +224,7 @@ var noUDP4BindWarnable = condRegister(func() *Warnable { // mapResponseTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't received a network map from the coordination server in a while. var mapResponseTimeoutWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "mapresponse-timeout", + Code: tsconst.HealthWarnableMapResponseTimeout, Title: "Network map response timeout", Severity: SeverityMedium, DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, @@ -236,7 +237,7 @@ var mapResponseTimeoutWarnable = condRegister(func() *Warnable { // tlsConnectionFailedWarnable is a Warnable that warns the user that Tailscale could not establish an encrypted connection with a server. var tlsConnectionFailedWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "tls-connection-failed", + Code: tsconst.HealthWarnableTLSConnectionFailed, Title: "Encrypted connection failed", Severity: SeverityMedium, DependsOn: []*Warnable{NetworkStatusWarnable}, @@ -249,7 +250,7 @@ var tlsConnectionFailedWarnable = condRegister(func() *Warnable { // magicsockReceiveFuncWarnable is a Warnable that warns the user that one of the Magicsock functions is not running. 
var magicsockReceiveFuncWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "magicsock-receive-func-error", + Code: tsconst.HealthWarnableMagicsockReceiveFuncError, Title: "MagicSock function not running", Severity: SeverityMedium, Text: func(args Args) string { @@ -261,7 +262,7 @@ var magicsockReceiveFuncWarnable = condRegister(func() *Warnable { // testWarnable is a Warnable that is used within this package for testing purposes only. var testWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "test-warnable", + Code: tsconst.HealthWarnableTestWarnable, Title: "Test warnable", Severity: SeverityLow, Text: func(args Args) string { @@ -273,7 +274,7 @@ var testWarnable = condRegister(func() *Warnable { // applyDiskConfigWarnable is a Warnable that warns the user that there was an error applying the envknob config stored on disk. var applyDiskConfigWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "apply-disk-config", + Code: tsconst.HealthWarnableApplyDiskConfig, Title: "Could not apply configuration", Severity: SeverityMedium, Text: func(args Args) string { @@ -291,7 +292,7 @@ const warmingUpWarnableDuration = 5 * time.Second // the backend is fully started. var warmingUpWarnable = condRegister(func() *Warnable { return &Warnable{ - Code: "warming-up", + Code: tsconst.HealthWarnableWarmingUp, Title: "Tailscale is starting", Severity: SeverityLow, Text: StaticMessage("Tailscale is starting. 
Please wait."), diff --git a/tsconst/health.go b/tsconst/health.go new file mode 100644 index 0000000000000..5db9b1fc286ec --- /dev/null +++ b/tsconst/health.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +const ( + HealthWarnableUpdateAvailable = "update-available" + HealthWarnableSecurityUpdateAvailable = "security-update-available" + HealthWarnableIsUsingUnstableVersion = "is-using-unstable-version" + HealthWarnableNetworkStatus = "network-status" + HealthWarnableWantRunningFalse = "wantrunning-false" + HealthWarnableLocalLogConfigError = "local-log-config-error" + HealthWarnableLoginState = "login-state" + HealthWarnableNotInMapPoll = "not-in-map-poll" + HealthWarnableNoDERPHome = "no-derp-home" + HealthWarnableNoDERPConnection = "no-derp-connection" + HealthWarnableDERPTimedOut = "derp-timed-out" + HealthWarnableDERPRegionError = "derp-region-error" + HealthWarnableNoUDP4Bind = "no-udp4-bind" + HealthWarnableMapResponseTimeout = "mapresponse-timeout" + HealthWarnableTLSConnectionFailed = "tls-connection-failed" + HealthWarnableMagicsockReceiveFuncError = "magicsock-receive-func-error" + HealthWarnableTestWarnable = "test-warnable" + HealthWarnableApplyDiskConfig = "apply-disk-config" + HealthWarnableWarmingUp = "warming-up" +) From fd0e541e5d72aecddcb3e989c33b9aef23c7be96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 24 Oct 2025 15:00:55 -0400 Subject: [PATCH 0607/1093] net/tsdial: do not panic if setting the same eventbus twice (#17640) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #17638 Signed-off-by: Claus Lensbøl --- net/tsdial/tsdial.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index a0e2a11a472f0..c7483a125a07a 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -96,6 +96,7 @@ type Dialer struct { dnsCache 
*dnscache.MessageCache // nil until first non-empty SetExitDNSDoH nextSysConnID int activeSysConns map[int]net.Conn // active connections not yet closed + bus *eventbus.Bus // only used for comparison with already set bus. eventClient *eventbus.Client eventBusSubs eventbus.Monitor } @@ -226,14 +227,17 @@ func (d *Dialer) NetMon() *netmon.Monitor { func (d *Dialer) SetBus(bus *eventbus.Bus) { d.mu.Lock() defer d.mu.Unlock() - if d.eventClient != nil { - panic("eventbus has already been set") + if d.bus == bus { + return + } else if d.bus != nil { + panic("different eventbus has already been set") } // Having multiple watchers could lead to problems, // so unregister the callback if it exists. if d.netMonUnregister != nil { d.netMonUnregister() } + d.bus = bus d.eventClient = bus.Client("tsdial.Dialer") d.eventBusSubs = d.eventClient.Monitor(d.linkChangeWatcher(d.eventClient)) } From 4346615d77a6de16854c6e78f9d49375d6424e6e Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 24 Oct 2025 14:08:47 -0700 Subject: [PATCH 0608/1093] logtail: avoid racing eventbus subscriptions with Shutdown (#17639) When the eventbus is enabled, set up the subscription for change deltas at the beginning when the client is created, rather than waiting for the first awaitInternetUp check. Otherwise, it is possible for a check to race with the client close in Shutdown, which triggers a panic. Updates #17638 Change-Id: I461c07939eca46699072b14b1814ecf28eec750c Signed-off-by: M. J. 
Fromberger --- logtail/logtail.go | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/logtail/logtail.go b/logtail/logtail.go index 675422890149c..52823fedf4309 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -124,6 +124,7 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if cfg.Bus != nil { l.eventClient = cfg.Bus.Client("logtail.Logger") + l.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) } l.SetSockstatsLabel(sockstats.LabelLogtailLogger) l.compressLogs = cfg.CompressLogs @@ -162,6 +163,7 @@ type Logger struct { httpDoCalls atomic.Int32 sockstatsLabel atomicSocktatsLabel eventClient *eventbus.Client + changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] procID uint32 includeProcSequence bool @@ -427,8 +429,23 @@ func (l *Logger) internetUp() bool { func (l *Logger) awaitInternetUp(ctx context.Context) { if l.eventClient != nil { - l.awaitInternetUpBus(ctx) - return + for { + if l.internetUp() { + return + } + select { + case <-ctx.Done(): + return // give up + case <-l.changeDeltaSub.Done(): + return // give up (closing down) + case delta := <-l.changeDeltaSub.Events(): + if delta.New.AnyInterfaceUp() || l.internetUp() { + fmt.Fprintf(l.stderr, "logtail: internet back up\n") + return + } + fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") + } + } } upc := make(chan bool, 1) defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { @@ -449,24 +466,6 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { } } -func (l *Logger) awaitInternetUpBus(ctx context.Context) { - if l.internetUp() { - return - } - sub := eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) - defer sub.Close() - select { - case delta := <-sub.Events(): - if delta.New.AnyInterfaceUp() { - fmt.Fprintf(l.stderr, "logtail: internet back up\n") - return - } - fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") - case <-ctx.Done(): - return - } 
-} - // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. From a760cbe33f4bed64b63c6118808d02b2771ff785 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Mon, 27 Oct 2025 13:18:13 -0700 Subject: [PATCH 0609/1093] control/controlclient: back out HW key attestation (#17664) Temporarily back out the TPM-based hw attestation code while we debug Windows exceptions. Updates tailscale/corp#31269 Signed-off-by: Patrick O'Doherty --- control/controlclient/direct.go | 22 --------------- ipn/ipnlocal/hwattest.go | 48 --------------------------------- ipn/ipnlocal/local.go | 1 - ipn/ipnlocal/profiles.go | 10 ------- ipn/ipnlocal/profiles_test.go | 1 - ipn/prefs_test.go | 2 +- types/persist/persist.go | 18 ++----------- types/persist/persist_clone.go | 4 --- types/persist/persist_test.go | 2 +- types/persist/persist_view.go | 10 +++---- 10 files changed, 8 insertions(+), 110 deletions(-) delete mode 100644 ipn/ipnlocal/hwattest.go diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 63a12b2495fd8..fe7cc235b05f8 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -7,8 +7,6 @@ import ( "bytes" "cmp" "context" - "crypto" - "crypto/sha256" "encoding/binary" "encoding/json" "errors" @@ -948,26 +946,6 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap ConnectionHandleForTest: connectionHandleForTest, } - // If we have a hardware attestation key, sign the node key with it and send - // the key & signature in the map request. 
- if buildfeatures.HasTPM { - if k := persist.AsStruct().AttestationKey; k != nil && !k.IsZero() { - hwPub := key.HardwareAttestationPublicFromPlatformKey(k) - request.HardwareAttestationKey = hwPub - - t := c.clock.Now() - msg := fmt.Sprintf("%d|%s", t.Unix(), nodeKey.String()) - digest := sha256.Sum256([]byte(msg)) - sig, err := k.Sign(nil, digest[:], crypto.SHA256) - if err != nil { - c.logf("failed to sign node key with hardware attestation key: %v", err) - } else { - request.HardwareAttestationKeySignature = sig - request.HardwareAttestationKeySignatureTimestamp = t - } - } - } - var extraDebugFlags []string if buildfeatures.HasAdvertiseRoutes && hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && ipForwardingBroken(hi.RoutableIPs, c.netMon.InterfaceState()) { diff --git a/ipn/ipnlocal/hwattest.go b/ipn/ipnlocal/hwattest.go deleted file mode 100644 index 2c93cad4c97ff..0000000000000 --- a/ipn/ipnlocal/hwattest.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ts_omit_tpm - -package ipnlocal - -import ( - "errors" - - "tailscale.com/feature" - "tailscale.com/types/key" - "tailscale.com/types/logger" - "tailscale.com/types/persist" -) - -func init() { - feature.HookGenerateAttestationKeyIfEmpty.Set(generateAttestationKeyIfEmpty) -} - -// generateAttestationKeyIfEmpty generates a new hardware attestation key if -// none exists. It returns true if a new key was generated and stored in -// p.AttestationKey. 
-func generateAttestationKeyIfEmpty(p *persist.Persist, logf logger.Logf) (bool, error) { - // attempt to generate a new hardware attestation key if none exists - var ak key.HardwareAttestationKey - if p != nil { - ak = p.AttestationKey - } - - if ak == nil || ak.IsZero() { - var err error - ak, err = key.NewHardwareAttestationKey() - if err != nil { - if !errors.Is(err, key.ErrUnsupported) { - logf("failed to create hardware attestation key: %v", err) - } - } else if ak != nil { - logf("using new hardware attestation key: %v", ak.Public()) - if p == nil { - p = &persist.Persist{} - } - p.AttestationKey = ak - return true, nil - } - } - return false, nil -} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ee3059de437c7..7b2257cca2223 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1185,7 +1185,6 @@ func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView { p2.Persist.PrivateNodeKey = key.NodePrivate{} p2.Persist.OldPrivateNodeKey = key.NodePrivate{} p2.Persist.NetworkLockKey = key.NLPrivate{} - p2.Persist.AttestationKey = nil return p2.View() } diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 9c217637890cc..3e80cdaa93d1f 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -19,9 +19,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" - "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/types/persist" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) @@ -656,14 +654,6 @@ func (pm *profileManager) loadSavedPrefs(k ipn.StateKey) (ipn.PrefsView, error) return ipn.PrefsView{}, err } savedPrefs := ipn.NewPrefs() - - // if supported by the platform, create an empty hardware attestation key to use when deserializing - // to avoid type exceptions from json.Unmarshaling into an interface{}. 
- hw, _ := key.NewEmptyHardwareAttestationKey() - savedPrefs.Persist = &persist.Persist{ - AttestationKey: hw, - } - if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err) } diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index deeab2ade9b15..60c92ff8d3493 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -151,7 +151,6 @@ func TestProfileDupe(t *testing.T) { ID: tailcfg.UserID(user), LoginName: fmt.Sprintf("user%d@example.com", user), }, - AttestationKey: nil, } } user1Node1 := newPersist(1, 1) diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 2336164096c14..3339a631ce827 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -501,7 +501,7 @@ func TestPrefsPretty(t *testing.T) { }, }, "linux", - `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u="" ak=-}}`, + `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u=""}}`, }, { Prefs{ diff --git a/types/persist/persist.go b/types/persist/persist.go index 4b62c79ddd186..d888a6afb6af5 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -26,7 +26,6 @@ type Persist struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID - AttestationKey key.HardwareAttestationKey `json:",omitempty"` // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. 
This is used to @@ -85,20 +84,11 @@ func (p *Persist) Equals(p2 *Persist) bool { return false } - var pub, p2Pub key.HardwareAttestationPublic - if p.AttestationKey != nil && !p.AttestationKey.IsZero() { - pub = key.HardwareAttestationPublicFromPlatformKey(p.AttestationKey) - } - if p2.AttestationKey != nil && !p2.AttestationKey.IsZero() { - p2Pub = key.HardwareAttestationPublicFromPlatformKey(p2.AttestationKey) - } - return p.PrivateNodeKey.Equal(p2.PrivateNodeKey) && p.OldPrivateNodeKey.Equal(p2.OldPrivateNodeKey) && p.UserProfile.Equal(&p2.UserProfile) && p.NetworkLockKey.Equal(p2.NetworkLockKey) && p.NodeID == p2.NodeID && - pub.Equal(p2Pub) && reflect.DeepEqual(nilIfEmpty(p.DisallowedTKAStateIDs), nilIfEmpty(p2.DisallowedTKAStateIDs)) } @@ -106,16 +96,12 @@ func (p *Persist) Pretty() string { var ( ok, nk key.NodePublic ) - akString := "-" if !p.OldPrivateNodeKey.IsZero() { ok = p.OldPrivateNodeKey.Public() } if !p.PrivateNodeKey.IsZero() { nk = p.PublicNodeKey() } - if p.AttestationKey != nil && !p.AttestationKey.IsZero() { - akString = fmt.Sprintf("%v", p.AttestationKey.Public()) - } - return fmt.Sprintf("Persist{o=%v, n=%v u=%#v ak=%s}", - ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName, akString) + return fmt.Sprintf("Persist{o=%v, n=%v u=%#v}", + ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName) } diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index 9dbe7e0f6fa6d..680419ff2f30b 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -19,9 +19,6 @@ func (src *Persist) Clone() *Persist { } dst := new(Persist) *dst = *src - if src.AttestationKey != nil { - dst.AttestationKey = src.AttestationKey.Clone() - } dst.DisallowedTKAStateIDs = append(src.DisallowedTKAStateIDs[:0:0], src.DisallowedTKAStateIDs...) 
return dst } @@ -34,6 +31,5 @@ var _PersistCloneNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID - AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index 713114b74dcd5..dbf2a6d8c7662 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -21,7 +21,7 @@ func fieldsOf(t reflect.Type) (fields []string) { } func TestPersistEqual(t *testing.T) { - persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "AttestationKey", "DisallowedTKAStateIDs"} + persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "DisallowedTKAStateIDs"} if have := fieldsOf(reflect.TypeFor[Persist]()); !reflect.DeepEqual(have, persistHandles) { t.Errorf("Persist.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, persistHandles) diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index dbf8294ef5a7a..7d1507468fc65 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -89,11 +89,10 @@ func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } // needed to request key rotation -func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } -func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } -func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } -func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } -func (v PersistView) AttestationKey() tailcfg.StableNodeID { panic("unsupported") } +func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } +func (v PersistView) UserProfile() tailcfg.UserProfile { 
return v.ж.UserProfile } +func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } +func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. This is used to @@ -111,6 +110,5 @@ var _PersistViewNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID - AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) From 34e992f59db2feed0c5cd857d4829ea5ef5e0298 Mon Sep 17 00:00:00 2001 From: Max Coulombe Date: Mon, 27 Oct 2025 16:33:03 -0400 Subject: [PATCH 0610/1093] feature/identityfederation: strip query params on clientID (#17666) Updates #9192 Signed-off-by: mcoulombe --- .../identityfederation/identityfederation.go | 19 +++++++++++-------- .../identityfederation_test.go | 10 +++++++++- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/feature/identityfederation/identityfederation.go b/feature/identityfederation/identityfederation.go index a4470fc27eaea..ab1b65f1217d1 100644 --- a/feature/identityfederation/identityfederation.go +++ b/feature/identityfederation/identityfederation.go @@ -42,12 +42,12 @@ func resolveAuthKey(ctx context.Context, baseURL, clientID, idToken string, tags baseURL = ipn.DefaultControlURL } - ephemeral, preauth, err := parseOptionalAttributes(clientID) + strippedID, ephemeral, preauth, err := parseOptionalAttributes(clientID) if err != nil { return "", fmt.Errorf("failed to parse optional config attributes: %w", err) } - accessToken, err := exchangeJWTForToken(ctx, baseURL, clientID, idToken) + accessToken, err := exchangeJWTForToken(ctx, baseURL, strippedID, idToken) if err != nil { return "", fmt.Errorf("failed to exchange JWT for access token: %w", err) } @@ -79,15 +79,15 @@ func resolveAuthKey(ctx context.Context, baseURL, clientID, idToken string, tags return authkey, nil } -func 
parseOptionalAttributes(clientID string) (ephemeral bool, preauthorized bool, err error) { - _, attrs, found := strings.Cut(clientID, "?") +func parseOptionalAttributes(clientID string) (strippedID string, ephemeral bool, preauthorized bool, err error) { + strippedID, attrs, found := strings.Cut(clientID, "?") if !found { - return true, false, nil + return clientID, true, false, nil } parsed, err := url.ParseQuery(attrs) if err != nil { - return false, false, fmt.Errorf("failed to parse optional config attributes: %w", err) + return "", false, false, fmt.Errorf("failed to parse optional config attributes: %w", err) } for k := range parsed { @@ -97,11 +97,14 @@ func parseOptionalAttributes(clientID string) (ephemeral bool, preauthorized boo case "preauthorized": preauthorized, err = strconv.ParseBool(parsed.Get(k)) default: - return false, false, fmt.Errorf("unknown optional config attribute %q", k) + return "", false, false, fmt.Errorf("unknown optional config attribute %q", k) } } + if err != nil { + return "", false, false, err + } - return ephemeral, preauthorized, err + return strippedID, ephemeral, preauthorized, nil } // exchangeJWTForToken exchanges a JWT for a Tailscale access token. 
diff --git a/feature/identityfederation/identityfederation_test.go b/feature/identityfederation/identityfederation_test.go index 7b75852a819a1..a673a42982706 100644 --- a/feature/identityfederation/identityfederation_test.go +++ b/feature/identityfederation/identityfederation_test.go @@ -87,6 +87,7 @@ func TestParseOptionalAttributes(t *testing.T) { tests := []struct { name string clientID string + wantClientID string wantEphemeral bool wantPreauth bool wantErr string @@ -94,6 +95,7 @@ func TestParseOptionalAttributes(t *testing.T) { { name: "default values", clientID: "client-123", + wantClientID: "client-123", wantEphemeral: true, wantPreauth: false, wantErr: "", @@ -101,6 +103,7 @@ func TestParseOptionalAttributes(t *testing.T) { { name: "custom values", clientID: "client-123?ephemeral=false&preauthorized=true", + wantClientID: "client-123", wantEphemeral: false, wantPreauth: true, wantErr: "", @@ -108,6 +111,7 @@ func TestParseOptionalAttributes(t *testing.T) { { name: "unknown attribute", clientID: "client-123?unknown=value", + wantClientID: "", wantEphemeral: false, wantPreauth: false, wantErr: `unknown optional config attribute "unknown"`, @@ -115,6 +119,7 @@ func TestParseOptionalAttributes(t *testing.T) { { name: "invalid value", clientID: "client-123?ephemeral=invalid", + wantClientID: "", wantEphemeral: false, wantPreauth: false, wantErr: `strconv.ParseBool: parsing "invalid": invalid syntax`, @@ -123,7 +128,7 @@ func TestParseOptionalAttributes(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ephemeral, preauth, err := parseOptionalAttributes(tt.clientID) + strippedID, ephemeral, preauth, err := parseOptionalAttributes(tt.clientID) if tt.wantErr != "" { if err == nil { t.Errorf("parseOptionalAttributes() error = nil, want %q", tt.wantErr) @@ -138,6 +143,9 @@ func TestParseOptionalAttributes(t *testing.T) { return } } + if strippedID != tt.wantClientID { + t.Errorf("parseOptionalAttributes() strippedID = %v, want %v", 
strippedID, tt.wantClientID) + } if ephemeral != tt.wantEphemeral { t.Errorf("parseOptionalAttributes() ephemeral = %v, want %v", ephemeral, tt.wantEphemeral) } From f4e2720821d4975de8a1964b9274db3f19da48d2 Mon Sep 17 00:00:00 2001 From: srwareham Date: Mon, 27 Oct 2025 15:20:57 -0700 Subject: [PATCH 0611/1093] cmd/tailscale/cli: move JetKVM scripts to /userdata/init.d for persistence (#17610) Updates #16524 Updates jetkvm/rv1106-system#34 Signed-off-by: srwareham --- cmd/tailscale/cli/configure-jetkvm.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/configure-jetkvm.go b/cmd/tailscale/cli/configure-jetkvm.go index a8e0a7cb542ef..c80bf673605cf 100644 --- a/cmd/tailscale/cli/configure-jetkvm.go +++ b/cmd/tailscale/cli/configure-jetkvm.go @@ -48,9 +48,12 @@ func runConfigureJetKVM(ctx context.Context, args []string) error { if runtime.GOOS != "linux" || distro.Get() != distro.JetKVM { return errors.New("only implemented on JetKVM") } - err := os.WriteFile("/etc/init.d/S22tailscale", bytes.TrimLeft([]byte(` + if err := os.MkdirAll("/userdata/init.d", 0755); err != nil { + return errors.New("unable to create /userdata/init.d") + } + err := os.WriteFile("/userdata/init.d/S22tailscale", bytes.TrimLeft([]byte(` #!/bin/sh -# /etc/init.d/S22tailscale +# /userdata/init.d/S22tailscale # Start/stop tailscaled case "$1" in From 576aacd459406f3b8d76a1978825b24aa2c56291 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Fri, 3 Oct 2025 17:52:41 +0100 Subject: [PATCH 0612/1093] ipn/ipnlocal/serve: add grant headers Updates tailscale/corp/#28372 Signed-off-by: Gesa Stupperich --- cmd/tailscale/cli/serve_legacy.go | 24 +-- cmd/tailscale/cli/serve_v2.go | 36 ++++- cmd/tailscale/cli/serve_v2_test.go | 49 +++++- ipn/ipn_clone.go | 10 +- ipn/ipn_view.go | 12 +- ipn/ipnlocal/serve.go | 68 +++++++- ipn/ipnlocal/serve_test.go | 242 +++++++++++++++++++++++++++++ ipn/serve.go | 2 + 8 files changed, 416 insertions(+), 27 deletions(-) diff --git 
a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 95808fdf2eb34..95e518998bcd1 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -162,20 +162,20 @@ type serveEnv struct { json bool // output JSON (status only for now) // v2 specific flags - bg bgBoolFlag // background mode - setPath string // serve path - https uint // HTTP port - http uint // HTTP port - tcp uint // TCP port - tlsTerminatedTCP uint // a TLS terminated TCP port - subcmd serveMode // subcommand - yes bool // update without prompt - service tailcfg.ServiceName // service name - tun bool // redirect traffic to OS for service - allServices bool // apply config file to all services + bg bgBoolFlag // background mode + setPath string // serve path + https uint // HTTP port + http uint // HTTP port + tcp uint // TCP port + tlsTerminatedTCP uint // a TLS terminated TCP port + subcmd serveMode // subcommand + yes bool // update without prompt + service tailcfg.ServiceName // service name + tun bool // redirect traffic to OS for service + allServices bool // apply config file to all services + userCaps []tailcfg.PeerCapability // user capabilities to forward lc localServeClient // localClient interface, specific to serve - // optional stuff for tests: testFlagOut io.Writer testStdout io.Writer diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index ca0497f8d0369..4921bf31f8dfd 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -96,6 +96,28 @@ func (b *bgBoolFlag) String() string { return strconv.FormatBool(b.Value) } +type userCapsFlag struct { + Value *[]tailcfg.PeerCapability +} + +// Set appends s to the list of userCaps. +func (u *userCapsFlag) Set(s string) error { + if s == "" { + return nil + } + *u.Value = append(*u.Value, tailcfg.PeerCapability(s)) + return nil +} + +// String returns the string representation of the userCaps slice. 
+func (u *userCapsFlag) String() string { + s := make([]string, len(*u.Value)) + for i, v := range *u.Value { + s[i] = string(v) + } + return strings.Join(s, ",") +} + var serveHelpCommon = strings.TrimSpace(` can be a file, directory, text, or most commonly the location to a service running on the local machine. The location to the location service can be expressed as a port number (e.g., 3000), @@ -199,6 +221,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)") if subcmd == serve { fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") + fs.Var(&userCapsFlag{Value: &e.userCaps}, "usercaps", "User capability to forward to the server (can be specified multiple times)") } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") @@ -469,7 +492,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { if len(args) > 0 { target = args[0] } - err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix) + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.userCaps) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -790,7 +813,7 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er for name, details := range scf.Services { for ppr, ep := range details.Endpoints { if ep.Protocol == conffile.ProtoTUN { - err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix) + err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix, nil) if err != nil { return err } @@ -812,7 +835,7 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er portStr := 
fmt.Sprint(destPort) target = fmt.Sprintf("%s://%s", ep.Protocol, net.JoinHostPort(ep.Destination, portStr)) } - err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix) + err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix, nil) if err != nil { return fmt.Errorf("service %q: %w", name, err) } @@ -915,12 +938,12 @@ func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType { } } -func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string) error { +func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string, caps []tailcfg.PeerCapability) error { // update serve config based on the type switch srvType { case serveTypeHTTPS, serveTypeHTTP: useTLS := srvType == serveTypeHTTPS - err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target, mds) + err := e.applyWebServe(sc, dnsName, srvPort, useTLS, mount, target, mds, caps) if err != nil { return fmt.Errorf("failed apply web serve: %w", err) } @@ -1084,7 +1107,7 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN return output.String() } -func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target string, mds string) error { +func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target, mds string, caps []tailcfg.PeerCapability) error { h := new(ipn.HTTPHandler) switch { case strings.HasPrefix(target, "text:"): @@ -1118,6 +1141,7 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui return err } h.Proxy = t + h.UserCaps = caps } // TODO: validation needs to check nested foreground configs diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 
f9653253a7cad..d039c52cc7e7e 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -857,6 +857,53 @@ func TestServeDevConfigMutations(t *testing.T) { wantErr: anyErr(), }}, }, + { + name: "forward_grant_header", + steps: []step{ + { + command: cmd("serve --bg --usercaps=example.com/cap/foo 3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: "http://127.0.0.1:3000", + UserCaps: []tailcfg.PeerCapability{"example.com/cap/foo"}, + }, + }}, + }, + }, + }, + { + command: cmd("serve --bg --usercaps=example.com/cap/foo --usercaps=example.com/cap/bar 3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: "http://127.0.0.1:3000", + UserCaps: []tailcfg.PeerCapability{"example.com/cap/foo", "example.com/cap/bar"}, + }, + }}, + }, + }, + }, + { + command: cmd("serve --bg --usercaps=example.com/cap/bar 3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: "http://127.0.0.1:3000", + UserCaps: []tailcfg.PeerCapability{"example.com/cap/bar"}, + }, + }}, + }, + }, + }, + }, + }, } for _, group := range groups { @@ -2009,7 +2056,7 @@ func TestSetServe(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix) + err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix, nil) if err != nil && !tt.expectErr { t.Fatalf("got error: %v; did not expect 
error.", err) } diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 3d67efc6fd33b..54511094b4809 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -232,14 +232,16 @@ func (src *HTTPHandler) Clone() *HTTPHandler { } dst := new(HTTPHandler) *dst = *src + dst.UserCaps = append(src.UserCaps[:0:0], src.UserCaps...) return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerCloneNeedsRegeneration = HTTPHandler(struct { - Path string - Proxy string - Text string + Path string + Proxy string + Text string + UserCaps []tailcfg.PeerCapability }{}) // Clone makes a deep copy of WebServerConfig. @@ -256,7 +258,7 @@ func (src *WebServerConfig) Clone() *WebServerConfig { if v == nil { dst.Handlers[k] = nil } else { - dst.Handlers[k] = ptr.To(*v) + dst.Handlers[k] = v.Clone() } } } diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 1c7639f6ff932..a87b6c42e87ba 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -891,11 +891,17 @@ func (v HTTPHandlerView) Proxy() string { return v.ж.Proxy } // plaintext to serve (primarily for testing) func (v HTTPHandlerView) Text() string { return v.ж.Text } +// peer capabilities to forward in grant header, e.g. example.com/cap/mon +func (v HTTPHandlerView) UserCaps() views.Slice[tailcfg.PeerCapability] { + return views.SliceOf(v.ж.UserCaps) +} + // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct { - Path string - Proxy string - Text string + Path string + Proxy string + Text string + UserCaps []tailcfg.PeerCapability }{}) // View returns a read-only view of WebServerConfig. 
diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 3c967fd1e6403..799161a7651a7 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -40,6 +40,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/types/views" "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/ctxkey" @@ -64,6 +65,7 @@ func init() { const ( contentTypeHeader = "Content-Type" grpcBaseContentType = "application/grpc" + grantHeaderMaxSize = 15360 // 15 KiB ) // ErrETagMismatch signals that the given @@ -79,7 +81,8 @@ type serveHTTPContext struct { DestPort uint16 // provides funnel-specific context, nil if not funneled - Funnel *funnelFlow + Funnel *funnelFlow + PeerCapsFilter views.Slice[tailcfg.PeerCapability] } // funnelFlow represents a funneled connection initiated via IngressPeer @@ -803,6 +806,7 @@ func (rp *reverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { r.Out.Host = r.In.Host addProxyForwardedHeaders(r) rp.lb.addTailscaleIdentityHeaders(r) + rp.lb.addTailscaleGrantHeader(r) }} // There is no way to autodetect h2c as per RFC 9113 @@ -927,6 +931,62 @@ func encTailscaleHeaderValue(v string) string { return mime.QEncoding.Encode("utf-8", v) } +func (b *LocalBackend) addTailscaleGrantHeader(r *httputil.ProxyRequest) { + r.Out.Header.Del("Tailscale-User-Capabilities") + + c, ok := serveHTTPContextKey.ValueOk(r.Out.Context()) + if !ok || c.Funnel != nil { + return + } + filter := c.PeerCapsFilter + if filter.IsNil() { + return + } + peerCaps := b.PeerCaps(c.SrcAddr.Addr()) + if peerCaps == nil { + return + } + + peerCapsFiltered := make(map[tailcfg.PeerCapability][]tailcfg.RawMessage, filter.Len()) + for _, cap := range filter.AsSlice() { + if peerCaps.HasCapability(cap) { + peerCapsFiltered[cap] = peerCaps[cap] + } + } + + serialized, truncated, err := serializeUpToNBytes(peerCapsFiltered, grantHeaderMaxSize) + if err != nil { + b.logf("serve: failed to serialize 
PeerCapMap: %v", err) + return + } + if truncated { + b.logf("serve: serialized PeerCapMap exceeds %d bytes, forwarding truncated PeerCapMap", grantHeaderMaxSize) + } + + r.Out.Header.Set("Tailscale-User-Capabilities", encTailscaleHeaderValue(serialized)) +} + +// serializeUpToNBytes serializes capMap. It arbitrarily truncates entries from the capMap +// if the size of the serialized capMap would exceed N bytes. +func serializeUpToNBytes(capMap tailcfg.PeerCapMap, N int) (string, bool, error) { + numBytes := 0 + capped := false + result := tailcfg.PeerCapMap{} + for k, v := range capMap { + numBytes += len(k) + len(v) + if numBytes > N { + capped = true + break + } + result[k] = v + } + marshalled, err := json.Marshal(result) + if err != nil { + return "", false, err + } + return string(marshalled), capped, nil +} + // serveWebHandler is an http.HandlerFunc that maps incoming requests to the // correct *http. func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { @@ -950,6 +1010,12 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, "unknown proxy destination", http.StatusInternalServerError) return } + // Inject user capabilities to forward into the request context + c, ok := serveHTTPContextKey.ValueOk(r.Context()) + if !ok { + return + } + c.PeerCapsFilter = h.UserCaps() h := p.(http.Handler) // Trim the mount point from the URL path before proxying. 
(#6571) if r.URL.Path != "/" { diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index b4461d12f2ad0..5d880e18598fc 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -16,6 +16,7 @@ import ( "errors" "fmt" "io" + "mime" "net/http" "net/http/httptest" "net/netip" @@ -27,6 +28,7 @@ import ( "testing" "time" + "tailscale.com/control/controlclient" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" @@ -41,6 +43,7 @@ import ( "tailscale.com/util/must" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/wgengine" + "tailscale.com/wgengine/filter" ) func TestExpandProxyArg(t *testing.T) { @@ -768,6 +771,156 @@ func TestServeHTTPProxyHeaders(t *testing.T) { } } +func TestServeHTTPProxyGrantHeader(t *testing.T) { + b := newTestBackend(t) + + nm := b.NetMap() + matches, err := filter.MatchesFromFilterRules([]tailcfg.FilterRule{ + { + SrcIPs: []string{"100.150.151.152"}, + CapGrant: []tailcfg.CapGrant{{ + Dsts: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, + CapMap: tailcfg.PeerCapMap{ + "example.com/cap/interesting": []tailcfg.RawMessage{ + `{"role": "🐿"}`, + }, + }, + }}, + }, + { + SrcIPs: []string{"100.150.151.153"}, + CapGrant: []tailcfg.CapGrant{{ + Dsts: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, + CapMap: tailcfg.PeerCapMap{ + "example.com/cap/boring": []tailcfg.RawMessage{ + `{"role": "Viewer"}`, + }, + "example.com/cap/irrelevant": []tailcfg.RawMessage{ + `{"role": "Editor"}`, + }, + }, + }}, + }, + }) + if err != nil { + t.Fatal(err) + } + nm.PacketFilter = matches + b.SetControlClientStatus(nil, controlclient.Status{NetMap: nm}) + + // Start test serve endpoint. + testServ := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + // Piping all the headers through the response writer + // so we can check their values in tests below. 
+ for key, val := range r.Header { + w.Header().Add(key, strings.Join(val, ",")) + } + }, + )) + defer testServ.Close() + + conf := &ipn.ServeConfig{ + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "example.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": { + Proxy: testServ.URL, + UserCaps: []tailcfg.PeerCapability{"example.com/cap/interesting", "example.com/cap/boring"}, + }, + }}, + }, + } + if err := b.SetServeConfig(conf, ""); err != nil { + t.Fatal(err) + } + + type headerCheck struct { + header string + want string + } + + tests := []struct { + name string + srcIP string + wantHeaders []headerCheck + }{ + { + name: "request-from-user-within-tailnet", + srcIP: "100.150.151.152", + wantHeaders: []headerCheck{ + {"X-Forwarded-Proto", "https"}, + {"X-Forwarded-For", "100.150.151.152"}, + {"Tailscale-User-Login", "someone@example.com"}, + {"Tailscale-User-Name", "Some One"}, + {"Tailscale-User-Profile-Pic", "https://example.com/photo.jpg"}, + {"Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers"}, + {"Tailscale-User-Capabilities", `{"example.com/cap/interesting":[{"role":"🐿"}]}`}, + }, + }, + { + name: "request-from-tagged-node-within-tailnet", + srcIP: "100.150.151.153", + wantHeaders: []headerCheck{ + {"X-Forwarded-Proto", "https"}, + {"X-Forwarded-For", "100.150.151.153"}, + {"Tailscale-User-Login", ""}, + {"Tailscale-User-Name", ""}, + {"Tailscale-User-Profile-Pic", ""}, + {"Tailscale-Headers-Info", ""}, + {"Tailscale-User-Capabilities", `{"example.com/cap/boring":[{"role":"Viewer"}]}`}, + }, + }, + { + name: "request-from-outside-tailnet", + srcIP: "100.160.161.162", + wantHeaders: []headerCheck{ + {"X-Forwarded-Proto", "https"}, + {"X-Forwarded-For", "100.160.161.162"}, + {"Tailscale-User-Login", ""}, + {"Tailscale-User-Name", ""}, + {"Tailscale-User-Profile-Pic", ""}, + {"Tailscale-Headers-Info", ""}, + {"Tailscale-User-Capabilities", ""}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := 
&http.Request{ + URL: &url.URL{Path: "/"}, + TLS: &tls.ConnectionState{ServerName: "example.ts.net"}, + } + req = req.WithContext(serveHTTPContextKey.WithValue(req.Context(), &serveHTTPContext{ + DestPort: 443, + SrcAddr: netip.MustParseAddrPort(tt.srcIP + ":1234"), // random src port for tests + })) + + w := httptest.NewRecorder() + b.serveWebHandler(w, req) + + // Verify the headers. The contract with users is that identity and grant headers containing non-ASCII + // UTF-8 characters will be Q-encoded. + h := w.Result().Header + dec := new(mime.WordDecoder) + for _, c := range tt.wantHeaders { + maybeEncoded := h.Get(c.header) + got, err := dec.DecodeHeader(maybeEncoded) + if err != nil { + t.Fatalf("invalid %q header; failed to decode: %v", maybeEncoded, err) + } + if got != c.want { + t.Errorf("invalid %q header; want=%q, got=%q", c.header, c.want, got) + } + } + }) + } +} + func Test_reverseProxyConfiguration(t *testing.T) { b := newTestBackend(t) type test struct { @@ -926,6 +1079,9 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { b.currentNode().SetNetMap(&netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Name: "example.ts.net", + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.150.151.151/32"), + }, }).View(), UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ tailcfg.UserID(1): (&tailcfg.UserProfile{ @@ -1171,3 +1327,89 @@ func TestServeGRPCProxy(t *testing.T) { }) } } + +func TestSerialisePeerCapMap(t *testing.T) { + var tests = []struct { + name string + capMap tailcfg.PeerCapMap + maxNumBytes int + wantOneOfSerialized []string + wantTruncated bool + }{ + { + name: "empty cap map", + capMap: tailcfg.PeerCapMap{}, + maxNumBytes: 50, + wantOneOfSerialized: []string{"{}"}, + wantTruncated: false, + }, + { + name: "cap map with one capability", + capMap: tailcfg.PeerCapMap{ + "tailscale.com/cap/kubernetes": []tailcfg.RawMessage{ + `{"impersonate": {"groups": ["tailnet-readers"]}}`, + }, + }, + maxNumBytes: 50, + 
wantOneOfSerialized: []string{ + `{"tailscale.com/cap/kubernetes":[{"impersonate":{"groups":["tailnet-readers"]}}]}`, + }, + wantTruncated: false, + }, + { + name: "cap map with two capabilities", + capMap: tailcfg.PeerCapMap{ + "foo.com/cap/something": []tailcfg.RawMessage{ + `{"role": "Admin"}`, + }, + "bar.com/cap/other-thing": []tailcfg.RawMessage{ + `{"role": "Viewer"}`, + }, + }, + maxNumBytes: 50, + // Both cap map entries will be included, but they could appear in any order. + wantOneOfSerialized: []string{ + `{"foo.com/cap/something":[{"role":"Admin"}],"bar.com/cap/other-thing":[{"role":"Viewer"}]}`, + `{"bar.com/cap/other-thing":[{"role":"Viewer"}],"foo.com/cap/something":[{"role":"Admin"}]}`, + }, + wantTruncated: false, + }, + { + name: "cap map that should be truncated to stay within size limits", + capMap: tailcfg.PeerCapMap{ + "foo.com/cap/something": []tailcfg.RawMessage{ + `{"role": "Admin"}`, + }, + "bar.com/cap/other-thing": []tailcfg.RawMessage{ + `{"role": "Viewer"}`, + }, + }, + maxNumBytes: 40, + // Only one cap map entry will be included, but we don't know which one. 
+ wantOneOfSerialized: []string{ + `{"foo.com/cap/something":[{"role":"Admin"}]}`, + `{"bar.com/cap/other-thing":[{"role":"Viewer"}]}`, + }, + wantTruncated: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotSerialized, gotCapped, err := serializeUpToNBytes(tt.capMap, tt.maxNumBytes) + + if err != nil { + t.Fatal(err) + } + if gotCapped != tt.wantTruncated { + t.Errorf("got %t, want %t", gotCapped, tt.wantTruncated) + } + for _, wantSerialized := range tt.wantOneOfSerialized { + if gotSerialized == wantSerialized { + return + } + } + t.Errorf("want one of %v, got %q", tt.wantOneOfSerialized, gotSerialized) + }) + } +} diff --git a/ipn/serve.go b/ipn/serve.go index a0f1334d7d150..c4a0997d224ed 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -160,6 +160,8 @@ type HTTPHandler struct { Text string `json:",omitempty"` // plaintext to serve (primarily for testing) + UserCaps []tailcfg.PeerCapability `json:",omitempty"` // peer capabilities to forward in grant header, e.g. example.com/cap/mon + // TODO(bradfitz): bool to not enumerate directories? TTL on mapping for // temporary ones? Error codes? Redirects? } From d6fa899eba4978b73c6113318363f570524e55e4 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Wed, 15 Oct 2025 12:59:10 +0100 Subject: [PATCH 0613/1093] ipn/ipnlocal/serve: remove grant header truncation logic Given that we filter based on the usercaps argument now, truncation should not be necessary anymore. 
Updates tailscale/corp/#28372 Signed-off-by: Gesa Stupperich --- cmd/tailscale/cli/serve_legacy.go | 2 +- cmd/tailscale/cli/serve_v2.go | 16 ++--- cmd/tailscale/cli/serve_v2_test.go | 18 +++--- ipn/ipn_clone.go | 10 ++-- ipn/ipn_view.go | 12 ++-- ipn/ipnlocal/serve.go | 37 ++---------- ipn/ipnlocal/serve_test.go | 96 ++---------------------------- ipn/serve.go | 2 +- 8 files changed, 41 insertions(+), 152 deletions(-) diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 95e518998bcd1..5c2d8eefa5edc 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -173,7 +173,7 @@ type serveEnv struct { service tailcfg.ServiceName // service name tun bool // redirect traffic to OS for service allServices bool // apply config file to all services - userCaps []tailcfg.PeerCapability // user capabilities to forward + acceptAppCaps []tailcfg.PeerCapability // app capabilities to forward lc localServeClient // localClient interface, specific to serve // optional stuff for tests: diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 4921bf31f8dfd..f822753aceccd 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -96,12 +96,12 @@ func (b *bgBoolFlag) String() string { return strconv.FormatBool(b.Value) } -type userCapsFlag struct { +type acceptAppCapsFlag struct { Value *[]tailcfg.PeerCapability } -// Set appends s to the list of userCaps. -func (u *userCapsFlag) Set(s string) error { +// Set appends s to the list of appCaps to accept. +func (u *acceptAppCapsFlag) Set(s string) error { if s == "" { return nil } @@ -109,8 +109,8 @@ func (u *userCapsFlag) Set(s string) error { return nil } -// String returns the string representation of the userCaps slice. -func (u *userCapsFlag) String() string { +// String returns the string representation of the slice of appCaps to accept. 
+func (u *acceptAppCapsFlag) String() string { s := make([]string, len(*u.Value)) for i, v := range *u.Value { s[i] = string(v) @@ -221,7 +221,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)") if subcmd == serve { fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") - fs.Var(&userCapsFlag{Value: &e.userCaps}, "usercaps", "User capability to forward to the server (can be specified multiple times)") + fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capability to forward to the server (can be specified multiple times)") } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") @@ -492,7 +492,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { if len(args) > 0 { target = args[0] } - err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.userCaps) + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.acceptAppCaps) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -1141,7 +1141,7 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui return err } h.Proxy = t - h.UserCaps = caps + h.AcceptAppCaps = caps } // TODO: validation needs to check nested foreground configs diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index d039c52cc7e7e..473acea6168ba 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -861,42 +861,42 @@ func TestServeDevConfigMutations(t *testing.T) { name: "forward_grant_header", steps: []step{ { - command: cmd("serve --bg --usercaps=example.com/cap/foo 3000"), + command: 
cmd("serve --bg --accept-app-caps=example.com/cap/foo 3000"), want: &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": { - Proxy: "http://127.0.0.1:3000", - UserCaps: []tailcfg.PeerCapability{"example.com/cap/foo"}, + Proxy: "http://127.0.0.1:3000", + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/foo"}, }, }}, }, }, }, { - command: cmd("serve --bg --usercaps=example.com/cap/foo --usercaps=example.com/cap/bar 3000"), + command: cmd("serve --bg --accept-app-caps=example.com/cap/foo --accept-app-caps=example.com/cap/bar 3000"), want: &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": { - Proxy: "http://127.0.0.1:3000", - UserCaps: []tailcfg.PeerCapability{"example.com/cap/foo", "example.com/cap/bar"}, + Proxy: "http://127.0.0.1:3000", + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/foo", "example.com/cap/bar"}, }, }}, }, }, }, { - command: cmd("serve --bg --usercaps=example.com/cap/bar 3000"), + command: cmd("serve --bg --accept-app-caps=example.com/cap/bar 3000"), want: &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": { - Proxy: "http://127.0.0.1:3000", - UserCaps: []tailcfg.PeerCapability{"example.com/cap/bar"}, + Proxy: "http://127.0.0.1:3000", + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/bar"}, }, }}, }, diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 54511094b4809..8a0a3c833a0ac 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -232,16 +232,16 @@ func (src *HTTPHandler) Clone() *HTTPHandler { } dst := new(HTTPHandler) *dst = *src - dst.UserCaps = append(src.UserCaps[:0:0], src.UserCaps...) 
+ dst.AcceptAppCaps = append(src.AcceptAppCaps[:0:0], src.AcceptAppCaps...) return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerCloneNeedsRegeneration = HTTPHandler(struct { - Path string - Proxy string - Text string - UserCaps []tailcfg.PeerCapability + Path string + Proxy string + Text string + AcceptAppCaps []tailcfg.PeerCapability }{}) // Clone makes a deep copy of WebServerConfig. diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index a87b6c42e87ba..61d0dec23f218 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -892,16 +892,16 @@ func (v HTTPHandlerView) Proxy() string { return v.ж.Proxy } func (v HTTPHandlerView) Text() string { return v.ж.Text } // peer capabilities to forward in grant header, e.g. example.com/cap/mon -func (v HTTPHandlerView) UserCaps() views.Slice[tailcfg.PeerCapability] { - return views.SliceOf(v.ж.UserCaps) +func (v HTTPHandlerView) AcceptAppCaps() views.Slice[tailcfg.PeerCapability] { + return views.SliceOf(v.ж.AcceptAppCaps) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct { - Path string - Proxy string - Text string - UserCaps []tailcfg.PeerCapability + Path string + Proxy string + Text string + AcceptAppCaps []tailcfg.PeerCapability }{}) // View returns a read-only view of WebServerConfig. 
diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 799161a7651a7..5971476deb185 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -65,7 +65,6 @@ func init() { const ( contentTypeHeader = "Content-Type" grpcBaseContentType = "application/grpc" - grantHeaderMaxSize = 15360 // 15 KiB ) // ErrETagMismatch signals that the given @@ -932,7 +931,7 @@ func encTailscaleHeaderValue(v string) string { } func (b *LocalBackend) addTailscaleGrantHeader(r *httputil.ProxyRequest) { - r.Out.Header.Del("Tailscale-User-Capabilities") + r.Out.Header.Del("Tailscale-App-Capabilities") c, ok := serveHTTPContextKey.ValueOk(r.Out.Context()) if !ok || c.Funnel != nil { @@ -954,37 +953,13 @@ func (b *LocalBackend) addTailscaleGrantHeader(r *httputil.ProxyRequest) { } } - serialized, truncated, err := serializeUpToNBytes(peerCapsFiltered, grantHeaderMaxSize) + peerCapsSerialized, err := json.Marshal(peerCapsFiltered) if err != nil { - b.logf("serve: failed to serialize PeerCapMap: %v", err) + b.logf("serve: failed to serialize filtered PeerCapMap: %v", err) return } - if truncated { - b.logf("serve: serialized PeerCapMap exceeds %d bytes, forwarding truncated PeerCapMap", grantHeaderMaxSize) - } - - r.Out.Header.Set("Tailscale-User-Capabilities", encTailscaleHeaderValue(serialized)) -} -// serializeUpToNBytes serializes capMap. It arbitrarily truncates entries from the capMap -// if the size of the serialized capMap would exceed N bytes. 
-func serializeUpToNBytes(capMap tailcfg.PeerCapMap, N int) (string, bool, error) { - numBytes := 0 - capped := false - result := tailcfg.PeerCapMap{} - for k, v := range capMap { - numBytes += len(k) + len(v) - if numBytes > N { - capped = true - break - } - result[k] = v - } - marshalled, err := json.Marshal(result) - if err != nil { - return "", false, err - } - return string(marshalled), capped, nil + r.Out.Header.Set("Tailscale-App-Capabilities", encTailscaleHeaderValue(string(peerCapsSerialized))) } // serveWebHandler is an http.HandlerFunc that maps incoming requests to the @@ -1010,12 +985,12 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, "unknown proxy destination", http.StatusInternalServerError) return } - // Inject user capabilities to forward into the request context + // Inject app capabilities to forward into the request context c, ok := serveHTTPContextKey.ValueOk(r.Context()) if !ok { return } - c.PeerCapsFilter = h.UserCaps() + c.PeerCapsFilter = h.AcceptAppCaps() h := p.(http.Handler) // Trim the mount point from the URL path before proxying. 
(#6571) if r.URL.Path != "/" { diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 5d880e18598fc..a72c50c1f97e0 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -828,8 +828,8 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { Web: map[ipn.HostPort]*ipn.WebServerConfig{ "example.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": { - Proxy: testServ.URL, - UserCaps: []tailcfg.PeerCapability{"example.com/cap/interesting", "example.com/cap/boring"}, + Proxy: testServ.URL, + AcceptAppCaps: []tailcfg.PeerCapability{"example.com/cap/interesting", "example.com/cap/boring"}, }, }}, }, @@ -858,7 +858,7 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { {"Tailscale-User-Name", "Some One"}, {"Tailscale-User-Profile-Pic", "https://example.com/photo.jpg"}, {"Tailscale-Headers-Info", "https://tailscale.com/s/serve-headers"}, - {"Tailscale-User-Capabilities", `{"example.com/cap/interesting":[{"role":"🐿"}]}`}, + {"Tailscale-App-Capabilities", `{"example.com/cap/interesting":[{"role":"🐿"}]}`}, }, }, { @@ -871,7 +871,7 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { {"Tailscale-User-Name", ""}, {"Tailscale-User-Profile-Pic", ""}, {"Tailscale-Headers-Info", ""}, - {"Tailscale-User-Capabilities", `{"example.com/cap/boring":[{"role":"Viewer"}]}`}, + {"Tailscale-App-Capabilities", `{"example.com/cap/boring":[{"role":"Viewer"}]}`}, }, }, { @@ -884,7 +884,7 @@ func TestServeHTTPProxyGrantHeader(t *testing.T) { {"Tailscale-User-Name", ""}, {"Tailscale-User-Profile-Pic", ""}, {"Tailscale-Headers-Info", ""}, - {"Tailscale-User-Capabilities", ""}, + {"Tailscale-App-Capabilities", ""}, }, }, } @@ -1327,89 +1327,3 @@ func TestServeGRPCProxy(t *testing.T) { }) } } - -func TestSerialisePeerCapMap(t *testing.T) { - var tests = []struct { - name string - capMap tailcfg.PeerCapMap - maxNumBytes int - wantOneOfSerialized []string - wantTruncated bool - }{ - { - name: "empty cap map", - capMap: tailcfg.PeerCapMap{}, - maxNumBytes: 50, 
- wantOneOfSerialized: []string{"{}"}, - wantTruncated: false, - }, - { - name: "cap map with one capability", - capMap: tailcfg.PeerCapMap{ - "tailscale.com/cap/kubernetes": []tailcfg.RawMessage{ - `{"impersonate": {"groups": ["tailnet-readers"]}}`, - }, - }, - maxNumBytes: 50, - wantOneOfSerialized: []string{ - `{"tailscale.com/cap/kubernetes":[{"impersonate":{"groups":["tailnet-readers"]}}]}`, - }, - wantTruncated: false, - }, - { - name: "cap map with two capabilities", - capMap: tailcfg.PeerCapMap{ - "foo.com/cap/something": []tailcfg.RawMessage{ - `{"role": "Admin"}`, - }, - "bar.com/cap/other-thing": []tailcfg.RawMessage{ - `{"role": "Viewer"}`, - }, - }, - maxNumBytes: 50, - // Both cap map entries will be included, but they could appear in any order. - wantOneOfSerialized: []string{ - `{"foo.com/cap/something":[{"role":"Admin"}],"bar.com/cap/other-thing":[{"role":"Viewer"}]}`, - `{"bar.com/cap/other-thing":[{"role":"Viewer"}],"foo.com/cap/something":[{"role":"Admin"}]}`, - }, - wantTruncated: false, - }, - { - name: "cap map that should be truncated to stay within size limits", - capMap: tailcfg.PeerCapMap{ - "foo.com/cap/something": []tailcfg.RawMessage{ - `{"role": "Admin"}`, - }, - "bar.com/cap/other-thing": []tailcfg.RawMessage{ - `{"role": "Viewer"}`, - }, - }, - maxNumBytes: 40, - // Only one cap map entry will be included, but we don't know which one. 
- wantOneOfSerialized: []string{ - `{"foo.com/cap/something":[{"role":"Admin"}]}`, - `{"bar.com/cap/other-thing":[{"role":"Viewer"}]}`, - }, - wantTruncated: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotSerialized, gotCapped, err := serializeUpToNBytes(tt.capMap, tt.maxNumBytes) - - if err != nil { - t.Fatal(err) - } - if gotCapped != tt.wantTruncated { - t.Errorf("got %t, want %t", gotCapped, tt.wantTruncated) - } - for _, wantSerialized := range tt.wantOneOfSerialized { - if gotSerialized == wantSerialized { - return - } - } - t.Errorf("want one of %v, got %q", tt.wantOneOfSerialized, gotSerialized) - }) - } -} diff --git a/ipn/serve.go b/ipn/serve.go index c4a0997d224ed..3f674d9ed00ae 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -160,7 +160,7 @@ type HTTPHandler struct { Text string `json:",omitempty"` // plaintext to serve (primarily for testing) - UserCaps []tailcfg.PeerCapability `json:",omitempty"` // peer capabilities to forward in grant header, e.g. example.com/cap/mon + AcceptAppCaps []tailcfg.PeerCapability `json:",omitempty"` // peer capabilities to forward in grant header, e.g. example.com/cap/mon // TODO(bradfitz): bool to not enumerate directories? TTL on mapping for // temporary ones? Error codes? Redirects? 
From d2e4a20f265b55216e1c0adcf9b4ba95c965d9f8 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Wed, 22 Oct 2025 09:41:19 +0100 Subject: [PATCH 0614/1093] ipn/ipnlocal/serve: error when PeerCaps serialisation fails Also consolidates variable and header naming and amends the CLI behavior * multiple app-caps have to be specified as comma-separated list * simple regex-based validation of app capability names is carried out during flag parsing Signed-off-by: Gesa Stupperich --- cmd/tailscale/cli/serve_v2.go | 18 ++++- cmd/tailscale/cli/serve_v2_test.go | 113 ++++++++++++++++++++++++++++- ipn/ipnlocal/serve.go | 40 +++++----- 3 files changed, 150 insertions(+), 21 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index f822753aceccd..30adcb8e7baa9 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -20,6 +20,7 @@ import ( "os/signal" "path" "path/filepath" + "regexp" "slices" "sort" "strconv" @@ -100,12 +101,25 @@ type acceptAppCapsFlag struct { Value *[]tailcfg.PeerCapability } +// An application capability name has the form {domain}/{name}. +// Both parts must use the (simplified) FQDN label character set. +// The "name" can contain forward slashes. +// \pL = Unicode Letter, \pN = Unicode Number, - = Hyphen +var validAppCap = regexp.MustCompile(`^([\pL\pN-]+\.)+[\pL\pN-]+\/[\pL\pN-/]+$`) + // Set appends s to the list of appCaps to accept. 
func (u *acceptAppCapsFlag) Set(s string) error { if s == "" { return nil } - *u.Value = append(*u.Value, tailcfg.PeerCapability(s)) + appCaps := strings.Split(s, ",") + for _, appCap := range appCaps { + appCap = strings.TrimSpace(appCap) + if !validAppCap.MatchString(appCap) { + return fmt.Errorf("%q does not match the form {domain}/{name}, where domain must be a fully qualified domain name", s) + } + *u.Value = append(*u.Value, tailcfg.PeerCapability(appCap)) + } return nil } @@ -221,7 +235,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { fs.UintVar(&e.https, "https", 0, "Expose an HTTPS server at the specified port (default mode)") if subcmd == serve { fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") - fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capability to forward to the server (can be specified multiple times)") + fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capabilities to forward to the server (specify multiple capabilities with a comma-separated list)") } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 473acea6168ba..dfa17f1faee30 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -875,7 +875,7 @@ func TestServeDevConfigMutations(t *testing.T) { }, }, { - command: cmd("serve --bg --accept-app-caps=example.com/cap/foo --accept-app-caps=example.com/cap/bar 3000"), + command: cmd("serve --bg --accept-app-caps=example.com/cap/foo,example.com/cap/bar 3000"), want: &ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, Web: map[ipn.HostPort]*ipn.WebServerConfig{ @@ -904,6 +904,15 @@ func 
TestServeDevConfigMutations(t *testing.T) { }, }, }, + { + name: "invalid_accept_caps_invalid_app_cap", + steps: []step{ + { + command: cmd("serve --bg --accept-app-caps=example/cap/foo 3000"), // should be {domain.tld}/{name} + wantErr: anyErr(), + }, + }, + }, } for _, group := range groups { @@ -1220,6 +1229,108 @@ func TestSrcTypeFromFlags(t *testing.T) { } } +func TestAcceptSetAppCapsFlag(t *testing.T) { + testCases := []struct { + name string + inputs []string + expectErr bool + expectedValue []tailcfg.PeerCapability + }{ + { + name: "valid_simple", + inputs: []string{"example.com/name"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"example.com/name"}, + }, + { + name: "valid_unicode", + inputs: []string{"bücher.de/something"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"bücher.de/something"}, + }, + { + name: "more_valid_unicode", + inputs: []string{"example.tw/某某某"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"example.tw/某某某"}, + }, + { + name: "valid_path_slashes", + inputs: []string{"domain.com/path/to/name"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"domain.com/path/to/name"}, + }, + { + name: "valid_multiple_sets", + inputs: []string{"one.com/foo", "two.com/bar"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"one.com/foo", "two.com/bar"}, + }, + { + name: "valid_empty_string", + inputs: []string{""}, + expectErr: false, + expectedValue: nil, // Empty string should be a no-op and not append anything. + }, + { + name: "invalid_path_chars", + inputs: []string{"domain.com/path_with_underscore"}, + expectErr: true, + expectedValue: nil, // Slice should remain empty. 
+ }, + { + name: "valid_subdomain", + inputs: []string{"sub.domain.com/name"}, + expectErr: false, + expectedValue: []tailcfg.PeerCapability{"sub.domain.com/name"}, + }, + { + name: "invalid_no_path", + inputs: []string{"domain.com/"}, + expectErr: true, + expectedValue: nil, + }, + { + name: "invalid_no_domain", + inputs: []string{"/path/only"}, + expectErr: true, + expectedValue: nil, + }, + { + name: "some_invalid_some_valid", + inputs: []string{"one.com/foo", "bad/bar", "two.com/baz"}, + expectErr: true, + expectedValue: []tailcfg.PeerCapability{"one.com/foo"}, // Parsing will stop after first error + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var v []tailcfg.PeerCapability + flag := &acceptAppCapsFlag{Value: &v} + + var err error + for _, s := range tc.inputs { + err = flag.Set(s) + if err != nil { + break + } + } + + if tc.expectErr && err == nil { + t.Errorf("expected an error, but got none") + } + if !tc.expectErr && err != nil { + t.Errorf("did not expect an error, but got: %v", err) + } + + if !reflect.DeepEqual(tc.expectedValue, v) { + t.Errorf("unexpected value, got: %q, want: %q", v, tc.expectedValue) + } + }) + } +} + func TestCleanURLPath(t *testing.T) { tests := []struct { input string diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 5971476deb185..eb2c932c01165 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -80,8 +80,9 @@ type serveHTTPContext struct { DestPort uint16 // provides funnel-specific context, nil if not funneled - Funnel *funnelFlow - PeerCapsFilter views.Slice[tailcfg.PeerCapability] + Funnel *funnelFlow + // AppCapabilities lists all PeerCapabilities that should be forwarded by serve + AppCapabilities views.Slice[tailcfg.PeerCapability] } // funnelFlow represents a funneled connection initiated via IngressPeer @@ -805,10 +806,11 @@ func (rp *reverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { r.Out.Host = r.In.Host addProxyForwardedHeaders(r) 
rp.lb.addTailscaleIdentityHeaders(r) - rp.lb.addTailscaleGrantHeader(r) - }} - - // There is no way to autodetect h2c as per RFC 9113 + if err := rp.lb.addAppCapabilitiesHeader(r); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }} // There is no way to autodetect h2c as per RFC 9113 // https://datatracker.ietf.org/doc/html/rfc9113#name-starting-http-2. // However, we assume that http:// proxy prefix in combination with the // protoccol being HTTP/2 is sufficient to detect h2c for our needs. Only use this for @@ -930,24 +932,25 @@ func encTailscaleHeaderValue(v string) string { return mime.QEncoding.Encode("utf-8", v) } -func (b *LocalBackend) addTailscaleGrantHeader(r *httputil.ProxyRequest) { - r.Out.Header.Del("Tailscale-App-Capabilities") +func (b *LocalBackend) addAppCapabilitiesHeader(r *httputil.ProxyRequest) error { + const appCapabilitiesHeaderName = "Tailscale-App-Capabilities" + r.Out.Header.Del(appCapabilitiesHeaderName) c, ok := serveHTTPContextKey.ValueOk(r.Out.Context()) if !ok || c.Funnel != nil { - return + return nil } - filter := c.PeerCapsFilter - if filter.IsNil() { - return + acceptCaps := c.AppCapabilities + if acceptCaps.IsNil() { + return nil } peerCaps := b.PeerCaps(c.SrcAddr.Addr()) if peerCaps == nil { - return + return nil } - peerCapsFiltered := make(map[tailcfg.PeerCapability][]tailcfg.RawMessage, filter.Len()) - for _, cap := range filter.AsSlice() { + peerCapsFiltered := make(map[tailcfg.PeerCapability][]tailcfg.RawMessage, acceptCaps.Len()) + for _, cap := range acceptCaps.AsSlice() { if peerCaps.HasCapability(cap) { peerCapsFiltered[cap] = peerCaps[cap] } @@ -956,10 +959,11 @@ func (b *LocalBackend) addTailscaleGrantHeader(r *httputil.ProxyRequest) { peerCapsSerialized, err := json.Marshal(peerCapsFiltered) if err != nil { b.logf("serve: failed to serialize filtered PeerCapMap: %v", err) - return + return fmt.Errorf("unable to process app capabilities") } - 
r.Out.Header.Set("Tailscale-App-Capabilities", encTailscaleHeaderValue(string(peerCapsSerialized))) + r.Out.Header.Set(appCapabilitiesHeaderName, encTailscaleHeaderValue(string(peerCapsSerialized))) + return nil } // serveWebHandler is an http.HandlerFunc that maps incoming requests to the @@ -990,7 +994,7 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { if !ok { return } - c.PeerCapsFilter = h.AcceptAppCaps() + c.AppCapabilities = h.AcceptAppCaps() h := p.(http.Handler) // Trim the mount point from the URL path before proxying. (#6571) if r.URL.Path != "/" { From 02681732d12274e3a1d09708bbc0eabc5681fc34 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Tue, 28 Oct 2025 09:33:03 -0600 Subject: [PATCH 0615/1093] .github: drop branches filter with single asterisk from workflows (#17682) Drop usage of the branches filter with a single asterisk as this matches against zero or more characters but not a forward slash, resulting in PRs to branch names with forwards slashes in them not having these workflow run against them as expected. 
Updates https://github.com/tailscale/corp/issues/33523 Signed-off-by: Mario Minardi --- .github/workflows/docker-file-build.yml | 2 -- .github/workflows/installer.yml | 2 -- .github/workflows/request-dataplane-review.yml | 2 -- .github/workflows/webclient.yml | 2 -- 4 files changed, 8 deletions(-) diff --git a/.github/workflows/docker-file-build.yml b/.github/workflows/docker-file-build.yml index 04611e172bbea..c61680a343e72 100644 --- a/.github/workflows/docker-file-build.yml +++ b/.github/workflows/docker-file-build.yml @@ -4,8 +4,6 @@ on: branches: - main pull_request: - branches: - - "*" jobs: deploy: runs-on: ubuntu-latest diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index 6144864fd53b8..bafa9925a647e 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -10,8 +10,6 @@ on: - scripts/installer.sh - .github/workflows/installer.yml pull_request: - branches: - - "*" paths: - scripts/installer.sh - .github/workflows/installer.yml diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml index 4a86b0541afaa..7ae5668c3765b 100644 --- a/.github/workflows/request-dataplane-review.yml +++ b/.github/workflows/request-dataplane-review.yml @@ -2,8 +2,6 @@ name: request-dataplane-review on: pull_request: - branches: - - "*" paths: - ".github/workflows/request-dataplane-review.yml" - "**/*derp*" diff --git a/.github/workflows/webclient.yml b/.github/workflows/webclient.yml index e64137f2b160d..bcec1f52d3732 100644 --- a/.github/workflows/webclient.yml +++ b/.github/workflows/webclient.yml @@ -3,8 +3,6 @@ on: workflow_dispatch: # For now, only run on requests, not the main branches. pull_request: - branches: - - "*" paths: - "client/web/**" - ".github/workflows/webclient.yml" From db5815fb978db0873752618d4531ee2ac9f5f83d Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Tue, 28 Oct 2025 08:45:22 -0700 Subject: [PATCH 0616/1093] Revert "logtail: avoid racing eventbus subscriptions with Shutdown (#17639)" (#17684) This reverts commit 4346615d77a6de16854c6e78f9d49375d6424e6e. We averted the shutdown race, but will need to service the subscriber even when we are not waiting for a change so that we do not delay the bus as a whole. Updates #17638 Change-Id: I5488466ed83f5ad1141c95267f5ae54878a24657 Signed-off-by: M. J. Fromberger --- logtail/logtail.go | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/logtail/logtail.go b/logtail/logtail.go index 52823fedf4309..675422890149c 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -124,7 +124,6 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if cfg.Bus != nil { l.eventClient = cfg.Bus.Client("logtail.Logger") - l.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) } l.SetSockstatsLabel(sockstats.LabelLogtailLogger) l.compressLogs = cfg.CompressLogs @@ -163,7 +162,6 @@ type Logger struct { httpDoCalls atomic.Int32 sockstatsLabel atomicSocktatsLabel eventClient *eventbus.Client - changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] procID uint32 includeProcSequence bool @@ -429,23 +427,8 @@ func (l *Logger) internetUp() bool { func (l *Logger) awaitInternetUp(ctx context.Context) { if l.eventClient != nil { - for { - if l.internetUp() { - return - } - select { - case <-ctx.Done(): - return // give up - case <-l.changeDeltaSub.Done(): - return // give up (closing down) - case delta := <-l.changeDeltaSub.Events(): - if delta.New.AnyInterfaceUp() || l.internetUp() { - fmt.Fprintf(l.stderr, "logtail: internet back up\n") - return - } - fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") - } - } + l.awaitInternetUpBus(ctx) + return } upc := make(chan bool, 1) defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { @@ -466,6 +449,24 @@ func (l 
*Logger) awaitInternetUp(ctx context.Context) { } } +func (l *Logger) awaitInternetUpBus(ctx context.Context) { + if l.internetUp() { + return + } + sub := eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) + defer sub.Close() + select { + case delta := <-sub.Events(): + if delta.New.AnyInterfaceUp() { + fmt.Fprintf(l.stderr, "logtail: internet back up\n") + return + } + fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") + case <-ctx.Done(): + return + } +} + // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. From 0a5ba8280f74c2577b7c91665aad37dc88ce6c99 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Tue, 28 Oct 2025 08:46:11 -0700 Subject: [PATCH 0617/1093] CODE_OF_CONDUCT.md: update code of conduct Updates #cleanup Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- CODE_OF_CONDUCT.md | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index ef68d676879a1..348483df57558 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -3,6 +3,7 @@ ## Our Pledge We are committed to creating an open, welcoming, diverse, inclusive, healthy and respectful community. +Unacceptable, harmful and inappropriate behavior will not be tolerated. ## Our Standards @@ -16,15 +17,18 @@ Examples of behavior that contributes to a positive environment for our communit Examples of unacceptable behavior include without limitation: -- The use of sexualized language or imagery, and sexual attention or advances of any kind. -- The use of violent, intimidating or bullying language or imagery. -- Trolling, insulting or derogatory comments, and personal or political attacks. 
+- The use of language, imagery or emojis (collectively "content") that is racist, sexist, homophobic, transphobic, or otherwise harassing or discriminatory based on any protected characteristic. +- The use of sexualized content and sexual attention or advances of any kind. +- The use of violent, intimidating or bullying content. +- Trolling, concern trolling, insulting or derogatory comments, and personal or political attacks. - Public or private harassment. -- Publishing others' private information, such as a physical or email address, without their explicit permission. +- Publishing others' personal information, such as a photo, physical address, email address, online profile information, or other personal information, without their explicit permission or with the intent to bully or harass the other person. +- Posting deep fake or other AI generated content about or involving another person without the explicit permission. - Spamming community channels and members, such as sending repeat messages, low-effort content, or automated messages. -- Phishing or any similar activity; -- Distributing or promoting malware; -- Other conduct which could reasonably be considered inappropriate in a professional setting. +- Phishing or any similar activity. +- Distributing or promoting malware. +- The use of any coded or suggestive content to hide or provoke otherwise unacceptable behavior. +- Other conduct which could reasonably be considered harmful, illegal, or inappropriate in a professional setting. Please also see the Tailscale Acceptable Use Policy, available at [tailscale.com/tailscale-aup](https://tailscale.com/tailscale-aup). @@ -38,7 +42,7 @@ Please note that this community is not moderated by staff 24/7, and we do not ha While we strive to keep the community safe and welcoming, moderation may not be immediate at all hours. If you encounter any issues, report them using the appropriate channels. 
-## Enforcement +## Enforcement Guidelines Community leaders and moderators are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. @@ -46,9 +50,8 @@ Community leaders and moderators have the right and responsibility to remove, ed Tailscale retains full discretion to take action (or not) in response to a violation of these guidelines with or without notice or liability to you. We will interpret our policies and resolve disputes in favor of protecting users, customers, the public, our community and our company, as a whole. -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: +Community leaders will follow these community enforcement guidelines in determining the consequences for any action they deem in violation of this Code of Conduct, +and retain full discretion to apply the enforcement guidelines as necessary depending on the circumstances: ### 1. Correction From edb11e0e60ce702ebe62e7bfca345f167ac5efad Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 28 Oct 2025 08:34:34 -0700 Subject: [PATCH 0618/1093] wgengine/magicsock: fix js/wasm crash regression loading non-existent portmapper Thanks for the report, @Need-an-AwP! 
Fixes #17681 Updates #9394 Change-Id: I2e0b722ef9b460bd7e79499192d1a315504ca84c Signed-off-by: Brad Fitzpatrick --- client/local/local.go | 13 +++++++++++++ client/tailscale/apitype/apitype.go | 10 ++++++++++ feature/feature.go | 6 ++++++ feature/portmapper/portmapper.go | 2 ++ ipn/localapi/debug.go | 10 ++++++++++ tstest/integration/integration_test.go | 22 ++++++++++++++++++++++ wgengine/magicsock/magicsock.go | 8 ++++++-- 7 files changed, 69 insertions(+), 2 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 582c7b8487957..2382a12252a20 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -596,6 +596,19 @@ func (lc *Client) DebugResultJSON(ctx context.Context, action string) (any, erro return x, nil } +// QueryOptionalFeatures queries the optional features supported by the Tailscale daemon. +func (lc *Client) QueryOptionalFeatures(ctx context.Context) (*apitype.OptionalFeatures, error) { + body, err := lc.send(ctx, "POST", "/localapi/v0/debug-optional-features", 200, nil) + if err != nil { + return nil, fmt.Errorf("error %w: %s", err, body) + } + var x apitype.OptionalFeatures + if err := json.Unmarshal(body, &x); err != nil { + return nil, err + } + return &x, nil +} + // SetDevStoreKeyValue set a statestore key/value. It's only meant for development. // The schema (including when keys are re-read) is not a stable interface. func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) error { diff --git a/client/tailscale/apitype/apitype.go b/client/tailscale/apitype/apitype.go index 58cdcecc78d4f..6d239d082cd95 100644 --- a/client/tailscale/apitype/apitype.go +++ b/client/tailscale/apitype/apitype.go @@ -94,3 +94,13 @@ type DNSQueryResponse struct { // Resolvers is the list of resolvers that the forwarder deemed able to resolve the query. Resolvers []*dnstype.Resolver } + +// OptionalFeatures describes which optional features are enabled in the build. 
+type OptionalFeatures struct { + // Features is the map of optional feature names to whether they are + // enabled. + // + // Disabled features may be absent from the map. (That is, false values + // are not guaranteed to be present.) + Features map[string]bool +} diff --git a/feature/feature.go b/feature/feature.go index 0d383b398ab60..110b104daae00 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -13,6 +13,12 @@ var ErrUnavailable = errors.New("feature not included in this build") var in = map[string]bool{} +// Registered reports the set of registered features. +// +// The returned map should not be modified by the caller, +// not accessed concurrently with calls to Register. +func Registered() map[string]bool { return in } + // Register notes that the named feature is linked into the binary. func Register(name string) { if _, ok := in[name]; ok { diff --git a/feature/portmapper/portmapper.go b/feature/portmapper/portmapper.go index e7be00ad17d8c..d1b903cb69c20 100644 --- a/feature/portmapper/portmapper.go +++ b/feature/portmapper/portmapper.go @@ -6,6 +6,7 @@ package portmapper import ( + "tailscale.com/feature" "tailscale.com/net/netmon" "tailscale.com/net/portmapper" "tailscale.com/net/portmapper/portmappertype" @@ -14,6 +15,7 @@ import ( ) func init() { + feature.Register("portmapper") portmappertype.HookNewPortMapper.Set(newPortMapper) } diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go index b3b919d31ede2..8aca7f0093f7d 100644 --- a/ipn/localapi/debug.go +++ b/ipn/localapi/debug.go @@ -19,6 +19,7 @@ import ( "sync" "time" + "tailscale.com/client/tailscale/apitype" "tailscale.com/feature" "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" @@ -39,6 +40,7 @@ func init() { Register("debug-packet-filter-matches", (*Handler).serveDebugPacketFilterMatches) Register("debug-packet-filter-rules", (*Handler).serveDebugPacketFilterRules) Register("debug-peer-endpoint-changes", (*Handler).serveDebugPeerEndpointChanges) + 
Register("debug-optional-features", (*Handler).serveDebugOptionalFeatures) } func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) { @@ -463,3 +465,11 @@ func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) } + +func (h *Handler) serveDebugOptionalFeatures(w http.ResponseWriter, r *http.Request) { + of := &apitype.OptionalFeatures{ + Features: feature.Registered(), + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(of) +} diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 234bb8c6ec11a..64f49c7b80afd 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -175,6 +175,28 @@ func TestControlKnobs(t *testing.T) { } } +func TestExpectedFeaturesLinked(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) + + d1 := n1.StartDaemon() + n1.AwaitResponding() + lc := n1.LocalClient() + got, err := lc.QueryOptionalFeatures(t.Context()) + if err != nil { + t.Fatal(err) + } + if !got.Features["portmapper"] { + t.Errorf("optional feature portmapper unexpectedly not found: got %v", got.Features) + } + + d1.MustCleanShutdown(t) + + t.Logf("number of HTTP logcatcher requests: %v", env.LogCatcher.numRequests()) +} + func TestCollectPanic(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15865") tstest.Shard(t) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index e3c2d478e9882..6584789017624 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -719,9 +719,13 @@ func NewConn(opts Options) (*Conn, error) { newPortMapper, ok := portmappertype.HookNewPortMapper.GetOk() if ok { c.portMapper = newPortMapper(portmapperLogf, opts.EventBus, opts.NetMon, disableUPnP, c.onlyTCP443.Load) - } else if !testenv.InTest() { - 
panic("unexpected: HookNewPortMapper not set") } + // If !ok, the HookNewPortMapper hook is not set (so feature/portmapper + // isn't linked), but the build tag to explicitly omit the portmapper + // isn't set either. This should only happen to js/wasm builds, where + // the portmapper is a no-op even if linked (but it's no longer linked, + // since the move to feature/portmapper), or if people are wiring up + // their own Tailscale build from pieces. } c.netMon = opts.NetMon From 09a2a1048d83ba098c40e05fc01a0c7128e80866 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 28 Oct 2025 14:20:19 -0700 Subject: [PATCH 0619/1093] derp: fix an unchecked error in a test (#17694) Found by staticcheck, the test was calling derphttp.NewClient but not checking its error result before doing other things to it. Updates #cleanup Change-Id: I4ade35a7de7c473571f176e747866bc0ab5774db Signed-off-by: M. J. Fromberger --- derp/derphttp/derphttp_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 76681d4984252..5208481ed7258 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -620,6 +620,9 @@ func TestURLDial(t *testing.T) { } netMon := netmon.NewStatic() c, err := derphttp.NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) + if err != nil { + t.Errorf("NewClient: %v", err) + } defer c.Close() if err := c.Connect(context.Background()); err != nil { From fcb614a53e8e2d5bb76279639d1962e1cb24983a Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 28 Oct 2025 14:48:02 -0700 Subject: [PATCH 0620/1093] cmd/jsonimports: add static analyzer for consistent "json" imports (#17669) This migrates an internal tool to open source so that we can run it on the tailscale.com module as well. We add the "util/safediff" also as a dependency of the tool. This PR does not yet set up a CI to run this analyzer. 
Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- cmd/jsonimports/format.go | 175 +++++++++++++++++++++ cmd/jsonimports/format_test.go | 162 +++++++++++++++++++ cmd/jsonimports/jsonimports.go | 124 +++++++++++++++ util/safediff/diff.go | 280 +++++++++++++++++++++++++++++++++ util/safediff/diff_test.go | 196 +++++++++++++++++++++++ 5 files changed, 937 insertions(+) create mode 100644 cmd/jsonimports/format.go create mode 100644 cmd/jsonimports/format_test.go create mode 100644 cmd/jsonimports/jsonimports.go create mode 100644 util/safediff/diff.go create mode 100644 util/safediff/diff_test.go diff --git a/cmd/jsonimports/format.go b/cmd/jsonimports/format.go new file mode 100644 index 0000000000000..6dbd175583a4d --- /dev/null +++ b/cmd/jsonimports/format.go @@ -0,0 +1,175 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "bytes" + "go/ast" + "go/format" + "go/parser" + "go/token" + "go/types" + "path" + "slices" + "strconv" + "strings" + + "tailscale.com/util/must" +) + +// mustFormatFile formats a Go source file and adjusts "json" imports. +// It panics if there are any parsing errors. +// +// - "encoding/json" is imported under the name "jsonv1" or "jsonv1std" +// - "encoding/json/v2" is rewritten to import "github.com/go-json-experiment/json" instead +// - "encoding/json/jsontext" is rewritten to import "github.com/go-json-experiment/json/jsontext" instead +// - "github.com/go-json-experiment/json" is imported under the name "jsonv2" +// - "github.com/go-json-experiment/json/v1" is imported under the name "jsonv1" +// +// If no changes to the file are made, it returns input. +func mustFormatFile(in []byte) (out []byte) { + fset := token.NewFileSet() + f := must.Get(parser.ParseFile(fset, "", in, parser.ParseComments)) + + // Check for the existence of "json" imports. 
+ jsonImports := make(map[string][]*ast.ImportSpec) + for _, imp := range f.Imports { + switch pkgPath := must.Get(strconv.Unquote(imp.Path.Value)); pkgPath { + case + "encoding/json", + "encoding/json/v2", + "encoding/json/jsontext", + "github.com/go-json-experiment/json", + "github.com/go-json-experiment/json/v1", + "github.com/go-json-experiment/json/jsontext": + jsonImports[pkgPath] = append(jsonImports[pkgPath], imp) + } + } + if len(jsonImports) == 0 { + return in + } + + // Best-effort local type-check of the file + // to resolve local declarations to detect shadowed variables. + typeInfo := &types.Info{Uses: make(map[*ast.Ident]types.Object)} + (&types.Config{ + Error: func(err error) {}, + }).Check("", fset, []*ast.File{f}, typeInfo) + + // Rewrite imports to instead use "github.com/go-json-experiment/json". + // This ensures that code continues to build even if + // goexperiment.jsonv2 is *not* specified. + // As of https://github.com/go-json-experiment/json/pull/186, + // imports to "github.com/go-json-experiment/json" are identical + // to the standard library if built with goexperiment.jsonv2. + for fromPath, toPath := range map[string]string{ + "encoding/json/v2": "github.com/go-json-experiment/json", + "encoding/json/jsontext": "github.com/go-json-experiment/json/jsontext", + } { + for _, imp := range jsonImports[fromPath] { + imp.Path.Value = strconv.Quote(toPath) + jsonImports[toPath] = append(jsonImports[toPath], imp) + } + delete(jsonImports, fromPath) + } + + // While in a transitory state, where both v1 and v2 json imports + // may exist in our codebase, always explicitly import with + // either jsonv1 or jsonv2 in the package name to avoid ambiguities + // when looking at a particular Marshal or Unmarshal call site. 
+ renames := make(map[string]string) // mapping of old names to new names + deletes := make(map[*ast.ImportSpec]bool) // set of imports to delete + for pkgPath, imps := range jsonImports { + var newName string + switch pkgPath { + case "encoding/json": + newName = "jsonv1" + // If "github.com/go-json-experiment/json/v1" is also imported, + // then use jsonv1std for "encoding/json" to avoid a conflict. + if len(jsonImports["github.com/go-json-experiment/json/v1"]) > 0 { + newName += "std" + } + case "github.com/go-json-experiment/json": + newName = "jsonv2" + case "github.com/go-json-experiment/json/v1": + newName = "jsonv1" + } + + // Rename the import if different than expected. + if oldName := importName(imps[0]); oldName != newName && newName != "" { + renames[oldName] = newName + pos := imps[0].Pos() // preserve original positioning + imps[0].Name = ast.NewIdent(newName) + imps[0].Name.NamePos = pos + } + + // For all redundant imports, use the first imported name. + for _, imp := range imps[1:] { + renames[importName(imp)] = importName(imps[0]) + deletes[imp] = true + } + } + if len(deletes) > 0 { + f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool { + return deletes[imp] + }) + for _, decl := range f.Decls { + if genDecl, ok := decl.(*ast.GenDecl); ok && genDecl.Tok == token.IMPORT { + genDecl.Specs = slices.DeleteFunc(genDecl.Specs, func(spec ast.Spec) bool { + return deletes[spec.(*ast.ImportSpec)] + }) + } + } + } + if len(renames) > 0 { + ast.Walk(astVisitor(func(n ast.Node) bool { + if sel, ok := n.(*ast.SelectorExpr); ok { + if id, ok := sel.X.(*ast.Ident); ok { + // Just because the selector looks like "json.Marshal" + // does not mean that it is referencing the "json" package. + // There could be a local "json" declaration that shadows + // the package import. Check partial type information + // to see if there was a local declaration. 
+ if obj, ok := typeInfo.Uses[id]; ok { + if _, ok := obj.(*types.PkgName); !ok { + return true + } + } + + if newName, ok := renames[id.String()]; ok { + id.Name = newName + } + } + } + return true + }), f) + } + + bb := new(bytes.Buffer) + must.Do(format.Node(bb, fset, f)) + return must.Get(format.Source(bb.Bytes())) +} + +// importName is the local package name used for an import. +// If no explicit local name is used, then it uses string parsing +// to derive the package name from the path, relying on the convention +// that the package name is the base name of the package path. +func importName(imp *ast.ImportSpec) string { + if imp.Name != nil { + return imp.Name.String() + } + pkgPath, _ := strconv.Unquote(imp.Path.Value) + pkgPath = strings.TrimRight(pkgPath, "/v0123456789") // exclude version directories + return path.Base(pkgPath) +} + +// astVisitor is a function that implements [ast.Visitor]. +type astVisitor func(ast.Node) bool + +func (f astVisitor) Visit(node ast.Node) ast.Visitor { + if !f(node) { + return nil + } + return f +} diff --git a/cmd/jsonimports/format_test.go b/cmd/jsonimports/format_test.go new file mode 100644 index 0000000000000..28654eb4550ee --- /dev/null +++ b/cmd/jsonimports/format_test.go @@ -0,0 +1,162 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "go/format" + "testing" + + "tailscale.com/util/must" + "tailscale.com/util/safediff" +) + +func TestFormatFile(t *testing.T) { + tests := []struct{ in, want string }{{ + in: `package foobar + + import ( + "encoding/json" + jsonv2exp "github.com/go-json-experiment/json" + ) + + func main() { + json.Marshal() + jsonv2exp.Marshal() + { + var json T // deliberately shadow "json" package name + json.Marshal() // should not be re-written + } + } + `, + want: `package foobar + + import ( + jsonv1 "encoding/json" + jsonv2 "github.com/go-json-experiment/json" + ) + + func main() { + jsonv1.Marshal() + jsonv2.Marshal() + { + 
var json T // deliberately shadow "json" package name + json.Marshal() // should not be re-written + } + } + `, + }, { + in: `package foobar + + import ( + "github.com/go-json-experiment/json" + jsonv2exp "github.com/go-json-experiment/json" + ) + + func main() { + json.Marshal() + jsonv2exp.Marshal() + } + `, + want: `package foobar + import ( + jsonv2 "github.com/go-json-experiment/json" + ) + func main() { + jsonv2.Marshal() + jsonv2.Marshal() + } + `, + }, { + in: `package foobar + import "github.com/go-json-experiment/json/v1" + func main() { + json.Marshal() + } + `, + want: `package foobar + import jsonv1 "github.com/go-json-experiment/json/v1" + func main() { + jsonv1.Marshal() + } + `, + }, { + in: `package foobar + import ( + "encoding/json" + jsonv1in2 "github.com/go-json-experiment/json/v1" + ) + func main() { + json.Marshal() + jsonv1in2.Marshal() + } + `, + want: `package foobar + import ( + jsonv1std "encoding/json" + jsonv1 "github.com/go-json-experiment/json/v1" + ) + func main() { + jsonv1std.Marshal() + jsonv1.Marshal() + } + `, + }, { + in: `package foobar + import ( + "encoding/json" + jsonv1in2 "github.com/go-json-experiment/json/v1" + ) + func main() { + json.Marshal() + jsonv1in2.Marshal() + } + `, + want: `package foobar + import ( + jsonv1std "encoding/json" + jsonv1 "github.com/go-json-experiment/json/v1" + ) + func main() { + jsonv1std.Marshal() + jsonv1.Marshal() + } + `, + }, { + in: `package foobar + import ( + "encoding/json" + j2 "encoding/json/v2" + "encoding/json/jsontext" + ) + func main() { + json.Marshal() + j2.Marshal() + jsontext.NewEncoder + } + `, + want: `package foobar + import ( + jsonv1 "encoding/json" + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + ) + func main() { + jsonv1.Marshal() + jsonv2.Marshal() + jsontext.NewEncoder + } + `, + }} + for _, tt := range tests { + got := string(must.Get(format.Source([]byte(tt.in)))) + got = string(mustFormatFile([]byte(got))) + want 
:= string(must.Get(format.Source([]byte(tt.want)))) + if got != want { + diff, _ := safediff.Lines(got, want, -1) + t.Errorf("mismatch (-got +want)\n%s", diff) + t.Error(got) + t.Error(want) + } + } +} diff --git a/cmd/jsonimports/jsonimports.go b/cmd/jsonimports/jsonimports.go new file mode 100644 index 0000000000000..4be2e10cbe091 --- /dev/null +++ b/cmd/jsonimports/jsonimports.go @@ -0,0 +1,124 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The jsonimports tool formats all Go source files in the repository +// to enforce that "json" imports are consistent. +// +// With Go 1.25, the "encoding/json/v2" and "encoding/json/jsontext" +// packages are now available under goexperiment.jsonv2. +// This leads to possible confusion over the following: +// +// - "encoding/json" +// - "encoding/json/v2" +// - "encoding/json/jsontext" +// - "github.com/go-json-experiment/json/v1" +// - "github.com/go-json-experiment/json" +// - "github.com/go-json-experiment/json/jsontext" +// +// In order to enforce consistent usage, we apply the following rules: +// +// - Until the Go standard library formally accepts "encoding/json/v2" +// and "encoding/json/jsontext" into the standard library +// (i.e., they are no longer considered experimental), +// we forbid any code from directly importing those packages. +// Go code should instead import "github.com/go-json-experiment/json" +// and "github.com/go-json-experiment/json/jsontext". +// The latter packages contain aliases to the standard library +// if built on Go 1.25 with the goexperiment.jsonv2 tag specified. +// +// - Imports of "encoding/json" or "github.com/go-json-experiment/json/v1" +// must be explicitly imported under the package name "jsonv1". +// If both packages need to be imported, then the former should +// be imported under the package name "jsonv1std". +// +// - Imports of "github.com/go-json-experiment/json" +// must be explicitly imported under the package name "jsonv2". 
+// +// The latter two rules exist to provide clarity when reading code. +// Without them, it is unclear whether "json.Marshal" refers to v1 or v2. +// With them, however, it is clear that "jsonv1.Marshal" is calling v1 and +// that "jsonv2.Marshal" is calling v2. +// +// TODO(@joetsai): At this present moment, there is no guidance given on +// whether to use v1 or v2 for newly written Go source code. +// I will write a document in the near future providing more guidance. +// Feel free to continue using v1 "encoding/json" as you are accustomed to. +package main + +import ( + "bytes" + "flag" + "fmt" + "os" + "os/exec" + "runtime" + "strings" + "sync" + + "tailscale.com/syncs" + "tailscale.com/util/must" + "tailscale.com/util/safediff" +) + +func main() { + update := flag.Bool("update", false, "update all Go source files") + flag.Parse() + + // Change working directory to Git repository root. + repoRoot := strings.TrimSuffix(string(must.Get(exec.Command( + "git", "rev-parse", "--show-toplevel", + ).Output())), "\n") + must.Do(os.Chdir(repoRoot)) + + // Iterate over all indexed files in the Git repository. + var printMu sync.Mutex + var group sync.WaitGroup + sema := syncs.NewSemaphore(runtime.NumCPU()) + var numDiffs int + files := string(must.Get(exec.Command("git", "ls-files").Output())) + for file := range strings.Lines(files) { + sema.Acquire() + group.Go(func() { + defer sema.Release() + + // Ignore non-Go source files. + file = strings.TrimSuffix(file, "\n") + if !strings.HasSuffix(file, ".go") { + return + } + + // Format all "json" imports in the Go source file. + srcIn := must.Get(os.ReadFile(file)) + srcOut := mustFormatFile(srcIn) + + // Print differences with each formatted file. 
+ if !bytes.Equal(srcIn, srcOut) { + numDiffs++ + + printMu.Lock() + fmt.Println(file) + lines, _ := safediff.Lines(string(srcIn), string(srcOut), -1) + for line := range strings.Lines(lines) { + fmt.Print("\t", line) + } + fmt.Println() + printMu.Unlock() + + // If -update is specified, write out the changes. + if *update { + mode := must.Get(os.Stat(file)).Mode() + must.Do(os.WriteFile(file, srcOut, mode)) + } + } + }) + } + group.Wait() + + // Report whether any differences were detected. + if numDiffs > 0 && !*update { + fmt.Printf(`%d files with "json" imports that need formatting`+"\n", numDiffs) + fmt.Println("Please run:") + fmt.Println("\t./tool/go run tailscale.com/cmd/jsonimports -update") + os.Exit(1) + } +} diff --git a/util/safediff/diff.go b/util/safediff/diff.go new file mode 100644 index 0000000000000..cf8add94b21dd --- /dev/null +++ b/util/safediff/diff.go @@ -0,0 +1,280 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package safediff computes the difference between two lists. +// +// It is guaranteed to run in O(n), but may not produce an optimal diff. +// Most diffing algorithms produce optimal diffs but run in O(n²). +// It is safe to pass in untrusted input. +package safediff + +import ( + "bytes" + "fmt" + "math" + "strings" + "unicode" + + "github.com/google/go-cmp/cmp" +) + +var diffTest = false + +// Lines constructs a humanly readable line-by-line diff from x to y. +// The output (if multiple lines) is guaranteed to be no larger than maxSize, +// by truncating the output if necessary. A negative maxSize enforces no limit. 
+// +// Example diff: +// +// … 440 identical lines +// "ssh": [ +// … 35 identical lines +// { +// - "src": ["maisem@tailscale.com"], +// - "dst": ["tag:maisem-test"], +// - "users": ["maisem", "root"], +// - "action": "check", +// - // "recorder": ["100.12.34.56:80"], +// + "src": ["maisem@tailscale.com"], +// + "dst": ["tag:maisem-test"], +// + "users": ["maisem", "root"], +// + "action": "check", +// + "recorder": ["node:recorder-2"], +// }, +// … 77 identical lines +// ], +// … 345 identical lines +// +// Meaning of each line prefix: +// +// - '…' precedes a summary statement +// - ' ' precedes an identical line printed for context +// - '-' precedes a line removed from x +// - '+' precedes a line inserted from y +// +// The diffing algorithm runs in O(n) and is safe to use with untrusted inputs. +func Lines(x, y string, maxSize int) (out string, truncated bool) { + // Convert x and y into a slice of lines and compute the edit-script. + xs := strings.Split(x, "\n") + ys := strings.Split(y, "\n") + es := diffStrings(xs, ys) + + // Modify the edit-script to support printing identical lines of context. + const identicalContext edit = '*' // special edit code to indicate printed line + var xi, yi int // index into xs or ys + isIdentical := func(e edit) bool { return e == identical || e == identicalContext } + indentOf := func(s string) string { return s[:len(s)-len(strings.TrimLeftFunc(s, unicode.IsSpace))] } + for i, e := range es { + if isIdentical(e) { + // Print current line if adjacent symbols are non-identical. + switch { + case i-1 >= 0 && !isIdentical(es[i-1]): + es[i] = identicalContext + case i+1 < len(es) && !isIdentical(es[i+1]): + es[i] = identicalContext + } + } else { + // Print any preceding or succeeding lines, + // where the leading indent is a prefix of the current indent. + // Indentation often indicates a parent-child relationship + // in structured source code. 
+ addParents := func(ss []string, si, direction int) { + childIndent := indentOf(ss[si]) + for j := direction; i+j >= 0 && i+j < len(es) && isIdentical(es[i+j]); j += direction { + parentIndent := indentOf(ss[si+j]) + if strings.HasPrefix(childIndent, parentIndent) && len(parentIndent) < len(childIndent) && parentIndent != "" { + es[i+j] = identicalContext + childIndent = parentIndent + } + } + } + switch e { + case removed, modified: // arbitrarily use the x value for modified values + addParents(xs, xi, -1) + addParents(xs, xi, +1) + case inserted: + addParents(ys, yi, -1) + addParents(ys, yi, +1) + } + } + if e != inserted { + xi++ + } + if e != removed { + yi++ + } + } + + // Show the line for a single hidden identical line, + // since it occupies the same vertical height. + for i, e := range es { + if e == identical { + prevNotIdentical := i-1 < 0 || es[i-1] != identical + nextNotIdentical := i+1 >= len(es) || es[i+1] != identical + if prevNotIdentical && nextNotIdentical { + es[i] = identicalContext + } + } + } + + // Adjust the maxSize, reserving space for the final summary. + if maxSize < 0 { + maxSize = math.MaxInt + } + maxSize -= len(stats{len(xs) + len(ys), len(xs), len(ys)}.appendText(nil)) + + // mayAppendLine appends a line if it does not exceed maxSize. + // Otherwise, it just updates prevStats. + var buf []byte + var prevStats stats + mayAppendLine := func(edit edit, line string) { + // Append the stats (if non-zero) and the line text. + // The stats reports the number of preceding identical lines. + if !truncated { + bufLen := len(buf) // original length (in case we exceed maxSize) + if !prevStats.isZero() { + buf = prevStats.appendText(buf) + prevStats = stats{} // just printed, so clear the stats + } + buf = fmt.Appendf(buf, "%c %s\n", edit, line) + truncated = len(buf) > maxSize + if !truncated { + return + } + buf = buf[:bufLen] // restore original buffer contents + } + + // Output is truncated, so just update the statistics. 
+ switch edit { + case identical: + prevStats.numIdentical++ + case removed: + prevStats.numRemoved++ + case inserted: + prevStats.numInserted++ + } + } + + // Process the entire edit script. + for len(es) > 0 { + num := len(es) - len(bytes.TrimLeft(es, string(es[:1]))) + switch es[0] { + case identical: + prevStats.numIdentical += num + xs, ys = xs[num:], ys[num:] + case identicalContext: + for n := len(xs) - num; len(xs) > n; xs, ys = xs[1:], ys[1:] { + mayAppendLine(identical, xs[0]) // implies xs[0] == ys[0] + } + case modified: + for n := len(xs) - num; len(xs) > n; xs = xs[1:] { + mayAppendLine(removed, xs[0]) + } + for n := len(ys) - num; len(ys) > n; ys = ys[1:] { + mayAppendLine(inserted, ys[0]) + } + case removed: + for n := len(xs) - num; len(xs) > n; xs = xs[1:] { + mayAppendLine(removed, xs[0]) + } + case inserted: + for n := len(ys) - num; len(ys) > n; ys = ys[1:] { + mayAppendLine(inserted, ys[0]) + } + } + es = es[num:] + } + if len(xs)+len(ys)+len(es) > 0 { + panic("BUG: slices not fully consumed") + } + + if !prevStats.isZero() { + buf = prevStats.appendText(buf) // may exceed maxSize + } + return string(buf), truncated +} + +type stats struct{ numIdentical, numRemoved, numInserted int } + +func (s stats) isZero() bool { return s.numIdentical+s.numRemoved+s.numInserted == 0 } + +func (s stats) appendText(b []byte) []byte { + switch { + case s.numIdentical > 0 && s.numRemoved > 0 && s.numInserted > 0: + return fmt.Appendf(b, "… %d identical, %d removed, and %d inserted lines\n", s.numIdentical, s.numRemoved, s.numInserted) + case s.numIdentical > 0 && s.numRemoved > 0: + return fmt.Appendf(b, "… %d identical and %d removed lines\n", s.numIdentical, s.numRemoved) + case s.numIdentical > 0 && s.numInserted > 0: + return fmt.Appendf(b, "… %d identical and %d inserted lines\n", s.numIdentical, s.numInserted) + case s.numRemoved > 0 && s.numInserted > 0: + return fmt.Appendf(b, "… %d removed and %d inserted lines\n", s.numRemoved, s.numInserted) + case 
s.numIdentical > 0: + return fmt.Appendf(b, "… %d identical lines\n", s.numIdentical) + case s.numRemoved > 0: + return fmt.Appendf(b, "… %d removed lines\n", s.numRemoved) + case s.numInserted > 0: + return fmt.Appendf(b, "… %d inserted lines\n", s.numInserted) + default: + return fmt.Appendf(b, "…\n") + } +} + +// diffStrings computes an edit-script of two slices of strings. +// +// This calls cmp.Equal to access the "github.com/go-cmp/cmp/internal/diff" +// implementation, which has an O(N) diffing algorithm. It is not guaranteed +// to produce an optimal edit-script, but protects our runtime against +// adversarial inputs that would wreck the optimal O(N²) algorithm used by +// most diffing packages available in open-source. +// +// TODO(https://go.dev/issue/58893): Use "golang.org/x/tools/diff" instead? +func diffStrings(xs, ys []string) []edit { + d := new(diffRecorder) + cmp.Equal(xs, ys, cmp.Reporter(d)) + if diffTest { + numRemoved := bytes.Count(d.script, []byte{removed}) + numInserted := bytes.Count(d.script, []byte{inserted}) + if len(xs) != len(d.script)-numInserted || len(ys) != len(d.script)-numRemoved { + panic("BUG: edit-script is inconsistent") + } + } + return d.script +} + +type edit = byte + +const ( + identical edit = ' ' // equal symbol in both x and y + modified edit = '~' // modified symbol in both x and y + removed edit = '-' // removed symbol from x + inserted edit = '+' // inserted symbol from y +) + +// diffRecorder reproduces an edit-script, essentially recording +// the edit-script from "github.com/google/go-cmp/cmp/internal/diff". +// This implements the cmp.Reporter interface. 
+type diffRecorder struct { + last cmp.PathStep + script []edit +} + +func (d *diffRecorder) PushStep(ps cmp.PathStep) { d.last = ps } + +func (d *diffRecorder) Report(rs cmp.Result) { + if si, ok := d.last.(cmp.SliceIndex); ok { + if rs.Equal() { + d.script = append(d.script, identical) + } else { + switch xi, yi := si.SplitKeys(); { + case xi >= 0 && yi >= 0: + d.script = append(d.script, modified) + case xi >= 0: + d.script = append(d.script, removed) + case yi >= 0: + d.script = append(d.script, inserted) + } + } + } +} + +func (d *diffRecorder) PopStep() { d.last = nil } diff --git a/util/safediff/diff_test.go b/util/safediff/diff_test.go new file mode 100644 index 0000000000000..e580bd9222dd9 --- /dev/null +++ b/util/safediff/diff_test.go @@ -0,0 +1,196 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package safediff + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func init() { diffTest = true } + +func TestLines(t *testing.T) { + // The diffs shown below technically depend on the stability of cmp, + // but that should be fine for sufficiently simple diffs like these. + // If the output does change, that would suggest a significant regression + // in the optimality of cmp's diffing algorithm. 
+ + x := `{ + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": { + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100" + }, + "phoneNumbers": [{ + "type": "home", + "number": "212 555-1234" + }, { + "type": "office", + "number": "646 555-4567" + }], + "children": [ + "Catherine", + "Thomas", + "Trevor" + ], + "spouse": null +}` + y := x + y = strings.ReplaceAll(y, `"New York"`, `"Los Angeles"`) + y = strings.ReplaceAll(y, `"NY"`, `"CA"`) + y = strings.ReplaceAll(y, `"646 555-4567"`, `"315 252-8888"`) + + wantDiff := ` +… 5 identical lines + "address": { + "streetAddress": "21 2nd Street", +- "city": "New York", +- "state": "NY", ++ "city": "Los Angeles", ++ "state": "CA", + "postalCode": "10021-3100" + }, +… 3 identical lines + }, { + "type": "office", +- "number": "646 555-4567" ++ "number": "315 252-8888" + }], +… 7 identical lines +`[1:] + gotDiff, gotTrunc := Lines(x, y, -1) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == true { + t.Errorf("Lines: output unexpectedly truncated") + } + + wantDiff = ` +… 5 identical lines + "address": { + "streetAddress": "21 2nd Street", +- "city": "New York", +- "state": "NY", ++ "city": "Los Angeles", +… 15 identical, 1 removed, and 2 inserted lines +`[1:] + gotDiff, gotTrunc = Lines(x, y, 200) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == false { + t.Errorf("Lines: output unexpectedly not truncated") + } + + wantDiff = "… 17 identical, 3 removed, and 3 inserted lines\n" + gotDiff, gotTrunc = Lines(x, y, 0) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == false { + t.Errorf("Lines: output 
unexpectedly not truncated") + } + + x = `{ + "unrelated": [ + "unrelated", + ], + "related": { + "unrelated": [ + "unrelated", + ], + "related": { + "unrelated": [ + "unrelated", + ], + "related": { + "related": "changed", + }, + "unrelated": [ + "unrelated", + ], + }, + "unrelated": [ + "unrelated", + ], + }, + "unrelated": [ + "unrelated", + ], +}` + y = strings.ReplaceAll(x, "changed", "CHANGED") + + wantDiff = ` +… 4 identical lines + "related": { +… 3 identical lines + "related": { +… 3 identical lines + "related": { +- "related": "changed", ++ "related": "CHANGED", + }, +… 3 identical lines + }, +… 3 identical lines + }, +… 4 identical lines +`[1:] + gotDiff, gotTrunc = Lines(x, y, -1) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == true { + t.Errorf("Lines: output unexpectedly truncated") + } + + x = `{ + "ACLs": [ + { + "Action": "accept", + "Users": ["group:all"], + "Ports": ["tag:tmemes:80"], + }, + ], +}` + y = strings.ReplaceAll(x, "tag:tmemes:80", "tag:tmemes:80,8383") + wantDiff = ` + { + "ACLs": [ + { + "Action": "accept", + "Users": ["group:all"], +- "Ports": ["tag:tmemes:80"], ++ "Ports": ["tag:tmemes:80,8383"], + }, + ], + } +`[1:] + gotDiff, gotTrunc = Lines(x, y, -1) + if d := cmp.Diff(gotDiff, wantDiff); d != "" { + t.Errorf("Lines mismatch (-got +want):\n%s\ngot:\n%s\nwant:\n%s", d, gotDiff, wantDiff) + } else if gotTrunc == true { + t.Errorf("Lines: output unexpectedly truncated") + } +} + +func FuzzDiff(f *testing.F) { + f.Fuzz(func(t *testing.T, x, y string, maxSize int) { + const maxInput = 1e3 + if len(x) > maxInput { + x = x[:maxInput] + } + if len(y) > maxInput { + y = y[:maxInput] + } + diff, _ := Lines(x, y, maxSize) // make sure this does not panic + if strings.Count(diff, "\n") > 1 && maxSize >= 0 && len(diff) > maxSize { + t.Fatal("maxSize exceeded") + } + }) +} From 478342a642af49278237e74b994484c107b780d2 Mon Sep 17 
00:00:00 2001 From: Joe Tsai Date: Tue, 28 Oct 2025 14:48:37 -0700 Subject: [PATCH 0621/1093] wgengine/netlog: embed node information in network flow logs (#17668) This rewrites the netlog package to support embedding node information in network flow logs. Some bit of complexity comes in trying to pre-compute the expected size of the log message after JSON serialization to ensure that we can respect maximum body limits in log uploading. We also fix a bug in tstun, where we were recording the IP address after SNAT, which was resulting in non-sensible connection flows being logged. Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- net/tstun/wrap.go | 10 +- types/netlogtype/netlogtype.go | 47 +++- wgengine/netlog/netlog.go | 461 ++++++++++++++++++++++++--------- wgengine/netlog/netlog_omit.go | 9 +- wgengine/netlog/netlog_test.go | 236 +++++++++++++++++ wgengine/netlog/record.go | 196 ++++++++++++++ wgengine/netlog/record_test.go | 255 ++++++++++++++++++ wgengine/netlog/stats.go | 222 ---------------- wgengine/netlog/stats_test.go | 235 ----------------- wgengine/userspace.go | 5 +- 10 files changed, 1085 insertions(+), 591 deletions(-) create mode 100644 wgengine/netlog/netlog_test.go create mode 100644 wgengine/netlog/record.go create mode 100644 wgengine/netlog/record_test.go delete mode 100644 wgengine/netlog/stats.go delete mode 100644 wgengine/netlog/stats_test.go diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 70cc7118ac208..db4f689bf33d0 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -967,6 +967,11 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { continue } } + if buildfeatures.HasNetLog { + if update := t.connCounter.Load(); update != nil { + updateConnCounter(update, p.Buffer(), false) + } + } // Make sure to do SNAT after filtering, so that any flow tracking in // the filter sees the original source address. See #12133. 
@@ -976,11 +981,6 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { panic(fmt.Sprintf("short copy: %d != %d", n, len(data)-res.dataOffset)) } sizes[buffsPos] = n - if buildfeatures.HasNetLog { - if update := t.connCounter.Load(); update != nil { - updateConnCounter(update, p.Buffer(), false) - } - } buffsPos++ } if buffsGRO != nil { diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index a29ea6f03dffa..86d645b354f08 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -21,6 +21,9 @@ type Message struct { Start time.Time `json:"start"` // inclusive End time.Time `json:"end"` // inclusive + SrcNode Node `json:"srcNode,omitzero"` + DstNodes []Node `json:"dstNodes,omitempty"` + VirtualTraffic []ConnectionCounts `json:"virtualTraffic,omitempty"` SubnetTraffic []ConnectionCounts `json:"subnetTraffic,omitempty"` ExitTraffic []ConnectionCounts `json:"exitTraffic,omitempty"` @@ -28,14 +31,30 @@ type Message struct { } const ( - messageJSON = `{"nodeId":"n0123456789abcdefCNTRL",` + maxJSONTimeRange + `,` + minJSONTraffic + `}` + messageJSON = `{"nodeId":` + maxJSONStableID + `,` + minJSONNodes + `,` + maxJSONTimeRange + `,` + minJSONTraffic + `}` + maxJSONStableID = `"n0123456789abcdefCNTRL"` + minJSONNodes = `"srcNode":{},"dstNodes":[]` maxJSONTimeRange = `"start":` + maxJSONRFC3339 + `,"end":` + maxJSONRFC3339 maxJSONRFC3339 = `"0001-01-01T00:00:00.000000000Z"` minJSONTraffic = `"virtualTraffic":{},"subnetTraffic":{},"exitTraffic":{},"physicalTraffic":{}` - // MaxMessageJSONSize is the overhead size of Message when it is - // serialized as JSON assuming that each traffic map is populated. - MaxMessageJSONSize = len(messageJSON) + // MinMessageJSONSize is the overhead size of Message when it is + // serialized as JSON assuming that each field is minimally populated. + // Each [Node] occupies at least [MinNodeJSONSize]. 
+ // Each [ConnectionCounts] occupies at most [MaxConnectionCountsJSONSize]. + MinMessageJSONSize = len(messageJSON) + + nodeJSON = `{"nodeId":` + maxJSONStableID + `,"name":"","addresses":` + maxJSONAddrs + `,"user":"","tags":[]}` + maxJSONAddrV4 = `"255.255.255.255"` + maxJSONAddrV6 = `"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"` + maxJSONAddrs = `[` + maxJSONAddrV4 + `,` + maxJSONAddrV6 + `]` + + // MinNodeJSONSize is the overhead size of Node when it is + // serialized as JSON assuming that each field is minimally populated. + // It does not account for bytes occupied by + // [Node.Name], [Node.User], or [Node.Tags]. The [Node.Addresses] + // is assumed to contain a pair of IPv4 and IPv6 address. + MinNodeJSONSize = len(nodeJSON) maxJSONConnCounts = `{` + maxJSONConn + `,` + maxJSONCounts + `}` maxJSONConn = `"proto":` + maxJSONProto + `,"src":` + maxJSONAddrPort + `,"dst":` + maxJSONAddrPort @@ -52,6 +71,26 @@ const ( MaxConnectionCountsJSONSize = len(maxJSONConnCounts) ) +// Node is information about a node. +type Node struct { + // NodeID is the stable ID of the node. + NodeID tailcfg.StableNodeID `json:"nodeId"` + + // Name is the fully-qualified name of the node. + Name string `json:"name,omitzero"` // e.g., "carbonite.example.ts.net" + + // Addresses are the Tailscale IP addresses of the node. + Addresses []netip.Addr `json:"addresses,omitempty"` + + // User is the user that owns the node. + // It is not populated if the node is tagged. + User string `json:"user,omitzero"` // e.g., "johndoe@example.com" + + // Tags are the tags of the node. + // It is not populated if the node is owned by a user. + Tags []string `json:"tags,omitempty"` // e.g., ["tag:prod","tag:logs"] +} + // ConnectionCounts is a flattened struct of both a connection and counts. 
type ConnectionCounts struct { Connection diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 2984df99471b6..9809d1ce65326 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -10,8 +10,6 @@ package netlog import ( "cmp" "context" - "encoding/json" - "errors" "fmt" "io" "log" @@ -26,12 +24,18 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" - "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" + "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netlogfunc" "tailscale.com/types/netlogtype" + "tailscale.com/types/netmap" "tailscale.com/util/eventbus" + "tailscale.com/util/set" "tailscale.com/wgengine/router" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" ) // pollPeriod specifies how often to poll for network traffic. @@ -49,25 +53,38 @@ func (noopDevice) SetConnectionCounter(netlogfunc.ConnectionCounter) {} // Logger logs statistics about every connection. // At present, it only logs connections within a tailscale network. -// Exit node traffic is not logged for privacy reasons. +// By default, exit node traffic is not logged for privacy reasons +// unless the Tailnet administrator opts-into explicit logging. // The zero value is ready for use. type Logger struct { - mu sync.Mutex // protects all fields below + mu sync.Mutex // protects all fields below + logf logger.Logf + + // shutdownLocked shuts down the logger. + // The mutex must be held when calling. 
+ shutdownLocked func(context.Context) error - logger *logtail.Logger - stats *statistics - tun Device - sock Device + record record // the current record of network connection flows + recordLen int // upper bound on JSON length of record + recordsChan chan record // set to nil when shutdown + flushTimer *time.Timer // fires when record should flush to recordsChan - addrs map[netip.Addr]bool - prefixes map[netip.Prefix]bool + // Information about Tailscale nodes. + // These are read-only once updated by ReconfigNetworkMap. + selfNode nodeUser + allNodes map[netip.Addr]nodeUser // includes selfNode; nodeUser values are always valid + + // Information about routes. + // These are read-only once updated by ReconfigRoutes. + routeAddrs set.Set[netip.Addr] + routePrefixes []netip.Prefix } // Running reports whether the logger is running. func (nl *Logger) Running() bool { nl.mu.Lock() defer nl.mu.Unlock() - return nl.logger != nil + return nl.shutdownLocked != nil } var testClient *http.Client @@ -75,9 +92,9 @@ var testClient *http.Client // Startup starts an asynchronous network logger that monitors // statistics for the provided tun and/or sock device. // -// The tun Device captures packets within the tailscale network, -// where at least one address is a tailscale IP address. -// The source is always from the perspective of the current node. +// The tun [Device] captures packets within the tailscale network, +// where at least one address is usually a tailscale IP address. +// The source is usually from the perspective of the current node. // If one of the other endpoint is not a tailscale IP address, // then it suggests the use of a subnet router or exit node. // For example, when using a subnet router, the source address is @@ -89,28 +106,33 @@ var testClient *http.Client // In this case, the node acting as a subnet router is acting on behalf // of some remote endpoint within the subnet range. 
// The tun is used to populate the VirtualTraffic, SubnetTraffic, -// and ExitTraffic fields in Message. +// and ExitTraffic fields in [netlogtype.Message]. // -// The sock Device captures packets at the magicsock layer. +// The sock [Device] captures packets at the magicsock layer. // The source is always a tailscale IP address and the destination // is a non-tailscale IP address to contact for that particular tailscale node. // The IP protocol and source port are always zero. -// The sock is used to populated the PhysicalTraffic field in Message. +// The sock is used to populated the PhysicalTraffic field in [netlogtype.Message]. +// // The netMon parameter is optional; if non-nil it's used to do faster interface lookups. -func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, logExitFlowEnabledEnabled bool) error { +func (nl *Logger) Startup(logf logger.Logf, nm *netmap.NetworkMap, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, logExitFlowEnabledEnabled bool) error { nl.mu.Lock() defer nl.mu.Unlock() - if nl.logger != nil { - return fmt.Errorf("network logger already running for %v", nl.logger.PrivateID().Public()) + + if nl.shutdownLocked != nil { + return fmt.Errorf("network logger already running") } + nl.selfNode, nl.allNodes = makeNodeMaps(nm) // Startup a log stream to Tailscale's logging service. 
- logf := log.Printf + if logf == nil { + logf = log.Printf + } httpc := &http.Client{Transport: logpolicy.NewLogtailTransport(logtail.DefaultHost, netMon, health, logf)} if testClient != nil { httpc = testClient } - nl.logger = logtail.NewLogger(logtail.Config{ + logger := logtail.NewLogger(logtail.Config{ Collection: "tailtraffic.log.tailscale.io", PrivateID: nodeLogID, CopyPrivateID: domainLogID, @@ -124,108 +146,311 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo IncludeProcID: true, IncludeProcSequence: true, }, logf) - nl.logger.SetSockstatsLabel(sockstats.LabelNetlogLogger) - - // Startup a data structure to track per-connection statistics. - // There is a maximum size for individual log messages that logtail - // can upload to the Tailscale log service, so stay below this limit. - const maxLogSize = 256 << 10 - const maxConns = (maxLogSize - netlogtype.MaxMessageJSONSize) / netlogtype.MaxConnectionCountsJSONSize - nl.stats = newStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - nl.mu.Lock() - addrs := nl.addrs - prefixes := nl.prefixes - nl.mu.Unlock() - recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabledEnabled) - }) + logger.SetSockstatsLabel(sockstats.LabelNetlogLogger) // Register the connection tracker into the TUN device. - nl.tun = cmp.Or[Device](tun, noopDevice{}) - nl.tun.SetConnectionCounter(nl.stats.UpdateVirtual) + tun = cmp.Or[Device](tun, noopDevice{}) + tun.SetConnectionCounter(nl.updateVirtConn) // Register the connection tracker into magicsock. - nl.sock = cmp.Or[Device](sock, noopDevice{}) - nl.sock.SetConnectionCounter(nl.stats.UpdatePhysical) + sock = cmp.Or[Device](sock, noopDevice{}) + sock.SetConnectionCounter(nl.updatePhysConn) + + // Startup a goroutine to record log messages. 
+ // This is done asynchronously so that the cost of serializing + // the network flow log message never stalls processing of packets. + nl.record = record{} + nl.recordLen = 0 + nl.recordsChan = make(chan record, 100) + recorderDone := make(chan struct{}) + go func(recordsChan chan record) { + defer close(recorderDone) + for rec := range recordsChan { + msg := rec.toMessage(false, !logExitFlowEnabledEnabled) + if b, err := jsonv2.Marshal(msg, jsontext.AllowInvalidUTF8(true)); err != nil { + if nl.logf != nil { + nl.logf("netlog: json.Marshal error: %v", err) + } + } else { + logger.Logf("%s", b) + } + } + }(nl.recordsChan) + + // Register the mechanism for shutting down. + nl.shutdownLocked = func(ctx context.Context) error { + tun.SetConnectionCounter(nil) + sock.SetConnectionCounter(nil) + + // Flush and process all pending records. + nl.flushRecordLocked() + close(nl.recordsChan) + nl.recordsChan = nil + <-recorderDone + recorderDone = nil + + // Try to upload all pending records. + err := logger.Shutdown(ctx) + + // Purge state. + nl.shutdownLocked = nil + nl.selfNode = nodeUser{} + nl.allNodes = nil + nl.routeAddrs = nil + nl.routePrefixes = nil + + return err + } return nil } -func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connStats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool, logExitFlowEnabled bool) { - m := netlogtype.Message{NodeID: nodeID, Start: start.UTC(), End: end.UTC()} - - classifyAddr := func(a netip.Addr) (isTailscale, withinRoute bool) { - // NOTE: There could be mis-classifications where an address is treated - // as a Tailscale IP address because the subnet range overlaps with - // the subnet range that Tailscale IP addresses are allocated from. - // This should never happen for IPv6, but could happen for IPv4. 
- withinRoute = addrs[a] - for p := range prefixes { - if p.Contains(a) && p.Bits() > 0 { - withinRoute = true - break - } - } - return withinRoute && tsaddr.IsTailscaleIP(a), withinRoute && !tsaddr.IsTailscaleIP(a) +var ( + tailscaleServiceIPv4 = tsaddr.TailscaleServiceIP() + tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() +) + +func (nl *Logger) updateVirtConn(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) { + // Network logging is defined as traffic between two Tailscale nodes. + // Traffic with the internal Tailscale service is not with another node + // and should not be logged. It also happens to be a high volume + // amount of discrete traffic flows (e.g., DNS lookups). + switch dst.Addr() { + case tailscaleServiceIPv4, tailscaleServiceIPv6: + return } - exitTraffic := make(map[netlogtype.Connection]netlogtype.Counts) - for conn, cnts := range connStats { - srcIsTailscaleIP, srcWithinSubnet := classifyAddr(conn.Src.Addr()) - dstIsTailscaleIP, dstWithinSubnet := classifyAddr(conn.Dst.Addr()) - switch { - case srcIsTailscaleIP && dstIsTailscaleIP: - m.VirtualTraffic = append(m.VirtualTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) - case srcWithinSubnet || dstWithinSubnet: - m.SubnetTraffic = append(m.SubnetTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) - default: - const anonymize = true - if anonymize && !logExitFlowEnabled { - // Only preserve the address if it is a Tailscale IP address. - srcOrig, dstOrig := conn.Src, conn.Dst - conn = netlogtype.Connection{} // scrub everything by default - if srcIsTailscaleIP { - conn.Src = netip.AddrPortFrom(srcOrig.Addr(), 0) - } - if dstIsTailscaleIP { - conn.Dst = netip.AddrPortFrom(dstOrig.Addr(), 0) - } - } - exitTraffic[conn] = exitTraffic[conn].Add(cnts) + nl.mu.Lock() + defer nl.mu.Unlock() + + // Lookup the connection and increment the counts. 
+ nl.initRecordLocked() + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} + cnts, found := nl.record.virtConns[conn] + if !found { + cnts.connType = nl.addNewVirtConnLocked(conn) + } + if recv { + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) + } else { + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) + } + nl.record.virtConns[conn] = cnts +} + +// addNewVirtConnLocked adds the first insertion of a physical connection. +// The [Logger.mu] must be held. +func (nl *Logger) addNewVirtConnLocked(c netlogtype.Connection) connType { + // Check whether this is the first insertion of the src and dst node. + // If so, compute the additional JSON bytes that would be added + // to the record for the node information. + var srcNodeLen, dstNodeLen int + srcNode, srcSeen := nl.record.seenNodes[c.Src.Addr()] + if !srcSeen { + srcNode = nl.allNodes[c.Src.Addr()] + if srcNode.Valid() { + srcNodeLen = srcNode.jsonLen() } } - for conn, cnts := range exitTraffic { - m.ExitTraffic = append(m.ExitTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) + dstNode, dstSeen := nl.record.seenNodes[c.Dst.Addr()] + if !dstSeen { + dstNode = nl.allNodes[c.Dst.Addr()] + if dstNode.Valid() { + dstNodeLen = dstNode.jsonLen() + } + } + + // Check whether the additional [netlogtype.ConnectionCounts] + // and [netlogtype.Node] information would exceed [maxLogSize]. + if nl.recordLen+netlogtype.MaxConnectionCountsJSONSize+srcNodeLen+dstNodeLen > maxLogSize { + nl.flushRecordLocked() + nl.initRecordLocked() + } + + // Insert newly seen src and/or dst nodes. 
+ if !srcSeen && srcNode.Valid() { + nl.record.seenNodes[c.Src.Addr()] = srcNode } - for conn, cnts := range sockStats { - m.PhysicalTraffic = append(m.PhysicalTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) + if !dstSeen && dstNode.Valid() { + nl.record.seenNodes[c.Dst.Addr()] = dstNode } + nl.recordLen += netlogtype.MaxConnectionCountsJSONSize + srcNodeLen + dstNodeLen - if len(m.VirtualTraffic)+len(m.SubnetTraffic)+len(m.ExitTraffic)+len(m.PhysicalTraffic) > 0 { - if b, err := json.Marshal(m); err != nil { - logger.Logf("json.Marshal error: %v", err) + // Classify the traffic type. + var srcIsSelfNode bool + if nl.selfNode.Valid() { + srcIsSelfNode = nl.selfNode.Addresses().ContainsFunc(func(p netip.Prefix) bool { + return c.Src.Addr() == p.Addr() && p.IsSingleIP() + }) + } + switch { + case srcIsSelfNode && dstNode.Valid(): + return virtualTraffic + case srcIsSelfNode: + // TODO: Should we swap src for the node serving as the proxy? + // It is relatively useless always using the self IP address. + if nl.withinRoutesLocked(c.Dst.Addr()) { + return subnetTraffic // a client using another subnet router } else { - logger.Logf("%s", b) + return exitTraffic // a client using exit an exit node } + case dstNode.Valid(): + if nl.withinRoutesLocked(c.Src.Addr()) { + return subnetTraffic // serving as a subnet router + } else { + return exitTraffic // serving as an exit node + } + default: + return unknownTraffic + } +} + +func (nl *Logger) updatePhysConn(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) { + nl.mu.Lock() + defer nl.mu.Unlock() + + // Lookup the connection and increment the counts. 
+ nl.initRecordLocked() + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} + cnts, found := nl.record.physConns[conn] + if !found { + nl.addNewPhysConnLocked(conn) + } + if recv { + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) + } else { + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) } + nl.record.physConns[conn] = cnts } -func makeRouteMaps(cfg *router.Config) (addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool) { - addrs = make(map[netip.Addr]bool) - for _, p := range cfg.LocalAddrs { - if p.IsSingleIP() { - addrs[p.Addr()] = true +// addNewPhysConnLocked adds the first insertion of a physical connection. +// The [Logger.mu] must be held. +func (nl *Logger) addNewPhysConnLocked(c netlogtype.Connection) { + // Check whether this is the first insertion of the src node. + var srcNodeLen int + srcNode, srcSeen := nl.record.seenNodes[c.Src.Addr()] + if !srcSeen { + srcNode = nl.allNodes[c.Src.Addr()] + if srcNode.Valid() { + srcNodeLen = srcNode.jsonLen() } } - prefixes = make(map[netip.Prefix]bool) + + // Check whether the additional [netlogtype.ConnectionCounts] + // and [netlogtype.Node] information would exceed [maxLogSize]. + if nl.recordLen+netlogtype.MaxConnectionCountsJSONSize+srcNodeLen > maxLogSize { + nl.flushRecordLocked() + nl.initRecordLocked() + } + + // Insert newly seen src and/or dst nodes. + if !srcSeen && srcNode.Valid() { + nl.record.seenNodes[c.Src.Addr()] = srcNode + } + nl.recordLen += netlogtype.MaxConnectionCountsJSONSize + srcNodeLen +} + +// initRecordLocked initialize the current record if uninitialized. +// The [Logger.mu] must be held. 
+func (nl *Logger) initRecordLocked() { + if nl.recordLen != 0 { + return + } + nl.record = record{ + selfNode: nl.selfNode, + start: time.Now().UTC(), + seenNodes: make(map[netip.Addr]nodeUser), + virtConns: make(map[netlogtype.Connection]countsType), + physConns: make(map[netlogtype.Connection]netlogtype.Counts), + } + nl.recordLen = netlogtype.MinMessageJSONSize + nl.selfNode.jsonLen() + + // Start a time to auto-flush the record. + // Avoid tickers since continually waking up a goroutine + // is expensive on battery powered devices. + nl.flushTimer = time.AfterFunc(pollPeriod, func() { + nl.mu.Lock() + defer nl.mu.Unlock() + if !nl.record.start.IsZero() && time.Since(nl.record.start) > pollPeriod/2 { + nl.flushRecordLocked() + } + }) +} + +// flushRecordLocked flushes the current record if initialized. +// The [Logger.mu] must be held. +func (nl *Logger) flushRecordLocked() { + if nl.recordLen == 0 { + return + } + nl.record.end = time.Now().UTC() + if nl.recordsChan != nil { + select { + case nl.recordsChan <- nl.record: + default: + if nl.logf != nil { + nl.logf("netlog: dropped record due to processing backlog") + } + } + } + if nl.flushTimer != nil { + nl.flushTimer.Stop() + nl.flushTimer = nil + } + nl.record = record{} + nl.recordLen = 0 +} + +func makeNodeMaps(nm *netmap.NetworkMap) (selfNode nodeUser, allNodes map[netip.Addr]nodeUser) { + if nm == nil { + return + } + allNodes = make(map[netip.Addr]nodeUser) + if nm.SelfNode.Valid() { + selfNode = nodeUser{nm.SelfNode, nm.UserProfiles[nm.SelfNode.User()]} + for _, addr := range nm.SelfNode.Addresses().All() { + if addr.IsSingleIP() { + allNodes[addr.Addr()] = selfNode + } + } + } + for _, peer := range nm.Peers { + if peer.Valid() { + for _, addr := range peer.Addresses().All() { + if addr.IsSingleIP() { + allNodes[addr.Addr()] = nodeUser{peer, nm.UserProfiles[peer.User()]} + } + } + } + } + return selfNode, allNodes +} + +// ReconfigNetworkMap configures the network logger with an updated netmap. 
+func (nl *Logger) ReconfigNetworkMap(nm *netmap.NetworkMap) { + selfNode, allNodes := makeNodeMaps(nm) // avoid holding lock while making maps + nl.mu.Lock() + nl.selfNode, nl.allNodes = selfNode, allNodes + nl.mu.Unlock() +} + +func makeRouteMaps(cfg *router.Config) (addrs set.Set[netip.Addr], prefixes []netip.Prefix) { + addrs = make(set.Set[netip.Addr]) insertPrefixes := func(rs []netip.Prefix) { for _, p := range rs { if p.IsSingleIP() { - addrs[p.Addr()] = true + addrs.Add(p.Addr()) } else { - prefixes[p] = true + prefixes = append(prefixes, p) } } } + insertPrefixes(cfg.LocalAddrs) insertPrefixes(cfg.Routes) insertPrefixes(cfg.SubnetRoutes) return addrs, prefixes @@ -235,11 +460,25 @@ func makeRouteMaps(cfg *router.Config) (addrs map[netip.Addr]bool, prefixes map[ // The cfg is used to classify the types of connections captured by // the tun Device passed to Startup. func (nl *Logger) ReconfigRoutes(cfg *router.Config) { + addrs, prefixes := makeRouteMaps(cfg) // avoid holding lock while making maps nl.mu.Lock() - defer nl.mu.Unlock() - // TODO(joetsai): There is a race where deleted routes are not known at - // the time of extraction. We need to keep old routes around for a bit. - nl.addrs, nl.prefixes = makeRouteMaps(cfg) + nl.routeAddrs, nl.routePrefixes = addrs, prefixes + nl.mu.Unlock() +} + +// withinRoutesLocked reports whether a is within the configured routes, +// which should only contain Tailscale addresses and subnet routes. +// The [Logger.mu] must be held. +func (nl *Logger) withinRoutesLocked(a netip.Addr) bool { + if nl.routeAddrs.Contains(a) { + return true + } + for _, p := range nl.routePrefixes { + if p.Contains(a) && p.Bits() > 0 { + return true + } + } + return false } // Shutdown shuts down the network logger. 
@@ -248,26 +487,8 @@ func (nl *Logger) ReconfigRoutes(cfg *router.Config) { func (nl *Logger) Shutdown(ctx context.Context) error { nl.mu.Lock() defer nl.mu.Unlock() - if nl.logger == nil { + if nl.shutdownLocked == nil { return nil } - - // Shutdown in reverse order of Startup. - // Do not hold lock while shutting down since this may flush one last time. - nl.mu.Unlock() - nl.sock.SetConnectionCounter(nil) - nl.tun.SetConnectionCounter(nil) - err1 := nl.stats.Shutdown(ctx) - err2 := nl.logger.Shutdown(ctx) - nl.mu.Lock() - - // Purge state. - nl.logger = nil - nl.stats = nil - nl.tun = nil - nl.sock = nil - nl.addrs = nil - nl.prefixes = nil - - return errors.Join(err1, err2) + return nl.shutdownLocked(ctx) } diff --git a/wgengine/netlog/netlog_omit.go b/wgengine/netlog/netlog_omit.go index 43209df919ace..03610a1ef017a 100644 --- a/wgengine/netlog/netlog_omit.go +++ b/wgengine/netlog/netlog_omit.go @@ -7,7 +7,8 @@ package netlog type Logger struct{} -func (*Logger) Startup(...any) error { return nil } -func (*Logger) Running() bool { return false } -func (*Logger) Shutdown(any) error { return nil } -func (*Logger) ReconfigRoutes(any) {} +func (*Logger) Startup(...any) error { return nil } +func (*Logger) Running() bool { return false } +func (*Logger) Shutdown(any) error { return nil } +func (*Logger) ReconfigNetworkMap(any) {} +func (*Logger) ReconfigRoutes(any) {} diff --git a/wgengine/netlog/netlog_test.go b/wgengine/netlog/netlog_test.go new file mode 100644 index 0000000000000..ed9f672bfb63d --- /dev/null +++ b/wgengine/netlog/netlog_test.go @@ -0,0 +1,236 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netlog && !ts_omit_logtail + +package netlog + +import ( + "encoding/binary" + "math/rand/v2" + "net/netip" + "sync" + "testing" + "testing/synctest" + "time" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + 
"tailscale.com/tailcfg" + "tailscale.com/types/bools" + "tailscale.com/types/ipproto" + "tailscale.com/types/netlogtype" + "tailscale.com/types/netmap" + "tailscale.com/wgengine/router" +) + +func TestEmbedNodeInfo(t *testing.T) { + // Initialize the logger with a particular view of the netmap. + var logger Logger + logger.ReconfigNetworkMap(&netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: "n123456CNTL", + ID: 123456, + Name: "test.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Tags: []string{"tag:foo", "tag:bar"}, + }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + StableID: "n123457CNTL", + ID: 123457, + Name: "peer1.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.4")}, + Tags: []string{"tag:peer"}, + }).View(), + (&tailcfg.Node{ + StableID: "n123458CNTL", + ID: 123458, + Name: "peer2.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.5")}, + User: 54321, + }).View(), + }, + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + 54321: (&tailcfg.UserProfile{ID: 54321, LoginName: "peer@example.com"}).View(), + }, + }) + logger.ReconfigRoutes(&router.Config{ + SubnetRoutes: []netip.Prefix{ + prefix("172.16.1.1/16"), + prefix("192.168.1.1/24"), + }, + }) + + // Update the counters for a few connections. 
+ var group sync.WaitGroup + defer group.Wait() + conns := []struct { + virt bool + proto ipproto.Proto + src, dst netip.AddrPort + txP, txB, rxP, rxB int + }{ + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("100.1.2.4:1812"), 88, 278, 34, 887}, + {true, 0x6, addrPort("100.1.2.3:443"), addrPort("100.1.2.5:1742"), 96, 635, 23, 790}, + {true, 0x6, addrPort("100.1.2.3:443"), addrPort("100.1.2.6:1175"), 48, 94, 86, 618}, // unknown peer (in Tailscale IP space, but not a known peer) + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("192.168.1.241:713"), 43, 154, 66, 883}, + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("192.168.2.241:713"), 43, 154, 66, 883}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("172.16.5.18:713"), 7, 243, 40, 59}, + {true, 0x6, addrPort("100.1.2.3:80"), addrPort("172.20.5.18:713"), 61, 753, 42, 492}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("192.168.1.241:713"), addrPort("100.1.2.3:80"), 43, 154, 66, 883}, + {true, 0x6, addrPort("192.168.2.241:713"), addrPort("100.1.2.3:80"), 43, 154, 66, 883}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("172.16.5.18:713"), addrPort("100.1.2.3:80"), 7, 243, 40, 59}, + {true, 0x6, addrPort("172.20.5.18:713"), addrPort("100.1.2.3:80"), 61, 753, 42, 492}, // not in the subnet, must be exit traffic + {true, 0x6, addrPort("14.255.192.128:39230"), addrPort("243.42.106.193:48206"), 81, 791, 79, 316}, // unknown connection + {false, 0x6, addrPort("100.1.2.4:0"), addrPort("35.92.180.165:9743"), 63, 136, 61, 409}, // physical traffic with peer1 + {false, 0x6, addrPort("100.1.2.5:0"), addrPort("131.19.35.17:9743"), 88, 452, 2, 716}, // physical traffic with peer2 + } + for range 10 { + for _, conn := range conns { + update := bools.IfElse(conn.virt, logger.updateVirtConn, logger.updatePhysConn) + group.Go(func() { update(conn.proto, conn.src, conn.dst, conn.txP, conn.txB, false) }) + group.Go(func() { update(conn.proto, 
conn.src, conn.dst, conn.rxP, conn.rxB, true) }) + } + } + group.Wait() + + // Verify that the counters match. + got := logger.record.toMessage(false, false) + got.Start = time.Time{} // avoid flakiness + want := netlogtype.Message{ + NodeID: "n123456CNTL", + SrcNode: netlogtype.Node{ + NodeID: "n123456CNTL", + Name: "test.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.3")}, + Tags: []string{"tag:bar", "tag:foo"}, + }, + DstNodes: []netlogtype.Node{{ + NodeID: "n123457CNTL", + Name: "peer1.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.4")}, + Tags: []string{"tag:peer"}, + }, { + NodeID: "n123458CNTL", + Name: "peer2.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.5")}, + User: "peer@example.com", + }}, + VirtualTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "100.1.2.3:80", "100.1.2.4:1812"), Counts: counts(880, 2780, 340, 8870)}, + {Connection: conn(0x6, "100.1.2.3:443", "100.1.2.5:1742"), Counts: counts(960, 6350, 230, 7900)}, + }, + SubnetTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "100.1.2.3:80", "172.16.5.18:713"), Counts: counts(70, 2430, 400, 590)}, + {Connection: conn(0x6, "100.1.2.3:80", "192.168.1.241:713"), Counts: counts(430, 1540, 660, 8830)}, + {Connection: conn(0x6, "172.16.5.18:713", "100.1.2.3:80"), Counts: counts(70, 2430, 400, 590)}, + {Connection: conn(0x6, "192.168.1.241:713", "100.1.2.3:80"), Counts: counts(430, 1540, 660, 8830)}, + }, + ExitTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "14.255.192.128:39230", "243.42.106.193:48206"), Counts: counts(810, 7910, 790, 3160)}, + {Connection: conn(0x6, "100.1.2.3:80", "172.20.5.18:713"), Counts: counts(610, 7530, 420, 4920)}, + {Connection: conn(0x6, "100.1.2.3:80", "192.168.2.241:713"), Counts: counts(430, 1540, 660, 8830)}, + {Connection: conn(0x6, "100.1.2.3:443", "100.1.2.6:1175"), Counts: counts(480, 940, 860, 6180)}, + {Connection: conn(0x6, "172.20.5.18:713", "100.1.2.3:80"), Counts: counts(610, 
7530, 420, 4920)}, + {Connection: conn(0x6, "192.168.2.241:713", "100.1.2.3:80"), Counts: counts(430, 1540, 660, 8830)}, + }, + PhysicalTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "100.1.2.4:0", "35.92.180.165:9743"), Counts: counts(630, 1360, 610, 4090)}, + {Connection: conn(0x6, "100.1.2.5:0", "131.19.35.17:9743"), Counts: counts(880, 4520, 20, 7160)}, + }, + } + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" { + t.Errorf("Message (-got +want):\n%s", d) + } +} + +func TestUpdateRace(t *testing.T) { + var logger Logger + logger.recordsChan = make(chan record, 1) + go func(recordsChan chan record) { + for range recordsChan { + } + }(logger.recordsChan) + + var group sync.WaitGroup + defer group.Wait() + for i := range 1000 { + group.Go(func() { + src, dst := randAddrPort(), randAddrPort() + for j := range 1000 { + if i%2 == 0 { + logger.updateVirtConn(0x1, src, dst, rand.IntN(10), rand.IntN(1000), j%2 == 0) + } else { + logger.updatePhysConn(0x1, src, dst, rand.IntN(10), rand.IntN(1000), j%2 == 0) + } + } + }) + group.Go(func() { + for range 1000 { + logger.ReconfigNetworkMap(new(netmap.NetworkMap)) + } + }) + group.Go(func() { + for range 1000 { + logger.ReconfigRoutes(new(router.Config)) + } + }) + } + + group.Wait() + logger.mu.Lock() + close(logger.recordsChan) + logger.mu.Unlock() +} + +func randAddrPort() netip.AddrPort { + var b [4]uint8 + binary.LittleEndian.PutUint32(b[:], rand.Uint32()) + return netip.AddrPortFrom(netip.AddrFrom4(b), uint16(rand.Uint32())) +} + +func TestAutoFlushMaxConns(t *testing.T) { + var logger Logger + logger.recordsChan = make(chan record, 1) + for i := 0; len(logger.recordsChan) == 0; i++ { + logger.updateVirtConn(0, netip.AddrPortFrom(netip.Addr{}, uint16(i)), netip.AddrPort{}, 1, 1, false) + } + b, _ := jsonv2.Marshal(logger.recordsChan) + if len(b) > maxLogSize { + t.Errorf("len(Message) = %v, want <= %d", len(b), maxLogSize) + } +} + +func 
TestAutoFlushTimeout(t *testing.T) { + var logger Logger + logger.recordsChan = make(chan record, 1) + synctest.Test(t, func(t *testing.T) { + logger.updateVirtConn(0, netip.AddrPort{}, netip.AddrPort{}, 1, 1, false) + time.Sleep(pollPeriod) + }) + rec := <-logger.recordsChan + if d := rec.end.Sub(rec.start); d != pollPeriod { + t.Errorf("window = %v, want %v", d, pollPeriod) + } + if len(rec.virtConns) != 1 { + t.Errorf("len(virtConns) = %d, want 1", len(rec.virtConns)) + } +} + +func BenchmarkUpdateSameConn(b *testing.B) { + var logger Logger + b.ReportAllocs() + for range b.N { + logger.updateVirtConn(0, netip.AddrPort{}, netip.AddrPort{}, 1, 1, false) + } +} + +func BenchmarkUpdateNewConns(b *testing.B) { + var logger Logger + b.ReportAllocs() + for i := range b.N { + logger.updateVirtConn(0, netip.AddrPortFrom(netip.Addr{}, uint16(i)), netip.AddrPort{}, 1, 1, false) + } +} diff --git a/wgengine/netlog/record.go b/wgengine/netlog/record.go new file mode 100644 index 0000000000000..b8db26fc59029 --- /dev/null +++ b/wgengine/netlog/record.go @@ -0,0 +1,196 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netlog && !ts_omit_logtail + +package netlog + +import ( + "cmp" + "net/netip" + "slices" + "time" + "unicode/utf8" + + "tailscale.com/tailcfg" + "tailscale.com/types/netlogtype" + "tailscale.com/util/set" +) + +// maxLogSize is the maximum number of bytes for a log message. +const maxLogSize = 256 << 10 + +// record is the in-memory representation of a [netlogtype.Message]. +// It uses maps to efficiently look-up addresses and connections. +// In contrast, [netlogtype.Message] is designed to be JSON serializable, +// where complex keys types are not well support in JSON objects. 
+type record struct { + selfNode nodeUser + + start time.Time + end time.Time + + seenNodes map[netip.Addr]nodeUser + + virtConns map[netlogtype.Connection]countsType + physConns map[netlogtype.Connection]netlogtype.Counts +} + +// nodeUser is a node with additional user profile information. +type nodeUser struct { + tailcfg.NodeView + user tailcfg.UserProfileView // UserProfileView for NodeView.User +} + +// countsType is a counts with classification information about the connection. +type countsType struct { + netlogtype.Counts + connType connType +} + +type connType uint8 + +const ( + unknownTraffic connType = iota + virtualTraffic + subnetTraffic + exitTraffic +) + +// toMessage converts a [record] into a [netlogtype.Message]. +func (r record) toMessage(excludeNodeInfo, anonymizeExitTraffic bool) netlogtype.Message { + if !r.selfNode.Valid() { + return netlogtype.Message{} + } + + m := netlogtype.Message{ + NodeID: r.selfNode.StableID(), + Start: r.start.UTC(), + End: r.end.UTC(), + } + + // Convert node fields. + if !excludeNodeInfo { + m.SrcNode = r.selfNode.toNode() + seenIDs := set.Of(r.selfNode.ID()) + for _, node := range r.seenNodes { + if _, ok := seenIDs[node.ID()]; !ok && node.Valid() { + m.DstNodes = append(m.DstNodes, node.toNode()) + seenIDs.Add(node.ID()) + } + } + slices.SortFunc(m.DstNodes, func(x, y netlogtype.Node) int { + return cmp.Compare(x.NodeID, y.NodeID) + }) + } + + // Converter traffic fields. 
+ anonymizedExitTraffic := make(map[netlogtype.Connection]netlogtype.Counts) + for conn, cnts := range r.virtConns { + switch cnts.connType { + case virtualTraffic: + m.VirtualTraffic = append(m.VirtualTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts.Counts}) + case subnetTraffic: + m.SubnetTraffic = append(m.SubnetTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts.Counts}) + default: + if anonymizeExitTraffic { + conn = netlogtype.Connection{ // scrub the IP protocol type + Src: netip.AddrPortFrom(conn.Src.Addr(), 0), // scrub the port number + Dst: netip.AddrPortFrom(conn.Dst.Addr(), 0), // scrub the port number + } + if !r.seenNodes[conn.Src.Addr()].Valid() { + conn.Src = netip.AddrPort{} // not a Tailscale node, so scrub the address + } + if !r.seenNodes[conn.Dst.Addr()].Valid() { + conn.Dst = netip.AddrPort{} // not a Tailscale node, so scrub the address + } + anonymizedExitTraffic[conn] = anonymizedExitTraffic[conn].Add(cnts.Counts) + continue + } + m.ExitTraffic = append(m.ExitTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts.Counts}) + } + } + for conn, cnts := range anonymizedExitTraffic { + m.ExitTraffic = append(m.ExitTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) + } + for conn, cnts := range r.physConns { + m.PhysicalTraffic = append(m.PhysicalTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts}) + } + + // Sort the connections for deterministic results. + slices.SortFunc(m.VirtualTraffic, compareConnCnts) + slices.SortFunc(m.SubnetTraffic, compareConnCnts) + slices.SortFunc(m.ExitTraffic, compareConnCnts) + slices.SortFunc(m.PhysicalTraffic, compareConnCnts) + + return m +} + +func compareConnCnts(x, y netlogtype.ConnectionCounts) int { + return cmp.Or( + netip.AddrPort.Compare(x.Src, y.Src), + netip.AddrPort.Compare(x.Dst, y.Dst), + cmp.Compare(x.Proto, y.Proto)) +} + +// jsonLen computes an upper-bound on the size of the JSON representation. 
+func (nu nodeUser) jsonLen() int { + if !nu.Valid() { + return len(`{"nodeId":""}`) + } + n := netlogtype.MinNodeJSONSize + jsonQuotedLen(nu.Name()) + if nu.Tags().Len() > 0 { + for _, tag := range nu.Tags().All() { + n += jsonQuotedLen(tag) + len(",") + } + } else if nu.user.Valid() && nu.user.ID() == nu.User() { + n += jsonQuotedLen(nu.user.LoginName()) + } + return n +} + +// toNode converts the [nodeUser] into a [netlogtype.Node]. +func (nu nodeUser) toNode() netlogtype.Node { + if !nu.Valid() { + return netlogtype.Node{} + } + n := netlogtype.Node{NodeID: nu.StableID(), Name: nu.Name()} + var ipv4, ipv6 netip.Addr + for _, addr := range nu.Addresses().All() { + switch { + case addr.IsSingleIP() && addr.Addr().Is4(): + ipv4 = addr.Addr() + case addr.IsSingleIP() && addr.Addr().Is6(): + ipv6 = addr.Addr() + } + } + n.Addresses = []netip.Addr{ipv4, ipv6} + n.Addresses = slices.DeleteFunc(n.Addresses, func(a netip.Addr) bool { return !a.IsValid() }) + if nu.Tags().Len() > 0 { + n.Tags = nu.Tags().AsSlice() + slices.Sort(n.Tags) + n.Tags = slices.Compact(n.Tags) + } else if nu.user.Valid() && nu.user.ID() == nu.User() { + n.User = nu.user.LoginName() + } + return n +} + +// jsonQuotedLen computes the length of the JSON serialization of s +// according to [jsontext.AppendQuote]. 
+func jsonQuotedLen(s string) int { + n := len(`"`) + len(s) + len(`"`) + for i, r := range s { + switch { + case r == '\b', r == '\t', r == '\n', r == '\f', r == '\r', r == '"', r == '\\': + n += len(`\X`) - 1 + case r < ' ': + n += len(`\uXXXX`) - 1 + case r == utf8.RuneError: + if _, m := utf8.DecodeRuneInString(s[i:]); m == 1 { // exactly an invalid byte + n += len("�") - 1 + } + } + } + return n +} diff --git a/wgengine/netlog/record_test.go b/wgengine/netlog/record_test.go new file mode 100644 index 0000000000000..d3ab8b86c63d0 --- /dev/null +++ b/wgengine/netlog/record_test.go @@ -0,0 +1,255 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netlog && !ts_omit_logtail + +package netlog + +import ( + "net/netip" + "testing" + "time" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" + "tailscale.com/types/netlogtype" + "tailscale.com/util/must" +) + +func addr(s string) netip.Addr { + if s == "" { + return netip.Addr{} + } + return must.Get(netip.ParseAddr(s)) +} +func addrPort(s string) netip.AddrPort { + if s == "" { + return netip.AddrPort{} + } + return must.Get(netip.ParseAddrPort(s)) +} +func prefix(s string) netip.Prefix { + if p, err := netip.ParsePrefix(s); err == nil { + return p + } + a := addr(s) + return netip.PrefixFrom(a, a.BitLen()) +} + +func conn(proto ipproto.Proto, src, dst string) netlogtype.Connection { + return netlogtype.Connection{Proto: proto, Src: addrPort(src), Dst: addrPort(dst)} +} + +func counts(txP, txB, rxP, rxB uint64) netlogtype.Counts { + return netlogtype.Counts{TxPackets: txP, TxBytes: txB, RxPackets: rxP, RxBytes: rxB} +} + +func TestToMessage(t *testing.T) { + rec := record{ + selfNode: nodeUser{NodeView: (&tailcfg.Node{ + ID: 123456, + StableID: "n123456CNTL", + Name: 
"src.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Tags: []string{"tag:src"}, + }).View()}, + start: time.Now(), + end: time.Now().Add(5 * time.Second), + + seenNodes: map[netip.Addr]nodeUser{ + addr("100.1.2.4"): {NodeView: (&tailcfg.Node{ + ID: 123457, + StableID: "n123457CNTL", + Name: "dst1.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.4")}, + Tags: []string{"tag:dst1"}, + }).View()}, + addr("100.1.2.5"): {NodeView: (&tailcfg.Node{ + ID: 123458, + StableID: "n123458CNTL", + Name: "dst2.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.5")}, + Tags: []string{"tag:dst2"}, + }).View()}, + }, + + virtConns: map[netlogtype.Connection]countsType{ + conn(0x1, "100.1.2.3:1234", "100.1.2.4:80"): {Counts: counts(12, 34, 56, 78), connType: virtualTraffic}, + conn(0x1, "100.1.2.3:1234", "100.1.2.5:80"): {Counts: counts(23, 45, 78, 790), connType: virtualTraffic}, + conn(0x6, "172.16.1.1:80", "100.1.2.4:1234"): {Counts: counts(91, 54, 723, 621), connType: subnetTraffic}, + conn(0x6, "172.16.1.2:443", "100.1.2.5:1234"): {Counts: counts(42, 813, 3, 1823), connType: subnetTraffic}, + conn(0x6, "172.16.1.3:80", "100.1.2.6:1234"): {Counts: counts(34, 52, 78, 790), connType: subnetTraffic}, + conn(0x6, "100.1.2.3:1234", "12.34.56.78:80"): {Counts: counts(11, 110, 10, 100), connType: exitTraffic}, + conn(0x6, "100.1.2.4:1234", "23.34.56.78:80"): {Counts: counts(423, 1, 6, 123), connType: exitTraffic}, + conn(0x6, "100.1.2.4:1234", "23.34.56.78:443"): {Counts: counts(22, 220, 20, 200), connType: exitTraffic}, + conn(0x6, "100.1.2.5:1234", "45.34.56.78:80"): {Counts: counts(33, 330, 30, 300), connType: exitTraffic}, + conn(0x6, "100.1.2.6:1234", "67.34.56.78:80"): {Counts: counts(44, 440, 40, 400), connType: exitTraffic}, + conn(0x6, "42.54.72.42:555", "18.42.7.1:777"): {Counts: counts(44, 440, 40, 400)}, + }, + + physConns: map[netlogtype.Connection]netlogtype.Counts{ + conn(0, "100.1.2.4:0", "4.3.2.1:1234"): counts(12, 34, 
56, 78), + conn(0, "100.1.2.5:0", "4.3.2.10:1234"): counts(78, 56, 34, 12), + }, + } + rec.seenNodes[rec.selfNode.toNode().Addresses[0]] = rec.selfNode + + got := rec.toMessage(false, false) + want := netlogtype.Message{ + NodeID: rec.selfNode.StableID(), + Start: rec.start, + End: rec.end, + SrcNode: rec.selfNode.toNode(), + DstNodes: []netlogtype.Node{ + rec.seenNodes[addr("100.1.2.4")].toNode(), + rec.seenNodes[addr("100.1.2.5")].toNode(), + }, + VirtualTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x1, "100.1.2.3:1234", "100.1.2.4:80"), Counts: counts(12, 34, 56, 78)}, + {Connection: conn(0x1, "100.1.2.3:1234", "100.1.2.5:80"), Counts: counts(23, 45, 78, 790)}, + }, + SubnetTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "172.16.1.1:80", "100.1.2.4:1234"), Counts: counts(91, 54, 723, 621)}, + {Connection: conn(0x6, "172.16.1.2:443", "100.1.2.5:1234"), Counts: counts(42, 813, 3, 1823)}, + {Connection: conn(0x6, "172.16.1.3:80", "100.1.2.6:1234"), Counts: counts(34, 52, 78, 790)}, + }, + ExitTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0x6, "42.54.72.42:555", "18.42.7.1:777"), Counts: counts(44, 440, 40, 400)}, + {Connection: conn(0x6, "100.1.2.3:1234", "12.34.56.78:80"), Counts: counts(11, 110, 10, 100)}, + {Connection: conn(0x6, "100.1.2.4:1234", "23.34.56.78:80"), Counts: counts(423, 1, 6, 123)}, + {Connection: conn(0x6, "100.1.2.4:1234", "23.34.56.78:443"), Counts: counts(22, 220, 20, 200)}, + {Connection: conn(0x6, "100.1.2.5:1234", "45.34.56.78:80"), Counts: counts(33, 330, 30, 300)}, + {Connection: conn(0x6, "100.1.2.6:1234", "67.34.56.78:80"), Counts: counts(44, 440, 40, 400)}, + }, + PhysicalTraffic: []netlogtype.ConnectionCounts{ + {Connection: conn(0, "100.1.2.4:0", "4.3.2.1:1234"), Counts: counts(12, 34, 56, 78)}, + {Connection: conn(0, "100.1.2.5:0", "4.3.2.10:1234"), Counts: counts(78, 56, 34, 12)}, + }, + } + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" 
{ + t.Errorf("toMessage(false, false) mismatch (-got +want):\n%s", d) + } + + got = rec.toMessage(true, false) + want.SrcNode = netlogtype.Node{} + want.DstNodes = nil + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" { + t.Errorf("toMessage(true, false) mismatch (-got +want):\n%s", d) + } + + got = rec.toMessage(true, true) + want.ExitTraffic = []netlogtype.ConnectionCounts{ + {Connection: conn(0, "", ""), Counts: counts(44+44, 440+440, 40+40, 400+400)}, + {Connection: conn(0, "100.1.2.3:0", ""), Counts: counts(11, 110, 10, 100)}, + {Connection: conn(0, "100.1.2.4:0", ""), Counts: counts(423+22, 1+220, 6+20, 123+200)}, + {Connection: conn(0, "100.1.2.5:0", ""), Counts: counts(33, 330, 30, 300)}, + } + if d := cmp.Diff(got, want, cmpopts.EquateComparable(netip.Addr{}, netip.AddrPort{})); d != "" { + t.Errorf("toMessage(true, true) mismatch (-got +want):\n%s", d) + } +} + +func TestToNode(t *testing.T) { + tests := []struct { + node *tailcfg.Node + user *tailcfg.UserProfile + want netlogtype.Node + }{ + {}, + { + node: &tailcfg.Node{ + StableID: "n123456CNTL", + Name: "test.tail123456.ts.net", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Tags: []string{"tag:dupe", "tag:test", "tag:dupe"}, + User: 12345, // should be ignored + }, + want: netlogtype.Node{ + NodeID: "n123456CNTL", + Name: "test.tail123456.ts.net", + Addresses: []netip.Addr{addr("100.1.2.3")}, + Tags: []string{"tag:dupe", "tag:test"}, + }, + }, + { + node: &tailcfg.Node{ + StableID: "n123456CNTL", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + User: 12345, + }, + want: netlogtype.Node{ + NodeID: "n123456CNTL", + Addresses: []netip.Addr{addr("100.1.2.3")}, + }, + }, + { + node: &tailcfg.Node{ + StableID: "n123456CNTL", + Addresses: []netip.Prefix{prefix("100.1.2.3")}, + User: 12345, + }, + user: &tailcfg.UserProfile{ + ID: 12345, + LoginName: "user@domain", + }, + want: netlogtype.Node{ + NodeID: "n123456CNTL", + Addresses: 
[]netip.Addr{addr("100.1.2.3")}, + User: "user@domain", + }, + }, + } + for _, tt := range tests { + nu := nodeUser{tt.node.View(), tt.user.View()} + got := nu.toNode() + b := must.Get(jsonv2.Marshal(got)) + if len(b) > nu.jsonLen() { + t.Errorf("jsonLen = %v, want >= %d", nu.jsonLen(), len(b)) + } + if d := cmp.Diff(got, tt.want, cmpopts.EquateComparable(netip.Addr{})); d != "" { + t.Errorf("toNode mismatch (-got +want):\n%s", d) + } + } +} + +func FuzzQuotedLen(f *testing.F) { + for _, s := range quotedLenTestdata { + f.Add(s) + } + f.Fuzz(func(t *testing.T, s string) { + testQuotedLen(t, s) + }) +} + +func TestQuotedLen(t *testing.T) { + for _, s := range quotedLenTestdata { + testQuotedLen(t, s) + } +} + +var quotedLenTestdata = []string{ + "", // empty string + func() string { + b := make([]byte, 128) + for i := range b { + b[i] = byte(i) + } + return string(b) + }(), // all ASCII + "�", // replacement rune + "\xff", // invalid UTF-8 + "ʕ◔ϖ◔ʔ", // Unicode gopher +} + +func testQuotedLen(t *testing.T, in string) { + got := jsonQuotedLen(in) + b, _ := jsontext.AppendQuote(nil, in) + want := len(b) + if got != want { + t.Errorf("jsonQuotedLen(%q) = %v, want %v", in, got, want) + } +} diff --git a/wgengine/netlog/stats.go b/wgengine/netlog/stats.go deleted file mode 100644 index c06068803f125..0000000000000 --- a/wgengine/netlog/stats.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ts_omit_netlog && !ts_omit_logtail - -package netlog - -import ( - "context" - "net/netip" - "sync" - "time" - - "golang.org/x/sync/errgroup" - "tailscale.com/net/packet" - "tailscale.com/net/tsaddr" - "tailscale.com/types/ipproto" - "tailscale.com/types/netlogtype" -) - -// statistics maintains counters for every connection. -// All methods are safe for concurrent use. -// The zero value is ready for use. 
-type statistics struct { - maxConns int // immutable once set - - mu sync.Mutex - connCnts - - connCntsCh chan connCnts - shutdownCtx context.Context - shutdown context.CancelFunc - group errgroup.Group -} - -type connCnts struct { - start time.Time - end time.Time - virtual map[netlogtype.Connection]netlogtype.Counts - physical map[netlogtype.Connection]netlogtype.Counts -} - -// newStatistics creates a data structure for tracking connection statistics -// that periodically dumps the virtual and physical connection counts -// depending on whether the maxPeriod or maxConns is exceeded. -// The dump function is called from a single goroutine. -// Shutdown must be called to cleanup resources. -func newStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts)) *statistics { - s := &statistics{maxConns: maxConns} - s.connCntsCh = make(chan connCnts, 256) - s.shutdownCtx, s.shutdown = context.WithCancel(context.Background()) - s.group.Go(func() error { - // TODO(joetsai): Using a ticker is problematic on mobile platforms - // where waking up a process every maxPeriod when there is no activity - // is a drain on battery life. Switch this instead to instead use - // a time.Timer that is triggered upon network activity. - ticker := new(time.Ticker) - if maxPeriod > 0 { - ticker = time.NewTicker(maxPeriod) - defer ticker.Stop() - } - - for { - var cc connCnts - select { - case cc = <-s.connCntsCh: - case <-ticker.C: - cc = s.extract() - case <-s.shutdownCtx.Done(): - cc = s.extract() - } - if len(cc.virtual)+len(cc.physical) > 0 && dump != nil { - dump(cc.start, cc.end, cc.virtual, cc.physical) - } - if s.shutdownCtx.Err() != nil { - return nil - } - } - }) - return s -} - -// UpdateTxVirtual updates the counters for a transmitted IP packet -// The source and destination of the packet directly correspond with -// the source and destination in netlogtype.Connection. 
-func (s *statistics) UpdateTxVirtual(b []byte) { - var p packet.Parsed - p.Decode(b) - s.UpdateVirtual(p.IPProto, p.Src, p.Dst, 1, len(b), false) -} - -// UpdateRxVirtual updates the counters for a received IP packet. -// The source and destination of the packet are inverted with respect to -// the source and destination in netlogtype.Connection. -func (s *statistics) UpdateRxVirtual(b []byte) { - var p packet.Parsed - p.Decode(b) - s.UpdateVirtual(p.IPProto, p.Dst, p.Src, 1, len(b), true) -} - -var ( - tailscaleServiceIPv4 = tsaddr.TailscaleServiceIP() - tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() -) - -func (s *statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { - // Network logging is defined as traffic between two Tailscale nodes. - // Traffic with the internal Tailscale service is not with another node - // and should not be logged. It also happens to be a high volume - // amount of discrete traffic flows (e.g., DNS lookups). - switch dst.Addr() { - case tailscaleServiceIPv4, tailscaleServiceIPv6: - return - } - - conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} - - s.mu.Lock() - defer s.mu.Unlock() - cnts, found := s.virtual[conn] - if !found && !s.preInsertConn() { - return - } - if receive { - cnts.RxPackets += uint64(packets) - cnts.RxBytes += uint64(bytes) - } else { - cnts.TxPackets += uint64(packets) - cnts.TxBytes += uint64(bytes) - } - s.virtual[conn] = cnts -} - -// UpdateTxPhysical updates the counters for zero or more transmitted wireguard packets. -// The src is always a Tailscale IP address, representing some remote peer. -// The dst is a remote IP address and port that corresponds -// with some physical peer backing the Tailscale IP address. 
-func (s *statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, false) -} - -// UpdateRxPhysical updates the counters for zero or more received wireguard packets. -// The src is always a Tailscale IP address, representing some remote peer. -// The dst is a remote IP address and port that corresponds -// with some physical peer backing the Tailscale IP address. -func (s *statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, true) -} - -func (s *statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { - conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} - - s.mu.Lock() - defer s.mu.Unlock() - cnts, found := s.physical[conn] - if !found && !s.preInsertConn() { - return - } - if receive { - cnts.RxPackets += uint64(packets) - cnts.RxBytes += uint64(bytes) - } else { - cnts.TxPackets += uint64(packets) - cnts.TxBytes += uint64(bytes) - } - s.physical[conn] = cnts -} - -// preInsertConn updates the maps to handle insertion of a new connection. -// It reports false if insertion is not allowed (i.e., after shutdown). -func (s *statistics) preInsertConn() bool { - // Check whether insertion of a new connection will exceed maxConns. - if len(s.virtual)+len(s.physical) == s.maxConns && s.maxConns > 0 { - // Extract the current statistics and send it to the serializer. - // Avoid blocking the network packet handling path. - select { - case s.connCntsCh <- s.extractLocked(): - default: - // TODO(joetsai): Log that we are dropping an entire connCounts. - } - } - - // Initialize the maps if nil. 
- if s.virtual == nil && s.physical == nil { - s.start = time.Now().UTC() - s.virtual = make(map[netlogtype.Connection]netlogtype.Counts) - s.physical = make(map[netlogtype.Connection]netlogtype.Counts) - } - - return s.shutdownCtx.Err() == nil -} - -func (s *statistics) extract() connCnts { - s.mu.Lock() - defer s.mu.Unlock() - return s.extractLocked() -} - -func (s *statistics) extractLocked() connCnts { - if len(s.virtual)+len(s.physical) == 0 { - return connCnts{} - } - s.end = time.Now().UTC() - cc := s.connCnts - s.connCnts = connCnts{} - return cc -} - -// TestExtract synchronously extracts the current network statistics map -// and resets the counters. This should only be used for testing purposes. -func (s *statistics) TestExtract() (virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - cc := s.extract() - return cc.virtual, cc.physical -} - -// Shutdown performs a final flush of statistics. -// Statistics for any subsequent calls to Update will be dropped. -// It is safe to call Shutdown concurrently and repeatedly. 
-func (s *statistics) Shutdown(context.Context) error { - s.shutdown() - return s.group.Wait() -} diff --git a/wgengine/netlog/stats_test.go b/wgengine/netlog/stats_test.go deleted file mode 100644 index 6cf7eb9983817..0000000000000 --- a/wgengine/netlog/stats_test.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package netlog - -import ( - "context" - "encoding/binary" - "fmt" - "math/rand" - "net/netip" - "runtime" - "sync" - "testing" - "time" - - qt "github.com/frankban/quicktest" - "tailscale.com/cmd/testwrapper/flakytest" - "tailscale.com/types/ipproto" - "tailscale.com/types/netlogtype" -) - -func testPacketV4(proto ipproto.Proto, srcAddr, dstAddr [4]byte, srcPort, dstPort, size uint16) (out []byte) { - var ipHdr [20]byte - ipHdr[0] = 4<<4 | 5 - binary.BigEndian.PutUint16(ipHdr[2:], size) - ipHdr[9] = byte(proto) - *(*[4]byte)(ipHdr[12:]) = srcAddr - *(*[4]byte)(ipHdr[16:]) = dstAddr - out = append(out, ipHdr[:]...) - switch proto { - case ipproto.TCP: - var tcpHdr [20]byte - binary.BigEndian.PutUint16(tcpHdr[0:], srcPort) - binary.BigEndian.PutUint16(tcpHdr[2:], dstPort) - out = append(out, tcpHdr[:]...) - case ipproto.UDP: - var udpHdr [8]byte - binary.BigEndian.PutUint16(udpHdr[0:], srcPort) - binary.BigEndian.PutUint16(udpHdr[2:], dstPort) - out = append(out, udpHdr[:]...) - default: - panic(fmt.Sprintf("unknown proto: %d", proto)) - } - return append(out, make([]byte, int(size)-len(out))...) -} - -// TestInterval ensures that we receive at least one call to `dump` using only -// maxPeriod. 
-func TestInterval(t *testing.T) { - c := qt.New(t) - - const maxPeriod = 10 * time.Millisecond - const maxConns = 2048 - - gotDump := make(chan struct{}, 1) - stats := newStatistics(maxPeriod, maxConns, func(_, _ time.Time, _, _ map[netlogtype.Connection]netlogtype.Counts) { - select { - case gotDump <- struct{}{}: - default: - } - }) - defer stats.Shutdown(context.Background()) - - srcAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - dstAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - srcPort := uint16(rand.Intn(16)) - dstPort := uint16(rand.Intn(16)) - size := uint16(64 + rand.Intn(1024)) - p := testPacketV4(ipproto.TCP, srcAddr.As4(), dstAddr.As4(), srcPort, dstPort, size) - stats.UpdateRxVirtual(p) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - select { - case <-ctx.Done(): - c.Fatal("didn't receive dump within context deadline") - case <-gotDump: - } -} - -func TestConcurrent(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7030") - c := qt.New(t) - - const maxPeriod = 10 * time.Millisecond - const maxConns = 10 - virtualAggregate := make(map[netlogtype.Connection]netlogtype.Counts) - stats := newStatistics(maxPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { - c.Assert(start.IsZero(), qt.IsFalse) - c.Assert(end.IsZero(), qt.IsFalse) - c.Assert(end.Before(start), qt.IsFalse) - c.Assert(len(virtual) > 0 && len(virtual) <= maxConns, qt.IsTrue) - c.Assert(len(physical) == 0, qt.IsTrue) - for conn, cnts := range virtual { - virtualAggregate[conn] = virtualAggregate[conn].Add(cnts) - } - }) - defer stats.Shutdown(context.Background()) - var wants []map[netlogtype.Connection]netlogtype.Counts - gots := make([]map[netlogtype.Connection]netlogtype.Counts, runtime.NumCPU()) - var group sync.WaitGroup - for i := range gots { - group.Add(1) - go func(i int) { - defer group.Done() - gots[i] = 
make(map[netlogtype.Connection]netlogtype.Counts) - rn := rand.New(rand.NewSource(time.Now().UnixNano())) - var p []byte - var t netlogtype.Connection - for j := 0; j < 1000; j++ { - delay := rn.Intn(10000) - if p == nil || rn.Intn(64) == 0 { - proto := ipproto.TCP - if rn.Intn(2) == 0 { - proto = ipproto.UDP - } - srcAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - dstAddr := netip.AddrFrom4([4]byte{192, 168, 0, byte(rand.Intn(16))}) - srcPort := uint16(rand.Intn(16)) - dstPort := uint16(rand.Intn(16)) - size := uint16(64 + rand.Intn(1024)) - p = testPacketV4(proto, srcAddr.As4(), dstAddr.As4(), srcPort, dstPort, size) - t = netlogtype.Connection{Proto: proto, Src: netip.AddrPortFrom(srcAddr, srcPort), Dst: netip.AddrPortFrom(dstAddr, dstPort)} - } - t2 := t - receive := rn.Intn(2) == 0 - if receive { - t2.Src, t2.Dst = t2.Dst, t2.Src - } - - cnts := gots[i][t2] - if receive { - stats.UpdateRxVirtual(p) - cnts.RxPackets++ - cnts.RxBytes += uint64(len(p)) - } else { - cnts.TxPackets++ - cnts.TxBytes += uint64(len(p)) - stats.UpdateTxVirtual(p) - } - gots[i][t2] = cnts - time.Sleep(time.Duration(rn.Intn(1 + delay))) - } - }(i) - } - group.Wait() - c.Assert(stats.Shutdown(context.Background()), qt.IsNil) - wants = append(wants, virtualAggregate) - - got := make(map[netlogtype.Connection]netlogtype.Counts) - want := make(map[netlogtype.Connection]netlogtype.Counts) - mergeMaps(got, gots...) - mergeMaps(want, wants...) - c.Assert(got, qt.DeepEquals, want) -} - -func mergeMaps(dst map[netlogtype.Connection]netlogtype.Counts, srcs ...map[netlogtype.Connection]netlogtype.Counts) { - for _, src := range srcs { - for conn, cnts := range src { - dst[conn] = dst[conn].Add(cnts) - } - } -} - -func Benchmark(b *testing.B) { - // TODO: Test IPv6 packets? 
- b.Run("SingleRoutine/SameConn", func(b *testing.B) { - p := testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 123, 456, 789) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := newStatistics(0, 0, nil) - for j := 0; j < 1e3; j++ { - s.UpdateTxVirtual(p) - } - } - }) - b.Run("SingleRoutine/UniqueConns", func(b *testing.B) { - p := testPacketV4(ipproto.UDP, [4]byte{}, [4]byte{}, 0, 0, 789) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := newStatistics(0, 0, nil) - for j := 0; j < 1e3; j++ { - binary.BigEndian.PutUint32(p[20:], uint32(j)) // unique port combination - s.UpdateTxVirtual(p) - } - } - }) - b.Run("MultiRoutine/SameConn", func(b *testing.B) { - p := testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 123, 456, 789) - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := newStatistics(0, 0, nil) - var group sync.WaitGroup - for j := 0; j < runtime.NumCPU(); j++ { - group.Add(1) - go func() { - defer group.Done() - for k := 0; k < 1e3; k++ { - s.UpdateTxVirtual(p) - } - }() - } - group.Wait() - } - }) - b.Run("MultiRoutine/UniqueConns", func(b *testing.B) { - ps := make([][]byte, runtime.NumCPU()) - for i := range ps { - ps[i] = testPacketV4(ipproto.UDP, [4]byte{192, 168, 0, 1}, [4]byte{192, 168, 0, 2}, 0, 0, 789) - } - b.ResetTimer() - b.ReportAllocs() - for range b.N { - s := newStatistics(0, 0, nil) - var group sync.WaitGroup - for j := 0; j < runtime.NumCPU(); j++ { - group.Add(1) - go func(j int) { - defer group.Done() - p := ps[j] - j *= 1e3 - for k := 0; k < 1e3; k++ { - binary.BigEndian.PutUint32(p[20:], uint32(j+k)) // unique port combination - s.UpdateTxVirtual(p) - } - }(j) - } - group.Wait() - } - }) -} diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 8856a3eaf4d11..619df655ccd44 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -1055,7 +1055,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, tid := 
cfg.NetworkLogging.DomainID logExitFlowEnabled := cfg.NetworkLogging.LogExitFlowEnabled e.logf("wgengine: Reconfig: starting up network logger (node:%s tailnet:%s)", nid.Public(), tid.Public()) - if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, e.eventBus, logExitFlowEnabled); err != nil { + if err := e.networkLogger.Startup(e.logf, nm, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, e.eventBus, logExitFlowEnabled); err != nil { e.logf("wgengine: Reconfig: error starting up network logger: %v", err) } e.networkLogger.ReconfigRoutes(routerCfg) @@ -1352,6 +1352,9 @@ func (e *userspaceEngine) SetNetworkMap(nm *netmap.NetworkMap) { e.mu.Lock() e.netMap = nm e.mu.Unlock() + if e.networkLogger.Running() { + e.networkLogger.ReconfigNetworkMap(nm) + } } func (e *userspaceEngine) UpdateStatus(sb *ipnstate.StatusBuilder) { From 9ac8105fda6de829b81280666dacaa1b9c093fe1 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 28 Oct 2025 14:53:59 -0700 Subject: [PATCH 0622/1093] cmd/jsontags: add static analyzer for incompatible `json` struct tags (#17670) This migrates an internal tool to open source so that we can run it on the tailscale.com module as well. This PR does not yet set up a CI to run this analyzer. 
Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- cmd/jsontags/analyzer.go | 201 +++++++++++++++++++++++++++++++++++++++ cmd/jsontags/iszero.go | 75 +++++++++++++++ cmd/jsontags/report.go | 135 ++++++++++++++++++++++++++ 3 files changed, 411 insertions(+) create mode 100644 cmd/jsontags/analyzer.go create mode 100644 cmd/jsontags/iszero.go create mode 100644 cmd/jsontags/report.go diff --git a/cmd/jsontags/analyzer.go b/cmd/jsontags/analyzer.go new file mode 100644 index 0000000000000..d799b66cbb583 --- /dev/null +++ b/cmd/jsontags/analyzer.go @@ -0,0 +1,201 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package jsontags checks for incompatible usage of JSON struct tags. +package jsontags + +import ( + "go/ast" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "jsonvet", + Doc: "check for incompatible usages of JSON struct tags", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (any, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // TODO: Report byte arrays fields without an explicit `format` tag option. 
+ + inspect.Preorder([]ast.Node{(*ast.StructType)(nil)}, func(n ast.Node) { + structType, ok := pass.TypesInfo.Types[n.(*ast.StructType)].Type.(*types.Struct) + if !ok { + return // type information may be incomplete + } + for i := range structType.NumFields() { + fieldVar := structType.Field(i) + tag := reflect.StructTag(structType.Tag(i)).Get("json") + if tag == "" { + continue + } + var seenName, hasFormat bool + for opt := range strings.SplitSeq(tag, ",") { + if !seenName { + seenName = true + continue + } + switch opt { + case "omitempty": + // For bools, ints, uints, floats, strings, and interfaces, + // it is always safe to migrate from `omitempty` to `omitzero` + // so long as the type does not have an IsZero method or + // the IsZero method is identical to reflect.Value.IsZero. + // + // For pointers, it is only safe to migrate from `omitempty` to `omitzero` + // so long as the type does not have an IsZero method, regardless of + // whether the IsZero method is identical to reflect.Value.IsZero. + // + // For pointers, `omitempty` behaves identically on both v1 and v2 + // so long as the type does not implement a Marshal method that + // might serialize as an empty JSON value (i.e., null, "", [], or {}). 
+ hasIsZero := hasIsZeroMethod(fieldVar.Type()) && !hasPureIsZeroMethod(fieldVar.Type()) + underType := fieldVar.Type().Underlying() + basic, isBasic := underType.(*types.Basic) + array, isArrayKind := underType.(*types.Array) + _, isMapKind := underType.(*types.Map) + _, isSliceKind := underType.(*types.Slice) + _, isPointerKind := underType.(*types.Pointer) + _, isInterfaceKind := underType.(*types.Interface) + supportedInV1 := isNumericKind(underType) || + isBasic && basic.Kind() == types.Bool || + isBasic && basic.Kind() == types.String || + isArrayKind && array.Len() == 0 || + isMapKind || isSliceKind || isPointerKind || isInterfaceKind + notSupportedInV2 := isNumericKind(underType) || + isBasic && basic.Kind() == types.Bool + switch { + case isMapKind, isSliceKind: + // This operates the same under both v1 and v2 so long as + // the map or slice type does not implement Marshal + // that could emit an empty JSON value for cases + // other than when the map or slice are empty. + // This is very rare. + case isString(fieldVar.Type()): + // This operates the same under both v1 and v2. + // These are safe to migrate to `omitzero`, + // but doing so is probably unnecessary churn. + // Note that this is only for an unnamed string type. + case !supportedInV1: + // This never worked in v1. Switching to `omitzero` + // may lead to unexpected behavior changes. + report(pass, structType, fieldVar, OmitEmptyUnsupportedInV1) + case notSupportedInV2: + // This does not work in v2. Switching to `omitzero` + // may lead to unexpected behavior changes. + report(pass, structType, fieldVar, OmitEmptyUnsupportedInV2) + case !hasIsZero: + // These are safe to migrate to `omitzero` such that + // it behaves identically under v1 and v2. + report(pass, structType, fieldVar, OmitEmptyShouldBeOmitZero) + case isPointerKind: + // This operates the same under both v1 and v2 so long as + // the pointer type does not implement Marshal that + // could emit an empty JSON value. 
+ // For example, time.Time is safe since the zero value + // never marshals as an empty JSON string. + default: + // This is a non-pointer type with an IsZero method. + // If IsZero is not identical to reflect.Value.IsZero, + // omission may behave slightly differently when using + // `omitzero` instead of `omitempty`. + // Thus the finding uses the word "should". + report(pass, structType, fieldVar, OmitEmptyShouldBeOmitZeroButHasIsZero) + } + case "string": + if !isNumericKind(fieldVar.Type()) { + report(pass, structType, fieldVar, StringOnNonNumericKind) + } + default: + key, _, ok := strings.Cut(opt, ":") + hasFormat = key == "format" && ok + } + } + if !hasFormat && isTimeDuration(mayPointerElem(fieldVar.Type())) { + report(pass, structType, fieldVar, FormatMissingOnTimeDuration) + } + } + }) + return nil, nil +} + +// hasIsZeroMethod reports whether t has an IsZero method. +func hasIsZeroMethod(t types.Type) bool { + for method := range types.NewMethodSet(t).Methods() { + if fn, ok := method.Type().(*types.Signature); ok && method.Obj().Name() == "IsZero" { + if fn.Params().Len() == 0 && fn.Results().Len() == 1 && isBool(fn.Results().At(0).Type()) { + return true + } + } + } + return false +} + +// isBool reports whether t is a bool type. +func isBool(t types.Type) bool { + basic, ok := t.(*types.Basic) + return ok && basic.Kind() == types.Bool +} + +// isString reports whether t is a string type. +func isString(t types.Type) bool { + basic, ok := t.(*types.Basic) + return ok && basic.Kind() == types.String +} + +// isTimeDuration reports whether t is a time.Duration type. +func isTimeDuration(t types.Type) bool { + return isNamed(t, "time", "Duration") +} + +// mayPointerElem returns the pointed-at type if t is a pointer, +// otherwise it returns t as-is. +func mayPointerElem(t types.Type) types.Type { + if pointer, ok := t.(*types.Pointer); ok { + return pointer.Elem() + } + return t +} + +// isNamed reports whether t is a named type of the given path and name. 
+func isNamed(t types.Type, path, name string) bool { + gotPath, gotName := typeName(t) + return gotPath == path && gotName == name +} + +// typeName reports the pkgPath and name of the type. +// It recursively follows type aliases to get the underlying named type. +func typeName(t types.Type) (pkgPath, name string) { + if named, ok := types.Unalias(t).(*types.Named); ok { + obj := named.Obj() + if pkg := obj.Pkg(); pkg != nil { + return pkg.Path(), obj.Name() + } + return "", obj.Name() + } + return "", "" +} + +// isNumericKind reports whether t is a numeric kind. +func isNumericKind(t types.Type) bool { + if basic, ok := t.Underlying().(*types.Basic); ok { + switch basic.Kind() { + case types.Int, types.Int8, types.Int16, types.Int32, types.Int64: + case types.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.Uintptr: + case types.Float32, types.Float64: + default: + return false + } + return true + } + return false +} diff --git a/cmd/jsontags/iszero.go b/cmd/jsontags/iszero.go new file mode 100644 index 0000000000000..77520d72c66f3 --- /dev/null +++ b/cmd/jsontags/iszero.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package jsontags + +import ( + "go/types" + "reflect" + + "tailscale.com/util/set" +) + +var _ = reflect.Value.IsZero // refer for hot-linking purposes + +var pureIsZeroMethods map[string]set.Set[string] + +// hasPureIsZeroMethod reports whether the IsZero method is truly +// identical to [reflect.Value.IsZero]. +func hasPureIsZeroMethod(t types.Type) bool { + // TODO: Detect this automatically by checking the method AST? + path, name := typeName(t) + return pureIsZeroMethods[path].Contains(name) +} + +// PureIsZeroMethodsInTailscaleModule is a list of known IsZero methods +// in the "tailscale.com" module that are pure. 
+var PureIsZeroMethodsInTailscaleModule = map[string]set.Set[string]{ + "tailscale.com/net/packet": set.Of( + "TailscaleRejectReason", + ), + "tailscale.com/tailcfg": set.Of( + "UserID", + "LoginID", + "NodeID", + "StableNodeID", + ), + "tailscale.com/tka": set.Of( + "AUMHash", + ), + "tailscale.com/types/geo": set.Of( + "Point", + ), + "tailscale.com/tstime/mono": set.Of( + "Time", + ), + "tailscale.com/types/key": set.Of( + "NLPrivate", + "NLPublic", + "DERPMesh", + "MachinePrivate", + "MachinePublic", + "ControlPrivate", + "DiscoPrivate", + "DiscoPublic", + "DiscoShared", + "HardwareAttestationPublic", + "ChallengePublic", + "NodePrivate", + "NodePublic", + ), + "tailscale.com/types/netlogtype": set.Of( + "Connection", + "Counts", + ), +} + +// RegisterPureIsZeroMethods specifies a list of pure IsZero methods +// where it is identical to calling [reflect.Value.IsZero] on the receiver. +// This is not strictly necessary, but allows for more accurate +// detection of improper use of `json` tags. +// +// This must be called at init and the input must not be mutated. +func RegisterPureIsZeroMethods(methods map[string]set.Set[string]) { + pureIsZeroMethods = methods +} diff --git a/cmd/jsontags/report.go b/cmd/jsontags/report.go new file mode 100644 index 0000000000000..f05788b61dd0d --- /dev/null +++ b/cmd/jsontags/report.go @@ -0,0 +1,135 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package jsontags + +import ( + "fmt" + "go/types" + "os" + "strings" + + _ "embed" + + "golang.org/x/tools/go/analysis" + "tailscale.com/util/set" +) + +var jsontagsAllowlist map[ReportKind]set.Set[string] + +// ParseAllowlist parses an allowlist of reports to ignore, +// which is a newline-delimited list of tuples separated by a tab, +// where each tuple is a [ReportKind] and a fully-qualified field name. 
+// +// For example: +// +// OmitEmptyUnsupportedInV1 tailscale.com/path/to/package.StructType.FieldName +// OmitEmptyUnsupportedInV1 tailscale.com/path/to/package.*.FieldName +// +// The struct type name may be "*" for anonymous struct types such +// as those declared within a function or as a type literal in a variable. +func ParseAllowlist(b []byte) map[ReportKind]set.Set[string] { + var allowlist map[ReportKind]set.Set[string] + for line := range strings.SplitSeq(string(b), "\n") { + kind, field, _ := strings.Cut(strings.TrimSpace(line), "\t") + if allowlist == nil { + allowlist = make(map[ReportKind]set.Set[string]) + } + fields := allowlist[ReportKind(kind)] + if fields == nil { + fields = make(set.Set[string]) + } + fields.Add(field) + allowlist[ReportKind(kind)] = fields + } + return allowlist +} + +// RegisterAllowlist registers an allowlist of reports to ignore, +// which is represented by a set of fully-qualified field names +// for each [ReportKind]. +// +// For example: +// +// { +// "OmitEmptyUnsupportedInV1": set.Of( +// "tailscale.com/path/to/package.StructType.FieldName", +// "tailscale.com/path/to/package.*.FieldName", +// ), +// } +// +// The struct type name may be "*" for anonymous struct types such +// as those declared within a function or as a type literal in a variable. +// +// This must be called at init and the input must not be mutated. 
+func RegisterAllowlist(allowlist map[ReportKind]set.Set[string]) { + jsontagsAllowlist = allowlist +} + +type ReportKind string + +const ( + OmitEmptyUnsupportedInV1 ReportKind = "OmitEmptyUnsupportedInV1" + OmitEmptyUnsupportedInV2 ReportKind = "OmitEmptyUnsupportedInV2" + OmitEmptyShouldBeOmitZero ReportKind = "OmitEmptyShouldBeOmitZero" + OmitEmptyShouldBeOmitZeroButHasIsZero ReportKind = "OmitEmptyShouldBeOmitZeroButHasIsZero" + StringOnNonNumericKind ReportKind = "StringOnNonNumericKind" + FormatMissingOnTimeDuration ReportKind = "FormatMissingOnTimeDuration" +) + +func (k ReportKind) message() string { + switch k { + case OmitEmptyUnsupportedInV1: + return "uses `omitempty` on an unsupported type in json/v1; should probably use `omitzero` instead" + case OmitEmptyUnsupportedInV2: + return "uses `omitempty` on an unsupported type in json/v2; should probably use `omitzero` instead" + case OmitEmptyShouldBeOmitZero: + return "should use `omitzero` instead of `omitempty`" + case OmitEmptyShouldBeOmitZeroButHasIsZero: + return "should probably use `omitzero` instead of `omitempty`" + case StringOnNonNumericKind: + return "must not use `string` on non-numeric types" + case FormatMissingOnTimeDuration: + return "must use an explicit `format` tag (e.g., `format:nano`) on a time.Duration type; see https://go.dev/issue/71631" + default: + return string(k) + } +} + +func report(pass *analysis.Pass, structType *types.Struct, fieldVar *types.Var, k ReportKind) { + // Lookup the full name of the struct type. 
+ var fullName string + for _, name := range pass.Pkg.Scope().Names() { + if obj := pass.Pkg.Scope().Lookup(name); obj != nil { + if named, ok := obj.(*types.TypeName); ok { + if types.Identical(named.Type().Underlying(), structType) { + fullName = fmt.Sprintf("%v.%v.%v", named.Pkg().Path(), named.Name(), fieldVar.Name()) + break + } + } + } + } + if fullName == "" { + // Full name could not be found since this is probably an anonymous type + // or locally declared within a function scope. + // Use just the package path and field name instead. + // This is imprecise, but better than nothing. + fullName = fmt.Sprintf("%s.*.%s", fieldVar.Pkg().Path(), fieldVar.Name()) + } + if jsontagsAllowlist[k].Contains(fullName) { + return + } + + const appendAllowlist = "" + if appendAllowlist != "" { + if f, err := os.OpenFile(appendAllowlist, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0664); err == nil { + fmt.Fprintf(f, "%v\t%v\n", k, fullName) + f.Close() + } + } + + pass.Report(analysis.Diagnostic{ + Pos: fieldVar.Pos(), + Message: fmt.Sprintf("field %q %s", fieldVar.Name(), k.message()), + }) +} From 3c19addc21c55c83ea1f4180789784c91f2bf348 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 29 Oct 2025 11:00:17 +0000 Subject: [PATCH 0623/1093] tka: rename a mutex to `mu` instead of single-letter `l` See http://go/no-ell Updates tailscale/corp#33846 Signed-off-by: Alex Chan Change-Id: I88ecd9db847e04237c1feab9dfcede5ca1050cc5 --- tka/tailchonk.go | 26 +++++++++++++------------- tka/tailchonk_test.go | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 7750b062201ac..90f99966cde34 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -82,7 +82,7 @@ type CompactableChonk interface { // // Mem implements the Chonk interface. 
type Mem struct { - l sync.RWMutex + mu sync.RWMutex aums map[AUMHash]AUM parentIndex map[AUMHash][]AUMHash @@ -90,23 +90,23 @@ type Mem struct { } func (c *Mem) SetLastActiveAncestor(hash AUMHash) error { - c.l.Lock() - defer c.l.Unlock() + c.mu.Lock() + defer c.mu.Unlock() c.lastActiveAncestor = &hash return nil } func (c *Mem) LastActiveAncestor() (*AUMHash, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() return c.lastActiveAncestor, nil } // Heads returns AUMs for which there are no children. In other // words, the latest AUM in all chains (the 'leaf'). func (c *Mem) Heads() ([]AUM, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() out := make([]AUM, 0, 6) // An AUM is a 'head' if there are no nodes for which it is the parent. @@ -120,8 +120,8 @@ func (c *Mem) Heads() ([]AUM, error) { // AUM returns the AUM with the specified digest. func (c *Mem) AUM(hash AUMHash) (AUM, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() aum, ok := c.aums[hash] if !ok { return AUM{}, os.ErrNotExist @@ -132,8 +132,8 @@ func (c *Mem) AUM(hash AUMHash) (AUM, error) { // ChildAUMs returns all AUMs with a specified previous // AUM hash. func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { - c.l.RLock() - defer c.l.RUnlock() + c.mu.RLock() + defer c.mu.RUnlock() out := make([]AUM, 0, 6) for _, entry := range c.parentIndex[prevAUMHash] { out = append(out, c.aums[entry]) @@ -147,8 +147,8 @@ func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { // as the rest of the TKA implementation assumes that only // verified AUMs are stored. 
func (c *Mem) CommitVerifiedAUMs(updates []AUM) error { - c.l.Lock() - defer c.l.Unlock() + c.mu.Lock() + defer c.mu.Unlock() if c.aums == nil { c.parentIndex = make(map[AUMHash][]AUMHash, 64) c.aums = make(map[AUMHash]AUM, 64) diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 1a6bad4592053..7816d2dc158b5 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -496,7 +496,7 @@ func (c *compactingChonkFake) PurgeAUMs(hashes []AUMHash) error { // Avoid go vet complaining about copying a lock value func cloneMem(src, dst *Mem) { - dst.l = sync.RWMutex{} + dst.mu = sync.RWMutex{} dst.aums = src.aums dst.parentIndex = src.parentIndex dst.lastActiveAncestor = src.lastActiveAncestor From 06b092388e4efb2226a264a03df14b778505278c Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 29 Oct 2025 08:37:19 -0700 Subject: [PATCH 0624/1093] ipn/ipnlocal: do not stall event processing for appc route updates (#17663) A follow-up to #17411. Put AppConnector events into a task queue, as they may take some time to process. Ensure that the queue is stopped at shutdown so that cleanup will remain orderly. Because events are delivered on a separate goroutine, slow processing of an event does not cause an immediate problem; however, a subscriber that blocks for a long time will push back on the bus as a whole. See https://godoc.org/tailscale.com/util/eventbus#hdr-Expected_subscriber_behavior for more discussion. Updates #17192 Updates #15160 Change-Id: Ib313cc68aec273daf2b1ad79538266c81ef063e3 Signed-off-by: M. J. 
Fromberger --- ipn/ipnlocal/local.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7b2257cca2223..df278a3253553 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -87,6 +87,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" + "tailscale.com/util/execqueue" "tailscale.com/util/goroutines" "tailscale.com/util/mak" "tailscale.com/util/osuser" @@ -187,6 +188,7 @@ type LocalBackend struct { statsLogf logger.Logf // for printing peers stats on change sys *tsd.System eventClient *eventbus.Client + appcTask execqueue.ExecQueue // handles updates from appc health *health.Tracker // always non-nil polc policyclient.Client // always non-nil @@ -613,12 +615,14 @@ func (b *LocalBackend) onAppConnectorRouteUpdate(ru appctype.RouteUpdate) { // We need to find a way to ensure that changes to the backend state are applied // consistently in the presnce of profile changes, which currently may not happen in // a single atomic step. See: https://github.com/tailscale/tailscale/issues/17414 - if err := b.AdvertiseRoute(ru.Advertise...); err != nil { - b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) - } - if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { - b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) - } + b.appcTask.Add(func() { + if err := b.AdvertiseRoute(ru.Advertise...); err != nil { + b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) + } + if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { + b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) + } + }) } func (b *LocalBackend) onAppConnectorStoreRoutes(ri appctype.RouteInfo) { @@ -1082,6 +1086,7 @@ func (b *LocalBackend) Shutdown() { // 1. Event handlers also acquire b.mu, they can deadlock with c.Shutdown(). // 2. 
Event handlers may not guard against undesirable post/in-progress // LocalBackend.Shutdown() behaviors. + b.appcTask.Shutdown() b.eventClient.Close() b.em.close() From da90e3d8f25d7ef53758688a04a354c9d38d9edc Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Wed, 29 Oct 2025 12:41:44 -0400 Subject: [PATCH 0625/1093] cmd/k8s-operator: rename 'l' variables (#17700) Single letter 'l' variables can eventually become confusing when they're rendered in some fonts that make them similar to 1 or I. Updates #cleanup Signed-off-by: Fernando Serboncini --- cmd/k8s-operator/operator.go | 4 ++-- cmd/k8s-operator/proxygroup.go | 4 ++-- cmd/k8s-operator/proxygroup_test.go | 36 ++++++++++++++--------------- cmd/k8s-operator/sts_test.go | 10 ++++---- cmd/k8s-operator/tsrecorder.go | 4 ++-- cmd/k8s-operator/tsrecorder_test.go | 2 +- 6 files changed, 30 insertions(+), 30 deletions(-) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 89c8ff3e205bf..cc97b1be29cbe 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -636,7 +636,7 @@ func runReconcilers(opts reconcilerOpts) { recorder: eventRecorder, tsNamespace: opts.tailscaleNamespace, Client: mgr.GetClient(), - l: opts.log.Named("recorder-reconciler"), + log: opts.log.Named("recorder-reconciler"), clock: tstime.DefaultClock{}, tsClient: opts.tsClient, loginServer: opts.loginServer, @@ -691,7 +691,7 @@ func runReconcilers(opts reconcilerOpts) { Complete(&ProxyGroupReconciler{ recorder: eventRecorder, Client: mgr.GetClient(), - l: opts.log.Named("proxygroup-reconciler"), + log: opts.log.Named("proxygroup-reconciler"), clock: tstime.DefaultClock{}, tsClient: opts.tsClient, diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index debeb5c6b3442..946e017a26f00 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -80,7 +80,7 @@ var ( // ProxyGroupReconciler ensures cluster resources for a ProxyGroup definition. 
type ProxyGroupReconciler struct { client.Client - l *zap.SugaredLogger + log *zap.SugaredLogger recorder record.EventRecorder clock tstime.Clock tsClient tsClient @@ -101,7 +101,7 @@ type ProxyGroupReconciler struct { } func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger { - return r.l.With("ProxyGroup", name) + return r.log.With("ProxyGroup", name) } func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index d763cf92276ec..2bcc9fb7a9720 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -670,7 +670,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { t.Logf("created node %q with data", n.name) } - reconciler.l = zl.Sugar().With("TestName", tt.name).With("Reconcile", i) + reconciler.log = zl.Sugar().With("TestName", tt.name).With("Reconcile", i) pg.Spec.Replicas = r.replicas pc.Spec.StaticEndpoints = r.staticEndpointConfig @@ -784,7 +784,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { Client: fc, tsClient: tsClient, recorder: fr, - l: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), + log: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), clock: cl, } @@ -845,7 +845,7 @@ func TestProxyGroup(t *testing.T) { Client: fc, tsClient: tsClient, recorder: fr, - l: zl.Sugar(), + log: zl.Sugar(), clock: cl, } crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} @@ -1049,7 +1049,7 @@ func TestProxyGroupTypes(t *testing.T) { tsNamespace: tsNamespace, tsProxyImage: testProxyImage, Client: fc, - l: zl.Sugar(), + log: zl.Sugar(), tsClient: &fakeTSClient{}, clock: tstest.NewClock(tstest.ClockOpts{}), } @@ -1289,24 +1289,24 @@ func TestKubeAPIServerStatusConditionFlow(t *testing.T) { tsNamespace: tsNamespace, tsProxyImage: testProxyImage, Client: fc, - l: 
zap.Must(zap.NewDevelopment()).Sugar(), + log: zap.Must(zap.NewDevelopment()).Sugar(), tsClient: &fakeTSClient{}, clock: tstest.NewClock(tstest.ClockOpts{}), } expectReconciled(t, r, "", pg.Name) pg.ObjectMeta.Finalizers = append(pg.ObjectMeta.Finalizers, FinalizerName) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "", 0, r.clock, r.l) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "", 0, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Set kube-apiserver valid. mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { - tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.log) }) expectReconciled(t, r, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.l) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionTrue, reasonKubeAPIServerProxyValid, "", 1, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Set available. 
@@ -1318,17 +1318,17 @@ func TestKubeAPIServerStatusConditionFlow(t *testing.T) { TailnetIPs: []string{"1.2.3.4", "::1"}, }, } - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "", 0, r.clock, r.l) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "", 0, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "", 1, r.clock, r.log) expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Set kube-apiserver configured. mustUpdateStatus(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { - tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(p, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.log) }) expectReconciled(t, r, "", pg.Name) - tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.l) - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, "", 1, r.clock, r.l) + tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.log) + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionTrue, reasonProxyGroupReady, "", 1, r.clock, r.log) expectEqual(t, fc, pg, omitPGStatusConditionMessages) } @@ -1342,7 +1342,7 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { tsNamespace: tsNamespace, tsProxyImage: testProxyImage, Client: fc, - l: 
zap.Must(zap.NewDevelopment()).Sugar(), + log: zap.Must(zap.NewDevelopment()).Sugar(), tsClient: &fakeTSClient{}, clock: tstest.NewClock(tstest.ClockOpts{}), } @@ -1427,7 +1427,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { tsNamespace: tsNamespace, tsProxyImage: testProxyImage, Client: fc, - l: zap.Must(zap.NewDevelopment()).Sugar(), + log: zap.Must(zap.NewDevelopment()).Sugar(), tsClient: &fakeTSClient{}, clock: tstest.NewClock(tstest.ClockOpts{}), } @@ -1902,7 +1902,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { defaultProxyClass: tt.defaultProxyClass, Client: fc, tsClient: &fakeTSClient{}, - l: zl.Sugar(), + log: zl.Sugar(), clock: cl, } diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index ea28e77a14c36..afe54ed98bc49 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -71,11 +71,11 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { SecurityContext: &corev1.PodSecurityContext{ RunAsUser: ptr.To(int64(0)), }, - ImagePullSecrets: []corev1.LocalObjectReference{{Name: "docker-creds"}}, - NodeName: "some-node", - NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"}, - Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}}, - Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}}, + ImagePullSecrets: []corev1.LocalObjectReference{{Name: "docker-creds"}}, + NodeName: "some-node", + NodeSelector: map[string]string{"beta.kubernetes.io/os": "linux"}, + Affinity: &corev1.Affinity{NodeAffinity: &corev1.NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{}}}, + Tolerations: []corev1.Toleration{{Key: "", Operator: "Exists"}}, PriorityClassName: "high-priority", TopologySpreadConstraints: []corev1.TopologySpreadConstraint{ { diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index ec95ecf40dab5..c922f78feff38 100644 --- 
a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -54,7 +54,7 @@ var gaugeRecorderResources = clientmetric.NewGauge(kubetypes.MetricRecorderCount // Recorder CRs. type RecorderReconciler struct { client.Client - l *zap.SugaredLogger + log *zap.SugaredLogger recorder record.EventRecorder clock tstime.Clock tsNamespace string @@ -66,7 +66,7 @@ type RecorderReconciler struct { } func (r *RecorderReconciler) logger(name string) *zap.SugaredLogger { - return r.l.With("Recorder", name) + return r.log.With("Recorder", name) } func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index 990bd68193e8b..184af23447c7c 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -52,7 +52,7 @@ func TestRecorder(t *testing.T) { Client: fc, tsClient: tsClient, recorder: fr, - l: zl.Sugar(), + log: zl.Sugar(), clock: cl, loginServer: tsLoginServer, } From 74f1d8bd87931ded9540d7108afa36e28308be2e Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Wed, 29 Oct 2025 11:58:10 -0500 Subject: [PATCH 0626/1093] cmd/tailscale/cli: unhide serve get-config and serve set-config (#17598) Fixes tailscale/corp#33152 Signed-off-by: Harry Harpham --- cmd/tailscale/cli/serve_v2.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 30adcb8e7baa9..ad143cfdcc85d 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -292,7 +292,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { Name: "get-config", ShortUsage: fmt.Sprintf("tailscale %s get-config [--service=] [--all]", info.Name), ShortHelp: "Get service configuration to save to a file", - LongHelp: hidden + "Get the configuration for services that this node is currently hosting in a\n" + + LongHelp: "Get the 
configuration for services that this node is currently hosting in a\n" + "format that can later be provided to set-config. This can be used to declaratively set\n" + "configuration for a service host.", Exec: e.runServeGetConfig, @@ -305,10 +305,11 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { Name: "set-config", ShortUsage: fmt.Sprintf("tailscale %s set-config [--service=] [--all]", info.Name), ShortHelp: "Define service configuration from a file", - LongHelp: hidden + "Read the provided configuration file and use it to declaratively set the configuration\n" + + LongHelp: "Read the provided configuration file and use it to declaratively set the configuration\n" + "for either a single service, or for all services that this node is hosting. If --service is specified,\n" + "all endpoint handlers for that service are overwritten. If --all is specified, all endpoint handlers for\n" + - "all services are overwritten.", + "all services are overwritten.\n\n" + + "For information on the file format, see tailscale.com/kb/1589/tailscale-services-configuration-file", Exec: e.runServeSetConfig, FlagSet: e.newFlags("serve-set-config", func(fs *flag.FlagSet) { fs.BoolVar(&e.allServices, "all", false, "apply config to all services") From d5a40c01ab5bc5e33ef2b0ec4bea3cbd38050f48 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 29 Oct 2025 13:21:23 -0700 Subject: [PATCH 0627/1093] cmd/k8s-operator/generate: skip tests if no network or Helm is down Updates helm/helm#31434 Change-Id: I5eb20e97ff543f883d5646c9324f50f54180851d Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/generate/main.go | 2 +- cmd/k8s-operator/generate/main_test.go | 26 +++++++++++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 6904f1df02ec0..5fd5d551b5e02 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -144,7 +144,7 @@ func 
generate(baseDir string) error { if _, err := file.Write([]byte(helmConditionalEnd)); err != nil { return fmt.Errorf("error writing helm if-statement end: %w", err) } - return nil + return file.Close() } for _, crd := range []struct { crdPath, templatePath string diff --git a/cmd/k8s-operator/generate/main_test.go b/cmd/k8s-operator/generate/main_test.go index c7956dcdbef8f..5ea7fec80971a 100644 --- a/cmd/k8s-operator/generate/main_test.go +++ b/cmd/k8s-operator/generate/main_test.go @@ -7,26 +7,50 @@ package main import ( "bytes" + "context" + "net" "os" "os/exec" "path/filepath" "strings" "testing" + "time" + + "tailscale.com/tstest/nettest" + "tailscale.com/util/cibuild" ) func Test_generate(t *testing.T) { + nettest.SkipIfNoNetwork(t) + + ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second) + defer cancel() + if _, err := net.DefaultResolver.LookupIPAddr(ctx, "get.helm.sh"); err != nil { + // https://github.com/helm/helm/issues/31434 + t.Skipf("get.helm.sh seems down or unreachable; skipping test") + } + base, err := os.Getwd() base = filepath.Join(base, "../../../") if err != nil { t.Fatalf("error getting current working directory: %v", err) } defer cleanup(base) + + helmCLIPath := filepath.Join(base, "tool/helm") + if out, err := exec.Command(helmCLIPath, "version").CombinedOutput(); err != nil && cibuild.On() { + // It's not just DNS. Azure is generating bogus certs within GitHub Actions at least for + // helm. So try to run it and see if we can even fetch it. 
+ // + // https://github.com/helm/helm/issues/31434 + t.Skipf("error fetching helm; skipping test in CI: %v, %s", err, out) + } + if err := generate(base); err != nil { t.Fatalf("CRD template generation: %v", err) } tempDir := t.TempDir() - helmCLIPath := filepath.Join(base, "tool/helm") helmChartTemplatesPath := filepath.Join(base, "cmd/k8s-operator/deploy/chart") helmPackageCmd := exec.Command(helmCLIPath, "package", helmChartTemplatesPath, "--destination", tempDir, "--version", "0.0.1") helmPackageCmd.Stderr = os.Stderr From 89962546471472823f4fce7877ca7f906c07ecb0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 29 Oct 2025 13:02:29 -0700 Subject: [PATCH 0628/1093] sessionrecording: fix regression in recent http2 package change In 3f5c560fd45664813 I changed to use std net/http's HTTP/2 support, instead of pulling in x/net/http2. But I forgot to update DialTLSContext to DialContext, which meant it was falling back to using the std net.Dialer for its dials, instead of the passed-in one. The tests only passed because they were using localhost addresses, so the std net.Dialer worked. But in prod, where a tsnet Dialer would be needed, it didn't work, and would time out for 10 seconds before resorting to the old protocol. So this fixes the tests to use an isolated in-memory network to prevent that class of problem in the future. With the test change, the old code fails and the new code passes. Thanks to @jasonodonnell for debugging! 
Updates #17304 Updates 3f5c560fd45664813 Change-Id: I3602bafd07dc6548e2c62985af9ac0afb3a0e967 Signed-off-by: Brad Fitzpatrick --- sessionrecording/connect.go | 5 +---- sessionrecording/connect_test.go | 14 ++++++++++---- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index 8abf9dd7e9142..9d20b41f9b31a 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -405,10 +405,7 @@ func clientHTTP2(dialCtx context.Context, dial netx.DialFunc) *http.Client { return &http.Client{ Transport: &http.Transport{ Protocols: &p, - // Pretend like we're using TLS, but actually use the provided - // DialFunc underneath. This is necessary to convince the transport - // to actually dial. - DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout) defer cancel() go func() { diff --git a/sessionrecording/connect_test.go b/sessionrecording/connect_test.go index cacf061d79b79..e834828f5a6cc 100644 --- a/sessionrecording/connect_test.go +++ b/sessionrecording/connect_test.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" + "tailscale.com/net/memnet" ) func TestConnectToRecorder(t *testing.T) { @@ -145,7 +146,14 @@ func TestConnectToRecorder(t *testing.T) { t.Run(tt.desc, func(t *testing.T) { mux, uploadHash := tt.setup(t) - srv := httptest.NewUnstartedServer(mux) + memNet := &memnet.Network{} + ln := memNet.NewLocalTCPListener() + + srv := &httptest.Server{ + Config: &http.Server{Handler: mux}, + Listener: ln, + } + if tt.http2 { // Wire up h2c-compatible HTTP/2 server. 
This is optional // because the v1 recorder didn't support HTTP/2 and we try to @@ -159,10 +167,8 @@ func TestConnectToRecorder(t *testing.T) { srv.Start() t.Cleanup(srv.Close) - d := new(net.Dialer) - ctx := context.Background() - w, _, errc, err := ConnectToRecorder(ctx, []netip.AddrPort{netip.MustParseAddrPort(srv.Listener.Addr().String())}, d.DialContext) + w, _, errc, err := ConnectToRecorder(ctx, []netip.AddrPort{netip.MustParseAddrPort(ln.Addr().String())}, memNet.Dial) if err != nil { t.Fatalf("ConnectToRecorder: %v", err) } From 05d2dcaf49ab0dbbc6fd726e851c7c5bc2139dfa Mon Sep 17 00:00:00 2001 From: Erisa A Date: Wed, 29 Oct 2025 21:15:46 +0000 Subject: [PATCH 0629/1093] words: remove a fish (#17704) Some combinations are problematic in non-fish contexts. Updates #words Signed-off-by: Erisa A --- words/tails.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/words/tails.txt b/words/tails.txt index f5e93bf504687..b0119a7563224 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -755,7 +755,6 @@ pipefish seahorse flounder tilapia -chub dorado shad lionfish From d68513b0dbe352ca03165be35ebc5edd626ac83e Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Wed, 29 Oct 2025 21:27:59 -0400 Subject: [PATCH 0630/1093] ipn: add support for HTTP Redirects (#17594) Adds a new Redirect field to HTTPHandler for serving HTTP redirects from the Tailscale serve config. The redirect URL supports template variables ${HOST} and ${REQUEST_URI} that are resolved per request. By default, it redirects using HTTP Status 302 (Found). 
For another redirect status, like 301 - Moved Permanently, pass the HTTP status code followed by ':' on Redirect, like: "301:https://tailscale.com" Updates #11252 Updates #11330 Signed-off-by: Fernando Serboncini --- ipn/ipn_clone.go | 1 + ipn/ipn_view.go | 10 +++ ipn/ipnlocal/serve.go | 20 ++++++ ipn/ipnlocal/serve_test.go | 127 +++++++++++++++++++++++++++++++++++++ ipn/serve.go | 11 +++- 5 files changed, 168 insertions(+), 1 deletion(-) diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 8a0a3c833a0ac..3d2670947c7e3 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -242,6 +242,7 @@ var _HTTPHandlerCloneNeedsRegeneration = HTTPHandler(struct { Proxy string Text string AcceptAppCaps []tailcfg.PeerCapability + Redirect string }{}) // Clone makes a deep copy of WebServerConfig. diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 61d0dec23f218..ba5477a6d93ae 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -896,12 +896,22 @@ func (v HTTPHandlerView) AcceptAppCaps() views.Slice[tailcfg.PeerCapability] { return views.SliceOf(v.ж.AcceptAppCaps) } +// Redirect, if not empty, is the target URL to redirect requests to. +// By default, we redirect with HTTP 302 (Found) status. +// If Redirect starts with ':', then we use that status instead. +// +// The target URL supports the following expansion variables: +// - ${HOST}: replaced with the request's Host header value +// - ${REQUEST_URI}: replaced with the request's full URI (path and query string) +func (v HTTPHandlerView) Redirect() string { return v.ж.Redirect } + // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _HTTPHandlerViewNeedsRegeneration = HTTPHandler(struct { Path string Proxy string Text string AcceptAppCaps []tailcfg.PeerCapability + Redirect string }{}) // View returns a read-only view of WebServerConfig. 
diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index eb2c932c01165..554761ed7bb18 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -966,6 +966,19 @@ func (b *LocalBackend) addAppCapabilitiesHeader(r *httputil.ProxyRequest) error return nil } +// parseRedirectWithCode parses a redirect string that may optionally start with +// a HTTP redirect status code ("3xx:"). +// Returns the status code and the final redirect URL. +// If no code prefix is found, returns http.StatusFound (302). +func parseRedirectWithCode(redirect string) (code int, url string) { + if len(redirect) >= 4 && redirect[3] == ':' { + if statusCode, err := strconv.Atoi(redirect[:3]); err == nil && statusCode >= 300 && statusCode <= 399 { + return statusCode, redirect[4:] + } + } + return http.StatusFound, redirect +} + // serveWebHandler is an http.HandlerFunc that maps incoming requests to the // correct *http. func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { @@ -979,6 +992,13 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) { io.WriteString(w, s) return } + if v := h.Redirect(); v != "" { + code, v := parseRedirectWithCode(v) + v = strings.ReplaceAll(v, "${HOST}", r.Host) + v = strings.ReplaceAll(v, "${REQUEST_URI}", r.RequestURI) + http.Redirect(w, r, v, code) + return + } if v := h.Path(); v != "" { b.serveFileOrDirectory(w, r, v, mountPoint) return diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index a72c50c1f97e0..c3e5b2ff968b2 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -72,6 +72,41 @@ func TestExpandProxyArg(t *testing.T) { } } +func TestParseRedirectWithRedirectCode(t *testing.T) { + tests := []struct { + in string + wantCode int + wantURL string + }{ + {"301:https://example.com", 301, "https://example.com"}, + {"302:https://example.com", 302, "https://example.com"}, + {"303:/path", 303, "/path"}, + {"307:https://example.com/path?query=1", 307, 
"https://example.com/path?query=1"}, + {"308:https://example.com", 308, "https://example.com"}, + + {"https://example.com", 302, "https://example.com"}, + {"/path", 302, "/path"}, + {"http://example.com", 302, "http://example.com"}, + {"git://example.com", 302, "git://example.com"}, + + {"200:https://example.com", 302, "200:https://example.com"}, + {"404:https://example.com", 302, "404:https://example.com"}, + {"500:https://example.com", 302, "500:https://example.com"}, + {"30:https://example.com", 302, "30:https://example.com"}, + {"3:https://example.com", 302, "3:https://example.com"}, + {"3012:https://example.com", 302, "3012:https://example.com"}, + {"abc:https://example.com", 302, "abc:https://example.com"}, + {"301", 302, "301"}, + } + for _, tt := range tests { + gotCode, gotURL := parseRedirectWithCode(tt.in) + if gotCode != tt.wantCode || gotURL != tt.wantURL { + t.Errorf("parseRedirectWithCode(%q) = (%d, %q), want (%d, %q)", + tt.in, gotCode, gotURL, tt.wantCode, tt.wantURL) + } + } +} + func TestGetServeHandler(t *testing.T) { const serverName = "example.ts.net" conf1 := &ipn.ServeConfig{ @@ -1327,3 +1362,95 @@ func TestServeGRPCProxy(t *testing.T) { }) } } + +func TestServeHTTPRedirect(t *testing.T) { + b := newTestBackend(t) + + tests := []struct { + host string + path string + redirect string + reqURI string + wantCode int + wantLoc string + }{ + { + host: "hardcoded-root", + path: "/", + redirect: "https://example.com/", + reqURI: "/old", + wantCode: http.StatusFound, // 302 is the default + wantLoc: "https://example.com/", + }, + { + host: "template-host-and-uri", + path: "/", + redirect: "https://${HOST}${REQUEST_URI}", + reqURI: "/path?foo=bar", + wantCode: http.StatusFound, // 302 is the default + wantLoc: "https://template-host-and-uri/path?foo=bar", + }, + { + host: "custom-301", + path: "/", + redirect: "301:https://example.com/", + reqURI: "/old", + wantCode: http.StatusMovedPermanently, // 301 + wantLoc: "https://example.com/", + }, + { + 
host: "custom-307", + path: "/", + redirect: "307:https://example.com/new", + reqURI: "/old", + wantCode: http.StatusTemporaryRedirect, // 307 + wantLoc: "https://example.com/new", + }, + { + host: "custom-308", + path: "/", + redirect: "308:https://example.com/permanent", + reqURI: "/old", + wantCode: http.StatusPermanentRedirect, // 308 + wantLoc: "https://example.com/permanent", + }, + } + + for _, tt := range tests { + t.Run(tt.host, func(t *testing.T) { + conf := &ipn.ServeConfig{ + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + ipn.HostPort(tt.host + ":80"): { + Handlers: map[string]*ipn.HTTPHandler{ + tt.path: {Redirect: tt.redirect}, + }, + }, + }, + } + if err := b.SetServeConfig(conf, ""); err != nil { + t.Fatal(err) + } + + req := &http.Request{ + Host: tt.host, + URL: &url.URL{Path: tt.path}, + RequestURI: tt.reqURI, + TLS: &tls.ConnectionState{ServerName: tt.host}, + } + req = req.WithContext(serveHTTPContextKey.WithValue(req.Context(), &serveHTTPContext{ + DestPort: 80, + SrcAddr: netip.MustParseAddrPort("1.2.3.4:1234"), + })) + + w := httptest.NewRecorder() + b.serveWebHandler(w, req) + + if w.Code != tt.wantCode { + t.Errorf("got status %d, want %d", w.Code, tt.wantCode) + } + if got := w.Header().Get("Location"); got != tt.wantLoc { + t.Errorf("got Location %q, want %q", got, tt.wantLoc) + } + }) + } +} diff --git a/ipn/serve.go b/ipn/serve.go index 3f674d9ed00ae..2ac37a141271a 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -162,8 +162,17 @@ type HTTPHandler struct { AcceptAppCaps []tailcfg.PeerCapability `json:",omitempty"` // peer capabilities to forward in grant header, e.g. example.com/cap/mon + // Redirect, if not empty, is the target URL to redirect requests to. + // By default, we redirect with HTTP 302 (Found) status. + // If Redirect starts with ':', then we use that status instead. 
+ // + // The target URL supports the following expansion variables: + // - ${HOST}: replaced with the request's Host header value + // - ${REQUEST_URI}: replaced with the request's full URI (path and query string) + Redirect string `json:",omitempty"` + // TODO(bradfitz): bool to not enumerate directories? TTL on mapping for - // temporary ones? Error codes? Redirects? + // temporary ones? Error codes? } // WebHandlerExists reports whether if the ServeConfig Web handler exists for From 95426b79a9b102c9224cebbdab170033b65ddd08 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 30 Oct 2025 08:18:32 -0700 Subject: [PATCH 0631/1093] logtail: avoid racing eventbus subscriptions with shutdown (#17695) In #17639 we moved the subscription into NewLogger to ensure we would not race subscribing with shutdown of the eventbus client. Doing so fixed that problem, but exposed another: As we were only servicing events occasionally when waiting for the network to come up, we could leave the eventbus to stall in cases where a number of network deltas arrived later and weren't processed. To address that, let's separate the concerns: As before, we'll Subscribe early to avoid conflicts with shutdown; but instead of using the subscriber directly to determine readiness, we'll keep track of the last-known network state in a selectable condition that the subscriber updates for us. When we want to wait, we'll wait on that condition (or until our context ends), ensuring all the events get processed in a timely manner. Updates #17638 Updates #15160 Change-Id: I28339a372be4ab24be46e2834a218874c33a0d2d Signed-off-by: M. J. 
Fromberger --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + flake.nix | 2 +- go.mod | 3 ++- go.mod.sri | 2 +- go.sum | 10 ++++--- logtail/logtail.go | 50 ++++++++++++++++++++--------------- shell.nix | 2 +- tsnet/depaware.txt | 1 + 10 files changed, 43 insertions(+), 30 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 6cffda2ddb2c8..8d1f7fa066d17 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -12,6 +12,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket + github.com/creachadair/msync/trigger from tailscale.com/logtail 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index e92d41b9855df..c1708711a32fb 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -86,6 +86,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw + github.com/creachadair/msync/trigger from tailscale.com/logtail LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index a2a473a5068ec..80c8e04a823d4 100644 --- 
a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -9,6 +9,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket + github.com/creachadair/msync/trigger from tailscale.com/logtail W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc diff --git a/flake.nix b/flake.nix index 726757f7a76b7..da4c87a0bbe0b 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= +# nix-direnv cache busting line: sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= diff --git a/go.mod b/go.mod index 3c281fa7a34bf..12f7946b8508c 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( github.com/coder/websocket v1.8.12 github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf + github.com/creachadair/msync v0.7.1 github.com/creachadair/taskgroup v0.13.2 github.com/creack/pty v1.1.23 github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa @@ -114,7 +115,7 @@ require ( golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 - honnef.co/go/tools v0.5.1 + honnef.co/go/tools v0.6.1 k8s.io/api v0.32.0 k8s.io/apimachinery v0.32.0 k8s.io/apiserver v0.32.0 diff --git a/go.mod.sri b/go.mod.sri index f94054422c6d7..c9f537473daf7 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= +sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= diff --git a/go.sum b/go.sum index 
bc386d1fdb37f..eea0d6c7de11a 100644 --- a/go.sum +++ b/go.sum @@ -244,8 +244,10 @@ github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creachadair/mds v0.17.1 h1:lXQbTGKmb3nE3aK6OEp29L1gCx6B5ynzlQ6c1KOBurc= -github.com/creachadair/mds v0.17.1/go.mod h1:4b//mUiL8YldH6TImXjmW45myzTLNS1LLjOmrk888eg= +github.com/creachadair/mds v0.25.9 h1:080Hr8laN2h+l3NeVCGMBpXtIPnl9mz8e4HLraGPqtA= +github.com/creachadair/mds v0.25.9/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs= +github.com/creachadair/msync v0.7.1 h1:SeZmuEBXQPe5GqV/C94ER7QIZPwtvFbeQiykzt/7uho= +github.com/creachadair/msync v0.7.1/go.mod h1:8CcFlLsSujfHE5wWm19uUBLHIPDAUr6LXDwneVMO008= github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc= github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -1534,8 +1536,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= -honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= +honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= +honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= howett.net/plist v1.0.0 
h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= diff --git a/logtail/logtail.go b/logtail/logtail.go index 675422890149c..6ff4dd04f069a 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -25,6 +25,7 @@ import ( "sync/atomic" "time" + "github.com/creachadair/msync/trigger" "github.com/go-json-experiment/json/jsontext" "tailscale.com/envknob" "tailscale.com/net/netmon" @@ -124,6 +125,8 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if cfg.Bus != nil { l.eventClient = cfg.Bus.Client("logtail.Logger") + // Subscribe to change deltas from NetMon to detect when the network comes up. + eventbus.SubscribeFunc(l.eventClient, l.onChangeDelta) } l.SetSockstatsLabel(sockstats.LabelLogtailLogger) l.compressLogs = cfg.CompressLogs @@ -162,6 +165,7 @@ type Logger struct { httpDoCalls atomic.Int32 sockstatsLabel atomicSocktatsLabel eventClient *eventbus.Client + networkIsUp trigger.Cond // set/reset by netmon.ChangeDelta events procID uint32 includeProcSequence bool @@ -418,16 +422,36 @@ func (l *Logger) uploading(ctx context.Context) { } func (l *Logger) internetUp() bool { - if l.netMonitor == nil { - // No way to tell, so assume it is. + select { + case <-l.networkIsUp.Ready(): return true + default: + if l.netMonitor == nil { + return true // No way to tell, so assume it is. + } + return l.netMonitor.InterfaceState().AnyInterfaceUp() + } +} + +// onChangeDelta is an eventbus subscriber function that handles +// [netmon.ChangeDelta] events to detect whether the Internet is expected to be +// reachable. 
+func (l *Logger) onChangeDelta(delta *netmon.ChangeDelta) { + if delta.New.AnyInterfaceUp() { + fmt.Fprintf(l.stderr, "logtail: internet back up\n") + l.networkIsUp.Set() + } else { + fmt.Fprintf(l.stderr, "logtail: network changed, but is not up\n") + l.networkIsUp.Reset() } - return l.netMonitor.InterfaceState().AnyInterfaceUp() } func (l *Logger) awaitInternetUp(ctx context.Context) { if l.eventClient != nil { - l.awaitInternetUpBus(ctx) + select { + case <-l.networkIsUp.Ready(): + case <-ctx.Done(): + } return } upc := make(chan bool, 1) @@ -449,24 +473,6 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { } } -func (l *Logger) awaitInternetUpBus(ctx context.Context) { - if l.internetUp() { - return - } - sub := eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) - defer sub.Close() - select { - case delta := <-sub.Events(): - if delta.New.AnyInterfaceUp() { - fmt.Fprintf(l.stderr, "logtail: internet back up\n") - return - } - fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") - case <-ctx.Done(): - return - } -} - // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. 
diff --git a/shell.nix b/shell.nix index ec345998afe30..99cfbd24368bf 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= +# nix-direnv cache busting line: sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index cd734e9959041..ef0fe0667a00b 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -9,6 +9,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket LDW github.com/coder/websocket/internal/xsync from github.com/coder/websocket + github.com/creachadair/msync/trigger from tailscale.com/logtail W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc From adee8b9180cbdc0bd352ffbf11a7dba3b4e6b946 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Thu, 30 Oct 2025 13:57:39 +0000 Subject: [PATCH 0632/1093] cmd/tailscale/cli/serve_v2: improve validation error Specify the app apability that failed the test, instead of the entire comma-separated list. 
Fixes #cleanup Signed-off-by: Gesa Stupperich --- cmd/tailscale/cli/serve_v2.go | 2 +- cmd/tailscale/cli/serve_v2_test.go | 62 +++++++++++++++++++----------- 2 files changed, 40 insertions(+), 24 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index ad143cfdcc85d..74458a950dbfc 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -116,7 +116,7 @@ func (u *acceptAppCapsFlag) Set(s string) error { for _, appCap := range appCaps { appCap = strings.TrimSpace(appCap) if !validAppCap.MatchString(appCap) { - return fmt.Errorf("%q does not match the form {domain}/{name}, where domain must be a fully qualified domain name", s) + return fmt.Errorf("%q does not match the form {domain}/{name}, where domain must be a fully qualified domain name", appCap) } *u.Value = append(*u.Value, tailcfg.PeerCapability(appCap)) } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index dfa17f1faee30..7f7f2c37c97e7 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -12,6 +12,7 @@ import ( "os" "path/filepath" "reflect" + "regexp" "slices" "strconv" "strings" @@ -908,8 +909,13 @@ func TestServeDevConfigMutations(t *testing.T) { name: "invalid_accept_caps_invalid_app_cap", steps: []step{ { - command: cmd("serve --bg --accept-app-caps=example/cap/foo 3000"), // should be {domain.tld}/{name} - wantErr: anyErr(), + command: cmd("serve --bg --accept-app-caps=example.com/cap/fine,NOTFINE 3000"), // should be {domain.tld}/{name} + wantErr: func(err error) (badErrMsg string) { + if err == nil || !strings.Contains(err.Error(), fmt.Sprintf("%q does not match", "NOTFINE")) { + return fmt.Sprintf("wanted validation error that quotes the non-matching capability (and nothing more) but got %q", err.Error()) + } + return "" + }, }, }, }, @@ -1231,10 +1237,11 @@ func TestSrcTypeFromFlags(t *testing.T) { func TestAcceptSetAppCapsFlag(t *testing.T) { testCases := 
[]struct { - name string - inputs []string - expectErr bool - expectedValue []tailcfg.PeerCapability + name string + inputs []string + expectErr bool + expectErrToMatch *regexp.Regexp + expectedValue []tailcfg.PeerCapability }{ { name: "valid_simple", @@ -1262,7 +1269,7 @@ func TestAcceptSetAppCapsFlag(t *testing.T) { }, { name: "valid_multiple_sets", - inputs: []string{"one.com/foo", "two.com/bar"}, + inputs: []string{"one.com/foo,two.com/bar"}, expectErr: false, expectedValue: []tailcfg.PeerCapability{"one.com/foo", "two.com/bar"}, }, @@ -1273,10 +1280,11 @@ func TestAcceptSetAppCapsFlag(t *testing.T) { expectedValue: nil, // Empty string should be a no-op and not append anything. }, { - name: "invalid_path_chars", - inputs: []string{"domain.com/path_with_underscore"}, - expectErr: true, - expectedValue: nil, // Slice should remain empty. + name: "invalid_path_chars", + inputs: []string{"domain.com/path_with_underscore"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"domain.com/path_with_underscore"`), + expectedValue: nil, // Slice should remain empty. 
}, { name: "valid_subdomain", @@ -1285,22 +1293,25 @@ func TestAcceptSetAppCapsFlag(t *testing.T) { expectedValue: []tailcfg.PeerCapability{"sub.domain.com/name"}, }, { - name: "invalid_no_path", - inputs: []string{"domain.com/"}, - expectErr: true, - expectedValue: nil, + name: "invalid_no_path", + inputs: []string{"domain.com/"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"domain.com/"`), + expectedValue: nil, }, { - name: "invalid_no_domain", - inputs: []string{"/path/only"}, - expectErr: true, - expectedValue: nil, + name: "invalid_no_domain", + inputs: []string{"/path/only"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"/path/only"`), + expectedValue: nil, }, { - name: "some_invalid_some_valid", - inputs: []string{"one.com/foo", "bad/bar", "two.com/baz"}, - expectErr: true, - expectedValue: []tailcfg.PeerCapability{"one.com/foo"}, // Parsing will stop after first error + name: "some_invalid_some_valid", + inputs: []string{"one.com/foo,bad/bar,two.com/baz"}, + expectErr: true, + expectErrToMatch: regexp.MustCompile(`"bad/bar"`), + expectedValue: []tailcfg.PeerCapability{"one.com/foo"}, // Parsing will stop after first error }, } @@ -1320,6 +1331,11 @@ func TestAcceptSetAppCapsFlag(t *testing.T) { if tc.expectErr && err == nil { t.Errorf("expected an error, but got none") } + if tc.expectErrToMatch != nil { + if !tc.expectErrToMatch.MatchString(err.Error()) { + t.Errorf("expected error to match %q, but was %q", tc.expectErrToMatch, err) + } + } if !tc.expectErr && err != nil { t.Errorf("did not expect an error, but got: %v", err) } From b6c6960e40a79bc8869b004edc7d17df06a46dec Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Thu, 30 Oct 2025 17:12:08 +0000 Subject: [PATCH 0633/1093] control/controlclient: remove unused reference to mapCtx (#17614) Updates #cleanup Signed-off-by: James Sanderson --- control/controlclient/auto.go | 3 +-- util/backoff/backoff.go | 6 ++++++ 2 files changed, 7 insertions(+), 2 deletions(-) 
diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 9f5bf38aeecc6..52255e89f9600 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -433,7 +433,6 @@ func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) { c := mrs.c c.mu.Lock() - ctx := c.mapCtx c.inMapPoll = true if c.loggedIn { c.state = StateSynchronized @@ -447,7 +446,7 @@ func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) { c.sendStatus("mapRoutine-got-netmap", nil, "", nm) } // Reset the backoff timer if we got a netmap. - mrs.bo.BackOff(ctx, nil) + mrs.bo.Reset() } func (mrs mapRoutineState) UpdateNetmapDelta(muts []netmap.NodeMutation) bool { diff --git a/util/backoff/backoff.go b/util/backoff/backoff.go index c6aeae998fa27..95089fc2479ff 100644 --- a/util/backoff/backoff.go +++ b/util/backoff/backoff.go @@ -78,3 +78,9 @@ func (b *Backoff) BackOff(ctx context.Context, err error) { case <-tChannel: } } + +// Reset resets the backoff schedule, equivalent to calling BackOff with a nil +// error. +func (b *Backoff) Reset() { + b.n = 0 +} From f522b9dbb77bc82be6fc46cacc94148f3bafdf66 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 30 Oct 2025 10:32:30 -0700 Subject: [PATCH 0634/1093] feature/tpm: protect all TPM handle operations with a mutex (#17708) In particular on Windows, the `transport.TPMCloser` we get is not safe for concurrent use. This is especially noticeable because `tpm.attestationKey.Clone` uses the same open handle as the original key. So wrap the operations on ak.tpm with a mutex and make a deep copy with a new connection in Clone. 
Updates #15830 Updates #17662 Updates #17644 Signed-off-by: Andrew Lytvynov --- feature/tpm/attestation.go | 40 +++++++++++++++++--- feature/tpm/attestation_test.go | 66 +++++++++++++++++++++++++++++++++ 2 files changed, 100 insertions(+), 6 deletions(-) diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 5fbda3b17bab3..597d4a6493829 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -10,6 +10,7 @@ import ( "fmt" "io" "log" + "sync" "github.com/google/go-tpm/tpm2" "github.com/google/go-tpm/tpm2/transport" @@ -19,7 +20,8 @@ import ( ) type attestationKey struct { - tpm transport.TPMCloser + tpmMu sync.Mutex + tpm transport.TPMCloser // private and public parts of the TPM key as returned from tpm2.Create. // These are used for serialization. tpmPrivate tpm2.TPM2BPrivate @@ -144,7 +146,7 @@ type attestationKeySerialized struct { // MarshalJSON implements json.Marshaler. func (ak *attestationKey) MarshalJSON() ([]byte, error) { - if ak == nil || ak.IsZero() { + if ak == nil || len(ak.tpmPublic.Bytes()) == 0 || len(ak.tpmPrivate.Buffer) == 0 { return []byte("null"), nil } return json.Marshal(attestationKeySerialized{ @@ -163,6 +165,13 @@ func (ak *attestationKey) UnmarshalJSON(data []byte) (retErr error) { ak.tpmPrivate = tpm2.TPM2BPrivate{Buffer: aks.TPMPrivate} ak.tpmPublic = tpm2.BytesAs2B[tpm2.TPMTPublic, *tpm2.TPMTPublic](aks.TPMPublic) + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() + if ak.tpm != nil { + ak.tpm.Close() + ak.tpm = nil + } + tpm, err := open() if err != nil { return key.ErrUnsupported @@ -182,6 +191,9 @@ func (ak *attestationKey) Public() crypto.PublicKey { } func (ak *attestationKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() + if !ak.loaded() { return nil, errors.New("tpm2 attestation key is not loaded during Sign") } @@ -247,6 +259,9 @@ func addASN1IntBytes(b *cryptobyte.Builder, bytes []byte) { } func (ak 
*attestationKey) Close() error { + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() + var errs []error if ak.handle != nil && ak.tpm != nil { _, err := tpm2.FlushContext{FlushHandle: ak.handle.Handle}.Execute(ak.tpm) @@ -262,18 +277,31 @@ func (ak *attestationKey) Clone() key.HardwareAttestationKey { if ak == nil { return nil } - return &attestationKey{ - tpm: ak.tpm, + + tpm, err := open() + if err != nil { + log.Printf("[unexpected] failed to open a TPM connection in feature/tpm.attestationKey.Clone: %v", err) + return nil + } + akc := &attestationKey{ + tpm: tpm, tpmPrivate: ak.tpmPrivate, tpmPublic: ak.tpmPublic, - handle: ak.handle, - pub: ak.pub, } + if err := akc.load(); err != nil { + log.Printf("[unexpected] failed to load TPM key in feature/tpm.attestationKey.Clone: %v", err) + tpm.Close() + return nil + } + return akc } func (ak *attestationKey) IsZero() bool { if ak == nil { return true } + + ak.tpmMu.Lock() + defer ak.tpmMu.Unlock() return !ak.loaded() } diff --git a/feature/tpm/attestation_test.go b/feature/tpm/attestation_test.go index ead88c955aeea..e7ff729871230 100644 --- a/feature/tpm/attestation_test.go +++ b/feature/tpm/attestation_test.go @@ -10,6 +10,8 @@ import ( "crypto/rand" "crypto/sha256" "encoding/json" + "runtime" + "sync" "testing" ) @@ -62,6 +64,37 @@ func TestAttestationKeySign(t *testing.T) { } } +func TestAttestationKeySignConcurrent(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + data := []byte("secrets") + digest := sha256.Sum256(data) + + wg := sync.WaitGroup{} + for range runtime.GOMAXPROCS(-1) { + wg.Go(func() { + // Check signature/validation round trip. 
+ sig, err := ak.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if !ecdsa.VerifyASN1(ak.Public().(*ecdsa.PublicKey), digest[:], sig) { + t.Errorf("ecdsa.VerifyASN1 failed") + } + }) + } + wg.Wait() +} + func TestAttestationKeyUnmarshal(t *testing.T) { skipWithoutTPM(t) ak, err := newAttestationKey() @@ -96,3 +129,36 @@ func TestAttestationKeyUnmarshal(t *testing.T) { t.Error("unmarshalled public key is not the same as the original public key") } } + +func TestAttestationKeyClone(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + + ak2 := ak.Clone() + if ak2 == nil { + t.Fatal("Clone failed") + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + // Close the original key, ak2 should remain open and usable. + if err := ak.Close(); err != nil { + t.Fatal(err) + } + + data := []byte("secrets") + digest := sha256.Sum256(data) + // Check signature/validation round trip using cloned key. + sig, err := ak2.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if !ecdsa.VerifyASN1(ak2.Public().(*ecdsa.PublicKey), digest[:], sig) { + t.Errorf("ecdsa.VerifyASN1 failed") + } +} From 061e6266cf4e9c9a0f06b0d60d4d7840f6b7678d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Thu, 30 Oct 2025 14:40:57 -0700 Subject: [PATCH 0635/1093] util/eventbus: allow logging of slow subscribers (#17705) Add options to the eventbus.Bus to plumb in a logger. Route that logger in to the subscriber machinery, and trigger a log message to it when a subscriber fails to respond to its delivered events for 5s or more. The log message includes the package, filename, and line number of the call site that created the subscription. Add tests that verify this works. Updates #17680 Change-Id: I0546516476b1e13e6a9cf79f19db2fe55e56c698 Signed-off-by: M. J. 
Fromberger --- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 +-- shell.nix | 2 +- util/eventbus/bus.go | 35 ++++++++++++++++-- util/eventbus/bus_test.go | 73 ++++++++++++++++++++++++++++++++++++++ util/eventbus/client.go | 7 ++-- util/eventbus/debug.go | 36 +++++++++++++++++++ util/eventbus/subscribe.go | 35 ++++++++++++++++-- 10 files changed, 185 insertions(+), 13 deletions(-) diff --git a/flake.nix b/flake.nix index da4c87a0bbe0b..e50f396387574 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= +# nix-direnv cache busting line: sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= diff --git a/go.mod b/go.mod index 12f7946b8508c..836810fc0319c 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/snappy v0.0.4 github.com/golangci/golangci-lint v1.57.1 - github.com/google/go-cmp v0.6.0 + github.com/google/go-cmp v0.7.0 github.com/google/go-containerregistry v0.20.3 github.com/google/go-tpm v0.9.4 github.com/google/gopacket v1.1.19 diff --git a/go.mod.sri b/go.mod.sri index c9f537473daf7..108423f4e1ad4 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= +sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= diff --git a/go.sum b/go.sum index eea0d6c7de11a..a0d9461ece2fb 100644 --- a/go.sum +++ b/go.sum @@ -492,8 +492,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= github.com/google/go-github/v66 v66.0.0 h1:ADJsaXj9UotwdgK8/iFZtv7MLc8E8WBl62WLd/D/9+M= diff --git a/shell.nix b/shell.nix index 99cfbd24368bf..6b579b45552cc 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-pZCy1KHUe7f7cjm816OwA+bjGrSRnSTxkvCmB4cmWqw= +# nix-direnv cache busting line: sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index d1507d8e67587..b1639136a5133 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -5,10 +5,12 @@ package eventbus import ( "context" + "log" "reflect" "slices" "sync" + "tailscale.com/types/logger" "tailscale.com/util/set" ) @@ -30,6 +32,7 @@ type Bus struct { write chan PublishedEvent snapshot chan chan []PublishedEvent routeDebug hook[RoutedEvent] + logf logger.Logf topicsMu sync.Mutex topics map[reflect.Type][]*subscribeState @@ -40,19 +43,42 @@ type Bus struct { clients set.Set[*Client] } -// New returns a new bus. Use [Publish] to make event publishers, -// and [Subscribe] and [SubscribeFunc] to make event subscribers. -func New() *Bus { +// New returns a new bus with default options. It is equivalent to +// calling [NewWithOptions] with zero [BusOptions]. +func New() *Bus { return NewWithOptions(BusOptions{}) } + +// NewWithOptions returns a new [Bus] with the specified [BusOptions]. +// Use [Bus.Client] to construct clients on the bus. +// Use [Publish] to make event publishers. +// Use [Subscribe] and [SubscribeFunc] to make event subscribers. 
+func NewWithOptions(opts BusOptions) *Bus { ret := &Bus{ write: make(chan PublishedEvent), snapshot: make(chan chan []PublishedEvent), topics: map[reflect.Type][]*subscribeState{}, clients: set.Set[*Client]{}, + logf: opts.logger(), } ret.router = runWorker(ret.pump) return ret } +// BusOptions are optional parameters for a [Bus]. A zero value is ready for +// use and provides defaults as described. +type BusOptions struct { + // Logf, if non-nil, is used for debug logs emitted by the bus and clients, + // publishers, and subscribers under its care. If it is nil, logs are sent + // to [log.Printf]. + Logf logger.Logf +} + +func (o BusOptions) logger() logger.Logf { + if o.Logf == nil { + return log.Printf + } + return o.Logf +} + // Client returns a new client with no subscriptions. Use [Subscribe] // to receive events, and [Publish] to emit events. // @@ -166,6 +192,9 @@ func (b *Bus) pump(ctx context.Context) { } } +// logger returns a [logger.Logf] to which logs related to bus activity should be written. 
+func (b *Bus) logger() logger.Logf { return b.logf } + func (b *Bus) dest(t reflect.Type) []*subscribeState { b.topicsMu.Lock() defer b.topicsMu.Unlock() diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index de292cf1adb5b..1e0cd8abf2cff 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -4,8 +4,11 @@ package eventbus_test import ( + "bytes" "errors" "fmt" + "log" + "regexp" "testing" "testing/synctest" "time" @@ -436,6 +439,76 @@ func TestMonitor(t *testing.T) { t.Run("Wait", testMon(t, func(c *eventbus.Client, m eventbus.Monitor) { c.Close(); m.Wait() })) } +func TestSlowSubs(t *testing.T) { + swapLogBuf := func(t *testing.T) *bytes.Buffer { + logBuf := new(bytes.Buffer) + save := log.Writer() + log.SetOutput(logBuf) + t.Cleanup(func() { log.SetOutput(save) }) + return logBuf + } + + t.Run("Subscriber", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + buf := swapLogBuf(t) + + b := eventbus.New() + defer b.Close() + + pc := b.Client("pub") + p := eventbus.Publish[EventA](pc) + + sc := b.Client("sub") + s := eventbus.Subscribe[EventA](sc) + + go func() { + time.Sleep(6 * time.Second) // trigger the slow check at 5s. + t.Logf("Subscriber accepted %v", <-s.Events()) + }() + + p.Publish(EventA{12345}) + + time.Sleep(7 * time.Second) // advance time... + synctest.Wait() // subscriber is done + + want := regexp.MustCompile(`^.* tailscale.com/util/eventbus_test bus_test.go:\d+: ` + + `subscriber for eventbus_test.EventA is slow.*`) + if got := buf.String(); !want.MatchString(got) { + t.Errorf("Wrong log output\ngot: %q\nwant: %s", got, want) + } + }) + }) + + t.Run("SubscriberFunc", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + buf := swapLogBuf(t) + + b := eventbus.New() + defer b.Close() + + pc := b.Client("pub") + p := eventbus.Publish[EventB](pc) + + sc := b.Client("sub") + eventbus.SubscribeFunc[EventB](sc, func(e EventB) { + time.Sleep(6 * time.Second) // trigger the slow check at 5s. 
+ t.Logf("SubscriberFunc processed %v", e) + }) + + p.Publish(EventB{67890}) + + time.Sleep(7 * time.Second) // advance time... + synctest.Wait() // subscriber is done + + want := regexp.MustCompile(`^.* tailscale.com/util/eventbus_test bus_test.go:\d+: ` + + `subscriber for eventbus_test.EventB is slow.*`) + if got := buf.String(); !want.MatchString(got) { + t.Errorf("Wrong log output\ngot: %q\nwant: %s", got, want) + } + }) + }) +} + func TestRegression(t *testing.T) { bus := eventbus.New() t.Cleanup(bus.Close) diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 9e3f3ee76cc31..c119c67a939c2 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -7,6 +7,7 @@ import ( "reflect" "sync" + "tailscale.com/types/logger" "tailscale.com/util/set" ) @@ -29,6 +30,8 @@ type Client struct { func (c *Client) Name() string { return c.name } +func (c *Client) logger() logger.Logf { return c.bus.logger() } + // Close closes the client. It implicitly closes all publishers and // subscribers obtained from this client. 
func (c *Client) Close() { @@ -142,7 +145,7 @@ func Subscribe[T any](c *Client) *Subscriber[T] { } r := c.subscribeStateLocked() - s := newSubscriber[T](r) + s := newSubscriber[T](r, logfForCaller(c.logger())) r.addSubscriber(s) return s } @@ -165,7 +168,7 @@ func SubscribeFunc[T any](c *Client, f func(T)) *SubscriberFunc[T] { } r := c.subscribeStateLocked() - s := newSubscriberFunc[T](r, f) + s := newSubscriberFunc[T](r, f, logfForCaller(c.logger())) r.addSubscriber(s) return s } diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index 6d5463bece7b2..2f2c9589ad0e2 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -6,12 +6,22 @@ package eventbus import ( "cmp" "fmt" + "path/filepath" "reflect" + "runtime" "slices" + "strings" "sync" "sync/atomic" + "time" + + "tailscale.com/types/logger" ) +// slowSubscriberTimeout is a timeout after which a subscriber that does not +// accept a pending event will be flagged as being slow. +const slowSubscriberTimeout = 5 * time.Second + // A Debugger offers access to a bus's privileged introspection and // debugging facilities. // @@ -204,3 +214,29 @@ type DebugTopic struct { Publisher string Subscribers []string } + +// logfForCaller returns a [logger.Logf] that prefixes its output with the +// package, filename, and line number of the caller's caller. +// If logf == nil, it returns [logger.Discard]. +// If the caller location could not be determined, it returns logf unmodified. 
+func logfForCaller(logf logger.Logf) logger.Logf { + if logf == nil { + return logger.Discard + } + pc, fpath, line, _ := runtime.Caller(2) // +1 for my caller, +1 for theirs + if f := runtime.FuncForPC(pc); f != nil { + return logger.WithPrefix(logf, fmt.Sprintf("%s %s:%d: ", funcPackageName(f.Name()), filepath.Base(fpath), line)) + } + return logf +} + +func funcPackageName(funcName string) string { + ls := max(strings.LastIndex(funcName, "/"), 0) + for { + i := strings.LastIndex(funcName, ".") + if i <= ls { + return funcName + } + funcName = funcName[:i] + } +} diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index c35c7e7f05682..0b821b3f51586 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -8,6 +8,9 @@ import ( "fmt" "reflect" "sync" + "time" + + "tailscale.com/types/logger" ) type DeliveredEvent struct { @@ -182,12 +185,18 @@ type Subscriber[T any] struct { stop stopFlag read chan T unregister func() + logf logger.Logf + slow *time.Timer // used to detect slow subscriber service } -func newSubscriber[T any](r *subscribeState) *Subscriber[T] { +func newSubscriber[T any](r *subscribeState, logf logger.Logf) *Subscriber[T] { + slow := time.NewTimer(0) + slow.Stop() // reset in dispatch return &Subscriber[T]{ read: make(chan T), unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) }, + logf: logf, + slow: slow, } } @@ -212,6 +221,11 @@ func (s *Subscriber[T]) monitor(debugEvent T) { func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { t := vals.Peek().Event.(T) + + start := time.Now() + s.slow.Reset(slowSubscriberTimeout) + defer s.slow.Stop() + for { // Keep the cases in this select in sync with subscribeState.pump // above. 
The only difference should be that this select @@ -226,6 +240,9 @@ func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent return false case ch := <-snapshot: ch <- vals.Snapshot() + case <-s.slow.C: + s.logf("subscriber for %T is slow (%v elapsed)", t, time.Since(start)) + s.slow.Reset(slowSubscriberTimeout) } } } @@ -260,12 +277,18 @@ type SubscriberFunc[T any] struct { stop stopFlag read func(T) unregister func() + logf logger.Logf + slow *time.Timer // used to detect slow subscriber service } -func newSubscriberFunc[T any](r *subscribeState, f func(T)) *SubscriberFunc[T] { +func newSubscriberFunc[T any](r *subscribeState, f func(T), logf logger.Logf) *SubscriberFunc[T] { + slow := time.NewTimer(0) + slow.Stop() // reset in dispatch return &SubscriberFunc[T]{ read: f, unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) }, + logf: logf, + slow: slow, } } @@ -285,6 +308,11 @@ func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredE t := vals.Peek().Event.(T) callDone := make(chan struct{}) go s.runCallback(t, callDone) + + start := time.Now() + s.slow.Reset(slowSubscriberTimeout) + defer s.slow.Stop() + // Keep the cases in this select in sync with subscribeState.pump // above. The only difference should be that this select // delivers a value by calling s.read. @@ -299,6 +327,9 @@ func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredE return false case ch := <-snapshot: ch <- vals.Snapshot() + case <-s.slow.C: + s.logf("subscriber for %T is slow (%v elapsed)", t, time.Since(start)) + s.slow.Reset(slowSubscriberTimeout) } } } From 4c856078e4912a3f3a6d1e31d0db03e423685f47 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 31 Oct 2025 09:58:09 -0700 Subject: [PATCH 0636/1093] util/eventbus: block for the subscriber during SubscribeFunc close (#17642) Prior to this change a SubscriberFunc treated the call to the subscriber's function as the completion of delivery. 
But that means when we are closing the subscriber, that callback could continue to execute for some time after the close returns. For channel-based subscribers that works OK because the close takes effect before the subscriber ever sees the event. To make the two subscriber types symmetric, we should also wait for the callback to finish before returning. This ensures that a Close of the client means the same thing with both kinds of subscriber. Updates #17638 Change-Id: I82fd31bcaa4e92fab07981ac0e57e6e3a7d9d60b Signed-off-by: M. J. Fromberger --- util/eventbus/bus_test.go | 71 +++++++++++++++++++++++++++++++++----- util/eventbus/subscribe.go | 7 ++++ 2 files changed, 70 insertions(+), 8 deletions(-) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 1e0cd8abf2cff..61728fbfd93d2 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -89,6 +89,61 @@ func TestSubscriberFunc(t *testing.T) { } }) + t.Run("CloseWait", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client(t.Name()) + + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + time.Sleep(2 * time.Second) + }) + + p := eventbus.Publish[EventA](c) + p.Publish(EventA{12345}) + + synctest.Wait() // subscriber has the event + c.Close() + + // If close does not wait for the subscriber, the test will fail + // because an active goroutine remains in the bubble. + }) + }) + + t.Run("CloseWait/Belated", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + buf := swapLogBuf(t) + + b := eventbus.New() + defer b.Close() + + c := b.Client(t.Name()) + + // This subscriber stalls for a long time, so that when we try to + // close the client it gives up and returns in the timeout condition. 
+ eventbus.SubscribeFunc[EventA](c, func(e EventA) { + time.Sleep(time.Minute) // notably, longer than the wait period + }) + + p := eventbus.Publish[EventA](c) + p.Publish(EventA{12345}) + + synctest.Wait() // subscriber has the event + c.Close() + + // Verify that the logger recorded that Close gave up on the slowpoke. + want := regexp.MustCompile(`^.* tailscale.com/util/eventbus_test bus_test.go:\d+: ` + + `giving up on subscriber for eventbus_test.EventA after \d+s at close.*`) + if got := buf.String(); !want.MatchString(got) { + t.Errorf("Wrong log output\ngot: %q\nwant %s", got, want) + } + + // Wait for the subscriber to actually finish to clean up the goroutine. + time.Sleep(2 * time.Minute) + }) + }) + t.Run("SubscriberPublishes", func(t *testing.T) { synctest.Test(t, func(t *testing.T) { b := eventbus.New() @@ -440,14 +495,6 @@ func TestMonitor(t *testing.T) { } func TestSlowSubs(t *testing.T) { - swapLogBuf := func(t *testing.T) *bytes.Buffer { - logBuf := new(bytes.Buffer) - save := log.Writer() - log.SetOutput(logBuf) - t.Cleanup(func() { log.SetOutput(save) }) - return logBuf - } - t.Run("Subscriber", func(t *testing.T) { synctest.Test(t, func(t *testing.T) { buf := swapLogBuf(t) @@ -571,3 +618,11 @@ func (q *queueChecker) Got(v any) { func (q *queueChecker) Empty() bool { return len(q.want) == 0 } + +func swapLogBuf(t *testing.T) *bytes.Buffer { + logBuf := new(bytes.Buffer) + save := log.Writer() + log.SetOutput(logBuf) + t.Cleanup(func() { log.SetOutput(save) }) + return logBuf +} diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 0b821b3f51586..03d577f27c3fe 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -324,6 +324,13 @@ func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredE case val := <-acceptCh(): vals.Add(val) case <-ctx.Done(): + // Wait for the callback to be complete, but not forever. 
+ s.slow.Reset(5 * slowSubscriberTimeout) + select { + case <-s.slow.C: + s.logf("giving up on subscriber for %T after %v at close", t, time.Since(start)) + case <-callDone: + } return false case ch := <-snapshot: ch <- vals.Snapshot() From db7dcd516f7da6792cd4fa44b97bc510102941c5 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 31 Oct 2025 14:28:39 -0700 Subject: [PATCH 0637/1093] Revert "control/controlclient: back out HW key attestation (#17664)" (#17732) This reverts commit a760cbe33f4bed64b63c6118808d02b2771ff785. Signed-off-by: Andrew Lytvynov --- control/controlclient/direct.go | 22 +++++++++++++++ ipn/ipnlocal/hwattest.go | 48 +++++++++++++++++++++++++++++++++ ipn/ipnlocal/local.go | 1 + ipn/ipnlocal/profiles.go | 10 +++++++ ipn/ipnlocal/profiles_test.go | 1 + ipn/prefs_test.go | 2 +- types/persist/persist.go | 18 +++++++++++-- types/persist/persist_clone.go | 4 +++ types/persist/persist_test.go | 2 +- types/persist/persist_view.go | 10 ++++--- 10 files changed, 110 insertions(+), 8 deletions(-) create mode 100644 ipn/ipnlocal/hwattest.go diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index fe7cc235b05f8..63a12b2495fd8 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -7,6 +7,8 @@ import ( "bytes" "cmp" "context" + "crypto" + "crypto/sha256" "encoding/binary" "encoding/json" "errors" @@ -946,6 +948,26 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap ConnectionHandleForTest: connectionHandleForTest, } + // If we have a hardware attestation key, sign the node key with it and send + // the key & signature in the map request. 
+ if buildfeatures.HasTPM { + if k := persist.AsStruct().AttestationKey; k != nil && !k.IsZero() { + hwPub := key.HardwareAttestationPublicFromPlatformKey(k) + request.HardwareAttestationKey = hwPub + + t := c.clock.Now() + msg := fmt.Sprintf("%d|%s", t.Unix(), nodeKey.String()) + digest := sha256.Sum256([]byte(msg)) + sig, err := k.Sign(nil, digest[:], crypto.SHA256) + if err != nil { + c.logf("failed to sign node key with hardware attestation key: %v", err) + } else { + request.HardwareAttestationKeySignature = sig + request.HardwareAttestationKeySignatureTimestamp = t + } + } + } + var extraDebugFlags []string if buildfeatures.HasAdvertiseRoutes && hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && ipForwardingBroken(hi.RoutableIPs, c.netMon.InterfaceState()) { diff --git a/ipn/ipnlocal/hwattest.go b/ipn/ipnlocal/hwattest.go new file mode 100644 index 0000000000000..2c93cad4c97ff --- /dev/null +++ b/ipn/ipnlocal/hwattest.go @@ -0,0 +1,48 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tpm + +package ipnlocal + +import ( + "errors" + + "tailscale.com/feature" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/persist" +) + +func init() { + feature.HookGenerateAttestationKeyIfEmpty.Set(generateAttestationKeyIfEmpty) +} + +// generateAttestationKeyIfEmpty generates a new hardware attestation key if +// none exists. It returns true if a new key was generated and stored in +// p.AttestationKey. 
+func generateAttestationKeyIfEmpty(p *persist.Persist, logf logger.Logf) (bool, error) { + // attempt to generate a new hardware attestation key if none exists + var ak key.HardwareAttestationKey + if p != nil { + ak = p.AttestationKey + } + + if ak == nil || ak.IsZero() { + var err error + ak, err = key.NewHardwareAttestationKey() + if err != nil { + if !errors.Is(err, key.ErrUnsupported) { + logf("failed to create hardware attestation key: %v", err) + } + } else if ak != nil { + logf("using new hardware attestation key: %v", ak.Public()) + if p == nil { + p = &persist.Persist{} + } + p.AttestationKey = ak + return true, nil + } + } + return false, nil +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index df278a3253553..ffab4b69dbd45 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1190,6 +1190,7 @@ func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView { p2.Persist.PrivateNodeKey = key.NodePrivate{} p2.Persist.OldPrivateNodeKey = key.NodePrivate{} p2.Persist.NetworkLockKey = key.NLPrivate{} + p2.Persist.AttestationKey = nil return p2.View() } diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 3e80cdaa93d1f..9c217637890cc 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -19,7 +19,9 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/persist" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) @@ -654,6 +656,14 @@ func (pm *profileManager) loadSavedPrefs(k ipn.StateKey) (ipn.PrefsView, error) return ipn.PrefsView{}, err } savedPrefs := ipn.NewPrefs() + + // if supported by the platform, create an empty hardware attestation key to use when deserializing + // to avoid type exceptions from json.Unmarshaling into an interface{}. 
+ hw, _ := key.NewEmptyHardwareAttestationKey() + savedPrefs.Persist = &persist.Persist{ + AttestationKey: hw, + } + if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err) } diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 60c92ff8d3493..deeab2ade9b15 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -151,6 +151,7 @@ func TestProfileDupe(t *testing.T) { ID: tailcfg.UserID(user), LoginName: fmt.Sprintf("user%d@example.com", user), }, + AttestationKey: nil, } } user1Node1 := newPersist(1, 1) diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 3339a631ce827..2336164096c14 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -501,7 +501,7 @@ func TestPrefsPretty(t *testing.T) { }, }, "linux", - `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u=""}}`, + `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u="" ak=-}}`, }, { Prefs{ diff --git a/types/persist/persist.go b/types/persist/persist.go index d888a6afb6af5..4b62c79ddd186 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -26,6 +26,7 @@ type Persist struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey `json:",omitempty"` // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. 
This is used to @@ -84,11 +85,20 @@ func (p *Persist) Equals(p2 *Persist) bool { return false } + var pub, p2Pub key.HardwareAttestationPublic + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + pub = key.HardwareAttestationPublicFromPlatformKey(p.AttestationKey) + } + if p2.AttestationKey != nil && !p2.AttestationKey.IsZero() { + p2Pub = key.HardwareAttestationPublicFromPlatformKey(p2.AttestationKey) + } + return p.PrivateNodeKey.Equal(p2.PrivateNodeKey) && p.OldPrivateNodeKey.Equal(p2.OldPrivateNodeKey) && p.UserProfile.Equal(&p2.UserProfile) && p.NetworkLockKey.Equal(p2.NetworkLockKey) && p.NodeID == p2.NodeID && + pub.Equal(p2Pub) && reflect.DeepEqual(nilIfEmpty(p.DisallowedTKAStateIDs), nilIfEmpty(p2.DisallowedTKAStateIDs)) } @@ -96,12 +106,16 @@ func (p *Persist) Pretty() string { var ( ok, nk key.NodePublic ) + akString := "-" if !p.OldPrivateNodeKey.IsZero() { ok = p.OldPrivateNodeKey.Public() } if !p.PrivateNodeKey.IsZero() { nk = p.PublicNodeKey() } - return fmt.Sprintf("Persist{o=%v, n=%v u=%#v}", - ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName) + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + akString = fmt.Sprintf("%v", p.AttestationKey.Public()) + } + return fmt.Sprintf("Persist{o=%v, n=%v u=%#v ak=%s}", + ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName, akString) } diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index 680419ff2f30b..9dbe7e0f6fa6d 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -19,6 +19,9 @@ func (src *Persist) Clone() *Persist { } dst := new(Persist) *dst = *src + if src.AttestationKey != nil { + dst.AttestationKey = src.AttestationKey.Clone() + } dst.DisallowedTKAStateIDs = append(src.DisallowedTKAStateIDs[:0:0], src.DisallowedTKAStateIDs...) 
return dst } @@ -31,5 +34,6 @@ var _PersistCloneNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index dbf2a6d8c7662..713114b74dcd5 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -21,7 +21,7 @@ func fieldsOf(t reflect.Type) (fields []string) { } func TestPersistEqual(t *testing.T) { - persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "DisallowedTKAStateIDs"} + persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "AttestationKey", "DisallowedTKAStateIDs"} if have := fieldsOf(reflect.TypeFor[Persist]()); !reflect.DeepEqual(have, persistHandles) { t.Errorf("Persist.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, persistHandles) diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 7d1507468fc65..dbf8294ef5a7a 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -89,10 +89,11 @@ func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } // needed to request key rotation -func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } -func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } -func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } -func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } +func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } +func (v PersistView) NetworkLockKey() key.NLPrivate { return 
v.ж.NetworkLockKey } +func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v PersistView) AttestationKey() tailcfg.StableNodeID { panic("unsupported") } // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. This is used to @@ -110,5 +111,6 @@ var _PersistViewNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) From 77123a569ba1055f091db06e2d1b59c09b02f108 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 4 Nov 2025 12:36:04 -0800 Subject: [PATCH 0638/1093] wgengine/netlog: include node OS in logged attributes (#17755) Include the node's OS with network flow log information. Refactor the JSON-length computation to be a bit more precise. Updates tailscale/corp#33352 Fixes tailscale/corp#34030 Signed-off-by: Joe Tsai --- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 2 +- tsnet/depaware.txt | 2 +- types/netlogtype/netlogtype.go | 15 +++------------ wgengine/netlog/record.go | 26 ++++++++++++++++++++++---- wgengine/netlog/record_test.go | 2 ++ 7 files changed, 31 insertions(+), 19 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 8d1f7fa066d17..ebd22770e9bd2 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -825,7 +825,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ - tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/bools from tailscale.com/tsnet+ tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ 
tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index c1708711a32fb..bdc110e1a83ec 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -392,6 +392,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/bools from tailscale.com/wgengine/netlog tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 80c8e04a823d4..ebf03b541c585 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -230,7 +230,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ - tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/bools from tailscale.com/tsnet+ tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index ef0fe0667a00b..4817a511acf6f 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -225,7 +225,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) LDW tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ - tailscale.com/types/bools from tailscale.com/tsnet + tailscale.com/types/bools from tailscale.com/tsnet+ 
tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index 86d645b354f08..cc38684a30dbf 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -44,18 +44,6 @@ const ( // Each [ConnectionCounts] occupies at most [MaxConnectionCountsJSONSize]. MinMessageJSONSize = len(messageJSON) - nodeJSON = `{"nodeId":` + maxJSONStableID + `,"name":"","addresses":` + maxJSONAddrs + `,"user":"","tags":[]}` - maxJSONAddrV4 = `"255.255.255.255"` - maxJSONAddrV6 = `"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"` - maxJSONAddrs = `[` + maxJSONAddrV4 + `,` + maxJSONAddrV6 + `]` - - // MinNodeJSONSize is the overhead size of Node when it is - // serialized as JSON assuming that each field is minimally populated. - // It does not account for bytes occupied by - // [Node.Name], [Node.User], or [Node.Tags]. The [Node.Addresses] - // is assumed to contain a pair of IPv4 and IPv6 address. - MinNodeJSONSize = len(nodeJSON) - maxJSONConnCounts = `{` + maxJSONConn + `,` + maxJSONCounts + `}` maxJSONConn = `"proto":` + maxJSONProto + `,"src":` + maxJSONAddrPort + `,"dst":` + maxJSONAddrPort maxJSONProto = `255` @@ -82,6 +70,9 @@ type Node struct { // Addresses are the Tailscale IP addresses of the node. Addresses []netip.Addr `json:"addresses,omitempty"` + // OS is the operating system of the node. + OS string `json:"os,omitzero"` // e.g., "linux" + // User is the user that owns the node. // It is not populated if the node is tagged. 
User string `json:"user,omitzero"` // e.g., "johndoe@example.com" diff --git a/wgengine/netlog/record.go b/wgengine/netlog/record.go index b8db26fc59029..45e30fabec1a6 100644 --- a/wgengine/netlog/record.go +++ b/wgengine/netlog/record.go @@ -13,6 +13,7 @@ import ( "unicode/utf8" "tailscale.com/tailcfg" + "tailscale.com/types/bools" "tailscale.com/types/netlogtype" "tailscale.com/util/set" ) @@ -134,17 +135,31 @@ func compareConnCnts(x, y netlogtype.ConnectionCounts) int { } // jsonLen computes an upper-bound on the size of the JSON representation. -func (nu nodeUser) jsonLen() int { +func (nu nodeUser) jsonLen() (n int) { if !nu.Valid() { return len(`{"nodeId":""}`) } - n := netlogtype.MinNodeJSONSize + jsonQuotedLen(nu.Name()) + n += len(`{}`) + n += len(`"nodeId":`) + jsonQuotedLen(string(nu.StableID())) + len(`,`) + if len(nu.Name()) > 0 { + n += len(`"name":`) + jsonQuotedLen(nu.Name()) + len(`,`) + } + if nu.Addresses().Len() > 0 { + n += len(`"addresses":[]`) + for _, addr := range nu.Addresses().All() { + n += bools.IfElse(addr.Addr().Is4(), len(`"255.255.255.255"`), len(`"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"`)) + len(",") + } + } + if nu.Hostinfo().Valid() && len(nu.Hostinfo().OS()) > 0 { + n += len(`"os":`) + jsonQuotedLen(nu.Hostinfo().OS()) + len(`,`) + } if nu.Tags().Len() > 0 { + n += len(`"tags":[]`) for _, tag := range nu.Tags().All() { n += jsonQuotedLen(tag) + len(",") } - } else if nu.user.Valid() && nu.user.ID() == nu.User() { - n += jsonQuotedLen(nu.user.LoginName()) + } else if nu.user.Valid() && nu.user.ID() == nu.User() && len(nu.user.LoginName()) > 0 { + n += len(`"user":`) + jsonQuotedLen(nu.user.LoginName()) + len(",") } return n } @@ -166,6 +181,9 @@ func (nu nodeUser) toNode() netlogtype.Node { } n.Addresses = []netip.Addr{ipv4, ipv6} n.Addresses = slices.DeleteFunc(n.Addresses, func(a netip.Addr) bool { return !a.IsValid() }) + if nu.Hostinfo().Valid() { + n.OS = nu.Hostinfo().OS() + } if nu.Tags().Len() > 0 { n.Tags = 
nu.Tags().AsSlice() slices.Sort(n.Tags) diff --git a/wgengine/netlog/record_test.go b/wgengine/netlog/record_test.go index d3ab8b86c63d0..7dd840d29f052 100644 --- a/wgengine/netlog/record_test.go +++ b/wgengine/netlog/record_test.go @@ -190,6 +190,7 @@ func TestToNode(t *testing.T) { node: &tailcfg.Node{ StableID: "n123456CNTL", Addresses: []netip.Prefix{prefix("100.1.2.3")}, + Hostinfo: (&tailcfg.Hostinfo{OS: "linux"}).View(), User: 12345, }, user: &tailcfg.UserProfile{ @@ -199,6 +200,7 @@ func TestToNode(t *testing.T) { want: netlogtype.Node{ NodeID: "n123456CNTL", Addresses: []netip.Addr{addr("100.1.2.3")}, + OS: "linux", User: "user@domain", }, }, From 446752687c7c5a22058d633a57ecf82578a86681 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 5 Nov 2025 14:56:31 -0800 Subject: [PATCH 0639/1093] cmd/vet: move jsontags into vet (#17777) The cmd/jsontags is non-idiomatic since it is not a main binary. Move it to a vet directory, which will eventually contain a vettool binary. Update tailscale/corp#791 Signed-off-by: Joe Tsai --- cmd/{ => vet}/jsontags/analyzer.go | 0 cmd/{ => vet}/jsontags/iszero.go | 0 cmd/{ => vet}/jsontags/report.go | 4 ++-- 3 files changed, 2 insertions(+), 2 deletions(-) rename cmd/{ => vet}/jsontags/analyzer.go (100%) rename cmd/{ => vet}/jsontags/iszero.go (100%) rename cmd/{ => vet}/jsontags/report.go (97%) diff --git a/cmd/jsontags/analyzer.go b/cmd/vet/jsontags/analyzer.go similarity index 100% rename from cmd/jsontags/analyzer.go rename to cmd/vet/jsontags/analyzer.go diff --git a/cmd/jsontags/iszero.go b/cmd/vet/jsontags/iszero.go similarity index 100% rename from cmd/jsontags/iszero.go rename to cmd/vet/jsontags/iszero.go diff --git a/cmd/jsontags/report.go b/cmd/vet/jsontags/report.go similarity index 97% rename from cmd/jsontags/report.go rename to cmd/vet/jsontags/report.go index f05788b61dd0d..19d40799b8875 100644 --- a/cmd/jsontags/report.go +++ b/cmd/vet/jsontags/report.go @@ -28,9 +28,9 @@ var jsontagsAllowlist 
map[ReportKind]set.Set[string] // // The struct type name may be "*" for anonymous struct types such // as those declared within a function or as a type literal in a variable. -func ParseAllowlist(b []byte) map[ReportKind]set.Set[string] { +func ParseAllowlist(s string) map[ReportKind]set.Set[string] { var allowlist map[ReportKind]set.Set[string] - for line := range strings.SplitSeq(string(b), "\n") { + for line := range strings.SplitSeq(s, "\n") { kind, field, _ := strings.Cut(strings.TrimSpace(line), "\t") if allowlist == nil { allowlist = make(map[ReportKind]set.Set[string]) From 5b40f0bc547701f461605a418d49a20a0edc9f8b Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 5 Nov 2025 16:17:49 -0800 Subject: [PATCH 0640/1093] cmd/vet: add static vet checker that runs jsontags (#17778) This starts running the jsontags vet checker on the module. All existing findings are adding to an allowlist. Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- .github/workflows/vet.yml | 38 +++++ cmd/vet/jsontags_allowlist | 315 +++++++++++++++++++++++++++++++++++++ cmd/vet/vet.go | 24 +++ flake.nix | 2 +- go.mod.sri | 2 +- shell.nix | 2 +- 6 files changed, 380 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/vet.yml create mode 100644 cmd/vet/jsontags_allowlist create mode 100644 cmd/vet/vet.go diff --git a/.github/workflows/vet.yml b/.github/workflows/vet.yml new file mode 100644 index 0000000000000..7eff6b45fd37b --- /dev/null +++ b/.github/workflows/vet.yml @@ -0,0 +1,38 @@ +name: tailscale.com/cmd/vet + +env: + HOME: ${{ github.workspace }} + # GOMODCACHE is the same definition on all OSes. Within the workspace, we use + # toplevel directories "src" (for the checked out source code), and "gomodcache" + # and other caches as siblings to follow. 
+ GOMODCACHE: ${{ github.workspace }}/gomodcache + +on: + push: + branches: + - main + - "release-branch/*" + paths: + - "**.go" + pull_request: + paths: + - "**.go" + +jobs: + vet: + runs-on: [ self-hosted, linux ] + timeout-minutes: 5 + + steps: + - name: Check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + + - name: Build 'go vet' tool + working-directory: src + run: ./tool/go build -o /tmp/vettool tailscale.com/cmd/vet + + - name: Run 'go vet' + working-directory: src + run: ./tool/go vet -vettool=/tmp/vettool tailscale.com/... diff --git a/cmd/vet/jsontags_allowlist b/cmd/vet/jsontags_allowlist new file mode 100644 index 0000000000000..060a81b053865 --- /dev/null +++ b/cmd/vet/jsontags_allowlist @@ -0,0 +1,315 @@ +OmitEmptyShouldBeOmitZero tailscale.com/client/web.authResponse.ViewerIdentity +OmitEmptyShouldBeOmitZero tailscale.com/cmd/k8s-operator.OwnerRef.Resource +OmitEmptyShouldBeOmitZero tailscale.com/cmd/tailscale/cli.apiResponse.Error +OmitEmptyShouldBeOmitZero tailscale.com/health.UnhealthyState.PrimaryAction +OmitEmptyShouldBeOmitZero tailscale.com/internal/client/tailscale.VIPService.Name +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AcceptDNS +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AcceptRoutes +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AllowLANWhileUsingExitNode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AppConnector +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AuthKey +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.AutoUpdate +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.DisableSNAT +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.Enabled +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ExitNode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.Hostname +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.Locked +OmitEmptyShouldBeOmitZero 
tailscale.com/ipn.ConfigVAlpha.NetfilterMode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.NoStatefulFiltering +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.OperatorUser +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.PostureChecking +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.RunSSHServer +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.RunWebClient +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ServeConfigTemp +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ServerURL +OmitEmptyShouldBeOmitZero tailscale.com/ipn.ConfigVAlpha.ShieldsUp +OmitEmptyShouldBeOmitZero tailscale.com/ipn.OutgoingFile.PeerID +OmitEmptyShouldBeOmitZero tailscale.com/ipn.Prefs.AutoExitNode +OmitEmptyShouldBeOmitZero tailscale.com/ipn.Prefs.NoStatefulFiltering +OmitEmptyShouldBeOmitZero tailscale.com/ipn.Prefs.RelayServerPort +OmitEmptyShouldBeOmitZero tailscale.com/ipn/auditlog.transaction.Action +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.AllowedIPs +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.Location +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.PrimaryRoutes +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.PeerStatus.Tags +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.Status.ExitNodeStatus +OmitEmptyShouldBeOmitZero tailscale.com/ipn/ipnstate.UpdateProgress.Status +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.AppConnector +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.Hostname +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.HostnamePrefix +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.Replicas +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ConnectorSpec.SubnetRouter +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Container.Debug +OmitEmptyShouldBeOmitZero 
tailscale.com/k8s-operator/apis/v1alpha1.Container.ImagePullPolicy +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Container.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.KubeAPIServerConfig.Mode +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Image +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Pod +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Replicas +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Nameserver.Service +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.Affinity +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.DNSConfig +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.DNSPolicy +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.TailscaleContainer +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Pod.TailscaleInitContainer +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.Metrics +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.StaticEndpoints +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.TailscaleConfig +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroupSpec.HostnamePrefix +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroupSpec.KubeAPIServer +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroupSpec.Replicas +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderContainer.ImagePullPolicy +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderContainer.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.Affinity 
+OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.SecurityContext +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.StatefulSet.Pod +OmitEmptyShouldBeOmitZero tailscale.com/k8s-operator/apis/v1alpha1.Storage.S3 +OmitEmptyShouldBeOmitZero tailscale.com/kube/ingressservices.Config.IPv4Mapping +OmitEmptyShouldBeOmitZero tailscale.com/kube/ingressservices.Config.IPv6Mapping +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.Enabled +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.IssueCerts +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.Mode +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.APIServerProxyConfig.ServiceName +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.AcceptRoutes +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.APIServerProxy +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.App +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.AuthKey +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.HealthCheckEnabled +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.Hostname +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.LocalAddr +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.LocalPort +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.LogLevel +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.MetricsEnabled +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.ServerURL +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.ConfigV1Alpha1.State +OmitEmptyShouldBeOmitZero tailscale.com/kube/k8s-proxy/conf.VersionedConfig.V1Alpha1 +OmitEmptyShouldBeOmitZero 
tailscale.com/kube/kubeapi.ObjectMeta.DeletionGracePeriodSeconds +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubeapi.Status.Details +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubeclient.JSONPatch.Value +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubetypes.*.Mode +OmitEmptyShouldBeOmitZero tailscale.com/kube/kubetypes.KubernetesCapRule.Impersonate +OmitEmptyShouldBeOmitZero tailscale.com/sessionrecording.CastHeader.Kubernetes +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.AuditLogRequest.Action +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Debug.Exit +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.DERPMap.HomeParams +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.DisplayMessage.PrimaryAction +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.AppConnector +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Container +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Desktop +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Location +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.NetInfo +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.StateEncrypted +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.TPM +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.Userspace +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Hostinfo.UserspaceRouter +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.ClientVersion +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.CollectServices +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.ControlDialPlan +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.Debug +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DefaultAutoUpdate +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DERPMap +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DNSConfig +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.Node +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.PingRequest 
+OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.SSHPolicy +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.TKAInfo +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.NetPortRange.Bits +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Node.Online +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Node.SelfNodeV4MasqAddrForThisPeer +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.Node.SelfNodeV6MasqAddrForThisPeer +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.PeerChange.Online +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.RegisterRequest.Auth +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.RegisterResponseAuth.Oauth2Token +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.SSHAction.OnRecordingFailure +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.SSHPrincipal.Node +OmitEmptyShouldBeOmitZero tailscale.com/tempfork/acme.*.ExternalAccountBinding +OmitEmptyShouldBeOmitZero tailscale.com/tsweb.AccessLogRecord.RequestID +OmitEmptyShouldBeOmitZero tailscale.com/types/opt.*.Unset +OmitEmptyShouldBeOmitZero tailscale.com/types/views.viewStruct.AddrsPtr +OmitEmptyShouldBeOmitZero tailscale.com/types/views.viewStruct.StringsPtr +OmitEmptyShouldBeOmitZero tailscale.com/wgengine/magicsock.EndpointChange.From +OmitEmptyShouldBeOmitZero tailscale.com/wgengine/magicsock.EndpointChange.To +OmitEmptyShouldBeOmitZeroButHasIsZero tailscale.com/types/persist.Persist.AttestationKey +OmitEmptyUnsupportedInV1 tailscale.com/client/tailscale.KeyCapabilities.Devices +OmitEmptyUnsupportedInV1 tailscale.com/client/tailscale/apitype.ExitNodeSuggestionResponse.Location +OmitEmptyUnsupportedInV1 tailscale.com/cmd/k8s-operator.ServiceMonitorSpec.NamespaceSelector +OmitEmptyUnsupportedInV1 tailscale.com/derp.ClientInfo.MeshKey +OmitEmptyUnsupportedInV1 tailscale.com/ipn.MaskedPrefs.AutoUpdateSet +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.Connector.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.Container.Resources 
+OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.DNSConfig.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.ProxyClass.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.ProxyGroup.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.Recorder.ObjectMeta +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderContainer.Resources +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.Container +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderPod.ServiceAccount +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderSpec.Storage +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.RecorderStatefulSet.Pod +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.S3.Credentials +OmitEmptyUnsupportedInV1 tailscale.com/k8s-operator/apis/v1alpha1.S3Credentials.Secret +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.Event.FirstTimestamp +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.Event.LastTimestamp +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.Event.Source +OmitEmptyUnsupportedInV1 tailscale.com/kube/kubeapi.ObjectMeta.CreationTimestamp +OmitEmptyUnsupportedInV1 tailscale.com/tailcfg_test.*.Groups +OmitEmptyUnsupportedInV1 tailscale.com/tailcfg.Oauth2Token.Expiry +OmitEmptyUnsupportedInV1 tailscale.com/tailcfg.QueryFeatureRequest.NodeKey +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale.*.ExpirySeconds +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale.DerpRegion.Preferred +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale.DevicePostureIdentity.Disabled +OmitEmptyUnsupportedInV2 tailscale.com/client/tailscale/apitype.DNSResolver.UseWithExitNode +OmitEmptyUnsupportedInV2 tailscale.com/client/web.authResponse.NeedsSynoAuth +OmitEmptyUnsupportedInV2 tailscale.com/cmd/tsidp.tailscaleClaims.UserID +OmitEmptyUnsupportedInV2 
tailscale.com/derp.ClientInfo.IsProber +OmitEmptyUnsupportedInV2 tailscale.com/derp.ClientInfo.Version +OmitEmptyUnsupportedInV2 tailscale.com/derp.ServerInfo.TokenBucketBytesBurst +OmitEmptyUnsupportedInV2 tailscale.com/derp.ServerInfo.TokenBucketBytesPerSecond +OmitEmptyUnsupportedInV2 tailscale.com/derp.ServerInfo.Version +OmitEmptyUnsupportedInV2 tailscale.com/health.UnhealthyState.ImpactsConnectivity +OmitEmptyUnsupportedInV2 tailscale.com/ipn.AutoUpdatePrefsMask.ApplySet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.AutoUpdatePrefsMask.CheckSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AdvertiseRoutesSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AdvertiseServicesSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AdvertiseTagsSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AppConnectorSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.AutoExitNodeSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ControlURLSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.CorpDNSSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.DriveSharesSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.EggSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ExitNodeAllowLANAccessSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ExitNodeIDSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ExitNodeIPSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ForceDaemonSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.HostnameSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.InternalExitNodePriorSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.LoggedOutSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NetfilterKindSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NetfilterModeSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NoSNATSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NoStatefulFilteringSet 
+OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.NotepadURLsSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.OperatorUserSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.PostureCheckingSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ProfileNameSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RelayServerPortSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RouteAllSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RunSSHSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.RunWebClientSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.ShieldsUpSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.MaskedPrefs.WantRunningSet +OmitEmptyUnsupportedInV2 tailscale.com/ipn.PartialFile.Done +OmitEmptyUnsupportedInV2 tailscale.com/ipn.Prefs.Egg +OmitEmptyUnsupportedInV2 tailscale.com/ipn.Prefs.ForceDaemon +OmitEmptyUnsupportedInV2 tailscale.com/ipn.ServiceConfig.Tun +OmitEmptyUnsupportedInV2 tailscale.com/ipn.TCPPortHandler.HTTP +OmitEmptyUnsupportedInV2 tailscale.com/ipn.TCPPortHandler.HTTPS +OmitEmptyUnsupportedInV2 tailscale.com/ipn/auditlog.transaction.Retries +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PeerStatus.AltSharerUserID +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PeerStatus.Expired +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PeerStatus.ShareeNode +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PingResult.IsLocalIP +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.PingResult.PeerAPIPort +OmitEmptyUnsupportedInV2 tailscale.com/ipn/ipnstate.Status.HaveNodeKey +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.PortRange.EndPort +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.ProxyClassSpec.UseLetsEncryptStagingEnvironment +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.RecorderSpec.EnableUI +OmitEmptyUnsupportedInV2 tailscale.com/k8s-operator/apis/v1alpha1.TailscaleConfig.AcceptRoutes 
+OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.Event.Count +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.ObjectMeta.Generation +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.Status.Code +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubetypes.KubernetesCapRule.EnforceRecorder +OmitEmptyUnsupportedInV2 tailscale.com/log/sockstatlog.event.IsCellularInterface +OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.CastHeader.SrcNodeUserID +OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.Source.NodeUserID +OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.v2ResponseFrame.Ack +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg_test.*.ToggleOn +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.AuditLogRequest.Version +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NPostureIdentityResponse.PostureDisabled +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NSSHUsernamesRequest.Max +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NTLSCertInfo.Expired +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NTLSCertInfo.Missing +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.C2NTLSCertInfo.Valid +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ClientVersion.Notify +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ClientVersion.RunningLatest +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ClientVersion.UrgentSecurityUpdate +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ControlIPCandidate.DialStartDelaySec +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ControlIPCandidate.DialTimeoutSec +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.ControlIPCandidate.Priority +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Debug.DisableLogTail +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Debug.SleepSeconds +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPMap.OmitDefaultRegions +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.CanPort80 +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.DERPPort +OmitEmptyUnsupportedInV2 
tailscale.com/tailcfg.DERPNode.InsecureForTests +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.STUNOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPNode.STUNPort +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.Avoid +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.Latitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.Longitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DERPRegion.NoMeasureNoHome +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DisplayMessage.ImpactsConnectivity +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.DNSConfig.Proxied +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.AllowsUpdate +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.IngressEnabled +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.NoLogsNoSupport +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.ShareeNode +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.ShieldsUp +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Hostinfo.WireIngress +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Location.Latitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Location.Longitude +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Location.Priority +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapRequest.MapSessionSeq +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapRequest.OmitPeers +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapRequest.ReadOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapResponse.KeepAlive +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.MapResponse.Seq +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.NetInfo.HavePortMap +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.Cap +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.Expired +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.HomeDERP +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.IsJailed +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.IsWireGuardOnly +OmitEmptyUnsupportedInV2 
tailscale.com/tailcfg.Node.MachineAuthorized +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.Sharer +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.Node.UnsignedPeerAPIOnly +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PeerChange.Cap +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PeerChange.DERPRegion +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingRequest.Log +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingRequest.URLIsNoise +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.DERPRegionID +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.IsLocalIP +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.LatencySeconds +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.PingResponse.PeerAPIPort +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.QueryFeatureResponse.Complete +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.QueryFeatureResponse.ShouldWait +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.RegisterRequest.Ephemeral +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.RegisterRequest.SignatureType +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.Accept +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.AllowAgentForwarding +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.AllowLocalPortForwarding +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.AllowRemotePortForwarding +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.Reject +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHAction.SessionDuration +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.SSHPrincipal.Any +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TKAInfo.Disabled +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TPMInfo.FirmwareVersion +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TPMInfo.Model +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.TPMInfo.SpecRevision +OmitEmptyUnsupportedInV2 tailscale.com/tailcfg.WebClientAuthResponse.Complete +OmitEmptyUnsupportedInV2 tailscale.com/tempfork/acme.*.TermsAgreed 
+OmitEmptyUnsupportedInV2 tailscale.com/tstime/rate.jsonValue.Updated +OmitEmptyUnsupportedInV2 tailscale.com/tstime/rate.jsonValue.Value +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.Bytes +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.Code +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.Seconds +OmitEmptyUnsupportedInV2 tailscale.com/tsweb.AccessLogRecord.TLS +OmitEmptyUnsupportedInV2 tailscale.com/tsweb/varz.SomeStats.TotalY +OmitEmptyUnsupportedInV2 tailscale.com/types/appctype.AppConnectorConfig.AdvertiseRoutes +OmitEmptyUnsupportedInV2 tailscale.com/types/dnstype.Resolver.UseWithExitNode +OmitEmptyUnsupportedInV2 tailscale.com/types/opt.testStruct.Int +OmitEmptyUnsupportedInV2 tailscale.com/version.Meta.GitDirty +OmitEmptyUnsupportedInV2 tailscale.com/version.Meta.IsDev +OmitEmptyUnsupportedInV2 tailscale.com/version.Meta.UnstableBranch diff --git a/cmd/vet/vet.go b/cmd/vet/vet.go new file mode 100644 index 0000000000000..45473af48f0ee --- /dev/null +++ b/cmd/vet/vet.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package vet is a tool to statically check Go source code. 
+package main + +import ( + _ "embed" + + "golang.org/x/tools/go/analysis/unitchecker" + "tailscale.com/cmd/vet/jsontags" +) + +//go:embed jsontags_allowlist +var jsontagsAllowlistSource string + +func init() { + jsontags.RegisterAllowlist(jsontags.ParseAllowlist(jsontagsAllowlistSource)) + jsontags.RegisterPureIsZeroMethods(jsontags.PureIsZeroMethodsInTailscaleModule) +} + +func main() { + unitchecker.Main(jsontags.Analyzer) +} diff --git a/flake.nix b/flake.nix index e50f396387574..d2f03d4d81382 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= +# nix-direnv cache busting line: sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= diff --git a/go.mod.sri b/go.mod.sri index 108423f4e1ad4..325a03b43bdfd 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= +sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= diff --git a/shell.nix b/shell.nix index 6b579b45552cc..c11b4bbcfb4f7 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-D0znIEcy9d822snZbdNCNLoN47cOP1F2SKmfwSFRvXw= +# nix-direnv cache busting line: sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= From 1ed117dbc08ac60a69ba46bdb7289b1d416bc5dc Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 6 Nov 2025 15:36:58 +0000 Subject: [PATCH 0641/1093] cmd/k8s-operator: remove Services feature flag detection Now that the feature is in beta, no one should encounter this error. 
Updates #cleanup Change-Id: I69ed3f460b7f28c44da43ce2f552042f980a0420 Signed-off-by: Tom Proctor --- cmd/k8s-operator/api-server-proxy-pg.go | 6 ------ cmd/k8s-operator/ingress-for-pg.go | 24 ------------------------ cmd/k8s-operator/svc-for-pg.go | 10 ---------- 3 files changed, 40 deletions(-) diff --git a/cmd/k8s-operator/api-server-proxy-pg.go b/cmd/k8s-operator/api-server-proxy-pg.go index 252859eb37197..1a81e4967e5d8 100644 --- a/cmd/k8s-operator/api-server-proxy-pg.go +++ b/cmd/k8s-operator/api-server-proxy-pg.go @@ -157,12 +157,6 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s // 1. Check there isn't a Tailscale Service with the same hostname // already created and not owned by this ProxyGroup. existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) - if isErrorFeatureFlagNotEnabled(err) { - logger.Warn(msgFeatureFlagNotEnabled) - r.recorder.Event(pg, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) - tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyValid, metav1.ConditionFalse, reasonKubeAPIServerProxyInvalid, msgFeatureFlagNotEnabled, pg.Generation, r.clock, logger) - return nil - } if err != nil && !isErrorTailscaleServiceNotFound(err) { return fmt.Errorf("error getting Tailscale Service %q: %w", serviceName, err) } diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 3afeb528f7f8f..4d831180578eb 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -154,11 +154,6 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // needs to be explicitly enabled for a tailnet to be able to use them. 
serviceName := tailcfg.ServiceName("svc:" + hostname) existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) - if isErrorFeatureFlagNotEnabled(err) { - logger.Warn(msgFeatureFlagNotEnabled) - r.recorder.Event(ing, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) - return false, nil - } if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } @@ -453,11 +448,6 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG if !found { logger.Infof("Tailscale Service %q is not owned by any Ingress, cleaning up", tsSvcName) tsService, err := r.tsClient.GetVIPService(ctx, tsSvcName) - if isErrorFeatureFlagNotEnabled(err) { - msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) - logger.Warn(msg) - return false, nil - } if isErrorTailscaleServiceNotFound(err) { return false, nil } @@ -515,12 +505,6 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, serviceName := tailcfg.ServiceName("svc:" + hostname) svc, err := r.tsClient.GetVIPService(ctx, serviceName) if err != nil { - if isErrorFeatureFlagNotEnabled(err) { - msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) - logger.Warn(msg) - r.recorder.Event(ing, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msg) - return false, nil - } if isErrorTailscaleServiceNotFound(err) { return false, nil } @@ -1122,14 +1106,6 @@ func hasCerts(ctx context.Context, cl client.Client, lc localClient, ns string, return len(cert) > 0 && len(key) > 0, nil } -func isErrorFeatureFlagNotEnabled(err error) bool { - // messageFFNotEnabled is the error message returned by - // Tailscale control plane when a Tailscale Service API call is made for a - // tailnet that does not have the Tailscale Services feature flag enabled. 
- const messageFFNotEnabled = "feature unavailable for tailnet" - return err != nil && strings.Contains(err.Error(), messageFFNotEnabled) -} - func isErrorTailscaleServiceNotFound(err error) bool { var errResp tailscale.ErrResponse ok := errors.As(err, &errResp) diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index 62cc36bd4a82b..144d3755811da 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -207,11 +207,6 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // already created and not owned by this Service. serviceName := tailcfg.ServiceName("svc:" + hostname) existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) - if isErrorFeatureFlagNotEnabled(err) { - logger.Warn(msgFeatureFlagNotEnabled) - r.recorder.Event(svc, corev1.EventTypeWarning, warningTailscaleServiceFeatureFlagNotEnabled, msgFeatureFlagNotEnabled) - return false, nil - } if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } @@ -530,11 +525,6 @@ func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, er // It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. 
func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcfg.ServiceName, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) { svc, err := tsClient.GetVIPService(ctx, name) - if isErrorFeatureFlagNotEnabled(err) { - msg := fmt.Sprintf("Unable to proceed with cleanup: %s.", msgFeatureFlagNotEnabled) - logger.Warn(msg) - return false, nil - } if err != nil { errResp := &tailscale.ErrResponse{} ok := errors.As(err, errResp) From d4c5b278b3dd67e31498dfbfe321c5e00a801898 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Sun, 5 Oct 2025 02:10:50 +0100 Subject: [PATCH 0642/1093] cmd/k8s-operator: support workload identity federation The feature is currently in private alpha, so requires a tailnet feature flag. Initially focuses on supporting the operator's own auth, because the operator is the only device we maintain that uses static long-lived credentials. All other operator-created devices use single-use auth keys. Testing steps: * Create a cluster with an API server accessible over public internet * kubectl get --raw /.well-known/openid-configuration | jq '.issuer' * Create a federated OAuth client in the Tailscale admin console with: * The issuer from the previous step * Subject claim `system:serviceaccount:tailscale:operator` * Write scopes services, devices:core, auth_keys * Tag tag:k8s-operator * Allow the Tailscale control plane to get the public portion of the ServiceAccount token signing key without authentication: * kubectl create clusterrolebinding oidc-discovery \ --clusterrole=system:service-account-issuer-discovery \ --group=system:unauthenticated * helm install --set oauth.clientId=... --set oauth.audience=... 
Updates #17457 Change-Id: Ib29c85ba97b093c70b002f4f41793ffc02e6c6e9 Signed-off-by: Tom Proctor --- .../deploy/chart/templates/deployment.yaml | 26 ++++ .../deploy/chart/templates/oauth-secret.yaml | 2 +- cmd/k8s-operator/deploy/chart/values.yaml | 21 ++- cmd/k8s-operator/generate/main.go | 2 +- cmd/k8s-operator/operator.go | 18 +-- cmd/k8s-operator/tsclient.go | 102 ++++++++++--- cmd/k8s-operator/tsclient_test.go | 135 ++++++++++++++++++ 7 files changed, 272 insertions(+), 34 deletions(-) create mode 100644 cmd/k8s-operator/tsclient_test.go diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 51d0a88c36671..0f2dc42fc3c3a 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -34,7 +34,9 @@ spec: securityContext: {{- toYaml . | nindent 8 }} {{- end }} + {{- if or .Values.oauth.clientSecret .Values.oauth.audience }} volumes: + {{- if .Values.oauth.clientSecret }} - name: oauth {{- with .Values.oauthSecretVolume }} {{- toYaml . 
| nindent 10 }} @@ -42,6 +44,17 @@ spec: secret: secretName: operator-oauth {{- end }} + {{- else }} + - name: oidc-jwt + projected: + defaultMode: 420 + sources: + - serviceAccountToken: + audience: {{ .Values.oauth.audience }} + expirationSeconds: 3600 + path: token + {{- end }} + {{- end }} containers: - name: operator {{- with .Values.operatorConfig.securityContext }} @@ -72,10 +85,15 @@ spec: value: {{ .Values.loginServer }} - name: OPERATOR_INGRESS_CLASS_NAME value: {{ .Values.ingressClass.name }} + {{- if .Values.oauth.clientSecret }} - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE value: /oauth/client_secret + {{- else if .Values.oauth.audience }} + - name: CLIENT_ID + value: {{ .Values.oauth.clientId }} + {{- end }} {{- $proxyTag := printf ":%s" ( .Values.proxyConfig.image.tag | default .Chart.AppVersion )}} - name: PROXY_IMAGE value: {{ coalesce .Values.proxyConfig.image.repo .Values.proxyConfig.image.repository }}{{- if .Values.proxyConfig.image.digest -}}{{ printf "@%s" .Values.proxyConfig.image.digest}}{{- else -}}{{ printf "%s" $proxyTag }}{{- end }} @@ -100,10 +118,18 @@ spec: {{- with .Values.operatorConfig.extraEnv }} {{- toYaml . | nindent 12 }} {{- end }} + {{- if or .Values.oauth.clientSecret .Values.oauth.audience }} volumeMounts: + {{- if .Values.oauth.clientSecret }} - name: oauth mountPath: /oauth readOnly: true + {{- else }} + - name: oidc-jwt + mountPath: /var/run/secrets/tailscale/serviceaccount + readOnly: true + {{- end }} + {{- end }} {{- with .Values.operatorConfig.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} diff --git a/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml b/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml index b44fde0a17b49..b85c78915dedc 100644 --- a/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml @@ -1,7 +1,7 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -{{ if and .Values.oauth .Values.oauth.clientId -}} +{{ if and .Values.oauth .Values.oauth.clientId .Values.oauth.clientSecret -}} apiVersion: v1 kind: Secret metadata: diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index cdedb92e819e4..eb11fc7f27a86 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -1,13 +1,20 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -# Operator oauth credentials. If set a Kubernetes Secret with the provided -# values will be created in the operator namespace. If unset a Secret named -# operator-oauth must be precreated or oauthSecretVolume needs to be adjusted. -# This block will be overridden by oauthSecretVolume, if set. -oauth: {} - # clientId: "" - # clientSecret: "" +# Operator oauth credentials. If unset a Secret named operator-oauth must be +# precreated or oauthSecretVolume needs to be adjusted. This block will be +# overridden by oauthSecretVolume, if set. +oauth: + # The Client ID the operator will authenticate with. + clientId: "" + # If set a Kubernetes Secret with the provided value will be created in + # the operator namespace, and mounted into the operator Pod. Takes precedence + # over oauth.audience. + clientSecret: "" + # The audience for oauth.clientId if using a workload identity federation + # OAuth client. Mutually exclusive with oauth.clientSecret. + # See https://tailscale.com/kb/1581/workload-identity-federation. 
+ audience: "" # URL of the control plane to be used by all resources managed by the operator. loginServer: "" diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 5fd5d551b5e02..08bdc350d500c 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -69,7 +69,7 @@ func main() { }() log.Print("Templating Helm chart contents") helmTmplCmd := exec.Command("./tool/helm", "template", "operator", "./cmd/k8s-operator/deploy/chart", - "--namespace=tailscale") + "--namespace=tailscale", "--set=oauth.clientSecret=''") helmTmplCmd.Dir = repoRoot var out bytes.Buffer helmTmplCmd.Stdout = &out diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index cc97b1be29cbe..d5ff077800b24 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -164,22 +164,24 @@ func main() { runReconcilers(rOpts) } -// initTSNet initializes the tsnet.Server and logs in to Tailscale. It uses the -// CLIENT_ID_FILE and CLIENT_SECRET_FILE environment variables to authenticate -// with Tailscale. +// initTSNet initializes the tsnet.Server and logs in to Tailscale. If CLIENT_ID +// is set, it authenticates to the Tailscale API using the federated OIDC workload +// identity flow. Otherwise, it uses the CLIENT_ID_FILE and CLIENT_SECRET_FILE +// environment variables to authenticate with static credentials. func initTSNet(zlog *zap.SugaredLogger, loginServer string) (*tsnet.Server, tsClient) { var ( - clientIDPath = defaultEnv("CLIENT_ID_FILE", "") - clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") + clientID = defaultEnv("CLIENT_ID", "") // Used for workload identity federation. + clientIDPath = defaultEnv("CLIENT_ID_FILE", "") // Used for static client credentials. + clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "") // Used for static client credentials. 
hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator") kubeSecret = defaultEnv("OPERATOR_SECRET", "") operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator") ) startlog := zlog.Named("startup") - if clientIDPath == "" || clientSecretPath == "" { - startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") + if clientID == "" && (clientIDPath == "" || clientSecretPath == "") { + startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set") // TODO(tomhjp): error message can mention WIF once it's publicly available. } - tsc, err := newTSClient(context.Background(), clientIDPath, clientSecretPath, loginServer) + tsc, err := newTSClient(zlog.Named("ts-api-client"), clientID, clientIDPath, clientSecretPath, loginServer) if err != nil { startlog.Fatalf("error creating Tailscale client: %v", err) } diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index 50620c26ddf27..d22fa1797dd5c 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -8,8 +8,13 @@ package main import ( "context" "fmt" + "net/http" "os" + "sync" + "time" + "go.uber.org/zap" + "golang.org/x/oauth2" "golang.org/x/oauth2/clientcredentials" "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" @@ -20,30 +25,53 @@ import ( // call should be performed on the default tailnet for the provided credentials. 
const ( defaultTailnet = "-" + oidcJWTPath = "/var/run/secrets/tailscale/serviceaccount/token" ) -func newTSClient(ctx context.Context, clientIDPath, clientSecretPath, loginServer string) (tsClient, error) { - clientID, err := os.ReadFile(clientIDPath) - if err != nil { - return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) - } - clientSecret, err := os.ReadFile(clientSecretPath) - if err != nil { - return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err) - } - const tokenURLPath = "/api/v2/oauth/token" - tokenURL := fmt.Sprintf("%s%s", ipn.DefaultControlURL, tokenURLPath) +func newTSClient(logger *zap.SugaredLogger, clientID, clientIDPath, clientSecretPath, loginServer string) (*tailscale.Client, error) { + baseURL := ipn.DefaultControlURL if loginServer != "" { - tokenURL = fmt.Sprintf("%s%s", loginServer, tokenURLPath) + baseURL = loginServer } - credentials := clientcredentials.Config{ - ClientID: string(clientID), - ClientSecret: string(clientSecret), - TokenURL: tokenURL, + + var httpClient *http.Client + if clientID == "" { + // Use static client credentials mounted to disk. + id, err := os.ReadFile(clientIDPath) + if err != nil { + return nil, fmt.Errorf("error reading client ID %q: %w", clientIDPath, err) + } + secret, err := os.ReadFile(clientSecretPath) + if err != nil { + return nil, fmt.Errorf("reading client secret %q: %w", clientSecretPath, err) + } + credentials := clientcredentials.Config{ + ClientID: string(id), + ClientSecret: string(secret), + TokenURL: fmt.Sprintf("%s%s", baseURL, "/api/v2/oauth/token"), + } + tokenSrc := credentials.TokenSource(context.Background()) + httpClient = oauth2.NewClient(context.Background(), tokenSrc) + } else { + // Use workload identity federation. 
+ tokenSrc := &jwtTokenSource{ + logger: logger, + jwtPath: oidcJWTPath, + baseCfg: clientcredentials.Config{ + ClientID: clientID, + TokenURL: fmt.Sprintf("%s%s", baseURL, "/api/v2/oauth/token-exchange"), + }, + } + httpClient = &http.Client{ + Transport: &oauth2.Transport{ + Source: tokenSrc, + }, + } } + c := tailscale.NewClient(defaultTailnet, nil) c.UserAgent = "tailscale-k8s-operator" - c.HTTPClient = credentials.Client(ctx) + c.HTTPClient = httpClient if loginServer != "" { c.BaseURL = loginServer } @@ -63,3 +91,43 @@ type tsClient interface { // DeleteVIPService is a method for deleting a Tailscale Service. DeleteVIPService(ctx context.Context, name tailcfg.ServiceName) error } + +// jwtTokenSource implements the [oauth2.TokenSource] interface, but with the +// ability to regenerate a fresh underlying token source each time a new value +// of the JWT parameter is needed due to expiration. +type jwtTokenSource struct { + logger *zap.SugaredLogger + jwtPath string // Path to the file containing an automatically refreshed JWT. + baseCfg clientcredentials.Config // Holds config that doesn't change for the lifetime of the process. + + mu sync.Mutex // Guards underlying. + underlying oauth2.TokenSource // The oauth2 client implementation. Does its own separate caching of the access token. +} + +func (s *jwtTokenSource) Token() (*oauth2.Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.underlying != nil { + t, err := s.underlying.Token() + if err == nil && t != nil && t.Valid() { + return t, nil + } + } + + s.logger.Debugf("Refreshing JWT from %s", s.jwtPath) + tk, err := os.ReadFile(s.jwtPath) + if err != nil { + return nil, fmt.Errorf("error reading JWT from %q: %w", s.jwtPath, err) + } + + // Shallow copy of the base config. 
+ credentials := s.baseCfg + credentials.EndpointParams = map[string][]string{ + "jwt": {string(tk)}, + } + + src := credentials.TokenSource(context.Background()) + s.underlying = oauth2.ReuseTokenSourceWithExpiry(nil, src, time.Minute) + return s.underlying.Token() +} diff --git a/cmd/k8s-operator/tsclient_test.go b/cmd/k8s-operator/tsclient_test.go new file mode 100644 index 0000000000000..16de512d5809f --- /dev/null +++ b/cmd/k8s-operator/tsclient_test.go @@ -0,0 +1,135 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "go.uber.org/zap" + "golang.org/x/oauth2" +) + +func TestNewStaticClient(t *testing.T) { + const ( + clientIDFile = "client-id" + clientSecretFile = "client-secret" + ) + + tmp := t.TempDir() + clientIDPath := filepath.Join(tmp, clientIDFile) + if err := os.WriteFile(clientIDPath, []byte("test-client-id"), 0600); err != nil { + t.Fatalf("error writing test file %q: %v", clientIDPath, err) + } + clientSecretPath := filepath.Join(tmp, clientSecretFile) + if err := os.WriteFile(clientSecretPath, []byte("test-client-secret"), 0600); err != nil { + t.Fatalf("error writing test file %q: %v", clientSecretPath, err) + } + + srv := testAPI(t, 3600) + cl, err := newTSClient(zap.NewNop().Sugar(), "", clientIDPath, clientSecretPath, srv.URL) + if err != nil { + t.Fatalf("error creating Tailscale client: %v", err) + } + + resp, err := cl.HTTPClient.Get(srv.URL) + if err != nil { + t.Fatalf("error making test API call: %v", err) + } + defer resp.Body.Close() + + got, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + want := "Bearer " + testToken("/api/v2/oauth/token", "test-client-id", "test-client-secret", "") + if string(got) != want { + t.Errorf("got %q; want %q", got, want) + } +} + +func 
TestNewWorkloadIdentityClient(t *testing.T) { + // 5 seconds is within expiryDelta leeway, so the access token will + // immediately be considered expired and get refreshed on each access. + srv := testAPI(t, 5) + cl, err := newTSClient(zap.NewNop().Sugar(), "test-client-id", "", "", srv.URL) + if err != nil { + t.Fatalf("error creating Tailscale client: %v", err) + } + + // Modify the path where the JWT will be read from. + oauth2Transport, ok := cl.HTTPClient.Transport.(*oauth2.Transport) + if !ok { + t.Fatalf("expected oauth2.Transport, got %T", cl.HTTPClient.Transport) + } + jwtTokenSource, ok := oauth2Transport.Source.(*jwtTokenSource) + if !ok { + t.Fatalf("expected jwtTokenSource, got %T", oauth2Transport.Source) + } + tmp := t.TempDir() + jwtPath := filepath.Join(tmp, "token") + jwtTokenSource.jwtPath = jwtPath + + for _, jwt := range []string{"test-jwt", "updated-test-jwt"} { + if err := os.WriteFile(jwtPath, []byte(jwt), 0600); err != nil { + t.Fatalf("error writing test file %q: %v", jwtPath, err) + } + resp, err := cl.HTTPClient.Get(srv.URL) + if err != nil { + t.Fatalf("error making test API call: %v", err) + } + defer resp.Body.Close() + + got, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("error reading response body: %v", err) + } + if want := "Bearer " + testToken("/api/v2/oauth/token-exchange", "test-client-id", "", jwt); string(got) != want { + t.Errorf("got %q; want %q", got, want) + } + } +} + +func testAPI(t *testing.T, expirationSeconds int) *httptest.Server { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Logf("test server got request: %s %s", r.Method, r.URL.Path) + switch r.URL.Path { + case "/api/v2/oauth/token", "/api/v2/oauth/token-exchange": + id, secret, ok := r.BasicAuth() + if !ok { + t.Fatal("missing or invalid basic auth") + } + w.Header().Set("Content-Type", "application/json") + if err := json.NewEncoder(w).Encode(map[string]any{ + "access_token": testToken(r.URL.Path, 
id, secret, r.FormValue("jwt")), + "token_type": "Bearer", + "expires_in": expirationSeconds, + }); err != nil { + t.Fatalf("error writing response: %v", err) + } + case "/": + // Echo back the authz header for test assertions. + _, err := w.Write([]byte(r.Header.Get("Authorization"))) + if err != nil { + t.Fatalf("error writing response: %v", err) + } + default: + w.WriteHeader(http.StatusNotFound) + } + })) + t.Cleanup(srv.Close) + return srv +} + +func testToken(path, id, secret, jwt string) string { + return fmt.Sprintf("%s|%s|%s|%s", path, id, secret, jwt) +} From bab5e68d0a67339de3c7f3b1fe6f0f8d84524a3a Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Fri, 7 Nov 2025 18:38:49 -0500 Subject: [PATCH 0643/1093] net/udprelay: use GetGlobalAddrs and add local port endpoint (#17797) Use GetGlobalAddrs() to discover all STUN endpoints, handling bad NATs that create multiple mappings. When MappingVariesByDestIP is true, also add the first STUN IPv4 address with the relay's local port for static port mapping scenarios. Updates #17796 Signed-off-by: Raj Singh --- net/udprelay/server.go | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 83831dd698164..de1376b6480b1 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -393,14 +393,29 @@ func (s *Server) addrDiscoveryLoop() { if err != nil { return nil, err } - if rep.GlobalV4.IsValid() { - addrPorts.Add(rep.GlobalV4) + // Add STUN-discovered endpoints with their observed ports. 
+ v4Addrs, v6Addrs := rep.GetGlobalAddrs() + for _, addr := range v4Addrs { + if addr.IsValid() { + addrPorts.Add(addr) + } } - if rep.GlobalV6.IsValid() { - addrPorts.Add(rep.GlobalV6) + for _, addr := range v6Addrs { + if addr.IsValid() { + addrPorts.Add(addr) + } + } + + if len(v4Addrs) >= 1 && v4Addrs[0].IsValid() { + // If they're behind a hard NAT and are using a fixed + // port locally, assume they might've added a static + // port mapping on their router to the same explicit + // port that the relay is running with. Worst case + // it's an invalid candidate mapping. + if rep.MappingVariesByDestIP.EqualBool(true) && s.uc4Port != 0 { + addrPorts.Add(netip.AddrPortFrom(v4Addrs[0].Addr(), s.uc4Port)) + } } - // TODO(jwhited): consider logging if rep.MappingVariesByDestIP as - // that's a hint we are not well-positioned to operate as a UDP relay. return addrPorts.Slice(), nil } From 875a9c526d1c2c6fc6d1c4f239f27571b92404e3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 8 Nov 2025 17:44:47 -0800 Subject: [PATCH 0644/1093] tsnet: skip a 30s long flaky-ish test on macOS Updates #17805 Change-Id: I540f50d067eee12e430dfd9de6871dc784fffb8a Signed-off-by: Brad Fitzpatrick --- tsnet/tsnet_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 1e22681fcfe36..1b6ebf4e4f616 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -275,6 +275,9 @@ func TestDialBlocks(t *testing.T) { } func TestConn(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("slow on macOS: https://github.com/tailscale/tailscale/issues/17805") + } tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() From de733c5951c3ead36df8cc107996f1488337f37f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 9 Nov 2025 07:05:28 -0800 Subject: [PATCH 0645/1093] tailcfg: kill off rest of HairPinning symbols It was disabled in May 2024 in #12205 (9eb72bb51). 
This removes the unused symbols. Updates #188 Updates tailscale/corp#19106 Updates tailscale/corp#19116 Change-Id: I5208b7b750b18226ed703532ed58c4ea17195a8e Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/c2n_test.go | 1 - tailcfg/tailcfg.go | 9 ++------- tailcfg/tailcfg_clone.go | 1 - tailcfg/tailcfg_test.go | 1 - tailcfg/tailcfg_view.go | 5 ----- 5 files changed, 2 insertions(+), 15 deletions(-) diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 95cd5fa6995bc..877d102d0986b 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -324,7 +324,6 @@ func TestRedactNetmapPrivateKeys(t *testing.T) { f(tailcfg.Location{}, "Priority"): false, f(tailcfg.NetInfo{}, "DERPLatency"): false, f(tailcfg.NetInfo{}, "FirewallMode"): false, - f(tailcfg.NetInfo{}, "HairPinning"): false, f(tailcfg.NetInfo{}, "HavePortMap"): false, f(tailcfg.NetInfo{}, "LinkType"): false, f(tailcfg.NetInfo{}, "MappingVariesByDestIP"): false, diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index a95d0559c2bec..43ed3188fdb26 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1018,10 +1018,6 @@ type NetInfo struct { // vary based on the destination IP. MappingVariesByDestIP opt.Bool - // HairPinning is their router does hairpinning. - // It reports true even if there's no NAT involved. - HairPinning opt.Bool - // WorkingIPv6 is whether the host has IPv6 internet connectivity. 
WorkingIPv6 opt.Bool @@ -1089,8 +1085,8 @@ func (ni *NetInfo) String() string { if ni == nil { return "NetInfo(nil)" } - return fmt.Sprintf("NetInfo{varies=%v hairpin=%v ipv6=%v ipv6os=%v udp=%v icmpv4=%v derp=#%v portmap=%v link=%q firewallmode=%q}", - ni.MappingVariesByDestIP, ni.HairPinning, ni.WorkingIPv6, + return fmt.Sprintf("NetInfo{varies=%v ipv6=%v ipv6os=%v udp=%v icmpv4=%v derp=#%v portmap=%v link=%q firewallmode=%q}", + ni.MappingVariesByDestIP, ni.WorkingIPv6, ni.OSHasIPv6, ni.WorkingUDP, ni.WorkingICMPv4, ni.PreferredDERP, ni.portMapSummary(), ni.LinkType, ni.FirewallMode) } @@ -1133,7 +1129,6 @@ func (ni *NetInfo) BasicallyEqual(ni2 *NetInfo) bool { return true } return ni.MappingVariesByDestIP == ni2.MappingVariesByDestIP && - ni.HairPinning == ni2.HairPinning && ni.WorkingIPv6 == ni2.WorkingIPv6 && ni.OSHasIPv6 == ni2.OSHasIPv6 && ni.WorkingUDP == ni2.WorkingUDP && diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 9aa7673886bc6..751b7c288f274 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -207,7 +207,6 @@ func (src *NetInfo) Clone() *NetInfo { // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _NetInfoCloneNeedsRegeneration = NetInfo(struct { MappingVariesByDestIP opt.Bool - HairPinning opt.Bool WorkingIPv6 opt.Bool OSHasIPv6 opt.Bool WorkingUDP opt.Bool diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index addd2330ba239..6691263eb997a 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -607,7 +607,6 @@ func TestNodeEqual(t *testing.T) { func TestNetInfoFields(t *testing.T) { handled := []string{ "MappingVariesByDestIP", - "HairPinning", "WorkingIPv6", "OSHasIPv6", "WorkingUDP", diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 88dd90096ab55..dbd29a87a354e 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -741,10 +741,6 @@ func (v *NetInfoView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { // vary based on the destination IP. func (v NetInfoView) MappingVariesByDestIP() opt.Bool { return v.ж.MappingVariesByDestIP } -// HairPinning is their router does hairpinning. -// It reports true even if there's no NAT involved. -func (v NetInfoView) HairPinning() opt.Bool { return v.ж.HairPinning } - // WorkingIPv6 is whether the host has IPv6 internet connectivity. func (v NetInfoView) WorkingIPv6() opt.Bool { return v.ж.WorkingIPv6 } @@ -809,7 +805,6 @@ func (v NetInfoView) String() string { return v.ж.String() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _NetInfoViewNeedsRegeneration = NetInfo(struct { MappingVariesByDestIP opt.Bool - HairPinning opt.Bool WorkingIPv6 opt.Bool OSHasIPv6 opt.Bool WorkingUDP opt.Bool From 2e265213fddada539452a59536c88dbbc535a27d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 8 Nov 2025 21:15:13 -0800 Subject: [PATCH 0646/1093] tsnet: fix TestConn to be fast, not flaky Fixes #17805 Change-Id: I36e37cb0cfb2ea7b2341fd4b9809fbf1dd46d991 Signed-off-by: Brad Fitzpatrick --- tsnet/tsnet_test.go | 118 +++++++++++++++++++++++++++++++++----------- 1 file changed, 89 insertions(+), 29 deletions(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 1b6ebf4e4f616..c19ae3c14ecbd 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -274,33 +274,56 @@ func TestDialBlocks(t *testing.T) { defer c.Close() } +// TestConn tests basic TCP connections between two tsnet Servers, s1 and s2: +// +// - s1, a subnet router, first listens on its TCP :8081. +// - s2 can connect to s1:8081 +// - s2 cannot connect to s1:8082 (no listener) +// - s2 can dial through the subnet router functionality (getting a synthetic RST +// that we verify we generated & saw) func TestConn(t *testing.T) { - if runtime.GOOS == "darwin" { - t.Skip("slow on macOS: https://github.com/tailscale/tailscale/issues/17805") - } tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() controlURL, c := startControl(t) s1, s1ip, s1PubKey := startServer(t, ctx, controlURL, "s1") - s2, _, _ := startServer(t, ctx, controlURL, "s2") - s1.lb.EditPrefs(&ipn.MaskedPrefs{ + // Track whether we saw an attempted dial to 192.0.2.1:8081. 
+ var saw192DocNetDial atomic.Bool + s1.RegisterFallbackTCPHandler(func(src, dst netip.AddrPort) (handler func(net.Conn), intercept bool) { + t.Logf("s1: fallback TCP handler called for %v -> %v", src, dst) + if dst.String() == "192.0.2.1:8081" { + saw192DocNetDial.Store(true) + } + return nil, true // nil handler but intercept=true means to send RST + }) + + lc1 := must.Get(s1.LocalClient()) + + must.Get(lc1.EditPrefs(ctx, &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ AdvertiseRoutes: []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")}, }, AdvertiseRoutesSet: true, - }) + })) c.SetSubnetRoutes(s1PubKey, []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")}) - lc2, err := s2.LocalClient() - if err != nil { - t.Fatal(err) - } + // Start s2 after s1 is fully set up, including advertising its routes, + // otherwise the test is flaky if the test starts dialing through s2 before + // our test control server has told s2 about s1's routes. + s2, _, _ := startServer(t, ctx, controlURL, "s2") + lc2 := must.Get(s2.LocalClient()) + + must.Get(lc2.EditPrefs(ctx, &ipn.MaskedPrefs{ + Prefs: ipn.Prefs{ + RouteAll: true, + }, + RouteAllSet: true, + })) // ping to make sure the connection is up. 
- res, err := lc2.Ping(ctx, s1ip, tailcfg.PingICMP) + res, err := lc2.Ping(ctx, s1ip, tailcfg.PingTSMP) if err != nil { t.Fatal(err) } @@ -313,12 +336,26 @@ func TestConn(t *testing.T) { } defer ln.Close() - w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) - if err != nil { - t.Fatal(err) - } + s1Conns := make(chan net.Conn) + go func() { + for { + c, err := ln.Accept() + if err != nil { + if ctx.Err() != nil { + return + } + t.Errorf("s1.Accept: %v", err) + return + } + select { + case s1Conns <- c: + case <-ctx.Done(): + c.Close() + } + } + }() - r, err := ln.Accept() + w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) if err != nil { t.Fatal(err) } @@ -328,28 +365,51 @@ func TestConn(t *testing.T) { t.Fatal(err) } - got := make([]byte, len(want)) - if _, err := io.ReadAtLeast(r, got, len(got)); err != nil { - t.Fatal(err) - } - t.Logf("got: %q", got) - if string(got) != want { - t.Errorf("got %q, want %q", got, want) + select { + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for connection") + case r := <-s1Conns: + got := make([]byte, len(want)) + _, err := io.ReadAtLeast(r, got, len(got)) + r.Close() + if err != nil { + t.Fatal(err) + } + t.Logf("got: %q", got) + if string(got) != want { + t.Errorf("got %q, want %q", got, want) + } } + // Dial a non-existent port on s1 and expect it to fail. _, err = s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8082", s1ip)) // some random port if err == nil { t.Fatalf("unexpected success; should have seen a connection refused error") } - - // s1 is a subnet router for TEST-NET-1 (192.0.2.0/24). Lets dial to that - // subnet from s2 to ensure a listener without an IP address (i.e. ":8081") - // only matches destination IPs corresponding to the node's IP, and not - // to any random IP a subnet is routing. - _, err = s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", "192.0.2.1")) + t.Logf("got expected failure: %v", err) + + // s1 is a subnet router for TEST-NET-1 (192.0.2.0/24). 
Let's dial to that + // subnet from s2 to ensure a listener without an IP address (i.e. our + // ":8081" listen above) only matches destination IPs corresponding to the + // s1 node's IP addresses, and not to any random IP of a subnet it's routing. + // + // The RegisterFallbackTCPHandler on s1 above handles sending a RST when the + // TCP SYN arrives from s2. But we bound it to 5 seconds lest a regression + // like tailscale/tailscale#17805 recur. + s2dialer := s2.Sys().Dialer.Get() + s2dialer.SetSystemDialerForTest(func(ctx context.Context, netw, addr string) (net.Conn, error) { + t.Logf("s2: unexpected system dial called for %s %s", netw, addr) + return nil, fmt.Errorf("system dialer called unexpectedly for %s %s", netw, addr) + }) + docCtx, docCancel := context.WithTimeout(ctx, 5*time.Second) + defer docCancel() + _, err = s2.Dial(docCtx, "tcp", "192.0.2.1:8081") if err == nil { t.Fatalf("unexpected success; should have seen a connection refused error") } + if !saw192DocNetDial.Load() { + t.Errorf("expected s1's fallback TCP handler to have been called for 192.0.2.1:8081") + } } func TestLoopbackLocalAPI(t *testing.T) { From ae3dff15e40982d3aeaf0e457001da434cb4e6d8 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Sun, 9 Nov 2025 15:49:24 -0800 Subject: [PATCH 0647/1093] ipn/ipnlocal: clean up some of the weird locking (#17802) * lock released early just to call `b.send` when it can call `b.sendToLocked` instead * `UnlockEarly` called to release the lock before trivially fast operations, we can wait for a defer there Updates #11649 Signed-off-by: Andrew Lytvynov --- ipn/ipnlocal/local.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ffab4b69dbd45..d7c16f98278dc 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1533,8 +1533,6 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control return } if st.Err != nil { - // The following do 
not depend on any data for which we need b locked. - unlock.UnlockEarly() if errors.Is(st.Err, io.EOF) { b.logf("[v1] Received error: EOF") return @@ -1543,7 +1541,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control var uerr controlclient.UserVisibleError if errors.As(st.Err, &uerr) { s := uerr.UserVisibleError() - b.send(ipn.Notify{ErrMessage: &s}) + b.sendToLocked(ipn.Notify{ErrMessage: &s}, allClients) } return } @@ -1743,6 +1741,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.health.SetLocalLogConfigHealth(errors.New(msg)) // Connecting to this tailnet without logging is forbidden; boot us outta here. b.mu.Lock() + defer b.mu.Unlock() // Get the current prefs again, since we unlocked above. prefs := b.pm.CurrentPrefs().AsStruct() prefs.WantRunning = false @@ -1754,8 +1753,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } - b.mu.Unlock() - b.send(ipn.Notify{ErrMessage: &msg, Prefs: &p}) + b.sendToLocked(ipn.Notify{ErrMessage: &msg, Prefs: &p}, allClients) return } if oldNetMap != nil { @@ -4795,8 +4793,8 @@ func (b *LocalBackend) setPortlistServices(sl []tailcfg.Service) { // TODO(danderson): we shouldn't be mangling hostinfo here after // painstakingly constructing it in twelvety other places. func (b *LocalBackend) doSetHostinfoFilterServices() { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() cc := b.cc if cc == nil { @@ -4821,8 +4819,6 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { hi.Services = []tailcfg.Service{} } - unlock.UnlockEarly() - // Don't mutate hi.Service's underlying array. Append to // the slice with no free capacity. 
c := len(hi.Services) From c7dbd3987eda5b2ad656ad15b412ba5c6a3cce1a Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 10 Nov 2025 09:53:40 +0000 Subject: [PATCH 0648/1093] tka: remove an unused parameter from `computeActiveAncestor` Updates #cleanup Change-Id: I86ee7a0d048dafc8c0d030291261240050451721 Signed-off-by: Alex Chan --- tka/tka.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tka/tka.go b/tka/tka.go index c37c39754661c..a8144e96fc2b1 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -342,7 +342,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) // hint to choose what to use. For that, we rely on the chainsThroughActive // bit, which signals to us that that ancestor was part of the // chain in a previous run. -func computeActiveAncestor(storage Chonk, chains []chain) (AUMHash, error) { +func computeActiveAncestor(chains []chain) (AUMHash, error) { // Dedupe possible ancestors, tracking if they were part of // the active chain on a previous run. ancestors := make(map[AUMHash]bool, len(chains)) @@ -392,7 +392,7 @@ func computeActiveChain(storage Chonk, lastKnownOldest *AUMHash, maxIter int) (c } // Find the right ancestor. - oldestHash, err := computeActiveAncestor(storage, chains) + oldestHash, err := computeActiveAncestor(chains) if err != nil { return chain{}, fmt.Errorf("computing ancestor: %v", err) } From 4c67df42f67190b6e4d65341562b17f6c502ce60 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 10 Nov 2025 12:03:41 +0000 Subject: [PATCH 0649/1093] tka: log a better error if there are no chain candidates Previously if `chains` was empty, it would be passed to `computeActiveAncestor()`, which would fail with the misleading error "multiple distinct chains". 
Updates tailscale/corp#33846 Signed-off-by: Alex Chan Change-Id: Ib93a755dbdf4127f81cbf69f3eece5a388db31c8 --- tka/tka.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tka/tka.go b/tka/tka.go index a8144e96fc2b1..c34e35e7b11ee 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -391,6 +391,10 @@ func computeActiveChain(storage Chonk, lastKnownOldest *AUMHash, maxIter int) (c return chain{}, fmt.Errorf("computing candidates: %v", err) } + if len(chains) == 0 { + return chain{}, errors.New("no chain candidates in AUM storage") + } + // Find the right ancestor. oldestHash, err := computeActiveAncestor(chains) if err != nil { From fe5501a4e95424c4501b53db83d1293b6fa61ec6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 9 Nov 2025 16:47:42 -0800 Subject: [PATCH 0650/1093] wgengine: make getStatus a bit cheaper (less alloc-y) This removes one of the O(n=peers) allocs in getStatus, as Engine.getStatus happens more often than Reconfig. Updates #17814 Change-Id: I8a87fbebbecca3aedadba38e46cc418fd163c2b0 Signed-off-by: Brad Fitzpatrick --- wgengine/userspace.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 619df655ccd44..1e70856cae10d 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -145,7 +145,7 @@ type userspaceEngine struct { netMap *netmap.NetworkMap // or nil closing bool // Close was called (even if we're still closing) statusCallback StatusCallback - peerSequence []key.NodePublic + peerSequence views.Slice[key.NodePublic] endpoints []tailcfg.Endpoint pendOpen map[flowtrackTuple]*pendingOpenFlow // see pendopen.go @@ -939,12 +939,15 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, e.tundev.SetWGConfig(cfg) peerSet := make(set.Set[key.NodePublic], len(cfg.Peers)) + e.mu.Lock() - e.peerSequence = e.peerSequence[:0] + seq := make([]key.NodePublic, 0, len(cfg.Peers)) for _, p := range cfg.Peers { - e.peerSequence = 
append(e.peerSequence, p.PublicKey) + seq = append(seq, p.PublicKey) peerSet.Add(p.PublicKey) } + e.peerSequence = views.SliceOf(seq) + nm := e.netMap e.mu.Unlock() @@ -1199,7 +1202,7 @@ func (e *userspaceEngine) getStatus() (*Status, error) { e.mu.Lock() closing := e.closing - peerKeys := slices.Clone(e.peerSequence) + peerKeys := e.peerSequence localAddrs := slices.Clone(e.endpoints) e.mu.Unlock() @@ -1207,8 +1210,8 @@ func (e *userspaceEngine) getStatus() (*Status, error) { return nil, ErrEngineClosing } - peers := make([]ipnstate.PeerStatusLite, 0, len(peerKeys)) - for _, key := range peerKeys { + peers := make([]ipnstate.PeerStatusLite, 0, peerKeys.Len()) + for _, key := range peerKeys.All() { if status, ok := e.getPeerStatusLite(key); ok { peers = append(peers, status) } From e059382174c43dff9f237f75dba0a6470e8acc47 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 10 Nov 2025 10:22:47 -0800 Subject: [PATCH 0651/1093] wgengine/magicsock: clean up determineEndpoints docs (#17822) Updates #cleanup Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 6584789017624..1f0a85f07714d 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1243,8 +1243,8 @@ func (c *Conn) DiscoPublicKey() key.DiscoPublic { } // determineEndpoints returns the machine's endpoint addresses. It does a STUN -// lookup (via netcheck) to determine its public address. Additionally any -// static enpoints provided by user are always added to the returned endpoints +// lookup (via netcheck) to determine its public address. Additionally, any +// static endpoints provided by user are always added to the returned endpoints // without validating if the node can be reached via those endpoints. // // c.mu must NOT be held. 
From e0e87311306ed6dde78a36110307afac86146768 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 8 Nov 2025 20:21:22 -0800 Subject: [PATCH 0652/1093] feature, ipn/ipnlocal: add, use feature.CanSystemdStatus for more DCE When systemd notification support was omitted from the build, or on non-Linux systems, we were unnecessarily emitting code and generating garbage stringifying addresses upon transition to the Running state. Updates #12614 Change-Id: If713f47351c7922bb70e9da85bf92725b25954b9 Signed-off-by: Brad Fitzpatrick --- feature/sdnotify.go | 9 ++++++++- ipn/ipnlocal/local.go | 12 +++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/feature/sdnotify.go b/feature/sdnotify.go index e785dc1acc09a..7a786dfabd519 100644 --- a/feature/sdnotify.go +++ b/feature/sdnotify.go @@ -23,10 +23,17 @@ var HookSystemdStatus Hook[func(format string, args ...any)] // It does nothing on non-Linux systems or if the binary was built without // the sdnotify feature. func SystemdStatus(format string, args ...any) { - if runtime.GOOS != "linux" || !buildfeatures.HasSDNotify { + if !CanSystemdStatus { // mid-stack inlining DCE return } if f, ok := HookSystemdStatus.GetOk(); ok { f(format, args...) } } + +// CanSystemdStatus reports whether the current build has systemd notifications +// linked in. +// +// It's effectively the same as HookSystemdStatus.IsSet(), but a constant for +// dead code elimination reasons. 
+const CanSystemdStatus = runtime.GOOS == "linux" && buildfeatures.HasSDNotify diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d7c16f98278dc..245e23db1ac88 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5629,12 +5629,14 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // Needed so that UpdateEndpoints can run b.e.RequestStatus() case ipn.Running: - var addrStrs []string - addrs := netMap.GetAddresses() - for _, p := range addrs.All() { - addrStrs = append(addrStrs, p.Addr().String()) + if feature.CanSystemdStatus { + var addrStrs []string + addrs := netMap.GetAddresses() + for _, p := range addrs.All() { + addrStrs = append(addrStrs, p.Addr().String()) + } + feature.SystemdStatus("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) } - feature.SystemdStatus("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) default: b.logf("[unexpected] unknown newState %#v", newState) } From 8ed6bb3198246df240d32b3361738aac6102e254 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 9 Nov 2025 16:13:39 -0800 Subject: [PATCH 0653/1093] ipn/ipnlocal: move vipServiceHash etc to serve.go, out of local.go Updates #12614 Change-Id: I3c16b94fcb997088ff18d5a21355e0279845ed7e Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 67 ++++++++++++-------------------------- ipn/ipnlocal/local_test.go | 6 ++-- ipn/ipnlocal/serve.go | 53 +++++++++++++++++++++++++++++- 3 files changed, 75 insertions(+), 51 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 245e23db1ac88..8bdc1a14a0999 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -10,7 +10,6 @@ import ( "context" "crypto/sha256" "encoding/binary" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -5487,20 +5486,9 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip } hi.SSH_HostKeys = sshHostKeys - hi.ServicesHash = 
b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) - - // The Hostinfo.IngressEnabled field is used to communicate to control whether - // the node has funnel enabled. - hi.IngressEnabled = b.hasIngressEnabledLocked() - // The Hostinfo.WantIngress field tells control whether the user intends - // to use funnel with this node even though it is not currently enabled. - // This is an optimization to control- Funnel requires creation of DNS - // records and because DNS propagation can take time, we want to ensure - // that the records exist for any node that intends to use funnel even - // if it's not enabled. If hi.IngressEnabled is true, control knows that - // DNS records are needed, so we can save bandwidth and not send - // WireIngress. - hi.WireIngress = b.shouldWireInactiveIngressLocked() + for _, f := range hookMaybeMutateHostinfoLocked { + f(b, hi, prefs) + } if buildfeatures.HasAppConnectors { hi.AppConnector.Set(prefs.AppConnector().Advertise) @@ -6284,36 +6272,34 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. } // Update funnel and service hash info in hostinfo and kick off control update if needed. - b.updateIngressAndServiceHashLocked(prefs) + b.maybeSentHostinfoIfChangedLocked(prefs) b.setTCPPortsIntercepted(handlePorts) } -// updateIngressAndServiceHashLocked updates the hostinfo.ServicesHash, hostinfo.WireIngress and +// hookMaybeMutateHostinfoLocked is a hook that allows conditional features +// to mutate the provided hostinfo before it is sent to control. +// +// The hook function should return true if it mutated the hostinfo. +// +// The LocalBackend's mutex is held while calling. +var hookMaybeMutateHostinfoLocked feature.Hooks[func(*LocalBackend, *tailcfg.Hostinfo, ipn.PrefsView) bool] + +// maybeSentHostinfoIfChangedLocked updates the hostinfo.ServicesHash, hostinfo.WireIngress and // hostinfo.IngressEnabled fields and kicks off a Hostinfo update if the values have changed. // // b.mu must be held. 
-func (b *LocalBackend) updateIngressAndServiceHashLocked(prefs ipn.PrefsView) { +func (b *LocalBackend) maybeSentHostinfoIfChangedLocked(prefs ipn.PrefsView) { if b.hostinfo == nil { return } - hostInfoChanged := false - if ie := b.hasIngressEnabledLocked(); b.hostinfo.IngressEnabled != ie { - b.logf("Hostinfo.IngressEnabled changed to %v", ie) - b.hostinfo.IngressEnabled = ie - hostInfoChanged = true - } - if wire := b.shouldWireInactiveIngressLocked(); b.hostinfo.WireIngress != wire { - b.logf("Hostinfo.WireIngress changed to %v", wire) - b.hostinfo.WireIngress = wire - hostInfoChanged = true - } - latestHash := b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) - if b.hostinfo.ServicesHash != latestHash { - b.hostinfo.ServicesHash = latestHash - hostInfoChanged = true + changed := false + for _, f := range hookMaybeMutateHostinfoLocked { + if f(b, b.hostinfo, prefs) { + changed = true + } } // Kick off a Hostinfo update to control if ingress status has changed. - if hostInfoChanged { + if changed { b.goTracker.Go(b.doSetHostinfoFilterServices) } } @@ -7707,19 +7693,6 @@ func maybeUsernameOf(actor ipnauth.Actor) string { return username } -func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { - if len(services) == 0 { - return "" - } - buf, err := json.Marshal(services) - if err != nil { - b.logf("vipServiceHashLocked: %v", err) - return "" - } - hash := sha256.Sum256(buf) - return hex.EncodeToString(hash[:]) -} - var ( metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") ) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 33ecb688c52a3..bac74a33ccf80 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -6745,7 +6745,7 @@ func TestUpdateIngressAndServiceHashLocked(t *testing.T) { if tt.hasPreviousSC { b.mu.Lock() b.serveConfig = previousSC.View() - b.hostinfo.ServicesHash = b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) + b.hostinfo.ServicesHash = 
vipServiceHash(b.logf, b.vipServicesFromPrefsLocked(prefs)) b.mu.Unlock() } b.serveConfig = tt.sc.View() @@ -6763,7 +6763,7 @@ func TestUpdateIngressAndServiceHashLocked(t *testing.T) { })() was := b.goTracker.StartedGoroutines() - b.updateIngressAndServiceHashLocked(prefs) + b.maybeSentHostinfoIfChangedLocked(prefs) if tt.hi != nil { if tt.hi.IngressEnabled != tt.wantIngress { @@ -6773,7 +6773,7 @@ func TestUpdateIngressAndServiceHashLocked(t *testing.T) { t.Errorf("WireIngress = %v, want %v", tt.hi.WireIngress, tt.wantWireIngress) } b.mu.Lock() - svcHash := b.vipServiceHash(b.vipServicesFromPrefsLocked(prefs)) + svcHash := vipServiceHash(b.logf, b.vipServicesFromPrefsLocked(prefs)) b.mu.Unlock() if tt.hi.ServicesHash != svcHash { t.Errorf("ServicesHash = %v, want %v", tt.hi.ServicesHash, svcHash) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 554761ed7bb18..1c527e130ebbe 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -59,6 +59,9 @@ func init() { b.setVIPServicesTCPPortsInterceptedLocked(nil) }) + hookMaybeMutateHostinfoLocked.Add(maybeUpdateHostinfoServicesHashLocked) + hookMaybeMutateHostinfoLocked.Add(maybeUpdateHostinfoFunnelLocked) + RegisterC2N("GET /vip-services", handleC2NVIPServicesGet) } @@ -1227,7 +1230,7 @@ func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Req b.logf("c2n: GET /vip-services received") var res tailcfg.C2NVIPServicesResponse res.VIPServices = b.VIPServices() - res.ServicesHash = b.vipServiceHash(res.VIPServices) + res.ServicesHash = vipServiceHash(b.logf, res.VIPServices) w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(res) @@ -1443,3 +1446,51 @@ func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tail b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) } + +func maybeUpdateHostinfoServicesHashLocked(b *LocalBackend, hi *tailcfg.Hostinfo, prefs ipn.PrefsView) bool { + 
latestHash := vipServiceHash(b.logf, b.vipServicesFromPrefsLocked(prefs)) + if hi.ServicesHash != latestHash { + hi.ServicesHash = latestHash + return true + } + return false +} + +func maybeUpdateHostinfoFunnelLocked(b *LocalBackend, hi *tailcfg.Hostinfo, prefs ipn.PrefsView) (changed bool) { + // The Hostinfo.IngressEnabled field is used to communicate to control whether + // the node has funnel enabled. + if ie := b.hasIngressEnabledLocked(); hi.IngressEnabled != ie { + b.logf("Hostinfo.IngressEnabled changed to %v", ie) + hi.IngressEnabled = ie + changed = true + } + // The Hostinfo.WireIngress field tells control whether the user intends + // to use funnel with this node even though it is not currently enabled. + // This is an optimization to control- Funnel requires creation of DNS + // records and because DNS propagation can take time, we want to ensure + // that the records exist for any node that intends to use funnel even + // if it's not enabled. If hi.IngressEnabled is true, control knows that + // DNS records are needed, so we can save bandwidth and not send + // WireIngress. + if wire := b.shouldWireInactiveIngressLocked(); hi.WireIngress != wire { + b.logf("Hostinfo.WireIngress changed to %v", wire) + hi.WireIngress = wire + changed = true + } + return changed +} + +func vipServiceHash(logf logger.Logf, services []*tailcfg.VIPService) string { + if len(services) == 0 { + return "" + } + h := sha256.New() + jh := json.NewEncoder(h) + if err := jh.Encode(services); err != nil { + logf("vipServiceHashLocked: %v", err) + return "" + } + var buf [sha256.Size]byte + h.Sum(buf[:0]) + return hex.EncodeToString(buf[:]) +} From 6e24f509466794dd16dc25df917ecca0686efb33 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 10 Nov 2025 16:48:41 -0800 Subject: [PATCH 0654/1093] tsnet: add tstest.Shard on the slow tests So they're not all run N times on the sharded oss builders and are only run one time each. 
Updates tailscale/corp#28679 Change-Id: Ie21e84b06731fdc8ec3212eceb136c8fc26b0115 Signed-off-by: Brad Fitzpatrick --- tsnet/packet_filter_test.go | 2 ++ tsnet/tsnet_test.go | 15 +++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/tsnet/packet_filter_test.go b/tsnet/packet_filter_test.go index 462234222f936..455400eaa0c8a 100644 --- a/tsnet/packet_filter_test.go +++ b/tsnet/packet_filter_test.go @@ -12,6 +12,7 @@ import ( "tailscale.com/ipn" "tailscale.com/tailcfg" + "tailscale.com/tstest" "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/netmap" @@ -47,6 +48,7 @@ func waitFor(t testing.TB, ctx context.Context, s *Server, f func(*netmap.Networ // netmaps and turning them into packet filters together. Only the control-plane // side is mocked out. func TestPacketFilterFromNetmap(t *testing.T) { + tstest.Shard(t) t.Parallel() var key key.NodePublic diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index c19ae3c14ecbd..b0deb20796221 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -235,6 +235,7 @@ func startServer(t *testing.T, ctx context.Context, controlURL, hostname string) } func TestDialBlocks(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -282,6 +283,7 @@ func TestDialBlocks(t *testing.T) { // - s2 can dial through the subnet router functionality (getting a synthetic RST // that we verify we generated & saw) func TestConn(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -414,6 +416,7 @@ func TestConn(t *testing.T) { func TestLoopbackLocalAPI(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/8557") + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -489,6 +492,7 @@ func TestLoopbackLocalAPI(t 
*testing.T) { func TestLoopbackSOCKS5(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/8198") + tstest.Shard(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -539,6 +543,7 @@ func TestLoopbackSOCKS5(t *testing.T) { } func TestTailscaleIPs(t *testing.T) { + tstest.Shard(t) controlURL, _ := startControl(t) tmp := t.TempDir() @@ -581,6 +586,7 @@ func TestTailscaleIPs(t *testing.T) { // TestListenerCleanup is a regression test to verify that s.Close doesn't // deadlock if a listener is still open. func TestListenerCleanup(t *testing.T) { + tstest.Shard(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -623,6 +629,7 @@ func (wc *closeTrackConn) Close() error { // tests https://github.com/tailscale/tailscale/issues/6973 -- that we can start a tsnet server, // stop it, and restart it, even on Windows. func TestStartStopStartGetsSameIP(t *testing.T) { + tstest.Shard(t) controlURL, _ := startControl(t) tmp := t.TempDir() @@ -672,6 +679,7 @@ func TestStartStopStartGetsSameIP(t *testing.T) { } func TestFunnel(t *testing.T) { + tstest.Shard(t) ctx, dialCancel := context.WithTimeout(context.Background(), 30*time.Second) defer dialCancel() @@ -733,6 +741,7 @@ func TestFunnel(t *testing.T) { } func TestListenerClose(t *testing.T) { + tstest.Shard(t) ctx := context.Background() controlURL, _ := startControl(t) @@ -812,6 +821,7 @@ func (c *bufferedConn) Read(b []byte) (int, error) { } func TestFallbackTCPHandler(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -854,6 +864,7 @@ func TestFallbackTCPHandler(t *testing.T) { } func TestCapturePcap(t *testing.T) { + tstest.Shard(t) const timeLimit = 120 ctx, cancel := context.WithTimeout(context.Background(), timeLimit*time.Second) defer cancel() @@ -907,6 +918,7 @@ func TestCapturePcap(t *testing.T) { } func 
TestUDPConn(t *testing.T) { + tstest.Shard(t) tstest.ResourceCheck(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() @@ -1098,6 +1110,7 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC } func TestUserMetricsByteCounters(t *testing.T) { + tstest.Shard(t) ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) defer cancel() @@ -1212,6 +1225,7 @@ func TestUserMetricsByteCounters(t *testing.T) { } func TestUserMetricsRouteGauges(t *testing.T) { + tstest.Shard(t) // Windows does not seem to support or report back routes when running in // userspace via tsnet. So, we skip this check on Windows. // TODO(kradalby): Figure out if this is correct. @@ -1368,6 +1382,7 @@ func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *local.Client) { } func TestDeps(t *testing.T) { + tstest.Shard(t) deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", From 4650061326af386d370e5ebc5b2fae018752908b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 10 Nov 2025 17:23:22 -0800 Subject: [PATCH 0655/1093] ipn/ipnlocal: fix state_test data race seen in CI Unfortunately I closed the tab and lost it in my sea of CI failures I'm currently fighting. Updates #cleanup Change-Id: I4e3a652d57d52b75238f25d104fc1987add64191 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/state_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index fca01f1056fcb..53b8f78e4f33c 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -652,7 +652,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { } // undo the state hack above. + b.mu.Lock() b.state = ipn.Starting + b.mu.Unlock() // User wants to logout. 
store.awaitWrite() From 18806de400a29b035a9985f22d1390a50e38fcab Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 10 Nov 2025 20:07:33 -0800 Subject: [PATCH 0656/1093] wgengine/magicsock: validate endpoint.derpAddr in Conn.onUDPRelayAllocResp (#17828) Otherwise a zero value will panic in Conn.sendUDPStd. Updates #17827 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 1f0a85f07714d..3d7b16f309bdb 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -651,7 +651,9 @@ func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { ep.mu.Lock() defer ep.mu.Unlock() derpAddr := ep.derpAddr - go c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, disco.key, allocResp.Message, discoVerboseLog) + if derpAddr.IsValid() { + go c.sendDiscoMessage(epAddr{ap: derpAddr}, ep.publicKey, disco.key, allocResp.Message, discoVerboseLog) + } } // Synchronize waits for all [eventbus] events published From 2ad2d4d409e6b5eac5dbecb59ce307eb3297587c Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Mon, 10 Nov 2025 21:08:13 -0800 Subject: [PATCH 0657/1093] wgengine/magicsock: fix UDPRelayAllocReq/Resp deadlock (#17831) Updates #17830 Signed-off-by: Jordan Whited --- wgengine/magicsock/magicsock.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 3d7b16f309bdb..f1721e1d929e1 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2444,7 +2444,10 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake if !nodeHasCap(c.filt, c.peers.At(peerI), c.self, tailcfg.PeerCapabilityRelay) { return } - c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{ + // [Conn.mu] must not be held while publishing, or [Conn.onUDPRelayAllocResp] + // can deadlock as the 
req sub and resp pub are the same goroutine. + // See #17830. + go c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{ RxFromDiscoKey: sender, RxFromNodeKey: nodeKey, Message: req, From 42ce5c88bed24817def3049d75e5a6810f172c7a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 10 Nov 2025 16:51:46 -0800 Subject: [PATCH 0658/1093] wgengine/magicsock: unblock Conn.Synchronize on Conn.Close I noticed a deadlock in a test in a in-development PR where during a shutdown storm of things (from a tsnet.Server.Close), LocalBackend was trying to call magicsock.Conn.Synchronize but the magicsock and/or eventbus was already shut down and no longer processing events. Updates #16369 Change-Id: I58b1f86c8959303c3fb46e2e3b7f38f6385036f1 Signed-off-by: Brad Fitzpatrick --- wgengine/magicsock/magicsock.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index f1721e1d929e1..d44cf1c1173f9 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -665,7 +665,10 @@ func (c *Conn) Synchronize() { } sp := syncPoint(make(chan struct{})) c.syncPub.Publish(sp) - sp.Wait() + select { + case <-sp: + case <-c.donec: + } } // NewConn creates a magic Conn listening on opts.Port. 
From 1eba5b0cbdf044b5a3a45fc5372f240865fb8ca3 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 10 Nov 2025 15:44:55 -0800 Subject: [PATCH 0659/1093] util/eventbus: log goroutine stacks when hung in CI Updates #17680 Change-Id: Ie48dc2d64b7583d68578a28af52f6926f903ca4f Signed-off-by: Brad Fitzpatrick --- cmd/derper/depaware.txt | 2 +- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- tsnet/depaware.txt | 2 +- util/eventbus/subscribe.go | 7 +++++++ 9 files changed, 15 insertions(+), 8 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 01c278fbd1691..0a75ac43e5a28 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -139,7 +139,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/ipn+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/net/netmon tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/tsweb+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index ebd22770e9bd2..b800b78c6aad4 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -848,7 +848,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/cmd/k8s-operator+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ 
tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index b249639bc80bc..53dc998bda611 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -171,7 +171,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/types/key+ tailscale.com/types/views from tailscale.com/tailcfg+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/net/netcheck+ tailscale.com/util/cloudenv from tailscale.com/net/dnscache+ tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 224026f25368d..e750f86e6d4e5 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -144,7 +144,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 9633e73989046..17f1a22b24da0 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -171,7 +171,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/cmpver from tailscale.com/clientupdate diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index bdc110e1a83ec..1b5bdab912430 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -416,7 +416,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/views from tailscale.com/ipn/ipnlocal+ tailscale.com/util/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/control/controlclient+ tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+ tailscale.com/util/cmpver from tailscale.com/net/dns+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index ebf03b541c585..21ca122c4bdc3 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -253,7 +253,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ diff 
--git a/tsnet/depaware.txt b/tsnet/depaware.txt index 4817a511acf6f..cf91aa483f175 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -248,7 +248,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/views from tailscale.com/appc+ tailscale.com/util/backoff from tailscale.com/control/controlclient+ tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ - tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ LW tailscale.com/util/cmpver from tailscale.com/net/dns+ diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 03d577f27c3fe..53253d33045c5 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -7,10 +7,12 @@ import ( "context" "fmt" "reflect" + "runtime" "sync" "time" "tailscale.com/types/logger" + "tailscale.com/util/cibuild" ) type DeliveredEvent struct { @@ -329,6 +331,11 @@ func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredE select { case <-s.slow.C: s.logf("giving up on subscriber for %T after %v at close", t, time.Since(start)) + if cibuild.On() { + all := make([]byte, 2<<20) + n := runtime.Stack(all, true) + s.logf("goroutine stacks:\n%s", all[:n]) + } case <-callDone: } return false From 3280dac79787d464493b2d4e735bdc1e5de0a2ef Mon Sep 17 00:00:00 2001 From: Sachin Iyer Date: Mon, 10 Nov 2025 16:05:09 -0800 Subject: [PATCH 0660/1093] wgengine/router/osrouter: fix linux magicsock port changing Fixes #17837 Signed-off-by: Sachin Iyer --- wgengine/router/osrouter/router_linux.go | 2 +- wgengine/router/osrouter/router_linux_test.go | 40 +++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 58bd0513ab768..196e1d5529025 100644 --- 
a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -581,7 +581,7 @@ func (r *linuxRouter) updateMagicsockPort(port uint16, network string) error { } if port != 0 { - if err := r.nfr.AddMagicsockPortRule(*magicsockPort, network); err != nil { + if err := r.nfr.AddMagicsockPortRule(port, network); err != nil { return fmt.Errorf("add magicsock port rule: %w", err) } } diff --git a/wgengine/router/osrouter/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go index 39210ddef14a2..929fda1b42e35 100644 --- a/wgengine/router/osrouter/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -1290,3 +1290,43 @@ func TestIPRulesForUBNT(t *testing.T) { } } } + +func TestUpdateMagicsockPortChange(t *testing.T) { + nfr := &fakeIPTablesRunner{ + t: t, + ipt4: make(map[string][]string), + ipt6: make(map[string][]string), + } + nfr.ipt4["filter/ts-input"] = []string{} + + r := &linuxRouter{ + logf: logger.Discard, + health: new(health.Tracker), + netfilterMode: netfilterOn, + nfr: nfr, + } + + if err := r.updateMagicsockPort(12345, "udp4"); err != nil { + t.Fatalf("failed to set initial port: %v", err) + } + + if err := r.updateMagicsockPort(54321, "udp4"); err != nil { + t.Fatalf("failed to update port: %v", err) + } + + newPortRule := buildMagicsockPortRule(54321) + hasNewRule := slices.Contains(nfr.ipt4["filter/ts-input"], newPortRule) + + if !hasNewRule { + t.Errorf("firewall rule for NEW port 54321 not found.\nExpected: %s\nActual rules: %v", + newPortRule, nfr.ipt4["filter/ts-input"]) + } + + oldPortRule := buildMagicsockPortRule(12345) + hasOldRule := slices.Contains(nfr.ipt4["filter/ts-input"], oldPortRule) + + if hasOldRule { + t.Errorf("firewall rule for OLD port 12345 still exists (should be deleted).\nFound: %s\nAll rules: %v", + oldPortRule, nfr.ipt4["filter/ts-input"]) + } +} From 85cb64c4ff0537b5722f2df84393ef4d8c4c83ad Mon Sep 17 00:00:00 2001 From: Sachin Iyer Date: Tue, 11 Nov 2025 
10:07:02 -0800 Subject: [PATCH 0661/1093] wf: correct IPv6 link-local range from ff80::/10 to fe80::/10 (#17840) Fixes #17833 Signed-off-by: Sachin Iyer --- wf/firewall.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wf/firewall.go b/wf/firewall.go index 076944c8decad..dc1045ff84934 100644 --- a/wf/firewall.go +++ b/wf/firewall.go @@ -18,7 +18,7 @@ import ( // Known addresses. var ( - linkLocalRange = netip.MustParsePrefix("ff80::/10") + linkLocalRange = netip.MustParsePrefix("fe80::/10") linkLocalDHCPMulticast = netip.MustParseAddr("ff02::1:2") siteLocalDHCPMulticast = netip.MustParseAddr("ff05::1:3") linkLocalRouterMulticast = netip.MustParseAddr("ff02::2") From d37884c734762cdd96d184c877b3b6eac139e5a2 Mon Sep 17 00:00:00 2001 From: Sachin Iyer Date: Wed, 12 Nov 2025 02:46:40 -0800 Subject: [PATCH 0662/1093] cmd/k8s-operator: remove early return in ingress matching (#17841) Fixes #17834 Signed-off-by: Sachin Iyer --- cmd/k8s-operator/operator.go | 2 +- cmd/k8s-operator/operator_test.go | 36 +++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index d5ff077800b24..6b545a8273567 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -1122,7 +1122,7 @@ func serviceHandlerForIngress(cl client.Client, logger *zap.SugaredLogger, ingre reqs := make([]reconcile.Request, 0) for _, ing := range ingList.Items { if ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != ingressClassName { - return nil + continue } if hasProxyGroupAnnotation(&ing) { // We don't want to reconcile backend Services for Ingresses for ProxyGroups. 
diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 5af237342e8cd..b15c93b1c93d0 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1698,6 +1698,42 @@ func Test_serviceHandlerForIngress(t *testing.T) { } } +func Test_serviceHandlerForIngress_multipleIngressClasses(t *testing.T) { + fc := fake.NewFakeClient() + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "backend", Namespace: "default"}, + } + mustCreate(t, fc, svc) + + mustCreate(t, fc, &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "nginx-ing", Namespace: "default"}, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("nginx"), + DefaultBackend: &networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}, + }, + }) + + mustCreate(t, fc, &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "ts-ing", Namespace: "default"}, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}, + }, + }) + + got := serviceHandlerForIngress(fc, zl.Sugar(), "tailscale")(context.Background(), svc) + want := []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: "default", Name: "ts-ing"}}} + + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("unexpected reconcile requests (-got +want):\n%s", diff) + } +} + func Test_clusterDomainFromResolverConf(t *testing.T) { zl, err := zap.NewDevelopment() if err != nil { From 16e90dcb27605f1bd03bc1eda0b1d256662c89bf Mon Sep 17 00:00:00 2001 From: Sachin Iyer Date: Wed, 12 Nov 2025 07:13:21 -0800 Subject: [PATCH 0663/1093] net/batching: fix gro size handling for misordered UDP_GRO messages (#17842) Fixes #17835 Signed-off-by: Sachin Iyer --- net/batching/conn_linux.go | 2 +- net/batching/conn_linux_test.go | 36 
+++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/net/batching/conn_linux.go b/net/batching/conn_linux.go index 7f6c4ed422e31..bd7ac25be2a4d 100644 --- a/net/batching/conn_linux.go +++ b/net/batching/conn_linux.go @@ -353,7 +353,7 @@ func getGSOSizeFromControl(control []byte) (int, error) { ) for len(rem) > unix.SizeofCmsghdr { - hdr, data, rem, err = unix.ParseOneSocketControlMessage(control) + hdr, data, rem, err = unix.ParseOneSocketControlMessage(rem) if err != nil { return 0, fmt.Errorf("error parsing socket control message: %w", err) } diff --git a/net/batching/conn_linux_test.go b/net/batching/conn_linux_test.go index e518c3f9f06d9..5e3c29e5ce37b 100644 --- a/net/batching/conn_linux_test.go +++ b/net/batching/conn_linux_test.go @@ -8,8 +8,11 @@ import ( "net" "testing" + "unsafe" + "github.com/tailscale/wireguard-go/conn" "golang.org/x/net/ipv6" + "golang.org/x/sys/unix" "tailscale.com/net/packet" ) @@ -314,3 +317,36 @@ func TestMinReadBatchMsgsLen(t *testing.T) { t.Fatalf("IdealBatchSize: %d != conn.IdealBatchSize(): %d", IdealBatchSize, conn.IdealBatchSize) } } + +func Test_getGSOSizeFromControl_MultipleMessages(t *testing.T) { + // Test that getGSOSizeFromControl correctly parses UDP_GRO when it's not the first control message. + const expectedGSOSize = 1420 + + // First message: IP_TOS + firstMsgLen := unix.CmsgSpace(1) + firstMsg := make([]byte, firstMsgLen) + hdr1 := (*unix.Cmsghdr)(unsafe.Pointer(&firstMsg[0])) + hdr1.Level = unix.SOL_IP + hdr1.Type = unix.IP_TOS + hdr1.SetLen(unix.CmsgLen(1)) + firstMsg[unix.SizeofCmsghdr] = 0 + + // Second message: UDP_GRO + secondMsgLen := unix.CmsgSpace(2) + secondMsg := make([]byte, secondMsgLen) + hdr2 := (*unix.Cmsghdr)(unsafe.Pointer(&secondMsg[0])) + hdr2.Level = unix.SOL_UDP + hdr2.Type = unix.UDP_GRO + hdr2.SetLen(unix.CmsgLen(2)) + binary.NativeEndian.PutUint16(secondMsg[unix.SizeofCmsghdr:], expectedGSOSize) + + control := append(firstMsg, secondMsg...) 
+ + gsoSize, err := getGSOSizeFromControl(control) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if gsoSize != expectedGSOSize { + t.Errorf("got GSO size %d, want %d", gsoSize, expectedGSOSize) + } +} From e8d2f964499989d1cd99db556b0a3e3f293dd86b Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 12 Nov 2025 10:25:27 -0500 Subject: [PATCH 0664/1093] ipn/ipnlocal, net/netns: add node cap to disable netns interface binding on netext Apple clients (#17691) updates tailscale/corp#31571 It appears that on the latest macOS, iOS and tVOS versions, the work that netns is doing to bind outgoing connections to the default interface (and all of the trimmings and workarounds in netmon et al that make that work) are not needed. The kernel is extension-aware and doing nothing, is the right thing. This is, however, not the case for tailscaled (which is not a special process). To allow us to test this assertion (and where it might break things), we add a new node cap that turns this behaviour off only for network-extension equipped clients, making it possible to turn this off tailnet-wide, without breaking any tailscaled macos nodes. Signed-off-by: Jonathan Nobels --- ipn/ipnlocal/local.go | 7 ++++--- net/netns/netns.go | 23 +++++++++++++++++++---- net/netns/netns_darwin.go | 7 +++---- tailcfg/tailcfg.go | 4 ++++ 4 files changed, 30 insertions(+), 11 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8bdc1a14a0999..62d8ea49073a3 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6169,9 +6169,10 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.setDebugLogsByCapabilityLocked(nm) } - // See the netns package for documentation on what this capability does. 
- netns.SetBindToInterfaceByRoute(nm.HasCap(tailcfg.CapabilityBindToInterfaceByRoute)) - netns.SetDisableBindConnToInterface(nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface)) + // See the netns package for documentation on what these capabilities do. + netns.SetBindToInterfaceByRoute(b.logf, nm.HasCap(tailcfg.CapabilityBindToInterfaceByRoute)) + netns.SetDisableBindConnToInterface(b.logf, nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface)) + netns.SetDisableBindConnToInterfaceAppleExt(b.logf, nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterfaceAppleExt)) b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) if buildfeatures.HasServe { diff --git a/net/netns/netns.go b/net/netns/netns.go index a473506fac024..ccb20d27ed890 100644 --- a/net/netns/netns.go +++ b/net/netns/netns.go @@ -39,20 +39,35 @@ var bindToInterfaceByRoute atomic.Bool // setting the TS_BIND_TO_INTERFACE_BY_ROUTE. // // Currently, this only changes the behaviour on macOS and Windows. -func SetBindToInterfaceByRoute(v bool) { +func SetBindToInterfaceByRoute(logf logger.Logf, v bool) { + logf("netns: bindToInterfaceByRoute to %v", v) bindToInterfaceByRoute.Store(v) } var disableBindConnToInterface atomic.Bool // SetDisableBindConnToInterface disables the (normal) behavior of binding -// connections to the default network interface. +// connections to the default network interface on Darwin nodes. // -// Currently, this only has an effect on Darwin. -func SetDisableBindConnToInterface(v bool) { +// Unless you intended to disable this for tailscaled on macOS (which is likely +// to break things), you probably wanted to set +// SetDisableBindConnToInterfaceAppleExt which will disable explicit interface +// binding only when tailscaled is running inside a network extension process.
+func SetDisableBindConnToInterface(logf logger.Logf, v bool) { + logf("netns: disableBindConnToInterface set to %v", v) disableBindConnToInterface.Store(v) } +var disableBindConnToInterfaceAppleExt atomic.Bool + +// SetDisableBindConnToInterfaceAppleExt disables the (normal) behavior of binding +// connections to the default network interface but only on Apple clients where +// tailscaled is running inside a network extension. +func SetDisableBindConnToInterfaceAppleExt(logf logger.Logf, v bool) { + logf("netns: disableBindConnToInterfaceAppleExt set to %v", v) + disableBindConnToInterfaceAppleExt.Store(v) +} + // Listener returns a new net.Listener with its Control hook func // initialized as necessary to run in logical network namespace that // doesn't route back into Tailscale. diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go index 1f30f00d2a870..ff05a3f3139c3 100644 --- a/net/netns/netns_darwin.go +++ b/net/netns/netns_darwin.go @@ -21,6 +21,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" + "tailscale.com/version" ) func control(logf logger.Logf, netMon *netmon.Monitor) func(network, address string, c syscall.RawConn) error { @@ -36,13 +37,11 @@ var errInterfaceStateInvalid = errors.New("interface state invalid") // controlLogf binds c to a particular interface as necessary to dial the // provided (network, address). func controlLogf(logf logger.Logf, netMon *netmon.Monitor, network, address string, c syscall.RawConn) error { - if isLocalhost(address) { - // Don't bind to an interface for localhost connections. 
+ if disableBindConnToInterface.Load() || (version.IsMacGUIVariant() && disableBindConnToInterfaceAppleExt.Load()) { return nil } - if disableBindConnToInterface.Load() { - logf("netns_darwin: binding connection to interfaces disabled") + if isLocalhost(address) { return nil } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 43ed3188fdb26..346957803d235 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2460,6 +2460,10 @@ const ( // of connections to the default network interface on Darwin nodes. CapabilityDebugDisableBindConnToInterface NodeCapability = "https://tailscale.com/cap/debug-disable-bind-conn-to-interface" + // CapabilityDebugDisableBindConnToInterfaceAppleExt disables the automatic binding + // of connections to the default network interface on Darwin nodes using network extensions. + CapabilityDebugDisableBindConnToInterfaceAppleExt NodeCapability = "https://tailscale.com/cap/debug-disable-bind-conn-to-interface-apple-ext" + // CapabilityTailnetLock indicates the node may initialize tailnet lock. CapabilityTailnetLock NodeCapability = "https://tailscale.com/cap/tailnet-lock" From 27a0168cdc326830440e87fabd60bcdc00dd45c4 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Thu, 6 Nov 2025 14:53:22 -0800 Subject: [PATCH 0665/1093] util/dnsname: increase maxNameLength to account for trailing dot Fixes #17788 Signed-off-by: Fran Bull --- util/dnsname/dnsname.go | 2 +- util/dnsname/dnsname_test.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/util/dnsname/dnsname.go b/util/dnsname/dnsname.go index 6404a9af1cc2f..ef898ebbd842f 100644 --- a/util/dnsname/dnsname.go +++ b/util/dnsname/dnsname.go @@ -14,7 +14,7 @@ const ( // maxLabelLength is the maximum length of a label permitted by RFC 1035. maxLabelLength = 63 // maxNameLength is the maximum length of a DNS name. - maxNameLength = 253 + maxNameLength = 254 ) // A FQDN is a fully-qualified DNS name or name suffix.
diff --git a/util/dnsname/dnsname_test.go b/util/dnsname/dnsname_test.go index 719e28be3966b..49eeaee48a947 100644 --- a/util/dnsname/dnsname_test.go +++ b/util/dnsname/dnsname_test.go @@ -59,6 +59,38 @@ func TestFQDN(t *testing.T) { } } +func TestFQDNTooLong(t *testing.T) { + // RFC 1035 says a dns name has a max size of 255 octets, and is represented as labels of len+ASCII chars so + // example.com + // is represented as + // 7example3com0 + // which is to say that if we have a trailing dot then the dots cancel out all the len bytes except the first and + // we can accept 254 chars. + + // This name is max length + name := "aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaa.example.com." + if len(name) != 254 { + t.Fatalf("name should be 254 chars including trailing . (len is %d)", len(name)) + } + got, err := ToFQDN(name) + if err != nil { + t.Fatalf("want: error to end with \"is too long to be a DNS name\", got: %v", err) + } + if string(got) != name { + t.Fatalf("want: %s, got: %s", name, got) + } + + // This name is too long + name = "x" + name + got, err = ToFQDN(name) + if got != "" { + t.Fatalf("want: \"\", got: %s", got) + } + if err == nil || !strings.HasSuffix(err.Error(), "is too long to be a DNS name") { + t.Fatalf("want: error to end with \"is too long to be a DNS name\", got: %v", err) + } +} + func TestFQDNContains(t *testing.T) { tests := []struct { a, b string From f387b1010e92fe34656e4106aee0111cb48ea9a1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 12 Nov 2025 08:51:04 -0800 Subject: [PATCH 0666/1093] wgengine/wgcfg: remove two unused Config fields They distracted me in some refactoring. They're set but never used. 
Updates #17858 Change-Id: I6ec7d6841ab684a55bccca7b7cbf7da9c782694f Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/state_test.go | 14 -------------- util/deephash/tailscale_types_test.go | 1 - wgengine/bench/wg.go | 2 -- wgengine/magicsock/magicsock_test.go | 2 -- wgengine/wgcfg/config.go | 7 +------ wgengine/wgcfg/nmcfg/nmcfg.go | 2 -- wgengine/wgcfg/wgcfg_clone.go | 3 --- 7 files changed, 1 insertion(+), 30 deletions(-) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 53b8f78e4f33c..ca281fbece4c9 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1243,8 +1243,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // After the auth is completed, the configs must be updated to reflect the node's netmap. wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, @@ -1301,8 +1299,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // Once the auth is completed, the configs must be updated to reflect the node's netmap. wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node2.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node2.SelfNode.Addresses().AsSlice(), }, @@ -1351,8 +1347,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // must be updated to reflect the node's netmap. 
wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, @@ -1376,8 +1370,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { }, wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node3.SelfNode.StableID(), Peers: []wgcfg.Peer{ { PublicKey: node1.SelfNode.Key(), @@ -1449,8 +1441,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { }, wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, @@ -1480,8 +1470,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { // With seamless renewal, starting a reauth should leave everything up: wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, @@ -1513,8 +1501,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { }, wantState: ipn.Starting, wantCfg: &wgcfg.Config{ - Name: "tailscale", - NodeID: node1.SelfNode.StableID(), Peers: []wgcfg.Peer{}, Addresses: node1.SelfNode.Addresses().AsSlice(), }, diff --git a/util/deephash/tailscale_types_test.go b/util/deephash/tailscale_types_test.go index d760253990048..eeb7fdf84d11f 100644 --- a/util/deephash/tailscale_types_test.go +++ b/util/deephash/tailscale_types_test.go @@ -85,7 +85,6 @@ type tailscaleTypes struct { func getVal() *tailscaleTypes { return &tailscaleTypes{ &wgcfg.Config{ - Name: "foo", Addresses: []netip.Prefix{netip.PrefixFrom(netip.AddrFrom16([16]byte{3: 3}).Unmap(), 5)}, Peers: []wgcfg.Peer{ { diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index 4de7677f26257..f0fa38bf97198 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -38,7 +38,6 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. 
k1 := key.NewNode() c1 := wgcfg.Config{ - Name: "e1", PrivateKey: k1, Addresses: []netip.Prefix{a1}, } @@ -65,7 +64,6 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. l2 := logger.WithPrefix(logf, "e2: ") k2 := key.NewNode() c2 := wgcfg.Config{ - Name: "e2", PrivateKey: k2, Addresses: []netip.Prefix{a2}, } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 60620b14100f1..e91dac2ec1874 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -1059,7 +1059,6 @@ func testTwoDevicePing(t *testing.T, d *devices) { }) m1cfg := &wgcfg.Config{ - Name: "peer1", PrivateKey: m1.privateKey, Addresses: []netip.Prefix{netip.MustParsePrefix("1.0.0.1/32")}, Peers: []wgcfg.Peer{ @@ -1071,7 +1070,6 @@ func testTwoDevicePing(t *testing.T, d *devices) { }, } m2cfg := &wgcfg.Config{ - Name: "peer2", PrivateKey: m2.privateKey, Addresses: []netip.Prefix{netip.MustParsePrefix("1.0.0.2/32")}, Peers: []wgcfg.Peer{ diff --git a/wgengine/wgcfg/config.go b/wgengine/wgcfg/config.go index 926964a4bdc20..2734f6c6ea969 100644 --- a/wgengine/wgcfg/config.go +++ b/wgengine/wgcfg/config.go @@ -8,7 +8,6 @@ import ( "net/netip" "slices" - "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logid" ) @@ -18,8 +17,6 @@ import ( // Config is a WireGuard configuration. // It only supports the set of things Tailscale uses. 
type Config struct { - Name string - NodeID tailcfg.StableNodeID PrivateKey key.NodePrivate Addresses []netip.Prefix MTU uint16 @@ -40,9 +37,7 @@ func (c *Config) Equal(o *Config) bool { if c == nil || o == nil { return c == o } - return c.Name == o.Name && - c.NodeID == o.NodeID && - c.PrivateKey.Equal(o.PrivateKey) && + return c.PrivateKey.Equal(o.PrivateKey) && c.MTU == o.MTU && c.NetworkLogging == o.NetworkLogging && slices.Equal(c.Addresses, o.Addresses) && diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index 1add608e4496c..08b162730804f 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -51,7 +51,6 @@ func cidrIsSubnet(node tailcfg.NodeView, cidr netip.Prefix) bool { // WGCfg returns the NetworkMaps's WireGuard configuration. func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, exitNode tailcfg.StableNodeID) (*wgcfg.Config, error) { cfg := &wgcfg.Config{ - Name: "tailscale", PrivateKey: nm.PrivateKey, Addresses: nm.GetAddresses().AsSlice(), Peers: make([]wgcfg.Peer, 0, len(nm.Peers)), @@ -59,7 +58,6 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, // Setup log IDs for data plane audit logging. 
if nm.SelfNode.Valid() { - cfg.NodeID = nm.SelfNode.StableID() canNetworkLog := nm.SelfNode.HasCap(tailcfg.CapabilityDataPlaneAuditLogs) logExitFlowEnabled := nm.SelfNode.HasCap(tailcfg.NodeAttrLogExitFlows) if canNetworkLog && nm.SelfNode.DataPlaneAuditLogID() != "" && nm.DomainAuditLogID != "" { diff --git a/wgengine/wgcfg/wgcfg_clone.go b/wgengine/wgcfg/wgcfg_clone.go index 749d8d8160579..9f3cabde182f9 100644 --- a/wgengine/wgcfg/wgcfg_clone.go +++ b/wgengine/wgcfg/wgcfg_clone.go @@ -8,7 +8,6 @@ package wgcfg import ( "net/netip" - "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logid" "tailscale.com/types/ptr" @@ -35,8 +34,6 @@ func (src *Config) Clone() *Config { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _ConfigCloneNeedsRegeneration = Config(struct { - Name string - NodeID tailcfg.StableNodeID PrivateKey key.NodePrivate Addresses []netip.Prefix MTU uint16 From 37aa7e6935d5808158e7c9755ba8402a36b87925 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 12 Nov 2025 08:16:51 -0800 Subject: [PATCH 0667/1093] util/dnsname: fix test error message Updates #17788 Signed-off-by: Fran Bull --- util/dnsname/dnsname_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/dnsname/dnsname_test.go b/util/dnsname/dnsname_test.go index 49eeaee48a947..b038bb1bd10e1 100644 --- a/util/dnsname/dnsname_test.go +++ b/util/dnsname/dnsname_test.go @@ -74,7 +74,7 @@ func TestFQDNTooLong(t *testing.T) { } got, err := ToFQDN(name) if err != nil { - t.Fatalf("want: error to end with \"is too long to be a DNS name\", got: %v", err) + t.Fatalf("want: no error, got: %v", err) } if string(got) != name { t.Fatalf("want: %s, got: %s", name, got) From 31fe75ad9eb9d9a48ded976e07ba60f4a734f4a6 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 10 Nov 2025 15:02:31 +0000 Subject: [PATCH 0668/1093] licenses: update license notices Signed-off-by: License Updater --- 
licenses/apple.md | 11 ++++++----- licenses/tailscale.md | 1 + licenses/windows.md | 16 +++++++++------- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index 4c50e95595742..2a795ddbb9cdf 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -29,6 +29,7 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) @@ -67,13 +68,13 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.43.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.46.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.29.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.37.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.36.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) 
([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.30.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 0ef5bcf61d5f8..c04e555637d2d 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -37,6 +37,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.23/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/a09d6be7affa/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) diff --git a/licenses/windows.md b/licenses/windows.md index 
b284aa1361f5d..06a5712ceb509 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -15,6 +15,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) - [github.com/cespare/xxhash/v2](https://pkg.go.dev/github.com/cespare/xxhash/v2) ([MIT](https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) @@ -36,9 +37,9 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/munnerz/goautoneg](https://pkg.go.dev/github.com/munnerz/goautoneg) ([BSD-3-Clause](https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE)) - [github.com/nfnt/resize](https://pkg.go.dev/github.com/nfnt/resize) ([ISC](https://github.com/nfnt/resize/blob/83c6a9932646/LICENSE)) - [github.com/peterbourgon/diskv](https://pkg.go.dev/github.com/peterbourgon/diskv) ([MIT](https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE)) - - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.23.0/LICENSE)) + - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.23.2/LICENSE)) - [github.com/prometheus/client_model/go](https://pkg.go.dev/github.com/prometheus/client_model/go) ([Apache-2.0](https://github.com/prometheus/client_model/blob/v0.6.2/LICENSE)) - - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.65.0/LICENSE)) + - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.66.1/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) @@ -47,19 +48,20 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) + - [go.yaml.in/yaml/v2](https://pkg.go.dev/go.yaml.in/yaml/v2) ([Apache-2.0](https://github.com/yaml/go-yaml/blob/v2.4.2/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.43.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.28.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.46.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) 
([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.37.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.36.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.7/LICENSE)) + - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.8/LICENSE)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) - [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) ([MIT](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) From f4f9dd7f8c95bdcdc84de7de7c0de4fb591b73d0 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 12 Nov 2025 15:47:01 -0800 Subject: [PATCH 0669/1093] net/udprelay: replace VNI pool with selection algorithm (#17868) This reduces memory usage when tailscaled is acting as a peer relay. 
Updates #17801 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 45 +++++++++++++++++++++++++++---------- net/udprelay/server_test.go | 23 +++++++++++++++++++ 2 files changed, 56 insertions(+), 12 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index de1376b6480b1..69e0de095f5fe 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -77,11 +77,17 @@ type Server struct { addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints closed bool lamportID uint64 - vniPool []uint32 // the pool of available VNIs + nextVNI uint32 byVNI map[uint32]*serverEndpoint byDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint } +const ( + minVNI = uint32(1) + maxVNI = uint32(1<<24 - 1) + totalPossibleVNI = maxVNI - minVNI + 1 +) + // serverEndpoint contains Server-internal [endpoint.ServerEndpoint] state. // serverEndpoint methods are not thread-safe. type serverEndpoint struct { @@ -281,15 +287,10 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve steadyStateLifetime: defaultSteadyStateLifetime, closeCh: make(chan struct{}), byDisco: make(map[key.SortedPairOfDiscoPublic]*serverEndpoint), + nextVNI: minVNI, byVNI: make(map[uint32]*serverEndpoint), } s.discoPublic = s.disco.Public() - // TODO: instead of allocating 10s of MBs for the full pool, allocate - // smaller chunks and increase as needed - s.vniPool = make([]uint32, 0, 1<<24-1) - for i := 1; i < 1<<24; i++ { - s.vniPool = append(s.vniPool, uint32(i)) - } // TODO(creachadair): Find a way to plumb this in during initialization. 
// As-written, messages published here will not be seen by other components @@ -572,7 +573,6 @@ func (s *Server) Close() error { defer s.mu.Unlock() clear(s.byVNI) clear(s.byDisco) - s.vniPool = nil s.closed = true s.bus.Close() }) @@ -594,7 +594,6 @@ func (s *Server) endpointGCLoop() { if v.isExpired(now, s.bindLifetime, s.steadyStateLifetime) { delete(s.byDisco, k) delete(s.byVNI, v.vni) - s.vniPool = append(s.vniPool, v.vni) } } } @@ -729,6 +728,27 @@ func (e ErrServerNotReady) Error() string { return fmt.Sprintf("server not ready, retry after %v", e.RetryAfter) } +// getNextVNILocked returns the next available VNI. It implements the +// "Traditional BSD Port Selection Algorithm" from RFC6056. This algorithm does +// not attempt to obfuscate the selection, i.e. the selection is predictable. +// For now, we favor simplicity and reducing VNI re-use over more complex +// ephemeral port (VNI) selection algorithms. +func (s *Server) getNextVNILocked() (uint32, error) { + for i := uint32(0); i < totalPossibleVNI; i++ { + vni := s.nextVNI + if vni == maxVNI { + s.nextVNI = minVNI + } else { + s.nextVNI++ + } + _, ok := s.byVNI[vni] + if !ok { + return vni, nil + } + } + return 0, errors.New("VNI pool exhausted") +} + // AllocateEndpoint allocates an [endpoint.ServerEndpoint] for the provided pair // of [key.DiscoPublic]'s. If an allocation already exists for discoA and discoB // it is returned without modification/reallocation. 
AllocateEndpoint returns @@ -777,8 +797,9 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv }, nil } - if len(s.vniPool) == 0 { - return endpoint.ServerEndpoint{}, errors.New("VNI pool exhausted") + vni, err := s.getNextVNILocked() + if err != nil { + return endpoint.ServerEndpoint{}, err } s.lamportID++ @@ -786,10 +807,10 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv discoPubKeys: pair, lamportID: s.lamportID, allocatedAt: time.Now(), + vni: vni, } e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys.Get()[0]) e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys.Get()[1]) - e.vni, s.vniPool = s.vniPool[0], s.vniPool[1:] s.byDisco[pair] = e s.byVNI[e.vni] = e diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 8fc4a4f78cb47..bf7f0a9b5f1de 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + qt "github.com/frankban/quicktest" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "go4.org/mem" @@ -319,3 +320,25 @@ func TestServer(t *testing.T) { }) } } + +func TestServer_getNextVNILocked(t *testing.T) { + t.Parallel() + c := qt.New(t) + s := &Server{ + nextVNI: minVNI, + byVNI: make(map[uint32]*serverEndpoint), + } + for i := uint64(0); i < uint64(totalPossibleVNI); i++ { + vni, err := s.getNextVNILocked() + if err != nil { // using quicktest here triples test time + t.Fatal(err) + } + s.byVNI[vni] = nil + } + c.Assert(s.nextVNI, qt.Equals, minVNI) + _, err := s.getNextVNILocked() + c.Assert(err, qt.IsNotNil) + delete(s.byVNI, minVNI) + _, err = s.getNextVNILocked() + c.Assert(err, qt.IsNil) +} From 6ac80b7334eb978390c75134a82462d43c78f029 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Wed, 12 Nov 2025 17:53:39 -0500 Subject: [PATCH 0670/1093] cmd/{cloner,viewer}: handle maps of views Instead of trying to call View() on something that's already a View type (or trying to Clone the 
view unnecessarily), we can re-use the existing View values in a map[T]ViewType. Fixes #17866 Signed-off-by: Andrew Dunham --- cmd/cloner/cloner.go | 14 ++++-- cmd/viewer/tests/tests.go | 6 ++- cmd/viewer/tests/tests_clone.go | 17 +++++++ cmd/viewer/tests/tests_view.go | 78 ++++++++++++++++++++++++++++++++- cmd/viewer/viewer.go | 15 +++++-- 5 files changed, 120 insertions(+), 10 deletions(-) diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index 544d00518e113..917f4856d351d 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -192,7 +192,16 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\t\tdst.%s[k] = append([]%s{}, src.%s[k]...)", fname, n, fname) writef("\t}") writef("}") - } else if codegen.ContainsPointers(elem) { + } else if codegen.IsViewType(elem) || !codegen.ContainsPointers(elem) { + // If the map values are view types (which are + // immutable and don't need cloning) or don't + // themselves contain pointers, we can just + // clone the map itself. + it.Import("", "maps") + writef("\tdst.%s = maps.Clone(src.%s)", fname, fname) + } else { + // Otherwise we need to clone each element of + // the map. writef("if dst.%s != nil {", fname) writef("\tdst.%s = map[%s]%s{}", fname, it.QualifiedName(ft.Key()), it.QualifiedName(elem)) writef("\tfor k, v := range src.%s {", fname) @@ -228,9 +237,6 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\t}") writef("}") - } else { - it.Import("", "maps") - writef("\tdst.%s = maps.Clone(src.%s)", fname, fname) } case *types.Interface: // If ft is an interface with a "Clone() ft" method, it can be used to clone the field. 
diff --git a/cmd/viewer/tests/tests.go b/cmd/viewer/tests/tests.go index 4020e5651978a..d1c753db78710 100644 --- a/cmd/viewer/tests/tests.go +++ b/cmd/viewer/tests/tests.go @@ -13,7 +13,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/viewer --type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct --clone-only-type=OnlyGetClone +//go:generate go run tailscale.com/cmd/viewer --type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct,StructWithMapOfViews --clone-only-type=OnlyGetClone type StructWithoutPtrs struct { Int int @@ -238,3 +238,7 @@ type GenericTypeAliasStruct[T integer, T2 views.ViewCloner[T2, V2], V2 views.Str NonCloneable T Cloneable T2 } + +type StructWithMapOfViews struct { + MapOfViews map[string]StructWithoutPtrsView +} diff --git a/cmd/viewer/tests/tests_clone.go b/cmd/viewer/tests/tests_clone.go index 106a9b6843b56..4602b9d887d2b 100644 --- a/cmd/viewer/tests/tests_clone.go +++ b/cmd/viewer/tests/tests_clone.go @@ -547,3 +547,20 @@ func _GenericTypeAliasStructCloneNeedsRegeneration[T integer, T2 views.ViewClone Cloneable T2 }{}) } + +// Clone makes a deep copy of StructWithMapOfViews. +// The result aliases no memory with the original. +func (src *StructWithMapOfViews) Clone() *StructWithMapOfViews { + if src == nil { + return nil + } + dst := new(StructWithMapOfViews) + *dst = *src + dst.MapOfViews = maps.Clone(src.MapOfViews) + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+var _StructWithMapOfViewsCloneNeedsRegeneration = StructWithMapOfViews(struct { + MapOfViews map[string]StructWithoutPtrsView +}{}) diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index e50a71c9e0220..495281c23b3aa 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -16,7 +16,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct +//go:generate go run tailscale.com/cmd/cloner -clonefunc=false -type=StructWithPtrs,StructWithoutPtrs,Map,StructWithSlices,OnlyGetClone,StructWithEmbedded,GenericIntStruct,GenericNoPtrsStruct,GenericCloneableStruct,StructWithContainers,StructWithTypeAliasFields,GenericTypeAliasStruct,StructWithMapOfViews // View returns a read-only view of StructWithPtrs. func (p *StructWithPtrs) View() StructWithPtrsView { @@ -1053,3 +1053,79 @@ func _GenericTypeAliasStructViewNeedsRegeneration[T integer, T2 views.ViewCloner Cloneable T2 }{}) } + +// View returns a read-only view of StructWithMapOfViews. +func (p *StructWithMapOfViews) View() StructWithMapOfViewsView { + return StructWithMapOfViewsView{ж: p} +} + +// StructWithMapOfViewsView provides a read-only view over StructWithMapOfViews. +// +// Its methods should only be called if `Valid()` returns true. +type StructWithMapOfViewsView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *StructWithMapOfViews +} + +// Valid reports whether v's underlying value is non-nil. 
+func (v StructWithMapOfViewsView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v StructWithMapOfViewsView) AsStruct() *StructWithMapOfViews { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v StructWithMapOfViewsView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v StructWithMapOfViewsView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (v *StructWithMapOfViewsView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x StructWithMapOfViews + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *StructWithMapOfViewsView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x StructWithMapOfViews + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +func (v StructWithMapOfViewsView) MapOfViews() views.Map[string, StructWithoutPtrsView] { + return views.MapOf(v.ж.MapOfViews) +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. 
+var _StructWithMapOfViewsViewNeedsRegeneration = StructWithMapOfViews(struct { + MapOfViews map[string]StructWithoutPtrsView +}{}) diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index 4fd81ea510d40..3fae737cde692 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -367,14 +367,21 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, fie case *types.Struct, *types.Named, *types.Alias: strucT := u args.FieldType = it.QualifiedName(fieldType) - if codegen.ContainsPointers(strucT) { + + // We need to call View() unless the type is + // either a View itself or does not contain + // pointers (and can thus be shallow-copied). + // + // Otherwise, we need to create a View of the + // map value. + if codegen.IsViewType(strucT) || !codegen.ContainsPointers(strucT) { + template = "mapField" + args.MapValueType = it.QualifiedName(mElem) + } else { args.MapFn = "t.View()" template = "mapFnField" args.MapValueType = it.QualifiedName(mElem) args.MapValueView = appendNameSuffix(args.MapValueType, "View") - } else { - template = "mapField" - args.MapValueType = it.QualifiedName(mElem) } case *types.Basic: template = "mapField" From ca9b68aafd16eaba9b6847cd4421f8ecafc160c5 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Thu, 13 Nov 2025 07:19:17 -0500 Subject: [PATCH 0671/1093] cmd/tailscale/cli: remove service flag from funnel command (#17850) Fixes #17849. 
Signed-off-by: Naman Sood --- cmd/tailscale/cli/serve_v2.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 74458a950dbfc..e194b1e10c71a 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -236,10 +236,10 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { if subcmd == serve { fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capabilities to forward to the server (specify multiple capabilities with a comma-separated list)") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "Serve for a service with distinct virtual IP instead on node itself.") } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") - fs.Var(&serviceNameFlag{Value: &e.service}, "service", "Serve for a service with distinct virtual IP instead on node itself.") fs.BoolVar(&e.yes, "yes", false, "Update without interactive prompts (default false)") fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. 
Refer to docs for more information.") }), From 08e74effc0f0099b33ef266a098c52a406b76a5b Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Wed, 12 Nov 2025 23:22:55 -0500 Subject: [PATCH 0672/1093] cmd/cloner: support cloning arbitrarily-nested maps Fixes #17870 Signed-off-by: Andrew Dunham --- cmd/cloner/cloner.go | 139 ++++++++++++++++++++------ cmd/cloner/cloner_test.go | 106 ++++++++++++++++++++ cmd/cloner/clonerex/clonerex.go | 14 ++- cmd/cloner/clonerex/clonerex_clone.go | 127 ++++++++++++++++++++++- 4 files changed, 354 insertions(+), 32 deletions(-) diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index 917f4856d351d..a81bd10bd5401 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -201,40 +201,23 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("\tdst.%s = maps.Clone(src.%s)", fname, fname) } else { // Otherwise we need to clone each element of - // the map. + // the map using our recursive helper. writef("if dst.%s != nil {", fname) writef("\tdst.%s = map[%s]%s{}", fname, it.QualifiedName(ft.Key()), it.QualifiedName(elem)) writef("\tfor k, v := range src.%s {", fname) - switch elem := elem.Underlying().(type) { - case *types.Pointer: - writef("\t\tif v == nil { dst.%s[k] = nil } else {", fname) - if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) { - if _, isIface := base.(*types.Interface); isIface { - it.Import("", "tailscale.com/types/ptr") - writef("\t\t\tdst.%s[k] = ptr.To((*v).Clone())", fname) - } else { - writef("\t\t\tdst.%s[k] = v.Clone()", fname) - } - } else { - it.Import("", "tailscale.com/types/ptr") - writef("\t\t\tdst.%s[k] = ptr.To(*v)", fname) - } - writef("}") - case *types.Interface: - if cloneResultType := methodResultType(elem, "Clone"); cloneResultType != nil { - if _, isPtr := cloneResultType.(*types.Pointer); isPtr { - writef("\t\tdst.%s[k] = *(v.Clone())", fname) - } else { - writef("\t\tdst.%s[k] = v.Clone()", fname) - } - } else { - writef(`panic("%s (%v) 
does not have a Clone method")`, fname, elem) - } - default: - writef("\t\tdst.%s[k] = *(v.Clone())", fname) - } - + // Use a recursive helper here; this handles + // arbitrarily nested maps in addition to + // simpler types. + writeMapValueClone(mapValueCloneParams{ + Buf: buf, + It: it, + Elem: elem, + SrcExpr: "v", + DstExpr: fmt.Sprintf("dst.%s[k]", fname), + BaseIndent: "\t", + Depth: 1, + }) writef("\t}") writef("}") } @@ -277,3 +260,99 @@ func methodResultType(typ types.Type, method string) types.Type { } return sig.Results().At(0).Type() } + +type mapValueCloneParams struct { + // Buf is the buffer to write generated code to + Buf *bytes.Buffer + // It is the import tracker for managing imports. + It *codegen.ImportTracker + // Elem is the type of the map value to clone + Elem types.Type + // SrcExpr is the expression for the source value (e.g., "v", "v2", "v3") + SrcExpr string + // DstExpr is the expression for the destination (e.g., "dst.Field[k]", "dst.Field[k][k2]") + DstExpr string + // BaseIndent is the "base" indentation string for the generated code + // (i.e. 1 or more tabs). Additional indentation will be added based on + // the Depth parameter. + BaseIndent string + // Depth is the current nesting depth (1 for first level, 2 for second, etc.) + Depth int +} + +// writeMapValueClone generates code to clone a map value recursively. +// It handles arbitrary nesting of maps, pointers, and interfaces. +func writeMapValueClone(params mapValueCloneParams) { + indent := params.BaseIndent + strings.Repeat("\t", params.Depth) + writef := func(format string, args ...any) { + fmt.Fprintf(params.Buf, indent+format+"\n", args...) 
+ } + + switch elem := params.Elem.Underlying().(type) { + case *types.Pointer: + writef("if %s == nil { %s = nil } else {", params.SrcExpr, params.DstExpr) + if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) { + if _, isIface := base.(*types.Interface); isIface { + params.It.Import("", "tailscale.com/types/ptr") + writef("\t%s = ptr.To((*%s).Clone())", params.DstExpr, params.SrcExpr) + } else { + writef("\t%s = %s.Clone()", params.DstExpr, params.SrcExpr) + } + } else { + params.It.Import("", "tailscale.com/types/ptr") + writef("\t%s = ptr.To(*%s)", params.DstExpr, params.SrcExpr) + } + writef("}") + + case *types.Map: + // Recursively handle nested maps + innerElem := elem.Elem() + if codegen.IsViewType(innerElem) || !codegen.ContainsPointers(innerElem) { + // Inner map values don't need deep cloning + params.It.Import("", "maps") + writef("%s = maps.Clone(%s)", params.DstExpr, params.SrcExpr) + } else { + // Inner map values need cloning + keyType := params.It.QualifiedName(elem.Key()) + valueType := params.It.QualifiedName(innerElem) + // Generate unique variable names for nested loops based on depth + keyVar := fmt.Sprintf("k%d", params.Depth+1) + valVar := fmt.Sprintf("v%d", params.Depth+1) + + writef("if %s == nil {", params.SrcExpr) + writef("\t%s = nil", params.DstExpr) + writef("\tcontinue") + writef("}") + writef("%s = map[%s]%s{}", params.DstExpr, keyType, valueType) + writef("for %s, %s := range %s {", keyVar, valVar, params.SrcExpr) + + // Recursively generate cloning code for the nested map value + nestedDstExpr := fmt.Sprintf("%s[%s]", params.DstExpr, keyVar) + writeMapValueClone(mapValueCloneParams{ + Buf: params.Buf, + It: params.It, + Elem: innerElem, + SrcExpr: valVar, + DstExpr: nestedDstExpr, + BaseIndent: params.BaseIndent, + Depth: params.Depth + 1, + }) + + writef("}") + } + + case *types.Interface: + if cloneResultType := methodResultType(elem, "Clone"); cloneResultType != nil { + if _, isPtr := 
cloneResultType.(*types.Pointer); isPtr { + writef("%s = *(%s.Clone())", params.DstExpr, params.SrcExpr) + } else { + writef("%s = %s.Clone()", params.DstExpr, params.SrcExpr) + } + } else { + writef(`panic("map value (%%v) does not have a Clone method")`, elem) + } + + default: + writef("%s = *(%s.Clone())", params.DstExpr, params.SrcExpr) + } +} diff --git a/cmd/cloner/cloner_test.go b/cmd/cloner/cloner_test.go index 3556c14bc109e..754a4ac49a220 100644 --- a/cmd/cloner/cloner_test.go +++ b/cmd/cloner/cloner_test.go @@ -108,3 +108,109 @@ func TestInterfaceContainer(t *testing.T) { }) } } + +func TestMapWithPointers(t *testing.T) { + num1, num2 := 42, 100 + orig := &clonerex.MapWithPointers{ + Nested: map[string]*int{ + "foo": &num1, + "bar": &num2, + }, + WithCloneMethod: map[string]*clonerex.SliceContainer{ + "container1": {Slice: []*int{&num1, &num2}}, + "container2": {Slice: []*int{&num1}}, + }, + CloneInterface: map[string]clonerex.Cloneable{ + "impl1": &clonerex.CloneableImpl{Value: 123}, + "impl2": &clonerex.CloneableImpl{Value: 456}, + }, + } + + cloned := orig.Clone() + if !reflect.DeepEqual(orig, cloned) { + t.Errorf("Clone() = %v, want %v", cloned, orig) + } + + // Mutate cloned.Nested pointer values + *cloned.Nested["foo"] = 999 + if *orig.Nested["foo"] == 999 { + t.Errorf("Clone() aliased memory in Nested: original was modified") + } + + // Mutate cloned.WithCloneMethod slice values + *cloned.WithCloneMethod["container1"].Slice[0] = 888 + if *orig.WithCloneMethod["container1"].Slice[0] == 888 { + t.Errorf("Clone() aliased memory in WithCloneMethod: original was modified") + } + + // Mutate cloned.CloneInterface values + if impl, ok := cloned.CloneInterface["impl1"].(*clonerex.CloneableImpl); ok { + impl.Value = 777 + if origImpl, ok := orig.CloneInterface["impl1"].(*clonerex.CloneableImpl); ok { + if origImpl.Value == 777 { + t.Errorf("Clone() aliased memory in CloneInterface: original was modified") + } + } + } +} + +func TestDeeplyNestedMap(t 
*testing.T) { + num := 123 + orig := &clonerex.DeeplyNestedMap{ + ThreeLevels: map[string]map[string]map[string]int{ + "a": { + "b": {"c": 1, "d": 2}, + "e": {"f": 3}, + }, + "g": { + "h": {"i": 4}, + }, + }, + FourLevels: map[string]map[string]map[string]map[string]*clonerex.SliceContainer{ + "l1a": { + "l2a": { + "l3a": { + "l4a": {Slice: []*int{&num}}, + "l4b": {Slice: []*int{&num, &num}}, + }, + }, + }, + }, + } + + cloned := orig.Clone() + if !reflect.DeepEqual(orig, cloned) { + t.Errorf("Clone() = %v, want %v", cloned, orig) + } + + // Mutate the clone's ThreeLevels map + cloned.ThreeLevels["a"]["b"]["c"] = 777 + if orig.ThreeLevels["a"]["b"]["c"] == 777 { + t.Errorf("Clone() aliased memory in ThreeLevels: original was modified") + } + + // Mutate the clone's FourLevels map at the deepest pointer level + *cloned.FourLevels["l1a"]["l2a"]["l3a"]["l4a"].Slice[0] = 666 + if *orig.FourLevels["l1a"]["l2a"]["l3a"]["l4a"].Slice[0] == 666 { + t.Errorf("Clone() aliased memory in FourLevels: original was modified") + } + + // Add a new top-level key to the clone's FourLevels map + newNum := 999 + cloned.FourLevels["l1b"] = map[string]map[string]map[string]*clonerex.SliceContainer{ + "l2b": { + "l3b": { + "l4c": {Slice: []*int{&newNum}}, + }, + }, + } + if _, exists := orig.FourLevels["l1b"]; exists { + t.Errorf("Clone() aliased FourLevels map: new top-level key appeared in original") + } + + // Add a new nested key to the clone's FourLevels map + cloned.FourLevels["l1a"]["l2a"]["l3a"]["l4c"] = &clonerex.SliceContainer{Slice: []*int{&newNum}} + if _, exists := orig.FourLevels["l1a"]["l2a"]["l3a"]["l4c"]; exists { + t.Errorf("Clone() aliased FourLevels map: new nested key appeared in original") + } +} diff --git a/cmd/cloner/clonerex/clonerex.go b/cmd/cloner/clonerex/clonerex.go index 6463f91442a32..b9f6d60dedb35 100644 --- a/cmd/cloner/clonerex/clonerex.go +++ b/cmd/cloner/clonerex/clonerex.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // 
SPDX-License-Identifier: BSD-3-Clause -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap // Package clonerex is an example package for the cloner tool. package clonerex @@ -32,3 +32,15 @@ func (c *CloneableImpl) Clone() Cloneable { type InterfaceContainer struct { Interface Cloneable } + +type MapWithPointers struct { + Nested map[string]*int + WithCloneMethod map[string]*SliceContainer + CloneInterface map[string]Cloneable +} + +// DeeplyNestedMap tests arbitrary depth of map nesting (3+ levels) +type DeeplyNestedMap struct { + ThreeLevels map[string]map[string]map[string]int + FourLevels map[string]map[string]map[string]map[string]*SliceContainer +} diff --git a/cmd/cloner/clonerex/clonerex_clone.go b/cmd/cloner/clonerex/clonerex_clone.go index 533d7e723d3ea..13e1276c4e4b8 100644 --- a/cmd/cloner/clonerex/clonerex_clone.go +++ b/cmd/cloner/clonerex/clonerex_clone.go @@ -6,6 +6,8 @@ package clonerex import ( + "maps" + "tailscale.com/types/ptr" ) @@ -54,9 +56,114 @@ var _InterfaceContainerCloneNeedsRegeneration = InterfaceContainer(struct { Interface Cloneable }{}) +// Clone makes a deep copy of MapWithPointers. +// The result aliases no memory with the original. 
+func (src *MapWithPointers) Clone() *MapWithPointers { + if src == nil { + return nil + } + dst := new(MapWithPointers) + *dst = *src + if dst.Nested != nil { + dst.Nested = map[string]*int{} + for k, v := range src.Nested { + if v == nil { + dst.Nested[k] = nil + } else { + dst.Nested[k] = ptr.To(*v) + } + } + } + if dst.WithCloneMethod != nil { + dst.WithCloneMethod = map[string]*SliceContainer{} + for k, v := range src.WithCloneMethod { + if v == nil { + dst.WithCloneMethod[k] = nil + } else { + dst.WithCloneMethod[k] = v.Clone() + } + } + } + if dst.CloneInterface != nil { + dst.CloneInterface = map[string]Cloneable{} + for k, v := range src.CloneInterface { + dst.CloneInterface[k] = v.Clone() + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _MapWithPointersCloneNeedsRegeneration = MapWithPointers(struct { + Nested map[string]*int + WithCloneMethod map[string]*SliceContainer + CloneInterface map[string]Cloneable +}{}) + +// Clone makes a deep copy of DeeplyNestedMap. +// The result aliases no memory with the original. 
+func (src *DeeplyNestedMap) Clone() *DeeplyNestedMap { + if src == nil { + return nil + } + dst := new(DeeplyNestedMap) + *dst = *src + if dst.ThreeLevels != nil { + dst.ThreeLevels = map[string]map[string]map[string]int{} + for k, v := range src.ThreeLevels { + if v == nil { + dst.ThreeLevels[k] = nil + continue + } + dst.ThreeLevels[k] = map[string]map[string]int{} + for k2, v2 := range v { + dst.ThreeLevels[k][k2] = maps.Clone(v2) + } + } + } + if dst.FourLevels != nil { + dst.FourLevels = map[string]map[string]map[string]map[string]*SliceContainer{} + for k, v := range src.FourLevels { + if v == nil { + dst.FourLevels[k] = nil + continue + } + dst.FourLevels[k] = map[string]map[string]map[string]*SliceContainer{} + for k2, v2 := range v { + if v2 == nil { + dst.FourLevels[k][k2] = nil + continue + } + dst.FourLevels[k][k2] = map[string]map[string]*SliceContainer{} + for k3, v3 := range v2 { + if v3 == nil { + dst.FourLevels[k][k2][k3] = nil + continue + } + dst.FourLevels[k][k2][k3] = map[string]*SliceContainer{} + for k4, v4 := range v3 { + if v4 == nil { + dst.FourLevels[k][k2][k3][k4] = nil + } else { + dst.FourLevels[k][k2][k3][k4] = v4.Clone() + } + } + } + } + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _DeeplyNestedMapCloneNeedsRegeneration = DeeplyNestedMap(struct { + ThreeLevels map[string]map[string]map[string]int + FourLevels map[string]map[string]map[string]map[string]*SliceContainer +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of SliceContainer,InterfaceContainer. +// where T is one of SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap. 
func Clone(dst, src any) bool { switch src := src.(type) { case *SliceContainer: @@ -77,6 +184,24 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *MapWithPointers: + switch dst := dst.(type) { + case *MapWithPointers: + *dst = *src.Clone() + return true + case **MapWithPointers: + *dst = src.Clone() + return true + } + case *DeeplyNestedMap: + switch dst := dst.(type) { + case *DeeplyNestedMap: + *dst = *src.Clone() + return true + case **DeeplyNestedMap: + *dst = src.Clone() + return true + } } return false } From 146ea42822cce4d9743218c94aaf13d3d171e0a4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 8 Nov 2025 17:23:33 -0800 Subject: [PATCH 0673/1093] ipn/ipnlocal: remove all the weird locking (LockedOnEntry, UnlockEarly, etc) Fixes #11649 Updates #16369 Co-authored-by: James Sanderson Change-Id: I63eaa18fe870ddf81d84b949efac4d1b44c3db86 Signed-off-by: Brad Fitzpatrick --- control/controlclient/auto.go | 7 + ipn/ipnext/ipnext.go | 3 +- ipn/ipnlocal/local.go | 574 ++++++++++++++++------------------ ipn/ipnlocal/local_test.go | 19 +- ipn/ipnlocal/state_test.go | 201 ++---------- util/execqueue/execqueue.go | 21 +- wgengine/userspace.go | 27 ++ wgengine/watchdog.go | 6 + wgengine/wgengine.go | 7 + 9 files changed, 360 insertions(+), 505 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 52255e89f9600..40b02a598f865 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -615,6 +615,13 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM // does its thing, which may result in a call back into the client. 
metricQueued.Add(1) c.observerQueue.Add(func() { + c.mu.Lock() + closed := c.closed + c.mu.Unlock() + if closed { + return + } + if canSkipStatus(newSt, c.lastStatus.Load()) { metricSkippable.Add(1) if !c.direct.controlKnobs.DisableSkipStatusQueue.Load() { diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 4ff37dc8e3775..fc93cc8760a0b 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -323,7 +323,8 @@ type ProfileStateChangeCallback func(_ ipn.LoginProfileView, _ ipn.PrefsView, sa // [ProfileStateChangeCallback]s are called first. // // It returns a function to be called when the cc is being shut down, -// or nil if no cleanup is needed. +// or nil if no cleanup is needed. That cleanup function should not call +// back into LocalBackend, which may be locked during shutdown. type NewControlClientCallback func(controlclient.Client, ipn.LoginProfileView) (cleanup func()) // Hooks is a collection of hooks that extensions can add to (non-concurrently) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 62d8ea49073a3..076752469abef 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -271,9 +271,14 @@ type LocalBackend struct { sshServer SSHServer // or nil, initialized lazily. appConnector *appc.AppConnector // or nil, initialized when configured. // notifyCancel cancels notifications to the current SetNotifyCallback. - notifyCancel context.CancelFunc - cc controlclient.Client // TODO(nickkhyl): move to nodeBackend - ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto; TODO(nickkhyl): move to nodeBackend + notifyCancel context.CancelFunc + cc controlclient.Client // TODO(nickkhyl): move to nodeBackend + ccAuto *controlclient.Auto // if cc is of type *controlclient.Auto; TODO(nickkhyl): move to nodeBackend + + // ignoreControlClientUpdates indicates whether we want to ignore SetControlClientStatus updates + // before acquiring b.mu. This is used during shutdown to avoid deadlocks. 
+ ignoreControlClientUpdates atomic.Bool + machinePrivKey key.MachinePrivate tka *tkaState // TODO(nickkhyl): move to nodeBackend state ipn.State // TODO(nickkhyl): move to nodeBackend @@ -314,10 +319,6 @@ type LocalBackend struct { serveListeners map[netip.AddrPort]*localListener // listeners for local serve traffic serveProxyHandlers sync.Map // string (HTTPHandler.Proxy) => *reverseProxy - // mu must be held before calling statusChanged.Wait() or - // statusChanged.Broadcast(). - statusChanged *sync.Cond - // dialPlan is any dial plan that we've received from the control // server during a previous connection; it is cleared on logout. dialPlan atomic.Pointer[tailcfg.ControlDialPlan] // TODO(nickkhyl): maybe move to nodeBackend? @@ -520,8 +521,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.currentNodeAtomic.Store(nb) nb.ready() - mConn.SetNetInfoCallback(b.setNetInfo) - if sys.InitialConfig != nil { if err := b.initPrefsFromConfig(sys.InitialConfig); err != nil { return nil, err @@ -559,7 +558,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.setTCPPortsIntercepted(nil) - b.statusChanged = sync.NewCond(&b.mu) b.e.SetStatusCallback(b.setWgengineStatus) b.prevIfState = netMon.InterfaceState() @@ -604,6 +602,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } eventbus.SubscribeFunc(ec, b.onAppConnectorRouteUpdate) eventbus.SubscribeFunc(ec, b.onAppConnectorStoreRoutes) + mConn.SetNetInfoCallback(b.setNetInfo) // TODO(tailscale/tailscale#17887): move to eventbus return b, nil } @@ -838,8 +837,8 @@ func (b *LocalBackend) Dialer() *tsdial.Dialer { // It returns (false, nil) if not running in declarative mode, (true, nil) on // success, or (false, error) on failure. 
func (b *LocalBackend) ReloadConfig() (ok bool, err error) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if b.conf == nil { return false, nil } @@ -847,7 +846,7 @@ func (b *LocalBackend) ReloadConfig() (ok bool, err error) { if err != nil { return false, err } - if err := b.setConfigLockedOnEntry(conf, unlock); err != nil { + if err := b.setConfigLocked(conf); err != nil { return false, fmt.Errorf("error setting config: %w", err) } @@ -904,10 +903,9 @@ func (b *LocalBackend) setStateLocked(state ipn.State) { } } -// setConfigLockedOnEntry uses the provided config to update the backend's prefs +// setConfigLocked uses the provided config to update the backend's prefs // and other state. -func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlockOnce) error { - defer unlock() +func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { p := b.pm.CurrentPrefs().AsStruct() mp, err := conf.Parsed.ToPrefs() if err != nil { @@ -915,7 +913,7 @@ func (b *LocalBackend) setConfigLockedOnEntry(conf *conffile.Config, unlock unlo } p.ApplyEdits(&mp) b.setStaticEndpointsFromConfigLocked(conf) - b.setPrefsLockedOnEntry(p, unlock) + b.setPrefsLocked(p) b.conf = conf return nil @@ -1521,11 +1519,31 @@ func (b *LocalBackend) GetFilterForTest() *filter.Filter { return nb.filterAtomic.Load() } +func (b *LocalBackend) settleEventBus() { + // The move to eventbus made some things racy that + // weren't before so we have to wait for it to all be settled + // before we call certain things. + // See https://github.com/tailscale/tailscale/issues/16369 + // But we can't do this while holding b.mu without deadlocks, + // (https://github.com/tailscale/tailscale/pull/17804#issuecomment-3514426485) so + // now we just do it in lots of places before acquiring b.mu. + // Is this winning?? 
+ if b.sys != nil { + if ms, ok := b.sys.MagicSock.GetOK(); ok { + ms.Synchronize() + } + } +} + // SetControlClientStatus is the callback invoked by the control client whenever it posts a new status. // Among other things, this is where we update the netmap, packet filters, DNS and DERP maps. func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st controlclient.Status) { - unlock := b.lockAndGetUnlock() - defer unlock() + if b.ignoreControlClientUpdates.Load() { + b.logf("ignoring SetControlClientStatus during controlclient shutdown") + return + } + b.mu.Lock() + defer b.mu.Unlock() if b.cc != c { b.logf("Ignoring SetControlClientStatus from old client") @@ -1540,7 +1558,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control var uerr controlclient.UserVisibleError if errors.As(st.Err, &uerr) { s := uerr.UserVisibleError() - b.sendToLocked(ipn.Notify{ErrMessage: &s}, allClients) + b.sendLocked(ipn.Notify{ErrMessage: &s}) } return } @@ -1600,25 +1618,20 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.keyExpired = isExpired } - unlock.UnlockEarly() - if keyExpiryExtended && wasBlocked { // Key extended, unblock the engine - b.blockEngineUpdates(false) + b.blockEngineUpdatesLocked(false) } if st.LoginFinished() && (wasBlocked || authWasInProgress) { if wasBlocked { // Auth completed, unblock the engine - b.blockEngineUpdates(false) + b.blockEngineUpdatesLocked(false) } - b.authReconfig() - b.send(ipn.Notify{LoginFinished: &empty.Message{}}) + b.authReconfigLocked() + b.sendLocked(ipn.Notify{LoginFinished: &empty.Message{}}) } - // Lock b again and do only the things that require locking. 
- b.mu.Lock() - prefsChanged := false cn := b.currentNode() prefs := b.pm.CurrentPrefs().AsStruct() @@ -1731,16 +1744,12 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.setNetMapLocked(st.NetMap) b.updateFilterLocked(prefs.View()) } - b.mu.Unlock() // Now complete the lock-free parts of what we started while locked. if st.NetMap != nil { if envknob.NoLogsNoSupport() && st.NetMap.HasCap(tailcfg.CapabilityDataPlaneAuditLogs) { msg := "tailnet requires logging to be enabled. Remove --no-logs-no-support from tailscaled command line." b.health.SetLocalLogConfigHealth(errors.New(msg)) - // Connecting to this tailnet without logging is forbidden; boot us outta here. - b.mu.Lock() - defer b.mu.Unlock() // Get the current prefs again, since we unlocked above. prefs := b.pm.CurrentPrefs().AsStruct() prefs.WantRunning = false @@ -1752,7 +1761,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control }); err != nil { b.logf("Failed to save new controlclient state: %v", err) } - b.sendToLocked(ipn.Notify{ErrMessage: &msg, Prefs: &p}, allClients) + b.sendLocked(ipn.Notify{ErrMessage: &msg, Prefs: &p}) return } if oldNetMap != nil { @@ -1774,11 +1783,11 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // Update the DERP map in the health package, which uses it for health notifications b.health.SetDERPMap(st.NetMap.DERPMap) - b.send(ipn.Notify{NetMap: st.NetMap}) + b.sendLocked(ipn.Notify{NetMap: st.NetMap}) // The error here is unimportant as is the result. This will recalculate the suggested exit node // cache the value and push any changes to the IPN bus. - b.SuggestExitNode() + b.suggestExitNodeLocked() // Check and update the exit node if needed, now that we have a new netmap. 
// @@ -1788,16 +1797,16 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control // // Otherwise, it might briefly show the exit node as offline and display a warning, // if the node wasn't online or wasn't advertising default routes in the previous netmap. - b.RefreshExitNode() + b.refreshExitNodeLocked() } if st.URL != "" { b.logf("Received auth URL: %.20v...", st.URL) - b.setAuthURL(st.URL) + b.setAuthURLLocked(st.URL) } - b.stateMachine() + b.stateMachineLocked() // This is currently (2020-07-28) necessary; conditionally disabling it is fragile! // This is where netmap information gets propagated to router and magicsock. - b.authReconfig() + b.authReconfigLocked() } type preferencePolicyInfo struct { @@ -2003,13 +2012,14 @@ func (b *LocalBackend) registerSysPolicyWatch() (unregister func(), err error) { // // b.mu must not be held. func (b *LocalBackend) reconcilePrefs() (_ ipn.PrefsView, anyChange bool) { - unlock := b.lockAndGetUnlock() + b.mu.Lock() + defer b.mu.Unlock() + prefs := b.pm.CurrentPrefs().AsStruct() if !b.reconcilePrefsLocked(prefs) { - unlock.UnlockEarly() return prefs.View(), false } - return b.setPrefsLockedOnEntry(prefs, unlock), true + return b.setPrefsLocked(prefs), true } // sysPolicyChanged is a callback triggered by syspolicy when it detects @@ -2057,6 +2067,11 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo b.send(*notify) } }() + + // Gross. 
See https://github.com/tailscale/tailscale/issues/16369 + b.settleEventBus() + defer b.settleEventBus() + b.mu.Lock() defer b.mu.Unlock() @@ -2077,7 +2092,7 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo if !ok || n.StableID() != exitNodeID { continue } - b.goTracker.Go(b.RefreshExitNode) + b.refreshExitNodeLocked() break } } @@ -2241,51 +2256,60 @@ func (b *LocalBackend) resolveExitNodeIPLocked(prefs *ipn.Prefs) (prefsChanged b func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) { if err != nil { b.logf("wgengine status error: %v", err) - b.broadcastStatusChanged() return } if s == nil { b.logf("[unexpected] non-error wgengine update with status=nil: %v", s) - b.broadcastStatusChanged() return } b.mu.Lock() + defer b.mu.Unlock() + + // For now, only check this in the callback, but don't check it in setWgengineStatusLocked if s.AsOf.Before(b.lastStatusTime) { // Don't process a status update that is older than the one we have // already processed. (corp#2579) - b.mu.Unlock() return } b.lastStatusTime = s.AsOf + + b.setWgengineStatusLocked(s) +} + +// setWgengineStatusLocked updates LocalBackend's view of the engine status and +// updates the endpoints both in the backend and in the control client. +// +// Unlike setWgengineStatus it does not discard out-of-order updates, so +// statuses sent here are always processed. This is useful for ensuring we don't +// miss a "we shut down" status during backend shutdown even if other statuses +// arrive out of order. +// +// TODO(zofrex): we should ensure updates actually do arrive in order and move +// the out-of-order check into this function. +// +// b.mu must be held. +func (b *LocalBackend) setWgengineStatusLocked(s *wgengine.Status) { es := b.parseWgStatusLocked(s) cc := b.cc + + // TODO(zofrex): the only reason we even write this is to transition from + // "Starting" to "Running" in the call to state machine a few lines below + // this. 
Maybe we don't even need to store it at all. b.engineStatus = es + needUpdateEndpoints := !slices.Equal(s.LocalAddrs, b.endpoints) if needUpdateEndpoints { b.endpoints = append([]tailcfg.Endpoint{}, s.LocalAddrs...) } - b.mu.Unlock() if cc != nil { if needUpdateEndpoints { cc.UpdateEndpoints(s.LocalAddrs) } - b.stateMachine() + b.stateMachineLocked() } - b.broadcastStatusChanged() - b.send(ipn.Notify{Engine: &es}) -} - -// broadcastStatusChanged must not be called with b.mu held. -func (b *LocalBackend) broadcastStatusChanged() { - // The sync.Cond docs say: "It is allowed but not required for the caller to hold c.L during the call." - // In this particular case, we must acquire b.mu. Otherwise we might broadcast before - // the waiter (in requestEngineStatusAndWait) starts to wait, in which case - // the waiter can get stuck indefinitely. See PR 2865. - b.mu.Lock() - b.statusChanged.Broadcast() - b.mu.Unlock() + b.sendLocked(ipn.Notify{Engine: &es}) } // SetNotifyCallback sets the function to call when the backend has something to @@ -2365,8 +2389,14 @@ func (b *LocalBackend) initOnce() { // actually a supported operation (it should be, but it's very unclear // from the following whether or not that is a safe transition). func (b *LocalBackend) Start(opts ipn.Options) error { - b.logf("Start") + defer b.settleEventBus() // with b.mu unlocked + b.mu.Lock() + defer b.mu.Unlock() + return b.startLocked(opts) +} +func (b *LocalBackend) startLocked(opts ipn.Options) error { + b.logf("Start") b.startOnce.Do(b.initOnce) var clientToShutdown controlclient.Client @@ -2375,8 +2405,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error { clientToShutdown.Shutdown() } }() - unlock := b.lockAndGetUnlock() - defer unlock() if opts.UpdatePrefs != nil { if err := b.checkPrefsLocked(opts.UpdatePrefs); err != nil { @@ -2591,7 +2619,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { // regress tsnet.Server restarts. 
cc.Login(controlclient.LoginDefault) } - b.stateMachineLockedOnEntry(unlock) + b.stateMachineLocked() return nil } @@ -3255,6 +3283,10 @@ func (b *LocalBackend) send(n ipn.Notify) { b.sendTo(n, allClients) } +func (b *LocalBackend) sendLocked(n ipn.Notify) { + b.sendToLocked(n, allClients) +} + // SendNotify sends a notification to the IPN bus, // typically to the GUI client. func (b *LocalBackend) SendNotify(n ipn.Notify) { @@ -3345,21 +3377,22 @@ func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) } } -// setAuthURL sets the authURL and triggers [LocalBackend.popBrowserAuthNow] if the URL has changed. +// setAuthURLLocked sets the authURL and triggers [LocalBackend.popBrowserAuthNow] if the URL has changed. // This method is called when a new authURL is received from the control plane, meaning that either a user // has started a new interactive login (e.g., by running `tailscale login` or clicking Login in the GUI), // or the control plane was unable to authenticate this node non-interactively (e.g., due to key expiration). // A non-nil b.authActor indicates that an interactive login is in progress and was initiated by the specified actor. +// +// b.mu must be held. +// // If url is "", it is equivalent to calling [LocalBackend.resetAuthURLLocked] with b.mu held. -func (b *LocalBackend) setAuthURL(url string) { +func (b *LocalBackend) setAuthURLLocked(url string) { var popBrowser, keyExpired bool var recipient ipnauth.Actor - b.mu.Lock() switch { case url == "": b.resetAuthURLLocked() - b.mu.Unlock() return case b.authURL != url: b.authURL = url @@ -3376,33 +3409,33 @@ func (b *LocalBackend) setAuthURL(url string) { // Consume the StartLoginInteractive call, if any, that caused the control // plane to send us this URL. 
b.authActor = nil - b.mu.Unlock() if popBrowser { - b.popBrowserAuthNow(url, keyExpired, recipient) + b.popBrowserAuthNowLocked(url, keyExpired, recipient) } } -// popBrowserAuthNow shuts down the data plane and sends the URL to the recipient's +// popBrowserAuthNowLocked shuts down the data plane and sends the URL to the recipient's // [watchSession]s if the recipient is non-nil; otherwise, it sends the URL to all watchSessions. // keyExpired is the value of b.keyExpired upon entry and indicates // whether the node's key has expired. -// It must not be called with b.mu held. -func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool, recipient ipnauth.Actor) { +// +// b.mu must be held. +func (b *LocalBackend) popBrowserAuthNowLocked(url string, keyExpired bool, recipient ipnauth.Actor) { b.logf("popBrowserAuthNow(%q): url=%v, key-expired=%v, seamless-key-renewal=%v", maybeUsernameOf(recipient), url != "", keyExpired, b.seamlessRenewalEnabled()) // Deconfigure the local network data plane if: // - seamless key renewal is not enabled; // - key is expired (in which case tailnet connectivity is down anyway). 
if !b.seamlessRenewalEnabled() || keyExpired { - b.blockEngineUpdates(true) - b.stopEngineAndWait() + b.blockEngineUpdatesLocked(true) + b.stopEngineAndWaitLocked() - if b.State() == ipn.Running { - b.enterState(ipn.Starting) + if b.state == ipn.Running { + b.enterStateLocked(ipn.Starting) } } - b.tellRecipientToBrowseToURL(url, toNotificationTarget(recipient)) + b.tellRecipientToBrowseToURLLocked(url, toNotificationTarget(recipient)) } // validPopBrowserURL reports whether urlStr is a valid value for a @@ -3450,13 +3483,16 @@ func (b *LocalBackend) validPopBrowserURLLocked(urlStr string) bool { } func (b *LocalBackend) tellClientToBrowseToURL(url string) { - b.tellRecipientToBrowseToURL(url, allClients) + b.mu.Lock() + defer b.mu.Unlock() + b.tellRecipientToBrowseToURLLocked(url, allClients) } -// tellRecipientToBrowseToURL is like tellClientToBrowseToURL but allows specifying a recipient. -func (b *LocalBackend) tellRecipientToBrowseToURL(url string, recipient notificationTarget) { - if b.validPopBrowserURL(url) { - b.sendTo(ipn.Notify{BrowseToURL: &url}, recipient) +// tellRecipientToBrowseToURLLocked is like tellClientToBrowseToURL but allows specifying a recipient +// and b.mu must be held. 
+func (b *LocalBackend) tellRecipientToBrowseToURLLocked(url string, recipient notificationTarget) { + if b.validPopBrowserURLLocked(url) { + b.sendToLocked(ipn.Notify{BrowseToURL: &url}, recipient) } } @@ -3471,8 +3507,8 @@ func (b *LocalBackend) onClientVersion(v *tailcfg.ClientVersion) { } func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() prefs := b.pm.CurrentPrefs() if !prefs.Valid() { @@ -3494,14 +3530,14 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) - _, err := b.editPrefsLockedOnEntry( + _, err := b.editPrefsLocked( ipnauth.Self, &ipn.MaskedPrefs{ Prefs: *prefsClone, AutoUpdateSet: ipn.AutoUpdatePrefsMask{ ApplySet: true, }, - }, unlock) + }) if err != nil { b.logf("failed to apply tailnet-wide default for auto-updates (%v): %v", au, err) return @@ -3734,6 +3770,7 @@ func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error { // active [watchSession]s. func (b *LocalBackend) StartLoginInteractiveAs(ctx context.Context, user ipnauth.Actor) error { b.mu.Lock() + defer b.mu.Unlock() if b.cc == nil { panic("LocalBackend.assertClient: b.cc == nil") } @@ -3751,12 +3788,11 @@ func (b *LocalBackend) StartLoginInteractiveAs(ctx context.Context, user ipnauth b.authActor = user } cc := b.cc - b.mu.Unlock() b.logf("StartLoginInteractiveAs(%q): url=%v", maybeUsernameOf(user), hasValidURL) if hasValidURL { - b.popBrowserAuthNow(url, keyExpired, user) + b.popBrowserAuthNowLocked(url, keyExpired, user) } else { cc.Login(b.loginFlags | controlclient.LoginInteractive) } @@ -3886,8 +3922,8 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineSt // // On non-multi-user systems, the actor should be set to nil. 
func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() var userIdentifier string if user := cmp.Or(actor, b.currentUser); user != nil { @@ -3909,7 +3945,7 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { action = "connected" } reason := fmt.Sprintf("client %s (%s)", action, userIdentifier) - b.switchToBestProfileLockedOnEntry(reason, unlock) + b.switchToBestProfileLocked(reason) } // SwitchToBestProfile selects the best profile to use, @@ -3919,13 +3955,14 @@ func (b *LocalBackend) SetCurrentUser(actor ipnauth.Actor) { // or disconnecting, or a change in the desktop session state, and is used // for logging. func (b *LocalBackend) SwitchToBestProfile(reason string) { - b.switchToBestProfileLockedOnEntry(reason, b.lockAndGetUnlock()) + b.mu.Lock() + defer b.mu.Unlock() + b.switchToBestProfileLocked(reason) } -// switchToBestProfileLockedOnEntry is like [LocalBackend.SwitchToBestProfile], -// but b.mu must held on entry. It is released on exit. -func (b *LocalBackend) switchToBestProfileLockedOnEntry(reason string, unlock unlockOnce) { - defer unlock() +// switchToBestProfileLocked is like [LocalBackend.SwitchToBestProfile], +// but b.mu must be held on entry. +func (b *LocalBackend) switchToBestProfileLocked(reason string) { oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc) profile, background := b.resolveBestProfileLocked() cp, switched, err := b.pm.SwitchToProfile(profile) @@ -3956,7 +3993,7 @@ func (b *LocalBackend) switchToBestProfileLocked(reason string) { if newControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc); oldControlURL != newControlURL { b.resetDialPlan() } - if err := b.resetForProfileChangeLockedOnEntry(unlock); err != nil { + if err := b.resetForProfileChangeLocked(); err != nil { // TODO(nickkhyl): The actual reset cannot fail. However, // the TKA initialization or [LocalBackend.Start] can fail.
// These errors are not critical as far as we're concerned. @@ -4204,8 +4241,8 @@ func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { // Setting the value to false when use of an exit node is already false is not an error, // nor is true when the exit node is already in use. func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.PrefsView, error) { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() p0 := b.pm.CurrentPrefs() if !buildfeatures.HasUseExitNode { @@ -4249,7 +4286,7 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P mp.InternalExitNodePrior = p0.ExitNodeID() } } - return b.editPrefsLockedOnEntry(actor, mp, unlock) + return b.editPrefsLocked(actor, mp) } // MaybeClearAppConnector clears the routes from any AppConnector if @@ -4280,8 +4317,11 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip if mp.SetsInternal() { return ipn.PrefsView{}, errors.New("can't set Internal fields") } + defer b.settleEventBus() - return b.editPrefsLockedOnEntry(actor, mp, b.lockAndGetUnlock()) + b.mu.Lock() + defer b.mu.Unlock() + return b.editPrefsLocked(actor, mp) } // checkEditPrefsAccessLocked checks whether the current user has access @@ -4471,8 +4511,8 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { profileID := b.pm.CurrentProfile().ID() var reconnectTimer tstime.TimerController reconnectTimer = b.clock.AfterFunc(d, func() { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if b.reconnectTimer != reconnectTimer { // We're either not the most recent timer, or we lost the race when @@ -4490,7 +4530,7 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } mp := &ipn.MaskedPrefs{WantRunningSet: true, Prefs: ipn.Prefs{WantRunning: true}} - if _, err := b.editPrefsLockedOnEntry(ipnauth.Self, mp, unlock); err != nil { + if _, err := 
b.editPrefsLocked(ipnauth.Self, mp); err != nil { b.logf("failed to automatically reconnect as %q after %v: %v", cp.Name(), d, err) } else { b.logf("automatically reconnected as %q after %v", cp.Name(), d) @@ -4519,11 +4559,8 @@ func (b *LocalBackend) stopReconnectTimerLocked() { } } -// Warning: b.mu must be held on entry, but it unlocks it on the way out. -// TODO(bradfitz): redo the locking on all these weird methods like this. -func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.MaskedPrefs, unlock unlockOnce) (ipn.PrefsView, error) { - defer unlock() // for error paths - +// b.mu must be held. +func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { p0 := b.pm.CurrentPrefs() // Check if the changes in mp are allowed. @@ -4560,11 +4597,11 @@ func (b *LocalBackend) editPrefsLockedOnEntry(actor ipnauth.Actor, mp *ipn.Maske // before the modified prefs are actually set for the current profile. b.onEditPrefsLocked(actor, mp, p0, p1.View()) - newPrefs := b.setPrefsLockedOnEntry(p1, unlock) + newPrefs := b.setPrefsLocked(p1) // Note: don't perform any actions for the new prefs here. Not // every prefs change goes through EditPrefs. Put your actions - // in setPrefsLocksOnEntry instead. + // in setPrefsLocked instead. // This should return the public prefs, not the private ones. return stripKeysFromPrefs(newPrefs), nil @@ -4587,12 +4624,10 @@ func (b *LocalBackend) checkProfileNameLocked(p *ipn.Prefs) error { return nil } -// setPrefsLockedOnEntry requires b.mu be held to call it, but it -// unlocks b.mu when done. newp ownership passes to this function. +// setPrefsLocked requires b.mu be held to call it. +// newp ownership passes to this function. // It returns a read-only copy of the new prefs. 
-func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) ipn.PrefsView { - defer unlock() - +func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { cn := b.currentNode() netMap := cn.NetMap() b.setAtomicValuesFromPrefsLocked(newp.View()) @@ -4653,10 +4688,8 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.resetAlwaysOnOverrideLocked() } - unlock.UnlockEarly() - if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { - b.doSetHostinfoFilterServices() + b.doSetHostinfoFilterServicesLocked() } if netMap != nil { @@ -4669,12 +4702,12 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) } if oldp.WantRunning() != newp.WantRunning { - b.stateMachine() + b.stateMachineLocked() } else { - b.authReconfig() + b.authReconfigLocked() } - b.send(ipn.Notify{Prefs: &prefs}) + b.sendLocked(ipn.Notify{Prefs: &prefs}) return prefs } @@ -4794,7 +4827,11 @@ func (b *LocalBackend) setPortlistServices(sl []tailcfg.Service) { func (b *LocalBackend) doSetHostinfoFilterServices() { b.mu.Lock() defer b.mu.Unlock() + b.doSetHostinfoFilterServicesLocked() +} +// b.mu must be held +func (b *LocalBackend) doSetHostinfoFilterServicesLocked() { cc := b.cc if cc == nil { // Control client isn't up yet. @@ -4863,15 +4900,15 @@ func (b *LocalBackend) isEngineBlocked() bool { return b.blocked } -// blockEngineUpdate sets b.blocked to block, while holding b.mu. Its -// indirect effect is to turn b.authReconfig() into a no-op if block -// is true. -func (b *LocalBackend) blockEngineUpdates(block bool) { +// blockEngineUpdatesLocked sets b.blocked to block. +// +// Its indirect effect is to turn b.authReconfig() into a no-op if block is +// true. +// +// b.mu must be held. 
+func (b *LocalBackend) blockEngineUpdatesLocked(block bool) { b.logf("blockEngineUpdates(%v)", block) - - b.mu.Lock() b.blocked = block - b.mu.Unlock() } // reconfigAppConnectorLocked updates the app connector state based on the @@ -4982,38 +5019,41 @@ func (b *LocalBackend) readvertiseAppConnectorRoutes() { // updates are not currently blocked, based on the cached netmap and // user prefs. func (b *LocalBackend) authReconfig() { - // Wait for magicsock to process pending [eventbus] events, - // such as netmap updates. This should be completed before - // wireguard-go is reconfigured. See tailscale/tailscale#16369. - b.MagicConn().Synchronize() - b.mu.Lock() - blocked := b.blocked - prefs := b.pm.CurrentPrefs() - cn := b.currentNode() - nm := cn.NetMap() - hasPAC := b.prevIfState.HasPAC() - disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) - dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) - dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, version.OS()) - // If the current node is an app connector, ensure the app connector machine is started - b.reconfigAppConnectorLocked(nm, prefs) - closing := b.shutdownCalled - b.mu.Unlock() + defer b.mu.Unlock() + b.authReconfigLocked() +} - if closing { +// authReconfigLocked is the locked version of [LocalBackend.authReconfig]. +// +// b.mu must be held. +func (b *LocalBackend) authReconfigLocked() { + + if b.shutdownCalled { b.logf("[v1] authReconfig: skipping because in shutdown") return } - - if blocked { + if b.blocked { b.logf("[v1] authReconfig: blocked, skipping.") return } + + cn := b.currentNode() + + nm := cn.NetMap() if nm == nil { b.logf("[v1] authReconfig: netmap not yet valid. 
Skipping.") return } + + prefs := b.pm.CurrentPrefs() + hasPAC := b.prevIfState.HasPAC() + disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) + dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) + dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, version.OS()) + // If the current node is an app connector, ensure the app connector machine is started + b.reconfigAppConnectorLocked(nm, prefs) + if !prefs.WantRunning() { b.logf("[v1] authReconfig: skipping because !WantRunning.") return @@ -5048,7 +5088,7 @@ func (b *LocalBackend) authReconfig() { } oneCGNATRoute := shouldUseOneCGNATRoute(b.logf, b.sys.NetMon.Get(), b.sys.ControlKnobs(), version.OS()) - rcfg := b.routerConfig(cfg, prefs, oneCGNATRoute) + rcfg := b.routerConfigLocked(cfg, prefs, oneCGNATRoute) err = b.e.Reconfig(cfg, rcfg, dcfg) if err == wgengine.ErrNoChanges { @@ -5056,9 +5096,9 @@ func (b *LocalBackend) authReconfig() { } b.logf("[v1] authReconfig: ra=%v dns=%v 0x%02x: %v", prefs.RouteAll(), prefs.CorpDNS(), flags, err) - b.initPeerAPIListener() + b.initPeerAPIListenerLocked() if buildfeatures.HasAppConnectors { - b.readvertiseAppConnectorRoutes() + go b.goTracker.Go(b.readvertiseAppConnectorRoutes) } } @@ -5181,12 +5221,18 @@ func (b *LocalBackend) closePeerAPIListenersLocked() { const peerAPIListenAsync = runtime.GOOS == "windows" || runtime.GOOS == "android" func (b *LocalBackend) initPeerAPIListener() { + b.mu.Lock() + defer b.mu.Unlock() + b.initPeerAPIListenerLocked() +} + +// b.mu must be held. +func (b *LocalBackend) initPeerAPIListenerLocked() { if !buildfeatures.HasPeerAPIServer { return } b.logf("[v1] initPeerAPIListener: entered") - b.mu.Lock() - defer b.mu.Unlock() + if b.shutdownCalled { b.logf("[v1] initPeerAPIListener: shutting down") return @@ -5349,15 +5395,15 @@ func peerRoutes(logf logger.Logf, peers []wgcfg.Peer, cgnatThreshold int) (route } // routerConfig produces a router.Config from a wireguard config and IPN prefs. 
-func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneCGNATRoute bool) *router.Config { +// +// b.mu must be held. +func (b *LocalBackend) routerConfigLocked(cfg *wgcfg.Config, prefs ipn.PrefsView, oneCGNATRoute bool) *router.Config { singleRouteThreshold := 10_000 if oneCGNATRoute { singleRouteThreshold = 1 } - b.mu.Lock() - netfilterKind := b.capForcedNetfilter // protected by b.mu - b.mu.Unlock() + netfilterKind := b.capForcedNetfilter // protected by b.mu (hence the Locked suffix) if prefs.NetfilterKind() != "" { if netfilterKind != "" { @@ -5515,21 +5561,16 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip } } -// enterState transitions the backend into newState, updating internal +// enterStateLocked transitions the backend into newState, updating internal // state and propagating events out as needed. // // TODO(danderson): while this isn't a lie, exactly, a ton of other // places twiddle IPN internal state without going through here, so // really this is more "one of several places in which random things // happen". -func (b *LocalBackend) enterState(newState ipn.State) { - unlock := b.lockAndGetUnlock() - b.enterStateLockedOnEntry(newState, unlock) -} - -// enterStateLockedOnEntry is like enterState but requires b.mu be held to call -// it, but it unlocks b.mu when done (via unlock, a once func). -func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlockOnce) { +// +// b.mu must be held. +func (b *LocalBackend) enterStateLocked(newState ipn.State) { cn := b.currentNode() oldState := b.state b.setStateLocked(newState) @@ -5581,17 +5622,16 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock } b.pauseOrResumeControlClientLocked() - unlock.UnlockEarly() - // prefs may change irrespective of state; WantRunning should be explicitly // set before potential early return even if the state is unchanged. 
b.health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning()) if oldState == newState { return } + b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)", oldState, newState, prefs.WantRunning(), netMap != nil) - b.send(ipn.Notify{State: &newState}) + b.sendLocked(ipn.Notify{State: &newState}) switch newState { case ipn.NeedsLogin: @@ -5599,7 +5639,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock // always block updates on NeedsLogin even if seamless renewal is enabled, // to prevent calls to authReconfig from reconfiguring the engine when our // key has expired and we're waiting to authenticate to use the new key. - b.blockEngineUpdates(true) + b.blockEngineUpdatesLocked(true) fallthrough case ipn.Stopped, ipn.NoState: // Unconfigure the engine if it has stopped (WantRunning is set to false) @@ -5613,9 +5653,9 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock feature.SystemdStatus("Stopped; run 'tailscale up' to log in") } case ipn.Starting, ipn.NeedsMachineAuth: - b.authReconfig() + b.authReconfigLocked() // Needed so that UpdateEndpoints can run - b.e.RequestStatus() + b.goTracker.Go(b.e.RequestStatus) case ipn.Running: if feature.CanSystemdStatus { var addrStrs []string @@ -5724,109 +5764,23 @@ func (b *LocalBackend) nextStateLocked() ipn.State { // that have happened. It is invoked from the various callbacks that // feed events into LocalBackend. // -// TODO(apenwarr): use a channel or something to prevent reentrancy? -// Or maybe just call the state machine from fewer places. -func (b *LocalBackend) stateMachine() { - unlock := b.lockAndGetUnlock() - b.stateMachineLockedOnEntry(unlock) -} - -// stateMachineLockedOnEntry is like stateMachine but requires b.mu be held to -// call it, but it unlocks b.mu when done (via unlock, a once func). 
-func (b *LocalBackend) stateMachineLockedOnEntry(unlock unlockOnce) { - b.enterStateLockedOnEntry(b.nextStateLocked(), unlock) -} - -// lockAndGetUnlock locks b.mu and returns a sync.OnceFunc function that will -// unlock it at most once. -// -// This is all very unfortunate but exists as a guardrail against the -// unfortunate "lockedOnEntry" methods in this package (primarily -// enterStateLockedOnEntry) that require b.mu held to be locked on entry to the -// function but unlock the mutex on their way out. As a stepping stone to -// cleaning things up (as of 2024-04-06), we at least pass the unlock func -// around now and defer unlock in the caller to avoid missing unlocks and double -// unlocks. TODO(bradfitz,maisem): make the locking in this package more -// traditional (simple). See https://github.com/tailscale/tailscale/issues/11649 -func (b *LocalBackend) lockAndGetUnlock() (unlock unlockOnce) { - b.mu.Lock() - var unlocked atomic.Bool - return func() bool { - if unlocked.CompareAndSwap(false, true) { - b.mu.Unlock() - return true - } - return false - } -} - -// unlockOnce is a func that unlocks only b.mu the first time it's called. -// Therefore it can be safely deferred to catch error paths, without worrying -// about double unlocks if a different point in the code later needs to explicitly -// unlock it first as well. It reports whether it was unlocked. -type unlockOnce func() bool - -// UnlockEarly unlocks the LocalBackend.mu. It panics if u returns false, -// indicating that this unlocker was already used. -// -// We're using this method to help us document & find the places that have -// atypical locking patterns. See -// https://github.com/tailscale/tailscale/issues/11649 for background. -// -// A normal unlock is a deferred one or an explicit b.mu.Unlock a few lines -// after the lock, without lots of control flow in-between. 
An "early" unlock is -// one that happens in weird places, like in various "LockedOnEntry" methods in -// this package that require the mutex to be locked on entry but unlock it -// somewhere in the middle (maybe several calls away) and then sometimes proceed -// to lock it again. -// -// The reason UnlockeEarly panics if already called is because these are the -// points at which it's assumed that the mutex is already held and it now needs -// to be released. If somebody already released it, that invariant was violated. -// On the other hand, simply calling u only returns false instead of panicking -// so you can defer it without care, confident you got all the error return -// paths which were previously done by hand. -func (u unlockOnce) UnlockEarly() { - if !u() { - panic("Unlock on already-called unlockOnce") - } +// requires b.mu to be held. +func (b *LocalBackend) stateMachineLocked() { + b.enterStateLocked(b.nextStateLocked()) } // stopEngineAndWait deconfigures the local network data plane, and // waits for it to deliver a status update indicating it has stopped // before returning. -func (b *LocalBackend) stopEngineAndWait() { +// +// b.mu must be held. +func (b *LocalBackend) stopEngineAndWaitLocked() { b.logf("stopEngineAndWait...") - b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) - b.requestEngineStatusAndWaitForStopped() + st, _ := b.e.ResetAndStop() // TODO: what should we do if this returns an error? + b.setWgengineStatusLocked(st) b.logf("stopEngineAndWait: done.") } -// Requests the wgengine status, and does not return until a status was -// delivered (to the usual callback) that indicates the engine is stopped. 
-func (b *LocalBackend) requestEngineStatusAndWaitForStopped() { - b.logf("requestEngineStatusAndWaitForStopped") - - b.mu.Lock() - defer b.mu.Unlock() - - b.goTracker.Go(b.e.RequestStatus) - b.logf("requestEngineStatusAndWaitForStopped: waiting...") - for { - b.statusChanged.Wait() // temporarily releases lock while waiting - - if !b.blocked { - b.logf("requestEngineStatusAndWaitForStopped: engine is no longer blocked, must have stopped and started again, not safe to wait.") - break - } - if b.engineStatus.NumLive == 0 && b.engineStatus.LiveDERPs == 0 { - b.logf("requestEngineStatusAndWaitForStopped: engine is stopped.") - break - } - b.logf("requestEngineStatusAndWaitForStopped: engine is still running. Waiting...") - } -} - // setControlClientLocked sets the control client to cc, // which may be nil. // @@ -5834,6 +5788,7 @@ func (b *LocalBackend) requestEngineStatusAndWaitForStopped() { func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) { b.cc = cc b.ccAuto, _ = cc.(*controlclient.Auto) + b.ignoreControlClientUpdates.Store(cc == nil) } // resetControlClientLocked sets b.cc to nil and returns the old value. If the @@ -5927,11 +5882,11 @@ func (b *LocalBackend) ShouldHandleViaIP(ip netip.Addr) bool { // Logout logs out the current profile, if any, and waits for the logout to // complete. func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() if !b.hasNodeKeyLocked() { // Already logged out. + b.mu.Unlock() return nil } cc := b.cc @@ -5940,17 +5895,17 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { // delete it later. 
profile := b.pm.CurrentProfile() - _, err := b.editPrefsLockedOnEntry( + _, err := b.editPrefsLocked( actor, &ipn.MaskedPrefs{ WantRunningSet: true, LoggedOutSet: true, Prefs: ipn.Prefs{WantRunning: false, LoggedOut: true}, - }, unlock) + }) + b.mu.Unlock() if err != nil { return err } - // b.mu is now unlocked, after editPrefsLockedOnEntry. // Clear any previous dial plan(s), if set. b.resetDialPlan() @@ -5970,14 +5925,14 @@ func (b *LocalBackend) Logout(ctx context.Context, actor ipnauth.Actor) error { return err } - unlock = b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() if err := b.pm.DeleteProfile(profile.ID()); err != nil { b.logf("error deleting profile: %v", err) return err } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // setNetInfo sets b.hostinfo.NetInfo to ni, and passes ni along to the @@ -6028,12 +5983,19 @@ func (b *LocalBackend) RefreshExitNode() { if !buildfeatures.HasUseExitNode { return } - if b.resolveExitNode() { - b.authReconfig() + b.mu.Lock() + defer b.mu.Unlock() + b.refreshExitNodeLocked() +} + +// refreshExitNodeLocked is like RefreshExitNode but requires b.mu be held. +func (b *LocalBackend) refreshExitNodeLocked() { + if b.resolveExitNodeLocked() { + b.authReconfigLocked() } } -// resolveExitNode determines which exit node to use based on the current prefs +// resolveExitNodeLocked determines which exit node to use based on the current prefs // and netmap. It updates the exit node ID in the prefs if needed, updates the // exit node ID in the hostinfo if needed, sends a notification to clients, and // returns true if the exit node has changed. @@ -6041,13 +6003,11 @@ func (b *LocalBackend) RefreshExitNode() { // It is the caller's responsibility to reconfigure routes and actually // start using the selected exit node, if needed. // -// b.mu must not be held. -func (b *LocalBackend) resolveExitNode() (changed bool) { +// b.mu must be held. 
+func (b *LocalBackend) resolveExitNodeLocked() (changed bool) { if !buildfeatures.HasUseExitNode { return false } - b.mu.Lock() - defer b.mu.Unlock() nm := b.currentNode().NetMap() prefs := b.pm.CurrentPrefs().AsStruct() @@ -6854,8 +6814,8 @@ func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool // It will restart the backend on success. // If the profile is not known, it returns an errProfileNotFound. func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() oldControlURL := b.pm.CurrentPrefs().ControlURLOrDefault(b.polc) if _, changed, err := b.pm.SwitchToProfileByID(profile); !changed || err != nil { @@ -6867,7 +6827,7 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { b.resetDialPlan() } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // resetDialPlan resets the dialPlan for this LocalBackend. It will log if @@ -6881,12 +6841,10 @@ func (b *LocalBackend) resetDialPlan() { } } -// resetForProfileChangeLockedOnEntry resets the backend for a profile change. +// resetForProfileChangeLocked resets the backend for a profile change. // -// b.mu must held on entry. It is released on exit. -func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) error { - defer unlock() - +// b.mu must be held. +func (b *LocalBackend) resetForProfileChangeLocked() error { if b.shutdownCalled { // Prevent a call back to Start during Shutdown, which calls Logout for // ephemeral nodes, which can then call back here. But we're shutting @@ -6903,7 +6861,6 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // Reset the NetworkMap in the engine b.e.SetNetworkMap(new(netmap.NetworkMap)) if prevCC := b.resetControlClientLocked(); prevCC != nil { - // Needs to happen without b.mu held. 
defer prevCC.Shutdown() } // TKA errors should not prevent resetting the backend state. @@ -6917,19 +6874,19 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err b.resetAlwaysOnOverrideLocked() b.extHost.NotifyProfileChange(b.pm.CurrentProfile(), b.pm.CurrentPrefs(), false) b.setAtomicValuesFromPrefsLocked(b.pm.CurrentPrefs()) - b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu + b.enterStateLocked(ipn.NoState) b.health.SetLocalLogConfigHealth(nil) if tkaErr != nil { return tkaErr } - return b.Start(ipn.Options{}) + return b.startLocked(ipn.Options{}) } // DeleteProfile deletes a profile with the given ID. // If the profile is not known, it is a no-op. func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() needToRestart := b.pm.CurrentProfile().ID() == p if err := b.pm.DeleteProfile(p); err != nil { @@ -6941,7 +6898,7 @@ func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { if !needToRestart { return nil } - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // CurrentProfile returns the current LoginProfile. @@ -6954,8 +6911,8 @@ func (b *LocalBackend) CurrentProfile() ipn.LoginProfileView { // NewProfile creates and switches to the new profile. func (b *LocalBackend) NewProfile() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() b.pm.SwitchToNewProfile() @@ -6963,7 +6920,7 @@ func (b *LocalBackend) NewProfile() error { // set. Conservatively reset the dialPlan. b.resetDialPlan() - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } // ListProfiles returns a list of all LoginProfiles. 
@@ -6978,12 +6935,11 @@ func (b *LocalBackend) ListProfiles() []ipn.LoginProfileView { // backend is left with a new profile, ready for StartLoginInterative to be // called to register it as new node. func (b *LocalBackend) ResetAuth() error { - unlock := b.lockAndGetUnlock() - defer unlock() + b.mu.Lock() + defer b.mu.Unlock() - prevCC := b.resetControlClientLocked() - if prevCC != nil { - defer prevCC.Shutdown() // call must happen after release b.mu + if prevCC := b.resetControlClientLocked(); prevCC != nil { + defer prevCC.Shutdown() } if err := b.clearMachineKeyLocked(); err != nil { return err @@ -6992,7 +6948,7 @@ func (b *LocalBackend) ResetAuth() error { return err } b.resetDialPlan() // always reset if we're removing everything - return b.resetForProfileChangeLockedOnEntry(unlock) + return b.resetForProfileChangeLocked() } func (b *LocalBackend) GetPeerEndpointChanges(ctx context.Context, ip netip.Addr) ([]magicsock.EndpointChange, error) { @@ -7223,7 +7179,7 @@ var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") // be selected at random, so the result is not stable. To be eligible for // consideration, the peer must have NodeAttrSuggestExitNode in its CapMap. // -// b.mu.lock() must be held. +// b.mu must be held. 
func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggestionResponse, err error) { if !buildfeatures.HasUseExitNode { return response, feature.ErrUnavailable diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index bac74a33ccf80..962335046024c 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1503,15 +1503,6 @@ func wantExitNodeIDNotify(want tailcfg.StableNodeID) wantedNotification { } } -func wantStateNotify(want ipn.State) wantedNotification { - return wantedNotification{ - name: "State=" + want.String(), - cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { - return n.State != nil && *n.State == want - }, - } -} - func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface @@ -4318,9 +4309,9 @@ func (b *LocalBackend) SetPrefsForTest(newp *ipn.Prefs) { if newp == nil { panic("SetPrefsForTest got nil prefs") } - unlock := b.lockAndGetUnlock() - defer unlock() - b.setPrefsLockedOnEntry(newp, unlock) + b.mu.Lock() + defer b.mu.Unlock() + b.setPrefsLocked(newp) } type peerOptFunc func(*tailcfg.Node) @@ -5808,12 +5799,12 @@ func TestNotificationTargetMatch(t *testing.T) { type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client -func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend { +func newLocalBackendWithTestControl(t testing.TB, enableLogging bool, newControl newTestControlFn) *LocalBackend { bus := eventbustest.NewBus(t) return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystemWithBus(bus), newControl) } -func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { +func newLocalBackendWithSysAndTestControl(t testing.TB, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { logf := logger.Discard if enableLogging { logf = 
tstest.WhileTestRunningLogger(t) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index ca281fbece4c9..2197112b29da6 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1542,6 +1542,11 @@ func TestEngineReconfigOnStateChange(t *testing.T) { tt.steps(t, lb, cc) } + // TODO(bradfitz): this whole event bus settling thing + // should be unnecessary once the bogus uses of eventbus + // are removed. (https://github.com/tailscale/tailscale/issues/16369) + lb.settleEventBus() + if gotState := lb.State(); gotState != tt.wantState { t.Errorf("State: got %v; want %v", gotState, tt.wantState) } @@ -1572,35 +1577,30 @@ func TestEngineReconfigOnStateChange(t *testing.T) { } } -// TestStateMachineURLRace tests that wgengine updates arriving in the middle of +// TestSendPreservesAuthURL tests that wgengine updates arriving in the middle of // processing an auth URL doesn't result in the auth URL being cleared. -func TestStateMachineURLRace(t *testing.T) { - runTestStateMachineURLRace(t, false) +func TestSendPreservesAuthURL(t *testing.T) { + runTestSendPreservesAuthURL(t, false) } -func TestStateMachineURLRaceSeamless(t *testing.T) { - runTestStateMachineURLRace(t, true) +func TestSendPreservesAuthURLSeamless(t *testing.T) { + runTestSendPreservesAuthURL(t, true) } -func runTestStateMachineURLRace(t *testing.T, seamless bool) { +func runTestSendPreservesAuthURL(t *testing.T, seamless bool) { var cc *mockControl b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { cc = newClient(t, opts) return cc }) - nw := newNotificationWatcher(t, b, &ipnauth.TestActor{}) - t.Logf("Start") - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.NeedsLogin)}) b.Start(ipn.Options{ UpdatePrefs: &ipn.Prefs{ WantRunning: true, ControlURL: "https://localhost:1/", }, }) - nw.check() t.Logf("LoginFinished") cc.persist.UserProfile.LoginName = "user1" @@ -1610,72 +1610,16 @@ func 
runTestStateMachineURLRace(t *testing.T, seamless bool) { b.sys.ControlKnobs().SeamlessKeyRenewal.Store(true) } - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.Starting)}) cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), }}) - nw.check() t.Logf("Running") - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.Running)}) b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) - nw.check() t.Logf("Re-auth (StartLoginInteractive)") b.StartLoginInteractive(t.Context()) - stop := make(chan struct{}) - stopSpamming := sync.OnceFunc(func() { - stop <- struct{}{} - }) - // if seamless renewal is enabled, the engine won't be disabled, and we won't - // ever call stopSpamming, so make sure it does get called - defer stopSpamming() - - // Intercept updates between the engine and localBackend, so that we can see - // when the "stopped" update comes in and ensure we stop sending our "we're - // up" updates after that point. - b.e.SetStatusCallback(func(s *wgengine.Status, err error) { - // This is not one of our fake status updates, this is generated from the - // engine in response to LocalBackend calling RequestStatus. Stop spamming - // our fake statuses. - // - // TODO(zofrex): This is fragile, it works right now but would break if the - // calling pattern of RequestStatus changes. We should ensure that we keep - // sending "we're up" statuses right until Reconfig is called with - // zero-valued configs, and after that point only send "stopped" statuses. - stopSpamming() - - // Once stopSpamming returns we are guaranteed to not send any more updates, - // so we can now send the real update (indicating shutdown) and be certain - // it will be received after any fake updates we sent. This is possibly a - // stronger guarantee than we get from the real engine? 
- b.setWgengineStatus(s, err) - }) - - // time needs to be >= last time for the status to be accepted, send all our - // spam with the same stale time so that when a real update comes in it will - // definitely be accepted. - time := b.lastStatusTime - - // Flood localBackend with a lot of wgengine status updates, so if there are - // any race conditions in the multiple locks/unlocks that happen as we process - // the received auth URL, we will hit them. - go func() { - t.Logf("sending lots of fake wgengine status updates") - for { - select { - case <-stop: - t.Logf("stopping fake wgengine status updates") - return - default: - b.setWgengineStatus(&wgengine.Status{AsOf: time, DERPs: 1}, nil) - } - } - }() - t.Logf("Re-auth (receive URL)") url1 := "https://localhost:1/1" cc.send(sendOpt{url: url1}) @@ -1685,122 +1629,11 @@ func runTestStateMachineURLRace(t *testing.T, seamless bool) { // status update to trample it have ended as well. if b.authURL == "" { t.Fatalf("expected authURL to be set") + } else { + t.Log("authURL was set") } } -func TestWGEngineDownThenUpRace(t *testing.T) { - var cc *mockControl - b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { - cc = newClient(t, opts) - return cc - }) - - nw := newNotificationWatcher(t, b, &ipnauth.TestActor{}) - - t.Logf("Start") - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.NeedsLogin)}) - b.Start(ipn.Options{ - UpdatePrefs: &ipn.Prefs{ - WantRunning: true, - ControlURL: "https://localhost:1/", - }, - }) - nw.check() - - t.Logf("LoginFinished") - cc.persist.UserProfile.LoginName = "user1" - cc.persist.NodeID = "node1" - - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.Starting)}) - cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ - SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }}) - nw.check() - - nw.watch(0, []wantedNotification{ - wantStateNotify(ipn.Running)}) - 
b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) - nw.check() - - t.Logf("Re-auth (StartLoginInteractive)") - b.StartLoginInteractive(t.Context()) - - var timeLock sync.RWMutex - timestamp := b.lastStatusTime - - engineShutdown := make(chan struct{}) - gotShutdown := sync.OnceFunc(func() { - t.Logf("engineShutdown") - engineShutdown <- struct{}{} - }) - - b.e.SetStatusCallback(func(s *wgengine.Status, err error) { - timeLock.Lock() - if s.AsOf.After(timestamp) { - timestamp = s.AsOf - } - timeLock.Unlock() - - if err != nil || (s.DERPs == 0 && len(s.Peers) == 0) { - gotShutdown() - } else { - b.setWgengineStatus(s, err) - } - }) - - t.Logf("Re-auth (receive URL)") - url1 := "https://localhost:1/1" - - done := make(chan struct{}) - var wg sync.WaitGroup - - wg.Go(func() { - t.Log("cc.send starting") - cc.send(sendOpt{url: url1}) // will block until engine stops - t.Log("cc.send returned") - }) - - <-engineShutdown // will get called once cc.send is blocked - gotShutdown = sync.OnceFunc(func() { - t.Logf("engineShutdown") - engineShutdown <- struct{}{} - }) - - wg.Go(func() { - t.Log("StartLoginInteractive starting") - b.StartLoginInteractive(t.Context()) // will also block until engine stops - t.Log("StartLoginInteractive returned") - }) - - <-engineShutdown // will get called once StartLoginInteractive is blocked - - st := controlclient.Status{} - st.SetStateForTest(controlclient.StateAuthenticated) - b.SetControlClientStatus(cc, st) - - timeLock.RLock() - b.setWgengineStatus(&wgengine.Status{AsOf: timestamp}, nil) // engine is down event finally arrives - b.setWgengineStatus(&wgengine.Status{AsOf: timestamp, DERPs: 1}, nil) // engine is back up - timeLock.RUnlock() - - go func() { - wg.Wait() - done <- struct{}{} - }() - - t.Log("waiting for .send and .StartLoginInteractive to return") - - select { - case <-done: - case <-time.After(10 * time.Second): - t.Fatalf("timed out waiting") - } - - t.Log("both returned") -} - func 
buildNetmapWithPeers(self tailcfg.NodeView, peers ...tailcfg.NodeView) *netmap.NetworkMap { const ( firstAutoUserID = tailcfg.UserID(10000) @@ -2033,6 +1866,14 @@ func (e *mockEngine) RequestStatus() { } } +func (e *mockEngine) ResetAndStop() (*wgengine.Status, error) { + err := e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) + if err != nil { + return nil, err + } + return &wgengine.Status{AsOf: time.Now()}, nil +} + func (e *mockEngine) PeerByKey(key.NodePublic) (_ wgint.Peer, ok bool) { return wgint.Peer{}, false } diff --git a/util/execqueue/execqueue.go b/util/execqueue/execqueue.go index 889cea2555806..dce70c542f7df 100644 --- a/util/execqueue/execqueue.go +++ b/util/execqueue/execqueue.go @@ -12,6 +12,8 @@ import ( type ExecQueue struct { mu sync.Mutex + ctx context.Context // context.Background + closed on Shutdown + cancel context.CancelFunc // closes ctx closed bool inFlight bool // whether a goroutine is running q.run doneWaiter chan struct{} // non-nil if waiter is waiting, then closed @@ -24,6 +26,7 @@ func (q *ExecQueue) Add(f func()) { if q.closed { return } + q.initCtxLocked() if q.inFlight { q.queue = append(q.queue, f) } else { @@ -79,18 +82,32 @@ func (q *ExecQueue) Shutdown() { q.mu.Lock() defer q.mu.Unlock() q.closed = true + if q.cancel != nil { + q.cancel() + } } -// Wait waits for the queue to be empty. +func (q *ExecQueue) initCtxLocked() { + if q.ctx == nil { + q.ctx, q.cancel = context.WithCancel(context.Background()) + } +} + +// Wait waits for the queue to be empty or shut down. 
func (q *ExecQueue) Wait(ctx context.Context) error { q.mu.Lock() + q.initCtxLocked() waitCh := q.doneWaiter if q.inFlight && waitCh == nil { waitCh = make(chan struct{}) q.doneWaiter = waitCh } + closed := q.closed q.mu.Unlock() + if closed { + return errors.New("execqueue shut down") + } if waitCh == nil { return nil } @@ -98,6 +115,8 @@ func (q *ExecQueue) Wait(ctx context.Context) error { select { case <-waitCh: return nil + case <-q.ctx.Done(): + return errors.New("execqueue shut down") case <-ctx.Done(): return ctx.Err() } diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 1e70856cae10d..8ad771fc5e000 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -47,6 +47,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" + "tailscale.com/util/backoff" "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -924,6 +925,32 @@ func hasOverlap(aips, rips views.Slice[netip.Prefix]) bool { return false } +// ResetAndStop resets the engine to a clean state (like calling Reconfig +// with all pointers to zero values) and waits for it to be fully stopped, +// with no live peers or DERPs. +// +// Unlike Reconfig, it does not return ErrNoChanges. +// +// If the engine stops, returns the status. NB that this status will not be sent +// to the registered status callback, it is on the caller to ensure this status +// is handled appropriately. 
+func (e *userspaceEngine) ResetAndStop() (*Status, error) { + if err := e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}); err != nil && !errors.Is(err, ErrNoChanges) { + return nil, err + } + bo := backoff.NewBackoff("UserspaceEngineResetAndStop", e.logf, 1*time.Second) + for { + st, err := e.getStatus() + if err != nil { + return nil, err + } + if len(st.Peers) == 0 && st.DERPs == 0 { + return st, nil + } + bo.BackOff(context.Background(), fmt.Errorf("waiting for engine to stop: peers=%d derps=%d", len(st.Peers), st.DERPs)) + } +} + func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, dnsCfg *dns.Config) error { if routerCfg == nil { panic("routerCfg must not be nil") diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go index 0500e6f7fd4c7..9cc4ed3b594c3 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -124,6 +124,12 @@ func (e *watchdogEngine) watchdog(name string, fn func()) { func (e *watchdogEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, dnsCfg *dns.Config) error { return e.watchdogErr("Reconfig", func() error { return e.wrap.Reconfig(cfg, routerCfg, dnsCfg) }) } +func (e *watchdogEngine) ResetAndStop() (st *Status, err error) { + e.watchdog("ResetAndStop", func() { + st, err = e.wrap.ResetAndStop() + }) + return st, err +} func (e *watchdogEngine) GetFilter() *filter.Filter { return e.wrap.GetFilter() } diff --git a/wgengine/wgengine.go b/wgengine/wgengine.go index 6aaf567ad01ee..be78731474bc9 100644 --- a/wgengine/wgengine.go +++ b/wgengine/wgengine.go @@ -69,6 +69,13 @@ type Engine interface { // The returned error is ErrNoChanges if no changes were made. Reconfig(*wgcfg.Config, *router.Config, *dns.Config) error + // ResetAndStop resets the engine to a clean state (like calling Reconfig + // with all pointers to zero values) and waits for it to be fully stopped, + // with no live peers or DERPs. + // + // Unlike Reconfig, it does not return ErrNoChanges. 
+ ResetAndStop() (*Status, error) + // PeerForIP returns the node to which the provided IP routes, // if any. If none is found, (nil, false) is returned. PeerForIP(netip.Addr) (_ PeerForIP, ok bool) From 9e4d1fd87fc3ab6cfa1b91c7a7c3ced53348fb02 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 13 Nov 2025 20:57:48 -0800 Subject: [PATCH 0674/1093] feature/relayserver,ipn/ipnlocal,net/udprelay: plumb DERPMap (#17881) This commit replaces usage of local.Client in net/udprelay with DERPMap plumbing over the eventbus. This has been a longstanding TODO. This work was also accelerated by a memory leak in net/http when using local.Client over long periods of time. So, this commit also addresses said leak. Updates #17801 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 220 ++++++++++++----------- feature/relayserver/relayserver_test.go | 222 +++++++++++++++++++----- ipn/ipnlocal/node_backend.go | 13 +- net/udprelay/server.go | 38 ++-- 4 files changed, 324 insertions(+), 169 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index df2fb4cb7c165..2646a0cbfee6e 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -21,8 +21,10 @@ import ( "tailscale.com/ipn/ipnext" "tailscale.com/ipn/localapi" "tailscale.com/net/udprelay" + "tailscale.com/net/udprelay/endpoint" "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" "tailscale.com/util/eventbus" @@ -68,25 +70,41 @@ func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r * // extension. It is registered with [ipnext.RegisterExtension] if the package is // imported. 
func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { - return &extension{ + e := &extension{ + newServerFn: func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { + return udprelay.NewServer(logf, port, overrideAddrs) + }, logf: logger.WithPrefix(logf, featureName+": "), - bus: sb.Sys().Bus.Get(), - }, nil + } + e.ec = sb.Sys().Bus.Get().Client("relayserver.extension") + e.respPub = eventbus.Publish[magicsock.UDPRelayAllocResp](e.ec) + eventbus.SubscribeFunc(e.ec, e.onDERPMapView) + eventbus.SubscribeFunc(e.ec, e.onAllocReq) + return e, nil +} + +// relayServer is an interface for [udprelay.Server]. +type relayServer interface { + Close() error + AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) + GetSessions() []status.ServerSession + SetDERPMapView(tailcfg.DERPMapView) } // extension is an [ipnext.Extension] managing the relay server on platforms // that import this package. type extension struct { - logf logger.Logf - bus *eventbus.Bus + newServerFn func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) // swappable for tests + logf logger.Logf + ec *eventbus.Client + respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] - mu sync.Mutex // guards the following fields - shutdown bool - - port *int // ipn.Prefs.RelayServerPort, nil if disabled - eventSubs *eventbus.Monitor // nil if not connected to eventbus - debugSessionsCh chan chan []status.ServerSession // non-nil if consumeEventbusTopics is running - hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer + mu sync.Mutex // guards the following fields + shutdown bool // true if Shutdown() has been called + rs relayServer // nil when disabled + port *int // ipn.Prefs.RelayServerPort, nil if disabled + derpMapView tailcfg.DERPMapView // latest seen over the eventbus + hasNodeAttrDisableRelayServer bool // [tailcfg.NodeAttrDisableRelayServer] } // Name implements 
[ipnext.Extension]. @@ -104,26 +122,83 @@ func (e *extension) Init(host ipnext.Host) error { return nil } -// handleBusLifetimeLocked handles the lifetime of consumeEventbusTopics. -func (e *extension) handleBusLifetimeLocked() { - busShouldBeRunning := !e.shutdown && e.port != nil && !e.hasNodeAttrDisableRelayServer - if !busShouldBeRunning { - e.disconnectFromBusLocked() +func (e *extension) onDERPMapView(view tailcfg.DERPMapView) { + e.mu.Lock() + defer e.mu.Unlock() + e.derpMapView = view + if e.rs != nil { + e.rs.SetDERPMapView(view) + } +} + +func (e *extension) onAllocReq(req magicsock.UDPRelayAllocReq) { + e.mu.Lock() + defer e.mu.Unlock() + if e.shutdown { + return + } + if e.rs == nil { + if !e.relayServerShouldBeRunningLocked() { + return + } + e.tryStartRelayServerLocked() + if e.rs == nil { + return + } + } + se, err := e.rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) + if err != nil { + e.logf("error allocating endpoint: %v", err) + return + } + e.respPub.Publish(magicsock.UDPRelayAllocResp{ + ReqRxFromNodeKey: req.RxFromNodeKey, + ReqRxFromDiscoKey: req.RxFromDiscoKey, + Message: &disco.AllocateUDPRelayEndpointResponse{ + Generation: req.Message.Generation, + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, + }, + }) +} + +func (e *extension) tryStartRelayServerLocked() { + rs, err := e.newServerFn(e.logf, *e.port, overrideAddrs()) + if err != nil { + e.logf("error initializing server: %v", err) return - } else if e.eventSubs != nil { - return // already running } + e.rs = rs + e.rs.SetDERPMapView(e.derpMapView) +} - ec := e.bus.Client("relayserver.extension") - e.debugSessionsCh = make(chan chan []status.ServerSession) - e.eventSubs = ptr.To(ec.Monitor(e.consumeEventbusTopics(ec, *e.port))) +func 
(e *extension) relayServerShouldBeRunningLocked() bool { + return !e.shutdown && e.port != nil && !e.hasNodeAttrDisableRelayServer +} + +// handleRelayServerLifetimeLocked handles the lifetime of [e.rs]. +func (e *extension) handleRelayServerLifetimeLocked() { + if !e.relayServerShouldBeRunningLocked() { + e.stopRelayServerLocked() + return + } else if e.rs != nil { + return // already running + } + e.tryStartRelayServerLocked() } func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { e.mu.Lock() defer e.mu.Unlock() e.hasNodeAttrDisableRelayServer = nodeView.HasCap(tailcfg.NodeAttrDisableRelayServer) - e.handleBusLifetimeLocked() + e.handleRelayServerLifetimeLocked() } func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { @@ -133,13 +208,13 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV enableOrDisableServer := ok != (e.port != nil) portChanged := ok && e.port != nil && newPort != *e.port if enableOrDisableServer || portChanged || !sameNode { - e.disconnectFromBusLocked() + e.stopRelayServerLocked() e.port = nil if ok { e.port = ptr.To(newPort) } } - e.handleBusLifetimeLocked() + e.handleRelayServerLifetimeLocked() } // overrideAddrs returns TS_DEBUG_RELAY_SERVER_ADDRS as []netip.Addr, if set. It @@ -162,88 +237,20 @@ var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { return }) -// consumeEventbusTopics serves endpoint allocation requests over the eventbus. -// It also serves [relayServer] debug information on a channel. -// consumeEventbusTopics must never acquire [extension.mu], which can be held -// by other goroutines while waiting to receive on [extension.eventSubs] or the -// inner [extension.debugSessionsCh] channel. 
-func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*eventbus.Client) { - reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](ec) - respPub := eventbus.Publish[magicsock.UDPRelayAllocResp](ec) - debugSessionsCh := e.debugSessionsCh - - return func(ec *eventbus.Client) { - rs, err := udprelay.NewServer(e.logf, port, overrideAddrs()) - if err != nil { - e.logf("error initializing server: %v", err) - } - - defer func() { - if rs != nil { - rs.Close() - } - }() - for { - select { - case <-ec.Done(): - return - case respCh := <-debugSessionsCh: - if rs == nil { - respCh <- nil - continue - } - sessions := rs.GetSessions() - respCh <- sessions - case req := <-reqSub.Events(): - if rs == nil { - // The server may have previously failed to initialize if - // the configured port was in use, try again. - rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) - if err != nil { - e.logf("error initializing server: %v", err) - continue - } - } - se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) - if err != nil { - e.logf("error allocating endpoint: %v", err) - continue - } - respPub.Publish(magicsock.UDPRelayAllocResp{ - ReqRxFromNodeKey: req.RxFromNodeKey, - ReqRxFromDiscoKey: req.RxFromDiscoKey, - Message: &disco.AllocateUDPRelayEndpointResponse{ - Generation: req.Message.Generation, - UDPRelayEndpoint: disco.UDPRelayEndpoint{ - ServerDisco: se.ServerDisco, - ClientDisco: se.ClientDisco, - LamportID: se.LamportID, - VNI: se.VNI, - BindLifetime: se.BindLifetime.Duration, - SteadyStateLifetime: se.SteadyStateLifetime.Duration, - AddrPorts: se.AddrPorts, - }, - }, - }) - } - } - } -} - -func (e *extension) disconnectFromBusLocked() { - if e.eventSubs != nil { - e.eventSubs.Close() - e.eventSubs = nil - e.debugSessionsCh = nil +func (e *extension) stopRelayServerLocked() { + if e.rs != nil { + e.rs.Close() } + e.rs = nil } // Shutdown implements [ipnlocal.Extension]. 
func (e *extension) Shutdown() error { e.mu.Lock() defer e.mu.Unlock() - e.disconnectFromBusLocked() e.shutdown = true + e.ec.Close() + e.stopRelayServerLocked() return nil } @@ -253,23 +260,14 @@ func (e *extension) Shutdown() error { func (e *extension) serverStatus() status.ServerStatus { e.mu.Lock() defer e.mu.Unlock() - st := status.ServerStatus{ UDPPort: nil, Sessions: nil, } - if e.port == nil || e.eventSubs == nil { + if e.rs == nil { return st } st.UDPPort = ptr.To(*e.port) - - ch := make(chan []status.ServerSession) - select { - case e.debugSessionsCh <- ch: - resp := <-ch - st.Sessions = resp - return st - case <-e.eventSubs.Done(): - return st - } + st.Sessions = e.rs.GetSessions() + return st } diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 65c503524c5de..2184b51759b61 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -4,13 +4,20 @@ package relayserver import ( + "errors" + "net/netip" + "reflect" "testing" "tailscale.com/ipn" + "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" + "tailscale.com/tailcfg" "tailscale.com/tsd" + "tailscale.com/tstime" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" - "tailscale.com/util/eventbus" ) func Test_extension_profileStateChanged(t *testing.T) { @@ -19,29 +26,33 @@ func Test_extension_profileStateChanged(t *testing.T) { type fields struct { port *int + rs relayServer } type args struct { prefs ipn.PrefsView sameNode bool } tests := []struct { - name string - fields fields - args args - wantPort *int - wantBusRunning bool + name string + fields fields + args args + wantPort *int + wantRelayServerFieldNonNil bool + wantRelayServerFieldMutated bool }{ { - name: "no changes non-nil port", + name: "no changes non-nil port previously running", fields: fields{ port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOne.View(), 
sameNode: true, }, - wantPort: ptr.To(1), - wantBusRunning: true, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: false, }, { name: "prefs port nil", @@ -52,8 +63,23 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithNilPort.View(), sameNode: true, }, - wantPort: nil, - wantBusRunning: false, + wantPort: nil, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "prefs port nil previously running", + fields: fields{ + port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithNilPort.View(), + sameNode: true, + }, + wantPort: nil, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, }, { name: "prefs port changed", @@ -64,8 +90,23 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), - wantBusRunning: true, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + }, + { + name: "prefs port changed previously running", + fields: fields{ + port: ptr.To(2), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: true, + }, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, }, { name: "sameNode false", @@ -76,8 +117,23 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), - wantBusRunning: true, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + }, + { + name: "sameNode false previously running", + fields: fields{ + port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: false, + }, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, }, { name: "prefs port non-nil extension port nil", 
@@ -88,85 +144,165 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), - wantBusRunning: true, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { sys := tsd.NewSystem() - bus := sys.Bus.Get() - e := &extension{ - logf: logger.Discard, - port: tt.fields.port, - bus: bus, + ipne, err := newExtension(logger.Discard, mockSafeBackend{sys}) + if err != nil { + t.Fatal(err) + } + e := ipne.(*extension) + e.newServerFn = func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { + return &mockRelayServer{}, nil } - defer e.disconnectFromBusLocked() + e.port = tt.fields.port + e.rs = tt.fields.rs + defer e.Shutdown() e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) - if tt.wantBusRunning != (e.eventSubs != nil) { - t.Errorf("wantBusRunning: %v != (e.eventSubs != nil): %v", tt.wantBusRunning, e.eventSubs != nil) + if tt.wantRelayServerFieldNonNil != (e.rs != nil) { + t.Errorf("wantRelayServerFieldNonNil: %v != (e.rs != nil): %v", tt.wantRelayServerFieldNonNil, e.rs != nil) } if (tt.wantPort == nil) != (e.port == nil) { t.Errorf("(tt.wantPort == nil): %v != (e.port == nil): %v", tt.wantPort == nil, e.port == nil) } else if tt.wantPort != nil && *tt.wantPort != *e.port { t.Errorf("wantPort: %d != *e.port: %d", *tt.wantPort, *e.port) } + if tt.wantRelayServerFieldMutated != !reflect.DeepEqual(tt.fields.rs, e.rs) { + t.Errorf("wantRelayServerFieldMutated: %v != !reflect.DeepEqual(tt.fields.rs, e.rs): %v", tt.wantRelayServerFieldMutated, !reflect.DeepEqual(tt.fields.rs, e.rs)) + } }) } } -func Test_extension_handleBusLifetimeLocked(t *testing.T) { +func mockRelayServerNotZeroVal() *mockRelayServer { + return &mockRelayServer{true} +} + +type mockRelayServer struct { + set bool +} + +func (mockRelayServer) Close() error { return nil } 
+func (mockRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (endpoint.ServerEndpoint, error) { + return endpoint.ServerEndpoint{}, errors.New("not implemented") +} +func (mockRelayServer) GetSessions() []status.ServerSession { return nil } +func (mockRelayServer) SetDERPMapView(tailcfg.DERPMapView) { return } + +type mockSafeBackend struct { + sys *tsd.System +} + +func (m mockSafeBackend) Sys() *tsd.System { return m.sys } +func (mockSafeBackend) Clock() tstime.Clock { return nil } +func (mockSafeBackend) TailscaleVarRoot() string { return "" } + +func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { tests := []struct { name string shutdown bool port *int - eventSubs *eventbus.Monitor + rs relayServer hasNodeAttrDisableRelayServer bool - wantBusRunning bool + wantRelayServerFieldNonNil bool + wantRelayServerFieldMutated bool }{ { name: "want running", shutdown: false, port: ptr.To(1), hasNodeAttrDisableRelayServer: false, - wantBusRunning: true, + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + }, + { + name: "want running previously running", + shutdown: false, + port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: false, }, { name: "shutdown true", shutdown: true, port: ptr.To(1), hasNodeAttrDisableRelayServer: false, - wantBusRunning: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "shutdown true previously running", + shutdown: true, + port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, }, { name: "port nil", shutdown: false, port: nil, hasNodeAttrDisableRelayServer: false, - wantBusRunning: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "port nil previously running", + shutdown: false, + port: nil, + 
rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, }, { name: "hasNodeAttrDisableRelayServer true", shutdown: false, port: nil, hasNodeAttrDisableRelayServer: true, - wantBusRunning: false, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: false, + }, + { + name: "hasNodeAttrDisableRelayServer true previously running", + shutdown: false, + port: nil, + rs: mockRelayServerNotZeroVal(), + hasNodeAttrDisableRelayServer: true, + wantRelayServerFieldNonNil: false, + wantRelayServerFieldMutated: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - e := &extension{ - logf: logger.Discard, - bus: eventbus.New(), - shutdown: tt.shutdown, - port: tt.port, - eventSubs: tt.eventSubs, - hasNodeAttrDisableRelayServer: tt.hasNodeAttrDisableRelayServer, + sys := tsd.NewSystem() + ipne, err := newExtension(logger.Discard, mockSafeBackend{sys}) + if err != nil { + t.Fatal(err) + } + e := ipne.(*extension) + e.newServerFn = func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { + return &mockRelayServer{}, nil + } + e.shutdown = tt.shutdown + e.port = tt.port + e.rs = tt.rs + e.hasNodeAttrDisableRelayServer = tt.hasNodeAttrDisableRelayServer + e.handleRelayServerLifetimeLocked() + defer e.Shutdown() + if tt.wantRelayServerFieldNonNil != (e.rs != nil) { + t.Errorf("wantRelayServerFieldNonNil: %v != (e.rs != nil): %v", tt.wantRelayServerFieldNonNil, e.rs != nil) } - e.handleBusLifetimeLocked() - defer e.disconnectFromBusLocked() - if tt.wantBusRunning != (e.eventSubs != nil) { - t.Errorf("wantBusRunning: %v != (e.eventSubs != nil): %v", tt.wantBusRunning, e.eventSubs != nil) + if tt.wantRelayServerFieldMutated != !reflect.DeepEqual(tt.rs, e.rs) { + t.Errorf("wantRelayServerFieldMutated: %v != !reflect.DeepEqual(tt.rs, e.rs): %v", tt.wantRelayServerFieldMutated, !reflect.DeepEqual(tt.rs, e.rs)) } }) } diff --git 
a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 3408d4cbb325d..dbe23e4d5245a 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -75,10 +75,11 @@ type nodeBackend struct { filterAtomic atomic.Pointer[filter.Filter] // initialized once and immutable - eventClient *eventbus.Client - filterPub *eventbus.Publisher[magicsock.FilterUpdate] - nodeViewsPub *eventbus.Publisher[magicsock.NodeViewsUpdate] - nodeMutsPub *eventbus.Publisher[magicsock.NodeMutationsUpdate] + eventClient *eventbus.Client + filterPub *eventbus.Publisher[magicsock.FilterUpdate] + nodeViewsPub *eventbus.Publisher[magicsock.NodeViewsUpdate] + nodeMutsPub *eventbus.Publisher[magicsock.NodeMutationsUpdate] + derpMapViewPub *eventbus.Publisher[tailcfg.DERPMapView] // TODO(nickkhyl): maybe use sync.RWMutex? mu sync.Mutex // protects the following fields @@ -121,6 +122,7 @@ func newNodeBackend(ctx context.Context, logf logger.Logf, bus *eventbus.Bus) *n nb.filterPub = eventbus.Publish[magicsock.FilterUpdate](nb.eventClient) nb.nodeViewsPub = eventbus.Publish[magicsock.NodeViewsUpdate](nb.eventClient) nb.nodeMutsPub = eventbus.Publish[magicsock.NodeMutationsUpdate](nb.eventClient) + nb.derpMapViewPub = eventbus.Publish[tailcfg.DERPMapView](nb.eventClient) nb.filterPub.Publish(magicsock.FilterUpdate{Filter: nb.filterAtomic.Load()}) return nb } @@ -435,6 +437,9 @@ func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { if nm != nil { nv.SelfNode = nm.SelfNode nv.Peers = nm.Peers + nb.derpMapViewPub.Publish(nm.DERPMap.View()) + } else { + nb.derpMapViewPub.Publish(tailcfg.DERPMapView{}) } nb.nodeViewsPub.Publish(nv) } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 69e0de095f5fe..c050c94166e2e 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -21,7 +21,6 @@ import ( "go4.org/mem" "golang.org/x/net/ipv6" - "tailscale.com/client/local" "tailscale.com/disco" "tailscale.com/net/batching" "tailscale.com/net/netaddr" @@ -32,6 
+31,7 @@ import ( "tailscale.com/net/stun" "tailscale.com/net/udprelay/endpoint" "tailscale.com/net/udprelay/status" + "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -72,7 +72,8 @@ type Server struct { closeCh chan struct{} netChecker *netcheck.Client - mu sync.Mutex // guards the following fields + mu sync.Mutex // guards the following fields + derpMap *tailcfg.DERPMap addrDiscoveryOnce bool // addrDiscovery completed once (successfully or unsuccessfully) addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints closed bool @@ -374,15 +375,12 @@ func (s *Server) addrDiscoveryLoop() { } } - // fetch DERPMap to feed to netcheck - derpMapCtx, derpMapCancel := context.WithTimeout(context.Background(), time.Second) - defer derpMapCancel() - localClient := &local.Client{} - // TODO(jwhited): We are in-process so use eventbus or similar. - // local.Client gets us going. - dm, err := localClient.CurrentDERPMap(derpMapCtx) - if err != nil { - return nil, err + dm := s.getDERPMap() + if dm == nil { + // We don't have a DERPMap which is required to dynamically + // discover external addresses, but we can return the endpoints we + // do have. + return addrPorts.Slice(), nil } // get addrPorts as visible from DERP @@ -864,3 +862,21 @@ func (s *Server) GetSessions() []status.ServerSession { } return sessions } + +// SetDERPMapView sets the [tailcfg.DERPMapView] to use for future netcheck +// reports. 
+func (s *Server) SetDERPMapView(view tailcfg.DERPMapView) { + s.mu.Lock() + defer s.mu.Unlock() + if !view.Valid() { + s.derpMap = nil + return + } + s.derpMap = view.AsStruct() +} + +func (s *Server) getDERPMap() *tailcfg.DERPMap { + s.mu.Lock() + defer s.mu.Unlock() + return s.derpMap +} From b5cd29932ef1836ae40b6a6f2688212f1227922d Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 7 Nov 2025 12:04:18 -0500 Subject: [PATCH 0675/1093] tka: add a test for unmarshaling existing AUMs Updates https://github.com/tailscale/tailscale/issues/17613 Change-Id: I693a580949eef59263353af6e7e03a7af9bbaa0b Signed-off-by: Alex Chan --- tka/aum_test.go | 76 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/tka/aum_test.go b/tka/aum_test.go index 4297efabff13f..833a026544f54 100644 --- a/tka/aum_test.go +++ b/tka/aum_test.go @@ -5,6 +5,8 @@ package tka import ( "bytes" + "encoding/base64" + "fmt" "testing" "github.com/google/go-cmp/cmp" @@ -156,6 +158,80 @@ func TestSerialization(t *testing.T) { } } +func fromBase64(s string) []byte { + data, err := base64.StdEncoding.DecodeString(s) + if err != nil { + panic(fmt.Sprintf("base64 decode failed: %v", err)) + } + return data +} + +// This test verifies that we can read AUMs which were serialized with +// older versions of our code. +func TestDeserializeExistingAUMs(t *testing.T) { + for _, tt := range []struct { + Name string + Data []byte + Want AUM + }{ + { + // This is an AUM which was created in a test tailnet, and encoded + // on 2025-11-07 with commit d4c5b27. 
+ Name: "genesis-aum-2025-11-07", + Data: fromBase64("pAEFAvYFpQH2AopYII0sLaLSEZU3W5DT1dG2WYnzjCBr4tXtVbCT2LvA9LS6WCAQhwVGDiUGRiu3P63gucZ/8otjt2DXyk+OBjbh5iWx1Fgg5VU4oRQiMoq5qK00McfpwtmjcheVammLCRwzdp2Zje9YIHDoOXe4ogPSy7lfA/veyPCKM6iZe3PTgzhQZ4W5Sh7wWCBYQtiQ6NcRlyVARJxgAj1BbbvdJQ0t4m+vHqU1J02oDlgg2sksJA+COfsBkrohwHBWlbKrpS8Mvigpl+enuHw9rIJYIB/+CUBBBLUz0KeHu7NKrg5ZEhjjPUWhNcf9QTNHjuNWWCCJuxqPZ6/IASPTmAERaoKnBNH/D+zY4p4TUGHR4fACjFggMtDAipPutgcxKnU9Tg2663gP3KlTQfztV3hBwiePZdRYIGYeD2erBkRouSL20lOnWHHlRq5kmNfN6xFb2CTaPjnXA4KjAQECAQNYIADftG3yaitV/YMoKSBP45zgyeodClumN9ZaeQg/DmCEowEBAgEDWCBRKbmWSzOyHXbHJuYn8s7dmMPDzxmIjgBoA80cBYgItAQbEWOrxfqJzIkFG/5uNUp0s/ScF4GiAVggAN+0bfJqK1X9gygpIE/jnODJ6h0KW6Y31lp5CD8OYIQCWEAENvzblKV2qx6PED5YdGy8kWa7nxEnaeuMmS5Wkx0n7CXs0XxD5f2NIE+pSv9cOsNkfYNndQkYD7ne33hQOsQM"), + Want: AUM{ + MessageKind: AUMCheckpoint, + State: &State{ + DisablementSecrets: [][]byte{ + fromBase64("jSwtotIRlTdbkNPV0bZZifOMIGvi1e1VsJPYu8D0tLo="), + fromBase64("EIcFRg4lBkYrtz+t4LnGf/KLY7dg18pPjgY24eYlsdQ="), + fromBase64("5VU4oRQiMoq5qK00McfpwtmjcheVammLCRwzdp2Zje8="), + fromBase64("cOg5d7iiA9LLuV8D+97I8IozqJl7c9ODOFBnhblKHvA="), + fromBase64("WELYkOjXEZclQEScYAI9QW273SUNLeJvrx6lNSdNqA4="), + fromBase64("2sksJA+COfsBkrohwHBWlbKrpS8Mvigpl+enuHw9rII="), + fromBase64("H/4JQEEEtTPQp4e7s0quDlkSGOM9RaE1x/1BM0eO41Y="), + fromBase64("ibsaj2evyAEj05gBEWqCpwTR/w/s2OKeE1Bh0eHwAow="), + fromBase64("MtDAipPutgcxKnU9Tg2663gP3KlTQfztV3hBwiePZdQ="), + fromBase64("Zh4PZ6sGRGi5IvbSU6dYceVGrmSY183rEVvYJNo+Odc="), + }, + Keys: []Key{ + { + Kind: Key25519, + Votes: 1, + Public: fromBase64("AN+0bfJqK1X9gygpIE/jnODJ6h0KW6Y31lp5CD8OYIQ="), + }, + { + Kind: Key25519, + Votes: 1, + Public: fromBase64("USm5lkszsh12xybmJ/LO3ZjDw88ZiI4AaAPNHAWICLQ="), + }, + }, + StateID1: 1253033988139371657, + StateID2: 18333649726973670556, + }, + Signatures: []tkatype.Signature{ + { + KeyID: fromBase64("AN+0bfJqK1X9gygpIE/jnODJ6h0KW6Y31lp5CD8OYIQ="), + Signature: 
fromBase64("BDb825SldqsejxA+WHRsvJFmu58RJ2nrjJkuVpMdJ+wl7NF8Q+X9jSBPqUr/XDrDZH2DZ3UJGA+53t94UDrEDA=="), + }, + }, + }, + }, + } { + t.Run(tt.Name, func(t *testing.T) { + var got AUM + + if err := got.Unserialize(tt.Data); err != nil { + t.Fatalf("Unserialize: %v", err) + } + + if diff := cmp.Diff(got, tt.Want); diff != "" { + t.Fatalf("wrong AUM (-got, +want):\n%s", diff) + } + }) + } +} + func TestAUMWeight(t *testing.T) { var fakeKeyID [blake2s.Size]byte testingRand(t, 1).Read(fakeKeyID[:]) From 124301fbb651382959f8bfe9b1f1765e42e8a3ef Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Fri, 14 Nov 2025 13:21:56 +0000 Subject: [PATCH 0676/1093] ipn/ipnlocal: log prefs changes and reason in Start (#17876) Updates tailscale/corp#34238 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 076752469abef..f0a77531bdd3b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2397,6 +2397,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { func (b *LocalBackend) startLocked(opts ipn.Options) error { b.logf("Start") + logf := logger.WithPrefix(b.logf, "Start: ") b.startOnce.Do(b.initOnce) var clientToShutdown controlclient.Client @@ -2426,7 +2427,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { if b.state != ipn.Running && b.conf == nil && opts.AuthKey == "" { sysak, _ := b.polc.GetString(pkey.AuthKey, "") if sysak != "" { - b.logf("Start: setting opts.AuthKey by syspolicy, len=%v", len(sysak)) + logf("setting opts.AuthKey by syspolicy, len=%v", len(sysak)) opts.AuthKey = strings.TrimSpace(sysak) } } @@ -2459,11 +2460,13 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { cn := b.currentNode() - prefsChanged := false + var prefsChanged bool + var prefsChangedWhy []string newPrefs := b.pm.CurrentPrefs().AsStruct() if opts.UpdatePrefs != nil { newPrefs = 
opts.UpdatePrefs.Clone() prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "opts.UpdatePrefs") } // Apply any syspolicy overrides, resolve exit node ID, etc. // As of 2025-07-03, this is primarily needed in two cases: @@ -2471,6 +2474,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { // - when Always Mode is enabled and we need to set WantRunning to true if b.reconcilePrefsLocked(newPrefs) { prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "reconcilePrefsLocked") } // neither UpdatePrefs or reconciliation should change Persist @@ -2478,19 +2482,21 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { if buildfeatures.HasTPM { if genKey, ok := feature.HookGenerateAttestationKeyIfEmpty.GetOk(); ok { - newKey, err := genKey(newPrefs.Persist, b.logf) + newKey, err := genKey(newPrefs.Persist, logf) if err != nil { - b.logf("failed to populate attestation key from TPM: %v", err) + logf("failed to populate attestation key from TPM: %v", err) } if newKey { prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "newKey") } } } if prefsChanged { + logf("updated prefs: %v, reason: %v", newPrefs.Pretty(), prefsChangedWhy) if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil { - b.logf("failed to save updated and reconciled prefs: %v", err) + logf("failed to save updated and reconciled prefs (but still using updated prefs in memory): %v", err) } } prefs := newPrefs.View() @@ -2510,7 +2516,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { serverURL := prefs.ControlURLOrDefault(b.polc) if inServerMode := prefs.ForceDaemon(); inServerMode || runtime.GOOS == "windows" { - b.logf("Start: serverMode=%v", inServerMode) + logf("serverMode=%v", inServerMode) } b.applyPrefsToHostinfoLocked(hostinfo, prefs) @@ -2578,7 +2584,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { endpoints := b.endpoints if err := b.initTKALocked(); err != nil { - b.logf("initTKALocked: %v", err) 
+ logf("initTKALocked: %v", err) } var tkaHead string if b.tka != nil { From 0285e1d5fb2b06cd4003ab3a7c1037caa091a85e Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 14 Nov 2025 10:22:58 -0800 Subject: [PATCH 0677/1093] feature/relayserver: fix Shutdown() deadlock (#17898) Updates #17894 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 2646a0cbfee6e..868d5f61a2fa7 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -246,10 +246,13 @@ func (e *extension) stopRelayServerLocked() { // Shutdown implements [ipnlocal.Extension]. func (e *extension) Shutdown() error { + // [extension.mu] must not be held when closing the [eventbus.Client]. Close + // blocks until all [eventbus.SubscribeFunc]'s have returned, and the ones + // used in this package also acquire [extension.mu]. See #17894. + e.ec.Close() e.mu.Lock() defer e.mu.Unlock() e.shutdown = true - e.ec.Close() e.stopRelayServerLocked() return nil } From 052602752f57dd2dc273f65811a0946a6c575bda Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 14 Nov 2025 08:39:32 -0800 Subject: [PATCH 0678/1093] control/controlclient: make Observer optional As a baby step towards eventbus-ifying controlclient, make the Observer optional. This also means callers that don't care (like this network lock test, and some tests in other repos) can omit it, rather than passing in a no-op one. 
Updates #12639 Change-Id: Ibd776b45b4425c08db19405bc3172b238e87da4e Signed-off-by: Brad Fitzpatrick --- control/controlclient/auto.go | 10 ++++++---- control/controlclient/direct.go | 4 ++++ ipn/ipnlocal/network-lock_test.go | 7 ------- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 40b02a598f865..50248a647bec2 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -117,7 +117,7 @@ type Auto struct { logf logger.Logf closed bool updateCh chan struct{} // readable when we should inform the server of a change - observer Observer // called to update Client status; always non-nil + observer Observer // if non-nil, called to update Client status observerQueue execqueue.ExecQueue shutdownFn func() // to be called prior to shutdown or nil @@ -170,9 +170,6 @@ func NewNoStart(opts Options) (_ *Auto, err error) { } }() - if opts.Observer == nil { - return nil, errors.New("missing required Options.Observer") - } if opts.Logf == nil { opts.Logf = func(fmt string, args ...any) {} } @@ -609,6 +606,11 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM Err: err, state: state, } + + if c.observer == nil { + return + } + c.lastStatus.Store(newSt) // Launch a new goroutine to avoid blocking the caller while the observer diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 63a12b2495fd8..d30db61918ef0 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -115,6 +115,9 @@ type Direct struct { // Observer is implemented by users of the control client (such as LocalBackend) // to get notified of changes in the control client's status. +// +// If an implementation of Observer also implements [NetmapDeltaUpdater], they get +// delta updates as well as full netmap updates. type Observer interface { // SetControlClientStatus is called when the client has a new status to // report. 
The Client is provided to allow the Observer to track which @@ -145,6 +148,7 @@ type Options struct { // Observer is called when there's a change in status to report // from the control client. + // If nil, no status updates are reported. Observer Observer // SkipIPForwardingCheck declares that the host's IP diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index c7c4c905f5ca1..17040fef3e187 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -41,12 +41,6 @@ import ( "tailscale.com/util/set" ) -type observerFunc func(controlclient.Status) - -func (f observerFunc) SetControlClientStatus(_ controlclient.Client, s controlclient.Status) { - f(s) -} - func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *eventbus.Bus) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} @@ -64,7 +58,6 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even }, HTTPTestClient: c, NoiseTestClient: c, - Observer: observerFunc(func(controlclient.Status) {}), Dialer: dialer, Bus: bus, } From 208a32af5b6c306afb797edd793677f3a6db3306 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Fri, 14 Nov 2025 11:59:09 -0500 Subject: [PATCH 0679/1093] logpolicy: fix nil pointer dereference with invalid TS_LOG_TARGET When TS_LOG_TARGET is set to an invalid URL, url.Parse returns an error and nil pointer, which caused a panic when accessing u.Host. Now we check the error from url.Parse and log a helpful message while falling back to the default log host. 
Fixes #17792 Signed-off-by: Andrew Dunham --- logpolicy/logpolicy.go | 14 ++++++++---- logpolicy/logpolicy_test.go | 44 +++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 9c7e62ab0da11..26858b7132ef6 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -640,10 +640,16 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { logHost := logtail.DefaultHost if val := getLogTarget(); val != "" { - opts.Logf("You have enabled a non-default log target. Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") - conf.BaseURL = val - u, _ := url.Parse(val) - logHost = u.Host + u, err := url.Parse(val) + if err != nil { + opts.Logf("logpolicy: invalid TS_LOG_TARGET %q: %v; using default log host", val, err) + } else if u.Host == "" { + opts.Logf("logpolicy: invalid TS_LOG_TARGET %q: missing host; using default log host", val) + } else { + opts.Logf("You have enabled a non-default log target. 
Doing without being told to by Tailscale staff or your network administrator will make getting support difficult.") + conf.BaseURL = val + logHost = u.Host + } } if conf.HTTPC == nil { diff --git a/logpolicy/logpolicy_test.go b/logpolicy/logpolicy_test.go index 28f03448a225d..c09e590bb8399 100644 --- a/logpolicy/logpolicy_test.go +++ b/logpolicy/logpolicy_test.go @@ -84,3 +84,47 @@ func TestOptions(t *testing.T) { }) } } + +// TestInvalidLogTarget is a test for #17792 +func TestInvalidLogTarget(t *testing.T) { + defer resetLogTarget() + + tests := []struct { + name string + logTarget string + }{ + { + name: "invalid_url_no_scheme", + logTarget: "not a url at all", + }, + { + name: "malformed_url", + logTarget: "ht!tp://invalid", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetLogTarget() + os.Setenv("TS_LOG_TARGET", tt.logTarget) + + opts := Options{ + Collection: "test.log.tailscale.io", + Logf: t.Logf, + } + + // This should not panic even with invalid log target + config, policy := opts.init(false) + if policy == nil { + t.Fatal("expected non-nil policy") + } + defer policy.Close() + + // When log target is invalid, it should fall back to the invalid value + // but not crash. BaseURL should remain empty + if config.BaseURL != "" { + t.Errorf("got BaseURL=%q, want empty", config.BaseURL) + } + }) + } +} From ce10f7c14cdfc9bdc1c1b26efd7f79d669968a32 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 14 Nov 2025 10:58:53 -0800 Subject: [PATCH 0680/1093] wgengine/wgcfg/nmcfg: reduce wireguard reconfig log spam On the corp tailnet (using Mullvad exit nodes + bunch of expired devices + subnet routers), these were generating big ~35 KB blobs of logging regularly. This logging shouldn't even exist at this level, and should be rate limited at a higher level, but for now as a bandaid, make it less spammy. 
Updates #cleanup Change-Id: I0b5e9e6e859f13df5f982cd71cd5af85b73f0c0a Signed-off-by: Brad Fitzpatrick --- wgengine/wgcfg/nmcfg/nmcfg.go | 75 +++++++++++++++-------------------- 1 file changed, 31 insertions(+), 44 deletions(-) diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index 08b162730804f..28d5345d6108c 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -5,7 +5,8 @@ package nmcfg import ( - "bytes" + "bufio" + "cmp" "fmt" "net/netip" "strings" @@ -18,16 +19,7 @@ import ( ) func nodeDebugName(n tailcfg.NodeView) string { - name := n.Name() - if name == "" { - name = n.Hostinfo().Hostname() - } - if i := strings.Index(name, "."); i != -1 { - name = name[:i] - } - if name == "" && n.Addresses().Len() != 0 { - return n.Addresses().At(0).String() - } + name, _, _ := strings.Cut(cmp.Or(n.Name(), n.Hostinfo().Hostname()), ".") return name } @@ -77,10 +69,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, } } - // Logging buffers - skippedUnselected := new(bytes.Buffer) - skippedSubnets := new(bytes.Buffer) - skippedExpired := new(bytes.Buffer) + var skippedExitNode, skippedSubnetRouter, skippedExpired []tailcfg.NodeView for _, peer := range nm.Peers { if peer.DiscoKey().IsZero() && peer.HomeDERP() == 0 && !peer.IsWireGuardOnly() { @@ -93,16 +82,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, // anyway, since control intentionally breaks node keys for // expired peers so that we can't discover endpoints via DERP. 
if peer.Expired() { - if skippedExpired.Len() >= 1<<10 { - if !bytes.HasSuffix(skippedExpired.Bytes(), []byte("...")) { - skippedExpired.WriteString("...") - } - } else { - if skippedExpired.Len() > 0 { - skippedExpired.WriteString(", ") - } - fmt.Fprintf(skippedExpired, "%s/%v", peer.StableID(), peer.Key().ShortString()) - } + skippedExpired = append(skippedExpired, peer) continue } @@ -112,28 +92,22 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, }) cpeer := &cfg.Peers[len(cfg.Peers)-1] - didExitNodeWarn := false + didExitNodeLog := false cpeer.V4MasqAddr = peer.SelfNodeV4MasqAddrForThisPeer().Clone() cpeer.V6MasqAddr = peer.SelfNodeV6MasqAddrForThisPeer().Clone() cpeer.IsJailed = peer.IsJailed() for _, allowedIP := range peer.AllowedIPs().All() { if allowedIP.Bits() == 0 && peer.StableID() != exitNode { - if didExitNodeWarn { + if didExitNodeLog { // Don't log about both the IPv4 /0 and IPv6 /0. continue } - didExitNodeWarn = true - if skippedUnselected.Len() > 0 { - skippedUnselected.WriteString(", ") - } - fmt.Fprintf(skippedUnselected, "%q (%v)", nodeDebugName(peer), peer.Key().ShortString()) + didExitNodeLog = true + skippedExitNode = append(skippedExitNode, peer) continue } else if cidrIsSubnet(peer, allowedIP) { if (flags & netmap.AllowSubnetRoutes) == 0 { - if skippedSubnets.Len() > 0 { - skippedSubnets.WriteString(", ") - } - fmt.Fprintf(skippedSubnets, "%v from %q (%v)", allowedIP, nodeDebugName(peer), peer.Key().ShortString()) + skippedSubnetRouter = append(skippedSubnetRouter, peer) continue } } @@ -141,14 +115,27 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, } } - if skippedUnselected.Len() > 0 { - logf("[v1] wgcfg: skipped unselected default routes from: %s", skippedUnselected.Bytes()) - } - if skippedSubnets.Len() > 0 { - logf("[v1] wgcfg: did not accept subnet routes: %s", skippedSubnets) - } - if skippedExpired.Len() > 0 { - logf("[v1] wgcfg: skipped expired peer: %s", 
skippedExpired) + logList := func(title string, nodes []tailcfg.NodeView) { + if len(nodes) == 0 { + return + } + logf("[v1] wgcfg: %s from %d nodes: %s", title, len(nodes), logger.ArgWriter(func(bw *bufio.Writer) { + const max = 5 + for i, n := range nodes { + if i == max { + fmt.Fprintf(bw, "... +%d", len(nodes)-max) + return + } + if i > 0 { + bw.WriteString(", ") + } + fmt.Fprintf(bw, "%s (%s)", nodeDebugName(n), n.StableID()) + } + })) } + logList("skipped unselected exit nodes", skippedExitNode) + logList("did not accept subnet routes", skippedSubnetRouter) + logList("skipped expired peers", skippedExpired) + return cfg, nil } From ab4b990d51c41aff8e1ae7a08435dedfe621ce0d Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 14 Nov 2025 11:57:41 -0800 Subject: [PATCH 0681/1093] net/netmon: do not abandon a subscriber when exiting early (#17899) LinkChangeLogLimiter keeps a subscription to track rate limits for log messages. But when its context ended, it would exit the subscription loop, leaving the subscriber still alive. Ensure the subscriber gets cleaned up when the context ends, so we don't stall event processing. Updates tailscale/corp#34311 Change-Id: I82749e482e9a00dfc47f04afbc69dd0237537cb2 Signed-off-by: M. J. Fromberger --- net/netmon/loghelper.go | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go index 2e28e8cda7895..675762cd10b18 100644 --- a/net/netmon/loghelper.go +++ b/net/netmon/loghelper.go @@ -18,13 +18,13 @@ import ( // done. func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) logger.Logf { var formatSeen sync.Map // map[string]bool - nm.b.Monitor(nm.changeDeltaWatcher(nm.b, ctx, func(cd ChangeDelta) { + sub := eventbus.SubscribeFunc(nm.b, func(cd ChangeDelta) { // If we're in a major change or a time jump, clear the seen map. 
if cd.Major || cd.TimeJumped { formatSeen.Clear() } - })) - + }) + context.AfterFunc(ctx, sub.Close) return func(format string, args ...any) { // We only store 'true' in the map, so if it's present then it // means we've already logged this format string. @@ -42,19 +42,3 @@ func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) lo logf(format, args...) } } - -func (nm *Monitor) changeDeltaWatcher(ec *eventbus.Client, ctx context.Context, fn func(ChangeDelta)) func(*eventbus.Client) { - sub := eventbus.Subscribe[ChangeDelta](ec) - return func(ec *eventbus.Client) { - for { - select { - case <-ctx.Done(): - return - case <-sub.Done(): - return - case change := <-sub.Events(): - fn(change) - } - } - } -} From bd36817e842d7d9651fc7d6b2c6781d0eb0b56e6 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Fri, 14 Nov 2025 12:05:48 -0800 Subject: [PATCH 0682/1093] scripts/installer.sh: compare major versions numerically (#17904) Most /etc/os-release files set the VERSION_ID to a `MAJOR.MINOR` string, but we were trying to compare this numerically against a major version number. I can only assume that Linux Mint used switched from a plain integer, since shells only do integer comparisons. This patch extracts a VERSION_MAJOR from the VERSION_ID using parameter expansion and unifies all the other ad-hoc comparisons to use it. Fixes #15841 Signed-off-by: Simon Law Co-authored-by: Xavier --- scripts/installer.sh | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index b40177005821b..e5b6cd23bc9a7 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -42,6 +42,8 @@ main() { # - VERSION_CODENAME: the codename of the OS release, if any (e.g. "buster") # - UBUNTU_CODENAME: if it exists, use instead of VERSION_CODENAME . 
/etc/os-release + VERSION_MAJOR="${VERSION_ID:-}" + VERSION_MAJOR="${VERSION_MAJOR%%.*}" case "$ID" in ubuntu|pop|neon|zorin|tuxedo) OS="ubuntu" @@ -53,10 +55,10 @@ main() { PACKAGETYPE="apt" # Third-party keyrings became the preferred method of # installation in Ubuntu 20.04. - if expr "$VERSION_ID" : "2.*" >/dev/null; then - APT_KEY_TYPE="keyring" - else + if [ "$VERSION_MAJOR" -lt 20 ]; then APT_KEY_TYPE="legacy" + else + APT_KEY_TYPE="keyring" fi ;; debian) @@ -76,7 +78,7 @@ main() { # They don't specify the Debian version they're based off in os-release # but Parrot 6 is based on Debian 12 Bookworm. VERSION=bookworm - elif [ "$VERSION_ID" -lt 11 ]; then + elif [ "$VERSION_MAJOR" -lt 11 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -94,7 +96,7 @@ main() { VERSION="$VERSION_CODENAME" fi PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 5 ]; then + if [ "$VERSION_MAJOR" -lt 5 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -104,7 +106,7 @@ main() { OS="ubuntu" VERSION="$UBUNTU_CODENAME" PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 6 ]; then + if [ "$VERSION_MAJOR" -lt 6 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -113,7 +115,7 @@ main() { industrial-os) OS="debian" PACKAGETYPE="apt" - if [ "$(printf %.1s "$VERSION_ID")" -lt 5 ]; then + if [ "$VERSION_MAJOR" -lt 5 ]; then VERSION="buster" APT_KEY_TYPE="legacy" else @@ -124,7 +126,7 @@ main() { parrot|mendel) OS="debian" PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 5 ]; then + if [ "$VERSION_MAJOR" -lt 5 ]; then VERSION="buster" APT_KEY_TYPE="legacy" else @@ -150,7 +152,7 @@ main() { PACKAGETYPE="apt" # Third-party keyrings became the preferred method of # installation in Raspbian 11 (Bullseye). 
- if [ "$VERSION_ID" -lt 11 ]; then + if [ "$VERSION_MAJOR" -lt 11 ]; then APT_KEY_TYPE="legacy" else APT_KEY_TYPE="keyring" @@ -159,12 +161,11 @@ main() { kali) OS="debian" PACKAGETYPE="apt" - YEAR="$(echo "$VERSION_ID" | cut -f1 -d.)" APT_SYSTEMCTL_START=true # Third-party keyrings became the preferred method of # installation in Debian 11 (Bullseye), which Kali switched # to in roughly 2021.x releases - if [ "$YEAR" -lt 2021 ]; then + if [ "$VERSION_MAJOR" -lt 2021 ]; then # Kali VERSION_ID is "kali-rolling", which isn't distinguishing VERSION="buster" APT_KEY_TYPE="legacy" @@ -176,7 +177,7 @@ main() { Deepin|deepin) # https://github.com/tailscale/tailscale/issues/7862 OS="debian" PACKAGETYPE="apt" - if [ "$VERSION_ID" -lt 20 ]; then + if [ "$VERSION_MAJOR" -lt 20 ]; then APT_KEY_TYPE="legacy" VERSION="buster" else @@ -189,7 +190,7 @@ main() { # All versions of PikaOS are new enough to prefer keyring APT_KEY_TYPE="keyring" # Older versions of PikaOS are based on Ubuntu rather than Debian - if [ "$VERSION_ID" -lt 4 ]; then + if [ "$VERSION_MAJOR" -lt 4 ]; then OS="ubuntu" VERSION="$UBUNTU_CODENAME" else @@ -205,7 +206,7 @@ main() { ;; centos) OS="$ID" - VERSION="$VERSION_ID" + VERSION="$VERSION_MAJOR" PACKAGETYPE="dnf" if [ "$VERSION" = "7" ]; then PACKAGETYPE="yum" @@ -213,7 +214,7 @@ main() { ;; ol) OS="oracle" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="dnf" if [ "$VERSION" = "7" ]; then PACKAGETYPE="yum" @@ -224,7 +225,7 @@ main() { if [ "$ID" = "miraclelinux" ]; then OS="rhel" fi - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="dnf" if [ "$VERSION" = "7" ]; then PACKAGETYPE="yum" @@ -247,7 +248,7 @@ main() { ;; xenenterprise) OS="centos" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="yum" ;; opensuse-leap|sles) @@ -311,7 +312,7 @@ main() { ;; freebsd) OS="$ID" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" 
PACKAGETYPE="pkg" ;; osmc) @@ -322,7 +323,7 @@ main() { ;; photon) OS="photon" - VERSION="$(echo "$VERSION_ID" | cut -f1 -d.)" + VERSION="$VERSION_MAJOR" PACKAGETYPE="tdnf" ;; From 91344400082af271904e08e8f81654979d3d9c10 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 14 Nov 2025 17:33:55 +0000 Subject: [PATCH 0683/1093] various: adds missing apostrophes to comments Updates #cleanup Change-Id: I7bf29cc153c3c04e087f9bdb146c3437bed0129a Signed-off-by: Alex Chan --- cmd/sniproxy/sniproxy_test.go | 8 ++++---- ipn/ipnlocal/network-lock_test.go | 2 +- tka/aum.go | 2 +- tka/builder.go | 2 +- tka/key_test.go | 4 ++-- tka/scenario_test.go | 2 +- tka/sig.go | 2 +- tka/sig_test.go | 4 ++-- tka/state.go | 2 +- tka/sync.go | 6 +++--- tka/sync_test.go | 2 +- tka/tka.go | 16 ++++++++-------- tka/verify.go | 2 +- 13 files changed, 27 insertions(+), 27 deletions(-) diff --git a/cmd/sniproxy/sniproxy_test.go b/cmd/sniproxy/sniproxy_test.go index cd2e070bd336f..07fbd2eceb839 100644 --- a/cmd/sniproxy/sniproxy_test.go +++ b/cmd/sniproxy/sniproxy_test.go @@ -152,7 +152,7 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) { configCapKey: []tailcfg.RawMessage{tailcfg.RawMessage(b)}, }) - // Lets spin up a second node (to represent the client). + // Let's spin up a second node (to represent the client). client, _, _ := startNode(t, ctx, controlURL, "client") // Make sure that the sni node has received its config. @@ -176,7 +176,7 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) { t.Error("sni node never received its configuration from the coordination server!") } - // Lets make the client open a connection to the sniproxy node, and + // Let's make the client open a connection to the sniproxy node, and // make sure it results in a connection to our test listener. 
w, err := client.Dial(ctx, "tcp", fmt.Sprintf("%s:%d", ip, ln.Addr().(*net.TCPAddr).Port)) if err != nil { @@ -208,10 +208,10 @@ func TestSNIProxyWithFlagConfig(t *testing.T) { sni, _, ip := startNode(t, ctx, controlURL, "snitest") go run(ctx, sni, 0, sni.Hostname, false, 0, "", fmt.Sprintf("tcp/%d/localhost", ln.Addr().(*net.TCPAddr).Port)) - // Lets spin up a second node (to represent the client). + // Let's spin up a second node (to represent the client). client, _, _ := startNode(t, ctx, controlURL, "client") - // Lets make the client open a connection to the sniproxy node, and + // Let's make the client open a connection to the sniproxy node, and // make sure it results in a connection to our test listener. w, err := client.Dial(ctx, "tcp", fmt.Sprintf("%s:%d", ip, ln.Addr().(*net.TCPAddr).Port)) if err != nil { diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 17040fef3e187..00d4ff6d9f11a 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -528,7 +528,7 @@ func TestTKASync(t *testing.T) { }, } - // Finally, lets trigger a sync. + // Finally, let's trigger a sync. err = b.tkaSyncIfNeeded(&netmap.NetworkMap{ TKAEnabled: true, TKAHead: controlAuthority.Head(), diff --git a/tka/aum.go b/tka/aum.go index 08d70897ee70f..6d75830bd293f 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -226,7 +226,7 @@ func (a *AUM) Serialize() tkatype.MarshaledAUM { // Further, experience with other attempts (JWS/JWT,SAML,X509 etc) has // taught us that even subtle behaviors such as how you handle invalid // or unrecognized fields + any invariants in subsequent re-serialization - // can easily lead to security-relevant logic bugs. Its certainly possible + // can easily lead to security-relevant logic bugs. It's certainly possible // to invent a workable scheme by massaging a JSON parsing library, though // profoundly unwise. 
// diff --git a/tka/builder.go b/tka/builder.go index 199cec06d8b64..ab2364d856ee2 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -114,7 +114,7 @@ func (b *UpdateBuilder) generateCheckpoint() error { } } - // Checkpoints cant specify a parent AUM. + // Checkpoints can't specify a parent AUM. state.LastAUMHash = nil return b.mkUpdate(AUM{MessageKind: AUMCheckpoint, State: &state}) } diff --git a/tka/key_test.go b/tka/key_test.go index e912f89c4f7eb..fc379e246ad32 100644 --- a/tka/key_test.go +++ b/tka/key_test.go @@ -42,7 +42,7 @@ func TestVerify25519(t *testing.T) { aum := AUM{ MessageKind: AUMRemoveKey, KeyID: []byte{1, 2, 3, 4}, - // Signatures is set to crap so we are sure its ignored in the sigHash computation. + // Signatures is set to crap so we are sure it's ignored in the sigHash computation. Signatures: []tkatype.Signature{{KeyID: []byte{45, 42}}}, } sigHash := aum.SigHash() @@ -89,7 +89,7 @@ func TestNLPrivate(t *testing.T) { t.Error("signature did not verify") } - // We manually compute the keyID, so make sure its consistent with + // We manually compute the keyID, so make sure it's consistent with // tka.Key.ID(). if !bytes.Equal(k.MustID(), p.KeyID()) { t.Errorf("private.KeyID() & tka KeyID differ: %x != %x", k.MustID(), p.KeyID()) diff --git a/tka/scenario_test.go b/tka/scenario_test.go index 89a8111e18cef..a0361a130dcc6 100644 --- a/tka/scenario_test.go +++ b/tka/scenario_test.go @@ -204,7 +204,7 @@ func TestNormalPropagation(t *testing.T) { `) control := s.mkNode("control") - // Lets say theres a node with some updates! + // Let's say there's a node with some updates! n1 := s.mkNodeWithForks("n1", true, map[string]*testChain{ "L2": newTestchain(t, `L3 -> L4`), }) diff --git a/tka/sig.go b/tka/sig.go index 7b1838d409130..46d598ad97b47 100644 --- a/tka/sig.go +++ b/tka/sig.go @@ -277,7 +277,7 @@ func (s *NodeKeySignature) verifySignature(nodeKey key.NodePublic, verificationK // Recurse to verify the signature on the nested structure. 
var nestedPub key.NodePublic // SigCredential signatures certify an indirection key rather than a node - // key, so theres no need to check the node key. + // key, so there's no need to check the node key. if s.Nested.SigKind != SigCredential { if err := nestedPub.UnmarshalBinary(s.Nested.Pubkey); err != nil { return fmt.Errorf("nested pubkey: %v", err) diff --git a/tka/sig_test.go b/tka/sig_test.go index 99c25f8e57ae6..2fafb0436de1f 100644 --- a/tka/sig_test.go +++ b/tka/sig_test.go @@ -119,7 +119,7 @@ func TestSigNested(t *testing.T) { } // Test verification fails if the outer signature is signed with a - // different public key to whats specified in WrappingPubkey + // different public key to what's specified in WrappingPubkey sig.Signature = ed25519.Sign(priv, sigHash[:]) if err := sig.verifySignature(node.Public(), k); err == nil { t.Error("verifySignature(node) succeeded with different signature") @@ -275,7 +275,7 @@ func TestSigCredential(t *testing.T) { } // Test verification fails if the outer signature is signed with a - // different public key to whats specified in WrappingPubkey + // different public key to what's specified in WrappingPubkey sig.Signature = ed25519.Sign(priv, sigHash[:]) if err := sig.verifySignature(node.Public(), k); err == nil { t.Error("verifySignature(node) succeeded with different signature") diff --git a/tka/state.go b/tka/state.go index 0a30c56a02fa8..95a319bd9bd7d 100644 --- a/tka/state.go +++ b/tka/state.go @@ -140,7 +140,7 @@ func (s State) checkDisablement(secret []byte) bool { // Specifically, the rules are: // - The last AUM hash must match (transitively, this implies that this // update follows the last update message applied to the state machine) -// - Or, the state machine knows no parent (its brand new). +// - Or, the state machine knows no parent (it's brand new). 
func (s State) parentMatches(update AUM) bool { if s.LastAUMHash == nil { return true diff --git a/tka/sync.go b/tka/sync.go index 6c2b7cbb8c81a..e3a858c155347 100644 --- a/tka/sync.go +++ b/tka/sync.go @@ -54,7 +54,7 @@ const ( // can then be applied locally with Inform(). // // This SyncOffer + AUM exchange should be performed by both ends, -// because its possible that either end has AUMs that the other needs +// because it's possible that either end has AUMs that the other needs // to find out about. func (a *Authority) SyncOffer(storage Chonk) (SyncOffer, error) { oldest := a.oldestAncestor.Hash() @@ -123,7 +123,7 @@ func computeSyncIntersection(storage Chonk, localOffer, remoteOffer SyncOffer) ( } // Case: 'head intersection' - // If we have the remote's head, its more likely than not that + // If we have the remote's head, it's more likely than not that // we have updates that build on that head. To confirm this, // we iterate backwards through our chain to see if the given // head is an ancestor of our current chain. @@ -165,7 +165,7 @@ func computeSyncIntersection(storage Chonk, localOffer, remoteOffer SyncOffer) ( // Case: 'tail intersection' // So we don't have a clue what the remote's head is, but // if one of the ancestors they gave us is part of our chain, - // then theres an intersection, which is a starting point for + // then there's an intersection, which is a starting point for // the remote to send us AUMs from. // // We iterate the list of ancestors in order because the remote diff --git a/tka/sync_test.go b/tka/sync_test.go index 7250eacf7d143..f9d86c16a9e0c 100644 --- a/tka/sync_test.go +++ b/tka/sync_test.go @@ -357,7 +357,7 @@ func TestSyncSimpleE2E(t *testing.T) { t.Fatalf("control Open() failed: %v", err) } - // Control knows the full chain, node only knows the genesis. Lets see + // Control knows the full chain, node only knows the genesis. Let's see // if they can sync. 
nodeOffer, err := node.SyncOffer(nodeStorage) if err != nil { diff --git a/tka/tka.go b/tka/tka.go index c34e35e7b11ee..9dce74e9a8046 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -94,7 +94,7 @@ func computeChainCandidates(storage Chonk, lastKnownOldest *AUMHash, maxIter int // candidates.Oldest needs to be computed by working backwards from // head as far as we can. - iterAgain := true // if theres still work to be done. + iterAgain := true // if there's still work to be done. for i := 0; iterAgain; i++ { if i >= maxIter { return nil, fmt.Errorf("iteration limit exceeded (%d)", maxIter) @@ -295,7 +295,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) } // If we got here, the current state is dependent on the previous. - // Keep iterating backwards till thats not the case. + // Keep iterating backwards till that's not the case. if curs, err = storage.AUM(parent); err != nil { return State{}, fmt.Errorf("reading parent (%v): %v", parent, err) } @@ -324,7 +324,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) return curs.Hash() == wantHash }) // fastForward only terminates before the done condition if it - // doesnt have any later AUMs to process. This cant be the case + // doesn't have any later AUMs to process. This can't be the case // as we've already iterated through them above so they must exist, // but we check anyway to be super duper sure. if err == nil && *state.LastAUMHash != wantHash { @@ -336,7 +336,7 @@ func computeStateAt(storage Chonk, maxIter int, wantHash AUMHash) (State, error) // computeActiveAncestor determines which ancestor AUM to use as the // ancestor of the valid chain. // -// If all the chains end up having the same ancestor, then thats the +// If all the chains end up having the same ancestor, then that's the // only possible ancestor, ezpz. 
However if there are multiple distinct // ancestors, that means there are distinct chains, and we need some // hint to choose what to use. For that, we rely on the chainsThroughActive @@ -357,7 +357,7 @@ func computeActiveAncestor(chains []chain) (AUMHash, error) { } } - // Theres more than one, so we need to use the ancestor that was + // There's more than one, so we need to use the ancestor that was // part of the active chain in a previous iteration. // Note that there can only be one distinct ancestor that was // formerly part of the active chain, because AUMs can only have @@ -479,7 +479,7 @@ func (a *Authority) Head() AUMHash { // Open initializes an existing TKA from the given tailchonk. // // Only use this if the current node has initialized an Authority before. -// If a TKA exists on other nodes but theres nothing locally, use Bootstrap(). +// If a TKA exists on other nodes but there's nothing locally, use Bootstrap(). // If no TKA exists anywhere and you are creating it for the first // time, use New(). func Open(storage Chonk) (*Authority, error) { @@ -592,14 +592,14 @@ func (a *Authority) InformIdempotent(storage Chonk, updates []AUM) (Authority, e toCommit := make([]AUM, 0, len(updates)) prevHash := a.Head() - // The state at HEAD is the current state of the authority. Its likely + // The state at HEAD is the current state of the authority. It's likely // to be needed, so we prefill it rather than computing it. stateAt[prevHash] = a.state // Optimization: If the set of updates is a chain building from // the current head, EG: // ==> updates[0] ==> updates[1] ... - // Then theres no need to recompute the resulting state from the + // Then there's no need to recompute the resulting state from the // stored ancestor, because the last state computed during iteration // is the new state. This should be the common case. // isHeadChain keeps track of this. 
diff --git a/tka/verify.go b/tka/verify.go index e4e22e5518e8b..ed0ecea669817 100644 --- a/tka/verify.go +++ b/tka/verify.go @@ -18,7 +18,7 @@ import ( // provided AUM BLAKE2s digest, using the given key. func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { // NOTE(tom): Even if we can compute the public from the KeyID, - // its possible for the KeyID to be attacker-controlled + // it's possible for the KeyID to be attacker-controlled // so we should use the public contained in the state machine. switch key.Kind { case Key25519: From 888a5d4812c97a818c4cc041a3f97aae8bd81afc Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 14 Nov 2025 12:58:53 -0800 Subject: [PATCH 0684/1093] ipn/localapi: use constant-time comparison for RequiredPassword (#17906) Updates #cleanup Signed-off-by: Andrew Lytvynov --- ipn/localapi/localapi.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 9e7c16891fc20..de5ff53ac9d83 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -7,6 +7,7 @@ package localapi import ( "bytes" "cmp" + "crypto/subtle" "encoding/json" "errors" "fmt" @@ -257,7 +258,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, "auth required", http.StatusUnauthorized) return } - if pass != h.RequiredPassword { + if subtle.ConstantTimeCompare([]byte(pass), []byte(h.RequiredPassword)) == 0 { metricInvalidRequests.Add(1) http.Error(w, "bad password", http.StatusForbidden) return From c5919b4ed1f38374d51fe9e92ef57e322c31c875 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 14 Nov 2025 13:23:25 -0800 Subject: [PATCH 0685/1093] feature/tpm: check IsZero in clone instead of just nil (#17884) The key.NewEmptyHardwareAttestationKey hook returns a non-nil empty attestationKey, which means that the nil check in Clone doesn't trigger and proceeds to try and clone an empty key. 
Check IsZero instead to reduce log spam from Clone. As a drive-by, make tpmAvailable check a sync.Once because the result won't change. Updates #17882 Signed-off-by: Andrew Lytvynov --- feature/tpm/attestation.go | 2 +- feature/tpm/tpm.go | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 597d4a6493829..49b80ade1e410 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -274,7 +274,7 @@ func (ak *attestationKey) Close() error { } func (ak *attestationKey) Clone() key.HardwareAttestationKey { - if ak == nil { + if ak.IsZero() { return nil } diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 4b27a241fa255..7cbdec088de04 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -35,12 +35,15 @@ import ( "tailscale.com/util/testenv" ) -var infoOnce = sync.OnceValue(info) +var ( + infoOnce = sync.OnceValue(info) + tpmSupportedOnce = sync.OnceValue(tpmSupported) +) func init() { feature.Register("tpm") - feature.HookTPMAvailable.Set(tpmSupported) - feature.HookHardwareAttestationAvailable.Set(tpmSupported) + feature.HookTPMAvailable.Set(tpmSupportedOnce) + feature.HookHardwareAttestationAvailable.Set(tpmSupportedOnce) hostinfo.RegisterHostinfoNewHook(func(hi *tailcfg.Hostinfo) { hi.TPM = infoOnce() From a96ef432cfe36ef2d8c63fee7ab9c7cb95c39708 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Wed, 12 Nov 2025 16:40:23 -0800 Subject: [PATCH 0686/1093] control/controlclient,ipn/ipnlocal: replace State enum with boolean flags Remove the State enum (StateNew, StateNotAuthenticated, etc.) from controlclient and replace it with two explicit boolean fields: - LoginFinished: indicates successful authentication - Synced: indicates we've received at least one netmap This makes the state more composable and easier to reason about, as multiple conditions can be true independently rather than being encoded in a single enum value. 
The State enum was originally intended as the state machine for the whole client, but that abstraction moved to ipn.Backend long ago. This change continues moving away from the legacy state machine by representing state as a combination of independent facts. Also adds test helpers in ipnlocal that check independent, observable facts (hasValidNetMap, needsLogin, etc.) rather than relying on derived state enums, making tests more robust. Updates #12639 Signed-off-by: James Tucker --- control/controlclient/auto.go | 52 ++--- control/controlclient/controlclient_test.go | 28 ++- control/controlclient/status.go | 90 +------- ipn/ipnlocal/local.go | 6 +- ipn/ipnlocal/state_test.go | 241 ++++++++++++++------ 5 files changed, 224 insertions(+), 193 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 50248a647bec2..9d648409b4c47 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -138,7 +138,6 @@ type Auto struct { loggedIn bool // true if currently logged in loginGoal *LoginGoal // non-nil if some login activity is desired inMapPoll bool // true once we get the first MapResponse in a stream; false when HTTP response ends - state State // TODO(bradfitz): delete this, make it computed by method from other state authCtx context.Context // context used for auth requests mapCtx context.Context // context used for netmap and update requests @@ -296,10 +295,11 @@ func (c *Auto) authRoutine() { c.mu.Lock() goal := c.loginGoal ctx := c.authCtx + loggedIn := c.loggedIn if goal != nil { - c.logf("[v1] authRoutine: %s; wantLoggedIn=%v", c.state, true) + c.logf("[v1] authRoutine: loggedIn=%v; wantLoggedIn=%v", loggedIn, true) } else { - c.logf("[v1] authRoutine: %s; goal=nil paused=%v", c.state, c.paused) + c.logf("[v1] authRoutine: loggedIn=%v; goal=nil paused=%v", loggedIn, c.paused) } c.mu.Unlock() @@ -322,11 +322,6 @@ func (c *Auto) authRoutine() { c.mu.Lock() c.urlToVisit = goal.url - if goal.url != "" { - 
c.state = StateURLVisitRequired - } else { - c.state = StateAuthenticating - } c.mu.Unlock() var url string @@ -360,7 +355,6 @@ func (c *Auto) authRoutine() { flags: LoginDefault, url: url, } - c.state = StateURLVisitRequired c.mu.Unlock() c.sendStatus("authRoutine-url", err, url, nil) @@ -380,7 +374,6 @@ func (c *Auto) authRoutine() { c.urlToVisit = "" c.loggedIn = true c.loginGoal = nil - c.state = StateAuthenticated c.mu.Unlock() c.sendStatus("authRoutine-success", nil, "", nil) @@ -431,12 +424,9 @@ func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) { c.mu.Lock() c.inMapPoll = true - if c.loggedIn { - c.state = StateSynchronized - } c.expiry = nm.Expiry stillAuthed := c.loggedIn - c.logf("[v1] mapRoutine: netmap received: %s", c.state) + c.logf("[v1] mapRoutine: netmap received: loggedIn=%v inMapPoll=true", stillAuthed) c.mu.Unlock() if stillAuthed { @@ -484,8 +474,8 @@ func (c *Auto) mapRoutine() { } c.mu.Lock() - c.logf("[v1] mapRoutine: %s", c.state) loggedIn := c.loggedIn + c.logf("[v1] mapRoutine: loggedIn=%v", loggedIn) ctx := c.mapCtx c.mu.Unlock() @@ -516,9 +506,6 @@ func (c *Auto) mapRoutine() { c.direct.health.SetOutOfPollNetMap() c.mu.Lock() c.inMapPoll = false - if c.state == StateSynchronized { - c.state = StateAuthenticated - } paused := c.paused c.mu.Unlock() @@ -584,12 +571,12 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM c.mu.Unlock() return } - state := c.state loggedIn := c.loggedIn inMapPoll := c.inMapPoll + loginGoal := c.loginGoal c.mu.Unlock() - c.logf("[v1] sendStatus: %s: %v", who, state) + c.logf("[v1] sendStatus: %s: loggedIn=%v inMapPoll=%v", who, loggedIn, inMapPoll) var p persist.PersistView if nm != nil && loggedIn && inMapPoll { @@ -600,11 +587,12 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM nm = nil } newSt := &Status{ - URL: url, - Persist: p, - NetMap: nm, - Err: err, - state: state, + URL: url, + Persist: p, + NetMap: nm, + Err: err, + 
LoggedIn: loggedIn && loginGoal == nil, + InMapPoll: inMapPoll, } if c.observer == nil { @@ -667,14 +655,15 @@ func canSkipStatus(s1, s2 *Status) bool { // we can't skip it. return false } - if s1.Err != nil || s1.URL != "" { - // If s1 has an error or a URL, we shouldn't skip it, lest the error go - // away in s2 or in-between. We want to make sure all the subsystems see - // it. Plus there aren't many of these, so not worth skipping. + if s1.Err != nil || s1.URL != "" || s1.LoggedIn { + // If s1 has an error, a URL, or LoginFinished set, we shouldn't skip it, + // lest the error go away in s2 or in-between. We want to make sure all + // the subsystems see it. Plus there aren't many of these, so not worth + // skipping. return false } - if !s1.Persist.Equals(s2.Persist) || s1.state != s2.state { - // If s1 has a different Persist or state than s2, + if !s1.Persist.Equals(s2.Persist) || s1.LoggedIn != s2.LoggedIn || s1.InMapPoll != s2.InMapPoll || s1.URL != s2.URL { + // If s1 has a different Persist, LoginFinished, Synced, or URL than s2, // don't skip it. We only care about skipping the typical // entries where the only difference is the NetMap. 
return false @@ -736,7 +725,6 @@ func (c *Auto) Logout(ctx context.Context) error { } c.mu.Lock() c.loggedIn = false - c.state = StateNotAuthenticated c.cancelAuthCtxLocked() c.cancelMapCtxLocked() c.mu.Unlock() diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 3914d10ef8310..bc301122673f7 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -15,7 +15,6 @@ import ( "net/netip" "net/url" "reflect" - "slices" "sync/atomic" "testing" "time" @@ -49,7 +48,7 @@ func fieldsOf(t reflect.Type) (fields []string) { func TestStatusEqual(t *testing.T) { // Verify that the Equal method stays in sync with reality - equalHandles := []string{"Err", "URL", "NetMap", "Persist", "state"} + equalHandles := []string{"Err", "URL", "LoggedIn", "InMapPoll", "NetMap", "Persist"} if have := fieldsOf(reflect.TypeFor[Status]()); !reflect.DeepEqual(have, equalHandles) { t.Errorf("Status.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, equalHandles) @@ -81,7 +80,7 @@ func TestStatusEqual(t *testing.T) { }, { &Status{}, - &Status{state: StateAuthenticated}, + &Status{LoggedIn: true, Persist: new(persist.Persist).View()}, false, }, } @@ -135,8 +134,20 @@ func TestCanSkipStatus(t *testing.T) { want: false, }, { - name: "s1-state-diff", - s1: &Status{state: 123, NetMap: nm1}, + name: "s1-login-finished-diff", + s1: &Status{LoggedIn: true, Persist: new(persist.Persist).View(), NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-login-finished", + s1: &Status{LoggedIn: true, Persist: new(persist.Persist).View(), NetMap: nm1}, + s2: &Status{NetMap: nm2}, + want: false, + }, + { + name: "s1-synced-diff", + s1: &Status{InMapPoll: true, LoggedIn: true, Persist: new(persist.Persist).View(), NetMap: nm1}, s2: &Status{NetMap: nm2}, want: false, }, @@ -167,10 +178,11 @@ func TestCanSkipStatus(t *testing.T) { }) } - want := []string{"Err", "URL", 
"NetMap", "Persist", "state"} - if f := fieldsOf(reflect.TypeFor[Status]()); !slices.Equal(f, want) { - t.Errorf("Status fields = %q; this code was only written to handle fields %q", f, want) + coveredFields := []string{"Err", "URL", "LoggedIn", "InMapPoll", "NetMap", "Persist"} + if have := fieldsOf(reflect.TypeFor[Status]()); !reflect.DeepEqual(have, coveredFields) { + t.Errorf("Status fields = %q; this code was only written to handle fields %q", have, coveredFields) } + } func TestRetryableErrors(t *testing.T) { diff --git a/control/controlclient/status.go b/control/controlclient/status.go index d0fdf80d745e3..65afb7a5011f2 100644 --- a/control/controlclient/status.go +++ b/control/controlclient/status.go @@ -4,8 +4,6 @@ package controlclient import ( - "encoding/json" - "fmt" "reflect" "tailscale.com/types/netmap" @@ -13,57 +11,6 @@ import ( "tailscale.com/types/structs" ) -// State is the high-level state of the client. It is used only in -// unit tests for proper sequencing, don't depend on it anywhere else. -// -// TODO(apenwarr): eliminate the state, as it's now obsolete. -// -// apenwarr: Historical note: controlclient.Auto was originally -// intended to be the state machine for the whole tailscale client, but that -// turned out to not be the right abstraction layer, and it moved to -// ipn.Backend. Since ipn.Backend now has a state machine, it would be -// much better if controlclient could be a simple stateless API. But the -// current server-side API (two interlocking polling https calls) makes that -// very hard to implement. A server side API change could untangle this and -// remove all the statefulness. 
-type State int - -const ( - StateNew = State(iota) - StateNotAuthenticated - StateAuthenticating - StateURLVisitRequired - StateAuthenticated - StateSynchronized // connected and received map update -) - -func (s State) AppendText(b []byte) ([]byte, error) { - return append(b, s.String()...), nil -} - -func (s State) MarshalText() ([]byte, error) { - return []byte(s.String()), nil -} - -func (s State) String() string { - switch s { - case StateNew: - return "state:new" - case StateNotAuthenticated: - return "state:not-authenticated" - case StateAuthenticating: - return "state:authenticating" - case StateURLVisitRequired: - return "state:url-visit-required" - case StateAuthenticated: - return "state:authenticated" - case StateSynchronized: - return "state:synchronized" - default: - return fmt.Sprintf("state:unknown:%d", int(s)) - } -} - type Status struct { _ structs.Incomparable @@ -76,6 +23,14 @@ type Status struct { // URL, if non-empty, is the interactive URL to visit to finish logging in. URL string + // LoggedIn, if true, indicates that serveRegister has completed and no + // other login change is in progress. + LoggedIn bool + + // InMapPoll, if true, indicates that we've received at least one netmap + // and are connected to receive updates. + InMapPoll bool + // NetMap is the latest server-pushed state of the tailnet network. NetMap *netmap.NetworkMap @@ -83,26 +38,8 @@ type Status struct { // // TODO(bradfitz,maisem): clarify this. Persist persist.PersistView - - // state is the internal state. It should not be exposed outside this - // package, but we have some automated tests elsewhere that need to - // use it via the StateForTest accessor. - // TODO(apenwarr): Unexport or remove these. - state State } -// LoginFinished reports whether the controlclient is in its "StateAuthenticated" -// state where it's in a happy register state but not yet in a map poll. -// -// TODO(bradfitz): delete this and everything around Status.state. 
-func (s *Status) LoginFinished() bool { return s.state == StateAuthenticated } - -// StateForTest returns the internal state of s for tests only. -func (s *Status) StateForTest() State { return s.state } - -// SetStateForTest sets the internal state of s for tests only. -func (s *Status) SetStateForTest(state State) { s.state = state } - // Equal reports whether s and s2 are equal. func (s *Status) Equal(s2 *Status) bool { if s == nil && s2 == nil { @@ -111,15 +48,8 @@ func (s *Status) Equal(s2 *Status) bool { return s != nil && s2 != nil && s.Err == s2.Err && s.URL == s2.URL && - s.state == s2.state && + s.LoggedIn == s2.LoggedIn && + s.InMapPoll == s2.InMapPoll && reflect.DeepEqual(s.Persist, s2.Persist) && reflect.DeepEqual(s.NetMap, s2.NetMap) } - -func (s Status) String() string { - b, err := json.MarshalIndent(s, "", "\t") - if err != nil { - panic(err) - } - return s.state.String() + " " + string(b) -} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index f0a77531bdd3b..41d110400169f 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1623,7 +1623,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.blockEngineUpdatesLocked(false) } - if st.LoginFinished() && (wasBlocked || authWasInProgress) { + if st.LoggedIn && (wasBlocked || authWasInProgress) { if wasBlocked { // Auth completed, unblock the engine b.blockEngineUpdatesLocked(false) @@ -1658,8 +1658,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control prefs.Persist = st.Persist.AsStruct() } } - if st.LoginFinished() { - if b.authURL != "" { + if st.LoggedIn { + if authWasInProgress { b.resetAuthURLLocked() // Interactive login finished successfully (URL visited). 
// After an interactive login, the user always wants diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 2197112b29da6..0c95ef4fcf5f6 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -206,9 +206,7 @@ func (cc *mockControl) send(opts sendOpt) { Err: err, } if loginFinished { - s.SetStateForTest(controlclient.StateAuthenticated) - } else if url == "" && err == nil && nm == nil { - s.SetStateForTest(controlclient.StateNotAuthenticated) + s.LoggedIn = true } cc.opts.Observer.SetControlClientStatus(cc, s) } @@ -228,7 +226,6 @@ func (cc *mockControl) sendAuthURL(nm *netmap.NetworkMap) { NetMap: nm, Persist: cc.persist.View(), } - s.SetStateForTest(controlclient.StateURLVisitRequired) cc.opts.Observer.SetControlClientStatus(cc, s) } @@ -434,8 +431,11 @@ func runTestStateMachine(t *testing.T, seamless bool) { // for it, so it doesn't count as Prefs.LoggedOut==true. c.Assert(prefs.LoggedOut(), qt.IsTrue) c.Assert(prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify notification indicates we need login (prefs show logged out) + c.Assert(nn[1].Prefs == nil || nn[1].Prefs.LoggedOut(), qt.IsTrue) + // Verify the actual facts about our state + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // Restart the state machine. @@ -455,8 +455,11 @@ func runTestStateMachine(t *testing.T, seamless bool) { c.Assert(nn[1].State, qt.IsNotNil) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify notification indicates we need login + c.Assert(nn[1].Prefs == nil || nn[1].Prefs.LoggedOut(), qt.IsTrue) + // Verify the actual facts about our state + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // Start non-interactive login with no token. 
@@ -473,7 +476,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { // (This behaviour is needed so that b.Login() won't // start connecting to an old account right away, if one // exists when you launch another login.) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need login + c.Assert(needsLogin(b), qt.IsTrue) } // Attempted non-interactive login with no key; indicate that @@ -500,10 +504,11 @@ func runTestStateMachine(t *testing.T, seamless bool) { c.Assert(nn[1].Prefs, qt.IsNotNil) c.Assert(nn[1].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we need URL visit + c.Assert(hasAuthURL(b), qt.IsTrue) c.Assert(nn[2].BrowseToURL, qt.IsNotNil) c.Assert(url1, qt.Equals, *nn[2].BrowseToURL) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + c.Assert(isFullyAuthenticated(b), qt.IsFalse) } // Now we'll try an interactive login. @@ -518,7 +523,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls() c.Assert(nn[0].BrowseToURL, qt.IsNotNil) c.Assert(url1, qt.Equals, *nn[0].BrowseToURL) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need to complete login + c.Assert(needsLogin(b), qt.IsTrue) } // Sometimes users press the Login button again, in the middle of @@ -534,7 +540,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { notifies.drain(0) // backend asks control for another login sequence cc.assertCalls("Login") - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need login + c.Assert(needsLogin(b), qt.IsTrue) } // Provide a new interactive login URL. 
@@ -550,7 +557,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(1) c.Assert(nn[0].BrowseToURL, qt.IsNotNil) c.Assert(url2, qt.Equals, *nn[0].BrowseToURL) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify we still need to complete login + c.Assert(needsLogin(b), qt.IsTrue) } // Pretend that the interactive login actually happened. @@ -582,10 +590,18 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls() c.Assert(nn[0].LoginFinished, qt.IsNotNil) c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(nn[2].State, qt.IsNotNil) c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user1") - c.Assert(ipn.NeedsMachineAuth, qt.Equals, *nn[2].State) - c.Assert(ipn.NeedsMachineAuth, qt.Equals, b.State()) + // nn[2] is a state notification after login + // Verify login finished but need machine auth using backend state + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(needsMachineAuth(b), qt.IsTrue) + nm := b.NetMap() + c.Assert(nm, qt.IsNotNil) + // For an empty netmap (after initial login), SelfNode may not be valid yet. + // In this case, we can't check MachineAuthorized, but needsMachineAuth already verified the state. + if nm.SelfNode.Valid() { + c.Assert(nm.SelfNode.MachineAuthorized(), qt.IsFalse) + } } // Pretend that the administrator has authorized our machine. 
@@ -603,8 +619,13 @@ func runTestStateMachine(t *testing.T, seamless bool) { { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) + // nn[0] is a state notification after machine auth granted + c.Assert(len(nn), qt.Equals, 1) + // Verify machine authorized using backend state + nm := b.NetMap() + c.Assert(nm, qt.IsNotNil) + c.Assert(nm.SelfNode.Valid(), qt.IsTrue) + c.Assert(nm.SelfNode.MachineAuthorized(), qt.IsTrue) } // TODO: add a fake DERP server to our fake netmap, so we can @@ -627,9 +648,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(2) cc.assertCalls("pause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification, nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Stopped, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) } // The user changes their preference to WantRunning after all. @@ -645,17 +666,12 @@ func runTestStateMachine(t *testing.T, seamless bool) { // BUG: Login isn't needed here. We never logged out. cc.assertCalls("Login", "unpause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification, nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) c.Assert(store.sawWrite(), qt.IsTrue) } - // undo the state hack above. - b.mu.Lock() - b.state = ipn.Starting - b.mu.Unlock() - // User wants to logout. 
store.awaitWrite() t.Logf("\n\nLogout") @@ -664,27 +680,26 @@ func runTestStateMachine(t *testing.T, seamless bool) { { nn := notifies.drain(5) previousCC.assertCalls("pause", "Logout", "unpause", "Shutdown") + // nn[0] is state notification (Stopped) c.Assert(nn[0].State, qt.IsNotNil) c.Assert(*nn[0].State, qt.Equals, ipn.Stopped) - + // nn[1] is prefs notification after logout c.Assert(nn[1].Prefs, qt.IsNotNil) c.Assert(nn[1].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) cc.assertCalls("New") - c.Assert(nn[2].State, qt.IsNotNil) - c.Assert(*nn[2].State, qt.Equals, ipn.NoState) - - c.Assert(nn[3].Prefs, qt.IsNotNil) // emptyPrefs + // nn[2] is the initial state notification after New (NoState) + // nn[3] is prefs notification with emptyPrefs + c.Assert(nn[3].Prefs, qt.IsNotNil) c.Assert(nn[3].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[3].Prefs.WantRunning(), qt.IsFalse) - c.Assert(nn[4].State, qt.IsNotNil) - c.Assert(*nn[4].State, qt.Equals, ipn.NeedsLogin) - - c.Assert(b.State(), qt.Equals, ipn.NeedsLogin) - c.Assert(store.sawWrite(), qt.IsTrue) + // nn[4] is state notification (NeedsLogin) + // Verify logged out and needs new login using backend state + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // A second logout should be a no-op as we are in the NeedsLogin state. 
@@ -696,7 +711,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls() c.Assert(b.Prefs().LoggedOut(), qt.IsTrue) c.Assert(b.Prefs().WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify still needs login + c.Assert(needsLogin(b), qt.IsTrue) } // A third logout should also be a no-op as the cc should be in @@ -709,7 +725,8 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls() c.Assert(b.Prefs().LoggedOut(), qt.IsTrue) c.Assert(b.Prefs().WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify still needs login + c.Assert(needsLogin(b), qt.IsTrue) } // Oh, you thought we were done? Ha! Now we have to test what @@ -732,11 +749,13 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(2) cc.assertCalls() c.Assert(nn[0].Prefs, qt.IsNotNil) - c.Assert(nn[1].State, qt.IsNotNil) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsTrue) c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[1].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // Verify notification indicates we need login + c.Assert(nn[1].Prefs == nil || nn[1].Prefs.LoggedOut(), qt.IsTrue) + // Verify we need login after restart + c.Assert(needsLogin(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsFalse) } // Explicitly set the ControlURL to avoid defaulting to [ipn.DefaultControlURL]. @@ -787,8 +806,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) // If a user initiates an interactive login, they also expect WantRunning to become true. c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) - c.Assert(nn[2].State, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[2].State) + // nn[2] is state notification (Starting) - verify using backend state + c.Assert(isWantRunning(b), qt.IsTrue) + c.Assert(isLoggedIn(b), qt.IsTrue) } // Now we've logged in successfully. Let's disconnect. 
@@ -802,9 +822,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(2) cc.assertCalls("pause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification (Stopped), nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Stopped, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) } @@ -822,10 +842,11 @@ func runTestStateMachine(t *testing.T, seamless bool) { // and WantRunning is false, so cc should be paused. cc.assertCalls("New", "Login", "pause") c.Assert(nn[0].Prefs, qt.IsNotNil) - c.Assert(nn[1].State, qt.IsNotNil) c.Assert(nn[0].Prefs.WantRunning(), qt.IsFalse) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse) - c.Assert(*nn[1].State, qt.Equals, ipn.Stopped) + // nn[1] is state notification (Stopped) + // Verify backend shows we're not wanting to run + c.Assert(isWantRunning(b), qt.IsFalse) } // When logged in but !WantRunning, ipn leaves us unpaused to retrieve @@ -863,9 +884,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(2) cc.assertCalls("Login", "unpause") // BUG: I would expect Prefs to change first, and state after. - c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification (Starting), nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) } // Disconnect. @@ -879,9 +900,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { nn := notifies.drain(2) cc.assertCalls("pause") // BUG: I would expect Prefs to change first, and state after. 
- c.Assert(nn[0].State, qt.IsNotNil) + // nn[0] is state notification (Stopped), nn[1] is prefs notification c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(ipn.Stopped, qt.Equals, *nn[0].State) + c.Assert(nn[1].Prefs.WantRunning(), qt.IsFalse) } // We want to try logging in as a different user, while Stopped. @@ -926,12 +947,13 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls("unpause") c.Assert(nn[0].LoginFinished, qt.IsNotNil) c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(nn[2].State, qt.IsNotNil) // Prefs after finishing the login, so LoginName updated. c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user3") c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) - c.Assert(ipn.Starting, qt.Equals, *nn[2].State) + // nn[2] is state notification (Starting) - verify using backend state + c.Assert(isWantRunning(b), qt.IsTrue) + c.Assert(isLoggedIn(b), qt.IsTrue) } // The last test case is the most common one: restarting when both @@ -950,11 +972,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { c.Assert(nn[0].Prefs, qt.IsNotNil) c.Assert(nn[0].Prefs.LoggedOut(), qt.IsFalse) c.Assert(nn[0].Prefs.WantRunning(), qt.IsTrue) - // We're logged in and have a valid netmap, so we should - // be in the Starting state. - c.Assert(nn[1].State, qt.IsNotNil) - c.Assert(*nn[1].State, qt.Equals, ipn.Starting) - c.Assert(b.State(), qt.Equals, ipn.Starting) + // nn[1] is state notification (Starting) + // Verify we're authenticated with valid netmap using backend state + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsTrue) } // Control server accepts our valid key from before. @@ -971,7 +992,9 @@ func runTestStateMachine(t *testing.T, seamless bool) { // NOTE: No prefs change this time. WantRunning stays true. // We were in Starting in the first place, so that doesn't // change either, so we don't expect any notifications. 
- c.Assert(ipn.Starting, qt.Equals, b.State()) + // Verify we're still authenticated with valid netmap + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsTrue) } t.Logf("\n\nExpireKey") notifies.expect(1) @@ -982,9 +1005,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.NeedsLogin, qt.Equals, *nn[0].State) - c.Assert(ipn.NeedsLogin, qt.Equals, b.State()) + // nn[0] is state notification (NeedsLogin) due to key expiry + c.Assert(len(nn), qt.Equals, 1) + // Verify key expired, need new login using backend state + c.Assert(needsLogin(b), qt.IsTrue) c.Assert(b.isEngineBlocked(), qt.IsTrue) } @@ -997,9 +1021,11 @@ func runTestStateMachine(t *testing.T, seamless bool) { { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.Starting, qt.Equals, *nn[0].State) - c.Assert(ipn.Starting, qt.Equals, b.State()) + // nn[0] is state notification (Starting) after key extension + c.Assert(len(nn), qt.Equals, 1) + // Verify key extended, authenticated again using backend state + c.Assert(isFullyAuthenticated(b), qt.IsTrue) + c.Assert(hasValidNetMap(b), qt.IsTrue) c.Assert(b.isEngineBlocked(), qt.IsFalse) } notifies.expect(1) @@ -1008,9 +1034,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { { nn := notifies.drain(1) cc.assertCalls() - c.Assert(nn[0].State, qt.IsNotNil) - c.Assert(ipn.Running, qt.Equals, *nn[0].State) - c.Assert(ipn.Running, qt.Equals, b.State()) + // nn[0] is state notification (Running) after DERP connection + c.Assert(len(nn), qt.Equals, 1) + // Verify we can route traffic using backend state + c.Assert(canRouteTraffic(b), qt.IsTrue) } } @@ -1901,3 +1928,77 @@ func (e *mockEngine) Close() { func (e *mockEngine) Done() <-chan struct{} { return e.done } + +// hasValidNetMap returns true if the backend has a valid network map with a valid self node. 
+func hasValidNetMap(b *LocalBackend) bool { + nm := b.NetMap() + return nm != nil && nm.SelfNode.Valid() +} + +// needsLogin returns true if the backend needs user login action. +// This is true when logged out, when an auth URL is present (interactive login in progress), +// or when the node key has expired. +func needsLogin(b *LocalBackend) bool { + // Note: b.Prefs() handles its own locking, so we lock only for authURL and keyExpired access + b.mu.Lock() + authURL := b.authURL + keyExpired := b.keyExpired + b.mu.Unlock() + return b.Prefs().LoggedOut() || authURL != "" || keyExpired +} + +// needsMachineAuth returns true if the user has logged in but the machine is not yet authorized. +// This includes the case where we have a netmap but no valid SelfNode yet (empty netmap after initial login). +func needsMachineAuth(b *LocalBackend) bool { + // Note: b.NetMap() and b.Prefs() handle their own locking + nm := b.NetMap() + prefs := b.Prefs() + if prefs.LoggedOut() || nm == nil { + return false + } + // If we have a valid SelfNode, check its MachineAuthorized status + if nm.SelfNode.Valid() { + return !nm.SelfNode.MachineAuthorized() + } + // Empty netmap (no SelfNode yet) after login also means we need machine auth + return true +} + +// hasAuthURL returns true if an authentication URL is present (user needs to visit a URL). +func hasAuthURL(b *LocalBackend) bool { + b.mu.Lock() + authURL := b.authURL + b.mu.Unlock() + return authURL != "" +} + +// canRouteTraffic returns true if the backend is capable of routing traffic. +// This requires a valid netmap, machine authorization, and WantRunning preference. +func canRouteTraffic(b *LocalBackend) bool { + // Note: b.NetMap() and b.Prefs() handle their own locking + nm := b.NetMap() + prefs := b.Prefs() + return nm != nil && + nm.SelfNode.Valid() && + nm.SelfNode.MachineAuthorized() && + prefs.WantRunning() +} + +// isFullyAuthenticated returns true if the user has completed login and no auth URL is pending. 
+func isFullyAuthenticated(b *LocalBackend) bool { + // Note: b.Prefs() handles its own locking, so we lock only for authURL access + b.mu.Lock() + authURL := b.authURL + b.mu.Unlock() + return !b.Prefs().LoggedOut() && authURL == "" +} + +// isWantRunning returns true if the WantRunning preference is set. +func isWantRunning(b *LocalBackend) bool { + return b.Prefs().WantRunning() +} + +// isLoggedIn returns true if the user is logged in (not logged out). +func isLoggedIn(b *LocalBackend) bool { + return !b.Prefs().LoggedOut() +} From e1f0ad7a0516d056d27c383c51effa90f1c11d2e Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 14 Nov 2025 19:43:44 -0800 Subject: [PATCH 0687/1093] net/udprelay: implement Server.SetStaticAddrPorts (#17909) Only used in tests for now. Updates tailscale/corp#31489 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 32 ++--------- feature/relayserver/relayserver_test.go | 5 +- net/udprelay/server.go | 75 ++++++++++++++----------- net/udprelay/server_test.go | 24 +++++--- 4 files changed, 64 insertions(+), 72 deletions(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 868d5f61a2fa7..cfa372bd7ae5a 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -8,14 +8,10 @@ package relayserver import ( "encoding/json" "fmt" - "log" "net/http" - "net/netip" - "strings" "sync" "tailscale.com/disco" - "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" @@ -71,8 +67,8 @@ func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r * // imported. 
func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { e := &extension{ - newServerFn: func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { - return udprelay.NewServer(logf, port, overrideAddrs) + newServerFn: func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) { + return udprelay.NewServer(logf, port, onlyStaticAddrPorts) }, logf: logger.WithPrefix(logf, featureName+": "), } @@ -94,7 +90,7 @@ type relayServer interface { // extension is an [ipnext.Extension] managing the relay server on platforms // that import this package. type extension struct { - newServerFn func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) // swappable for tests + newServerFn func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) // swappable for tests logf logger.Logf ec *eventbus.Client respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] @@ -170,7 +166,7 @@ func (e *extension) onAllocReq(req magicsock.UDPRelayAllocReq) { } func (e *extension) tryStartRelayServerLocked() { - rs, err := e.newServerFn(e.logf, *e.port, overrideAddrs()) + rs, err := e.newServerFn(e.logf, *e.port, false) if err != nil { e.logf("error initializing server: %v", err) return @@ -217,26 +213,6 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV e.handleRelayServerLifetimeLocked() } -// overrideAddrs returns TS_DEBUG_RELAY_SERVER_ADDRS as []netip.Addr, if set. It -// can be between 0 and 3 comma-separated Addrs. TS_DEBUG_RELAY_SERVER_ADDRS is -// not a stable interface, and is subject to change. 
-var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { - all := envknob.String("TS_DEBUG_RELAY_SERVER_ADDRS") - const max = 3 - remain := all - for remain != "" && len(ret) < max { - var s string - s, remain, _ = strings.Cut(remain, ",") - addr, err := netip.ParseAddr(s) - if err != nil { - log.Printf("ignoring invalid Addr %q in TS_DEBUG_RELAY_SERVER_ADDRS %q: %v", s, all, err) - continue - } - ret = append(ret, addr) - } - return -}) - func (e *extension) stopRelayServerLocked() { if e.rs != nil { e.rs.Close() diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 2184b51759b61..3d71c55d76dd5 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -5,7 +5,6 @@ package relayserver import ( "errors" - "net/netip" "reflect" "testing" @@ -157,7 +156,7 @@ func Test_extension_profileStateChanged(t *testing.T) { t.Fatal(err) } e := ipne.(*extension) - e.newServerFn = func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { + e.newServerFn = func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) { return &mockRelayServer{}, nil } e.port = tt.fields.port @@ -289,7 +288,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { t.Fatal(err) } e := ipne.(*extension) - e.newServerFn = func(logf logger.Logf, port int, overrideAddrs []netip.Addr) (relayServer, error) { + e.newServerFn = func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) { return &mockRelayServer{}, nil } e.shutdown = tt.shutdown diff --git a/net/udprelay/server.go b/net/udprelay/server.go index c050c94166e2e..7138cec7a93dd 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -36,6 +36,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/nettype" + "tailscale.com/types/views" "tailscale.com/util/eventbus" "tailscale.com/util/set" ) @@ -72,15 +73,16 @@ type Server 
struct { closeCh chan struct{} netChecker *netcheck.Client - mu sync.Mutex // guards the following fields - derpMap *tailcfg.DERPMap - addrDiscoveryOnce bool // addrDiscovery completed once (successfully or unsuccessfully) - addrPorts []netip.AddrPort // the ip:port pairs returned as candidate endpoints - closed bool - lamportID uint64 - nextVNI uint32 - byVNI map[uint32]*serverEndpoint - byDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint + mu sync.Mutex // guards the following fields + derpMap *tailcfg.DERPMap + onlyStaticAddrPorts bool // no dynamic addr port discovery when set + staticAddrPorts views.Slice[netip.AddrPort] // static ip:port pairs set with [Server.SetStaticAddrPorts] + dynamicAddrPorts []netip.AddrPort // dynamically discovered ip:port pairs + closed bool + lamportID uint64 + nextVNI uint32 + byVNI map[uint32]*serverEndpoint + byDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint } const ( @@ -278,15 +280,17 @@ func (e *serverEndpoint) isBound() bool { // NewServer constructs a [Server] listening on port. If port is zero, then // port selection is left up to the host networking stack. If -// len(overrideAddrs) > 0 these will be used in place of dynamic discovery, -// which is useful to override in tests. -func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Server, err error) { +// onlyStaticAddrPorts is true, then dynamic addr:port discovery will be +// disabled, and only addr:port's set via [Server.SetStaticAddrPorts] will be +// used. 
+func NewServer(logf logger.Logf, port int, onlyStaticAddrPorts bool) (s *Server, err error) { s = &Server{ logf: logf, disco: key.NewDisco(), bindLifetime: defaultBindLifetime, steadyStateLifetime: defaultSteadyStateLifetime, closeCh: make(chan struct{}), + onlyStaticAddrPorts: onlyStaticAddrPorts, byDisco: make(map[key.SortedPairOfDiscoPublic]*serverEndpoint), nextVNI: minVNI, byVNI: make(map[uint32]*serverEndpoint), @@ -321,19 +325,7 @@ func NewServer(logf logger.Logf, port int, overrideAddrs []netip.Addr) (s *Serve return nil, err } - if len(overrideAddrs) > 0 { - addrPorts := make(set.Set[netip.AddrPort], len(overrideAddrs)) - for _, addr := range overrideAddrs { - if addr.IsValid() { - if addr.Is4() { - addrPorts.Add(netip.AddrPortFrom(addr, s.uc4Port)) - } else if s.uc6 != nil { - addrPorts.Add(netip.AddrPortFrom(addr, s.uc6Port)) - } - } - } - s.addrPorts = addrPorts.Slice() - } else { + if !s.onlyStaticAddrPorts { s.wg.Add(1) go s.addrDiscoveryLoop() } @@ -429,8 +421,7 @@ func (s *Server) addrDiscoveryLoop() { s.logf("error discovering IP:port candidates: %v", err) } s.mu.Lock() - s.addrPorts = addrPorts - s.addrDiscoveryOnce = true + s.dynamicAddrPorts = addrPorts s.mu.Unlock() case <-s.closeCh: return @@ -747,6 +738,15 @@ func (s *Server) getNextVNILocked() (uint32, error) { return 0, errors.New("VNI pool exhausted") } +// getAllAddrPortsCopyLocked returns a copy of the combined +// [Server.staticAddrPorts] and [Server.dynamicAddrPorts] slices. +func (s *Server) getAllAddrPortsCopyLocked() []netip.AddrPort { + addrPorts := make([]netip.AddrPort, 0, len(s.dynamicAddrPorts)+s.staticAddrPorts.Len()) + addrPorts = append(addrPorts, s.staticAddrPorts.AsSlice()...) + addrPorts = append(addrPorts, slices.Clone(s.dynamicAddrPorts)...) + return addrPorts +} + // AllocateEndpoint allocates an [endpoint.ServerEndpoint] for the provided pair // of [key.DiscoPublic]'s. 
If an allocation already exists for discoA and discoB // it is returned without modification/reallocation. AllocateEndpoint returns @@ -760,11 +760,8 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv return endpoint.ServerEndpoint{}, ErrServerClosed } - if len(s.addrPorts) == 0 { - if !s.addrDiscoveryOnce { - return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: endpoint.ServerRetryAfter} - } - return endpoint.ServerEndpoint{}, errors.New("server addrPorts are not yet known") + if s.staticAddrPorts.Len() == 0 && len(s.dynamicAddrPorts) == 0 { + return endpoint.ServerEndpoint{}, ErrServerNotReady{RetryAfter: endpoint.ServerRetryAfter} } if discoA.Compare(s.discoPublic) == 0 || discoB.Compare(s.discoPublic) == 0 { @@ -787,7 +784,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv // consider storing them (maybe interning) in the [*serverEndpoint] // at allocation time. ClientDisco: pair.Get(), - AddrPorts: slices.Clone(s.addrPorts), + AddrPorts: s.getAllAddrPortsCopyLocked(), VNI: e.vni, LamportID: e.lamportID, BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, @@ -817,7 +814,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, ClientDisco: pair.Get(), - AddrPorts: slices.Clone(s.addrPorts), + AddrPorts: s.getAllAddrPortsCopyLocked(), VNI: e.vni, LamportID: e.lamportID, BindLifetime: tstime.GoDuration{Duration: s.bindLifetime}, @@ -880,3 +877,13 @@ func (s *Server) getDERPMap() *tailcfg.DERPMap { defer s.mu.Unlock() return s.derpMap } + +// SetStaticAddrPorts sets addr:port pairs the [Server] will advertise +// as candidates it is potentially reachable over, in combination with +// dynamically discovered pairs. This replaces any previously-provided static +// values. 
+func (s *Server) SetStaticAddrPorts(addrPorts views.Slice[netip.AddrPort]) { + s.mu.Lock() + defer s.mu.Unlock() + s.staticAddrPorts = addrPorts +} diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index bf7f0a9b5f1de..6c3d616586bc9 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -17,6 +17,7 @@ import ( "tailscale.com/disco" "tailscale.com/net/packet" "tailscale.com/types/key" + "tailscale.com/types/views" ) type testClient struct { @@ -185,31 +186,40 @@ func TestServer(t *testing.T) { cases := []struct { name string - overrideAddrs []netip.Addr + staticAddrs []netip.Addr forceClientsMixedAF bool }{ { - name: "over ipv4", - overrideAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1")}, + name: "over ipv4", + staticAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1")}, }, { - name: "over ipv6", - overrideAddrs: []netip.Addr{netip.MustParseAddr("::1")}, + name: "over ipv6", + staticAddrs: []netip.Addr{netip.MustParseAddr("::1")}, }, { name: "mixed address families", - overrideAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1"), netip.MustParseAddr("::1")}, + staticAddrs: []netip.Addr{netip.MustParseAddr("127.0.0.1"), netip.MustParseAddr("::1")}, forceClientsMixedAF: true, }, } for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - server, err := NewServer(t.Logf, 0, tt.overrideAddrs) + server, err := NewServer(t.Logf, 0, true) if err != nil { t.Fatal(err) } defer server.Close() + addrPorts := make([]netip.AddrPort, 0, len(tt.staticAddrs)) + for _, addr := range tt.staticAddrs { + if addr.Is4() { + addrPorts = append(addrPorts, netip.AddrPortFrom(addr, server.uc4Port)) + } else if server.uc6Port != 0 { + addrPorts = append(addrPorts, netip.AddrPortFrom(addr, server.uc6Port)) + } + } + server.SetStaticAddrPorts(views.SliceOf(addrPorts)) endpoint, err := server.AllocateEndpoint(discoA.Public(), discoB.Public()) if err != nil { From 8444659ed8eafe501485396696f78d3eddf72ef4 Mon Sep 17 00:00:00 2001 From: Xinyu 
Kuo Date: Fri, 17 Oct 2025 13:47:13 +0800 Subject: [PATCH 0688/1093] cmd/tailscale/cli: fix panic in netcheck with mismatched DERP region IDs Fixes #17564 Signed-off-by: Xinyu Kuo --- cmd/tailscale/cli/netcheck.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 5ae8db8fa3fbb..a8a8992f5ba23 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -180,7 +180,11 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error { printf("\t* Nearest DERP: unknown (no response to latency probes)\n") } else { if report.PreferredDERP != 0 { - printf("\t* Nearest DERP: %v\n", dm.Regions[report.PreferredDERP].RegionName) + if region, ok := dm.Regions[report.PreferredDERP]; ok { + printf("\t* Nearest DERP: %v\n", region.RegionName) + } else { + printf("\t* Nearest DERP: %v (region not found in map)\n", report.PreferredDERP) + } } else { printf("\t* Nearest DERP: [none]\n") } From 8aa46a395631f49b98104eea2b13432f1a196375 Mon Sep 17 00:00:00 2001 From: Avery Palmer Date: Sat, 15 Nov 2025 17:15:14 +0000 Subject: [PATCH 0689/1093] util/clientmetric: fix regression causing Metric.v to be uninitialised m.v was uninitialised when Tailscale built with ts_omit_logtail Fixes #17918 Signed-off-by: Avery Palmer --- util/clientmetric/clientmetric.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 65223e6a9375a..9e6b03a15ce93 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -133,15 +133,18 @@ func (m *Metric) Publish() { metrics[m.name] = m sortedDirty = true + if m.f == nil { + if len(valFreeList) == 0 { + valFreeList = make([]int64, 256) + } + m.v = &valFreeList[0] + valFreeList = valFreeList[1:] + } + if buildfeatures.HasLogTail { if m.f != nil { lastLogVal = append(lastLogVal, scanEntry{f: m.f}) } else { - if 
len(valFreeList) == 0 { - valFreeList = make([]int64, 256) - } - m.v = &valFreeList[0] - valFreeList = valFreeList[1:] lastLogVal = append(lastLogVal, scanEntry{v: m.v}) } } From 4e01e8a66ec6ae1d5ebecc60ad12b26ce300c860 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 15 Nov 2025 10:01:44 -0800 Subject: [PATCH 0690/1093] wgengine/netlog: fix send to closed channel in test Fixes #17922 Change-Id: I2cd600b0ecda389079f2004985ac9a25ffbbfdd1 Signed-off-by: Brad Fitzpatrick --- wgengine/netlog/netlog_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/wgengine/netlog/netlog_test.go b/wgengine/netlog/netlog_test.go index ed9f672bfb63d..b4758c7ec7beb 100644 --- a/wgengine/netlog/netlog_test.go +++ b/wgengine/netlog/netlog_test.go @@ -182,6 +182,7 @@ func TestUpdateRace(t *testing.T) { group.Wait() logger.mu.Lock() close(logger.recordsChan) + logger.recordsChan = nil logger.mu.Unlock() } From 98aadbaf548dfd55523c89c49f60ad1aed4ccb6b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 15 Nov 2025 09:49:08 -0800 Subject: [PATCH 0691/1093] util/cache: remove unused code Updates #cleanup Change-Id: I9be7029c5d2a7d6297125d0147e93205a7c68989 Signed-off-by: Brad Fitzpatrick --- util/cache/cache_test.go | 199 --------------------------------------- util/cache/interface.go | 40 -------- util/cache/locking.go | 43 --------- util/cache/none.go | 23 ----- util/cache/single.go | 81 ---------------- 5 files changed, 386 deletions(-) delete mode 100644 util/cache/cache_test.go delete mode 100644 util/cache/interface.go delete mode 100644 util/cache/locking.go delete mode 100644 util/cache/none.go delete mode 100644 util/cache/single.go diff --git a/util/cache/cache_test.go b/util/cache/cache_test.go deleted file mode 100644 index a6683e12dd772..0000000000000 --- a/util/cache/cache_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -import ( - "errors" - "testing" - "time" -) 
- -var startTime = time.Date(2023, time.March, 1, 0, 0, 0, 0, time.UTC) - -func TestSingleCache(t *testing.T) { - testTime := startTime - timeNow := func() time.Time { return testTime } - c := &Single[string, int]{ - timeNow: timeNow, - } - - t.Run("NoServeExpired", func(t *testing.T) { - testCacheImpl(t, c, &testTime, false) - }) - - t.Run("ServeExpired", func(t *testing.T) { - c.Empty() - c.ServeExpired = true - testTime = startTime - testCacheImpl(t, c, &testTime, true) - }) -} - -func TestLocking(t *testing.T) { - testTime := startTime - timeNow := func() time.Time { return testTime } - c := NewLocking(&Single[string, int]{ - timeNow: timeNow, - }) - - // Just verify that the inner cache's behaviour hasn't changed. - testCacheImpl(t, c, &testTime, false) -} - -func testCacheImpl(t *testing.T, c Cache[string, int], testTime *time.Time, serveExpired bool) { - var fillTime time.Time - t.Run("InitialFill", func(t *testing.T) { - fillTime = testTime.Add(time.Hour) - val, err := c.Get("key", func() (int, time.Time, error) { - return 123, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - }) - - // Fetching again won't call our fill function - t.Run("SecondFetch", func(t *testing.T) { - *testTime = fillTime.Add(-1 * time.Second) - called := false - val, err := c.Get("key", func() (int, time.Time, error) { - called = true - return -1, fillTime, nil - }) - if called { - t.Fatal("wanted no call to fill function") - } - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - }) - - // Fetching after the expiry time will re-fill - t.Run("ReFill", func(t *testing.T) { - *testTime = fillTime.Add(1) - fillTime = fillTime.Add(time.Hour) - val, err := c.Get("key", func() (int, time.Time, error) { - return 999, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 999 { - t.Fatalf("got val=%d; want 999", val) - } - }) - - // An error on fetch will serve 
the expired value. - t.Run("FetchError", func(t *testing.T) { - if !serveExpired { - t.Skipf("not testing ServeExpired") - } - - *testTime = fillTime.Add(time.Hour + 1) - val, err := c.Get("key", func() (int, time.Time, error) { - return 0, time.Time{}, errors.New("some error") - }) - if err != nil { - t.Fatal(err) - } - if val != 999 { - t.Fatalf("got val=%d; want 999", val) - } - }) - - // Fetching a different key re-fills - t.Run("DifferentKey", func(t *testing.T) { - *testTime = fillTime.Add(time.Hour + 1) - - var calls int - val, err := c.Get("key1", func() (int, time.Time, error) { - calls++ - return 123, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - if calls != 1 { - t.Errorf("got %d, want 1 call", calls) - } - - val, err = c.Get("key2", func() (int, time.Time, error) { - calls++ - return 456, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 456 { - t.Fatalf("got val=%d; want 456", val) - } - if calls != 2 { - t.Errorf("got %d, want 2 call", calls) - } - }) - - // Calling Forget with the wrong key does nothing, and with the correct - // key will drop the cache. - t.Run("Forget", func(t *testing.T) { - // Add some time so that previously-cached values don't matter. 
- fillTime = testTime.Add(2 * time.Hour) - *testTime = fillTime.Add(-1 * time.Second) - - const key = "key" - - var calls int - val, err := c.Get(key, func() (int, time.Time, error) { - calls++ - return 123, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - if calls != 1 { - t.Errorf("got %d, want 1 call", calls) - } - - // Forgetting the wrong key does nothing - c.Forget("other") - val, err = c.Get(key, func() (int, time.Time, error) { - t.Fatal("should not be called") - panic("unreachable") - }) - if err != nil { - t.Fatal(err) - } - if val != 123 { - t.Fatalf("got val=%d; want 123", val) - } - - // Forgetting the correct key re-fills - c.Forget(key) - - val, err = c.Get("key2", func() (int, time.Time, error) { - calls++ - return 456, fillTime, nil - }) - if err != nil { - t.Fatal(err) - } - if val != 456 { - t.Fatalf("got val=%d; want 456", val) - } - if calls != 2 { - t.Errorf("got %d, want 2 call", calls) - } - }) -} diff --git a/util/cache/interface.go b/util/cache/interface.go deleted file mode 100644 index 0db87ba0e2ff4..0000000000000 --- a/util/cache/interface.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package cache contains an interface for a cache around a typed value, and -// various cache implementations that implement that interface. -package cache - -import "time" - -// Cache is the interface for the cache types in this package. -// -// Functions in this interface take a key parameter, but it is valid for a -// cache type to hold a single value associated with a key, and simply drop the -// cached value if provided with a different key. -// -// It is valid for Cache implementations to be concurrency-safe or not, and -// each implementation should document this. If you need a concurrency-safe -// cache, an existing cache can be wrapped with a lock using NewLocking(inner). 
-// -// K and V should be types that can be successfully passed to json.Marshal. -type Cache[K comparable, V any] interface { - // Get should return a previously-cached value or call the provided - // FillFunc to obtain a new one. The provided key can be used either to - // allow multiple cached values, or to drop the cache if the key - // changes; either is valid. - Get(K, FillFunc[V]) (V, error) - - // Forget should remove the given key from the cache, if it is present. - // If it is not present, nothing should be done. - Forget(K) - - // Empty should empty the cache such that the next call to Get should - // call the provided FillFunc for all possible keys. - Empty() -} - -// FillFunc is the signature of a function for filling a cache. It should -// return the value to be cached, the time that the cached value is valid -// until, or an error. -type FillFunc[T any] func() (T, time.Time, error) diff --git a/util/cache/locking.go b/util/cache/locking.go deleted file mode 100644 index 85e44b360a9b0..0000000000000 --- a/util/cache/locking.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -import "sync" - -// Locking wraps an inner Cache implementation with a mutex, making it -// safe for concurrent use. All methods are serialized on the same mutex. -type Locking[K comparable, V any, C Cache[K, V]] struct { - sync.Mutex - inner C -} - -// NewLocking creates a new Locking cache wrapping inner. -func NewLocking[K comparable, V any, C Cache[K, V]](inner C) *Locking[K, V, C] { - return &Locking[K, V, C]{inner: inner} -} - -// Get implements Cache. -// -// The cache's mutex is held for the entire duration of this function, -// including while the FillFunc is being called. This function is not -// reentrant; attempting to call Get from a FillFunc will deadlock. 
-func (c *Locking[K, V, C]) Get(key K, f FillFunc[V]) (V, error) { - c.Lock() - defer c.Unlock() - return c.inner.Get(key, f) -} - -// Forget implements Cache. -func (c *Locking[K, V, C]) Forget(key K) { - c.Lock() - defer c.Unlock() - c.inner.Forget(key) -} - -// Empty implements Cache. -func (c *Locking[K, V, C]) Empty() { - c.Lock() - defer c.Unlock() - c.inner.Empty() -} diff --git a/util/cache/none.go b/util/cache/none.go deleted file mode 100644 index c4073e0d90cf3..0000000000000 --- a/util/cache/none.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -// None provides no caching and always calls the provided FillFunc. -// -// It is safe for concurrent use if the underlying FillFunc is. -type None[K comparable, V any] struct{} - -var _ Cache[int, int] = None[int, int]{} - -// Get always calls the provided FillFunc and returns what it does. -func (c None[K, V]) Get(_ K, f FillFunc[V]) (V, error) { - v, _, e := f() - return v, e -} - -// Forget implements Cache. -func (None[K, V]) Forget(K) {} - -// Empty implements Cache. -func (None[K, V]) Empty() {} diff --git a/util/cache/single.go b/util/cache/single.go deleted file mode 100644 index 6b9ac2c1193c6..0000000000000 --- a/util/cache/single.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package cache - -import ( - "time" -) - -// Single is a simple in-memory cache that stores a single value until a -// defined time before it is re-fetched. It also supports returning a -// previously-expired value if refreshing the value in the cache fails. -// -// Single is not safe for concurrent use. -type Single[K comparable, V any] struct { - key K - val V - goodUntil time.Time - timeNow func() time.Time // for tests - - // ServeExpired indicates that if an error occurs when filling the - // cache, an expired value can be returned instead of an error. 
- // - // This value should only be set when this struct is created. - ServeExpired bool -} - -var _ Cache[int, int] = (*Single[int, int])(nil) - -// Get will return the cached value, if any, or fill the cache by calling f and -// return the corresponding value. If f returns an error and c.ServeExpired is -// true, then a previous expired value can be returned with no error. -func (c *Single[K, V]) Get(key K, f FillFunc[V]) (V, error) { - var now time.Time - if c.timeNow != nil { - now = c.timeNow() - } else { - now = time.Now() - } - - if c.key == key && now.Before(c.goodUntil) { - return c.val, nil - } - - // Re-fill cached entry - val, until, err := f() - if err == nil { - c.key = key - c.val = val - c.goodUntil = until - return val, nil - } - - // Never serve an expired entry for the wrong key. - if c.key == key && c.ServeExpired && !c.goodUntil.IsZero() { - return c.val, nil - } - - var zero V - return zero, err -} - -// Forget implements Cache. -func (c *Single[K, V]) Forget(key K) { - if c.key != key { - return - } - - c.Empty() -} - -// Empty implements Cache. -func (c *Single[K, V]) Empty() { - c.goodUntil = time.Time{} - - var zeroKey K - c.key = zeroKey - - var zeroVal V - c.val = zeroVal -} From 653d0738f9afd9ee4785eff06c4a1908a4e6eaaf Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 15 Nov 2025 13:28:29 -0800 Subject: [PATCH 0692/1093] types/netmap: remove PrivateKey from NetworkMap It's an unnecessary nuisance having it. We go out of our way to redact it in so many places when we don't even need it there anyway. 
Updates #12639 Change-Id: I5fc72e19e9cf36caeb42cf80ba430873f67167c3 Signed-off-by: Brad Fitzpatrick --- client/systray/systray.go | 2 +- cmd/sniproxy/sniproxy.go | 2 +- cmd/stund/depaware.txt | 5 +- cmd/tailscale/cli/debug.go | 21 +- cmd/tailscale/cli/serve_v2.go | 2 +- cmd/tsidp/tsidp.go | 2 +- control/controlclient/map.go | 1 - ipn/backend.go | 2 +- ipn/ipnlocal/c2n.go | 1 - ipn/ipnlocal/c2n_test.go | 345 +------------------------ ipn/ipnlocal/local.go | 43 +-- ipn/ipnlocal/local_test.go | 104 ++++++++ ipn/ipnlocal/profiles.go | 2 + ipn/localapi/localapi.go | 8 - ipn/localapi/localapi_test.go | 19 +- tsconsensus/monitor.go | 4 +- tsnet/tsnet.go | 2 +- tstest/integration/integration_test.go | 6 - tstest/typewalk/typewalk.go | 106 ++++++++ types/key/util.go | 18 ++ types/netmap/netmap.go | 9 +- types/netmap/netmap_test.go | 9 + wgengine/bench/wg.go | 10 +- wgengine/magicsock/magicsock_test.go | 26 +- wgengine/wgcfg/nmcfg/nmcfg.go | 5 +- 25 files changed, 292 insertions(+), 462 deletions(-) create mode 100644 tstest/typewalk/typewalk.go diff --git a/client/systray/systray.go b/client/systray/systray.go index 518b2e989a86f..bc099a1ec23a2 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -512,7 +512,7 @@ func (menu *Menu) watchIPNBus() { } func (menu *Menu) watchIPNBusInner() error { - watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, ipn.NotifyNoPrivateKeys) + watcher, err := menu.lc.WatchIPNBus(menu.bgCtx, 0) if err != nil { return fmt.Errorf("watching ipn bus: %w", err) } diff --git a/cmd/sniproxy/sniproxy.go b/cmd/sniproxy/sniproxy.go index c020b4a1f1605..2115c8095b351 100644 --- a/cmd/sniproxy/sniproxy.go +++ b/cmd/sniproxy/sniproxy.go @@ -141,7 +141,7 @@ func run(ctx context.Context, ts *tsnet.Server, wgPort int, hostname string, pro // in the netmap. // We set the NotifyInitialNetMap flag so we will always get woken with the // current netmap, before only being woken on changes. 
- bus, err := lc.WatchIPNBus(ctx, ipn.NotifyWatchEngineUpdates|ipn.NotifyInitialNetMap|ipn.NotifyNoPrivateKeys) + bus, err := lc.WatchIPNBus(ctx, ipn.NotifyWatchEngineUpdates|ipn.NotifyInitialNetMap) if err != nil { log.Fatalf("watching IPN bus: %v", err) } diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index bd8eebb7b1d27..7b3d05f94ccb2 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -82,8 +82,9 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/mak from tailscale.com/syncs+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb + tailscale.com/util/set from tailscale.com/types/key tailscale.com/util/slicesx from tailscale.com/tailcfg - tailscale.com/util/testenv from tailscale.com/types/logger + tailscale.com/util/testenv from tailscale.com/types/logger+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ tailscale.com/version from tailscale.com/envknob+ tailscale.com/version/distro from tailscale.com/envknob @@ -94,7 +95,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - golang.org/x/exp/constraints from tailscale.com/tsweb/varz + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/sys/cpu from golang.org/x/crypto/blake2b+ LD golang.org/x/sys/unix from github.com/prometheus/procfs+ W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 2836ae29814e7..ffed51a63e112 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -258,7 +258,6 @@ func debugCmd() *ffcli.Command { fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages") 
fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status") fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messags") - fs.BoolVar(&watchIPNArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever") return fs })(), @@ -270,7 +269,6 @@ func debugCmd() *ffcli.Command { ShortHelp: "Print the current network map", FlagSet: (func() *flag.FlagSet { fs := newFlagSet("netmap") - fs.BoolVar(&netmapArgs.showPrivateKey, "show-private-key", false, "include node private key in printed netmap") return fs })(), }, @@ -614,11 +612,10 @@ func runPrefs(ctx context.Context, args []string) error { } var watchIPNArgs struct { - netmap bool - initial bool - showPrivateKey bool - rateLimit bool - count int + netmap bool + initial bool + rateLimit bool + count int } func runWatchIPN(ctx context.Context, args []string) error { @@ -626,9 +623,6 @@ func runWatchIPN(ctx context.Context, args []string) error { if watchIPNArgs.initial { mask = ipn.NotifyInitialState | ipn.NotifyInitialPrefs | ipn.NotifyInitialNetMap } - if !watchIPNArgs.showPrivateKey { - mask |= ipn.NotifyNoPrivateKeys - } if watchIPNArgs.rateLimit { mask |= ipn.NotifyRateLimit } @@ -652,18 +646,11 @@ func runWatchIPN(ctx context.Context, args []string) error { return nil } -var netmapArgs struct { - showPrivateKey bool -} - func runNetmap(ctx context.Context, args []string) error { ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() var mask ipn.NotifyWatchOpt = ipn.NotifyInitialNetMap - if !netmapArgs.showPrivateKey { - mask |= ipn.NotifyNoPrivateKeys - } watcher, err := localClient.WatchIPNBus(ctx, mask) if err != nil { return err diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index e194b1e10c71a..1ce14cf097faf 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ 
-475,7 +475,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { // if foreground mode, create a WatchIPNBus session // and use the nested config for all following operations // TODO(marwan-at-work): nested-config validations should happen here or previous to this point. - watcher, err = e.lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + watcher, err = e.lc.WatchIPNBus(ctx, ipn.NotifyInitialState) if err != nil { return err } diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index c02b09745aec8..7093ab9ee193a 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -287,7 +287,7 @@ func serveOnLocalTailscaled(ctx context.Context, lc *local.Client, st *ipnstate. // We watch the IPN bus just to get a session ID. The session expires // when we stop watching the bus, and that auto-deletes the foreground // serve/funnel configs we are creating below. - watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState) if err != nil { return nil, nil, fmt.Errorf("could not set up ipn bus watcher: %v", err) } diff --git a/control/controlclient/map.go b/control/controlclient/map.go index eafdb2d565a76..a9db25517f87d 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -867,7 +867,6 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { nm := &netmap.NetworkMap{ NodeKey: ms.publicNodeKey, - PrivateKey: ms.privateNodeKey, MachineKey: ms.machinePubKey, Peers: peerViews, UserProfiles: make(map[tailcfg.UserID]tailcfg.UserProfileView), diff --git a/ipn/backend.go b/ipn/backend.go index 91cf81ca52962..b4ba958c5dd1e 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -74,7 +74,7 @@ const ( NotifyInitialPrefs NotifyWatchOpt = 1 << 2 // if set, the first Notify message (sent immediately) will contain the current Prefs NotifyInitialNetMap NotifyWatchOpt = 1 << 3 // if set, the first Notify message (sent immediately) will contain the 
current NetMap - NotifyNoPrivateKeys NotifyWatchOpt = 1 << 4 // if set, private keys that would normally be sent in updates are zeroed out + NotifyNoPrivateKeys NotifyWatchOpt = 1 << 4 // (no-op) it used to redact private keys; now they always are and this does nothing NotifyInitialDriveShares NotifyWatchOpt = 1 << 5 // if set, the first Notify message (sent immediately) will contain the current Taildrive Shares NotifyInitialOutgoingFiles NotifyWatchOpt = 1 << 6 // if set, the first Notify message (sent immediately) will contain the current Taildrop OutgoingFiles diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 0c228060faf63..b5e722b97c4a4 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -179,7 +179,6 @@ func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Reques } field.SetZero() } - nm, _ = redactNetmapPrivateKeys(nm) return json.Marshal(nm) } diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 877d102d0986b..420633c87b554 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -13,21 +13,17 @@ import ( "os" "path/filepath" "reflect" - "strings" "testing" "time" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" "tailscale.com/tstest" - "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" "tailscale.com/util/must" - "tailscale.com/util/set" - "tailscale.com/wgengine/filter/filtertype" gcmp "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -144,338 +140,6 @@ func TestHandleC2NTLSCertStatus(t *testing.T) { } -// eachStructField calls cb for each struct field in struct type tp, recursively. -func eachStructField(tp reflect.Type, cb func(reflect.Type, reflect.StructField)) { - if !strings.HasPrefix(tp.PkgPath(), "tailscale.com/") { - // Stop traversing when we reach a non-tailscale type. 
- return - } - - for i := range tp.NumField() { - cb(tp, tp.Field(i)) - - switch tp.Field(i).Type.Kind() { - case reflect.Struct: - eachStructField(tp.Field(i).Type, cb) - case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: - if tp.Field(i).Type.Elem().Kind() == reflect.Struct { - eachStructField(tp.Field(i).Type.Elem(), cb) - } - } - } -} - -// eachStructValue calls cb for each struct field in the struct value v, recursively. -func eachStructValue(v reflect.Value, cb func(reflect.Type, reflect.StructField, reflect.Value)) { - if v.IsZero() { - return - } - - for i := range v.NumField() { - cb(v.Type(), v.Type().Field(i), v.Field(i)) - - switch v.Type().Field(i).Type.Kind() { - case reflect.Struct: - eachStructValue(v.Field(i), cb) - case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: - if v.Field(i).Type().Elem().Kind() == reflect.Struct { - eachStructValue(v.Field(i).Addr().Elem(), cb) - } - } - } -} - -// TestRedactNetmapPrivateKeys tests that redactNetmapPrivateKeys redacts all private keys -// and other private fields from a netmap.NetworkMap, and only those fields. -func TestRedactNetmapPrivateKeys(t *testing.T) { - type field struct { - t reflect.Type - f string - } - f := func(t any, f string) field { - return field{reflect.TypeOf(t), f} - } - // fields is a map of all struct fields in netmap.NetworkMap and its - // sub-structs, marking each field as private (true) or public (false). - // If you add a new field to netmap.NetworkMap or its sub-structs, - // you must add it to this list, marking it as private or public. - fields := map[field]bool{ - // Private fields to be redacted. - f(netmap.NetworkMap{}, "PrivateKey"): true, - - // All other fields are public. 
- f(netmap.NetworkMap{}, "AllCaps"): false, - f(netmap.NetworkMap{}, "CollectServices"): false, - f(netmap.NetworkMap{}, "DERPMap"): false, - f(netmap.NetworkMap{}, "DNS"): false, - f(netmap.NetworkMap{}, "DisplayMessages"): false, - f(netmap.NetworkMap{}, "Domain"): false, - f(netmap.NetworkMap{}, "DomainAuditLogID"): false, - f(netmap.NetworkMap{}, "Expiry"): false, - f(netmap.NetworkMap{}, "MachineKey"): false, - f(netmap.NetworkMap{}, "Name"): false, - f(netmap.NetworkMap{}, "NodeKey"): false, - f(netmap.NetworkMap{}, "PacketFilter"): false, - f(netmap.NetworkMap{}, "PacketFilterRules"): false, - f(netmap.NetworkMap{}, "Peers"): false, - f(netmap.NetworkMap{}, "SSHPolicy"): false, - f(netmap.NetworkMap{}, "SelfNode"): false, - f(netmap.NetworkMap{}, "TKAEnabled"): false, - f(netmap.NetworkMap{}, "TKAHead"): false, - f(netmap.NetworkMap{}, "UserProfiles"): false, - f(filtertype.CapMatch{}, "Cap"): false, - f(filtertype.CapMatch{}, "Dst"): false, - f(filtertype.CapMatch{}, "Values"): false, - f(filtertype.Match{}, "Caps"): false, - f(filtertype.Match{}, "Dsts"): false, - f(filtertype.Match{}, "IPProto"): false, - f(filtertype.Match{}, "SrcCaps"): false, - f(filtertype.Match{}, "Srcs"): false, - f(filtertype.Match{}, "SrcsContains"): false, - f(filtertype.NetPortRange{}, "Net"): false, - f(filtertype.NetPortRange{}, "Ports"): false, - f(filtertype.PortRange{}, "First"): false, - f(filtertype.PortRange{}, "Last"): false, - f(key.DiscoPublic{}, "k"): false, - f(key.MachinePublic{}, "k"): false, - f(key.NodePrivate{}, "_"): false, - f(key.NodePrivate{}, "k"): false, - f(key.NodePublic{}, "k"): false, - f(tailcfg.CapGrant{}, "CapMap"): false, - f(tailcfg.CapGrant{}, "Caps"): false, - f(tailcfg.CapGrant{}, "Dsts"): false, - f(tailcfg.DERPHomeParams{}, "RegionScore"): false, - f(tailcfg.DERPMap{}, "HomeParams"): false, - f(tailcfg.DERPMap{}, "OmitDefaultRegions"): false, - f(tailcfg.DERPMap{}, "Regions"): false, - f(tailcfg.DNSConfig{}, "CertDomains"): false, - 
f(tailcfg.DNSConfig{}, "Domains"): false, - f(tailcfg.DNSConfig{}, "ExitNodeFilteredSet"): false, - f(tailcfg.DNSConfig{}, "ExtraRecords"): false, - f(tailcfg.DNSConfig{}, "FallbackResolvers"): false, - f(tailcfg.DNSConfig{}, "Nameservers"): false, - f(tailcfg.DNSConfig{}, "Proxied"): false, - f(tailcfg.DNSConfig{}, "Resolvers"): false, - f(tailcfg.DNSConfig{}, "Routes"): false, - f(tailcfg.DNSConfig{}, "TempCorpIssue13969"): false, - f(tailcfg.DNSRecord{}, "Name"): false, - f(tailcfg.DNSRecord{}, "Type"): false, - f(tailcfg.DNSRecord{}, "Value"): false, - f(tailcfg.DisplayMessageAction{}, "Label"): false, - f(tailcfg.DisplayMessageAction{}, "URL"): false, - f(tailcfg.DisplayMessage{}, "ImpactsConnectivity"): false, - f(tailcfg.DisplayMessage{}, "PrimaryAction"): false, - f(tailcfg.DisplayMessage{}, "Severity"): false, - f(tailcfg.DisplayMessage{}, "Text"): false, - f(tailcfg.DisplayMessage{}, "Title"): false, - f(tailcfg.FilterRule{}, "CapGrant"): false, - f(tailcfg.FilterRule{}, "DstPorts"): false, - f(tailcfg.FilterRule{}, "IPProto"): false, - f(tailcfg.FilterRule{}, "SrcBits"): false, - f(tailcfg.FilterRule{}, "SrcIPs"): false, - f(tailcfg.HostinfoView{}, "ж"): false, - f(tailcfg.Hostinfo{}, "AllowsUpdate"): false, - f(tailcfg.Hostinfo{}, "App"): false, - f(tailcfg.Hostinfo{}, "AppConnector"): false, - f(tailcfg.Hostinfo{}, "BackendLogID"): false, - f(tailcfg.Hostinfo{}, "Cloud"): false, - f(tailcfg.Hostinfo{}, "Container"): false, - f(tailcfg.Hostinfo{}, "Desktop"): false, - f(tailcfg.Hostinfo{}, "DeviceModel"): false, - f(tailcfg.Hostinfo{}, "Distro"): false, - f(tailcfg.Hostinfo{}, "DistroCodeName"): false, - f(tailcfg.Hostinfo{}, "DistroVersion"): false, - f(tailcfg.Hostinfo{}, "Env"): false, - f(tailcfg.Hostinfo{}, "ExitNodeID"): false, - f(tailcfg.Hostinfo{}, "FrontendLogID"): false, - f(tailcfg.Hostinfo{}, "GoArch"): false, - f(tailcfg.Hostinfo{}, "GoArchVar"): false, - f(tailcfg.Hostinfo{}, "GoVersion"): false, - f(tailcfg.Hostinfo{}, "Hostname"): 
false, - f(tailcfg.Hostinfo{}, "IPNVersion"): false, - f(tailcfg.Hostinfo{}, "IngressEnabled"): false, - f(tailcfg.Hostinfo{}, "Location"): false, - f(tailcfg.Hostinfo{}, "Machine"): false, - f(tailcfg.Hostinfo{}, "NetInfo"): false, - f(tailcfg.Hostinfo{}, "NoLogsNoSupport"): false, - f(tailcfg.Hostinfo{}, "OS"): false, - f(tailcfg.Hostinfo{}, "OSVersion"): false, - f(tailcfg.Hostinfo{}, "Package"): false, - f(tailcfg.Hostinfo{}, "PushDeviceToken"): false, - f(tailcfg.Hostinfo{}, "RequestTags"): false, - f(tailcfg.Hostinfo{}, "RoutableIPs"): false, - f(tailcfg.Hostinfo{}, "SSH_HostKeys"): false, - f(tailcfg.Hostinfo{}, "Services"): false, - f(tailcfg.Hostinfo{}, "ServicesHash"): false, - f(tailcfg.Hostinfo{}, "ShareeNode"): false, - f(tailcfg.Hostinfo{}, "ShieldsUp"): false, - f(tailcfg.Hostinfo{}, "StateEncrypted"): false, - f(tailcfg.Hostinfo{}, "TPM"): false, - f(tailcfg.Hostinfo{}, "Userspace"): false, - f(tailcfg.Hostinfo{}, "UserspaceRouter"): false, - f(tailcfg.Hostinfo{}, "WireIngress"): false, - f(tailcfg.Hostinfo{}, "WoLMACs"): false, - f(tailcfg.Location{}, "City"): false, - f(tailcfg.Location{}, "CityCode"): false, - f(tailcfg.Location{}, "Country"): false, - f(tailcfg.Location{}, "CountryCode"): false, - f(tailcfg.Location{}, "Latitude"): false, - f(tailcfg.Location{}, "Longitude"): false, - f(tailcfg.Location{}, "Priority"): false, - f(tailcfg.NetInfo{}, "DERPLatency"): false, - f(tailcfg.NetInfo{}, "FirewallMode"): false, - f(tailcfg.NetInfo{}, "HavePortMap"): false, - f(tailcfg.NetInfo{}, "LinkType"): false, - f(tailcfg.NetInfo{}, "MappingVariesByDestIP"): false, - f(tailcfg.NetInfo{}, "OSHasIPv6"): false, - f(tailcfg.NetInfo{}, "PCP"): false, - f(tailcfg.NetInfo{}, "PMP"): false, - f(tailcfg.NetInfo{}, "PreferredDERP"): false, - f(tailcfg.NetInfo{}, "UPnP"): false, - f(tailcfg.NetInfo{}, "WorkingICMPv4"): false, - f(tailcfg.NetInfo{}, "WorkingIPv6"): false, - f(tailcfg.NetInfo{}, "WorkingUDP"): false, - f(tailcfg.NetPortRange{}, "Bits"): false, - 
f(tailcfg.NetPortRange{}, "IP"): false, - f(tailcfg.NetPortRange{}, "Ports"): false, - f(tailcfg.NetPortRange{}, "_"): false, - f(tailcfg.NodeView{}, "ж"): false, - f(tailcfg.Node{}, "Addresses"): false, - f(tailcfg.Node{}, "AllowedIPs"): false, - f(tailcfg.Node{}, "Cap"): false, - f(tailcfg.Node{}, "CapMap"): false, - f(tailcfg.Node{}, "Capabilities"): false, - f(tailcfg.Node{}, "ComputedName"): false, - f(tailcfg.Node{}, "ComputedNameWithHost"): false, - f(tailcfg.Node{}, "Created"): false, - f(tailcfg.Node{}, "DataPlaneAuditLogID"): false, - f(tailcfg.Node{}, "DiscoKey"): false, - f(tailcfg.Node{}, "Endpoints"): false, - f(tailcfg.Node{}, "ExitNodeDNSResolvers"): false, - f(tailcfg.Node{}, "Expired"): false, - f(tailcfg.Node{}, "HomeDERP"): false, - f(tailcfg.Node{}, "Hostinfo"): false, - f(tailcfg.Node{}, "ID"): false, - f(tailcfg.Node{}, "IsJailed"): false, - f(tailcfg.Node{}, "IsWireGuardOnly"): false, - f(tailcfg.Node{}, "Key"): false, - f(tailcfg.Node{}, "KeyExpiry"): false, - f(tailcfg.Node{}, "KeySignature"): false, - f(tailcfg.Node{}, "LastSeen"): false, - f(tailcfg.Node{}, "LegacyDERPString"): false, - f(tailcfg.Node{}, "Machine"): false, - f(tailcfg.Node{}, "MachineAuthorized"): false, - f(tailcfg.Node{}, "Name"): false, - f(tailcfg.Node{}, "Online"): false, - f(tailcfg.Node{}, "PrimaryRoutes"): false, - f(tailcfg.Node{}, "SelfNodeV4MasqAddrForThisPeer"): false, - f(tailcfg.Node{}, "SelfNodeV6MasqAddrForThisPeer"): false, - f(tailcfg.Node{}, "Sharer"): false, - f(tailcfg.Node{}, "StableID"): false, - f(tailcfg.Node{}, "Tags"): false, - f(tailcfg.Node{}, "UnsignedPeerAPIOnly"): false, - f(tailcfg.Node{}, "User"): false, - f(tailcfg.Node{}, "computedHostIfDifferent"): false, - f(tailcfg.PortRange{}, "First"): false, - f(tailcfg.PortRange{}, "Last"): false, - f(tailcfg.SSHPolicy{}, "Rules"): false, - f(tailcfg.Service{}, "Description"): false, - f(tailcfg.Service{}, "Port"): false, - f(tailcfg.Service{}, "Proto"): false, - f(tailcfg.Service{}, "_"): 
false, - f(tailcfg.TPMInfo{}, "FamilyIndicator"): false, - f(tailcfg.TPMInfo{}, "FirmwareVersion"): false, - f(tailcfg.TPMInfo{}, "Manufacturer"): false, - f(tailcfg.TPMInfo{}, "Model"): false, - f(tailcfg.TPMInfo{}, "SpecRevision"): false, - f(tailcfg.TPMInfo{}, "Vendor"): false, - f(tailcfg.UserProfileView{}, "ж"): false, - f(tailcfg.UserProfile{}, "DisplayName"): false, - f(tailcfg.UserProfile{}, "ID"): false, - f(tailcfg.UserProfile{}, "LoginName"): false, - f(tailcfg.UserProfile{}, "ProfilePicURL"): false, - f(views.Slice[ipproto.Proto]{}, "ж"): false, - f(views.Slice[tailcfg.FilterRule]{}, "ж"): false, - } - - t.Run("field_list_is_complete", func(t *testing.T) { - seen := set.Set[field]{} - eachStructField(reflect.TypeOf(netmap.NetworkMap{}), func(rt reflect.Type, sf reflect.StructField) { - f := field{rt, sf.Name} - seen.Add(f) - if _, ok := fields[f]; !ok { - // Fail the test if netmap has a field not in the list. If you see this test - // failure, please add the new field to the fields map above, marking it as private or public. - t.Errorf("netmap field has not been declared as private or public: %v.%v", rt, sf.Name) - } - }) - - for want := range fields { - if !seen.Contains(want) { - // Fail the test if the list has a field not in netmap. If you see this test - // failure, please remove the field from the fields map above. - t.Errorf("field declared that has not been found in netmap: %v.%v", want.t, want.f) - } - } - }) - - // tests is a list of test cases, each with a non-redacted netmap and the expected redacted netmap. - // If you add a new private field to netmap.NetworkMap or its sub-structs, please add a test case - // here that has that field set in nm, and the expected redacted value in wantRedacted. 
- tests := []struct { - name string - nm *netmap.NetworkMap - wantRedacted *netmap.NetworkMap - }{ - { - name: "redact_private_key", - nm: &netmap.NetworkMap{ - PrivateKey: key.NewNode(), - }, - wantRedacted: &netmap.NetworkMap{}, - }, - } - - // confirmedRedacted is a set of all private fields that have been covered by the tests above. - confirmedRedacted := set.Set[field]{} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - // Record which of the private fields are set in the non-redacted netmap. - eachStructValue(reflect.ValueOf(tt.nm).Elem(), func(tt reflect.Type, sf reflect.StructField, v reflect.Value) { - f := field{tt, sf.Name} - if shouldRedact := fields[f]; shouldRedact && !v.IsZero() { - confirmedRedacted.Add(f) - } - }) - - got, _ := redactNetmapPrivateKeys(tt.nm) - if !reflect.DeepEqual(got, tt.wantRedacted) { - t.Errorf("unexpected redacted netmap: %+v", got) - } - - // Check that all private fields in the redacted netmap are zero. - eachStructValue(reflect.ValueOf(got).Elem(), func(tt reflect.Type, sf reflect.StructField, v reflect.Value) { - f := field{tt, sf.Name} - if shouldRedact := fields[f]; shouldRedact && !v.IsZero() { - t.Errorf("field not redacted: %v.%v", tt, sf.Name) - } - }) - }) - } - - // Check that all private fields in netmap.NetworkMap and its sub-structs - // are covered by the tests above. If you see a test failure here, - // please add a test case above that has that field set in nm. 
- for f, shouldRedact := range fields { - if shouldRedact { - if !confirmedRedacted.Contains(f) { - t.Errorf("field not covered by tests: %v.%v", f.t, f.f) - } - } - } -} - func TestHandleC2NDebugNetmap(t *testing.T) { nm := &netmap.NetworkMap{ Name: "myhost", @@ -495,10 +159,7 @@ func TestHandleC2NDebugNetmap(t *testing.T) { Hostinfo: (&tailcfg.Hostinfo{Hostname: "peer1"}).View(), }).View(), }, - PrivateKey: key.NewNode(), } - withoutPrivateKey := *nm - withoutPrivateKey.PrivateKey = key.NodePrivate{} for _, tt := range []struct { name string @@ -507,12 +168,12 @@ func TestHandleC2NDebugNetmap(t *testing.T) { }{ { name: "simple_get", - want: &withoutPrivateKey, + want: nm, }, { name: "post_no_omit", req: &tailcfg.C2NDebugNetmapRequest{}, - want: &withoutPrivateKey, + want: nm, }, { name: "post_omit_peers_and_name", @@ -524,7 +185,7 @@ func TestHandleC2NDebugNetmap(t *testing.T) { { name: "post_omit_nonexistent_field", req: &tailcfg.C2NDebugNetmapRequest{OmitFields: []string{"ThisFieldDoesNotExist"}}, - want: &withoutPrivateKey, + want: nm, }, } { t.Run(tt.name, func(t *testing.T) { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 41d110400169f..9de1f3d85531b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3052,9 +3052,6 @@ func (b *LocalBackend) WatchNotifications(ctx context.Context, mask ipn.NotifyWa func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.Actor, mask ipn.NotifyWatchOpt, onWatchAdded func(), fn func(roNotify *ipn.Notify) (keepGoing bool)) { ch := make(chan *ipn.Notify, 128) sessionID := rands.HexString(16) - if mask&ipn.NotifyNoPrivateKeys != 0 { - fn = filterPrivateKeys(fn) - } if mask&ipn.NotifyHealthActions == 0 { // if UI does not support PrimaryAction in health warnings, append // action URLs to the warning text instead. 
@@ -3154,39 +3151,6 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A sender.Run(ctx, ch) } -// filterPrivateKeys returns an IPN listener func that wraps the supplied IPN -// listener and zeroes out the PrivateKey in the NetMap passed to the wrapped -// listener. -func filterPrivateKeys(fn func(roNotify *ipn.Notify) (keepGoing bool)) func(*ipn.Notify) bool { - return func(n *ipn.Notify) bool { - redacted, changed := redactNetmapPrivateKeys(n.NetMap) - if !changed { - return fn(n) - } - - // The netmap in n is shared across all watchers, so to mutate it for a - // single watcher we have to clone the notify and the netmap. We can - // make shallow clones, at least. - n2 := *n - n2.NetMap = redacted - return fn(&n2) - } -} - -// redactNetmapPrivateKeys returns a copy of nm with private keys zeroed out. -// If no change was needed, it returns nm unmodified. -func redactNetmapPrivateKeys(nm *netmap.NetworkMap) (redacted *netmap.NetworkMap, changed bool) { - if nm == nil || nm.PrivateKey.IsZero() { - return nm, false - } - - // The netmap might be shared across watchers, so make at least a shallow - // clone before mutating it. - nm2 := *nm - nm2.PrivateKey = key.NodePrivate{} - return &nm2, true -} - // appendHealthActions returns an IPN listener func that wraps the supplied IPN // listener func and transforms health messages passed to the wrapped listener. 
// If health messages with PrimaryActions are present, it appends the label & @@ -5087,7 +5051,12 @@ func (b *LocalBackend) authReconfigLocked() { } } - cfg, err := nmcfg.WGCfg(nm, b.logf, flags, prefs.ExitNodeID()) + priv := b.pm.CurrentPrefs().Persist().PrivateNodeKey() + if !priv.IsZero() && priv.Public() != nm.NodeKey { + priv = key.NodePrivate{} + } + + cfg, err := nmcfg.WGCfg(priv, nm, b.logf, flags, prefs.ExitNodeID()) if err != nil { b.logf("wgcfg: %v", err) return diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 962335046024c..5df0ae5bbe920 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -20,6 +20,7 @@ import ( "slices" "strings" "sync" + "sync/atomic" "testing" "time" @@ -49,6 +50,7 @@ import ( "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/tstest/deptest" + "tailscale.com/tstest/typewalk" "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/ipproto" @@ -57,6 +59,7 @@ import ( "tailscale.com/types/logid" "tailscale.com/types/netmap" "tailscale.com/types/opt" + "tailscale.com/types/persist" "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/dnsname" @@ -7112,3 +7115,104 @@ func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error { return nil } } + +type fakeAttestationKey struct{ key.HardwareAttestationKey } + +func (f *fakeAttestationKey) Clone() key.HardwareAttestationKey { + return &fakeAttestationKey{} +} + +// TestStripKeysFromPrefs tests that LocalBackend's [stripKeysFromPrefs] (as used +// by sendNotify etc) correctly removes all private keys from an ipn.Notify. +// +// It does so by testing the the two ways that Notifys are sent: via sendNotify, +// and via extension hooks. +func TestStripKeysFromPrefs(t *testing.T) { + // genNotify generates a sample ipn.Notify with various private keys set + // at a certain path through the Notify data structure. 
+ genNotify := map[string]func() ipn.Notify{ + "Notify.Prefs.ж.Persist.PrivateNodeKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: ptr.To((&ipn.Prefs{ + Persist: &persist.Persist{PrivateNodeKey: key.NewNode()}, + }).View()), + } + }, + "Notify.Prefs.ж.Persist.OldPrivateNodeKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: ptr.To((&ipn.Prefs{ + Persist: &persist.Persist{OldPrivateNodeKey: key.NewNode()}, + }).View()), + } + }, + "Notify.Prefs.ж.Persist.NetworkLockKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: ptr.To((&ipn.Prefs{ + Persist: &persist.Persist{NetworkLockKey: key.NewNLPrivate()}, + }).View()), + } + }, + "Notify.Prefs.ж.Persist.AttestationKey": func() ipn.Notify { + return ipn.Notify{ + Prefs: ptr.To((&ipn.Prefs{ + Persist: &persist.Persist{AttestationKey: new(fakeAttestationKey)}, + }).View()), + } + }, + } + + private := key.PrivateTypesForTest() + + for path := range typewalk.MatchingPaths(reflect.TypeFor[ipn.Notify](), private.Contains) { + t.Run(path.Name, func(t *testing.T) { + gen, ok := genNotify[path.Name] + if !ok { + t.Fatalf("no genNotify function for path %q", path.Name) + } + withKey := gen() + + if path.Walk(reflect.ValueOf(withKey)).IsZero() { + t.Fatalf("generated notify does not have non-zero value at path %q", path.Name) + } + + h := &ExtensionHost{} + ch := make(chan *ipn.Notify, 1) + b := &LocalBackend{ + extHost: h, + notifyWatchers: map[string]*watchSession{ + "test": {ch: ch}, + }, + } + + var okay atomic.Int32 + testNotify := func(via string) func(*ipn.Notify) { + return func(n *ipn.Notify) { + if n == nil { + t.Errorf("notify from %s is nil", via) + return + } + if !path.Walk(reflect.ValueOf(*n)).IsZero() { + t.Errorf("notify from %s has non-zero value at path %q; key not stripped", via, path.Name) + } else { + okay.Add(1) + } + } + } + + h.Hooks().MutateNotifyLocked.Add(testNotify("MutateNotifyLocked hook")) + + b.send(withKey) + + select { + case n := <-ch: + testNotify("watchSession")(n) + default: + 
t.Errorf("no notify sent to watcher channel") + } + + if got := okay.Load(); got != 2 { + t.Errorf("notify passed validation %d times; want 2", got) + } + }) + } +} diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 9c217637890cc..40a3c9887b2ff 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -24,6 +24,7 @@ import ( "tailscale.com/types/persist" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" + "tailscale.com/util/testenv" ) var debug = envknob.RegisterBool("TS_DEBUG_PROFILES") @@ -849,6 +850,7 @@ func (pm *profileManager) CurrentPrefs() ipn.PrefsView { // ReadStartupPrefsForTest reads the startup prefs from disk. It is only used for testing. func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsView, error) { + testenv.AssertInTest() bus := eventbus.New() defer bus.Close() ht := health.NewTracker(bus) // in tests, don't care about the health status diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index de5ff53ac9d83..ddd55234ae84d 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -877,14 +877,6 @@ func (h *Handler) serveWatchIPNBus(w http.ResponseWriter, r *http.Request) { } mask = ipn.NotifyWatchOpt(v) } - // Users with only read access must request private key filtering. If they - // don't filter out private keys, require write access. 
- if (mask & ipn.NotifyNoPrivateKeys) == 0 { - if !h.PermitWrite { - http.Error(w, "watch IPN bus access denied, must set ipn.NotifyNoPrivateKeys when not running as admin/root or operator", http.StatusForbidden) - return - } - } w.Header().Set("Content-Type", "application/json") ctx := r.Context() diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index fa24717f7a942..d00b4117be43d 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -263,13 +263,17 @@ func TestShouldDenyServeConfigForGOOSAndUserContext(t *testing.T) { }) } +// TestServeWatchIPNBus used to test that various WatchIPNBus mask flags +// changed the permissions required to access the endpoint. +// However, since the removal of the NotifyNoPrivateKeys flag requirement +// for read-only users, this test now only verifies that the endpoint +// behaves correctly based on the PermitRead and PermitWrite settings. func TestServeWatchIPNBus(t *testing.T) { tstest.Replace(t, &validLocalHostForTesting, true) tests := []struct { desc string permitRead, permitWrite bool - mask ipn.NotifyWatchOpt // extra bits in addition to ipn.NotifyInitialState wantStatus int }{ { @@ -279,20 +283,13 @@ func TestServeWatchIPNBus(t *testing.T) { wantStatus: http.StatusForbidden, }, { - desc: "read-initial-state", + desc: "read-only", permitRead: true, permitWrite: false, - wantStatus: http.StatusForbidden, - }, - { - desc: "read-initial-state-no-private-keys", - permitRead: true, - permitWrite: false, - mask: ipn.NotifyNoPrivateKeys, wantStatus: http.StatusOK, }, { - desc: "read-initial-state-with-private-keys", + desc: "read-and-write", permitRead: true, permitWrite: true, wantStatus: http.StatusOK, @@ -311,7 +308,7 @@ func TestServeWatchIPNBus(t *testing.T) { c := s.Client() ctx, cancel := context.WithCancel(context.Background()) - req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/localapi/v0/watch-ipn-bus?mask=%d", s.URL, ipn.NotifyInitialState|tt.mask), 
nil) + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/localapi/v0/watch-ipn-bus?mask=%d", s.URL, ipn.NotifyInitialState), nil) if err != nil { t.Fatal(err) } diff --git a/tsconsensus/monitor.go b/tsconsensus/monitor.go index 61a5a74a07c42..2aa4c863b3e4c 100644 --- a/tsconsensus/monitor.go +++ b/tsconsensus/monitor.go @@ -102,15 +102,13 @@ func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) { } func (m *monitor) handleNetmap(w http.ResponseWriter, r *http.Request) { - var mask ipn.NotifyWatchOpt = ipn.NotifyInitialNetMap - mask |= ipn.NotifyNoPrivateKeys lc, err := m.ts.LocalClient() if err != nil { log.Printf("monitor: error LocalClient: %v", err) http.Error(w, "", http.StatusInternalServerError) return } - watcher, err := lc.WatchIPNBus(r.Context(), mask) + watcher, err := lc.WatchIPNBus(r.Context(), ipn.NotifyInitialNetMap) if err != nil { log.Printf("monitor: error WatchIPNBus: %v", err) http.Error(w, "", http.StatusInternalServerError) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 2944f63595a48..14747650f42ee 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -350,7 +350,7 @@ func (s *Server) Up(ctx context.Context) (*ipnstate.Status, error) { return nil, fmt.Errorf("tsnet.Up: %w", err) } - watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState|ipn.NotifyNoPrivateKeys) + watcher, err := lc.WatchIPNBus(ctx, ipn.NotifyInitialState) if err != nil { return nil, fmt.Errorf("tsnet.Up: %w", err) } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 64f49c7b80afd..9d75cfc29fbb8 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -2128,16 +2128,10 @@ func TestC2NDebugNetmap(t *testing.T) { var current netmap.NetworkMap must.Do(json.Unmarshal(resp.Current, ¤t)) - if !current.PrivateKey.IsZero() { - t.Errorf("current netmap has non-zero private key: %v", current.PrivateKey) - } // Check candidate netmap if we sent a map 
response. if cand != nil { var candidate netmap.NetworkMap must.Do(json.Unmarshal(resp.Candidate, &candidate)) - if !candidate.PrivateKey.IsZero() { - t.Errorf("candidate netmap has non-zero private key: %v", candidate.PrivateKey) - } if diff := cmp.Diff(current.SelfNode, candidate.SelfNode); diff != "" { t.Errorf("SelfNode differs (-current +candidate):\n%s", diff) } diff --git a/tstest/typewalk/typewalk.go b/tstest/typewalk/typewalk.go new file mode 100644 index 0000000000000..b22505351b1a2 --- /dev/null +++ b/tstest/typewalk/typewalk.go @@ -0,0 +1,106 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package typewalk provides utilities to walk Go types using reflection. +package typewalk + +import ( + "iter" + "reflect" + "strings" +) + +// Path describes a path via a type where a private key may be found, +// along with a function to test whether a reflect.Value at that path is +// non-zero. +type Path struct { + // Name is the path from the root type, suitable for using as a t.Run name. + Name string + + // Walk returns the reflect.Value at the end of the path, given a root + // reflect.Value. + Walk func(root reflect.Value) (leaf reflect.Value) +} + +// MatchingPaths returns a sequence of [Path] for all paths +// within the given type that end in a type matching match. +func MatchingPaths(rt reflect.Type, match func(reflect.Type) bool) iter.Seq[Path] { + // valFromRoot is a function that, given a reflect.Value of the root struct, + // returns the reflect.Value at some path within it. 
+ type valFromRoot func(reflect.Value) reflect.Value + + return func(yield func(Path) bool) { + var walk func(reflect.Type, valFromRoot) + var path []string + var done bool + seen := map[reflect.Type]bool{} + + walk = func(t reflect.Type, getV valFromRoot) { + if seen[t] { + return + } + seen[t] = true + defer func() { seen[t] = false }() + if done { + return + } + if match(t) { + if !yield(Path{ + Name: strings.Join(path, "."), + Walk: getV, + }) { + done = true + } + return + } + switch t.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Array: + walk(t.Elem(), func(root reflect.Value) reflect.Value { + v := getV(root) + return v.Elem() + }) + case reflect.Struct: + for i := range t.NumField() { + sf := t.Field(i) + fieldName := sf.Name + if fieldName == "_" { + continue + } + path = append(path, fieldName) + walk(sf.Type, func(root reflect.Value) reflect.Value { + return getV(root).FieldByName(fieldName) + }) + path = path[:len(path)-1] + if done { + return + } + } + case reflect.Map: + walk(t.Elem(), func(root reflect.Value) reflect.Value { + v := getV(root) + if v.Len() == 0 { + return reflect.Zero(t.Elem()) + } + iter := v.MapRange() + iter.Next() + return iter.Value() + }) + if done { + return + } + walk(t.Key(), func(root reflect.Value) reflect.Value { + v := getV(root) + if v.Len() == 0 { + return reflect.Zero(t.Key()) + } + iter := v.MapRange() + iter.Next() + return iter.Key() + }) + } + } + + path = append(path, rt.Name()) + walk(rt, func(v reflect.Value) reflect.Value { return v }) + } +} diff --git a/types/key/util.go b/types/key/util.go index bdb2a06f68e67..50fac827556aa 100644 --- a/types/key/util.go +++ b/types/key/util.go @@ -10,9 +10,12 @@ import ( "errors" "fmt" "io" + "reflect" "slices" "go4.org/mem" + "tailscale.com/util/set" + "tailscale.com/util/testenv" ) // rand fills b with cryptographically strong random bytes. 
Panics if @@ -115,3 +118,18 @@ func debug32(k [32]byte) string { dst[6] = ']' return string(dst[:7]) } + +// PrivateTypesForTest returns the set of private key types +// in this package, for testing purposes. +func PrivateTypesForTest() set.Set[reflect.Type] { + testenv.AssertInTest() + return set.Of( + reflect.TypeFor[ChallengePrivate](), + reflect.TypeFor[ControlPrivate](), + reflect.TypeFor[DiscoPrivate](), + reflect.TypeFor[MachinePrivate](), + reflect.TypeFor[NodePrivate](), + reflect.TypeFor[NLPrivate](), + reflect.TypeFor[HardwareAttestationKey](), + ) +} diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index cc6bec1db8edb..0a2f3ea71fd09 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -26,11 +26,10 @@ import ( // The fields should all be considered read-only. They might // alias parts of previous NetworkMap values. type NetworkMap struct { - SelfNode tailcfg.NodeView - AllCaps set.Set[tailcfg.NodeCapability] // set version of SelfNode.Capabilities + SelfNode.CapMap - NodeKey key.NodePublic - PrivateKey key.NodePrivate - Expiry time.Time + SelfNode tailcfg.NodeView + AllCaps set.Set[tailcfg.NodeCapability] // set version of SelfNode.Capabilities + SelfNode.CapMap + NodeKey key.NodePublic + Expiry time.Time // Name is the DNS name assigned to this node. // It is the MapResponse.Node.Name value and ends with a period. 
Name string diff --git a/types/netmap/netmap_test.go b/types/netmap/netmap_test.go index 40f504741bfea..ee4fecdb4ff4e 100644 --- a/types/netmap/netmap_test.go +++ b/types/netmap/netmap_test.go @@ -6,11 +6,13 @@ package netmap import ( "encoding/hex" "net/netip" + "reflect" "testing" "go4.org/mem" "tailscale.com/net/netaddr" "tailscale.com/tailcfg" + "tailscale.com/tstest/typewalk" "tailscale.com/types/key" ) @@ -316,3 +318,10 @@ func TestPeerIndexByNodeID(t *testing.T) { } } } + +func TestNoPrivateKeyMaterial(t *testing.T) { + private := key.PrivateTypesForTest() + for path := range typewalk.MatchingPaths(reflect.TypeFor[NetworkMap](), private.Contains) { + t.Errorf("NetworkMap contains private key material at path: %q", path.Name) + } +} diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index f0fa38bf97198..ce6add866f9e8 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -111,9 +111,8 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. Endpoints: epFromTyped(st.LocalAddrs), } e2.SetNetworkMap(&netmap.NetworkMap{ - NodeKey: k2.Public(), - PrivateKey: k2, - Peers: []tailcfg.NodeView{n.View()}, + NodeKey: k2.Public(), + Peers: []tailcfg.NodeView{n.View()}, }) p := wgcfg.Peer{ @@ -143,9 +142,8 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. 
Endpoints: epFromTyped(st.LocalAddrs), } e1.SetNetworkMap(&netmap.NetworkMap{ - NodeKey: k1.Public(), - PrivateKey: k1, - Peers: []tailcfg.NodeView{n.View()}, + NodeKey: k1.Public(), + Peers: []tailcfg.NodeView{n.View()}, }) p := wgcfg.Peer{ diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index e91dac2ec1874..09c54f504bd92 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -308,8 +308,7 @@ func meshStacks(logf logger.Logf, mutateNetmap func(idx int, nm *netmap.NetworkM buildNetmapLocked := func(myIdx int) *netmap.NetworkMap { me := ms[myIdx] nm := &netmap.NetworkMap{ - PrivateKey: me.privateKey, - NodeKey: me.privateKey.Public(), + NodeKey: me.privateKey.Public(), SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{netip.PrefixFrom(netaddr.IPv4(1, 0, 0, byte(myIdx+1)), 32)}, }).View(), @@ -356,7 +355,7 @@ func meshStacks(logf logger.Logf, mutateNetmap func(idx int, nm *netmap.NetworkM peerSet.Add(peer.Key()) } m.conn.UpdatePeers(peerSet) - wg, err := nmcfg.WGCfg(nm, logf, 0, "") + wg, err := nmcfg.WGCfg(ms[i].privateKey, nm, logf, 0, "") if err != nil { // We're too far from the *testing.T to be graceful, // blow up. Shouldn't happen anyway. 
@@ -2201,9 +2200,8 @@ func TestIsWireGuardOnlyPeer(t *testing.T) { defer m.Close() nm := &netmap.NetworkMap{ - Name: "ts", - PrivateKey: m.privateKey, - NodeKey: m.privateKey.Public(), + Name: "ts", + NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{tsaip}, }).View(), @@ -2224,7 +2222,7 @@ func TestIsWireGuardOnlyPeer(t *testing.T) { } m.conn.onNodeViewsUpdate(nv) - cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "") + cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { t.Fatal(err) } @@ -2266,9 +2264,8 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { defer m.Close() nm := &netmap.NetworkMap{ - Name: "ts", - PrivateKey: m.privateKey, - NodeKey: m.privateKey.Public(), + Name: "ts", + NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{tsaip}, }).View(), @@ -2290,7 +2287,7 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { } m.conn.onNodeViewsUpdate(nv) - cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "") + cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { t.Fatal(err) } @@ -2334,7 +2331,7 @@ func applyNetworkMap(t *testing.T, m *magicStack, nm *netmap.NetworkMap) { m.conn.noV6.Store(true) // Turn the network map into a wireguard config (for the tailscale internal wireguard device). 
- cfg, err := nmcfg.WGCfg(nm, t.Logf, netmap.AllowSubnetRoutes, "") + cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { t.Fatal(err) } @@ -2403,9 +2400,8 @@ func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) { wgEpV6 := netip.MustParseAddrPort(v6.LocalAddr().String()) nm := &netmap.NetworkMap{ - Name: "ts", - PrivateKey: m.privateKey, - NodeKey: m.privateKey.Public(), + Name: "ts", + NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{tsaip}, }).View(), diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index 28d5345d6108c..487e78d81218d 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -12,6 +12,7 @@ import ( "strings" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netmap" @@ -41,9 +42,9 @@ func cidrIsSubnet(node tailcfg.NodeView, cidr netip.Prefix) bool { } // WGCfg returns the NetworkMaps's WireGuard configuration. -func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, exitNode tailcfg.StableNodeID) (*wgcfg.Config, error) { +func WGCfg(pk key.NodePrivate, nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags, exitNode tailcfg.StableNodeID) (*wgcfg.Config, error) { cfg := &wgcfg.Config{ - PrivateKey: nm.PrivateKey, + PrivateKey: pk, Addresses: nm.GetAddresses().AsSlice(), Peers: make([]wgcfg.Peer, 0, len(nm.Peers)), } From 3a41c0c585f4b008c07a02ae91ed43cdbb62c721 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Mon, 20 Oct 2025 11:40:30 -0400 Subject: [PATCH 0693/1093] ipn/ipnlocal: add PROXY protocol support to Funnel/Serve This adds the --proxy-protocol flag to 'tailscale serve' and 'tailscale funnel', which tells the Tailscale client to prepend a PROXY protocol[1] header when making connections to the proxied-to backend. 
I've verified that this works with our existing funnel servers without additional work, since they pass along source address information via PeerAPI already. Updates #7747 [1]: https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt Change-Id: I647c24d319375c1b33e995555a541b7615d2d203 Signed-off-by: Andrew Dunham --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscale/cli/serve_legacy.go | 3 +- cmd/tailscale/cli/serve_v2.go | 27 ++++++--- cmd/tailscale/cli/serve_v2_test.go | 94 +++++++++++++++++++++++++----- cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + flake.nix | 2 +- go.mod | 1 + go.mod.sri | 2 +- go.sum | 2 + ipn/ipn_clone.go | 9 +-- ipn/ipn_view.go | 15 +++-- ipn/ipnlocal/serve.go | 72 +++++++++++++++++++++++ ipn/serve.go | 19 +++++- shell.nix | 2 +- tsnet/depaware.txt | 1 + 16 files changed, 216 insertions(+), 36 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index b800b78c6aad4..4542fcad6e6fe 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -85,6 +85,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/modern-go/reflect2 from github.com/json-iterator/go github.com/munnerz/goautoneg from k8s.io/kube-openapi/pkg/handler3+ github.com/opencontainers/go-digest from github.com/distribution/reference + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal github.com/pkg/errors from github.com/evanphx/json-patch/v5+ D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 5c2d8eefa5edc..171ec335c008b 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -168,6 +168,7 @@ type serveEnv struct { http uint // HTTP port tcp uint // TCP port 
tlsTerminatedTCP uint // a TLS terminated TCP port + proxyProtocol uint // PROXY protocol version (1 or 2) subcmd serveMode // subcommand yes bool // update without prompt service tailcfg.ServiceName // service name @@ -570,7 +571,7 @@ func (e *serveEnv) handleTCPServe(ctx context.Context, srcType string, srcPort u return fmt.Errorf("cannot serve TCP; already serving web on %d", srcPort) } - sc.SetTCPForwarding(srcPort, fwdAddr, terminateTLS, dnsName) + sc.SetTCPForwarding(srcPort, fwdAddr, terminateTLS, 0 /* proxy proto */, dnsName) if !reflect.DeepEqual(cursc, sc) { if err := e.lc.SetServeConfig(ctx, sc); err != nil { diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 1ce14cf097faf..33b676bf86a1f 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -240,6 +240,7 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") + fs.UintVar(&e.proxyProtocol, "proxy-protocol", 0, "PROXY protocol version (1 or 2) for TCP forwarding") fs.BoolVar(&e.yes, "yes", false, "Update without interactive prompts (default false)") fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. 
Refer to docs for more information.") }), @@ -413,6 +414,14 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { return errHelpFunc(subcmd) } + if (srvType == serveTypeHTTP || srvType == serveTypeHTTPS) && e.proxyProtocol != 0 { + return fmt.Errorf("PROXY protocol is only supported for TCP forwarding, not HTTP/HTTPS") + } + // Validate PROXY protocol version + if e.proxyProtocol != 0 && e.proxyProtocol != 1 && e.proxyProtocol != 2 { + return fmt.Errorf("invalid PROXY protocol version %d; must be 1 or 2", e.proxyProtocol) + } + sc, err := e.lc.GetServeConfig(ctx) if err != nil { return fmt.Errorf("error getting serve config: %w", err) @@ -507,7 +516,7 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { if len(args) > 0 { target = args[0] } - err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.acceptAppCaps) + err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.acceptAppCaps, int(e.proxyProtocol)) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } if err != nil { @@ -828,7 +837,7 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er for name, details := range scf.Services { for ppr, ep := range details.Endpoints { if ep.Protocol == conffile.ProtoTUN { - err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix, nil) + err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix, nil, 0 /* proxy protocol */) if err != nil { return err } @@ -850,7 +859,7 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er portStr := fmt.Sprint(destPort) target = fmt.Sprintf("%s://%s", ep.Protocol, net.JoinHostPort(ep.Destination, portStr)) } - err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix, nil) + err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix, nil, 0 /* proxy protocol */) if err != 
nil { return fmt.Errorf("service %q: %w", name, err) } @@ -953,7 +962,7 @@ func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType { } } -func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string, caps []tailcfg.PeerCapability) error { +func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string, caps []tailcfg.PeerCapability, proxyProtocol int) error { // update serve config based on the type switch srvType { case serveTypeHTTPS, serveTypeHTTP: @@ -966,7 +975,7 @@ func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveTy if e.setPath != "" { return fmt.Errorf("cannot mount a path for TCP serve") } - err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target) + err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target, proxyProtocol) if err != nil { return fmt.Errorf("failed to apply TCP serve: %w", err) } @@ -1092,6 +1101,9 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN if tcpHandler.TerminateTLS != "" { tlsStatus = "TLS terminated" } + if ver := tcpHandler.ProxyProtocol; ver != 0 { + tlsStatus = fmt.Sprintf("%s, PROXY protocol v%d", tlsStatus, ver) + } output.WriteString(fmt.Sprintf("|-- tcp://%s:%d (%s)\n", host, srvPort, tlsStatus)) for _, a := range ips { @@ -1170,7 +1182,7 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui return nil } -func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string) error { +func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string, proxyProtocol int) error { var terminateTLS bool switch srcType { case serveTypeTCP: @@ -1197,8 +1209,7 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType 
se return fmt.Errorf("cannot serve TCP; already serving web on %d for %s", srcPort, dnsName) } - sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, dnsName) - + sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, proxyProtocol, dnsName) return nil } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 7f7f2c37c97e7..5cdb395587031 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -919,6 +919,73 @@ func TestServeDevConfigMutations(t *testing.T) { }, }, }, + { + name: "tcp_with_proxy_protocol_v1", + steps: []step{{ + command: cmd("serve --tcp=8000 --proxy-protocol=1 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8000: { + TCPForward: "localhost:5432", + ProxyProtocol: 1, + }, + }, + }, + }}, + }, + { + name: "tls_terminated_tcp_with_proxy_protocol_v2", + steps: []step{{ + command: cmd("serve --tls-terminated-tcp=443 --proxy-protocol=2 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + TCPForward: "localhost:5432", + TerminateTLS: "foo.test.ts.net", + ProxyProtocol: 2, + }, + }, + }, + }}, + }, + { + name: "tcp_update_to_add_proxy_protocol", + steps: []step{ + { + command: cmd("serve --tcp=8000 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8000: {TCPForward: "localhost:5432"}, + }, + }, + }, + { + command: cmd("serve --tcp=8000 --proxy-protocol=1 --bg tcp://localhost:5432"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8000: { + TCPForward: "localhost:5432", + ProxyProtocol: 1, + }, + }, + }, + }, + }, + }, + { + name: "tcp_proxy_protocol_invalid_version", + steps: []step{{ + command: cmd("serve --tcp=8000 --proxy-protocol=3 --bg tcp://localhost:5432"), + wantErr: anyErr(), + }}, + }, + { + name: "proxy_protocol_without_tcp", + steps: []step{{ + command: cmd("serve --https=443 --proxy-protocol=1 --bg 
http://localhost:3000"), + wantErr: anyErr(), + }}, + }, } for _, group := range groups { @@ -1889,18 +1956,19 @@ func TestSetServe(t *testing.T) { e := &serveEnv{} magicDNSSuffix := "test.ts.net" tests := []struct { - name string - desc string - cfg *ipn.ServeConfig - st *ipnstate.Status - dnsName string - srvType serveType - srvPort uint16 - mountPath string - target string - allowFunnel bool - expected *ipn.ServeConfig - expectErr bool + name string + desc string + cfg *ipn.ServeConfig + st *ipnstate.Status + dnsName string + srvType serveType + srvPort uint16 + mountPath string + target string + allowFunnel bool + proxyProtocol int + expected *ipn.ServeConfig + expectErr bool }{ { name: "add new handler", @@ -2183,7 +2251,7 @@ func TestSetServe(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix, nil) + err := e.setServe(tt.cfg, tt.dnsName, tt.srvType, tt.srvPort, tt.mountPath, tt.target, tt.allowFunnel, magicDNSSuffix, nil, tt.proxyProtocol) if err != nil && !tt.expectErr { t.Fatalf("got error: %v; did not expect error.", err) } diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 1b5bdab912430..be0fd799e66d1 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -156,6 +156,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/pierrec/lz4/v4/internal/lz4errors from github.com/pierrec/lz4/v4+ L github.com/pierrec/lz4/v4/internal/lz4stream from github.com/pierrec/lz4/v4 L github.com/pierrec/lz4/v4/internal/xxh32 from github.com/pierrec/lz4/v4/internal/lz4stream + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal LD github.com/pkg/sftp from tailscale.com/ssh/tailssh LD github.com/pkg/sftp/internal/encoding/ssh/filexfer from github.com/pkg/sftp D github.com/prometheus-community/pro-bing from 
tailscale.com/wgengine/netstack diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 21ca122c4bdc3..c68fab6340cad 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -43,6 +43,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient diff --git a/flake.nix b/flake.nix index d2f03d4d81382..217df38c3798a 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= +# nix-direnv cache busting line: sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= diff --git a/go.mod b/go.mod index 836810fc0319c..fc8870bafda2b 100644 --- a/go.mod +++ b/go.mod @@ -70,6 +70,7 @@ require ( github.com/miekg/dns v1.1.58 github.com/mitchellh/go-ps v1.0.0 github.com/peterbourgon/ff/v3 v3.4.0 + github.com/pires/go-proxyproto v0.8.1 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/prometheus-community/pro-bing v0.4.0 diff --git a/go.mod.sri b/go.mod.sri index 325a03b43bdfd..b7df2cc2ccccd 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= +sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= diff --git a/go.sum b/go.sum index a0d9461ece2fb..177efd4f7e293 100644 --- a/go.sum +++ b/go.sum @@ -809,6 +809,8 @@ github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkM github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= 
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0= +github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28= diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 3d2670947c7e3..d5af906ee95e8 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -218,10 +218,11 @@ func (src *TCPPortHandler) Clone() *TCPPortHandler { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _TCPPortHandlerCloneNeedsRegeneration = TCPPortHandler(struct { - HTTPS bool - HTTP bool - TCPForward string - TerminateTLS string + HTTPS bool + HTTP bool + TCPForward string + TerminateTLS string + ProxyProtocol int }{}) // Clone makes a deep copy of HTTPHandler. diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index ba5477a6d93ae..3179e3bb5e2e0 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -807,12 +807,19 @@ func (v TCPPortHandlerView) TCPForward() string { return v.ж.TCPForward } // (the HTTPS mode uses ServeConfig.Web) func (v TCPPortHandlerView) TerminateTLS() string { return v.ж.TerminateTLS } +// ProxyProtocol indicates whether to send a PROXY protocol header +// before forwarding the connection to TCPForward. +// +// This is only valid if TCPForward is non-empty. +func (v TCPPortHandlerView) ProxyProtocol() int { return v.ж.ProxyProtocol } + // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
var _TCPPortHandlerViewNeedsRegeneration = TCPPortHandler(struct { - HTTPS bool - HTTP bool - TCPForward string - TerminateTLS string + HTTPS bool + HTTP bool + TCPForward string + TerminateTLS string + ProxyProtocol int }{}) // View returns a read-only view of HTTPHandler. diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 1c527e130ebbe..b5118873b2fca 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -33,6 +33,7 @@ import ( "time" "unicode/utf8" + "github.com/pires/go-proxyproto" "go4.org/mem" "tailscale.com/ipn" "tailscale.com/net/netutil" @@ -671,10 +672,81 @@ func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort, }) } + var proxyHeader []byte + if ver := tcph.ProxyProtocol(); ver > 0 { + // backAddr is the final "destination" of the connection, + // which is the connection to the proxied-to backend. + backAddr := backConn.RemoteAddr().(*net.TCPAddr) + + // We always want to format the PROXY protocol + // header based on the IPv4 or IPv6-ness of + // the client. The SourceAddr and + // DestinationAddr need to match in type, so we + // need to be careful to not e.g. set a + // SourceAddr of type IPv6 and DestinationAddr + // of type IPv4. + // + // If this is an IPv6-mapped IPv4 address, + // though, unmap it. + proxySrcAddr := srcAddr + if proxySrcAddr.Addr().Is4In6() { + proxySrcAddr = netip.AddrPortFrom( + proxySrcAddr.Addr().Unmap(), + proxySrcAddr.Port(), + ) + } + + is4 := proxySrcAddr.Addr().Is4() + + var destAddr netip.Addr + if self := b.currentNode().Self(); self.Valid() { + if is4 { + destAddr = nodeIP(self, netip.Addr.Is4) + } else { + destAddr = nodeIP(self, netip.Addr.Is6) + } + } + if !destAddr.IsValid() { + // Pick a best-effort destination address of localhost. 
+ if is4 { + destAddr = netip.AddrFrom4([4]byte{127, 0, 0, 1}) + } else { + destAddr = netip.IPv6Loopback() + } + } + + header := &proxyproto.Header{ + Version: byte(ver), + Command: proxyproto.PROXY, + SourceAddr: net.TCPAddrFromAddrPort(proxySrcAddr), + DestinationAddr: &net.TCPAddr{ + IP: destAddr.AsSlice(), + Port: backAddr.Port, + }, + } + if is4 { + header.TransportProtocol = proxyproto.TCPv4 + } else { + header.TransportProtocol = proxyproto.TCPv6 + } + var err error + proxyHeader, err = header.Format() + if err != nil { + b.logf("localbackend: failed to format proxy protocol header for port %v (from %v) to %s: %v", dport, srcAddr, backDst, err) + } + } + // TODO(bradfitz): do the RegisterIPPortIdentity and // UnregisterIPPortIdentity stuff that netstack does errc := make(chan error, 1) go func() { + if len(proxyHeader) > 0 { + if _, err := backConn.Write(proxyHeader); err != nil { + errc <- err + backConn.Close() // to ensure that the other side gets EOF + return + } + } _, err := io.Copy(backConn, conn) errc <- err }() diff --git a/ipn/serve.go b/ipn/serve.go index 2ac37a141271a..1aab829feeec7 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -149,6 +149,12 @@ type TCPPortHandler struct { // SNI name with this value. It is only used if TCPForward is non-empty. // (the HTTPS mode uses ServeConfig.Web) TerminateTLS string `json:",omitempty"` + + // ProxyProtocol indicates whether to send a PROXY protocol header + // before forwarding the connection to TCPForward. + // + // This is only valid if TCPForward is non-empty. + ProxyProtocol int `json:",omitzero"` } // HTTPHandler is either a path or a proxy to serve. @@ -404,7 +410,10 @@ func (sc *ServeConfig) SetWebHandler(handler *HTTPHandler, host string, port uin // connections from the given port. If terminateTLS is true, TLS connections // are terminated with only the given host name permitted before passing them // to the fwdAddr. 
-func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTLS bool, host string) { +// +// If proxyProtocol is non-zero, the corresponding PROXY protocol version +// header is sent before forwarding the connection. +func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTLS bool, proxyProtocol int, host string) { if sc == nil { sc = new(ServeConfig) } @@ -417,11 +426,15 @@ func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTL } tcpPortHandler = &svcConfig.TCP } - mak.Set(tcpPortHandler, port, &TCPPortHandler{TCPForward: fwdAddr}) + handler := &TCPPortHandler{ + TCPForward: fwdAddr, + ProxyProtocol: proxyProtocol, // can be 0 + } if terminateTLS { - (*tcpPortHandler)[port].TerminateTLS = host + handler.TerminateTLS = host } + mak.Set(tcpPortHandler, port, handler) } // SetFunnel sets the sc.AllowFunnel value for the given host and port. diff --git a/shell.nix b/shell.nix index c11b4bbcfb4f7..f43108753da13 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-p8dVVZm2bLL4J/d4TtnUOp3bfMqUkBGE+53RUhamF+A= +# nix-direnv cache busting line: sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index cf91aa483f175..6eb493ef8d07c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -43,6 +43,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient From 
99b06eac49ba1cdc1f72409b957f526b25d62622 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sat, 15 Nov 2025 09:40:57 -0800 Subject: [PATCH 0694/1093] syncs: add Mutex/RWMutex alias/wrappers for future mutex debugging Updates #17852 Change-Id: I477340fb8e40686870e981ade11cd61597c34a20 Signed-off-by: Brad Fitzpatrick --- appc/appconnector.go | 4 ++-- client/local/local.go | 3 ++- control/controlbase/conn.go | 3 ++- control/controlclient/direct.go | 4 ++-- derp/derpserver/derpserver.go | 2 +- envknob/envknob.go | 4 ++-- feature/relayserver/relayserver.go | 4 ++-- health/health.go | 3 ++- ipn/auditlog/extension.go | 4 ++-- ipn/ipnlocal/cert.go | 6 +++--- ipn/ipnlocal/local.go | 6 +++--- ipn/ipnlocal/node_backend.go | 3 ++- logtail/buffer.go | 5 +++-- metrics/metrics.go | 3 +-- net/captivedetection/captivedetection.go | 3 ++- net/dns/manager_windows.go | 4 ++-- net/dns/resolver/debug.go | 4 ++-- net/dns/resolver/forwarder.go | 3 ++- net/dns/resolver/tsdns.go | 2 +- net/dnscache/dnscache.go | 5 +++-- net/memnet/memnet.go | 4 ++-- net/netcheck/netcheck.go | 4 ++-- net/netmon/interfaces_darwin.go | 4 ++-- net/netmon/netmon.go | 3 ++- net/netutil/netutil.go | 5 +++-- net/ping/ping.go | 3 ++- net/portmapper/portmapper.go | 3 +-- net/sockstats/sockstats_tsgo.go | 4 ++-- net/tsdial/tsdial.go | 3 ++- net/wsconn/wsconn.go | 4 ++-- proxymap/proxymap.go | 4 ++-- syncs/locked.go | 4 ++-- syncs/mutex.go | 18 ++++++++++++++++++ syncs/mutex_debug.go | 18 ++++++++++++++++++ syncs/shardedint_test.go | 3 ++- util/eventbus/bus.go | 8 ++++---- util/eventbus/client.go | 4 ++-- util/eventbus/debug.go | 4 ++-- util/eventbus/subscribe.go | 4 ++-- util/execqueue/execqueue.go | 5 +++-- util/expvarx/expvarx.go | 4 ++-- util/goroutines/tracker.go | 4 ++-- util/limiter/limiter.go | 4 ++-- util/ringlog/ringlog.go | 4 ++-- util/syspolicy/rsop/change_callbacks.go | 3 ++- util/syspolicy/rsop/resultant_policy.go | 4 ++-- util/syspolicy/rsop/rsop.go | 3 +-- util/syspolicy/setting/setting.go | 4 ++-- 
wgengine/magicsock/blockforever_conn.go | 4 +++- wgengine/magicsock/endpoint.go | 4 ++-- wgengine/magicsock/endpoint_tracker.go | 4 ++-- wgengine/magicsock/magicsock.go | 2 +- wgengine/magicsock/rebinding_conn.go | 4 ++-- wgengine/magicsock/relaymanager.go | 3 ++- wgengine/netlog/netlog.go | 4 ++-- 55 files changed, 145 insertions(+), 94 deletions(-) create mode 100644 syncs/mutex.go create mode 100644 syncs/mutex_debug.go diff --git a/appc/appconnector.go b/appc/appconnector.go index e7b5032f0edc4..5625decbfa062 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -16,9 +16,9 @@ import ( "net/netip" "slices" "strings" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/types/appctype" "tailscale.com/types/logger" "tailscale.com/types/views" @@ -139,7 +139,7 @@ type AppConnector struct { hasStoredRoutes bool // mu guards the fields that follow - mu sync.Mutex + mu syncs.Mutex // domains is a map of lower case domain names with no trailing dot, to an // ordered list of resolved IP addresses. diff --git a/client/local/local.go b/client/local/local.go index 2382a12252a20..a5e04f122ca54 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -38,6 +38,7 @@ import ( "tailscale.com/net/udprelay/status" "tailscale.com/paths" "tailscale.com/safesocket" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/appctype" "tailscale.com/types/dnstype" @@ -1363,7 +1364,7 @@ type IPNBusWatcher struct { httpRes *http.Response dec *json.Decoder - mu sync.Mutex + mu syncs.Mutex closed bool } diff --git a/control/controlbase/conn.go b/control/controlbase/conn.go index dc22212e887cb..78ef73f71000b 100644 --- a/control/controlbase/conn.go +++ b/control/controlbase/conn.go @@ -18,6 +18,7 @@ import ( "golang.org/x/crypto/blake2s" chp "golang.org/x/crypto/chacha20poly1305" + "tailscale.com/syncs" "tailscale.com/types/key" ) @@ -48,7 +49,7 @@ type Conn struct { // rxState is all the Conn state that Read uses. 
type rxState struct { - sync.Mutex + syncs.Mutex cipher cipher.AEAD nonce nonce buf *maxMsgBuffer // or nil when reads exhausted diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index d30db61918ef0..9e7d10d8dcf0b 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -23,7 +23,6 @@ import ( "runtime" "slices" "strings" - "sync" "sync/atomic" "time" @@ -44,6 +43,7 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/tlsdial" "tailscale.com/net/tsdial" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tka" "tailscale.com/tstime" @@ -92,7 +92,7 @@ type Direct struct { dialPlan ControlDialPlanner // can be nil - mu sync.Mutex // mutex guards the following fields + mu syncs.Mutex // mutex guards the following fields serverLegacyKey key.MachinePublic // original ("legacy") nacl crypto_box-based public key; only used for signRegisterRequest on Windows now serverNoiseKey key.MachinePublic diff --git a/derp/derpserver/derpserver.go b/derp/derpserver/derpserver.go index 31cf9363a43bf..0bbc667806a5a 100644 --- a/derp/derpserver/derpserver.go +++ b/derp/derpserver/derpserver.go @@ -177,7 +177,7 @@ type Server struct { verifyClientsURL string verifyClientsURLFailOpen bool - mu sync.Mutex + mu syncs.Mutex closed bool netConns map[derp.Conn]chan struct{} // chan is closed when conn closes clients map[key.NodePublic]*clientSet diff --git a/envknob/envknob.go b/envknob/envknob.go index 9dea8f74d15df..17a21387ecaea 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -28,19 +28,19 @@ import ( "slices" "strconv" "strings" - "sync" "sync/atomic" "time" "tailscale.com/feature/buildfeatures" "tailscale.com/kube/kubetypes" + "tailscale.com/syncs" "tailscale.com/types/opt" "tailscale.com/version" "tailscale.com/version/distro" ) var ( - mu sync.Mutex + mu syncs.Mutex // +checklocks:mu set = map[string]string{} // +checklocks:mu diff --git a/feature/relayserver/relayserver.go 
b/feature/relayserver/relayserver.go index cfa372bd7ae5a..7d12d62e5802e 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -9,7 +9,6 @@ import ( "encoding/json" "fmt" "net/http" - "sync" "tailscale.com/disco" "tailscale.com/feature" @@ -19,6 +18,7 @@ import ( "tailscale.com/net/udprelay" "tailscale.com/net/udprelay/endpoint" "tailscale.com/net/udprelay/status" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -95,7 +95,7 @@ type extension struct { ec *eventbus.Client respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] - mu sync.Mutex // guards the following fields + mu syncs.Mutex // guards the following fields shutdown bool // true if Shutdown() has been called rs relayServer // nil when disabled port *int // ipn.Prefs.RelayServerPort, nil if disabled diff --git a/health/health.go b/health/health.go index cbfa599c56eaf..f0f6a6ffbb162 100644 --- a/health/health.go +++ b/health/health.go @@ -20,6 +20,7 @@ import ( "tailscale.com/envknob" "tailscale.com/feature/buildfeatures" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/opt" @@ -30,7 +31,7 @@ import ( ) var ( - mu sync.Mutex + mu syncs.Mutex debugHandler map[string]http.Handler ) diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go index f73681db073c1..ae2a296b2c420 100644 --- a/ipn/auditlog/extension.go +++ b/ipn/auditlog/extension.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "sync" "time" "tailscale.com/control/controlclient" @@ -15,6 +14,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnext" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" @@ -40,7 +40,7 @@ type extension struct { store lazy.SyncValue[LogStore] // mu protects all following fields. 
- mu sync.Mutex + mu syncs.Mutex // logger is the current audit logger, or nil if it is not set up, // such as before the first control client is created, or after // a profile change and before the new control client is created. diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index ab49976c8aeea..d7133d25e24a2 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -30,7 +30,6 @@ import ( "runtime" "slices" "strings" - "sync" "time" "tailscale.com/atomicfile" @@ -42,6 +41,7 @@ import ( "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/net/bakedroots" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tempfork/acme" "tailscale.com/types/logger" @@ -60,9 +60,9 @@ var ( // acmeMu guards all ACME operations, so concurrent requests // for certs don't slam ACME. The first will go through and // populate the on-disk cache and the rest should use that. - acmeMu sync.Mutex + acmeMu syncs.Mutex - renewMu sync.Mutex // lock order: acmeMu before renewMu + renewMu syncs.Mutex // lock order: acmeMu before renewMu renewCertAt = map[string]time.Time{} ) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9de1f3d85531b..ed183e508e69d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -247,7 +247,7 @@ type LocalBackend struct { extHost *ExtensionHost // The mutex protects the following elements. - mu sync.Mutex + mu syncs.Mutex // currentNodeAtomic is the current node context. It is always non-nil. // It must be re-created when [LocalBackend] switches to a different profile/node @@ -329,14 +329,14 @@ type LocalBackend struct { // // tkaSyncLock MUST be taken before mu (or inversely, mu must not be held // at the moment that tkaSyncLock is taken). - tkaSyncLock sync.Mutex + tkaSyncLock syncs.Mutex clock tstime.Clock // Last ClientVersion received in MapResponse, guarded by mu. 
lastClientVersion *tailcfg.ClientVersion // lastNotifiedDriveSharesMu guards lastNotifiedDriveShares - lastNotifiedDriveSharesMu sync.Mutex + lastNotifiedDriveSharesMu syncs.Mutex // lastNotifiedDriveShares keeps track of the last set of shares that we // notified about. diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index dbe23e4d5245a..6880440bdd600 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -16,6 +16,7 @@ import ( "tailscale.com/ipn" "tailscale.com/net/dns" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/dnstype" "tailscale.com/types/key" @@ -82,7 +83,7 @@ type nodeBackend struct { derpMapViewPub *eventbus.Publisher[tailcfg.DERPMapView] // TODO(nickkhyl): maybe use sync.RWMutex? - mu sync.Mutex // protects the following fields + mu syncs.Mutex // protects the following fields shutdownOnce sync.Once // guards calling [nodeBackend.shutdown] readyCh chan struct{} // closed by [nodeBackend.ready]; nil after shutdown diff --git a/logtail/buffer.go b/logtail/buffer.go index d14d8fbf6ae51..82c9b461010b2 100644 --- a/logtail/buffer.go +++ b/logtail/buffer.go @@ -9,7 +9,8 @@ import ( "bytes" "errors" "fmt" - "sync" + + "tailscale.com/syncs" ) type Buffer interface { @@ -36,7 +37,7 @@ type memBuffer struct { next []byte pending chan qentry - dropMu sync.Mutex + dropMu syncs.Mutex dropCount int } diff --git a/metrics/metrics.go b/metrics/metrics.go index d1b1c06c9dc2c..19966d395f815 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -11,7 +11,6 @@ import ( "io" "slices" "strings" - "sync" "tailscale.com/syncs" ) @@ -41,7 +40,7 @@ type LabelMap struct { Label string expvar.Map // shardedIntMu orders the initialization of new shardedint keys - shardedIntMu sync.Mutex + shardedIntMu syncs.Mutex } // SetInt64 sets the *Int value stored under the given map key. 
diff --git a/net/captivedetection/captivedetection.go b/net/captivedetection/captivedetection.go index a06362a5b4d1d..3ec820b794400 100644 --- a/net/captivedetection/captivedetection.go +++ b/net/captivedetection/captivedetection.go @@ -18,6 +18,7 @@ import ( "time" "tailscale.com/net/netmon" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/logger" ) @@ -32,7 +33,7 @@ type Detector struct { // currIfIndex is the index of the interface that is currently being used by the httpClient. currIfIndex int // mu guards currIfIndex. - mu sync.Mutex + mu syncs.Mutex // logf is the logger used for logging messages. If it is nil, log.Printf is used. logf logger.Logf } diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 444c5d37debf4..5ccadbab2d9ad 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -16,7 +16,6 @@ import ( "slices" "sort" "strings" - "sync" "syscall" "time" @@ -27,6 +26,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/envknob" "tailscale.com/health" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/dnsname" "tailscale.com/util/syspolicy/pkey" @@ -51,7 +51,7 @@ type windowsManager struct { unregisterPolicyChangeCb func() // called when the manager is closing - mu sync.Mutex + mu syncs.Mutex closing bool } diff --git a/net/dns/resolver/debug.go b/net/dns/resolver/debug.go index 0f9b106bb2eb4..a41462e185e24 100644 --- a/net/dns/resolver/debug.go +++ b/net/dns/resolver/debug.go @@ -8,12 +8,12 @@ import ( "html" "net/http" "strconv" - "sync" "sync/atomic" "time" "tailscale.com/feature/buildfeatures" "tailscale.com/health" + "tailscale.com/syncs" ) func init() { @@ -39,7 +39,7 @@ func init() { var fwdLogAtomic atomic.Pointer[fwdLog] type fwdLog struct { - mu sync.Mutex + mu syncs.Mutex pos int // ent[pos] is next entry ent []fwdLogEntry } diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 86f0f5b8c48c4..5adc43efca860 100644 --- 
a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -37,6 +37,7 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tsdial" + "tailscale.com/syncs" "tailscale.com/types/dnstype" "tailscale.com/types/logger" "tailscale.com/types/nettype" @@ -231,7 +232,7 @@ type forwarder struct { ctx context.Context // good until Close ctxCancel context.CancelFunc // closes ctx - mu sync.Mutex // guards following + mu syncs.Mutex // guards following dohClient map[string]*http.Client // urlBase -> client diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 93cbf3839c923..3185cbe2b35ff 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -214,7 +214,7 @@ type Resolver struct { closed chan struct{} // mu guards the following fields from being updated while used. - mu sync.Mutex + mu syncs.Mutex localDomains []dnsname.FQDN hostToIP map[dnsname.FQDN][]netip.Addr ipToHost map[netip.Addr]dnsname.FQDN diff --git a/net/dnscache/dnscache.go b/net/dnscache/dnscache.go index 94d4bbee7955f..e222b983f0287 100644 --- a/net/dnscache/dnscache.go +++ b/net/dnscache/dnscache.go @@ -20,6 +20,7 @@ import ( "tailscale.com/envknob" "tailscale.com/net/netx" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/cloudenv" "tailscale.com/util/singleflight" @@ -97,7 +98,7 @@ type Resolver struct { sf singleflight.Group[string, ipRes] - mu sync.Mutex + mu syncs.Mutex ipCache map[string]ipCacheEntry } @@ -474,7 +475,7 @@ type dialCall struct { d *dialer network, address, host, port string - mu sync.Mutex // lock ordering: dialer.mu, then dialCall.mu + mu syncs.Mutex // lock ordering: dialer.mu, then dialCall.mu fails map[netip.Addr]error // set of IPs that failed to dial thus far } diff --git a/net/memnet/memnet.go b/net/memnet/memnet.go index 1e43df2daaaae..db9e3872f6f26 100644 --- a/net/memnet/memnet.go +++ b/net/memnet/memnet.go @@ -12,9 +12,9 @@ import ( "fmt" "net" "net/netip" - "sync" 
"tailscale.com/net/netx" + "tailscale.com/syncs" ) var _ netx.Network = (*Network)(nil) @@ -26,7 +26,7 @@ var _ netx.Network = (*Network)(nil) // // Its zero value is a valid [netx.Network] implementation. type Network struct { - mu sync.Mutex + mu syncs.Mutex lns map[string]*Listener // address -> listener } diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 726221675fb03..95750b2d066f6 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -235,7 +235,7 @@ type Client struct { testEnoughRegions int testCaptivePortalDelay time.Duration - mu sync.Mutex // guards following + mu syncs.Mutex // guards following nextFull bool // do a full region scan, even if last != nil prev map[time.Time]*Report // some previous reports last *Report // most recent report @@ -597,7 +597,7 @@ type reportState struct { stopProbeCh chan struct{} waitPortMap sync.WaitGroup - mu sync.Mutex + mu syncs.Mutex report *Report // to be returned by GetReport inFlight map[stun.TxID]func(netip.AddrPort) // called without c.mu held gotEP4 netip.AddrPort diff --git a/net/netmon/interfaces_darwin.go b/net/netmon/interfaces_darwin.go index b175f980a2109..126040350bdb2 100644 --- a/net/netmon/interfaces_darwin.go +++ b/net/netmon/interfaces_darwin.go @@ -7,12 +7,12 @@ import ( "fmt" "net" "strings" - "sync" "syscall" "unsafe" "golang.org/x/net/route" "golang.org/x/sys/unix" + "tailscale.com/syncs" "tailscale.com/util/mak" ) @@ -26,7 +26,7 @@ func parseRoutingTable(rib []byte) ([]route.Message, error) { } var ifNames struct { - sync.Mutex + syncs.Mutex m map[int]string // ifindex => name } diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index f7d1b1107e379..657da04d5978c 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -15,6 +15,7 @@ import ( "time" "tailscale.com/feature/buildfeatures" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -65,7 +66,7 @@ type Monitor struct { // 
and not change at runtime. tsIfName string // tailscale interface name, if known/set ("tailscale0", "utun3", ...) - mu sync.Mutex // guards all following fields + mu syncs.Mutex // guards all following fields cbs set.HandleSet[ChangeFunc] ifState *State gwValid bool // whether gw and gwSelfIP are valid diff --git a/net/netutil/netutil.go b/net/netutil/netutil.go index bc64e8fdc9eb4..5c42f51c64837 100644 --- a/net/netutil/netutil.go +++ b/net/netutil/netutil.go @@ -8,7 +8,8 @@ import ( "bufio" "io" "net" - "sync" + + "tailscale.com/syncs" ) // NewOneConnListener returns a net.Listener that returns c on its @@ -29,7 +30,7 @@ func NewOneConnListener(c net.Conn, addr net.Addr) net.Listener { type oneConnListener struct { addr net.Addr - mu sync.Mutex + mu syncs.Mutex conn net.Conn } diff --git a/net/ping/ping.go b/net/ping/ping.go index 1ff3862dc65a1..8e16a692a8136 100644 --- a/net/ping/ping.go +++ b/net/ping/ping.go @@ -23,6 +23,7 @@ import ( "golang.org/x/net/icmp" "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/mak" ) @@ -64,7 +65,7 @@ type Pinger struct { wg sync.WaitGroup // Following fields protected by mu - mu sync.Mutex + mu syncs.Mutex // conns is a map of "type" to net.PacketConn, type is either // "ip4:icmp" or "ip6:icmp" conns map[string]net.PacketConn diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 9368d1c4ee05b..16a981d1d8336 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -14,7 +14,6 @@ import ( "net/http" "net/netip" "slices" - "sync" "sync/atomic" "time" @@ -123,7 +122,7 @@ type Client struct { testPxPPort uint16 // if non-zero, pxpPort to use for tests testUPnPPort uint16 // if non-zero, uPnPPort to use for tests - mu sync.Mutex // guards following, and all fields thereof + mu syncs.Mutex // guards following, and all fields thereof // runningCreate is whether we're currently working on creating // a port mapping 
(whether GetCachedMappingOrStartCreatingOne kicked diff --git a/net/sockstats/sockstats_tsgo.go b/net/sockstats/sockstats_tsgo.go index fec9ec3b0dad2..aa875df9aeddd 100644 --- a/net/sockstats/sockstats_tsgo.go +++ b/net/sockstats/sockstats_tsgo.go @@ -10,12 +10,12 @@ import ( "fmt" "net" "strings" - "sync" "sync/atomic" "syscall" "time" "tailscale.com/net/netmon" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/version" @@ -40,7 +40,7 @@ var sockStats = struct { // mu protects fields in this group (but not the fields within // sockStatCounters). It should not be held in the per-read/write // callbacks. - mu sync.Mutex + mu syncs.Mutex countersByLabel map[Label]*sockStatCounters knownInterfaces map[int]string // interface index -> name usedInterfaces map[int]int // set of interface indexes diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index c7483a125a07a..065c01384ed55 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -27,6 +27,7 @@ import ( "tailscale.com/net/netns" "tailscale.com/net/netx" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" @@ -86,7 +87,7 @@ type Dialer struct { routes atomic.Pointer[bart.Table[bool]] // or nil if UserDial should not use routes. `true` indicates routes that point into the Tailscale interface - mu sync.Mutex + mu syncs.Mutex closed bool dns dnsMap tunName string // tun device name diff --git a/net/wsconn/wsconn.go b/net/wsconn/wsconn.go index 3c83ffd8c320f..9e44da59ca1d7 100644 --- a/net/wsconn/wsconn.go +++ b/net/wsconn/wsconn.go @@ -12,11 +12,11 @@ import ( "math" "net" "os" - "sync" "sync/atomic" "time" "github.com/coder/websocket" + "tailscale.com/syncs" ) // NetConn converts a *websocket.Conn into a net.Conn. 
@@ -102,7 +102,7 @@ type netConn struct { reading atomic.Bool afterReadDeadline atomic.Bool - readMu sync.Mutex + readMu syncs.Mutex // eofed is true if the reader should return io.EOF from the Read call. // // +checklocks:readMu diff --git a/proxymap/proxymap.go b/proxymap/proxymap.go index dfe6f2d586000..20dc96c848307 100644 --- a/proxymap/proxymap.go +++ b/proxymap/proxymap.go @@ -9,9 +9,9 @@ import ( "fmt" "net/netip" "strings" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/util/mak" ) @@ -22,7 +22,7 @@ import ( // ask tailscaled (via the LocalAPI WhoIs method) the Tailscale identity that a // given localhost:port corresponds to. type Mapper struct { - mu sync.Mutex + mu syncs.Mutex // m holds the mapping from localhost IP:ports to Tailscale IPs. It is // keyed first by the protocol ("tcp" or "udp"), then by the IP:port. diff --git a/syncs/locked.go b/syncs/locked.go index d2048665dee3d..d2e9edef7a9dd 100644 --- a/syncs/locked.go +++ b/syncs/locked.go @@ -8,7 +8,7 @@ import ( ) // AssertLocked panics if m is not locked. -func AssertLocked(m *sync.Mutex) { +func AssertLocked(m *Mutex) { if m.TryLock() { m.Unlock() panic("mutex is not locked") @@ -16,7 +16,7 @@ func AssertLocked(m *sync.Mutex) { } // AssertRLocked panics if rw is not locked for reading or writing. -func AssertRLocked(rw *sync.RWMutex) { +func AssertRLocked(rw *RWMutex) { if rw.TryLock() { rw.Unlock() panic("mutex is not locked") diff --git a/syncs/mutex.go b/syncs/mutex.go new file mode 100644 index 0000000000000..e61d1d1ab0687 --- /dev/null +++ b/syncs/mutex.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_mutex_debug + +package syncs + +import "sync" + +// Mutex is an alias for sync.Mutex. +// +// It's only not a sync.Mutex when built with the ts_mutex_debug build tag. +type Mutex = sync.Mutex + +// RWMutex is an alias for sync.RWMutex. 
+// +// It's only not a sync.RWMutex when built with the ts_mutex_debug build tag. +type RWMutex = sync.RWMutex diff --git a/syncs/mutex_debug.go b/syncs/mutex_debug.go new file mode 100644 index 0000000000000..14b52ffe3cc51 --- /dev/null +++ b/syncs/mutex_debug.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_mutex_debug + +package syncs + +import "sync" + +type Mutex struct { + sync.Mutex +} + +type RWMutex struct { + sync.RWMutex +} + +// TODO(bradfitz): actually track stuff when in debug mode. diff --git a/syncs/shardedint_test.go b/syncs/shardedint_test.go index d355a15400a90..815a739d13842 100644 --- a/syncs/shardedint_test.go +++ b/syncs/shardedint_test.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package syncs +package syncs_test import ( "expvar" "sync" "testing" + . "tailscale.com/syncs" "tailscale.com/tstest" ) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index b1639136a5133..46fa5b1988334 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -8,8 +8,8 @@ import ( "log" "reflect" "slices" - "sync" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/set" ) @@ -34,12 +34,12 @@ type Bus struct { routeDebug hook[RoutedEvent] logf logger.Logf - topicsMu sync.Mutex + topicsMu syncs.Mutex topics map[reflect.Type][]*subscribeState // Used for introspection/debugging only, not in the normal event // publishing path. - clientsMu sync.Mutex + clientsMu syncs.Mutex clients set.Set[*Client] } @@ -306,7 +306,7 @@ func (w *worker) StopAndWait() { type stopFlag struct { // guards the lazy construction of stopped, and the value of // alreadyStopped. 
- mu sync.Mutex + mu syncs.Mutex stopped chan struct{} alreadyStopped bool } diff --git a/util/eventbus/client.go b/util/eventbus/client.go index c119c67a939c2..a7a5ab673bdfd 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -5,8 +5,8 @@ package eventbus import ( "reflect" - "sync" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/set" ) @@ -22,7 +22,7 @@ type Client struct { bus *Bus publishDebug hook[PublishedEvent] - mu sync.Mutex + mu syncs.Mutex pub set.Set[publisher] sub *subscribeState // Lazily created on first subscribe stop stopFlag // signaled on Close diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index 2f2c9589ad0e2..0453defb1a77e 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -11,10 +11,10 @@ import ( "runtime" "slices" "strings" - "sync" "sync/atomic" "time" + "tailscale.com/syncs" "tailscale.com/types/logger" ) @@ -147,7 +147,7 @@ func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type { // A hook collects hook functions that can be run as a group. 
type hook[T any] struct { - sync.Mutex + syncs.Mutex fns []hookFn[T] } diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 53253d33045c5..b0348e125c393 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -8,9 +8,9 @@ import ( "fmt" "reflect" "runtime" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/cibuild" ) @@ -51,7 +51,7 @@ type subscribeState struct { snapshot chan chan []DeliveredEvent debug hook[DeliveredEvent] - outputsMu sync.Mutex + outputsMu syncs.Mutex outputs map[reflect.Type]subscriber } diff --git a/util/execqueue/execqueue.go b/util/execqueue/execqueue.go index dce70c542f7df..2ea0c1f2f231f 100644 --- a/util/execqueue/execqueue.go +++ b/util/execqueue/execqueue.go @@ -7,11 +7,12 @@ package execqueue import ( "context" "errors" - "sync" + + "tailscale.com/syncs" ) type ExecQueue struct { - mu sync.Mutex + mu syncs.Mutex ctx context.Context // context.Background + closed on Shutdown cancel context.CancelFunc // closes ctx closed bool diff --git a/util/expvarx/expvarx.go b/util/expvarx/expvarx.go index 762f65d069aa6..bcdc4a91a7982 100644 --- a/util/expvarx/expvarx.go +++ b/util/expvarx/expvarx.go @@ -7,9 +7,9 @@ package expvarx import ( "encoding/json" "expvar" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/types/lazy" ) @@ -20,7 +20,7 @@ type SafeFunc struct { limit time.Duration onSlow func(time.Duration, any) - mu sync.Mutex + mu syncs.Mutex inflight *lazy.SyncValue[any] } diff --git a/util/goroutines/tracker.go b/util/goroutines/tracker.go index 044843d33d155..c2a0cb8c3a3ed 100644 --- a/util/goroutines/tracker.go +++ b/util/goroutines/tracker.go @@ -4,9 +4,9 @@ package goroutines import ( - "sync" "sync/atomic" + "tailscale.com/syncs" "tailscale.com/util/set" ) @@ -15,7 +15,7 @@ type Tracker struct { started atomic.Int64 // counter running atomic.Int64 // gauge - mu sync.Mutex + mu syncs.Mutex onDone set.HandleSet[func()] } diff --git 
a/util/limiter/limiter.go b/util/limiter/limiter.go index 5af5f7bd11950..30e0b74ed60ff 100644 --- a/util/limiter/limiter.go +++ b/util/limiter/limiter.go @@ -8,9 +8,9 @@ import ( "fmt" "html" "io" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/util/lru" ) @@ -75,7 +75,7 @@ type Limiter[K comparable] struct { // perpetually in debt and cannot proceed at all. Overdraft int64 - mu sync.Mutex + mu syncs.Mutex cache *lru.Cache[K, *bucket] } diff --git a/util/ringlog/ringlog.go b/util/ringlog/ringlog.go index 85e0c48611821..62dfbae5bd5c3 100644 --- a/util/ringlog/ringlog.go +++ b/util/ringlog/ringlog.go @@ -4,7 +4,7 @@ // Package ringlog contains a limited-size concurrency-safe generic ring log. package ringlog -import "sync" +import "tailscale.com/syncs" // New creates a new [RingLog] containing at most max items. func New[T any](max int) *RingLog[T] { @@ -15,7 +15,7 @@ func New[T any](max int) *RingLog[T] { // RingLog is a concurrency-safe fixed size log window containing entries of [T]. type RingLog[T any] struct { - mu sync.Mutex + mu syncs.Mutex pos int buf []T max int diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index fdf51c253cbd7..71135bb2ac788 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -9,6 +9,7 @@ import ( "sync" "time" + "tailscale.com/syncs" "tailscale.com/util/set" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/pkey" @@ -70,7 +71,7 @@ func (c PolicyChange) HasChangedAnyOf(keys ...pkey.Key) bool { // policyChangeCallbacks are the callbacks to invoke when the effective policy changes. // It is safe for concurrent use. 
type policyChangeCallbacks struct { - mu sync.Mutex + mu syncs.Mutex cbs set.HandleSet[PolicyChangeCallback] } diff --git a/util/syspolicy/rsop/resultant_policy.go b/util/syspolicy/rsop/resultant_policy.go index 297d26f9f6fe5..bdda909763008 100644 --- a/util/syspolicy/rsop/resultant_policy.go +++ b/util/syspolicy/rsop/resultant_policy.go @@ -7,10 +7,10 @@ import ( "errors" "fmt" "slices" - "sync" "sync/atomic" "time" + "tailscale.com/syncs" "tailscale.com/util/syspolicy/internal/loggerx" "tailscale.com/util/syspolicy/setting" "tailscale.com/util/testenv" @@ -58,7 +58,7 @@ type Policy struct { changeCallbacks policyChangeCallbacks - mu sync.Mutex + mu syncs.Mutex watcherStarted bool // whether [Policy.watchReload] was started sources source.ReadableSources closing bool // whether [Policy.Close] was called (even if we're still closing) diff --git a/util/syspolicy/rsop/rsop.go b/util/syspolicy/rsop/rsop.go index 429b9b10121b3..333dca64343c1 100644 --- a/util/syspolicy/rsop/rsop.go +++ b/util/syspolicy/rsop/rsop.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "slices" - "sync" "tailscale.com/syncs" "tailscale.com/util/slicesx" @@ -20,7 +19,7 @@ import ( ) var ( - policyMu sync.Mutex // protects [policySources] and [effectivePolicies] + policyMu syncs.Mutex // protects [policySources] and [effectivePolicies] policySources []*source.Source // all registered policy sources effectivePolicies []*Policy // all active (non-closed) effective policies returned by [PolicyFor] diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 091cf58d31b71..0ca36176e675c 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -11,9 +11,9 @@ import ( "fmt" "slices" "strings" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/types/lazy" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/pkey" @@ -215,7 +215,7 @@ type DefinitionMap map[pkey.Key]*Definition var ( definitions lazy.SyncValue[DefinitionMap] - 
definitionsMu sync.Mutex + definitionsMu syncs.Mutex definitionsList []*Definition definitionsUsed bool ) diff --git a/wgengine/magicsock/blockforever_conn.go b/wgengine/magicsock/blockforever_conn.go index f2e85dcd57002..272a12513b353 100644 --- a/wgengine/magicsock/blockforever_conn.go +++ b/wgengine/magicsock/blockforever_conn.go @@ -10,11 +10,13 @@ import ( "sync" "syscall" "time" + + "tailscale.com/syncs" ) // blockForeverConn is a net.PacketConn whose reads block until it is closed. type blockForeverConn struct { - mu sync.Mutex + mu syncs.Mutex cond *sync.Cond closed bool } diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 2010775a10d6e..c2e5dcca37417 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -17,7 +17,6 @@ import ( "reflect" "runtime" "slices" - "sync" "sync/atomic" "time" @@ -28,6 +27,7 @@ import ( "tailscale.com/net/packet" "tailscale.com/net/stun" "tailscale.com/net/tstun" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime/mono" "tailscale.com/types/key" @@ -73,7 +73,7 @@ type endpoint struct { disco atomic.Pointer[endpointDisco] // if the peer supports disco, the key and short string // mu protects all following fields. - mu sync.Mutex // Lock ordering: Conn.mu, then endpoint.mu + mu syncs.Mutex // Lock ordering: Conn.mu, then endpoint.mu heartBeatTimer *time.Timer // nil when idle lastSendExt mono.Time // last time there were outgoing packets sent to this peer from an external trigger (e.g. 
wireguard-go or disco pingCLI) diff --git a/wgengine/magicsock/endpoint_tracker.go b/wgengine/magicsock/endpoint_tracker.go index 5caddd1a06960..e95852d2491b7 100644 --- a/wgengine/magicsock/endpoint_tracker.go +++ b/wgengine/magicsock/endpoint_tracker.go @@ -6,9 +6,9 @@ package magicsock import ( "net/netip" "slices" - "sync" "time" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tempfork/heap" "tailscale.com/util/mak" @@ -107,7 +107,7 @@ func (eh endpointHeap) Min() *endpointTrackerEntry { // // See tailscale/tailscale#7877 for more information. type endpointTracker struct { - mu sync.Mutex + mu syncs.Mutex endpoints map[netip.Addr]*endpointHeap } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index d44cf1c1173f9..f610d6adbf01e 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -286,7 +286,7 @@ type Conn struct { // mu guards all following fields; see userspaceEngine lock // ordering rules against the engine. For derphttp, mu must // be held before derphttp.Client.mu. - mu sync.Mutex + mu syncs.Mutex muCond *sync.Cond onlyTCP443 atomic.Bool diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index 2798abbf20ed8..c98e645705b46 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ b/wgengine/magicsock/rebinding_conn.go @@ -8,7 +8,6 @@ import ( "fmt" "net" "net/netip" - "sync" "sync/atomic" "syscall" @@ -16,6 +15,7 @@ import ( "tailscale.com/net/batching" "tailscale.com/net/netaddr" "tailscale.com/net/packet" + "tailscale.com/syncs" "tailscale.com/types/nettype" ) @@ -31,7 +31,7 @@ type RebindingUDPConn struct { // Neither is expected to be nil, sockets are bound on creation. 
pconnAtomic atomic.Pointer[nettype.PacketConn] - mu sync.Mutex // held while changing pconn (and pconnAtomic) + mu syncs.Mutex // held while changing pconn (and pconnAtomic) pconn nettype.PacketConn port uint16 } diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index a9dca70ae2228..2f93f1085168a 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -15,6 +15,7 @@ import ( "tailscale.com/net/packet" "tailscale.com/net/stun" udprelay "tailscale.com/net/udprelay/endpoint" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/key" @@ -58,7 +59,7 @@ type relayManager struct { getServersCh chan chan set.Set[candidatePeerRelay] derpHomeChangeCh chan derpHomeChangeEvent - discoInfoMu sync.Mutex // guards the following field + discoInfoMu syncs.Mutex // guards the following field discoInfoByServerDisco map[key.DiscoPublic]*relayHandshakeDiscoInfo // runLoopStoppedCh is written to by runLoop() upon return, enabling event diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 9809d1ce65326..12fe9c797641a 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -15,7 +15,6 @@ import ( "log" "net/http" "net/netip" - "sync" "time" "tailscale.com/health" @@ -24,6 +23,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/types/ipproto" "tailscale.com/types/logger" "tailscale.com/types/logid" @@ -57,7 +57,7 @@ func (noopDevice) SetConnectionCounter(netlogfunc.ConnectionCounter) {} // unless the Tailnet administrator opts-into explicit logging. // The zero value is ready for use. type Logger struct { - mu sync.Mutex // protects all fields below + mu syncs.Mutex // protects all fields below logf logger.Logf // shutdownLocked shuts down the logger. 
From 139c395d7df2479657867e24f3a75a1608b6fa6f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 22 Oct 2025 15:08:36 +0100 Subject: [PATCH 0695/1093] cmd/tailscale/cli: stabilise the output of `tailscale lock log --json` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch changes the behaviour of `tailscale lock log --json` to make it more useful for users. It also introduces versioning of our JSON output. ## Changes to `tailscale lock log --json` Previously this command would print the hash and base64-encoded bytes of each AUM, and users would need their own CBOR decoder to interpret it in a useful way: ```json [ { "Hash": [ 80, 136, 151, … ], "Change": "checkpoint", "Raw": "pAEFAvYFpQH2AopYIAkPN+8V3cJpkoC5ZY2+RI2Bcg2q5G7tRAQQd67W3YpnWCDPOo4KGeQBd8hdGsjoEQpSXyiPdlm+NXAlJ5dS1qEbFlggylNJDQM5ZQ2ULNsXxg2ZBFkPl/D93I1M56/rowU+UIlYIPZ/SxT9EA2Idy9kaCbsFzjX/s3Ms7584wWGbWd/f/QAWCBHYZzYiAPpQ+NXN+1Wn2fopQYk4yl7kNQcMXUKNAdt1lggcfjcuVACOH0J9pRNvYZQFOkbiBmLOW1hPKJsbC1D1GdYIKrJ38XMgpVMuTuBxM4YwoLmrK/RgXQw1uVEL3cywl3QWCA0FilVVv8uys8BNhS62cfNvCew1Pw5wIgSe3Prv8d8pFggQrwIt6ldYtyFPQcC5V18qrCnt7VpThACaz5RYzpx7RNYIKskOA7UoNiVtMkOrV2QoXv6EvDpbO26a01lVeh8UCeEA4KjAQECAQNYIORIdNHqSOzz1trIygnP5w3JWK2DtlY5NDIBbD7SKcjWowEBAgEDWCD27LpxiZNiA19k0QZhOWmJRvBdK2mz+dHu7rf0iGTPFwQb69Gt42fKNn0FGwRUiav/k6dDF4GiAVgg5Eh00epI7PPW2sjKCc/nDclYrYO2Vjk0MgFsPtIpyNYCWEDzIAooc+m45ay5PB/OB4AA9Fdki4KJq9Ll+PF6IJHYlOVhpTbc3E0KF7ODu1WURd0f7PXnW72dr89CSfGxIHAF" } ] ``` Now we print the AUM in an expanded form that can be easily read by scripts, although we include the raw bytes for verification and auditing. 
```json { "SchemaVersion": "1", "Messages": [ { "Hash": "KCEJPRKNSXJG2TPH3EHQRLJNLIIK2DV53FUNPADWA7BZJWBDRXZQ", "AUM": { "MessageKind": "checkpoint", "PrevAUMHash": null, "Key": null, "KeyID": null, "State": { … }, "Votes": null, "Meta": null, "Signatures": [ { "KeyID": "tlpub:e44874d1ea48ecf3d6dac8ca09cfe70dc958ad83b656393432016c3ed229c8d6", "Signature": "8yAKKHPpuOWsuTwfzgeAAPRXZIuCiavS5fjxeiCR2JTlYaU23NxNChezg7tVlEXdH+z151u9na/PQknxsSBwBQ==" } ] }, "Raw": "pAEFAvYFpQH2AopYIAkPN-8V3cJpkoC5ZY2-RI2Bcg2q5G7tRAQQd67W3YpnWCDPOo4KGeQBd8hdGsjoEQpSXyiPdlm-NXAlJ5dS1qEbFlggylNJDQM5ZQ2ULNsXxg2ZBFkPl_D93I1M56_rowU-UIlYIPZ_SxT9EA2Idy9kaCbsFzjX_s3Ms7584wWGbWd_f_QAWCBHYZzYiAPpQ-NXN-1Wn2fopQYk4yl7kNQcMXUKNAdt1lggcfjcuVACOH0J9pRNvYZQFOkbiBmLOW1hPKJsbC1D1GdYIKrJ38XMgpVMuTuBxM4YwoLmrK_RgXQw1uVEL3cywl3QWCA0FilVVv8uys8BNhS62cfNvCew1Pw5wIgSe3Prv8d8pFggQrwIt6ldYtyFPQcC5V18qrCnt7VpThACaz5RYzpx7RNYIKskOA7UoNiVtMkOrV2QoXv6EvDpbO26a01lVeh8UCeEA4KjAQECAQNYIORIdNHqSOzz1trIygnP5w3JWK2DtlY5NDIBbD7SKcjWowEBAgEDWCD27LpxiZNiA19k0QZhOWmJRvBdK2mz-dHu7rf0iGTPFwQb69Gt42fKNn0FGwRUiav_k6dDF4GiAVgg5Eh00epI7PPW2sjKCc_nDclYrYO2Vjk0MgFsPtIpyNYCWEDzIAooc-m45ay5PB_OB4AA9Fdki4KJq9Ll-PF6IJHYlOVhpTbc3E0KF7ODu1WURd0f7PXnW72dr89CSfGxIHAF" } ] } ``` This output was previously marked as unstable, and it wasn't very useful, so changing it should be fine. ## Versioning our JSON output This patch introduces a way to version our JSON output on the CLI, so we can make backwards-incompatible changes in future without breaking existing scripts or integrations. You can run this command in two ways: ``` tailscale lock log --json tailscale lock log --json=1 ``` Passing an explicit version number allows you to pick a specific JSON schema. If we ever want to change the schema, we increment the version number and users must opt-in to the new output. A bare `--json` flag will always return schema version 1, for compatibility with existing scripts. 
Updates https://github.com/tailscale/tailscale/issues/17613 Updates https://github.com/tailscale/corp/issues/23258 Signed-off-by: Alex Chan Change-Id: I897f78521cc1a81651f5476228c0882d7b723606 --- cmd/tailscale/cli/jsonoutput/jsonoutput.go | 84 ++++++++ .../cli/jsonoutput/network-lock-v1.go | 203 +++++++++++++++++ cmd/tailscale/cli/network-lock.go | 29 ++- cmd/tailscale/cli/network-lock_test.go | 204 ++++++++++++++++++ cmd/tailscale/depaware.txt | 1 + tka/aum.go | 15 +- 6 files changed, 524 insertions(+), 12 deletions(-) create mode 100644 cmd/tailscale/cli/jsonoutput/jsonoutput.go create mode 100644 cmd/tailscale/cli/jsonoutput/network-lock-v1.go create mode 100644 cmd/tailscale/cli/network-lock_test.go diff --git a/cmd/tailscale/cli/jsonoutput/jsonoutput.go b/cmd/tailscale/cli/jsonoutput/jsonoutput.go new file mode 100644 index 0000000000000..aa49acc28baae --- /dev/null +++ b/cmd/tailscale/cli/jsonoutput/jsonoutput.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package jsonoutput provides stable and versioned JSON serialisation for CLI output. +// This allows us to provide stable output to scripts/clients, but also make +// breaking changes to the output when it's useful. +// +// Historically we only used `--json` as a boolean flag, so changing the output +// could break scripts that rely on the existing format. +// +// This package allows callers to pass a version number to `--json` and get +// a consistent output. We'll bump the version when we make a breaking change +// that's likely to break scripts that rely on the existing output, e.g. if +// we remove a field or change the type/format. +// +// Passing just the boolean flag `--json` will always return v1, to preserve +// compatibility with scripts written before we versioned our output. 
+package jsonoutput + +import ( + "errors" + "fmt" + "strconv" +) + +// JSONSchemaVersion implements flag.Value, and tracks whether the CLI has +// been called with `--json`, and if so, with what value. +type JSONSchemaVersion struct { + // IsSet tracks if the flag was provided at all. + IsSet bool + + // Value tracks the desired schema version, which defaults to 1 if + // the user passes `--json` without an argument. + Value int +} + +// String returns the default value which is printed in the CLI help text. +func (v *JSONSchemaVersion) String() string { + if v.IsSet { + return strconv.Itoa(v.Value) + } else { + return "(not set)" + } +} + +// Set is called when the user passes the flag as a command-line argument. +func (v *JSONSchemaVersion) Set(s string) error { + if v.IsSet { + return errors.New("received multiple instances of --json; only pass it once") + } + + v.IsSet = true + + // If the user doesn't supply a schema version, default to 1. + // This ensures that any existing scripts will continue to get their + // current output. + if s == "true" { + v.Value = 1 + return nil + } + + version, err := strconv.Atoi(s) + if err != nil { + return fmt.Errorf("invalid integer value passed to --json: %q", s) + } + v.Value = version + return nil +} + +// IsBoolFlag tells the flag package that JSONSchemaVersion can be set +// without an argument. +func (v *JSONSchemaVersion) IsBoolFlag() bool { + return true +} + +// ResponseEnvelope is a set of fields common to all versioned JSON output. +type ResponseEnvelope struct { + // SchemaVersion is the version of the JSON output, e.g. "1", "2", "3" + SchemaVersion string + + // ResponseWarning tells a user if a newer version of the JSON output + // is available. 
+ ResponseWarning string `json:"_WARNING,omitzero"` +} diff --git a/cmd/tailscale/cli/jsonoutput/network-lock-v1.go b/cmd/tailscale/cli/jsonoutput/network-lock-v1.go new file mode 100644 index 0000000000000..8a2d2de336b3d --- /dev/null +++ b/cmd/tailscale/cli/jsonoutput/network-lock-v1.go @@ -0,0 +1,203 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package jsonoutput + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" +) + +// PrintNetworkLockJSONV1 prints the stored TKA state as a JSON object to the CLI, +// in a stable "v1" format. +// +// This format includes: +// +// - the AUM hash as a base32-encoded string +// - the raw AUM as base64-encoded bytes +// - the expanded AUM, which prints named fields for consumption by other tools +func PrintNetworkLockJSONV1(out io.Writer, updates []ipnstate.NetworkLockUpdate) error { + messages := make([]logMessageV1, len(updates)) + + for i, update := range updates { + var aum tka.AUM + if err := aum.Unserialize(update.Raw); err != nil { + return fmt.Errorf("decoding: %w", err) + } + + h := aum.Hash() + + if !bytes.Equal(h[:], update.Hash[:]) { + return fmt.Errorf("incorrect AUM hash: got %v, want %v", h, update) + } + + messages[i] = toLogMessageV1(aum, update) + } + + result := struct { + ResponseEnvelope + Messages []logMessageV1 + }{ + ResponseEnvelope: ResponseEnvelope{ + SchemaVersion: "1", + }, + Messages: messages, + } + + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + return enc.Encode(result) +} + +// toLogMessageV1 converts a [tka.AUM] and [ipnstate.NetworkLockUpdate] to the +// JSON output returned by the CLI. 
+func toLogMessageV1(aum tka.AUM, update ipnstate.NetworkLockUpdate) logMessageV1 { + expandedAUM := expandedAUMV1{} + expandedAUM.MessageKind = aum.MessageKind.String() + if len(aum.PrevAUMHash) > 0 { + expandedAUM.PrevAUMHash = aum.PrevAUMHash.String() + } + if key := aum.Key; key != nil { + expandedAUM.Key = toExpandedKeyV1(key) + } + if keyID := aum.KeyID; keyID != nil { + expandedAUM.KeyID = fmt.Sprintf("tlpub:%x", keyID) + } + if state := aum.State; state != nil { + expandedState := expandedStateV1{} + if h := state.LastAUMHash; h != nil { + expandedState.LastAUMHash = h.String() + } + for _, secret := range state.DisablementSecrets { + expandedState.DisablementSecrets = append(expandedState.DisablementSecrets, fmt.Sprintf("%x", secret)) + } + for _, key := range state.Keys { + expandedState.Keys = append(expandedState.Keys, toExpandedKeyV1(&key)) + } + expandedState.StateID1 = state.StateID1 + expandedState.StateID2 = state.StateID2 + expandedAUM.State = expandedState + } + if votes := aum.Votes; votes != nil { + expandedAUM.Votes = *votes + } + expandedAUM.Meta = aum.Meta + for _, signature := range aum.Signatures { + expandedAUM.Signatures = append(expandedAUM.Signatures, expandedSignatureV1{ + KeyID: fmt.Sprintf("tlpub:%x", signature.KeyID), + Signature: base64.URLEncoding.EncodeToString(signature.Signature), + }) + } + + return logMessageV1{ + Hash: aum.Hash().String(), + AUM: expandedAUM, + Raw: base64.URLEncoding.EncodeToString(update.Raw), + } +} + +// toExpandedKeyV1 converts a [tka.Key] to the JSON output returned +// by the CLI. +func toExpandedKeyV1(key *tka.Key) expandedKeyV1 { + return expandedKeyV1{ + Kind: key.Kind.String(), + Votes: key.Votes, + Public: fmt.Sprintf("tlpub:%x", key.Public), + Meta: key.Meta, + } +} + +// logMessageV1 is the JSON representation of an AUM as both raw bytes and +// in its expanded form, and the CLI output is a list of these entries. +type logMessageV1 struct { + // The BLAKE2s digest of the CBOR-encoded AUM. 
This is printed as a + // base32-encoded string, e.g. KCE…XZQ + Hash string + + // The expanded form of the AUM, which presents the fields in a more + // accessible format than doing a CBOR decoding. + AUM expandedAUMV1 + + // The raw bytes of the CBOR-encoded AUM, encoded as base64. + // This is useful for verifying the AUM hash. + Raw string +} + +// expandedAUMV1 is the expanded version of a [tka.AUM], designed so external tools +// can read the AUM without knowing our CBOR definitions. +type expandedAUMV1 struct { + MessageKind string + PrevAUMHash string `json:"PrevAUMHash,omitzero"` + + // Key encodes a public key to be added to the key authority. + // This field is used for AddKey AUMs. + Key expandedKeyV1 `json:"Key,omitzero"` + + // KeyID references a public key which is part of the key authority. + // This field is used for RemoveKey and UpdateKey AUMs. + KeyID string `json:"KeyID,omitzero"` + + // State describes the full state of the key authority. + // This field is used for Checkpoint AUMs. + State expandedStateV1 `json:"State,omitzero"` + + // Votes and Meta describe properties of a key in the key authority. + // These fields are used for UpdateKey AUMs. + Votes uint `json:"Votes,omitzero"` + Meta map[string]string `json:"Meta,omitzero"` + + // Signatures lists the signatures over this AUM. + Signatures []expandedSignatureV1 `json:"Signatures,omitzero"` +} + +// expandedAUMV1 is the expanded version of a [tka.Key], which describes +// the public components of a key known to network-lock. +type expandedKeyV1 struct { + Kind string + + // Votes describes the weight applied to signatures using this key. + Votes uint + + // Public encodes the public key of the key as a hex string. + Public string + + // Meta describes arbitrary metadata about the key. This could be + // used to store the name of the key, for instance. 
+ Meta map[string]string `json:"Meta,omitzero"` +} + +// expandedStateV1 is the expanded version of a [tka.State], which describes +// Tailnet Key Authority state at an instant in time. +type expandedStateV1 struct { + // LastAUMHash is the blake2s digest of the last-applied AUM. + LastAUMHash string `json:"LastAUMHash,omitzero"` + + // DisablementSecrets are KDF-derived values which can be used + // to turn off the TKA in the event of a consensus-breaking bug. + DisablementSecrets []string + + // Keys are the public keys of either: + // + // 1. The signing nodes currently trusted by the TKA. + // 2. Ephemeral keys that were used to generate pre-signed auth keys. + Keys []expandedKeyV1 + + // StateID's are nonce's, generated on enablement and fixed for + // the lifetime of the Tailnet Key Authority. + StateID1 uint64 + StateID2 uint64 +} + +// expandedSignatureV1 is the expanded form of a [tka.Signature], which +// describes a signature over an AUM. This signature can be verified +// using the key referenced by KeyID. 
+type expandedSignatureV1 struct { + KeyID string + Signature string +} diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index a15d9ab88b596..73b1d62016a75 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -10,10 +10,11 @@ import ( "context" "crypto/rand" "encoding/hex" - "encoding/json" + jsonv1 "encoding/json" "errors" "flag" "fmt" + "io" "os" "strconv" "strings" @@ -21,6 +22,7 @@ import ( "github.com/mattn/go-isatty" "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/cmd/tailscale/cli/jsonoutput" "tailscale.com/ipn/ipnstate" "tailscale.com/tka" "tailscale.com/tsconst" @@ -219,7 +221,7 @@ func runNetworkLockStatus(ctx context.Context, args []string) error { } if nlStatusArgs.json { - enc := json.NewEncoder(os.Stdout) + enc := jsonv1.NewEncoder(os.Stdout) enc.SetIndent("", " ") return enc.Encode(st) } @@ -600,7 +602,7 @@ func runNetworkLockDisablementKDF(ctx context.Context, args []string) error { var nlLogArgs struct { limit int - json bool + json jsonoutput.JSONSchemaVersion } var nlLogCmd = &ffcli.Command{ @@ -612,7 +614,7 @@ var nlLogCmd = &ffcli.Command{ FlagSet: (func() *flag.FlagSet { fs := newFlagSet("lock log") fs.IntVar(&nlLogArgs.limit, "limit", 50, "max number of updates to list") - fs.BoolVar(&nlLogArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)") + fs.Var(&nlLogArgs.json, "json", "output in JSON format") return fs })(), } @@ -678,7 +680,7 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er default: // Print a JSON encoding of the AUM as a fallback. 
- e := json.NewEncoder(&stanza) + e := jsonv1.NewEncoder(&stanza) e.SetIndent("", "\t") if err := e.Encode(aum); err != nil { return "", err @@ -702,14 +704,21 @@ func runNetworkLockLog(ctx context.Context, args []string) error { if err != nil { return fixTailscaledConnectError(err) } - if nlLogArgs.json { - enc := json.NewEncoder(Stdout) - enc.SetIndent("", " ") - return enc.Encode(updates) - } out, useColor := colorableOutput() + return printNetworkLockLog(updates, out, nlLogArgs.json, useColor) +} + +func printNetworkLockLog(updates []ipnstate.NetworkLockUpdate, out io.Writer, jsonSchema jsonoutput.JSONSchemaVersion, useColor bool) error { + if jsonSchema.IsSet { + if jsonSchema.Value == 1 { + return jsonoutput.PrintNetworkLockJSONV1(out, updates) + } else { + return fmt.Errorf("unrecognised version: %q", jsonSchema.Value) + } + } + for _, update := range updates { stanza, err := nlDescribeUpdate(update, useColor) if err != nil { diff --git a/cmd/tailscale/cli/network-lock_test.go b/cmd/tailscale/cli/network-lock_test.go new file mode 100644 index 0000000000000..ccd2957ab560e --- /dev/null +++ b/cmd/tailscale/cli/network-lock_test.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "bytes" + "testing" + + "github.com/google/go-cmp/cmp" + "tailscale.com/cmd/tailscale/cli/jsonoutput" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/tkatype" +) + +func TestNetworkLockLogOutput(t *testing.T) { + votes := uint(1) + aum1 := tka.AUM{ + MessageKind: tka.AUMAddKey, + Key: &tka.Key{ + Kind: tka.Key25519, + Votes: 1, + Public: []byte{2, 2}, + }, + } + h1 := aum1.Hash() + aum2 := tka.AUM{ + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{3, 3}, + PrevAUMHash: h1[:], + Signatures: []tkatype.Signature{ + { + KeyID: []byte{3, 4}, + Signature: []byte{4, 5}, + }, + }, + Meta: map[string]string{"en": "three", "de": "drei", "es": "tres"}, + } + h2 := aum2.Hash() + aum3 := 
tka.AUM{ + MessageKind: tka.AUMCheckpoint, + PrevAUMHash: h2[:], + State: &tka.State{ + Keys: []tka.Key{ + { + Kind: tka.Key25519, + Votes: 1, + Public: []byte{1, 1}, + Meta: map[string]string{"en": "one", "de": "eins", "es": "uno"}, + }, + }, + DisablementSecrets: [][]byte{ + {1, 2, 3}, + {4, 5, 6}, + {7, 8, 9}, + }, + }, + Votes: &votes, + } + + updates := []ipnstate.NetworkLockUpdate{ + { + Hash: aum3.Hash(), + Change: aum3.MessageKind.String(), + Raw: aum3.Serialize(), + }, + { + Hash: aum2.Hash(), + Change: aum2.MessageKind.String(), + Raw: aum2.Serialize(), + }, + { + Hash: aum1.Hash(), + Change: aum1.MessageKind.String(), + Raw: aum1.Serialize(), + }, + } + + t.Run("human-readable", func(t *testing.T) { + t.Parallel() + + var outBuf bytes.Buffer + json := jsonoutput.JSONSchemaVersion{} + useColor := false + + printNetworkLockLog(updates, &outBuf, json, useColor) + + t.Logf("%s", outBuf.String()) + + want := `update 4M4Q3IXBARPQMFVXHJBDCYQMWU5H5FBKD7MFF75HE4O5JMIWR2UA (checkpoint) +Disablement values: + - 010203 + - 040506 + - 070809 +Keys: + Type: 25519 + KeyID: tlpub:0101 + Metadata: map[de:eins en:one es:uno] + +update BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ (remove-key) +KeyID: tlpub:0303 + +update UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA (add-key) +Type: 25519 +KeyID: tlpub:0202 + +` + + if diff := cmp.Diff(outBuf.String(), want); diff != "" { + t.Fatalf("wrong output (-got, +want):\n%s", diff) + } + }) + + jsonV1 := `{ + "SchemaVersion": "1", + "Messages": [ + { + "Hash": "4M4Q3IXBARPQMFVXHJBDCYQMWU5H5FBKD7MFF75HE4O5JMIWR2UA", + "AUM": { + "MessageKind": "checkpoint", + "PrevAUMHash": "BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ", + "State": { + "DisablementSecrets": [ + "010203", + "040506", + "070809" + ], + "Keys": [ + { + "Kind": "25519", + "Votes": 1, + "Public": "tlpub:0101", + "Meta": { + "de": "eins", + "en": "one", + "es": "uno" + } + } + ], + "StateID1": 0, + "StateID2": 0 + }, + "Votes": 1 + }, + "Raw": 
"pAEFAlggCqtbndUNv4_i-JrrVbGywbw5dNWNZYysEm02CCgf3q8FowH2AoNDAQIDQwQFBkMHCAkDgaQBAQIBA0IBAQyjYmRlZGVpbnNiZW5jb25lYmVzY3VubwYB" + }, + { + "Hash": "BKVVXHOVBW7Y7YXYTLVVLMNSYG6DS5GVRVSYZLASNU3AQKA732XQ", + "AUM": { + "MessageKind": "remove-key", + "PrevAUMHash": "UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA", + "KeyID": "tlpub:0303", + "Meta": { + "de": "drei", + "en": "three", + "es": "tres" + }, + "Signatures": [ + { + "KeyID": "tlpub:0304", + "Signature": "BAU=" + } + ] + }, + "Raw": "pQECAlggopKFFOhcPaARv2QQU90-kWozQFAG3Hqja7Vez-_EZIAEQgMDB6NiZGVkZHJlaWJlbmV0aHJlZWJlc2R0cmVzF4GiAUIDBAJCBAU=" + }, + { + "Hash": "UKJIKFHILQ62AEN7MQIFHXJ6SFVDGQCQA3OHVI3LWVPM736EMSAA", + "AUM": { + "MessageKind": "add-key", + "Key": { + "Kind": "25519", + "Votes": 1, + "Public": "tlpub:0202" + } + }, + "Raw": "owEBAvYDowEBAgEDQgIC" + } + ] +} +` + + t.Run("json-1", func(t *testing.T) { + t.Parallel() + t.Logf("BOOM") + + var outBuf bytes.Buffer + json := jsonoutput.JSONSchemaVersion{ + IsSet: true, + Value: 1, + } + useColor := false + + printNetworkLockLog(updates, &outBuf, json, useColor) + + want := jsonV1 + t.Logf("%s", outBuf.String()) + + if diff := cmp.Diff(outBuf.String(), want); diff != "" { + t.Fatalf("wrong output (-got, +want):\n%s", diff) + } + }) +} diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 53dc998bda611..8b576ffc3a4dd 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -85,6 +85,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscale tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete + tailscale.com/cmd/tailscale/cli/jsonoutput from tailscale.com/cmd/tailscale/cli tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlhttp from 
tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp diff --git a/tka/aum.go b/tka/aum.go index 6d75830bd293f..bd17b2098e0c8 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -55,6 +55,17 @@ func (h AUMHash) IsZero() bool { return h == (AUMHash{}) } +// PrevAUMHash represents the BLAKE2s digest of an Authority Update Message (AUM). +// Unlike an AUMHash, this can be empty if there is no previous AUM hash +// (which occurs in the genesis AUM). +type PrevAUMHash []byte + +// String returns the PrevAUMHash encoded as base32. +// This is suitable for use as a filename, and for storing in text-preferred media. +func (h PrevAUMHash) String() string { + return base32StdNoPad.EncodeToString(h[:]) +} + // AUMKind describes valid AUM types. type AUMKind uint8 @@ -119,8 +130,8 @@ func (k AUMKind) String() string { // behavior of old clients (which will ignore the field). // - No floats! type AUM struct { - MessageKind AUMKind `cbor:"1,keyasint"` - PrevAUMHash []byte `cbor:"2,keyasint"` + MessageKind AUMKind `cbor:"1,keyasint"` + PrevAUMHash PrevAUMHash `cbor:"2,keyasint"` // Key encodes a public key to be added to the key authority. // This field is used for AddKey AUMs. From a5b2f185679e1eb280f3056e224a4ed92268896d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 16 Nov 2025 18:36:27 -0800 Subject: [PATCH 0696/1093] control/controlclient: remove some public API, move to Options & test-only Includes adding StartPaused, which will be used in a future change to enable netmap caching testing. 
Updates #12639 Change-Id: Iec39915d33b8d75e9b8315b281b1af2f5d13a44a Signed-off-by: Brad Fitzpatrick --- control/controlclient/auto.go | 35 ++++++++++++++++++++++++------- control/controlclient/direct.go | 8 +++++++ ipn/ipnlocal/network-lock_test.go | 4 +++- 3 files changed, 38 insertions(+), 9 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 9d648409b4c47..20795d5a7dd92 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -23,6 +23,7 @@ import ( "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/execqueue" + "tailscale.com/util/testenv" ) type LoginGoal struct { @@ -123,6 +124,7 @@ type Auto struct { mu sync.Mutex // mutex guards the following fields + started bool // whether [Auto.Start] has been called wantLoggedIn bool // whether the user wants to be logged in per last method call urlToVisit string // the last url we were told to visit expiry time.Time @@ -150,15 +152,21 @@ type Auto struct { // New creates and starts a new Auto. func New(opts Options) (*Auto, error) { - c, err := NewNoStart(opts) - if c != nil { - c.Start() + c, err := newNoStart(opts) + if err != nil { + return nil, err + } + if opts.StartPaused { + c.SetPaused(true) + } + if !opts.SkipStartForTests { + c.start() } return c, err } -// NewNoStart creates a new Auto, but without calling Start on it. -func NewNoStart(opts Options) (_ *Auto, err error) { +// newNoStart creates a new Auto, but without calling Start on it. +func newNoStart(opts Options) (_ *Auto, err error) { direct, err := NewDirect(opts) if err != nil { return nil, err @@ -218,10 +226,21 @@ func (c *Auto) SetPaused(paused bool) { c.unpauseWaiters = nil } -// Start starts the client's goroutines. +// StartForTest starts the client's goroutines. // -// It should only be called for clients created by NewNoStart. -func (c *Auto) Start() { +// It should only be called for clients created with [Options.SkipStartForTests]. 
+func (c *Auto) StartForTest() { + testenv.AssertInTest() + c.start() +} + +func (c *Auto) start() { + c.mu.Lock() + defer c.mu.Unlock() + if c.started { + return + } + c.started = true go c.authRoutine() go c.mapRoutine() go c.updateRoutine() diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 9e7d10d8dcf0b..1e1ce781fe511 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -146,6 +146,14 @@ type Options struct { ControlKnobs *controlknobs.Knobs // or nil to ignore Bus *eventbus.Bus // non-nil, for setting up publishers + SkipStartForTests bool // if true, don't call [Auto.Start] to avoid any background goroutines (for tests only) + + // StartPaused indicates whether the client should start in a paused state + // where it doesn't do network requests. This primarily exists for testing + // but not necessarily "go test" tests, so it isn't restricted to only + // being used in tests. + StartPaused bool + // Observer is called when there's a change in status to report // from the control client. // If nil, no status updates are reported. 
diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 00d4ff6d9f11a..5fa0728830eb6 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -60,9 +60,11 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even NoiseTestClient: c, Dialer: dialer, Bus: bus, + + SkipStartForTests: true, } - cc, err := controlclient.NewNoStart(opts) + cc, err := controlclient.New(opts) if err != nil { t.Fatal(err) } From 1e95bfa1848209b004cacb612dda6a899539653f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Nov 2025 07:54:29 -0800 Subject: [PATCH 0697/1093] ipn: fix typo in comment Updates #cleanup Change-Id: Iec66518abd656c64943a58eb6d92f342e627a613 Signed-off-by: Brad Fitzpatrick --- ipn/ipn_view.go | 2 +- ipn/prefs.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 3179e3bb5e2e0..12fe93bab3896 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -444,7 +444,7 @@ func (v PrefsView) RelayServerPort() views.ValuePointer[int] { // AllowSingleHosts was a legacy field that was always true // for the past 4.5 years. It controlled whether Tailscale -// peers got /32 or /127 routes for each other. +// peers got /32 or /128 routes for each other. // As of 2024-05-17 we're starting to ignore it, but to let // people still downgrade Tailscale versions and not break // all peer-to-peer networking we still write it to disk (as JSON) diff --git a/ipn/prefs.go b/ipn/prefs.go index 81dd1c1c3dc49..796098c8ad949 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -284,7 +284,7 @@ type Prefs struct { // AllowSingleHosts was a legacy field that was always true // for the past 4.5 years. It controlled whether Tailscale - // peers got /32 or /127 routes for each other. + // peers got /32 or /128 routes for each other. 
// As of 2024-05-17 we're starting to ignore it, but to let // people still downgrade Tailscale versions and not break // all peer-to-peer networking we still write it to disk (as JSON) From 200383dce5d93faedc77aa0c769d96468df41d6c Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 17 Nov 2025 16:35:36 +0000 Subject: [PATCH 0698/1093] various: add more missing apostrophes in comments Updates #cleanup Change-Id: I79a0fda9783064a226ee9bcee2c1148212f6df7b Signed-off-by: Alex Chan --- tka/tailchonk.go | 4 ++-- wgengine/magicsock/magicsock_test.go | 2 +- wgengine/router/osrouter/router_linux.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 90f99966cde34..3e8d1b6c816d3 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -678,7 +678,7 @@ func markAncestorIntersectionAUMs(storage Chonk, verdict map[AUMHash]retainState toScan := make([]AUMHash, 0, len(verdict)) for h, v := range verdict { if (v & retainAUMMask) == 0 { - continue // not marked for retention, so dont need to consider it + continue // not marked for retention, so don't need to consider it } if h == candidateAncestor { continue @@ -781,7 +781,7 @@ func markDescendantAUMs(storage Chonk, verdict map[AUMHash]retainState) error { toScan := make([]AUMHash, 0, len(verdict)) for h, v := range verdict { if v&retainAUMMask == 0 { - continue // not marked, so dont need to mark descendants + continue // not marked, so don't need to mark descendants } toScan = append(toScan, h) } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 09c54f504bd92..a0142134a1b1f 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -2462,7 +2462,7 @@ func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) { if len(state.recentPongs) != 1 { t.Errorf("IPv4 address did not have a recentPong entry: got %v, want %v", len(state.recentPongs), 1) } - // Set the latency extremely high so we dont 
choose endpoint during the next + // Set the latency extremely high so we don't choose endpoint during the next // addrForSendLocked call. state.recentPongs[state.recentPong].latency = time.Second } diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 196e1d5529025..7442c045ee079 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -1617,7 +1617,7 @@ func checkOpenWRTUsingMWAN3() (bool, error) { // We want to match on a rule like this: // 2001: from all fwmark 0x100/0x3f00 lookup 1 // - // We dont match on the mask because it can vary, or the + // We don't match on the mask because it can vary, or the // table because I'm not sure if it can vary. if r.Priority >= 2001 && r.Priority <= 2004 && r.Mark != 0 { return true, nil From d01081683c44ef728c1273e3de2b285cd4c30ee1 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 17 Nov 2025 09:05:18 -0800 Subject: [PATCH 0699/1093] go.mod: bump golang.org/x/crypto (#17907) Pick up a fix for https://pkg.go.dev/vuln/GO-2025-4116 (even though we're not affected). 
Updates #cleanup Change-Id: I9f2571b17c1f14db58ece8a5a34785805217d9dd Signed-off-by: Andrew Lytvynov --- atomicfile/zsyscall_windows.go | 2 +- cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- flake.nix | 2 +- go.mod | 21 ++++++---- go.mod.sri | 2 +- go.sum | 42 +++++++++++-------- ipn/desktop/zsyscall_windows.go | 24 +++++------ net/netns/zsyscall_windows.go | 2 +- net/portmapper/pmpresultcode_string.go | 5 ++- net/sockstats/label_string.go | 5 ++- net/tshttpproxy/zsyscall_windows.go | 8 ++-- shell.nix | 2 +- tsnet/depaware.txt | 2 +- util/osdiag/zsyscall_windows.go | 10 ++--- util/winutil/authenticode/zsyscall_windows.go | 20 ++++----- util/winutil/gp/zsyscall_windows.go | 12 +++--- util/winutil/s4u/zsyscall_windows.go | 16 +++---- util/winutil/winenv/zsyscall_windows.go | 6 +-- util/winutil/zsyscall_windows.go | 26 ++++++------ wgengine/magicsock/discopingpurpose_string.go | 5 ++- 22 files changed, 115 insertions(+), 103 deletions(-) diff --git a/atomicfile/zsyscall_windows.go b/atomicfile/zsyscall_windows.go index f2f0b6d08cbb7..bd1bf8113ca2a 100644 --- a/atomicfile/zsyscall_windows.go +++ b/atomicfile/zsyscall_windows.go @@ -44,7 +44,7 @@ var ( ) func replaceFileW(replaced *uint16, replacement *uint16, backup *uint16, flags uint32, exclude unsafe.Pointer, reserved unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procReplaceFileW.Addr(), 6, uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procReplaceFileW.Addr(), uintptr(unsafe.Pointer(replaced)), uintptr(unsafe.Pointer(replacement)), uintptr(unsafe.Pointer(backup)), uintptr(flags), uintptr(exclude), uintptr(reserved)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 4542fcad6e6fe..16ad089f3b213 100644 --- 
a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -997,7 +997,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls + crypto/fips140 from crypto/tls/internal/fips140tls+ crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index be0fd799e66d1..d154020923fbd 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -570,7 +570,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls + crypto/fips140 from crypto/tls/internal/fips140tls+ crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index c68fab6340cad..14db7414a64ba 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -399,7 +399,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls + crypto/fips140 from crypto/tls/internal/fips140tls+ crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ diff --git a/flake.nix b/flake.nix index 217df38c3798a..fc3a466fc8720 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= +# nix-direnv cache busting line: sha256-sGPgML2YM/XNWfsAdDZvzWHagcydwCmR6nKOHJj5COs= diff --git a/go.mod b/go.mod index 
fc8870bafda2b..3b4f34b2df254 100644 --- a/go.mod +++ b/go.mod @@ -102,21 +102,21 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.38.0 + golang.org/x/crypto v0.44.0 golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac - golang.org/x/mod v0.24.0 - golang.org/x/net v0.40.0 + golang.org/x/mod v0.30.0 + golang.org/x/net v0.47.0 golang.org/x/oauth2 v0.30.0 - golang.org/x/sync v0.14.0 - golang.org/x/sys v0.33.0 - golang.org/x/term v0.32.0 + golang.org/x/sync v0.18.0 + golang.org/x/sys v0.38.0 + golang.org/x/term v0.37.0 golang.org/x/time v0.11.0 - golang.org/x/tools v0.33.0 + golang.org/x/tools v0.39.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 - honnef.co/go/tools v0.6.1 + honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 k8s.io/api v0.32.0 k8s.io/apimachinery v0.32.0 k8s.io/apiserver v0.32.0 @@ -187,6 +187,9 @@ require ( go.opentelemetry.io/otel/metric v1.33.0 // indirect go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/automaxprocs v1.5.3 // indirect + golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect + golang.org/x/tools/go/expect v0.1.1-deprecated // indirect + golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect k8s.io/component-base v0.32.0 // indirect @@ -409,7 +412,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/image v0.27.0 // indirect - golang.org/x/text v0.25.0 // indirect + golang.org/x/text v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/protobuf v1.36.3 // indirect gopkg.in/inf.v0 v0.9.1 // 
indirect diff --git a/go.mod.sri b/go.mod.sri index b7df2cc2ccccd..76c72f0c9bc14 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= +sha256-sGPgML2YM/XNWfsAdDZvzWHagcydwCmR6nKOHJj5COs= diff --git a/go.sum b/go.sum index 177efd4f7e293..f0758f2d4ba00 100644 --- a/go.sum +++ b/go.sum @@ -1128,8 +1128,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= -golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU= +golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1177,8 +1177,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= -golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod 
h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1218,8 +1218,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1241,8 +1241,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync 
v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1305,16 +1305,18 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= +golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.37.0 
h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1325,8 +1327,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1396,8 +1398,12 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= -golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= 
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1538,8 +1544,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= -honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= +honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 h1:5SXjd4ET5dYijLaf0O3aOenC0Z4ZafIWSpjUzsQaNho= +honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0/go.mod h1:EPDDhEZqVHhWuPI5zPAsjU0U7v9xNIWjoOVyZ5ZcniQ= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= diff --git a/ipn/desktop/zsyscall_windows.go b/ipn/desktop/zsyscall_windows.go index 535274016f9ca..8d97c4d8089ef 100644 --- a/ipn/desktop/zsyscall_windows.go +++ 
b/ipn/desktop/zsyscall_windows.go @@ -57,12 +57,12 @@ var ( ) func setLastError(dwErrorCode uint32) { - syscall.Syscall(procSetLastError.Addr(), 1, uintptr(dwErrorCode), 0, 0) + syscall.SyscallN(procSetLastError.Addr(), uintptr(dwErrorCode)) return } func createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, dwStyle uint32, x int32, y int32, nWidth int32, nHeight int32, hWndParent windows.HWND, hMenu windows.Handle, hInstance windows.Handle, lpParam unsafe.Pointer) (hWnd windows.HWND, err error) { - r0, _, e1 := syscall.Syscall12(procCreateWindowExW.Addr(), 12, uintptr(dwExStyle), uintptr(unsafe.Pointer(lpClassName)), uintptr(unsafe.Pointer(lpWindowName)), uintptr(dwStyle), uintptr(x), uintptr(y), uintptr(nWidth), uintptr(nHeight), uintptr(hWndParent), uintptr(hMenu), uintptr(hInstance), uintptr(lpParam)) + r0, _, e1 := syscall.SyscallN(procCreateWindowExW.Addr(), uintptr(dwExStyle), uintptr(unsafe.Pointer(lpClassName)), uintptr(unsafe.Pointer(lpWindowName)), uintptr(dwStyle), uintptr(x), uintptr(y), uintptr(nWidth), uintptr(nHeight), uintptr(hWndParent), uintptr(hMenu), uintptr(hInstance), uintptr(lpParam)) hWnd = windows.HWND(r0) if hWnd == 0 { err = errnoErr(e1) @@ -71,13 +71,13 @@ func createWindowEx(dwExStyle uint32, lpClassName *uint16, lpWindowName *uint16, } func defWindowProc(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) { - r0, _, _ := syscall.Syscall6(procDefWindowProcW.Addr(), 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + r0, _, _ := syscall.SyscallN(procDefWindowProcW.Addr(), uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam)) res = uintptr(r0) return } func destroyWindow(hwnd windows.HWND) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyWindow.Addr(), 1, uintptr(hwnd), 0, 0) + r1, _, e1 := syscall.SyscallN(procDestroyWindow.Addr(), uintptr(hwnd)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -85,24 +85,24 @@ func destroyWindow(hwnd windows.HWND) (err 
error) { } func dispatchMessage(lpMsg *_MSG) (res uintptr) { - r0, _, _ := syscall.Syscall(procDispatchMessageW.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0) + r0, _, _ := syscall.SyscallN(procDispatchMessageW.Addr(), uintptr(unsafe.Pointer(lpMsg))) res = uintptr(r0) return } func getMessage(lpMsg *_MSG, hwnd windows.HWND, msgMin uint32, msgMax uint32) (ret int32) { - r0, _, _ := syscall.Syscall6(procGetMessageW.Addr(), 4, uintptr(unsafe.Pointer(lpMsg)), uintptr(hwnd), uintptr(msgMin), uintptr(msgMax), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMessageW.Addr(), uintptr(unsafe.Pointer(lpMsg)), uintptr(hwnd), uintptr(msgMin), uintptr(msgMax)) ret = int32(r0) return } func postQuitMessage(exitCode int32) { - syscall.Syscall(procPostQuitMessage.Addr(), 1, uintptr(exitCode), 0, 0) + syscall.SyscallN(procPostQuitMessage.Addr(), uintptr(exitCode)) return } func registerClassEx(windowClass *_WNDCLASSEX) (atom uint16, err error) { - r0, _, e1 := syscall.Syscall(procRegisterClassExW.Addr(), 1, uintptr(unsafe.Pointer(windowClass)), 0, 0) + r0, _, e1 := syscall.SyscallN(procRegisterClassExW.Addr(), uintptr(unsafe.Pointer(windowClass))) atom = uint16(r0) if atom == 0 { err = errnoErr(e1) @@ -111,19 +111,19 @@ func registerClassEx(windowClass *_WNDCLASSEX) (atom uint16, err error) { } func sendMessage(hwnd windows.HWND, msg uint32, wparam uintptr, lparam uintptr) (res uintptr) { - r0, _, _ := syscall.Syscall6(procSendMessageW.Addr(), 4, uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam), 0, 0) + r0, _, _ := syscall.SyscallN(procSendMessageW.Addr(), uintptr(hwnd), uintptr(msg), uintptr(wparam), uintptr(lparam)) res = uintptr(r0) return } func translateMessage(lpMsg *_MSG) (res bool) { - r0, _, _ := syscall.Syscall(procTranslateMessage.Addr(), 1, uintptr(unsafe.Pointer(lpMsg)), 0, 0) + r0, _, _ := syscall.SyscallN(procTranslateMessage.Addr(), uintptr(unsafe.Pointer(lpMsg))) res = r0 != 0 return } func registerSessionNotification(hServer windows.Handle, hwnd 
windows.HWND, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procWTSRegisterSessionNotificationEx.Addr(), 3, uintptr(hServer), uintptr(hwnd), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procWTSRegisterSessionNotificationEx.Addr(), uintptr(hServer), uintptr(hwnd), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -131,7 +131,7 @@ func registerSessionNotification(hServer windows.Handle, hwnd windows.HWND, flag } func unregisterSessionNotification(hServer windows.Handle, hwnd windows.HWND) (err error) { - r1, _, e1 := syscall.Syscall(procWTSUnRegisterSessionNotificationEx.Addr(), 2, uintptr(hServer), uintptr(hwnd), 0) + r1, _, e1 := syscall.SyscallN(procWTSUnRegisterSessionNotificationEx.Addr(), uintptr(hServer), uintptr(hwnd)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/net/netns/zsyscall_windows.go b/net/netns/zsyscall_windows.go index 07e2181be222c..3d8f06e097340 100644 --- a/net/netns/zsyscall_windows.go +++ b/net/netns/zsyscall_windows.go @@ -45,7 +45,7 @@ var ( ) func getBestInterfaceEx(sockaddr *winipcfg.RawSockaddrInet, bestIfaceIndex *uint32) (ret error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(unsafe.Pointer(sockaddr)), uintptr(unsafe.Pointer(bestIfaceIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(unsafe.Pointer(sockaddr)), uintptr(unsafe.Pointer(bestIfaceIndex))) if r0 != 0 { ret = syscall.Errno(r0) } diff --git a/net/portmapper/pmpresultcode_string.go b/net/portmapper/pmpresultcode_string.go index 603636adec044..18d911d944126 100644 --- a/net/portmapper/pmpresultcode_string.go +++ b/net/portmapper/pmpresultcode_string.go @@ -24,8 +24,9 @@ const _pmpResultCode_name = "OKUnsupportedVersionNotAuthorizedNetworkFailureOutO var _pmpResultCode_index = [...]uint8{0, 2, 20, 33, 47, 61, 78} func (i pmpResultCode) String() string { - if i >= pmpResultCode(len(_pmpResultCode_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_pmpResultCode_index)-1 { return 
"pmpResultCode(" + strconv.FormatInt(int64(i), 10) + ")" } - return _pmpResultCode_name[_pmpResultCode_index[i]:_pmpResultCode_index[i+1]] + return _pmpResultCode_name[_pmpResultCode_index[idx]:_pmpResultCode_index[idx+1]] } diff --git a/net/sockstats/label_string.go b/net/sockstats/label_string.go index f9a111ad71e08..cc503d943f622 100644 --- a/net/sockstats/label_string.go +++ b/net/sockstats/label_string.go @@ -28,8 +28,9 @@ const _Label_name = "ControlClientAutoControlClientDialerDERPHTTPClientLogtailLo var _Label_index = [...]uint8{0, 17, 36, 50, 63, 78, 93, 107, 123, 140, 157, 169, 186, 201} func (i Label) String() string { - if i >= Label(len(_Label_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Label_index)-1 { return "Label(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Label_name[_Label_index[i]:_Label_index[i+1]] + return _Label_name[_Label_index[idx]:_Label_index[idx+1]] } diff --git a/net/tshttpproxy/zsyscall_windows.go b/net/tshttpproxy/zsyscall_windows.go index c07e9ee03a69e..5dcfae83ea1a4 100644 --- a/net/tshttpproxy/zsyscall_windows.go +++ b/net/tshttpproxy/zsyscall_windows.go @@ -48,7 +48,7 @@ var ( ) func globalFree(hglobal winHGlobal) (err error) { - r1, _, e1 := syscall.Syscall(procGlobalFree.Addr(), 1, uintptr(hglobal), 0, 0) + r1, _, e1 := syscall.SyscallN(procGlobalFree.Addr(), uintptr(hglobal)) if r1 == 0 { err = errnoErr(e1) } @@ -56,7 +56,7 @@ func globalFree(hglobal winHGlobal) (err error) { } func winHTTPCloseHandle(whi winHTTPInternet) (err error) { - r1, _, e1 := syscall.Syscall(procWinHttpCloseHandle.Addr(), 1, uintptr(whi), 0, 0) + r1, _, e1 := syscall.SyscallN(procWinHttpCloseHandle.Addr(), uintptr(whi)) if r1 == 0 { err = errnoErr(e1) } @@ -64,7 +64,7 @@ func winHTTPCloseHandle(whi winHTTPInternet) (err error) { } func winHTTPGetProxyForURL(whi winHTTPInternet, url *uint16, options *winHTTPAutoProxyOptions, proxyInfo *winHTTPProxyInfo) (err error) { - r1, _, e1 := 
syscall.Syscall6(procWinHttpGetProxyForUrl.Addr(), 4, uintptr(whi), uintptr(unsafe.Pointer(url)), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(proxyInfo)), 0, 0) + r1, _, e1 := syscall.SyscallN(procWinHttpGetProxyForUrl.Addr(), uintptr(whi), uintptr(unsafe.Pointer(url)), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(proxyInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -72,7 +72,7 @@ func winHTTPGetProxyForURL(whi winHTTPInternet, url *uint16, options *winHTTPAut } func winHTTPOpen(agent *uint16, accessType uint32, proxy *uint16, proxyBypass *uint16, flags uint32) (whi winHTTPInternet, err error) { - r0, _, e1 := syscall.Syscall6(procWinHttpOpen.Addr(), 5, uintptr(unsafe.Pointer(agent)), uintptr(accessType), uintptr(unsafe.Pointer(proxy)), uintptr(unsafe.Pointer(proxyBypass)), uintptr(flags), 0) + r0, _, e1 := syscall.SyscallN(procWinHttpOpen.Addr(), uintptr(unsafe.Pointer(agent)), uintptr(accessType), uintptr(unsafe.Pointer(proxy)), uintptr(unsafe.Pointer(proxyBypass)), uintptr(flags)) whi = winHTTPInternet(r0) if whi == 0 { err = errnoErr(e1) diff --git a/shell.nix b/shell.nix index f43108753da13..ffb28a18358b0 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-ADuIW4GAiskY0nRFWFk50rzihRIXIdm1Wk7IxfYKMPg= +# nix-direnv cache busting line: sha256-sGPgML2YM/XNWfsAdDZvzWHagcydwCmR6nKOHJj5COs= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 6eb493ef8d07c..7d5ec0a606e4d 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -392,7 +392,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls + crypto/fips140 from crypto/tls/internal/fips140tls+ crypto/hkdf from crypto/internal/hpke+ crypto/hmac from crypto/tls+ crypto/internal/boring from crypto/aes+ diff --git 
a/util/osdiag/zsyscall_windows.go b/util/osdiag/zsyscall_windows.go index ab0d18d3f9c98..2a11b4644fca8 100644 --- a/util/osdiag/zsyscall_windows.go +++ b/util/osdiag/zsyscall_windows.go @@ -51,7 +51,7 @@ var ( ) func regEnumValue(key registry.Key, index uint32, valueName *uint16, valueNameLen *uint32, reserved *uint32, valueType *uint32, pData *byte, cbData *uint32) (ret error) { - r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(valueName)), uintptr(unsafe.Pointer(valueNameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(pData)), uintptr(unsafe.Pointer(cbData)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(valueName)), uintptr(unsafe.Pointer(valueNameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(pData)), uintptr(unsafe.Pointer(cbData))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -59,7 +59,7 @@ func regEnumValue(key registry.Key, index uint32, valueName *uint16, valueNameLe } func globalMemoryStatusEx(memStatus *_MEMORYSTATUSEX) (err error) { - r1, _, e1 := syscall.Syscall(procGlobalMemoryStatusEx.Addr(), 1, uintptr(unsafe.Pointer(memStatus)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGlobalMemoryStatusEx.Addr(), uintptr(unsafe.Pointer(memStatus))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -67,19 +67,19 @@ func globalMemoryStatusEx(memStatus *_MEMORYSTATUSEX) (err error) { } func wscEnumProtocols(iProtocols *int32, protocolBuffer *wsaProtocolInfo, bufLen *uint32, errno *int32) (ret int32) { - r0, _, _ := syscall.Syscall6(procWSCEnumProtocols.Addr(), 4, uintptr(unsafe.Pointer(iProtocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufLen)), uintptr(unsafe.Pointer(errno)), 0, 0) + r0, _, _ := syscall.SyscallN(procWSCEnumProtocols.Addr(), uintptr(unsafe.Pointer(iProtocols)), 
uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufLen)), uintptr(unsafe.Pointer(errno))) ret = int32(r0) return } func wscGetProviderInfo(providerId *windows.GUID, infoType _WSC_PROVIDER_INFO_TYPE, info unsafe.Pointer, infoSize *uintptr, flags uint32, errno *int32) (ret int32) { - r0, _, _ := syscall.Syscall6(procWSCGetProviderInfo.Addr(), 6, uintptr(unsafe.Pointer(providerId)), uintptr(infoType), uintptr(info), uintptr(unsafe.Pointer(infoSize)), uintptr(flags), uintptr(unsafe.Pointer(errno))) + r0, _, _ := syscall.SyscallN(procWSCGetProviderInfo.Addr(), uintptr(unsafe.Pointer(providerId)), uintptr(infoType), uintptr(info), uintptr(unsafe.Pointer(infoSize)), uintptr(flags), uintptr(unsafe.Pointer(errno))) ret = int32(r0) return } func wscGetProviderPath(providerId *windows.GUID, providerDllPath *uint16, providerDllPathLen *int32, errno *int32) (ret int32) { - r0, _, _ := syscall.Syscall6(procWSCGetProviderPath.Addr(), 4, uintptr(unsafe.Pointer(providerId)), uintptr(unsafe.Pointer(providerDllPath)), uintptr(unsafe.Pointer(providerDllPathLen)), uintptr(unsafe.Pointer(errno)), 0, 0) + r0, _, _ := syscall.SyscallN(procWSCGetProviderPath.Addr(), uintptr(unsafe.Pointer(providerId)), uintptr(unsafe.Pointer(providerDllPath)), uintptr(unsafe.Pointer(providerDllPathLen)), uintptr(unsafe.Pointer(errno))) ret = int32(r0) return } diff --git a/util/winutil/authenticode/zsyscall_windows.go b/util/winutil/authenticode/zsyscall_windows.go index 643721e06aad5..f1fba2828713c 100644 --- a/util/winutil/authenticode/zsyscall_windows.go +++ b/util/winutil/authenticode/zsyscall_windows.go @@ -56,7 +56,7 @@ var ( ) func cryptMsgClose(cryptMsg windows.Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCryptMsgClose.Addr(), 1, uintptr(cryptMsg), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptMsgClose.Addr(), uintptr(cryptMsg)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -64,7 +64,7 @@ func cryptMsgClose(cryptMsg windows.Handle) (err error) { } func 
cryptMsgGetParam(cryptMsg windows.Handle, paramType uint32, index uint32, data unsafe.Pointer, dataLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptMsgGetParam.Addr(), 5, uintptr(cryptMsg), uintptr(paramType), uintptr(index), uintptr(data), uintptr(unsafe.Pointer(dataLen)), 0) + r1, _, e1 := syscall.SyscallN(procCryptMsgGetParam.Addr(), uintptr(cryptMsg), uintptr(paramType), uintptr(index), uintptr(data), uintptr(unsafe.Pointer(dataLen))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -72,7 +72,7 @@ func cryptMsgGetParam(cryptMsg windows.Handle, paramType uint32, index uint32, d } func cryptVerifyMessageSignature(pVerifyPara *_CRYPT_VERIFY_MESSAGE_PARA, signerIndex uint32, pbSignedBlob *byte, cbSignedBlob uint32, pbDecoded *byte, pdbDecoded *uint32, ppSignerCert **windows.CertContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptVerifyMessageSignature.Addr(), 7, uintptr(unsafe.Pointer(pVerifyPara)), uintptr(signerIndex), uintptr(unsafe.Pointer(pbSignedBlob)), uintptr(cbSignedBlob), uintptr(unsafe.Pointer(pbDecoded)), uintptr(unsafe.Pointer(pdbDecoded)), uintptr(unsafe.Pointer(ppSignerCert)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptVerifyMessageSignature.Addr(), uintptr(unsafe.Pointer(pVerifyPara)), uintptr(signerIndex), uintptr(unsafe.Pointer(pbSignedBlob)), uintptr(cbSignedBlob), uintptr(unsafe.Pointer(pbDecoded)), uintptr(unsafe.Pointer(pdbDecoded)), uintptr(unsafe.Pointer(ppSignerCert))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -80,13 +80,13 @@ func cryptVerifyMessageSignature(pVerifyPara *_CRYPT_VERIFY_MESSAGE_PARA, signer } func msiGetFileSignatureInformation(signedObjectPath *uint16, flags uint32, certCtx **windows.CertContext, pbHashData *byte, cbHashData *uint32) (ret wingoes.HRESULT) { - r0, _, _ := syscall.Syscall6(procMsiGetFileSignatureInformationW.Addr(), 5, uintptr(unsafe.Pointer(signedObjectPath)), uintptr(flags), uintptr(unsafe.Pointer(certCtx)), uintptr(unsafe.Pointer(pbHashData)), 
uintptr(unsafe.Pointer(cbHashData)), 0) + r0, _, _ := syscall.SyscallN(procMsiGetFileSignatureInformationW.Addr(), uintptr(unsafe.Pointer(signedObjectPath)), uintptr(flags), uintptr(unsafe.Pointer(certCtx)), uintptr(unsafe.Pointer(pbHashData)), uintptr(unsafe.Pointer(cbHashData))) ret = wingoes.HRESULT(r0) return } func cryptCATAdminAcquireContext2(hCatAdmin *_HCATADMIN, pgSubsystem *windows.GUID, hashAlgorithm *uint16, strongHashPolicy *windows.CertStrongSignPara, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptCATAdminAcquireContext2.Addr(), 5, uintptr(unsafe.Pointer(hCatAdmin)), uintptr(unsafe.Pointer(pgSubsystem)), uintptr(unsafe.Pointer(hashAlgorithm)), uintptr(unsafe.Pointer(strongHashPolicy)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminAcquireContext2.Addr(), uintptr(unsafe.Pointer(hCatAdmin)), uintptr(unsafe.Pointer(pgSubsystem)), uintptr(unsafe.Pointer(hashAlgorithm)), uintptr(unsafe.Pointer(strongHashPolicy)), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -94,7 +94,7 @@ func cryptCATAdminAcquireContext2(hCatAdmin *_HCATADMIN, pgSubsystem *windows.GU } func cryptCATAdminCalcHashFromFileHandle2(hCatAdmin _HCATADMIN, file windows.Handle, pcbHash *uint32, pbHash *byte, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptCATAdminCalcHashFromFileHandle2.Addr(), 5, uintptr(hCatAdmin), uintptr(file), uintptr(unsafe.Pointer(pcbHash)), uintptr(unsafe.Pointer(pbHash)), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminCalcHashFromFileHandle2.Addr(), uintptr(hCatAdmin), uintptr(file), uintptr(unsafe.Pointer(pcbHash)), uintptr(unsafe.Pointer(pbHash)), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -102,7 +102,7 @@ func cryptCATAdminCalcHashFromFileHandle2(hCatAdmin _HCATADMIN, file windows.Han } func cryptCATAdminEnumCatalogFromHash(hCatAdmin _HCATADMIN, pbHash *byte, cbHash uint32, flags uint32, prevCatInfo *_HCATINFO) (ret _HCATINFO, err error) { - r0, _, e1 
:= syscall.Syscall6(procCryptCATAdminEnumCatalogFromHash.Addr(), 5, uintptr(hCatAdmin), uintptr(unsafe.Pointer(pbHash)), uintptr(cbHash), uintptr(flags), uintptr(unsafe.Pointer(prevCatInfo)), 0) + r0, _, e1 := syscall.SyscallN(procCryptCATAdminEnumCatalogFromHash.Addr(), uintptr(hCatAdmin), uintptr(unsafe.Pointer(pbHash)), uintptr(cbHash), uintptr(flags), uintptr(unsafe.Pointer(prevCatInfo))) ret = _HCATINFO(r0) if ret == 0 { err = errnoErr(e1) @@ -111,7 +111,7 @@ func cryptCATAdminEnumCatalogFromHash(hCatAdmin _HCATADMIN, pbHash *byte, cbHash } func cryptCATAdminReleaseCatalogContext(hCatAdmin _HCATADMIN, hCatInfo _HCATINFO, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptCATAdminReleaseCatalogContext.Addr(), 3, uintptr(hCatAdmin), uintptr(hCatInfo), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminReleaseCatalogContext.Addr(), uintptr(hCatAdmin), uintptr(hCatInfo), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -119,7 +119,7 @@ func cryptCATAdminReleaseCatalogContext(hCatAdmin _HCATADMIN, hCatInfo _HCATINFO } func cryptCATAdminReleaseContext(hCatAdmin _HCATADMIN, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptCATAdminReleaseContext.Addr(), 2, uintptr(hCatAdmin), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptCATAdminReleaseContext.Addr(), uintptr(hCatAdmin), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -127,7 +127,7 @@ func cryptCATAdminReleaseContext(hCatAdmin _HCATADMIN, flags uint32) (err error) } func cryptCATAdminCatalogInfoFromContext(hCatInfo _HCATINFO, catInfo *_CATALOG_INFO, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptCATCatalogInfoFromContext.Addr(), 3, uintptr(hCatInfo), uintptr(unsafe.Pointer(catInfo)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCryptCATCatalogInfoFromContext.Addr(), uintptr(hCatInfo), uintptr(unsafe.Pointer(catInfo)), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git 
a/util/winutil/gp/zsyscall_windows.go b/util/winutil/gp/zsyscall_windows.go index 5e40ec3d1e093..41c240c264e6d 100644 --- a/util/winutil/gp/zsyscall_windows.go +++ b/util/winutil/gp/zsyscall_windows.go @@ -50,7 +50,7 @@ var ( ) func impersonateLoggedOnUser(token windows.Token) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateLoggedOnUser.Addr(), 1, uintptr(token), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateLoggedOnUser.Addr(), uintptr(token)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -62,7 +62,7 @@ func enterCriticalPolicySection(machine bool) (handle policyLockHandle, err erro if machine { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procEnterCriticalPolicySection.Addr(), 1, uintptr(_p0), 0, 0) + r0, _, e1 := syscall.SyscallN(procEnterCriticalPolicySection.Addr(), uintptr(_p0)) handle = policyLockHandle(r0) if int32(handle) == 0 { err = errnoErr(e1) @@ -71,7 +71,7 @@ func enterCriticalPolicySection(machine bool) (handle policyLockHandle, err erro } func leaveCriticalPolicySection(handle policyLockHandle) (err error) { - r1, _, e1 := syscall.Syscall(procLeaveCriticalPolicySection.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procLeaveCriticalPolicySection.Addr(), uintptr(handle)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -83,7 +83,7 @@ func refreshPolicyEx(machine bool, flags uint32) (err error) { if machine { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procRefreshPolicyEx.Addr(), 2, uintptr(_p0), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procRefreshPolicyEx.Addr(), uintptr(_p0), uintptr(flags)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -95,7 +95,7 @@ func registerGPNotification(event windows.Handle, machine bool) (err error) { if machine { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procRegisterGPNotification.Addr(), 2, uintptr(event), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procRegisterGPNotification.Addr(), uintptr(event), uintptr(_p0)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -103,7 +103,7 @@ func 
registerGPNotification(event windows.Handle, machine bool) (err error) { } func unregisterGPNotification(event windows.Handle) (err error) { - r1, _, e1 := syscall.Syscall(procUnregisterGPNotification.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procUnregisterGPNotification.Addr(), uintptr(event)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/util/winutil/s4u/zsyscall_windows.go b/util/winutil/s4u/zsyscall_windows.go index 6a8c78427dbd3..db647dee483e2 100644 --- a/util/winutil/s4u/zsyscall_windows.go +++ b/util/winutil/s4u/zsyscall_windows.go @@ -52,7 +52,7 @@ var ( ) func allocateLocallyUniqueId(luid *windows.LUID) (err error) { - r1, _, e1 := syscall.Syscall(procAllocateLocallyUniqueId.Addr(), 1, uintptr(unsafe.Pointer(luid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procAllocateLocallyUniqueId.Addr(), uintptr(unsafe.Pointer(luid))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -60,7 +60,7 @@ func allocateLocallyUniqueId(luid *windows.LUID) (err error) { } func impersonateLoggedOnUser(token windows.Token) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateLoggedOnUser.Addr(), 1, uintptr(token), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateLoggedOnUser.Addr(), uintptr(token)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -68,37 +68,37 @@ func impersonateLoggedOnUser(token windows.Token) (err error) { } func lsaConnectUntrusted(lsaHandle *_LSAHANDLE) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaConnectUntrusted.Addr(), 1, uintptr(unsafe.Pointer(lsaHandle)), 0, 0) + r0, _, _ := syscall.SyscallN(procLsaConnectUntrusted.Addr(), uintptr(unsafe.Pointer(lsaHandle))) ret = windows.NTStatus(r0) return } func lsaDeregisterLogonProcess(lsaHandle _LSAHANDLE) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaDeregisterLogonProcess.Addr(), 1, uintptr(lsaHandle), 0, 0) + r0, _, _ := syscall.SyscallN(procLsaDeregisterLogonProcess.Addr(), uintptr(lsaHandle)) ret = windows.NTStatus(r0) return } func 
lsaFreeReturnBuffer(buffer uintptr) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaFreeReturnBuffer.Addr(), 1, uintptr(buffer), 0, 0) + r0, _, _ := syscall.SyscallN(procLsaFreeReturnBuffer.Addr(), uintptr(buffer)) ret = windows.NTStatus(r0) return } func lsaLogonUser(lsaHandle _LSAHANDLE, originName *windows.NTString, logonType _SECURITY_LOGON_TYPE, authenticationPackage uint32, authenticationInformation unsafe.Pointer, authenticationInformationLength uint32, localGroups *windows.Tokengroups, sourceContext *_TOKEN_SOURCE, profileBuffer *uintptr, profileBufferLength *uint32, logonID *windows.LUID, token *windows.Token, quotas *_QUOTA_LIMITS, subStatus *windows.NTStatus) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall15(procLsaLogonUser.Addr(), 14, uintptr(lsaHandle), uintptr(unsafe.Pointer(originName)), uintptr(logonType), uintptr(authenticationPackage), uintptr(authenticationInformation), uintptr(authenticationInformationLength), uintptr(unsafe.Pointer(localGroups)), uintptr(unsafe.Pointer(sourceContext)), uintptr(unsafe.Pointer(profileBuffer)), uintptr(unsafe.Pointer(profileBufferLength)), uintptr(unsafe.Pointer(logonID)), uintptr(unsafe.Pointer(token)), uintptr(unsafe.Pointer(quotas)), uintptr(unsafe.Pointer(subStatus)), 0) + r0, _, _ := syscall.SyscallN(procLsaLogonUser.Addr(), uintptr(lsaHandle), uintptr(unsafe.Pointer(originName)), uintptr(logonType), uintptr(authenticationPackage), uintptr(authenticationInformation), uintptr(authenticationInformationLength), uintptr(unsafe.Pointer(localGroups)), uintptr(unsafe.Pointer(sourceContext)), uintptr(unsafe.Pointer(profileBuffer)), uintptr(unsafe.Pointer(profileBufferLength)), uintptr(unsafe.Pointer(logonID)), uintptr(unsafe.Pointer(token)), uintptr(unsafe.Pointer(quotas)), uintptr(unsafe.Pointer(subStatus))) ret = windows.NTStatus(r0) return } func lsaLookupAuthenticationPackage(lsaHandle _LSAHANDLE, packageName *windows.NTString, authenticationPackage *uint32) (ret windows.NTStatus) { - r0, _, 
_ := syscall.Syscall(procLsaLookupAuthenticationPackage.Addr(), 3, uintptr(lsaHandle), uintptr(unsafe.Pointer(packageName)), uintptr(unsafe.Pointer(authenticationPackage))) + r0, _, _ := syscall.SyscallN(procLsaLookupAuthenticationPackage.Addr(), uintptr(lsaHandle), uintptr(unsafe.Pointer(packageName)), uintptr(unsafe.Pointer(authenticationPackage))) ret = windows.NTStatus(r0) return } func lsaRegisterLogonProcess(logonProcessName *windows.NTString, lsaHandle *_LSAHANDLE, securityMode *_LSA_OPERATIONAL_MODE) (ret windows.NTStatus) { - r0, _, _ := syscall.Syscall(procLsaRegisterLogonProcess.Addr(), 3, uintptr(unsafe.Pointer(logonProcessName)), uintptr(unsafe.Pointer(lsaHandle)), uintptr(unsafe.Pointer(securityMode))) + r0, _, _ := syscall.SyscallN(procLsaRegisterLogonProcess.Addr(), uintptr(unsafe.Pointer(logonProcessName)), uintptr(unsafe.Pointer(lsaHandle)), uintptr(unsafe.Pointer(securityMode))) ret = windows.NTStatus(r0) return } diff --git a/util/winutil/winenv/zsyscall_windows.go b/util/winutil/winenv/zsyscall_windows.go index 2bdfdd9b1180b..7e93c7952f32e 100644 --- a/util/winutil/winenv/zsyscall_windows.go +++ b/util/winutil/winenv/zsyscall_windows.go @@ -55,7 +55,7 @@ func isDeviceRegisteredWithManagement(isMDMRegistered *bool, upnBufLen uint32, u if *isMDMRegistered { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procIsDeviceRegisteredWithManagement.Addr(), 3, uintptr(unsafe.Pointer(&_p0)), uintptr(upnBufLen), uintptr(unsafe.Pointer(upnBuf))) + r0, _, e1 := syscall.SyscallN(procIsDeviceRegisteredWithManagement.Addr(), uintptr(unsafe.Pointer(&_p0)), uintptr(upnBufLen), uintptr(unsafe.Pointer(upnBuf))) *isMDMRegistered = _p0 != 0 hr = int32(r0) if hr == 0 { @@ -65,13 +65,13 @@ func isDeviceRegisteredWithManagement(isMDMRegistered *bool, upnBufLen uint32, u } func verSetConditionMask(condMask verCondMask, typ verTypeMask, cond verCond) (res verCondMask) { - r0, _, _ := syscall.Syscall(procVerSetConditionMask.Addr(), 3, uintptr(condMask), uintptr(typ), 
uintptr(cond)) + r0, _, _ := syscall.SyscallN(procVerSetConditionMask.Addr(), uintptr(condMask), uintptr(typ), uintptr(cond)) res = verCondMask(r0) return } func verifyVersionInfo(verInfo *osVersionInfoEx, typ verTypeMask, cond verCondMask) (res bool) { - r0, _, _ := syscall.Syscall(procVerifyVersionInfoW.Addr(), 3, uintptr(unsafe.Pointer(verInfo)), uintptr(typ), uintptr(cond)) + r0, _, _ := syscall.SyscallN(procVerifyVersionInfoW.Addr(), uintptr(unsafe.Pointer(verInfo)), uintptr(typ), uintptr(cond)) res = r0 != 0 return } diff --git a/util/winutil/zsyscall_windows.go b/util/winutil/zsyscall_windows.go index b4674dff340ec..56aedb4c7f59c 100644 --- a/util/winutil/zsyscall_windows.go +++ b/util/winutil/zsyscall_windows.go @@ -62,7 +62,7 @@ var ( ) func queryServiceConfig2(hService windows.Handle, infoLevel uint32, buf *byte, bufLen uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(hService), uintptr(infoLevel), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(hService), uintptr(infoLevel), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -70,19 +70,19 @@ func queryServiceConfig2(hService windows.Handle, infoLevel uint32, buf *byte, b } func getApplicationRestartSettings(process windows.Handle, commandLine *uint16, commandLineLen *uint32, flags *uint32) (ret wingoes.HRESULT) { - r0, _, _ := syscall.Syscall6(procGetApplicationRestartSettings.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(commandLineLen)), uintptr(unsafe.Pointer(flags)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetApplicationRestartSettings.Addr(), uintptr(process), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(commandLineLen)), uintptr(unsafe.Pointer(flags))) ret = wingoes.HRESULT(r0) return } 
func registerApplicationRestart(cmdLineExclExeName *uint16, flags uint32) (ret wingoes.HRESULT) { - r0, _, _ := syscall.Syscall(procRegisterApplicationRestart.Addr(), 2, uintptr(unsafe.Pointer(cmdLineExclExeName)), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procRegisterApplicationRestart.Addr(), uintptr(unsafe.Pointer(cmdLineExclExeName)), uintptr(flags)) ret = wingoes.HRESULT(r0) return } func dsGetDcName(computerName *uint16, domainName *uint16, domainGuid *windows.GUID, siteName *uint16, flags dsGetDcNameFlag, dcInfo **_DOMAIN_CONTROLLER_INFO) (ret error) { - r0, _, _ := syscall.Syscall6(procDsGetDcNameW.Addr(), 6, uintptr(unsafe.Pointer(computerName)), uintptr(unsafe.Pointer(domainName)), uintptr(unsafe.Pointer(domainGuid)), uintptr(unsafe.Pointer(siteName)), uintptr(flags), uintptr(unsafe.Pointer(dcInfo))) + r0, _, _ := syscall.SyscallN(procDsGetDcNameW.Addr(), uintptr(unsafe.Pointer(computerName)), uintptr(unsafe.Pointer(domainName)), uintptr(unsafe.Pointer(domainGuid)), uintptr(unsafe.Pointer(siteName)), uintptr(flags), uintptr(unsafe.Pointer(dcInfo))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -90,7 +90,7 @@ func dsGetDcName(computerName *uint16, domainName *uint16, domainGuid *windows.G } func netValidateName(server *uint16, name *uint16, account *uint16, password *uint16, nameType _NETSETUP_NAME_TYPE) (ret error) { - r0, _, _ := syscall.Syscall6(procNetValidateName.Addr(), 5, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(account)), uintptr(unsafe.Pointer(password)), uintptr(nameType), 0) + r0, _, _ := syscall.SyscallN(procNetValidateName.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(account)), uintptr(unsafe.Pointer(password)), uintptr(nameType)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -98,7 +98,7 @@ func netValidateName(server *uint16, name *uint16, account *uint16, password *ui } func rmEndSession(session _RMHANDLE) (ret error) { - r0, _, _ := 
syscall.Syscall(procRmEndSession.Addr(), 1, uintptr(session), 0, 0) + r0, _, _ := syscall.SyscallN(procRmEndSession.Addr(), uintptr(session)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -106,7 +106,7 @@ func rmEndSession(session _RMHANDLE) (ret error) { } func rmGetList(session _RMHANDLE, nProcInfoNeeded *uint32, nProcInfo *uint32, rgAffectedApps *_RM_PROCESS_INFO, pRebootReasons *uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procRmGetList.Addr(), 5, uintptr(session), uintptr(unsafe.Pointer(nProcInfoNeeded)), uintptr(unsafe.Pointer(nProcInfo)), uintptr(unsafe.Pointer(rgAffectedApps)), uintptr(unsafe.Pointer(pRebootReasons)), 0) + r0, _, _ := syscall.SyscallN(procRmGetList.Addr(), uintptr(session), uintptr(unsafe.Pointer(nProcInfoNeeded)), uintptr(unsafe.Pointer(nProcInfo)), uintptr(unsafe.Pointer(rgAffectedApps)), uintptr(unsafe.Pointer(pRebootReasons))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -114,7 +114,7 @@ func rmGetList(session _RMHANDLE, nProcInfoNeeded *uint32, nProcInfo *uint32, rg } func rmJoinSession(pSession *_RMHANDLE, sessionKey *uint16) (ret error) { - r0, _, _ := syscall.Syscall(procRmJoinSession.Addr(), 2, uintptr(unsafe.Pointer(pSession)), uintptr(unsafe.Pointer(sessionKey)), 0) + r0, _, _ := syscall.SyscallN(procRmJoinSession.Addr(), uintptr(unsafe.Pointer(pSession)), uintptr(unsafe.Pointer(sessionKey))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -122,7 +122,7 @@ func rmJoinSession(pSession *_RMHANDLE, sessionKey *uint16) (ret error) { } func rmRegisterResources(session _RMHANDLE, nFiles uint32, rgsFileNames **uint16, nApplications uint32, rgApplications *_RM_UNIQUE_PROCESS, nServices uint32, rgsServiceNames **uint16) (ret error) { - r0, _, _ := syscall.Syscall9(procRmRegisterResources.Addr(), 7, uintptr(session), uintptr(nFiles), uintptr(unsafe.Pointer(rgsFileNames)), uintptr(nApplications), uintptr(unsafe.Pointer(rgApplications)), uintptr(nServices), uintptr(unsafe.Pointer(rgsServiceNames)), 0, 0) + r0, _, _ := 
syscall.SyscallN(procRmRegisterResources.Addr(), uintptr(session), uintptr(nFiles), uintptr(unsafe.Pointer(rgsFileNames)), uintptr(nApplications), uintptr(unsafe.Pointer(rgApplications)), uintptr(nServices), uintptr(unsafe.Pointer(rgsServiceNames))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -130,7 +130,7 @@ func rmRegisterResources(session _RMHANDLE, nFiles uint32, rgsFileNames **uint16 } func rmStartSession(pSession *_RMHANDLE, flags uint32, sessionKey *uint16) (ret error) { - r0, _, _ := syscall.Syscall(procRmStartSession.Addr(), 3, uintptr(unsafe.Pointer(pSession)), uintptr(flags), uintptr(unsafe.Pointer(sessionKey))) + r0, _, _ := syscall.SyscallN(procRmStartSession.Addr(), uintptr(unsafe.Pointer(pSession)), uintptr(flags), uintptr(unsafe.Pointer(sessionKey))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -138,7 +138,7 @@ func rmStartSession(pSession *_RMHANDLE, flags uint32, sessionKey *uint16) (ret } func expandEnvironmentStringsForUser(token windows.Token, src *uint16, dst *uint16, dstLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procExpandEnvironmentStringsForUserW.Addr(), 4, uintptr(token), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(dstLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsForUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(dstLen)) if int32(r1) == 0 { err = errnoErr(e1) } @@ -146,7 +146,7 @@ func expandEnvironmentStringsForUser(token windows.Token, src *uint16, dst *uint } func loadUserProfile(token windows.Token, profileInfo *_PROFILEINFO) (err error) { - r1, _, e1 := syscall.Syscall(procLoadUserProfileW.Addr(), 2, uintptr(token), uintptr(unsafe.Pointer(profileInfo)), 0) + r1, _, e1 := syscall.SyscallN(procLoadUserProfileW.Addr(), uintptr(token), uintptr(unsafe.Pointer(profileInfo))) if int32(r1) == 0 { err = errnoErr(e1) } @@ -154,7 +154,7 @@ func loadUserProfile(token windows.Token, profileInfo *_PROFILEINFO) (err error) } func 
unloadUserProfile(token windows.Token, profile registry.Key) (err error) { - r1, _, e1 := syscall.Syscall(procUnloadUserProfile.Addr(), 2, uintptr(token), uintptr(profile), 0) + r1, _, e1 := syscall.SyscallN(procUnloadUserProfile.Addr(), uintptr(token), uintptr(profile)) if int32(r1) == 0 { err = errnoErr(e1) } diff --git a/wgengine/magicsock/discopingpurpose_string.go b/wgengine/magicsock/discopingpurpose_string.go index 3dc327de1d2ae..8eebf97a2dbd9 100644 --- a/wgengine/magicsock/discopingpurpose_string.go +++ b/wgengine/magicsock/discopingpurpose_string.go @@ -22,8 +22,9 @@ const _discoPingPurpose_name = "DiscoveryHeartbeatCLIHeartbeatForUDPLifetime" var _discoPingPurpose_index = [...]uint8{0, 9, 18, 21, 44} func (i discoPingPurpose) String() string { - if i < 0 || i >= discoPingPurpose(len(_discoPingPurpose_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_discoPingPurpose_index)-1 { return "discoPingPurpose(" + strconv.FormatInt(int64(i), 10) + ")" } - return _discoPingPurpose_name[_discoPingPurpose_index[i]:_discoPingPurpose_index[i+1]] + return _discoPingPurpose_name[_discoPingPurpose_index[idx]:_discoPingPurpose_index[idx+1]] } From 1723cb83ed95db76fa933348e8d9df7d9fcb960d Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 29 Oct 2025 11:09:28 +0000 Subject: [PATCH 0700/1093] ipn/ipnlocal: use an in-memory TKA store if FS is unavailable This requires making the internals of LocalBackend a bit more generic, and implementing the `tka.CompactableChonk` interface for `tka.Mem`. 
Signed-off-by: Alex Chan Updates https://github.com/tailscale/corp/issues/33599 --- cmd/tailscale/cli/up.go | 1 + health/healthmsg/healthmsg.go | 11 ++-- ipn/ipnlocal/network-lock.go | 54 +++++++++++------- tka/tailchonk.go | 92 +++++++++++++++++++++++++++++- tka/tailchonk_test.go | 37 ++++++++++++ tstest/chonktest/tailchonk_test.go | 6 ++ 6 files changed, 174 insertions(+), 27 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 91a6b60878a93..61cade8de68d0 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -818,6 +818,7 @@ func upWorthyWarning(s string) bool { strings.Contains(s, healthmsg.WarnAcceptRoutesOff) || strings.Contains(s, healthmsg.LockedOut) || strings.Contains(s, healthmsg.WarnExitNodeUsage) || + strings.Contains(s, healthmsg.InMemoryTailnetLockState) || strings.Contains(strings.ToLower(s), "update available: ") } diff --git a/health/healthmsg/healthmsg.go b/health/healthmsg/healthmsg.go index 2384103738cf3..5ea1c736d8851 100644 --- a/health/healthmsg/healthmsg.go +++ b/health/healthmsg/healthmsg.go @@ -8,9 +8,10 @@ package healthmsg const ( - WarnAcceptRoutesOff = "Some peers are advertising routes but --accept-routes is false" - TailscaleSSHOnBut = "Tailscale SSH enabled, but " // + ... something from caller - LockedOut = "this node is locked out; it will not have connectivity until it is signed. For more info, see https://tailscale.com/s/locked-out" - WarnExitNodeUsage = "The following issues on your machine will likely make usage of exit nodes impossible" - DisableRPFilter = "Please set rp_filter=2 instead of rp_filter=1; see https://github.com/tailscale/tailscale/issues/3310" + WarnAcceptRoutesOff = "Some peers are advertising routes but --accept-routes is false" + TailscaleSSHOnBut = "Tailscale SSH enabled, but " // + ... something from caller + LockedOut = "this node is locked out; it will not have connectivity until it is signed. 
For more info, see https://tailscale.com/s/locked-out" + WarnExitNodeUsage = "The following issues on your machine will likely make usage of exit nodes impossible" + DisableRPFilter = "Please set rp_filter=2 instead of rp_filter=1; see https://github.com/tailscale/tailscale/issues/3310" + InMemoryTailnetLockState = "Tailnet Lock state is only being stored in-memory. Set --statedir to store state on disk, which is more secure. See https://tailscale.com/kb/1226/tailnet-lock#tailnet-lock-state" ) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index f26c81011e824..14a3b105b59b0 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -23,6 +23,7 @@ import ( "slices" "time" + "tailscale.com/health" "tailscale.com/health/healthmsg" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -54,7 +55,7 @@ var ( type tkaState struct { profile ipn.ProfileID authority *tka.Authority - storage *tka.FS + storage tka.CompactableChonk filtered []ipnstate.TKAPeer } @@ -75,7 +76,7 @@ func (b *LocalBackend) initTKALocked() error { root := b.TailscaleVarRoot() if root == "" { b.tka = nil - b.logf("network-lock unavailable; no state directory") + b.logf("cannot fetch existing TKA state; no state directory for network-lock") return nil } @@ -90,6 +91,7 @@ func (b *LocalBackend) initTKALocked() error { if err != nil { return fmt.Errorf("initializing tka: %v", err) } + if err := authority.Compact(storage, tkaCompactionDefaults); err != nil { b.logf("tka compaction failed: %v", err) } @@ -105,6 +107,16 @@ func (b *LocalBackend) initTKALocked() error { return nil } +// noNetworkLockStateDirWarnable is a Warnable to warn the user that Tailnet Lock data +// (in particular, the list of AUMs in the TKA state) is being stored in memory and will +// be lost when tailscaled restarts. 
+var noNetworkLockStateDirWarnable = health.Register(&health.Warnable{ + Code: "no-tailnet-lock-state-dir", + Title: "No statedir for Tailnet Lock", + Severity: health.SeverityMedium, + Text: health.StaticMessage(healthmsg.InMemoryTailnetLockState), +}) + // tkaFilterNetmapLocked checks the signatures on each node key, dropping // nodes from the netmap whose signature does not verify. // @@ -447,7 +459,7 @@ func (b *LocalBackend) tkaSyncLocked(ourNodeKey key.NodePublic) error { // b.mu must be held & TKA must be initialized. func (b *LocalBackend) tkaApplyDisablementLocked(secret []byte) error { if b.tka.authority.ValidDisablement(secret) { - if err := os.RemoveAll(b.chonkPathLocked()); err != nil { + if err := b.tka.storage.RemoveAll(); err != nil { return err } b.tka = nil @@ -491,19 +503,21 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per } } - chonkDir := b.chonkPathLocked() - if err := os.Mkdir(filepath.Dir(chonkDir), 0755); err != nil && !os.IsExist(err) { - return fmt.Errorf("creating chonk root dir: %v", err) - } - if err := os.Mkdir(chonkDir, 0755); err != nil && !os.IsExist(err) { - return fmt.Errorf("mkdir: %v", err) - } - - chonk, err := tka.ChonkDir(chonkDir) - if err != nil { - return fmt.Errorf("chonk: %v", err) + root := b.TailscaleVarRoot() + var storage tka.CompactableChonk + if root == "" { + b.health.SetUnhealthy(noNetworkLockStateDirWarnable, nil) + b.logf("network-lock using in-memory storage; no state directory") + storage = &tka.Mem{} + } else { + chonkDir := b.chonkPathLocked() + chonk, err := tka.ChonkDir(chonkDir) + if err != nil { + return fmt.Errorf("chonk: %v", err) + } + storage = chonk } - authority, err := tka.Bootstrap(chonk, genesis) + authority, err := tka.Bootstrap(storage, genesis) if err != nil { return fmt.Errorf("tka bootstrap: %v", err) } @@ -511,7 +525,7 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per b.tka = &tkaState{ profile: 
b.pm.CurrentProfile().ID(), authority: authority, - storage: chonk, + storage: storage, } return nil } @@ -524,10 +538,6 @@ func (b *LocalBackend) CanSupportNetworkLock() error { return nil } - if b.TailscaleVarRoot() == "" { - return errors.New("network-lock is not supported in this configuration, try setting --statedir") - } - // There's a var root (aka --statedir), so if network lock gets // initialized we have somewhere to store our AUMs. That's all // we need. @@ -647,6 +657,7 @@ func tkaStateFromPeer(p tailcfg.NodeView) ipnstate.TKAPeer { // needing signatures is returned as a response. // The Finish RPC submits signatures for all these nodes, at which point // Control has everything it needs to atomically enable network lock. +// TODO(alexc): Only with persistent backend func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) error { if err := b.CanSupportNetworkLock(); err != nil { return err @@ -767,7 +778,7 @@ func (b *LocalBackend) NetworkLockForceLocalDisable() error { return fmt.Errorf("saving prefs: %w", err) } - if err := os.RemoveAll(b.chonkPathLocked()); err != nil { + if err := b.tka.storage.RemoveAll(); err != nil { return fmt.Errorf("deleting TKA state: %w", err) } b.tka = nil @@ -776,6 +787,7 @@ func (b *LocalBackend) NetworkLockForceLocalDisable() error { // NetworkLockSign signs the given node-key and submits it to the control plane. // rotationPublic, if specified, must be an ed25519 public key. 
+// TODO(alexc): in-memory only func (b *LocalBackend) NetworkLockSign(nodeKey key.NodePublic, rotationPublic []byte) error { ourNodeKey, sig, err := func(nodeKey key.NodePublic, rotationPublic []byte) (key.NodePublic, tka.NodeKeySignature, error) { b.mu.Lock() diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 3e8d1b6c816d3..2dc03a6f62649 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "log" + "maps" "os" "path/filepath" "slices" @@ -57,6 +58,10 @@ type Chonk interface { // as a hint to pick the correct chain in the event that the Chonk stores // multiple distinct chains. LastActiveAncestor() (*AUMHash, error) + + // RemoveAll permanently and completely clears the TKA state. This should + // be called when the user disables Tailnet Lock. + RemoveAll() error } // CompactableChonk implementation are extensions of Chonk, which are @@ -78,12 +83,21 @@ type CompactableChonk interface { } // Mem implements in-memory storage of TKA state, suitable for -// tests. +// tests or cases where filesystem storage is unavailable. // // Mem implements the Chonk interface. +// +// Mem is thread-safe. type Mem struct { mu sync.RWMutex aums map[AUMHash]AUM + commitTimes map[AUMHash]time.Time + + // parentIndex is a map of AUMs to the AUMs for which they are + // the parent. + // + // For example, if parent index is {1 -> {2, 3, 4}}, that means + // that AUMs 2, 3, 4 all have aum.PrevAUMHash = 1. 
parentIndex map[AUMHash][]AUMHash lastActiveAncestor *AUMHash @@ -152,12 +166,14 @@ func (c *Mem) CommitVerifiedAUMs(updates []AUM) error { if c.aums == nil { c.parentIndex = make(map[AUMHash][]AUMHash, 64) c.aums = make(map[AUMHash]AUM, 64) + c.commitTimes = make(map[AUMHash]time.Time, 64) } updateLoop: for _, aum := range updates { aumHash := aum.Hash() c.aums[aumHash] = aum + c.commitTimes[aumHash] = time.Now() parent, ok := aum.Parent() if ok { @@ -173,6 +189,71 @@ updateLoop: return nil } +// RemoveAll permanently and completely clears the TKA state. +func (c *Mem) RemoveAll() error { + c.mu.Lock() + defer c.mu.Unlock() + c.aums = nil + c.commitTimes = nil + c.parentIndex = nil + c.lastActiveAncestor = nil + return nil +} + +// AllAUMs returns all AUMs stored in the chonk. +func (c *Mem) AllAUMs() ([]AUMHash, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + return slices.Collect(maps.Keys(c.aums)), nil +} + +// CommitTime returns the time at which the AUM was committed. +// +// If the AUM does not exist, then os.ErrNotExist is returned. +func (c *Mem) CommitTime(h AUMHash) (time.Time, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + t, ok := c.commitTimes[h] + if ok { + return t, nil + } else { + return time.Time{}, os.ErrNotExist + } +} + +// PurgeAUMs marks the specified AUMs for deletion from storage. +func (c *Mem) PurgeAUMs(hashes []AUMHash) error { + c.mu.Lock() + defer c.mu.Unlock() + + for _, h := range hashes { + // Remove the deleted AUM from the list of its parents' children. + // + // However, we leave the list of this AUM's children in parentIndex, + // so we can find them later in ChildAUMs(). 
+ if aum, ok := c.aums[h]; ok { + parent, hasParent := aum.Parent() + if hasParent { + c.parentIndex[parent] = slices.DeleteFunc( + c.parentIndex[parent], + func(other AUMHash) bool { return bytes.Equal(h[:], other[:]) }, + ) + if len(c.parentIndex[parent]) == 0 { + delete(c.parentIndex, parent) + } + } + } + + // Delete this AUM from the list of AUMs and commit times. + delete(c.aums, h) + delete(c.commitTimes, h) + } + + return nil +} + // FS implements filesystem storage of TKA state. // // FS implements the Chonk interface. @@ -184,6 +265,10 @@ type FS struct { // ChonkDir returns an implementation of Chonk which uses the // given directory to store TKA state. func ChonkDir(dir string) (*FS, error) { + if err := os.MkdirAll(dir, 0755); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("creating chonk root dir: %v", err) + } + stat, err := os.Stat(dir) if err != nil { return nil, err @@ -376,6 +461,11 @@ func (c *FS) Heads() ([]AUM, error) { return out, nil } +// RemoveAll permanently and completely clears the TKA state. +func (c *FS) RemoveAll() error { + return os.RemoveAll(c.base) +} + // AllAUMs returns all AUMs stored in the chonk. func (c *FS) AllAUMs() ([]AUMHash, error) { c.mu.RLock() diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 7816d2dc158b5..70b7dc9a72fbb 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -127,6 +127,43 @@ func TestTailchonkFS_IgnoreTempFile(t *testing.T) { } } +// If we use a non-existent directory with filesystem Chonk storage, +// it's automatically created. 
+func TestTailchonkFS_CreateChonkDir(t *testing.T) { + base := filepath.Join(t.TempDir(), "a", "b", "c") + + chonk, err := ChonkDir(base) + if err != nil { + t.Fatalf("ChonkDir: %v", err) + } + + aum := AUM{MessageKind: AUMNoOp} + must.Do(chonk.CommitVerifiedAUMs([]AUM{aum})) + + got, err := chonk.AUM(aum.Hash()) + if err != nil { + t.Errorf("Chonk.AUM: %v", err) + } + if diff := cmp.Diff(got, aum); diff != "" { + t.Errorf("wrong AUM; (-got+want):%v", diff) + } + + if _, err := os.Stat(base); err != nil { + t.Errorf("os.Stat: %v", err) + } +} + +// You can't use a file as the root of your filesystem Chonk storage. +func TestTailchonkFS_CannotUseFile(t *testing.T) { + base := filepath.Join(t.TempDir(), "tka_storage.txt") + must.Do(os.WriteFile(base, []byte("this won't work"), 0644)) + + _, err := ChonkDir(base) + if err == nil { + t.Fatal("ChonkDir succeeded; expected an error") + } +} + func TestMarkActiveChain(t *testing.T) { type aumTemplate struct { AUM AUM diff --git a/tstest/chonktest/tailchonk_test.go b/tstest/chonktest/tailchonk_test.go index ce6b043248de1..6dfab798ed11f 100644 --- a/tstest/chonktest/tailchonk_test.go +++ b/tstest/chonktest/tailchonk_test.go @@ -39,6 +39,12 @@ func TestImplementsCompactableChonk(t *testing.T) { name string newChonk func(t *testing.T) tka.CompactableChonk }{ + { + name: "Mem", + newChonk: func(t *testing.T) tka.CompactableChonk { + return &tka.Mem{} + }, + }, { name: "FS", newChonk: func(t *testing.T) tka.CompactableChonk { From 165a24744e7dfef778e9e4fb7ac65b9f8cc03b29 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Nov 2025 09:18:48 -0800 Subject: [PATCH 0701/1093] tka: fix typo in comment Let's fix all the typos, which lets the code be more readable, lest we confuse our readers. 
Updates #cleanup Change-Id: I4954601b0592b1fda40269009647bb517a4457be Signed-off-by: Brad Fitzpatrick --- tka/tka.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tka/tka.go b/tka/tka.go index 9dce74e9a8046..ed029c82e0592 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -779,8 +779,8 @@ func (a *Authority) findParentForRewrite(storage Chonk, removeKeys []tkatype.Key } } if !keyTrusted { - // Success: the revoked keys are not trusted! - // Lets check that our key was trusted to ensure + // Success: the revoked keys are not trusted. + // Check that our key was trusted to ensure // we can sign a fork from here. if _, err := state.GetKey(ourKey); err == nil { break From f1cddc6ecf4624b7608b1aeb06bd108c24687fef Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Nov 2025 08:06:16 -0800 Subject: [PATCH 0702/1093] ipn{,/local},cmd/tailscale: add "sync" flag and pref to disable control map poll For manual (human) testing, this lets the user disable control plane map polls with "tailscale set --sync=false" (which survives restarts) and "tailscale set --sync" to restore. A high severity health warning is shown while this is active. 
Updates #12639 Updates #17945 Change-Id: I83668fa5de3b5e5e25444df0815ec2a859153a6d Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/set.go | 3 +++ cmd/tailscale/cli/up.go | 3 ++- ipn/ipn_clone.go | 1 + ipn/ipn_view.go | 7 ++++++ ipn/ipnlocal/local.go | 28 ++++++++++++++++++++++- ipn/ipnlocal/profiles_test.go | 6 +++-- ipn/prefs.go | 20 ++++++++++++++-- ipn/prefs_test.go | 43 +++++++++++++++++++++++++++++++++++ types/opt/bool.go | 11 +++++++++ types/opt/bool_test.go | 20 ++++++++++++++++ 10 files changed, 136 insertions(+), 6 deletions(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 43f8bbbc34afd..3b5e032db124b 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -63,6 +63,7 @@ type setArgsT struct { reportPosture bool snat bool statefulFiltering bool + sync bool netfilterMode string relayServerPort string } @@ -85,6 +86,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version") setf.BoolVar(&setArgs.reportPosture, "report-posture", false, "allow management plane to gather device posture information") setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") + setf.BoolVar(&setArgs.sync, "sync", false, hidden+"actively sync configuration from the control plane (set to false only for network failure testing)") setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", "UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") ffcomplete.Flag(setf, "exit-node", func(args []string) ([]string, ffcomplete.ShellCompDirective, error) { @@ -149,6 +151,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { OperatorUser: setArgs.opUser, NoSNAT: !setArgs.snat, ForceDaemon: setArgs.forceDaemon, + Sync: 
opt.NewBool(setArgs.sync), AutoUpdate: ipn.AutoUpdatePrefs{ Check: setArgs.updateCheck, Apply: opt.NewBool(setArgs.updateApply), diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 61cade8de68d0..c341559559149 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -890,6 +890,7 @@ func init() { addPrefFlagMapping("advertise-connector", "AppConnector") addPrefFlagMapping("report-posture", "PostureChecking") addPrefFlagMapping("relay-server-port", "RelayServerPort") + addPrefFlagMapping("sync", "Sync") } func addPrefFlagMapping(flagName string, prefNames ...string) { @@ -925,7 +926,7 @@ func updateMaskedPrefsFromUpOrSetFlag(mp *ipn.MaskedPrefs, flagName string) { if prefs, ok := prefsOfFlag[flagName]; ok { for _, pref := range prefs { f := reflect.ValueOf(mp).Elem() - for _, name := range strings.Split(pref, ".") { + for name := range strings.SplitSeq(pref, ".") { f = f.FieldByName(name + "Set") } f.SetBool(true) diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index d5af906ee95e8..1be7161970726 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -90,6 +90,7 @@ var _PrefsCloneNeedsRegeneration = Prefs(struct { Egg bool AdvertiseRoutes []netip.Prefix AdvertiseServices []string + Sync opt.Bool NoSNAT bool NoStatefulFiltering opt.Bool NetfilterMode preftype.NetfilterMode diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 12fe93bab3896..d3836416b7bd5 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -363,6 +363,12 @@ func (v PrefsView) AdvertiseServices() views.Slice[string] { return views.SliceOf(v.ж.AdvertiseServices) } +// Sync is whether this node should sync its configuration from +// the control plane. If unset, this defaults to true. +// This exists primarily for testing, to verify that netmap caching +// and offline operation work correctly. +func (v PrefsView) Sync() opt.Bool { return v.ж.Sync } + // NoSNAT specifies whether to source NAT traffic going to // destinations in AdvertiseRoutes. 
The default is to apply source // NAT, which makes the traffic appear to come from the router @@ -482,6 +488,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct { Egg bool AdvertiseRoutes []netip.Prefix AdvertiseServices []string + Sync opt.Bool NoSNAT bool NoStatefulFiltering opt.Bool NetfilterMode preftype.NetfilterMode diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ed183e508e69d..24ab417352061 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -870,6 +870,7 @@ func (b *LocalBackend) initPrefsFromConfig(conf *conffile.Config) error { if err := b.pm.SetPrefs(p.View(), ipn.NetworkProfile{}); err != nil { return err } + b.updateWarnSync(p.View()) b.setStaticEndpointsFromConfigLocked(conf) b.conf = conf return nil @@ -931,7 +932,12 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { return } networkUp := b.prevIfState.AnyInterfaceUp() - b.cc.SetPaused((b.state == ipn.Stopped && b.NetMap() != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest())) + pauseForNetwork := (b.state == ipn.Stopped && b.NetMap() != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest()) + + prefs := b.pm.CurrentPrefs() + pauseForSyncPref := prefs.Valid() && prefs.Sync().EqualBool(false) + + b.cc.SetPaused(pauseForNetwork || pauseForSyncPref) } // DisconnectControl shuts down control client. This can be run before node shutdown to force control to consider this ndoe @@ -2519,6 +2525,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { logf("serverMode=%v", inServerMode) } b.applyPrefsToHostinfoLocked(hostinfo, prefs) + b.updateWarnSync(prefs) persistv := prefs.Persist().AsStruct() if persistv == nil { @@ -2570,6 +2577,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { ControlKnobs: b.sys.ControlKnobs(), Shutdown: ccShutdown, Bus: b.sys.Bus.Get(), + StartPaused: prefs.Sync().EqualBool(false), // Don't warn about broken Linux IP forwarding when // netstack is being used. 
@@ -4658,6 +4666,9 @@ func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { b.resetAlwaysOnOverrideLocked() } + b.pauseOrResumeControlClientLocked() // for prefs.Sync changes + b.updateWarnSync(prefs) + if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { b.doSetHostinfoFilterServicesLocked() } @@ -6665,6 +6676,13 @@ func (b *LocalBackend) sshServerOrInit() (_ SSHServer, err error) { return b.sshServer, nil } +var warnSyncDisabled = health.Register(&health.Warnable{ + Code: "sync-disabled", + Title: "Tailscale Sync is Disabled", + Severity: health.SeverityHigh, + Text: health.StaticMessage("Tailscale control plane syncing is disabled; run `tailscale set --sync` to restore"), +}) + var warnSSHSELinuxWarnable = health.Register(&health.Warnable{ Code: "ssh-unavailable-selinux-enabled", Title: "Tailscale SSH and SELinux", @@ -6680,6 +6698,14 @@ func (b *LocalBackend) updateSELinuxHealthWarning() { } } +func (b *LocalBackend) updateWarnSync(prefs ipn.PrefsView) { + if prefs.Sync().EqualBool(false) { + b.health.SetUnhealthy(warnSyncDisabled, nil) + } else { + b.health.SetHealthy(warnSyncDisabled) + } +} + func (b *LocalBackend) handleSSHConn(c net.Conn) (err error) { s, err := b.sshServerOrInit() if err != nil { diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index deeab2ade9b15..95834284e91d5 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -1129,10 +1129,12 @@ func TestProfileStateChangeCallback(t *testing.T) { } gotChanges := make([]stateChange, 0, len(tt.wantChanges)) - pm.StateChangeHook = func(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + pm.StateChangeHook = func(profile ipn.LoginProfileView, prefView ipn.PrefsView, sameNode bool) { + prefs := prefView.AsStruct() + prefs.Sync = prefs.Sync.Normalized() gotChanges = append(gotChanges, stateChange{ Profile: profile.AsStruct(), - Prefs: prefs.AsStruct(), + Prefs: prefs, SameNode: sameNode, }) } diff --git 
a/ipn/prefs.go b/ipn/prefs.go index 796098c8ad949..7f8216c60f8e0 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -207,6 +207,12 @@ type Prefs struct { // control server. AdvertiseServices []string + // Sync is whether this node should sync its configuration from + // the control plane. If unset, this defaults to true. + // This exists primarily for testing, to verify that netmap caching + // and offline operation work correctly. + Sync opt.Bool + // NoSNAT specifies whether to source NAT traffic going to // destinations in AdvertiseRoutes. The default is to apply source // NAT, which makes the traffic appear to come from the router @@ -364,12 +370,13 @@ type MaskedPrefs struct { EggSet bool `json:",omitempty"` AdvertiseRoutesSet bool `json:",omitempty"` AdvertiseServicesSet bool `json:",omitempty"` + SyncSet bool `json:",omitzero"` NoSNATSet bool `json:",omitempty"` NoStatefulFilteringSet bool `json:",omitempty"` NetfilterModeSet bool `json:",omitempty"` OperatorUserSet bool `json:",omitempty"` ProfileNameSet bool `json:",omitempty"` - AutoUpdateSet AutoUpdatePrefsMask `json:",omitempty"` + AutoUpdateSet AutoUpdatePrefsMask `json:",omitzero"` AppConnectorSet bool `json:",omitempty"` PostureCheckingSet bool `json:",omitempty"` NetfilterKindSet bool `json:",omitempty"` @@ -547,6 +554,9 @@ func (p *Prefs) pretty(goos string) string { if p.LoggedOut { sb.WriteString("loggedout=true ") } + if p.Sync.EqualBool(false) { + sb.WriteString("sync=false ") + } if p.ForceDaemon { sb.WriteString("server=true ") } @@ -653,6 +663,7 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.ExitNodeAllowLANAccess == p2.ExitNodeAllowLANAccess && p.CorpDNS == p2.CorpDNS && p.RunSSH == p2.RunSSH && + p.Sync.Normalized() == p2.Sync.Normalized() && p.RunWebClient == p2.RunWebClient && p.WantRunning == p2.WantRunning && p.LoggedOut == p2.LoggedOut && @@ -956,10 +967,15 @@ func PrefsFromBytes(b []byte, base *Prefs) error { if len(b) == 0 { return nil } - return json.Unmarshal(b, base) } +func (p 
*Prefs) normalizeOptBools() { + if p.Sync == opt.ExplicitlyUnset { + p.Sync = "" + } +} + var jsonEscapedZero = []byte(`\u0000`) // LoadPrefsWindows loads a legacy relaynode config file into Prefs with diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 2336164096c14..7c9c3ef43f7df 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -57,6 +57,7 @@ func TestPrefsEqual(t *testing.T) { "Egg", "AdvertiseRoutes", "AdvertiseServices", + "Sync", "NoSNAT", "NoStatefulFiltering", "NetfilterMode", @@ -404,6 +405,7 @@ func checkPrefs(t *testing.T, p Prefs) { if err != nil { t.Fatalf("PrefsFromBytes(p2) failed: bytes=%q; err=%v\n", p2.ToBytes(), err) } + p2b.normalizeOptBools() p2p := p2.Pretty() p2bp := p2b.Pretty() t.Logf("\np2p: %#v\np2bp: %#v\n", p2p, p2bp) @@ -419,6 +421,42 @@ func checkPrefs(t *testing.T, p Prefs) { } } +// PrefsFromBytes documents that it preserves fields unset in the JSON. +// This verifies that stays true. +func TestPrefsFromBytesPreservesOldValues(t *testing.T) { + tests := []struct { + name string + old Prefs + json []byte + want Prefs + }{ + { + name: "preserve-control-url", + old: Prefs{ControlURL: "https://foo"}, + json: []byte(`{"RouteAll": true}`), + want: Prefs{ControlURL: "https://foo", RouteAll: true}, + }, + { + name: "opt.Bool", // test that we don't normalize it early + old: Prefs{Sync: "unset"}, + json: []byte(`{}`), + want: Prefs{Sync: "unset"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + old := tt.old // shallow + err := PrefsFromBytes(tt.json, &old) + if err != nil { + t.Fatalf("PrefsFromBytes failed: %v", err) + } + if !old.Equals(&tt.want) { + t.Fatalf("got %+v; want %+v", old, tt.want) + } + }) + } +} + func TestBasicPrefs(t *testing.T) { tstest.PanicOnLog() @@ -591,6 +629,11 @@ func TestPrefsPretty(t *testing.T) { "linux", `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist=nil}`, }, + { + Prefs{Sync: "false"}, + "linux", + "Prefs{ra=false dns=false want=false 
sync=false routes=[] nf=off update=off Persist=nil}", + }, } for i, tt := range tests { got := tt.p.pretty(tt.os) diff --git a/types/opt/bool.go b/types/opt/bool.go index e2fd6a054ff0d..fbc39e1dc3754 100644 --- a/types/opt/bool.go +++ b/types/opt/bool.go @@ -83,6 +83,17 @@ func (b *Bool) Scan(src any) error { } } +// Normalized returns the normalized form of b, mapping "unset" to "" +// and leaving other values unchanged. +func (b Bool) Normalized() Bool { + switch b { + case ExplicitlyUnset: + return Empty + default: + return b + } +} + // EqualBool reports whether b is equal to v. // If b is empty or not a valid bool, it reports false. func (b Bool) EqualBool(v bool) bool { diff --git a/types/opt/bool_test.go b/types/opt/bool_test.go index dddbcfc195d04..e61d66dbe9e96 100644 --- a/types/opt/bool_test.go +++ b/types/opt/bool_test.go @@ -106,6 +106,8 @@ func TestBoolEqualBool(t *testing.T) { }{ {"", true, false}, {"", false, false}, + {"unset", true, false}, + {"unset", false, false}, {"sdflk;", true, false}, {"sldkf;", false, false}, {"true", true, true}, @@ -122,6 +124,24 @@ func TestBoolEqualBool(t *testing.T) { } } +func TestBoolNormalized(t *testing.T) { + tests := []struct { + in Bool + want Bool + }{ + {"", ""}, + {"true", "true"}, + {"false", "false"}, + {"unset", ""}, + {"foo", "foo"}, + } + for _, tt := range tests { + if got := tt.in.Normalized(); got != tt.want { + t.Errorf("(%q).Normalized() = %q; want %q", string(tt.in), string(got), string(tt.want)) + } + } +} + func TestUnmarshalAlloc(t *testing.T) { b := json.Unmarshaler(new(Bool)) n := testing.AllocsPerRun(10, func() { b.UnmarshalJSON(trueBytes) }) From 26f9b50247c9ba82ee33e4ae3acb5a107424c3a4 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 17 Nov 2025 14:42:15 -0800 Subject: [PATCH 0703/1093] feature/tpm: disable dictionary attack protection on sealing key (#17952) DA protection is not super helpful because we don't set an authorization password on the key. 
But if authorization fails for other reasons (like TPM being reset), we will eventually cause DA lockout with tailscaled trying to load the key. DA lockout then leads to (1) issues for other processes using the TPM and (2) the underlying authorization error being masked in logs. Updates #17654 Signed-off-by: Andrew Lytvynov --- feature/tpm/attestation.go | 10 ++++++---- feature/tpm/tpm.go | 3 +++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 49b80ade1e410..197a8d6b8798a 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -59,10 +59,12 @@ func newAttestationKey() (ak *attestationKey, retErr error) { SensitiveDataOrigin: true, UserWithAuth: true, AdminWithPolicy: true, - NoDA: true, - FixedTPM: true, - FixedParent: true, - SignEncrypt: true, + // We don't set an authorization policy on this key, so + // DA isn't helpful. + NoDA: true, + FixedTPM: true, + FixedParent: true, + SignEncrypt: true, }, Parameters: tpm2.NewTPMUPublicParms( tpm2.TPMAlgECC, diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 7cbdec088de04..8df269b95bc2e 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -414,6 +414,9 @@ func tpmSeal(logf logger.Logf, data []byte) (*tpmSealedData, error) { FixedTPM: true, FixedParent: true, UserWithAuth: true, + // We don't set an authorization policy on this key, so DA + // isn't helpful. + NoDA: true, }, }), } From 41662f51288465842091a357f7e9bc633da6bd4c Mon Sep 17 00:00:00 2001 From: James Tucker Date: Sat, 15 Nov 2025 18:35:39 -0800 Subject: [PATCH 0704/1093] ssh/tailssh: fix incubator tests on macOS arm64 Perform a path check first before attempting exec of `true`. Try /usr/bin/true first, as that is now and increasingly so, the more common and more portable path. Fixes tests on macOS arm64 where exec was returning a different kind of path error than previously checked. 
Updates #16569 Signed-off-by: James Tucker --- ssh/tailssh/incubator.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index dd280143e36e3..f75646771057a 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -74,6 +74,9 @@ var maybeStartLoginSession = func(dlogf logger.Logf, ia incubatorArgs) (close fu return nil } +// truePaths are the common locations to find the true binary, in likelihood order. +var truePaths = [...]string{"/usr/bin/true", "/bin/true"} + // tryExecInDir tries to run a command in dir and returns nil if it succeeds. // Otherwise, it returns a filesystem error or a timeout error if the command // took too long. @@ -93,10 +96,14 @@ func tryExecInDir(ctx context.Context, dir string) error { windir := os.Getenv("windir") return run(filepath.Join(windir, "system32", "doskey.exe")) } - if err := run("/bin/true"); !errors.Is(err, exec.ErrNotFound) { // including nil - return err + // Execute the first "true" we find in the list. + for _, path := range truePaths { + // Note: LookPath does not consult $PATH when passed multi-label paths. + if p, err := exec.LookPath(path); err == nil { + return run(p) + } } - return run("/usr/bin/true") + return exec.ErrNotFound } // newIncubatorCommand returns a new exec.Cmd configured with From 4860c460f5072cdb977417fc03405b3accc731d9 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Mon, 17 Nov 2025 19:17:02 -0800 Subject: [PATCH 0705/1093] wgengine/netlog: strip dot suffix from node name (#17954) The REST API does not return a node name with a trailing dot, while the internal node name reported in the netmap does have one. In order to be consistent with the API, strip the dot when recording node information. 
Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- wgengine/netlog/record.go | 6 +++++- wgengine/netlog/record_test.go | 8 ++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/wgengine/netlog/record.go b/wgengine/netlog/record.go index 45e30fabec1a6..25b6b1148793a 100644 --- a/wgengine/netlog/record.go +++ b/wgengine/netlog/record.go @@ -9,6 +9,7 @@ import ( "cmp" "net/netip" "slices" + "strings" "time" "unicode/utf8" @@ -169,7 +170,10 @@ func (nu nodeUser) toNode() netlogtype.Node { if !nu.Valid() { return netlogtype.Node{} } - n := netlogtype.Node{NodeID: nu.StableID(), Name: nu.Name()} + n := netlogtype.Node{ + NodeID: nu.StableID(), + Name: strings.TrimSuffix(nu.Name(), "."), + } var ipv4, ipv6 netip.Addr for _, addr := range nu.Addresses().All() { switch { diff --git a/wgengine/netlog/record_test.go b/wgengine/netlog/record_test.go index 7dd840d29f052..ec0229534f244 100644 --- a/wgengine/netlog/record_test.go +++ b/wgengine/netlog/record_test.go @@ -53,7 +53,7 @@ func TestToMessage(t *testing.T) { selfNode: nodeUser{NodeView: (&tailcfg.Node{ ID: 123456, StableID: "n123456CNTL", - Name: "src.tail123456.ts.net", + Name: "src.tail123456.ts.net.", Addresses: []netip.Prefix{prefix("100.1.2.3")}, Tags: []string{"tag:src"}, }).View()}, @@ -64,14 +64,14 @@ func TestToMessage(t *testing.T) { addr("100.1.2.4"): {NodeView: (&tailcfg.Node{ ID: 123457, StableID: "n123457CNTL", - Name: "dst1.tail123456.ts.net", + Name: "dst1.tail123456.ts.net.", Addresses: []netip.Prefix{prefix("100.1.2.4")}, Tags: []string{"tag:dst1"}, }).View()}, addr("100.1.2.5"): {NodeView: (&tailcfg.Node{ ID: 123458, StableID: "n123458CNTL", - Name: "dst2.tail123456.ts.net", + Name: "dst2.tail123456.ts.net.", Addresses: []netip.Prefix{prefix("100.1.2.5")}, Tags: []string{"tag:dst2"}, }).View()}, @@ -163,7 +163,7 @@ func TestToNode(t *testing.T) { { node: &tailcfg.Node{ StableID: "n123456CNTL", - Name: "test.tail123456.ts.net", + Name: "test.tail123456.ts.net.", Addresses: 
[]netip.Prefix{prefix("100.1.2.3")}, Tags: []string{"tag:dupe", "tag:test", "tag:dupe"}, User: 12345, // should be ignored From a2e9dfacde52a083555074c1660ce237b12ed7e6 Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Tue, 18 Nov 2025 07:53:42 +0000 Subject: [PATCH 0706/1093] cmd/tailscale/cli: warn if a simple up would change prefs (#17877) Updates tailscale/corp#21570 Signed-off-by: James Sanderson --- cmd/tailscale/cli/cli_test.go | 73 +++++++++++++++++++++++++++++------ cmd/tailscale/cli/up.go | 23 ++++++----- 2 files changed, 73 insertions(+), 23 deletions(-) diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 2e1bec8c9bcb0..8762b7aaeb905 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -174,6 +174,7 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { curUser string // os.Getenv("USER") on the client side goos string // empty means "linux" distro distro.Distro + backendState string // empty means "Running" want string }{ @@ -188,6 +189,28 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { }, want: "", }, + { + name: "bare_up_needs_login_default_prefs", + flags: []string{}, + curPrefs: ipn.NewPrefs(), + backendState: ipn.NeedsLogin.String(), + want: "", + }, + { + name: "bare_up_needs_login_losing_prefs", + flags: []string{}, + curPrefs: &ipn.Prefs{ + // defaults: + ControlURL: ipn.DefaultControlURL, + WantRunning: false, + NetfilterMode: preftype.NetfilterOn, + NoStatefulFiltering: opt.NewBool(true), + // non-default: + CorpDNS: false, + }, + backendState: ipn.NeedsLogin.String(), + want: accidentalUpPrefix + " --accept-dns=false", + }, { name: "losing_hostname", flags: []string{"--accept-dns"}, @@ -620,9 +643,13 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - goos := "linux" - if tt.goos != "" { - goos = tt.goos + goos := stdcmp.Or(tt.goos, "linux") + backendState := 
stdcmp.Or(tt.backendState, ipn.Running.String()) + // Needs to match the other conditions in checkForAccidentalSettingReverts + tt.curPrefs.Persist = &persist.Persist{ + UserProfile: tailcfg.UserProfile{ + LoginName: "janet", + }, } var upArgs upArgsT flagSet := newUpFlagSet(goos, &upArgs, "up") @@ -638,10 +665,11 @@ func TestCheckForAccidentalSettingReverts(t *testing.T) { curExitNodeIP: tt.curExitNodeIP, distro: tt.distro, user: tt.curUser, + backendState: backendState, } applyImplicitPrefs(newPrefs, tt.curPrefs, upEnv) var got string - if err := checkForAccidentalSettingReverts(newPrefs, tt.curPrefs, upEnv); err != nil { + if _, err := checkForAccidentalSettingReverts(newPrefs, tt.curPrefs, upEnv); err != nil { got = err.Error() } if strings.TrimSpace(got) != tt.want { @@ -1011,13 +1039,10 @@ func TestUpdatePrefs(t *testing.T) { wantErrSubtr string }{ { - name: "bare_up_means_up", - flags: []string{}, - curPrefs: &ipn.Prefs{ - ControlURL: ipn.DefaultControlURL, - WantRunning: false, - Hostname: "foo", - }, + name: "bare_up_means_up", + flags: []string{}, + curPrefs: ipn.NewPrefs(), + wantSimpleUp: false, // user profile not set, so no simple up }, { name: "just_up", @@ -1031,6 +1056,32 @@ func TestUpdatePrefs(t *testing.T) { }, wantSimpleUp: true, }, + { + name: "just_up_needs_login_default_prefs", + flags: []string{}, + curPrefs: ipn.NewPrefs(), + env: upCheckEnv{ + backendState: "NeedsLogin", + }, + wantSimpleUp: false, + }, + { + name: "just_up_needs_login_losing_prefs", + flags: []string{}, + curPrefs: &ipn.Prefs{ + // defaults: + ControlURL: ipn.DefaultControlURL, + WantRunning: false, + NetfilterMode: preftype.NetfilterOn, + // non-default: + CorpDNS: false, + }, + env: upCheckEnv{ + backendState: "NeedsLogin", + }, + wantSimpleUp: false, + wantErrSubtr: "tailscale up --accept-dns=false", + }, { name: "just_edit", flags: []string{}, diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index c341559559149..e8b0cd0d37145 100644 --- 
a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -388,7 +388,8 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus if !env.upArgs.reset { applyImplicitPrefs(prefs, curPrefs, env) - if err := checkForAccidentalSettingReverts(prefs, curPrefs, env); err != nil { + simpleUp, err = checkForAccidentalSettingReverts(prefs, curPrefs, env) + if err != nil { return false, nil, err } } @@ -420,11 +421,6 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus tagsChanged := !reflect.DeepEqual(curPrefs.AdvertiseTags, prefs.AdvertiseTags) - simpleUp = env.flagSet.NFlag() == 0 && - curPrefs.Persist != nil && - curPrefs.Persist.UserProfile.LoginName != "" && - env.backendState != ipn.NeedsLogin.String() - justEdit := env.backendState == ipn.Running.String() && !env.upArgs.forceReauth && env.upArgs.authKeyOrFile == "" && @@ -968,10 +964,10 @@ type upCheckEnv struct { // // mp is the mask of settings actually set, where mp.Prefs is the new // preferences to set, including any values set from implicit flags. -func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheckEnv) error { +func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, err error) { if curPrefs.ControlURL == "" { // Don't validate things on initial "up" before a control URL has been set. - return nil + return false, nil } flagIsSet := map[string]bool{} @@ -979,10 +975,13 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck flagIsSet[f.Name] = true }) - if len(flagIsSet) == 0 { + if len(flagIsSet) == 0 && + curPrefs.Persist != nil && + curPrefs.Persist.UserProfile.LoginName != "" && + env.backendState != ipn.NeedsLogin.String() { // A bare "tailscale up" is a special case to just // mean bringing the network up without any changes. 
- return nil + return true, nil } // flagsCur is what flags we'd need to use to keep the exact @@ -1024,7 +1023,7 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck missing = append(missing, fmtFlagValueArg(flagName, valCur)) } if len(missing) == 0 { - return nil + return false, nil } // Some previously provided flags are missing. This run of 'tailscale @@ -1057,7 +1056,7 @@ func checkForAccidentalSettingReverts(newPrefs, curPrefs *ipn.Prefs, env upCheck fmt.Fprintf(&sb, " %s", a) } sb.WriteString("\n\n") - return errors.New(sb.String()) + return false, errors.New(sb.String()) } // applyImplicitPrefs mutates prefs to add implicit preferences for the user operator. From 9048ea25db8064b3833b7a2fcbe4b421e4a820dc Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Tue, 18 Nov 2025 08:04:03 +0000 Subject: [PATCH 0707/1093] ipn/localapi: log calls to localapi (#17880) Updates tailscale/corp#34238 Signed-off-by: James Sanderson --- ipn/localapi/localapi.go | 24 +++++++++++++++++------- ipn/localapi/localapi_test.go | 33 ++++++++++++++++++++++++--------- 2 files changed, 41 insertions(+), 16 deletions(-) diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index ddd55234ae84d..c4ba2a40bd000 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -264,7 +264,8 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } } - if fn, ok := handlerForPath(r.URL.Path); ok { + if fn, route, ok := handlerForPath(r.URL.Path); ok { + h.logRequest(r.Method, route) fn(h, w, r) } else { http.NotFound(w, r) @@ -300,9 +301,9 @@ func (h *Handler) validHost(hostname string) bool { // handlerForPath returns the LocalAPI handler for the provided Request.URI.Path. 
// (the path doesn't include any query parameters) -func handlerForPath(urlPath string) (h LocalAPIHandler, ok bool) { +func handlerForPath(urlPath string) (h LocalAPIHandler, route string, ok bool) { if urlPath == "/" { - return (*Handler).serveLocalAPIRoot, true + return (*Handler).serveLocalAPIRoot, "/", true } suff, ok := strings.CutPrefix(urlPath, "/localapi/v0/") if !ok { @@ -310,22 +311,31 @@ func handlerForPath(urlPath string) (h LocalAPIHandler, ok bool) { // to people that they're not necessarily stable APIs. In practice we'll // probably need to keep them pretty stable anyway, but for now treat // them as an internal implementation detail. - return nil, false + return nil, "", false } if fn, ok := handler[suff]; ok { // Here we match exact handler suffixes like "status" or ones with a // slash already in their name, like "tka/status". - return fn, true + return fn, "/localapi/v0/" + suff, true } // Otherwise, it might be a prefix match like "files/*" which we look up // by the prefix including first trailing slash. 
if i := strings.IndexByte(suff, '/'); i != -1 { suff = suff[:i+1] if fn, ok := handler[suff]; ok { - return fn, true + return fn, "/localapi/v0/" + suff, true } } - return nil, false + return nil, "", false +} + +func (h *Handler) logRequest(method, route string) { + switch method { + case httpm.GET, httpm.HEAD, httpm.OPTIONS: + // don't log safe methods + default: + h.Logf("localapi: [%s] %s", method, route) + } } func (*Handler) serveLocalAPIRoot(w http.ResponseWriter, r *http.Request) { diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index d00b4117be43d..6bb9b51829b1d 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -40,6 +40,19 @@ import ( "tailscale.com/wgengine" ) +func handlerForTest(t testing.TB, h *Handler) *Handler { + if h.Actor == nil { + h.Actor = &ipnauth.TestActor{} + } + if h.b == nil { + h.b = &ipnlocal.LocalBackend{} + } + if h.logf == nil { + h.logf = logger.TestLogger(t) + } + return h +} + func TestValidHost(t *testing.T) { tests := []struct { host string @@ -57,7 +70,7 @@ func TestValidHost(t *testing.T) { for _, test := range tests { t.Run(test.host, func(t *testing.T) { - h := &Handler{} + h := handlerForTest(t, &Handler{}) if got := h.validHost(test.host); got != test.valid { t.Errorf("validHost(%q)=%v, want %v", test.host, got, test.valid) } @@ -68,10 +81,9 @@ func TestValidHost(t *testing.T) { func TestSetPushDeviceToken(t *testing.T) { tstest.Replace(t, &validLocalHostForTesting, true) - h := &Handler{ + h := handlerForTest(t, &Handler{ PermitWrite: true, - b: &ipnlocal.LocalBackend{}, - } + }) s := httptest.NewServer(h) defer s.Close() c := s.Client() @@ -125,9 +137,9 @@ func (b whoIsBackend) PeerCaps(ip netip.Addr) tailcfg.PeerCapMap { // // And https://github.com/tailscale/tailscale/issues/12465 func TestWhoIsArgTypes(t *testing.T) { - h := &Handler{ + h := handlerForTest(t, &Handler{ PermitRead: true, - } + }) match := func() (n tailcfg.NodeView, u tailcfg.UserProfile, ok 
bool) { return (&tailcfg.Node{ @@ -190,7 +202,10 @@ func TestWhoIsArgTypes(t *testing.T) { func TestShouldDenyServeConfigForGOOSAndUserContext(t *testing.T) { newHandler := func(connIsLocalAdmin bool) *Handler { - return &Handler{Actor: &ipnauth.TestActor{LocalAdmin: connIsLocalAdmin}, b: newTestLocalBackend(t)} + return handlerForTest(t, &Handler{ + Actor: &ipnauth.TestActor{LocalAdmin: connIsLocalAdmin}, + b: newTestLocalBackend(t), + }) } tests := []struct { name string @@ -298,11 +313,11 @@ func TestServeWatchIPNBus(t *testing.T) { for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - h := &Handler{ + h := handlerForTest(t, &Handler{ PermitRead: tt.permitRead, PermitWrite: tt.permitWrite, b: newTestLocalBackend(t), - } + }) s := httptest.NewServer(h) defer s.Close() c := s.Client() From c2e474e729b4b665cf5acafa29f89c11af71ac35 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 17 Nov 2025 18:13:44 +0000 Subject: [PATCH 0708/1093] all: rename variables with lowercase-l/uppercase-I See http://go/no-ell Signed-off-by: Alex Chan Updates #cleanup Change-Id: I8c976b51ce7a60f06315048b1920516129cc1d5d --- appc/appconnector.go | 14 +- atomicfile/atomicfile_test.go | 4 +- chirp/chirp_test.go | 8 +- client/web/src/hooks/exit-nodes.ts | 8 +- clientupdate/clientupdate.go | 8 +- cmd/containerboot/main_test.go | 40 +-- cmd/derper/derper.go | 16 +- cmd/k8s-operator/egress-eps.go | 46 ++-- cmd/k8s-operator/egress-pod-readiness.go | 32 +-- cmd/k8s-operator/egress-services-readiness.go | 32 +-- .../egress-services-readiness_test.go | 26 +- cmd/k8s-operator/egress-services.go | 124 ++++----- cmd/k8s-operator/egress-services_test.go | 4 +- cmd/k8s-operator/operator_test.go | 4 +- cmd/k8s-operator/proxygroup_specs.go | 12 +- cmd/k8s-operator/tsrecorder_specs.go | 12 +- cmd/k8s-proxy/internal/config/config.go | 44 +-- cmd/k8s-proxy/internal/config/config_test.go | 14 +- cmd/natc/ippool/consensusippool.go | 4 +- cmd/sniproxy/sniproxy_test.go | 4 +- 
cmd/stunstamp/stunstamp.go | 28 +- cmd/sync-containers/main.go | 4 +- cmd/tl-longchain/tl-longchain.go | 4 +- drive/driveimpl/connlistener.go | 24 +- drive/driveimpl/connlistener_test.go | 6 +- drive/driveimpl/drive_test.go | 18 +- drive/driveimpl/fileserver.go | 14 +- feature/sdnotify/sdnotify_linux.go | 4 +- ipn/localapi/tailnetlock.go | 4 +- .../apis/v1alpha1/types_proxyclass.go | 8 +- k8s-operator/sessionrecording/ws/conn_test.go | 4 +- kube/egressservices/egressservices.go | 12 +- kube/localclient/local-client.go | 8 +- log/sockstatlog/logger.go | 60 ++-- logpolicy/logpolicy.go | 4 +- logtail/logtail.go | 258 +++++++++--------- logtail/logtail_test.go | 64 ++--- net/art/stride_table.go | 12 +- net/art/stride_table_test.go | 4 +- net/dns/manager_windows_test.go | 4 +- net/ktimeout/ktimeout_linux_test.go | 12 +- net/ktimeout/ktimeout_test.go | 4 +- net/memnet/listener.go | 28 +- net/memnet/listener_test.go | 10 +- net/netaddr/netaddr.go | 2 +- net/netcheck/netcheck.go | 8 +- net/socks5/socks5.go | 6 +- net/speedtest/speedtest_server.go | 4 +- net/speedtest/speedtest_test.go | 10 +- packages/deb/deb.go | 10 +- prober/derp.go | 28 +- prober/prober.go | 34 +-- tka/aum.go | 4 +- tka/sig_test.go | 8 +- tsconsensus/monitor.go | 4 +- tsconsensus/tsconsensus_test.go | 4 +- tsnet/tsnet_test.go | 10 +- tstest/integration/vms/vms_test.go | 6 +- tsweb/tsweb.go | 38 +-- types/geo/quantize_test.go | 16 +- types/key/disco.go | 10 +- types/prefs/list.go | 24 +- types/prefs/prefs_test.go | 20 +- types/prefs/struct_list.go | 24 +- types/prefs/struct_map.go | 8 +- util/limiter/limiter.go | 68 ++--- util/limiter/limiter_test.go | 144 +++++----- util/linuxfw/detector.go | 4 +- util/lru/lru_test.go | 8 +- util/syspolicy/setting/setting.go | 22 +- util/syspolicy/setting/setting_test.go | 4 +- util/winutil/gp/gp_windows_test.go | 10 +- util/winutil/gp/policylock_windows.go | 80 +++--- util/winutil/s4u/lsa_windows.go | 4 +- util/winutil/s4u/s4u_windows.go | 8 +- 
util/winutil/startupinfo_windows.go | 4 +- util/winutil/winutil_windows_test.go | 4 +- wf/firewall.go | 8 +- wgengine/magicsock/magicsock_test.go | 20 +- wgengine/netstack/link_endpoint.go | 110 ++++---- wgengine/router/osrouter/router_linux_test.go | 18 +- 81 files changed, 924 insertions(+), 924 deletions(-) diff --git a/appc/appconnector.go b/appc/appconnector.go index 5625decbfa062..d41f9e8ba6357 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -203,12 +203,12 @@ func NewAppConnector(c Config) *AppConnector { ac.wildcards = c.RouteInfo.Wildcards ac.controlRoutes = c.RouteInfo.Control } - ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) { - ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l) - metricStoreRoutes(c, l) + ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, ln int64) { + ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, ln) + metricStoreRoutes(c, ln) }) - ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, l int64) { - ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, l) + ac.writeRateDay = newRateLogger(time.Now, 24*time.Hour, func(c int64, s time.Time, ln int64) { + ac.logf("routeInfo write rate: %d in 24 hours starting at %v (%d routes)", c, s, ln) }) return ac } @@ -510,8 +510,8 @@ func (e *AppConnector) addDomainAddrLocked(domain string, addr netip.Addr) { slices.SortFunc(e.domains[domain], compareAddr) } -func compareAddr(l, r netip.Addr) int { - return l.Compare(r) +func compareAddr(a, b netip.Addr) int { + return a.Compare(b) } // routesWithout returns a without b where a and b diff --git a/atomicfile/atomicfile_test.go b/atomicfile/atomicfile_test.go index 78c93e664f738..a081c90409788 100644 --- a/atomicfile/atomicfile_test.go +++ b/atomicfile/atomicfile_test.go @@ -31,11 +31,11 @@ func TestDoesNotOverwriteIrregularFiles(t 
*testing.T) { // The least troublesome thing to make that is not a file is a unix socket. // Making a null device sadly requires root. - l, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"}) + ln, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"}) if err != nil { t.Fatal(err) } - defer l.Close() + defer ln.Close() err = WriteFile(path, []byte("hello"), 0644) if err == nil { diff --git a/chirp/chirp_test.go b/chirp/chirp_test.go index a57ef224b2c1b..c545c277d6e87 100644 --- a/chirp/chirp_test.go +++ b/chirp/chirp_test.go @@ -24,7 +24,7 @@ type fakeBIRD struct { func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD { sock := filepath.Join(t.TempDir(), "sock") - l, err := net.Listen("unix", sock) + ln, err := net.Listen("unix", sock) if err != nil { t.Fatal(err) } @@ -33,7 +33,7 @@ func newFakeBIRD(t *testing.T, protocols ...string) *fakeBIRD { pe[p] = false } return &fakeBIRD{ - Listener: l, + Listener: ln, protocolsEnabled: pe, sock: sock, } @@ -123,12 +123,12 @@ type hangingListener struct { func newHangingListener(t *testing.T) *hangingListener { sock := filepath.Join(t.TempDir(), "sock") - l, err := net.Listen("unix", sock) + ln, err := net.Listen("unix", sock) if err != nil { t.Fatal(err) } return &hangingListener{ - Listener: l, + Listener: ln, t: t, done: make(chan struct{}), sock: sock, diff --git a/client/web/src/hooks/exit-nodes.ts b/client/web/src/hooks/exit-nodes.ts index b3ce0a9fa12ec..5e47fbc227cd4 100644 --- a/client/web/src/hooks/exit-nodes.ts +++ b/client/web/src/hooks/exit-nodes.ts @@ -66,7 +66,7 @@ export default function useExitNodes(node: NodeData, filter?: string) { // match from a list of exit node `options` to `nodes`. 
const addBestMatchNode = ( options: ExitNode[], - name: (l: ExitNodeLocation) => string + name: (loc: ExitNodeLocation) => string ) => { const bestNode = highestPriorityNode(options) if (!bestNode || !bestNode.Location) { @@ -86,7 +86,7 @@ export default function useExitNodes(node: NodeData, filter?: string) { locationNodesMap.forEach( // add one node per country (countryNodes) => - addBestMatchNode(flattenMap(countryNodes), (l) => l.Country) + addBestMatchNode(flattenMap(countryNodes), (loc) => loc.Country) ) } else { // Otherwise, show the best match on a city-level, @@ -97,12 +97,12 @@ export default function useExitNodes(node: NodeData, filter?: string) { countryNodes.forEach( // add one node per city (cityNodes) => - addBestMatchNode(cityNodes, (l) => `${l.Country}: ${l.City}`) + addBestMatchNode(cityNodes, (loc) => `${loc.Country}: ${loc.City}`) ) // add the "Country: Best Match" node addBestMatchNode( flattenMap(countryNodes), - (l) => `${l.Country}: Best Match` + (loc) => `${loc.Country}: Best Match` ) }) } diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index 84b289615f911..3a0a8d03e0425 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -418,13 +418,13 @@ func parseSynoinfo(path string) (string, error) { // Extract the CPU in the middle (88f6282 in the above example). 
s := bufio.NewScanner(f) for s.Scan() { - l := s.Text() - if !strings.HasPrefix(l, "unique=") { + line := s.Text() + if !strings.HasPrefix(line, "unique=") { continue } - parts := strings.SplitN(l, "_", 3) + parts := strings.SplitN(line, "_", 3) if len(parts) != 3 { - return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, l) + return "", fmt.Errorf(`malformed %q: found %q, expected format like 'unique="synology_$cpu_$model'`, path, line) } return parts[1], nil } diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 96feef682af5b..f92f353334de2 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -1287,8 +1287,8 @@ type localAPI struct { notify *ipn.Notify } -func (l *localAPI) Start() error { - path := filepath.Join(l.FSRoot, "tmp/tailscaled.sock.fake") +func (lc *localAPI) Start() error { + path := filepath.Join(lc.FSRoot, "tmp/tailscaled.sock.fake") if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { return err } @@ -1298,30 +1298,30 @@ func (l *localAPI) Start() error { return err } - l.srv = &http.Server{ - Handler: l, + lc.srv = &http.Server{ + Handler: lc, } - l.Path = path - l.cond = sync.NewCond(&l.Mutex) - go l.srv.Serve(ln) + lc.Path = path + lc.cond = sync.NewCond(&lc.Mutex) + go lc.srv.Serve(ln) return nil } -func (l *localAPI) Close() { - l.srv.Close() +func (lc *localAPI) Close() { + lc.srv.Close() } -func (l *localAPI) Notify(n *ipn.Notify) { +func (lc *localAPI) Notify(n *ipn.Notify) { if n == nil { return } - l.Lock() - defer l.Unlock() - l.notify = n - l.cond.Broadcast() + lc.Lock() + defer lc.Unlock() + lc.notify = n + lc.cond.Broadcast() } -func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/localapi/v0/serve-config": if r.Method != "POST" { @@ -1348,11 +1348,11 @@ func (l *localAPI) ServeHTTP(w 
http.ResponseWriter, r *http.Request) { f.Flush() } enc := json.NewEncoder(w) - l.Lock() - defer l.Unlock() + lc.Lock() + defer lc.Unlock() for { - if l.notify != nil { - if err := enc.Encode(l.notify); err != nil { + if lc.notify != nil { + if err := enc.Encode(lc.notify); err != nil { // Usually broken pipe as the test client disconnects. return } @@ -1360,7 +1360,7 @@ func (l *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { f.Flush() } } - l.cond.Wait() + lc.cond.Wait() } } diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 857d7def3b6ff..f177986a59f91 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -481,32 +481,32 @@ func newRateLimitedListener(ln net.Listener, limit rate.Limit, burst int) *rateL return &rateLimitedListener{Listener: ln, lim: rate.NewLimiter(limit, burst)} } -func (l *rateLimitedListener) ExpVar() expvar.Var { +func (ln *rateLimitedListener) ExpVar() expvar.Var { m := new(metrics.Set) - m.Set("counter_accepted_connections", &l.numAccepts) - m.Set("counter_rejected_connections", &l.numRejects) + m.Set("counter_accepted_connections", &ln.numAccepts) + m.Set("counter_rejected_connections", &ln.numRejects) return m } var errLimitedConn = errors.New("cannot accept connection; rate limited") -func (l *rateLimitedListener) Accept() (net.Conn, error) { +func (ln *rateLimitedListener) Accept() (net.Conn, error) { // Even under a rate limited situation, we accept the connection immediately // and close it, rather than being slow at accepting new connections. // This provides two benefits: 1) it signals to the client that something // is going on on the server, and 2) it prevents new connections from // piling up and occupying resources in the OS kernel. // The client will retry as needing (with backoffs in place). 
- cn, err := l.Listener.Accept() + cn, err := ln.Listener.Accept() if err != nil { return nil, err } - if !l.lim.Allow() { - l.numRejects.Add(1) + if !ln.lim.Allow() { + ln.numRejects.Add(1) cn.Close() return nil, errLimitedConn } - l.numAccepts.Add(1) + ln.numAccepts.Add(1) return cn, nil } diff --git a/cmd/k8s-operator/egress-eps.go b/cmd/k8s-operator/egress-eps.go index 3441e12ba93ec..88da9935320bf 100644 --- a/cmd/k8s-operator/egress-eps.go +++ b/cmd/k8s-operator/egress-eps.go @@ -36,21 +36,21 @@ type egressEpsReconciler struct { // It compares tailnet service state stored in egress proxy state Secrets by containerboot with the desired // configuration stored in proxy-cfg ConfigMap to determine if the endpoint is ready. func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := er.logger.With("Service", req.NamespacedName) - l.Debugf("starting reconcile") - defer l.Debugf("reconcile finished") + lg := er.logger.With("Service", req.NamespacedName) + lg.Debugf("starting reconcile") + defer lg.Debugf("reconcile finished") eps := new(discoveryv1.EndpointSlice) err = er.Get(ctx, req.NamespacedName, eps) if apierrors.IsNotFound(err) { - l.Debugf("EndpointSlice not found") + lg.Debugf("EndpointSlice not found") return reconcile.Result{}, nil } if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get EndpointSlice: %w", err) } if !eps.DeletionTimestamp.IsZero() { - l.Debugf("EnpointSlice is being deleted") + lg.Debugf("EnpointSlice is being deleted") return res, nil } @@ -64,7 +64,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ } err = er.Get(ctx, client.ObjectKeyFromObject(svc), svc) if apierrors.IsNotFound(err) { - l.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name) + lg.Infof("ExternalName Service %s/%s not found, perhaps it was deleted", svc.Namespace, svc.Name) return res, nil } if err != nil { @@ -77,7 
+77,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ oldEps := eps.DeepCopy() tailnetSvc := tailnetSvcName(svc) - l = l.With("tailnet-service-name", tailnetSvc) + lg = lg.With("tailnet-service-name", tailnetSvc) // Retrieve the desired tailnet service configuration from the ConfigMap. proxyGroupName := eps.Labels[labelProxyGroup] @@ -88,12 +88,12 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ if cfgs == nil { // TODO(irbekrm): this path would be hit if egress service was once exposed on a ProxyGroup that later // got deleted. Probably the EndpointSlices then need to be deleted too- need to rethink this flow. - l.Debugf("No egress config found, likely because ProxyGroup has not been created") + lg.Debugf("No egress config found, likely because ProxyGroup has not been created") return res, nil } cfg, ok := (*cfgs)[tailnetSvc] if !ok { - l.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc) + lg.Infof("[unexpected] configuration for tailnet service %s not found", tailnetSvc) return res, nil } @@ -105,7 +105,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ } newEndpoints := make([]discoveryv1.Endpoint, 0) for _, pod := range podList.Items { - ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, l) + ready, err := er.podIsReadyToRouteTraffic(ctx, pod, &cfg, tailnetSvc, lg) if err != nil { return res, fmt.Errorf("error verifying if Pod is ready to route traffic: %w", err) } @@ -130,7 +130,7 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ // run a cleanup for deleted Pods etc. 
eps.Endpoints = newEndpoints if !reflect.DeepEqual(eps, oldEps) { - l.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods") + lg.Infof("Updating EndpointSlice to ensure traffic is routed to ready proxy Pods") if err := er.Update(ctx, eps); err != nil { return res, fmt.Errorf("error updating EndpointSlice: %w", err) } @@ -154,11 +154,11 @@ func podIPv4(pod *corev1.Pod) (string, error) { // podIsReadyToRouteTraffic returns true if it appears that the proxy Pod has configured firewall rules to be able to // route traffic to the given tailnet service. It retrieves the proxy's state Secret and compares the tailnet service // status written there to the desired service configuration. -func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, l *zap.SugaredLogger) (bool, error) { - l = l.With("proxy_pod", pod.Name) - l.Debugf("checking whether proxy is ready to route to egress service") +func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod corev1.Pod, cfg *egressservices.Config, tailnetSvcName string, lg *zap.SugaredLogger) (bool, error) { + lg = lg.With("proxy_pod", pod.Name) + lg.Debugf("checking whether proxy is ready to route to egress service") if !pod.DeletionTimestamp.IsZero() { - l.Debugf("proxy Pod is being deleted, ignore") + lg.Debugf("proxy Pod is being deleted, ignore") return false, nil } podIP, err := podIPv4(&pod) @@ -166,7 +166,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod return false, fmt.Errorf("error determining Pod IP address: %v", err) } if podIP == "" { - l.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported") + lg.Infof("[unexpected] Pod does not have an IPv4 address, and IPv6 is not currently supported") return false, nil } stateS := &corev1.Secret{ @@ -177,7 +177,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx 
context.Context, pod } err = er.Get(ctx, client.ObjectKeyFromObject(stateS), stateS) if apierrors.IsNotFound(err) { - l.Debugf("proxy does not have a state Secret, waiting...") + lg.Debugf("proxy does not have a state Secret, waiting...") return false, nil } if err != nil { @@ -185,7 +185,7 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod } svcStatusBS := stateS.Data[egressservices.KeyEgressServices] if len(svcStatusBS) == 0 { - l.Debugf("proxy's state Secret does not contain egress services status, waiting...") + lg.Debugf("proxy's state Secret does not contain egress services status, waiting...") return false, nil } svcStatus := &egressservices.Status{} @@ -193,22 +193,22 @@ func (er *egressEpsReconciler) podIsReadyToRouteTraffic(ctx context.Context, pod return false, fmt.Errorf("error unmarshalling egress service status: %w", err) } if !strings.EqualFold(podIP, svcStatus.PodIPv4) { - l.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP) + lg.Infof("proxy's egress service status is for Pod IP %s, current proxy's Pod IP %s, waiting for the proxy to reconfigure...", svcStatus.PodIPv4, podIP) return false, nil } st, ok := (*svcStatus).Services[tailnetSvcName] if !ok { - l.Infof("proxy's state Secret does not have egress service status, waiting...") + lg.Infof("proxy's state Secret does not have egress service status, waiting...") return false, nil } if !reflect.DeepEqual(cfg.TailnetTarget, st.TailnetTarget) { - l.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget) + lg.Infof("proxy has configured egress service for tailnet target %v, current target is %v, waiting for proxy to reconfigure...", st.TailnetTarget, cfg.TailnetTarget) return false, nil } if !reflect.DeepEqual(cfg.Ports, st.Ports) { - l.Debugf("proxy has configured 
egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports) + lg.Debugf("proxy has configured egress service for ports %#+v, wants ports %#+v, waiting for proxy to reconfigure", st.Ports, cfg.Ports) return false, nil } - l.Debugf("proxy is ready to route traffic to egress service") + lg.Debugf("proxy is ready to route traffic to egress service") return true, nil } diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index f3a812ecb9030..a732e08612c86 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -71,9 +71,9 @@ type egressPodsReconciler struct { // If the Pod does not appear to be serving the health check endpoint (pre-v1.80 proxies), the reconciler just sets the // readiness condition for backwards compatibility reasons. func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := er.logger.With("Pod", req.NamespacedName) - l.Debugf("starting reconcile") - defer l.Debugf("reconcile finished") + lg := er.logger.With("Pod", req.NamespacedName) + lg.Debugf("starting reconcile") + defer lg.Debugf("reconcile finished") pod := new(corev1.Pod) err = er.Get(ctx, req.NamespacedName, pod) @@ -84,11 +84,11 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return reconcile.Result{}, fmt.Errorf("failed to get Pod: %w", err) } if !pod.DeletionTimestamp.IsZero() { - l.Debugf("Pod is being deleted, do nothing") + lg.Debugf("Pod is being deleted, do nothing") return res, nil } if pod.Labels[LabelParentType] != proxyTypeProxyGroup { - l.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod") + lg.Infof("[unexpected] reconciler called for a Pod that is not a ProxyGroup Pod") return res, nil } @@ -97,7 +97,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req if 
!slices.ContainsFunc(pod.Spec.ReadinessGates, func(r corev1.PodReadinessGate) bool { return r.ConditionType == tsEgressReadinessGate }) { - l.Debug("Pod does not have egress readiness gate set, skipping") + lg.Debug("Pod does not have egress readiness gate set, skipping") return res, nil } @@ -107,7 +107,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return res, fmt.Errorf("error getting ProxyGroup %q: %w", proxyGroupName, err) } if pg.Spec.Type != typeEgress { - l.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type) + lg.Infof("[unexpected] reconciler called for %q ProxyGroup Pod", pg.Spec.Type) return res, nil } // Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup. @@ -125,7 +125,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return c.Type == tsEgressReadinessGate }) if idx != -1 { - l.Debugf("Pod is already ready, do nothing") + lg.Debugf("Pod is already ready, do nothing") return res, nil } @@ -134,7 +134,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req for _, svc := range svcs.Items { s := svc go func() { - ll := l.With("service_name", s.Name) + ll := lg.With("service_name", s.Name) d := retrieveClusterDomain(er.tsNamespace, ll) healthCheckAddr := healthCheckForSvc(&s, d) if healthCheckAddr == "" { @@ -178,22 +178,22 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req return res, fmt.Errorf("error verifying conectivity: %w", err) } if rm := routesMissing.Load(); rm { - l.Info("Pod is not yet added as an endpoint for all egress targets, waiting...") + lg.Info("Pod is not yet added as an endpoint for all egress targets, waiting...") return reconcile.Result{RequeueAfter: shortRequeue}, nil } - if err := er.setPodReady(ctx, pod, l); err != nil { + if err := er.setPodReady(ctx, pod, lg); err != nil { return res, fmt.Errorf("error setting Pod as ready: %w", 
err) } return res, nil } -func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, l *zap.SugaredLogger) error { +func (er *egressPodsReconciler) setPodReady(ctx context.Context, pod *corev1.Pod, lg *zap.SugaredLogger) error { if slices.ContainsFunc(pod.Status.Conditions, func(c corev1.PodCondition) bool { return c.Type == tsEgressReadinessGate }) { return nil } - l.Infof("Pod is ready to route traffic to all egress targets") + lg.Infof("Pod is ready to route traffic to all egress targets") pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{ Type: tsEgressReadinessGate, Status: corev1.ConditionTrue, @@ -216,11 +216,11 @@ const ( ) // lookupPodRouteViaSvc attempts to reach a Pod using a health check endpoint served by a Service and returns the state of the health check. -func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, l *zap.SugaredLogger) (healthCheckState, error) { +func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *corev1.Pod, healthCheckAddr string, lg *zap.SugaredLogger) (healthCheckState, error) { if !slices.ContainsFunc(pod.Spec.Containers[0].Env, func(e corev1.EnvVar) bool { return e.Name == "TS_ENABLE_HEALTH_CHECK" && e.Value == "true" }) { - l.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service") + lg.Debugf("Pod does not have health check enabled, unable to verify if it is currently routable via Service") return cannotVerify, nil } wantsIP, err := podIPv4(pod) @@ -248,7 +248,7 @@ func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *c defer resp.Body.Close() gotIP := resp.Header.Get(kubetypes.PodIPv4Header) if gotIP == "" { - l.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently routable via Service") + lg.Debugf("Health check does not return Pod's IP header, unable to verify if Pod is currently 
routable via Service") return cannotVerify, nil } if !strings.EqualFold(wantsIP, gotIP) { diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go index ecf99b63cda44..80f3c7d285141 100644 --- a/cmd/k8s-operator/egress-services-readiness.go +++ b/cmd/k8s-operator/egress-services-readiness.go @@ -47,13 +47,13 @@ type egressSvcsReadinessReconciler struct { // route traffic to the target. It compares proxy Pod IPs with the endpoints set on the EndpointSlice for the egress // service to determine how many replicas are currently able to route traffic. func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := esrr.logger.With("Service", req.NamespacedName) - l.Debugf("starting reconcile") - defer l.Debugf("reconcile finished") + lg := esrr.logger.With("Service", req.NamespacedName) + lg.Debugf("starting reconcile") + defer lg.Debugf("reconcile finished") svc := new(corev1.Service) if err = esrr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { - l.Debugf("Service not found") + lg.Debugf("Service not found") return res, nil } else if err != nil { return res, fmt.Errorf("failed to get Service: %w", err) @@ -64,7 +64,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re ) oldStatus := svc.Status.DeepCopy() defer func() { - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, lg) if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) { err = errors.Join(err, esrr.Status().Update(ctx, svc)) } @@ -79,7 +79,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re return res, err } if eps == nil { - l.Infof("EndpointSlice for Service does not yet exist, waiting...") + lg.Infof("EndpointSlice for Service does not yet exist, waiting...") reason, msg = 
reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse return res, nil @@ -91,7 +91,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re } err = esrr.Get(ctx, client.ObjectKeyFromObject(pg), pg) if apierrors.IsNotFound(err) { - l.Infof("ProxyGroup for Service does not exist, waiting...") + lg.Infof("ProxyGroup for Service does not exist, waiting...") reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse return res, nil @@ -103,7 +103,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re return res, err } if !tsoperator.ProxyGroupAvailable(pg) { - l.Infof("ProxyGroup for Service is not ready, waiting...") + lg.Infof("ProxyGroup for Service is not ready, waiting...") reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady st = metav1.ConditionFalse return res, nil @@ -111,7 +111,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re replicas := pgReplicas(pg) if replicas == 0 { - l.Infof("ProxyGroup replicas set to 0") + lg.Infof("ProxyGroup replicas set to 0") reason, msg = reasonNoProxies, reasonNoProxies st = metav1.ConditionFalse return res, nil @@ -128,16 +128,16 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re return res, err } if pod == nil { - l.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i) + lg.Warnf("[unexpected] ProxyGroup is ready, but replica %d was not found", i) reason, msg = reasonClusterResourcesNotReady, reasonClusterResourcesNotReady return res, nil } - l.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs) + lg.Debugf("looking at Pod with IPs %v", pod.Status.PodIPs) ready := false for _, ep := range eps.Endpoints { - l.Debugf("looking at endpoint with addresses %v", ep.Addresses) - if endpointReadyForPod(&ep, pod, l) { - l.Debugf("endpoint is ready for Pod") + lg.Debugf("looking at 
endpoint with addresses %v", ep.Addresses) + if endpointReadyForPod(&ep, pod, lg) { + lg.Debugf("endpoint is ready for Pod") ready = true break } @@ -163,10 +163,10 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re // endpointReadyForPod returns true if the endpoint is for the Pod's IPv4 address and is ready to serve traffic. // Endpoint must not be nil. -func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, l *zap.SugaredLogger) bool { +func endpointReadyForPod(ep *discoveryv1.Endpoint, pod *corev1.Pod, lg *zap.SugaredLogger) bool { podIP, err := podIPv4(pod) if err != nil { - l.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err) + lg.Warnf("[unexpected] error retrieving Pod's IPv4 address: %v", err) return false } // Currently we only ever set a single address on and Endpoint and nothing else is meant to modify this. diff --git a/cmd/k8s-operator/egress-services-readiness_test.go b/cmd/k8s-operator/egress-services-readiness_test.go index f80759aef927b..fdff4fafa3240 100644 --- a/cmd/k8s-operator/egress-services-readiness_test.go +++ b/cmd/k8s-operator/egress-services-readiness_test.go @@ -49,12 +49,12 @@ func TestEgressServiceReadiness(t *testing.T) { }, } fakeClusterIPSvc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "operator-ns"}} - l := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc) + labels := egressSvcEpsLabels(egressSvc, fakeClusterIPSvc) eps := &discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ Name: "my-app", Namespace: "operator-ns", - Labels: l, + Labels: labels, }, AddressType: discoveryv1.AddressTypeIPv4, } @@ -118,26 +118,26 @@ func TestEgressServiceReadiness(t *testing.T) { }) } -func setClusterNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger) { - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, l) +func setClusterNotReady(svc 
*corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger) { + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonClusterResourcesNotReady, reasonClusterResourcesNotReady, cl, lg) } -func setNotReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas int32) { +func setNotReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas int32) { msg := fmt.Sprintf(msgReadyToRouteTemplate, 0, replicas) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionFalse, reasonNotReady, msg, cl, lg) } -func setReady(svc *corev1.Service, cl tstime.Clock, l *zap.SugaredLogger, replicas, readyReplicas int32) { +func setReady(svc *corev1.Service, cl tstime.Clock, lg *zap.SugaredLogger, replicas, readyReplicas int32) { reason := reasonPartiallyReady if readyReplicas == replicas { reason = reasonReady } msg := fmt.Sprintf(msgReadyToRouteTemplate, readyReplicas, replicas) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, metav1.ConditionTrue, reason, msg, cl, lg) } -func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, l *zap.SugaredLogger) { - tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, l) +func setPGReady(pg *tsapi.ProxyGroup, cl tstime.Clock, lg *zap.SugaredLogger) { + tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, "foo", "foo", pg.Generation, cl, lg) } func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1.EndpointSlice) { @@ -153,14 +153,14 @@ func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1 } func pod(pg *tsapi.ProxyGroup, ordinal int32) *corev1.Pod { - l := pgLabels(pg.Name, nil) - 
l[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal) + labels := pgLabels(pg.Name, nil) + labels[appsv1.PodIndexLabel] = fmt.Sprintf("%d", ordinal) ip := fmt.Sprintf("10.0.0.%d", ordinal) return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-%d", pg.Name, ordinal), Namespace: "operator-ns", - Labels: l, + Labels: labels, }, Status: corev1.PodStatus{ PodIPs: []corev1.PodIP{{IP: ip}}, diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index ca6562071eba7..05be8efed9402 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -98,12 +98,12 @@ type egressSvcsReconciler struct { // - updates the egress service config in a ConfigMap mounted to the ProxyGroup proxies with the tailnet target and the // portmappings. func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - l := esr.logger.With("Service", req.NamespacedName) - defer l.Info("reconcile finished") + lg := esr.logger.With("Service", req.NamespacedName) + defer lg.Info("reconcile finished") svc := new(corev1.Service) if err = esr.Get(ctx, req.NamespacedName, svc); apierrors.IsNotFound(err) { - l.Info("Service not found") + lg.Info("Service not found") return res, nil } else if err != nil { return res, fmt.Errorf("failed to get Service: %w", err) @@ -111,7 +111,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re // Name of the 'egress service', meaning the tailnet target. 
tailnetSvc := tailnetSvcName(svc) - l = l.With("tailnet-service", tailnetSvc) + lg = lg.With("tailnet-service", tailnetSvc) // Note that resources for egress Services are only cleaned up when the // Service is actually deleted (and not if, for example, user decides to @@ -119,8 +119,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re // assume that the egress ExternalName Services are always created for // Tailscale operator specifically. if !svc.DeletionTimestamp.IsZero() { - l.Info("Service is being deleted, ensuring resource cleanup") - return res, esr.maybeCleanup(ctx, svc, l) + lg.Info("Service is being deleted, ensuring resource cleanup") + return res, esr.maybeCleanup(ctx, svc, lg) } oldStatus := svc.Status.DeepCopy() @@ -131,7 +131,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re }() // Validate the user-created ExternalName Service and the associated ProxyGroup. - if ok, err := esr.validateClusterResources(ctx, svc, l); err != nil { + if ok, err := esr.validateClusterResources(ctx, svc, lg); err != nil { return res, fmt.Errorf("error validating cluster resources: %w", err) } else if !ok { return res, nil @@ -141,8 +141,8 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re svc.Finalizers = append(svc.Finalizers, FinalizerName) if err := esr.updateSvcSpec(ctx, svc); err != nil { err := fmt.Errorf("failed to add finalizer: %w", err) - r := svcConfiguredReason(svc, false, l) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l) + r := svcConfiguredReason(svc, false, lg) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg) return res, err } esr.mu.Lock() @@ -151,16 +151,16 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re esr.mu.Unlock() } - if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, l); 
err != nil { + if err := esr.maybeCleanupProxyGroupConfig(ctx, svc, lg); err != nil { err = fmt.Errorf("cleaning up resources for previous ProxyGroup failed: %w", err) - r := svcConfiguredReason(svc, false, l) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, l) + r := svcConfiguredReason(svc, false, lg) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, metav1.ConditionFalse, r, err.Error(), esr.clock, lg) return res, err } - if err := esr.maybeProvision(ctx, svc, l); err != nil { + if err := esr.maybeProvision(ctx, svc, lg); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { - l.Infof("optimistic lock error, retrying: %s", err) + lg.Infof("optimistic lock error, retrying: %s", err) } else { return reconcile.Result{}, err } @@ -169,15 +169,15 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re return res, nil } -func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (err error) { - r := svcConfiguredReason(svc, false, l) +func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (err error) { + r := svcConfiguredReason(svc, false, lg) st := metav1.ConditionFalse defer func() { msg := r if st != metav1.ConditionTrue && err != nil { msg = err.Error() } - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcConfigured, st, r, msg, esr.clock, lg) }() crl := egressSvcChildResourceLabels(svc) @@ -189,36 +189,36 @@ func (esr *egressSvcsReconciler) maybeProvision(ctx context.Context, svc *corev1 if clusterIPSvc == nil { clusterIPSvc = esr.clusterIPSvcForEgress(crl) } - upToDate := svcConfigurationUpToDate(svc, l) + upToDate := svcConfigurationUpToDate(svc, lg) provisioned := true if !upToDate { - if clusterIPSvc, provisioned, err = 
esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, l); err != nil { + if clusterIPSvc, provisioned, err = esr.provision(ctx, svc.Annotations[AnnotationProxyGroup], svc, clusterIPSvc, lg); err != nil { return err } } if !provisioned { - l.Infof("unable to provision cluster resources") + lg.Infof("unable to provision cluster resources") return nil } // Update ExternalName Service to point at the ClusterIP Service. - clusterDomain := retrieveClusterDomain(esr.tsNamespace, l) + clusterDomain := retrieveClusterDomain(esr.tsNamespace, lg) clusterIPSvcFQDN := fmt.Sprintf("%s.%s.svc.%s", clusterIPSvc.Name, clusterIPSvc.Namespace, clusterDomain) if svc.Spec.ExternalName != clusterIPSvcFQDN { - l.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN) + lg.Infof("Configuring ExternalName Service to point to ClusterIP Service %s", clusterIPSvcFQDN) svc.Spec.ExternalName = clusterIPSvcFQDN if err = esr.updateSvcSpec(ctx, svc); err != nil { err = fmt.Errorf("error updating ExternalName Service: %w", err) return err } } - r = svcConfiguredReason(svc, true, l) + r = svcConfiguredReason(svc, true, lg) st = metav1.ConditionTrue return nil } -func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, l *zap.SugaredLogger) (*corev1.Service, bool, error) { - l.Infof("updating configuration...") +func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName string, svc, clusterIPSvc *corev1.Service, lg *zap.SugaredLogger) (*corev1.Service, bool, error) { + lg.Infof("updating configuration...") usedPorts, err := esr.usedPortsForPG(ctx, proxyGroupName) if err != nil { return nil, false, fmt.Errorf("error calculating used ports for ProxyGroup %s: %w", proxyGroupName, err) @@ -246,7 +246,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s } } if !found { - l.Debugf("portmapping %s:%d -> %s:%d is no longer required, 
removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port) + lg.Debugf("portmapping %s:%d -> %s:%d is no longer required, removing", pm.Protocol, pm.TargetPort.IntVal, pm.Protocol, pm.Port) clusterIPSvc.Spec.Ports = slices.Delete(clusterIPSvc.Spec.Ports, i, i+1) } } @@ -277,7 +277,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s return nil, false, fmt.Errorf("unable to allocate additional ports on ProxyGroup %s, %d ports already used. Create another ProxyGroup or open an issue if you believe this is unexpected.", proxyGroupName, maxPorts) } p := unusedPort(usedPorts) - l.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p) + lg.Debugf("mapping tailnet target port %d to container port %d", wantsPM.Port, p) usedPorts.Insert(p) clusterIPSvc.Spec.Ports = append(clusterIPSvc.Spec.Ports, corev1.ServicePort{ Name: wantsPM.Name, @@ -343,14 +343,14 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s return nil, false, fmt.Errorf("error retrieving egress services configuration: %w", err) } if cm == nil { - l.Info("ConfigMap not yet created, waiting..") + lg.Info("ConfigMap not yet created, waiting..") return nil, false, nil } tailnetSvc := tailnetSvcName(svc) gotCfg := (*cfgs)[tailnetSvc] - wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, l) + wantsCfg := egressSvcCfg(svc, clusterIPSvc, esr.tsNamespace, lg) if !reflect.DeepEqual(gotCfg, wantsCfg) { - l.Debugf("updating egress services ConfigMap %s", cm.Name) + lg.Debugf("updating egress services ConfigMap %s", cm.Name) mak.Set(cfgs, tailnetSvc, wantsCfg) bs, err := json.Marshal(cfgs) if err != nil { @@ -361,7 +361,7 @@ func (esr *egressSvcsReconciler) provision(ctx context.Context, proxyGroupName s return nil, false, fmt.Errorf("error updating egress services ConfigMap: %w", err) } } - l.Infof("egress service configuration has been updated") + lg.Infof("egress service configuration has been updated") return 
clusterIPSvc, true, nil } @@ -402,7 +402,7 @@ func (esr *egressSvcsReconciler) maybeCleanup(ctx context.Context, svc *corev1.S return nil } -func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) error { +func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) error { wantsProxyGroup := svc.Annotations[AnnotationProxyGroup] cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) if cond == nil { @@ -416,7 +416,7 @@ func (esr *egressSvcsReconciler) maybeCleanupProxyGroupConfig(ctx context.Contex return nil } esr.logger.Infof("egress Service configured on ProxyGroup %s, wants ProxyGroup %s, cleaning up...", ss[2], wantsProxyGroup) - if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, l); err != nil { + if err := esr.ensureEgressSvcCfgDeleted(ctx, svc, lg); err != nil { return fmt.Errorf("error deleting egress service config: %w", err) } return nil @@ -471,17 +471,17 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context, Namespace: esr.tsNamespace, }, } - l := logger.With("ConfigMap", client.ObjectKeyFromObject(cm)) - l.Debug("ensuring that egress service configuration is removed from proxy config") + lggr := logger.With("ConfigMap", client.ObjectKeyFromObject(cm)) + lggr.Debug("ensuring that egress service configuration is removed from proxy config") if err := esr.Get(ctx, client.ObjectKeyFromObject(cm), cm); apierrors.IsNotFound(err) { - l.Debugf("ConfigMap not found") + lggr.Debugf("ConfigMap not found") return nil } else if err != nil { return fmt.Errorf("error retrieving ConfigMap: %w", err) } bs := cm.BinaryData[egressservices.KeyEgressServices] if len(bs) == 0 { - l.Debugf("ConfigMap does not contain egress service configs") + lggr.Debugf("ConfigMap does not contain egress service configs") return nil } cfgs := &egressservices.Configs{} @@ -491,12 +491,12 @@ func (esr 
*egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context, tailnetSvc := tailnetSvcName(svc) _, ok := (*cfgs)[tailnetSvc] if !ok { - l.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted") + lggr.Debugf("ConfigMap does not contain egress service config, likely because it was already deleted") return nil } - l.Infof("before deleting config %+#v", *cfgs) + lggr.Infof("before deleting config %+#v", *cfgs) delete(*cfgs, tailnetSvc) - l.Infof("after deleting config %+#v", *cfgs) + lggr.Infof("after deleting config %+#v", *cfgs) bs, err := json.Marshal(cfgs) if err != nil { return fmt.Errorf("error marshalling egress services configs: %w", err) @@ -505,7 +505,7 @@ func (esr *egressSvcsReconciler) ensureEgressSvcCfgDeleted(ctx context.Context, return esr.Update(ctx, cm) } -func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, l *zap.SugaredLogger) (bool, error) { +func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, svc *corev1.Service, lg *zap.SugaredLogger) (bool, error) { proxyGroupName := svc.Annotations[AnnotationProxyGroup] pg := &tsapi.ProxyGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -513,36 +513,36 @@ func (esr *egressSvcsReconciler) validateClusterResources(ctx context.Context, s }, } if err := esr.Get(ctx, client.ObjectKeyFromObject(pg), pg); apierrors.IsNotFound(err) { - l.Infof("ProxyGroup %q not found, waiting...", proxyGroupName) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) + lg.Infof("ProxyGroup %q not found, waiting...", proxyGroupName) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } else if err != nil { err := fmt.Errorf("unable to retrieve 
ProxyGroup %s: %w", proxyGroupName, err) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, err.Error(), esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, err } if violations := validateEgressService(svc, pg); len(violations) > 0 { msg := fmt.Sprintf("invalid egress Service: %s", strings.Join(violations, ", ")) esr.recorder.Event(svc, corev1.EventTypeWarning, "INVALIDSERVICE", msg) - l.Info(msg) - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, l) + lg.Info(msg) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionFalse, reasonEgressSvcInvalid, msg, esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) return false, nil } if !tsoperator.ProxyGroupAvailable(pg) { - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, l) + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionUnknown, reasonProxyGroupNotReady, reasonProxyGroupNotReady, esr.clock, lg) tsoperator.RemoveServiceCondition(svc, tsapi.EgressSvcConfigured) } - l.Debugf("egress service is valid") - tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, l) + lg.Debugf("egress service is valid") + tsoperator.SetServiceCondition(svc, tsapi.EgressSvcValid, metav1.ConditionTrue, reasonEgressSvcValid, reasonEgressSvcValid, esr.clock, lg) return true, nil } -func egressSvcCfg(externalNameSvc, clusterIPSvc *corev1.Service, ns string, l *zap.SugaredLogger) egressservices.Config { - d := retrieveClusterDomain(ns, l) +func egressSvcCfg(externalNameSvc, clusterIPSvc 
*corev1.Service, ns string, lg *zap.SugaredLogger) egressservices.Config { + d := retrieveClusterDomain(ns, lg) tt := tailnetTargetFromSvc(externalNameSvc) hep := healthCheckForSvc(clusterIPSvc, d) cfg := egressservices.Config{ @@ -691,18 +691,18 @@ func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string { // egressEpsLabels returns labels to be added to an EndpointSlice created for an egress service. func egressSvcEpsLabels(extNSvc, clusterIPSvc *corev1.Service) map[string]string { - l := egressSvcChildResourceLabels(extNSvc) + lbels := egressSvcChildResourceLabels(extNSvc) // Adding this label is what makes kube proxy set up rules to route traffic sent to the clusterIP Service to the // endpoints defined on this EndpointSlice. // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership - l[discoveryv1.LabelServiceName] = clusterIPSvc.Name + lbels[discoveryv1.LabelServiceName] = clusterIPSvc.Name // Kubernetes recommends setting this label. // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#management - l[discoveryv1.LabelManagedBy] = "tailscale.com" - return l + lbels[discoveryv1.LabelManagedBy] = "tailscale.com" + return lbels } -func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool { +func svcConfigurationUpToDate(svc *corev1.Service, lg *zap.SugaredLogger) bool { cond := tsoperator.GetServiceCondition(svc, tsapi.EgressSvcConfigured) if cond == nil { return false @@ -710,21 +710,21 @@ func svcConfigurationUpToDate(svc *corev1.Service, l *zap.SugaredLogger) bool { if cond.Status != metav1.ConditionTrue { return false } - wantsReadyReason := svcConfiguredReason(svc, true, l) + wantsReadyReason := svcConfiguredReason(svc, true, lg) return strings.EqualFold(wantsReadyReason, cond.Reason) } -func cfgHash(c cfg, l *zap.SugaredLogger) string { +func cfgHash(c cfg, lg *zap.SugaredLogger) string { bs, err := json.Marshal(c) if err != nil { // Don't use l.Error as that messes up 
component logs with, in this case, unnecessary stack trace. - l.Infof("error marhsalling Config: %v", err) + lg.Infof("error marhsalling Config: %v", err) return "" } h := sha256.New() if _, err := h.Write(bs); err != nil { // Don't use l.Error as that messes up component logs with, in this case, unnecessary stack trace. - l.Infof("error producing Config hash: %v", err) + lg.Infof("error producing Config hash: %v", err) return "" } return fmt.Sprintf("%x", h.Sum(nil)) @@ -736,7 +736,7 @@ type cfg struct { ProxyGroup string `json:"proxyGroup"` } -func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLogger) string { +func svcConfiguredReason(svc *corev1.Service, configured bool, lg *zap.SugaredLogger) string { var r string if configured { r = "ConfiguredFor:" @@ -750,7 +750,7 @@ func svcConfiguredReason(svc *corev1.Service, configured bool, l *zap.SugaredLog TailnetTarget: tt, ProxyGroup: svc.Annotations[AnnotationProxyGroup], } - r += fmt.Sprintf(":Config:%s", cfgHash(s, l)) + r += fmt.Sprintf(":Config:%s", cfgHash(s, lg)) return r } diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go index d8a5dfd32c1c2..202804d3011fd 100644 --- a/cmd/k8s-operator/egress-services_test.go +++ b/cmd/k8s-operator/egress-services_test.go @@ -249,9 +249,9 @@ func portsForEndpointSlice(svc *corev1.Service) []discoveryv1.EndpointPort { return ports } -func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, l *zap.Logger) { +func mustHaveConfigForSvc(t *testing.T, cl client.Client, extNSvc, clusterIPSvc *corev1.Service, cm *corev1.ConfigMap, lg *zap.Logger) { t.Helper() - wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, l.Sugar()) + wantsCfg := egressSvcCfg(extNSvc, clusterIPSvc, clusterIPSvc.Namespace, lg.Sugar()) if err := cl.Get(context.Background(), client.ObjectKeyFromObject(cm), cm); err != nil { t.Fatalf("Error retrieving 
ConfigMap: %v", err) } diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index b15c93b1c93d0..e11235768dea2 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1282,8 +1282,8 @@ func TestServiceProxyClassAnnotation(t *testing.T) { slist := &corev1.SecretList{} fc.List(context.Background(), slist, client.InNamespace("operator-ns")) for _, i := range slist.Items { - l, _ := json.Marshal(i.Labels) - t.Logf("found secret %q with labels %q ", i.Name, string(l)) + labels, _ := json.Marshal(i.Labels) + t.Logf("found secret %q with labels %q ", i.Name, string(labels)) } _, shortName := findGenName(t, fc, "default", "test", "svc") diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index e185499f0e19d..34db86db27846 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -524,16 +524,16 @@ func pgSecretLabels(pgName, secretType string) map[string]string { } func pgLabels(pgName string, customLabels map[string]string) map[string]string { - l := make(map[string]string, len(customLabels)+3) + labels := make(map[string]string, len(customLabels)+3) for k, v := range customLabels { - l[k] = v + labels[k] = v } - l[kubetypes.LabelManaged] = "true" - l[LabelParentType] = "proxygroup" - l[LabelParentName] = pgName + labels[kubetypes.LabelManaged] = "true" + labels[LabelParentType] = "proxygroup" + labels[LabelParentName] = pgName - return l + return labels } func pgOwnerReference(owner *tsapi.ProxyGroup) []metav1.OwnerReference { diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index f5eedc2a1d1da..83d7439db3f57 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -281,17 +281,17 @@ func env(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { } func labels(app, instance string, customLabels map[string]string) map[string]string { - l := 
make(map[string]string, len(customLabels)+3) + labels := make(map[string]string, len(customLabels)+3) for k, v := range customLabels { - l[k] = v + labels[k] = v } // ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ - l["app.kubernetes.io/name"] = app - l["app.kubernetes.io/instance"] = instance - l["app.kubernetes.io/managed-by"] = "tailscale-operator" + labels["app.kubernetes.io/name"] = app + labels["app.kubernetes.io/instance"] = instance + labels["app.kubernetes.io/managed-by"] = "tailscale-operator" - return l + return labels } func tsrOwnerReference(owner metav1.Object) []metav1.OwnerReference { diff --git a/cmd/k8s-proxy/internal/config/config.go b/cmd/k8s-proxy/internal/config/config.go index 4013047e76f0c..0f0bd1bfcf39d 100644 --- a/cmd/k8s-proxy/internal/config/config.go +++ b/cmd/k8s-proxy/internal/config/config.go @@ -50,32 +50,32 @@ func NewConfigLoader(logger *zap.SugaredLogger, client clientcorev1.CoreV1Interf } } -func (l *configLoader) WatchConfig(ctx context.Context, path string) error { +func (ld *configLoader) WatchConfig(ctx context.Context, path string) error { secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:") if isKubeSecret { secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator)) if !ok { return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format /", path) } - if err := l.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) { + if err := ld.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) { return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err) } return nil } - if err := l.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) { + if err := ld.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) { return fmt.Errorf("error watching 
config file %q: %w", path, err) } return nil } -func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error { - if bytes.Equal(raw, l.previous) { - if l.cfgIgnored != nil && testenv.InTest() { - l.once.Do(func() { - close(l.cfgIgnored) +func (ld *configLoader) reloadConfig(ctx context.Context, raw []byte) error { + if bytes.Equal(raw, ld.previous) { + if ld.cfgIgnored != nil && testenv.InTest() { + ld.once.Do(func() { + close(ld.cfgIgnored) }) } return nil @@ -89,14 +89,14 @@ func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error { select { case <-ctx.Done(): return ctx.Err() - case l.cfgChan <- &cfg: + case ld.cfgChan <- &cfg: } - l.previous = raw + ld.previous = raw return nil } -func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) error { +func (ld *configLoader) watchConfigFileChanges(ctx context.Context, path string) error { var ( tickChan <-chan time.Time eventChan <-chan fsnotify.Event @@ -106,14 +106,14 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) if w, err := fsnotify.NewWatcher(); err != nil { // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor. 
// See https://github.com/tailscale/tailscale/issues/15081 - l.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err) + ld.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err) ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() tickChan = ticker.C } else { dir := filepath.Dir(path) file := filepath.Base(path) - l.logger.Infof("Watching directory %q for changes to config file %q", dir, file) + ld.logger.Infof("Watching directory %q for changes to config file %q", dir, file) defer w.Close() if err := w.Add(dir); err != nil { return fmt.Errorf("failed to add fsnotify watch: %w", err) @@ -128,7 +128,7 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) if err != nil { return fmt.Errorf("error reading config file %q: %w", path, err) } - if err := l.reloadConfig(ctx, b); err != nil { + if err := ld.reloadConfig(ctx, b); err != nil { return fmt.Errorf("error loading initial config file %q: %w", path, err) } @@ -163,14 +163,14 @@ func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) if len(b) == 0 { continue } - if err := l.reloadConfig(ctx, b); err != nil { + if err := ld.reloadConfig(ctx, b); err != nil { return fmt.Errorf("error reloading config file %q: %v", path, err) } } } -func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error { - secrets := l.client.Secrets(secretNamespace) +func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error { + secrets := ld.client.Secrets(secretNamespace) w, err := secrets.Watch(ctx, metav1.ListOptions{ TypeMeta: metav1.TypeMeta{ Kind: "Secret", @@ -198,11 +198,11 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames return fmt.Errorf("failed to get config Secret %q: %w", secretName, err) } - if err := 
l.configFromSecret(ctx, secret); err != nil { + if err := ld.configFromSecret(ctx, secret); err != nil { return fmt.Errorf("error loading initial config: %w", err) } - l.logger.Infof("Watching config Secret %q for changes", secretName) + ld.logger.Infof("Watching config Secret %q for changes", secretName) for { var secret *corev1.Secret select { @@ -237,7 +237,7 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames if secret == nil || secret.Data == nil { continue } - if err := l.configFromSecret(ctx, secret); err != nil { + if err := ld.configFromSecret(ctx, secret); err != nil { return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err) } case watch.Error: @@ -250,13 +250,13 @@ func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNames } } -func (l *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error { +func (ld *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error { b := s.Data[kubetypes.KubeAPIServerConfigFile] if len(b) == 0 { return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile) } - if err := l.reloadConfig(ctx, b); err != nil { + if err := ld.reloadConfig(ctx, b); err != nil { return err } diff --git a/cmd/k8s-proxy/internal/config/config_test.go b/cmd/k8s-proxy/internal/config/config_test.go index 1603dbe1f398f..bcb1b9ebd14e6 100644 --- a/cmd/k8s-proxy/internal/config/config_test.go +++ b/cmd/k8s-proxy/internal/config/config_test.go @@ -125,15 +125,15 @@ func TestWatchConfig(t *testing.T) { } } configChan := make(chan *conf.Config) - l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) - l.cfgIgnored = make(chan struct{}) + loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) + loader.cfgIgnored = make(chan struct{}) errs := make(chan error) ctx, cancel := context.WithCancel(t.Context()) defer cancel() 
writeFile(t, tc.initialConfig) go func() { - errs <- l.WatchConfig(ctx, cfgPath) + errs <- loader.WatchConfig(ctx, cfgPath) }() for i, p := range tc.phases { @@ -159,7 +159,7 @@ func TestWatchConfig(t *testing.T) { } else if !strings.Contains(err.Error(), p.expectedErr) { t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error()) } - case <-l.cfgIgnored: + case <-loader.cfgIgnored: if p.expectedConf != nil { t.Fatalf("expected config to be reloaded, but got ignored signal") } @@ -192,13 +192,13 @@ func TestWatchConfigSecret_Rewatches(t *testing.T) { }) configChan := make(chan *conf.Config) - l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) + loader := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan) mustCreateOrUpdate(t, cl, secretFrom(expected[0])) errs := make(chan error) go func() { - errs <- l.watchConfigSecretChanges(t.Context(), "default", "config-secret") + errs <- loader.watchConfigSecretChanges(t.Context(), "default", "config-secret") }() for i := range 2 { @@ -212,7 +212,7 @@ func TestWatchConfigSecret_Rewatches(t *testing.T) { } case err := <-errs: t.Fatalf("unexpected error: %v", err) - case <-l.cfgIgnored: + case <-loader.cfgIgnored: t.Fatalf("expected config to be reloaded, but got ignored signal") case <-time.After(5 * time.Second): t.Fatalf("timed out waiting for expected event") diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go index 64807b6c272f5..bfa909b69a3b4 100644 --- a/cmd/natc/ippool/consensusippool.go +++ b/cmd/natc/ippool/consensusippool.go @@ -422,9 +422,9 @@ func (ipp *ConsensusIPPool) applyCheckoutAddr(nid tailcfg.NodeID, domain string, } // Apply is part of the raft.FSM interface. It takes an incoming log entry and applies it to the state. 
-func (ipp *ConsensusIPPool) Apply(l *raft.Log) any { +func (ipp *ConsensusIPPool) Apply(lg *raft.Log) any { var c tsconsensus.Command - if err := json.Unmarshal(l.Data, &c); err != nil { + if err := json.Unmarshal(lg.Data, &c); err != nil { panic(fmt.Sprintf("failed to unmarshal command: %s", err.Error())) } switch c.Name { diff --git a/cmd/sniproxy/sniproxy_test.go b/cmd/sniproxy/sniproxy_test.go index 07fbd2eceb839..65e059efaa1d4 100644 --- a/cmd/sniproxy/sniproxy_test.go +++ b/cmd/sniproxy/sniproxy_test.go @@ -156,13 +156,13 @@ func TestSNIProxyWithNetmapConfig(t *testing.T) { client, _, _ := startNode(t, ctx, controlURL, "client") // Make sure that the sni node has received its config. - l, err := sni.LocalClient() + lc, err := sni.LocalClient() if err != nil { t.Fatal(err) } gotConfigured := false for range 100 { - s, err := l.StatusWithoutPeers(ctx) + s, err := lc.StatusWithoutPeers(ctx) if err != nil { t.Fatal(err) } diff --git a/cmd/stunstamp/stunstamp.go b/cmd/stunstamp/stunstamp.go index 71ed505690243..153dc9303bbb0 100644 --- a/cmd/stunstamp/stunstamp.go +++ b/cmd/stunstamp/stunstamp.go @@ -135,18 +135,18 @@ type lportsPool struct { ports []int } -func (l *lportsPool) get() int { - l.Lock() - defer l.Unlock() - ret := l.ports[0] - l.ports = append(l.ports[:0], l.ports[1:]...) +func (pl *lportsPool) get() int { + pl.Lock() + defer pl.Unlock() + ret := pl.ports[0] + pl.ports = append(pl.ports[:0], pl.ports[1:]...) return ret } -func (l *lportsPool) put(i int) { - l.Lock() - defer l.Unlock() - l.ports = append(l.ports, int(i)) +func (pl *lportsPool) put(i int) { + pl.Lock() + defer pl.Unlock() + pl.ports = append(pl.ports, int(i)) } var ( @@ -173,19 +173,19 @@ func init() { // measure dial time. 
type lportForTCPConn int -func (l *lportForTCPConn) Close() error { - if *l == 0 { +func (lp *lportForTCPConn) Close() error { + if *lp == 0 { return nil } - lports.put(int(*l)) + lports.put(int(*lp)) return nil } -func (l *lportForTCPConn) Write([]byte) (int, error) { +func (lp *lportForTCPConn) Write([]byte) (int, error) { return 0, errors.New("unimplemented") } -func (l *lportForTCPConn) Read([]byte) (int, error) { +func (lp *lportForTCPConn) Read([]byte) (int, error) { return 0, errors.New("unimplemented") } diff --git a/cmd/sync-containers/main.go b/cmd/sync-containers/main.go index 6317b4943ae82..63efa54531b10 100644 --- a/cmd/sync-containers/main.go +++ b/cmd/sync-containers/main.go @@ -65,9 +65,9 @@ func main() { } add, remove := diffTags(stags, dtags) - if l := len(add); l > 0 { + if ln := len(add); ln > 0 { log.Printf("%d tags to push: %s", len(add), strings.Join(add, ", ")) - if *max > 0 && l > *max { + if *max > 0 && ln > *max { log.Printf("Limiting sync to %d tags", *max) add = add[:*max] } diff --git a/cmd/tl-longchain/tl-longchain.go b/cmd/tl-longchain/tl-longchain.go index 2a4dc10ba331c..384d24222e6d5 100644 --- a/cmd/tl-longchain/tl-longchain.go +++ b/cmd/tl-longchain/tl-longchain.go @@ -75,8 +75,8 @@ func peerInfo(peer *ipnstate.TKAPeer) string { // print prints a message about a node key signature and a re-signing command if needed. 
func print(info string, nodeKey key.NodePublic, sig tka.NodeKeySignature) { - if l := chainLength(sig); l > *maxRotations { - log.Printf("%s: chain length %d, printing command to re-sign", info, l) + if ln := chainLength(sig); ln > *maxRotations { + log.Printf("%s: chain length %d, printing command to re-sign", info, ln) wrapping, _ := sig.UnverifiedWrappingPublic() fmt.Printf("tailscale lock sign %s %s\n", nodeKey, key.NLPublicFromEd25519Unsafe(wrapping).CLIString()) } else { diff --git a/drive/driveimpl/connlistener.go b/drive/driveimpl/connlistener.go index e1fcb3b675924..ff60f73404230 100644 --- a/drive/driveimpl/connlistener.go +++ b/drive/driveimpl/connlistener.go @@ -25,12 +25,12 @@ func newConnListener() *connListener { } } -func (l *connListener) Accept() (net.Conn, error) { +func (ln *connListener) Accept() (net.Conn, error) { select { - case <-l.closedCh: + case <-ln.closedCh: // TODO(oxtoacart): make this error match what a regular net.Listener does return nil, syscall.EINVAL - case conn := <-l.ch: + case conn := <-ln.ch: return conn, nil } } @@ -38,32 +38,32 @@ func (l *connListener) Accept() (net.Conn, error) { // Addr implements net.Listener. This always returns nil. It is assumed that // this method is currently unused, so it logs a warning if it ever does get // called. -func (l *connListener) Addr() net.Addr { +func (ln *connListener) Addr() net.Addr { log.Println("warning: unexpected call to connListener.Addr()") return nil } -func (l *connListener) Close() error { - l.closeMu.Lock() - defer l.closeMu.Unlock() +func (ln *connListener) Close() error { + ln.closeMu.Lock() + defer ln.closeMu.Unlock() select { - case <-l.closedCh: + case <-ln.closedCh: // Already closed. return syscall.EINVAL default: // We don't close l.ch because someone maybe trying to send to that, // which would cause a panic. 
- close(l.closedCh) + close(ln.closedCh) return nil } } -func (l *connListener) HandleConn(c net.Conn, remoteAddr net.Addr) error { +func (ln *connListener) HandleConn(c net.Conn, remoteAddr net.Addr) error { select { - case <-l.closedCh: + case <-ln.closedCh: return syscall.EINVAL - case l.ch <- &connWithRemoteAddr{Conn: c, remoteAddr: remoteAddr}: + case ln.ch <- &connWithRemoteAddr{Conn: c, remoteAddr: remoteAddr}: // Connection has been accepted. } return nil diff --git a/drive/driveimpl/connlistener_test.go b/drive/driveimpl/connlistener_test.go index d8666448af6ef..6adf15acbd56f 100644 --- a/drive/driveimpl/connlistener_test.go +++ b/drive/driveimpl/connlistener_test.go @@ -10,20 +10,20 @@ import ( ) func TestConnListener(t *testing.T) { - l, err := net.Listen("tcp", "127.0.0.1:") + ln, err := net.Listen("tcp", "127.0.0.1:") if err != nil { t.Fatalf("failed to Listen: %s", err) } cl := newConnListener() // Test that we can accept a connection - cc, err := net.Dial("tcp", l.Addr().String()) + cc, err := net.Dial("tcp", ln.Addr().String()) if err != nil { t.Fatalf("failed to Dial: %s", err) } defer cc.Close() - sc, err := l.Accept() + sc, err := ln.Accept() if err != nil { t.Fatalf("failed to Accept: %s", err) } diff --git a/drive/driveimpl/drive_test.go b/drive/driveimpl/drive_test.go index cff55fbb2c858..818e84990baef 100644 --- a/drive/driveimpl/drive_test.go +++ b/drive/driveimpl/drive_test.go @@ -467,14 +467,14 @@ func newSystem(t *testing.T) *system { tstest.ResourceCheck(t) fs := newFileSystemForLocal(log.Printf, nil) - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("failed to Listen: %s", err) } - t.Logf("FileSystemForLocal listening at %s", l.Addr()) + t.Logf("FileSystemForLocal listening at %s", ln.Addr()) go func() { for { - conn, err := l.Accept() + conn, err := ln.Accept() if err != nil { t.Logf("Accept: %v", err) return @@ -483,11 +483,11 @@ func newSystem(t *testing.T) *system { } 
}() - client := gowebdav.NewAuthClient(fmt.Sprintf("http://%s", l.Addr()), &noopAuthorizer{}) + client := gowebdav.NewAuthClient(fmt.Sprintf("http://%s", ln.Addr()), &noopAuthorizer{}) client.SetTransport(&http.Transport{DisableKeepAlives: true}) s := &system{ t: t, - local: &local{l: l, fs: fs}, + local: &local{l: ln, fs: fs}, client: client, remotes: make(map[string]*remote), } @@ -496,11 +496,11 @@ func newSystem(t *testing.T) *system { } func (s *system) addRemote(name string) string { - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { s.t.Fatalf("failed to Listen: %s", err) } - s.t.Logf("Remote for %v listening at %s", name, l.Addr()) + s.t.Logf("Remote for %v listening at %s", name, ln.Addr()) fileServer, err := NewFileServer() if err != nil { @@ -510,14 +510,14 @@ func (s *system) addRemote(name string) string { s.t.Logf("FileServer for %v listening at %s", name, fileServer.Addr()) r := &remote{ - l: l, + l: ln, fileServer: fileServer, fs: NewFileSystemForRemote(log.Printf), shares: make(map[string]string), permissions: make(map[string]drive.Permission), } r.fs.SetFileServerAddr(fileServer.Addr()) - go http.Serve(l, r) + go http.Serve(ln, r) s.remotes[name] = r remotes := make([]*drive.Remote, 0, len(s.remotes)) diff --git a/drive/driveimpl/fileserver.go b/drive/driveimpl/fileserver.go index 113cb3b440218..d448d83af761d 100644 --- a/drive/driveimpl/fileserver.go +++ b/drive/driveimpl/fileserver.go @@ -20,7 +20,7 @@ import ( // It's typically used in a separate process from the actual Taildrive server to // serve up files as an unprivileged user. type FileServer struct { - l net.Listener + ln net.Listener secretToken string shareHandlers map[string]http.Handler sharesMu sync.RWMutex @@ -41,10 +41,10 @@ type FileServer struct { // called. 
func NewFileServer() (*FileServer, error) { // path := filepath.Join(os.TempDir(), fmt.Sprintf("%v.socket", uuid.New().String())) - // l, err := safesocket.Listen(path) + // ln, err := safesocket.Listen(path) // if err != nil { // TODO(oxtoacart): actually get safesocket working in more environments (MacOS Sandboxed, Windows, ???) - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, fmt.Errorf("listen: %w", err) } @@ -55,7 +55,7 @@ func NewFileServer() (*FileServer, error) { } return &FileServer{ - l: l, + ln: ln, secretToken: secretToken, shareHandlers: make(map[string]http.Handler), }, nil @@ -74,12 +74,12 @@ func generateSecretToken() (string, error) { // Addr returns the address at which this FileServer is listening. This // includes the secret token in front of the address, delimited by a pipe |. func (s *FileServer) Addr() string { - return fmt.Sprintf("%s|%s", s.secretToken, s.l.Addr().String()) + return fmt.Sprintf("%s|%s", s.secretToken, s.ln.Addr().String()) } // Serve() starts serving files and blocks until it encounters a fatal error. func (s *FileServer) Serve() error { - return http.Serve(s.l, s) + return http.Serve(s.ln, s) } // LockShares locks the map of shares in preparation for manipulating it. @@ -162,5 +162,5 @@ func (s *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (s *FileServer) Close() error { - return s.l.Close() + return s.ln.Close() } diff --git a/feature/sdnotify/sdnotify_linux.go b/feature/sdnotify/sdnotify_linux.go index b005f1bdb2bb2..2b13e24bbe509 100644 --- a/feature/sdnotify/sdnotify_linux.go +++ b/feature/sdnotify/sdnotify_linux.go @@ -29,8 +29,8 @@ type logOnce struct { sync.Once } -func (l *logOnce) logf(format string, args ...any) { - l.Once.Do(func() { +func (lg *logOnce) logf(format string, args ...any) { + lg.Once.Do(func() { log.Printf(format, args...) 
}) } diff --git a/ipn/localapi/tailnetlock.go b/ipn/localapi/tailnetlock.go index 4baadb7339871..e5f999bb8847e 100644 --- a/ipn/localapi/tailnetlock.go +++ b/ipn/localapi/tailnetlock.go @@ -266,12 +266,12 @@ func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) { limit := 50 if limitStr := r.FormValue("limit"); limitStr != "" { - l, err := strconv.Atoi(limitStr) + lm, err := strconv.Atoi(limitStr) if err != nil { http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) return } - limit = int(l) + limit = int(lm) } updates, err := h.b.NetworkLockLog(limit) diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 4026f90848ef1..670df3b95097e 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -352,12 +352,12 @@ type ServiceMonitor struct { type Labels map[string]LabelValue -func (l Labels) Parse() map[string]string { - if l == nil { +func (lb Labels) Parse() map[string]string { + if lb == nil { return nil } - m := make(map[string]string, len(l)) - for k, v := range l { + m := make(map[string]string, len(lb)) + for k, v := range lb { m[k] = string(v) } return m diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index f2fd4ea55f554..87205c4e6f610 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -99,7 +99,7 @@ func Test_conn_Read(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - l := zl.Sugar() + log := zl.Sugar() tc := &fakes.TestConn{} sr := &fakes.TestSessionRecorder{} rec := tsrecorder.New(sr, cl, cl.Now(), true, zl.Sugar()) @@ -110,7 +110,7 @@ func Test_conn_Read(t *testing.T) { c := &conn{ ctx: ctx, Conn: tc, - log: l, + log: log, hasTerm: true, initialCastHeaderSent: make(chan struct{}), rec: rec, diff --git a/kube/egressservices/egressservices.go 
b/kube/egressservices/egressservices.go index 2515f1bf3a476..56c874f31dbb1 100644 --- a/kube/egressservices/egressservices.go +++ b/kube/egressservices/egressservices.go @@ -69,12 +69,12 @@ var _ json.Unmarshaler = &PortMaps{} func (p *PortMaps) UnmarshalJSON(data []byte) error { *p = make(map[PortMap]struct{}) - var l []PortMap - if err := json.Unmarshal(data, &l); err != nil { + var v []PortMap + if err := json.Unmarshal(data, &v); err != nil { return err } - for _, pm := range l { + for _, pm := range v { (*p)[pm] = struct{}{} } @@ -82,12 +82,12 @@ func (p *PortMaps) UnmarshalJSON(data []byte) error { } func (p PortMaps) MarshalJSON() ([]byte, error) { - l := make([]PortMap, 0, len(p)) + v := make([]PortMap, 0, len(p)) for pm := range p { - l = append(l, pm) + v = append(v, pm) } - return json.Marshal(l) + return json.Marshal(v) } // Status represents the currently configured firewall rules for all egress diff --git a/kube/localclient/local-client.go b/kube/localclient/local-client.go index 5d541e3655ddb..550b3ae742c34 100644 --- a/kube/localclient/local-client.go +++ b/kube/localclient/local-client.go @@ -40,10 +40,10 @@ type localClient struct { lc *local.Client } -func (l *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { - return l.lc.WatchIPNBus(ctx, mask) +func (lc *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { + return lc.lc.WatchIPNBus(ctx, mask) } -func (l *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { - return l.lc.CertPair(ctx, domain) +func (lc *localClient) CertPair(ctx context.Context, domain string) ([]byte, []byte, error) { + return lc.lc.CertPair(ctx, domain) } diff --git a/log/sockstatlog/logger.go b/log/sockstatlog/logger.go index e0744de0f089a..8ddfabb866745 100644 --- a/log/sockstatlog/logger.go +++ b/log/sockstatlog/logger.go @@ -146,33 +146,33 @@ func NewLogger(logdir string, logf logger.Logf, logID 
logid.PublicID, netMon *ne // SetLoggingEnabled enables or disables logging. // When disabled, socket stats are not polled and no new logs are written to disk. // Existing logs can still be fetched via the C2N API. -func (l *Logger) SetLoggingEnabled(v bool) { - old := l.enabled.Load() - if old != v && l.enabled.CompareAndSwap(old, v) { +func (lg *Logger) SetLoggingEnabled(v bool) { + old := lg.enabled.Load() + if old != v && lg.enabled.CompareAndSwap(old, v) { if v { - if l.eventCh == nil { + if lg.eventCh == nil { // eventCh should be large enough for the number of events that will occur within logInterval. // Add an extra second's worth of events to ensure we don't drop any. - l.eventCh = make(chan event, (logInterval+time.Second)/pollInterval) + lg.eventCh = make(chan event, (logInterval+time.Second)/pollInterval) } - l.ctx, l.cancelFn = context.WithCancel(context.Background()) - go l.poll() - go l.logEvents() + lg.ctx, lg.cancelFn = context.WithCancel(context.Background()) + go lg.poll() + go lg.logEvents() } else { - l.cancelFn() + lg.cancelFn() } } } -func (l *Logger) Write(p []byte) (int, error) { - return l.logger.Write(p) +func (lg *Logger) Write(p []byte) (int, error) { + return lg.logger.Write(p) } // poll fetches the current socket stats at the configured time interval, // calculates the delta since the last poll, // and writes any non-zero values to the logger event channel. // This method does not return. -func (l *Logger) poll() { +func (lg *Logger) poll() { // last is the last set of socket stats we saw. 
var lastStats *sockstats.SockStats var lastTime time.Time @@ -180,7 +180,7 @@ func (l *Logger) poll() { ticker := time.NewTicker(pollInterval) for { select { - case <-l.ctx.Done(): + case <-lg.ctx.Done(): ticker.Stop() return case t := <-ticker.C: @@ -196,7 +196,7 @@ func (l *Logger) poll() { if stats.CurrentInterfaceCellular { e.IsCellularInterface = 1 } - l.eventCh <- e + lg.eventCh <- e } } lastTime = t @@ -207,14 +207,14 @@ func (l *Logger) poll() { // logEvents reads events from the event channel at logInterval and logs them to disk. // This method does not return. -func (l *Logger) logEvents() { - enc := json.NewEncoder(l) +func (lg *Logger) logEvents() { + enc := json.NewEncoder(lg) flush := func() { for { select { - case e := <-l.eventCh: + case e := <-lg.eventCh: if err := enc.Encode(e); err != nil { - l.logf("sockstatlog: error encoding log: %v", err) + lg.logf("sockstatlog: error encoding log: %v", err) } default: return @@ -224,7 +224,7 @@ func (l *Logger) logEvents() { ticker := time.NewTicker(logInterval) for { select { - case <-l.ctx.Done(): + case <-lg.ctx.Done(): ticker.Stop() return case <-ticker.C: @@ -233,29 +233,29 @@ func (l *Logger) logEvents() { } } -func (l *Logger) LogID() string { - if l.logger == nil { +func (lg *Logger) LogID() string { + if lg.logger == nil { return "" } - return l.logger.PrivateID().Public().String() + return lg.logger.PrivateID().Public().String() } // Flush sends pending logs to the log server and flushes them from the local buffer. 
-func (l *Logger) Flush() { - l.logger.StartFlush() +func (lg *Logger) Flush() { + lg.logger.StartFlush() } -func (l *Logger) Shutdown(ctx context.Context) { - if l.cancelFn != nil { - l.cancelFn() +func (lg *Logger) Shutdown(ctx context.Context) { + if lg.cancelFn != nil { + lg.cancelFn() } - l.filch.Close() - l.logger.Shutdown(ctx) + lg.filch.Close() + lg.logger.Shutdown(ctx) type closeIdler interface { CloseIdleConnections() } - if tr, ok := l.tr.(closeIdler); ok { + if tr, ok := lg.tr.(closeIdler); ok { tr.CloseIdleConnections() } } diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 26858b7132ef6..f7491783ad781 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -193,8 +193,8 @@ type logWriter struct { logger *log.Logger } -func (l logWriter) Write(buf []byte) (int, error) { - l.logger.Printf("%s", buf) +func (lg logWriter) Write(buf []byte) (int, error) { + lg.logger.Printf("%s", buf) return len(buf), nil } diff --git a/logtail/logtail.go b/logtail/logtail.go index 6ff4dd04f069a..2879c6b0d3cf8 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -100,7 +100,7 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if !cfg.CopyPrivateID.IsZero() { urlSuffix = "?copyId=" + cfg.CopyPrivateID.String() } - l := &Logger{ + logger := &Logger{ privateID: cfg.PrivateID, stderr: cfg.Stderr, stderrLevel: int64(cfg.StderrLevel), @@ -124,19 +124,19 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { } if cfg.Bus != nil { - l.eventClient = cfg.Bus.Client("logtail.Logger") + logger.eventClient = cfg.Bus.Client("logtail.Logger") // Subscribe to change deltas from NetMon to detect when the network comes up. 
- eventbus.SubscribeFunc(l.eventClient, l.onChangeDelta) + eventbus.SubscribeFunc(logger.eventClient, logger.onChangeDelta) } - l.SetSockstatsLabel(sockstats.LabelLogtailLogger) - l.compressLogs = cfg.CompressLogs + logger.SetSockstatsLabel(sockstats.LabelLogtailLogger) + logger.compressLogs = cfg.CompressLogs ctx, cancel := context.WithCancel(context.Background()) - l.uploadCancel = cancel + logger.uploadCancel = cancel - go l.uploading(ctx) - l.Write([]byte("logtail started")) - return l + go logger.uploading(ctx) + logger.Write([]byte("logtail started")) + return logger } // Logger writes logs, splitting them as configured between local @@ -190,27 +190,27 @@ func (p *atomicSocktatsLabel) Store(label sockstats.Label) { p.p.Store(uint32(la // SetVerbosityLevel controls the verbosity level that should be // written to stderr. 0 is the default (not verbose). Levels 1 or higher // are increasingly verbose. -func (l *Logger) SetVerbosityLevel(level int) { - atomic.StoreInt64(&l.stderrLevel, int64(level)) +func (lg *Logger) SetVerbosityLevel(level int) { + atomic.StoreInt64(&lg.stderrLevel, int64(level)) } // SetNetMon sets the network monitor. // // It should not be changed concurrently with log writes and should // only be set once. -func (l *Logger) SetNetMon(lm *netmon.Monitor) { - l.netMonitor = lm +func (lg *Logger) SetNetMon(lm *netmon.Monitor) { + lg.netMonitor = lm } // SetSockstatsLabel sets the label used in sockstat logs to identify network traffic from this logger. -func (l *Logger) SetSockstatsLabel(label sockstats.Label) { - l.sockstatsLabel.Store(label) +func (lg *Logger) SetSockstatsLabel(label sockstats.Label) { + lg.sockstatsLabel.Store(label) } // PrivateID returns the logger's private log ID. // // It exists for internal use only. 
-func (l *Logger) PrivateID() logid.PrivateID { return l.privateID } +func (lg *Logger) PrivateID() logid.PrivateID { return lg.privateID } // Shutdown gracefully shuts down the logger while completing any // remaining uploads. @@ -218,33 +218,33 @@ func (l *Logger) PrivateID() logid.PrivateID { return l.privateID } // It will block, continuing to try and upload unless the passed // context object interrupts it by being done. // If the shutdown is interrupted, an error is returned. -func (l *Logger) Shutdown(ctx context.Context) error { +func (lg *Logger) Shutdown(ctx context.Context) error { done := make(chan struct{}) go func() { select { case <-ctx.Done(): - l.uploadCancel() - <-l.shutdownDone - case <-l.shutdownDone: + lg.uploadCancel() + <-lg.shutdownDone + case <-lg.shutdownDone: } close(done) - l.httpc.CloseIdleConnections() + lg.httpc.CloseIdleConnections() }() - if l.eventClient != nil { - l.eventClient.Close() + if lg.eventClient != nil { + lg.eventClient.Close() } - l.shutdownStartMu.Lock() + lg.shutdownStartMu.Lock() select { - case <-l.shutdownStart: - l.shutdownStartMu.Unlock() + case <-lg.shutdownStart: + lg.shutdownStartMu.Unlock() return nil default: } - close(l.shutdownStart) - l.shutdownStartMu.Unlock() + close(lg.shutdownStart) + lg.shutdownStartMu.Unlock() - io.WriteString(l, "logger closing down\n") + io.WriteString(lg, "logger closing down\n") <-done return nil @@ -254,8 +254,8 @@ func (l *Logger) Shutdown(ctx context.Context) error { // process, and any associated goroutines. // // Deprecated: use Shutdown -func (l *Logger) Close() { - l.Shutdown(context.Background()) +func (lg *Logger) Close() { + lg.Shutdown(context.Background()) } // drainBlock is called by drainPending when there are no logs to drain. @@ -265,11 +265,11 @@ func (l *Logger) Close() { // // If the caller specified FlushInterface, drainWake is only sent to // periodically. 
-func (l *Logger) drainBlock() (shuttingDown bool) { +func (lg *Logger) drainBlock() (shuttingDown bool) { select { - case <-l.shutdownStart: + case <-lg.shutdownStart: return true - case <-l.drainWake: + case <-lg.drainWake: } return false } @@ -277,20 +277,20 @@ func (l *Logger) drainBlock() (shuttingDown bool) { // drainPending drains and encodes a batch of logs from the buffer for upload. // If no logs are available, drainPending blocks until logs are available. // The returned buffer is only valid until the next call to drainPending. -func (l *Logger) drainPending() (b []byte) { - b = l.drainBuf[:0] +func (lg *Logger) drainPending() (b []byte) { + b = lg.drainBuf[:0] b = append(b, '[') defer func() { b = bytes.TrimRight(b, ",") b = append(b, ']') - l.drainBuf = b + lg.drainBuf = b if len(b) <= len("[]") { b = nil } }() - maxLen := cmp.Or(l.maxUploadSize, maxSize) - if l.lowMem { + maxLen := cmp.Or(lg.maxUploadSize, maxSize) + if lg.lowMem { // When operating in a low memory environment, it is better to upload // in multiple operations than it is to allocate a large body and OOM. // Even if maxLen is less than maxSize, we can still upload an entry @@ -298,13 +298,13 @@ func (l *Logger) drainPending() (b []byte) { maxLen /= lowMemRatio } for len(b) < maxLen { - line, err := l.buffer.TryReadLine() + line, err := lg.buffer.TryReadLine() switch { case err == io.EOF: return b case err != nil: b = append(b, '{') - b = l.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0) + b = lg.appendMetadata(b, false, true, 0, 0, "reading ringbuffer: "+err.Error(), nil, 0) b = bytes.TrimRight(b, ",") b = append(b, '}') return b @@ -318,10 +318,10 @@ func (l *Logger) drainPending() (b []byte) { // in our buffer from a previous large write, let it go. 
if cap(b) > bufferSize { b = bytes.Clone(b) - l.drainBuf = b + lg.drainBuf = b } - if shuttingDown := l.drainBlock(); shuttingDown { + if shuttingDown := lg.drainBlock(); shuttingDown { return b } continue @@ -338,18 +338,18 @@ func (l *Logger) drainPending() (b []byte) { default: // This is probably a log added to stderr by filch // outside of the logtail logger. Encode it. - if !l.explainedRaw { - fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n") - fmt.Fprintf(l.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n") - fmt.Fprintf(l.stderr, "RAW-STDERR: ***\n") - fmt.Fprintf(l.stderr, "RAW-STDERR:\n") - l.explainedRaw = true + if !lg.explainedRaw { + fmt.Fprintf(lg.stderr, "RAW-STDERR: ***\n") + fmt.Fprintf(lg.stderr, "RAW-STDERR: *** Lines prefixed with RAW-STDERR below bypassed logtail and probably come from a previous run of the program\n") + fmt.Fprintf(lg.stderr, "RAW-STDERR: ***\n") + fmt.Fprintf(lg.stderr, "RAW-STDERR:\n") + lg.explainedRaw = true } - fmt.Fprintf(l.stderr, "RAW-STDERR: %s", b) + fmt.Fprintf(lg.stderr, "RAW-STDERR: %s", b) // Do not add a client time, as it could be really old. // Do not include instance key or ID either, // since this came from a different instance. - b = l.appendText(b, line, true, 0, 0, 0) + b = lg.appendText(b, line, true, 0, 0, 0) } b = append(b, ',') } @@ -357,14 +357,14 @@ func (l *Logger) drainPending() (b []byte) { } // This is the goroutine that repeatedly uploads logs in the background. -func (l *Logger) uploading(ctx context.Context) { - defer close(l.shutdownDone) +func (lg *Logger) uploading(ctx context.Context) { + defer close(lg.shutdownDone) for { - body := l.drainPending() + body := lg.drainPending() origlen := -1 // sentinel value: uncompressed // Don't attempt to compress tiny bodies; not worth the CPU cycles. 
- if l.compressLogs && len(body) > 256 { + if lg.compressLogs && len(body) > 256 { zbody := zstdframe.AppendEncode(nil, body, zstdframe.FastestCompression, zstdframe.LowMemory(true)) @@ -381,20 +381,20 @@ func (l *Logger) uploading(ctx context.Context) { var numFailures int var firstFailure time.Time for len(body) > 0 && ctx.Err() == nil { - retryAfter, err := l.upload(ctx, body, origlen) + retryAfter, err := lg.upload(ctx, body, origlen) if err != nil { numFailures++ - firstFailure = l.clock.Now() + firstFailure = lg.clock.Now() - if !l.internetUp() { - fmt.Fprintf(l.stderr, "logtail: internet down; waiting\n") - l.awaitInternetUp(ctx) + if !lg.internetUp() { + fmt.Fprintf(lg.stderr, "logtail: internet down; waiting\n") + lg.awaitInternetUp(ctx) continue } // Only print the same message once. if currError := err.Error(); lastError != currError { - fmt.Fprintf(l.stderr, "logtail: upload: %v\n", err) + fmt.Fprintf(lg.stderr, "logtail: upload: %v\n", err) lastError = currError } @@ -407,55 +407,55 @@ func (l *Logger) uploading(ctx context.Context) { } else { // Only print a success message after recovery. if numFailures > 0 { - fmt.Fprintf(l.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, l.clock.Since(firstFailure).Round(time.Second)) + fmt.Fprintf(lg.stderr, "logtail: upload succeeded after %d failures and %s\n", numFailures, lg.clock.Since(firstFailure).Round(time.Second)) } break } } select { - case <-l.shutdownStart: + case <-lg.shutdownStart: return default: } } } -func (l *Logger) internetUp() bool { +func (lg *Logger) internetUp() bool { select { - case <-l.networkIsUp.Ready(): + case <-lg.networkIsUp.Ready(): return true default: - if l.netMonitor == nil { + if lg.netMonitor == nil { return true // No way to tell, so assume it is. 
} - return l.netMonitor.InterfaceState().AnyInterfaceUp() + return lg.netMonitor.InterfaceState().AnyInterfaceUp() } } // onChangeDelta is an eventbus subscriber function that handles // [netmon.ChangeDelta] events to detect whether the Internet is expected to be // reachable. -func (l *Logger) onChangeDelta(delta *netmon.ChangeDelta) { +func (lg *Logger) onChangeDelta(delta *netmon.ChangeDelta) { if delta.New.AnyInterfaceUp() { - fmt.Fprintf(l.stderr, "logtail: internet back up\n") - l.networkIsUp.Set() + fmt.Fprintf(lg.stderr, "logtail: internet back up\n") + lg.networkIsUp.Set() } else { - fmt.Fprintf(l.stderr, "logtail: network changed, but is not up\n") - l.networkIsUp.Reset() + fmt.Fprintf(lg.stderr, "logtail: network changed, but is not up\n") + lg.networkIsUp.Reset() } } -func (l *Logger) awaitInternetUp(ctx context.Context) { - if l.eventClient != nil { +func (lg *Logger) awaitInternetUp(ctx context.Context) { + if lg.eventClient != nil { select { - case <-l.networkIsUp.Ready(): + case <-lg.networkIsUp.Ready(): case <-ctx.Done(): } return } upc := make(chan bool, 1) - defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { + defer lg.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { if delta.New.AnyInterfaceUp() { select { case upc <- true: @@ -463,12 +463,12 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { } } })() - if l.internetUp() { + if lg.internetUp() { return } select { case <-upc: - fmt.Fprintf(l.stderr, "logtail: internet back up\n") + fmt.Fprintf(lg.stderr, "logtail: internet back up\n") case <-ctx.Done(): } } @@ -476,13 +476,13 @@ func (l *Logger) awaitInternetUp(ctx context.Context) { // upload uploads body to the log server. // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. 
-func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) { +func (lg *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) { const maxUploadTime = 45 * time.Second - ctx = sockstats.WithSockStats(ctx, l.sockstatsLabel.Load(), l.Logf) + ctx = sockstats.WithSockStats(ctx, lg.sockstatsLabel.Load(), lg.Logf) ctx, cancel := context.WithTimeout(ctx, maxUploadTime) defer cancel() - req, err := http.NewRequestWithContext(ctx, "POST", l.url, bytes.NewReader(body)) + req, err := http.NewRequestWithContext(ctx, "POST", lg.url, bytes.NewReader(body)) if err != nil { // I know of no conditions under which this could fail. // Report it very loudly. @@ -513,8 +513,8 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft compressedNote = "compressed" } - l.httpDoCalls.Add(1) - resp, err := l.httpc.Do(req) + lg.httpDoCalls.Add(1) + resp, err := lg.httpc.Do(req) if err != nil { return 0, fmt.Errorf("log upload of %d bytes %s failed: %v", len(body), compressedNote, err) } @@ -533,16 +533,16 @@ func (l *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAft // // TODO(bradfitz): this apparently just returns nil, as of tailscale/corp@9c2ec35. // Finish cleaning this up. -func (l *Logger) Flush() error { +func (lg *Logger) Flush() error { return nil } // StartFlush starts a log upload, if anything is pending. // // If l is nil, StartFlush is a no-op. -func (l *Logger) StartFlush() { - if l != nil { - l.tryDrainWake() +func (lg *Logger) StartFlush() { + if lg != nil { + lg.tryDrainWake() } } @@ -558,41 +558,41 @@ var debugWakesAndUploads = envknob.RegisterBool("TS_DEBUG_LOGTAIL_WAKES") // tryDrainWake tries to send to lg.drainWake, to cause an uploading wakeup. // It does not block. 
-func (l *Logger) tryDrainWake() { - l.flushPending.Store(false) +func (lg *Logger) tryDrainWake() { + lg.flushPending.Store(false) if debugWakesAndUploads() { // Using println instead of log.Printf here to avoid recursing back into // ourselves. - println("logtail: try drain wake, numHTTP:", l.httpDoCalls.Load()) + println("logtail: try drain wake, numHTTP:", lg.httpDoCalls.Load()) } select { - case l.drainWake <- struct{}{}: + case lg.drainWake <- struct{}{}: default: } } -func (l *Logger) sendLocked(jsonBlob []byte) (int, error) { +func (lg *Logger) sendLocked(jsonBlob []byte) (int, error) { tapSend(jsonBlob) if logtailDisabled.Load() { return len(jsonBlob), nil } - n, err := l.buffer.Write(jsonBlob) + n, err := lg.buffer.Write(jsonBlob) flushDelay := defaultFlushDelay - if l.flushDelayFn != nil { - flushDelay = l.flushDelayFn() + if lg.flushDelayFn != nil { + flushDelay = lg.flushDelayFn() } if flushDelay > 0 { - if l.flushPending.CompareAndSwap(false, true) { - if l.flushTimer == nil { - l.flushTimer = l.clock.AfterFunc(flushDelay, l.tryDrainWake) + if lg.flushPending.CompareAndSwap(false, true) { + if lg.flushTimer == nil { + lg.flushTimer = lg.clock.AfterFunc(flushDelay, lg.tryDrainWake) } else { - l.flushTimer.Reset(flushDelay) + lg.flushTimer.Reset(flushDelay) } } } else { - l.tryDrainWake() + lg.tryDrainWake() } return n, err } @@ -600,13 +600,13 @@ func (l *Logger) sendLocked(jsonBlob []byte) (int, error) { // appendMetadata appends optional "logtail", "metrics", and "v" JSON members. // This assumes dst is already within a JSON object. // Each member is comma-terminated. 
-func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte { +func (lg *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, procID uint32, procSequence uint64, errDetail string, errData jsontext.Value, level int) []byte { // Append optional logtail metadata. if !skipClientTime || procID != 0 || procSequence != 0 || errDetail != "" || errData != nil { dst = append(dst, `"logtail":{`...) if !skipClientTime { dst = append(dst, `"client_time":"`...) - dst = l.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano) + dst = lg.clock.Now().UTC().AppendFormat(dst, time.RFC3339Nano) dst = append(dst, '"', ',') } if procID != 0 { @@ -639,8 +639,8 @@ func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, pr } // Append optional metrics metadata. - if !skipMetrics && l.metricsDelta != nil { - if d := l.metricsDelta(); d != "" { + if !skipMetrics && lg.metricsDelta != nil { + if d := lg.metricsDelta(); d != "" { dst = append(dst, `"metrics":"`...) dst = append(dst, d...) dst = append(dst, '"', ',') @@ -660,10 +660,10 @@ func (l *Logger) appendMetadata(dst []byte, skipClientTime, skipMetrics bool, pr } // appendText appends a raw text message in the Tailscale JSON log entry format. -func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte { +func (lg *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, procSequence uint64, level int) []byte { dst = slices.Grow(dst, len(src)) dst = append(dst, '{') - dst = l.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level) + dst = lg.appendMetadata(dst, skipClientTime, false, procID, procSequence, "", nil, level) if len(src) == 0 { dst = bytes.TrimRight(dst, ",") return append(dst, "}\n"...) 
@@ -672,7 +672,7 @@ func (l *Logger) appendText(dst, src []byte, skipClientTime bool, procID uint32, // Append the text string, which may be truncated. // Invalid UTF-8 will be mangled with the Unicode replacement character. max := maxTextSize - if l.lowMem { + if lg.lowMem { max /= lowMemRatio } dst = append(dst, `"text":`...) @@ -697,12 +697,12 @@ func appendTruncatedString(dst, src []byte, n int) []byte { // appendTextOrJSONLocked appends a raw text message or a raw JSON object // in the Tailscale JSON log format. -func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { - if l.includeProcSequence { - l.procSequence++ +func (lg *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { + if lg.includeProcSequence { + lg.procSequence++ } if len(src) == 0 || src[0] != '{' { - return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level) + return lg.appendText(dst, src, lg.skipClientTime, lg.procID, lg.procSequence, level) } // Check whether the input is a valid JSON object and @@ -714,11 +714,11 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // However, bytes.NewBuffer normally allocates unless // we immediately shallow copy it into a pre-allocated Buffer struct. // See https://go.dev/issue/67004. - l.bytesBuf = *bytes.NewBuffer(src) - defer func() { l.bytesBuf = bytes.Buffer{} }() // avoid pinning src + lg.bytesBuf = *bytes.NewBuffer(src) + defer func() { lg.bytesBuf = bytes.Buffer{} }() // avoid pinning src - dec := &l.jsonDec - dec.Reset(&l.bytesBuf) + dec := &lg.jsonDec + dec.Reset(&lg.bytesBuf) if tok, err := dec.ReadToken(); tok.Kind() != '{' || err != nil { return false } @@ -750,7 +750,7 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // Treat invalid JSON as a raw text message. 
if !validJSON { - return l.appendText(dst, src, l.skipClientTime, l.procID, l.procSequence, level) + return lg.appendText(dst, src, lg.skipClientTime, lg.procID, lg.procSequence, level) } // Check whether the JSON payload is too large. @@ -758,13 +758,13 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { // That's okay as the Tailscale log service limit is actually 2*maxSize. // However, so long as logging applications aim to target the maxSize limit, // there should be no trouble eventually uploading logs. - maxLen := cmp.Or(l.maxUploadSize, maxSize) + maxLen := cmp.Or(lg.maxUploadSize, maxSize) if len(src) > maxLen { errDetail := fmt.Sprintf("entry too large: %d bytes", len(src)) errData := appendTruncatedString(nil, src, maxLen/len(`\uffff`)) // escaping could increase size dst = append(dst, '{') - dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level) + dst = lg.appendMetadata(dst, lg.skipClientTime, true, lg.procID, lg.procSequence, errDetail, errData, level) dst = bytes.TrimRight(dst, ",") return append(dst, "}\n"...) } @@ -781,7 +781,7 @@ func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { } dst = slices.Grow(dst, len(src)) dst = append(dst, '{') - dst = l.appendMetadata(dst, l.skipClientTime, true, l.procID, l.procSequence, errDetail, errData, level) + dst = lg.appendMetadata(dst, lg.skipClientTime, true, lg.procID, lg.procSequence, errDetail, errData, level) if logtailValLength > 0 { // Exclude original logtail member from the message. dst = appendWithoutNewline(dst, src[len("{"):logtailKeyOffset]) @@ -808,8 +808,8 @@ func appendWithoutNewline(dst, src []byte) []byte { } // Logf logs to l using the provided fmt-style format and optional arguments. -func (l *Logger) Logf(format string, args ...any) { - fmt.Fprintf(l, format, args...) +func (lg *Logger) Logf(format string, args ...any) { + fmt.Fprintf(lg, format, args...) 
} // Write logs an encoded JSON blob. @@ -818,29 +818,29 @@ func (l *Logger) Logf(format string, args ...any) { // then contents is fit into a JSON blob and written. // // This is intended as an interface for the stdlib "log" package. -func (l *Logger) Write(buf []byte) (int, error) { +func (lg *Logger) Write(buf []byte) (int, error) { if len(buf) == 0 { return 0, nil } inLen := len(buf) // length as provided to us, before modifications to downstream writers level, buf := parseAndRemoveLogLevel(buf) - if l.stderr != nil && l.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&l.stderrLevel) { + if lg.stderr != nil && lg.stderr != io.Discard && int64(level) <= atomic.LoadInt64(&lg.stderrLevel) { if buf[len(buf)-1] == '\n' { - l.stderr.Write(buf) + lg.stderr.Write(buf) } else { // The log package always line-terminates logs, // so this is an uncommon path. withNL := append(buf[:len(buf):len(buf)], '\n') - l.stderr.Write(withNL) + lg.stderr.Write(withNL) } } - l.writeLock.Lock() - defer l.writeLock.Unlock() + lg.writeLock.Lock() + defer lg.writeLock.Unlock() - b := l.appendTextOrJSONLocked(l.writeBuf[:0], buf, level) - _, err := l.sendLocked(b) + b := lg.appendTextOrJSONLocked(lg.writeBuf[:0], buf, level) + _, err := lg.sendLocked(b) return inLen, err } diff --git a/logtail/logtail_test.go b/logtail/logtail_test.go index a92f88b4bb03e..b618fc0d7bc65 100644 --- a/logtail/logtail_test.go +++ b/logtail/logtail_test.go @@ -29,11 +29,11 @@ func TestFastShutdown(t *testing.T) { func(w http.ResponseWriter, r *http.Request) {})) defer testServ.Close() - l := NewLogger(Config{ + logger := NewLogger(Config{ BaseURL: testServ.URL, Bus: eventbustest.NewBus(t), }, t.Logf) - err := l.Shutdown(ctx) + err := logger.Shutdown(ctx) if err != nil { t.Error(err) } @@ -64,7 +64,7 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) { t.Cleanup(ts.srv.Close) - l := NewLogger(Config{ + logger := NewLogger(Config{ BaseURL: ts.srv.URL, Bus: eventbustest.NewBus(t), }, 
t.Logf) @@ -75,14 +75,14 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) { t.Errorf("unknown start logging statement: %q", string(body)) } - return &ts, l + return &ts, logger } func TestDrainPendingMessages(t *testing.T) { - ts, l := NewLogtailTestHarness(t) + ts, logger := NewLogtailTestHarness(t) for range logLines { - l.Write([]byte("log line")) + logger.Write([]byte("log line")) } // all of the "log line" messages usually arrive at once, but poll if needed. @@ -96,14 +96,14 @@ func TestDrainPendingMessages(t *testing.T) { // if we never find count == logLines, the test will eventually time out. } - err := l.Shutdown(context.Background()) + err := logger.Shutdown(context.Background()) if err != nil { t.Error(err) } } func TestEncodeAndUploadMessages(t *testing.T) { - ts, l := NewLogtailTestHarness(t) + ts, logger := NewLogtailTestHarness(t) tests := []struct { name string @@ -123,7 +123,7 @@ func TestEncodeAndUploadMessages(t *testing.T) { } for _, tt := range tests { - io.WriteString(l, tt.log) + io.WriteString(logger, tt.log) body := <-ts.uploaded data := unmarshalOne(t, body) @@ -144,7 +144,7 @@ func TestEncodeAndUploadMessages(t *testing.T) { } } - err := l.Shutdown(context.Background()) + err := logger.Shutdown(context.Background()) if err != nil { t.Error(err) } @@ -322,9 +322,9 @@ func TestLoggerWriteResult(t *testing.T) { } func TestAppendMetadata(t *testing.T) { - var l Logger - l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) - l.metricsDelta = func() string { return "metrics" } + var lg Logger + lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) + lg.metricsDelta = func() string { return "metrics" } for _, tt := range []struct { skipClientTime bool @@ -350,7 +350,7 @@ func TestAppendMetadata(t *testing.T) { {procID: 1, procSeq: 2, errDetail: "error", errData: jsontext.Value(`["something","bad","happened"]`), level: 2, want: 
`"logtail":{"client_time":"2000-01-01T00:00:00Z","proc_id":1,"proc_seq":2,"error":{"detail":"error","bad_data":["something","bad","happened"]}},"metrics":"metrics","v":2,`}, } { - got := string(l.appendMetadata(nil, tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level)) + got := string(lg.appendMetadata(nil, tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level)) if got != tt.want { t.Errorf("appendMetadata(%v, %v, %v, %v, %v, %v, %v):\n\tgot %s\n\twant %s", tt.skipClientTime, tt.skipMetrics, tt.procID, tt.procSeq, tt.errDetail, tt.errData, tt.level, got, tt.want) } @@ -362,10 +362,10 @@ func TestAppendMetadata(t *testing.T) { } func TestAppendText(t *testing.T) { - var l Logger - l.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) - l.metricsDelta = func() string { return "metrics" } - l.lowMem = true + var lg Logger + lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) + lg.metricsDelta = func() string { return "metrics" } + lg.lowMem = true for _, tt := range []struct { text string @@ -382,7 +382,7 @@ func TestAppendText(t *testing.T) { {text: "\b\f\n\r\t\"\\", want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"\b\f\n\r\t\"\\"}`}, {text: "x" + strings.Repeat("😐", maxSize), want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z"},"metrics":"metrics","text":"x` + strings.Repeat("😐", 1023) + `…+1044484"}`}, } { - got := string(l.appendText(nil, []byte(tt.text), tt.skipClientTime, tt.procID, tt.procSeq, tt.level)) + got := string(lg.appendText(nil, []byte(tt.text), tt.skipClientTime, tt.procID, tt.procSeq, tt.level)) if !strings.HasSuffix(got, "\n") { t.Errorf("`%s` does not end with a newline", got) } @@ -397,10 +397,10 @@ func TestAppendText(t *testing.T) { } func TestAppendTextOrJSON(t *testing.T) { - var l Logger - l.clock = 
tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) - l.metricsDelta = func() string { return "metrics" } - l.lowMem = true + var lg Logger + lg.clock = tstest.NewClock(tstest.ClockOpts{Start: time.Date(2000, 01, 01, 0, 0, 0, 0, time.UTC)}) + lg.metricsDelta = func() string { return "metrics" } + lg.lowMem = true for _, tt := range []struct { in string @@ -419,7 +419,7 @@ func TestAppendTextOrJSON(t *testing.T) { {in: `{ "fizz" : "buzz" , "logtail" : "duplicate" , "wizz" : "wuzz" }`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"duplicate logtail member","bad_data":"duplicate"}}, "fizz" : "buzz" , "wizz" : "wuzz"}`}, {in: `{"long":"` + strings.Repeat("a", maxSize) + `"}`, want: `{"logtail":{"client_time":"2000-01-01T00:00:00Z","error":{"detail":"entry too large: 262155 bytes","bad_data":"{\"long\":\"` + strings.Repeat("a", 43681) + `…+218465"}}}`}, } { - got := string(l.appendTextOrJSONLocked(nil, []byte(tt.in), tt.level)) + got := string(lg.appendTextOrJSONLocked(nil, []byte(tt.in), tt.level)) if !strings.HasSuffix(got, "\n") { t.Errorf("`%s` does not end with a newline", got) } @@ -461,21 +461,21 @@ var testdataTextLog = []byte(`netcheck: report: udp=true v6=false v6os=true mapv var testdataJSONLog = 
[]byte(`{"end":"2024-04-08T21:39:15.715291586Z","nodeId":"nQRJBE7CNTRL","physicalTraffic":[{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"127.x.x.x:2","src":"100.x.x.x:0","txBytes":148,"txPkts":1},{"dst":"98.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"24.x.x.x:49973","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"73.x.x.x:41641","rxBytes":732,"rxPkts":6,"src":"100.x.x.x:0","txBytes":820,"txPkts":7},{"dst":"75.x.x.x:1025","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"75.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"174.x.x.x:35497","rxBytes":13008,"rxPkts":98,"src":"100.x.x.x:0","txBytes":26688,"txPkts":150},{"dst":"47.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5},{"dst":"64.x.x.x:41641","rxBytes":640,"rxPkts":5,"src":"100.x.x.x:0","txBytes":640,"txPkts":5}],"start":"2024-04-08T21:39:11.099495616Z","virtualTraffic":[{"dst":"100.x.x.x:33008","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32984","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:32998","proto":6,"src":"100.x.x.x:22","txBytes":1020,"txPkts":10},{"dst":"100.x.x.x:32994","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:32980","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32950","proto":6,"src":"100.x.x.x:22","txBytes":1340,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53332","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:0","proto":1,"src":"100.x.x.x:0","txByte
s":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32966","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57882","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:53326","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:57892","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:32934","proto":6,"src":"100.x.x.x:22","txBytes":8712,"txPkts":55},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32942","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:32964","proto":6,"src":"100.x.x.x:22","txBytes":1260,"txPkts":10},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:0","proto":1,"rxBytes":420,"rxPkts":5,"src":"100.x.x.x:0","txBytes":420,"txPkts":5},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37238","txBytes":60,"txPkts":1},{"dst":"100.x.x.x:22","proto":6,"src":"100.x.x.x:37252","txBytes":60,"txPkts":1}]}`) func BenchmarkWriteText(b *testing.B) { - var l Logger - l.clock = tstime.StdClock{} - l.buffer = discardBuffer{} + var lg Logger + lg.clock = tstime.StdClock{} + lg.buffer = discardBuffer{} b.ReportAllocs() for range b.N { - must.Get(l.Write(testdataTextLog)) + must.Get(lg.Write(testdataTextLog)) } } func BenchmarkWriteJSON(b *testing.B) { - var l Logger - l.clock = tstime.StdClock{} - l.buffer = discardBuffer{} + var lg Logger + lg.clock = tstime.StdClock{} + lg.buffer = discardBuffer{} b.ReportAllocs() for range b.N { - must.Get(l.Write(testdataJSONLog)) + must.Get(lg.Write(testdataJSONLog)) } } diff --git a/net/art/stride_table.go b/net/art/stride_table.go index 5ff0455fed872..5050df24500ce 100644 --- 
a/net/art/stride_table.go +++ b/net/art/stride_table.go @@ -303,21 +303,21 @@ func formatPrefixTable(addr uint8, len int) string { // // For example, childPrefixOf("192.168.0.0/16", 8) == "192.168.8.0/24". func childPrefixOf(parent netip.Prefix, stride uint8) netip.Prefix { - l := parent.Bits() - if l%8 != 0 { + ln := parent.Bits() + if ln%8 != 0 { panic("parent prefix is not 8-bit aligned") } - if l >= parent.Addr().BitLen() { + if ln >= parent.Addr().BitLen() { panic("parent prefix cannot be extended further") } - off := l / 8 + off := ln / 8 if parent.Addr().Is4() { bs := parent.Addr().As4() bs[off] = stride - return netip.PrefixFrom(netip.AddrFrom4(bs), l+8) + return netip.PrefixFrom(netip.AddrFrom4(bs), ln+8) } else { bs := parent.Addr().As16() bs[off] = stride - return netip.PrefixFrom(netip.AddrFrom16(bs), l+8) + return netip.PrefixFrom(netip.AddrFrom16(bs), ln+8) } } diff --git a/net/art/stride_table_test.go b/net/art/stride_table_test.go index bff2bb7c507fd..4ccef1fe083cb 100644 --- a/net/art/stride_table_test.go +++ b/net/art/stride_table_test.go @@ -377,8 +377,8 @@ func pfxMask(pfxLen int) uint8 { func allPrefixes() []slowEntry[int] { ret := make([]slowEntry[int], 0, lastHostIndex) for i := 1; i < lastHostIndex+1; i++ { - a, l := inversePrefixIndex(i) - ret = append(ret, slowEntry[int]{a, l, i}) + a, ln := inversePrefixIndex(i) + ret = append(ret, slowEntry[int]{a, ln, i}) } return ret } diff --git a/net/dns/manager_windows_test.go b/net/dns/manager_windows_test.go index 7c0139f455d70..aa538a0f66dcb 100644 --- a/net/dns/manager_windows_test.go +++ b/net/dns/manager_windows_test.go @@ -550,8 +550,8 @@ func genRandomSubdomains(t *testing.T, n int) []dnsname.FQDN { const charset = "abcdefghijklmnopqrstuvwxyz" for len(domains) < cap(domains) { - l := r.Intn(19) + 1 - b := make([]byte, l) + ln := r.Intn(19) + 1 + b := make([]byte, ln) for i := range b { b[i] = charset[r.Intn(len(charset))] } diff --git a/net/ktimeout/ktimeout_linux_test.go 
b/net/ktimeout/ktimeout_linux_test.go index df41567454f4b..0330923a96c13 100644 --- a/net/ktimeout/ktimeout_linux_test.go +++ b/net/ktimeout/ktimeout_linux_test.go @@ -19,11 +19,11 @@ func TestSetUserTimeout(t *testing.T) { // set in ktimeout.UserTimeout above. lc.SetMultipathTCP(false) - l := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0")) - defer l.Close() + ln := must.Get(lc.Listen(context.Background(), "tcp", "localhost:0")) + defer ln.Close() var err error - if e := must.Get(l.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { + if e := must.Get(ln.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { err = SetUserTimeout(fd, 0) }); e != nil { t.Fatal(e) @@ -31,12 +31,12 @@ func TestSetUserTimeout(t *testing.T) { if err != nil { t.Fatal(err) } - v := must.Get(unix.GetsockoptInt(int(must.Get(l.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) + v := must.Get(unix.GetsockoptInt(int(must.Get(ln.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) if v != 0 { t.Errorf("TCP_USER_TIMEOUT: got %v; want 0", v) } - if e := must.Get(l.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { + if e := must.Get(ln.(*net.TCPListener).SyscallConn()).Control(func(fd uintptr) { err = SetUserTimeout(fd, 30*time.Second) }); e != nil { t.Fatal(e) @@ -44,7 +44,7 @@ func TestSetUserTimeout(t *testing.T) { if err != nil { t.Fatal(err) } - v = must.Get(unix.GetsockoptInt(int(must.Get(l.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) + v = must.Get(unix.GetsockoptInt(int(must.Get(ln.(*net.TCPListener).File()).Fd()), unix.SOL_TCP, unix.TCP_USER_TIMEOUT)) if v != 30000 { t.Errorf("TCP_USER_TIMEOUT: got %v; want 30000", v) } diff --git a/net/ktimeout/ktimeout_test.go b/net/ktimeout/ktimeout_test.go index 7befa3b1ab077..b534f046caddb 100644 --- a/net/ktimeout/ktimeout_test.go +++ b/net/ktimeout/ktimeout_test.go @@ -14,11 +14,11 @@ func ExampleUserTimeout() { lc := net.ListenConfig{ 
Control: UserTimeout(30 * time.Second), } - l, err := lc.Listen(context.TODO(), "tcp", "127.0.0.1:0") + ln, err := lc.Listen(context.TODO(), "tcp", "127.0.0.1:0") if err != nil { fmt.Printf("error: %v", err) return } - l.Close() + ln.Close() // Output: } diff --git a/net/memnet/listener.go b/net/memnet/listener.go index 202026e160b27..dded97995bbc1 100644 --- a/net/memnet/listener.go +++ b/net/memnet/listener.go @@ -39,16 +39,16 @@ func Listen(addr string) *Listener { } // Addr implements net.Listener.Addr. -func (l *Listener) Addr() net.Addr { - return l.addr +func (ln *Listener) Addr() net.Addr { + return ln.addr } // Close closes the pipe listener. -func (l *Listener) Close() error { +func (ln *Listener) Close() error { var cleanup func() - l.closeOnce.Do(func() { - cleanup = l.onClose - close(l.closed) + ln.closeOnce.Do(func() { + cleanup = ln.onClose + close(ln.closed) }) if cleanup != nil { cleanup() @@ -57,11 +57,11 @@ func (l *Listener) Close() error { } // Accept blocks until a new connection is available or the listener is closed. -func (l *Listener) Accept() (net.Conn, error) { +func (ln *Listener) Accept() (net.Conn, error) { select { - case c := <-l.ch: + case c := <-ln.ch: return c, nil - case <-l.closed: + case <-ln.closed: return nil, net.ErrClosed } } @@ -70,18 +70,18 @@ func (l *Listener) Accept() (net.Conn, error) { // The provided Context must be non-nil. If the context expires before the // connection is complete, an error is returned. Once successfully connected // any expiration of the context will not affect the connection. 
-func (l *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, err error) { +func (ln *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, err error) { if !strings.HasSuffix(network, "tcp") { return nil, net.UnknownNetworkError(network) } - if connAddr(addr) != l.addr { + if connAddr(addr) != ln.addr { return nil, &net.AddrError{ Err: "invalid address", Addr: addr, } } - newConn := l.NewConn + newConn := ln.NewConn if newConn == nil { newConn = func(network, addr string, maxBuf int) (Conn, Conn) { return NewConn(addr, maxBuf) @@ -98,9 +98,9 @@ func (l *Listener) Dial(ctx context.Context, network, addr string) (_ net.Conn, select { case <-ctx.Done(): return nil, ctx.Err() - case <-l.closed: + case <-ln.closed: return nil, net.ErrClosed - case l.ch <- s: + case ln.ch <- s: return c, nil } } diff --git a/net/memnet/listener_test.go b/net/memnet/listener_test.go index 73b67841ad08c..b6ceb3dfa94cf 100644 --- a/net/memnet/listener_test.go +++ b/net/memnet/listener_test.go @@ -9,10 +9,10 @@ import ( ) func TestListener(t *testing.T) { - l := Listen("srv.local") - defer l.Close() + ln := Listen("srv.local") + defer ln.Close() go func() { - c, err := l.Accept() + c, err := ln.Accept() if err != nil { t.Error(err) return @@ -20,11 +20,11 @@ func TestListener(t *testing.T) { defer c.Close() }() - if c, err := l.Dial(context.Background(), "tcp", "invalid"); err == nil { + if c, err := ln.Dial(context.Background(), "tcp", "invalid"); err == nil { c.Close() t.Fatalf("dial to invalid address succeeded") } - c, err := l.Dial(context.Background(), "tcp", "srv.local") + c, err := ln.Dial(context.Background(), "tcp", "srv.local") if err != nil { t.Fatalf("dial failed: %v", err) return diff --git a/net/netaddr/netaddr.go b/net/netaddr/netaddr.go index 1ab6c053a523e..a04acd57aa670 100644 --- a/net/netaddr/netaddr.go +++ b/net/netaddr/netaddr.go @@ -34,7 +34,7 @@ func FromStdIPNet(std *net.IPNet) (prefix netip.Prefix, ok bool) { } ip = ip.Unmap() - 
if l := len(std.Mask); l != net.IPv4len && l != net.IPv6len { + if ln := len(std.Mask); ln != net.IPv4len && ln != net.IPv6len { // Invalid mask. return netip.Prefix{}, false } diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 95750b2d066f6..c5a3d2392007e 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -993,9 +993,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe c.logf("[v1] netcheck: measuring HTTPS latency of %v (%d): %v", reg.RegionCode, reg.RegionID, err) } else { rs.mu.Lock() - if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok { + if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok { mak.Set(&rs.report.RegionLatency, reg.RegionID, d) - } else if l >= d { + } else if latency >= d { rs.report.RegionLatency[reg.RegionID] = d } // We set these IPv4 and IPv6 but they're not really used @@ -1214,9 +1214,9 @@ func (c *Client) measureAllICMPLatency(ctx context.Context, rs *reportState, nee } else if ok { c.logf("[v1] ICMP latency of %v (%d): %v", reg.RegionCode, reg.RegionID, d) rs.mu.Lock() - if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok { + if latency, ok := rs.report.RegionLatency[reg.RegionID]; !ok { mak.Set(&rs.report.RegionLatency, reg.RegionID, d) - } else if l >= d { + } else if latency >= d { rs.report.RegionLatency[reg.RegionID] = d } diff --git a/net/socks5/socks5.go b/net/socks5/socks5.go index 4a5befa1d2fef..2e277147bc50d 100644 --- a/net/socks5/socks5.go +++ b/net/socks5/socks5.go @@ -120,10 +120,10 @@ func (s *Server) logf(format string, args ...any) { } // Serve accepts and handles incoming connections on the given listener. 
-func (s *Server) Serve(l net.Listener) error { - defer l.Close() +func (s *Server) Serve(ln net.Listener) error { + defer ln.Close() for { - c, err := l.Accept() + c, err := ln.Accept() if err != nil { return err } diff --git a/net/speedtest/speedtest_server.go b/net/speedtest/speedtest_server.go index 9dd78b195fff4..72f85fa15b019 100644 --- a/net/speedtest/speedtest_server.go +++ b/net/speedtest/speedtest_server.go @@ -17,9 +17,9 @@ import ( // connections and handles each one in a goroutine. Because it runs in an infinite loop, // this function only returns if any of the speedtests return with errors, or if the // listener is closed. -func Serve(l net.Listener) error { +func Serve(ln net.Listener) error { for { - conn, err := l.Accept() + conn, err := ln.Accept() if errors.Is(err, net.ErrClosed) { return nil } diff --git a/net/speedtest/speedtest_test.go b/net/speedtest/speedtest_test.go index 69fdb6b5685c0..bb8f2676af8c3 100644 --- a/net/speedtest/speedtest_test.go +++ b/net/speedtest/speedtest_test.go @@ -21,13 +21,13 @@ func TestDownload(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17338") // start a listener and find the port where the server will be listening. 
- l, err := net.Listen("tcp", ":0") + ln, err := net.Listen("tcp", ":0") if err != nil { t.Fatal(err) } - t.Cleanup(func() { l.Close() }) + t.Cleanup(func() { ln.Close() }) - serverIP := l.Addr().String() + serverIP := ln.Addr().String() t.Log("server IP found:", serverIP) type state struct { @@ -40,7 +40,7 @@ func TestDownload(t *testing.T) { stateChan := make(chan state, 1) go func() { - err := Serve(l) + err := Serve(ln) stateChan <- state{err: err} }() @@ -84,7 +84,7 @@ func TestDownload(t *testing.T) { }) // causes the server goroutine to finish - l.Close() + ln.Close() testState := <-stateChan if testState.err != nil { diff --git a/packages/deb/deb.go b/packages/deb/deb.go index 30e3f2b4d360c..cab0fea075e74 100644 --- a/packages/deb/deb.go +++ b/packages/deb/deb.go @@ -166,14 +166,14 @@ var ( func findArchAndVersion(control []byte) (arch string, version string, err error) { b := bytes.NewBuffer(control) for { - l, err := b.ReadBytes('\n') + ln, err := b.ReadBytes('\n') if err != nil { return "", "", err } - if bytes.HasPrefix(l, archKey) { - arch = string(bytes.TrimSpace(l[len(archKey):])) - } else if bytes.HasPrefix(l, versionKey) { - version = string(bytes.TrimSpace(l[len(versionKey):])) + if bytes.HasPrefix(ln, archKey) { + arch = string(bytes.TrimSpace(ln[len(archKey):])) + } else if bytes.HasPrefix(ln, versionKey) { + version = string(bytes.TrimSpace(ln[len(versionKey):])) } if arch != "" && version != "" { return arch, version, nil diff --git a/prober/derp.go b/prober/derp.go index 52e56fd4eff1e..22843b53a4049 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -323,14 +323,14 @@ func (d *derpProber) probeBandwidth(from, to string, size int64) ProbeClass { "derp_path": derpPath, "tcp_in_tcp": strconv.FormatBool(d.bwTUNIPv4Prefix != nil), }, - Metrics: func(l prometheus.Labels) []prometheus.Metric { + Metrics: func(lb prometheus.Labels) []prometheus.Metric { metrics := []prometheus.Metric{ - 
prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, l), prometheus.GaugeValue, float64(size)), - prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, l), prometheus.CounterValue, transferTimeSeconds.Value()), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_probe_size_bytes", "Payload size of the bandwidth prober", nil, lb), prometheus.GaugeValue, float64(size)), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_transfer_time_seconds_total", "Time it took to transfer data", nil, lb), prometheus.CounterValue, transferTimeSeconds.Value()), } if d.bwTUNIPv4Prefix != nil { // For TCP-in-TCP probes, also record cumulative bytes transferred. - metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, l), prometheus.CounterValue, totalBytesTransferred.Value())) + metrics = append(metrics, prometheus.MustNewConstMetric(prometheus.NewDesc("derp_bw_bytes_total", "Amount of data transferred", nil, lb), prometheus.CounterValue, totalBytesTransferred.Value())) } return metrics }, @@ -361,11 +361,11 @@ func (d *derpProber) probeQueuingDelay(from, to string, packetsPerSecond int, pa }, Class: "derp_qd", Labels: Labels{"derp_path": derpPath}, - Metrics: func(l prometheus.Labels) []prometheus.Metric { + Metrics: func(lb prometheus.Labels) []prometheus.Metric { qdh.mx.Lock() result := []prometheus.Metric{ - prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total packets dropped", nil, l), prometheus.CounterValue, float64(packetsDropped.Value())), - prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, l), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)), + prometheus.MustNewConstMetric(prometheus.NewDesc("derp_qd_probe_dropped_packets", "Total 
packets dropped", nil, lb), prometheus.CounterValue, float64(packetsDropped.Value())), + prometheus.MustNewConstHistogram(prometheus.NewDesc("derp_qd_probe_delays_seconds", "Distribution of queuing delays", nil, lb), qdh.count, qdh.sum, maps.Clone(qdh.bucketedCounts)), } qdh.mx.Unlock() return result @@ -1046,11 +1046,11 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT }() // Start a listener to receive the data - l, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0")) + ln, err := net.Listen("tcp", net.JoinHostPort(ifAddr.String(), "0")) if err != nil { return fmt.Errorf("failed to listen: %s", err) } - defer l.Close() + defer ln.Close() // 128KB by default const writeChunkSize = 128 << 10 @@ -1062,9 +1062,9 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT } // Dial ourselves - _, port, err := net.SplitHostPort(l.Addr().String()) + _, port, err := net.SplitHostPort(ln.Addr().String()) if err != nil { - return fmt.Errorf("failed to split address %q: %w", l.Addr().String(), err) + return fmt.Errorf("failed to split address %q: %w", ln.Addr().String(), err) } connAddr := net.JoinHostPort(destinationAddr.String(), port) @@ -1085,7 +1085,7 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT go func() { defer wg.Done() - readConn, err := l.Accept() + readConn, err := ln.Accept() if err != nil { readFinishedC <- err return @@ -1146,11 +1146,11 @@ func derpProbeBandwidthTUN(ctx context.Context, transferTimeSeconds, totalBytesT func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode, isProber bool, meshKey key.DERPMesh) (*derphttp.Client, error) { // To avoid spamming the log with regular connection messages. 
- l := logger.Filtered(log.Printf, func(s string) bool { + logf := logger.Filtered(log.Printf, func(s string) bool { return !strings.Contains(s, "derphttp.Client.Connect: connecting to") }) priv := key.NewNode() - dc := derphttp.NewRegionClient(priv, l, netmon.NewStatic(), func() *tailcfg.DERPRegion { + dc := derphttp.NewRegionClient(priv, logf, netmon.NewStatic(), func() *tailcfg.DERPRegion { rid := n.RegionID return &tailcfg.DERPRegion{ RegionID: rid, diff --git a/prober/prober.go b/prober/prober.go index 9073a95029163..6b904dd97d231 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -118,25 +118,25 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob panic(fmt.Sprintf("probe named %q already registered", name)) } - l := prometheus.Labels{ + lb := prometheus.Labels{ "name": name, "class": pc.Class, } for k, v := range pc.Labels { - l[k] = v + lb[k] = v } for k, v := range labels { - l[k] = v + lb[k] = v } - probe := newProbe(p, name, interval, l, pc) + probe := newProbe(p, name, interval, lb, pc) p.probes[name] = probe go probe.loop() return probe } // newProbe creates a new Probe with the given parameters, but does not start it. 
-func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Labels, pc ProbeClass) *Probe { +func newProbe(p *Prober, name string, interval time.Duration, lg prometheus.Labels, pc ProbeClass) *Probe { ctx, cancel := context.WithCancel(context.Background()) probe := &Probe{ prober: p, @@ -155,17 +155,17 @@ func newProbe(p *Prober, name string, interval time.Duration, l prometheus.Label latencyHist: ring.New(recentHistSize), metrics: prometheus.NewRegistry(), - metricLabels: l, - mInterval: prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, l), - mStartTime: prometheus.NewDesc("start_secs", "Latest probe start time (seconds since epoch)", nil, l), - mEndTime: prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, l), - mLatency: prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, l), - mResult: prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, l), + metricLabels: lg, + mInterval: prometheus.NewDesc("interval_secs", "Probe interval in seconds", nil, lg), + mStartTime: prometheus.NewDesc("start_secs", "Latest probe start time (seconds since epoch)", nil, lg), + mEndTime: prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, lg), + mLatency: prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, lg), + mResult: prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, lg), mAttempts: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: l, + Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: lg, }, []string{"status"}), mSeconds: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: l, + Name: "seconds_total", Help: "Total amount of time spent executing the probe", ConstLabels: lg, }, 
[]string{"status"}), } if p.metrics != nil { @@ -512,8 +512,8 @@ func (probe *Probe) probeInfoLocked() ProbeInfo { inf.Latency = probe.latency } probe.latencyHist.Do(func(v any) { - if l, ok := v.(time.Duration); ok { - inf.RecentLatencies = append(inf.RecentLatencies, l) + if latency, ok := v.(time.Duration); ok { + inf.RecentLatencies = append(inf.RecentLatencies, latency) } }) probe.successHist.Do(func(v any) { @@ -719,8 +719,8 @@ func initialDelay(seed string, interval time.Duration) time.Duration { // Labels is a set of metric labels used by a prober. type Labels map[string]string -func (l Labels) With(k, v string) Labels { - new := maps.Clone(l) +func (lb Labels) With(k, v string) Labels { + new := maps.Clone(lb) new[k] = v return new } diff --git a/tka/aum.go b/tka/aum.go index bd17b2098e0c8..b8c4b6c9e14d4 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -31,8 +31,8 @@ func (h AUMHash) String() string { // UnmarshalText implements encoding.TextUnmarshaler. func (h *AUMHash) UnmarshalText(text []byte) error { - if l := base32StdNoPad.DecodedLen(len(text)); l != len(h) { - return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", l, len(text)) + if ln := base32StdNoPad.DecodedLen(len(text)); ln != len(h) { + return fmt.Errorf("tka.AUMHash.UnmarshalText: text wrong length: %d, want %d", ln, len(text)) } if _, err := base32StdNoPad.Decode(h[:], text); err != nil { return fmt.Errorf("tka.AUMHash.UnmarshalText: %w", err) diff --git a/tka/sig_test.go b/tka/sig_test.go index 2fafb0436de1f..c5c03ef2e0055 100644 --- a/tka/sig_test.go +++ b/tka/sig_test.go @@ -76,8 +76,8 @@ func TestSigNested(t *testing.T) { if err := nestedSig.verifySignature(oldNode.Public(), k); err != nil { t.Fatalf("verifySignature(oldNode) failed: %v", err) } - if l := sigChainLength(nestedSig); l != 1 { - t.Errorf("nestedSig chain length = %v, want 1", l) + if ln := sigChainLength(nestedSig); ln != 1 { + t.Errorf("nestedSig chain length = %v, want 1", ln) } // The signature 
authorizing the rotation, signed by the @@ -93,8 +93,8 @@ func TestSigNested(t *testing.T) { if err := sig.verifySignature(node.Public(), k); err != nil { t.Fatalf("verifySignature(node) failed: %v", err) } - if l := sigChainLength(sig); l != 2 { - t.Errorf("sig chain length = %v, want 2", l) + if ln := sigChainLength(sig); ln != 2 { + t.Errorf("sig chain length = %v, want 2", ln) } // Test verification fails if the wrong verification key is provided diff --git a/tsconsensus/monitor.go b/tsconsensus/monitor.go index 2aa4c863b3e4c..c84e83454f3f7 100644 --- a/tsconsensus/monitor.go +++ b/tsconsensus/monitor.go @@ -92,8 +92,8 @@ func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) { } slices.Sort(lines) - for _, l := range lines { - _, err = w.Write([]byte(fmt.Sprintf("%s\n", l))) + for _, ln := range lines { + _, err = w.Write([]byte(fmt.Sprintf("%s\n", ln))) if err != nil { log.Printf("monitor: error writing status: %v", err) return diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index 17f3d881f8687..7f89eb48a7ab7 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -75,10 +75,10 @@ func fromCommand(bs []byte) (string, error) { return args, nil } -func (f *fsm) Apply(l *raft.Log) any { +func (f *fsm) Apply(lg *raft.Log) any { f.mu.Lock() defer f.mu.Unlock() - s, err := fromCommand(l.Data) + s, err := fromCommand(lg.Data) if err != nil { return CommandResult{ Err: err, diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index b0deb20796221..f1531d013d4b7 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1021,11 +1021,11 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string { } var b strings.Builder b.WriteString("{") - for i, l := range labels { + for i, lb := range labels { if i > 0 { b.WriteString(",") } - b.WriteString(fmt.Sprintf("%s=%q", l.GetName(), l.GetValue())) + b.WriteString(fmt.Sprintf("%s=%q", lb.GetName(), lb.GetValue())) } b.WriteString("}") 
return b.String() @@ -1033,8 +1033,8 @@ func promMetricLabelsStr(labels []*dto.LabelPair) string { // sendData sends a given amount of bytes from s1 to s2. func sendData(logf func(format string, args ...any), ctx context.Context, bytesCount int, s1, s2 *Server, s1ip, s2ip netip.Addr) error { - l := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip))) - defer l.Close() + lb := must.Get(s1.Listen("tcp", fmt.Sprintf("%s:8081", s1ip))) + defer lb.Close() // Dial to s1 from s2 w, err := s2.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) @@ -1049,7 +1049,7 @@ func sendData(logf func(format string, args ...any), ctx context.Context, bytesC defer close(allReceived) go func() { - conn, err := l.Accept() + conn, err := lb.Accept() if err != nil { allReceived <- err return diff --git a/tstest/integration/vms/vms_test.go b/tstest/integration/vms/vms_test.go index 0bab3ba5d96d5..c3a3775de9407 100644 --- a/tstest/integration/vms/vms_test.go +++ b/tstest/integration/vms/vms_test.go @@ -184,14 +184,14 @@ type ipMapping struct { // it is difficult to be 100% sure. This function should be used with care. It // will probably do what you want, but it is very easy to hold this wrong. func getProbablyFreePortNumber() (int, error) { - l, err := net.Listen("tcp", ":0") + ln, err := net.Listen("tcp", ":0") if err != nil { return 0, err } - defer l.Close() + defer ln.Close() - _, port, err := net.SplitHostPort(l.Addr().String()) + _, port, err := net.SplitHostPort(ln.Addr().String()) if err != nil { return 0, err } diff --git a/tsweb/tsweb.go b/tsweb/tsweb.go index 119fed2e61012..869b4cc8ea566 100644 --- a/tsweb/tsweb.go +++ b/tsweb/tsweb.go @@ -628,8 +628,8 @@ type loggingResponseWriter struct { // from r, or falls back to logf. If a nil logger is given, the logs are // discarded. 
func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Request) *loggingResponseWriter { - if l, ok := logger.LogfKey.ValueOk(r.Context()); ok && l != nil { - logf = l + if lg, ok := logger.LogfKey.ValueOk(r.Context()); ok && lg != nil { + logf = lg } if logf == nil { logf = logger.Discard @@ -642,46 +642,46 @@ func newLogResponseWriter(logf logger.Logf, w http.ResponseWriter, r *http.Reque } // WriteHeader implements [http.ResponseWriter]. -func (l *loggingResponseWriter) WriteHeader(statusCode int) { - if l.code != 0 { - l.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", l.code, statusCode) +func (lg *loggingResponseWriter) WriteHeader(statusCode int) { + if lg.code != 0 { + lg.logf("[unexpected] HTTP handler set statusCode twice (%d and %d)", lg.code, statusCode) return } - if l.ctx.Err() == nil { - l.code = statusCode + if lg.ctx.Err() == nil { + lg.code = statusCode } - l.ResponseWriter.WriteHeader(statusCode) + lg.ResponseWriter.WriteHeader(statusCode) } // Write implements [http.ResponseWriter]. -func (l *loggingResponseWriter) Write(bs []byte) (int, error) { - if l.code == 0 { - l.code = 200 +func (lg *loggingResponseWriter) Write(bs []byte) (int, error) { + if lg.code == 0 { + lg.code = 200 } - n, err := l.ResponseWriter.Write(bs) - l.bytes += n + n, err := lg.ResponseWriter.Write(bs) + lg.bytes += n return n, err } // Hijack implements http.Hijacker. Note that hijacking can still fail // because the wrapped ResponseWriter is not required to implement // Hijacker, as this breaks HTTP/2. 
-func (l *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - h, ok := l.ResponseWriter.(http.Hijacker) +func (lg *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + h, ok := lg.ResponseWriter.(http.Hijacker) if !ok { return nil, nil, errors.New("ResponseWriter is not a Hijacker") } conn, buf, err := h.Hijack() if err == nil { - l.hijacked = true + lg.hijacked = true } return conn, buf, err } -func (l loggingResponseWriter) Flush() { - f, _ := l.ResponseWriter.(http.Flusher) +func (lg loggingResponseWriter) Flush() { + f, _ := lg.ResponseWriter.(http.Flusher) if f == nil { - l.logf("[unexpected] tried to Flush a ResponseWriter that can't flush") + lg.logf("[unexpected] tried to Flush a ResponseWriter that can't flush") return } f.Flush() diff --git a/types/geo/quantize_test.go b/types/geo/quantize_test.go index 3c707e303c250..bc1f62c9be32f 100644 --- a/types/geo/quantize_test.go +++ b/types/geo/quantize_test.go @@ -32,20 +32,20 @@ func TestPointAnonymize(t *testing.T) { last := geo.MakePoint(llat, 0) cur := geo.MakePoint(lat, 0) anon := cur.Quantize() - switch l, g, err := anon.LatLng(); { + switch latlng, g, err := anon.LatLng(); { case err != nil: t.Fatal(err) case lat == southPole: // initialize llng, to the first snapped longitude - llat = l + llat = latlng goto Lng case g != 0: t.Fatalf("%v is west or east of %v", anon, last) - case l < llat: + case latlng < llat: t.Fatalf("%v is south of %v", anon, last) - case l == llat: + case latlng == llat: continue - case l > llat: + case latlng > llat: switch dist, err := last.DistanceTo(anon); { case err != nil: t.Fatal(err) @@ -55,7 +55,7 @@ func TestPointAnonymize(t *testing.T) { t.Logf("lat=%v last=%v cur=%v anon=%v", lat, last, cur, anon) t.Fatalf("%v is too close to %v", anon, last) default: - llat = l + llat = latlng } } @@ -65,14 +65,14 @@ func TestPointAnonymize(t *testing.T) { last := geo.MakePoint(llat, llng) cur := geo.MakePoint(lat, lng) anon := cur.Quantize() - 
switch l, g, err := anon.LatLng(); { + switch latlng, g, err := anon.LatLng(); { case err != nil: t.Fatal(err) case lng == dateLine: // initialize llng, to the first snapped longitude llng = g continue - case l != llat: + case latlng != llat: t.Fatalf("%v is north or south of %v", anon, last) case g != llng: const tolerance = geo.MinSeparation * 0x1p-9 diff --git a/types/key/disco.go b/types/key/disco.go index ce5f9b36fd9a1..52b40c766fbbf 100644 --- a/types/key/disco.go +++ b/types/key/disco.go @@ -167,11 +167,11 @@ func (k DiscoPublic) String() string { } // Compare returns an integer comparing DiscoPublic k and l lexicographically. -// The result will be 0 if k == l, -1 if k < l, and +1 if k > l. This is useful -// for situations requiring only one node in a pair to perform some operation, -// e.g. probing UDP path lifetime. -func (k DiscoPublic) Compare(l DiscoPublic) int { - return bytes.Compare(k.k[:], l.k[:]) +// The result will be 0 if k == other, -1 if k < other, and +1 if k > other. +// This is useful for situations requiring only one node in a pair to perform +// some operation, e.g. probing UDP path lifetime. +func (k DiscoPublic) Compare(other DiscoPublic) int { + return bytes.Compare(k.k[:], other.k[:]) } // AppendText implements encoding.TextAppender. diff --git a/types/prefs/list.go b/types/prefs/list.go index 7db473887d195..ae6b2fae335db 100644 --- a/types/prefs/list.go +++ b/types/prefs/list.go @@ -45,36 +45,36 @@ func ListWithOpts[T ImmutableType](opts ...Options) List[T] { // SetValue configures the preference with the specified value. // It fails and returns [ErrManaged] if p is a managed preference, // and [ErrReadOnly] if p is a read-only preference. 
-func (l *List[T]) SetValue(val []T) error { - return l.preference.SetValue(cloneSlice(val)) +func (ls *List[T]) SetValue(val []T) error { + return ls.preference.SetValue(cloneSlice(val)) } // SetManagedValue configures the preference with the specified value // and marks the preference as managed. -func (l *List[T]) SetManagedValue(val []T) { - l.preference.SetManagedValue(cloneSlice(val)) +func (ls *List[T]) SetManagedValue(val []T) { + ls.preference.SetManagedValue(cloneSlice(val)) } // View returns a read-only view of l. -func (l *List[T]) View() ListView[T] { - return ListView[T]{l} +func (ls *List[T]) View() ListView[T] { + return ListView[T]{ls} } // Clone returns a copy of l that aliases no memory with l. -func (l List[T]) Clone() *List[T] { - res := ptr.To(l) - if v, ok := l.s.Value.GetOk(); ok { +func (ls List[T]) Clone() *List[T] { + res := ptr.To(ls) + if v, ok := ls.s.Value.GetOk(); ok { res.s.Value.Set(append(v[:0:0], v...)) } return res } // Equal reports whether l and l2 are equal. 
-func (l List[T]) Equal(l2 List[T]) bool { - if l.s.Metadata != l2.s.Metadata { +func (ls List[T]) Equal(l2 List[T]) bool { + if ls.s.Metadata != l2.s.Metadata { return false } - v1, ok1 := l.s.Value.GetOk() + v1, ok1 := ls.s.Value.GetOk() v2, ok2 := l2.s.Value.GetOk() if ok1 != ok2 { return false diff --git a/types/prefs/prefs_test.go b/types/prefs/prefs_test.go index d6af745bf83b8..dc1213adb27ab 100644 --- a/types/prefs/prefs_test.go +++ b/types/prefs/prefs_test.go @@ -487,31 +487,31 @@ func TestItemView(t *testing.T) { } func TestListView(t *testing.T) { - l := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly) + ls := ListOf([]int{4, 8, 15, 16, 23, 42}, ReadOnly) - lv := l.View() + lv := ls.View() checkIsSet(t, lv, true) checkIsManaged(t, lv, false) checkIsReadOnly(t, lv, true) - checkValue(t, lv, views.SliceOf(l.Value())) - checkValueOk(t, lv, views.SliceOf(l.Value()), true) + checkValue(t, lv, views.SliceOf(ls.Value())) + checkValueOk(t, lv, views.SliceOf(ls.Value()), true) l2 := *lv.AsStruct() - checkEqual(t, l, l2, true) + checkEqual(t, ls, l2, true) } func TestStructListView(t *testing.T) { - l := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly) + ls := StructListOf([]*TestBundle{{Name: "E1"}, {Name: "E2"}}, ReadOnly) - lv := StructListViewOf(&l) + lv := StructListViewOf(&ls) checkIsSet(t, lv, true) checkIsManaged(t, lv, false) checkIsReadOnly(t, lv, true) - checkValue(t, lv, views.SliceOfViews(l.Value())) - checkValueOk(t, lv, views.SliceOfViews(l.Value()), true) + checkValue(t, lv, views.SliceOfViews(ls.Value())) + checkValueOk(t, lv, views.SliceOfViews(ls.Value()), true) l2 := *lv.AsStruct() - checkEqual(t, l, l2, true) + checkEqual(t, ls, l2, true) } func TestStructMapView(t *testing.T) { diff --git a/types/prefs/struct_list.go b/types/prefs/struct_list.go index 65f11011af8fb..ba145e2cf7086 100644 --- a/types/prefs/struct_list.go +++ b/types/prefs/struct_list.go @@ -33,20 +33,20 @@ func StructListWithOpts[T views.Cloner[T]](opts ...Options) 
StructList[T] { // SetValue configures the preference with the specified value. // It fails and returns [ErrManaged] if p is a managed preference, // and [ErrReadOnly] if p is a read-only preference. -func (l *StructList[T]) SetValue(val []T) error { - return l.preference.SetValue(deepCloneSlice(val)) +func (ls *StructList[T]) SetValue(val []T) error { + return ls.preference.SetValue(deepCloneSlice(val)) } // SetManagedValue configures the preference with the specified value // and marks the preference as managed. -func (l *StructList[T]) SetManagedValue(val []T) { - l.preference.SetManagedValue(deepCloneSlice(val)) +func (ls *StructList[T]) SetManagedValue(val []T) { + ls.preference.SetManagedValue(deepCloneSlice(val)) } // Clone returns a copy of l that aliases no memory with l. -func (l StructList[T]) Clone() *StructList[T] { - res := ptr.To(l) - if v, ok := l.s.Value.GetOk(); ok { +func (ls StructList[T]) Clone() *StructList[T] { + res := ptr.To(ls) + if v, ok := ls.s.Value.GetOk(); ok { res.s.Value.Set(deepCloneSlice(v)) } return res @@ -56,11 +56,11 @@ func (l StructList[T]) Clone() *StructList[T] { // If the template type T implements an Equal(T) bool method, it will be used // instead of the == operator for value comparison. // It panics if T is not comparable. -func (l StructList[T]) Equal(l2 StructList[T]) bool { - if l.s.Metadata != l2.s.Metadata { +func (ls StructList[T]) Equal(l2 StructList[T]) bool { + if ls.s.Metadata != l2.s.Metadata { return false } - v1, ok1 := l.s.Value.GetOk() + v1, ok1 := ls.s.Value.GetOk() v2, ok2 := l2.s.Value.GetOk() if ok1 != ok2 { return false @@ -105,8 +105,8 @@ type StructListView[T views.ViewCloner[T, V], V views.StructView[T]] struct { // StructListViewOf returns a read-only view of l. // It is used by [tailscale.com/cmd/viewer]. 
-func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](l *StructList[T]) StructListView[T, V] { - return StructListView[T, V]{l} +func StructListViewOf[T views.ViewCloner[T, V], V views.StructView[T]](ls *StructList[T]) StructListView[T, V] { + return StructListView[T, V]{ls} } // Valid reports whether the underlying [StructList] is non-nil. diff --git a/types/prefs/struct_map.go b/types/prefs/struct_map.go index a081f7c7468e2..83cc7447baedd 100644 --- a/types/prefs/struct_map.go +++ b/types/prefs/struct_map.go @@ -31,14 +31,14 @@ func StructMapWithOpts[K MapKeyType, V views.Cloner[V]](opts ...Options) StructM // SetValue configures the preference with the specified value. // It fails and returns [ErrManaged] if p is a managed preference, // and [ErrReadOnly] if p is a read-only preference. -func (l *StructMap[K, V]) SetValue(val map[K]V) error { - return l.preference.SetValue(deepCloneMap(val)) +func (m *StructMap[K, V]) SetValue(val map[K]V) error { + return m.preference.SetValue(deepCloneMap(val)) } // SetManagedValue configures the preference with the specified value // and marks the preference as managed. -func (l *StructMap[K, V]) SetManagedValue(val map[K]V) { - l.preference.SetManagedValue(deepCloneMap(val)) +func (m *StructMap[K, V]) SetManagedValue(val map[K]V) { + m.preference.SetManagedValue(deepCloneMap(val)) } // Clone returns a copy of m that aliases no memory with m. diff --git a/util/limiter/limiter.go b/util/limiter/limiter.go index 30e0b74ed60ff..b86efdf29cfd0 100644 --- a/util/limiter/limiter.go +++ b/util/limiter/limiter.go @@ -94,59 +94,59 @@ type bucket struct { // Allow charges the key one token (up to the overdraft limit), and // reports whether the key can perform an action. 
-func (l *Limiter[K]) Allow(key K) bool { - return l.allow(key, time.Now()) +func (lm *Limiter[K]) Allow(key K) bool { + return lm.allow(key, time.Now()) } -func (l *Limiter[K]) allow(key K, now time.Time) bool { - l.mu.Lock() - defer l.mu.Unlock() - return l.allowBucketLocked(l.getBucketLocked(key, now), now) +func (lm *Limiter[K]) allow(key K, now time.Time) bool { + lm.mu.Lock() + defer lm.mu.Unlock() + return lm.allowBucketLocked(lm.getBucketLocked(key, now), now) } -func (l *Limiter[K]) getBucketLocked(key K, now time.Time) *bucket { - if l.cache == nil { - l.cache = &lru.Cache[K, *bucket]{MaxEntries: l.Size} - } else if b := l.cache.Get(key); b != nil { +func (lm *Limiter[K]) getBucketLocked(key K, now time.Time) *bucket { + if lm.cache == nil { + lm.cache = &lru.Cache[K, *bucket]{MaxEntries: lm.Size} + } else if b := lm.cache.Get(key); b != nil { return b } b := &bucket{ - cur: l.Max, - lastUpdate: now.Truncate(l.RefillInterval), + cur: lm.Max, + lastUpdate: now.Truncate(lm.RefillInterval), } - l.cache.Set(key, b) + lm.cache.Set(key, b) return b } -func (l *Limiter[K]) allowBucketLocked(b *bucket, now time.Time) bool { +func (lm *Limiter[K]) allowBucketLocked(b *bucket, now time.Time) bool { // Only update the bucket quota if needed to process request. 
if b.cur <= 0 { - l.updateBucketLocked(b, now) + lm.updateBucketLocked(b, now) } ret := b.cur > 0 - if b.cur > -l.Overdraft { + if b.cur > -lm.Overdraft { b.cur-- } return ret } -func (l *Limiter[K]) updateBucketLocked(b *bucket, now time.Time) { - now = now.Truncate(l.RefillInterval) +func (lm *Limiter[K]) updateBucketLocked(b *bucket, now time.Time) { + now = now.Truncate(lm.RefillInterval) if now.Before(b.lastUpdate) { return } timeDelta := max(now.Sub(b.lastUpdate), 0) - tokenDelta := int64(timeDelta / l.RefillInterval) - b.cur = min(b.cur+tokenDelta, l.Max) + tokenDelta := int64(timeDelta / lm.RefillInterval) + b.cur = min(b.cur+tokenDelta, lm.Max) b.lastUpdate = now } // peekForTest returns the number of tokens for key, also reporting // whether key was present. -func (l *Limiter[K]) tokensForTest(key K) (int64, bool) { - l.mu.Lock() - defer l.mu.Unlock() - if b, ok := l.cache.PeekOk(key); ok { +func (lm *Limiter[K]) tokensForTest(key K) (int64, bool) { + lm.mu.Lock() + defer lm.mu.Unlock() + if b, ok := lm.cache.PeekOk(key); ok { return b.cur, true } return 0, false @@ -159,12 +159,12 @@ func (l *Limiter[K]) tokensForTest(key K) (int64, bool) { // DumpHTML blocks other callers of the limiter while it collects the // state for dumping. It should not be called on large limiters // involved in hot codepaths. 
-func (l *Limiter[K]) DumpHTML(w io.Writer, onlyLimited bool) { - l.dumpHTML(w, onlyLimited, time.Now()) +func (lm *Limiter[K]) DumpHTML(w io.Writer, onlyLimited bool) { + lm.dumpHTML(w, onlyLimited, time.Now()) } -func (l *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) { - dump := l.collectDump(now) +func (lm *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) { + dump := lm.collectDump(now) io.WriteString(w, "") for _, line := range dump { if onlyLimited && line.Tokens > 0 { @@ -183,13 +183,13 @@ func (l *Limiter[K]) dumpHTML(w io.Writer, onlyLimited bool, now time.Time) { } // collectDump grabs a copy of the limiter state needed by DumpHTML. -func (l *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] { - l.mu.Lock() - defer l.mu.Unlock() +func (lm *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] { + lm.mu.Lock() + defer lm.mu.Unlock() - ret := make([]dumpEntry[K], 0, l.cache.Len()) - l.cache.ForEach(func(k K, v *bucket) { - l.updateBucketLocked(v, now) // so stats are accurate + ret := make([]dumpEntry[K], 0, lm.cache.Len()) + lm.cache.ForEach(func(k K, v *bucket) { + lm.updateBucketLocked(v, now) // so stats are accurate ret = append(ret, dumpEntry[K]{k, v.cur}) }) return ret diff --git a/util/limiter/limiter_test.go b/util/limiter/limiter_test.go index 1f466d88257ab..77b1d562b23fb 100644 --- a/util/limiter/limiter_test.go +++ b/util/limiter/limiter_test.go @@ -16,7 +16,7 @@ const testRefillInterval = time.Second func TestLimiter(t *testing.T) { // 1qps, burst of 10, 2 keys tracked - l := &Limiter[string]{ + limiter := &Limiter[string]{ Size: 2, Max: 10, RefillInterval: testRefillInterval, @@ -24,48 +24,48 @@ func TestLimiter(t *testing.T) { // Consume entire burst now := time.Now().Truncate(testRefillInterval) - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) - 
allowed(t, l, "bar", 10, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", 0) + allowed(t, limiter, "bar", 10, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", 0) // Refill 1 token for both foo and bar now = now.Add(time.Second + time.Millisecond) - allowed(t, l, "foo", 1, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "foo", 1, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) - allowed(t, l, "bar", 1, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", 0) + allowed(t, limiter, "bar", 1, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", 0) // Refill 2 tokens for foo and bar now = now.Add(2*time.Second + time.Millisecond) - allowed(t, l, "foo", 2, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "foo", 2, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) - allowed(t, l, "bar", 2, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", 0) + allowed(t, limiter, "bar", 2, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", 0) // qux can burst 10, evicts foo so it can immediately burst 10 again too - allowed(t, l, "qux", 10, now) - denied(t, l, "qux", 1, now) - notInLimiter(t, l, "foo") - denied(t, l, "bar", 1, now) // refresh bar so foo lookup doesn't evict it - still throttled - - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", 0) + allowed(t, limiter, "qux", 10, now) + denied(t, limiter, "qux", 1, now) + notInLimiter(t, limiter, "foo") + denied(t, limiter, "bar", 1, now) // refresh bar so foo lookup doesn't evict it - still throttled + + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", 0) } func TestLimiterOverdraft(t *testing.T) { // 1qps, burst of 10, overdraft of 2, 2 keys tracked - l := &Limiter[string]{ + limiter := &Limiter[string]{ Size: 2, Max: 10, Overdraft: 2, 
@@ -74,51 +74,51 @@ func TestLimiterOverdraft(t *testing.T) { // Consume entire burst, go 1 into debt now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond) - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -1) + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -1) - allowed(t, l, "bar", 10, now) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", -1) + allowed(t, limiter, "bar", 10, now) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", -1) // Refill 1 token for both foo and bar. // Still denied, still in debt. now = now.Add(time.Second) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -1) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", -1) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -1) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", -1) // Refill 2 tokens for foo and bar (1 available after debt), try // to consume 4. Overdraft is capped to 2. now = now.Add(2 * time.Second) - allowed(t, l, "foo", 1, now) - denied(t, l, "foo", 3, now) - hasTokens(t, l, "foo", -2) + allowed(t, limiter, "foo", 1, now) + denied(t, limiter, "foo", 3, now) + hasTokens(t, limiter, "foo", -2) - allowed(t, l, "bar", 1, now) - denied(t, l, "bar", 3, now) - hasTokens(t, l, "bar", -2) + allowed(t, limiter, "bar", 1, now) + denied(t, limiter, "bar", 3, now) + hasTokens(t, limiter, "bar", -2) // Refill 1, not enough to allow. now = now.Add(time.Second) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -2) - denied(t, l, "bar", 1, now) - hasTokens(t, l, "bar", -2) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -2) + denied(t, limiter, "bar", 1, now) + hasTokens(t, limiter, "bar", -2) // qux evicts foo, foo can immediately burst 10 again. 
- allowed(t, l, "qux", 1, now) - hasTokens(t, l, "qux", 9) - notInLimiter(t, l, "foo") - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 1, now) - hasTokens(t, l, "foo", -1) + allowed(t, limiter, "qux", 1, now) + hasTokens(t, limiter, "qux", 9) + notInLimiter(t, limiter, "foo") + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 1, now) + hasTokens(t, limiter, "foo", -1) } func TestDumpHTML(t *testing.T) { - l := &Limiter[string]{ + limiter := &Limiter[string]{ Size: 3, Max: 10, Overdraft: 10, @@ -126,13 +126,13 @@ func TestDumpHTML(t *testing.T) { } now := time.Now().Truncate(testRefillInterval).Add(time.Millisecond) - allowed(t, l, "foo", 10, now) - denied(t, l, "foo", 2, now) - allowed(t, l, "bar", 4, now) - allowed(t, l, "qux", 1, now) + allowed(t, limiter, "foo", 10, now) + denied(t, limiter, "foo", 2, now) + allowed(t, limiter, "bar", 4, now) + allowed(t, limiter, "qux", 1, now) var out bytes.Buffer - l.DumpHTML(&out, false) + limiter.DumpHTML(&out, false) want := strings.Join([]string{ "
      KeyTokens
      ", "", @@ -146,7 +146,7 @@ func TestDumpHTML(t *testing.T) { } out.Reset() - l.DumpHTML(&out, true) + limiter.DumpHTML(&out, true) want = strings.Join([]string{ "
      KeyTokens
      ", "", @@ -161,7 +161,7 @@ func TestDumpHTML(t *testing.T) { // organically. now = now.Add(3 * time.Second) out.Reset() - l.dumpHTML(&out, false, now) + limiter.dumpHTML(&out, false, now) want = strings.Join([]string{ "
      KeyTokens
      ", "", @@ -175,29 +175,29 @@ func TestDumpHTML(t *testing.T) { } } -func allowed(t *testing.T, l *Limiter[string], key string, count int, now time.Time) { +func allowed(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) { t.Helper() for i := range count { - if !l.allow(key, now) { - toks, ok := l.tokensForTest(key) + if !limiter.allow(key, now) { + toks, ok := limiter.tokensForTest(key) t.Errorf("after %d times: allow(%q, %q) = false, want true (%d tokens available, in cache = %v)", i, key, now, toks, ok) } } } -func denied(t *testing.T, l *Limiter[string], key string, count int, now time.Time) { +func denied(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) { t.Helper() for i := range count { - if l.allow(key, now) { - toks, ok := l.tokensForTest(key) + if limiter.allow(key, now) { + toks, ok := limiter.tokensForTest(key) t.Errorf("after %d times: allow(%q, %q) = true, want false (%d tokens available, in cache = %v)", i, key, now, toks, ok) } } } -func hasTokens(t *testing.T, l *Limiter[string], key string, want int64) { +func hasTokens(t *testing.T, limiter *Limiter[string], key string, want int64) { t.Helper() - got, ok := l.tokensForTest(key) + got, ok := limiter.tokensForTest(key) if !ok { t.Errorf("key %q missing from limiter", key) } else if got != want { @@ -205,9 +205,9 @@ func hasTokens(t *testing.T, l *Limiter[string], key string, want int64) { } } -func notInLimiter(t *testing.T, l *Limiter[string], key string) { +func notInLimiter(t *testing.T, limiter *Limiter[string], key string) { t.Helper() - if tokens, ok := l.tokensForTest(key); ok { + if tokens, ok := limiter.tokensForTest(key); ok { t.Errorf("key %q unexpectedly tracked by limiter, with %d tokens", key, tokens) } } diff --git a/util/linuxfw/detector.go b/util/linuxfw/detector.go index 644126131bbba..149e0c96049c8 100644 --- a/util/linuxfw/detector.go +++ b/util/linuxfw/detector.go @@ -85,7 +85,7 @@ type tableDetector interface { type 
linuxFWDetector struct{} // iptDetect returns the number of iptables rules in the current namespace. -func (l linuxFWDetector) iptDetect() (int, error) { +func (ld linuxFWDetector) iptDetect() (int, error) { return detectIptables() } @@ -96,7 +96,7 @@ var hookDetectNetfilter feature.Hook[func() (int, error)] var ErrUnsupported = errors.New("linuxfw:unsupported") // nftDetect returns the number of nftables rules in the current namespace. -func (l linuxFWDetector) nftDetect() (int, error) { +func (ld linuxFWDetector) nftDetect() (int, error) { if f, ok := hookDetectNetfilter.GetOk(); ok { return f() } diff --git a/util/lru/lru_test.go b/util/lru/lru_test.go index 5500e5e0f309f..04de2e5070c87 100644 --- a/util/lru/lru_test.go +++ b/util/lru/lru_test.go @@ -84,8 +84,8 @@ func TestStressEvictions(t *testing.T) { for range numProbes { v := vals[rand.Intn(len(vals))] c.Set(v, true) - if l := c.Len(); l > cacheSize { - t.Fatalf("Cache size now %d, want max %d", l, cacheSize) + if ln := c.Len(); ln > cacheSize { + t.Fatalf("Cache size now %d, want max %d", ln, cacheSize) } } } @@ -119,8 +119,8 @@ func TestStressBatchedEvictions(t *testing.T) { c.DeleteOldest() } } - if l := c.Len(); l > cacheSizeMax { - t.Fatalf("Cache size now %d, want max %d", l, cacheSizeMax) + if ln := c.Len(); ln > cacheSizeMax { + t.Fatalf("Cache size now %d, want max %d", ln, cacheSizeMax) } } } diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 0ca36176e675c..97362b1dca8e0 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -322,33 +322,33 @@ func Definitions() ([]*Definition, error) { type PlatformList []string // Has reports whether l contains the target platform. 
-func (l PlatformList) Has(target string) bool { - if len(l) == 0 { +func (ls PlatformList) Has(target string) bool { + if len(ls) == 0 { return true } - return slices.ContainsFunc(l, func(os string) bool { + return slices.ContainsFunc(ls, func(os string) bool { return strings.EqualFold(os, target) }) } // HasCurrent is like Has, but for the current platform. -func (l PlatformList) HasCurrent() bool { - return l.Has(internal.OS()) +func (ls PlatformList) HasCurrent() bool { + return ls.Has(internal.OS()) } // mergeFrom merges l2 into l. Since an empty list indicates no platform restrictions, // if either l or l2 is empty, the merged result in l will also be empty. -func (l *PlatformList) mergeFrom(l2 PlatformList) { +func (ls *PlatformList) mergeFrom(l2 PlatformList) { switch { - case len(*l) == 0: + case len(*ls) == 0: // No-op. An empty list indicates no platform restrictions. case len(l2) == 0: // Merging with an empty list results in an empty list. - *l = l2 + *ls = l2 default: // Append, sort and dedup. - *l = append(*l, l2...) - slices.Sort(*l) - *l = slices.Compact(*l) + *ls = append(*ls, l2...) + slices.Sort(*ls) + *ls = slices.Compact(*ls) } } diff --git a/util/syspolicy/setting/setting_test.go b/util/syspolicy/setting/setting_test.go index e43495a160e12..9d99884f6436f 100644 --- a/util/syspolicy/setting/setting_test.go +++ b/util/syspolicy/setting/setting_test.go @@ -311,8 +311,8 @@ func TestListSettingDefinitions(t *testing.T) { t.Fatalf("SetDefinitionsForTest failed: %v", err) } - cmp := func(l, r *Definition) int { - return strings.Compare(string(l.Key()), string(r.Key())) + cmp := func(a, b *Definition) int { + return strings.Compare(string(a.Key()), string(b.Key())) } want := append([]*Definition{}, definitions...) 
slices.SortFunc(want, cmp) diff --git a/util/winutil/gp/gp_windows_test.go b/util/winutil/gp/gp_windows_test.go index e2520b46d56ae..f892068835bce 100644 --- a/util/winutil/gp/gp_windows_test.go +++ b/util/winutil/gp/gp_windows_test.go @@ -182,16 +182,16 @@ func doWithMachinePolicyLocked(t *testing.T, f func()) { f() } -func doWithCustomEnterLeaveFuncs(t *testing.T, f func(l *PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) { +func doWithCustomEnterLeaveFuncs(t *testing.T, f func(*PolicyLock), enter func(bool) (policyLockHandle, error), leave func(policyLockHandle) error) { t.Helper() - l := NewMachinePolicyLock() - l.enterFn, l.leaveFn = enter, leave + lock := NewMachinePolicyLock() + lock.enterFn, lock.leaveFn = enter, leave t.Cleanup(func() { - if err := l.Close(); err != nil { + if err := lock.Close(); err != nil { t.Fatalf("(*PolicyLock).Close failed: %v", err) } }) - f(l) + f(lock) } diff --git a/util/winutil/gp/policylock_windows.go b/util/winutil/gp/policylock_windows.go index 69c5ff01697f4..6c3ca0baf6d21 100644 --- a/util/winutil/gp/policylock_windows.go +++ b/util/winutil/gp/policylock_windows.go @@ -127,32 +127,32 @@ func NewUserPolicyLock(token windows.Token) (*PolicyLock, error) { return lock, nil } -// Lock locks l. -// It returns [ErrInvalidLockState] if l has a zero value or has already been closed, +// Lock locks lk. +// It returns [ErrInvalidLockState] if lk has a zero value or has already been closed, // [ErrLockRestricted] if the lock cannot be acquired due to a restriction in place, // or a [syscall.Errno] if the underlying Group Policy lock cannot be acquired. // // As a special case, it fails with [windows.ERROR_ACCESS_DENIED] -// if l is a user policy lock, and the corresponding user is not logged in +// if lk is a user policy lock, and the corresponding user is not logged in // interactively at the time of the call. 
-func (l *PolicyLock) Lock() error { +func (lk *PolicyLock) Lock() error { if policyLockRestricted.Load() > 0 { return ErrLockRestricted } - l.mu.Lock() - defer l.mu.Unlock() - if l.lockCnt.Add(2)&1 == 0 { + lk.mu.Lock() + defer lk.mu.Unlock() + if lk.lockCnt.Add(2)&1 == 0 { // The lock cannot be acquired because it has either never been properly // created or its Close method has already been called. However, we need // to call Unlock to both decrement lockCnt and leave the underlying // CriticalPolicySection if we won the race with another goroutine and // now own the lock. - l.Unlock() + lk.Unlock() return ErrInvalidLockState } - if l.handle != 0 { + if lk.handle != 0 { // The underlying CriticalPolicySection is already acquired. // It is an R-Lock (with the W-counterpart owned by the Group Policy service), // meaning that it can be acquired by multiple readers simultaneously. @@ -160,20 +160,20 @@ func (l *PolicyLock) Lock() error { return nil } - return l.lockSlow() + return lk.lockSlow() } // lockSlow calls enterCriticalPolicySection to acquire the underlying GP read lock. // It waits for either the lock to be acquired, or for the Close method to be called. // // l.mu must be held. -func (l *PolicyLock) lockSlow() (err error) { +func (lk *PolicyLock) lockSlow() (err error) { defer func() { if err != nil { // Decrement the counter if the lock cannot be acquired, // and complete the pending close request if we're the last owner. - if l.lockCnt.Add(-2) == 0 { - l.closeInternal() + if lk.lockCnt.Add(-2) == 0 { + lk.closeInternal() } } }() @@ -190,12 +190,12 @@ func (l *PolicyLock) lockSlow() (err error) { resultCh := make(chan policyLockResult) go func() { - closing := l.closing - if l.scope == UserPolicy && l.token != 0 { + closing := lk.closing + if lk.scope == UserPolicy && lk.token != 0 { // Impersonate the user whose critical policy section we want to acquire. 
runtime.LockOSThread() defer runtime.UnlockOSThread() - if err := impersonateLoggedOnUser(l.token); err != nil { + if err := impersonateLoggedOnUser(lk.token); err != nil { initCh <- err return } @@ -209,10 +209,10 @@ func (l *PolicyLock) lockSlow() (err error) { close(initCh) var machine bool - if l.scope == MachinePolicy { + if lk.scope == MachinePolicy { machine = true } - handle, err := l.enterFn(machine) + handle, err := lk.enterFn(machine) send_result: for { @@ -226,7 +226,7 @@ func (l *PolicyLock) lockSlow() (err error) { // The lock is being closed, and we lost the race to l.closing // it the calling goroutine. if err == nil { - l.leaveFn(handle) + lk.leaveFn(handle) } break send_result default: @@ -247,21 +247,21 @@ func (l *PolicyLock) lockSlow() (err error) { select { case result := <-resultCh: if result.err == nil { - l.handle = result.handle + lk.handle = result.handle } return result.err - case <-l.closing: + case <-lk.closing: return ErrInvalidLockState } } // Unlock unlocks l. // It panics if l is not locked on entry to Unlock. -func (l *PolicyLock) Unlock() { - l.mu.Lock() - defer l.mu.Unlock() +func (lk *PolicyLock) Unlock() { + lk.mu.Lock() + defer lk.mu.Unlock() - lockCnt := l.lockCnt.Add(-2) + lockCnt := lk.lockCnt.Add(-2) if lockCnt < 0 { panic("negative lockCnt") } @@ -273,33 +273,33 @@ func (l *PolicyLock) Unlock() { return } - if l.handle != 0 { + if lk.handle != 0 { // Impersonation is not required to unlock a critical policy section. // The handle we pass determines which mutex will be unlocked. - leaveCriticalPolicySection(l.handle) - l.handle = 0 + leaveCriticalPolicySection(lk.handle) + lk.handle = 0 } if lockCnt == 0 { // Complete the pending close request if there's no more readers. - l.closeInternal() + lk.closeInternal() } } // Close releases resources associated with l. // It is a no-op for the machine policy lock. 
-func (l *PolicyLock) Close() error { - lockCnt := l.lockCnt.Load() +func (lk *PolicyLock) Close() error { + lockCnt := lk.lockCnt.Load() if lockCnt&1 == 0 { // The lock has never been initialized, or close has already been called. return nil } - close(l.closing) + close(lk.closing) // Unset the LSB to indicate a pending close request. - for !l.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) { - lockCnt = l.lockCnt.Load() + for !lk.lockCnt.CompareAndSwap(lockCnt, lockCnt&^int32(1)) { + lockCnt = lk.lockCnt.Load() } if lockCnt != 0 { @@ -307,16 +307,16 @@ func (l *PolicyLock) Close() error { return nil } - return l.closeInternal() + return lk.closeInternal() } -func (l *PolicyLock) closeInternal() error { - if l.token != 0 { - if err := l.token.Close(); err != nil { +func (lk *PolicyLock) closeInternal() error { + if lk.token != 0 { + if err := lk.token.Close(); err != nil { return err } - l.token = 0 + lk.token = 0 } - l.closing = nil + lk.closing = nil return nil } diff --git a/util/winutil/s4u/lsa_windows.go b/util/winutil/s4u/lsa_windows.go index 3ff2171f91d70..3276b26766c08 100644 --- a/util/winutil/s4u/lsa_windows.go +++ b/util/winutil/s4u/lsa_windows.go @@ -256,8 +256,8 @@ func checkDomainAccount(username string) (sanitizedUserName string, isDomainAcco // errors.Is to check for it. When capLevel == CapCreateProcess, the logon // enforces the user's logon hours policy (when present). 
func (ls *lsaSession) logonAs(srcName string, u *user.User, capLevel CapabilityLevel) (token windows.Token, err error) { - if l := len(srcName); l == 0 || l > _TOKEN_SOURCE_LENGTH { - return 0, fmt.Errorf("%w, actual length is %d", ErrBadSrcName, l) + if ln := len(srcName); ln == 0 || ln > _TOKEN_SOURCE_LENGTH { + return 0, fmt.Errorf("%w, actual length is %d", ErrBadSrcName, ln) } if err := checkASCII(srcName); err != nil { return 0, fmt.Errorf("%w: %v", ErrBadSrcName, err) diff --git a/util/winutil/s4u/s4u_windows.go b/util/winutil/s4u/s4u_windows.go index 8926aaedc5071..8c8e02dbe83bc 100644 --- a/util/winutil/s4u/s4u_windows.go +++ b/util/winutil/s4u/s4u_windows.go @@ -938,10 +938,10 @@ func mergeEnv(existingEnv []string, extraEnv map[string]string) []string { result = append(result, strings.Join([]string{k, v}, "=")) } - slices.SortFunc(result, func(l, r string) int { - kl, _, _ := strings.Cut(l, "=") - kr, _, _ := strings.Cut(r, "=") - return strings.Compare(kl, kr) + slices.SortFunc(result, func(a, b string) int { + ka, _, _ := strings.Cut(a, "=") + kb, _, _ := strings.Cut(b, "=") + return strings.Compare(ka, kb) }) return result } diff --git a/util/winutil/startupinfo_windows.go b/util/winutil/startupinfo_windows.go index e04e9ea9b3d3a..edf48fa651cb5 100644 --- a/util/winutil/startupinfo_windows.go +++ b/util/winutil/startupinfo_windows.go @@ -83,8 +83,8 @@ func (sib *StartupInfoBuilder) Resolve() (startupInfo *windows.StartupInfo, inhe // Always create a Unicode environment. 
createProcessFlags = windows.CREATE_UNICODE_ENVIRONMENT - if l := uint32(len(sib.attrs)); l > 0 { - attrCont, err := windows.NewProcThreadAttributeList(l) + if ln := uint32(len(sib.attrs)); ln > 0 { + attrCont, err := windows.NewProcThreadAttributeList(ln) if err != nil { return nil, false, 0, err } diff --git a/util/winutil/winutil_windows_test.go b/util/winutil/winutil_windows_test.go index d437ffa383d82..ead10a45d7ee8 100644 --- a/util/winutil/winutil_windows_test.go +++ b/util/winutil/winutil_windows_test.go @@ -68,8 +68,8 @@ func checkContiguousBuffer[T any, BU BufUnit](t *testing.T, extra []BU, pt *T, p if gotLen := int(ptLen); gotLen != expectedLen { t.Errorf("allocation length got %d, want %d", gotLen, expectedLen) } - if l := len(slcs); l != 1 { - t.Errorf("len(slcs) got %d, want 1", l) + if ln := len(slcs); ln != 1 { + t.Errorf("len(slcs) got %d, want 1", ln) } if len(extra) == 0 && slcs[0] != nil { t.Error("slcs[0] got non-nil, want nil") diff --git a/wf/firewall.go b/wf/firewall.go index dc1045ff84934..07e160eb36071 100644 --- a/wf/firewall.go +++ b/wf/firewall.go @@ -66,8 +66,8 @@ func (p protocol) getLayers(d direction) []wf.LayerID { return layers } -func ruleName(action wf.Action, l wf.LayerID, name string) string { - switch l { +func ruleName(action wf.Action, layerID wf.LayerID, name string) string { + switch layerID { case wf.LayerALEAuthConnectV4: return fmt.Sprintf("%s outbound %s (IPv4)", action, name) case wf.LayerALEAuthConnectV6: @@ -307,8 +307,8 @@ func (f *Firewall) newRule(name string, w weight, layer wf.LayerID, conditions [ func (f *Firewall) addRules(name string, w weight, conditions []*wf.Match, action wf.Action, p protocol, d direction) ([]*wf.Rule, error) { var rules []*wf.Rule - for _, l := range p.getLayers(d) { - r, err := f.newRule(name, w, l, conditions, action) + for _, layer := range p.getLayers(d) { + r, err := f.newRule(name, w, layer, conditions, action) if err != nil { return nil, err } diff --git 
a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index a0142134a1b1f..f9d76105298cf 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -111,7 +111,7 @@ func (c *Conn) WaitReady(t testing.TB) { } } -func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { +func runDERPAndStun(t *testing.T, logf logger.Logf, ln nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { d := derpserver.New(key.NewNode(), logf) httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) @@ -119,7 +119,7 @@ func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, st httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) httpsrv.StartTLS() - stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, l) + stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, ln) m := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{ @@ -172,12 +172,12 @@ type magicStack struct { // newMagicStack builds and initializes an idle magicsock and // friends. You need to call conn.onNodeViewsUpdate and dev.Reconfig // before anything interesting happens. 
-func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack { +func newMagicStack(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack { privateKey := key.NewNode() - return newMagicStackWithKey(t, logf, l, derpMap, privateKey) + return newMagicStackWithKey(t, logf, ln, derpMap, privateKey) } -func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { +func newMagicStackWithKey(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { t.Helper() bus := eventbustest.NewBus(t) @@ -197,7 +197,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListen Logf: logf, HealthTracker: ht, DisablePortMapper: true, - TestOnlyPacketListener: l, + TestOnlyPacketListener: ln, EndpointsFunc: func(eps []tailcfg.Endpoint) { epCh <- eps }, @@ -687,13 +687,13 @@ func (localhostListener) ListenPacket(ctx context.Context, network, address stri func TestTwoDevicePing(t *testing.T) { flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/11762") - l, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1) + ln, ip := localhostListener{}, netaddr.IPv4(127, 0, 0, 1) n := &devices{ - m1: l, + m1: ln, m1IP: ip, - m2: l, + m2: ln, m2IP: ip, - stun: l, + stun: ln, stunIP: ip, } testTwoDevicePing(t, n) diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 260b3196ab2fc..c5a9dbcbca538 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -126,24 +126,24 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported return le } -// gro attempts to enqueue p on g if l supports a GRO kind matching the +// gro attempts to enqueue p on g if ep supports a GRO kind matching the // transport protocol carried in p. 
gro may allocate g if it is nil. gro can // either return the existing g, a newly allocated one, or nil. Callers are // responsible for calling Flush() on the returned value if it is non-nil once // they have finished iterating through all GRO candidates for a given vector. -// If gro allocates a *gro.GRO it will have l's stack.NetworkDispatcher set via +// If gro allocates a *gro.GRO it will have ep's stack.NetworkDispatcher set via // SetDispatcher(). -func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { - if !buildfeatures.HasGRO || l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { +func (ep *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { + if !buildfeatures.HasGRO || ep.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { // IPv6 may have extension headers preceding a TCP header, but we trade // for a fast path and assume p cannot be coalesced in such a case. - l.injectInbound(p) + ep.injectInbound(p) return g } if g == nil { - l.mu.RLock() - d := l.dispatcher - l.mu.RUnlock() + ep.mu.RLock() + d := ep.dispatcher + ep.mu.RUnlock() g = gro.NewGRO() g.SetDispatcher(d) } @@ -154,39 +154,39 @@ func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { // Close closes l. Further packet injections will return an error, and all // pending packets are discarded. Close may be called concurrently with // WritePackets. -func (l *linkEndpoint) Close() { - l.mu.Lock() - l.dispatcher = nil - l.mu.Unlock() - l.q.Close() - l.Drain() +func (ep *linkEndpoint) Close() { + ep.mu.Lock() + ep.dispatcher = nil + ep.mu.Unlock() + ep.q.Close() + ep.Drain() } // Read does non-blocking read one packet from the outbound packet queue. -func (l *linkEndpoint) Read() *stack.PacketBuffer { - return l.q.Read() +func (ep *linkEndpoint) Read() *stack.PacketBuffer { + return ep.q.Read() } // ReadContext does blocking read for one packet from the outbound packet queue. // It can be cancelled by ctx, and in this case, it returns nil. 
-func (l *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer { - return l.q.ReadContext(ctx) +func (ep *linkEndpoint) ReadContext(ctx context.Context) *stack.PacketBuffer { + return ep.q.ReadContext(ctx) } // Drain removes all outbound packets from the channel and counts them. -func (l *linkEndpoint) Drain() int { - return l.q.Drain() +func (ep *linkEndpoint) Drain() int { + return ep.q.Drain() } // NumQueued returns the number of packets queued for outbound. -func (l *linkEndpoint) NumQueued() int { - return l.q.Num() +func (ep *linkEndpoint) NumQueued() int { + return ep.q.Num() } -func (l *linkEndpoint) injectInbound(p *packet.Parsed) { - l.mu.RLock() - d := l.dispatcher - l.mu.RUnlock() +func (ep *linkEndpoint) injectInbound(p *packet.Parsed) { + ep.mu.RLock() + d := ep.dispatcher + ep.mu.RUnlock() if d == nil || !buildfeatures.HasNetstack { return } @@ -200,35 +200,35 @@ func (l *linkEndpoint) injectInbound(p *packet.Parsed) { // Attach saves the stack network-layer dispatcher for use later when packets // are injected. -func (l *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) { - l.mu.Lock() - defer l.mu.Unlock() - l.dispatcher = dispatcher +func (ep *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) { + ep.mu.Lock() + defer ep.mu.Unlock() + ep.dispatcher = dispatcher } // IsAttached implements stack.LinkEndpoint.IsAttached. -func (l *linkEndpoint) IsAttached() bool { - l.mu.RLock() - defer l.mu.RUnlock() - return l.dispatcher != nil +func (ep *linkEndpoint) IsAttached() bool { + ep.mu.RLock() + defer ep.mu.RUnlock() + return ep.dispatcher != nil } // MTU implements stack.LinkEndpoint.MTU. -func (l *linkEndpoint) MTU() uint32 { - l.mu.RLock() - defer l.mu.RUnlock() - return l.mtu +func (ep *linkEndpoint) MTU() uint32 { + ep.mu.RLock() + defer ep.mu.RUnlock() + return ep.mtu } // SetMTU implements stack.LinkEndpoint.SetMTU. 
-func (l *linkEndpoint) SetMTU(mtu uint32) { - l.mu.Lock() - defer l.mu.Unlock() - l.mtu = mtu +func (ep *linkEndpoint) SetMTU(mtu uint32) { + ep.mu.Lock() + defer ep.mu.Unlock() + ep.mtu = mtu } // Capabilities implements stack.LinkEndpoint.Capabilities. -func (l *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities { +func (ep *linkEndpoint) Capabilities() stack.LinkEndpointCapabilities { // We are required to offload RX checksum validation for the purposes of // GRO. return stack.CapabilityRXChecksumOffload @@ -242,8 +242,8 @@ func (*linkEndpoint) GSOMaxSize() uint32 { } // SupportedGSO implements stack.GSOEndpoint. -func (l *linkEndpoint) SupportedGSO() stack.SupportedGSO { - return l.SupportedGSOKind +func (ep *linkEndpoint) SupportedGSO() stack.SupportedGSO { + return ep.SupportedGSOKind } // MaxHeaderLength returns the maximum size of the link layer header. Given it @@ -253,22 +253,22 @@ func (*linkEndpoint) MaxHeaderLength() uint16 { } // LinkAddress returns the link address of this endpoint. -func (l *linkEndpoint) LinkAddress() tcpip.LinkAddress { - l.mu.RLock() - defer l.mu.RUnlock() - return l.linkAddr +func (ep *linkEndpoint) LinkAddress() tcpip.LinkAddress { + ep.mu.RLock() + defer ep.mu.RUnlock() + return ep.linkAddr } // SetLinkAddress implements stack.LinkEndpoint.SetLinkAddress. -func (l *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) { - l.mu.Lock() - defer l.mu.Unlock() - l.linkAddr = addr +func (ep *linkEndpoint) SetLinkAddress(addr tcpip.LinkAddress) { + ep.mu.Lock() + defer ep.mu.Unlock() + ep.linkAddr = addr } // WritePackets stores outbound packets into the channel. // Multiple concurrent calls are permitted. -func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { +func (ep *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) { n := 0 // TODO(jwhited): evaluate writing a stack.PacketBufferList instead of a // single packet. 
We can split 2 x 64K GSO across @@ -278,7 +278,7 @@ func (l *linkEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Err // control MTU (and by effect TCP MSS in gVisor) we *shouldn't* expect to // ever overflow 128 slots (see wireguard-go/tun.ErrTooManySegments usage). for _, pkt := range pkts.AsSlice() { - if err := l.q.Write(pkt); err != nil { + if err := ep.q.Write(pkt); err != nil { if _, ok := err.(*tcpip.ErrNoBufferSpace); !ok && n == 0 { return 0, err } diff --git a/wgengine/router/osrouter/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go index 929fda1b42e35..68ed8dbb2bb64 100644 --- a/wgengine/router/osrouter/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -870,7 +870,7 @@ func (o *fakeOS) run(args ...string) error { rest = family + " " + strings.Join(args[3:], " ") } - var l *[]string + var ls *[]string switch args[1] { case "link": got := strings.Join(args[2:], " ") @@ -884,31 +884,31 @@ func (o *fakeOS) run(args ...string) error { } return nil case "addr": - l = &o.ips + ls = &o.ips case "route": - l = &o.routes + ls = &o.routes case "rule": - l = &o.rules + ls = &o.rules default: return unexpected() } switch args[2] { case "add": - for _, el := range *l { + for _, el := range *ls { if el == rest { o.t.Errorf("can't add %q, already present", rest) return errors.New("already exists") } } - *l = append(*l, rest) - sort.Strings(*l) + *ls = append(*ls, rest) + sort.Strings(*ls) case "del": found := false - for i, el := range *l { + for i, el := range *ls { if el == rest { found = true - *l = append((*l)[:i], (*l)[i+1:]...) + *ls = append((*ls)[:i], (*ls)[i+1:]...) break } } From 85373ef822645f66242c9b44dbd754247d0d9c63 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 18 Nov 2025 09:44:12 +0000 Subject: [PATCH 0709/1093] tka: move RemoveAll() to CompactableChonk I added a RemoveAll() method on tka.Chonk in #17946, but it's only used in the node to purge local AUMs. 
We don't need it in the SQLite storage, which currently implements tka.Chonk, so move it to CompactableChonk instead. Also add some automated tests, as a safety net. Updates tailscale/corp#33599 Change-Id: I54de9ccf1d6a3d29b36a94eccb0ebd235acd4ebc Signed-off-by: Alex Chan --- tka/tailchonk.go | 8 +++--- tstest/chonktest/chonktest.go | 47 +++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 4 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 2dc03a6f62649..0b7191747f830 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -58,10 +58,6 @@ type Chonk interface { // as a hint to pick the correct chain in the event that the Chonk stores // multiple distinct chains. LastActiveAncestor() (*AUMHash, error) - - // RemoveAll permanently and completely clears the TKA state. This should - // be called when the user disables Tailnet Lock. - RemoveAll() error } // CompactableChonk implementation are extensions of Chonk, which are @@ -80,6 +76,10 @@ type CompactableChonk interface { // PurgeAUMs permanently and irrevocably deletes the specified // AUMs from storage. PurgeAUMs(hashes []AUMHash) error + + // RemoveAll permanently and completely clears the TKA state. This should + // be called when the user disables Tailnet Lock. 
+ RemoveAll() error } // Mem implements in-memory storage of TKA state, suitable for diff --git a/tstest/chonktest/chonktest.go b/tstest/chonktest/chonktest.go index bfe394b28fcaf..404f1ec47f16c 100644 --- a/tstest/chonktest/chonktest.go +++ b/tstest/chonktest/chonktest.go @@ -9,6 +9,7 @@ package chonktest import ( "bytes" "encoding/binary" + "errors" "math/rand" "os" "testing" @@ -253,4 +254,50 @@ func RunCompactableChonkTests(t *testing.T, newChonk func(t *testing.T) tka.Comp t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) } }) + + t.Run("RemoveAll", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + data := []tka.AUM{ + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{1, 2}, + PrevAUMHash: parentHash[:], + }, + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{3, 4}, + PrevAUMHash: parentHash[:], + }, + } + + if err := chonk.CommitVerifiedAUMs(data); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + // Check we can retrieve the AUMs we just stored + for _, want := range data { + got, err := chonk.AUM(want.Hash()) + if err != nil { + t.Fatalf("could not get %s: %v", want.Hash(), err) + } + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("stored AUM %s differs (-want, +got):\n%s", want.Hash(), diff) + } + } + + // Call RemoveAll() to drop all the AUM state + if err := chonk.RemoveAll(); err != nil { + t.Fatalf("RemoveAll failed: %v", err) + } + + // Check we can no longer retrieve the previously-stored AUMs + for _, want := range data { + aum, err := chonk.AUM(want.Hash()) + if !errors.Is(err, os.ErrNotExist) { + t.Fatalf("expected os.ErrNotExist for %s, instead got aum=%v, err=%v", want.Hash(), aum, err) + } + } + }) } From af7c26aa054e7778383bade11a38c62907e92200 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 18 Nov 2025 10:36:14 +0000 Subject: [PATCH 0710/1093] cmd/vet/jsontags: fix a typo in an error message Updates #17945 Change-Id: 
I8987271420feb190f5e4d85caff305c8d4e84aae Signed-off-by: Alex Chan --- cmd/vet/jsontags/report.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/vet/jsontags/report.go b/cmd/vet/jsontags/report.go index 19d40799b8875..8e5869060799c 100644 --- a/cmd/vet/jsontags/report.go +++ b/cmd/vet/jsontags/report.go @@ -80,9 +80,9 @@ const ( func (k ReportKind) message() string { switch k { case OmitEmptyUnsupportedInV1: - return "uses `omitempty` on an unspported type in json/v1; should probably use `omitzero` instead" + return "uses `omitempty` on an unsupported type in json/v1; should probably use `omitzero` instead" case OmitEmptyUnsupportedInV2: - return "uses `omitempty` on an unspported type in json/v2; should probably use `omitzero` instead" + return "uses `omitempty` on an unsupported type in json/v2; should probably use `omitzero` instead" case OmitEmptyShouldBeOmitZero: return "should use `omitzero` instead of `omitempty`" case OmitEmptyShouldBeOmitZeroButHasIsZero: From 4e2f2d10889e79d338e1039e3b1263de0043235e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Nov 2025 20:53:14 -0800 Subject: [PATCH 0711/1093] feature/buildfeatures: re-run go generate 6a73c0bdf55 added a feature tag but didn't re-run go generate on ./feature/buildfeatures. 
Updates #9192 Change-Id: I7819450453e6b34c60cad29d2273e3e118291643 Signed-off-by: Brad Fitzpatrick --- ...n_disabled.go => feature_identityfederation_disabled.go} | 6 +++--- ...ion_enabled.go => feature_identityfederation_enabled.go} | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) rename feature/buildfeatures/{feature_identity_federation_disabled.go => feature_identityfederation_disabled.go} (70%) rename feature/buildfeatures/{feature_identity_federation_enabled.go => feature_identityfederation_enabled.go} (70%) diff --git a/feature/buildfeatures/feature_identity_federation_disabled.go b/feature/buildfeatures/feature_identityfederation_disabled.go similarity index 70% rename from feature/buildfeatures/feature_identity_federation_disabled.go rename to feature/buildfeatures/feature_identityfederation_disabled.go index c7b16f729cbc5..94488adc8637c 100644 --- a/feature/buildfeatures/feature_identity_federation_disabled.go +++ b/feature/buildfeatures/feature_identityfederation_disabled.go @@ -3,11 +3,11 @@ // Code generated by gen.go; DO NOT EDIT. -//go:build ts_omit_identity_federation +//go:build ts_omit_identityfederation package buildfeatures -// HasIdentityFederation is whether the binary was built with support for modular feature "Identity token exchange for auth key support". -// Specifically, it's whether the binary was NOT built with the "ts_omit_identity_federation" build tag. +// HasIdentityFederation is whether the binary was built with support for modular feature "Auth key generation via identity federation support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identityfederation" build tag. // It's a const so it can be used for dead code elimination. 
const HasIdentityFederation = false diff --git a/feature/buildfeatures/feature_identity_federation_enabled.go b/feature/buildfeatures/feature_identityfederation_enabled.go similarity index 70% rename from feature/buildfeatures/feature_identity_federation_enabled.go rename to feature/buildfeatures/feature_identityfederation_enabled.go index 1f7cf17423c96..892d62d66c37c 100644 --- a/feature/buildfeatures/feature_identity_federation_enabled.go +++ b/feature/buildfeatures/feature_identityfederation_enabled.go @@ -3,11 +3,11 @@ // Code generated by gen.go; DO NOT EDIT. -//go:build !ts_omit_identity_federation +//go:build !ts_omit_identityfederation package buildfeatures -// HasIdentityFederation is whether the binary was built with support for modular feature "Identity token exchange for auth key support". -// Specifically, it's whether the binary was NOT built with the "ts_omit_identity_federation" build tag. +// HasIdentityFederation is whether the binary was built with support for modular feature "Auth key generation via identity federation support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identityfederation" build tag. // It's a const so it can be used for dead code elimination. const HasIdentityFederation = true From 2a6cbb70d9cca049dd079fbc25285fd13649a700 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 17 Nov 2025 20:57:51 -0800 Subject: [PATCH 0712/1093] .github/workflows: make go_generate check detect new files Updates #17957 Change-Id: I904fd5b544ac3090b58c678c4726e7ace41a52dd Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b6d41e937c2db..35b4ea3ef1f68 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -703,6 +703,7 @@ jobs: run: | pkgs=$(./tool/go list ./... | grep -Ev 'dnsfallback|k8s-operator|xdp') ./tool/go generate $pkgs + git add -N . 
# ensure untracked files are noticed echo echo git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1) From bd29b189fe8b15783b59c63ec5ebbb2584a9d5f7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 18 Nov 2025 07:25:10 -0800 Subject: [PATCH 0713/1093] types/netmap,*: remove some redundant fields from NetMap Updates #12639 Change-Id: Ia50b15529bd1c002cdd2c937cdfbe69c06fa2dc8 Signed-off-by: Brad Fitzpatrick --- cmd/tsconnect/wasm/wasm_js.go | 2 +- control/controlclient/auto.go | 2 +- control/controlclient/direct.go | 2 +- control/controlclient/map.go | 2 -- ipn/ipnlocal/c2n_test.go | 1 - ipn/ipnlocal/dnsconfig_test.go | 14 ++++++++------ ipn/ipnlocal/local.go | 4 ++-- ipn/ipnlocal/local_test.go | 8 ++++---- ipn/ipnlocal/node_backend.go | 2 +- ipn/ipnlocal/state_test.go | 21 ++++++++++++++------- net/tsdial/dnsmap.go | 8 ++++---- net/tsdial/dnsmap_test.go | 6 +++--- types/netmap/netmap.go | 23 +++++++++++++++++------ wgengine/magicsock/magicsock_test.go | 6 +++--- 14 files changed, 59 insertions(+), 42 deletions(-) diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 2e81fa4a8a2e7..c7aa00d1d794f 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -261,7 +261,7 @@ func (i *jsIPN) run(jsCallbacks js.Value) { jsNetMap := jsNetMap{ Self: jsNetMapSelfNode{ jsNetMapNode: jsNetMapNode{ - Name: nm.Name, + Name: nm.SelfName(), Addresses: mapSliceView(nm.GetAddresses(), func(a netip.Prefix) string { return a.Addr().String() }), NodeKey: nm.NodeKey.String(), MachineKey: nm.MachineKey.String(), diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 20795d5a7dd92..3cbfe85818fbf 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -443,7 +443,7 @@ func (mrs mapRoutineState) UpdateFullNetmap(nm *netmap.NetworkMap) { c.mu.Lock() c.inMapPoll = true - c.expiry = nm.Expiry + c.expiry = nm.SelfKeyExpiry() 
stillAuthed := c.loggedIn c.logf("[v1] mapRoutine: netmap received: loggedIn=%v inMapPoll=true", stillAuthed) c.mu.Unlock() diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 1e1ce781fe511..62bbb35861fd2 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1093,7 +1093,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap c.persist = newPersist.View() persist = c.persist } - c.expiry = nm.Expiry + c.expiry = nm.SelfKeyExpiry() } // gotNonKeepAliveMessage is whether we've yet received a MapResponse message without diff --git a/control/controlclient/map.go b/control/controlclient/map.go index a9db25517f87d..9aa8e37107a99 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -891,8 +891,6 @@ func (ms *mapSession) netmap() *netmap.NetworkMap { if node := ms.lastNode; node.Valid() { nm.SelfNode = node - nm.Expiry = node.KeyExpiry() - nm.Name = node.Name() nm.AllCaps = ms.lastCapSet } diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 420633c87b554..86cc6a5490865 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -142,7 +142,6 @@ func TestHandleC2NTLSCertStatus(t *testing.T) { func TestHandleC2NDebugNetmap(t *testing.T) { nm := &netmap.NetworkMap{ - Name: "myhost", SelfNode: (&tailcfg.Node{ ID: 100, Name: "myhost", diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index 71f1751488788..e23d8a057546f 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -70,8 +70,8 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "self_name_and_peers", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("100.101.101.101"), }).View(), }, @@ -109,15 +109,15 @@ func TestDNSConfigForNetmap(t *testing.T) { // even if they have IPv4. 
name: "v6_only_self", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("fe75::1"), }).View(), }, peers: nodeViews([]*tailcfg.Node{ { ID: 1, - Name: "peera.net", + Name: "peera.net.", Addresses: ipps("100.102.0.1", "100.102.0.2", "fe75::1001"), }, { @@ -146,8 +146,8 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "extra_records", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("100.101.101.101"), }).View(), DNS: tailcfg.DNSConfig{ @@ -171,7 +171,9 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "corp_dns_misc", nm: &netmap.NetworkMap{ - Name: "host.some.domain.net.", + SelfNode: (&tailcfg.Node{ + Name: "host.some.domain.net.", + }).View(), DNS: tailcfg.DNSConfig{ Proxied: true, Domains: []string{"foo.com", "bar.com"}, @@ -331,8 +333,8 @@ func TestDNSConfigForNetmap(t *testing.T) { { name: "self_expired", nm: &netmap.NetworkMap{ - Name: "myname.net", SelfNode: (&tailcfg.Node{ + Name: "myname.net.", Addresses: ipps("100.101.101.101"), }).View(), }, diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 24ab417352061..7eb673e6de056 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1301,7 +1301,7 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { if hi := nm.SelfNode.Hostinfo(); hi.Valid() { ss.HostName = hi.Hostname() } - ss.DNSName = nm.Name + ss.DNSName = nm.SelfName() ss.UserID = nm.User() if sn := nm.SelfNode; sn.Valid() { peerStatusFromNode(ss, sn) @@ -1617,7 +1617,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control keyExpiryExtended := false if st.NetMap != nil { wasExpired := b.keyExpired - isExpired := !st.NetMap.Expiry.IsZero() && st.NetMap.Expiry.Before(b.clock.Now()) + isExpired := !st.NetMap.SelfKeyExpiry().IsZero() && st.NetMap.SelfKeyExpiry().Before(b.clock.Now()) if wasExpired && !isExpired { keyExpiryExtended = true } diff --git 
a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 5df0ae5bbe920..f17fabb60f5fa 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2712,8 +2712,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIPWant: "127.0.0.1", prefsChanged: false, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -2749,8 +2749,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIDWant: "123", prefsChanged: true, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -2787,8 +2787,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIDWant: "123", prefsChanged: true, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -2827,8 +2827,8 @@ func TestSetExitNodeIDPolicy(t *testing.T) { exitNodeIDWant: "123", prefsChanged: true, nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 6880440bdd600..efef57ea492e7 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -748,7 +748,7 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. 
} dcfg.Hosts[fqdn] = ips } - set(nm.Name, nm.GetAddresses()) + set(nm.SelfName(), nm.GetAddresses()) for _, peer := range peers { set(peer.Name(), peer.Addresses()) } diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 0c95ef4fcf5f6..b7325e957be38 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -999,8 +999,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { t.Logf("\n\nExpireKey") notifies.expect(1) cc.send(sendOpt{nm: &netmap.NetworkMap{ - Expiry: time.Now().Add(-time.Minute), - SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + SelfNode: (&tailcfg.Node{ + KeyExpiry: time.Now().Add(-time.Minute), + MachineAuthorized: true, + }).View(), }}) { nn := notifies.drain(1) @@ -1015,8 +1017,10 @@ func runTestStateMachine(t *testing.T, seamless bool) { t.Logf("\n\nExtendKey") notifies.expect(1) cc.send(sendOpt{nm: &netmap.NetworkMap{ - Expiry: time.Now().Add(time.Minute), - SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + SelfNode: (&tailcfg.Node{ + MachineAuthorized: true, + KeyExpiry: time.Now().Add(time.Minute), + }).View(), }}) { nn := notifies.drain(1) @@ -1427,7 +1431,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { mustDo2(t)(lb.EditPrefs(connect)) cc().authenticated(node1) cc().send(sendOpt{nm: &netmap.NetworkMap{ - Expiry: time.Now().Add(-time.Minute), + SelfNode: (&tailcfg.Node{ + KeyExpiry: time.Now().Add(-time.Minute), + }).View(), }}) }, wantState: ipn.NeedsLogin, @@ -1550,7 +1556,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { mustDo2(t)(lb.EditPrefs(connect)) cc().authenticated(node1) cc().send(sendOpt{nm: &netmap.NetworkMap{ - Expiry: time.Now().Add(-time.Minute), + SelfNode: (&tailcfg.Node{ + KeyExpiry: time.Now().Add(-time.Minute), + }).View(), }}) }, // Even with seamless, if the key we are using expires, we want to disconnect: @@ -1725,7 +1733,6 @@ func buildNetmapWithPeers(self tailcfg.NodeView, peers ...tailcfg.NodeView) *net return &netmap.NetworkMap{ 
SelfNode: self, - Name: self.Name(), Domain: domain, Peers: peers, UserProfiles: users, diff --git a/net/tsdial/dnsmap.go b/net/tsdial/dnsmap.go index 2ef1cb1f171c0..37fedd14c899d 100644 --- a/net/tsdial/dnsmap.go +++ b/net/tsdial/dnsmap.go @@ -36,11 +36,11 @@ func dnsMapFromNetworkMap(nm *netmap.NetworkMap) dnsMap { suffix := nm.MagicDNSSuffix() have4 := false addrs := nm.GetAddresses() - if nm.Name != "" && addrs.Len() > 0 { + if name := nm.SelfName(); name != "" && addrs.Len() > 0 { ip := addrs.At(0).Addr() - ret[canonMapKey(nm.Name)] = ip - if dnsname.HasSuffix(nm.Name, suffix) { - ret[canonMapKey(dnsname.TrimSuffix(nm.Name, suffix))] = ip + ret[canonMapKey(name)] = ip + if dnsname.HasSuffix(name, suffix) { + ret[canonMapKey(dnsname.TrimSuffix(name, suffix))] = ip } for _, p := range addrs.All() { if p.Addr().Is4() { diff --git a/net/tsdial/dnsmap_test.go b/net/tsdial/dnsmap_test.go index 43461a135e1c5..41a957f186f4a 100644 --- a/net/tsdial/dnsmap_test.go +++ b/net/tsdial/dnsmap_test.go @@ -31,8 +31,8 @@ func TestDNSMapFromNetworkMap(t *testing.T) { { name: "self", nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -47,8 +47,8 @@ func TestDNSMapFromNetworkMap(t *testing.T) { { name: "self_and_peers", nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100.102.103.104/32"), pfx("100::123/128"), @@ -82,8 +82,8 @@ func TestDNSMapFromNetworkMap(t *testing.T) { { name: "self_has_v6_only", nm: &netmap.NetworkMap{ - Name: "foo.tailnet", SelfNode: (&tailcfg.Node{ + Name: "foo.tailnet.", Addresses: []netip.Prefix{ pfx("100::123/128"), }, diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 0a2f3ea71fd09..c54562f4d5b53 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -29,10 +29,6 @@ type NetworkMap struct { SelfNode tailcfg.NodeView AllCaps 
set.Set[tailcfg.NodeCapability] // set version of SelfNode.Capabilities + SelfNode.CapMap NodeKey key.NodePublic - Expiry time.Time - // Name is the DNS name assigned to this node. - // It is the MapResponse.Node.Name value and ends with a period. - Name string MachineKey key.MachinePublic @@ -235,10 +231,25 @@ func MagicDNSSuffixOfNodeName(nodeName string) string { // // It will neither start nor end with a period. func (nm *NetworkMap) MagicDNSSuffix() string { - if nm == nil { + return MagicDNSSuffixOfNodeName(nm.SelfName()) +} + +// SelfName returns nm.SelfNode.Name, or the empty string +// if nm is nil or nm.SelfNode is invalid. +func (nm *NetworkMap) SelfName() string { + if nm == nil || !nm.SelfNode.Valid() { return "" } - return MagicDNSSuffixOfNodeName(nm.Name) + return nm.SelfNode.Name() +} + +// SelfKeyExpiry returns nm.SelfNode.KeyExpiry, or the zero +// value if nil or nm.SelfNode is invalid. +func (nm *NetworkMap) SelfKeyExpiry() time.Time { + if nm == nil || !nm.SelfNode.Valid() { + return time.Time{} + } + return nm.SelfNode.KeyExpiry() } // DomainName returns the name of the NetworkMap's diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index f9d76105298cf..2a20b3cf602c3 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -2200,9 +2200,9 @@ func TestIsWireGuardOnlyPeer(t *testing.T) { defer m.Close() nm := &netmap.NetworkMap{ - Name: "ts", NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ + Name: "ts.", Addresses: []netip.Prefix{tsaip}, }).View(), Peers: nodeViews([]*tailcfg.Node{ @@ -2264,9 +2264,9 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { defer m.Close() nm := &netmap.NetworkMap{ - Name: "ts", NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ + Name: "ts.", Addresses: []netip.Prefix{tsaip}, }).View(), Peers: nodeViews([]*tailcfg.Node{ @@ -2400,9 +2400,9 @@ func TestIsWireGuardOnlyPickEndpointByPing(t *testing.T) { wgEpV6 := 
netip.MustParseAddrPort(v6.LocalAddr().String()) nm := &netmap.NetworkMap{ - Name: "ts", NodeKey: m.privateKey.Public(), SelfNode: (&tailcfg.Node{ + Name: "ts.", Addresses: []netip.Prefix{tsaip}, }).View(), Peers: nodeViews([]*tailcfg.Node{ From 04a9d25a545824d499af9bcff967a235566c8389 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 18 Nov 2025 17:04:08 +0000 Subject: [PATCH 0714/1093] tka: mark young AUMs as active even if the chain is long Existing compaction logic seems to have had an assumption that markActiveChain would cover a longer part of the chain than markYoungAUMs. This prevented long, but fresh, chains, from being compacted correctly. Updates tailscale/corp#33537 Signed-off-by: Anton Tolchanov --- tka/tailchonk.go | 23 +++++++++++++---------- tka/tailchonk_test.go | 31 +++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 10 deletions(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 0b7191747f830..d92016c45d71f 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -668,7 +668,7 @@ const ( ) // markActiveChain marks AUMs in the active chain. -// All AUMs that are within minChain ancestors of head are +// All AUMs that are within minChain ancestors of head, or are marked as young, are // marked retainStateActive, and all remaining ancestors are // marked retainStateCandidate. // @@ -700,19 +700,22 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in // If we got this far, we have at least minChain AUMs stored, and minChain number // of ancestors have been marked for retention. We now continue to iterate backwards - // till we find an AUM which we can compact to (a Checkpoint AUM). + // till we find an AUM which we can compact to: either a Checkpoint AUM which is old + // enough, or the genesis AUM. 
for { h := next.Hash() verdict[h] |= retainStateActive + + parent, hasParent := next.Parent() + isYoung := verdict[h]&retainStateYoung != 0 + if next.MessageKind == AUMCheckpoint { lastActiveAncestor = h - break + if !isYoung || !hasParent { + break + } } - parent, hasParent := next.Parent() - if !hasParent { - return AUMHash{}, errors.New("reached genesis AUM without finding an appropriate lastActiveAncestor") - } if next, err = storage.AUM(parent); err != nil { return AUMHash{}, fmt.Errorf("searching for compaction target (%v): %w", parent, err) } @@ -917,12 +920,12 @@ func Compact(storage CompactableChonk, head AUMHash, opts CompactionOptions) (la verdict[h] = 0 } - if lastActiveAncestor, err = markActiveChain(storage, verdict, opts.MinChain, head); err != nil { - return AUMHash{}, fmt.Errorf("marking active chain: %w", err) - } if err := markYoungAUMs(storage, verdict, opts.MinAge); err != nil { return AUMHash{}, fmt.Errorf("marking young AUMs: %w", err) } + if lastActiveAncestor, err = markActiveChain(storage, verdict, opts.MinChain, head); err != nil { + return AUMHash{}, fmt.Errorf("marking active chain: %w", err) + } if err := markDescendantAUMs(storage, verdict); err != nil { return AUMHash{}, fmt.Errorf("marking descendant AUMs: %w", err) } diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 70b7dc9a72fbb..7125c99fefe36 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/crypto/blake2s" + "tailscale.com/types/key" "tailscale.com/util/must" ) @@ -601,3 +602,33 @@ func TestCompact(t *testing.T) { } } } + +func TestCompactLongButYoung(t *testing.T) { + ourPriv := key.NewNLPrivate() + ourKey := Key{Kind: Key25519, Public: ourPriv.Public().Verifier(), Votes: 1} + someOtherKey := Key{Kind: Key25519, Public: key.NewNLPrivate().Public().Verifier(), Votes: 1} + + storage := &Mem{} + auth, _, err := Create(storage, State{ + Keys: 
[]Key{ourKey, someOtherKey}, + DisablementSecrets: [][]byte{DisablementKDF(bytes.Repeat([]byte{0xa5}, 32))}, + }, ourPriv) + if err != nil { + t.Fatalf("tka.Create() failed: %v", err) + } + + genesis := auth.Head() + + for range 100 { + upd := auth.NewUpdater(ourPriv) + must.Do(upd.RemoveKey(someOtherKey.MustID())) + must.Do(upd.AddKey(someOtherKey)) + aums := must.Get(upd.Finalize(storage)) + must.Do(auth.Inform(storage, aums)) + } + + lastActiveAncestor := must.Get(Compact(storage, auth.Head(), CompactionOptions{MinChain: 5, MinAge: time.Hour})) + if lastActiveAncestor != genesis { + t.Errorf("last active ancestor = %v, want %v", lastActiveAncestor, genesis) + } +} From d0daa5a398ec4a17499938c3c25ce1cf5058d1b9 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 17 Nov 2025 17:12:05 +0000 Subject: [PATCH 0715/1093] tka: marshal AUMHash totext even if Tailnet Lock is omitted We use `tka.AUMHash` in `netmap.NetworkMap`, and we serialise it as JSON in the `/debug/netmap` C2N endpoint. If the binary omits Tailnet Lock support, the debug endpoint returns an error because it's unable to marshal the AUMHash. This patch adds a sentinel value so this marshalling works, and we can use the debug endpoint. Updates https://github.com/tailscale/tailscale/issues/17115 Signed-off-by: Alex Chan Change-Id: I51ec1491a74e9b9f49d1766abd89681049e09ce4 --- tka/disabled_stub.go | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/tka/disabled_stub.go b/tka/disabled_stub.go index 15bf12c333fc8..4c4afa3706d98 100644 --- a/tka/disabled_stub.go +++ b/tka/disabled_stub.go @@ -22,7 +22,24 @@ type Authority struct { func (*Authority) Head() AUMHash { return AUMHash{} } -func (AUMHash) MarshalText() ([]byte, error) { return nil, errNoTailnetLock } +// MarshalText returns a dummy value explaining that Tailnet Lock +// is not compiled in to this binary. 
+// +// We need to be able to marshal AUMHash to text because it's included +// in [netmap.NetworkMap], which gets serialised as JSON in the +// c2n /debug/netmap endpoint. +// +// We provide a basic marshaller so that endpoint works correctly +// with nodes that omit Tailnet Lock support, but we don't want the +// base32 dependency used for the regular marshaller, and we don't +// need unmarshalling support at time of writing (2025-11-18). +func (h AUMHash) MarshalText() ([]byte, error) { + return []byte(""), nil +} + +func (h *AUMHash) UnmarshalText(text []byte) error { + return errors.New("tailnet lock is not supported by this binary") +} type State struct{} @@ -128,12 +145,6 @@ type NodeKeySignature struct { type DeeplinkValidationResult struct { } -func (h *AUMHash) UnmarshalText(text []byte) error { - return errNoTailnetLock -} - -var errNoTailnetLock = errors.New("tailnet lock is not enabled") - func DecodeWrappedAuthkey(wrappedAuthKey string, logf logger.Logf) (authKey string, isWrapped bool, sig *NodeKeySignature, priv ed25519.PrivateKey) { return wrappedAuthKey, false, nil, nil } From da508c504de626e1dcd9a218bed6cfb758298ba6 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Mon, 17 Nov 2025 13:58:59 -0800 Subject: [PATCH 0716/1093] appc: add ippool type As part of the conn25 work we will want to be able to keep track of a pool of IP Addresses and know which have been used and which have not. 
Fixes tailscale/corp#34247 Signed-off-by: Fran Bull --- appc/ippool.go | 61 +++++++++++++++++++++++++++++++++++++++++++++ appc/ippool_test.go | 60 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+) create mode 100644 appc/ippool.go create mode 100644 appc/ippool_test.go diff --git a/appc/ippool.go b/appc/ippool.go new file mode 100644 index 0000000000000..a2e86a7c296a8 --- /dev/null +++ b/appc/ippool.go @@ -0,0 +1,61 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package appc + +import ( + "errors" + "net/netip" + + "go4.org/netipx" +) + +// errPoolExhausted is returned when there are no more addresses to iterate over. +var errPoolExhausted = errors.New("ip pool exhausted") + +// ippool allows for iteration over all the addresses within a netipx.IPSet. +// netipx.IPSet has a Ranges call that returns the "minimum and sorted set of IP ranges that covers [the set]". +// netipx.IPRange is "an inclusive range of IP addresses from the same address family.". So we can iterate over +// all the addresses in the set by keeping a track of the last address we returned, calling Next on the last address +// to get the new one, and if we run off the edge of the current range, starting on the next one. +type ippool struct { + // ranges defines the addresses in the pool + ranges []netipx.IPRange + // last is internal tracking of which the last address provided was. + last netip.Addr + // rangeIdx is internal tracking of which netipx.IPRange from the IPSet we are currently on. + rangeIdx int +} + +func newIPPool(ipset *netipx.IPSet) *ippool { + if ipset == nil { + return &ippool{} + } + return &ippool{ranges: ipset.Ranges()} +} + +// next returns the next address from the set, or errPoolExhausted if we have +// iterated over the whole set. 
+func (ipp *ippool) next() (netip.Addr, error) { + if ipp.rangeIdx >= len(ipp.ranges) { + // ipset is empty or we have iterated off the end + return netip.Addr{}, errPoolExhausted + } + if !ipp.last.IsValid() { + // not initialized yet + ipp.last = ipp.ranges[0].From() + return ipp.last, nil + } + currRange := ipp.ranges[ipp.rangeIdx] + if ipp.last == currRange.To() { + // then we need to move to the next range + ipp.rangeIdx++ + if ipp.rangeIdx >= len(ipp.ranges) { + return netip.Addr{}, errPoolExhausted + } + ipp.last = ipp.ranges[ipp.rangeIdx].From() + return ipp.last, nil + } + ipp.last = ipp.last.Next() + return ipp.last, nil +} diff --git a/appc/ippool_test.go b/appc/ippool_test.go new file mode 100644 index 0000000000000..64b76738f661e --- /dev/null +++ b/appc/ippool_test.go @@ -0,0 +1,60 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package appc + +import ( + "errors" + "net/netip" + "testing" + + "go4.org/netipx" + "tailscale.com/util/must" +) + +func TestNext(t *testing.T) { + a := ippool{} + _, err := a.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } + + var isb netipx.IPSetBuilder + ipset := must.Get(isb.IPSet()) + b := newIPPool(ipset) + _, err = b.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } + + isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("192.168.0.0"), netip.MustParseAddr("192.168.0.2"))) + isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("200.0.0.0"), netip.MustParseAddr("200.0.0.0"))) + isb.AddRange(netipx.IPRangeFrom(netip.MustParseAddr("201.0.0.0"), netip.MustParseAddr("201.0.0.1"))) + ipset = must.Get(isb.IPSet()) + c := newIPPool(ipset) + expected := []string{ + "192.168.0.0", + "192.168.0.1", + "192.168.0.2", + "200.0.0.0", + "201.0.0.0", + "201.0.0.1", + } + for i, want := range expected { + addr, err := c.next() + if err != nil { + t.Fatal(err) + } + if addr != 
netip.MustParseAddr(want) { + t.Fatalf("next call %d want: %s, got: %v", i, want, addr) + } + } + _, err = c.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } + _, err = c.next() + if !errors.Is(err, errPoolExhausted) { + t.Fatalf("expected errPoolExhausted, got %v", err) + } +} From c09c95ef67d5fe9ff127cf2102f189e47e41b119 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 3 Nov 2025 16:41:37 -0800 Subject: [PATCH 0717/1093] types/key,wgengine/magicsock,control/controlclient,ipn: add debug disco key rotation Adds the ability to rotate discovery keys on running clients, needed for testing upcoming disco key distribution changes. Introduces key.DiscoKey, an atomic container for a disco private key, public key, and the public key's ShortString, replacing the prior separate atomic fields. magicsock.Conn has a new RotateDiscoKey method, and access to this is provided via localapi and a CLI debug command. Note that this implementation is primarily for testing as it stands, and regular use should likely introduce an additional mechanism that allows the old key to be used for some time, to provide a seamless key rotation rather than one that invalidates all sessions. 
Updates tailscale/corp#34037 Signed-off-by: James Tucker --- cmd/tailscale/cli/debug.go | 6 +++ control/controlclient/auto.go | 7 +++ control/controlclient/client.go | 6 +++ control/controlclient/direct.go | 16 ++++-- control/controlclient/direct_test.go | 26 +++++++++ ipn/ipnlocal/local.go | 24 +++++++++ ipn/ipnlocal/state_test.go | 5 ++ ipn/localapi/debug.go | 20 +++++++ wgengine/magicsock/disco_atomic.go | 58 ++++++++++++++++++++ wgengine/magicsock/disco_atomic_test.go | 70 +++++++++++++++++++++++++ wgengine/magicsock/endpoint.go | 4 +- wgengine/magicsock/endpoint_test.go | 25 +++++++-- wgengine/magicsock/magicsock.go | 63 ++++++++++++++-------- wgengine/magicsock/magicsock_test.go | 70 +++++++++++++++++++++++++ wgengine/magicsock/relaymanager.go | 4 +- wgengine/magicsock/relaymanager_test.go | 8 ++- 16 files changed, 375 insertions(+), 37 deletions(-) create mode 100644 wgengine/magicsock/disco_atomic.go create mode 100644 wgengine/magicsock/disco_atomic_test.go diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index ffed51a63e112..2facd66ae0278 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -182,6 +182,12 @@ func debugCmd() *ffcli.Command { Exec: localAPIAction("rebind"), ShortHelp: "Force a magicsock rebind", }, + { + Name: "rotate-disco-key", + ShortUsage: "tailscale debug rotate-disco-key", + Exec: localAPIAction("rotate-disco-key"), + ShortHelp: "Rotate the discovery key", + }, { Name: "derp-set-on-demand", ShortUsage: "tailscale debug derp-set-on-demand", diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 3cbfe85818fbf..336a8d491bc9c 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -767,6 +767,13 @@ func (c *Auto) UpdateEndpoints(endpoints []tailcfg.Endpoint) { } } +// SetDiscoPublicKey sets the client's Disco public to key and sends the change +// to the control server. 
+func (c *Auto) SetDiscoPublicKey(key key.DiscoPublic) { + c.direct.SetDiscoPublicKey(key) + c.updateControl() +} + func (c *Auto) Shutdown() { c.mu.Lock() if c.closed { diff --git a/control/controlclient/client.go b/control/controlclient/client.go index d0aa129ae95b4..41b39622b0199 100644 --- a/control/controlclient/client.go +++ b/control/controlclient/client.go @@ -12,6 +12,7 @@ import ( "context" "tailscale.com/tailcfg" + "tailscale.com/types/key" ) // LoginFlags is a bitmask of options to change the behavior of Client.Login @@ -80,7 +81,12 @@ type Client interface { // TODO: a server-side change would let us simply upload this // in a separate http request. It has nothing to do with the rest of // the state machine. + // Note: the auto client uploads the new endpoints to control immediately. UpdateEndpoints(endpoints []tailcfg.Endpoint) + // SetDiscoPublicKey updates the disco public key that will be sent in + // future map requests. This should be called after rotating the discovery key. + // Note: the auto client uploads the new key to control immediately. + SetDiscoPublicKey(key.DiscoPublic) // ClientID returns the ClientID of a client. This ID is meant to // distinguish one client from another. 
ClientID() int64 diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 62bbb35861fd2..006a801eff505 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -74,7 +74,6 @@ type Direct struct { logf logger.Logf netMon *netmon.Monitor // non-nil health *health.Tracker - discoPubKey key.DiscoPublic busClient *eventbus.Client clientVersionPub *eventbus.Publisher[tailcfg.ClientVersion] autoUpdatePub *eventbus.Publisher[AutoUpdate] @@ -95,6 +94,7 @@ type Direct struct { mu syncs.Mutex // mutex guards the following fields serverLegacyKey key.MachinePublic // original ("legacy") nacl crypto_box-based public key; only used for signRegisterRequest on Windows now serverNoiseKey key.MachinePublic + discoPubKey key.DiscoPublic // protected by mu; can be updated via [SetDiscoPublicKey] sfGroup singleflight.Group[struct{}, *ts2021.Client] // protects noiseClient creation. noiseClient *ts2021.Client // also protected by mu @@ -316,7 +316,6 @@ func NewDirect(opts Options) (*Direct, error) { logf: opts.Logf, persist: opts.Persist.View(), authKey: opts.AuthKey, - discoPubKey: opts.DiscoPublicKey, debugFlags: opts.DebugFlags, netMon: netMon, health: opts.HealthTracker, @@ -329,6 +328,7 @@ func NewDirect(opts Options) (*Direct, error) { dnsCache: dnsCache, dialPlan: opts.DialPlan, } + c.discoPubKey = opts.DiscoPublicKey c.closedCtx, c.closeCtx = context.WithCancel(context.Background()) c.controlClientID = nextControlClientID.Add(1) @@ -853,6 +853,14 @@ func (c *Direct) SendUpdate(ctx context.Context) error { return c.sendMapRequest(ctx, false, nil) } +// SetDiscoPublicKey updates the disco public key in local state. +// It does not implicitly trigger [SendUpdate]; callers should arrange for that. +func (c *Direct) SetDiscoPublicKey(key key.DiscoPublic) { + c.mu.Lock() + defer c.mu.Unlock() + c.discoPubKey = key +} + // ClientID returns the controlClientID of the controlClient. 
func (c *Direct) ClientID() int64 { return c.controlClientID @@ -902,6 +910,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap persist := c.persist serverURL := c.serverURL serverNoiseKey := c.serverNoiseKey + discoKey := c.discoPubKey hi := c.hostInfoLocked() backendLogID := hi.BackendLogID connectionHandleForTest := c.connectionHandleForTest @@ -945,11 +954,12 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap } nodeKey := persist.PublicNodeKey() + request := &tailcfg.MapRequest{ Version: tailcfg.CurrentCapabilityVersion, KeepAlive: true, NodeKey: nodeKey, - DiscoKey: c.discoPubKey, + DiscoKey: discoKey, Endpoints: eps, EndpointTypes: epTypes, Stream: isStreaming, diff --git a/control/controlclient/direct_test.go b/control/controlclient/direct_test.go index dd93dc7b33d61..4329fc878ceb3 100644 --- a/control/controlclient/direct_test.go +++ b/control/controlclient/direct_test.go @@ -20,6 +20,32 @@ import ( "tailscale.com/util/eventbus/eventbustest" ) +func TestSetDiscoPublicKey(t *testing.T) { + initialKey := key.NewDisco().Public() + + c := &Direct{ + discoPubKey: initialKey, + } + + c.mu.Lock() + if c.discoPubKey != initialKey { + t.Fatalf("initial disco key mismatch: got %v, want %v", c.discoPubKey, initialKey) + } + c.mu.Unlock() + + newKey := key.NewDisco().Public() + c.SetDiscoPublicKey(newKey) + + c.mu.Lock() + if c.discoPubKey != newKey { + t.Fatalf("disco key not updated: got %v, want %v", c.discoPubKey, newKey) + } + if c.discoPubKey == initialKey { + t.Fatal("disco key should have changed") + } + c.mu.Unlock() +} + func TestNewDirect(t *testing.T) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 7eb673e6de056..0ff2993990b59 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6620,6 +6620,30 @@ func (b *LocalBackend) DebugReSTUN() error { return nil } +func (b *LocalBackend) 
DebugRotateDiscoKey() error { + if !buildfeatures.HasDebug { + return nil + } + + mc := b.MagicConn() + mc.RotateDiscoKey() + + newDiscoKey := mc.DiscoPublicKey() + + if tunWrap, ok := b.sys.Tun.GetOK(); ok { + tunWrap.SetDiscoKey(newDiscoKey) + } + + b.mu.Lock() + cc := b.cc + b.mu.Unlock() + if cc != nil { + cc.SetDiscoPublicKey(newDiscoKey) + } + + return nil +} + func (b *LocalBackend) DebugPeerRelayServers() set.Set[netip.Addr] { return b.MagicConn().PeerRelays() } diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index b7325e957be38..152b375b0f7b8 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -316,6 +316,11 @@ func (cc *mockControl) UpdateEndpoints(endpoints []tailcfg.Endpoint) { cc.called("UpdateEndpoints") } +func (cc *mockControl) SetDiscoPublicKey(key key.DiscoPublic) { + cc.logf("SetDiscoPublicKey: %v", key) + cc.called("SetDiscoPublicKey") +} + func (cc *mockControl) ClientID() int64 { return cc.controlClientID } diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go index 8aca7f0093f7d..ae9cb01e02fe9 100644 --- a/ipn/localapi/debug.go +++ b/ipn/localapi/debug.go @@ -31,6 +31,7 @@ import ( func init() { Register("component-debug-logging", (*Handler).serveComponentDebugLogging) Register("debug", (*Handler).serveDebug) + Register("debug-rotate-disco-key", (*Handler).serveDebugRotateDiscoKey) Register("dev-set-state-store", (*Handler).serveDevSetStateStore) Register("debug-bus-events", (*Handler).serveDebugBusEvents) Register("debug-bus-graph", (*Handler).serveEventBusGraph) @@ -232,6 +233,8 @@ func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { if err == nil { return } + case "rotate-disco-key": + err = h.b.DebugRotateDiscoKey() case "": err = fmt.Errorf("missing parameter 'action'") default: @@ -473,3 +476,20 @@ func (h *Handler) serveDebugOptionalFeatures(w http.ResponseWriter, r *http.Requ w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(of) } + +func (h 
*Handler) serveDebugRotateDiscoKey(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + if err := h.b.DebugRotateDiscoKey(); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} diff --git a/wgengine/magicsock/disco_atomic.go b/wgengine/magicsock/disco_atomic.go new file mode 100644 index 0000000000000..5b765fbc2c9a0 --- /dev/null +++ b/wgengine/magicsock/disco_atomic.go @@ -0,0 +1,58 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "sync/atomic" + + "tailscale.com/types/key" +) + +type discoKeyPair struct { + private key.DiscoPrivate + public key.DiscoPublic + short string // public.ShortString() +} + +// discoAtomic is an atomic container for a disco private key, public key, and +// the public key's ShortString. The private and public keys are always kept +// synchronized. +// +// The zero value is not ready for use. Use [Set] to provide a usable value. +type discoAtomic struct { + pair atomic.Pointer[discoKeyPair] +} + +// Pair returns the private and public keys together atomically. +// Code that needs both the private and public keys synchronized should +// use Pair instead of calling Private and Public separately. +func (dk *discoAtomic) Pair() (key.DiscoPrivate, key.DiscoPublic) { + p := dk.pair.Load() + return p.private, p.public +} + +// Private returns the private key. +func (dk *discoAtomic) Private() key.DiscoPrivate { + return dk.pair.Load().private +} + +// Public returns the public key. +func (dk *discoAtomic) Public() key.DiscoPublic { + return dk.pair.Load().public +} + +// Short returns the short string of the public key (see [DiscoPublic.ShortString]). 
+func (dk *discoAtomic) Short() string { + return dk.pair.Load().short +} + +// Set updates the private key (and the cached public key and short string). +func (dk *discoAtomic) Set(private key.DiscoPrivate) { + public := private.Public() + dk.pair.Store(&discoKeyPair{ + private: private, + public: public, + short: public.ShortString(), + }) +} diff --git a/wgengine/magicsock/disco_atomic_test.go b/wgengine/magicsock/disco_atomic_test.go new file mode 100644 index 0000000000000..a1de9b843379f --- /dev/null +++ b/wgengine/magicsock/disco_atomic_test.go @@ -0,0 +1,70 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package magicsock + +import ( + "testing" + + "tailscale.com/types/key" +) + +func TestDiscoAtomic(t *testing.T) { + var dk discoAtomic + dk.Set(key.NewDisco()) + + private := dk.Private() + public := dk.Public() + short := dk.Short() + + if private.IsZero() { + t.Fatal("DiscoKey private key should not be zero") + } + if public.IsZero() { + t.Fatal("DiscoKey public key should not be zero") + } + if short == "" { + t.Fatal("DiscoKey short string should not be empty") + } + + if public != private.Public() { + t.Fatal("DiscoKey public key doesn't match private key") + } + if short != public.ShortString() { + t.Fatal("DiscoKey short string doesn't match public key") + } + + gotPrivate, gotPublic := dk.Pair() + if !gotPrivate.Equal(private) { + t.Fatal("Pair() returned different private key") + } + if gotPublic != public { + t.Fatal("Pair() returned different public key") + } +} + +func TestDiscoAtomicSet(t *testing.T) { + var dk discoAtomic + dk.Set(key.NewDisco()) + oldPrivate := dk.Private() + oldPublic := dk.Public() + + newPrivate := key.NewDisco() + dk.Set(newPrivate) + + currentPrivate := dk.Private() + currentPublic := dk.Public() + + if currentPrivate.Equal(oldPrivate) { + t.Fatal("DiscoKey private key should have changed after Set") + } + if currentPublic == oldPublic { + t.Fatal("DiscoKey public key should 
have changed after Set") + } + if !currentPrivate.Equal(newPrivate) { + t.Fatal("DiscoKey private key doesn't match the set key") + } + if currentPublic != newPrivate.Public() { + t.Fatal("DiscoKey public key doesn't match derived from set private key") + } +} diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index c2e5dcca37417..eda589e14b1b6 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -697,7 +697,7 @@ func (de *endpoint) maybeProbeUDPLifetimeLocked() (afterInactivityFor time.Durat // shuffling probing probability where the local node ends up with a large // key value lexicographically relative to the other nodes it tends to // communicate with. If de's disco key changes, the cycle will reset. - if de.c.discoPublic.Compare(epDisco.key) >= 0 { + if de.c.discoAtomic.Public().Compare(epDisco.key) >= 0 { // lower disco pub key node probes higher return afterInactivityFor, false } @@ -1739,7 +1739,7 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd } if sp.purpose != pingHeartbeat && sp.purpose != pingHeartbeatForUDPLifetime { - de.c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pktlen=%v pong.src=%v%v", de.c.discoShort, de.discoShort(), de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), pktLen, m.Src, logger.ArgWriter(func(bw *bufio.Writer) { + de.c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pktlen=%v pong.src=%v%v", de.c.discoAtomic.Short(), de.discoShort(), de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), pktLen, m.Src, logger.ArgWriter(func(bw *bufio.Writer) { if sp.to != src { fmt.Fprintf(bw, " ping.to=%v", sp.to) } diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index df1c9340657e4..f1dab924f5d3b 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -146,15 +146,22 @@ func 
TestProbeUDPLifetimeConfig_Valid(t *testing.T) { } func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { + var lowerPriv, higherPriv key.DiscoPrivate var lower, higher key.DiscoPublic - a := key.NewDisco().Public() - b := key.NewDisco().Public() + privA := key.NewDisco() + privB := key.NewDisco() + a := privA.Public() + b := privB.Public() if a.String() < b.String() { lower = a higher = b + lowerPriv = privA + higherPriv = privB } else { lower = b higher = a + lowerPriv = privB + higherPriv = privA } addr := addrQuality{epAddr: epAddr{ap: netip.MustParseAddrPort("1.1.1.1:1")}} newProbeUDPLifetime := func() *probeUDPLifetime { @@ -281,10 +288,18 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + c := &Conn{} + if tt.localDisco.IsZero() { + c.discoAtomic.Set(key.NewDisco()) + } else if tt.localDisco.Compare(lower) == 0 { + c.discoAtomic.Set(lowerPriv) + } else if tt.localDisco.Compare(higher) == 0 { + c.discoAtomic.Set(higherPriv) + } else { + t.Fatalf("unexpected localDisco value") + } de := &endpoint{ - c: &Conn{ - discoPublic: tt.localDisco, - }, + c: c, bestAddr: tt.bestAddr, } if tt.remoteDisco != nil { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index f610d6adbf01e..064838a2d540c 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -273,14 +273,8 @@ type Conn struct { // channel operations and goroutine creation. hasPeerRelayServers atomic.Bool - // discoPrivate is the private naclbox key used for active - // discovery traffic. It is always present, and immutable. - discoPrivate key.DiscoPrivate - // public of discoPrivate. It is always present and immutable. - discoPublic key.DiscoPublic - // ShortString of discoPublic (to save logging work later). It is always - // present and immutable. - discoShort string + // discoAtomic is the current disco private and public keypair for this conn. 
+ discoAtomic discoAtomic // ============================================================ // mu guards all following fields; see userspaceEngine lock @@ -603,11 +597,9 @@ func newConn(logf logger.Logf) *Conn { peerLastDerp: make(map[key.NodePublic]int), peerMap: newPeerMap(), discoInfo: make(map[key.DiscoPublic]*discoInfo), - discoPrivate: discoPrivate, - discoPublic: discoPrivate.Public(), cloudInfo: newCloudInfo(logf), } - c.discoShort = c.discoPublic.ShortString() + c.discoAtomic.Set(discoPrivate) c.bind = &connBind{Conn: c, closed: true} c.receiveBatchPool = sync.Pool{New: func() any { msgs := make([]ipv6.Message, c.bind.BatchSize()) @@ -635,7 +627,7 @@ func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { // now versus taking a network round-trip through DERP. selfNodeKey := c.publicKeyAtomic.Load() if selfNodeKey.Compare(allocResp.ReqRxFromNodeKey) == 0 && - allocResp.ReqRxFromDiscoKey.Compare(c.discoPublic) == 0 { + allocResp.ReqRxFromDiscoKey.Compare(c.discoAtomic.Public()) == 0 { c.relayManager.handleRxDiscoMsg(c, allocResp.Message, selfNodeKey, allocResp.ReqRxFromDiscoKey, epAddr{}) metricLocalDiscoAllocUDPRelayEndpointResponse.Add(1) } @@ -765,7 +757,7 @@ func NewConn(opts Options) (*Conn, error) { c.logf("[v1] couldn't create raw v6 disco listener, using regular listener instead: %v", err) } - c.logf("magicsock: disco key = %v", c.discoShort) + c.logf("magicsock: disco key = %v", c.discoAtomic.Short()) return c, nil } @@ -1244,7 +1236,32 @@ func (c *Conn) GetEndpointChanges(peer tailcfg.NodeView) ([]EndpointChange, erro // DiscoPublicKey returns the discovery public key. func (c *Conn) DiscoPublicKey() key.DiscoPublic { - return c.discoPublic + return c.discoAtomic.Public() +} + +// RotateDiscoKey generates a new discovery key pair and updates the connection +// to use it. This invalidates all existing disco sessions and will cause peers +// to re-establish discovery sessions with the new key. 
+// +// This is primarily for debugging and testing purposes, a future enhancement +// should provide a mechanism for seamless rotation by supporting short term use +// of the old key. +func (c *Conn) RotateDiscoKey() { + oldShort := c.discoAtomic.Short() + newPrivate := key.NewDisco() + + c.mu.Lock() + c.discoAtomic.Set(newPrivate) + newShort := c.discoAtomic.Short() + c.discoInfo = make(map[key.DiscoPublic]*discoInfo) + connCtx := c.connCtx + c.mu.Unlock() + + c.logf("magicsock: rotated disco key from %v to %v", oldShort, newShort) + + if connCtx != nil { + c.ReSTUN("disco-key-rotation") + } } // determineEndpoints returns the machine's endpoint addresses. It does a STUN @@ -1914,7 +1931,7 @@ func (c *Conn) sendDiscoAllocateUDPRelayEndpointRequest(dst epAddr, dstKey key.N if isDERP && dstKey.Compare(selfNodeKey) == 0 { c.allocRelayEndpointPub.Publish(UDPRelayAllocReq{ RxFromNodeKey: selfNodeKey, - RxFromDiscoKey: c.discoPublic, + RxFromDiscoKey: c.discoAtomic.Public(), Message: allocReq, }) metricLocalDiscoAllocUDPRelayEndpointRequest.Add(1) @@ -1985,7 +2002,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. } } pkt = append(pkt, disco.Magic...) - pkt = c.discoPublic.AppendTo(pkt) + pkt = c.discoAtomic.Public().AppendTo(pkt) if isDERP { metricSendDiscoDERP.Add(1) @@ -2003,7 +2020,7 @@ func (c *Conn) sendDiscoMessage(dst epAddr, dstKey key.NodePublic, dstDisco key. 
if !dstKey.IsZero() { node = dstKey.ShortString() } - c.dlogf("[v1] magicsock: disco: %v->%v (%v, %v) sent %v len %v\n", c.discoShort, dstDisco.ShortString(), node, derpStr(dst.String()), disco.MessageSummary(m), len(pkt)) + c.dlogf("[v1] magicsock: disco: %v->%v (%v, %v) sent %v len %v\n", c.discoAtomic.Short(), dstDisco.ShortString(), node, derpStr(dst.String()), disco.MessageSummary(m), len(pkt)) } if isDERP { metricSentDiscoDERP.Add(1) @@ -2352,13 +2369,13 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake } if isVia { c.dlogf("[v1] magicsock: disco: %v<-%v via %v (%v, %v) got call-me-maybe-via, %d endpoints", - c.discoShort, epDisco.short, via.ServerDisco.ShortString(), + c.discoAtomic.Short(), epDisco.short, via.ServerDisco.ShortString(), ep.publicKey.ShortString(), derpStr(src.String()), len(via.AddrPorts)) c.relayManager.handleCallMeMaybeVia(ep, lastBest, lastBestIsTrusted, via) } else { c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got call-me-maybe, %d endpoints", - c.discoShort, epDisco.short, + c.discoAtomic.Short(), epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), len(cmm.MyNumber)) go ep.handleCallMeMaybe(cmm) @@ -2404,7 +2421,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake if isResp { c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s, %d endpoints", - c.discoShort, epDisco.short, + c.discoAtomic.Short(), epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), msgType, len(resp.AddrPorts)) @@ -2418,7 +2435,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake return } else { c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got %s disco[0]=%v disco[1]=%v", - c.discoShort, epDisco.short, + c.discoAtomic.Short(), epDisco.short, ep.publicKey.ShortString(), derpStr(src.String()), msgType, req.ClientDisco[0].ShortString(), req.ClientDisco[1].ShortString()) @@ -2583,7 +2600,7 @@ func (c *Conn) handlePingLocked(dm 
*disco.Ping, src epAddr, di *discoInfo, derpN if numNodes > 1 { pingNodeSrcStr = "[one-of-multi]" } - c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got ping tx=%x padding=%v", c.discoShort, di.discoShort, pingNodeSrcStr, src, dm.TxID[:6], dm.Padding) + c.dlogf("[v1] magicsock: disco: %v<-%v (%v, %v) got ping tx=%x padding=%v", c.discoAtomic.Short(), di.discoShort, pingNodeSrcStr, src, dm.TxID[:6], dm.Padding) } ipDst := src @@ -2656,7 +2673,7 @@ func (c *Conn) discoInfoForKnownPeerLocked(k key.DiscoPublic) *discoInfo { di = &discoInfo{ discoKey: k, discoShort: k.ShortString(), - sharedKey: c.discoPrivate.Shared(k), + sharedKey: c.discoAtomic.Private().Shared(k), } c.discoInfo[k] = di } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 2a20b3cf602c3..7ae422906b84c 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -4235,3 +4235,73 @@ func Test_lazyEndpoint_FromPeer(t *testing.T) { }) } } + +func TestRotateDiscoKey(t *testing.T) { + c := newConn(t.Logf) + + oldPrivate, oldPublic := c.discoAtomic.Pair() + oldShort := c.discoAtomic.Short() + + if oldPublic != oldPrivate.Public() { + t.Fatalf("old public key doesn't match old private key") + } + if oldShort != oldPublic.ShortString() { + t.Fatalf("old short string doesn't match old public key") + } + + testDiscoKey := key.NewDisco().Public() + c.mu.Lock() + c.discoInfo[testDiscoKey] = &discoInfo{ + discoKey: testDiscoKey, + discoShort: testDiscoKey.ShortString(), + } + if len(c.discoInfo) != 1 { + t.Fatalf("expected 1 discoInfo entry, got %d", len(c.discoInfo)) + } + c.mu.Unlock() + + c.RotateDiscoKey() + + newPrivate, newPublic := c.discoAtomic.Pair() + newShort := c.discoAtomic.Short() + + if newPublic.Compare(oldPublic) == 0 { + t.Fatalf("disco key didn't change after rotation") + } + if newShort == oldShort { + t.Fatalf("short string didn't change after rotation") + } + + if newPublic != newPrivate.Public() { + t.Fatalf("new 
public key doesn't match new private key") + } + if newShort != newPublic.ShortString() { + t.Fatalf("new short string doesn't match new public key") + } + + c.mu.Lock() + if len(c.discoInfo) != 0 { + t.Fatalf("expected discoInfo to be cleared, got %d entries", len(c.discoInfo)) + } + c.mu.Unlock() +} + +func TestRotateDiscoKeyMultipleTimes(t *testing.T) { + c := newConn(t.Logf) + + keys := make([]key.DiscoPublic, 0, 5) + keys = append(keys, c.discoAtomic.Public()) + + for i := 0; i < 4; i++ { + c.RotateDiscoKey() + newKey := c.discoAtomic.Public() + + for j, oldKey := range keys { + if newKey.Compare(oldKey) == 0 { + t.Fatalf("rotation %d produced same key as rotation %d", i+1, j) + } + } + + keys = append(keys, newKey) + } +} diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 2f93f1085168a..69831a4df19f8 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -361,7 +361,7 @@ func (r *relayManager) ensureDiscoInfoFor(work *relayHandshakeWork) { di.di = &discoInfo{ discoKey: work.se.ServerDisco, discoShort: work.se.ServerDisco.ShortString(), - sharedKey: work.wlb.ep.c.discoPrivate.Shared(work.se.ServerDisco), + sharedKey: work.wlb.ep.c.discoAtomic.Private().Shared(work.se.ServerDisco), } } } @@ -1031,7 +1031,7 @@ func (r *relayManager) allocateAllServersRunLoop(wlb endpointWithLastBest) { if remoteDisco == nil { return } - discoKeys := key.NewSortedPairOfDiscoPublic(wlb.ep.c.discoPublic, remoteDisco.key) + discoKeys := key.NewSortedPairOfDiscoPublic(wlb.ep.c.discoAtomic.Public(), remoteDisco.key) for _, v := range r.serversByNodeKey { byDiscoKeys, ok := r.allocWorkByDiscoKeysByServerNodeKey[v.nodeKey] if !ok { diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index d400818394c47..e8fddfd91b46e 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -22,11 +22,15 @@ func TestRelayManagerInitAndIdle(t 
*testing.T) { <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleCallMeMaybeVia(&endpoint{c: &Conn{discoPrivate: key.NewDisco()}}, addrQuality{}, false, &disco.CallMeMaybeVia{UDPRelayEndpoint: disco.UDPRelayEndpoint{ServerDisco: key.NewDisco().Public()}}) + c1 := &Conn{} + c1.discoAtomic.Set(key.NewDisco()) + rm.handleCallMeMaybeVia(&endpoint{c: c1}, addrQuality{}, false, &disco.CallMeMaybeVia{UDPRelayEndpoint: disco.UDPRelayEndpoint{ServerDisco: key.NewDisco().Public()}}) <-rm.runLoopStoppedCh rm = relayManager{} - rm.handleRxDiscoMsg(&Conn{discoPrivate: key.NewDisco()}, &disco.BindUDPRelayEndpointChallenge{}, key.NodePublic{}, key.DiscoPublic{}, epAddr{}) + c2 := &Conn{} + c2.discoAtomic.Set(key.NewDisco()) + rm.handleRxDiscoMsg(c2, &disco.BindUDPRelayEndpointChallenge{}, key.NodePublic{}, key.DiscoPublic{}, epAddr{}) <-rm.runLoopStoppedCh rm = relayManager{} From 3b865d7c33b1e945e9122dbe6f4eeff696a84e0a Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 18 Nov 2025 14:16:27 -0800 Subject: [PATCH 0718/1093] cmd/netlogfmt: support resolving IP addresses to synonymous labels (#17955) We now embed node information into network flow logs. By default, netlogfmt still prints out using Tailscale IP addresses. Support a "--resolve-addrs=TYPE" flag that can be used to specify resolving IP addresses as node IDs, hostnames, users, or tags. 
Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- cmd/netlogfmt/main.go | 149 ++++++++++++++++++++++++------------------ 1 file changed, 87 insertions(+), 62 deletions(-) diff --git a/cmd/netlogfmt/main.go b/cmd/netlogfmt/main.go index 65e87098fec5e..b8aba4aaa6196 100644 --- a/cmd/netlogfmt/main.go +++ b/cmd/netlogfmt/main.go @@ -44,25 +44,51 @@ import ( "github.com/dsnet/try" jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" + "tailscale.com/tailcfg" + "tailscale.com/types/bools" "tailscale.com/types/logid" "tailscale.com/types/netlogtype" "tailscale.com/util/must" ) var ( - resolveNames = flag.Bool("resolve-names", false, "convert tailscale IP addresses to hostnames; must also specify --api-key and --tailnet-id") - apiKey = flag.String("api-key", "", "API key to query the Tailscale API with; see https://login.tailscale.com/admin/settings/keys") - tailnetName = flag.String("tailnet-name", "", "tailnet domain name to lookup devices in; see https://login.tailscale.com/admin/settings/general") + resolveNames = flag.Bool("resolve-names", false, "This is equivalent to specifying \"--resolve-addrs=name\".") + resolveAddrs = flag.String("resolve-addrs", "", "Resolve each tailscale IP address as a node ID, name, or user.\n"+ + "If network flow logs do not support embedded node information,\n"+ + "then --api-key and --tailnet-name must also be provided.\n"+ + "Valid values include \"nodeId\", \"name\", or \"user\".") + apiKey = flag.String("api-key", "", "The API key to query the Tailscale API with.\nSee https://login.tailscale.com/admin/settings/keys") + tailnetName = flag.String("tailnet-name", "", "The Tailnet name to lookup nodes within.\nSee https://login.tailscale.com/admin/settings/general") ) -var namesByAddr map[netip.Addr]string +var ( + tailnetNodesByAddr map[netip.Addr]netlogtype.Node + tailnetNodesByID map[tailcfg.StableNodeID]netlogtype.Node +) func main() { flag.Parse() if *resolveNames { - namesByAddr = 
mustMakeNamesByAddr() + *resolveAddrs = "name" + } + *resolveAddrs = strings.ToLower(*resolveAddrs) // make case-insensitive + *resolveAddrs = strings.TrimSuffix(*resolveAddrs, "s") // allow plural form + *resolveAddrs = strings.ReplaceAll(*resolveAddrs, " ", "") // ignore spaces + *resolveAddrs = strings.ReplaceAll(*resolveAddrs, "-", "") // ignore dashes + *resolveAddrs = strings.ReplaceAll(*resolveAddrs, "_", "") // ignore underscores + switch *resolveAddrs { + case "id", "nodeid": + *resolveAddrs = "nodeid" + case "name", "hostname": + *resolveAddrs = "name" + case "user", "tag", "usertag", "taguser": + *resolveAddrs = "user" // tag resolution is implied + default: + log.Fatalf("--resolve-addrs must be \"nodeId\", \"name\", or \"user\"") } + mustLoadTailnetNodes() + // The logic handles a stream of arbitrary JSON. // So long as a JSON object seems like a network log message, // then this will unmarshal and print it. @@ -103,7 +129,7 @@ func processArray(dec *jsontext.Decoder) { func processObject(dec *jsontext.Decoder) { var hasTraffic bool - var rawMsg []byte + var rawMsg jsontext.Value try.E1(dec.ReadToken()) // parse '{' for dec.PeekKind() != '}' { // Capture any members that could belong to a network log message. @@ -111,13 +137,13 @@ func processObject(dec *jsontext.Decoder) { case "virtualTraffic", "subnetTraffic", "exitTraffic", "physicalTraffic": hasTraffic = true fallthrough - case "logtail", "nodeId", "logged", "start", "end": + case "logtail", "nodeId", "logged", "srcNode", "dstNodes", "start", "end": if len(rawMsg) == 0 { rawMsg = append(rawMsg, '{') } else { rawMsg = append(rawMsg[:len(rawMsg)-1], ',') } - rawMsg = append(append(append(rawMsg, '"'), name.String()...), '"') + rawMsg, _ = jsontext.AppendQuote(rawMsg, name.String()) rawMsg = append(rawMsg, ':') rawMsg = append(rawMsg, try.E1(dec.ReadValue())...) 
rawMsg = append(rawMsg, '}') @@ -145,6 +171,32 @@ type message struct { } func printMessage(msg message) { + var nodesByAddr map[netip.Addr]netlogtype.Node + var tailnetDNS string // e.g., ".acme-corp.ts.net" + if *resolveAddrs != "" { + nodesByAddr = make(map[netip.Addr]netlogtype.Node) + insertNode := func(node netlogtype.Node) { + for _, addr := range node.Addresses { + nodesByAddr[addr] = node + } + } + for _, node := range msg.DstNodes { + insertNode(node) + } + insertNode(msg.SrcNode) + + // Derive the Tailnet DNS of the self node. + detectTailnetDNS := func(nodeName string) { + if prefix, ok := strings.CutSuffix(nodeName, ".ts.net"); ok { + if i := strings.LastIndexByte(prefix, '.'); i > 0 { + tailnetDNS = nodeName[i:] + } + } + } + detectTailnetDNS(msg.SrcNode.Name) + detectTailnetDNS(tailnetNodesByID[msg.NodeID].Name) + } + // Construct a table of network traffic per connection. rows := [][7]string{{3: "Tx[P/s]", 4: "Tx[B/s]", 5: "Rx[P/s]", 6: "Rx[B/s]"}} duration := msg.End.Sub(msg.Start) @@ -175,16 +227,25 @@ func printMessage(msg message) { if !a.IsValid() { return "" } - if name, ok := namesByAddr[a.Addr()]; ok { - if a.Port() == 0 { - return name + name := a.Addr().String() + node, ok := tailnetNodesByAddr[a.Addr()] + if !ok { + node, ok = nodesByAddr[a.Addr()] + } + if ok { + switch *resolveAddrs { + case "nodeid": + name = cmp.Or(string(node.NodeID), name) + case "name": + name = cmp.Or(strings.TrimSuffix(string(node.Name), tailnetDNS), name) + case "user": + name = cmp.Or(bools.IfElse(len(node.Tags) > 0, fmt.Sprint(node.Tags), node.User), name) } - return name + ":" + strconv.Itoa(int(a.Port())) } - if a.Port() == 0 { - return a.Addr().String() + if a.Port() != 0 { + return name + ":" + strconv.Itoa(int(a.Port())) } - return a.String() + return name } for _, cc := range traffic { row := [7]string{ @@ -279,8 +340,10 @@ func printMessage(msg message) { } } -func mustMakeNamesByAddr() map[netip.Addr]string { +func mustLoadTailnetNodes() { switch { + 
case *apiKey == "" && *tailnetName == "": + return // rely on embedded node information in the logs themselves case *apiKey == "": log.Fatalf("--api-key must be specified with --resolve-names") case *tailnetName == "": @@ -300,57 +363,19 @@ func mustMakeNamesByAddr() map[netip.Addr]string { // Unmarshal the API response. var m struct { - Devices []struct { - Name string `json:"name"` - Addrs []netip.Addr `json:"addresses"` - } `json:"devices"` + Devices []netlogtype.Node `json:"devices"` } must.Do(json.Unmarshal(b, &m)) - // Construct a unique mapping of Tailscale IP addresses to hostnames. - // For brevity, we start with the first segment of the name and - // use more segments until we find the shortest prefix that is unique - // for all names in the tailnet. - seen := make(map[string]bool) - namesByAddr := make(map[netip.Addr]string) -retry: - for i := range 10 { - clear(seen) - clear(namesByAddr) - for _, d := range m.Devices { - name := fieldPrefix(d.Name, i) - if seen[name] { - continue retry - } - seen[name] = true - for _, a := range d.Addrs { - namesByAddr[a] = name - } - } - return namesByAddr - } - panic("unable to produce unique mapping of address to names") -} - -// fieldPrefix returns the first n number of dot-separated segments. -// -// Example: -// -// fieldPrefix("foo.bar.baz", 0) returns "" -// fieldPrefix("foo.bar.baz", 1) returns "foo" -// fieldPrefix("foo.bar.baz", 2) returns "foo.bar" -// fieldPrefix("foo.bar.baz", 3) returns "foo.bar.baz" -// fieldPrefix("foo.bar.baz", 4) returns "foo.bar.baz" -func fieldPrefix(s string, n int) string { - s0 := s - for i := 0; i < n && len(s) > 0; i++ { - if j := strings.IndexByte(s, '.'); j >= 0 { - s = s[j+1:] - } else { - s = "" + // Construct a mapping of Tailscale IP addresses to node information. 
+ tailnetNodesByAddr = make(map[netip.Addr]netlogtype.Node) + tailnetNodesByID = make(map[tailcfg.StableNodeID]netlogtype.Node) + for _, node := range m.Devices { + for _, addr := range node.Addresses { + tailnetNodesByAddr[addr] = node } + tailnetNodesByID[node.NodeID] = node } - return strings.TrimSuffix(s0[:len(s0)-len(s)], ".") } func appendRepeatByte(b []byte, c byte, n int) []byte { From 5b0c57f497ffe1c83dc2e4c7026541264ecd0f8a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 18 Nov 2025 14:35:02 -0800 Subject: [PATCH 0719/1093] tailcfg: add some omitzero, adjust some omitempty to omitzero Updates tailscale/corp#25406 Change-Id: I7832dbe3dce3774bcc831e3111feb75bcc9e021d Signed-off-by: Brad Fitzpatrick --- tailcfg/tailcfg.go | 228 ++++++++++++++++++++++----------------------- 1 file changed, 114 insertions(+), 114 deletions(-) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 346957803d235..41e0a0b284c44 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -255,9 +255,9 @@ func (u StableNodeID) IsZero() bool { // have a general gmail address login associated with the user. type User struct { ID UserID - DisplayName string // if non-empty overrides Login field - ProfilePicURL string // if non-empty overrides Login field - Created time.Time + DisplayName string // if non-empty overrides Login field + ProfilePicURL string `json:",omitzero"` // if non-empty overrides Login field + Created time.Time `json:",omitzero"` } // Login is a user from a specific identity provider, not associated with any @@ -268,7 +268,7 @@ type Login struct { Provider string // "google", "github", "okta_foo", etc. LoginName string // an email address or "email-ish" string (like alice@github) DisplayName string // from the IdP - ProfilePicURL string // from the IdP + ProfilePicURL string `json:",omitzero"` // from the IdP } // A UserProfile is display-friendly data for a [User]. 
@@ -278,7 +278,7 @@ type UserProfile struct { ID UserID LoginName string // "alice@smith.com"; for display purposes only (provider is not listed) DisplayName string // "Alice Smith" - ProfilePicURL string `json:",omitempty"` + ProfilePicURL string `json:",omitzero"` } func (p *UserProfile) Equal(p2 *UserProfile) bool { @@ -345,13 +345,13 @@ type Node struct { User UserID // Sharer, if non-zero, is the user who shared this node, if different than User. - Sharer UserID `json:",omitempty"` + Sharer UserID `json:",omitzero"` Key key.NodePublic - KeyExpiry time.Time // the zero value if this node does not expire + KeyExpiry time.Time `json:",omitzero"` // the zero value if this node does not expire KeySignature tkatype.MarshaledSignature `json:",omitempty"` - Machine key.MachinePublic - DiscoKey key.DiscoPublic + Machine key.MachinePublic `json:",omitzero"` + DiscoKey key.DiscoPublic `json:",omitzero"` // Addresses are the IP addresses of this Node directly. Addresses []netip.Prefix @@ -361,7 +361,7 @@ type Node struct { // As of CapabilityVersion 112, this may be nil (null or undefined) on the wire // to mean the same as Addresses. Internally, it is always filled in with // its possibly-implicit value. - AllowedIPs []netip.Prefix + AllowedIPs []netip.Prefix `json:",omitzero"` // _not_ omitempty; only nil is special Endpoints []netip.AddrPort `json:",omitempty"` // IP+port (public via STUN, and local LANs) @@ -375,18 +375,18 @@ type Node struct { // this field. See tailscale/tailscale#14636. Do not use this field in code // other than in the upgradeNode func, which canonicalizes it to HomeDERP // if it arrives as a LegacyDERPString string on the wire. - LegacyDERPString string `json:"DERP,omitempty"` // DERP-in-IP:port ("127.3.3.40:N") endpoint + LegacyDERPString string `json:"DERP,omitzero"` // DERP-in-IP:port ("127.3.3.40:N") endpoint // HomeDERP is the modern version of the DERP string field, with just an // integer. 
The client advertises support for this as of capver 111. // // HomeDERP may be zero if not (yet) known, but ideally always be non-zero // for magicsock connectivity to function normally. - HomeDERP int `json:",omitempty"` // DERP region ID of the node's home DERP + HomeDERP int `json:",omitzero"` // DERP region ID of the node's home DERP - Hostinfo HostinfoView - Created time.Time - Cap CapabilityVersion `json:",omitempty"` // if non-zero, the node's capability version; old servers might not send + Hostinfo HostinfoView `json:",omitzero"` + Created time.Time `json:",omitzero"` + Cap CapabilityVersion `json:",omitzero"` // if non-zero, the node's capability version; old servers might not send // Tags are the list of ACL tags applied to this node. // Tags take the form of `tag:` where value starts @@ -453,25 +453,25 @@ type Node struct { // it do anything. It is the tailscaled client's job to double-check the // MapResponse's PacketFilter to verify that its AllowedIPs will not be // accepted by the packet filter. - UnsignedPeerAPIOnly bool `json:",omitempty"` + UnsignedPeerAPIOnly bool `json:",omitzero"` // The following three computed fields hold the various names that can // be used for this node in UIs. They are populated from controlclient // (not from control) by calling node.InitDisplayNames. These can be // used directly or accessed via node.DisplayName or node.DisplayNames. 
- ComputedName string `json:",omitempty"` // MagicDNS base name (for normal non-shared-in nodes), FQDN (without trailing dot, for shared-in nodes), or Hostname (if no MagicDNS) + ComputedName string `json:",omitzero"` // MagicDNS base name (for normal non-shared-in nodes), FQDN (without trailing dot, for shared-in nodes), or Hostname (if no MagicDNS) computedHostIfDifferent string // hostname, if different than ComputedName, otherwise empty - ComputedNameWithHost string `json:",omitempty"` // either "ComputedName" or "ComputedName (computedHostIfDifferent)", if computedHostIfDifferent is set + ComputedNameWithHost string `json:",omitzero"` // either "ComputedName" or "ComputedName (computedHostIfDifferent)", if computedHostIfDifferent is set // DataPlaneAuditLogID is the per-node logtail ID used for data plane audit logging. - DataPlaneAuditLogID string `json:",omitempty"` + DataPlaneAuditLogID string `json:",omitzero"` // Expired is whether this node's key has expired. Control may send // this; clients are only allowed to set this from false to true. On // the client, this is calculated client-side based on a timestamp sent // from control, to avoid clock skew issues. - Expired bool `json:",omitempty"` + Expired bool `json:",omitzero"` // SelfNodeV4MasqAddrForThisPeer is the IPv4 that this peer knows the current node as. // It may be empty if the peer knows the current node by its native @@ -486,7 +486,7 @@ type Node struct { // This only applies to traffic originating from the current node to the // peer or any of its subnets. Traffic originating from subnet routes will // not be masqueraded (e.g. in case of --snat-subnet-routes). - SelfNodeV4MasqAddrForThisPeer *netip.Addr `json:",omitempty"` + SelfNodeV4MasqAddrForThisPeer *netip.Addr `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // SelfNodeV6MasqAddrForThisPeer is the IPv6 that this peer knows the current node as. 
// It may be empty if the peer knows the current node by its native @@ -501,17 +501,17 @@ type Node struct { // This only applies to traffic originating from the current node to the // peer or any of its subnets. Traffic originating from subnet routes will // not be masqueraded (e.g. in case of --snat-subnet-routes). - SelfNodeV6MasqAddrForThisPeer *netip.Addr `json:",omitempty"` + SelfNodeV6MasqAddrForThisPeer *netip.Addr `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // IsWireGuardOnly indicates that this is a non-Tailscale WireGuard peer, it // is not expected to speak Disco or DERP, and it must have Endpoints in // order to be reachable. - IsWireGuardOnly bool `json:",omitempty"` + IsWireGuardOnly bool `json:",omitzero"` // IsJailed indicates that this node is jailed and should not be allowed // initiate connections, however outbound connections to it should still be // allowed. - IsJailed bool `json:",omitempty"` + IsJailed bool `json:",omitzero"` // ExitNodeDNSResolvers is the list of DNS servers that should be used when this // node is marked IsWireGuardOnly and being used as an exit node. @@ -827,10 +827,10 @@ type Location struct { // Because it contains pointers (slices), this type should not be used // as a value type. 
type Hostinfo struct { - IPNVersion string `json:",omitempty"` // version of this code (in version.Long format) - FrontendLogID string `json:",omitempty"` // logtail ID of frontend instance - BackendLogID string `json:",omitempty"` // logtail ID of backend instance - OS string `json:",omitempty"` // operating system the client runs on (a version.OS value) + IPNVersion string `json:",omitzero"` // version of this code (in version.Long format) + FrontendLogID string `json:",omitzero"` // logtail ID of frontend instance + BackendLogID string `json:",omitzero"` // logtail ID of backend instance + OS string `json:",omitzero"` // operating system the client runs on (a version.OS value) // OSVersion is the version of the OS, if available. // @@ -842,25 +842,25 @@ type Hostinfo struct { // string on Linux, like "Debian 10.4; kernel=xxx; container; env=kn" and so // on. As of Tailscale 1.32, this is simply the kernel version on Linux, like // "5.10.0-17-amd64". - OSVersion string `json:",omitempty"` + OSVersion string `json:",omitzero"` - Container opt.Bool `json:",omitempty"` // best-effort whether the client is running in a container - Env string `json:",omitempty"` // a hostinfo.EnvType in string form - Distro string `json:",omitempty"` // "debian", "ubuntu", "nixos", ... - DistroVersion string `json:",omitempty"` // "20.04", ... - DistroCodeName string `json:",omitempty"` // "jammy", "bullseye", ... + Container opt.Bool `json:",omitzero"` // best-effort whether the client is running in a container + Env string `json:",omitzero"` // a hostinfo.EnvType in string form + Distro string `json:",omitzero"` // "debian", "ubuntu", "nixos", ... + DistroVersion string `json:",omitzero"` // "20.04", ... + DistroCodeName string `json:",omitzero"` // "jammy", "bullseye", ... // App is used to disambiguate Tailscale clients that run using tsnet. - App string `json:",omitempty"` // "k8s-operator", "golinks", ... 
- - Desktop opt.Bool `json:",omitempty"` // if a desktop was detected on Linux - Package string `json:",omitempty"` // Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) - DeviceModel string `json:",omitempty"` // mobile phone model ("Pixel 3a", "iPhone12,3") - PushDeviceToken string `json:",omitempty"` // macOS/iOS APNs device token for notifications (and Android in the future) - Hostname string `json:",omitempty"` // name of the host the client runs on - ShieldsUp bool `json:",omitempty"` // indicates whether the host is blocking incoming connections - ShareeNode bool `json:",omitempty"` // indicates this node exists in netmap because it's owned by a shared-to user - NoLogsNoSupport bool `json:",omitempty"` // indicates that the user has opted out of sending logs and support + App string `json:",omitzero"` // "k8s-operator", "golinks", ... + + Desktop opt.Bool `json:",omitzero"` // if a desktop was detected on Linux + Package string `json:",omitzero"` // Tailscale package to disambiguate ("choco", "appstore", etc; "" for unknown) + DeviceModel string `json:",omitzero"` // mobile phone model ("Pixel 3a", "iPhone12,3") + PushDeviceToken string `json:",omitzero"` // macOS/iOS APNs device token for notifications (and Android in the future) + Hostname string `json:",omitzero"` // name of the host the client runs on + ShieldsUp bool `json:",omitzero"` // indicates whether the host is blocking incoming connections + ShareeNode bool `json:",omitzero"` // indicates this node exists in netmap because it's owned by a shared-to user + NoLogsNoSupport bool `json:",omitzero"` // indicates that the user has opted out of sending logs and support // WireIngress indicates that the node would like to be wired up server-side // (DNS, etc) to be able to use Tailscale Funnel, even if it's not currently // enabled. 
For example, the user might only use it for intermittent @@ -868,38 +868,38 @@ type Hostinfo struct { // away, even if it's disabled most of the time. As an optimization, this is // only sent if IngressEnabled is false, as IngressEnabled implies that this // option is true. - WireIngress bool `json:",omitempty"` - IngressEnabled bool `json:",omitempty"` // if the node has any funnel endpoint enabled - AllowsUpdate bool `json:",omitempty"` // indicates that the node has opted-in to admin-console-drive remote updates - Machine string `json:",omitempty"` // the current host's machine type (uname -m) - GoArch string `json:",omitempty"` // GOARCH value (of the built binary) - GoArchVar string `json:",omitempty"` // GOARM, GOAMD64, etc (of the built binary) - GoVersion string `json:",omitempty"` // Go version binary was built with + WireIngress bool `json:",omitzero"` + IngressEnabled bool `json:",omitzero"` // if the node has any funnel endpoint enabled + AllowsUpdate bool `json:",omitzero"` // indicates that the node has opted-in to admin-console-drive remote updates + Machine string `json:",omitzero"` // the current host's machine type (uname -m) + GoArch string `json:",omitzero"` // GOARCH value (of the built binary) + GoArchVar string `json:",omitzero"` // GOARM, GOAMD64, etc (of the built binary) + GoVersion string `json:",omitzero"` // Go version binary was built with RoutableIPs []netip.Prefix `json:",omitempty"` // set of IP ranges this client can route RequestTags []string `json:",omitempty"` // set of ACL tags this node wants to claim WoLMACs []string `json:",omitempty"` // MAC address(es) to send Wake-on-LAN packets to wake this node (lowercase hex w/ colons) Services []Service `json:",omitempty"` // services advertised by this machine - NetInfo *NetInfo `json:",omitempty"` + NetInfo *NetInfo `json:",omitzero"` SSH_HostKeys []string `json:"sshHostKeys,omitempty"` // if advertised - Cloud string `json:",omitempty"` - Userspace opt.Bool `json:",omitempty"` // 
if the client is running in userspace (netstack) mode - UserspaceRouter opt.Bool `json:",omitempty"` // if the client's subnet router is running in userspace (netstack) mode - AppConnector opt.Bool `json:",omitempty"` // if the client is running the app-connector service - ServicesHash string `json:",omitempty"` // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n - ExitNodeID StableNodeID `json:",omitzero"` // the client’s selected exit node, empty when unselected. + Cloud string `json:",omitzero"` + Userspace opt.Bool `json:",omitzero"` // if the client is running in userspace (netstack) mode + UserspaceRouter opt.Bool `json:",omitzero"` // if the client's subnet router is running in userspace (netstack) mode + AppConnector opt.Bool `json:",omitzero"` // if the client is running the app-connector service + ServicesHash string `json:",omitzero"` // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n + ExitNodeID StableNodeID `json:",omitzero"` // the client’s selected exit node, empty when unselected. // Location represents geographical location data about a // Tailscale host. Location is optional and only set if // explicitly declared by a node. - Location *Location `json:",omitempty"` + Location *Location `json:",omitzero"` - TPM *TPMInfo `json:",omitempty"` // TPM device metadata, if available + TPM *TPMInfo `json:",omitzero"` // TPM device metadata, if available // StateEncrypted reports whether the node state is stored encrypted on // disk. The actual mechanism is platform-specific: // * Apple nodes use the Keychain // * Linux and Windows nodes use the TPM // * Android apps use EncryptedSharedPreferences - StateEncrypted opt.Bool `json:",omitempty"` + StateEncrypted opt.Bool `json:",omitzero"` // NOTE: any new fields containing pointers in this type // require changes to Hostinfo.Equal. 
@@ -913,25 +913,25 @@ type TPMInfo struct { // https://trustedcomputinggroup.org/resource/vendor-id-registry/, // for example "MSFT" for Microsoft. // Read from TPM_PT_MANUFACTURER. - Manufacturer string `json:",omitempty"` + Manufacturer string `json:",omitzero"` // Vendor is a vendor ID string, up to 16 characters. // Read from TPM_PT_VENDOR_STRING_*. - Vendor string `json:",omitempty"` + Vendor string `json:",omitzero"` // Model is a vendor-defined TPM model. // Read from TPM_PT_VENDOR_TPM_TYPE. - Model int `json:",omitempty"` + Model int `json:",omitzero"` // FirmwareVersion is the version number of the firmware. // Read from TPM_PT_FIRMWARE_VERSION_*. - FirmwareVersion uint64 `json:",omitempty"` + FirmwareVersion uint64 `json:",omitzero"` // SpecRevision is the TPM 2.0 spec revision encoded as a single number. All // revisions can be found at // https://trustedcomputinggroup.org/resource/tpm-library-specification/. // Before revision 184, TCG used the "01.83" format for revision 183. - SpecRevision int `json:",omitempty"` + SpecRevision int `json:",omitzero"` // FamilyIndicator is the TPM spec family, like "2.0". // Read from TPM_PT_FAMILY_INDICATOR. - FamilyIndicator string `json:",omitempty"` + FamilyIndicator string `json:",omitzero"` } // Present reports whether a TPM device is present on this machine. @@ -1016,37 +1016,37 @@ func (v HostinfoView) TailscaleSSHEnabled() bool { return v.ж.TailscaleSSHEnabl type NetInfo struct { // MappingVariesByDestIP says whether the host's NAT mappings // vary based on the destination IP. - MappingVariesByDestIP opt.Bool + MappingVariesByDestIP opt.Bool `json:",omitzero"` // WorkingIPv6 is whether the host has IPv6 internet connectivity. - WorkingIPv6 opt.Bool + WorkingIPv6 opt.Bool `json:",omitzero"` // OSHasIPv6 is whether the OS supports IPv6 at all, regardless of // whether IPv6 internet connectivity is available. 
- OSHasIPv6 opt.Bool + OSHasIPv6 opt.Bool `json:",omitzero"` // WorkingUDP is whether the host has UDP internet connectivity. - WorkingUDP opt.Bool + WorkingUDP opt.Bool `json:",omitzero"` // WorkingICMPv4 is whether ICMPv4 works. // Empty means not checked. - WorkingICMPv4 opt.Bool + WorkingICMPv4 opt.Bool `json:",omitzero"` // HavePortMap is whether we have an existing portmap open // (UPnP, PMP, or PCP). - HavePortMap bool `json:",omitempty"` + HavePortMap bool `json:",omitzero"` // UPnP is whether UPnP appears present on the LAN. // Empty means not checked. - UPnP opt.Bool + UPnP opt.Bool `json:",omitzero"` // PMP is whether NAT-PMP appears present on the LAN. // Empty means not checked. - PMP opt.Bool + PMP opt.Bool `json:",omitzero"` // PCP is whether PCP appears present on the LAN. // Empty means not checked. - PCP opt.Bool + PCP opt.Bool `json:",omitzero"` // PreferredDERP is this node's preferred (home) DERP region ID. // This is where the node expects to be contacted to begin a @@ -1055,10 +1055,10 @@ type NetInfo struct { // that are located elsewhere) but PreferredDERP is the region ID // that the node subscribes to traffic at. // Zero means disconnected or unknown. - PreferredDERP int + PreferredDERP int `json:",omitzero"` // LinkType is the current link type, if known. - LinkType string `json:",omitempty"` // "wired", "wifi", "mobile" (LTE, 4G, 3G, etc) + LinkType string `json:",omitzero"` // "wired", "wifi", "mobile" (LTE, 4G, 3G, etc) // DERPLatency is the fastest recent time to reach various // DERP STUN servers, in seconds. The map key is the @@ -1076,7 +1076,7 @@ type NetInfo struct { // "{nft,ift}-REASON", like "nft-forced" or "ipt-default". Empty means // either not Linux or a configuration in which the host firewall rules // are not managed by tailscaled. - FirewallMode string `json:",omitempty"` + FirewallMode string `json:",omitzero"` // Update BasicallyEqual when adding fields. 
} @@ -1364,8 +1364,8 @@ type MapRequest struct { // For current values and history, see the CapabilityVersion type's docs. Version CapabilityVersion - Compress string // "zstd" or "" (no compression) - KeepAlive bool // whether server should send keep-alives back to us + Compress string `json:",omitzero"` // "zstd" or "" (no compression) + KeepAlive bool `json:",omitzero"` // whether server should send keep-alives back to us NodeKey key.NodePublic DiscoKey key.DiscoPublic @@ -1388,7 +1388,7 @@ type MapRequest struct { // // If true and Version >= 68, the server should treat this as a read-only // request and ignore any Hostinfo or other fields that might be set. - Stream bool + Stream bool `json:",omitzero"` // Hostinfo is the client's current Hostinfo. Although it is always included // in the request, the server may choose to ignore it when Stream is true @@ -1405,14 +1405,14 @@ type MapRequest struct { // // The server may choose to ignore the request for any reason and start a // new map session. This is only applicable when Stream is true. - MapSessionHandle string `json:",omitempty"` + MapSessionHandle string `json:",omitzero"` // MapSessionSeq is the sequence number in the map session identified by // MapSesssionHandle that was most recently processed by the client. // It is only applicable when MapSessionHandle is specified. // If the server chooses to honor the MapSessionHandle request, only sequence // numbers greater than this value will be returned. - MapSessionSeq int64 `json:",omitempty"` + MapSessionSeq int64 `json:",omitzero"` // Endpoints are the client's magicsock UDP ip:port endpoints (IPv4 or IPv6). // These can be ignored if Stream is true and Version >= 68. @@ -1423,7 +1423,7 @@ type MapRequest struct { // TKAHead describes the hash of the latest AUM applied to the local // tailnet key authority, if one is operating. // It is encoded as tka.AUMHash.MarshalText. 
- TKAHead string `json:",omitempty"` + TKAHead string `json:",omitzero"` // ReadOnly was set when client just wanted to fetch the MapResponse, // without updating their Endpoints. The intended use was for clients to @@ -1431,7 +1431,7 @@ type MapRequest struct { // update. // // Deprecated: always false as of Version 68. - ReadOnly bool `json:",omitempty"` + ReadOnly bool `json:",omitzero"` // OmitPeers is whether the client is okay with the Peers list being omitted // in the response. @@ -1447,7 +1447,7 @@ type MapRequest struct { // If OmitPeers is true, Stream is false, but ReadOnly is true, // then all the response fields are included. (This is what the client does // when initially fetching the DERP map.) - OmitPeers bool `json:",omitempty"` + OmitPeers bool `json:",omitzero"` // DebugFlags is a list of strings specifying debugging and // development features to enable in handling this map @@ -1467,7 +1467,7 @@ type MapRequest struct { // identifies this specific connection to the server. The server may choose to // use this handle to identify the connection for debugging or testing // purposes. It has no semantic meaning. - ConnectionHandleForTest string `json:",omitempty"` + ConnectionHandleForTest string `json:",omitzero"` } // PortRange represents a range of UDP or TCP port numbers. @@ -1758,7 +1758,7 @@ type DNSConfig struct { // in the network map, aka MagicDNS. // Despite the (legacy) name, does not necessarily cause request // proxying to be enabled. - Proxied bool `json:",omitempty"` + Proxied bool `json:",omitzero"` // Nameservers are the IP addresses of the global nameservers to use. // @@ -1795,7 +1795,7 @@ type DNSConfig struct { // TempCorpIssue13969 is a temporary (2023-08-16) field for an internal hack day prototype. // It contains a user inputed URL that should have a list of domains to be blocked. // See https://github.com/tailscale/corp/issues/13969. 
- TempCorpIssue13969 string `json:",omitempty"` + TempCorpIssue13969 string `json:",omitzero"` } // DNSRecord is an extra DNS record to add to MagicDNS. @@ -1807,7 +1807,7 @@ type DNSRecord struct { // Type is the DNS record type. // Empty means A or AAAA, depending on value. // Other values are currently ignored. - Type string `json:",omitempty"` + Type string `json:",omitzero"` // Value is the IP address in string form. // TODO(bradfitz): if we ever add support for record types @@ -1855,11 +1855,11 @@ type PingRequest struct { // URLIsNoise, if true, means that the client should hit URL over the Noise // transport instead of TLS. - URLIsNoise bool `json:",omitempty"` + URLIsNoise bool `json:",omitzero"` // Log is whether to log about this ping in the success case. // For failure cases, the client will log regardless. - Log bool `json:",omitempty"` + Log bool `json:",omitzero"` // Types is the types of ping that are initiated. Can be any PingType, comma // separated, e.g. "disco,TSMP" @@ -1869,10 +1869,10 @@ type PingRequest struct { // node's c2n handler and the HTTP response sent in a POST to URL. For c2n, // the value of URLIsNoise is ignored and only the Noise transport (back to // the control plane) will be used, as if URLIsNoise were true. - Types string `json:",omitempty"` + Types string `json:",omitzero"` // IP is the ping target, when needed by the PingType(s) given in Types. - IP netip.Addr + IP netip.Addr `json:",omitzero"` // Payload is the ping payload. // @@ -3043,29 +3043,29 @@ type SSHRecordingAttempt struct { // See QueryFeatureResponse for response structure. type QueryFeatureRequest struct { // Feature is the string identifier for a feature. - Feature string `json:",omitempty"` + Feature string `json:",omitzero"` // NodeKey is the client's current node key. - NodeKey key.NodePublic `json:",omitempty"` + NodeKey key.NodePublic `json:",omitzero"` } // QueryFeatureResponse is the response to an QueryFeatureRequest. 
// See cli.enableFeatureInteractive for usage. type QueryFeatureResponse struct { // Complete is true when the feature is already enabled. - Complete bool `json:",omitempty"` + Complete bool `json:",omitzero"` // Text holds lines to display in the CLI with information // about the feature and how to enable it. // // Lines are separated by newline characters. The final // newline may be omitted. - Text string `json:",omitempty"` + Text string `json:",omitzero"` // URL is the link for the user to visit to take action on // enabling the feature. // // When empty, there is no action for this user to take. - URL string `json:",omitempty"` + URL string `json:",omitzero"` // ShouldWait specifies whether the CLI should block and // wait for the user to enable the feature. @@ -3078,7 +3078,7 @@ type QueryFeatureResponse struct { // // The CLI can watch the IPN notification bus for changes in // required node capabilities to know when to continue. - ShouldWait bool `json:",omitempty"` + ShouldWait bool `json:",omitzero"` } // WebClientAuthResponse is the response to a web client authentication request @@ -3088,15 +3088,15 @@ type WebClientAuthResponse struct { // ID is a unique identifier for the session auth request. // It can be supplied to "/machine/webclient/wait" to pause until // the session authentication has been completed. - ID string `json:",omitempty"` + ID string `json:",omitzero"` // URL is the link for the user to visit to authenticate the session. // // When empty, there is no action for the user to take. - URL string `json:",omitempty"` + URL string `json:",omitzero"` // Complete is true when the session authentication has been completed. - Complete bool `json:",omitempty"` + Complete bool `json:",omitzero"` } // OverTLSPublicKeyResponse is the JSON response to /key?v= @@ -3172,10 +3172,10 @@ type PeerChange struct { // DERPRegion, if non-zero, means that NodeID's home DERP // region ID is now this number. 
- DERPRegion int `json:",omitempty"` + DERPRegion int `json:",omitzero"` // Cap, if non-zero, means that NodeID's capability version has changed. - Cap CapabilityVersion `json:",omitempty"` + Cap CapabilityVersion `json:",omitzero"` // CapMap, if non-nil, means that NodeID's capability map has changed. CapMap NodeCapMap `json:",omitempty"` @@ -3185,23 +3185,23 @@ type PeerChange struct { Endpoints []netip.AddrPort `json:",omitempty"` // Key, if non-nil, means that the NodeID's wireguard public key changed. - Key *key.NodePublic `json:",omitempty"` + Key *key.NodePublic `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // KeySignature, if non-nil, means that the signature of the wireguard // public key has changed. KeySignature tkatype.MarshaledSignature `json:",omitempty"` // DiscoKey, if non-nil, means that the NodeID's discokey changed. - DiscoKey *key.DiscoPublic `json:",omitempty"` + DiscoKey *key.DiscoPublic `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // Online, if non-nil, means that the NodeID's online status changed. - Online *bool `json:",omitempty"` + Online *bool `json:",omitzero"` // LastSeen, if non-nil, means that the NodeID's online status changed. - LastSeen *time.Time `json:",omitempty"` + LastSeen *time.Time `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 // KeyExpiry, if non-nil, changes the NodeID's key expiry. - KeyExpiry *time.Time `json:",omitempty"` + KeyExpiry *time.Time `json:",omitzero"` // TODO: de-pointer: tailscale/tailscale#17978 } // DerpMagicIP is a fake WireGuard endpoint IP address that means to @@ -3279,14 +3279,14 @@ const ( // POST https:///machine/audit-log type AuditLogRequest struct { // Version is the client's current CapabilityVersion. - Version CapabilityVersion `json:",omitempty"` + Version CapabilityVersion `json:",omitzero"` // NodeKey is the client's current node key. NodeKey key.NodePublic `json:",omitzero"` // Action is the action to be logged. 
It must correspond to a known action in the control plane. - Action ClientAuditAction `json:",omitempty"` + Action ClientAuditAction `json:",omitzero"` // Details is an opaque string, specific to the action being logged. Empty strings may not // be valid depending on the action being logged. - Details string `json:",omitempty"` + Details string `json:",omitzero"` // Timestamp is the time at which the audit log was generated on the node. Timestamp time.Time `json:",omitzero"` } From 408336a0891288ab3bb7466734d9646fe17fbee1 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 18 Nov 2025 17:39:37 -0800 Subject: [PATCH 0720/1093] feature/featuretags: add CacheNetMap feature tag for upcoming work (trying to get in smaller obvious chunks ahead of later PRs to make them smaller) Updates #17925 Change-Id: I184002001055790484e4792af8ffe2a9a2465b2e Signed-off-by: Brad Fitzpatrick --- .../buildfeatures/feature_cachenetmap_disabled.go | 13 +++++++++++++ .../buildfeatures/feature_cachenetmap_enabled.go | 13 +++++++++++++ feature/featuretags/featuretags.go | 4 ++++ 3 files changed, 30 insertions(+) create mode 100644 feature/buildfeatures/feature_cachenetmap_disabled.go create mode 100644 feature/buildfeatures/feature_cachenetmap_enabled.go diff --git a/feature/buildfeatures/feature_cachenetmap_disabled.go b/feature/buildfeatures/feature_cachenetmap_disabled.go new file mode 100644 index 0000000000000..22407fe38a57f --- /dev/null +++ b/feature/buildfeatures/feature_cachenetmap_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_cachenetmap + +package buildfeatures + +// HasCacheNetMap is whether the binary was built with support for modular feature "Cache the netmap on disk between runs". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cachenetmap" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasCacheNetMap = false diff --git a/feature/buildfeatures/feature_cachenetmap_enabled.go b/feature/buildfeatures/feature_cachenetmap_enabled.go new file mode 100644 index 0000000000000..02663c416bcbb --- /dev/null +++ b/feature/buildfeatures/feature_cachenetmap_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_cachenetmap + +package buildfeatures + +// HasCacheNetMap is whether the binary was built with support for modular feature "Cache the netmap on disk between runs". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cachenetmap" build tag. +// It's a const so it can be used for dead code elimination. +const HasCacheNetMap = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index c93e8b15b1001..44b1295769c56 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -123,6 +123,10 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Control-to-node (C2N) support", ImplementationDetail: true, }, + "cachenetmap": { + Sym: "CacheNetMap", + Desc: "Cache the netmap on disk between runs", + }, "captiveportal": {Sym: "CaptivePortal", Desc: "Captive portal detection"}, "capture": {Sym: "Capture", Desc: "Packet capture"}, "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, From 38ccdbe35c88c08311d79db651ef7d4161a2ffc2 Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 19 Nov 2025 11:57:27 +0000 Subject: [PATCH 0721/1093] cmd/k8s-operator: default to stable image (#17848) This commit modifies the helm/static manifest configuration for the k8s-operator to prefer the stable image tag. This avoids making those using static manifests seeing unstable behaviour by default if they do not manually make the change. This is managed for us when using helm but not when generating the static manifests. 
Updates https://github.com/tailscale/tailscale/issues/10655 Signed-off-by: David Bond --- cmd/k8s-operator/deploy/chart/Chart.yaml | 2 +- cmd/k8s-operator/deploy/manifests/operator.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/Chart.yaml b/cmd/k8s-operator/deploy/chart/Chart.yaml index 363d87d15954a..9db6389d1d944 100644 --- a/cmd/k8s-operator/deploy/chart/Chart.yaml +++ b/cmd/k8s-operator/deploy/chart/Chart.yaml @@ -26,4 +26,4 @@ maintainers: version: 0.1.0 # appVersion will be set to Tailscale repo tag at release time. -appVersion: "unstable" +appVersion: "stable" diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index c7c5ef0a7d3b2..c5da367e099a6 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -5366,7 +5366,7 @@ spec: - name: CLIENT_SECRET_FILE value: /oauth/client_secret - name: PROXY_IMAGE - value: tailscale/tailscale:unstable + value: tailscale/tailscale:stable - name: PROXY_TAGS value: tag:k8s - name: APISERVER_PROXY @@ -5381,7 +5381,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.uid - image: tailscale/k8s-operator:unstable + image: tailscale/k8s-operator:stable imagePullPolicy: Always name: operator volumeMounts: From e1dd9222d4a8e8147f6067b7f3b3956995b5bc6c Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 17 Nov 2025 16:38:57 +0000 Subject: [PATCH 0722/1093] ipn/ipnlocal, tka: compact TKA state after every sync Previously a TKA compaction would only run when a node starts, which means a long-running node could use unbounded storage as it accumulates ever-increasing amounts of TKA state. This patch changes TKA so it runs a compaction after every sync. 
Updates https://github.com/tailscale/corp/issues/33537 Change-Id: I91df887ea0c5a5b00cb6caced85aeffa2a4b24ee Signed-off-by: Alex Chan --- ipn/ipnlocal/network-lock.go | 11 +- ipn/ipnlocal/network-lock_test.go | 224 ++++++++++++++++++++++++++++- tka/builder_test.go | 14 +- tka/chaintest_test.go | 8 +- tka/key_test.go | 2 +- tka/sync_test.go | 2 +- tka/tailchonk.go | 22 ++- tka/tailchonk_test.go | 6 +- tka/tka_test.go | 14 +- tstest/chonktest/tailchonk_test.go | 4 +- 10 files changed, 276 insertions(+), 31 deletions(-) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 14a3b105b59b0..8c77cd92dcf98 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -360,6 +360,13 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie if err := b.tkaSyncLocked(ourNodeKey); err != nil { return fmt.Errorf("tka sync: %w", err) } + // Try to compact the TKA state, to avoid unbounded storage on nodes. + // + // We run this on every sync so that clients compact consistently. In many + // cases this will be a no-op. + if err := b.tka.authority.Compact(b.tka.storage, tkaCompactionDefaults); err != nil { + return fmt.Errorf("tka compact: %w", err) + } } return nil @@ -508,7 +515,7 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per if root == "" { b.health.SetUnhealthy(noNetworkLockStateDirWarnable, nil) b.logf("network-lock using in-memory storage; no state directory") - storage = &tka.Mem{} + storage = tka.ChonkMem() } else { chonkDir := b.chonkPathLocked() chonk, err := tka.ChonkDir(chonkDir) @@ -686,7 +693,7 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt // We use an in-memory tailchonk because we don't want to commit to // the filesystem until we've finished the initialization sequence, // just in case something goes wrong. 
- _, genesisAUM, err := tka.Create(&tka.Mem{}, tka.State{ + _, genesisAUM, err := tka.Create(tka.ChonkMem(), tka.State{ Keys: keys, // TODO(tom): s/tka.State.DisablementSecrets/tka.State.DisablementValues // This will center on consistent nomenclature: diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 5fa0728830eb6..c07e65ee35e27 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -17,6 +17,7 @@ import ( "path/filepath" "reflect" "testing" + "time" go4mem "go4.org/mem" @@ -31,6 +32,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tka" "tailscale.com/tsd" + "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/persist" @@ -89,7 +91,7 @@ func TestTKAEnablementFlow(t *testing.T) { // our mock server can communicate. nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - a1, genesisAUM, err := tka.Create(&tka.Mem{}, tka.State{ + a1, genesisAUM, err := tka.Create(tka.ChonkMem(), tka.State{ Keys: []tka.Key{key}, DisablementSecrets: [][]byte{bytes.Repeat([]byte{0xa5}, 32)}, }, nlPriv) @@ -399,7 +401,7 @@ func TestTKASync(t *testing.T) { // Setup the tka authority on the control plane. key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - controlStorage := &tka.Mem{} + controlStorage := tka.ChonkMem() controlAuthority, bootstrap, err := tka.Create(controlStorage, tka.State{ Keys: []tka.Key{key, someKey}, DisablementSecrets: [][]byte{tka.DisablementKDF(disablementSecret)}, @@ -548,10 +550,226 @@ func TestTKASync(t *testing.T) { } } +// Whenever we run a TKA sync and get new state from control, we compact the +// local state. 
+func TestTKASyncTriggersCompact(t *testing.T) { + someKeyPriv := key.NewNLPrivate() + someKey := tka.Key{Kind: tka.Key25519, Public: someKeyPriv.Public().Verifier(), Votes: 1} + + disablementSecret := bytes.Repeat([]byte{0xa5}, 32) + + nodePriv := key.NewNode() + nlPriv := key.NewNLPrivate() + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) + must.Do(pm.SetPrefs((&ipn.Prefs{ + Persist: &persist.Persist{ + PrivateNodeKey: nodePriv, + NetworkLockKey: nlPriv, + }, + }).View(), ipn.NetworkProfile{})) + + // Create a clock, and roll it back by 30 days. + // + // Our compaction algorithm preserves AUMs received in the last 14 days, so + // we need to backdate the commit times to make the AUMs eligible for compaction. + clock := tstest.NewClock(tstest.ClockOpts{}) + clock.Advance(-30 * 24 * time.Hour) + + // Set up the TKA authority on the control plane. + key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} + controlStorage := tka.ChonkMem() + controlStorage.SetClock(clock) + controlAuthority, bootstrap, err := tka.Create(controlStorage, tka.State{ + Keys: []tka.Key{key, someKey}, + DisablementSecrets: [][]byte{tka.DisablementKDF(disablementSecret)}, + }, nlPriv) + if err != nil { + t.Fatalf("tka.Create() failed: %v", err) + } + + // Fill the control plane TKA authority with a lot of AUMs, enough so that: + // + // 1. the chain of AUMs includes some checkpoints + // 2. 
the chain is long enough it would be trimmed if we ran the compaction + // algorithm with the defaults + for range 100 { + upd := controlAuthority.NewUpdater(nlPriv) + if err := upd.RemoveKey(someKey.MustID()); err != nil { + t.Fatalf("RemoveKey: %v", err) + } + if err := upd.AddKey(someKey); err != nil { + t.Fatalf("AddKey: %v", err) + } + aums, err := upd.Finalize(controlStorage) + if err != nil { + t.Fatalf("Finalize: %v", err) + } + if err := controlAuthority.Inform(controlStorage, aums); err != nil { + t.Fatalf("controlAuthority.Inform() failed: %v", err) + } + } + + // Set up the TKA authority on the node. + nodeStorage := tka.ChonkMem() + nodeStorage.SetClock(clock) + nodeAuthority, err := tka.Bootstrap(nodeStorage, bootstrap) + if err != nil { + t.Fatalf("tka.Bootstrap() failed: %v", err) + } + + // Make a mock control server. + ts, client := fakeNoiseServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + switch r.URL.Path { + case "/machine/tka/sync/offer": + body := new(tailcfg.TKASyncOfferRequest) + if err := json.NewDecoder(r.Body).Decode(body); err != nil { + t.Fatal(err) + } + t.Logf("got sync offer:\n%+v", body) + nodeOffer, err := toSyncOffer(body.Head, body.Ancestors) + if err != nil { + t.Fatal(err) + } + controlOffer, err := controlAuthority.SyncOffer(controlStorage) + if err != nil { + t.Fatal(err) + } + sendAUMs, err := controlAuthority.MissingAUMs(controlStorage, nodeOffer) + if err != nil { + t.Fatal(err) + } + + head, ancestors, err := fromSyncOffer(controlOffer) + if err != nil { + t.Fatal(err) + } + resp := tailcfg.TKASyncOfferResponse{ + Head: head, + Ancestors: ancestors, + MissingAUMs: make([]tkatype.MarshaledAUM, len(sendAUMs)), + } + for i, a := range sendAUMs { + resp.MissingAUMs[i] = a.Serialize() + } + + t.Logf("responding to sync offer with:\n%+v", resp) + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(resp); err != nil { + t.Fatal(err) + } + + case "/machine/tka/sync/send": + 
body := new(tailcfg.TKASyncSendRequest) + if err := json.NewDecoder(r.Body).Decode(body); err != nil { + t.Fatal(err) + } + t.Logf("got sync send:\n%+v", body) + + var remoteHead tka.AUMHash + if err := remoteHead.UnmarshalText([]byte(body.Head)); err != nil { + t.Fatalf("head unmarshal: %v", err) + } + toApply := make([]tka.AUM, len(body.MissingAUMs)) + for i, a := range body.MissingAUMs { + if err := toApply[i].Unserialize(a); err != nil { + t.Fatalf("decoding missingAUM[%d]: %v", i, err) + } + } + + if len(toApply) > 0 { + if err := controlAuthority.Inform(controlStorage, toApply); err != nil { + t.Fatalf("control.Inform(%+v) failed: %v", toApply, err) + } + } + head, err := controlAuthority.Head().MarshalText() + if err != nil { + t.Fatal(err) + } + + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(tailcfg.TKASyncSendResponse{ + Head: string(head), + }); err != nil { + t.Fatal(err) + } + + default: + t.Errorf("unhandled endpoint path: %v", r.URL.Path) + w.WriteHeader(404) + } + })) + defer ts.Close() + + // Setup the client. + cc, _ := fakeControlClient(t, client) + b := LocalBackend{ + cc: cc, + ccAuto: cc, + logf: t.Logf, + pm: pm, + store: pm.Store(), + tka: &tkaState{ + authority: nodeAuthority, + storage: nodeStorage, + }, + } + + // Trigger a sync. + err = b.tkaSyncIfNeeded(&netmap.NetworkMap{ + TKAEnabled: true, + TKAHead: controlAuthority.Head(), + }, pm.CurrentPrefs()) + if err != nil { + t.Errorf("tkaSyncIfNeeded() failed: %v", err) + } + + // Add a new AUM in control. + upd := controlAuthority.NewUpdater(nlPriv) + if err := upd.RemoveKey(someKey.MustID()); err != nil { + t.Fatalf("RemoveKey: %v", err) + } + aums, err := upd.Finalize(controlStorage) + if err != nil { + t.Fatalf("Finalize: %v", err) + } + if err := controlAuthority.Inform(controlStorage, aums); err != nil { + t.Fatalf("controlAuthority.Inform() failed: %v", err) + } + + // Run a second sync, which should trigger a compaction. 
+ err = b.tkaSyncIfNeeded(&netmap.NetworkMap{ + TKAEnabled: true, + TKAHead: controlAuthority.Head(), + }, pm.CurrentPrefs()) + if err != nil { + t.Errorf("tkaSyncIfNeeded() failed: %v", err) + } + + // Check that the node and control plane are in sync. + if nodeHead, controlHead := b.tka.authority.Head(), controlAuthority.Head(); nodeHead != controlHead { + t.Errorf("node head = %v, want %v", nodeHead, controlHead) + } + + // Check the node has compacted away some of its AUMs; that it has purged some AUMs which + // are still kept in the control plane. + nodeAUMs, err := b.tka.storage.AllAUMs() + if err != nil { + t.Errorf("AllAUMs() for node failed: %v", err) + } + controlAUMS, err := controlStorage.AllAUMs() + if err != nil { + t.Errorf("AllAUMs() for control failed: %v", err) + } + if len(nodeAUMs) == len(controlAUMS) { + t.Errorf("node has not compacted; it has the same number of AUMs as control (node = control = %d)", len(nodeAUMs)) + } +} + func TestTKAFilterNetmap(t *testing.T) { nlPriv := key.NewNLPrivate() nlKey := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - storage := &tka.Mem{} + storage := tka.ChonkMem() authority, _, err := tka.Create(storage, tka.State{ Keys: []tka.Key{nlKey}, DisablementSecrets: [][]byte{bytes.Repeat([]byte{0xa5}, 32)}, diff --git a/tka/builder_test.go b/tka/builder_test.go index 52907186b6d30..3fd32f64eac12 100644 --- a/tka/builder_test.go +++ b/tka/builder_test.go @@ -28,7 +28,7 @@ func TestAuthorityBuilderAddKey(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -62,7 +62,7 @@ func TestAuthorityBuilderMaxKey(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ 
Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -109,7 +109,7 @@ func TestAuthorityBuilderRemoveKey(t *testing.T) { pub2, _ := testingKey25519(t, 2) key2 := Key{Kind: Key25519, Public: pub2, Votes: 1} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key, key2}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -155,7 +155,7 @@ func TestAuthorityBuilderSetKeyVote(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -191,7 +191,7 @@ func TestAuthorityBuilderSetKeyMeta(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2, Meta: map[string]string{"a": "b"}} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -227,7 +227,7 @@ func TestAuthorityBuilderMultiple(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, @@ -275,7 +275,7 @@ func TestAuthorityBuilderCheckpointsAfterXUpdates(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - storage := &Mem{} + storage := ChonkMem() a, _, err := Create(storage, State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, diff --git a/tka/chaintest_test.go b/tka/chaintest_test.go index 5811f9c8381ed..a3122b5d19da8 100644 --- a/tka/chaintest_test.go +++ b/tka/chaintest_test.go @@ -285,25 +285,25 @@ func (c *testChain) makeAUM(v *testchainNode) AUM { // Chonk returns a tailchonk containing all 
AUMs. func (c *testChain) Chonk() Chonk { - var out Mem + out := ChonkMem() for _, update := range c.AUMs { if err := out.CommitVerifiedAUMs([]AUM{update}); err != nil { panic(err) } } - return &out + return out } // ChonkWith returns a tailchonk containing the named AUMs. func (c *testChain) ChonkWith(names ...string) Chonk { - var out Mem + out := ChonkMem() for _, name := range names { update := c.AUMs[name] if err := out.CommitVerifiedAUMs([]AUM{update}); err != nil { panic(err) } } - return &out + return out } type testchainOpt struct { diff --git a/tka/key_test.go b/tka/key_test.go index fc379e246ad32..327de1a0e2851 100644 --- a/tka/key_test.go +++ b/tka/key_test.go @@ -72,7 +72,7 @@ func TestNLPrivate(t *testing.T) { // Test that key.NLPrivate implements Signer by making a new // authority. k := Key{Kind: Key25519, Public: pub.Verifier(), Votes: 1} - _, aum, err := Create(&Mem{}, State{ + _, aum, err := Create(ChonkMem(), State{ Keys: []Key{k}, DisablementSecrets: [][]byte{bytes.Repeat([]byte{1}, 32)}, }, p) diff --git a/tka/sync_test.go b/tka/sync_test.go index f9d86c16a9e0c..ea14a37e57e9b 100644 --- a/tka/sync_test.go +++ b/tka/sync_test.go @@ -346,7 +346,7 @@ func TestSyncSimpleE2E(t *testing.T) { optKey("key", key, priv), optSignAllUsing("key")) - nodeStorage := &Mem{} + nodeStorage := ChonkMem() node, err := Bootstrap(nodeStorage, c.AUMs["G1"]) if err != nil { t.Fatalf("node Bootstrap() failed: %v", err) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index d92016c45d71f..a55033bcd8bb7 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -19,6 +19,8 @@ import ( "github.com/fxamacker/cbor/v2" "tailscale.com/atomicfile" + "tailscale.com/tstime" + "tailscale.com/util/testenv" ) // Chonk implementations provide durable storage for AUMs and other @@ -92,6 +94,7 @@ type Mem struct { mu sync.RWMutex aums map[AUMHash]AUM commitTimes map[AUMHash]time.Time + clock tstime.Clock // parentIndex is a map of AUMs to the AUMs for which they are // the parent. 
@@ -103,6 +106,23 @@ type Mem struct { lastActiveAncestor *AUMHash } +// ChonkMem returns an implementation of Chonk which stores TKA state +// in-memory. +func ChonkMem() *Mem { + return &Mem{ + clock: tstime.DefaultClock{}, + } +} + +// SetClock sets the clock used by [Mem]. This is only for use in tests, +// and will panic if called from non-test code. +func (c *Mem) SetClock(clock tstime.Clock) { + if !testenv.InTest() { + panic("used SetClock in non-test code") + } + c.clock = clock +} + func (c *Mem) SetLastActiveAncestor(hash AUMHash) error { c.mu.Lock() defer c.mu.Unlock() @@ -173,7 +193,7 @@ updateLoop: for _, aum := range updates { aumHash := aum.Hash() c.aums[aumHash] = aum - c.commitTimes[aumHash] = time.Now() + c.commitTimes[aumHash] = c.clock.Now() parent, ok := aum.Parent() if ok { diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 7125c99fefe36..eeb6edfff3018 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -35,7 +35,7 @@ func randHash(t *testing.T, seed int64) [blake2s.Size]byte { } func TestImplementsChonk(t *testing.T) { - impls := []Chonk{&Mem{}, &FS{}} + impls := []Chonk{ChonkMem(), &FS{}} t.Logf("chonks: %v", impls) } @@ -229,7 +229,7 @@ func TestMarkActiveChain(t *testing.T) { verdict := make(map[AUMHash]retainState, len(tc.chain)) // Build the state of the tailchonk for tests. 
- storage := &Mem{} + storage := ChonkMem() var prev AUMHash for i := range tc.chain { if !prev.IsZero() { @@ -608,7 +608,7 @@ func TestCompactLongButYoung(t *testing.T) { ourKey := Key{Kind: Key25519, Public: ourPriv.Public().Verifier(), Votes: 1} someOtherKey := Key{Kind: Key25519, Public: key.NewNLPrivate().Public().Verifier(), Votes: 1} - storage := &Mem{} + storage := ChonkMem() auth, _, err := Create(storage, State{ Keys: []Key{ourKey, someOtherKey}, DisablementSecrets: [][]byte{DisablementKDF(bytes.Repeat([]byte{0xa5}, 32))}, diff --git a/tka/tka_test.go b/tka/tka_test.go index 9e3c4e79d05bd..78af7400daff3 100644 --- a/tka/tka_test.go +++ b/tka/tka_test.go @@ -253,7 +253,7 @@ func TestOpenAuthority(t *testing.T) { } // Construct the state of durable storage. - chonk := &Mem{} + chonk := ChonkMem() err := chonk.CommitVerifiedAUMs([]AUM{g1, i1, l1, i2, i3, l2, l3, g2, l4}) if err != nil { t.Fatal(err) @@ -275,7 +275,7 @@ func TestOpenAuthority(t *testing.T) { } func TestOpenAuthority_EmptyErrors(t *testing.T) { - _, err := Open(&Mem{}) + _, err := Open(ChonkMem()) if err == nil { t.Error("Expected an error initializing an empty authority, got nil") } @@ -319,7 +319,7 @@ func TestCreateBootstrapAuthority(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} - a1, genesisAUM, err := Create(&Mem{}, State{ + a1, genesisAUM, err := Create(ChonkMem(), State{ Keys: []Key{key}, DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, }, signer25519(priv)) @@ -327,7 +327,7 @@ func TestCreateBootstrapAuthority(t *testing.T) { t.Fatalf("Create() failed: %v", err) } - a2, err := Bootstrap(&Mem{}, genesisAUM) + a2, err := Bootstrap(ChonkMem(), genesisAUM) if err != nil { t.Fatalf("Bootstrap() failed: %v", err) } @@ -366,7 +366,7 @@ func TestAuthorityInformNonLinear(t *testing.T) { optKey("key", key, priv), optSignAllUsing("key")) - storage := &Mem{} + storage := ChonkMem() a, err := Bootstrap(storage, c.AUMs["G1"]) if 
err != nil { t.Fatalf("Bootstrap() failed: %v", err) @@ -411,7 +411,7 @@ func TestAuthorityInformLinear(t *testing.T) { optKey("key", key, priv), optSignAllUsing("key")) - storage := &Mem{} + storage := ChonkMem() a, err := Bootstrap(storage, c.AUMs["G1"]) if err != nil { t.Fatalf("Bootstrap() failed: %v", err) @@ -444,7 +444,7 @@ func TestInteropWithNLKey(t *testing.T) { pub2 := key.NewNLPrivate().Public() pub3 := key.NewNLPrivate().Public() - a, _, err := Create(&Mem{}, State{ + a, _, err := Create(ChonkMem(), State{ Keys: []Key{ { Kind: Key25519, diff --git a/tstest/chonktest/tailchonk_test.go b/tstest/chonktest/tailchonk_test.go index 6dfab798ed11f..d9343e9160ea9 100644 --- a/tstest/chonktest/tailchonk_test.go +++ b/tstest/chonktest/tailchonk_test.go @@ -18,7 +18,7 @@ func TestImplementsChonk(t *testing.T) { { name: "Mem", newChonk: func(t *testing.T) tka.Chonk { - return &tka.Mem{} + return tka.ChonkMem() }, }, { @@ -42,7 +42,7 @@ func TestImplementsCompactableChonk(t *testing.T) { { name: "Mem", newChonk: func(t *testing.T) tka.CompactableChonk { - return &tka.Mem{} + return tka.ChonkMem() }, }, { From 62d64c05e1e8f6335627de9eca17aebc2c9910c1 Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Wed, 19 Nov 2025 07:08:40 -0600 Subject: [PATCH 0723/1093] cmd/k8s-operator: fix type comparison in apiserver proxy template (#17981) ArgoCD sends boolean values but the template expects strings, causing "incompatible types for comparison" errors. Wrap values with toString so both work. 
Fixes #17158 Signed-off-by: Raj Singh --- .../deploy/chart/templates/apiserverproxy-rbac.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml index ad0a6fb66f51e..d6e9d1bf48ef8 100644 --- a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml @@ -3,8 +3,8 @@ # If old setting used, enable both old (operator) and new (ProxyGroup) workflows. # If new setting used, enable only new workflow. -{{ if or (eq .Values.apiServerProxyConfig.mode "true") - (eq .Values.apiServerProxyConfig.allowImpersonation "true") }} +{{ if or (eq (toString .Values.apiServerProxyConfig.mode) "true") + (eq (toString .Values.apiServerProxyConfig.allowImpersonation) "true") }} apiVersion: v1 kind: ServiceAccount metadata: @@ -25,7 +25,7 @@ kind: ClusterRoleBinding metadata: name: tailscale-auth-proxy subjects: -{{- if eq .Values.apiServerProxyConfig.mode "true" }} +{{- if eq (toString .Values.apiServerProxyConfig.mode) "true" }} - kind: ServiceAccount name: operator namespace: {{ .Release.Namespace }} From aeda3e81832158fb70715bb99a7d249a55b21694 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 19 Nov 2025 09:41:43 +0000 Subject: [PATCH 0724/1093] ipn/ipnlocal: reduce profileManager boilerplate in network-lock tests Updates tailscale/corp#33537 Signed-off-by: Alex Chan --- ipn/ipnlocal/network-lock_test.go | 116 +++++++++--------------------- 1 file changed, 33 insertions(+), 83 deletions(-) diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index c07e65ee35e27..5d22425a1e5cb 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -37,13 +37,12 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/tkatype" - "tailscale.com/util/eventbus" 
"tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/set" ) -func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *eventbus.Bus) { +func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni @@ -51,7 +50,6 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even k := key.NewMachine() dialer := tsdial.NewDialer(netmon.NewStatic()) - dialer.SetBus(bus) opts := controlclient.Options{ ServerURL: "https://example.com", Hostinfo: hi, @@ -70,10 +68,11 @@ func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *even if err != nil { t.Fatal(err) } - return cc, bus + return cc } func fakeNoiseServer(t *testing.T, handler http.HandlerFunc) (*httptest.Server, *http.Client) { + t.Helper() ts := httptest.NewUnstartedServer(handler) ts.StartTLS() client := ts.Client() @@ -84,6 +83,17 @@ func fakeNoiseServer(t *testing.T, handler http.HandlerFunc) (*httptest.Server, return ts, client } +func setupProfileManager(t *testing.T, nodePriv key.NodePrivate, nlPriv key.NLPrivate) *profileManager { + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) + must.Do(pm.SetPrefs((&ipn.Prefs{ + Persist: &persist.Persist{ + PrivateNodeKey: nodePriv, + NetworkLockKey: nlPriv, + }, + }).View(), ipn.NetworkProfile{})) + return pm +} + func TestTKAEnablementFlow(t *testing.T) { nodePriv := key.NewNode() @@ -158,14 +168,8 @@ func TestTKAEnablementFlow(t *testing.T) { defer ts.Close() temp := t.TempDir() - cc, bus := fakeControlClient(t, client) - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(bus))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + cc := fakeControlClient(t, client) + pm := setupProfileManager(t, nodePriv, 
nlPriv) b := LocalBackend{ capTailnetLock: true, varRoot: temp, @@ -199,13 +203,7 @@ func TestTKADisablementFlow(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) temp := t.TempDir() tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) @@ -267,7 +265,7 @@ func TestTKADisablementFlow(t *testing.T) { })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -391,13 +389,7 @@ func TestTKASync(t *testing.T) { t.Run(tc.name, func(t *testing.T) { nodePriv := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Setup the tka authority on the control plane. key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} @@ -518,7 +510,7 @@ func TestTKASync(t *testing.T) { defer ts.Close() // Setup the client. 
- cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -560,13 +552,7 @@ func TestTKASyncTriggersCompact(t *testing.T) { nodePriv := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Create a clock, and roll it back by 30 days. // @@ -702,7 +688,7 @@ func TestTKASyncTriggersCompact(t *testing.T) { defer ts.Close() // Setup the client. - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ cc: cc, ccAuto: cc, @@ -923,13 +909,7 @@ func TestTKADisable(t *testing.T) { disablementSecret := bytes.Repeat([]byte{0xa5}, 32) nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) temp := t.TempDir() tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) @@ -985,7 +965,7 @@ func TestTKADisable(t *testing.T) { })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1014,13 +994,7 @@ func TestTKASign(t *testing.T) { toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Make 
a fake TKA authority, to seed local state. disablementSecret := bytes.Repeat([]byte{0xa5}, 32) @@ -1076,7 +1050,7 @@ func TestTKASign(t *testing.T) { } })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1103,13 +1077,7 @@ func TestTKAForceDisable(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) temp := t.TempDir() tkaPath := filepath.Join(temp, "tka-profile", string(pm.CurrentProfile().ID())) @@ -1156,7 +1124,7 @@ func TestTKAForceDisable(t *testing.T) { })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) sys := tsd.NewSystem() sys.Set(pm.Store()) @@ -1201,13 +1169,7 @@ func TestTKAAffectedSigs(t *testing.T) { // toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Make a fake TKA authority, to seed local state. 
disablementSecret := bytes.Repeat([]byte{0xa5}, 32) @@ -1292,7 +1254,7 @@ func TestTKAAffectedSigs(t *testing.T) { } })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1334,13 +1296,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { cosignPriv := key.NewNLPrivate() compromisedPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: nlPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, nlPriv) // Make a fake TKA authority, to seed local state. disablementSecret := bytes.Repeat([]byte{0xa5}, 32) @@ -1404,7 +1360,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { } })) defer ts.Close() - cc, _ := fakeControlClient(t, client) + cc := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1425,13 +1381,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { // Cosign using the cosigning key. { - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - must.Do(pm.SetPrefs((&ipn.Prefs{ - Persist: &persist.Persist{ - PrivateNodeKey: nodePriv, - NetworkLockKey: cosignPriv, - }, - }).View(), ipn.NetworkProfile{})) + pm := setupProfileManager(t, nodePriv, cosignPriv) b := LocalBackend{ varRoot: temp, logf: t.Logf, From 336df56f85459be76c4117baf76ce7851df4ba68 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 18 Nov 2025 17:04:01 +0000 Subject: [PATCH 0725/1093] cmd/tailscale/cli: remove Latin abbreviations from CLI help text Our style guide recommends avoiding Latin abbreviations in technical documentation, which includes the CLI help text. This is causing linter issues for the docs site, because this help text is copied into the docs. 
See http://go/style-guide/kb/language-and-grammar/abbreviations#latin-abbreviations Updates #cleanup Change-Id: I980c28d996466f0503aaaa65127685f4af608039 Signed-off-by: Alex Chan --- cmd/tailscale/cli/set.go | 2 +- cmd/tailscale/cli/up.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 3b5e032db124b..cb3a07a6fe0ec 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -110,7 +110,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { switch goos { case "linux": setf.BoolVar(&setArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes") - setf.BoolVar(&setArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)") + setf.BoolVar(&setArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, and so on)") setf.StringVar(&setArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)") case "windows": setf.BoolVar(&setArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)") diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index e8b0cd0d37145..7f5b2e6b4a61d 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -122,7 +122,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { switch goos { case "linux": upf.BoolVar(&upArgs.snat, "snat-subnet-routes", true, "source NAT traffic to local routes advertised with --advertise-routes") - upf.BoolVar(&upArgs.statefulFiltering, "stateful-filtering", false, "apply stateful filtering to forwarded packets (subnet routers, exit nodes, etc.)") + upf.BoolVar(&upArgs.statefulFiltering, "stateful-filtering", false, "apply stateful 
filtering to forwarded packets (subnet routers, exit nodes, and so on)") upf.StringVar(&upArgs.netfilterMode, "netfilter-mode", defaultNetfilterMode(), "netfilter mode (one of on, nodivert, off)") case "windows": upf.BoolVar(&upArgs.forceDaemon, "unattended", false, "run in \"Unattended Mode\" where Tailscale keeps running even after the current GUI user logs out (Windows-only)") From 6ac4356bce25daf4f9e7da9612a95607666d3c5f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 18 Nov 2025 18:15:08 -0800 Subject: [PATCH 0726/1093] util/eventbus: simplify some reflect in Bus.pump Updates #cleanup Change-Id: Ib7b497e22c6cdd80578c69cf728d45754e6f909e Signed-off-by: Brad Fitzpatrick --- util/eventbus/bus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 46fa5b1988334..aa6880d01614e 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -134,7 +134,7 @@ func (b *Bus) pump(ctx context.Context) { // queue space for it. for !vals.Empty() { val := vals.Peek() - dests := b.dest(reflect.ValueOf(val.Event).Type()) + dests := b.dest(reflect.TypeOf(val.Event)) if b.routeDebug.active() { clients := make([]*Client, len(dests)) From 976bf24f5e9e2e3a1ea93598f0a20e7820a94f11 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 19 Nov 2025 13:57:14 +0000 Subject: [PATCH 0727/1093] ipn/ipnlocal: remove the always-true CanSupportNetworkLock() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we support using an in-memory backend for TKA state (#17946), this function always returns `nil` – we can always support Network Lock. We don't need it any more. Plus, clean up a couple of errant TODOs from that PR. 
Updates tailscale/corp#33599 Change-Id: Ief93bb9adebb82b9ad1b3e406d1ae9d2fa234877 Signed-off-by: Alex Chan --- ipn/ipnlocal/network-lock.go | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 8c77cd92dcf98..78d4d236d5007 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -300,10 +300,6 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return nil } - if err := b.CanSupportNetworkLock(); err != nil { - return err - } - isEnabled := b.tka != nil wantEnabled := nm.TKAEnabled @@ -488,10 +484,6 @@ func (b *LocalBackend) chonkPathLocked() string { // // b.mu must be held. func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, persist persist.PersistView) error { - if err := b.CanSupportNetworkLock(); err != nil { - return err - } - var genesis tka.AUM if err := genesis.Unserialize(g); err != nil { return fmt.Errorf("reading genesis: %v", err) @@ -537,20 +529,6 @@ func (b *LocalBackend) tkaBootstrapFromGenesisLocked(g tkatype.MarshaledAUM, per return nil } -// CanSupportNetworkLock returns nil if tailscaled is able to operate -// a local tailnet key authority (and hence enforce network lock). -func (b *LocalBackend) CanSupportNetworkLock() error { - if b.tka != nil { - // If the TKA is being used, it is supported. - return nil - } - - // There's a var root (aka --statedir), so if network lock gets - // initialized we have somewhere to store our AUMs. That's all - // we need. - return nil -} - // NetworkLockStatus returns a structure describing the state of the // tailnet key authority, if any. func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { @@ -664,12 +642,7 @@ func tkaStateFromPeer(p tailcfg.NodeView) ipnstate.TKAPeer { // needing signatures is returned as a response. 
// The Finish RPC submits signatures for all these nodes, at which point // Control has everything it needs to atomically enable network lock. -// TODO(alexc): Only with persistent backend func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) error { - if err := b.CanSupportNetworkLock(); err != nil { - return err - } - var ourNodeKey key.NodePublic var nlPriv key.NLPrivate @@ -794,7 +767,6 @@ func (b *LocalBackend) NetworkLockForceLocalDisable() error { // NetworkLockSign signs the given node-key and submits it to the control plane. // rotationPublic, if specified, must be an ed25519 public key. -// TODO(alexc): in-memory only func (b *LocalBackend) NetworkLockSign(nodeKey key.NodePublic, rotationPublic []byte) error { ourNodeKey, sig, err := func(nodeKey key.NodePublic, rotationPublic []byte) (key.NodePublic, tka.NodeKeySignature, error) { b.mu.Lock() From 12c598de285f9fc719061892a59960c7ec5cf820 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 17 Nov 2025 15:02:51 +0000 Subject: [PATCH 0728/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/tailscale.md | 1 + 1 file changed, 1 insertion(+) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index c04e555637d2d..163a76d404202 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -69,6 +69,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/peterbourgon/ff/v3](https://pkg.go.dev/github.com/peterbourgon/ff/v3) ([Apache-2.0](https://github.com/peterbourgon/ff/blob/v3.4.0/LICENSE)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) + - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) 
([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) ([BSD-2-Clause](https://github.com/pkg/sftp/blob/v1.13.6/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) From a0d059d74c3d3e7274d224a4cb91f7348b3faa53 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Wed, 19 Nov 2025 12:29:08 -0500 Subject: [PATCH 0729/1093] cmd/tailscale/cli: allow remote target as service destination (#17607) This commit enables users to set a service backend to a remote destination, which can be a partial or a full URL. It also prevents users from setting remote destinations on Linux systems where the socket mark is not working; users on any version of the macOS extension cannot serve a service to a remote destination either. Socket mark usability is determined by a new LocalAPI endpoint. 
Fixes tailscale/corp#24783 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- client/local/local.go | 17 +++++ cmd/tailscale/cli/serve_legacy.go | 1 + cmd/tailscale/cli/serve_legacy_test.go | 5 ++ cmd/tailscale/cli/serve_v2.go | 100 ++++++++++++++++++++++--- cmd/tailscale/cli/serve_v2_test.go | 43 ++++++++--- ipn/localapi/localapi.go | 47 ++++++++---- ipn/serve.go | 38 ++++++++-- ipn/serve_test.go | 6 +- net/netns/netns_default.go | 4 + net/netns/netns_dw.go | 4 + 10 files changed, 221 insertions(+), 44 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index a5e04f122ca54..72ddbb55f773a 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -1401,6 +1401,23 @@ func (lc *Client) SuggestExitNode(ctx context.Context) (apitype.ExitNodeSuggesti return decodeJSON[apitype.ExitNodeSuggestionResponse](body) } +// CheckSOMarkInUse reports whether the socket mark option is in use. This will only +// be true if tailscale is running on Linux and tailscaled uses SO_MARK. +func (lc *Client) CheckSOMarkInUse(ctx context.Context) (bool, error) { + body, err := lc.get200(ctx, "/localapi/v0/check-so-mark-in-use") + if err != nil { + return false, err + } + var res struct { + UseSOMark bool `json:"useSoMark"` + } + + if err := json.Unmarshal(body, &res); err != nil { + return false, fmt.Errorf("invalid JSON from check-so-mark-in-use: %w", err) + } + return res.UseSOMark, nil +} + // ShutdownTailscaled requests a graceful shutdown of tailscaled. 
func (lc *Client) ShutdownTailscaled(ctx context.Context) error { _, err := lc.send(ctx, "POST", "/localapi/v0/shutdown", 200, nil) diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 171ec335c008b..580393ce489b1 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -149,6 +149,7 @@ type localServeClient interface { IncrementCounter(ctx context.Context, name string, delta int) error GetPrefs(ctx context.Context) (*ipn.Prefs, error) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) + CheckSOMarkInUse(ctx context.Context) (bool, error) } // serveEnv is the environment the serve command runs within. All I/O should be diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index 1d3854b0b0f74..819017ad81bb5 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -860,6 +860,7 @@ type fakeLocalServeClient struct { setCount int // counts calls to SetServeConfig queryFeatureResponse *mockQueryFeatureResponse // mock response to QueryFeature calls prefs *ipn.Prefs // fake preferences, used to test GetPrefs and SetPrefs + SOMarkInUse bool // fake SO mark in use status statusWithoutPeers *ipnstate.Status // nil for fakeStatus } @@ -937,6 +938,10 @@ func (lc *fakeLocalServeClient) IncrementCounter(ctx context.Context, name strin return nil // unused in tests } +func (lc *fakeLocalServeClient) CheckSOMarkInUse(ctx context.Context) (bool, error) { + return lc.SOMarkInUse, nil +} + // exactError returns an error checker that wants exactly the provided want error. // If optName is non-empty, it's used in the error message. 
func exactErr(want error, optName ...string) func(error) string { diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 33b676bf86a1f..b60e645f345ed 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -21,6 +21,7 @@ import ( "path" "path/filepath" "regexp" + "runtime" "slices" "sort" "strconv" @@ -33,6 +34,7 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" "tailscale.com/types/ipproto" + "tailscale.com/util/dnsname" "tailscale.com/util/mak" "tailscale.com/util/prompt" "tailscale.com/util/set" @@ -516,6 +518,9 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { if len(args) > 0 { target = args[0] } + if err := e.shouldWarnRemoteDestCompatibility(ctx, target); err != nil { + return err + } err = e.setServe(sc, dnsName, srvType, srvPort, mount, target, funnel, magicDNSSuffix, e.acceptAppCaps, int(e.proxyProtocol)) msg = e.messageForPort(sc, st, dnsName, srvType, srvPort) } @@ -999,16 +1004,17 @@ func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveTy } var ( - msgFunnelAvailable = "Available on the internet:" - msgServeAvailable = "Available within your tailnet:" - msgServiceWaitingApproval = "This machine is configured as a service proxy for %s, but approval from an admin is required. Once approved, it will be available in your Tailnet as:" - msgRunningInBackground = "%s started and running in the background." - msgRunningTunService = "IPv4 and IPv6 traffic to %s is being routed to your operating system." - msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" - msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off" - msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off" - msgDisableService = "To remove config for the service, run: tailscale serve clear %s" - msgToExit = "Press Ctrl+C to exit." 
+ msgFunnelAvailable = "Available on the internet:" + msgServeAvailable = "Available within your tailnet:" + msgServiceWaitingApproval = "This machine is configured as a service proxy for %s, but approval from an admin is required. Once approved, it will be available in your Tailnet as:" + msgRunningInBackground = "%s started and running in the background." + msgRunningTunService = "IPv4 and IPv6 traffic to %s is being routed to your operating system." + msgDisableProxy = "To disable the proxy, run: tailscale %s --%s=%d off" + msgDisableServiceProxy = "To disable the proxy, run: tailscale serve --service=%s --%s=%d off" + msgDisableServiceTun = "To disable the service in TUN mode, run: tailscale serve --service=%s --tun off" + msgDisableService = "To remove config for the service, run: tailscale serve clear %s" + msgWarnRemoteDestCompatibility = "Warning: %s doesn't support connecting to remote destinations from non-default route, see tailscale.com/kb/1552/tailscale-services for detail." + msgToExit = "Press Ctrl+C to exit." ) // messageForPort returns a message for the given port based on the @@ -1134,6 +1140,77 @@ func (e *serveEnv) messageForPort(sc *ipn.ServeConfig, st *ipnstate.Status, dnsN return output.String() } +// isRemote reports whether the given destination from serve config +// is a remote destination. 
+func isRemote(target string) bool { + // target being a port number means it's localhost + if _, err := strconv.ParseUint(target, 10, 16); err == nil { + return false + } + + // prepend tmp:// if no scheme is present just to help parsing + if !strings.Contains(target, "://") { + target = "tmp://" + target + } + + // make sure we can parse the target, wether it's a full URL or just a host:port + u, err := url.ParseRequestURI(target) + if err != nil { + // If we can't parse the target, it doesn't matter if it's remote or not + return false + } + validHN := dnsname.ValidHostname(u.Hostname()) == nil + validIP := net.ParseIP(u.Hostname()) != nil + if !validHN && !validIP { + return false + } + if u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" || u.Hostname() == "::1" { + return false + } + return true +} + +// shouldWarnRemoteDestCompatibility reports whether we should warn the user +// that their current OS/environment may not be compatible with +// service's proxy destination. +func (e *serveEnv) shouldWarnRemoteDestCompatibility(ctx context.Context, target string) error { + // no target means nothing to check + if target == "" { + return nil + } + + if filepath.IsAbs(target) || strings.HasPrefix(target, "text:") { + // local path or text target, nothing to check + return nil + } + + // only check for remote destinations + if !isRemote(target) { + return nil + } + + // Check if running as Mac extension and warn + if version.IsMacAppStore() || version.IsMacSysExt() { + return fmt.Errorf(msgWarnRemoteDestCompatibility, "the MacOS extension") + } + + // Check for linux, if it's running with TS_FORCE_LINUX_BIND_TO_DEVICE=true + // and tailscale bypass mark is not working. If any of these conditions are true, and the dest is + // a remote destination, return true. 
+ if runtime.GOOS == "linux" { + SOMarkInUse, err := e.lc.CheckSOMarkInUse(ctx) + if err != nil { + log.Printf("error checking SO mark in use: %v", err) + return nil + } + if !SOMarkInUse { + return fmt.Errorf(msgWarnRemoteDestCompatibility, "the Linux tailscaled without SO_MARK") + } + } + + return nil +} + func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort uint16, useTLS bool, mount, target, mds string, caps []tailcfg.PeerCapability) error { h := new(ipn.HTTPHandler) switch { @@ -1193,6 +1270,8 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType se return fmt.Errorf("invalid TCP target %q", target) } + svcName := tailcfg.AsServiceName(dnsName) + targetURL, err := ipn.ExpandProxyTargetValue(target, []string{"tcp"}, "tcp") if err != nil { return fmt.Errorf("unable to expand target: %v", err) @@ -1204,7 +1283,6 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType se } // TODO: needs to account for multiple configs from foreground mode - svcName := tailcfg.AsServiceName(dnsName) if sc.IsServingWeb(srcPort, svcName) { return fmt.Errorf("cannot serve TCP; already serving web on %d for %s", srcPort, dnsName) } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 5cdb395587031..491baf9dd3ae8 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -220,10 +220,20 @@ func TestServeDevConfigMutations(t *testing.T) { }}, }, { - name: "invalid_host", + name: "ip_host", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ - command: cmd("serve --https=443 --bg http://somehost:3000"), // invalid host - wantErr: anyErr(), + command: cmd("serve --https=443 --bg http://192.168.1.1:3000"), + want: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: 
"http://192.168.1.1:3000"}, + }}, + }, + }, }}, }, { @@ -233,6 +243,16 @@ func TestServeDevConfigMutations(t *testing.T) { wantErr: anyErr(), }}, }, + { + name: "no_scheme_remote_host_tcp", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, + steps: []step{{ + command: cmd("serve --https=443 --bg 192.168.1.1:3000"), + wantErr: exactErrMsg(errHelp), + }}, + }, { name: "turn_off_https", steps: []step{ @@ -402,15 +422,11 @@ func TestServeDevConfigMutations(t *testing.T) { }, }}, }, - { - name: "unknown_host_tcp", - steps: []step{{ - command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:5432"), - wantErr: exactErrMsg(errHelp), - }}, - }, { name: "tcp_port_too_low", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:0"), wantErr: exactErrMsg(errHelp), @@ -418,6 +434,9 @@ func TestServeDevConfigMutations(t *testing.T) { }, { name: "tcp_port_too_high", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ command: cmd("serve --tls-terminated-tcp=443 --bg tcp://somehost:65536"), wantErr: exactErrMsg(errHelp), @@ -532,6 +551,9 @@ func TestServeDevConfigMutations(t *testing.T) { }, { name: "bad_path", + initialState: fakeLocalServeClient{ + SOMarkInUse: true, + }, steps: []step{{ command: cmd("serve --bg --https=443 bad/path"), wantErr: exactErrMsg(errHelp), @@ -832,6 +854,7 @@ func TestServeDevConfigMutations(t *testing.T) { }, CurrentTailnet: &ipnstate.TailnetStatus{MagicDNSSuffix: "test.ts.net"}, }, + SOMarkInUse: true, }, steps: []step{{ command: cmd("serve --service=svc:foo --http=80 text:foo"), diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index c4ba2a40bd000..d3503d3024e96 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -35,6 +35,7 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" "tailscale.com/logtail" + "tailscale.com/net/netns" "tailscale.com/net/netutil" 
"tailscale.com/tailcfg" "tailscale.com/tstime" @@ -72,20 +73,21 @@ var handler = map[string]LocalAPIHandler{ // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: - "check-prefs": (*Handler).serveCheckPrefs, - "derpmap": (*Handler).serveDERPMap, - "goroutines": (*Handler).serveGoroutines, - "login-interactive": (*Handler).serveLoginInteractive, - "logout": (*Handler).serveLogout, - "ping": (*Handler).servePing, - "prefs": (*Handler).servePrefs, - "reload-config": (*Handler).reloadConfig, - "reset-auth": (*Handler).serveResetAuth, - "set-expiry-sooner": (*Handler).serveSetExpirySooner, - "shutdown": (*Handler).serveShutdown, - "start": (*Handler).serveStart, - "status": (*Handler).serveStatus, - "whois": (*Handler).serveWhoIs, + "check-prefs": (*Handler).serveCheckPrefs, + "check-so-mark-in-use": (*Handler).serveCheckSOMarkInUse, + "derpmap": (*Handler).serveDERPMap, + "goroutines": (*Handler).serveGoroutines, + "login-interactive": (*Handler).serveLoginInteractive, + "logout": (*Handler).serveLogout, + "ping": (*Handler).servePing, + "prefs": (*Handler).servePrefs, + "reload-config": (*Handler).reloadConfig, + "reset-auth": (*Handler).serveResetAuth, + "set-expiry-sooner": (*Handler).serveSetExpirySooner, + "shutdown": (*Handler).serveShutdown, + "start": (*Handler).serveStart, + "status": (*Handler).serveStatus, + "whois": (*Handler).serveWhoIs, } func init() { @@ -760,6 +762,23 @@ func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) }) } +// serveCheckSOMarkInUse reports whether SO_MARK is in use on the linux while +// running without TUN. For any other OS, it reports false. 
+func (h *Handler) serveCheckSOMarkInUse(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "SO_MARK check access denied", http.StatusForbidden) + return + } + usingSOMark := netns.UseSocketMark() + usingUserspaceNetworking := h.b.Sys().IsNetstack() + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(struct { + UseSOMark bool + }{ + UseSOMark: usingSOMark || usingUserspaceNetworking, + }) +} + func (h *Handler) serveCheckReversePathFiltering(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { http.Error(w, "reverse path filtering check access denied", http.StatusForbidden) diff --git a/ipn/serve.go b/ipn/serve.go index 1aab829feeec7..74195191c727d 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -17,6 +17,7 @@ import ( "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" "tailscale.com/types/ipproto" + "tailscale.com/util/dnsname" "tailscale.com/util/mak" "tailscale.com/util/set" ) @@ -673,7 +674,8 @@ func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error { // ExpandProxyTargetValue expands the supported target values to be proxied // allowing for input values to be a port number, a partial URL, or a full URL -// including a path. +// including a path. If it's for a service, remote addresses are allowed and +// there doesn't have to be a port specified. 
// // examples: // - 3000 @@ -683,17 +685,25 @@ func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error { // - https://localhost:3000 // - https-insecure://localhost:3000 // - https-insecure://localhost:3000/foo +// - https://tailscale.com func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultScheme string) (string, error) { const host = "127.0.0.1" + // empty target is invalid + if target == "" { + return "", fmt.Errorf("empty target") + } + // support target being a port number if port, err := strconv.ParseUint(target, 10, 16); err == nil { return fmt.Sprintf("%s://%s:%d", defaultScheme, host, port), nil } + hasScheme := true // prepend scheme if not present if !strings.Contains(target, "://") { target = defaultScheme + "://" + target + hasScheme = false } // make sure we can parse the target @@ -707,16 +717,28 @@ func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultSch return "", fmt.Errorf("must be a URL starting with one of the supported schemes: %v", supportedSchemes) } - // validate the host. - switch u.Hostname() { - case "localhost", "127.0.0.1": - default: - return "", errors.New("only localhost or 127.0.0.1 proxies are currently supported") + // validate port according to host. 
+ if u.Hostname() == "localhost" || u.Hostname() == "127.0.0.1" || u.Hostname() == "::1" { + // require port for localhost targets + if u.Port() == "" { + return "", fmt.Errorf("port required for localhost target %q", target) + } + } else { + validHN := dnsname.ValidHostname(u.Hostname()) == nil + validIP := net.ParseIP(u.Hostname()) != nil + if !validHN && !validIP { + return "", fmt.Errorf("invalid hostname or IP address %q", u.Hostname()) + } + // require scheme for non-localhost targets + if !hasScheme { + return "", fmt.Errorf("non-localhost target %q must include a scheme", target) + } } - - // validate the port port, err := strconv.ParseUint(u.Port(), 10, 16) if err != nil || port == 0 { + if u.Port() == "" { + return u.String(), nil // allow no port for remote destinations + } return "", fmt.Errorf("invalid port %q", u.Port()) } diff --git a/ipn/serve_test.go b/ipn/serve_test.go index 7028c1e17cd71..063ff3a87a744 100644 --- a/ipn/serve_test.go +++ b/ipn/serve_test.go @@ -260,12 +260,16 @@ func TestExpandProxyTargetDev(t *testing.T) { {name: "https+insecure-scheme", input: "https+insecure://localhost:8080", expected: "https+insecure://localhost:8080"}, {name: "change-default-scheme", input: "localhost:8080", defaultScheme: "https", expected: "https://localhost:8080"}, {name: "change-supported-schemes", input: "localhost:8080", defaultScheme: "tcp", supportedSchemes: []string{"tcp"}, expected: "tcp://localhost:8080"}, + {name: "remote-target", input: "https://example.com:8080", expected: "https://example.com:8080"}, + {name: "remote-IP-target", input: "http://120.133.20.2:8080", expected: "http://120.133.20.2:8080"}, + {name: "remote-target-no-port", input: "https://example.com", expected: "https://example.com"}, // errors {name: "invalid-port", input: "localhost:9999999", wantErr: true}, + {name: "invalid-hostname", input: "192.168.1:8080", wantErr: true}, {name: "unsupported-scheme", input: "ftp://localhost:8080", expected: "", wantErr: true}, - {name: 
"not-localhost", input: "https://tailscale.com:8080", expected: "", wantErr: true}, {name: "empty-input", input: "", expected: "", wantErr: true}, + {name: "localhost-no-port", input: "localhost", expected: "", wantErr: true}, } for _, tt := range tests { diff --git a/net/netns/netns_default.go b/net/netns/netns_default.go index 94f24d8fa4e19..58c5936640e4f 100644 --- a/net/netns/netns_default.go +++ b/net/netns/netns_default.go @@ -20,3 +20,7 @@ func control(logger.Logf, *netmon.Monitor) func(network, address string, c sysca func controlC(network, address string, c syscall.RawConn) error { return nil } + +func UseSocketMark() bool { + return false +} diff --git a/net/netns/netns_dw.go b/net/netns/netns_dw.go index f92ba9462c32a..b9f750e8a6657 100644 --- a/net/netns/netns_dw.go +++ b/net/netns/netns_dw.go @@ -25,3 +25,7 @@ func parseAddress(address string) (addr netip.Addr, err error) { return netip.ParseAddr(host) } + +func UseSocketMark() bool { + return false +} From 86a849860e7a407977226359ab5e211bb0b52b34 Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 20 Nov 2025 00:00:27 +0000 Subject: [PATCH 0730/1093] cmd/k8s-operator: use stable image for k8s-nameserver (#17985) This commit modifies the kubernetes operator to use the "stable" version of `k8s-nameserver` by default. 
Updates: https://github.com/tailscale/corp/issues/19028 Signed-off-by: David Bond --- cmd/k8s-operator/nameserver.go | 6 ++---- cmd/k8s-operator/nameserver_test.go | 3 ++- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 5de1c47ba2b7e..39db5f0f9cf16 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -26,6 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/yaml" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -45,10 +46,7 @@ const ( messageMultipleDNSConfigsPresent = "Multiple DNSConfig resources found in cluster. Please ensure no more than one is present." defaultNameserverImageRepo = "tailscale/k8s-nameserver" - // TODO (irbekrm): once we start publishing nameserver images for stable - // track, replace 'unstable' here with the version of this operator - // instance. 
- defaultNameserverImageTag = "unstable" + defaultNameserverImageTag = "stable" ) // NameserverReconciler knows how to create nameserver resources in cluster in diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index 6da52d8a21490..858cd973d82c2 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -19,6 +19,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/yaml" + operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstest" @@ -182,7 +183,7 @@ func TestNameserverReconciler(t *testing.T) { dnsCfg.Spec.Nameserver.Image = nil }) expectReconciled(t, reconciler, "", "test") - wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:unstable" + wantsDeploy.Spec.Template.Spec.Containers[0].Image = "tailscale/k8s-nameserver:stable" expectEqual(t, fc, wantsDeploy) }) } From 7d19813618e862d0a00cc66b600b470275c5b0bc Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 19 Nov 2025 14:53:43 -0800 Subject: [PATCH 0731/1093] net/batching: fix import formatting From #17842 Updates #cleanup Change-Id: Ie041b50659361b50558d5ec1f557688d09935f7c Signed-off-by: Brad Fitzpatrick --- net/batching/conn_linux_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/net/batching/conn_linux_test.go b/net/batching/conn_linux_test.go index 5e3c29e5ce37b..c2cc463ebc6ad 100644 --- a/net/batching/conn_linux_test.go +++ b/net/batching/conn_linux_test.go @@ -7,7 +7,6 @@ import ( "encoding/binary" "net" "testing" - "unsafe" "github.com/tailscale/wireguard-go/conn" From 682172ca2d39163b2bbfbc4b1422b1dfa8d453f0 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 19 Nov 2025 11:59:51 -0500 Subject: [PATCH 0732/1093] net/netns: remove spammy logs for interface binding caps fixes tailscale/tailscale#17990 The logging for the netns caps is spammy. 
Log only on changes to the values and don't log Darwin specific stuff on non Darwin clients. Signed-off-by: Jonathan Nobels --- net/netns/netns.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/net/netns/netns.go b/net/netns/netns.go index ccb20d27ed890..81ab5e2a212a6 100644 --- a/net/netns/netns.go +++ b/net/netns/netns.go @@ -17,6 +17,7 @@ import ( "context" "net" "net/netip" + "runtime" "sync/atomic" "tailscale.com/net/netknob" @@ -40,8 +41,9 @@ var bindToInterfaceByRoute atomic.Bool // // Currently, this only changes the behaviour on macOS and Windows. func SetBindToInterfaceByRoute(logf logger.Logf, v bool) { - logf("netns: bindToInterfaceByRoute to %v", v) - bindToInterfaceByRoute.Store(v) + if bindToInterfaceByRoute.Swap(v) != v { + logf("netns: bindToInterfaceByRoute changed to %v", v) + } } var disableBindConnToInterface atomic.Bool @@ -54,8 +56,9 @@ var disableBindConnToInterface atomic.Bool // SetDisableBindConnToInterfaceAppleExt which will disable explicit interface // binding only when tailscaled is running inside a network extension process. func SetDisableBindConnToInterface(logf logger.Logf, v bool) { - logf("netns: disableBindConnToInterface set to %v", v) - disableBindConnToInterface.Store(v) + if disableBindConnToInterface.Swap(v) != v { + logf("netns: disableBindConnToInterface changed to %v", v) + } } var disableBindConnToInterfaceAppleExt atomic.Bool @@ -64,8 +67,9 @@ var disableBindConnToInterfaceAppleExt atomic.Bool // connections to the default network interface but only on Apple clients where // tailscaled is running inside a network extension. 
func SetDisableBindConnToInterfaceAppleExt(logf logger.Logf, v bool) { - logf("netns: disableBindConnToInterfaceAppleExt set to %v", v) - disableBindConnToInterfaceAppleExt.Store(v) + if runtime.GOOS == "darwin" && disableBindConnToInterfaceAppleExt.Swap(v) != v { + logf("netns: disableBindConnToInterfaceAppleExt changed to %v", v) + } } // Listener returns a new net.Listener with its Control hook func From 42a52620168f9171c378b06eecb3c8d262f85e2e Mon Sep 17 00:00:00 2001 From: David Bond Date: Thu, 20 Nov 2025 11:46:34 +0000 Subject: [PATCH 0733/1093] cmd/k8s-operator: add multi replica support for recorders (#17864) This commit adds the `spec.replicas` field to the `Recorder` custom resource that allows for a highly available deployment of `tsrecorder` within a kubernetes cluster. Many changes were required here as the code hard-coded the assumption of a single replica. This has required a few loops, similar to what we do for the `Connector` resource to create auth and state secrets. It was also required to add a check to remove dangling state and auth secrets should the recorder be scaled down. 
Updates: https://github.com/tailscale/tailscale/issues/17965 Signed-off-by: David Bond --- .../deploy/crds/tailscale.com_recorders.yaml | 8 + .../deploy/manifests/operator.yaml | 8 + cmd/k8s-operator/operator.go | 2 +- cmd/k8s-operator/tsrecorder.go | 296 ++++++++++++------ cmd/k8s-operator/tsrecorder_specs.go | 95 ++++-- cmd/k8s-operator/tsrecorder_specs_test.go | 20 +- cmd/k8s-operator/tsrecorder_test.go | 89 ++++-- k8s-operator/api.md | 3 +- k8s-operator/apis/v1alpha1/types_recorder.go | 7 + .../apis/v1alpha1/zz_generated.deepcopy.go | 5 + 10 files changed, 381 insertions(+), 152 deletions(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml index 0f3dcfcca52c8..48db3ef4bd84d 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -68,6 +68,11 @@ spec: Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node. Required if S3 storage is not set up, to ensure that recordings are accessible. type: boolean + replicas: + description: Replicas specifies how many instances of tsrecorder to run. Defaults to 1. + type: integer + format: int32 + minimum: 0 statefulSet: description: |- Configuration parameters for the Recorder's StatefulSet. The operator @@ -1683,6 +1688,9 @@ spec: items: type: string pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + x-kubernetes-validations: + - rule: '!(self.replicas > 1 && (!has(self.storage) || !has(self.storage.s3)))' + message: S3 storage must be used when deploying multiple Recorder replicas status: description: |- RecorderStatus describes the status of the recorder. 
This is set diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index c5da367e099a6..2757f09e5f36b 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -3348,6 +3348,11 @@ spec: Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node. Required if S3 storage is not set up, to ensure that recordings are accessible. type: boolean + replicas: + description: Replicas specifies how many instances of tsrecorder to run. Defaults to 1. + format: int32 + minimum: 0 + type: integer statefulSet: description: |- Configuration parameters for the Recorder's StatefulSet. The operator @@ -4964,6 +4969,9 @@ spec: type: string type: array type: object + x-kubernetes-validations: + - message: S3 storage must be used when deploying multiple Recorder replicas + rule: '!(self.replicas > 1 && (!has(self.storage) || !has(self.storage.s3)))' status: description: |- RecorderStatus describes the status of the recorder. 
This is set diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 6b545a8273567..816fea5664557 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -44,10 +44,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "tailscale.com/envknob" "tailscale.com/client/local" "tailscale.com/client/tailscale" + "tailscale.com/envknob" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store/kubestore" diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index c922f78feff38..bfb01fa86de67 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -12,6 +12,7 @@ import ( "fmt" "net/http" "slices" + "strconv" "strings" "sync" @@ -29,6 +30,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/tailscale" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" @@ -69,13 +71,13 @@ func (r *RecorderReconciler) logger(name string) *zap.SugaredLogger { return r.log.With("Recorder", name) } -func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) { +func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { logger := r.logger(req.Name) logger.Debugf("starting reconcile") defer logger.Debugf("reconcile finished") tsr := new(tsapi.Recorder) - err = r.Get(ctx, req.NamespacedName, tsr) + err := r.Get(ctx, req.NamespacedName, tsr) if apierrors.IsNotFound(err) { logger.Debugf("Recorder not found, assuming it was deleted") return reconcile.Result{}, nil @@ -98,7 +100,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques } tsr.Finalizers = slices.Delete(tsr.Finalizers, ix, ix+1) - if err := 
r.Update(ctx, tsr); err != nil { + if err = r.Update(ctx, tsr); err != nil { return reconcile.Result{}, err } return reconcile.Result{}, nil @@ -110,10 +112,11 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) { // An error encountered here should get returned by the Reconcile function. if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil { - err = errors.Join(err, updateErr) + return reconcile.Result{}, errors.Join(err, updateErr) } } - return reconcile.Result{}, err + + return reconcile.Result{}, nil } if !slices.Contains(tsr.Finalizers, FinalizerName) { @@ -123,12 +126,12 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques // operation is underway. logger.Infof("ensuring Recorder is set up") tsr.Finalizers = append(tsr.Finalizers, FinalizerName) - if err := r.Update(ctx, tsr); err != nil { + if err = r.Update(ctx, tsr); err != nil { return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderCreationFailed, reasonRecorderCreationFailed) } } - if err := r.validate(ctx, tsr); err != nil { + if err = r.validate(ctx, tsr); err != nil { message := fmt.Sprintf("Recorder is invalid: %s", err) r.recorder.Eventf(tsr, corev1.EventTypeWarning, reasonRecorderInvalid, message) return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message) @@ -160,19 +163,29 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco gaugeRecorderResources.Set(int64(r.recorders.Len())) r.mu.Unlock() - if err := r.ensureAuthSecretCreated(ctx, tsr); err != nil { + if err := r.ensureAuthSecretsCreated(ctx, tsr); err != nil { return fmt.Errorf("error creating secrets: %w", err) } - // State Secret is precreated so we can use the Recorder CR as its owner ref. 
- sec := tsrStateSecret(tsr, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) { - s.ObjectMeta.Labels = sec.ObjectMeta.Labels - s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations - }); err != nil { - return fmt.Errorf("error creating state Secret: %w", err) + + // State Secrets are pre-created so we can use the Recorder CR as its owner ref. + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas + } + + for replica := range replicas { + sec := tsrStateSecret(tsr, r.tsNamespace, replica) + _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, sec, func(s *corev1.Secret) { + s.ObjectMeta.Labels = sec.ObjectMeta.Labels + s.ObjectMeta.Annotations = sec.ObjectMeta.Annotations + }) + if err != nil { + return fmt.Errorf("error creating state Secret %q: %w", sec.Name, err) + } } + sa := tsrServiceAccount(tsr, r.tsNamespace) - if _, err := createOrMaybeUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) error { + _, err := createOrMaybeUpdate(ctx, r.Client, r.tsNamespace, sa, func(s *corev1.ServiceAccount) error { // Perform this check within the update function to make sure we don't // have a race condition between the previous check and the update. 
if err := saOwnedByRecorder(s, tsr); err != nil { @@ -183,54 +196,68 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco s.ObjectMeta.Annotations = sa.ObjectMeta.Annotations return nil - }); err != nil { + }) + if err != nil { return fmt.Errorf("error creating ServiceAccount: %w", err) } + role := tsrRole(tsr, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { + _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, role, func(r *rbacv1.Role) { r.ObjectMeta.Labels = role.ObjectMeta.Labels r.ObjectMeta.Annotations = role.ObjectMeta.Annotations r.Rules = role.Rules - }); err != nil { + }) + if err != nil { return fmt.Errorf("error creating Role: %w", err) } + roleBinding := tsrRoleBinding(tsr, r.tsNamespace) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { + _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, roleBinding, func(r *rbacv1.RoleBinding) { r.ObjectMeta.Labels = roleBinding.ObjectMeta.Labels r.ObjectMeta.Annotations = roleBinding.ObjectMeta.Annotations r.RoleRef = roleBinding.RoleRef r.Subjects = roleBinding.Subjects - }); err != nil { + }) + if err != nil { return fmt.Errorf("error creating RoleBinding: %w", err) } + ss := tsrStatefulSet(tsr, r.tsNamespace, r.loginServer) - if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { + _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations s.Spec = ss.Spec - }); err != nil { + }) + if err != nil { return fmt.Errorf("error creating StatefulSet: %w", err) } // ServiceAccount name may have changed, in which case we need to clean up // the previous ServiceAccount. RoleBinding will already be updated to point // to the new ServiceAccount. 
- if err := r.maybeCleanupServiceAccounts(ctx, tsr, sa.Name); err != nil { + if err = r.maybeCleanupServiceAccounts(ctx, tsr, sa.Name); err != nil { return fmt.Errorf("error cleaning up ServiceAccounts: %w", err) } + // If we have scaled the recorder down, we will have dangling state secrets + // that we need to clean up. + if err = r.maybeCleanupSecrets(ctx, tsr); err != nil { + return fmt.Errorf("error cleaning up Secrets: %w", err) + } + var devices []tsapi.RecorderTailnetDevice + for replica := range replicas { + dev, ok, err := r.getDeviceInfo(ctx, tsr.Name, replica) + switch { + case err != nil: + return fmt.Errorf("failed to get device info: %w", err) + case !ok: + logger.Debugf("no Tailscale hostname known yet, waiting for Recorder pod to finish auth") + continue + } - device, ok, err := r.getDeviceInfo(ctx, tsr.Name) - if err != nil { - return fmt.Errorf("failed to get device info: %w", err) + devices = append(devices, dev) } - if !ok { - logger.Debugf("no Tailscale hostname known yet, waiting for Recorder pod to finish auth") - return nil - } - - devices = append(devices, device) tsr.Status.Devices = devices @@ -257,22 +284,89 @@ func saOwnedByRecorder(sa *corev1.ServiceAccount, tsr *tsapi.Recorder) error { func (r *RecorderReconciler) maybeCleanupServiceAccounts(ctx context.Context, tsr *tsapi.Recorder, currentName string) error { logger := r.logger(tsr.Name) - // List all ServiceAccounts owned by this Recorder. 
+ options := []client.ListOption{ + client.InNamespace(r.tsNamespace), + client.MatchingLabels(tsrLabels("recorder", tsr.Name, nil)), + } + sas := &corev1.ServiceAccountList{} - if err := r.List(ctx, sas, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels("recorder", tsr.Name, nil))); err != nil { + if err := r.List(ctx, sas, options...); err != nil { return fmt.Errorf("error listing ServiceAccounts for cleanup: %w", err) } - for _, sa := range sas.Items { - if sa.Name == currentName { + + for _, serviceAccount := range sas.Items { + if serviceAccount.Name == currentName { + continue + } + + err := r.Delete(ctx, &serviceAccount) + switch { + case apierrors.IsNotFound(err): + logger.Debugf("ServiceAccount %s not found, likely already deleted", serviceAccount.Name) + continue + case err != nil: + return fmt.Errorf("error deleting ServiceAccount %s: %w", serviceAccount.Name, err) + } + } + + return nil +} + +func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tsr *tsapi.Recorder) error { + options := []client.ListOption{ + client.InNamespace(r.tsNamespace), + client.MatchingLabels(tsrLabels("recorder", tsr.Name, nil)), + } + + secrets := &corev1.SecretList{} + if err := r.List(ctx, secrets, options...); err != nil { + return fmt.Errorf("error listing Secrets for cleanup: %w", err) + } + + // Get the largest ordinal suffix that we expect. Then we'll go through the list of secrets owned by this + // recorder and remove them. 
+ var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas + } + + for _, secret := range secrets.Items { + parts := strings.Split(secret.Name, "-") + if len(parts) == 0 { + continue + } + + ordinal, err := strconv.ParseUint(parts[len(parts)-1], 10, 32) + if err != nil { + return fmt.Errorf("error parsing secret name %q: %w", secret.Name, err) + } + + if int32(ordinal) < replicas { continue } - if err := r.Delete(ctx, &sa); err != nil { - if apierrors.IsNotFound(err) { - logger.Debugf("ServiceAccount %s not found, likely already deleted", sa.Name) - } else { - return fmt.Errorf("error deleting ServiceAccount %s: %w", sa.Name, err) + + devicePrefs, ok, err := getDevicePrefs(&secret) + if err != nil { + return err + } + + if ok { + var errResp *tailscale.ErrResponse + + r.log.Debugf("deleting device %s", devicePrefs.Config.NodeID) + err = r.tsClient.DeleteDevice(ctx, string(devicePrefs.Config.NodeID)) + switch { + case errors.As(err, &errResp) && errResp.Status == http.StatusNotFound: + // This device has possibly already been deleted in the admin console. So we can ignore this + // and move on to removing the secret. 
+ case err != nil: + return err } } + + if err = r.Delete(ctx, &secret); err != nil { + return err + } } return nil @@ -284,30 +378,38 @@ func (r *RecorderReconciler) maybeCleanupServiceAccounts(ctx context.Context, ts func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder) (bool, error) { logger := r.logger(tsr.Name) - prefs, ok, err := r.getDevicePrefs(ctx, tsr.Name) - if err != nil { - return false, err + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas } - if !ok { - logger.Debugf("state Secret %s-0 not found or does not contain node ID, continuing cleanup", tsr.Name) - r.mu.Lock() - r.recorders.Remove(tsr.UID) - gaugeRecorderResources.Set(int64(r.recorders.Len())) - r.mu.Unlock() - return true, nil - } - - id := string(prefs.Config.NodeID) - logger.Debugf("deleting device %s from control", string(id)) - if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { - logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) - } else { + + for replica := range replicas { + devicePrefs, ok, err := r.getDevicePrefs(ctx, tsr.Name, replica) + if err != nil { + return false, err + } + if !ok { + logger.Debugf("state Secret %s-%d not found or does not contain node ID, continuing cleanup", tsr.Name, replica) + r.mu.Lock() + r.recorders.Remove(tsr.UID) + gaugeRecorderResources.Set(int64(r.recorders.Len())) + r.mu.Unlock() + return true, nil + } + + nodeID := string(devicePrefs.Config.NodeID) + logger.Debugf("deleting device %s from control", nodeID) + if err = r.tsClient.DeleteDevice(ctx, nodeID); err != nil { + errResp := &tailscale.ErrResponse{} + if errors.As(err, errResp) && errResp.Status == http.StatusNotFound { + logger.Debugf("device %s not found, likely because it has already been deleted from control", nodeID) + continue + } + 
return false, fmt.Errorf("error deleting device: %w", err) } - } else { - logger.Debugf("device %s deleted from control", string(id)) + + logger.Debugf("device %s deleted from control", nodeID) } // Unlike most log entries in the reconcile loop, this will get printed @@ -319,38 +421,46 @@ func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Record r.recorders.Remove(tsr.UID) gaugeRecorderResources.Set(int64(r.recorders.Len())) r.mu.Unlock() + return true, nil } -func (r *RecorderReconciler) ensureAuthSecretCreated(ctx context.Context, tsr *tsapi.Recorder) error { - logger := r.logger(tsr.Name) - key := types.NamespacedName{ - Namespace: r.tsNamespace, - Name: tsr.Name, - } - if err := r.Get(ctx, key, &corev1.Secret{}); err == nil { - // No updates, already created the auth key. - logger.Debugf("auth Secret %s already exists", key.Name) - return nil - } else if !apierrors.IsNotFound(err) { - return err +func (r *RecorderReconciler) ensureAuthSecretsCreated(ctx context.Context, tsr *tsapi.Recorder) error { + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas } - // Create the auth key Secret which is going to be used by the StatefulSet - // to authenticate with Tailscale. 
- logger.Debugf("creating authkey for new Recorder") tags := tsr.Spec.Tags if len(tags) == 0 { tags = tsapi.Tags{"tag:k8s"} } - authKey, err := newAuthKey(ctx, r.tsClient, tags.Stringify()) - if err != nil { - return err - } - logger.Debug("creating a new Secret for the Recorder") - if err := r.Create(ctx, tsrAuthSecret(tsr, r.tsNamespace, authKey)); err != nil { - return err + logger := r.logger(tsr.Name) + + for replica := range replicas { + key := types.NamespacedName{ + Namespace: r.tsNamespace, + Name: fmt.Sprintf("%s-auth-%d", tsr.Name, replica), + } + + err := r.Get(ctx, key, &corev1.Secret{}) + switch { + case err == nil: + logger.Debugf("auth Secret %q already exists", key.Name) + continue + case !apierrors.IsNotFound(err): + return fmt.Errorf("failed to get Secret %q: %w", key.Name, err) + } + + authKey, err := newAuthKey(ctx, r.tsClient, tags.Stringify()) + if err != nil { + return err + } + + if err = r.Create(ctx, tsrAuthSecret(tsr, r.tsNamespace, authKey, replica)); err != nil { + return err + } } return nil @@ -361,6 +471,10 @@ func (r *RecorderReconciler) validate(ctx context.Context, tsr *tsapi.Recorder) return errors.New("must either enable UI or use S3 storage to ensure recordings are accessible") } + if tsr.Spec.Replicas != nil && *tsr.Spec.Replicas > 1 && tsr.Spec.Storage.S3 == nil { + return errors.New("must use S3 storage when using multiple replicas to ensure recordings are accessible") + } + // Check any custom ServiceAccount config doesn't conflict with pre-existing // ServiceAccounts. This check is performed once during validation to ensure // errors are raised early, but also again during any Updates to prevent a race. 
@@ -394,11 +508,11 @@ func (r *RecorderReconciler) validate(ctx context.Context, tsr *tsapi.Recorder) return nil } -func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string) (*corev1.Secret, error) { +func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string, replica int32) (*corev1.Secret, error) { secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: r.tsNamespace, - Name: fmt.Sprintf("%s-0", tsrName), + Name: fmt.Sprintf("%s-%d", tsrName, replica), }, } if err := r.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil { @@ -412,8 +526,8 @@ func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string) return secret, nil } -func (r *RecorderReconciler) getDevicePrefs(ctx context.Context, tsrName string) (prefs prefs, ok bool, err error) { - secret, err := r.getStateSecret(ctx, tsrName) +func (r *RecorderReconciler) getDevicePrefs(ctx context.Context, tsrName string, replica int32) (prefs prefs, ok bool, err error) { + secret, err := r.getStateSecret(ctx, tsrName, replica) if err != nil || secret == nil { return prefs, false, err } @@ -441,8 +555,8 @@ func getDevicePrefs(secret *corev1.Secret) (prefs prefs, ok bool, err error) { return prefs, ok, nil } -func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.RecorderTailnetDevice, ok bool, err error) { - secret, err := r.getStateSecret(ctx, tsrName) +func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string, replica int32) (d tsapi.RecorderTailnetDevice, ok bool, err error) { + secret, err := r.getStateSecret(ctx, tsrName, replica) if err != nil || secret == nil { return tsapi.RecorderTailnetDevice{}, false, err } diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index 83d7439db3f57..b4a10f2962ae9 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -12,30 +12,36 @@ import ( corev1 
"k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/types/ptr" "tailscale.com/version" ) func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) *appsv1.StatefulSet { - return &appsv1.StatefulSet{ + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas + } + + ss := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: tsr.Name, Namespace: namespace, - Labels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Labels), + Labels: tsrLabels("recorder", tsr.Name, tsr.Spec.StatefulSet.Labels), OwnerReferences: tsrOwnerReference(tsr), Annotations: tsr.Spec.StatefulSet.Annotations, }, Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To[int32](1), + Replicas: ptr.To(replicas), Selector: &metav1.LabelSelector{ - MatchLabels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), + MatchLabels: tsrLabels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: tsr.Name, Namespace: namespace, - Labels: labels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), + Labels: tsrLabels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), Annotations: tsr.Spec.StatefulSet.Pod.Annotations, }, Spec: corev1.PodSpec{ @@ -59,7 +65,7 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) * ImagePullPolicy: tsr.Spec.StatefulSet.Pod.Container.ImagePullPolicy, Resources: tsr.Spec.StatefulSet.Pod.Container.Resources, SecurityContext: tsr.Spec.StatefulSet.Pod.Container.SecurityContext, - Env: env(tsr, loginServer), + Env: tsrEnv(tsr, loginServer), EnvFrom: func() []corev1.EnvFromSource { if tsr.Spec.Storage.S3 == nil || tsr.Spec.Storage.S3.Credentials.Secret.Name == "" { return nil @@ -95,6 +101,28 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) * }, }, } + + for replica := range 
replicas { + volumeName := fmt.Sprintf("authkey-%d", replica) + + ss.Spec.Template.Spec.Containers[0].VolumeMounts = append(ss.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ + Name: volumeName, + ReadOnly: true, + MountPath: fmt.Sprintf("/etc/tailscaled/%s-%d", ss.Name, replica), + }) + + ss.Spec.Template.Spec.Volumes = append(ss.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-auth-%d", tsr.Name, replica), + Items: []corev1.KeyToPath{{Key: "authkey", Path: "authkey"}}, + }, + }, + }) + } + + return ss } func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAccount { @@ -102,7 +130,7 @@ func tsrServiceAccount(tsr *tsapi.Recorder, namespace string) *corev1.ServiceAcc ObjectMeta: metav1.ObjectMeta{ Name: tsrServiceAccountName(tsr), Namespace: namespace, - Labels: labels("recorder", tsr.Name, nil), + Labels: tsrLabels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), Annotations: tsr.Spec.StatefulSet.Pod.ServiceAccount.Annotations, }, @@ -120,11 +148,24 @@ func tsrServiceAccountName(tsr *tsapi.Recorder) string { } func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role { + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas + } + + resourceNames := make([]string, 0) + for replica := range replicas { + resourceNames = append(resourceNames, + fmt.Sprintf("%s-%d", tsr.Name, replica), // State secret. + fmt.Sprintf("%s-auth-%d", tsr.Name, replica), // Auth key secret. 
+ ) + } + return &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: tsr.Name, Namespace: namespace, - Labels: labels("recorder", tsr.Name, nil), + Labels: tsrLabels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), }, Rules: []rbacv1.PolicyRule{ @@ -136,10 +177,7 @@ func tsrRole(tsr *tsapi.Recorder, namespace string) *rbacv1.Role { "patch", "update", }, - ResourceNames: []string{ - tsr.Name, // Contains the auth key. - fmt.Sprintf("%s-0", tsr.Name), // Contains the node state. - }, + ResourceNames: resourceNames, }, { APIGroups: []string{""}, @@ -159,7 +197,7 @@ func tsrRoleBinding(tsr *tsapi.Recorder, namespace string) *rbacv1.RoleBinding { ObjectMeta: metav1.ObjectMeta{ Name: tsr.Name, Namespace: namespace, - Labels: labels("recorder", tsr.Name, nil), + Labels: tsrLabels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), }, Subjects: []rbacv1.Subject{ @@ -176,12 +214,12 @@ func tsrRoleBinding(tsr *tsapi.Recorder, namespace string) *rbacv1.RoleBinding { } } -func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string) *corev1.Secret { +func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string, replica int32) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: tsr.Name, - Labels: labels("recorder", tsr.Name, nil), + Name: fmt.Sprintf("%s-auth-%d", tsr.Name, replica), + Labels: tsrLabels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), }, StringData: map[string]string{ @@ -190,30 +228,19 @@ func tsrAuthSecret(tsr *tsapi.Recorder, namespace string, authKey string) *corev } } -func tsrStateSecret(tsr *tsapi.Recorder, namespace string) *corev1.Secret { +func tsrStateSecret(tsr *tsapi.Recorder, namespace string, replica int32) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-0", tsr.Name), + Name: fmt.Sprintf("%s-%d", tsr.Name, replica), Namespace: namespace, - Labels: labels("recorder", 
tsr.Name, nil), + Labels: tsrLabels("recorder", tsr.Name, nil), OwnerReferences: tsrOwnerReference(tsr), }, } } -func env(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { +func tsrEnv(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { envs := []corev1.EnvVar{ - { - Name: "TS_AUTHKEY", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: tsr.Name, - }, - Key: "authkey", - }, - }, - }, { Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{ @@ -231,6 +258,10 @@ func env(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { }, }, }, + { + Name: "TS_AUTHKEY_FILE", + Value: "/etc/tailscaled/$(POD_NAME)/authkey", + }, { Name: "TS_STATE", Value: "kube:$(POD_NAME)", @@ -280,7 +311,7 @@ func env(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { return envs } -func labels(app, instance string, customLabels map[string]string) map[string]string { +func tsrLabels(app, instance string, customLabels map[string]string) map[string]string { labels := make(map[string]string, len(customLabels)+3) for k, v := range customLabels { labels[k] = v diff --git a/cmd/k8s-operator/tsrecorder_specs_test.go b/cmd/k8s-operator/tsrecorder_specs_test.go index 49332d09b6a08..0d78129fc76b3 100644 --- a/cmd/k8s-operator/tsrecorder_specs_test.go +++ b/cmd/k8s-operator/tsrecorder_specs_test.go @@ -12,6 +12,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/types/ptr" ) @@ -23,6 +24,7 @@ func TestRecorderSpecs(t *testing.T) { Name: "test", }, Spec: tsapi.RecorderSpec{ + Replicas: ptr.To[int32](3), StatefulSet: tsapi.RecorderStatefulSet{ Labels: map[string]string{ "ss-label-key": "ss-label-value", @@ -101,10 +103,10 @@ func TestRecorderSpecs(t *testing.T) { } // Pod-level. 
- if diff := cmp.Diff(ss.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Labels)); diff != "" { + if diff := cmp.Diff(ss.Labels, tsrLabels("recorder", "test", tsr.Spec.StatefulSet.Labels)); diff != "" { t.Errorf("(-got +want):\n%s", diff) } - if diff := cmp.Diff(ss.Spec.Template.Labels, labels("recorder", "test", tsr.Spec.StatefulSet.Pod.Labels)); diff != "" { + if diff := cmp.Diff(ss.Spec.Template.Labels, tsrLabels("recorder", "test", tsr.Spec.StatefulSet.Pod.Labels)); diff != "" { t.Errorf("(-got +want):\n%s", diff) } if diff := cmp.Diff(ss.Spec.Template.Spec.Affinity, tsr.Spec.StatefulSet.Pod.Affinity); diff != "" { @@ -124,7 +126,7 @@ func TestRecorderSpecs(t *testing.T) { } // Container-level. - if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, env(tsr, tsLoginServer)); diff != "" { + if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Env, tsrEnv(tsr, tsLoginServer)); diff != "" { t.Errorf("(-got +want):\n%s", diff) } if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Image, tsr.Spec.StatefulSet.Pod.Container.Image); diff != "" { @@ -139,5 +141,17 @@ func TestRecorderSpecs(t *testing.T) { if diff := cmp.Diff(ss.Spec.Template.Spec.Containers[0].Resources, tsr.Spec.StatefulSet.Pod.Container.Resources); diff != "" { t.Errorf("(-got +want):\n%s", diff) } + + if *ss.Spec.Replicas != *tsr.Spec.Replicas { + t.Errorf("expected %d replicas, got %d", *tsr.Spec.Replicas, *ss.Spec.Replicas) + } + + if len(ss.Spec.Template.Spec.Volumes) != int(*tsr.Spec.Replicas)+1 { + t.Errorf("expected %d volumes, got %d", *tsr.Spec.Replicas+1, len(ss.Spec.Template.Spec.Volumes)) + } + + if len(ss.Spec.Template.Spec.Containers[0].VolumeMounts) != int(*tsr.Spec.Replicas)+1 { + t.Errorf("expected %d volume mounts, got %d", *tsr.Spec.Replicas+1, len(ss.Spec.Template.Spec.Containers[0].VolumeMounts)) + } }) } diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index 184af23447c7c..f7ff797b1ebba 100644 --- 
a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -8,6 +8,7 @@ package main import ( "context" "encoding/json" + "fmt" "strings" "testing" @@ -20,9 +21,11 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstest" + "tailscale.com/types/ptr" ) const ( @@ -36,6 +39,9 @@ func TestRecorder(t *testing.T) { Name: "test", Finalizers: []string{"tailscale.com/finalizer"}, }, + Spec: tsapi.RecorderSpec{ + Replicas: ptr.To[int32](3), + }, } fc := fake.NewClientBuilder(). @@ -80,6 +86,15 @@ func TestRecorder(t *testing.T) { }) expectReconciled(t, reconciler, "", tsr.Name) + expectedEvent = "Warning RecorderInvalid Recorder is invalid: must use S3 storage when using multiple replicas to ensure recordings are accessible" + expectEvents(t, fr, []string{expectedEvent}) + + tsr.Spec.Storage.S3 = &tsapi.S3{} + mustUpdate(t, fc, "", "test", func(t *tsapi.Recorder) { + t.Spec = tsr.Spec + }) + expectReconciled(t, reconciler, "", tsr.Name) + // Only check part of this error message, because it's defined in an // external package and may change. 
if err := fc.Get(context.Background(), client.ObjectKey{ @@ -180,33 +195,47 @@ func TestRecorder(t *testing.T) { }) t.Run("populate_node_info_in_state_secret_and_see_it_appear_in_status", func(t *testing.T) { - bytes, err := json.Marshal(map[string]any{ - "Config": map[string]any{ - "NodeID": "nodeid-123", - "UserProfile": map[string]any{ - "LoginName": "test-0.example.ts.net", - }, - }, - }) - if err != nil { - t.Fatal(err) - } const key = "profile-abc" - mustUpdate(t, fc, tsNamespace, "test-0", func(s *corev1.Secret) { - s.Data = map[string][]byte{ - currentProfileKey: []byte(key), - key: bytes, + for replica := range *tsr.Spec.Replicas { + bytes, err := json.Marshal(map[string]any{ + "Config": map[string]any{ + "NodeID": fmt.Sprintf("node-%d", replica), + "UserProfile": map[string]any{ + "LoginName": fmt.Sprintf("test-%d.example.ts.net", replica), + }, + }, + }) + if err != nil { + t.Fatal(err) } - }) + + name := fmt.Sprintf("%s-%d", "test", replica) + mustUpdate(t, fc, tsNamespace, name, func(s *corev1.Secret) { + s.Data = map[string][]byte{ + currentProfileKey: []byte(key), + key: bytes, + } + }) + } expectReconciled(t, reconciler, "", tsr.Name) tsr.Status.Devices = []tsapi.RecorderTailnetDevice{ { - Hostname: "hostname-nodeid-123", + Hostname: "hostname-node-0", TailnetIPs: []string{"1.2.3.4", "::1"}, URL: "https://test-0.example.ts.net", }, + { + Hostname: "hostname-node-1", + TailnetIPs: []string{"1.2.3.4", "::1"}, + URL: "https://test-1.example.ts.net", + }, + { + Hostname: "hostname-node-2", + TailnetIPs: []string{"1.2.3.4", "::1"}, + URL: "https://test-2.example.ts.net", + }, } expectEqual(t, fc, tsr) }) @@ -222,7 +251,7 @@ func TestRecorder(t *testing.T) { if expected := 0; reconciler.recorders.Len() != expected { t.Fatalf("expected %d recorders, got %d", expected, reconciler.recorders.Len()) } - if diff := cmp.Diff(tsClient.deleted, []string{"nodeid-123"}); diff != "" { + if diff := cmp.Diff(tsClient.deleted, []string{"node-0", "node-1", "node-2"}); 
diff != "" { t.Fatalf("unexpected deleted devices (-got +want):\n%s", diff) } // The fake client does not clean up objects whose owner has been @@ -233,26 +262,38 @@ func TestRecorder(t *testing.T) { func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recorder, shouldExist bool) { t.Helper() - auth := tsrAuthSecret(tsr, tsNamespace, "secret-authkey") - state := tsrStateSecret(tsr, tsNamespace) + var replicas int32 = 1 + if tsr.Spec.Replicas != nil { + replicas = *tsr.Spec.Replicas + } + role := tsrRole(tsr, tsNamespace) roleBinding := tsrRoleBinding(tsr, tsNamespace) serviceAccount := tsrServiceAccount(tsr, tsNamespace) statefulSet := tsrStatefulSet(tsr, tsNamespace, tsLoginServer) if shouldExist { - expectEqual(t, fc, auth) - expectEqual(t, fc, state) expectEqual(t, fc, role) expectEqual(t, fc, roleBinding) expectEqual(t, fc, serviceAccount) expectEqual(t, fc, statefulSet, removeResourceReqs) } else { - expectMissing[corev1.Secret](t, fc, auth.Namespace, auth.Name) - expectMissing[corev1.Secret](t, fc, state.Namespace, state.Name) expectMissing[rbacv1.Role](t, fc, role.Namespace, role.Name) expectMissing[rbacv1.RoleBinding](t, fc, roleBinding.Namespace, roleBinding.Name) expectMissing[corev1.ServiceAccount](t, fc, serviceAccount.Namespace, serviceAccount.Name) expectMissing[appsv1.StatefulSet](t, fc, statefulSet.Namespace, statefulSet.Name) } + + for replica := range replicas { + auth := tsrAuthSecret(tsr, tsNamespace, "secret-authkey", replica) + state := tsrStateSecret(tsr, tsNamespace, replica) + + if shouldExist { + expectEqual(t, fc, auth) + expectEqual(t, fc, state) + } else { + expectMissing[corev1.Secret](t, fc, auth.Namespace, auth.Name) + expectMissing[corev1.Secret](t, fc, state.Namespace, state.Name) + } + } } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 979d199cb0783..3a4e692d902ec 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -887,7 +887,7 @@ _Appears in:_ - +RecorderSpec describes a tsrecorder 
instance to be deployed in the cluster @@ -900,6 +900,7 @@ _Appears in:_ | `tags` _[Tags](#tags)_ | Tags that the Tailscale device will be tagged with. Defaults to [tag:k8s].
      If you specify custom tags here, make sure you also make the operator
      an owner of these tags.
      See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
      Tags cannot be changed once a Recorder node has been created.
      Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
      Type: string
      | | `enableUI` _boolean_ | Set to true to enable the Recorder UI. The UI lists and plays recorded sessions.
      The UI will be served at :443. Defaults to false.
      Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node.
      Required if S3 storage is not set up, to ensure that recordings are accessible. | | | | `storage` _[Storage](#storage)_ | Configure where to store session recordings. By default, recordings will
      be stored in a local ephemeral volume, and will not be persisted past the
      lifetime of a specific pod. | | | +| `replicas` _integer_ | Replicas specifies how many instances of tsrecorder to run. Defaults to 1. | | Minimum: 0
      | #### RecorderStatefulSet diff --git a/k8s-operator/apis/v1alpha1/types_recorder.go b/k8s-operator/apis/v1alpha1/types_recorder.go index 16a610b26d179..67cffbf09e969 100644 --- a/k8s-operator/apis/v1alpha1/types_recorder.go +++ b/k8s-operator/apis/v1alpha1/types_recorder.go @@ -44,6 +44,8 @@ type RecorderList struct { Items []Recorder `json:"items"` } +// RecorderSpec describes a tsrecorder instance to be deployed in the cluster +// +kubebuilder:validation:XValidation:rule="!(self.replicas > 1 && (!has(self.storage) || !has(self.storage.s3)))",message="S3 storage must be used when deploying multiple Recorder replicas" type RecorderSpec struct { // Configuration parameters for the Recorder's StatefulSet. The operator // deploys a StatefulSet for each Recorder resource. @@ -74,6 +76,11 @@ type RecorderSpec struct { // lifetime of a specific pod. // +optional Storage Storage `json:"storage,omitempty"` + + // Replicas specifies how many instances of tsrecorder to run. Defaults to 1. + // +optional + // +kubebuilder:validation:Minimum=0 + Replicas *int32 `json:"replicas,omitzero"` } type RecorderStatefulSet struct { diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 7492f1e547395..ff0f3f6ace415 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -1068,6 +1068,11 @@ func (in *RecorderSpec) DeepCopyInto(out *RecorderSpec) { copy(*out, *in) } in.Storage.DeepCopyInto(&out.Storage) + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecorderSpec. 
From ac74d28190e73af85fe181b81173ef686331f51c Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Thu, 20 Nov 2025 12:40:05 -0700 Subject: [PATCH 0734/1093] ipn/ipnlocal: add validations when setting serve config (#17950) These validations were previously performed in the CLI frontend. There are two motivations for moving these to the local backend: 1. The backend controls synchronization around the relevant state, so only the backend can guarantee many of these validations. 2. Doing these validations in the back-end avoids the need to repeat them across every frontend (e.g. the CLI and tsnet). Updates tailscale/corp#27200 Signed-off-by: Harry Harpham --- cmd/tailscale/cli/serve_v2.go | 68 ------ cmd/tailscale/cli/serve_v2_test.go | 204 ------------------ ipn/ipnlocal/serve.go | 155 +++++++++++++- ipn/ipnlocal/serve_test.go | 326 ++++++++++++++++++++++++++++- ipn/serve.go | 44 ++-- 5 files changed, 483 insertions(+), 314 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index b60e645f345ed..89d247be9f773 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -478,11 +478,6 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { } wantFg := !e.bg.Value && !turnOff if wantFg { - // validate the config before creating a WatchIPNBus session - if err := e.validateConfig(parentSC, srvPort, srvType, svcName); err != nil { - return err - } - // if foreground mode, create a WatchIPNBus session // and use the nested config for all following operations // TODO(marwan-at-work): nested-config validations should happen here or previous to this point. @@ -508,9 +503,6 @@ func (e *serveEnv) runServeCombined(subcmd serveMode) execFunc { // only unset serve when trying to unset with type and port flags. 
err = e.unsetServe(sc, dnsName, srvType, srvPort, mount, magicDNSSuffix) } else { - if err := e.validateConfig(parentSC, srvPort, srvType, svcName); err != nil { - return err - } if forService { e.addServiceToPrefs(ctx, svcName) } @@ -907,66 +899,6 @@ func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err er return e.lc.SetServeConfig(ctx, sc) } -const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" - -// validateConfig checks if the serve config is valid to serve the type wanted on the port. -// dnsName is a FQDN or a serviceName (with `svc:` prefix). -func (e *serveEnv) validateConfig(sc *ipn.ServeConfig, port uint16, wantServe serveType, svcName tailcfg.ServiceName) error { - var tcpHandlerForPort *ipn.TCPPortHandler - if svcName != noService { - svc := sc.Services[svcName] - if svc == nil { - return nil - } - if wantServe == serveTypeTUN && (svc.TCP != nil || svc.Web != nil) { - return errors.New("service already has a TCP or Web handler, cannot serve in TUN mode") - } - if svc.Tun && wantServe != serveTypeTUN { - return errors.New("service is already being served in TUN mode") - } - if svc.TCP[port] == nil { - return nil - } - tcpHandlerForPort = svc.TCP[port] - } else { - sc, isFg := sc.FindConfig(port) - if sc == nil { - return nil - } - if isFg { - return errors.New("foreground already exists under this port") - } - if !e.bg.Value { - return fmt.Errorf(backgroundExistsMsg, infoMap[e.subcmd].Name, wantServe.String(), port) - } - tcpHandlerForPort = sc.TCP[port] - } - existingServe := serveFromPortHandler(tcpHandlerForPort) - if wantServe != existingServe { - target := svcName - if target == noService { - target = "machine" - } - return fmt.Errorf("want to serve %q but port is already serving %q for %q", wantServe, existingServe, target) - } - return nil -} - -func serveFromPortHandler(tcp *ipn.TCPPortHandler) serveType { - switch { - case tcp.HTTP: - 
return serveTypeHTTP - case tcp.HTTPS: - return serveTypeHTTPS - case tcp.TerminateTLS != "": - return serveTypeTLSTerminatedTCP - case tcp.TCPForward != "": - return serveTypeTCP - default: - return -1 - } -} - func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveType, srvPort uint16, mount string, target string, allowFunnel bool, mds string, caps []tailcfg.PeerCapability, proxyProtocol int) error { // update serve config based on the type switch srvType { diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 491baf9dd3ae8..513c0d1ec97d4 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -819,26 +819,6 @@ func TestServeDevConfigMutations(t *testing.T) { }, }, }, - { - name: "forground_with_bg_conflict", - steps: []step{ - { - command: cmd("serve --bg --http=3000 localhost:3000"), - want: &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{3000: {HTTP: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{ - "foo.test.ts.net:3000": {Handlers: map[string]*ipn.HTTPHandler{ - "/": {Proxy: "http://localhost:3000"}, - }}, - }, - }, - }, - { - command: cmd("serve --http=3000 localhost:3000"), - wantErr: exactErrMsg(fmt.Errorf(backgroundExistsMsg, "serve", "http", 3000)), - }, - }, - }, { name: "advertise_service", initialState: fakeLocalServeClient{ @@ -1067,190 +1047,6 @@ func TestServeDevConfigMutations(t *testing.T) { } } -func TestValidateConfig(t *testing.T) { - tests := [...]struct { - name string - desc string - cfg *ipn.ServeConfig - svc tailcfg.ServiceName - servePort uint16 - serveType serveType - bg bgBoolFlag - wantErr bool - }{ - { - name: "nil_config", - desc: "when config is nil, all requests valid", - cfg: nil, - servePort: 3000, - serveType: serveTypeHTTPS, - }, - { - name: "new_bg_tcp", - desc: "no error when config exists but we're adding a new bg tcp port", - cfg: &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {HTTPS: true}, - }, 
- }, - bg: bgBoolFlag{true, false}, - servePort: 10000, - serveType: serveTypeHTTPS, - }, - { - name: "override_bg_tcp", - desc: "no error when overwriting previous port under the same serve type", - cfg: &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {TCPForward: "http://localhost:4545"}, - }, - }, - bg: bgBoolFlag{true, false}, - servePort: 443, - serveType: serveTypeTCP, - }, - { - name: "override_bg_tcp", - desc: "error when overwriting previous port under a different serve type", - cfg: &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {HTTPS: true}, - }, - }, - bg: bgBoolFlag{true, false}, - servePort: 443, - serveType: serveTypeHTTP, - wantErr: true, - }, - { - name: "new_fg_port", - desc: "no error when serving a new foreground port", - cfg: &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {HTTPS: true}, - }, - Foreground: map[string]*ipn.ServeConfig{ - "abc123": { - TCP: map[uint16]*ipn.TCPPortHandler{ - 3000: {HTTPS: true}, - }, - }, - }, - }, - servePort: 4040, - serveType: serveTypeTCP, - }, - { - name: "same_fg_port", - desc: "error when overwriting a previous fg port", - cfg: &ipn.ServeConfig{ - Foreground: map[string]*ipn.ServeConfig{ - "abc123": { - TCP: map[uint16]*ipn.TCPPortHandler{ - 3000: {HTTPS: true}, - }, - }, - }, - }, - servePort: 3000, - serveType: serveTypeTCP, - wantErr: true, - }, - { - name: "new_service_tcp", - desc: "no error when adding a new service port", - cfg: &ipn.ServeConfig{ - Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ - "svc:foo": { - TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, - }, - }, - }, - svc: "svc:foo", - servePort: 8080, - serveType: serveTypeTCP, - }, - { - name: "override_service_tcp", - desc: "no error when overwriting a previous service port", - cfg: &ipn.ServeConfig{ - Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ - "svc:foo": { - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {TCPForward: "http://localhost:4545"}, - }, - }, - }, - }, - 
svc: "svc:foo", - servePort: 443, - serveType: serveTypeTCP, - }, - { - name: "override_service_tcp", - desc: "error when overwriting a previous service port with a different serve type", - cfg: &ipn.ServeConfig{ - Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ - "svc:foo": { - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {HTTPS: true}, - }, - }, - }, - }, - svc: "svc:foo", - servePort: 443, - serveType: serveTypeHTTP, - wantErr: true, - }, - { - name: "override_service_tcp", - desc: "error when setting previous tcp service to tun mode", - cfg: &ipn.ServeConfig{ - Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ - "svc:foo": { - TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {TCPForward: "http://localhost:4545"}, - }, - }, - }, - }, - svc: "svc:foo", - serveType: serveTypeTUN, - wantErr: true, - }, - { - name: "override_service_tun", - desc: "error when setting previous tun service to tcp forwarder", - cfg: &ipn.ServeConfig{ - Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ - "svc:foo": { - Tun: true, - }, - }, - }, - svc: "svc:foo", - serveType: serveTypeTCP, - servePort: 443, - wantErr: true, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - se := serveEnv{bg: tc.bg} - err := se.validateConfig(tc.cfg, tc.servePort, tc.serveType, tc.svc) - if err == nil && tc.wantErr { - t.Fatal("expected an error but got nil") - } - if err != nil && !tc.wantErr { - t.Fatalf("expected no error but got: %v", err) - } - }) - } - -} - func TestSrcTypeFromFlags(t *testing.T) { tests := []struct { name string diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index b5118873b2fca..ef4e9154557a4 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -292,6 +292,10 @@ func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint1 // SetServeConfig establishes or replaces the current serve config. // ETag is an optional parameter to enforce Optimistic Concurrency Control. 
// If it is an empty string, then the config will be overwritten. +// +// New foreground config cannot override existing listeners--neither existing +// foreground listeners nor existing background listeners. Background config can +// change as long as the serve type (e.g. HTTP, TCP, etc.) remains the same. func (b *LocalBackend) SetServeConfig(config *ipn.ServeConfig, etag string) error { b.mu.Lock() defer b.mu.Unlock() @@ -307,12 +311,6 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string return errors.New("can't reconfigure tailscaled when using a config file; config file is locked") } - if config != nil { - if err := config.CheckValidServicesConfig(); err != nil { - return err - } - } - nm := b.NetMap() if nm == nil { return errors.New("netMap is nil") @@ -340,6 +338,10 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string } } + if err := validateServeConfigUpdate(prevConfig, config.View()); err != nil { + return err + } + var bs []byte if config != nil { j, err := json.Marshal(config) @@ -1566,3 +1568,144 @@ func vipServiceHash(logf logger.Logf, services []*tailcfg.VIPService) string { h.Sum(buf[:0]) return hex.EncodeToString(buf[:]) } + +// validateServeConfigUpdate validates changes proposed by incoming serve +// configuration. +func validateServeConfigUpdate(existing, incoming ipn.ServeConfigView) error { + // Error messages returned by this function may be presented to end-users by + // frontends like the CLI. Thus these error messages should provide enough + // information for end-users to diagnose and resolve conflicts. + + if !incoming.Valid() { + return nil + } + + // For Services, TUN mode is mutually exclusive with L4 or L7 handlers. 
+ for svcName, svcCfg := range incoming.Services().All() { + hasTCP := svcCfg.TCP().Len() > 0 + hasWeb := svcCfg.Web().Len() > 0 + if svcCfg.Tun() && (hasTCP || hasWeb) { + return fmt.Errorf("cannot configure TUN mode in combination with TCP or web handlers for %s", svcName) + } + } + + if !existing.Valid() { + return nil + } + + // New foreground listeners must be on open ports. + for sessionID, incomingFg := range incoming.Foreground().All() { + if !existing.Foreground().Has(sessionID) { + // This is a new session. + for port := range incomingFg.TCPs() { + if _, exists := existing.FindTCP(port); exists { + return fmt.Errorf("listener already exists for port %d", port) + } + } + } + } + + // New background listeners cannot overwrite existing foreground listeners. + for port := range incoming.TCP().All() { + if _, exists := existing.FindForegroundTCP(port); exists { + return fmt.Errorf("foreground listener already exists for port %d", port) + } + } + + // Incoming configuration cannot change the serve type in use by a port. + for port, incomingHandler := range incoming.TCP().All() { + existingHandler, exists := existing.FindTCP(port) + if !exists { + continue + } + + existingServeType := serveTypeFromPortHandler(existingHandler) + incomingServeType := serveTypeFromPortHandler(incomingHandler) + if incomingServeType != existingServeType { + return fmt.Errorf("want to serve %q, but port %d is already serving %q", incomingServeType, port, existingServeType) + } + } + + // Validations for Tailscale Services. + for svcName, incomingSvcCfg := range incoming.Services().All() { + existingSvcCfg, exists := existing.Services().GetOk(svcName) + if !exists { + continue + } + + // Incoming configuration cannot change the serve type in use by a port. 
+ for port, incomingHandler := range incomingSvcCfg.TCP().All() { + existingHandler, exists := existingSvcCfg.TCP().GetOk(port) + if !exists { + continue + } + + existingServeType := serveTypeFromPortHandler(existingHandler) + incomingServeType := serveTypeFromPortHandler(incomingHandler) + if incomingServeType != existingServeType { + return fmt.Errorf("want to serve %q, but port %d is already serving %q for %s", incomingServeType, port, existingServeType, svcName) + } + } + + existingHasTCP := existingSvcCfg.TCP().Len() > 0 + existingHasWeb := existingSvcCfg.Web().Len() > 0 + + // A Service cannot turn on TUN mode if TCP or web handlers exist. + if incomingSvcCfg.Tun() && (existingHasTCP || existingHasWeb) { + return fmt.Errorf("cannot turn on TUN mode with existing TCP or web handlers for %s", svcName) + } + + incomingHasTCP := incomingSvcCfg.TCP().Len() > 0 + incomingHasWeb := incomingSvcCfg.Web().Len() > 0 + + // A Service cannot add TCP or web handlers if TUN mode is enabled. + if (incomingHasTCP || incomingHasWeb) && existingSvcCfg.Tun() { + return fmt.Errorf("cannot add TCP or web handlers as TUN mode is enabled for %s", svcName) + } + } + + return nil +} + +// serveType is a high-level descriptor of the kind of serve performed by a TCP +// port handler. +type serveType int + +const ( + serveTypeHTTPS serveType = iota + serveTypeHTTP + serveTypeTCP + serveTypeTLSTerminatedTCP +) + +func (s serveType) String() string { + switch s { + case serveTypeHTTP: + return "http" + case serveTypeHTTPS: + return "https" + case serveTypeTCP: + return "tcp" + case serveTypeTLSTerminatedTCP: + return "tls-terminated-tcp" + default: + return "unknownServeType" + } +} + +// serveTypeFromPortHandler is used to get a high-level descriptor of the kind +// of serve being performed by a port handler. 
+func serveTypeFromPortHandler(ph ipn.TCPPortHandlerView) serveType { + switch { + case ph.HTTP(): + return serveTypeHTTP + case ph.HTTPS(): + return serveTypeHTTPS + case ph.TerminateTLS() != "": + return serveTypeTLSTerminatedTCP + case ph.TCPForward() != "": + return serveTypeTCP + default: + return -1 + } +} diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index c3e5b2ff968b2..6ee2181a0aaa2 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -388,7 +388,7 @@ func TestServeConfigServices(t *testing.T) { tests := []struct { name string conf *ipn.ServeConfig - expectedErr error + errExpected bool packetDstAddrPort []netip.AddrPort intercepted bool }{ @@ -412,7 +412,7 @@ func TestServeConfigServices(t *testing.T) { }, }, }, - expectedErr: ipn.ErrServiceConfigHasBothTCPAndTun, + errExpected: true, }, { // one correctly configured service with packet should be intercepted @@ -519,13 +519,13 @@ func TestServeConfigServices(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := b.SetServeConfig(tt.conf, "") - if err != nil && tt.expectedErr != nil { - if !errors.Is(err, tt.expectedErr) { - t.Fatalf("expected error %v,\n got %v", tt.expectedErr, err) - } - return + if err == nil && tt.errExpected { + t.Fatal("expected error") } if err != nil { + if tt.errExpected { + return + } t.Fatal(err) } for _, addrPort := range tt.packetDstAddrPort { @@ -1454,3 +1454,315 @@ func TestServeHTTPRedirect(t *testing.T) { }) } } + +func TestValidateServeConfigUpdate(t *testing.T) { + tests := []struct { + name, description string + existing, incoming *ipn.ServeConfig + wantError bool + }{ + { + name: "empty existing config", + description: "should be able to update with empty existing config", + existing: &ipn.ServeConfig{}, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8080: {}, + }, + }, + wantError: false, + }, + { + name: "no existing config", + description: "should be able to update 
with no existing config", + existing: nil, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8080: {}, + }, + }, + wantError: false, + }, + { + name: "empty incoming config", + description: "wiping config should work", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + incoming: &ipn.ServeConfig{}, + wantError: false, + }, + { + name: "no incoming config", + description: "missing incoming config should not result in an error", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + incoming: nil, + wantError: false, + }, + { + name: "non-overlapping update", + description: "non-overlapping update should work", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 8080: {}, + }, + }, + wantError: false, + }, + { + name: "overwriting background port", + description: "should be able to overwrite a background port", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + TCPForward: "localhost:8080", + }, + }, + }, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + TCPForward: "localhost:9999", + }, + }, + }, + wantError: false, + }, + { + name: "broken existing config", + description: "broken existing config should not prevent new config updates", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + // Broken because HTTPS and TCPForward are mutually exclusive. + 9000: { + HTTPS: true, + TCPForward: "127.0.0.1:9000", + }, + // Broken because foreground and background handlers cannot coexist. + 443: {}, + }, + Foreground: map[string]*ipn.ServeConfig{ + "12345": { + TCP: map[uint16]*ipn.TCPPortHandler{ + // Broken because foreground and background handlers cannot coexist. + 443: {}, + }, + }, + }, + // Broken because Services cannot specify TUN mode and a TCP handler. 
+ Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 6060: {}, + }, + Tun: true, + }, + }, + }, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + wantError: false, + }, + { + name: "services same port as background", + description: "services should be able to use the same port as background listeners", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + incoming: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + }, + }, + wantError: false, + }, + { + name: "services tun mode", + description: "TUN mode should be mutually exclusive with TCP or web handlers for new Services", + existing: &ipn.ServeConfig{}, + incoming: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 6060: {}, + }, + Tun: true, + }, + }, + }, + wantError: true, + }, + { + name: "new foreground listener", + description: "new foreground listeners must be on open ports", + existing: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + incoming: &ipn.ServeConfig{ + Foreground: map[string]*ipn.ServeConfig{ + "12345": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + }, + }, + wantError: true, + }, + { + name: "new background listener", + description: "new background listers cannot overwrite foreground listeners", + existing: &ipn.ServeConfig{ + Foreground: map[string]*ipn.ServeConfig{ + "12345": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + }, + }, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {}, + }, + }, + wantError: true, + }, + { + name: "serve type overwrite", + description: "incoming configuration cannot change the serve type in use by a port", + existing: &ipn.ServeConfig{ + TCP: 
map[uint16]*ipn.TCPPortHandler{ + 80: { + HTTP: true, + }, + }, + }, + incoming: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + TCPForward: "localhost:8080", + }, + }, + }, + wantError: true, + }, + { + name: "serve type overwrite services", + description: "incoming Services configuration cannot change the serve type in use by a port", + existing: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + HTTP: true, + }, + }, + }, + }, + }, + incoming: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: { + TCPForward: "localhost:8080", + }, + }, + }, + }, + }, + wantError: true, + }, + { + name: "tun mode with handlers", + description: "Services cannot enable TUN mode if L4 or L7 handlers already exist", + existing: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + HTTPS: true, + }, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "127.0.0.1:443": { + Handlers: map[string]*ipn.HTTPHandler{}, + }, + }, + }, + }, + }, + incoming: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + Tun: true, + }, + }, + }, + wantError: true, + }, + { + name: "handlers with tun mode", + description: "Services cannot add L4 or L7 handlers if TUN mode is already enabled", + existing: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + Tun: true, + }, + }, + }, + incoming: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:foo": { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: { + HTTPS: true, + }, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "127.0.0.1:443": { + Handlers: map[string]*ipn.HTTPHandler{}, + }, + }, + }, + }, + }, + wantError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + err := validateServeConfigUpdate(tt.existing.View(), tt.incoming.View()) + if err != nil && !tt.wantError { + t.Error("unexpected error:", err) + } + if err == nil && tt.wantError { + t.Error("expected error, got nil;", tt.description) + } + }) + } +} diff --git a/ipn/serve.go b/ipn/serve.go index 74195191c727d..7ee78ef0d66bb 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -802,6 +802,7 @@ func (v ServeConfigView) FindServiceTCP(svcName tailcfg.ServiceName, port uint16 return svcCfg.TCP().GetOk(port) } +// FindServiceWeb returns the web handler for the service's host-port. func (v ServeConfigView) FindServiceWeb(svcName tailcfg.ServiceName, hp HostPort) (res WebServerConfigView, ok bool) { if svcCfg, ok := v.Services().GetOk(svcName); ok { if res, ok := svcCfg.Web().GetOk(hp); ok { @@ -815,10 +816,9 @@ func (v ServeConfigView) FindServiceWeb(svcName tailcfg.ServiceName, hp HostPort // prefers a foreground match first followed by a background search if none // existed. func (v ServeConfigView) FindTCP(port uint16) (res TCPPortHandlerView, ok bool) { - for _, conf := range v.Foreground().All() { - if res, ok := conf.TCP().GetOk(port); ok { - return res, ok - } + res, ok = v.FindForegroundTCP(port) + if ok { + return res, ok } return v.TCP().GetOk(port) } @@ -835,6 +835,17 @@ func (v ServeConfigView) FindWeb(hp HostPort) (res WebServerConfigView, ok bool) return v.Web().GetOk(hp) } +// FindForegroundTCP returns the first foreground TCP handler matching the input +// port. +func (v ServeConfigView) FindForegroundTCP(port uint16) (res TCPPortHandlerView, ok bool) { + for _, conf := range v.Foreground().All() { + if res, ok := conf.TCP().GetOk(port); ok { + return res, ok + } + } + return res, false +} + // HasAllowFunnel returns whether this config has at least one AllowFunnel // set in the background or foreground configs. 
func (v ServeConfigView) HasAllowFunnel() bool { @@ -863,17 +874,6 @@ func (v ServeConfigView) HasFunnelForTarget(target HostPort) bool { return false } -// CheckValidServicesConfig reports whether the ServeConfig has -// invalid service configurations. -func (sc *ServeConfig) CheckValidServicesConfig() error { - for svcName, service := range sc.Services { - if err := service.checkValidConfig(); err != nil { - return fmt.Errorf("invalid service configuration for %q: %w", svcName, err) - } - } - return nil -} - // ServicePortRange returns the list of tailcfg.ProtoPortRange that represents // the proto/ports pairs that are being served by the service. // @@ -911,17 +911,3 @@ func (v ServiceConfigView) ServicePortRange() []tailcfg.ProtoPortRange { } return ranges } - -// ErrServiceConfigHasBothTCPAndTun signals that a service -// in Tun mode cannot also has TCP or Web handlers set. -var ErrServiceConfigHasBothTCPAndTun = errors.New("the VIP Service configuration can not set TUN at the same time as TCP or Web") - -// checkValidConfig checks if the service configuration is valid. -// Currently, the only invalid configuration is when the service is in Tun mode -// and has TCP or Web handlers. 
-func (v *ServiceConfig) checkValidConfig() error { - if v.Tun && (len(v.TCP) > 0 || len(v.Web) > 0) { - return ErrServiceConfigHasBothTCPAndTun - } - return nil -} From de8ed203e08b9e32e40648331c47980faab92c46 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 20 Nov 2025 14:10:38 -0600 Subject: [PATCH 0735/1093] go.mod: bump golang.org/x/crypto (#18011) Pick up fixes for https://pkg.go.dev/vuln/GO-2025-4134 Updates #cleanup Signed-off-by: Andrew Lytvynov --- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.nix b/flake.nix index fc3a466fc8720..c075bce0e3131 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-sGPgML2YM/XNWfsAdDZvzWHagcydwCmR6nKOHJj5COs= +# nix-direnv cache busting line: sha256-3jAfCtp714acePnwgdNto8Sj3vFwtpO9os6IwXQ07A4= diff --git a/go.mod b/go.mod index 3b4f34b2df254..e6baad0dc5057 100644 --- a/go.mod +++ b/go.mod @@ -102,7 +102,7 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.44.0 + golang.org/x/crypto v0.45.0 golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac golang.org/x/mod v0.30.0 golang.org/x/net v0.47.0 diff --git a/go.mod.sri b/go.mod.sri index 76c72f0c9bc14..737ea7d2b09e4 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-sGPgML2YM/XNWfsAdDZvzWHagcydwCmR6nKOHJj5COs= +sha256-3jAfCtp714acePnwgdNto8Sj3vFwtpO9os6IwXQ07A4= diff --git a/go.sum b/go.sum index f0758f2d4ba00..1106932f21444 100644 --- a/go.sum +++ b/go.sum @@ -1128,8 +1128,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod 
h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU= -golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= diff --git a/shell.nix b/shell.nix index ffb28a18358b0..8554b92580e5c 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-sGPgML2YM/XNWfsAdDZvzWHagcydwCmR6nKOHJj5COs= +# nix-direnv cache busting line: sha256-3jAfCtp714acePnwgdNto8Sj3vFwtpO9os6IwXQ07A4= From c679aaba32c27681845466df9e6df69fe0704b95 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 20 Nov 2025 15:52:58 -0600 Subject: [PATCH 0736/1093] cmd/tailscaled,ipn: show a health warning when state store fails to open (#17883) With the introduction of node sealing, store.New fails in some cases due to the TPM device being reset or unavailable. Currently it results in tailscaled crashing at startup, which is not obvious to the user until they check the logs. Instead of crashing tailscaled at startup, start with an in-memory store with a health warning about state initialization and a link to (future) docs on what to do. When this health message is set, also block any login attempts to avoid masking the problem with an ephemeral node registration. 
Updates #15830 Updates #17654 Signed-off-by: Andrew Lytvynov --- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/tailscaled.go | 13 +++- cmd/tailscaled/tailscaled_test.go | 50 +++++++++++++ ipn/ipnlocal/local.go | 9 +++ ipn/localapi/localapi.go | 10 ++- ipn/localapi/localapi_test.go | 72 +++++++++++++++++++ ipn/store.go | 15 ++++ tstest/integration/integration_test.go | 37 ++++++++++ .../tailscaled_deps_test_darwin.go | 1 + .../tailscaled_deps_test_freebsd.go | 1 + .../integration/tailscaled_deps_test_linux.go | 1 + .../tailscaled_deps_test_openbsd.go | 1 + .../tailscaled_deps_test_windows.go | 1 + 14 files changed, 211 insertions(+), 4 deletions(-) diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index e750f86e6d4e5..3c111470f32d9 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -69,7 +69,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver tailscale.com/ipn/store from tailscale.com/cmd/tailscaled - tailscale.com/ipn/store/mem from tailscale.com/ipn/store + tailscale.com/ipn/store/mem from tailscale.com/ipn/store+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 17f1a22b24da0..40a1fb2a4a70d 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -92,7 +92,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver tailscale.com/ipn/store from tailscale.com/cmd/tailscaled - 
tailscale.com/ipn/store/mem from tailscale.com/ipn/store + tailscale.com/ipn/store/mem from tailscale.com/ipn/store+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/cmd/tailscale/cli tailscale.com/log/filelogger from tailscale.com/logpolicy diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index f14cdcff072b1..d923ca1edcfad 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -33,12 +33,14 @@ import ( "tailscale.com/feature" "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" + "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnserver" "tailscale.com/ipn/store" + "tailscale.com/ipn/store/mem" "tailscale.com/logpolicy" "tailscale.com/logtail" "tailscale.com/net/dns" @@ -644,7 +646,16 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID store, err := store.New(logf, statePathOrDefault()) if err != nil { - return nil, fmt.Errorf("store.New: %w", err) + // If we can't create the store (for example if it's TPM-sealed and the + // TPM is reset), create a dummy in-memory store to propagate the error + // to the user. 
+ ht, ok := sys.HealthTracker.GetOK() + if !ok { + return nil, fmt.Errorf("store.New: %w", err) + } + logf("store.New failed: %v; starting with in-memory store with a health warning", err) + store = new(mem.Store) + ht.SetUnhealthy(ipn.StateStoreHealth, health.Args{health.ArgError: err.Error()}) } sys.Set(store) diff --git a/cmd/tailscaled/tailscaled_test.go b/cmd/tailscaled/tailscaled_test.go index c50c237591170..1188ad35f3b5b 100644 --- a/cmd/tailscaled/tailscaled_test.go +++ b/cmd/tailscaled/tailscaled_test.go @@ -4,9 +4,17 @@ package main // import "tailscale.com/cmd/tailscaled" import ( + "os" + "strings" "testing" + "tailscale.com/envknob" + "tailscale.com/ipn" + "tailscale.com/net/netmon" + "tailscale.com/tsd" "tailscale.com/tstest/deptest" + "tailscale.com/types/logid" + "tailscale.com/util/must" ) func TestNothing(t *testing.T) { @@ -38,3 +46,45 @@ func TestDeps(t *testing.T) { }, }.Check(t) } + +func TestStateStoreError(t *testing.T) { + logID, err := logid.NewPrivateID() + if err != nil { + t.Fatal(err) + } + // Don't upload any logs from tests. + envknob.SetNoLogsNoSupport() + + args.statedir = t.TempDir() + args.tunname = "userspace-networking" + + t.Run("new state", func(t *testing.T) { + sys := tsd.NewSystem() + sys.NetMon.Set(must.Get(netmon.New(sys.Bus.Get(), t.Logf))) + lb, err := getLocalBackend(t.Context(), t.Logf, logID.Public(), sys) + if err != nil { + t.Fatal(err) + } + defer lb.Shutdown() + if lb.HealthTracker().IsUnhealthy(ipn.StateStoreHealth) { + t.Errorf("StateStoreHealth is unhealthy on fresh LocalBackend:\n%s", strings.Join(lb.HealthTracker().Strings(), "\n")) + } + }) + t.Run("corrupt state", func(t *testing.T) { + sys := tsd.NewSystem() + sys.NetMon.Set(must.Get(netmon.New(sys.Bus.Get(), t.Logf))) + // Populate the state file with something that will fail to parse to + // trigger an error from store.New. 
+ if err := os.WriteFile(statePathOrDefault(), []byte("bad json"), 0644); err != nil { + t.Fatal(err) + } + lb, err := getLocalBackend(t.Context(), t.Logf, logID.Public(), sys) + if err != nil { + t.Fatal(err) + } + defer lb.Shutdown() + if !lb.HealthTracker().IsUnhealthy(ipn.StateStoreHealth) { + t.Errorf("StateStoreHealth is healthy when state file is corrupt") + } + }) +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 0ff2993990b59..72b2303273243 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3747,6 +3747,9 @@ func (b *LocalBackend) StartLoginInteractive(ctx context.Context) error { // the control plane sends us one. Otherwise, the notification will be delivered to all // active [watchSession]s. func (b *LocalBackend) StartLoginInteractiveAs(ctx context.Context, user ipnauth.Actor) error { + if b.health.IsUnhealthy(ipn.StateStoreHealth) { + return errors.New("cannot log in when state store is unhealthy") + } b.mu.Lock() defer b.mu.Unlock() if b.cc == nil { @@ -5677,6 +5680,9 @@ func (b *LocalBackend) NodeKey() key.NodePublic { // // b.mu must be held func (b *LocalBackend) nextStateLocked() ipn.State { + if b.health.IsUnhealthy(ipn.StateStoreHealth) { + return ipn.NoState + } var ( cc = b.cc cn = b.currentNode() @@ -6936,6 +6942,9 @@ func (b *LocalBackend) CurrentProfile() ipn.LoginProfileView { // NewProfile creates and switches to the new profile. 
func (b *LocalBackend) NewProfile() error { + if b.health.IsUnhealthy(ipn.StateStoreHealth) { + return errors.New("cannot log in when state store is unhealthy") + } b.mu.Lock() defer b.mu.Unlock() diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index d3503d3024e96..7f249fe530e15 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -930,7 +930,10 @@ func (h *Handler) serveLoginInteractive(w http.ResponseWriter, r *http.Request) http.Error(w, "want POST", http.StatusBadRequest) return } - h.b.StartLoginInteractiveAs(r.Context(), h.Actor) + if err := h.b.StartLoginInteractiveAs(r.Context(), h.Actor); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } w.WriteHeader(http.StatusNoContent) return } @@ -949,6 +952,11 @@ func (h *Handler) serveStart(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } + + if h.b.HealthTracker().IsUnhealthy(ipn.StateStoreHealth) { + http.Error(w, "cannot start backend when state store is unhealthy", http.StatusInternalServerError) + return + } err := h.b.Start(o) if err != nil { // TODO(bradfitz): map error to a good HTTP error diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 6bb9b51829b1d..5d228ffd69343 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -25,9 +25,11 @@ import ( "testing" "tailscale.com/client/tailscale/apitype" + "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" "tailscale.com/tsd" @@ -428,3 +430,73 @@ func TestKeepItSorted(t *testing.T) { } } } + +func TestServeWithUnhealthyState(t *testing.T) { + tstest.Replace(t, &validLocalHostForTesting, true) + h := &Handler{ + PermitRead: true, + PermitWrite: true, + b: newTestLocalBackend(t), + logf: t.Logf, + } + h.b.HealthTracker().SetUnhealthy(ipn.StateStoreHealth, 
health.Args{health.ArgError: "testing"}) + if err := h.b.Start(ipn.Options{}); err != nil { + t.Fatal(err) + } + + check500Body := func(wantResp string) func(t *testing.T, code int, resp []byte) { + return func(t *testing.T, code int, resp []byte) { + if code != http.StatusInternalServerError { + t.Errorf("got code: %v, want %v\nresponse: %q", code, http.StatusInternalServerError, resp) + } + if got := strings.TrimSpace(string(resp)); got != wantResp { + t.Errorf("got response: %q, want %q", got, wantResp) + } + } + } + tests := []struct { + desc string + req *http.Request + check func(t *testing.T, code int, resp []byte) + }{ + { + desc: "status", + req: httptest.NewRequest("GET", "http://localhost:1234/localapi/v0/status", nil), + check: func(t *testing.T, code int, resp []byte) { + if code != http.StatusOK { + t.Errorf("got code: %v, want %v\nresponse: %q", code, http.StatusOK, resp) + } + var status ipnstate.Status + if err := json.Unmarshal(resp, &status); err != nil { + t.Fatal(err) + } + if status.BackendState != "NoState" { + t.Errorf("got backend state: %q, want %q", status.BackendState, "NoState") + } + }, + }, + { + desc: "login-interactive", + req: httptest.NewRequest("POST", "http://localhost:1234/localapi/v0/login-interactive", nil), + check: check500Body("cannot log in when state store is unhealthy"), + }, + { + desc: "start", + req: httptest.NewRequest("POST", "http://localhost:1234/localapi/v0/start", strings.NewReader("{}")), + check: check500Body("cannot start backend when state store is unhealthy"), + }, + { + desc: "new-profile", + req: httptest.NewRequest("PUT", "http://localhost:1234/localapi/v0/profiles/", nil), + check: check500Body("cannot log in when state store is unhealthy"), + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + resp := httptest.NewRecorder() + h.ServeHTTP(resp, tt.req) + tt.check(t, resp.Code, resp.Body.Bytes()) + }) + } +} diff --git a/ipn/store.go b/ipn/store.go index 
9da5288c0d371..2034ae09a92f9 100644 --- a/ipn/store.go +++ b/ipn/store.go @@ -10,6 +10,8 @@ import ( "fmt" "net" "strconv" + + "tailscale.com/health" ) // ErrStateNotExist is returned by StateStore.ReadState when the @@ -60,6 +62,19 @@ const ( TaildropReceivedKey = StateKey("_taildrop-received") ) +// StateStoreHealth is a Warnable set when store.New fails at startup. If +// unhealthy, we block all login attempts and return a health message in status +// responses. +var StateStoreHealth = health.Register(&health.Warnable{ + Code: "state-store-health", + Severity: health.SeverityHigh, + Title: "Tailscale state store failed to initialize", + Text: func(args health.Args) string { + return fmt.Sprintf("State store failed to initialize, Tailscale will not work until this is resolved. See https://tailscale.com/s/state-store-init-error. Error: %s", args[health.ArgError]) + }, + ImpactsConnectivity: true, +}) + // CurrentProfileID returns the StateKey that stores the // current profile ID. The value is a JSON-encoded LoginProfile. 
// If the userID is empty, the key returned is CurrentProfileStateKey, diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 9d75cfc29fbb8..543dc125c251c 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -22,6 +22,7 @@ import ( "path/filepath" "regexp" "runtime" + "slices" "strconv" "strings" "sync/atomic" @@ -36,6 +37,7 @@ import ( "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/feature" _ "tailscale.com/feature/clientupdate" + "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/tsaddr" @@ -2246,3 +2248,38 @@ func TestNetworkLock(t *testing.T) { } }) } + +func TestNodeWithBadStateFile(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t) + n1 := NewTestNode(t, env) + if err := os.WriteFile(n1.stateFile, []byte("bad json"), 0644); err != nil { + t.Fatal(err) + } + + d1 := n1.StartDaemon() + n1.AwaitResponding() + + // Make sure the health message shows up in status output. + n1.AwaitBackendState("NoState") + st := n1.MustStatus() + wantHealth := ipn.StateStoreHealth.Text(health.Args{health.ArgError: ""}) + if !slices.ContainsFunc(st.Health, func(m string) bool { return strings.HasPrefix(m, wantHealth) }) { + t.Errorf("Status does not contain expected health message %q\ngot health messages: %q", wantHealth, st.Health) + } + + // Make sure login attempts are rejected. 
+ cmd := n1.Tailscale("up", "--login-server="+n1.env.ControlURL()) + t.Logf("Running %v ...", cmd) + out, err := cmd.CombinedOutput() + if err == nil { + t.Fatalf("up succeeded with output %q", out) + } + wantOut := "cannot start backend when state store is unhealthy" + if !strings.Contains(string(out), wantOut) { + t.Fatalf("got up output:\n%s\nwant:\n%s", string(out), wantOut) + } + + d1.MustCleanShutdown(t) +} diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 217188f75f6c0..9f92839d8cde7 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -27,6 +27,7 @@ import ( _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" + _ "tailscale.com/ipn/store/mem" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" _ "tailscale.com/net/dns" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 217188f75f6c0..9f92839d8cde7 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -27,6 +27,7 @@ import ( _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" + _ "tailscale.com/ipn/store/mem" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" _ "tailscale.com/net/dns" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 217188f75f6c0..9f92839d8cde7 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -27,6 +27,7 @@ import ( _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" + _ "tailscale.com/ipn/store/mem" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" _ "tailscale.com/net/dns" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go 
b/tstest/integration/tailscaled_deps_test_openbsd.go index 217188f75f6c0..9f92839d8cde7 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -27,6 +27,7 @@ import ( _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" + _ "tailscale.com/ipn/store/mem" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" _ "tailscale.com/net/dns" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index f3cd5e75b9e36..82f8097c8bc36 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -37,6 +37,7 @@ import ( _ "tailscale.com/ipn/ipnlocal" _ "tailscale.com/ipn/ipnserver" _ "tailscale.com/ipn/store" + _ "tailscale.com/ipn/store/mem" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" _ "tailscale.com/net/dns" From ce95bc77fb0c323e2e4335665bc75d93bf1e7cfc Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 21 Nov 2025 16:40:37 +0000 Subject: [PATCH 0737/1093] tka: don't panic if no clock set in tka.Mem This is causing confusing panics in tailscale/corp#34485. We'll keep using the tka.ChonkMem constructor as much as we can, but don't panic if you create a tka.Mem directly -- we know what the sensible thing is. 
Updates #cleanup Signed-off-by: Alex Chan Change-Id: I49309f5f403fc26ce4f9a6cf0edc8eddf6a6f3a4 --- tka/tailchonk.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index a55033bcd8bb7..13bdf6aac86d4 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -193,7 +193,7 @@ updateLoop: for _, aum := range updates { aumHash := aum.Hash() c.aums[aumHash] = aum - c.commitTimes[aumHash] = c.clock.Now() + c.commitTimes[aumHash] = c.now() parent, ok := aum.Parent() if ok { @@ -209,6 +209,16 @@ updateLoop: return nil } +// now returns the current time, optionally using the overridden +// clock if set. +func (c *Mem) now() time.Time { + if c.clock == nil { + return time.Now() + } else { + return c.clock.Now() + } +} + // RemoveAll permanently and completely clears the TKA state. func (c *Mem) RemoveAll() error { c.mu.Lock() From 016ccae2da9fae1f6d8ffb29c694f86cb78cca4a Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 19 Nov 2025 20:13:18 -0600 Subject: [PATCH 0738/1093] util/eventbus: add tests for a subscriber trying to acquire the same mutex as a publisher As of 2025-11-20, publishing more events than the eventbus's internal queues can hold may deadlock if a subscriber tries to acquire a mutex that can also be held by a publisher. This commit adds a test that demonstrates this deadlock, and skips it until the bug is fixed. 
Updates #17973 Signed-off-by: Nick Khyl --- util/eventbus/bus_test.go | 70 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 61728fbfd93d2..e025e5bed252c 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -9,6 +9,7 @@ import ( "fmt" "log" "regexp" + "sync" "testing" "testing/synctest" "time" @@ -593,6 +594,75 @@ func TestRegression(t *testing.T) { }) } +const ( + maxQueuedItems = 16 // same as in queue.go + totalMaxQueuedItems = maxQueuedItems * 2 // both publisher and subscriber sides +) + +func TestPublishWithMutex(t *testing.T) { + t.Run("FewEvents", func(t *testing.T) { + // As of 2025-11-20, publishing up to [totalMaxQueuedItems] is fine. + testPublishWithMutex(t, totalMaxQueuedItems) + }) + t.Run("ManyEvents", func(t *testing.T) { + // As of 2025-11-20, publishing more than [totalMaxQueuedItems] may deadlock. + t.Skip("TODO: fix deadlock in https://github.com/tailscale/tailscale/issues/17973") + + const N = 3 // N larger than one increases the chance of deadlock. + testPublishWithMutex(t, totalMaxQueuedItems+N) + }) +} + +// testPublishWithMutex publishes the specified number of events, +// acquiring and releasing a mutex around each publish and each +// subscriber event receive. +// +// The test fails if it loses any events or times out due to a deadlock. +// Unfortunately, a goroutine waiting on a mutex held by a durably blocked +// goroutine is not itself considered durably blocked, so [synctest] cannot +// detect this deadlock on its own. +func testPublishWithMutex(t *testing.T, n int) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + evts := make([]any, n) + for i := range evts { + evts[i] = EventA{Counter: i} + } + exp := expectEvents(t, evts...) 
+ + var mu sync.Mutex + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + // As of 2025-11-20, this can deadlock if n is large enough + // and event queues fill up. + mu.Lock() + mu.Unlock() + + // Mark event as received, so we can check for lost events. + // Not required for the deadlock to occur. + exp.Got(e) + }) + + p := eventbus.Publish[EventA](c) + go func() { + for i := range n { + mu.Lock() + p.Publish(EventA{Counter: i}) + mu.Unlock() + } + }() + + synctest.Wait() + + if !exp.Empty() { + t.Errorf("unexpected extra events: %+v", exp.want) + } + }) +} + type queueChecker struct { t *testing.T want []any From 3780f25d51522f7148ae11d5b28b066d292e06e4 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 20 Nov 2025 11:04:54 -0600 Subject: [PATCH 0739/1093] util/eventbus: add tests for a subscriber publishing events As of 2025-11-20, publishing more events than the eventbus's internal queues can hold may deadlock if a subscriber tries to publish events itself. This commit adds a test that demonstrates this deadlock, and skips it until the bug is fixed. Updates #18012 Signed-off-by: Nick Khyl --- util/eventbus/bus_test.go | 60 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index e025e5bed252c..23fe633f358f5 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -636,6 +636,7 @@ func testPublishWithMutex(t *testing.T, n int) { var mu sync.Mutex eventbus.SubscribeFunc[EventA](c, func(e EventA) { + // Acquire the same mutex as the publisher. // As of 2025-11-20, this can deadlock if n is large enough // and event queues fill up. mu.Lock() @@ -648,6 +649,7 @@ func testPublishWithMutex(t *testing.T, n int) { p := eventbus.Publish[EventA](c) go func() { + // Publish events, acquiring the mutex around each publish. 
for i := range n { mu.Lock() p.Publish(EventA{Counter: i}) @@ -663,6 +665,64 @@ func testPublishWithMutex(t *testing.T, n int) { }) } +func TestPublishFromSubscriber(t *testing.T) { + t.Run("FewEvents", func(t *testing.T) { + // Publishing up to [totalMaxQueuedItems]-1 is fine. + testPublishFromSubscriber(t, totalMaxQueuedItems-1) + }) + t.Run("ManyEvents", func(t *testing.T) { + // As of 2025-11-20, publishing more than [totalMaxQueuedItems] may deadlock. + t.Skip("TODO: fix deadlock in https://github.com/tailscale/tailscale/issues/18012") + + // Using 2x to increase chance of deadlock. + testPublishFromSubscriber(t, totalMaxQueuedItems*2) + }) +} + +// testPublishFromSubscriber publishes the specified number of EventA events. +// Each EventA causes the subscriber to publish an EventB. +// The test fails if it loses any events or if a deadlock occurs. +func testPublishFromSubscriber(t *testing.T, n int) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + // Ultimately we expect to receive n EventB events + // published as a result of receiving n EventA events. + evts := make([]any, n) + for i := range evts { + evts[i] = EventB{Counter: i} + } + exp := expectEvents(t, evts...) + + pubA := eventbus.Publish[EventA](c) + pubB := eventbus.Publish[EventB](c) + + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + // Upon receiving EventA, publish EventB. + // As of 2025-11-20, this can deadlock if n is large enough + // and event queues fill up. + pubB.Publish(EventB{Counter: e.Counter}) + }) + eventbus.SubscribeFunc[EventB](c, func(e EventB) { + // Mark EventB as received. 
+ exp.Got(e) + }) + + for i := range n { + pubA.Publish(EventA{Counter: i}) + } + + synctest.Wait() + + if !exp.Empty() { + t.Errorf("unexpected extra events: %+v", exp.want) + } + }) +} + type queueChecker struct { t *testing.T want []any From e7f5ca1d5ed23d2e3ae2fc9711b25dbd936bdb68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 21 Nov 2025 14:49:37 -0500 Subject: [PATCH 0740/1093] wgengine/userspace: run link change subscribers in eventqueue (#18024) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #17996 Signed-off-by: Claus Lensbøl --- wgengine/userspace.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 8ad771fc5e000..e4c99ded20977 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -51,6 +51,7 @@ import ( "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" + "tailscale.com/util/execqueue" "tailscale.com/util/mak" "tailscale.com/util/set" "tailscale.com/util/testenv" @@ -98,6 +99,8 @@ type userspaceEngine struct { eventBus *eventbus.Bus eventClient *eventbus.Client + linkChangeQueue execqueue.ExecQueue + logf logger.Logf wgLogger *wglog.Logger // a wireguard-go logging wrapper reqCh chan struct{} @@ -544,7 +547,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) if f, ok := feature.HookProxyInvalidateCache.GetOk(); ok { f() } - e.linkChange(&cd) + e.linkChangeQueue.Add(func() { e.linkChange(&cd) }) }) e.eventClient = ec e.logf("Engine created.") @@ -1288,6 +1291,9 @@ func (e *userspaceEngine) RequestStatus() { func (e *userspaceEngine) Close() { e.eventClient.Close() + // TODO(cmol): Should we wait for it too? + // Same question raised in appconnector.go. 
+ e.linkChangeQueue.Shutdown() e.mu.Lock() if e.closing { e.mu.Unlock() From 9245c7131b4228810852a18613bcc7badd057f3a Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 21 Nov 2025 11:10:24 -0800 Subject: [PATCH 0741/1093] feature/relayserver: don't publish from within a subscribe fn goroutine Updates #17830 Signed-off-by: Jordan Whited --- feature/relayserver/relayserver.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 7d12d62e5802e..b7457210f3154 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -147,7 +147,12 @@ func (e *extension) onAllocReq(req magicsock.UDPRelayAllocReq) { e.logf("error allocating endpoint: %v", err) return } - e.respPub.Publish(magicsock.UDPRelayAllocResp{ + // Take a defensive stance around publishing from within an + // [*eventbus.SubscribeFunc] by publishing from a separate goroutine. At the + // time of writing (2025-11-21), publishing from within the + // [*eventbus.SubscribeFunc] goroutine is potentially unsafe if publisher + // and subscriber share a lock. + go e.respPub.Publish(magicsock.UDPRelayAllocResp{ ReqRxFromNodeKey: req.RxFromNodeKey, ReqRxFromDiscoKey: req.RxFromDiscoKey, Message: &disco.AllocateUDPRelayEndpointResponse{ From 1ccece0f783ae5059c1d74894566461072db6471 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 21 Nov 2025 07:53:23 -0600 Subject: [PATCH 0742/1093] util/eventbus: use unbounded event queues for DeliveredEvents in subscribers Bounded DeliveredEvent queues reduce memory usage, but they can deadlock under load. 
Two common scenarios trigger deadlocks when the number of events published in a short period exceeds twice the queue capacity (there's a PublishedEvent queue of the same size): - a subscriber tries to acquire the same mutex as held by a publisher, or - a subscriber for A events publishes B events Avoiding these scenarios is not practical and would limit eventbus usefulness and reduce its adoption, pushing us back to callbacks and other legacy mechanisms. These deadlocks already occurred in customer devices, dev machines, and tests. They also make it harder to identify and fix slow subscribers and similar issues we have been seeing recently. Choosing an arbitrary large fixed queue capacity would only mask the problem. A client running on a sufficiently large and complex customer environment can exceed any meaningful constant limit, since event volume depends on the number of peers and other factors. Behavior also changes based on scheduling of publishers and subscribers by the Go runtime, OS, and hardware, as the issue is essentially a race between publishers and subscribers. Additionally, on lower-end devices, an unreasonably high constant capacity is practically the same as using unbounded queues. Therefore, this PR changes the event queue implementation to be unbounded by default. The PublishedEvent queue keeps its existing capacity of 16 items, while subscribers' DeliveredEvent queues become unbounded. This change fixes known deadlocks and makes the system stable under load, at the cost of higher potential memory usage, including cases where a queue grows during an event burst and does not shrink when load decreases. Further improvements can be implemented in the future as needed. 
Fixes #17973 Fixes #18012 Signed-off-by: Nick Khyl --- util/eventbus/bus.go | 9 ++++++++- util/eventbus/bus_test.go | 34 ++-------------------------------- util/eventbus/queue.go | 12 ++++++------ 3 files changed, 16 insertions(+), 39 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index aa6880d01614e..880e075ccaf3c 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -120,7 +120,14 @@ func (b *Bus) Close() { } func (b *Bus) pump(ctx context.Context) { - var vals queue[PublishedEvent] + // Limit how many published events we can buffer in the PublishedEvent queue. + // + // Subscribers have unbounded DeliveredEvent queues (see tailscale/tailscale#18020), + // so this queue doesn't need to be unbounded. Keeping it bounded may also help + // catch cases where subscribers stop pumping events completely, such as due to a bug + // in [subscribeState.pump], [Subscriber.dispatch], or [SubscriberFunc.dispatch]). + const maxPublishedEvents = 16 + vals := queue[PublishedEvent]{capacity: maxPublishedEvents} acceptCh := func() chan PublishedEvent { if vals.Full() { return nil diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 23fe633f358f5..88e11e7199aee 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -594,23 +594,8 @@ func TestRegression(t *testing.T) { }) } -const ( - maxQueuedItems = 16 // same as in queue.go - totalMaxQueuedItems = maxQueuedItems * 2 // both publisher and subscriber sides -) - func TestPublishWithMutex(t *testing.T) { - t.Run("FewEvents", func(t *testing.T) { - // As of 2025-11-20, publishing up to [totalMaxQueuedItems] is fine. - testPublishWithMutex(t, totalMaxQueuedItems) - }) - t.Run("ManyEvents", func(t *testing.T) { - // As of 2025-11-20, publishing more than [totalMaxQueuedItems] may deadlock. - t.Skip("TODO: fix deadlock in https://github.com/tailscale/tailscale/issues/17973") - - const N = 3 // N larger than one increases the chance of deadlock. 
- testPublishWithMutex(t, totalMaxQueuedItems+N) - }) + testPublishWithMutex(t, 1024) // arbitrary large number of events } // testPublishWithMutex publishes the specified number of events, @@ -637,13 +622,10 @@ func testPublishWithMutex(t *testing.T, n int) { var mu sync.Mutex eventbus.SubscribeFunc[EventA](c, func(e EventA) { // Acquire the same mutex as the publisher. - // As of 2025-11-20, this can deadlock if n is large enough - // and event queues fill up. mu.Lock() mu.Unlock() // Mark event as received, so we can check for lost events. - // Not required for the deadlock to occur. exp.Got(e) }) @@ -666,17 +648,7 @@ func testPublishWithMutex(t *testing.T, n int) { } func TestPublishFromSubscriber(t *testing.T) { - t.Run("FewEvents", func(t *testing.T) { - // Publishing up to [totalMaxQueuedItems]-1 is fine. - testPublishFromSubscriber(t, totalMaxQueuedItems-1) - }) - t.Run("ManyEvents", func(t *testing.T) { - // As of 2025-11-20, publishing more than [totalMaxQueuedItems] may deadlock. - t.Skip("TODO: fix deadlock in https://github.com/tailscale/tailscale/issues/18012") - - // Using 2x to increase chance of deadlock. - testPublishFromSubscriber(t, totalMaxQueuedItems*2) - }) + testPublishFromSubscriber(t, 1024) // arbitrary large number of events } // testPublishFromSubscriber publishes the specified number of EventA events. @@ -702,8 +674,6 @@ func testPublishFromSubscriber(t *testing.T, n int) { eventbus.SubscribeFunc[EventA](c, func(e EventA) { // Upon receiving EventA, publish EventB. - // As of 2025-11-20, this can deadlock if n is large enough - // and event queues fill up. pubB.Publish(EventB{Counter: e.Counter}) }) eventbus.SubscribeFunc[EventB](c, func(e EventB) { diff --git a/util/eventbus/queue.go b/util/eventbus/queue.go index a62bf3c62d1d4..2589b75cef999 100644 --- a/util/eventbus/queue.go +++ b/util/eventbus/queue.go @@ -7,18 +7,18 @@ import ( "slices" ) -const maxQueuedItems = 16 - -// queue is an ordered queue of length up to maxQueuedItems. 
+// queue is an ordered queue of length up to capacity, +// if capacity is non-zero. Otherwise it is unbounded. type queue[T any] struct { - vals []T - start int + vals []T + start int + capacity int // zero means unbounded } // canAppend reports whether a value can be appended to q.vals without // shifting values around. func (q *queue[T]) canAppend() bool { - return cap(q.vals) < maxQueuedItems || len(q.vals) < cap(q.vals) + return q.capacity == 0 || cap(q.vals) < q.capacity || len(q.vals) < cap(q.vals) } func (q *queue[T]) Full() bool { From 16587746ed5446247d44dd0c50cec36cf61a0c80 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Fri, 21 Nov 2025 17:55:14 -0500 Subject: [PATCH 0743/1093] portlist,tstest: skip tests on kernels with /proc/net/tcp regression Linux kernel versions 6.6.102-104 and 6.12.42-45 have a regression in /proc/net/tcp that causes seek operations to fail with "illegal seek". This breaks portlist tests on these kernels. Add kernel version detection for Linux systems and a SkipOnKernelVersions helper to tstest. Use it to skip affected portlist tests on the broken kernel versions. Thanks to philiptaron for the list of kernels with the issue and fix. 
Updates #16966 Signed-off-by: Andrew Dunham --- portlist/portlist_test.go | 15 ++++++++++++ tstest/kernel_linux.go | 50 +++++++++++++++++++++++++++++++++++++++ tstest/kernel_other.go | 11 +++++++++ tstest/tstest.go | 18 ++++++++++++++ tstest/tstest_test.go | 19 ++++++++++++++- 5 files changed, 112 insertions(+), 1 deletion(-) create mode 100644 tstest/kernel_linux.go create mode 100644 tstest/kernel_other.go diff --git a/portlist/portlist_test.go b/portlist/portlist_test.go index 34277fdbaba91..791a8b118427f 100644 --- a/portlist/portlist_test.go +++ b/portlist/portlist_test.go @@ -5,12 +5,24 @@ package portlist import ( "net" + "runtime" "testing" "tailscale.com/tstest" ) +func maybeSkip(t *testing.T) { + if runtime.GOOS == "linux" { + tstest.SkipOnKernelVersions(t, + "https://github.com/tailscale/tailscale/issues/16966", + "6.6.102", "6.6.103", "6.6.104", + "6.12.42", "6.12.43", "6.12.44", "6.12.45", + ) + } +} + func TestGetList(t *testing.T) { + maybeSkip(t) tstest.ResourceCheck(t) var p Poller @@ -25,6 +37,7 @@ func TestGetList(t *testing.T) { } func TestIgnoreLocallyBoundPorts(t *testing.T) { + maybeSkip(t) tstest.ResourceCheck(t) ln, err := net.Listen("tcp", "127.0.0.1:0") @@ -47,6 +60,8 @@ func TestIgnoreLocallyBoundPorts(t *testing.T) { } func TestPoller(t *testing.T) { + maybeSkip(t) + var p Poller p.IncludeLocalhost = true get := func(t *testing.T) []Port { diff --git a/tstest/kernel_linux.go b/tstest/kernel_linux.go new file mode 100644 index 0000000000000..664fe9bdd7b9f --- /dev/null +++ b/tstest/kernel_linux.go @@ -0,0 +1,50 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package tstest + +import ( + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// KernelVersion returns the major, minor, and patch version of the Linux kernel. +// It returns (0, 0, 0) if the version cannot be determined. 
+func KernelVersion() (major, minor, patch int) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return 0, 0, 0 + } + release := unix.ByteSliceToString(uname.Release[:]) + + // Parse version string (e.g., "5.15.0-...") + parts := strings.Split(release, ".") + if len(parts) < 3 { + return 0, 0, 0 + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return 0, 0, 0 + } + + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return 0, 0, 0 + } + + // Patch version may have additional info after a hyphen (e.g., "0-76-generic") + // Extract just the numeric part before any hyphen + patchStr, _, _ := strings.Cut(parts[2], "-") + + patch, err = strconv.Atoi(patchStr) + if err != nil { + return 0, 0, 0 + } + + return major, minor, patch +} diff --git a/tstest/kernel_other.go b/tstest/kernel_other.go new file mode 100644 index 0000000000000..bf69be6df4b27 --- /dev/null +++ b/tstest/kernel_other.go @@ -0,0 +1,11 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package tstest + +// KernelVersion returns (0, 0, 0) on unsupported platforms. +func KernelVersion() (major, minor, patch int) { + return 0, 0, 0 +} diff --git a/tstest/tstest.go b/tstest/tstest.go index 169450686966d..d0828f508a46c 100644 --- a/tstest/tstest.go +++ b/tstest/tstest.go @@ -6,6 +6,7 @@ package tstest import ( "context" + "fmt" "os" "strconv" "strings" @@ -93,3 +94,20 @@ func Parallel(t *testing.T) { t.Parallel() } } + +// SkipOnKernelVersions skips the test if the current +// kernel version is in the specified list. 
+func SkipOnKernelVersions(t testing.TB, issue string, versions ...string) { + major, minor, patch := KernelVersion() + if major == 0 && minor == 0 && patch == 0 { + t.Logf("could not determine kernel version") + return + } + + current := fmt.Sprintf("%d.%d.%d", major, minor, patch) + for _, v := range versions { + if v == current { + t.Skipf("skipping on kernel version %q - see issue %s", current, issue) + } + } +} diff --git a/tstest/tstest_test.go b/tstest/tstest_test.go index e988d5d5624b6..ce59bde538b9a 100644 --- a/tstest/tstest_test.go +++ b/tstest/tstest_test.go @@ -3,7 +3,10 @@ package tstest -import "testing" +import ( + "runtime" + "testing" +) func TestReplace(t *testing.T) { before := "before" @@ -22,3 +25,17 @@ func TestReplace(t *testing.T) { t.Errorf("before = %q; want %q", before, "before") } } + +func TestKernelVersion(t *testing.T) { + switch runtime.GOOS { + case "linux": + default: + t.Skipf("skipping test on %s", runtime.GOOS) + } + + major, minor, patch := KernelVersion() + if major == 0 && minor == 0 && patch == 0 { + t.Fatal("KernelVersion returned (0, 0, 0); expected valid version") + } + t.Logf("Kernel version: %d.%d.%d", major, minor, patch) +} From a20cdb5c938204d45502d4c52fafc8ad0b0afed9 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Fri, 21 Nov 2025 16:50:28 -0500 Subject: [PATCH 0744/1093] tstest/integration/testcontrol: de-flake TestUserMetricsRouteGauges SetSubnetRoutes was not sending update notifications to nodes when their approved routes changed, causing nodes to not fetch updated netmaps with PrimaryRoutes populated. This resulted in TestUserMetricsRouteGauges flaking because it waited for PrimaryRoutes to be set, which only happened if the node happened to poll for other reasons. Now send updateSelfChanged notification to affected nodes so they fetch an updated netmap immediately. 
Fixes #17962 Signed-off-by: Andrew Dunham --- tstest/integration/testcontrol/testcontrol.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index f9a33705b7f56..268f2f19b4067 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -464,6 +464,9 @@ func (s *Server) SetSubnetRoutes(nodeKey key.NodePublic, routes []netip.Prefix) defer s.mu.Unlock() s.logf("Setting subnet routes for %s: %v", nodeKey.ShortString(), routes) mak.Set(&s.nodeSubnetRoutes, nodeKey, routes) + if node, ok := s.nodes[nodeKey]; ok { + sendUpdate(s.updates[node.ID], updateSelfChanged) + } } // MasqueradePair is a pair of nodes and the IP address that the From 698eecda040e6ee21b2f4502d3b98a6db1b60f6d Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Fri, 21 Nov 2025 17:25:56 -0500 Subject: [PATCH 0745/1093] ipn/ipnlocal: fix panic in driveTransport on network error When the underlying transport returns a network error, the RoundTrip method returns (nil, error). The defer was attempting to access resp without checking if it was nil first, causing a panic. Fix this by checking for nil in the defer. Also changes driveTransport.tr from *http.Transport to http.RoundTripper and adds a test. Fixes #17306 Signed-off-by: Andrew Dunham Change-Id: Icf38a020b45aaa9cfbc1415d55fd8b70b978f54c --- ipn/ipnlocal/drive.go | 75 ++++++++++++++++++++------------------ ipn/ipnlocal/drive_test.go | 50 +++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 36 deletions(-) create mode 100644 ipn/ipnlocal/drive_test.go diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 7d6dc2427adae..456cd45441ba9 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -433,7 +433,7 @@ func (rbw *responseBodyWrapper) Close() error { // b.Dialer().PeerAPITransport() with metrics tracking. 
type driveTransport struct { b *LocalBackend - tr *http.Transport + tr http.RoundTripper } func (b *LocalBackend) newDriveTransport() *driveTransport { @@ -443,7 +443,7 @@ func (b *LocalBackend) newDriveTransport() *driveTransport { } } -func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { +func (dt *driveTransport) RoundTrip(req *http.Request) (*http.Response, error) { // Some WebDAV clients include origin and refer headers, which peerapi does // not like. Remove them. req.Header.Del("origin") @@ -455,42 +455,45 @@ func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err req.Body = bw } - defer func() { - contentType := "unknown" - if ct := req.Header.Get("Content-Type"); ct != "" { - contentType = ct - } + resp, err := dt.tr.RoundTrip(req) + if err != nil { + return nil, err + } - dt.b.mu.Lock() - selfNodeKey := dt.b.currentNode().Self().Key().ShortString() - dt.b.mu.Unlock() - n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) - shareNodeKey := "unknown" - if ok { - shareNodeKey = string(n.Key().ShortString()) - } + contentType := "unknown" + if ct := req.Header.Get("Content-Type"); ct != "" { + contentType = ct + } - rbw := responseBodyWrapper{ - log: dt.b.logf, - logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level - method: req.Method, - bytesTx: int64(bw.bytesRead), - selfNodeKey: selfNodeKey, - shareNodeKey: shareNodeKey, - contentType: contentType, - contentLength: resp.ContentLength, - fileExtension: parseDriveFileExtensionForLog(req.URL.Path), - statusCode: resp.StatusCode, - ReadCloser: resp.Body, - } + dt.b.mu.Lock() + selfNodeKey := dt.b.currentNode().Self().Key().ShortString() + dt.b.mu.Unlock() + n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) + shareNodeKey := "unknown" + if ok { + shareNodeKey = string(n.Key().ShortString()) + } - if resp.StatusCode >= 400 
{ - // in case of error response, just log immediately - rbw.logAccess("") - } else { - resp.Body = &rbw - } - }() + rbw := responseBodyWrapper{ + log: dt.b.logf, + logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level + method: req.Method, + bytesTx: int64(bw.bytesRead), + selfNodeKey: selfNodeKey, + shareNodeKey: shareNodeKey, + contentType: contentType, + contentLength: resp.ContentLength, + fileExtension: parseDriveFileExtensionForLog(req.URL.Path), + statusCode: resp.StatusCode, + ReadCloser: resp.Body, + } + + if resp.StatusCode >= 400 { + // in case of error response, just log immediately + rbw.logAccess("") + } else { + resp.Body = &rbw + } - return dt.tr.RoundTrip(req) + return resp, nil } diff --git a/ipn/ipnlocal/drive_test.go b/ipn/ipnlocal/drive_test.go new file mode 100644 index 0000000000000..323c3821499ed --- /dev/null +++ b/ipn/ipnlocal/drive_test.go @@ -0,0 +1,50 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package ipnlocal + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" +) + +// TestDriveTransportRoundTrip_NetworkError tests that driveTransport.RoundTrip +// doesn't panic when the underlying transport returns a nil response with an +// error. 
+// +// See: https://github.com/tailscale/tailscale/issues/17306 +func TestDriveTransportRoundTrip_NetworkError(t *testing.T) { + b := newTestLocalBackend(t) + + testErr := errors.New("network connection failed") + mockTransport := &mockRoundTripper{ + err: testErr, + } + dt := &driveTransport{ + b: b, + tr: mockTransport, + } + + req := httptest.NewRequest("GET", "http://100.64.0.1:1234/some/path", nil) + resp, err := dt.RoundTrip(req) + if err == nil { + t.Fatal("got nil error, expected non-nil") + } else if !errors.Is(err, testErr) { + t.Errorf("got error %v, expected %v", err, testErr) + } + if resp != nil { + t.Errorf("wanted nil response, got %v", resp) + } +} + +type mockRoundTripper struct { + err error +} + +func (m *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return nil, m.err +} From 6637003cc8c5a73a56ed10f57f207a2a2c9f2c7c Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 18 Nov 2025 17:11:27 +0000 Subject: [PATCH 0746/1093] cmd/cigocacher,go.mod: add cigocacher cmd Adds cmd/cigocacher as the client to cigocached for Go caching over HTTP. The HTTP cache is best-effort only, and builds will fall back to disk-only cache if it's not available, much like regular builds. Not yet used in CI; that will follow in another PR once we have runners available in this repo with the right network setup for reaching cigocached. 
Updates tailscale/corp#10808 Change-Id: I13ae1a12450eb2a05bd9843f358474243989e967 Signed-off-by: Tom Proctor --- cmd/cigocacher/cigocacher.go | 308 +++++++++++++++++++++++++++++ cmd/cigocacher/http.go | 115 +++++++++++ cmd/derper/depaware.txt | 8 +- cmd/k8s-operator/depaware.txt | 12 +- cmd/stund/depaware.txt | 8 +- cmd/tailscaled/depaware-min.txt | 1 + cmd/tailscaled/depaware-minbox.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + flake.nix | 3 +- go.mod | 15 +- go.mod.sri | 2 +- go.sum | 30 +-- shell.nix | 2 +- tsnet/depaware.txt | 1 + 15 files changed, 470 insertions(+), 38 deletions(-) create mode 100644 cmd/cigocacher/cigocacher.go create mode 100644 cmd/cigocacher/http.go diff --git a/cmd/cigocacher/cigocacher.go b/cmd/cigocacher/cigocacher.go new file mode 100644 index 0000000000000..b38df4c2b40a5 --- /dev/null +++ b/cmd/cigocacher/cigocacher.go @@ -0,0 +1,308 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// cigocacher is an opinionated-to-Tailscale client for gocached. It connects +// at a URL like "https://ci-gocached-azure-1.corp.ts.net:31364", but that is +// stored in a GitHub actions variable so that its hostname can be updated for +// all branches at the same time in sync with the actual infrastructure. +// +// It authenticates using GitHub OIDC tokens, and all HTTP errors are ignored +// so that its failure mode is just that builds get slower and fall back to +// disk-only cache. 
+package main + +import ( + "bytes" + "context" + jsonv1 "encoding/json" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "sync/atomic" + "time" + + "github.com/bradfitz/go-tool-cache/cacheproc" + "github.com/bradfitz/go-tool-cache/cachers" +) + +func main() { + var ( + auth = flag.Bool("auth", false, "auth with cigocached and exit, printing the access token as output") + token = flag.String("token", "", "the cigocached access token to use, as created using --auth") + cigocachedURL = flag.String("cigocached-url", "", "optional cigocached URL (scheme, host, and port). empty means to not use one.") + verbose = flag.Bool("verbose", false, "enable verbose logging") + ) + flag.Parse() + + if *auth { + if *cigocachedURL == "" { + log.Print("--cigocached-url is empty, skipping auth") + return + } + tk, err := fetchAccessToken(httpClient(), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL"), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN"), *cigocachedURL) + if err != nil { + log.Printf("error fetching access token, skipping auth: %v", err) + return + } + fmt.Println(tk) + return + } + + d, err := os.UserCacheDir() + if err != nil { + log.Fatal(err) + } + d = filepath.Join(d, "go-cacher") + log.Printf("Defaulting to cache dir %v ...", d) + if err := os.MkdirAll(d, 0750); err != nil { + log.Fatal(err) + } + + c := &cigocacher{ + disk: &cachers.DiskCache{Dir: d}, + verbose: *verbose, + } + if *cigocachedURL != "" { + log.Printf("Using cigocached at %s", *cigocachedURL) + c.gocached = &gocachedClient{ + baseURL: *cigocachedURL, + cl: httpClient(), + accessToken: *token, + verbose: *verbose, + } + } + var p *cacheproc.Process + p = &cacheproc.Process{ + Close: func() error { + log.Printf("gocacheprog: closing; %d gets (%d hits, %d misses, %d errors); %d puts (%d errors)", + p.Gets.Load(), p.GetHits.Load(), p.GetMisses.Load(), p.GetErrors.Load(), p.Puts.Load(), p.PutErrors.Load()) + return c.close() + }, + Get: c.get, + Put: c.put, + } + 
+ if err := p.Run(); err != nil { + log.Fatal(err) + } +} + +func httpClient() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + host, port, err := net.SplitHostPort(addr) + if err == nil { + // This does not run in a tailnet. We serve corp.ts.net + // TLS certs, and override DNS resolution to lookup the + // private IP for the VM by its hostname. + if vm, ok := strings.CutSuffix(host, ".corp.ts.net"); ok { + addr = net.JoinHostPort(vm, port) + } + } + var d net.Dialer + return d.DialContext(ctx, network, addr) + }, + }, + } +} + +type cigocacher struct { + disk *cachers.DiskCache + gocached *gocachedClient + verbose bool + + getNanos atomic.Int64 // total nanoseconds spent in gets + putNanos atomic.Int64 // total nanoseconds spent in puts + getHTTP atomic.Int64 // HTTP get requests made + getHTTPBytes atomic.Int64 // HTTP get bytes transferred + getHTTPHits atomic.Int64 // HTTP get hits + getHTTPMisses atomic.Int64 // HTTP get misses + getHTTPErrors atomic.Int64 // HTTP get errors ignored on best-effort basis + getHTTPNanos atomic.Int64 // total nanoseconds spent in HTTP gets + putHTTP atomic.Int64 // HTTP put requests made + putHTTPBytes atomic.Int64 // HTTP put bytes transferred + putHTTPErrors atomic.Int64 // HTTP put errors ignored on best-effort basis + putHTTPNanos atomic.Int64 // total nanoseconds spent in HTTP puts +} + +func (c *cigocacher) get(ctx context.Context, actionID string) (outputID, diskPath string, err error) { + t0 := time.Now() + defer func() { + c.getNanos.Add(time.Since(t0).Nanoseconds()) + }() + if c.gocached == nil { + return c.disk.Get(ctx, actionID) + } + + outputID, diskPath, err = c.disk.Get(ctx, actionID) + if err == nil && outputID != "" { + return outputID, diskPath, nil + } + + c.getHTTP.Add(1) + t0HTTP := time.Now() + defer func() { + c.getHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds()) + }() + outputID, res, err := 
c.gocached.get(ctx, actionID) + if err != nil { + c.getHTTPErrors.Add(1) + return "", "", nil + } + if outputID == "" || res == nil { + c.getHTTPMisses.Add(1) + return "", "", nil + } + + defer res.Body.Close() + + // TODO(tomhjp): make sure we timeout if cigocached disappears, but for some + // reason, this seemed to tank network performance. + // ctx, cancel := context.WithTimeout(ctx, httpTimeout(res.ContentLength)) + // defer cancel() + diskPath, err = c.disk.Put(ctx, actionID, outputID, res.ContentLength, res.Body) + if err != nil { + return "", "", fmt.Errorf("error filling disk cache from HTTP: %w", err) + } + + c.getHTTPHits.Add(1) + c.getHTTPBytes.Add(res.ContentLength) + return outputID, diskPath, nil +} + +func (c *cigocacher) put(ctx context.Context, actionID, outputID string, size int64, r io.Reader) (diskPath string, err error) { + t0 := time.Now() + defer func() { + c.putNanos.Add(time.Since(t0).Nanoseconds()) + }() + if c.gocached == nil { + return c.disk.Put(ctx, actionID, outputID, size, r) + } + + c.putHTTP.Add(1) + var diskReader, httpReader io.Reader + tee := &bestEffortTeeReader{r: r} + if size == 0 { + // Special case the empty file so NewRequest sets "Content-Length: 0", + // as opposed to thinking we didn't set it and not being able to sniff its size + // from the type. + diskReader, httpReader = bytes.NewReader(nil), bytes.NewReader(nil) + } else { + pr, pw := io.Pipe() + defer pw.Close() + // The diskReader is in the driving seat. We will try to forward data + // to httpReader as well, but only best-effort. + diskReader = tee + tee.w = pw + httpReader = pr + } + httpErrCh := make(chan error) + go func() { + // TODO(tomhjp): make sure we timeout if cigocached disappears, but for some + // reason, this seemed to tank network performance. 
+ // ctx, cancel := context.WithTimeout(ctx, httpTimeout(size)) + // defer cancel() + t0HTTP := time.Now() + defer func() { + c.putHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds()) + }() + httpErrCh <- c.gocached.put(ctx, actionID, outputID, size, httpReader) + }() + + diskPath, err = c.disk.Put(ctx, actionID, outputID, size, diskReader) + if err != nil { + return "", fmt.Errorf("error writing to disk cache: %w", errors.Join(err, tee.err)) + } + + select { + case err := <-httpErrCh: + if err != nil { + c.putHTTPErrors.Add(1) + } else { + c.putHTTPBytes.Add(size) + } + case <-ctx.Done(): + } + + return diskPath, nil +} + +func (c *cigocacher) close() error { + log.Printf("cigocacher HTTP stats: %d gets (%.1fMiB, %.2fs, %d hits, %d misses, %d errors ignored); %d puts (%.1fMiB, %.2fs, %d errors ignored)", + c.getHTTP.Load(), float64(c.getHTTPBytes.Load())/float64(1<<20), float64(c.getHTTPNanos.Load())/float64(time.Second), c.getHTTPHits.Load(), c.getHTTPMisses.Load(), c.getHTTPErrors.Load(), + c.putHTTP.Load(), float64(c.putHTTPBytes.Load())/float64(1<<20), float64(c.putHTTPNanos.Load())/float64(time.Second), c.putHTTPErrors.Load()) + if !c.verbose || c.gocached == nil { + return nil + } + + stats, err := c.gocached.fetchStats() + if err != nil { + log.Printf("error fetching gocached stats: %v", err) + } else { + log.Printf("gocached session stats: %s", stats) + } + + return nil +} + +func fetchAccessToken(cl *http.Client, idTokenURL, idTokenRequestToken, gocachedURL string) (string, error) { + req, err := http.NewRequest("GET", idTokenURL+"&audience=gocached", nil) + if err != nil { + return "", err + } + req.Header.Set("Authorization", "Bearer "+idTokenRequestToken) + resp, err := cl.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + type idTokenResp struct { + Value string `json:"value"` + } + var idToken idTokenResp + if err := jsonv1.NewDecoder(resp.Body).Decode(&idToken); err != nil { + return "", err + } + + req, _ = 
http.NewRequest("POST", gocachedURL+"/auth/exchange-token", strings.NewReader(`{"jwt":"`+idToken.Value+`"}`)) + req.Header.Set("Content-Type", "application/json") + resp, err = cl.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + type accessTokenResp struct { + AccessToken string `json:"access_token"` + } + var accessToken accessTokenResp + if err := jsonv1.NewDecoder(resp.Body).Decode(&accessToken); err != nil { + return "", err + } + + return accessToken.AccessToken, nil +} + +type bestEffortTeeReader struct { + r io.Reader + w io.WriteCloser + err error +} + +func (t *bestEffortTeeReader) Read(p []byte) (int, error) { + n, err := t.r.Read(p) + if n > 0 && t.w != nil { + if _, err := t.w.Write(p[:n]); err != nil { + t.err = errors.Join(err, t.w.Close()) + t.w = nil + } + } + return n, err +} diff --git a/cmd/cigocacher/http.go b/cmd/cigocacher/http.go new file mode 100644 index 0000000000000..57d3bfb45f53e --- /dev/null +++ b/cmd/cigocacher/http.go @@ -0,0 +1,115 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "context" + "fmt" + "io" + "log" + "net/http" +) + +type gocachedClient struct { + baseURL string // base URL of the cacher server, like "http://localhost:31364". + cl *http.Client // http.Client to use. + accessToken string // Bearer token to use in the Authorization header. + verbose bool +} + +// drainAndClose reads and throws away a small bounded amount of data. This is a +// best-effort attempt to allow connection reuse; Go's HTTP/1 Transport won't +// reuse a TCP connection unless you fully consume HTTP responses. 
+func drainAndClose(body io.ReadCloser) { + io.CopyN(io.Discard, body, 4<<10) + body.Close() +} + +func tryReadErrorMessage(res *http.Response) []byte { + msg, _ := io.ReadAll(io.LimitReader(res.Body, 4<<10)) + return msg +} + +func (c *gocachedClient) get(ctx context.Context, actionID string) (outputID string, resp *http.Response, err error) { + // TODO(tomhjp): make sure we timeout if cigocached disappears, but for some + // reason, this seemed to tank network performance. + // // Set a generous upper limit on the time we'll wait for a response. We'll + // // shorten this deadline later once we know the content length. + // ctx, cancel := context.WithTimeout(ctx, time.Minute) + // defer cancel() + req, _ := http.NewRequestWithContext(ctx, "GET", c.baseURL+"/action/"+actionID, nil) + req.Header.Set("Want-Object", "1") // opt in to single roundtrip protocol + if c.accessToken != "" { + req.Header.Set("Authorization", "Bearer "+c.accessToken) + } + + res, err := c.cl.Do(req) + if err != nil { + return "", nil, err + } + defer func() { + if resp == nil { + drainAndClose(res.Body) + } + }() + if res.StatusCode == http.StatusNotFound { + return "", nil, nil + } + if res.StatusCode != http.StatusOK { + msg := tryReadErrorMessage(res) + if c.verbose { + log.Printf("error GET /action/%s: %v, %s", actionID, res.Status, msg) + } + return "", nil, fmt.Errorf("unexpected GET /action/%s status %v", actionID, res.Status) + } + + outputID = res.Header.Get("Go-Output-Id") + if outputID == "" { + return "", nil, fmt.Errorf("missing Go-Output-Id header in response") + } + if res.ContentLength == -1 { + return "", nil, fmt.Errorf("no Content-Length from server") + } + return outputID, res, nil +} + +func (c *gocachedClient) put(ctx context.Context, actionID, outputID string, size int64, body io.Reader) error { + req, _ := http.NewRequestWithContext(ctx, "PUT", c.baseURL+"/"+actionID+"/"+outputID, body) + req.ContentLength = size + if c.accessToken != "" { + 
req.Header.Set("Authorization", "Bearer "+c.accessToken) + } + res, err := c.cl.Do(req) + if err != nil { + if c.verbose { + log.Printf("error PUT /%s/%s: %v", actionID, outputID, err) + } + return err + } + defer res.Body.Close() + if res.StatusCode != http.StatusNoContent { + msg := tryReadErrorMessage(res) + if c.verbose { + log.Printf("error PUT /%s/%s: %v, %s", actionID, outputID, res.Status, msg) + } + return fmt.Errorf("unexpected PUT /%s/%s status %v", actionID, outputID, res.Status) + } + + return nil +} + +func (c *gocachedClient) fetchStats() (string, error) { + req, _ := http.NewRequest("GET", c.baseURL+"/session/stats", nil) + req.Header.Set("Authorization", "Bearer "+c.accessToken) + resp, err := c.cl.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + b, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 0a75ac43e5a28..6608faaf741fc 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -30,9 +30,9 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ - LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus - LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs - LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs + L github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus + L github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs + L github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 
github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio @@ -72,7 +72,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/reflect/protoregistry from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ - google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ + 💣 google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ tailscale.com/client/local from tailscale.com/derp/derpserver diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 16ad089f3b213..c76a4236e1105 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -71,8 +71,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + 💣 github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd - github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe+ + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag 
@@ -94,6 +95,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics+ github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/client_golang/prometheus/promhttp from sigs.k8s.io/controller-runtime/pkg/metrics/server+ + github.com/prometheus/client_golang/prometheus/promhttp/internal from github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ @@ -180,10 +182,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ google.golang.org/protobuf/reflect/protoregistry from github.com/golang/protobuf/proto+ google.golang.org/protobuf/runtime/protoiface from github.com/golang/protobuf/proto+ google.golang.org/protobuf/runtime/protoimpl from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/types/descriptorpb from github.com/google/gnostic-models/openapiv3+ - google.golang.org/protobuf/types/gofeaturespb from google.golang.org/protobuf/reflect/protodesc - google.golang.org/protobuf/types/known/anypb from github.com/google/gnostic-models/compiler+ - google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ + 💣 google.golang.org/protobuf/types/descriptorpb from github.com/google/gnostic-models/openapiv3+ + 💣 google.golang.org/protobuf/types/gofeaturespb from google.golang.org/protobuf/reflect/protodesc + 💣 google.golang.org/protobuf/types/known/anypb from github.com/google/gnostic-models/compiler+ + 💣 google.golang.org/protobuf/types/known/timestamppb from 
github.com/prometheus/client_golang/prometheus+ gopkg.in/evanphx/json-patch.v4 from k8s.io/client-go/testing gopkg.in/inf.v0 from k8s.io/apimachinery/pkg/api/resource gopkg.in/yaml.v3 from github.com/go-openapi/swag+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 7b3d05f94ccb2..7b945dd77ea79 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -14,9 +14,9 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ - LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus - LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs - LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs + L github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus + L github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs + L github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs 💣 go4.org/mem from tailscale.com/metrics+ go4.org/netipx from tailscale.com/net/tsaddr google.golang.org/protobuf/encoding/protodelim from github.com/prometheus/common/expfmt @@ -47,7 +47,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar google.golang.org/protobuf/reflect/protoregistry from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+ google.golang.org/protobuf/runtime/protoimpl from github.com/prometheus/client_model/go+ - google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ + 💣 google.golang.org/protobuf/types/known/timestamppb from 
github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version tailscale.com/envknob from tailscale.com/tsweb+ tailscale.com/feature from tailscale.com/tsweb diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 3c111470f32d9..69e6559a0173b 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -16,6 +16,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + 💣 github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 40a1fb2a4a70d..55a21c426b5d5 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -20,6 +20,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + 💣 github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd diff --git a/cmd/tailscaled/depaware.txt 
b/cmd/tailscaled/depaware.txt index d154020923fbd..79f92deb92f38 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -139,6 +139,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + 💣 github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 14db7414a64ba..5c6aae5121196 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -36,6 +36,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + 💣 github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd diff --git a/flake.nix b/flake.nix index c075bce0e3131..505061a765362 100644 --- a/flake.nix +++ b/flake.nix @@ -151,5 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-3jAfCtp714acePnwgdNto8Sj3vFwtpO9os6IwXQ07A4= - +# nix-direnv cache busting line: sha256-jJSSXMyUqcJoZuqfSlBsKDQezyqS+jDkRglMMjG1K8g= diff --git a/go.mod b/go.mod index 
e6baad0dc5057..a49a9724f7af1 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58 github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 + github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd github.com/bramvdbogaerde/go-scp v1.4.0 github.com/cilium/ebpf v0.15.0 github.com/coder/websocket v1.8.12 @@ -60,7 +61,7 @@ require ( github.com/jellydator/ttlcache/v3 v3.1.0 github.com/jsimonetti/rtnetlink v1.4.0 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/klauspost/compress v1.17.11 + github.com/klauspost/compress v1.18.0 github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.20 @@ -74,8 +75,8 @@ require ( github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/prometheus-community/pro-bing v0.4.0 - github.com/prometheus/client_golang v1.20.5 - github.com/prometheus/common v0.55.0 + github.com/prometheus/client_golang v1.23.0 + github.com/prometheus/common v0.65.0 github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff github.com/safchain/ethtool v0.3.0 github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e @@ -103,7 +104,7 @@ require ( go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/crypto v0.45.0 - golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac + golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b golang.org/x/mod v0.30.0 golang.org/x/net v0.47.0 golang.org/x/oauth2 v0.30.0 @@ -355,8 +356,8 @@ require ( github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polyfloyd/go-errorlint v1.4.8 // indirect - github.com/prometheus/client_model v0.6.1 - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_model v0.6.2 + 
github.com/prometheus/procfs v0.16.1 // indirect github.com/quasilyte/go-ruleguard v0.4.2 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect @@ -414,7 +415,7 @@ require ( golang.org/x/image v0.27.0 // indirect golang.org/x/text v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.36.3 // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/go.mod.sri b/go.mod.sri index 737ea7d2b09e4..66422652e2262 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-3jAfCtp714acePnwgdNto8Sj3vFwtpO9os6IwXQ07A4= +sha256-jJSSXMyUqcJoZuqfSlBsKDQezyqS+jDkRglMMjG1K8g= diff --git a/go.sum b/go.sum index 1106932f21444..f70fe9159f614 100644 --- a/go.sum +++ b/go.sum @@ -186,6 +186,8 @@ github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFiM= github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= +github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd h1:1Df3FBmfyUCIQ4eKzAPXIWTfewY89L0fWPWO56zWCyI= +github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd/go.mod h1:2+xptBAd0m2kZ1wLO4AYZhldLEFPy+KeGwmnlXLvy+w= github.com/bramvdbogaerde/go-scp v1.4.0 h1:jKMwpwCbcX1KyvDbm/PDJuXcMuNVlLGi0Q0reuzjyKY= github.com/bramvdbogaerde/go-scp v1.4.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ= github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= @@ -662,8 +664,8 @@ github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -840,29 +842,29 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod 
h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff h1:X1Tly81aZ22DA1fxBdfvR3iw8+yFoUBUHMEd+AX/ZXI= github.com/prometheus/prometheus v0.49.2-0.20240125131847-c3b8ef1694ff/go.mod h1:FvE8dtQ1Ww63IlyKBn1V4s+zMwF9kHkVNkQBR1pM4CU= github.com/puzpuzpuz/xsync v1.5.2 h1:yRAP4wqSOZG+/4pxJ08fPTwrfL0IzE/LKQ/cw509qGY= @@ -1140,8 +1142,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac h1:l5+whBCLH3iH2ZNHYLbAe58bo7yrN4mVcnkHDYz5vvs= -golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScyxUhjuVHR3HGaDPMn9rMSUUbxo= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= +golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= @@ -1498,8 +1500,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/shell.nix b/shell.nix index 8554b92580e5c..d412693d9fdd1 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-3jAfCtp714acePnwgdNto8Sj3vFwtpO9os6IwXQ07A4= +# nix-direnv cache busting line: sha256-jJSSXMyUqcJoZuqfSlBsKDQezyqS+jDkRglMMjG1K8g= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 7d5ec0a606e4d..825a39e34877f 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -36,6 +36,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + 💣 github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd From 755309c04eae75e4dda61b79042a4ca1112b5a45 Mon Sep 17 
00:00:00 2001 From: Jordan Whited Date: Thu, 20 Nov 2025 19:33:18 -0800 Subject: [PATCH 0747/1093] net/udprelay: use blake2s-256 MAC for handshake challenge This commit replaces crypto/rand challenge generation with a blake2s-256 MAC. This enables the peer relay server to respond to multiple forward disco.BindUDPRelayEndpoint messages per handshake generation without sacrificing the proof of IP ownership properties of the handshake. Responding to multiple forward disco.BindUDPRelayEndpoint messages per handshake generation improves client address/path selection where lowest client->server path/addr one-way delay does not necessarily equate to lowest client<->server round trip delay. It also improves situations where outbound traffic is filtered independent of input, and the first reply disco.BindUDPRelayEndpointChallenge message is dropped on the reply path, but a later reply using a different source would make it through. Reduction in serverEndpoint state saves 112 bytes per instance, trading for slightly more expensive crypto ops: 277ns/op vs 321ns/op on an M1 Macbook Pro. 
Updates tailscale/corp#34414 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 168 ++++++++++++++++++++++++------------ net/udprelay/server_test.go | 116 +++++++++++++++++++++++++ 2 files changed, 227 insertions(+), 57 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 7138cec7a93dd..b260955e0c952 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -10,6 +10,7 @@ import ( "bytes" "context" "crypto/rand" + "encoding/binary" "errors" "fmt" "net" @@ -20,6 +21,7 @@ import ( "time" "go4.org/mem" + "golang.org/x/crypto/blake2s" "golang.org/x/net/ipv6" "tailscale.com/disco" "tailscale.com/net/batching" @@ -73,7 +75,9 @@ type Server struct { closeCh chan struct{} netChecker *netcheck.Client - mu sync.Mutex // guards the following fields + mu sync.Mutex // guards the following fields + macSecrets [][blake2s.Size]byte // [0] is most recent, max 2 elements + macSecretRotatedAt time.Time derpMap *tailcfg.DERPMap onlyStaticAddrPorts bool // no dynamic addr port discovery when set staticAddrPorts views.Slice[netip.AddrPort] // static ip:port pairs set with [Server.SetStaticAddrPorts] @@ -85,6 +89,8 @@ type Server struct { byDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint } +const macSecretRotationInterval = time.Minute * 2 + const ( minVNI = uint32(1) maxVNI = uint32(1<<24 - 1) @@ -98,22 +104,42 @@ type serverEndpoint struct { // indexing of this array aligns with the following fields, e.g. // discoSharedSecrets[0] is the shared secret to use when sealing // Disco protocol messages for transmission towards discoPubKeys[0]. 
- discoPubKeys key.SortedPairOfDiscoPublic - discoSharedSecrets [2]key.DiscoShared - handshakeGeneration [2]uint32 // or zero if a handshake has never started for that relay leg - handshakeAddrPorts [2]netip.AddrPort // or zero value if a handshake has never started for that relay leg - boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg - lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time - challenge [2][disco.BindUDPRelayChallengeLen]byte - packetsRx [2]uint64 // num packets received from/sent by each client after they are bound - bytesRx [2]uint64 // num bytes received from/sent by each client after they are bound + discoPubKeys key.SortedPairOfDiscoPublic + discoSharedSecrets [2]key.DiscoShared + inProgressGeneration [2]uint32 // or zero if a handshake has never started, or has just completed + boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg + lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time + packetsRx [2]uint64 // num packets received from/sent by each client after they are bound + bytesRx [2]uint64 // num bytes received from/sent by each client after they are bound lamportID uint64 vni uint32 allocatedAt time.Time } -func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, serverDisco key.DiscoPublic) (write []byte, to netip.AddrPort) { +func blakeMACFromBindMsg(blakeKey [blake2s.Size]byte, src netip.AddrPort, msg disco.BindUDPRelayEndpointCommon) ([blake2s.Size]byte, error) { + input := make([]byte, 8, 4+4+32+18) // vni + generation + invited party disco key + addr:port + binary.BigEndian.PutUint32(input[0:4], msg.VNI) + binary.BigEndian.PutUint32(input[4:8], msg.Generation) + input = msg.RemoteKey.AppendTo(input) + input, err := src.AppendBinary(input) + if err != nil { + return [blake2s.Size]byte{}, err + } + h, err := blake2s.New256(blakeKey[:]) + if err != nil { + 
return [blake2s.Size]byte{}, err + } + _, err = h.Write(input) + if err != nil { + return [blake2s.Size]byte{}, err + } + var out [blake2s.Size]byte + h.Sum(out[:0]) + return out, nil +} + +func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, serverDisco key.DiscoPublic, macSecrets [][blake2s.Size]byte) (write []byte, to netip.AddrPort) { if senderIndex != 0 && senderIndex != 1 { return nil, netip.AddrPort{} } @@ -144,18 +170,11 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex // Generation must be nonzero, silently drop return nil, netip.AddrPort{} } - if e.handshakeGeneration[senderIndex] == discoMsg.Generation { - // we've seen this generation before, silently drop - return nil, netip.AddrPort{} - } - e.handshakeGeneration[senderIndex] = discoMsg.Generation - e.handshakeAddrPorts[senderIndex] = from + e.inProgressGeneration[senderIndex] = discoMsg.Generation m := new(disco.BindUDPRelayEndpointChallenge) m.VNI = e.vni m.Generation = discoMsg.Generation m.RemoteKey = e.discoPubKeys.Get()[otherSender] - rand.Read(e.challenge[senderIndex][:]) - copy(m.Challenge[:], e.challenge[senderIndex][:]) reply := make([]byte, packet.GeneveFixedHeaderLength, 512) gh := packet.GeneveHeader{Control: true, Protocol: packet.GeneveProtocolDisco} gh.VNI.Set(e.vni) @@ -165,6 +184,11 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex } reply = append(reply, disco.Magic...) reply = serverDisco.AppendTo(reply) + mac, err := blakeMACFromBindMsg(macSecrets[0], from, m.BindUDPRelayEndpointCommon) + if err != nil { + return nil, netip.AddrPort{} + } + m.Challenge = mac box := e.discoSharedSecrets[senderIndex].Seal(m.AppendMarshal(nil)) reply = append(reply, box...) 
return reply, from @@ -174,17 +198,29 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex // silently drop return nil, netip.AddrPort{} } - generation := e.handshakeGeneration[senderIndex] - if generation == 0 || // we have no active handshake - generation != discoMsg.Generation || // mismatching generation for the active handshake - e.handshakeAddrPorts[senderIndex] != from || // mismatching source for the active handshake - !bytes.Equal(e.challenge[senderIndex][:], discoMsg.Challenge[:]) { // mismatching answer for the active handshake + generation := e.inProgressGeneration[senderIndex] + if generation == 0 || // we have no in-progress handshake + generation != discoMsg.Generation { // mismatching generation for the in-progress handshake // silently drop return nil, netip.AddrPort{} } - // Handshake complete. Update the binding for this sender. - e.boundAddrPorts[senderIndex] = from - e.lastSeen[senderIndex] = time.Now() // record last seen as bound time + for _, macSecret := range macSecrets { + mac, err := blakeMACFromBindMsg(macSecret, from, discoMsg.BindUDPRelayEndpointCommon) + if err != nil { + // silently drop + return nil, netip.AddrPort{} + } + // Speed is favored over constant-time comparison here. The sender is + // already authenticated via disco. + if bytes.Equal(mac[:], discoMsg.Challenge[:]) { + // Handshake complete. Update the binding for this sender. 
+ e.boundAddrPorts[senderIndex] = from + e.lastSeen[senderIndex] = time.Now() // record last seen as bound time + e.inProgressGeneration[senderIndex] = 0 // reset to zero, which indicates there is no in-progress handshake + return nil, netip.AddrPort{} + } + } + // MAC does not match, silently drop return nil, netip.AddrPort{} default: // unexpected message types, silently drop @@ -192,7 +228,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex } } -func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic) (write []byte, to netip.AddrPort) { +func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic, macSecrets [][blake2s.Size]byte) (write []byte, to netip.AddrPort) { senderRaw, isDiscoMsg := disco.Source(b) if !isDiscoMsg { // Not a Disco message @@ -223,39 +259,29 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []by return nil, netip.AddrPort{} } - return e.handleDiscoControlMsg(from, senderIndex, discoMsg, serverDisco) + return e.handleDiscoControlMsg(from, senderIndex, discoMsg, serverDisco, macSecrets) } -func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeader, b []byte, serverDisco key.DiscoPublic) (write []byte, to netip.AddrPort) { - if !gh.Control { - if !e.isBound() { - // not a control packet, but serverEndpoint isn't bound - return nil, netip.AddrPort{} - } - switch { - case from == e.boundAddrPorts[0]: - e.lastSeen[0] = time.Now() - e.packetsRx[0]++ - e.bytesRx[0] += uint64(len(b)) - return b, e.boundAddrPorts[1] - case from == e.boundAddrPorts[1]: - e.lastSeen[1] = time.Now() - e.packetsRx[1]++ - e.bytesRx[1] += uint64(len(b)) - return b, e.boundAddrPorts[0] - default: - // unrecognized source - return nil, netip.AddrPort{} - } +func (e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now time.Time) (write []byte, to netip.AddrPort) { + if 
!e.isBound() { + // not a control packet, but serverEndpoint isn't bound + return nil, netip.AddrPort{} } - - if gh.Protocol != packet.GeneveProtocolDisco { - // control packet, but not Disco + switch { + case from == e.boundAddrPorts[0]: + e.lastSeen[0] = now + e.packetsRx[0]++ + e.bytesRx[0] += uint64(len(b)) + return b, e.boundAddrPorts[1] + case from == e.boundAddrPorts[1]: + e.lastSeen[1] = now + e.packetsRx[1]++ + e.bytesRx[1] += uint64(len(b)) + return b, e.boundAddrPorts[0] + default: + // unrecognized source return nil, netip.AddrPort{} } - - msg := b[packet.GeneveFixedHeaderLength:] - return e.handleSealedDiscoControlMsg(from, msg, serverDisco) } func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifetime time.Duration) bool { @@ -621,7 +647,35 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to n return nil, netip.AddrPort{} } - return e.handlePacket(from, gh, b, s.discoPublic) + now := time.Now() + if gh.Control { + if gh.Protocol != packet.GeneveProtocolDisco { + // control packet, but not Disco + return nil, netip.AddrPort{} + } + msg := b[packet.GeneveFixedHeaderLength:] + s.maybeRotateMACSecretLocked(now) + return e.handleSealedDiscoControlMsg(from, msg, s.discoPublic, s.macSecrets) + } + return e.handleDataPacket(from, b, now) +} + +func (s *Server) maybeRotateMACSecretLocked(now time.Time) { + if !s.macSecretRotatedAt.IsZero() && now.Sub(s.macSecretRotatedAt) < macSecretRotationInterval { + return + } + switch len(s.macSecrets) { + case 0: + s.macSecrets = make([][blake2s.Size]byte, 1, 2) + case 1: + s.macSecrets = append(s.macSecrets, [blake2s.Size]byte{}) + fallthrough + case 2: + s.macSecrets[1] = s.macSecrets[0] + } + rand.Read(s.macSecrets[0][:]) + s.macSecretRotatedAt = now + return } func (s *Server) packetReadLoop(readFromSocket, otherSocket batching.Conn, readFromSocketIsIPv4 bool) { diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 
6c3d616586bc9..582d4cf671918 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -5,6 +5,7 @@ package udprelay import ( "bytes" + "crypto/rand" "net" "net/netip" "testing" @@ -14,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "go4.org/mem" + "golang.org/x/crypto/blake2s" "tailscale.com/disco" "tailscale.com/net/packet" "tailscale.com/types/key" @@ -352,3 +354,117 @@ func TestServer_getNextVNILocked(t *testing.T) { _, err = s.getNextVNILocked() c.Assert(err, qt.IsNil) } + +func Test_blakeMACFromBindMsg(t *testing.T) { + var macSecret [blake2s.Size]byte + rand.Read(macSecret[:]) + src := netip.MustParseAddrPort("[2001:db8::1]:7") + + msgA := disco.BindUDPRelayEndpointCommon{ + VNI: 1, + Generation: 1, + RemoteKey: key.NewDisco().Public(), + Challenge: [32]byte{}, + } + macA, err := blakeMACFromBindMsg(macSecret, src, msgA) + if err != nil { + t.Fatal(err) + } + + msgB := msgA + msgB.VNI++ + macB, err := blakeMACFromBindMsg(macSecret, src, msgB) + if err != nil { + t.Fatal(err) + } + if macA == macB { + t.Fatalf("varying VNI input produced identical mac: %v", macA) + } + + msgC := msgA + msgC.Generation++ + macC, err := blakeMACFromBindMsg(macSecret, src, msgC) + if err != nil { + t.Fatal(err) + } + if macA == macC { + t.Fatalf("varying Generation input produced identical mac: %v", macA) + } + + msgD := msgA + msgD.RemoteKey = key.NewDisco().Public() + macD, err := blakeMACFromBindMsg(macSecret, src, msgD) + if err != nil { + t.Fatal(err) + } + if macA == macD { + t.Fatalf("varying RemoteKey input produced identical mac: %v", macA) + } + + msgE := msgA + msgE.Challenge = [32]byte{0x01} // challenge is not part of the MAC and should be ignored + macE, err := blakeMACFromBindMsg(macSecret, src, msgE) + if err != nil { + t.Fatal(err) + } + if macA != macE { + t.Fatalf("varying Challenge input produced varying mac: %v", macA) + } + + macSecretB := macSecret + macSecretB[0] ^= 0xFF + macF, err := 
blakeMACFromBindMsg(macSecretB, src, msgA) + if err != nil { + t.Fatal(err) + } + if macA == macF { + t.Fatalf("varying macSecret input produced identical mac: %v", macA) + } + + srcB := netip.AddrPortFrom(src.Addr(), src.Port()+1) + macG, err := blakeMACFromBindMsg(macSecret, srcB, msgA) + if err != nil { + t.Fatal(err) + } + if macA == macG { + t.Fatalf("varying src input produced identical mac: %v", macA) + } +} + +func Benchmark_blakeMACFromBindMsg(b *testing.B) { + var macSecret [blake2s.Size]byte + rand.Read(macSecret[:]) + src := netip.MustParseAddrPort("[2001:db8::1]:7") + msg := disco.BindUDPRelayEndpointCommon{ + VNI: 1, + Generation: 1, + RemoteKey: key.NewDisco().Public(), + Challenge: [32]byte{}, + } + b.ReportAllocs() + for b.Loop() { + _, err := blakeMACFromBindMsg(macSecret, src, msg) + if err != nil { + b.Fatal(err) + } + } +} + +func TestServer_maybeRotateMACSecretLocked(t *testing.T) { + s := &Server{} + start := time.Now() + s.maybeRotateMACSecretLocked(start) + qt.Assert(t, len(s.macSecrets), qt.Equals, 1) + macSecret := s.macSecrets[0] + s.maybeRotateMACSecretLocked(start.Add(macSecretRotationInterval - time.Nanosecond)) + qt.Assert(t, len(s.macSecrets), qt.Equals, 1) + qt.Assert(t, s.macSecrets[0], qt.Equals, macSecret) + s.maybeRotateMACSecretLocked(start.Add(macSecretRotationInterval)) + qt.Assert(t, len(s.macSecrets), qt.Equals, 2) + qt.Assert(t, s.macSecrets[1], qt.Equals, macSecret) + qt.Assert(t, s.macSecrets[0], qt.Not(qt.Equals), s.macSecrets[1]) + s.maybeRotateMACSecretLocked(s.macSecretRotatedAt.Add(macSecretRotationInterval)) + qt.Assert(t, macSecret, qt.Not(qt.Equals), s.macSecrets[0]) + qt.Assert(t, macSecret, qt.Not(qt.Equals), s.macSecrets[1]) + qt.Assert(t, s.macSecrets[0], qt.Not(qt.Equals), s.macSecrets[1]) +} From 7426eca163354e3f9b400c9dff9ad4c9be5c2d03 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Fri, 21 Nov 2025 15:29:25 -0800 Subject: [PATCH 0748/1093] cmd/tailscale,feature/relayserver,ipn: add 
relay-server-static-endpoints set flag Updates tailscale/corp#31489 Updates #17791 Signed-off-by: Jordan Whited --- cmd/tailscale/cli/set.go | 65 +++++++++++++------- cmd/tailscale/cli/up.go | 1 + feature/relayserver/relayserver.go | 25 ++++++-- feature/relayserver/relayserver_test.go | 82 ++++++++++++++++++++++--- ipn/ipn_clone.go | 70 +++++++++++---------- ipn/ipn_view.go | 76 +++++++++++++---------- ipn/prefs.go | 76 +++++++++++++---------- ipn/prefs_test.go | 21 +++++++ 8 files changed, 278 insertions(+), 138 deletions(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index cb3a07a6fe0ec..c2316580c0ea7 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -11,6 +11,7 @@ import ( "net/netip" "os/exec" "runtime" + "slices" "strconv" "strings" @@ -25,6 +26,7 @@ import ( "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/types/views" + "tailscale.com/util/set" "tailscale.com/version" ) @@ -43,29 +45,30 @@ Only settings explicitly mentioned will be set. 
There are no default values.`, } type setArgsT struct { - acceptRoutes bool - acceptDNS bool - exitNodeIP string - exitNodeAllowLANAccess bool - shieldsUp bool - runSSH bool - runWebClient bool - hostname string - advertiseRoutes string - advertiseDefaultRoute bool - advertiseConnector bool - opUser string - acceptedRisks string - profileName string - forceDaemon bool - updateCheck bool - updateApply bool - reportPosture bool - snat bool - statefulFiltering bool - sync bool - netfilterMode string - relayServerPort string + acceptRoutes bool + acceptDNS bool + exitNodeIP string + exitNodeAllowLANAccess bool + shieldsUp bool + runSSH bool + runWebClient bool + hostname string + advertiseRoutes string + advertiseDefaultRoute bool + advertiseConnector bool + opUser string + acceptedRisks string + profileName string + forceDaemon bool + updateCheck bool + updateApply bool + reportPosture bool + snat bool + statefulFiltering bool + sync bool + netfilterMode string + relayServerPort string + relayServerStaticEndpoints string } func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { @@ -88,6 +91,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") setf.BoolVar(&setArgs.sync, "sync", false, hidden+"actively sync configuration from the control plane (set to false only for network failure testing)") setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", "UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") + setf.StringVar(&setArgs.relayServerStaticEndpoints, "relay-server-static-endpoints", "", "static IP:port endpoints to advertise as candidates for relay connections (comma-separated, e.g. 
\"[2001:db8::1]:40000,192.0.2.1:40000\") or empty string to not advertise any static endpoints") ffcomplete.Flag(setf, "exit-node", func(args []string) ([]string, ffcomplete.ShellCompDirective, error) { st, err := localClient.Status(context.Background()) @@ -248,6 +252,21 @@ func runSet(ctx context.Context, args []string) (retErr error) { maskedPrefs.Prefs.RelayServerPort = ptr.To(int(uport)) } + if setArgs.relayServerStaticEndpoints != "" { + endpointsSet := make(set.Set[netip.AddrPort]) + endpointsSplit := strings.Split(setArgs.relayServerStaticEndpoints, ",") + for _, s := range endpointsSplit { + ap, err := netip.ParseAddrPort(s) + if err != nil { + return fmt.Errorf("failed to set relay server static endpoints: %q is not a valid IP:port", s) + } + endpointsSet.Add(ap) + } + endpoints := endpointsSet.Slice() + slices.SortFunc(endpoints, netip.AddrPort.Compare) + maskedPrefs.Prefs.RelayServerStaticEndpoints = endpoints + } + checkPrefs := curPrefs.Clone() checkPrefs.ApplyEdits(maskedPrefs) if err := localClient.CheckPrefs(ctx, checkPrefs); err != nil { diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 7f5b2e6b4a61d..72515400d8fa1 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -887,6 +887,7 @@ func init() { addPrefFlagMapping("report-posture", "PostureChecking") addPrefFlagMapping("relay-server-port", "RelayServerPort") addPrefFlagMapping("sync", "Sync") + addPrefFlagMapping("relay-server-static-endpoints", "RelayServerStaticEndpoints") } func addPrefFlagMapping(flagName string, prefNames ...string) { diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index b7457210f3154..e85576e50b9af 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -9,6 +9,7 @@ import ( "encoding/json" "fmt" "net/http" + "net/netip" "tailscale.com/disco" "tailscale.com/feature" @@ -23,6 +24,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" 
"tailscale.com/types/ptr" + "tailscale.com/types/views" "tailscale.com/util/eventbus" "tailscale.com/wgengine/magicsock" ) @@ -85,6 +87,7 @@ type relayServer interface { AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) GetSessions() []status.ServerSession SetDERPMapView(tailcfg.DERPMapView) + SetStaticAddrPorts(addrPorts views.Slice[netip.AddrPort]) } // extension is an [ipnext.Extension] managing the relay server on platforms @@ -95,12 +98,13 @@ type extension struct { ec *eventbus.Client respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] - mu syncs.Mutex // guards the following fields - shutdown bool // true if Shutdown() has been called - rs relayServer // nil when disabled - port *int // ipn.Prefs.RelayServerPort, nil if disabled - derpMapView tailcfg.DERPMapView // latest seen over the eventbus - hasNodeAttrDisableRelayServer bool // [tailcfg.NodeAttrDisableRelayServer] + mu syncs.Mutex // guards the following fields + shutdown bool // true if Shutdown() has been called + rs relayServer // nil when disabled + port *int // ipn.Prefs.RelayServerPort, nil if disabled + staticEndpoints views.Slice[netip.AddrPort] // ipn.Prefs.RelayServerStaticEndpoints + derpMapView tailcfg.DERPMapView // latest seen over the eventbus + hasNodeAttrDisableRelayServer bool // [tailcfg.NodeAttrDisableRelayServer] } // Name implements [ipnext.Extension]. @@ -186,6 +190,7 @@ func (e *extension) relayServerShouldBeRunningLocked() bool { // handleRelayServerLifetimeLocked handles the lifetime of [e.rs]. 
func (e *extension) handleRelayServerLifetimeLocked() { + defer e.handleRelayServerStaticAddrPortsLocked() if !e.relayServerShouldBeRunningLocked() { e.stopRelayServerLocked() return @@ -195,6 +200,13 @@ func (e *extension) handleRelayServerLifetimeLocked() { e.tryStartRelayServerLocked() } +func (e *extension) handleRelayServerStaticAddrPortsLocked() { + if e.rs != nil { + // TODO(jwhited): env var support + e.rs.SetStaticAddrPorts(e.staticEndpoints) + } +} + func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { e.mu.Lock() defer e.mu.Unlock() @@ -205,6 +217,7 @@ func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { e.mu.Lock() defer e.mu.Unlock() + e.staticEndpoints = prefs.RelayServerStaticEndpoints() newPort, ok := prefs.RelayServerPort().GetOk() enableOrDisableServer := ok != (e.port != nil) portChanged := ok && e.port != nil && newPort != *e.port diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 3d71c55d76dd5..d77d2df261410 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -5,7 +5,9 @@ package relayserver import ( "errors" + "net/netip" "reflect" + "slices" "testing" "tailscale.com/ipn" @@ -17,15 +19,21 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" + "tailscale.com/types/views" ) func Test_extension_profileStateChanged(t *testing.T) { prefsWithPortOne := ipn.Prefs{RelayServerPort: ptr.To(1)} prefsWithNilPort := ipn.Prefs{RelayServerPort: nil} + prefsWithPortOneRelayEndpoints := ipn.Prefs{ + RelayServerPort: ptr.To(1), + RelayServerStaticEndpoints: []netip.AddrPort{netip.MustParseAddrPort("127.0.0.1:7777")}, + } type fields struct { - port *int - rs relayServer + port *int + staticEndpoints views.Slice[netip.AddrPort] + rs relayServer } type args struct { prefs ipn.PrefsView @@ -38,6 
+46,7 @@ func Test_extension_profileStateChanged(t *testing.T) { wantPort *int wantRelayServerFieldNonNil bool wantRelayServerFieldMutated bool + wantEndpoints []netip.AddrPort }{ { name: "no changes non-nil port previously running", @@ -53,6 +62,52 @@ func Test_extension_profileStateChanged(t *testing.T) { wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: false, }, + { + name: "set addr ports unchanged port previously running", + fields: fields{ + port: ptr.To(1), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithPortOneRelayEndpoints.View(), + sameNode: true, + }, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: false, + wantEndpoints: prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints, + }, + { + name: "set addr ports not previously running", + fields: fields{ + port: nil, + rs: nil, + }, + args: args{ + prefs: prefsWithPortOneRelayEndpoints.View(), + sameNode: true, + }, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: true, + wantEndpoints: prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints, + }, + { + name: "clear addr ports unchanged port previously running", + fields: fields{ + port: ptr.To(1), + staticEndpoints: views.SliceOf(prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints), + rs: mockRelayServerNotZeroVal(), + }, + args: args{ + prefs: prefsWithPortOne.View(), + sameNode: true, + }, + wantPort: ptr.To(1), + wantRelayServerFieldNonNil: true, + wantRelayServerFieldMutated: false, + wantEndpoints: nil, + }, { name: "prefs port nil", fields: fields{ @@ -160,6 +215,7 @@ func Test_extension_profileStateChanged(t *testing.T) { return &mockRelayServer{}, nil } e.port = tt.fields.port + e.staticEndpoints = tt.fields.staticEndpoints e.rs = tt.fields.rs defer e.Shutdown() e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) @@ -174,24 +230,34 @@ func Test_extension_profileStateChanged(t *testing.T) { 
if tt.wantRelayServerFieldMutated != !reflect.DeepEqual(tt.fields.rs, e.rs) { t.Errorf("wantRelayServerFieldMutated: %v != !reflect.DeepEqual(tt.fields.rs, e.rs): %v", tt.wantRelayServerFieldMutated, !reflect.DeepEqual(tt.fields.rs, e.rs)) } + if !slices.Equal(tt.wantEndpoints, e.staticEndpoints.AsSlice()) { + t.Errorf("wantEndpoints: %v != %v", tt.wantEndpoints, e.staticEndpoints.AsSlice()) + } + if e.rs != nil && !slices.Equal(tt.wantEndpoints, e.rs.(*mockRelayServer).addrPorts.AsSlice()) { + t.Errorf("wantEndpoints: %v != %v", tt.wantEndpoints, e.rs.(*mockRelayServer).addrPorts.AsSlice()) + } }) } } func mockRelayServerNotZeroVal() *mockRelayServer { - return &mockRelayServer{true} + return &mockRelayServer{set: true} } type mockRelayServer struct { - set bool + set bool + addrPorts views.Slice[netip.AddrPort] } -func (mockRelayServer) Close() error { return nil } -func (mockRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (endpoint.ServerEndpoint, error) { +func (m *mockRelayServer) Close() error { return nil } +func (m *mockRelayServer) AllocateEndpoint(_, _ key.DiscoPublic) (endpoint.ServerEndpoint, error) { return endpoint.ServerEndpoint{}, errors.New("not implemented") } -func (mockRelayServer) GetSessions() []status.ServerSession { return nil } -func (mockRelayServer) SetDERPMapView(tailcfg.DERPMapView) { return } +func (m *mockRelayServer) GetSessions() []status.ServerSession { return nil } +func (m *mockRelayServer) SetDERPMapView(tailcfg.DERPMapView) { return } +func (m *mockRelayServer) SetStaticAddrPorts(aps views.Slice[netip.AddrPort]) { + m.addrPorts = aps +} type mockSafeBackend struct { sys *tsd.System diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 1be7161970726..fae85adee7e2b 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -64,46 +64,48 @@ func (src *Prefs) Clone() *Prefs { if dst.RelayServerPort != nil { dst.RelayServerPort = ptr.To(*src.RelayServerPort) } + dst.RelayServerStaticEndpoints = 
append(src.RelayServerStaticEndpoints[:0:0], src.RelayServerStaticEndpoints...) dst.Persist = src.Persist.Clone() return dst } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PrefsCloneNeedsRegeneration = Prefs(struct { - ControlURL string - RouteAll bool - ExitNodeID tailcfg.StableNodeID - ExitNodeIP netip.Addr - AutoExitNode ExitNodeExpression - InternalExitNodePrior tailcfg.StableNodeID - ExitNodeAllowLANAccess bool - CorpDNS bool - RunSSH bool - RunWebClient bool - WantRunning bool - LoggedOut bool - ShieldsUp bool - AdvertiseTags []string - Hostname string - NotepadURLs bool - ForceDaemon bool - Egg bool - AdvertiseRoutes []netip.Prefix - AdvertiseServices []string - Sync opt.Bool - NoSNAT bool - NoStatefulFiltering opt.Bool - NetfilterMode preftype.NetfilterMode - OperatorUser string - ProfileName string - AutoUpdate AutoUpdatePrefs - AppConnector AppConnectorPrefs - PostureChecking bool - NetfilterKind string - DriveShares []*drive.Share - RelayServerPort *int - AllowSingleHosts marshalAsTrueInJSON - Persist *persist.Persist + ControlURL string + RouteAll bool + ExitNodeID tailcfg.StableNodeID + ExitNodeIP netip.Addr + AutoExitNode ExitNodeExpression + InternalExitNodePrior tailcfg.StableNodeID + ExitNodeAllowLANAccess bool + CorpDNS bool + RunSSH bool + RunWebClient bool + WantRunning bool + LoggedOut bool + ShieldsUp bool + AdvertiseTags []string + Hostname string + NotepadURLs bool + ForceDaemon bool + Egg bool + AdvertiseRoutes []netip.Prefix + AdvertiseServices []string + Sync opt.Bool + NoSNAT bool + NoStatefulFiltering opt.Bool + NetfilterMode preftype.NetfilterMode + OperatorUser string + ProfileName string + AutoUpdate AutoUpdatePrefs + AppConnector AppConnectorPrefs + PostureChecking bool + NetfilterKind string + DriveShares []*drive.Share + RelayServerPort *int + RelayServerStaticEndpoints []netip.AddrPort + AllowSingleHosts marshalAsTrueInJSON + Persist *persist.Persist }{}) // 
Clone makes a deep copy of ServeConfig. diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index d3836416b7bd5..aac8cb4d7e953 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -448,6 +448,13 @@ func (v PrefsView) RelayServerPort() views.ValuePointer[int] { return views.ValuePointerOf(v.ж.RelayServerPort) } +// RelayServerStaticEndpoints are static IP:port endpoints to advertise as +// candidates for relay connections. Only relevant when RelayServerPort is +// non-nil. +func (v PrefsView) RelayServerStaticEndpoints() views.Slice[netip.AddrPort] { + return views.SliceOf(v.ж.RelayServerStaticEndpoints) +} + // AllowSingleHosts was a legacy field that was always true // for the past 4.5 years. It controlled whether Tailscale // peers got /32 or /128 routes for each other. @@ -468,40 +475,41 @@ func (v PrefsView) Persist() persist.PersistView { return v.ж.Persist.View() } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _PrefsViewNeedsRegeneration = Prefs(struct { - ControlURL string - RouteAll bool - ExitNodeID tailcfg.StableNodeID - ExitNodeIP netip.Addr - AutoExitNode ExitNodeExpression - InternalExitNodePrior tailcfg.StableNodeID - ExitNodeAllowLANAccess bool - CorpDNS bool - RunSSH bool - RunWebClient bool - WantRunning bool - LoggedOut bool - ShieldsUp bool - AdvertiseTags []string - Hostname string - NotepadURLs bool - ForceDaemon bool - Egg bool - AdvertiseRoutes []netip.Prefix - AdvertiseServices []string - Sync opt.Bool - NoSNAT bool - NoStatefulFiltering opt.Bool - NetfilterMode preftype.NetfilterMode - OperatorUser string - ProfileName string - AutoUpdate AutoUpdatePrefs - AppConnector AppConnectorPrefs - PostureChecking bool - NetfilterKind string - DriveShares []*drive.Share - RelayServerPort *int - AllowSingleHosts marshalAsTrueInJSON - Persist *persist.Persist + ControlURL string + RouteAll bool + ExitNodeID tailcfg.StableNodeID + ExitNodeIP netip.Addr + AutoExitNode ExitNodeExpression + 
InternalExitNodePrior tailcfg.StableNodeID + ExitNodeAllowLANAccess bool + CorpDNS bool + RunSSH bool + RunWebClient bool + WantRunning bool + LoggedOut bool + ShieldsUp bool + AdvertiseTags []string + Hostname string + NotepadURLs bool + ForceDaemon bool + Egg bool + AdvertiseRoutes []netip.Prefix + AdvertiseServices []string + Sync opt.Bool + NoSNAT bool + NoStatefulFiltering opt.Bool + NetfilterMode preftype.NetfilterMode + OperatorUser string + ProfileName string + AutoUpdate AutoUpdatePrefs + AppConnector AppConnectorPrefs + PostureChecking bool + NetfilterKind string + DriveShares []*drive.Share + RelayServerPort *int + RelayServerStaticEndpoints []netip.AddrPort + AllowSingleHosts marshalAsTrueInJSON + Persist *persist.Persist }{}) // View returns a read-only view of ServeConfig. diff --git a/ipn/prefs.go b/ipn/prefs.go index 7f8216c60f8e0..6f3cb65f83914 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -288,6 +288,11 @@ type Prefs struct { // non-nil/enabled. RelayServerPort *int `json:",omitempty"` + // RelayServerStaticEndpoints are static IP:port endpoints to advertise as + // candidates for relay connections. Only relevant when RelayServerPort is + // non-nil. + RelayServerStaticEndpoints []netip.AddrPort `json:",omitempty"` + // AllowSingleHosts was a legacy field that was always true // for the past 4.5 years. It controlled whether Tailscale // peers got /32 or /128 routes for each other. 
@@ -350,38 +355,39 @@ type AppConnectorPrefs struct { type MaskedPrefs struct { Prefs - ControlURLSet bool `json:",omitempty"` - RouteAllSet bool `json:",omitempty"` - ExitNodeIDSet bool `json:",omitempty"` - ExitNodeIPSet bool `json:",omitempty"` - AutoExitNodeSet bool `json:",omitempty"` - InternalExitNodePriorSet bool `json:",omitempty"` // Internal; can't be set by LocalAPI clients - ExitNodeAllowLANAccessSet bool `json:",omitempty"` - CorpDNSSet bool `json:",omitempty"` - RunSSHSet bool `json:",omitempty"` - RunWebClientSet bool `json:",omitempty"` - WantRunningSet bool `json:",omitempty"` - LoggedOutSet bool `json:",omitempty"` - ShieldsUpSet bool `json:",omitempty"` - AdvertiseTagsSet bool `json:",omitempty"` - HostnameSet bool `json:",omitempty"` - NotepadURLsSet bool `json:",omitempty"` - ForceDaemonSet bool `json:",omitempty"` - EggSet bool `json:",omitempty"` - AdvertiseRoutesSet bool `json:",omitempty"` - AdvertiseServicesSet bool `json:",omitempty"` - SyncSet bool `json:",omitzero"` - NoSNATSet bool `json:",omitempty"` - NoStatefulFilteringSet bool `json:",omitempty"` - NetfilterModeSet bool `json:",omitempty"` - OperatorUserSet bool `json:",omitempty"` - ProfileNameSet bool `json:",omitempty"` - AutoUpdateSet AutoUpdatePrefsMask `json:",omitzero"` - AppConnectorSet bool `json:",omitempty"` - PostureCheckingSet bool `json:",omitempty"` - NetfilterKindSet bool `json:",omitempty"` - DriveSharesSet bool `json:",omitempty"` - RelayServerPortSet bool `json:",omitempty"` + ControlURLSet bool `json:",omitempty"` + RouteAllSet bool `json:",omitempty"` + ExitNodeIDSet bool `json:",omitempty"` + ExitNodeIPSet bool `json:",omitempty"` + AutoExitNodeSet bool `json:",omitempty"` + InternalExitNodePriorSet bool `json:",omitempty"` // Internal; can't be set by LocalAPI clients + ExitNodeAllowLANAccessSet bool `json:",omitempty"` + CorpDNSSet bool `json:",omitempty"` + RunSSHSet bool `json:",omitempty"` + RunWebClientSet bool `json:",omitempty"` + WantRunningSet bool 
`json:",omitempty"` + LoggedOutSet bool `json:",omitempty"` + ShieldsUpSet bool `json:",omitempty"` + AdvertiseTagsSet bool `json:",omitempty"` + HostnameSet bool `json:",omitempty"` + NotepadURLsSet bool `json:",omitempty"` + ForceDaemonSet bool `json:",omitempty"` + EggSet bool `json:",omitempty"` + AdvertiseRoutesSet bool `json:",omitempty"` + AdvertiseServicesSet bool `json:",omitempty"` + SyncSet bool `json:",omitzero"` + NoSNATSet bool `json:",omitempty"` + NoStatefulFilteringSet bool `json:",omitempty"` + NetfilterModeSet bool `json:",omitempty"` + OperatorUserSet bool `json:",omitempty"` + ProfileNameSet bool `json:",omitempty"` + AutoUpdateSet AutoUpdatePrefsMask `json:",omitzero"` + AppConnectorSet bool `json:",omitempty"` + PostureCheckingSet bool `json:",omitempty"` + NetfilterKindSet bool `json:",omitempty"` + DriveSharesSet bool `json:",omitempty"` + RelayServerPortSet bool `json:",omitempty"` + RelayServerStaticEndpointsSet bool `json:",omitzero"` } // SetsInternal reports whether mp has any of the Internal*Set field bools set @@ -621,6 +627,9 @@ func (p *Prefs) pretty(goos string) string { if buildfeatures.HasRelayServer && p.RelayServerPort != nil { fmt.Fprintf(&sb, "relayServerPort=%d ", *p.RelayServerPort) } + if buildfeatures.HasRelayServer && len(p.RelayServerStaticEndpoints) > 0 { + fmt.Fprintf(&sb, "relayServerStaticEndpoints=%v ", p.RelayServerStaticEndpoints) + } if p.Persist != nil { sb.WriteString(p.Persist.Pretty()) } else { @@ -685,7 +694,8 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.PostureChecking == p2.PostureChecking && slices.EqualFunc(p.DriveShares, p2.DriveShares, drive.SharesEqual) && p.NetfilterKind == p2.NetfilterKind && - compareIntPtrs(p.RelayServerPort, p2.RelayServerPort) + compareIntPtrs(p.RelayServerPort, p2.RelayServerPort) && + slices.Equal(p.RelayServerStaticEndpoints, p2.RelayServerStaticEndpoints) } func (au AutoUpdatePrefs) Pretty() string { diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 
7c9c3ef43f7df..cf07507062ab3 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -69,6 +69,7 @@ func TestPrefsEqual(t *testing.T) { "NetfilterKind", "DriveShares", "RelayServerPort", + "RelayServerStaticEndpoints", "AllowSingleHosts", "Persist", } @@ -90,6 +91,16 @@ func TestPrefsEqual(t *testing.T) { } return ns } + aps := func(strs ...string) (ret []netip.AddrPort) { + for _, s := range strs { + n, err := netip.ParseAddrPort(s) + if err != nil { + panic(err) + } + ret = append(ret, n) + } + return ret + } tests := []struct { a, b *Prefs want bool @@ -369,6 +380,16 @@ func TestPrefsEqual(t *testing.T) { &Prefs{RelayServerPort: relayServerPort(1)}, false, }, + { + &Prefs{RelayServerStaticEndpoints: aps("[2001:db8::1]:40000", "192.0.2.1:40000")}, + &Prefs{RelayServerStaticEndpoints: aps("[2001:db8::1]:40000", "192.0.2.1:40000")}, + true, + }, + { + &Prefs{RelayServerStaticEndpoints: aps("[2001:db8::1]:40000", "192.0.2.2:40000")}, + &Prefs{RelayServerStaticEndpoints: aps("[2001:db8::1]:40000", "192.0.2.1:40000")}, + false, + }, } for i, tt := range tests { got := tt.a.Equals(tt.b) From 9c3a2aa79789542262ebae3c3d224da16dc61dbb Mon Sep 17 00:00:00 2001 From: Simon Law Date: Mon, 24 Nov 2025 17:42:58 -0800 Subject: [PATCH 0749/1093] ipn/ipnlocal: replace log.Printf with logf (#18045) Updates #cleanup Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 19 +++++++++++-------- ipn/ipnlocal/local_test.go | 5 ++++- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 72b2303273243..3665999e8d843 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -14,7 +14,6 @@ import ( "errors" "fmt" "io" - "log" "math" "math/rand/v2" "net" @@ -544,7 +543,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo netMon := sys.NetMon.Get() b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker.Get(), sys.Bus.Get()) if err != nil 
{ - log.Printf("error setting up sockstat logger: %v", err) + logf("error setting up sockstat logger: %v", err) } // Enable sockstats logs only on non-mobile unstable builds if version.IsUnstableBuild() && !version.IsMobile() && b.sockstatLogger != nil { @@ -7259,7 +7258,12 @@ func (b *LocalBackend) refreshAllowedSuggestions() { } b.allowedSuggestedExitNodesMu.Lock() defer b.allowedSuggestedExitNodesMu.Unlock() - b.allowedSuggestedExitNodes = fillAllowedSuggestions(b.polc) + + var err error + b.allowedSuggestedExitNodes, err = fillAllowedSuggestions(b.polc) + if err != nil { + b.logf("error refreshing allowed suggestions: %v", err) + } } // selectRegionFunc returns a DERP region from the slice of candidate regions. @@ -7271,20 +7275,19 @@ type selectRegionFunc func(views.Slice[int]) int // choice. type selectNodeFunc func(nodes views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView -func fillAllowedSuggestions(polc policyclient.Client) set.Set[tailcfg.StableNodeID] { +func fillAllowedSuggestions(polc policyclient.Client) (set.Set[tailcfg.StableNodeID], error) { nodes, err := polc.GetStringArray(pkey.AllowedSuggestedExitNodes, nil) if err != nil { - log.Printf("fillAllowedSuggestions: unable to look up %q policy: %v", pkey.AllowedSuggestedExitNodes, err) - return nil + return nil, fmt.Errorf("fillAllowedSuggestions: unable to look up %q policy: %w", pkey.AllowedSuggestedExitNodes, err) } if nodes == nil { - return nil + return nil, nil } s := make(set.Set[tailcfg.StableNodeID], len(nodes)) for _, n := range nodes { s.Add(tailcfg.StableNodeID(n)) } - return s + return s, nil } // suggestExitNode returns a suggestion for reasonably good exit node based on diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index f17fabb60f5fa..3da014fd6b058 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5590,7 +5590,10 @@ func TestFillAllowedSuggestions(t *testing.T) { var pol policytest.Config 
pol.Set(pkey.AllowedSuggestedExitNodes, tt.allowPolicy) - got := fillAllowedSuggestions(pol) + got, err := fillAllowedSuggestions(pol) + if err != nil { + t.Fatal(err) + } if got == nil { if tt.want == nil { return From d4821cdc2f49094a933e4379fec1fd140bcc958c Mon Sep 17 00:00:00 2001 From: David Bond Date: Tue, 25 Nov 2025 12:41:39 +0000 Subject: [PATCH 0750/1093] cmd/k8s-operator: allow HA ingresses to be deleted when VIP service does not exist (#18050) This commit fixes a bug in our HA ingress reconciler where ingress resources would be stuck in a deleting state should their associated VIP service be deleted within control. The reconciliation loop would check for the existence of the VIP service and if not found perform no additional cleanup steps. The code has been modified to continue onwards even if the VIP service is not found. Fixes: https://github.com/tailscale/tailscale/issues/18049 Signed-off-by: David Bond --- cmd/k8s-operator/api-server-proxy-pg_test.go | 8 +-- cmd/k8s-operator/ingress-for-pg.go | 15 ++-- cmd/k8s-operator/ingress-for-pg_test.go | 74 ++++++++++++++++---- 3 files changed, 71 insertions(+), 26 deletions(-) diff --git a/cmd/k8s-operator/api-server-proxy-pg_test.go b/cmd/k8s-operator/api-server-proxy-pg_test.go index dfef63f22ff04..dee5057236675 100644 --- a/cmd/k8s-operator/api-server-proxy-pg_test.go +++ b/cmd/k8s-operator/api-server-proxy-pg_test.go @@ -182,9 +182,7 @@ func TestAPIServerProxyReconciler(t *testing.T) { expectEqual(t, fc, certSecretRoleBinding(pg, ns, defaultDomain)) // Simulate certs being issued; should observe AdvertiseServices config change. 
- if err := populateTLSSecret(t.Context(), fc, pgName, defaultDomain); err != nil { - t.Fatalf("populating TLS Secret: %v", err) - } + populateTLSSecret(t, fc, pgName, defaultDomain) expectReconciled(t, r, "", pgName) expectedCfg.AdvertiseServices = []string{"svc:" + pgName} @@ -247,9 +245,7 @@ func TestAPIServerProxyReconciler(t *testing.T) { expectMissing[rbacv1.RoleBinding](t, fc, ns, defaultDomain) // Check we get the new hostname in the status once ready. - if err := populateTLSSecret(t.Context(), fc, pgName, updatedDomain); err != nil { - t.Fatalf("populating TLS Secret: %v", err) - } + populateTLSSecret(t, fc, pgName, updatedDomain) mustUpdate(t, fc, "operator-ns", "test-pg-0", func(s *corev1.Secret) { s.Data["profile-foo"] = []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`) }) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 4d831180578eb..460a1914ee799 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -29,6 +29,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -504,10 +505,7 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, logger.Infof("Ensuring that Tailscale Service %q configuration is cleaned up", hostname) serviceName := tailcfg.ServiceName("svc:" + hostname) svc, err := r.tsClient.GetVIPService(ctx, serviceName) - if err != nil { - if isErrorTailscaleServiceNotFound(err) { - return false, nil - } + if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service: %w", err) } @@ -713,10 +711,15 @@ func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc * } if len(o.OwnerRefs) == 1 { logger.Infof("Deleting Tailscale Service %q", svc.Name) - return false, 
r.tsClient.DeleteVIPService(ctx, svc.Name) + if err = r.tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { + return false, err + } + + return false, nil } + o.OwnerRefs = slices.Delete(o.OwnerRefs, ix, ix+1) - logger.Infof("Deleting Tailscale Service %q", svc.Name) + logger.Infof("Creating/Updating Tailscale Service %q", svc.Name) json, err := json.Marshal(o) if err != nil { return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 77e5ecb37a677..5cc806ad1bf7a 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -25,6 +25,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -67,7 +68,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify initial reconciliation expectReconciled(t, ingPGR, "default", "test-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) @@ -89,7 +90,7 @@ func TestIngressPGReconciler(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") // Verify Tailscale Service uses custom tags - tsSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") + tsSvc, err := ft.GetVIPService(t.Context(), "svc:my-svc") if err != nil { t.Fatalf("getting Tailscale Service: %v", err) } @@ -134,7 +135,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify second Ingress reconciliation expectReconciled(t, ingPGR, "default", "my-other-ingress") - 
populateTLSSecret(context.Background(), fc, "test-pg", "my-other-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "my-other-svc.ts.net") expectReconciled(t, ingPGR, "default", "my-other-ingress") verifyServeConfig(t, fc, "svc:my-other-svc", false) verifyTailscaleService(t, ft, "svc:my-other-svc", []string{"tcp:443"}) @@ -151,14 +152,14 @@ func TestIngressPGReconciler(t *testing.T) { verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:my-svc", "svc:my-other-svc"}) // Delete second Ingress - if err := fc.Delete(context.Background(), ing2); err != nil { + if err := fc.Delete(t.Context(), ing2); err != nil { t.Fatalf("deleting second Ingress: %v", err) } expectReconciled(t, ingPGR, "default", "my-other-ingress") // Verify second Ingress cleanup cm := &corev1.ConfigMap{} - if err := fc.Get(context.Background(), types.NamespacedName{ + if err := fc.Get(t.Context(), types.NamespacedName{ Name: "test-pg-ingress-config", Namespace: "operator-ns", }, cm); err != nil { @@ -199,7 +200,7 @@ func TestIngressPGReconciler(t *testing.T) { expectEqual(t, fc, certSecretRoleBinding(pg, "operator-ns", "my-svc.ts.net")) // Delete the first Ingress and verify cleanup - if err := fc.Delete(context.Background(), ing); err != nil { + if err := fc.Delete(t.Context(), ing); err != nil { t.Fatalf("deleting Ingress: %v", err) } @@ -207,7 +208,7 @@ func TestIngressPGReconciler(t *testing.T) { // Verify the ConfigMap was cleaned up cm = &corev1.ConfigMap{} - if err := fc.Get(context.Background(), types.NamespacedName{ + if err := fc.Get(t.Context(), types.NamespacedName{ Name: "test-pg-second-ingress-config", Namespace: "operator-ns", }, cm); err != nil { @@ -228,6 +229,47 @@ func TestIngressPGReconciler(t *testing.T) { expectMissing[corev1.Secret](t, fc, "operator-ns", "my-svc.ts.net") expectMissing[rbacv1.Role](t, fc, "operator-ns", "my-svc.ts.net") expectMissing[rbacv1.RoleBinding](t, fc, "operator-ns", "my-svc.ts.net") + + // Create a third ingress + ing3 := &networkingv1.Ingress{ + 
TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-other-ingress", + Namespace: "default", + UID: types.UID("5678-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-other-svc.tailnetxyz.ts.net"}}, + }, + }, + } + + mustCreate(t, fc, ing3) + expectReconciled(t, ingPGR, ing3.Namespace, ing3.Name) + + // Delete the service from "control" + ft.vipServices = make(map[tailcfg.ServiceName]*tailscale.VIPService) + + // Delete the ingress and confirm we don't get stuck due to the VIP service not existing. + if err = fc.Delete(t.Context(), ing3); err != nil { + t.Fatalf("deleting Ingress: %v", err) + } + + expectReconciled(t, ingPGR, ing3.Namespace, ing3.Name) + expectMissing[networkingv1.Ingress](t, fc, ing3.Namespace, ing3.Name) } func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { @@ -262,7 +304,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { // Verify initial reconciliation expectReconciled(t, ingPGR, "default", "test-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) @@ -273,13 +315,13 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { ing.Spec.TLS[0].Hosts[0] = "updated-svc" }) expectReconciled(t, ingPGR, "default", "test-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "updated-svc.ts.net") + populateTLSSecret(t, fc, 
"test-pg", "updated-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:updated-svc", false) verifyTailscaleService(t, ft, "svc:updated-svc", []string{"tcp:443"}) verifyTailscaledConfig(t, fc, "test-pg", []string{"svc:updated-svc"}) - _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName("svc:my-svc")) + _, err := ft.GetVIPService(context.Background(), "svc:my-svc") if err == nil { t.Fatalf("svc:my-svc not cleaned up") } @@ -500,7 +542,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { // Verify initial reconciliation with HTTP enabled expectReconciled(t, ingPGR, "default", "test-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"}) verifyServeConfig(t, fc, "svc:my-svc", true) @@ -717,7 +759,9 @@ func TestOwnerAnnotations(t *testing.T) { } } -func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain string) error { +func populateTLSSecret(t *testing.T, c client.Client, pgName, domain string) { + t.Helper() + secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: domain, @@ -736,10 +780,12 @@ func populateTLSSecret(ctx context.Context, c client.Client, pgName, domain stri }, } - _, err := createOrUpdate(ctx, c, "operator-ns", secret, func(s *corev1.Secret) { + _, err := createOrUpdate(t.Context(), c, "operator-ns", secret, func(s *corev1.Secret) { s.Data = secret.Data }) - return err + if err != nil { + t.Fatalf("failed to populate TLS secret: %v", err) + } } func verifyTailscaleService(t *testing.T, ft *fakeTSClient, serviceName string, wantPorts []string) { From 7073f246d3e94a849d20420eaff69d7be7e494b7 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Tue, 25 Nov 2025 08:58:36 -0600 Subject: [PATCH 0751/1093] ipn/ipnlocal: do not call 
controlclient.Client.Shutdown with b.mu held This fixes a regression in #17804 that caused a deadlock. Updates #18052 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 3665999e8d843..8cdfa0608f1d1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -944,12 +944,12 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { // down, clients switch over to other replicas whilst the existing connections are kept alive for some period of time. func (b *LocalBackend) DisconnectControl() { b.mu.Lock() - defer b.mu.Unlock() cc := b.resetControlClientLocked() - if cc == nil { - return + b.mu.Unlock() + + if cc != nil { + cc.Shutdown() } - cc.Shutdown() } // linkChange is our network monitor callback, called whenever the network changes. @@ -2408,7 +2408,8 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { var clientToShutdown controlclient.Client defer func() { if clientToShutdown != nil { - clientToShutdown.Shutdown() + // Shutdown outside of b.mu to avoid deadlocks. + b.goTracker.Go(clientToShutdown.Shutdown) } }() @@ -6891,7 +6892,8 @@ func (b *LocalBackend) resetForProfileChangeLocked() error { // Reset the NetworkMap in the engine b.e.SetNetworkMap(new(netmap.NetworkMap)) if prevCC := b.resetControlClientLocked(); prevCC != nil { - defer prevCC.Shutdown() + // Shutdown outside of b.mu to avoid deadlocks. + b.goTracker.Go(prevCC.Shutdown) } // TKA errors should not prevent resetting the backend state. // However, we should still return the error to the caller. @@ -6972,7 +6974,8 @@ func (b *LocalBackend) ResetAuth() error { defer b.mu.Unlock() if prevCC := b.resetControlClientLocked(); prevCC != nil { - defer prevCC.Shutdown() + // Shutdown outside of b.mu to avoid deadlocks. 
+ b.goTracker.Go(prevCC.Shutdown) } if err := b.clearMachineKeyLocked(); err != nil { return err From 848978e664aebb28e86c17bdad4b048f981079df Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 25 Nov 2025 09:21:55 -0800 Subject: [PATCH 0752/1093] ipn/ipnlocal: test traffic-steering when feature is not enabled (#17997) In PR tailscale/corp#34401, the `traffic-steering` feature flag does not automatically enable traffic steering for all nodes. Instead, an admin must add the `traffic-steering` node attribute to each client node that they want opted-in. For backwards compatibility with older clients, tailscale/corp#34401 strips out the `traffic-steering` node attribute if the feature flag is not enabled, even if it is set in the policy file. This lets us safely disable the feature flag. This PR adds a missing test case for suggested exit nodes that have no priority. Updates tailscale/corp#34399 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 3 +++ ipn/ipnlocal/local_test.go | 20 ++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 8cdfa0608f1d1..defa558ed2976 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7301,6 +7301,9 @@ func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion ta // The traffic-steering feature flag is enabled on this tailnet. 
return suggestExitNodeUsingTrafficSteering(nb, allowList) default: + // The control plane will always strip the `traffic-steering` + // node attribute if it isn’t enabled for this tailnet, even if + // it is set in the policy file: tailscale/corp#34401 return suggestExitNodeUsingDERP(report, nb, prevSuggestion, selectRegion, selectNode, allowList) } } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 3da014fd6b058..68bb2618c278e 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -5173,6 +5173,26 @@ func TestSuggestExitNodeTrafficSteering(t *testing.T) { wantID: "stable3", wantName: "peer3", }, + { + name: "exit-nodes-without-priority-for-suggestions", + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + Peers: []tailcfg.NodeView{ + makePeer(1, + withExitRoutes(), + withSuggest()), + makePeer(2, + withExitRoutes(), + withSuggest()), + makePeer(3, + withExitRoutes(), + withLocationPriority(1)), + }, + }, + wantID: "stable1", + wantName: "peer1", + wantPri: 0, + }, { name: "exit-nodes-with-and-without-priority", netMap: &netmap.NetworkMap{ From ac0b15356d25c011e0b9f060c06d0f9b87973721 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 29 Sep 2025 12:17:52 -0700 Subject: [PATCH 0753/1093] tailcfg, control/controlclient: start moving MapResponse.DefaultAutoUpdate to a nodeattr And fix up the TestAutoUpdateDefaults integration tests as they weren't testing reality: the DefaultAutoUpdate is supposed to only be relevant on the first MapResponse in the stream, but the tests weren't testing that. They were instead injecting a 2nd+ MapResponse. This changes the test control server to add a hook to modify the first map response, and then makes the test control when the node goes up and down to make new map responses. Also, the test now runs on macOS where the auto-update feature being disabled would've previously t.Skipped the whole test. 
Updates #11502 Change-Id: If2319bd1f71e108b57d79fe500b2acedbc76e1a6 Signed-off-by: Brad Fitzpatrick --- cmd/vet/jsontags_allowlist | 2 +- control/controlclient/direct.go | 14 ++- feature/feature.go | 15 +++ feature/hooks.go | 9 ++ tailcfg/tailcfg.go | 23 +++- tstest/integration/integration.go | 4 + tstest/integration/integration_test.go | 112 ++++++++++++------ tstest/integration/testcontrol/testcontrol.go | 9 ++ types/netmap/nodemut.go | 2 +- 9 files changed, 147 insertions(+), 43 deletions(-) diff --git a/cmd/vet/jsontags_allowlist b/cmd/vet/jsontags_allowlist index 060a81b053865..9526f44ef9d9a 100644 --- a/cmd/vet/jsontags_allowlist +++ b/cmd/vet/jsontags_allowlist @@ -107,7 +107,7 @@ OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.ClientVersion OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.CollectServices OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.ControlDialPlan OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.Debug -OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DefaultAutoUpdate +OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DeprecatedDefaultAutoUpdate OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DERPMap OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.DNSConfig OmitEmptyShouldBeOmitZero tailscale.com/tailcfg.MapResponse.Node diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 006a801eff505..d5cd6a13e5120 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1184,7 +1184,19 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap metricMapResponseKeepAlives.Add(1) continue } - if au, ok := resp.DefaultAutoUpdate.Get(); ok { + + // DefaultAutoUpdate in its CapMap and deprecated top-level field forms. 
+ if self := resp.Node; self != nil { + for _, v := range self.CapMap[tailcfg.NodeAttrDefaultAutoUpdate] { + switch v { + case "true", "false": + c.autoUpdatePub.Publish(AutoUpdate{c.controlClientID, v == "true"}) + default: + c.logf("netmap: [unexpected] unknown %s in CapMap: %q", tailcfg.NodeAttrDefaultAutoUpdate, v) + } + } + } + if au, ok := resp.DeprecatedDefaultAutoUpdate.Get(); ok { c.autoUpdatePub.Publish(AutoUpdate{c.controlClientID, au}) } diff --git a/feature/feature.go b/feature/feature.go index 110b104daae00..48a4aff43b84d 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -7,6 +7,8 @@ package feature import ( "errors" "reflect" + + "tailscale.com/util/testenv" ) var ErrUnavailable = errors.New("feature not included in this build") @@ -55,6 +57,19 @@ func (h *Hook[Func]) Set(f Func) { h.ok = true } +// SetForTest sets the hook function for tests, blowing +// away any previous value. It will panic if called from +// non-test code. +// +// It returns a restore function that resets the hook +// to its previous value. +func (h *Hook[Func]) SetForTest(f Func) (restore func()) { + testenv.AssertInTest() + old := *h + h.f, h.ok = f, true + return func() { *h = old } +} + // Get returns the hook function, or panics if it hasn't been set. // Use IsSet to check if it's been set, or use GetOrNil if you're // okay with a nil return value. diff --git a/feature/hooks.go b/feature/hooks.go index a3c6c0395ee81..7e31061a7eaac 100644 --- a/feature/hooks.go +++ b/feature/hooks.go @@ -6,6 +6,8 @@ package feature import ( "net/http" "net/url" + "os" + "sync" "tailscale.com/types/logger" "tailscale.com/types/persist" @@ -15,9 +17,16 @@ import ( // to conditionally initialize. 
var HookCanAutoUpdate Hook[func() bool] +var testAllowAutoUpdate = sync.OnceValue(func() bool { + return os.Getenv("TS_TEST_ALLOW_AUTO_UPDATE") == "1" +}) + // CanAutoUpdate reports whether the current binary is built with auto-update // support and, if so, whether the current platform supports it. func CanAutoUpdate() bool { + if testAllowAutoUpdate() { + return true + } if f, ok := HookCanAutoUpdate.GetOk(); ok { return f() } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 41e0a0b284c44..8468aa09efb3e 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -177,7 +177,8 @@ type CapabilityVersion int // - 128: 2025-10-02: can handle C2N /debug/health. // - 129: 2025-10-04: Fixed sleep/wake deadlock in magicsock when using peer relay (PR #17449) // - 130: 2025-10-06: client can send key.HardwareAttestationPublic and key.HardwareAttestationKeySignature in MapRequest -const CurrentCapabilityVersion CapabilityVersion = 130 +// - 131: 2025-11-25: client respects [NodeAttrDefaultAutoUpdate] +const CurrentCapabilityVersion CapabilityVersion = 131 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2149,12 +2150,14 @@ type MapResponse struct { // or nothing to report. ClientVersion *ClientVersion `json:",omitempty"` - // DefaultAutoUpdate is the default node auto-update setting for this + // DeprecatedDefaultAutoUpdate is the default node auto-update setting for this // tailnet. The node is free to opt-in or out locally regardless of this - // value. This value is only used on first MapResponse from control, the - // auto-update setting doesn't change if the tailnet admin flips the - // default after the node registered. - DefaultAutoUpdate opt.Bool `json:",omitempty"` + // value. Once this value has been set and stored in the client, future + // changes from the control plane are ignored. + // + // Deprecated: use NodeAttrDefaultAutoUpdate instead. See + // https://github.com/tailscale/tailscale/issues/11502. 
+ DeprecatedDefaultAutoUpdate opt.Bool `json:"DefaultAutoUpdate,omitempty"` } // DisplayMessage represents a health state of the node from the control plane's @@ -2721,6 +2724,14 @@ const ( // default behavior is to trust the control plane when it claims that a // node is no longer online, but that is not a reliable signal. NodeAttrClientSideReachability = "client-side-reachability" + + // NodeAttrDefaultAutoUpdate advertises the default node auto-update setting + // for this tailnet. The node is free to opt-in or out locally regardless of + // this value. Once this has been set and stored in the client, future + // changes from the control plane are ignored. + // + // The value of the key in [NodeCapMap] is a JSON boolean. + NodeAttrDefaultAutoUpdate NodeCapability = "default-auto-update" ) // SetDNSRequest is a request to add a DNS record. diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 6700205cf8f55..ea5747b7d5a1d 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -576,6 +576,7 @@ type TestNode struct { stateFile string upFlagGOOS string // if non-empty, sets TS_DEBUG_UP_FLAG_GOOS for cmd/tailscale CLI encryptState bool + allowUpdates bool mu sync.Mutex onLogLine []func([]byte) @@ -840,6 +841,9 @@ func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { "TS_DISABLE_PORTMAPPER=1", // shouldn't be needed; test is all localhost "TS_DEBUG_LOG_RATE=all", ) + if n.allowUpdates { + cmd.Env = append(cmd.Env, "TS_TEST_ALLOW_AUTO_UPDATE=1") + } if n.env.loopbackPort != nil { cmd.Env = append(cmd.Env, "TS_DEBUG_NETSTACK_LOOPBACK_PORT="+strconv.Itoa(*n.env.loopbackPort)) } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 543dc125c251c..3739a3011c4a2 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -25,6 +25,7 @@ import ( "slices" "strconv" "strings" + "sync" "sync/atomic" "testing" 
"time" @@ -1412,14 +1413,27 @@ func TestLogoutRemovesAllPeers(t *testing.T) { wantNode0PeerCount(expectedPeers) // all existing peers and the new node } -func TestAutoUpdateDefaults(t *testing.T) { - if !feature.CanAutoUpdate() { - t.Skip("auto-updates not supported on this platform") - } +func TestAutoUpdateDefaults(t *testing.T) { testAutoUpdateDefaults(t, false) } +func TestAutoUpdateDefaults_cap(t *testing.T) { testAutoUpdateDefaults(t, true) } + +// useCap is whether to use NodeAttrDefaultAutoUpdate (as opposed to the old +// DeprecatedDefaultAutoUpdate top-level MapResponse field). +func testAutoUpdateDefaults(t *testing.T, useCap bool) { + t.Cleanup(feature.HookCanAutoUpdate.SetForTest(func() bool { return true })) + tstest.Shard(t) - tstest.Parallel(t) env := NewTestEnv(t) + var ( + modifyMu sync.Mutex + modifyFirstMapResponse = func(*tailcfg.MapResponse, *tailcfg.MapRequest) {} + ) + env.Control.ModifyFirstMapResponse = func(mr *tailcfg.MapResponse, req *tailcfg.MapRequest) { + modifyMu.Lock() + defer modifyMu.Unlock() + modifyFirstMapResponse(mr, req) + } + checkDefault := func(n *TestNode, want bool) error { enabled, ok := n.diskPrefs().AutoUpdate.Apply.Get() if !ok { @@ -1431,17 +1445,23 @@ func TestAutoUpdateDefaults(t *testing.T) { return nil } - sendAndCheckDefault := func(t *testing.T, n *TestNode, send, want bool) { - t.Helper() - if !env.Control.AddRawMapResponse(n.MustStatus().Self.PublicKey, &tailcfg.MapResponse{ - DefaultAutoUpdate: opt.NewBool(send), - }) { - t.Fatal("failed to send MapResponse to node") - } - if err := tstest.WaitFor(2*time.Second, func() error { - return checkDefault(n, want) - }); err != nil { - t.Fatal(err) + setDefaultAutoUpdate := func(send bool) { + modifyMu.Lock() + defer modifyMu.Unlock() + modifyFirstMapResponse = func(mr *tailcfg.MapResponse, req *tailcfg.MapRequest) { + if mr.Node == nil { + mr.Node = &tailcfg.Node{} + } + if useCap { + if mr.Node.CapMap == nil { + mr.Node.CapMap = make(tailcfg.NodeCapMap) + } + 
mr.Node.CapMap[tailcfg.NodeAttrDefaultAutoUpdate] = []tailcfg.RawMessage{ + tailcfg.RawMessage(fmt.Sprintf("%t", send)), + } + } else { + mr.DeprecatedDefaultAutoUpdate = opt.NewBool(send) + } } } @@ -1452,29 +1472,54 @@ func TestAutoUpdateDefaults(t *testing.T) { { desc: "tailnet-default-false", run: func(t *testing.T, n *TestNode) { - // First received default "false". - sendAndCheckDefault(t, n, false, false) - // Should not be changed even if sent "true" later. - sendAndCheckDefault(t, n, true, false) + + // First the server sends "false", and client should remember that. + setDefaultAutoUpdate(false) + n.MustUp() + n.AwaitRunning() + checkDefault(n, false) + + // Now we disconnect and change the server to send "true", which + // the client should ignore, having previously remembered + // "false". + n.MustDown() + setDefaultAutoUpdate(true) // control sends default "true" + n.MustUp() + n.AwaitRunning() + checkDefault(n, false) // still false + // But can be changed explicitly by the user. if out, err := n.TailscaleForOutput("set", "--auto-update").CombinedOutput(); err != nil { t.Fatalf("failed to enable auto-update on node: %v\noutput: %s", err, out) } - sendAndCheckDefault(t, n, false, true) + checkDefault(n, true) }, }, { desc: "tailnet-default-true", run: func(t *testing.T, n *TestNode) { - // First received default "true". - sendAndCheckDefault(t, n, true, true) - // Should not be changed even if sent "false" later. - sendAndCheckDefault(t, n, false, true) + // Same as above but starting with default "true". + + // First the server sends "true", and client should remember that. + setDefaultAutoUpdate(true) + n.MustUp() + n.AwaitRunning() + checkDefault(n, true) + + // Now we disconnect and change the server to send "false", which + // the client should ignore, having previously remembered + // "true". 
+ n.MustDown() + setDefaultAutoUpdate(false) // control sends default "false" + n.MustUp() + n.AwaitRunning() + checkDefault(n, true) // still true + // But can be changed explicitly by the user. if out, err := n.TailscaleForOutput("set", "--auto-update=false").CombinedOutput(); err != nil { - t.Fatalf("failed to disable auto-update on node: %v\noutput: %s", err, out) + t.Fatalf("failed to disable auto-update on node: %v\noutput: %s", err, out) } - sendAndCheckDefault(t, n, true, false) + checkDefault(n, false) }, }, { @@ -1484,22 +1529,21 @@ func TestAutoUpdateDefaults(t *testing.T) { if out, err := n.TailscaleForOutput("set", "--auto-update=false").CombinedOutput(); err != nil { t.Fatalf("failed to disable auto-update on node: %v\noutput: %s", err, out) } - // Defaults sent from control should be ignored. - sendAndCheckDefault(t, n, true, false) - sendAndCheckDefault(t, n, false, false) + + setDefaultAutoUpdate(true) + n.MustUp() + n.AwaitRunning() + checkDefault(n, false) }, }, } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { n := NewTestNode(t, env) + n.allowUpdates = true d := n.StartDaemon() defer d.MustCleanShutdown(t) - n.AwaitResponding() - n.MustUp() - n.AwaitRunning() - tt.run(t, n) }) } diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 268f2f19b4067..d0959ff25b756 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -79,6 +79,10 @@ type Server struct { ExplicitBaseURL string // e.g. "http://127.0.0.1:1234" with no trailing URL HTTPTestServer *httptest.Server // if non-nil, used to get BaseURL + // ModifyFirstMapResponse, if non-nil, is called exactly once per + // MapResponse stream to modify the first MapResponse sent in response to it.
+ ModifyFirstMapResponse func(*tailcfg.MapResponse, *tailcfg.MapRequest) + initMuxOnce sync.Once mux *http.ServeMux @@ -993,6 +997,7 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi // register an updatesCh to get updates. streaming := req.Stream && !req.ReadOnly compress := req.Compress != "" + first := true w.WriteHeader(200) for { @@ -1025,6 +1030,10 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi if allExpired { res.Node.KeyExpiry = time.Now().Add(-1 * time.Minute) } + if f := s.ModifyFirstMapResponse; first && f != nil { + first = false + f(res, req) + } // TODO: add minner if/when needed resBytes, err := json.Marshal(res) if err != nil { diff --git a/types/netmap/nodemut.go b/types/netmap/nodemut.go index f4de1bf0b8f02..4f93be21c6d68 100644 --- a/types/netmap/nodemut.go +++ b/types/netmap/nodemut.go @@ -177,5 +177,5 @@ func mapResponseContainsNonPatchFields(res *tailcfg.MapResponse) bool { // function is called, so it should never be set anyway. 
But for // completedness, and for tests, check it too: res.PeersChanged != nil || - res.DefaultAutoUpdate != "" + res.DeprecatedDefaultAutoUpdate != "" } From f4a4bab105a89da491bb9f5ae1effb9b4f44b7f2 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Fri, 21 Nov 2025 08:12:20 -0800 Subject: [PATCH 0754/1093] tsconsensus: skip integration tests in CI There is an issue to add non-integration tests: #18022 Fixes #15627 #16340 Signed-off-by: Fran Bull --- tsconsensus/tsconsensus_test.go | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index 7f89eb48a7ab7..796c8f51b76a9 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -17,7 +17,6 @@ import ( "net/netip" "os" "path/filepath" - "runtime" "strings" "sync" "testing" @@ -27,7 +26,6 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/raft" "tailscale.com/client/tailscale" - "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn/store/mem" "tailscale.com/net/netns" "tailscale.com/tailcfg" @@ -115,8 +113,8 @@ func (f *fsm) Restore(rc io.ReadCloser) error { } func testConfig(t *testing.T) { - if runtime.GOOS == "windows" && cibuild.On() { - t.Skip("cmd/natc isn't supported on Windows, so skipping tsconsensus tests on CI for now; see https://github.com/tailscale/tailscale/issues/16340") + if cibuild.On() { + t.Skip("these integration tests don't always work well in CI and that's bad for CI; see https://github.com/tailscale/tailscale/issues/16340 and https://github.com/tailscale/tailscale/issues/18022") } // -race AND Parallel makes things start to take too long. 
if !racebuild.On { @@ -251,7 +249,6 @@ func warnLogConfig() Config { } func TestStart(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) control, controlURL := startControl(t) ctx := context.Background() @@ -372,7 +369,6 @@ func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string } func TestApply(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -437,7 +433,6 @@ func assertCommandsWorkOnAnyNode(t testing.TB, participants []*participant) { } func TestConfig(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -477,7 +472,6 @@ func TestConfig(t *testing.T) { } func TestFollowerFailover(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -549,7 +543,6 @@ func TestFollowerFailover(t *testing.T) { } func TestRejoin(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -585,7 +578,6 @@ func TestRejoin(t *testing.T) { } func TestOnlyTaggedPeersCanDialRaftPort(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" @@ -643,7 +635,6 @@ func TestOnlyTaggedPeersCanDialRaftPort(t *testing.T) { } func TestOnlyTaggedPeersCanBeDialed(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15627") testConfig(t) ctx := context.Background() clusterTag := "tag:whatever" From b38dd1ae06c456fcd65e31e642990a5f1520c63b Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 25 Nov 2025 10:22:08 +0000 Subject: [PATCH 0755/1093] ipn/ipnlocal: 
don't panic if there are no suitable exit nodes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In suggestExitNodeLocked, if no exit node candidates have a home DERP or valid location info, `bestCandidates` is an empty slice. This slice is passed to `selectNode` (`randomNode` in prod): ```go func randomNode(nodes views.Slice[tailcfg.NodeView], …) tailcfg.NodeView { … return nodes.At(rand.IntN(nodes.Len())) } ``` An empty slice becomes a call to `rand.IntN(0)`, which panics. This patch changes the behaviour, so if we've filtered out all the candidates before calling `selectNode`, reset the list and then pick from any of the available candidates. This patch also updates our tests to give us more coverage of `randomNode`, so we can spot other potential issues. Updates #17661 Change-Id: I63eb5e4494d45a1df5b1f4b1b5c6d5576322aa72 Signed-off-by: Alex Chan --- ipn/ipnlocal/local.go | 10 +++++++ ipn/ipnlocal/local_test.go | 56 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index defa558ed2976..3e70548963e17 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7432,6 +7432,16 @@ func suggestExitNodeUsingDERP(report *netcheck.Report, nb *nodeBackend, prevSugg } } bestCandidates := pickWeighted(pickFrom) + + // We may have an empty list of candidates here, if none of the candidates + // have home DERP info. + // + // We know that candidates is non-empty or we'd already have returned, so if + // we've filtered everything out of bestCandidates, just use candidates. 
+ if len(bestCandidates) == 0 { + bestCandidates = candidates + } + chosen := selectNode(views.SliceOf(bestCandidates), prevSuggestion) if !chosen.Valid() { return res, errors.New("chosen candidate invalid: this is a bug") diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 68bb2618c278e..02997a0e12fce 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -4436,6 +4436,14 @@ func deterministicRegionForTest(t testing.TB, want views.Slice[int], use int) se } } +// deterministicNodeForTest returns a deterministic selectNodeFunc, which +// allows us to make stable assertions about which exit node will be chosen +// from a list of possible candidates. +// +// When given a list of candidates, it checks that `use` is in the list and +// returns that. +// +// It verifies that `wantLast` was passed to `selectNode(…, want)`. func deterministicNodeForTest(t testing.TB, want views.Slice[tailcfg.StableNodeID], wantLast tailcfg.StableNodeID, use tailcfg.StableNodeID) selectNodeFunc { t.Helper() @@ -4444,6 +4452,16 @@ func deterministicNodeForTest(t testing.TB, want views.Slice[tailcfg.StableNodeI } return func(got views.Slice[tailcfg.NodeView], last tailcfg.StableNodeID) tailcfg.NodeView { + // In the tests, we choose nodes deterministically so we can get + // stable results, but in the real code, we choose nodes randomly. + // + // Call the randomNode function anyway, and ensure it returns + // a sensible result. 
+ view := randomNode(got, last) + if !views.SliceContains(got, view) { + t.Fatalf("randomNode returns an unexpected node") + } + var ret tailcfg.NodeView gotIDs := make([]tailcfg.StableNodeID, got.Len()) @@ -4529,6 +4547,7 @@ func TestSuggestExitNode(t *testing.T) { Longitude: -97.3325, Priority: 100, } + var emptyLocation *tailcfg.Location peer1 := makePeer(1, withExitRoutes(), @@ -4568,6 +4587,18 @@ func TestSuggestExitNode(t *testing.T) { withExitRoutes(), withSuggest(), withLocation(fortWorthLowPriority.View())) + emptyLocationPeer9 := makePeer(9, + withoutDERP(), + withExitRoutes(), + withSuggest(), + withLocation(emptyLocation.View()), + ) + emptyLocationPeer10 := makePeer(10, + withoutDERP(), + withExitRoutes(), + withSuggest(), + withLocation(emptyLocation.View()), + ) selfNode := tailcfg.Node{ Addresses: []netip.Prefix{ @@ -4898,6 +4929,31 @@ func TestSuggestExitNode(t *testing.T) { wantName: "San Jose", wantLocation: sanJose.View(), }, + { + // Regression test for https://github.com/tailscale/tailscale/issues/17661 + name: "exit nodes with no home DERP, randomly selected", + lastReport: &netcheck.Report{ + RegionLatency: map[int]time.Duration{ + 1: 10, + 2: 20, + 3: 10, + }, + PreferredDERP: 1, + }, + netMap: &netmap.NetworkMap{ + SelfNode: selfNode.View(), + DERPMap: defaultDERPMap, + Peers: []tailcfg.NodeView{ + emptyLocationPeer9, + emptyLocationPeer10, + }, + }, + wantRegions: []int{1, 2}, + wantName: "peer9", + wantNodes: []tailcfg.StableNodeID{"stable9", "stable10"}, + wantID: "stable9", + useRegion: 1, + }, } for _, tt := range tests { From c54d243690817d664b03ba0139d7930388e62b33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Tue, 25 Nov 2025 21:35:38 +0100 Subject: [PATCH 0756/1093] net/tstun: add TSMPDiscoAdvertisement to TSMPPing (#17995) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a new type of TSMP message for advertising disco keys to/from a peer, and implements
the advertising triggered by a TSMP ping. Needed as part of the effort to cache the netmap and still let clients connect without control being reachable. Updates #12639 Signed-off-by: Claus Lensbøl Co-authored-by: James Tucker --- net/packet/tsmp.go | 55 +++++++++++++++++++++++ net/packet/tsmp_test.go | 65 ++++++++++++++++++++++++++++ net/tstun/wrap.go | 29 ++++++++++--- net/tstun/wrap_test.go | 62 ++++++++++++++++++++------ wgengine/magicsock/magicsock_test.go | 5 +-- wgengine/userspace.go | 31 ++++++++++++- wgengine/userspace_test.go | 58 +++++++++++++++++++++++ 7 files changed, 280 insertions(+), 25 deletions(-) diff --git a/net/packet/tsmp.go b/net/packet/tsmp.go index 0ea321e84eb2a..8fad1d5037468 100644 --- a/net/packet/tsmp.go +++ b/net/packet/tsmp.go @@ -15,7 +15,9 @@ import ( "fmt" "net/netip" + "go4.org/mem" "tailscale.com/types/ipproto" + "tailscale.com/types/key" ) const minTSMPSize = 7 // the rejected body is 7 bytes @@ -72,6 +74,9 @@ const ( // TSMPTypePong is the type byte for a TailscalePongResponse. TSMPTypePong TSMPType = 'o' + + // TSMPTypeDiscoAdvertisement is the type byte for sending disco keys. + TSMPTypeDiscoAdvertisement TSMPType = 'a' ) type TailscaleRejectReason byte @@ -259,3 +264,53 @@ func (h TSMPPongReply) Marshal(buf []byte) error { binary.BigEndian.PutUint16(buf[9:11], h.PeerAPIPort) return nil } + +// TSMPDiscoKeyAdvertisement is a TSMP message that's used for distributing Disco Keys.
+// +// On the wire, after the IP header, it's currently 33 bytes: +// - 'a' (TSMPTypeDiscoAdvertisement) +// - 32 disco key bytes +type TSMPDiscoKeyAdvertisement struct { + Src, Dst netip.Addr + Key key.DiscoPublic +} + +func (ka *TSMPDiscoKeyAdvertisement) Marshal() ([]byte, error) { + var iph Header + if ka.Src.Is4() { + iph = IP4Header{ + IPProto: ipproto.TSMP, + Src: ka.Src, + Dst: ka.Dst, + } + } else { + iph = IP6Header{ + IPProto: ipproto.TSMP, + Src: ka.Src, + Dst: ka.Dst, + } + } + payload := make([]byte, 0, 33) + payload = append(payload, byte(TSMPTypeDiscoAdvertisement)) + payload = ka.Key.AppendTo(payload) + if len(payload) != 33 { + // Mostly to safeguard against ourselves changing this in the future. + return []byte{}, fmt.Errorf("expected payload length 33, got %d", len(payload)) + } + + return Generate(iph, payload), nil +} + +func (pp *Parsed) AsTSMPDiscoAdvertisement() (tka TSMPDiscoKeyAdvertisement, ok bool) { + if pp.IPProto != ipproto.TSMP { + return + } + p := pp.Payload() + if len(p) < 33 || p[0] != byte(TSMPTypeDiscoAdvertisement) { + return + } + tka.Src = pp.Src.Addr() + tka.Key = key.DiscoPublicFromRaw32(mem.B(p[1:33])) + + return tka, true +} diff --git a/net/packet/tsmp_test.go b/net/packet/tsmp_test.go index e261e6a4199b3..d8f1d38d57180 100644 --- a/net/packet/tsmp_test.go +++ b/net/packet/tsmp_test.go @@ -4,8 +4,14 @@ package packet import ( + "bytes" + "encoding/hex" "net/netip" + "slices" "testing" + + "go4.org/mem" + "tailscale.com/types/key" ) func TestTailscaleRejectedHeader(t *testing.T) { @@ -71,3 +77,62 @@ func TestTailscaleRejectedHeader(t *testing.T) { } } } + +func TestTSMPDiscoKeyAdvertisementMarshal(t *testing.T) { + var ( + // IPv4: Ver(4)Len(5), TOS, Len(53), ID, Flags, TTL(64), Proto(99), Cksum + headerV4, _ = hex.DecodeString("45000035000000004063705d") + // IPv6: Ver(6)TCFlow, Len(33), NextHdr(99), HopLim(64) + headerV6, _ = hex.DecodeString("6000000000216340") + + packetType = []byte{'a'} + testKey = 
bytes.Repeat([]byte{'a'}, 32) + + // IPs + srcV4 = netip.MustParseAddr("1.2.3.4") + dstV4 = netip.MustParseAddr("4.3.2.1") + srcV6 = netip.MustParseAddr("2001:db8::1") + dstV6 = netip.MustParseAddr("2001:db8::2") + ) + + join := func(parts ...[]byte) []byte { + return bytes.Join(parts, nil) + } + + tests := []struct { + name string + tka TSMPDiscoKeyAdvertisement + want []byte + }{ + { + name: "v4Header", + tka: TSMPDiscoKeyAdvertisement{ + Src: srcV4, + Dst: dstV4, + Key: key.DiscoPublicFromRaw32(mem.B(testKey)), + }, + want: join(headerV4, srcV4.AsSlice(), dstV4.AsSlice(), packetType, testKey), + }, + { + name: "v6Header", + tka: TSMPDiscoKeyAdvertisement{ + Src: srcV6, + Dst: dstV6, + Key: key.DiscoPublicFromRaw32(mem.B(testKey)), + }, + want: join(headerV6, srcV6.AsSlice(), dstV6.AsSlice(), packetType, testKey), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.tka.Marshal() + if err != nil { + t.Errorf("error marshalling TSMPDiscoAdvertisement: %s", err) + } + if !slices.Equal(got, tt.want) { + t.Errorf("error marshalling TSMPDiscoAdvertisement, expected: \n%x, \ngot:\n%x", tt.want, got) + } + }) + } +} diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index db4f689bf33d0..6e07c7a3dabd0 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -34,6 +34,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netlogfunc" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/netstack/gro" @@ -209,6 +210,9 @@ type Wrapper struct { captureHook syncs.AtomicValue[packet.CaptureCallback] metrics *metrics + + eventClient *eventbus.Client + discoKeyAdvertisementPub *eventbus.Publisher[DiscoKeyAdvertisement] } type metrics struct { @@ -254,15 +258,15 @@ func (w *Wrapper) Start() { close(w.startCh) } -func WrapTAP(logf logger.Logf, tdev tun.Device, m *usermetric.Registry) *Wrapper { - return wrap(logf, tdev, true, m)
+func WrapTAP(logf logger.Logf, tdev tun.Device, m *usermetric.Registry, bus *eventbus.Bus) *Wrapper { + return wrap(logf, tdev, true, m, bus) } -func Wrap(logf logger.Logf, tdev tun.Device, m *usermetric.Registry) *Wrapper { - return wrap(logf, tdev, false, m) +func Wrap(logf logger.Logf, tdev tun.Device, m *usermetric.Registry, bus *eventbus.Bus) *Wrapper { + return wrap(logf, tdev, false, m, bus) } -func wrap(logf logger.Logf, tdev tun.Device, isTAP bool, m *usermetric.Registry) *Wrapper { +func wrap(logf logger.Logf, tdev tun.Device, isTAP bool, m *usermetric.Registry, bus *eventbus.Bus) *Wrapper { logf = logger.WithPrefix(logf, "tstun: ") w := &Wrapper{ logf: logf, @@ -283,6 +287,9 @@ func wrap(logf logger.Logf, tdev tun.Device, isTAP bool, m *usermetric.Registry) metrics: registerMetrics(m), } + w.eventClient = bus.Client("net.tstun") + w.discoKeyAdvertisementPub = eventbus.Publish[DiscoKeyAdvertisement](w.eventClient) + w.vectorBuffer = make([][]byte, tdev.BatchSize()) for i := range w.vectorBuffer { w.vectorBuffer[i] = make([]byte, maxBufferSize) @@ -357,6 +364,7 @@ func (t *Wrapper) Close() error { close(t.vectorOutbound) t.outboundMu.Unlock() err = t.tdev.Close() + t.eventClient.Close() }) return err } @@ -1118,6 +1126,11 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i return n, err } +type DiscoKeyAdvertisement struct { + Src netip.Addr + Key key.DiscoPublic +} + func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook packet.CaptureCallback, pc *peerConfigTable, gro *gro.GRO) (filter.Response, *gro.GRO) { if captHook != nil { captHook(packet.FromPeer, t.now(), p.Buffer(), p.CaptureMeta) @@ -1128,6 +1141,12 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook pa t.noteActivity() t.injectOutboundPong(p, pingReq) return filter.DropSilently, gro + } else if discoKeyAdvert, ok := p.AsTSMPDiscoAdvertisement(); ok { + t.discoKeyAdvertisementPub.Publish(DiscoKeyAdvertisement{ + 
Src: discoKeyAdvert.Src, + Key: discoKeyAdvert.Key, + }) + return filter.DropSilently, gro } else if data, ok := p.AsTSMPPong(); ok { if f := t.OnTSMPPongReceived; f != nil { f(data) diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index 75cf5afb21f8f..c7d0708df85eb 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -36,6 +36,8 @@ import ( "tailscale.com/types/netlogtype" "tailscale.com/types/ptr" "tailscale.com/types/views" + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" @@ -170,10 +172,10 @@ func setfilter(logf logger.Logf, tun *Wrapper) { tun.SetFilter(filter.New(matches, nil, ipSet, ipSet, nil, logf)) } -func newChannelTUN(logf logger.Logf, secure bool) (*tuntest.ChannelTUN, *Wrapper) { +func newChannelTUN(logf logger.Logf, bus *eventbus.Bus, secure bool) (*tuntest.ChannelTUN, *Wrapper) { chtun := tuntest.NewChannelTUN() reg := new(usermetric.Registry) - tun := Wrap(logf, chtun.TUN(), reg) + tun := Wrap(logf, chtun.TUN(), reg, bus) if secure { setfilter(logf, tun) } else { @@ -183,10 +185,10 @@ func newChannelTUN(logf logger.Logf, secure bool) (*tuntest.ChannelTUN, *Wrapper return chtun, tun } -func newFakeTUN(logf logger.Logf, secure bool) (*fakeTUN, *Wrapper) { +func newFakeTUN(logf logger.Logf, bus *eventbus.Bus, secure bool) (*fakeTUN, *Wrapper) { ftun := NewFake() reg := new(usermetric.Registry) - tun := Wrap(logf, ftun, reg) + tun := Wrap(logf, ftun, reg, bus) if secure { setfilter(logf, tun) } else { @@ -196,7 +198,8 @@ func newFakeTUN(logf logger.Logf, secure bool) (*fakeTUN, *Wrapper) { } func TestReadAndInject(t *testing.T) { - chtun, tun := newChannelTUN(t.Logf, false) + bus := eventbustest.NewBus(t) + chtun, tun := newChannelTUN(t.Logf, bus, false) defer tun.Close() const size = 2 // all payloads have this size @@ -221,7 +224,7 @@ func TestReadAndInject(t *testing.T) { } var buf [MaxPacketSize]byte - var 
seen = make(map[string]bool) + seen := make(map[string]bool) sizes := make([]int, 1) // We expect the same packets back, in no particular order. for i := range len(written) + len(injected) { @@ -257,7 +260,8 @@ func TestReadAndInject(t *testing.T) { } func TestWriteAndInject(t *testing.T) { - chtun, tun := newChannelTUN(t.Logf, false) + bus := eventbustest.NewBus(t) + chtun, tun := newChannelTUN(t.Logf, bus, false) defer tun.Close() written := []string{"w0", "w1"} @@ -316,8 +320,8 @@ func mustHexDecode(s string) []byte { } func TestFilter(t *testing.T) { - - chtun, tun := newChannelTUN(t.Logf, true) + bus := eventbustest.NewBus(t) + chtun, tun := newChannelTUN(t.Logf, bus, true) defer tun.Close() // Reset the metrics before test. These are global @@ -462,7 +466,8 @@ func assertMetricPackets(t *testing.T, metricName string, want, got int64) { } func TestAllocs(t *testing.T) { - ftun, tun := newFakeTUN(t.Logf, false) + bus := eventbustest.NewBus(t) + ftun, tun := newFakeTUN(t.Logf, bus, false) defer tun.Close() buf := [][]byte{{0x00}} @@ -473,14 +478,14 @@ func TestAllocs(t *testing.T) { return } }) - if err != nil { t.Error(err) } } func TestClose(t *testing.T) { - ftun, tun := newFakeTUN(t.Logf, false) + bus := eventbustest.NewBus(t) + ftun, tun := newFakeTUN(t.Logf, bus, false) data := [][]byte{udp4("1.2.3.4", "5.6.7.8", 98, 98)} _, err := ftun.Write(data, 0) @@ -497,7 +502,8 @@ func TestClose(t *testing.T) { func BenchmarkWrite(b *testing.B) { b.ReportAllocs() - ftun, tun := newFakeTUN(b.Logf, true) + bus := eventbustest.NewBus(b) + ftun, tun := newFakeTUN(b.Logf, bus, true) defer tun.Close() packet := [][]byte{udp4("5.6.7.8", "1.2.3.4", 89, 89)} @@ -887,7 +893,8 @@ func TestCaptureHook(t *testing.T) { now := time.Unix(1682085856, 0) - _, w := newFakeTUN(t.Logf, true) + bus := eventbustest.NewBus(t) + _, w := newFakeTUN(t.Logf, bus, true) w.timeNow = func() time.Time { return now } @@ -957,3 +964,30 @@ func TestCaptureHook(t *testing.T) { captured, want) } } + 
+func TestTSMPDisco(t *testing.T) {
+	t.Run("IPv6DiscoAdvert", func(t *testing.T) {
+		src := netip.MustParseAddr("2001:db8::1")
+		dst := netip.MustParseAddr("2001:db8::2")
+		discoKey := key.NewDisco()
+		buf, _ := (&packet.TSMPDiscoKeyAdvertisement{
+			Src: src,
+			Dst: dst,
+			Key: discoKey.Public(),
+		}).Marshal()
+
+		var p packet.Parsed
+		p.Decode(buf)
+
+		tda, ok := p.AsTSMPDiscoAdvertisement()
+		if !ok {
+			t.Error("Unable to parse message as TSMPDiscoAdvertisement")
+		}
+		if tda.Src != src {
+			t.Errorf("Src address did not match, expected %v, got %v", src, tda.Src)
+		}
+		if !reflect.DeepEqual(tda.Key, discoKey.Public()) {
+			t.Errorf("Key did not match, expected %q, got %q", discoKey.Public(), tda.Key)
+		}
+	})
+}
diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go
index 7ae422906b84c..4e10248861500 100644
--- a/wgengine/magicsock/magicsock_test.go
+++ b/wgengine/magicsock/magicsock_test.go
@@ -211,7 +211,7 @@ func newMagicStackWithKey(t testing.TB, logf logger.Logf, ln nettype.PacketListe
 	}
 
 	tun := tuntest.NewChannelTUN()
-	tsTun := tstun.Wrap(logf, tun.TUN(), &reg)
+	tsTun := tstun.Wrap(logf, tun.TUN(), &reg, bus)
 	tsTun.SetFilter(filter.NewAllowAllForTest(logf))
 	tsTun.Start()
 
@@ -1771,7 +1771,6 @@ func TestEndpointSetsEqual(t *testing.T) {
 			t.Errorf("%q vs %q = %v; want %v", tt.a, tt.b, got, tt.want)
 		}
 	}
-
 }
 
 func TestBetterAddr(t *testing.T) {
@@ -1915,7 +1914,6 @@ func TestBetterAddr(t *testing.T) {
 				i, tt.a, tt.b, tt.b, tt.a)
 		}
 	}
-
 }
 
 func epFromTyped(eps []tailcfg.Endpoint) (ret []netip.AddrPort) {
@@ -3138,7 +3136,6 @@ func TestMaybeRebindOnError(t *testing.T) {
 					t.Errorf("expected at least 5 seconds between %s and %s", lastRebindTime, newTime)
 				}
 			}
-
 		})
 	})
 }
diff --git a/wgengine/userspace.go b/wgengine/userspace.go
index e4c99ded20977..a369fa343cc76 100644
--- a/wgengine/userspace.go
+++ b/wgengine/userspace.go
@@ -323,9 +323,9 @@ func 
NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) var tsTUNDev *tstun.Wrapper if conf.IsTAP { - tsTUNDev = tstun.WrapTAP(logf, conf.Tun, conf.Metrics) + tsTUNDev = tstun.WrapTAP(logf, conf.Tun, conf.Metrics, conf.EventBus) } else { - tsTUNDev = tstun.Wrap(logf, conf.Tun, conf.Metrics) + tsTUNDev = tstun.Wrap(logf, conf.Tun, conf.Metrics, conf.EventBus) } closePool.add(tsTUNDev) @@ -1436,6 +1436,7 @@ func (e *userspaceEngine) Ping(ip netip.Addr, pingType tailcfg.PingType, size in e.magicConn.Ping(peer, res, size, cb) case "TSMP": e.sendTSMPPing(ip, peer, res, cb) + e.sendTSMPDiscoAdvertisement(ip) case "ICMP": e.sendICMPEchoRequest(ip, peer, res, cb) } @@ -1556,6 +1557,29 @@ func (e *userspaceEngine) sendTSMPPing(ip netip.Addr, peer tailcfg.NodeView, res e.tundev.InjectOutbound(tsmpPing) } +func (e *userspaceEngine) sendTSMPDiscoAdvertisement(ip netip.Addr) { + srcIP, err := e.mySelfIPMatchingFamily(ip) + if err != nil { + e.logf("getting matching node: %s", err) + return + } + tdka := packet.TSMPDiscoKeyAdvertisement{ + Src: srcIP, + Dst: ip, + Key: e.magicConn.DiscoPublicKey(), + } + payload, err := tdka.Marshal() + if err != nil { + e.logf("error generating TSMP Advertisement: %s", err) + metricTSMPDiscoKeyAdvertisementError.Add(1) + } else if err := e.tundev.InjectOutbound(payload); err != nil { + e.logf("error sending TSMP Advertisement: %s", err) + metricTSMPDiscoKeyAdvertisementError.Add(1) + } else { + metricTSMPDiscoKeyAdvertisementSent.Add(1) + } +} + func (e *userspaceEngine) setTSMPPongCallback(data [8]byte, cb func(packet.TSMPPongReply)) { e.mu.Lock() defer e.mu.Unlock() @@ -1722,6 +1746,9 @@ var ( metricNumMajorChanges = clientmetric.NewCounter("wgengine_major_changes") metricNumMinorChanges = clientmetric.NewCounter("wgengine_minor_changes") + + metricTSMPDiscoKeyAdvertisementSent = clientmetric.NewCounter("magicsock_tsmp_disco_key_advertisement_sent") + metricTSMPDiscoKeyAdvertisementError = 
clientmetric.NewCounter("magicsock_tsmp_disco_key_advertisement_error") ) func (e *userspaceEngine) InstallCaptureHook(cb packet.CaptureCallback) { diff --git a/wgengine/userspace_test.go b/wgengine/userspace_test.go index 89d75b98adafb..0a1d2924d593b 100644 --- a/wgengine/userspace_test.go +++ b/wgengine/userspace_test.go @@ -325,6 +325,64 @@ func TestUserspaceEnginePeerMTUReconfig(t *testing.T) { } } +func TestTSMPKeyAdvertisement(t *testing.T) { + var knobs controlknobs.Knobs + + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) + reg := new(usermetric.Registry) + e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht, reg, bus) + if err != nil { + t.Fatal(err) + } + t.Cleanup(e.Close) + ue := e.(*userspaceEngine) + routerCfg := &router.Config{} + nodeKey := nkFromHex("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + nm := &netmap.NetworkMap{ + Peers: nodeViews([]*tailcfg.Node{ + { + ID: 1, + Key: nodeKey, + }, + }), + SelfNode: (&tailcfg.Node{ + StableID: "TESTCTRL00000001", + Name: "test-node.test.ts.net", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32"), netip.MustParsePrefix("fd7a:115c:a1e0:ab12:4843:cd96:0:1/128")}, + }).View(), + } + cfg := &wgcfg.Config{ + Peers: []wgcfg.Peer{ + { + PublicKey: nodeKey, + AllowedIPs: []netip.Prefix{ + netip.PrefixFrom(netaddr.IPv4(100, 100, 99, 1), 32), + }, + }, + }, + } + + ue.SetNetworkMap(nm) + err = ue.Reconfig(cfg, routerCfg, &dns.Config{}) + if err != nil { + t.Fatal(err) + } + + addr := netip.MustParseAddr("100.100.99.1") + previousValue := metricTSMPDiscoKeyAdvertisementSent.Value() + ue.sendTSMPDiscoAdvertisement(addr) + if val := metricTSMPDiscoKeyAdvertisementSent.Value(); val <= previousValue { + errs := metricTSMPDiscoKeyAdvertisementError.Value() + t.Errorf("Expected 1 disco key advert, got %d, errors %d", val, errs) + } + // Remove config to have the engine shut down more consistently + err = ue.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) + if 
err != nil { + t.Fatal(err) + } +} + func nkFromHex(hex string) key.NodePublic { if len(hex) != 64 { panic(fmt.Sprintf("%q is len %d; want 64", hex, len(hex))) From 53476ce8721f049250f835335dbcaef558852c9e Mon Sep 17 00:00:00 2001 From: Sachin Iyer Date: Mon, 10 Nov 2025 16:52:26 -0800 Subject: [PATCH 0757/1093] ipn/serve: validate service paths in HasPathHandler Fixes #17839 Signed-off-by: Sachin Iyer --- ipn/serve.go | 14 ++++++++++++++ ipn/serve_test.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/ipn/serve.go b/ipn/serve.go index 7ee78ef0d66bb..1f15578893d84 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -238,6 +238,20 @@ func (sc *ServeConfig) HasPathHandler() bool { } } + if sc.Services != nil { + for _, serviceConfig := range sc.Services { + if serviceConfig.Web != nil { + for _, webServerConfig := range serviceConfig.Web { + for _, httpHandler := range webServerConfig.Handlers { + if httpHandler.Path != "" { + return true + } + } + } + } + } + } + if sc.Foreground != nil { for _, fgConfig := range sc.Foreground { if fgConfig.HasPathHandler() { diff --git a/ipn/serve_test.go b/ipn/serve_test.go index 063ff3a87a744..5e0f4a43a38e7 100644 --- a/ipn/serve_test.go +++ b/ipn/serve_test.go @@ -117,6 +117,36 @@ func TestHasPathHandler(t *testing.T) { }, want: false, }, + { + name: "with-service-path-handler", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + Web: map[HostPort]*WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*HTTPHandler{ + "/": {Path: "/tmp"}, + }}, + }, + }, + }, + }, + want: true, + }, + { + name: "with-service-proxy-handler", + cfg: ServeConfig{ + Services: map[tailcfg.ServiceName]*ServiceConfig{ + "svc:foo": { + Web: map[HostPort]*WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*HTTPHandler{ + "/": {Proxy: "http://127.0.0.1:3000"}, + }}, + }, + }, + }, + }, + want: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t 
*testing.T) { From 824027305a2b986b523b5b29dab7b96dba4475aa Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 25 Nov 2025 15:05:04 -0800 Subject: [PATCH 0758/1093] cmd/tailscale/cli,ipn,all: make peer relay server port a *uint16 In preparation for exposing its configuration via ipn.ConfigVAlpha, change {Masked}Prefs.RelayServerPort from *int to *uint16. This takes a defensive stance against invalid inputs at JSON decode time. 'tailscale set --relay-server-port' is currently the only input to this pref, and has always sanitized input to fit within a uint16. Updates tailscale/corp#34591 Signed-off-by: Jordan Whited --- cmd/tailscale/cli/set.go | 2 +- feature/relayserver/relayserver.go | 6 +-- feature/relayserver/relayserver_test.go | 58 ++++++++++++------------- ipn/ipn_clone.go | 2 +- ipn/ipn_view.go | 8 ++-- ipn/prefs.go | 10 ++--- ipn/prefs_test.go | 2 +- net/udprelay/server.go | 6 +-- net/udprelay/status/status.go | 5 ++- 9 files changed, 48 insertions(+), 51 deletions(-) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index c2316580c0ea7..31662392f8437 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -249,7 +249,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { if err != nil { return fmt.Errorf("failed to set relay server port: %v", err) } - maskedPrefs.Prefs.RelayServerPort = ptr.To(int(uport)) + maskedPrefs.Prefs.RelayServerPort = ptr.To(uint16(uport)) } if setArgs.relayServerStaticEndpoints != "" { diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index e85576e50b9af..4f23ae18e4248 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -69,7 +69,7 @@ func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r * // imported. 
func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { e := &extension{ - newServerFn: func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) { + newServerFn: func(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (relayServer, error) { return udprelay.NewServer(logf, port, onlyStaticAddrPorts) }, logf: logger.WithPrefix(logf, featureName+": "), @@ -93,7 +93,7 @@ type relayServer interface { // extension is an [ipnext.Extension] managing the relay server on platforms // that import this package. type extension struct { - newServerFn func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) // swappable for tests + newServerFn func(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (relayServer, error) // swappable for tests logf logger.Logf ec *eventbus.Client respPub *eventbus.Publisher[magicsock.UDPRelayAllocResp] @@ -101,7 +101,7 @@ type extension struct { mu syncs.Mutex // guards the following fields shutdown bool // true if Shutdown() has been called rs relayServer // nil when disabled - port *int // ipn.Prefs.RelayServerPort, nil if disabled + port *uint16 // ipn.Prefs.RelayServerPort, nil if disabled staticEndpoints views.Slice[netip.AddrPort] // ipn.Prefs.RelayServerStaticEndpoints derpMapView tailcfg.DERPMapView // latest seen over the eventbus hasNodeAttrDisableRelayServer bool // [tailcfg.NodeAttrDisableRelayServer] diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index d77d2df261410..807306c707bc1 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -23,15 +23,15 @@ import ( ) func Test_extension_profileStateChanged(t *testing.T) { - prefsWithPortOne := ipn.Prefs{RelayServerPort: ptr.To(1)} + prefsWithPortOne := ipn.Prefs{RelayServerPort: ptr.To(uint16(1))} prefsWithNilPort := ipn.Prefs{RelayServerPort: nil} prefsWithPortOneRelayEndpoints := ipn.Prefs{ - 
RelayServerPort: ptr.To(1), + RelayServerPort: ptr.To(uint16(1)), RelayServerStaticEndpoints: []netip.AddrPort{netip.MustParseAddrPort("127.0.0.1:7777")}, } type fields struct { - port *int + port *uint16 staticEndpoints views.Slice[netip.AddrPort] rs relayServer } @@ -43,7 +43,7 @@ func Test_extension_profileStateChanged(t *testing.T) { name string fields fields args args - wantPort *int + wantPort *uint16 wantRelayServerFieldNonNil bool wantRelayServerFieldMutated bool wantEndpoints []netip.AddrPort @@ -51,28 +51,28 @@ func Test_extension_profileStateChanged(t *testing.T) { { name: "no changes non-nil port previously running", fields: fields{ - port: ptr.To(1), + port: ptr.To(uint16(1)), rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), + wantPort: ptr.To(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: false, }, { name: "set addr ports unchanged port previously running", fields: fields{ - port: ptr.To(1), + port: ptr.To(uint16(1)), rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOneRelayEndpoints.View(), sameNode: true, }, - wantPort: ptr.To(1), + wantPort: ptr.To(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: false, wantEndpoints: prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints, @@ -87,7 +87,7 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOneRelayEndpoints.View(), sameNode: true, }, - wantPort: ptr.To(1), + wantPort: ptr.To(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, wantEndpoints: prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints, @@ -95,7 +95,7 @@ func Test_extension_profileStateChanged(t *testing.T) { { name: "clear addr ports unchanged port previously running", fields: fields{ - port: ptr.To(1), + port: ptr.To(uint16(1)), staticEndpoints: views.SliceOf(prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints), rs: 
mockRelayServerNotZeroVal(), }, @@ -103,7 +103,7 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), + wantPort: ptr.To(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: false, wantEndpoints: nil, @@ -111,7 +111,7 @@ func Test_extension_profileStateChanged(t *testing.T) { { name: "prefs port nil", fields: fields{ - port: ptr.To(1), + port: ptr.To(uint16(1)), }, args: args{ prefs: prefsWithNilPort.View(), @@ -124,7 +124,7 @@ func Test_extension_profileStateChanged(t *testing.T) { { name: "prefs port nil previously running", fields: fields{ - port: ptr.To(1), + port: ptr.To(uint16(1)), rs: mockRelayServerNotZeroVal(), }, args: args{ @@ -138,54 +138,54 @@ func Test_extension_profileStateChanged(t *testing.T) { { name: "prefs port changed", fields: fields{ - port: ptr.To(2), + port: ptr.To(uint16(2)), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), + wantPort: ptr.To(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, }, { name: "prefs port changed previously running", fields: fields{ - port: ptr.To(2), + port: ptr.To(uint16(2)), rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(1), + wantPort: ptr.To(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, }, { name: "sameNode false", fields: fields{ - port: ptr.To(1), + port: ptr.To(uint16(1)), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), + wantPort: ptr.To(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, }, { name: "sameNode false previously running", fields: fields{ - port: ptr.To(1), + port: ptr.To(uint16(1)), rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), + wantPort: ptr.To(uint16(1)), 
wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, }, @@ -198,7 +198,7 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(1), + wantPort: ptr.To(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, }, @@ -211,7 +211,7 @@ func Test_extension_profileStateChanged(t *testing.T) { t.Fatal(err) } e := ipne.(*extension) - e.newServerFn = func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) { + e.newServerFn = func(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (relayServer, error) { return &mockRelayServer{}, nil } e.port = tt.fields.port @@ -271,7 +271,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { tests := []struct { name string shutdown bool - port *int + port *uint16 rs relayServer hasNodeAttrDisableRelayServer bool wantRelayServerFieldNonNil bool @@ -280,7 +280,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { { name: "want running", shutdown: false, - port: ptr.To(1), + port: ptr.To(uint16(1)), hasNodeAttrDisableRelayServer: false, wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, @@ -288,7 +288,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { { name: "want running previously running", shutdown: false, - port: ptr.To(1), + port: ptr.To(uint16(1)), rs: mockRelayServerNotZeroVal(), hasNodeAttrDisableRelayServer: false, wantRelayServerFieldNonNil: true, @@ -297,7 +297,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { { name: "shutdown true", shutdown: true, - port: ptr.To(1), + port: ptr.To(uint16(1)), hasNodeAttrDisableRelayServer: false, wantRelayServerFieldNonNil: false, wantRelayServerFieldMutated: false, @@ -305,7 +305,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { { name: "shutdown true previously running", shutdown: true, - port: ptr.To(1), + port: 
ptr.To(uint16(1)), rs: mockRelayServerNotZeroVal(), hasNodeAttrDisableRelayServer: false, wantRelayServerFieldNonNil: false, @@ -354,7 +354,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { t.Fatal(err) } e := ipne.(*extension) - e.newServerFn = func(logf logger.Logf, port int, onlyStaticAddrPorts bool) (relayServer, error) { + e.newServerFn = func(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (relayServer, error) { return &mockRelayServer{}, nil } e.shutdown = tt.shutdown diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index fae85adee7e2b..4bf78b40b022b 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -102,7 +102,7 @@ var _PrefsCloneNeedsRegeneration = Prefs(struct { PostureChecking bool NetfilterKind string DriveShares []*drive.Share - RelayServerPort *int + RelayServerPort *uint16 RelayServerStaticEndpoints []netip.AddrPort AllowSingleHosts marshalAsTrueInJSON Persist *persist.Persist diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index aac8cb4d7e953..4157ec76e61a8 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -441,10 +441,8 @@ func (v PrefsView) DriveShares() views.SliceView[*drive.Share, drive.ShareView] // RelayServerPort is the UDP port number for the relay server to bind to, // on all interfaces. A non-nil zero value signifies a random unused port // should be used. A nil value signifies relay server functionality -// should be disabled. This field is currently experimental, and therefore -// no guarantees are made about its current naming and functionality when -// non-nil/enabled. -func (v PrefsView) RelayServerPort() views.ValuePointer[int] { +// should be disabled. 
+func (v PrefsView) RelayServerPort() views.ValuePointer[uint16] { return views.ValuePointerOf(v.ж.RelayServerPort) } @@ -506,7 +504,7 @@ var _PrefsViewNeedsRegeneration = Prefs(struct { PostureChecking bool NetfilterKind string DriveShares []*drive.Share - RelayServerPort *int + RelayServerPort *uint16 RelayServerStaticEndpoints []netip.AddrPort AllowSingleHosts marshalAsTrueInJSON Persist *persist.Persist diff --git a/ipn/prefs.go b/ipn/prefs.go index 6f3cb65f83914..9f98465d2d883 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -283,10 +283,8 @@ type Prefs struct { // RelayServerPort is the UDP port number for the relay server to bind to, // on all interfaces. A non-nil zero value signifies a random unused port // should be used. A nil value signifies relay server functionality - // should be disabled. This field is currently experimental, and therefore - // no guarantees are made about its current naming and functionality when - // non-nil/enabled. - RelayServerPort *int `json:",omitempty"` + // should be disabled. + RelayServerPort *uint16 `json:",omitempty"` // RelayServerStaticEndpoints are static IP:port endpoints to advertise as // candidates for relay connections. 
Only relevant when RelayServerPort is @@ -694,7 +692,7 @@ func (p *Prefs) Equals(p2 *Prefs) bool { p.PostureChecking == p2.PostureChecking && slices.EqualFunc(p.DriveShares, p2.DriveShares, drive.SharesEqual) && p.NetfilterKind == p2.NetfilterKind && - compareIntPtrs(p.RelayServerPort, p2.RelayServerPort) && + compareUint16Ptrs(p.RelayServerPort, p2.RelayServerPort) && slices.Equal(p.RelayServerStaticEndpoints, p2.RelayServerStaticEndpoints) } @@ -715,7 +713,7 @@ func (ap AppConnectorPrefs) Pretty() string { return "" } -func compareIntPtrs(a, b *int) bool { +func compareUint16Ptrs(a, b *uint16) bool { if (a == nil) != (b == nil) { return false } diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index cf07507062ab3..aa152843a5af9 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -78,7 +78,7 @@ func TestPrefsEqual(t *testing.T) { have, prefsHandles) } - relayServerPort := func(port int) *int { + relayServerPort := func(port uint16) *uint16 { return &port } nets := func(strs ...string) (ns []netip.Prefix) { diff --git a/net/udprelay/server.go b/net/udprelay/server.go index b260955e0c952..e7ca24960ea1d 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -309,7 +309,7 @@ func (e *serverEndpoint) isBound() bool { // onlyStaticAddrPorts is true, then dynamic addr:port discovery will be // disabled, and only addr:port's set via [Server.SetStaticAddrPorts] will be // used. -func NewServer(logf logger.Logf, port int, onlyStaticAddrPorts bool) (s *Server, err error) { +func NewServer(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (s *Server, err error) { s = &Server{ logf: logf, disco: key.NewDisco(), @@ -526,9 +526,9 @@ func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { // [magicsock.RebindingConn], which would also remove the need for // [singlePacketConn], as [magicsock.RebindingConn] also handles fallback to // single packet syscall operations. 
-func (s *Server) listenOn(port int) error { +func (s *Server) listenOn(port uint16) error { for _, network := range []string{"udp4", "udp6"} { - uc, err := net.ListenUDP(network, &net.UDPAddr{Port: port}) + uc, err := net.ListenUDP(network, &net.UDPAddr{Port: int(port)}) if err != nil { if network == "udp4" { return err diff --git a/net/udprelay/status/status.go b/net/udprelay/status/status.go index 3866efada2542..9ed9a0d2a8def 100644 --- a/net/udprelay/status/status.go +++ b/net/udprelay/status/status.go @@ -14,8 +14,9 @@ import ( type ServerStatus struct { // UDPPort is the UDP port number that the peer relay server forwards over, // as configured by the user with 'tailscale set --relay-server-port='. - // If the port has not been configured, UDPPort will be nil. - UDPPort *int + // If the port has not been configured, UDPPort will be nil. A non-nil zero + // value signifies the user has opted for a random unused port. + UDPPort *uint16 // Sessions is a slice of detailed status information about each peer // relay session that this node's peer relay server is involved with. It // may be empty. From b7658a4ad2d13da515daee2bd8dd7d50a9067708 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 19 Nov 2025 09:41:43 +0000 Subject: [PATCH 0759/1093] tstest/integration: add integration test for Tailnet Lock This patch adds an integration test for Tailnet Lock, checking that a node can't talk to peers in the tailnet until it becomes signed. This patch also introduces a new package `tstest/tkatest`, which has some helpers for constructing a mock control server that responds to TKA requests. This allows us to reduce boilerplate in the IPN tests. 
Updates tailscale/corp#33599 Signed-off-by: Alex Chan --- ipn/ipnlocal/network-lock.go | 37 +-- ipn/ipnlocal/network-lock_test.go | 300 +++--------------- tka/sync.go | 35 ++ tstest/integration/integration.go | 42 ++- tstest/integration/integration_test.go | 75 ++++- tstest/integration/testcontrol/testcontrol.go | 150 ++++++++- tstest/tkatest/tkatest.go | 220 +++++++++++++ 7 files changed, 573 insertions(+), 286 deletions(-) create mode 100644 tstest/tkatest/tkatest.go diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 78d4d236d5007..f25c6fa9b5e36 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -368,20 +368,6 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsVie return nil } -func toSyncOffer(head string, ancestors []string) (tka.SyncOffer, error) { - var out tka.SyncOffer - if err := out.Head.UnmarshalText([]byte(head)); err != nil { - return tka.SyncOffer{}, fmt.Errorf("head.UnmarshalText: %v", err) - } - out.Ancestors = make([]tka.AUMHash, len(ancestors)) - for i, a := range ancestors { - if err := out.Ancestors[i].UnmarshalText([]byte(a)); err != nil { - return tka.SyncOffer{}, fmt.Errorf("ancestor[%d].UnmarshalText: %v", i, err) - } - } - return out, nil -} - // tkaSyncLocked synchronizes TKA state with control. b.mu must be held // and tka must be initialized. b.mu will be stepped out of (and back into) // during network RPCs. 
@@ -399,7 +385,7 @@ func (b *LocalBackend) tkaSyncLocked(ourNodeKey key.NodePublic) error { if err != nil { return fmt.Errorf("offer RPC: %w", err) } - controlOffer, err := toSyncOffer(offerResp.Head, offerResp.Ancestors) + controlOffer, err := tka.ToSyncOffer(offerResp.Head, offerResp.Ancestors) if err != nil { return fmt.Errorf("control offer: %v", err) } @@ -694,7 +680,7 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt // Our genesis AUM was accepted but before Control turns on enforcement of // node-key signatures, we need to sign keys for all the existing nodes. - // If we don't get these signatures ahead of time, everyone will loose + // If we don't get these signatures ahead of time, everyone will lose // connectivity because control won't have any signatures to send which // satisfy network-lock checks. sigs := make(map[tailcfg.NodeID]tkatype.MarshaledSignature, len(initResp.NeedSignatures)) @@ -1294,27 +1280,10 @@ func (b *LocalBackend) tkaFetchBootstrap(ourNodeKey key.NodePublic, head tka.AUM return a, nil } -func fromSyncOffer(offer tka.SyncOffer) (head string, ancestors []string, err error) { - headBytes, err := offer.Head.MarshalText() - if err != nil { - return "", nil, fmt.Errorf("head.MarshalText: %v", err) - } - - ancestors = make([]string, len(offer.Ancestors)) - for i, ancestor := range offer.Ancestors { - hash, err := ancestor.MarshalText() - if err != nil { - return "", nil, fmt.Errorf("ancestor[%d].MarshalText: %v", i, err) - } - ancestors[i] = string(hash) - } - return string(headBytes), ancestors, nil -} - // tkaDoSyncOffer sends a /machine/tka/sync/offer RPC to the control plane // over noise. This is the first of two RPCs implementing tka synchronization. 
func (b *LocalBackend) tkaDoSyncOffer(ourNodeKey key.NodePublic, offer tka.SyncOffer) (*tailcfg.TKASyncOfferResponse, error) { - head, ancestors, err := fromSyncOffer(offer) + head, ancestors, err := tka.FromSyncOffer(offer) if err != nil { return nil, fmt.Errorf("encoding offer: %v", err) } diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 5d22425a1e5cb..e5df38bdb6d76 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -33,6 +33,7 @@ import ( "tailscale.com/tka" "tailscale.com/tsd" "tailscale.com/tstest" + "tailscale.com/tstest/tkatest" "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/persist" @@ -101,7 +102,8 @@ func TestTKAEnablementFlow(t *testing.T) { // our mock server can communicate. nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - a1, genesisAUM, err := tka.Create(tka.ChonkMem(), tka.State{ + chonk := tka.ChonkMem() + a1, genesisAUM, err := tka.Create(chonk, tka.State{ Keys: []tka.Key{key}, DisablementSecrets: [][]byte{bytes.Repeat([]byte{0xa5}, 32)}, }, nlPriv) @@ -113,51 +115,31 @@ func TestTKAEnablementFlow(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/bootstrap": - body := new(tailcfg.TKABootstrapRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - if body.Version != tailcfg.CurrentCapabilityVersion { - t.Errorf("bootstrap CapVer = %v, want %v", body.Version, tailcfg.CurrentCapabilityVersion) - } - if body.NodeKey != nodePriv.Public() { - t.Errorf("bootstrap nodeKey=%v, want %v", body.NodeKey, nodePriv.Public()) + resp := tailcfg.TKABootstrapResponse{ + GenesisAUM: genesisAUM.Serialize(), } - if body.Head != "" { - t.Errorf("bootstrap head=%s, want empty hash", body.Head) + req, err := tkatest.HandleTKABootstrap(w, r, resp) + if err != nil { + t.Errorf("HandleTKABootstrap: %v", err) } - - w.WriteHeader(200) - out := 
tailcfg.TKABootstrapResponse{ - GenesisAUM: genesisAUM.Serialize(), + if req.NodeKey != nodePriv.Public() { + t.Errorf("bootstrap nodeKey=%v, want %v", req.NodeKey, nodePriv.Public()) } - if err := json.NewEncoder(w).Encode(out); err != nil { - t.Fatal(err) + if req.Head != "" { + t.Errorf("bootstrap head=%s, want empty hash", req.Head) } // Sync offer/send endpoints are hit even though the node is up-to-date, // so we implement enough of a fake that the client doesn't explode. case "/machine/tka/sync/offer": - head, err := a1.Head().MarshalText() + err := tkatest.HandleTKASyncOffer(w, r, a1, chonk) if err != nil { - t.Fatal(err) - } - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASyncOfferResponse{ - Head: string(head), - }); err != nil { - t.Fatal(err) + t.Errorf("HandleTKASyncOffer: %v", err) } case "/machine/tka/sync/send": - head, err := a1.Head().MarshalText() + err := tkatest.HandleTKASyncSend(w, r, a1, chonk) if err != nil { - t.Fatal(err) - } - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASyncSendResponse{ - Head: string(head), - }); err != nil { - t.Fatal(err) + t.Errorf("HandleTKASyncOffer: %v", err) } default: @@ -225,37 +207,28 @@ func TestTKADisablementFlow(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/bootstrap": - body := new(tailcfg.TKABootstrapRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - if body.Version != tailcfg.CurrentCapabilityVersion { - t.Errorf("bootstrap CapVer = %v, want %v", body.Version, tailcfg.CurrentCapabilityVersion) - } - if body.NodeKey != nodePriv.Public() { - t.Errorf("nodeKey=%v, want %v", body.NodeKey, nodePriv.Public()) - } - var head tka.AUMHash - if err := head.UnmarshalText([]byte(body.Head)); err != nil { - t.Fatalf("failed unmarshal of body.Head: %v", err) - } - if head != authority.Head() { - t.Errorf("reported head = %x, want %x", head, authority.Head()) - } - var disablement []byte if 
returnWrongSecret { disablement = bytes.Repeat([]byte{0x42}, 32) // wrong secret } else { disablement = disablementSecret } - - w.WriteHeader(200) - out := tailcfg.TKABootstrapResponse{ + resp := tailcfg.TKABootstrapResponse{ DisablementSecret: disablement, } - if err := json.NewEncoder(w).Encode(out); err != nil { - t.Fatal(err) + req, err := tkatest.HandleTKABootstrap(w, r, resp) + if err != nil { + t.Errorf("HandleTKABootstrap: %v", err) + } + if req.NodeKey != nodePriv.Public() { + t.Errorf("nodeKey=%v, want %v", req.NodeKey, nodePriv.Public()) + } + var head tka.AUMHash + if err := head.UnmarshalText([]byte(req.Head)); err != nil { + t.Fatalf("failed unmarshal of body.Head: %v", err) + } + if head != authority.Head() { + t.Errorf("reported head = %x, want %x", head, authority.Head()) } default: @@ -430,76 +403,15 @@ func TestTKASync(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/sync/offer": - body := new(tailcfg.TKASyncOfferRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - t.Logf("got sync offer:\n%+v", body) - nodeOffer, err := toSyncOffer(body.Head, body.Ancestors) + err := tkatest.HandleTKASyncOffer(w, r, controlAuthority, controlStorage) if err != nil { - t.Fatal(err) - } - controlOffer, err := controlAuthority.SyncOffer(controlStorage) - if err != nil { - t.Fatal(err) - } - sendAUMs, err := controlAuthority.MissingAUMs(controlStorage, nodeOffer) - if err != nil { - t.Fatal(err) - } - - head, ancestors, err := fromSyncOffer(controlOffer) - if err != nil { - t.Fatal(err) - } - resp := tailcfg.TKASyncOfferResponse{ - Head: head, - Ancestors: ancestors, - MissingAUMs: make([]tkatype.MarshaledAUM, len(sendAUMs)), - } - for i, a := range sendAUMs { - resp.MissingAUMs[i] = a.Serialize() - } - - t.Logf("responding to sync offer with:\n%+v", resp) - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(resp); err != nil { - t.Fatal(err) + t.Errorf("HandleTKASyncOffer: %v", err) } case 
"/machine/tka/sync/send": - body := new(tailcfg.TKASyncSendRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - t.Logf("got sync send:\n%+v", body) - - var remoteHead tka.AUMHash - if err := remoteHead.UnmarshalText([]byte(body.Head)); err != nil { - t.Fatalf("head unmarshal: %v", err) - } - toApply := make([]tka.AUM, len(body.MissingAUMs)) - for i, a := range body.MissingAUMs { - if err := toApply[i].Unserialize(a); err != nil { - t.Fatalf("decoding missingAUM[%d]: %v", i, err) - } - } - - if len(toApply) > 0 { - if err := controlAuthority.Inform(controlStorage, toApply); err != nil { - t.Fatalf("control.Inform(%+v) failed: %v", toApply, err) - } - } - head, err := controlAuthority.Head().MarshalText() + err := tkatest.HandleTKASyncSend(w, r, controlAuthority, controlStorage) if err != nil { - t.Fatal(err) - } - - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASyncSendResponse{ - Head: string(head), - }); err != nil { - t.Fatal(err) + t.Errorf("HandleTKASyncSend: %v", err) } default: @@ -608,76 +520,15 @@ func TestTKASyncTriggersCompact(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/sync/offer": - body := new(tailcfg.TKASyncOfferRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - t.Logf("got sync offer:\n%+v", body) - nodeOffer, err := toSyncOffer(body.Head, body.Ancestors) - if err != nil { - t.Fatal(err) - } - controlOffer, err := controlAuthority.SyncOffer(controlStorage) - if err != nil { - t.Fatal(err) - } - sendAUMs, err := controlAuthority.MissingAUMs(controlStorage, nodeOffer) - if err != nil { - t.Fatal(err) - } - - head, ancestors, err := fromSyncOffer(controlOffer) + err := tkatest.HandleTKASyncOffer(w, r, controlAuthority, controlStorage) if err != nil { - t.Fatal(err) - } - resp := tailcfg.TKASyncOfferResponse{ - Head: head, - Ancestors: ancestors, - MissingAUMs: make([]tkatype.MarshaledAUM, len(sendAUMs)), - } - for i, a 
:= range sendAUMs { - resp.MissingAUMs[i] = a.Serialize() - } - - t.Logf("responding to sync offer with:\n%+v", resp) - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(resp); err != nil { - t.Fatal(err) + t.Errorf("HandleTKASyncOffer: %v", err) } case "/machine/tka/sync/send": - body := new(tailcfg.TKASyncSendRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - t.Logf("got sync send:\n%+v", body) - - var remoteHead tka.AUMHash - if err := remoteHead.UnmarshalText([]byte(body.Head)); err != nil { - t.Fatalf("head unmarshal: %v", err) - } - toApply := make([]tka.AUM, len(body.MissingAUMs)) - for i, a := range body.MissingAUMs { - if err := toApply[i].Unserialize(a); err != nil { - t.Fatalf("decoding missingAUM[%d]: %v", i, err) - } - } - - if len(toApply) > 0 { - if err := controlAuthority.Inform(controlStorage, toApply); err != nil { - t.Fatalf("control.Inform(%+v) failed: %v", toApply, err) - } - } - head, err := controlAuthority.Head().MarshalText() + err := tkatest.HandleTKASyncSend(w, r, controlAuthority, controlStorage) if err != nil { - t.Fatal(err) - } - - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASyncSendResponse{ - Head: string(head), - }); err != nil { - t.Fatal(err) + t.Errorf("HandleTKASyncSend: %v", err) } default: @@ -1019,29 +870,9 @@ func TestTKASign(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/sign": - body := new(tailcfg.TKASubmitSignatureRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - if body.Version != tailcfg.CurrentCapabilityVersion { - t.Errorf("sign CapVer = %v, want %v", body.Version, tailcfg.CurrentCapabilityVersion) - } - if body.NodeKey != nodePriv.Public() { - t.Errorf("nodeKey = %v, want %v", body.NodeKey, nodePriv.Public()) - } - - var sig tka.NodeKeySignature - if err := sig.Unserialize(body.Signature); err != nil { - t.Fatalf("malformed signature: %v", err) - } - - if err := 
authority.NodeKeyAuthorized(toSign.Public(), body.Signature); err != nil { - t.Errorf("signature does not verify: %v", err) - } - - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASubmitSignatureResponse{}); err != nil { - t.Fatal(err) + _, _, err := tkatest.HandleTKASign(w, r, authority) + if err != nil { + t.Errorf("HandleTKASign: %v", err) } default: @@ -1098,23 +929,15 @@ func TestTKAForceDisable(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/bootstrap": - body := new(tailcfg.TKABootstrapRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - if body.Version != tailcfg.CurrentCapabilityVersion { - t.Errorf("bootstrap CapVer = %v, want %v", body.Version, tailcfg.CurrentCapabilityVersion) - } - if body.NodeKey != nodePriv.Public() { - t.Errorf("nodeKey=%v, want %v", body.NodeKey, nodePriv.Public()) - } - - w.WriteHeader(200) - out := tailcfg.TKABootstrapResponse{ + resp := tailcfg.TKABootstrapResponse{ GenesisAUM: genesis.Serialize(), } - if err := json.NewEncoder(w).Encode(out); err != nil { - t.Fatal(err) + req, err := tkatest.HandleTKABootstrap(w, r, resp) + if err != nil { + t.Errorf("HandleTKABootstrap: %v", err) + } + if req.NodeKey != nodePriv.Public() { + t.Errorf("nodeKey=%v, want %v", req.NodeKey, nodePriv.Public()) } default: @@ -1323,37 +1146,16 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { defer r.Body.Close() switch r.URL.Path { case "/machine/tka/sync/send": - body := new(tailcfg.TKASyncSendRequest) - if err := json.NewDecoder(r.Body).Decode(body); err != nil { - t.Fatal(err) - } - t.Logf("got sync send:\n%+v", body) - - var remoteHead tka.AUMHash - if err := remoteHead.UnmarshalText([]byte(body.Head)); err != nil { - t.Fatalf("head unmarshal: %v", err) - } - toApply := make([]tka.AUM, len(body.MissingAUMs)) - for i, a := range body.MissingAUMs { - if err := toApply[i].Unserialize(a); err != nil { - t.Fatalf("decoding missingAUM[%d]: %v", i, err) - } + 
err := tkatest.HandleTKASyncSend(w, r, authority, chonk) + if err != nil { + t.Errorf("HandleTKASyncSend: %v", err) } - // Apply the recovery AUM to an authority to make sure it works. - if err := authority.Inform(chonk, toApply); err != nil { - t.Errorf("recovery AUM could not be applied: %v", err) - } // Make sure the key we removed isn't trusted. if authority.KeyTrusted(compromisedPriv.KeyID()) { t.Error("compromised key was not removed from tka") } - w.WriteHeader(200) - if err := json.NewEncoder(w).Encode(tailcfg.TKASubmitSignatureResponse{}); err != nil { - t.Fatal(err) - } - default: t.Errorf("unhandled endpoint path: %v", r.URL.Path) w.WriteHeader(404) diff --git a/tka/sync.go b/tka/sync.go index e3a858c155347..2dbfb7ac435b2 100644 --- a/tka/sync.go +++ b/tka/sync.go @@ -32,6 +32,41 @@ type SyncOffer struct { Ancestors []AUMHash } +// ToSyncOffer creates a SyncOffer from the fields received in +// a [tailcfg.TKASyncOfferRequest]. +func ToSyncOffer(head string, ancestors []string) (SyncOffer, error) { + var out SyncOffer + if err := out.Head.UnmarshalText([]byte(head)); err != nil { + return SyncOffer{}, fmt.Errorf("head.UnmarshalText: %v", err) + } + out.Ancestors = make([]AUMHash, len(ancestors)) + for i, a := range ancestors { + if err := out.Ancestors[i].UnmarshalText([]byte(a)); err != nil { + return SyncOffer{}, fmt.Errorf("ancestor[%d].UnmarshalText: %v", i, err) + } + } + return out, nil +} + +// FromSyncOffer marshals the fields of a SyncOffer so they can be +// sent in a [tailcfg.TKASyncOfferRequest]. 
+func FromSyncOffer(offer SyncOffer) (head string, ancestors []string, err error) { + headBytes, err := offer.Head.MarshalText() + if err != nil { + return "", nil, fmt.Errorf("head.MarshalText: %v", err) + } + + ancestors = make([]string, len(offer.Ancestors)) + for i, ancestor := range offer.Ancestors { + hash, err := ancestor.MarshalText() + if err != nil { + return "", nil, fmt.Errorf("ancestor[%d].MarshalText: %v", i, err) + } + ancestors[i] = string(hash) + } + return string(headBytes), ancestors, nil +} + const ( // The starting number of AUMs to skip when listing // ancestors in a SyncOffer. diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index ea5747b7d5a1d..a62173ae3e353 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -918,7 +918,7 @@ func (n *TestNode) Ping(otherNode *TestNode) error { t := n.env.t ip := otherNode.AwaitIP4().String() t.Logf("Running ping %v (from %v)...", ip, n.AwaitIP4()) - return n.Tailscale("ping", ip).Run() + return n.Tailscale("ping", "--timeout=1s", ip).Run() } // AwaitListening waits for the tailscaled to be serving local clients @@ -1077,6 +1077,46 @@ func (n *TestNode) MustStatus() *ipnstate.Status { return st } +// PublicKey returns the hex-encoded public key of this node, +// e.g. `nodekey:123456abc` +func (n *TestNode) PublicKey() string { + tb := n.env.t + tb.Helper() + cmd := n.Tailscale("status", "--json") + out, err := cmd.CombinedOutput() + if err != nil { + tb.Fatalf("running `tailscale status`: %v, %s", err, out) + } + + type Self struct{ PublicKey string } + type StatusOutput struct{ Self Self } + + var st StatusOutput + if err := json.Unmarshal(out, &st); err != nil { + tb.Fatalf("decoding `tailscale status` JSON: %v\njson:\n%s", err, out) + } + return st.Self.PublicKey +} + +// NLPublicKey returns the hex-encoded network lock public key of +// this node, e.g. 
`tlpub:123456abc` +func (n *TestNode) NLPublicKey() string { + tb := n.env.t + tb.Helper() + cmd := n.Tailscale("lock", "status", "--json") + out, err := cmd.CombinedOutput() + if err != nil { + tb.Fatalf("running `tailscale lock status`: %v, %s", err, out) + } + st := struct { + PublicKey string `json:"PublicKey"` + }{} + if err := json.Unmarshal(out, &st); err != nil { + tb.Fatalf("decoding `tailscale lock status` JSON: %v\njson:\n%s", err, out) + } + return st.PublicKey +} + // trafficTrap is an HTTP proxy handler to note whether any // HTTP traffic tries to leave localhost from tailscaled. We don't // expect any, so any request triggers a failure. diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 3739a3011c4a2..fc891ad722b28 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -2253,7 +2253,7 @@ func TestC2NDebugNetmap(t *testing.T) { } } -func TestNetworkLock(t *testing.T) { +func TestTailnetLock(t *testing.T) { // If you run `tailscale lock log` on a node where Tailnet Lock isn't // enabled, you get an error explaining that. @@ -2291,6 +2291,79 @@ func TestNetworkLock(t *testing.T) { t.Fatalf("stderr: want %q, got %q", wantErr, errBuf.String()) } }) + + // If you create a tailnet with two signed nodes and one unsigned, + // the signed nodes can talk to each other but the unsigned node cannot + // talk to anybody. + t.Run("node-connectivity", func(t *testing.T) { + tstest.Shard(t) + t.Parallel() + + env := NewTestEnv(t) + env.Control.DefaultNodeCapabilities = &tailcfg.NodeCapMap{ + tailcfg.CapabilityTailnetLock: []tailcfg.RawMessage{}, + } + + // Start two nodes which will be our signing nodes. 
+ signing1 := NewTestNode(t, env) + signing2 := NewTestNode(t, env) + + nodes := []*TestNode{signing1, signing2} + for _, n := range nodes { + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + n.MustUp() + n.AwaitRunning() + } + + // Initiate Tailnet Lock with the two signing nodes. + initCmd := signing1.Tailscale("lock", "init", + "--gen-disablements", "10", + "--confirm", + signing1.NLPublicKey(), signing2.NLPublicKey(), + ) + out, err := initCmd.CombinedOutput() + if err != nil { + t.Fatalf("init command failed: %q\noutput=%v", err, string(out)) + } + + // Check that the two signing nodes can ping each other + if err := signing1.Ping(signing2); err != nil { + t.Fatalf("ping signing1 -> signing2: %v", err) + } + if err := signing2.Ping(signing1); err != nil { + t.Fatalf("ping signing2 -> signing1: %v", err) + } + + // Create and start a third node + node3 := NewTestNode(t, env) + d3 := node3.StartDaemon() + defer d3.MustCleanShutdown(t) + node3.MustUp() + node3.AwaitRunning() + + if err := signing1.Ping(node3); err == nil { + t.Fatal("ping signing1 -> node3: expected err, but succeeded") + } + if err := node3.Ping(signing1); err == nil { + t.Fatal("ping node3 -> signing1: expected err, but succeeded") + } + + // Sign node3, and check the nodes can now talk to each other + signCmd := signing1.Tailscale("lock", "sign", node3.PublicKey()) + out, err = signCmd.CombinedOutput() + if err != nil { + t.Fatalf("sign command failed: %q\noutput = %v", err, string(out)) + } + + if err := signing1.Ping(node3); err != nil { + t.Fatalf("ping signing1 -> node3: expected success, got err: %v", err) + } + if err := node3.Ping(signing1); err != nil { + t.Fatalf("ping node3 -> signing1: expected success, got err: %v", err) + } + }) } func TestNodeWithBadStateFile(t *testing.T) { diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index d0959ff25b756..19964c91ff8a4 100644 --- a/tstest/integration/testcontrol/testcontrol.go 
+++ b/tstest/integration/testcontrol/testcontrol.go @@ -33,6 +33,8 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/syncs" "tailscale.com/tailcfg" + "tailscale.com/tka" + "tailscale.com/tstest/tkatest" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/opt" @@ -123,6 +125,10 @@ type Server struct { nodeKeyAuthed set.Set[key.NodePublic] msgToSend map[key.NodePublic]any // value is *tailcfg.PingRequest or entire *tailcfg.MapResponse allExpired bool // All nodes will be told their node key is expired. + + // tkaStorage records the Tailnet Lock state, if any. + // If nil, Tailnet Lock is not enabled in the Tailnet. + tkaStorage tka.CompactableChonk } // BaseURL returns the server's base URL, without trailing slash. @@ -329,6 +335,7 @@ func (s *Server) initMux() { w.WriteHeader(http.StatusNoContent) }) s.mux.HandleFunc("/key", s.serveKey) + s.mux.HandleFunc("/machine/tka/", s.serveTKA) s.mux.HandleFunc("/machine/", s.serveMachine) s.mux.HandleFunc("/ts2021", s.serveNoiseUpgrade) s.mux.HandleFunc("/c2n/", s.serveC2N) @@ -439,7 +446,7 @@ func (s *Server) serveKey(w http.ResponseWriter, r *http.Request) { func (s *Server) serveMachine(w http.ResponseWriter, r *http.Request) { if r.Method != "POST" { - http.Error(w, "POST required", 400) + http.Error(w, "POST required for serveMachine", 400) return } ctx := r.Context() @@ -861,6 +868,132 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. 
w.Write(res) } +func (s *Server) serveTKA(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "GET required for serveTKA", 400) + return + } + + switch r.URL.Path { + case "/machine/tka/init/begin": + s.serveTKAInitBegin(w, r) + case "/machine/tka/init/finish": + s.serveTKAInitFinish(w, r) + case "/machine/tka/bootstrap": + s.serveTKABootstrap(w, r) + case "/machine/tka/sync/offer": + s.serveTKASyncOffer(w, r) + case "/machine/tka/sign": + s.serveTKASign(w, r) + default: + s.serveUnhandled(w, r) + } +} + +func (s *Server) serveTKAInitBegin(w http.ResponseWriter, r *http.Request) { + s.mu.Lock() + defer s.mu.Unlock() + + nodes := maps.Values(s.nodes) + genesisAUM, err := tkatest.HandleTKAInitBegin(w, r, nodes) + if err != nil { + go panic(fmt.Sprintf("HandleTKAInitBegin: %v", err)) + } + s.tkaStorage = tka.ChonkMem() + s.tkaStorage.CommitVerifiedAUMs([]tka.AUM{*genesisAUM}) +} + +func (s *Server) serveTKAInitFinish(w http.ResponseWriter, r *http.Request) { + signatures, err := tkatest.HandleTKAInitFinish(w, r) + if err != nil { + go panic(fmt.Sprintf("HandleTKAInitFinish: %v", err)) + } + + s.mu.Lock() + defer s.mu.Unlock() + + // Apply the signatures to each of the nodes. Because s.nodes is keyed + // by public key instead of node ID, we have to do this inefficiently. + // + // We only have small tailnets in the integration tests, so this isn't + // much of an issue. + for nodeID, sig := range signatures { + for _, n := range s.nodes { + if n.ID == nodeID { + n.KeySignature = sig + } + } + } +} + +func (s *Server) serveTKABootstrap(w http.ResponseWriter, r *http.Request) { + s.mu.Lock() + defer s.mu.Unlock() + if s.tkaStorage == nil { + http.Error(w, "no TKA state when calling serveTKABootstrap", 400) + return + } + + // Find the genesis AUM, which we need to include in the response. 
+ var genesis *tka.AUM + allAUMs, err := s.tkaStorage.AllAUMs() + if err != nil { + http.Error(w, "unable to retrieve all AUMs from TKA state", 500) + return + } + for _, h := range allAUMs { + aum := must.Get(s.tkaStorage.AUM(h)) + if _, hasParent := aum.Parent(); !hasParent { + genesis = &aum + break + } + } + if genesis == nil { + http.Error(w, "unable to find genesis AUM in TKA state", 500) + return + } + + resp := tailcfg.TKABootstrapResponse{ + GenesisAUM: genesis.Serialize(), + } + _, err = tkatest.HandleTKABootstrap(w, r, resp) + if err != nil { + go panic(fmt.Sprintf("HandleTKABootstrap: %v", err)) + } +} + +func (s *Server) serveTKASyncOffer(w http.ResponseWriter, r *http.Request) { + s.mu.Lock() + defer s.mu.Unlock() + + authority, err := tka.Open(s.tkaStorage) + if err != nil { + go panic(fmt.Sprintf("serveTKASyncOffer: tka.Open: %v", err)) + } + + err = tkatest.HandleTKASyncOffer(w, r, authority, s.tkaStorage) + if err != nil { + go panic(fmt.Sprintf("HandleTKASyncOffer: %v", err)) + } +} + +func (s *Server) serveTKASign(w http.ResponseWriter, r *http.Request) { + s.mu.Lock() + defer s.mu.Unlock() + + authority, err := tka.Open(s.tkaStorage) + if err != nil { + go panic(fmt.Sprintf("serveTKASign: tka.Open: %v", err)) + } + + sig, keyBeingSigned, err := tkatest.HandleTKASign(w, r, authority) + if err != nil { + go panic(fmt.Sprintf("HandleTKASign: %v", err)) + } + s.nodes[*keyBeingSigned].KeySignature = *sig + s.updateLocked("TKASign", s.nodeIDsLocked(0)) +} + // updateType indicates why a long-polling map request is being woken // up for an update. type updateType int @@ -1197,6 +1330,21 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, v6Prefix, } + // If the server is tracking TKA state, and there's a single TKA head, + // add it to the MapResponse. 
+ if s.tkaStorage != nil { + heads, err := s.tkaStorage.Heads() + if err != nil { + log.Printf("unable to get TKA heads: %v", err) + } else if len(heads) != 1 { + log.Printf("unable to get single TKA head, got %v", heads) + } else { + res.TKAInfo = &tailcfg.TKAInfo{ + Head: heads[0].Hash().String(), + } + } + } + s.mu.Lock() defer s.mu.Unlock() res.Node.PrimaryRoutes = s.nodeSubnetRoutes[nk] diff --git a/tstest/tkatest/tkatest.go b/tstest/tkatest/tkatest.go new file mode 100644 index 0000000000000..fb157a1a19315 --- /dev/null +++ b/tstest/tkatest/tkatest.go @@ -0,0 +1,220 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// tkatest has functions for creating a mock control server that responds +// to TKA endpoints. +package tkatest + +import ( + "encoding/json" + "errors" + "fmt" + "iter" + "log" + "net/http" + + "tailscale.com/tailcfg" + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" +) + +func serverError(w http.ResponseWriter, format string, a ...any) error { + err := fmt.Sprintf(format, a...) + http.Error(w, err, 500) + log.Printf("returning HTTP 500 error: %v", err) + return errors.New(err) +} + +func userError(w http.ResponseWriter, format string, a ...any) error { + err := fmt.Sprintf(format, a...) + http.Error(w, err, 400) + return errors.New(err) +} + +// HandleTKAInitBegin handles a request to /machine/tka/init/begin. +// +// If the request contains a valid genesis AUM, it sends a response to the +// client, and returns the AUM to the caller. 
+func HandleTKAInitBegin(w http.ResponseWriter, r *http.Request, nodes iter.Seq[*tailcfg.Node]) (*tka.AUM, error) { + var req *tailcfg.TKAInitBeginRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, userError(w, "Decode: %v", err) + } + var aum tka.AUM + if err := aum.Unserialize(req.GenesisAUM); err != nil { + return nil, userError(w, "invalid genesis AUM: %v", err) + } + beginResp := tailcfg.TKAInitBeginResponse{} + for n := range nodes { + beginResp.NeedSignatures = append( + beginResp.NeedSignatures, + tailcfg.TKASignInfo{ + NodeID: n.ID, + NodePublic: n.Key, + }, + ) + } + + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(beginResp); err != nil { + return nil, serverError(w, "Encode: %v", err) + } + return &aum, nil +} + +// HandleTKAInitFinish handles a request to /machine/tka/init/finish. +// +// It sends a response to the client, and gives the caller a list of node +// signatures to apply. +// +// This method assumes that the node signatures are valid, and does not +// verify them with the supplied public key. +func HandleTKAInitFinish(w http.ResponseWriter, r *http.Request) (map[tailcfg.NodeID]tkatype.MarshaledSignature, error) { + var req *tailcfg.TKAInitFinishRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return nil, userError(w, "Decode: %v", err) + } + + w.WriteHeader(200) + w.Write([]byte("{}")) + + return req.Signatures, nil +} + +// HandleTKABootstrap handles a request to /tka/bootstrap. +// +// If the request is valid, it sends a response to the client, and returns +// the parsed request to the caller. 
+func HandleTKABootstrap(w http.ResponseWriter, r *http.Request, resp tailcfg.TKABootstrapResponse) (*tailcfg.TKABootstrapRequest, error) { + req := new(tailcfg.TKABootstrapRequest) + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + return nil, userError(w, "Decode: %v", err) + } + if req.Version != tailcfg.CurrentCapabilityVersion { + return nil, userError(w, "bootstrap CapVer = %v, want %v", req.Version, tailcfg.CurrentCapabilityVersion) + } + + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(resp); err != nil { + return nil, serverError(w, "Encode: %v", err) + } + return req, nil +} + +func HandleTKASyncOffer(w http.ResponseWriter, r *http.Request, authority *tka.Authority, chonk tka.Chonk) error { + body := new(tailcfg.TKASyncOfferRequest) + if err := json.NewDecoder(r.Body).Decode(body); err != nil { + return userError(w, "Decode: %v", err) + } + + log.Printf("got sync offer:\n%+v", body) + + nodeOffer, err := tka.ToSyncOffer(body.Head, body.Ancestors) + if err != nil { + return userError(w, "ToSyncOffer: %v", err) + } + + controlOffer, err := authority.SyncOffer(chonk) + if err != nil { + return serverError(w, "authority.SyncOffer: %v", err) + } + sendAUMs, err := authority.MissingAUMs(chonk, nodeOffer) + if err != nil { + return serverError(w, "authority.MissingAUMs: %v", err) + } + + head, ancestors, err := tka.FromSyncOffer(controlOffer) + if err != nil { + return serverError(w, "FromSyncOffer: %v", err) + } + resp := tailcfg.TKASyncOfferResponse{ + Head: head, + Ancestors: ancestors, + MissingAUMs: make([]tkatype.MarshaledAUM, len(sendAUMs)), + } + for i, a := range sendAUMs { + resp.MissingAUMs[i] = a.Serialize() + } + + log.Printf("responding to sync offer with:\n%+v", resp) + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(resp); err != nil { + return serverError(w, "Encode: %v", err) + } + return nil +} + +// HandleTKASign handles a request to /machine/tka/sign. 
+// +// If the signature request is valid, it sends a response to the client, and +// gives the caller the signature and public key of the node being signed. +func HandleTKASign(w http.ResponseWriter, r *http.Request, authority *tka.Authority) (*tkatype.MarshaledSignature, *key.NodePublic, error) { + req := new(tailcfg.TKASubmitSignatureRequest) + if err := json.NewDecoder(r.Body).Decode(req); err != nil { + return nil, nil, userError(w, "Decode: %v", err) + } + if req.Version != tailcfg.CurrentCapabilityVersion { + return nil, nil, userError(w, "sign CapVer = %v, want %v", req.Version, tailcfg.CurrentCapabilityVersion) + } + + var sig tka.NodeKeySignature + if err := sig.Unserialize(req.Signature); err != nil { + return nil, nil, userError(w, "malformed signature: %v", err) + } + var keyBeingSigned key.NodePublic + if err := keyBeingSigned.UnmarshalBinary(sig.Pubkey); err != nil { + return nil, nil, userError(w, "malformed signature pubkey: %v", err) + } + if err := authority.NodeKeyAuthorized(keyBeingSigned, req.Signature); err != nil { + return nil, nil, userError(w, "signature does not verify: %v", err) + } + + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(tailcfg.TKASubmitSignatureResponse{}); err != nil { + return nil, nil, serverError(w, "Encode: %v", err) + } + return &req.Signature, &keyBeingSigned, nil +} + +// HandleTKASyncSend handles a request to /machine/tka/send. +// +// If the request is valid, it adds the new AUMs to the authority, and sends +// a response to the client with the new head. 
+func HandleTKASyncSend(w http.ResponseWriter, r *http.Request, authority *tka.Authority, chonk tka.Chonk) error { + body := new(tailcfg.TKASyncSendRequest) + if err := json.NewDecoder(r.Body).Decode(body); err != nil { + return userError(w, "Decode: %v", err) + } + log.Printf("got sync send:\n%+v", body) + + var remoteHead tka.AUMHash + if err := remoteHead.UnmarshalText([]byte(body.Head)); err != nil { + return userError(w, "head unmarshal: %v", err) + } + toApply := make([]tka.AUM, len(body.MissingAUMs)) + for i, a := range body.MissingAUMs { + if err := toApply[i].Unserialize(a); err != nil { + return userError(w, "decoding missingAUM[%d]: %v", i, err) + } + } + + if len(toApply) > 0 { + if err := authority.Inform(chonk, toApply); err != nil { + return serverError(w, "control.Inform(%+v) failed: %v", toApply, err) + } + } + head, err := authority.Head().MarshalText() + if err != nil { + return serverError(w, "head marshal: %v", err) + } + + resp := tailcfg.TKASyncSendResponse{ + Head: string(head), + } + w.WriteHeader(200) + if err := json.NewEncoder(w).Encode(resp); err != nil { + return serverError(w, "Encode: %v", err) + } + return nil +} From 8af7778ce04457a5f84a45e7cc8f58f02b7bfb4c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 26 Nov 2025 09:26:08 -0800 Subject: [PATCH 0760/1093] util/execqueue: don't hold mutex in RunSync We don't hold q.mu while running normal ExecQueue.Add funcs, so we shouldn't in RunSync either. Otherwise code it calls can't shut down the queue, as seen in #18502. 
Updates #18052 Co-authored-by: Nick Khyl Change-Id: Ic5e53440411eca5e9fabac7f4a68a9f6ef026de1 Signed-off-by: Brad Fitzpatrick --- util/execqueue/execqueue.go | 37 +++++++++++++++++--------------- util/execqueue/execqueue_test.go | 9 ++++++++ 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/util/execqueue/execqueue.go b/util/execqueue/execqueue.go index 2ea0c1f2f231f..87616a6b50a45 100644 --- a/util/execqueue/execqueue.go +++ b/util/execqueue/execqueue.go @@ -39,21 +39,21 @@ func (q *ExecQueue) Add(f func()) { // RunSync waits for the queue to be drained and then synchronously runs f. // It returns an error if the queue is closed before f is run or ctx expires. func (q *ExecQueue) RunSync(ctx context.Context, f func()) error { - for { - if err := q.Wait(ctx); err != nil { - return err - } - q.mu.Lock() - if q.inFlight { - q.mu.Unlock() - continue - } - defer q.mu.Unlock() - if q.closed { - return errors.New("closed") - } - f() + q.mu.Lock() + q.initCtxLocked() + shutdownCtx := q.ctx + q.mu.Unlock() + + ch := make(chan struct{}) + q.Add(f) + q.Add(func() { close(ch) }) + select { + case <-ch: return nil + case <-ctx.Done(): + return ctx.Err() + case <-shutdownCtx.Done(): + return errExecQueueShutdown } } @@ -94,6 +94,8 @@ func (q *ExecQueue) initCtxLocked() { } } +var errExecQueueShutdown = errors.New("execqueue shut down") + // Wait waits for the queue to be empty or shut down. 
func (q *ExecQueue) Wait(ctx context.Context) error { q.mu.Lock() @@ -104,10 +106,11 @@ func (q *ExecQueue) Wait(ctx context.Context) error { q.doneWaiter = waitCh } closed := q.closed + shutdownCtx := q.ctx q.mu.Unlock() if closed { - return errors.New("execqueue shut down") + return errExecQueueShutdown } if waitCh == nil { return nil @@ -116,8 +119,8 @@ func (q *ExecQueue) Wait(ctx context.Context) error { select { case <-waitCh: return nil - case <-q.ctx.Done(): - return errors.New("execqueue shut down") + case <-shutdownCtx.Done(): + return errExecQueueShutdown case <-ctx.Done(): return ctx.Err() } diff --git a/util/execqueue/execqueue_test.go b/util/execqueue/execqueue_test.go index d10b741f72f8f..1bce69556e1f7 100644 --- a/util/execqueue/execqueue_test.go +++ b/util/execqueue/execqueue_test.go @@ -20,3 +20,12 @@ func TestExecQueue(t *testing.T) { t.Errorf("n=%d; want 1", got) } } + +// Test that RunSync doesn't hold q.mu and block Shutdown +// as we saw in tailscale/tailscale#18502 +func TestExecQueueRunSyncLocking(t *testing.T) { + q := &ExecQueue{} + q.RunSync(t.Context(), func() { + q.Shutdown() + }) +} From 9eff8a45034bc36a17004dce1fe6e7732af631a4 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 26 Nov 2025 12:35:24 -0600 Subject: [PATCH 0761/1093] feature/tpm: return opening errors from both /dev/tpmrm0 and /dev/tpm0 (#18071) This might help users diagnose why TPM access is failing for tpmrm0. 
Fixes #18026 Signed-off-by: Andrew Lytvynov --- feature/tpm/tpm_linux.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/feature/tpm/tpm_linux.go b/feature/tpm/tpm_linux.go index 6c8131e8d8a28..3f05c9a8c38ad 100644 --- a/feature/tpm/tpm_linux.go +++ b/feature/tpm/tpm_linux.go @@ -4,6 +4,8 @@ package tpm import ( + "errors" + "github.com/google/go-tpm/tpm2/transport" "github.com/google/go-tpm/tpm2/transport/linuxtpm" ) @@ -13,5 +15,10 @@ func open() (transport.TPMCloser, error) { if err == nil { return tpm, nil } - return linuxtpm.Open("/dev/tpm0") + errs := []error{err} + tpm, err = linuxtpm.Open("/dev/tpm0") + if err == nil { + return tpm, nil + } + return nil, errors.Join(errs...) } From 5ee0c6bf1df5a96f5f58198dcf9d3b241a8ccef1 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Tue, 25 Nov 2025 15:56:44 -0800 Subject: [PATCH 0762/1093] derp/derpserver: add a unique sender cardinality estimate Adds an observation point that may identify potentially abusive traffic patterns at outlier values. 
Updates tailscale/corp#24681 Signed-off-by: James Tucker --- cmd/derper/depaware.txt | 2 + derp/derpserver/derpserver.go | 33 ++++- derp/derpserver/derpserver_test.go | 195 +++++++++++++++++++++++++++++ flake.nix | 2 +- go.mod | 2 + go.mod.sri | 2 +- go.sum | 4 + shell.nix | 2 +- 8 files changed, 238 insertions(+), 4 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 6608faaf741fc..9c720fa604869 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -2,6 +2,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 + github.com/axiomhq/hyperloglog from tailscale.com/derp/derpserver github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus github.com/coder/websocket from tailscale.com/cmd/derper+ @@ -9,6 +10,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil + github.com/dgryski/go-metro from github.com/axiomhq/hyperloglog github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ diff --git a/derp/derpserver/derpserver.go b/derp/derpserver/derpserver.go index 0bbc667806a5a..1879e0c536f3d 100644 --- a/derp/derpserver/derpserver.go +++ b/derp/derpserver/derpserver.go @@ -36,6 +36,7 @@ import ( "sync/atomic" "time" + "github.com/axiomhq/hyperloglog" "go4.org/mem" "golang.org/x/sync/errgroup" "tailscale.com/client/local" @@ -1643,6 +1644,12 @@ type sclient struct { sawSrc 
map[key.NodePublic]set.Handle bw *lazyBufioWriter + // senderCardinality estimates the number of unique peers that have + // sent packets to this client. Owned by sendLoop, protected by + // senderCardinalityMu for reads from other goroutines. + senderCardinalityMu sync.Mutex + senderCardinality *hyperloglog.Sketch + // Guarded by s.mu // // peerStateChange is used by mesh peers (a set of regional @@ -1778,6 +1785,8 @@ func (c *sclient) onSendLoopDone() { func (c *sclient) sendLoop(ctx context.Context) error { defer c.onSendLoopDone() + c.senderCardinality = hyperloglog.New() + jitter := rand.N(5 * time.Second) keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(derp.KeepAlive + jitter) defer keepAliveTick.Stop() @@ -2000,6 +2009,11 @@ func (c *sclient) sendPacket(srcKey key.NodePublic, contents []byte) (err error) if withKey { pktLen += key.NodePublicRawLen c.noteSendFromSrc(srcKey) + if c.senderCardinality != nil { + c.senderCardinalityMu.Lock() + c.senderCardinality.Insert(srcKey.AppendTo(nil)) + c.senderCardinalityMu.Unlock() + } } if err = derp.WriteFrameHeader(c.bw.bw(), derp.FrameRecvPacket, uint32(pktLen)); err != nil { return err @@ -2013,6 +2027,17 @@ func (c *sclient) sendPacket(srcKey key.NodePublic, contents []byte) (err error) return err } +// EstimatedUniqueSenders returns an estimate of the number of unique peers +// that have sent packets to this client. +func (c *sclient) EstimatedUniqueSenders() uint64 { + c.senderCardinalityMu.Lock() + defer c.senderCardinalityMu.Unlock() + if c.senderCardinality == nil { + return 0 + } + return c.senderCardinality.Estimate() +} + // noteSendFromSrc notes that we are about to write a packet // from src to sclient. // @@ -2295,7 +2320,8 @@ type BytesSentRecv struct { Sent uint64 Recv uint64 // Key is the public key of the client which sent/received these bytes. 
- Key key.NodePublic + Key key.NodePublic + UniqueSenders uint64 `json:",omitzero"` } // parseSSOutput parses the output from the specific call to ss in ServeDebugTraffic. @@ -2349,6 +2375,11 @@ func (s *Server) ServeDebugTraffic(w http.ResponseWriter, r *http.Request) { if prev.Sent < next.Sent || prev.Recv < next.Recv { if pkey, ok := s.keyOfAddr[k]; ok { next.Key = pkey + if cs, ok := s.clients[pkey]; ok { + if c := cs.activeClient.Load(); c != nil { + next.UniqueSenders = c.EstimatedUniqueSenders() + } + } if err := enc.Encode(next); err != nil { s.mu.Unlock() return diff --git a/derp/derpserver/derpserver_test.go b/derp/derpserver/derpserver_test.go index 2db5f25bc00b7..1dd86f3146c5c 100644 --- a/derp/derpserver/derpserver_test.go +++ b/derp/derpserver/derpserver_test.go @@ -9,6 +9,7 @@ import ( "context" "crypto/x509" "encoding/asn1" + "encoding/binary" "expvar" "fmt" "log" @@ -20,6 +21,7 @@ import ( "testing" "time" + "github.com/axiomhq/hyperloglog" qt "github.com/frankban/quicktest" "go4.org/mem" "golang.org/x/time/rate" @@ -755,6 +757,35 @@ func TestParseSSOutput(t *testing.T) { } } +func TestServeDebugTrafficUniqueSenders(t *testing.T) { + s := New(key.NewNode(), t.Logf) + defer s.Close() + + clientKey := key.NewNode().Public() + c := &sclient{ + key: clientKey, + s: s, + logf: logger.Discard, + senderCardinality: hyperloglog.New(), + } + + for i := 0; i < 5; i++ { + c.senderCardinality.Insert(key.NewNode().Public().AppendTo(nil)) + } + + s.mu.Lock() + cs := &clientSet{} + cs.activeClient.Store(c) + s.clients[clientKey] = cs + s.mu.Unlock() + + estimate := c.EstimatedUniqueSenders() + t.Logf("Estimated unique senders: %d", estimate) + if estimate < 4 || estimate > 6 { + t.Errorf("EstimatedUniqueSenders() = %d, want ~5 (4-6 range)", estimate) + } +} + func TestGetPerClientSendQueueDepth(t *testing.T) { c := qt.New(t) envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH" @@ -780,3 +811,167 @@ func TestGetPerClientSendQueueDepth(t *testing.T) { }) } } + 
+func TestSenderCardinality(t *testing.T) { + s := New(key.NewNode(), t.Logf) + defer s.Close() + + c := &sclient{ + key: key.NewNode().Public(), + s: s, + logf: logger.WithPrefix(t.Logf, "test client: "), + } + + if got := c.EstimatedUniqueSenders(); got != 0 { + t.Errorf("EstimatedUniqueSenders() before init = %d, want 0", got) + } + + c.senderCardinality = hyperloglog.New() + + if got := c.EstimatedUniqueSenders(); got != 0 { + t.Errorf("EstimatedUniqueSenders() with no senders = %d, want 0", got) + } + + senders := make([]key.NodePublic, 10) + for i := range senders { + senders[i] = key.NewNode().Public() + c.senderCardinality.Insert(senders[i].AppendTo(nil)) + } + + estimate := c.EstimatedUniqueSenders() + t.Logf("Estimated unique senders after 10 inserts: %d", estimate) + + if estimate < 8 || estimate > 12 { + t.Errorf("EstimatedUniqueSenders() = %d, want ~10 (8-12 range)", estimate) + } + + for i := 0; i < 5; i++ { + c.senderCardinality.Insert(senders[i].AppendTo(nil)) + } + + estimate2 := c.EstimatedUniqueSenders() + t.Logf("Estimated unique senders after duplicates: %d", estimate2) + + if estimate2 < 8 || estimate2 > 12 { + t.Errorf("EstimatedUniqueSenders() after duplicates = %d, want ~10 (8-12 range)", estimate2) + } +} + +func TestSenderCardinality100(t *testing.T) { + s := New(key.NewNode(), t.Logf) + defer s.Close() + + c := &sclient{ + key: key.NewNode().Public(), + s: s, + logf: logger.WithPrefix(t.Logf, "test client: "), + senderCardinality: hyperloglog.New(), + } + + numSenders := 100 + for i := 0; i < numSenders; i++ { + c.senderCardinality.Insert(key.NewNode().Public().AppendTo(nil)) + } + + estimate := c.EstimatedUniqueSenders() + t.Logf("Estimated unique senders for 100 actual senders: %d", estimate) + + if estimate < 85 || estimate > 115 { + t.Errorf("EstimatedUniqueSenders() = %d, want ~100 (85-115 range)", estimate) + } +} + +func TestSenderCardinalityTracking(t *testing.T) { + s := New(key.NewNode(), t.Logf) + defer s.Close() + + c := 
&sclient{ + key: key.NewNode().Public(), + s: s, + logf: logger.WithPrefix(t.Logf, "test client: "), + senderCardinality: hyperloglog.New(), + } + + zeroKey := key.NodePublic{} + if zeroKey != (key.NodePublic{}) { + c.senderCardinality.Insert(zeroKey.AppendTo(nil)) + } + + if estimate := c.EstimatedUniqueSenders(); estimate != 0 { + t.Errorf("EstimatedUniqueSenders() after zero key = %d, want 0", estimate) + } + + sender1 := key.NewNode().Public() + sender2 := key.NewNode().Public() + + if sender1 != (key.NodePublic{}) { + c.senderCardinality.Insert(sender1.AppendTo(nil)) + } + if sender2 != (key.NodePublic{}) { + c.senderCardinality.Insert(sender2.AppendTo(nil)) + } + + estimate := c.EstimatedUniqueSenders() + t.Logf("Estimated unique senders after 2 senders: %d", estimate) + + if estimate < 1 || estimate > 3 { + t.Errorf("EstimatedUniqueSenders() = %d, want ~2 (1-3 range)", estimate) + } +} + +func BenchmarkHyperLogLogInsert(b *testing.B) { + hll := hyperloglog.New() + sender := key.NewNode().Public() + senderBytes := sender.AppendTo(nil) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + hll.Insert(senderBytes) + } +} + +func BenchmarkHyperLogLogInsertUnique(b *testing.B) { + hll := hyperloglog.New() + + b.ResetTimer() + + buf := make([]byte, 32) + for i := 0; i < b.N; i++ { + binary.LittleEndian.PutUint64(buf, uint64(i)) + hll.Insert(buf) + } +} + +func BenchmarkHyperLogLogEstimate(b *testing.B) { + hll := hyperloglog.New() + + for i := 0; i < 100; i++ { + hll.Insert(key.NewNode().Public().AppendTo(nil)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = hll.Estimate() + } +} + +func BenchmarkSenderCardinalityOverhead(b *testing.B) { + hll := hyperloglog.New() + sender := key.NewNode().Public() + + b.Run("WithTracking", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if hll != nil { + hll.Insert(sender.AppendTo(nil)) + } + } + }) + + b.Run("WithoutTracking", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = 
sender.AppendTo(nil) + } + }) +} diff --git a/flake.nix b/flake.nix index 505061a765362..855ce555bb1cc 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-jJSSXMyUqcJoZuqfSlBsKDQezyqS+jDkRglMMjG1K8g= +# nix-direnv cache busting line: sha256-IkodqRYdueML7U2Hh8vRw6Et7+WII+VXuPJ3jZ2xYx8= diff --git a/go.mod b/go.mod index a49a9724f7af1..bd6fe441d0e0a 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58 github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 + github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd github.com/bramvdbogaerde/go-scp v1.4.0 github.com/cilium/ebpf v0.15.0 @@ -149,6 +150,7 @@ require ( github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/cyphar/filepath-securejoin v0.3.6 // indirect github.com/deckarep/golang-set/v2 v2.8.0 // indirect + github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect diff --git a/go.mod.sri b/go.mod.sri index 66422652e2262..329fe940505e3 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-jJSSXMyUqcJoZuqfSlBsKDQezyqS+jDkRglMMjG1K8g= +sha256-IkodqRYdueML7U2Hh8vRw6Et7+WII+VXuPJ3jZ2xYx8= diff --git a/go.sum b/go.sum index f70fe9159f614..111c99ac909e5 100644 --- a/go.sum +++ b/go.sum @@ -170,6 +170,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5 github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/axiomhq/hyperloglog 
v0.0.0-20240319100328-84253e514e02 h1:bXAPYSbdYbS5VTy92NIUbeDI1qyggi+JYh5op9IFlcQ= +github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -271,6 +273,8 @@ github.com/deckarep/golang-set/v2 v2.8.0 h1:swm0rlPCmdWn9mESxKOjWk8hXSqoxOp+Zlfu github.com/deckarep/golang-set/v2 v2.8.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= +github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= diff --git a/shell.nix b/shell.nix index d412693d9fdd1..28bdbdafb8e0d 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-jJSSXMyUqcJoZuqfSlBsKDQezyqS+jDkRglMMjG1K8g= +# nix-direnv cache busting line: sha256-IkodqRYdueML7U2Hh8vRw6Et7+WII+VXuPJ3jZ2xYx8= From 3f9f0ed93c010eb0aae1ddf968ed2f81c4d42a5d Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 26 Nov 2025 15:49:52 -0500 Subject: [PATCH 0763/1093] VERSION.txt: this is v1.93.0 (#18074) Signed-off-by: Jonathan Nobels --- 
VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 6979a6c0661bf..95784efddbc41 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.91.0 +1.93.0 From 74ed589042c4fc255d148fc5356dc7e3aa1693be Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 19 Nov 2025 10:54:42 -0800 Subject: [PATCH 0764/1093] syncs: add means of declare locking assumptions for debug mode validation Updates #17852 Change-Id: I42a64a990dcc8f708fa23a516a40731a19967aba Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 39 +++++++++++++++++++++++++++++++++++++++ syncs/mutex.go | 5 +++++ syncs/mutex_debug.go | 4 ++++ 3 files changed, 48 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 3e70548963e17..fbf34aa426cea 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -876,6 +876,7 @@ func (b *LocalBackend) initPrefsFromConfig(conf *conffile.Config) error { } func (b *LocalBackend) setStaticEndpointsFromConfigLocked(conf *conffile.Config) { + syncs.RequiresMutex(&b.mu) if conf.Parsed.StaticEndpoints == nil && (b.conf == nil || b.conf.Parsed.StaticEndpoints == nil) { return } @@ -894,6 +895,7 @@ func (b *LocalBackend) setStaticEndpointsFromConfigLocked(conf *conffile.Config) } func (b *LocalBackend) setStateLocked(state ipn.State) { + syncs.RequiresMutex(&b.mu) if b.state == state { return } @@ -906,6 +908,7 @@ func (b *LocalBackend) setStateLocked(state ipn.State) { // setConfigLocked uses the provided config to update the backend's prefs // and other state. func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { + syncs.RequiresMutex(&b.mu) p := b.pm.CurrentPrefs().AsStruct() mp, err := conf.Parsed.ToPrefs() if err != nil { @@ -927,6 +930,7 @@ var assumeNetworkUpdateForTest = envknob.RegisterBool("TS_ASSUME_NETWORK_UP_FOR_ // // b.mu must be held. 
func (b *LocalBackend) pauseOrResumeControlClientLocked() { + syncs.RequiresMutex(&b.mu) if b.cc == nil { return } @@ -1204,6 +1208,7 @@ func (b *LocalBackend) Prefs() ipn.PrefsView { } func (b *LocalBackend) sanitizedPrefsLocked() ipn.PrefsView { + syncs.RequiresMutex(&b.mu) return stripKeysFromPrefs(b.pm.CurrentPrefs()) } @@ -1335,6 +1340,7 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) { } func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) { + syncs.RequiresMutex(&b.mu) cn := b.currentNode() nm := cn.NetMap() if nm == nil { @@ -1873,6 +1879,8 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { if !buildfeatures.HasSystemPolicy { return false } + syncs.RequiresMutex(&b.mu) + if controlURL, err := b.polc.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true @@ -1941,6 +1949,8 @@ func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange if !buildfeatures.HasUseExitNode { return false } + syncs.RequiresMutex(&b.mu) + if exitNodeIDStr, _ := b.polc.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) @@ -2182,6 +2192,8 @@ func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged if !buildfeatures.HasUseExitNode { return false } + syncs.RequiresMutex(&b.mu) + // As of 2025-07-08, the only supported auto exit node expression is [ipn.AnyExitNode]. // // However, to maintain forward compatibility with future auto exit node expressions, @@ -2295,6 +2307,8 @@ func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) { // // b.mu must be held. 
func (b *LocalBackend) setWgengineStatusLocked(s *wgengine.Status) { + syncs.RequiresMutex(&b.mu) + es := b.parseWgStatusLocked(s) cc := b.cc @@ -4312,6 +4326,7 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip // // b.mu must be held. func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn.PrefsView, mp *ipn.MaskedPrefs) error { + syncs.RequiresMutex(&b.mu) var errs []error if mp.RunSSHSet && mp.RunSSH && !envknob.CanSSHD() { @@ -4362,6 +4377,7 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn // // b.mu must be held. func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change *ipn.MaskedPrefs) bool { + syncs.RequiresMutex(&b.mu) if !buildfeatures.HasUseExitNode { return false } @@ -4403,6 +4419,7 @@ func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change // // b.mu must be held. func (b *LocalBackend) adjustEditPrefsLocked(prefs ipn.PrefsView, mp *ipn.MaskedPrefs) { + syncs.RequiresMutex(&b.mu) // Zeroing the ExitNodeID via localAPI must also zero the prior exit node. if mp.ExitNodeIDSet && mp.ExitNodeID == "" && !mp.InternalExitNodePriorSet { mp.InternalExitNodePrior = "" @@ -4480,6 +4497,7 @@ func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, o // startReconnectTimerLocked sets a timer to automatically set WantRunning to true // after the specified duration. 
func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { + syncs.RequiresMutex(&b.mu) if b.reconnectTimer != nil { // Stop may return false if the timer has already fired, // and the function has been called in its own goroutine, @@ -4522,11 +4540,13 @@ func (b *LocalBackend) startReconnectTimerLocked(d time.Duration) { } func (b *LocalBackend) resetAlwaysOnOverrideLocked() { + syncs.RequiresMutex(&b.mu) b.overrideAlwaysOn = false b.stopReconnectTimerLocked() } func (b *LocalBackend) stopReconnectTimerLocked() { + syncs.RequiresMutex(&b.mu) if b.reconnectTimer != nil { // Stop may return false if the timer has already fired, // and the function has been called in its own goroutine, @@ -4542,6 +4562,7 @@ func (b *LocalBackend) stopReconnectTimerLocked() { // b.mu must be held. func (b *LocalBackend) editPrefsLocked(actor ipnauth.Actor, mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { + syncs.RequiresMutex(&b.mu) p0 := b.pm.CurrentPrefs() // Check if the changes in mp are allowed. @@ -5660,6 +5681,7 @@ func (b *LocalBackend) enterStateLocked(newState ipn.State) { } func (b *LocalBackend) hasNodeKeyLocked() bool { + syncs.RequiresMutex(&b.mu) // we can't use b.Prefs(), because it strips the keys, oops! p := b.pm.CurrentPrefs() return p.Valid() && p.Persist().Valid() && !p.Persist().PrivateNodeKey().IsZero() @@ -5680,9 +5702,11 @@ func (b *LocalBackend) NodeKey() key.NodePublic { // // b.mu must be held func (b *LocalBackend) nextStateLocked() ipn.State { + syncs.RequiresMutex(&b.mu) if b.health.IsUnhealthy(ipn.StateStoreHealth) { return ipn.NoState } + var ( cc = b.cc cn = b.currentNode() @@ -5758,6 +5782,8 @@ func (b *LocalBackend) nextStateLocked() ipn.State { // // requires b.mu to be held. func (b *LocalBackend) stateMachineLocked() { + syncs.RequiresMutex(&b.mu) + b.enterStateLocked(b.nextStateLocked()) } @@ -5767,6 +5793,7 @@ func (b *LocalBackend) stateMachineLocked() { // // b.mu must be held. 
func (b *LocalBackend) stopEngineAndWaitLocked() { + syncs.RequiresMutex(&b.mu) b.logf("stopEngineAndWait...") st, _ := b.e.ResetAndStop() // TODO: what should we do if this returns an error? b.setWgengineStatusLocked(st) @@ -5787,6 +5814,7 @@ func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) { // returned value is non-nil, the caller must call Shutdown on it after // releasing b.mu. func (b *LocalBackend) resetControlClientLocked() controlclient.Client { + syncs.RequiresMutex(&b.mu) if b.cc == nil { return nil } @@ -5813,6 +5841,8 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client { // resetAuthURLLocked resets authURL, canceling any pending interactive login. func (b *LocalBackend) resetAuthURLLocked() { + syncs.RequiresMutex(&b.mu) + b.authURL = "" b.authURLTime = time.Time{} b.authActor = nil @@ -5842,6 +5872,8 @@ func (b *LocalBackend) ShouldExposeRemoteWebClient() bool { // // b.mu must be held. func (b *LocalBackend) setWebClientAtomicBoolLocked(nm *netmap.NetworkMap) { + syncs.RequiresMutex(&b.mu) + shouldRun := !nm.HasCap(tailcfg.NodeAttrDisableWebClient) wasRunning := b.webClientAtomicBool.Swap(shouldRun) if wasRunning && !shouldRun { @@ -5854,6 +5886,8 @@ func (b *LocalBackend) setWebClientAtomicBoolLocked(nm *netmap.NetworkMap) { // // b.mu must be held. func (b *LocalBackend) setExposeRemoteWebClientAtomicBoolLocked(prefs ipn.PrefsView) { + syncs.RequiresMutex(&b.mu) + if !buildfeatures.HasWebClient { return } @@ -5982,6 +6016,8 @@ func (b *LocalBackend) RefreshExitNode() { // refreshExitNodeLocked is like RefreshExitNode but requires b.mu be held. func (b *LocalBackend) refreshExitNodeLocked() { + syncs.RequiresMutex(&b.mu) + if b.resolveExitNodeLocked() { b.authReconfigLocked() } @@ -5997,6 +6033,8 @@ func (b *LocalBackend) refreshExitNodeLocked() { // // b.mu must be held. 
func (b *LocalBackend) resolveExitNodeLocked() (changed bool) { + syncs.RequiresMutex(&b.mu) + if !buildfeatures.HasUseExitNode { return false } @@ -6058,6 +6096,7 @@ func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { // // b.mu must be held. func (b *LocalBackend) resolveExitNodeInPrefsLocked(prefs *ipn.Prefs) (changed bool) { + syncs.RequiresMutex(&b.mu) if !buildfeatures.HasUseExitNode { return false } diff --git a/syncs/mutex.go b/syncs/mutex.go index e61d1d1ab0687..8034e17121717 100644 --- a/syncs/mutex.go +++ b/syncs/mutex.go @@ -16,3 +16,8 @@ type Mutex = sync.Mutex // // It's only not a sync.RWMutex when built with the ts_mutex_debug build tag. type RWMutex = sync.RWMutex + +// RequiresMutex declares the caller assumes it has the given +// mutex held. In non-debug builds, it's a no-op and compiles to +// nothing. +func RequiresMutex(mu *sync.Mutex) {} diff --git a/syncs/mutex_debug.go b/syncs/mutex_debug.go index 14b52ffe3cc51..55a9b1231092f 100644 --- a/syncs/mutex_debug.go +++ b/syncs/mutex_debug.go @@ -15,4 +15,8 @@ type RWMutex struct { sync.RWMutex } +func RequiresMutex(mu *sync.Mutex) { + // TODO: check +} + // TODO(bradfitz): actually track stuff when in debug mode. From 9cc07bf9c0ba448792818b84b53cdf55137977bb Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Wed, 26 Nov 2025 16:55:38 -0700 Subject: [PATCH 0765/1093] .github/workflows: skip draft PRs for request review workflows Skip the "request review" workflows for PRs that are in draft to reduce noise / skip adding reviewers to PRs that are intentionally marked as not ready to review. 
Updates #cleanup Signed-off-by: Mario Minardi --- .github/workflows/request-dataplane-review.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml index 7ae5668c3765b..58f6d3d0b5979 100644 --- a/.github/workflows/request-dataplane-review.yml +++ b/.github/workflows/request-dataplane-review.yml @@ -2,6 +2,7 @@ name: request-dataplane-review on: pull_request: + types: [ opened, synchronize, reopened, ready_for_review ] paths: - ".github/workflows/request-dataplane-review.yml" - "**/*derp*" @@ -10,6 +11,7 @@ on: jobs: request-dataplane-review: + if: github.event.pull_request.draft == false name: Request Dataplane Review runs-on: ubuntu-latest steps: From 9500689bc1bd0c736427fabfb878afb8636073a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 15 Nov 2025 10:53:08 +0000 Subject: [PATCH 0766/1093] build(deps): bump js-yaml from 4.1.0 to 4.1.1 in /client/web Bumps [js-yaml](https://github.com/nodeca/js-yaml) from 4.1.0 to 4.1.1. - [Changelog](https://github.com/nodeca/js-yaml/blob/master/CHANGELOG.md) - [Commits](https://github.com/nodeca/js-yaml/compare/4.1.0...4.1.1) --- updated-dependencies: - dependency-name: js-yaml dependency-version: 4.1.1 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] --- client/web/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/client/web/yarn.lock b/client/web/yarn.lock index 7c9d9222ec727..37d35fc8908a6 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -3885,9 +3885,9 @@ js-tokens@^8.0.2: integrity sha512-UfJMcSJc+SEXEl9lH/VLHSZbThQyLpw1vLO1Lb+j4RWDvG3N2f7yj3PVQA3cmkTBNldJ9eFnM+xEXxHIXrYiJw== js-yaml@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" - integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== + version "4.1.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.1.tgz#854c292467705b699476e1a2decc0c8a3458806b" + integrity sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== dependencies: argparse "^2.0.1" From 3e2476ec1379589f4748ee9b8702872225cf5ff0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Oct 2025 15:34:26 +0000 Subject: [PATCH 0767/1093] build(deps-dev): bump vite from 5.1.7 to 5.4.21 in /client/web Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.1.7 to 5.4.21. - [Release notes](https://github.com/vitejs/vite/releases) - [Changelog](https://github.com/vitejs/vite/blob/v5.4.21/packages/vite/CHANGELOG.md) - [Commits](https://github.com/vitejs/vite/commits/v5.4.21/packages/vite) --- updated-dependencies: - dependency-name: vite dependency-version: 5.4.21 dependency-type: direct:development ... 
Signed-off-by: dependabot[bot] --- client/web/package.json | 2 +- client/web/yarn.lock | 556 ++++++++++++++++++++++------------------ 2 files changed, 311 insertions(+), 247 deletions(-) diff --git a/client/web/package.json b/client/web/package.json index c45f7d6a867ec..3d040425e907a 100644 --- a/client/web/package.json +++ b/client/web/package.json @@ -34,7 +34,7 @@ "prettier-plugin-organize-imports": "^3.2.2", "tailwindcss": "^3.3.3", "typescript": "^5.3.3", - "vite": "^5.1.7", + "vite": "^5.4.21", "vite-plugin-svgr": "^4.2.0", "vite-tsconfig-paths": "^3.5.0", "vitest": "^1.3.1" diff --git a/client/web/yarn.lock b/client/web/yarn.lock index 37d35fc8908a6..4e4272d0dada9 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -1130,120 +1130,120 @@ resolved "https://registry.yarnpkg.com/@cush/relative/-/relative-1.0.0.tgz#8cd1769bf9bde3bb27dac356b1bc94af40f6cc16" integrity sha512-RpfLEtTlyIxeNPGKcokS+p3BZII/Q3bYxryFRglh5H3A3T8q9fsLYm72VYAMEOOIBLEa8o93kFLiBDUWKrwXZA== -"@esbuild/aix-ppc64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz#d1bc06aedb6936b3b6d313bf809a5a40387d2b7f" - integrity sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA== - -"@esbuild/android-arm64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz#7ad65a36cfdb7e0d429c353e00f680d737c2aed4" - integrity sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA== - -"@esbuild/android-arm@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.19.12.tgz#b0c26536f37776162ca8bde25e42040c203f2824" - integrity sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w== - -"@esbuild/android-x64@0.19.12": - version "0.19.12" - resolved 
"https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.19.12.tgz#cb13e2211282012194d89bf3bfe7721273473b3d" - integrity sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew== - -"@esbuild/darwin-arm64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz#cbee41e988020d4b516e9d9e44dd29200996275e" - integrity sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g== - -"@esbuild/darwin-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz#e37d9633246d52aecf491ee916ece709f9d5f4cd" - integrity sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A== - -"@esbuild/freebsd-arm64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz#1ee4d8b682ed363b08af74d1ea2b2b4dbba76487" - integrity sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA== - -"@esbuild/freebsd-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz#37a693553d42ff77cd7126764b535fb6cc28a11c" - integrity sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg== - -"@esbuild/linux-arm64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz#be9b145985ec6c57470e0e051d887b09dddb2d4b" - integrity sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA== - -"@esbuild/linux-arm@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz#207ecd982a8db95f7b5279207d0ff2331acf5eef" - integrity sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w== - 
-"@esbuild/linux-ia32@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz#d0d86b5ca1562523dc284a6723293a52d5860601" - integrity sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA== - -"@esbuild/linux-loong64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz#9a37f87fec4b8408e682b528391fa22afd952299" - integrity sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA== - -"@esbuild/linux-mips64el@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz#4ddebd4e6eeba20b509d8e74c8e30d8ace0b89ec" - integrity sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w== - -"@esbuild/linux-ppc64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz#adb67dadb73656849f63cd522f5ecb351dd8dee8" - integrity sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg== - -"@esbuild/linux-riscv64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz#11bc0698bf0a2abf8727f1c7ace2112612c15adf" - integrity sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg== - -"@esbuild/linux-s390x@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz#e86fb8ffba7c5c92ba91fc3b27ed5a70196c3cc8" - integrity sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg== - -"@esbuild/linux-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz#5f37cfdc705aea687dfe5dfbec086a05acfe9c78" - integrity 
sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg== - -"@esbuild/netbsd-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz#29da566a75324e0d0dd7e47519ba2f7ef168657b" - integrity sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA== - -"@esbuild/openbsd-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz#306c0acbdb5a99c95be98bdd1d47c916e7dc3ff0" - integrity sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw== - -"@esbuild/sunos-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz#0933eaab9af8b9b2c930236f62aae3fc593faf30" - integrity sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA== - -"@esbuild/win32-arm64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz#773bdbaa1971b36db2f6560088639ccd1e6773ae" - integrity sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A== - -"@esbuild/win32-ia32@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz#000516cad06354cc84a73f0943a4aa690ef6fd67" - integrity sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ== - -"@esbuild/win32-x64@0.19.12": - version "0.19.12" - resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz#c57c8afbb4054a3ab8317591a0b7320360b444ae" - integrity sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA== +"@esbuild/aix-ppc64@0.21.5": + version "0.21.5" + resolved 
"https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz#c7184a326533fcdf1b8ee0733e21c713b975575f" + integrity sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ== + +"@esbuild/android-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz#09d9b4357780da9ea3a7dfb833a1f1ff439b4052" + integrity sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A== + +"@esbuild/android-arm@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.21.5.tgz#9b04384fb771926dfa6d7ad04324ecb2ab9b2e28" + integrity sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg== + +"@esbuild/android-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.21.5.tgz#29918ec2db754cedcb6c1b04de8cd6547af6461e" + integrity sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA== + +"@esbuild/darwin-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz#e495b539660e51690f3928af50a76fb0a6ccff2a" + integrity sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ== + +"@esbuild/darwin-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz#c13838fa57372839abdddc91d71542ceea2e1e22" + integrity sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw== + +"@esbuild/freebsd-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz#646b989aa20bf89fd071dd5dbfad69a3542e550e" + integrity sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g== + 
+"@esbuild/freebsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz#aa615cfc80af954d3458906e38ca22c18cf5c261" + integrity sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ== + +"@esbuild/linux-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz#70ac6fa14f5cb7e1f7f887bcffb680ad09922b5b" + integrity sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q== + +"@esbuild/linux-arm@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz#fc6fd11a8aca56c1f6f3894f2bea0479f8f626b9" + integrity sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA== + +"@esbuild/linux-ia32@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz#3271f53b3f93e3d093d518d1649d6d68d346ede2" + integrity sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg== + +"@esbuild/linux-loong64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz#ed62e04238c57026aea831c5a130b73c0f9f26df" + integrity sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg== + +"@esbuild/linux-mips64el@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz#e79b8eb48bf3b106fadec1ac8240fb97b4e64cbe" + integrity sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg== + +"@esbuild/linux-ppc64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz#5f2203860a143b9919d383ef7573521fb154c3e4" + integrity 
sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w== + +"@esbuild/linux-riscv64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz#07bcafd99322d5af62f618cb9e6a9b7f4bb825dc" + integrity sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA== + +"@esbuild/linux-s390x@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz#b7ccf686751d6a3e44b8627ababc8be3ef62d8de" + integrity sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A== + +"@esbuild/linux-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz#6d8f0c768e070e64309af8004bb94e68ab2bb3b0" + integrity sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ== + +"@esbuild/netbsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz#bbe430f60d378ecb88decb219c602667387a6047" + integrity sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg== + +"@esbuild/openbsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz#99d1cf2937279560d2104821f5ccce220cb2af70" + integrity sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow== + +"@esbuild/sunos-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz#08741512c10d529566baba837b4fe052c8f3487b" + integrity sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg== + +"@esbuild/win32-arm64@0.21.5": + version "0.21.5" + resolved 
"https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz#675b7385398411240735016144ab2e99a60fc75d" + integrity sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A== + +"@esbuild/win32-ia32@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz#1bfc3ce98aa6ca9a0969e4d2af72144c59c1193b" + integrity sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA== + +"@esbuild/win32-x64@0.21.5": + version "0.21.5" + resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz#acad351d582d157bb145535db2a6ff53dd514b5c" + integrity sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw== "@eslint-community/eslint-utils@^4.2.0", "@eslint-community/eslint-utils@^4.4.0": version "4.4.0" @@ -1626,70 +1626,115 @@ estree-walker "^2.0.2" picomatch "^2.3.1" -"@rollup/rollup-android-arm-eabi@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.12.0.tgz#38c3abd1955a3c21d492af6b1a1dca4bb1d894d6" - integrity sha512-+ac02NL/2TCKRrJu2wffk1kZ+RyqxVUlbjSagNgPm94frxtr+XDL12E5Ll1enWskLrtrZ2r8L3wED1orIibV/w== - -"@rollup/rollup-android-arm64@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.12.0.tgz#3822e929f415627609e53b11cec9a4be806de0e2" - integrity sha512-OBqcX2BMe6nvjQ0Nyp7cC90cnumt8PXmO7Dp3gfAju/6YwG0Tj74z1vKrfRz7qAv23nBcYM8BCbhrsWqO7PzQQ== - -"@rollup/rollup-darwin-arm64@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.0.tgz#6c082de71f481f57df6cfa3701ab2a7afde96f69" - integrity sha512-X64tZd8dRE/QTrBIEs63kaOBG0b5GVEd3ccoLtyf6IdXtHdh8h+I56C2yC3PtC9Ucnv0CpNFJLqKFVgCYe0lOQ== - -"@rollup/rollup-darwin-x64@4.12.0": - version "4.12.0" - resolved 
"https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.12.0.tgz#c34ca0d31f3c46a22c9afa0e944403eea0edcfd8" - integrity sha512-cc71KUZoVbUJmGP2cOuiZ9HSOP14AzBAThn3OU+9LcA1+IUqswJyR1cAJj3Mg55HbjZP6OLAIscbQsQLrpgTOg== - -"@rollup/rollup-linux-arm-gnueabihf@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.12.0.tgz#48e899c1e438629c072889b824a98787a7c2362d" - integrity sha512-a6w/Y3hyyO6GlpKL2xJ4IOh/7d+APaqLYdMf86xnczU3nurFTaVN9s9jOXQg97BE4nYm/7Ga51rjec5nfRdrvA== - -"@rollup/rollup-linux-arm64-gnu@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.12.0.tgz#788c2698a119dc229062d40da6ada8a090a73a68" - integrity sha512-0fZBq27b+D7Ar5CQMofVN8sggOVhEtzFUwOwPppQt0k+VR+7UHMZZY4y+64WJ06XOhBTKXtQB/Sv0NwQMXyNAA== - -"@rollup/rollup-linux-arm64-musl@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.12.0.tgz#3882a4e3a564af9e55804beeb67076857b035ab7" - integrity sha512-eTvzUS3hhhlgeAv6bfigekzWZjaEX9xP9HhxB0Dvrdbkk5w/b+1Sxct2ZuDxNJKzsRStSq1EaEkVSEe7A7ipgQ== - -"@rollup/rollup-linux-riscv64-gnu@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.12.0.tgz#0c6ad792e1195c12bfae634425a3d2aa0fe93ab7" - integrity sha512-ix+qAB9qmrCRiaO71VFfY8rkiAZJL8zQRXveS27HS+pKdjwUfEhqo2+YF2oI+H/22Xsiski+qqwIBxVewLK7sw== - -"@rollup/rollup-linux-x64-gnu@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.12.0.tgz#9d62485ea0f18d8674033b57aa14fb758f6ec6e3" - integrity sha512-TenQhZVOtw/3qKOPa7d+QgkeM6xY0LtwzR8OplmyL5LrgTWIXpTQg2Q2ycBf8jm+SFW2Wt/DTn1gf7nFp3ssVA== - -"@rollup/rollup-linux-x64-musl@4.12.0": - version "4.12.0" - resolved 
"https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.12.0.tgz#50e8167e28b33c977c1f813def2b2074d1435e05" - integrity sha512-LfFdRhNnW0zdMvdCb5FNuWlls2WbbSridJvxOvYWgSBOYZtgBfW9UGNJG//rwMqTX1xQE9BAodvMH9tAusKDUw== - -"@rollup/rollup-win32-arm64-msvc@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.12.0.tgz#68d233272a2004429124494121a42c4aebdc5b8e" - integrity sha512-JPDxovheWNp6d7AHCgsUlkuCKvtu3RB55iNEkaQcf0ttsDU/JZF+iQnYcQJSk/7PtT4mjjVG8N1kpwnI9SLYaw== - -"@rollup/rollup-win32-ia32-msvc@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.12.0.tgz#366ca62221d1689e3b55a03f4ae12ae9ba595d40" - integrity sha512-fjtuvMWRGJn1oZacG8IPnzIV6GF2/XG+h71FKn76OYFqySXInJtseAqdprVTDTyqPxQOG9Exak5/E9Z3+EJ8ZA== - -"@rollup/rollup-win32-x64-msvc@4.12.0": - version "4.12.0" - resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.12.0.tgz#9ffdf9ed133a7464f4ae187eb9e1294413fab235" - integrity sha512-ZYmr5mS2wd4Dew/JjT0Fqi2NPB/ZhZ2VvPp7SmvPZb4Y1CG/LRcS6tcRo2cYU7zLK5A7cdbhWnnWmUjoI4qapg== +"@rollup/rollup-android-arm-eabi@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.52.5.tgz#0f44a2f8668ed87b040b6fe659358ac9239da4db" + integrity sha512-8c1vW4ocv3UOMp9K+gToY5zL2XiiVw3k7f1ksf4yO1FlDFQ1C2u72iACFnSOceJFsWskc2WZNqeRhFRPzv+wtQ== + +"@rollup/rollup-android-arm64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.52.5.tgz#25b9a01deef6518a948431564c987bcb205274f5" + integrity sha512-mQGfsIEFcu21mvqkEKKu2dYmtuSZOBMmAl5CFlPGLY94Vlcm+zWApK7F/eocsNzp8tKmbeBP8yXyAbx0XHsFNA== + +"@rollup/rollup-darwin-arm64@4.52.5": + version "4.52.5" + resolved 
"https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.52.5.tgz#8a102869c88f3780c7d5e6776afd3f19084ecd7f" + integrity sha512-takF3CR71mCAGA+v794QUZ0b6ZSrgJkArC+gUiG6LB6TQty9T0Mqh3m2ImRBOxS2IeYBo4lKWIieSvnEk2OQWA== + +"@rollup/rollup-darwin-x64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.52.5.tgz#8e526417cd6f54daf1d0c04cf361160216581956" + integrity sha512-W901Pla8Ya95WpxDn//VF9K9u2JbocwV/v75TE0YIHNTbhqUTv9w4VuQ9MaWlNOkkEfFwkdNhXgcLqPSmHy0fA== + +"@rollup/rollup-freebsd-arm64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.52.5.tgz#0e7027054493f3409b1f219a3eac5efd128ef899" + integrity sha512-QofO7i7JycsYOWxe0GFqhLmF6l1TqBswJMvICnRUjqCx8b47MTo46W8AoeQwiokAx3zVryVnxtBMcGcnX12LvA== + +"@rollup/rollup-freebsd-x64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.52.5.tgz#72b204a920139e9ec3d331bd9cfd9a0c248ccb10" + integrity sha512-jr21b/99ew8ujZubPo9skbrItHEIE50WdV86cdSoRkKtmWa+DDr6fu2c/xyRT0F/WazZpam6kk7IHBerSL7LDQ== + +"@rollup/rollup-linux-arm-gnueabihf@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.52.5.tgz#ab1b522ebe5b7e06c99504cc38f6cd8b808ba41c" + integrity sha512-PsNAbcyv9CcecAUagQefwX8fQn9LQ4nZkpDboBOttmyffnInRy8R8dSg6hxxl2Re5QhHBf6FYIDhIj5v982ATQ== + +"@rollup/rollup-linux-arm-musleabihf@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.52.5.tgz#f8cc30b638f1ee7e3d18eac24af47ea29d9beb00" + integrity sha512-Fw4tysRutyQc/wwkmcyoqFtJhh0u31K+Q6jYjeicsGJJ7bbEq8LwPWV/w0cnzOqR2m694/Af6hpFayLJZkG2VQ== + +"@rollup/rollup-linux-arm64-gnu@4.52.5": + version "4.52.5" + resolved 
"https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.52.5.tgz#7af37a9e85f25db59dc8214172907b7e146c12cc" + integrity sha512-a+3wVnAYdQClOTlyapKmyI6BLPAFYs0JM8HRpgYZQO02rMR09ZcV9LbQB+NL6sljzG38869YqThrRnfPMCDtZg== + +"@rollup/rollup-linux-arm64-musl@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.52.5.tgz#a623eb0d3617c03b7a73716eb85c6e37b776f7e0" + integrity sha512-AvttBOMwO9Pcuuf7m9PkC1PUIKsfaAJ4AYhy944qeTJgQOqJYJ9oVl2nYgY7Rk0mkbsuOpCAYSs6wLYB2Xiw0Q== + +"@rollup/rollup-linux-loong64-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.52.5.tgz#76ea038b549c5c6c5f0d062942627c4066642ee2" + integrity sha512-DkDk8pmXQV2wVrF6oq5tONK6UHLz/XcEVow4JTTerdeV1uqPeHxwcg7aFsfnSm9L+OO8WJsWotKM2JJPMWrQtA== + +"@rollup/rollup-linux-ppc64-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.52.5.tgz#d9a4c3f0a3492bc78f6fdfe8131ac61c7359ccd5" + integrity sha512-W/b9ZN/U9+hPQVvlGwjzi+Wy4xdoH2I8EjaCkMvzpI7wJUs8sWJ03Rq96jRnHkSrcHTpQe8h5Tg3ZzUPGauvAw== + +"@rollup/rollup-linux-riscv64-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.52.5.tgz#87ab033eebd1a9a1dd7b60509f6333ec1f82d994" + integrity sha512-sjQLr9BW7R/ZiXnQiWPkErNfLMkkWIoCz7YMn27HldKsADEKa5WYdobaa1hmN6slu9oWQbB6/jFpJ+P2IkVrmw== + +"@rollup/rollup-linux-riscv64-musl@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.52.5.tgz#bda3eb67e1c993c1ba12bc9c2f694e7703958d9f" + integrity sha512-hq3jU/kGyjXWTvAh2awn8oHroCbrPm8JqM7RUpKjalIRWWXE01CQOf/tUNWNHjmbMHg/hmNCwc/Pz3k1T/j/Lg== + +"@rollup/rollup-linux-s390x-gnu@4.52.5": + version "4.52.5" + resolved 
"https://registry.yarnpkg.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.52.5.tgz#f7bc10fbe096ab44694233dc42a2291ed5453d4b" + integrity sha512-gn8kHOrku8D4NGHMK1Y7NA7INQTRdVOntt1OCYypZPRt6skGbddska44K8iocdpxHTMMNui5oH4elPH4QOLrFQ== + +"@rollup/rollup-linux-x64-gnu@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.52.5.tgz#a151cb1234cc9b2cf5e8cfc02aa91436b8f9e278" + integrity sha512-hXGLYpdhiNElzN770+H2nlx+jRog8TyynpTVzdlc6bndktjKWyZyiCsuDAlpd+j+W+WNqfcyAWz9HxxIGfZm1Q== + +"@rollup/rollup-linux-x64-musl@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.52.5.tgz#7859e196501cc3b3062d45d2776cfb4d2f3a9350" + integrity sha512-arCGIcuNKjBoKAXD+y7XomR9gY6Mw7HnFBv5Rw7wQRvwYLR7gBAgV7Mb2QTyjXfTveBNFAtPt46/36vV9STLNg== + +"@rollup/rollup-openharmony-arm64@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.52.5.tgz#85d0df7233734df31e547c1e647d2a5300b3bf30" + integrity sha512-QoFqB6+/9Rly/RiPjaomPLmR/13cgkIGfA40LHly9zcH1S0bN2HVFYk3a1eAyHQyjs3ZJYlXvIGtcCs5tko9Cw== + +"@rollup/rollup-win32-arm64-msvc@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.52.5.tgz#e62357d00458db17277b88adbf690bb855cac937" + integrity sha512-w0cDWVR6MlTstla1cIfOGyl8+qb93FlAVutcor14Gf5Md5ap5ySfQ7R9S/NjNaMLSFdUnKGEasmVnu3lCMqB7w== + +"@rollup/rollup-win32-ia32-msvc@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.52.5.tgz#fc7cd40f44834a703c1f1c3fe8bcc27ce476cd50" + integrity sha512-Aufdpzp7DpOTULJCuvzqcItSGDH73pF3ko/f+ckJhxQyHtp67rHw3HMNxoIdDMUITJESNE6a8uh4Lo4SLouOUg== + +"@rollup/rollup-win32-x64-gnu@4.52.5": + version "4.52.5" + resolved 
"https://registry.yarnpkg.com/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.52.5.tgz#1a22acfc93c64a64a48c42672e857ee51774d0d3" + integrity sha512-UGBUGPFp1vkj6p8wCRraqNhqwX/4kNQPS57BCFc8wYh0g94iVIW33wJtQAx3G7vrjjNtRaxiMUylM0ktp/TRSQ== + +"@rollup/rollup-win32-x64-msvc@4.52.5": + version "4.52.5" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.52.5.tgz#1657f56326bbe0ac80eedc9f9c18fc1ddd24e107" + integrity sha512-TAcgQh2sSkykPRWLrdyy2AiceMckNf5loITqXxFI5VuQjS5tSuw3WlwdN8qv8vzjLAUTvYaH/mVjSFpbkFbpTg== "@rushstack/eslint-patch@^1.1.0": version "1.6.0" @@ -1863,7 +1908,12 @@ resolved "https://registry.yarnpkg.com/@swc/types/-/types-0.1.5.tgz#043b731d4f56a79b4897a3de1af35e75d56bc63a" integrity sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw== -"@types/estree@1.0.5", "@types/estree@^1.0.0": +"@types/estree@1.0.8": + version "1.0.8" + resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.8.tgz#958b91c991b1867ced318bedea0e215ee050726e" + integrity sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w== + +"@types/estree@^1.0.0": version "1.0.5" resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.5.tgz#a6ce3e556e00fd9895dd872dd172ad0d4bd687f4" integrity sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw== @@ -2921,34 +2971,34 @@ es-to-primitive@^1.2.1: is-date-object "^1.0.1" is-symbol "^1.0.2" -esbuild@^0.19.3: - version "0.19.12" - resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.19.12.tgz#dc82ee5dc79e82f5a5c3b4323a2a641827db3e04" - integrity sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg== +esbuild@^0.21.3: + version "0.21.5" + resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.21.5.tgz#9ca301b120922959b766360d8ac830da0d02997d" + integrity 
sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw== optionalDependencies: - "@esbuild/aix-ppc64" "0.19.12" - "@esbuild/android-arm" "0.19.12" - "@esbuild/android-arm64" "0.19.12" - "@esbuild/android-x64" "0.19.12" - "@esbuild/darwin-arm64" "0.19.12" - "@esbuild/darwin-x64" "0.19.12" - "@esbuild/freebsd-arm64" "0.19.12" - "@esbuild/freebsd-x64" "0.19.12" - "@esbuild/linux-arm" "0.19.12" - "@esbuild/linux-arm64" "0.19.12" - "@esbuild/linux-ia32" "0.19.12" - "@esbuild/linux-loong64" "0.19.12" - "@esbuild/linux-mips64el" "0.19.12" - "@esbuild/linux-ppc64" "0.19.12" - "@esbuild/linux-riscv64" "0.19.12" - "@esbuild/linux-s390x" "0.19.12" - "@esbuild/linux-x64" "0.19.12" - "@esbuild/netbsd-x64" "0.19.12" - "@esbuild/openbsd-x64" "0.19.12" - "@esbuild/sunos-x64" "0.19.12" - "@esbuild/win32-arm64" "0.19.12" - "@esbuild/win32-ia32" "0.19.12" - "@esbuild/win32-x64" "0.19.12" + "@esbuild/aix-ppc64" "0.21.5" + "@esbuild/android-arm" "0.21.5" + "@esbuild/android-arm64" "0.21.5" + "@esbuild/android-x64" "0.21.5" + "@esbuild/darwin-arm64" "0.21.5" + "@esbuild/darwin-x64" "0.21.5" + "@esbuild/freebsd-arm64" "0.21.5" + "@esbuild/freebsd-x64" "0.21.5" + "@esbuild/linux-arm" "0.21.5" + "@esbuild/linux-arm64" "0.21.5" + "@esbuild/linux-ia32" "0.21.5" + "@esbuild/linux-loong64" "0.21.5" + "@esbuild/linux-mips64el" "0.21.5" + "@esbuild/linux-ppc64" "0.21.5" + "@esbuild/linux-riscv64" "0.21.5" + "@esbuild/linux-s390x" "0.21.5" + "@esbuild/linux-x64" "0.21.5" + "@esbuild/netbsd-x64" "0.21.5" + "@esbuild/openbsd-x64" "0.21.5" + "@esbuild/sunos-x64" "0.21.5" + "@esbuild/win32-arm64" "0.21.5" + "@esbuild/win32-ia32" "0.21.5" + "@esbuild/win32-x64" "0.21.5" escalade@^3.1.1: version "3.1.1" @@ -4172,10 +4222,10 @@ mz@^2.7.0: object-assign "^4.0.1" thenify-all "^1.0.0" -nanoid@^3.3.7: - version "3.3.7" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" - integrity 
sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== +nanoid@^3.3.11: + version "3.3.11" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.11.tgz#4f4f112cefbe303202f2199838128936266d185b" + integrity sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w== natural-compare@^1.4.0: version "1.4.0" @@ -4408,6 +4458,11 @@ picocolors@^1.0.0: resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== +picocolors@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" + integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== + picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" @@ -4476,14 +4531,14 @@ postcss-value-parser@^4.0.0, postcss-value-parser@^4.2.0: resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514" integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== -postcss@^8.4.23, postcss@^8.4.31, postcss@^8.4.35: - version "8.4.35" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.35.tgz#60997775689ce09011edf083a549cea44aabe2f7" - integrity sha512-u5U8qYpBCpN13BsiEB0CbR1Hhh4Gc0zLFuedrHJKMctHCHAGrMdG0PRM/KErzAL3CU6/eckEtmHNB3x6e3c0vA== +postcss@^8.4.23, postcss@^8.4.31, postcss@^8.4.43: + version "8.5.6" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.5.6.tgz#2825006615a619b4f62a9e7426cc120b349a8f3c" + integrity 
sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg== dependencies: - nanoid "^3.3.7" - picocolors "^1.0.0" - source-map-js "^1.0.2" + nanoid "^3.3.11" + picocolors "^1.1.1" + source-map-js "^1.2.1" prelude-ls@^1.2.1: version "1.2.1" @@ -4715,26 +4770,35 @@ rimraf@^3.0.2: dependencies: glob "^7.1.3" -rollup@^4.2.0: - version "4.12.0" - resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.12.0.tgz#0b6d1e5f3d46bbcf244deec41a7421dc54cc45b5" - integrity sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q== +rollup@^4.20.0: + version "4.52.5" + resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.52.5.tgz#96982cdcaedcdd51b12359981f240f94304ec235" + integrity sha512-3GuObel8h7Kqdjt0gxkEzaifHTqLVW56Y/bjN7PSQtkKr0w3V/QYSdt6QWYtd7A1xUtYQigtdUfgj1RvWVtorw== dependencies: - "@types/estree" "1.0.5" + "@types/estree" "1.0.8" optionalDependencies: - "@rollup/rollup-android-arm-eabi" "4.12.0" - "@rollup/rollup-android-arm64" "4.12.0" - "@rollup/rollup-darwin-arm64" "4.12.0" - "@rollup/rollup-darwin-x64" "4.12.0" - "@rollup/rollup-linux-arm-gnueabihf" "4.12.0" - "@rollup/rollup-linux-arm64-gnu" "4.12.0" - "@rollup/rollup-linux-arm64-musl" "4.12.0" - "@rollup/rollup-linux-riscv64-gnu" "4.12.0" - "@rollup/rollup-linux-x64-gnu" "4.12.0" - "@rollup/rollup-linux-x64-musl" "4.12.0" - "@rollup/rollup-win32-arm64-msvc" "4.12.0" - "@rollup/rollup-win32-ia32-msvc" "4.12.0" - "@rollup/rollup-win32-x64-msvc" "4.12.0" + "@rollup/rollup-android-arm-eabi" "4.52.5" + "@rollup/rollup-android-arm64" "4.52.5" + "@rollup/rollup-darwin-arm64" "4.52.5" + "@rollup/rollup-darwin-x64" "4.52.5" + "@rollup/rollup-freebsd-arm64" "4.52.5" + "@rollup/rollup-freebsd-x64" "4.52.5" + "@rollup/rollup-linux-arm-gnueabihf" "4.52.5" + "@rollup/rollup-linux-arm-musleabihf" "4.52.5" + "@rollup/rollup-linux-arm64-gnu" "4.52.5" + "@rollup/rollup-linux-arm64-musl" "4.52.5" + "@rollup/rollup-linux-loong64-gnu" "4.52.5" + 
"@rollup/rollup-linux-ppc64-gnu" "4.52.5" + "@rollup/rollup-linux-riscv64-gnu" "4.52.5" + "@rollup/rollup-linux-riscv64-musl" "4.52.5" + "@rollup/rollup-linux-s390x-gnu" "4.52.5" + "@rollup/rollup-linux-x64-gnu" "4.52.5" + "@rollup/rollup-linux-x64-musl" "4.52.5" + "@rollup/rollup-openharmony-arm64" "4.52.5" + "@rollup/rollup-win32-arm64-msvc" "4.52.5" + "@rollup/rollup-win32-ia32-msvc" "4.52.5" + "@rollup/rollup-win32-x64-gnu" "4.52.5" + "@rollup/rollup-win32-x64-msvc" "4.52.5" fsevents "~2.3.2" rrweb-cssom@^0.6.0: @@ -4862,10 +4926,10 @@ snake-case@^3.0.4: dot-case "^3.0.4" tslib "^2.0.3" -source-map-js@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c" - integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw== +source-map-js@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" + integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== stackback@0.0.2: version "0.0.2" @@ -5327,14 +5391,14 @@ vite-tsconfig-paths@^3.5.0: recrawl-sync "^2.0.3" tsconfig-paths "^4.0.0" -vite@^5.0.0, vite@^5.1.7: - version "5.1.7" - resolved "https://registry.yarnpkg.com/vite/-/vite-5.1.7.tgz#9f685a2c4c70707fef6d37341b0e809c366da619" - integrity sha512-sgnEEFTZYMui/sTlH1/XEnVNHMujOahPLGMxn1+5sIT45Xjng1Ec1K78jRP15dSmVgg5WBin9yO81j3o9OxofA== +vite@^5.0.0, vite@^5.4.21: + version "5.4.21" + resolved "https://registry.yarnpkg.com/vite/-/vite-5.4.21.tgz#84a4f7c5d860b071676d39ba513c0d598fdc7027" + integrity sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw== dependencies: - esbuild "^0.19.3" - postcss "^8.4.35" - rollup "^4.2.0" + esbuild "^0.21.3" + postcss "^8.4.43" + rollup "^4.20.0" optionalDependencies: fsevents "~2.3.3" From 
c0c0d451144f50f06c792a8d672ee072f666b25a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 27 Nov 2025 22:01:59 +0000 Subject: [PATCH 0768/1093] build(deps-dev): bump vitest from 1.3.1 to 1.6.1 in /client/web Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.3.1 to 1.6.1. - [Release notes](https://github.com/vitest-dev/vitest/releases) - [Commits](https://github.com/vitest-dev/vitest/commits/v1.6.1/packages/vitest) --- updated-dependencies: - dependency-name: vitest dependency-type: direct:development ... Signed-off-by: dependabot[bot] --- client/web/package.json | 2 +- client/web/yarn.lock | 91 +++++++++++++++++++---------------------- 2 files changed, 44 insertions(+), 49 deletions(-) diff --git a/client/web/package.json b/client/web/package.json index 3d040425e907a..c733b0de06b97 100644 --- a/client/web/package.json +++ b/client/web/package.json @@ -37,7 +37,7 @@ "vite": "^5.4.21", "vite-plugin-svgr": "^4.2.0", "vite-tsconfig-paths": "^3.5.0", - "vitest": "^1.3.1" + "vitest": "^1.6.1" }, "resolutions": { "@typescript-eslint/eslint-plugin": "^6.2.1", diff --git a/client/web/yarn.lock b/client/web/yarn.lock index 4e4272d0dada9..c8ccbb49fd011 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -2124,44 +2124,44 @@ dependencies: "@swc/core" "^1.3.107" -"@vitest/expect@1.3.1": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@vitest/expect/-/expect-1.3.1.tgz#d4c14b89c43a25fd400a6b941f51ba27fe0cb918" - integrity sha512-xofQFwIzfdmLLlHa6ag0dPV8YsnKOCP1KdAeVVh34vSjN2dcUiXYCD9htu/9eM7t8Xln4v03U9HLxLpPlsXdZw== +"@vitest/expect@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/expect/-/expect-1.6.1.tgz#b90c213f587514a99ac0bf84f88cff9042b0f14d" + integrity sha512-jXL+9+ZNIJKruofqXuuTClf44eSpcHlgj3CiuNihUF3Ioujtmc0zIa3UJOW5RjDK1YLBJZnWBlPuqhYycLioog== dependencies: - "@vitest/spy" "1.3.1" - "@vitest/utils" "1.3.1" + "@vitest/spy" 
"1.6.1" + "@vitest/utils" "1.6.1" chai "^4.3.10" -"@vitest/runner@1.3.1": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@vitest/runner/-/runner-1.3.1.tgz#e7f96cdf74842934782bfd310eef4b8695bbfa30" - integrity sha512-5FzF9c3jG/z5bgCnjr8j9LNq/9OxV2uEBAITOXfoe3rdZJTdO7jzThth7FXv/6b+kdY65tpRQB7WaKhNZwX+Kg== +"@vitest/runner@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/runner/-/runner-1.6.1.tgz#10f5857c3e376218d58c2bfacfea1161e27e117f" + integrity sha512-3nSnYXkVkf3mXFfE7vVyPmi3Sazhb/2cfZGGs0JRzFsPFvAMBEcrweV1V1GsrstdXeKCTXlJbvnQwGWgEIHmOA== dependencies: - "@vitest/utils" "1.3.1" + "@vitest/utils" "1.6.1" p-limit "^5.0.0" pathe "^1.1.1" -"@vitest/snapshot@1.3.1": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@vitest/snapshot/-/snapshot-1.3.1.tgz#193a5d7febf6ec5d22b3f8c5a093f9e4322e7a88" - integrity sha512-EF++BZbt6RZmOlE3SuTPu/NfwBF6q4ABS37HHXzs2LUVPBLx2QoY/K0fKpRChSo8eLiuxcbCVfqKgx/dplCDuQ== +"@vitest/snapshot@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/snapshot/-/snapshot-1.6.1.tgz#90414451a634bb36cd539ccb29ae0d048a8c0479" + integrity sha512-WvidQuWAzU2p95u8GAKlRMqMyN1yOJkGHnx3M1PL9Raf7AQ1kwLKg04ADlCa3+OXUZE7BceOhVZiuWAbzCKcUQ== dependencies: magic-string "^0.30.5" pathe "^1.1.1" pretty-format "^29.7.0" -"@vitest/spy@1.3.1": - version "1.3.1" - resolved "https://registry.yarnpkg.com/@vitest/spy/-/spy-1.3.1.tgz#814245d46d011b99edd1c7528f5725c64e85a88b" - integrity sha512-xAcW+S099ylC9VLU7eZfdT9myV67Nor9w9zhf0mGCYJSO+zM2839tOeROTdikOi/8Qeusffvxb/MyBSOja1Uig== +"@vitest/spy@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/spy/-/spy-1.6.1.tgz#33376be38a5ed1ecd829eb986edaecc3e798c95d" + integrity sha512-MGcMmpGkZebsMZhbQKkAf9CX5zGvjkBTqf8Zx3ApYWXr3wG+QvEu2eXWfnIIWYSJExIp4V9FCKDEeygzkYrXMw== dependencies: tinyspy "^2.2.0" -"@vitest/utils@1.3.1": - version "1.3.1" - resolved 
"https://registry.yarnpkg.com/@vitest/utils/-/utils-1.3.1.tgz#7b05838654557544f694a372de767fcc9594d61a" - integrity sha512-d3Waie/299qqRyHTm2DjADeTaNdNSVsnwHPWrs20JMpjh6eiVq7ggggweO8rc4arhf6rRkWuHKwvxGvejUXZZQ== +"@vitest/utils@1.6.1": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@vitest/utils/-/utils-1.6.1.tgz#6d2f36cb6d866f2bbf59da854a324d6bf8040f17" + integrity sha512-jOrrUvXM4Av9ZWiG1EajNto0u96kWAhJ1LmPmJhXXQx/32MecEKd10pOLYgS2BQx1TgkGhloPU1ArDW2vvaY6g== dependencies: diff-sequences "^29.6.3" estree-walker "^3.0.3" @@ -4453,12 +4453,7 @@ pathval@^1.1.1: resolved "https://registry.yarnpkg.com/pathval/-/pathval-1.1.1.tgz#8534e77a77ce7ac5a2512ea21e0fdb8fcf6c3d8d" integrity sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ== -picocolors@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" - integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== - -picocolors@^1.1.1: +picocolors@^1.0.0, picocolors@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== @@ -5119,10 +5114,10 @@ tinybench@^2.5.1: resolved "https://registry.yarnpkg.com/tinybench/-/tinybench-2.6.0.tgz#1423284ee22de07c91b3752c048d2764714b341b" integrity sha512-N8hW3PG/3aOoZAN5V/NSAEDz0ZixDSSt5b/a05iqtpgfLWMSVuCo7w0k2vVvEjdrIoeGqZzweX2WlyioNIHchA== -tinypool@^0.8.2: - version "0.8.2" - resolved "https://registry.yarnpkg.com/tinypool/-/tinypool-0.8.2.tgz#84013b03dc69dacb322563a475d4c0a9be00f82a" - integrity sha512-SUszKYe5wgsxnNOVlBYO6IC+8VGWdVGZWAqUxp3UErNBtptZvWbwyUOyzNL59zigz2rCA92QiL3wvG+JDSdJdQ== +tinypool@^0.8.3: + version "0.8.4" + resolved 
"https://registry.yarnpkg.com/tinypool/-/tinypool-0.8.4.tgz#e217fe1270d941b39e98c625dcecebb1408c9aa8" + integrity sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ== tinyspy@^2.2.0: version "2.2.1" @@ -5361,10 +5356,10 @@ util-deprecate@^1.0.2: resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== -vite-node@1.3.1: - version "1.3.1" - resolved "https://registry.yarnpkg.com/vite-node/-/vite-node-1.3.1.tgz#a93f7372212f5d5df38e945046b945ac3f4855d2" - integrity sha512-azbRrqRxlWTJEVbzInZCTchx0X69M/XPTCz4H+TLvlTcR/xH/3hkRqhOakT41fMJCMzXTu4UvegkZiEoJAWvng== +vite-node@1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/vite-node/-/vite-node-1.6.1.tgz#fff3ef309296ea03ceaa6ca4bb660922f5416c57" + integrity sha512-YAXkfvGtuTzwWbDSACdJSg4A4DZiAqckWe90Zapc/sEX3XvHcw1NdurM/6od8J207tSDqNbSsgdCacBgvJKFuA== dependencies: cac "^6.7.14" debug "^4.3.4" @@ -5402,16 +5397,16 @@ vite@^5.0.0, vite@^5.4.21: optionalDependencies: fsevents "~2.3.3" -vitest@^1.3.1: - version "1.3.1" - resolved "https://registry.yarnpkg.com/vitest/-/vitest-1.3.1.tgz#2d7e9861f030d88a4669392a4aecb40569d90937" - integrity sha512-/1QJqXs8YbCrfv/GPQ05wAZf2eakUPLPa18vkJAKE7RXOKfVHqMZZ1WlTjiwl6Gcn65M5vpNUB6EFLnEdRdEXQ== +vitest@^1.6.1: + version "1.6.1" + resolved "https://registry.yarnpkg.com/vitest/-/vitest-1.6.1.tgz#b4a3097adf8f79ac18bc2e2e0024c534a7a78d2f" + integrity sha512-Ljb1cnSJSivGN0LqXd/zmDbWEM0RNNg2t1QW/XUhYl/qPqyu7CsqeWtqQXHVaJsecLPuDoak2oJcZN2QoRIOag== dependencies: - "@vitest/expect" "1.3.1" - "@vitest/runner" "1.3.1" - "@vitest/snapshot" "1.3.1" - "@vitest/spy" "1.3.1" - "@vitest/utils" "1.3.1" + "@vitest/expect" "1.6.1" + "@vitest/runner" "1.6.1" + "@vitest/snapshot" "1.6.1" + "@vitest/spy" "1.6.1" + "@vitest/utils" "1.6.1" acorn-walk "^8.3.2" chai "^4.3.10" debug 
"^4.3.4" @@ -5423,9 +5418,9 @@ vitest@^1.3.1: std-env "^3.5.0" strip-literal "^2.0.0" tinybench "^2.5.1" - tinypool "^0.8.2" + tinypool "^0.8.3" vite "^5.0.0" - vite-node "1.3.1" + vite-node "1.6.1" why-is-node-running "^2.2.2" w3c-xmlserializer@^5.0.0: From 22bdf34a00b082dac66c3fe83ad1db2bfadd502b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 27 Nov 2025 22:13:54 +0000 Subject: [PATCH 0769/1093] build(deps): bump cross-spawn from 7.0.3 to 7.0.6 in /client/web Bumps [cross-spawn](https://github.com/moxystudio/node-cross-spawn) from 7.0.3 to 7.0.6. - [Changelog](https://github.com/moxystudio/node-cross-spawn/blob/master/CHANGELOG.md) - [Commits](https://github.com/moxystudio/node-cross-spawn/compare/v7.0.3...v7.0.6) --- updated-dependencies: - dependency-name: cross-spawn dependency-type: indirect ... Signed-off-by: dependabot[bot] --- client/web/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/client/web/yarn.lock b/client/web/yarn.lock index c8ccbb49fd011..fc4297ccf08fa 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -2677,9 +2677,9 @@ cosmiconfig@^8.1.3: path-type "^4.0.0" cross-spawn@^7.0.2, cross-spawn@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + version "7.0.6" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== dependencies: path-key "^3.1.0" shebang-command "^2.0.0" From b40272e76734483c7387840858cfc4e0e4f69811 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 27 Nov 2025 22:21:57 +0000 Subject: [PATCH 
0770/1093] build(deps): bump braces from 3.0.2 to 3.0.3 in /client/web Bumps [braces](https://github.com/micromatch/braces) from 3.0.2 to 3.0.3. - [Changelog](https://github.com/micromatch/braces/blob/master/CHANGELOG.md) - [Commits](https://github.com/micromatch/braces/compare/3.0.2...3.0.3) --- updated-dependencies: - dependency-name: braces dependency-version: 3.0.3 dependency-type: indirect ... Signed-off-by: dependabot[bot] --- client/web/yarn.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/client/web/yarn.lock b/client/web/yarn.lock index fc4297ccf08fa..e8e5f5bb66450 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -2477,11 +2477,11 @@ brace-expansion@^2.0.1: balanced-match "^1.0.0" braces@^3.0.2, braces@~3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + version "3.0.3" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== dependencies: - fill-range "^7.0.1" + fill-range "^7.1.1" browserslist@^4.21.10, browserslist@^4.21.9, browserslist@^4.22.1: version "4.22.1" @@ -3325,10 +3325,10 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== +fill-range@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity 
sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== dependencies: to-regex-range "^5.0.1" From 411cee0dc9dbff2bfcf68f7588e3276f44ff9c6c Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Thu, 27 Nov 2025 15:31:50 -0700 Subject: [PATCH 0771/1093] .github/workflows: only run golang ci lint when go files have changed Restrict running the golangci-lint workflow to when the workflow file itself or a .go file, go.mod, or go.sum have actually been modified. Updates #cleanup Signed-off-by: Mario Minardi --- .github/workflows/golangci-lint.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index bcf17f8e66243..098b6f387c239 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -2,7 +2,11 @@ name: golangci-lint on: # For now, only lint pull requests, not the main branches. pull_request: - + paths: + - ".github/workflows/golangci-lint.yml" + - "**.go" + - "go.mod" + - "go.sum" # TODO(andrew): enable for main branch after an initial waiting period. #push: # branches: From 7c5c02b77a5cb823a6b90f03e2a94bda87ee223f Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Fri, 28 Nov 2025 09:16:18 -0500 Subject: [PATCH 0772/1093] cmd/k8s-operator: add support for taiscale.com/http-redirect (#17596) * cmd/k8s-operator: add support for taiscale.com/http-redirect The k8s-operator now supports a tailscale.com/http-redirect annotation on Ingress resources. When enabled, this automatically creates port 80 handlers that automatically redirect to the equivalent HTTPS location. 
Fixes #11252 Signed-off-by: Fernando Serboncini * Fix for permanent redirect Signed-off-by: Fernando Serboncini * lint Signed-off-by: Fernando Serboncini * warn for redirect+endpoint Signed-off-by: Fernando Serboncini * tests Signed-off-by: Fernando Serboncini --------- Signed-off-by: Fernando Serboncini --- cmd/k8s-operator/ingress-for-pg.go | 25 ++- cmd/k8s-operator/ingress-for-pg_test.go | 230 ++++++++++++++++++++++++ cmd/k8s-operator/ingress.go | 46 ++++- cmd/k8s-operator/ingress_test.go | 161 ++++++++++++++--- cmd/k8s-operator/sts.go | 3 +- 5 files changed, 429 insertions(+), 36 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 460a1914ee799..1b35d853688cd 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -290,6 +290,25 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin ingCfg.Web[epHTTP] = &ipn.WebServerConfig{ Handlers: handlers, } + if isHTTPRedirectEnabled(ing) { + logger.Warnf("Both HTTP endpoint and HTTP redirect flags are enabled: ignoring HTTP redirect.") + } + } else if isHTTPRedirectEnabled(ing) { + logger.Infof("HTTP redirect enabled, setting up port 80 redirect handlers") + epHTTP := ipn.HostPort(fmt.Sprintf("%s:80", dnsName)) + ingCfg.TCP[80] = &ipn.TCPPortHandler{HTTP: true} + ingCfg.Web[epHTTP] = &ipn.WebServerConfig{ + Handlers: map[string]*ipn.HTTPHandler{}, + } + web80 := ingCfg.Web[epHTTP] + for mountPoint := range handlers { + // We send a 301 - Moved Permanently redirect from HTTP to HTTPS + redirectURL := "301:https://${HOST}${REQUEST_URI}" + logger.Debugf("Creating redirect handler: %s -> %s", mountPoint, redirectURL) + web80.Handlers[mountPoint] = &ipn.HTTPHandler{ + Redirect: redirectURL, + } + } } var gotCfg *ipn.ServiceConfig @@ -316,7 +335,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin } tsSvcPorts := []string{"tcp:443"} // always 443 for Ingress - if 
isHTTPEndpointEnabled(ing) { + if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { tsSvcPorts = append(tsSvcPorts, "tcp:80") } @@ -346,7 +365,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // 5. Update tailscaled's AdvertiseServices config, which should add the Tailscale Service // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. mode := serviceAdvertisementHTTPS - if isHTTPEndpointEnabled(ing) { + if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { mode = serviceAdvertisementHTTPAndHTTPS } if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, mode, logger); err != nil { @@ -377,7 +396,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin Port: 443, }) } - if isHTTPEndpointEnabled(ing) { + if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { ports = append(ports, networkingv1.IngressPortStatus{ Protocol: "TCP", Port: 80, diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 5cc806ad1bf7a..1257336e353c1 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -618,6 +618,236 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { } } +func TestIngressPGReconciler_HTTPRedirect(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + + // Create backend Service that the Ingress will route to + backendSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8080, + }, + }, + }, + } + mustCreate(t, fc, backendSvc) + + // Create test Ingress with HTTP redirect enabled + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + 
Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + "tailscale.com/http-redirect": "true", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc"}}, + }, + }, + } + if err := fc.Create(context.Background(), ing); err != nil { + t.Fatal(err) + } + + // Verify initial reconciliation with HTTP redirect enabled + expectReconciled(t, ingPGR, "default", "test-ingress") + populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + expectReconciled(t, ingPGR, "default", "test-ingress") + + // Verify Tailscale Service includes both tcp:80 and tcp:443 + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"}) + + // Verify Ingress status includes port 80 + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-ingress", + Namespace: "default", + }, ing); err != nil { + t.Fatal(err) + } + + // Add the Tailscale Service to prefs to have the Ingress recognised as ready + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-0", + Namespace: "operator-ns", + Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeState), + }, + Data: map[string][]byte{ + "_current-profile": []byte("profile-foo"), + "profile-foo": []byte(`{"AdvertiseServices":["svc:my-svc"],"Config":{"NodeID":"node-foo"}}`), + }, + }) + + // Reconcile and re-fetch Ingress + expectReconciled(t, ingPGR, "default", "test-ingress") + if err := fc.Get(context.Background(), client.ObjectKeyFromObject(ing), ing); err != nil { + t.Fatal(err) + } + + wantStatus := []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + {Port: 80, Protocol: "TCP"}, + } + if 
!reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) { + t.Errorf("incorrect status ports: got %v, want %v", + ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) + } + + // Remove HTTP redirect annotation + mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { + delete(ing.Annotations, "tailscale.com/http-redirect") + }) + + // Verify reconciliation after removing HTTP redirect + expectReconciled(t, ingPGR, "default", "test-ingress") + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:443"}) + + // Verify Ingress status no longer includes port 80 + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-ingress", + Namespace: "default", + }, ing); err != nil { + t.Fatal(err) + } + + wantStatus = []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) { + t.Errorf("incorrect status ports after removing redirect: got %v, want %v", + ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) + } +} + +func TestIngressPGReconciler_HTTPEndpointAndRedirectConflict(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + + // Create backend Service that the Ingress will route to + backendSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "10.0.0.1", + Ports: []corev1.ServicePort{ + { + Port: 8080, + }, + }, + }, + } + mustCreate(t, fc, backendSvc) + + // Create test Ingress with both HTTP endpoint and HTTP redirect enabled + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + "tailscale.com/http-endpoint": "enabled", + 
"tailscale.com/http-redirect": "true", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc"}}, + }, + }, + } + if err := fc.Create(context.Background(), ing); err != nil { + t.Fatal(err) + } + + // Verify initial reconciliation - HTTP endpoint should take precedence + expectReconciled(t, ingPGR, "default", "test-ingress") + populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + expectReconciled(t, ingPGR, "default", "test-ingress") + + // Verify Tailscale Service includes both tcp:80 and tcp:443 + verifyTailscaleService(t, ft, "svc:my-svc", []string{"tcp:80", "tcp:443"}) + + // Verify the serve config has HTTP endpoint handlers on port 80, NOT redirect handlers + cm := &corev1.ConfigMap{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-pg-ingress-config", + Namespace: "operator-ns", + }, cm); err != nil { + t.Fatalf("getting ConfigMap: %v", err) + } + + // Verify Ingress status includes port 80 + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{ + Name: "test-ingress", + Namespace: "default", + }, ing); err != nil { + t.Fatal(err) + } + + // Add the Tailscale Service to prefs to have the Ingress recognised as ready + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-0", + Namespace: "operator-ns", + Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeState), + }, + Data: map[string][]byte{ + "_current-profile": []byte("profile-foo"), + "profile-foo": []byte(`{"AdvertiseServices":["svc:my-svc"],"Config":{"NodeID":"node-foo"}}`), + }, + }) + + // Reconcile and re-fetch Ingress + expectReconciled(t, ingPGR, "default", "test-ingress") + if err := 
fc.Get(context.Background(), client.ObjectKeyFromObject(ing), ing); err != nil { + t.Fatal(err) + } + + wantStatus := []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + {Port: 80, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) { + t.Errorf("incorrect status ports: got %v, want %v", + ing.Status.LoadBalancer.Ingress[0].Ports, wantStatus) + } +} + func TestIngressPGReconciler_MultiCluster(t *testing.T) { ingPGR, fc, ft := setupIngressTest(t) ingPGR.operatorID = "operator-1" diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index fb11f717de04e..050b03f55970f 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -204,6 +204,27 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga return nil } + if isHTTPRedirectEnabled(ing) { + logger.Infof("HTTP redirect enabled, setting up port 80 redirect handlers") + const magic80 = "${TS_CERT_DOMAIN}:80" + sc.TCP[80] = &ipn.TCPPortHandler{HTTP: true} + sc.Web[magic80] = &ipn.WebServerConfig{ + Handlers: map[string]*ipn.HTTPHandler{}, + } + if sc.AllowFunnel != nil && sc.AllowFunnel[magic443] { + sc.AllowFunnel[magic80] = true + } + web80 := sc.Web[magic80] + for mountPoint := range handlers { + // We send a 301 - Moved Permanently redirect from HTTP to HTTPS + redirectURL := "301:https://${HOST}${REQUEST_URI}" + logger.Debugf("Creating redirect handler: %s -> %s", mountPoint, redirectURL) + web80.Handlers[mountPoint] = &ipn.HTTPHandler{ + Redirect: redirectURL, + } + } + } + crl := childResourceLabels(ing.Name, ing.Namespace, "ingress") var tags []string if tstr, ok := ing.Annotations[AnnotationTags]; ok { @@ -244,14 +265,21 @@ func (a *IngressReconciler) maybeProvision(ctx context.Context, logger *zap.Suga } logger.Debugf("setting Ingress hostname to %q", dev.ingressDNSName) + ports := []networkingv1.IngressPortStatus{ + { + Protocol: "TCP", + Port: 443, + }, + } + if 
isHTTPRedirectEnabled(ing) { + ports = append(ports, networkingv1.IngressPortStatus{ + Protocol: "TCP", + Port: 80, + }) + } ing.Status.LoadBalancer.Ingress = append(ing.Status.LoadBalancer.Ingress, networkingv1.IngressLoadBalancerIngress{ Hostname: dev.ingressDNSName, - Ports: []networkingv1.IngressPortStatus{ - { - Protocol: "TCP", - Port: 443, - }, - }, + Ports: ports, }) } @@ -363,6 +391,12 @@ func handlersForIngress(ctx context.Context, ing *networkingv1.Ingress, cl clien return handlers, nil } +// isHTTPRedirectEnabled returns true if HTTP redirect is enabled for the Ingress. +// The annotation is tailscale.com/http-redirect and it should be set to "true". +func isHTTPRedirectEnabled(ing *networkingv1.Ingress) bool { + return ing.Annotations != nil && opt.Bool(ing.Annotations[AnnotationHTTPRedirect]).EqualBool(true) +} + // hostnameForIngress returns the hostname for an Ingress resource. // If the Ingress has TLS configured with a host, it returns the first component of that host. // Otherwise, it returns a hostname derived from the Ingress name and namespace. 
diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index f5e23cfe92043..038c746a97ca3 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -7,6 +7,7 @@ package main import ( "context" + "reflect" "testing" "go.uber.org/zap" @@ -64,12 +65,14 @@ func TestTailscaleIngress(t *testing.T) { parentType: "ingress", hostname: "default-test", app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}}, + }, } - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, - } - opts.serveConfig = serveConfig expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) @@ -156,12 +159,14 @@ func TestTailscaleIngressHostname(t *testing.T) { parentType: "ingress", hostname: "default-test", app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}}, + }, } - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, - } - opts.serveConfig = serveConfig expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) @@ -276,12 +281,14 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { parentType: "ingress", 
hostname: "default-test", app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}}, + }, } - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, - } - opts.serveConfig = serveConfig expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) @@ -368,10 +375,6 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { } expectReconciled(t, ingR, "default", "test") fullName, shortName := findGenName(t, fc, "default", "test", "ingress") - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, - } opts := configOpts{ stsName: shortName, secretName: fullName, @@ -382,8 +385,14 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) { app: kubetypes.AppIngressResource, namespaced: true, proxyType: proxyTypeIngressResource, - serveConfig: serveConfig, - resourceVersion: "1", + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}}, + }, + resourceVersion: "1", } // 1. 
Enable metrics- expect metrics Service to be created @@ -717,12 +726,14 @@ func TestEmptyPath(t *testing.T) { parentType: "ingress", hostname: "foo", app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}}, + }, } - serveConfig := &ipn.ServeConfig{ - TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}}, - Web: map[ipn.HostPort]*ipn.WebServerConfig{"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{"/": {Proxy: "http://1.2.3.4:8080/"}}}}, - } - opts.serveConfig = serveConfig expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) @@ -816,3 +827,101 @@ func backend() *networkingv1.IngressBackend { }, } } + +func TestTailscaleIngressWithHTTPRedirect(t *testing.T) { + fc := fake.NewFakeClient(ingressClass()) + ft := &fakeTSClient{} + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + ingR := &IngressReconciler{ + Client: fc, + ingressClassName: "tailscale", + ssr: &tailscaleSTSReconciler{ + Client: fc, + tsClient: ft, + tsnetServer: fakeTsnetServer, + defaultTags: []string{"tag:k8s"}, + operatorNamespace: "operator-ns", + proxyImage: "tailscale/tailscale", + }, + logger: zl.Sugar(), + } + + // 1. 
Create Ingress with HTTP redirect annotation + ing := ingress() + mak.Set(&ing.Annotations, AnnotationHTTPRedirect, "true") + mustCreate(t, fc, ing) + mustCreate(t, fc, service()) + + expectReconciled(t, ingR, "default", "test") + + fullName, shortName := findGenName(t, fc, "default", "test", "ingress") + opts := configOpts{ + replicas: ptr.To[int32](1), + stsName: shortName, + secretName: fullName, + namespace: "default", + parentType: "ingress", + hostname: "default-test", + app: kubetypes.AppIngressResource, + serveConfig: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {HTTPS: true}, + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "http://1.2.3.4:8080/"}, + }}, + "${TS_CERT_DOMAIN}:80": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Redirect: "301:https://${HOST}${REQUEST_URI}"}, + }}, + }, + }, + } + + expectEqual(t, fc, expectedSecret(t, fc, opts)) + expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) + + // 2. Update device info to get status updated + mustUpdate(t, fc, "operator-ns", opts.secretName, func(secret *corev1.Secret) { + mak.Set(&secret.Data, "device_id", []byte("1234")) + mak.Set(&secret.Data, "device_fqdn", []byte("foo.tailnetxyz.ts.net")) + }) + expectReconciled(t, ingR, "default", "test") + + // Verify Ingress status includes both ports 80 and 443 + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, ing); err != nil { + t.Fatal(err) + } + wantPorts := []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + {Port: 80, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) { + t.Errorf("incorrect status ports: got %v, want %v", ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) + } + + // 3. 
Remove HTTP redirect annotation + mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { + delete(ing.Annotations, AnnotationHTTPRedirect) + }) + expectReconciled(t, ingR, "default", "test") + + // 4. Verify Ingress status no longer includes port 80 + ing = &networkingv1.Ingress{} + if err := fc.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: "default"}, ing); err != nil { + t.Fatal(err) + } + wantPorts = []networkingv1.IngressPortStatus{ + {Port: 443, Protocol: "TCP"}, + } + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) { + t.Errorf("incorrect status ports after removing redirect: got %v, want %v", ing.Status.LoadBalancer.Ingress[0].Ports, wantPorts) + } +} diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index c52ffce85495b..3e4e72696b61b 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -69,7 +69,8 @@ const ( AnnotationProxyGroup = "tailscale.com/proxy-group" // Annotations settable by users on ingresses. - AnnotationFunnel = "tailscale.com/funnel" + AnnotationFunnel = "tailscale.com/funnel" + AnnotationHTTPRedirect = "tailscale.com/http-redirect" // If set to true, set up iptables/nftables rules in the proxy forward // cluster traffic to the tailnet IP of that proxy. This can only be set From f36eb81e6105401b814548393cc90673abe24639 Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Fri, 28 Nov 2025 10:22:43 -0500 Subject: [PATCH 0773/1093] cmd/k8s-operator fix populateTLSSecret on tests (#18088) The call for populateTLSSecret was broken between PRs. 
Updates #cleanup Signed-off-by: Fernando Serboncini --- cmd/k8s-operator/ingress-for-pg_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 1257336e353c1..0f5527185a738 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -671,7 +671,7 @@ func TestIngressPGReconciler_HTTPRedirect(t *testing.T) { // Verify initial reconciliation with HTTP redirect enabled expectReconciled(t, ingPGR, "default", "test-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") // Verify Tailscale Service includes both tcp:80 and tcp:443 @@ -795,7 +795,7 @@ func TestIngressPGReconciler_HTTPEndpointAndRedirectConflict(t *testing.T) { // Verify initial reconciliation - HTTP endpoint should take precedence expectReconciled(t, ingPGR, "default", "test-ingress") - populateTLSSecret(context.Background(), fc, "test-pg", "my-svc.ts.net") + populateTLSSecret(t, fc, "test-pg", "my-svc.ts.net") expectReconciled(t, ingPGR, "default", "test-ingress") // Verify Tailscale Service includes both tcp:80 and tcp:443 From 34dff571371b255e59fa36c11dd2b5a31bda51b1 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Thu, 27 Nov 2025 20:03:09 +0000 Subject: [PATCH 0774/1093] feature/posture: log method and full URL for posture identity requests Updates tailscale/corp#34676 Signed-off-by: Anton Tolchanov --- feature/posture/posture.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/feature/posture/posture.go b/feature/posture/posture.go index 8e1945d7dbd0b..977e7429571a8 100644 --- a/feature/posture/posture.go +++ b/feature/posture/posture.go @@ -52,7 +52,7 @@ func handleC2NPostureIdentityGet(b *ipnlocal.LocalBackend, w http.ResponseWriter http.Error(w, "posture extension not available", 
http.StatusInternalServerError) return } - e.logf("c2n: GET /posture/identity received") + e.logf("c2n: %s %s received", r.Method, r.URL.String()) res := tailcfg.C2NPostureIdentityResponse{} From bd12d8f12f67cbd9f8e69970b0acf4d47f683661 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Thu, 9 Oct 2025 11:58:29 +0100 Subject: [PATCH 0775/1093] cmd/tailscale/cli: soften the warning on `--force-reauth` for seamless Thanks to seamless key renewal, you can now do a force-reauth without losing your connection in all circumstances. We softened the interactive warning (see #17262) so let's soften the help text as well. Updates https://github.com/tailscale/corp/issues/32429 Signed-off-by: Alex Chan --- cmd/tailscale/cli/up.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 72515400d8fa1..2a3cbf75ace0c 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -137,7 +137,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { // Some flags are only for "up", not "login". 
upf.BoolVar(&upArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)") upf.BoolVar(&upArgs.reset, "reset", false, "reset unspecified settings to their default values") - upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication (WARNING: this will bring down the Tailscale connection and thus should not be done remotely over SSH or RDP)") + upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication (WARNING: this may bring down the Tailscale connection and thus should not be done remotely over SSH or RDP)") registerAcceptRiskFlag(upf, &upArgs.acceptedRisks) } From 37b4dd047f7f9c1aa41b0fe08d9dd1bbd3c9b29c Mon Sep 17 00:00:00 2001 From: Shaikh Naasir Date: Mon, 1 Dec 2025 21:10:24 +0530 Subject: [PATCH 0776/1093] k8s-operator: Fix typos in egress-pod-readiness.go Updates #cleanup Signed-off-by: Alex Chan --- cmd/k8s-operator/egress-pod-readiness.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index a732e08612c86..0ed64cdb4346b 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -175,7 +175,7 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req err = errors.Join(err, e) } if err != nil { - return res, fmt.Errorf("error verifying conectivity: %w", err) + return res, fmt.Errorf("error verifying connectivity: %w", err) } if rm := routesMissing.Load(); rm { lg.Info("Pod is not yet added as an endpoint for all egress targets, waiting...") @@ -241,7 +241,7 @@ func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *c req.Close = true resp, err := er.httpClient.Do(req) if err != nil { - // This is most likely because this is the first Pod and is not yet added to Service endoints. Other + // This is most likely because this is the first Pod and is not yet added to service endpoints.
Other // error types are possible, but checking for those would likely make the system too fragile. return unreachable, nil } From 97f1fd6d48b11ef1e479703889a33e56cb471863 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 1 Dec 2025 21:42:57 +0000 Subject: [PATCH 0777/1093] .github: only save cache on main The cache artifacts from a full run of test.yml are 14GB. Only save artifacts from the main branch to ensure we don't thrash too much. Most branches should get decent performance with a hit from recent main. Fixes tailscale/corp#34739 Change-Id: Ia83269d878e4781e3ddf33f1db2f21d06ea2130f Signed-off-by: Tom Proctor --- .github/workflows/test.yml | 153 +++++++++++++++++++++++-------------- 1 file changed, 95 insertions(+), 58 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 35b4ea3ef1f68..5fcd60161413a 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -136,21 +136,20 @@ jobs: key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - name: Restore Cache - uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + id: restore-cache + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: - # Note: unlike the other setups, this is only grabbing the mod download - # cache, rather than the whole mod directory, as the download cache - # contains zips that can be unpacked in parallel faster than they can be - # fetched and extracted by tar + # Note: this is only restoring the build cache. Mod cache is shared amongst + # all jobs in the workflow. path: | ~/.cache/go-build ~\AppData\Local\go-build - # The -2- here should be incremented when the scheme of data to be - # cached changes (e.g. path above changes). 
- key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} + key: ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} restore-keys: | - ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2- + ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}- + ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}- + ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}- + ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go- - name: build all if: matrix.buildflags == '' # skip on race builder working-directory: src @@ -206,6 +205,17 @@ jobs: shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete + - name: Save Cache + # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. + if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' + uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + with: + # Note: this is only saving the build cache. Mod cache is shared amongst + # all jobs in the workflow. 
+ path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} windows: # windows-8vpu is a 2022 GitHub-managed runner in our @@ -376,28 +386,26 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: path: src - - name: Restore Cache - uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 - with: - # Note: unlike the other setups, this is only grabbing the mod download - # cache, rather than the whole mod directory, as the download cache - # contains zips that can be unpacked in parallel faster than they can be - # fetched and extracted by tar - path: | - ~/.cache/go-build - ~\AppData\Local\go-build - # The -2- here should be incremented when the scheme of data to be - # cached changes (e.g. path above changes). - key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} - restore-keys: | - ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- - name: Restore Go module cache uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true + - name: Restore Cache + id: restore-cache + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + with: + # Note: this is only restoring the build cache. Mod cache is shared amongst + # all jobs in the workflow. 
+ path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} + restore-keys: | + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}- + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}- + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go- - name: build all working-directory: src run: ./tool/go build ./cmd/... @@ -418,6 +426,17 @@ jobs: shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete + - name: Save Cache + # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. + if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' + uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + with: + # Note: this is only saving the build cache. Mod cache is shared amongst + # all jobs in the workflow. + path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-${{ matrix.goarm }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} ios: # similar to cross above, but iOS can't build most of the repo. So, just # make it build a few smoke packages. 
@@ -466,28 +485,26 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: path: src - - name: Restore Cache - uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 - with: - # Note: unlike the other setups, this is only grabbing the mod download - # cache, rather than the whole mod directory, as the download cache - # contains zips that can be unpacked in parallel faster than they can be - # fetched and extracted by tar - path: | - ~/.cache/go-build - ~\AppData\Local\go-build - # The -2- here should be incremented when the scheme of data to be - # cached changes (e.g. path above changes). - key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} - restore-keys: | - ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2- - name: Restore Go module cache uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true + - name: Restore Cache + id: restore-cache + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + with: + # Note: this is only restoring the build cache. Mod cache is shared amongst + # all jobs in the workflow. 
+ path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} + restore-keys: | + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}- + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}- + ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go- - name: build core working-directory: src run: ./tool/go build ./cmd/tailscale ./cmd/tailscaled @@ -501,6 +518,17 @@ jobs: shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete + - name: Save Cache + # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. + if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' + uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + with: + # Note: this is only saving the build cache. Mod cache is shared amongst + # all jobs in the workflow. + path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} android: # similar to cross above, but android fails to build a few pieces of the @@ -538,28 +566,26 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: path: src - - name: Restore Cache - uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 - with: - # Note: unlike the other setups, this is only grabbing the mod download - # cache, rather than the whole mod directory, as the download cache - # contains zips that can be unpacked in parallel faster than they can be - # fetched and extracted by tar - path: | - ~/.cache/go-build - ~\AppData\Local\go-build - # The -2- here should be incremented when the scheme of data to be - # cached changes (e.g. path above changes). 
- key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} - restore-keys: | - ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ runner.os }}-go-2- - name: Restore Go module cache uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true + - name: Restore Cache + id: restore-cache + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + with: + # Note: this is only restoring the build cache. Mod cache is shared amongst + # all jobs in the workflow. + path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} + restore-keys: | + ${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}- + ${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}- + ${{ runner.os }}-js-wasm-go- - name: build tsconnect client working-directory: src run: ./tool/go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli @@ -578,6 +604,17 @@ jobs: shell: bash run: | find $(go env GOCACHE) -type f -mmin +90 -delete + - name: Save Cache + # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. + if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' + uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + with: + # Note: this is only saving the build cache. Mod cache is shared amongst + # all jobs in the workflow. + path: | + ~/.cache/go-build + ~\AppData\Local\go-build + key: ${{ runner.os }}-js-wasm-go-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} tailscale_go: # Subset of tests that depend on our custom Go toolchain. 
runs-on: ubuntu-24.04 From ece6e27f39ceb11b4c51ef4bfd317cacb5203d89 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 25 Nov 2025 23:01:32 +0000 Subject: [PATCH 0778/1093] .github,cmd/cigocacher: use cigocacher for windows Implements a new disk put function for cigocacher that does not cause locking issues on Windows when there are multiple processes reading and writing the same files concurrently. Integrates cigocacher into test.yml for Windows where we are running on larger runners that support connecting to private Azure vnet resources where cigocached is hosted. Updates tailscale/corp#10808 Change-Id: I0d0e9b670e49e0f9abf01ff3d605cd660dd85ebb Signed-off-by: Tom Proctor --- .github/actions/go-cache/action.sh | 49 +++++++++++++ .github/actions/go-cache/action.yml | 30 ++++++++ .github/workflows/test.yml | 64 ++++++++--------- cmd/cigocacher/cigocacher.go | 51 +++++++------- cmd/cigocacher/disk.go | 88 ++++++++++++++++++++++++ cmd/cigocacher/disk_notwindows.go | 44 ++++++++++++ cmd/cigocacher/disk_windows.go | 102 ++++++++++++++++++++++++++++ 7 files changed, 372 insertions(+), 56 deletions(-) create mode 100755 .github/actions/go-cache/action.sh create mode 100644 .github/actions/go-cache/action.yml create mode 100644 cmd/cigocacher/disk.go create mode 100644 cmd/cigocacher/disk_notwindows.go create mode 100644 cmd/cigocacher/disk_windows.go diff --git a/.github/actions/go-cache/action.sh b/.github/actions/go-cache/action.sh new file mode 100755 index 0000000000000..84fb878f804a6 --- /dev/null +++ b/.github/actions/go-cache/action.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# +# This script sets up cigocacher, but should never fail the build if unsuccessful. +# It expects to run on a GitHub-hosted runner, and connects to cigocached over a +# private Azure network that is configured at the runner group level in GitHub. +# +# Usage: ./action.sh +# Inputs: +# URL: The cigocached server URL. +# Outputs: +# success: Whether cigocacher was set up successfully. 
+ +set -euo pipefail + +if [ -z "${GITHUB_ACTIONS:-}" ]; then + echo "This script is intended to run within GitHub Actions" + exit 1 +fi + +if [ -z "$URL" ]; then + echo "No cigocached URL is set, skipping cigocacher setup" + exit 0 +fi + +JWT="$(curl -sSL -H "Authorization: Bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" "${ACTIONS_ID_TOKEN_REQUEST_URL}&audience=gocached" | jq -r .value)" +# cigocached serves a TLS cert with an FQDN, but DNS is based on VM name. +HOST_AND_PORT="${URL#http*://}" +FIRST_LABEL="${HOST_AND_PORT/.*/}" +# Save CONNECT_TO for later steps to use. +echo "CONNECT_TO=${HOST_AND_PORT}:${FIRST_LABEL}:" >> "${GITHUB_ENV}" +BODY="$(jq -n --arg jwt "$JWT" '{"jwt": $jwt}')" +CIGOCACHER_TOKEN="$(curl -sSL --connect-to "$HOST_AND_PORT:$FIRST_LABEL:" -H "Content-Type: application/json" "$URL/auth/exchange-token" -d "$BODY" | jq -r .access_token || true)" +if [ -z "$CIGOCACHER_TOKEN" ]; then + echo "Failed token exchange with cigocached, skipping cigocacher setup" + exit 0 +fi + +# Wait until we successfully auth before building cigocacher to ensure we know +# it's worth building. +# TODO(tomhjp): bake cigocacher into runner image and use it for auth. 
+echo "Fetched cigocacher token successfully" +echo "::add-mask::${CIGOCACHER_TOKEN}" +echo "CIGOCACHER_TOKEN=${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}" + +BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(go env GOEXE)" + +go build -o "${BIN_PATH}" ./cmd/cigocacher +echo "GOCACHEPROG=${BIN_PATH} --cache-dir ${CACHE_DIR} --cigocached-url ${URL} --token ${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}" +echo "success=true" >> "${GITHUB_OUTPUT}" diff --git a/.github/actions/go-cache/action.yml b/.github/actions/go-cache/action.yml new file mode 100644 index 0000000000000..a671530f895f9 --- /dev/null +++ b/.github/actions/go-cache/action.yml @@ -0,0 +1,30 @@ +name: go-cache +description: Set up build to use cigocacher + +inputs: + cigocached-url: + description: URL of the cigocached server + required: true + checkout-path: + description: Path to cloned repository + required: true + cache-dir: + description: Directory to use for caching + required: true + +outputs: + success: + description: Whether cigocacher was set up successfully + value: ${{ steps.setup.outputs.success }} + +runs: + using: composite + steps: + - name: Setup cigocacher + id: setup + shell: bash + env: + URL: ${{ inputs.cigocached-url }} + CACHE_DIR: ${{ inputs.cache-dir }} + working-directory: ${{ inputs.checkout-path }} + run: .github/actions/go-cache/action.sh diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5fcd60161413a..fd193401d7c7c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -218,10 +218,13 @@ jobs: key: ${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-${{ matrix.shard }}-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} windows: - # windows-8vpu is a 2022 GitHub-managed runner in our - # org with 8 cores and 32 GB of RAM: - # https://github.com/organizations/tailscale/settings/actions/github-hosted-runners/1 - runs-on: windows-8vcpu + permissions: + id-token: write # This is required for requesting the GitHub action 
identity JWT that can auth to cigocached + contents: read # This is required for actions/checkout + # ci-windows-github-1 is a 2022 GitHub-managed runner in our org with 8 cores + # and 32 GB of RAM. It is connected to a private Azure VNet that hosts cigocached. + # https://github.com/organizations/tailscale/settings/actions/github-hosted-runners/5 + runs-on: ci-windows-github-1 needs: gomod-cache name: Windows (${{ matrix.name || matrix.shard}}) strategy: @@ -230,8 +233,6 @@ jobs: include: - key: "win-bench" name: "benchmarks" - - key: "win-tool-go" - name: "./tool/go" - key: "win-shard-1-2" shard: "1/2" - key: "win-shard-2-2" @@ -240,44 +241,31 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: - path: src + path: ${{ github.workspace }}/src - name: Install Go - if: matrix.key != 'win-tool-go' uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: - go-version-file: src/go.mod + go-version-file: ${{ github.workspace }}/src/go.mod cache: false - name: Restore Go module cache - if: matrix.key != 'win-tool-go' uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - - name: Restore Cache - if: matrix.key != 'win-tool-go' - uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + - name: Set up cigocacher + id: cigocacher-setup + uses: ./src/.github/actions/go-cache with: - path: | - ~/.cache/go-build - ~\AppData\Local\go-build - # The -2- here should be incremented when the scheme of data to be - # cached changes (e.g. path above changes). 
- key: ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }} - restore-keys: | - ${{ github.job }}-${{ matrix.key }}-go-2-${{ hashFiles('**/go.sum') }} - ${{ github.job }}-${{ matrix.key }}-go-2- - - - name: test-tool-go - if: matrix.key == 'win-tool-go' - working-directory: src - run: ./tool/go version + checkout-path: ${{ github.workspace }}/src + cache-dir: ${{ github.workspace }}/cigocacher + cigocached-url: ${{ vars.CIGOCACHED_AZURE_URL }} - name: test - if: matrix.key != 'win-bench' && matrix.key != 'win-tool-go' # skip on bench builder + if: matrix.key != 'win-bench' # skip on bench builder working-directory: src run: go run ./cmd/testwrapper sharded:${{ matrix.shard }} @@ -289,12 +277,24 @@ jobs: # the equals signs cause great confusion. run: go test ./... -bench . -benchtime 1x -run "^$" - - name: Tidy cache - if: matrix.key != 'win-tool-go' - working-directory: src + - name: Print stats shell: bash + if: steps.cigocacher-setup.outputs.success == 'true' run: | - find $(go env GOCACHE) -type f -mmin +90 -delete + curl -sSL --connect-to "${CONNECT_TO}" -H "Authorization: Bearer ${CIGOCACHER_TOKEN}" "${{ vars.CIGOCACHED_AZURE_URL }}/session/stats" | jq . 
+ + win-tool-go: + runs-on: windows-latest + needs: gomod-cache + name: Windows (win-tool-go) + steps: + - name: checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + path: src + - name: test-tool-go + working-directory: src + run: ./tool/go version privileged: needs: gomod-cache diff --git a/cmd/cigocacher/cigocacher.go b/cmd/cigocacher/cigocacher.go index b38df4c2b40a5..1ada62b6a660b 100644 --- a/cmd/cigocacher/cigocacher.go +++ b/cmd/cigocacher/cigocacher.go @@ -37,6 +37,7 @@ func main() { auth = flag.Bool("auth", false, "auth with cigocached and exit, printing the access token as output") token = flag.String("token", "", "the cigocached access token to use, as created using --auth") cigocachedURL = flag.String("cigocached-url", "", "optional cigocached URL (scheme, host, and port). empty means to not use one.") + dir = flag.String("cache-dir", "", "cache directory; empty means automatic") verbose = flag.Bool("verbose", false, "enable verbose logging") ) flag.Parse() @@ -55,22 +56,29 @@ func main() { return } - d, err := os.UserCacheDir() - if err != nil { - log.Fatal(err) + if *dir == "" { + d, err := os.UserCacheDir() + if err != nil { + log.Fatal(err) + } + *dir = filepath.Join(d, "go-cacher") + log.Printf("Defaulting to cache dir %v ...", *dir) } - d = filepath.Join(d, "go-cacher") - log.Printf("Defaulting to cache dir %v ...", d) - if err := os.MkdirAll(d, 0750); err != nil { + if err := os.MkdirAll(*dir, 0750); err != nil { log.Fatal(err) } c := &cigocacher{ - disk: &cachers.DiskCache{Dir: d}, + disk: &cachers.DiskCache{ + Dir: *dir, + Verbose: *verbose, + }, verbose: *verbose, } if *cigocachedURL != "" { - log.Printf("Using cigocached at %s", *cigocachedURL) + if *verbose { + log.Printf("Using cigocached at %s", *cigocachedURL) + } c.gocached = &gocachedClient{ baseURL: *cigocachedURL, cl: httpClient(), @@ -81,8 +89,10 @@ func main() { var p *cacheproc.Process p = &cacheproc.Process{ Close: func() error { - 
log.Printf("gocacheprog: closing; %d gets (%d hits, %d misses, %d errors); %d puts (%d errors)", - p.Gets.Load(), p.GetHits.Load(), p.GetMisses.Load(), p.GetErrors.Load(), p.Puts.Load(), p.PutErrors.Load()) + if c.verbose { + log.Printf("gocacheprog: closing; %d gets (%d hits, %d misses, %d errors); %d puts (%d errors)", + p.Gets.Load(), p.GetHits.Load(), p.GetMisses.Load(), p.GetErrors.Load(), p.Puts.Load(), p.PutErrors.Load()) + } return c.close() }, Get: c.get, @@ -164,11 +174,7 @@ func (c *cigocacher) get(ctx context.Context, actionID string) (outputID, diskPa defer res.Body.Close() - // TODO(tomhjp): make sure we timeout if cigocached disappears, but for some - // reason, this seemed to tank network performance. - // ctx, cancel := context.WithTimeout(ctx, httpTimeout(res.ContentLength)) - // defer cancel() - diskPath, err = c.disk.Put(ctx, actionID, outputID, res.ContentLength, res.Body) + diskPath, err = put(c.disk, actionID, outputID, res.ContentLength, res.Body) if err != nil { return "", "", fmt.Errorf("error filling disk cache from HTTP: %w", err) } @@ -184,7 +190,7 @@ func (c *cigocacher) put(ctx context.Context, actionID, outputID string, size in c.putNanos.Add(time.Since(t0).Nanoseconds()) }() if c.gocached == nil { - return c.disk.Put(ctx, actionID, outputID, size, r) + return put(c.disk, actionID, outputID, size, r) } c.putHTTP.Add(1) @@ -206,10 +212,6 @@ func (c *cigocacher) put(ctx context.Context, actionID, outputID string, size in } httpErrCh := make(chan error) go func() { - // TODO(tomhjp): make sure we timeout if cigocached disappears, but for some - // reason, this seemed to tank network performance. 
- // ctx, cancel := context.WithTimeout(ctx, httpTimeout(size)) - // defer cancel() t0HTTP := time.Now() defer func() { c.putHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds()) @@ -217,7 +219,7 @@ func (c *cigocacher) put(ctx context.Context, actionID, outputID string, size in httpErrCh <- c.gocached.put(ctx, actionID, outputID, size, httpReader) }() - diskPath, err = c.disk.Put(ctx, actionID, outputID, size, diskReader) + diskPath, err = put(c.disk, actionID, outputID, size, diskReader) if err != nil { return "", fmt.Errorf("error writing to disk cache: %w", errors.Join(err, tee.err)) } @@ -236,13 +238,14 @@ func (c *cigocacher) put(ctx context.Context, actionID, outputID string, size in } func (c *cigocacher) close() error { - log.Printf("cigocacher HTTP stats: %d gets (%.1fMiB, %.2fs, %d hits, %d misses, %d errors ignored); %d puts (%.1fMiB, %.2fs, %d errors ignored)", - c.getHTTP.Load(), float64(c.getHTTPBytes.Load())/float64(1<<20), float64(c.getHTTPNanos.Load())/float64(time.Second), c.getHTTPHits.Load(), c.getHTTPMisses.Load(), c.getHTTPErrors.Load(), - c.putHTTP.Load(), float64(c.putHTTPBytes.Load())/float64(1<<20), float64(c.putHTTPNanos.Load())/float64(time.Second), c.putHTTPErrors.Load()) if !c.verbose || c.gocached == nil { return nil } + log.Printf("cigocacher HTTP stats: %d gets (%.1fMiB, %.2fs, %d hits, %d misses, %d errors ignored); %d puts (%.1fMiB, %.2fs, %d errors ignored)", + c.getHTTP.Load(), float64(c.getHTTPBytes.Load())/float64(1<<20), float64(c.getHTTPNanos.Load())/float64(time.Second), c.getHTTPHits.Load(), c.getHTTPMisses.Load(), c.getHTTPErrors.Load(), + c.putHTTP.Load(), float64(c.putHTTPBytes.Load())/float64(1<<20), float64(c.putHTTPNanos.Load())/float64(time.Second), c.putHTTPErrors.Load()) + stats, err := c.gocached.fetchStats() if err != nil { log.Printf("error fetching gocached stats: %v", err) diff --git a/cmd/cigocacher/disk.go b/cmd/cigocacher/disk.go new file mode 100644 index 0000000000000..57a9b80d5609e --- /dev/null +++ 
b/cmd/cigocacher/disk.go @@ -0,0 +1,88 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "path/filepath" + "time" + + "github.com/bradfitz/go-tool-cache/cachers" +) + +// indexEntry is the metadata that DiskCache stores on disk for an ActionID. +type indexEntry struct { + Version int `json:"v"` + OutputID string `json:"o"` + Size int64 `json:"n"` + TimeNanos int64 `json:"t"` +} + +func validHex(x string) bool { + if len(x) < 4 || len(x) > 100 { + return false + } + for _, b := range x { + if b >= '0' && b <= '9' || b >= 'a' && b <= 'f' { + continue + } + return false + } + return true +} + +// put is like dc.Put but refactored to support safe concurrent writes on Windows. +// TODO(tomhjp): upstream these changes to go-tool-cache once they look stable. +func put(dc *cachers.DiskCache, actionID, outputID string, size int64, body io.Reader) (diskPath string, _ error) { + if len(actionID) < 4 || len(outputID) < 4 { + return "", fmt.Errorf("actionID and outputID must be at least 4 characters long") + } + if !validHex(actionID) { + log.Printf("diskcache: got invalid actionID %q", actionID) + return "", errors.New("actionID must be hex") + } + if !validHex(outputID) { + log.Printf("diskcache: got invalid outputID %q", outputID) + return "", errors.New("outputID must be hex") + } + + actionFile := dc.ActionFilename(actionID) + outputFile := dc.OutputFilename(outputID) + actionDir := filepath.Dir(actionFile) + outputDir := filepath.Dir(outputFile) + + if err := os.MkdirAll(actionDir, 0755); err != nil { + return "", fmt.Errorf("failed to create action directory: %w", err) + } + if err := os.MkdirAll(outputDir, 0755); err != nil { + return "", fmt.Errorf("failed to create output directory: %w", err) + } + + wrote, err := writeOutputFile(outputFile, body, size, outputID) + if err != nil { + return "", err + } + if wrote != size { + return "", 
fmt.Errorf("wrote %d bytes, expected %d", wrote, size) + } + + ij, err := json.Marshal(indexEntry{ + Version: 1, + OutputID: outputID, + Size: size, + TimeNanos: time.Now().UnixNano(), + }) + if err != nil { + return "", err + } + if err := writeActionFile(dc.ActionFilename(actionID), ij); err != nil { + return "", fmt.Errorf("atomic write failed: %w", err) + } + return outputFile, nil +} diff --git a/cmd/cigocacher/disk_notwindows.go b/cmd/cigocacher/disk_notwindows.go new file mode 100644 index 0000000000000..705ed92e3d8de --- /dev/null +++ b/cmd/cigocacher/disk_notwindows.go @@ -0,0 +1,44 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows + +package main + +import ( + "bytes" + "io" + "os" + "path/filepath" +) + +func writeActionFile(dest string, b []byte) error { + _, err := writeAtomic(dest, bytes.NewReader(b)) + return err +} + +func writeOutputFile(dest string, r io.Reader, _ int64, _ string) (int64, error) { + return writeAtomic(dest, r) +} + +func writeAtomic(dest string, r io.Reader) (int64, error) { + tf, err := os.CreateTemp(filepath.Dir(dest), filepath.Base(dest)+".*") + if err != nil { + return 0, err + } + size, err := io.Copy(tf, r) + if err != nil { + tf.Close() + os.Remove(tf.Name()) + return 0, err + } + if err := tf.Close(); err != nil { + os.Remove(tf.Name()) + return 0, err + } + if err := os.Rename(tf.Name(), dest); err != nil { + os.Remove(tf.Name()) + return 0, err + } + return size, nil +} diff --git a/cmd/cigocacher/disk_windows.go b/cmd/cigocacher/disk_windows.go new file mode 100644 index 0000000000000..9efae2c632087 --- /dev/null +++ b/cmd/cigocacher/disk_windows.go @@ -0,0 +1,102 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "crypto/sha256" + "errors" + "fmt" + "io" + "os" +) + +// The functions in this file are based on go's own cache in +// cmd/go/internal/cache/cache.go, particularly putIndexEntry and 
copyFile. + +// writeActionFile writes the indexEntry metadata for an ActionID to disk. It +// may be called for the same actionID concurrently from multiple processes, +// and the outputID for a specific actionID may change from time to time due +// to non-deterministic builds. It makes a best-effort to delete the file if +// anything goes wrong. +func writeActionFile(dest string, b []byte) (retErr error) { + f, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, 0o666) + if err != nil { + return err + } + defer func() { + cerr := f.Close() + if retErr != nil || cerr != nil { + retErr = errors.Join(retErr, cerr, os.Remove(dest)) + } + }() + + _, err = f.Write(b) + if err != nil { + return err + } + + // Truncate the file only *after* writing it. + // (This should be a no-op, but truncate just in case of previous corruption.) + // + // This differs from os.WriteFile, which truncates to 0 *before* writing + // via os.O_TRUNC. Truncating only after writing ensures that a second write + // of the same content to the same file is idempotent, and does not - even + // temporarily! - undo the effect of the first write. + return f.Truncate(int64(len(b))) +} + +// writeOutputFile writes content to be cached to disk. The outputID is the +// sha256 hash of the content, and each file should only be written ~once, +// assuming no sha256 hash collisions. It may be written multiple times if +// concurrent processes are both populating the same output. The file is opened +// with FILE_SHARE_READ|FILE_SHARE_WRITE, which means both processes can write +// the same contents concurrently without conflict. +// +// It makes a best effort to clean up if anything goes wrong, but the file may +// be left in an inconsistent state in the event of disk-related errors such as +// another process taking file locks, or power loss etc. 
+func writeOutputFile(dest string, r io.Reader, size int64, outputID string) (_ int64, retErr error) { + info, err := os.Stat(dest) + if err == nil && info.Size() == size { + // Already exists, check the hash. + if f, err := os.Open(dest); err == nil { + h := sha256.New() + io.Copy(h, f) + f.Close() + if fmt.Sprintf("%x", h.Sum(nil)) == outputID { + // Still drain the reader to ensure associated resources are released. + return io.Copy(io.Discard, r) + } + } + } + + // Didn't successfully find the pre-existing file, write it. + mode := os.O_WRONLY | os.O_CREATE + if err == nil && info.Size() > size { + mode |= os.O_TRUNC // Should never happen, but self-heal. + } + f, err := os.OpenFile(dest, mode, 0644) + if err != nil { + return 0, fmt.Errorf("failed to open output file %q: %w", dest, err) + } + defer func() { + cerr := f.Close() + if retErr != nil || cerr != nil { + retErr = errors.Join(retErr, cerr, os.Remove(dest)) + } + }() + + // Copy file to f, but also into h to double-check hash. + h := sha256.New() + w := io.MultiWriter(f, h) + n, err := io.Copy(w, r) + if err != nil { + return 0, err + } + if fmt.Sprintf("%x", h.Sum(nil)) != outputID { + return 0, errors.New("file content changed underfoot") + } + + return n, nil +} From 77dcdc223ebcd70cd559c0d1b41625e6d897303c Mon Sep 17 00:00:00 2001 From: Naasir Date: Tue, 2 Dec 2025 13:19:47 +0530 Subject: [PATCH 0779/1093] cleanup: fix typos across multiple files Does not affect code. 
Updates #cleanup Signed-off-by: Naasir --- cmd/k8s-operator/egress-pod-readiness.go | 2 +- cmd/tailscale/cli/debug.go | 2 +- cmd/tailscale/cli/serve_legacy.go | 2 +- cmd/tailscale/cli/serve_v2_test.go | 2 +- control/controlclient/controlclient_test.go | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index 0ed64cdb4346b..ebab23ed06337 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -241,7 +241,7 @@ func (er *egressPodsReconciler) lookupPodRouteViaSvc(ctx context.Context, pod *c req.Close = true resp, err := er.httpClient.Do(req) if err != nil { - // This is most likely because this is the first Pod and is not yet added to service endpints. Other + // This is most likely because this is the first Pod and is not yet added to service endpoints. Other // error types are possible, but checking for those would likely make the system too fragile. 
return unreachable, nil } diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 2facd66ae0278..ccbfb59de9221 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -263,7 +263,7 @@ func debugCmd() *ffcli.Command { fs := newFlagSet("watch-ipn") fs.BoolVar(&watchIPNArgs.netmap, "netmap", true, "include netmap in messages") fs.BoolVar(&watchIPNArgs.initial, "initial", false, "include initial status") - fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messags") + fs.BoolVar(&watchIPNArgs.rateLimit, "rate-limit", true, "rate limit messages") fs.IntVar(&watchIPNArgs.count, "count", 0, "exit after printing this many statuses, or 0 to keep going forever") return fs })(), diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 580393ce489b1..0e9b7d0227ccf 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -40,7 +40,7 @@ func init() { var serveCmd = func() *ffcli.Command { se := &serveEnv{lc: &localClient} // previously used to serve legacy newFunnelCommand unless useWIPCode is true - // change is limited to make a revert easier and full cleanup to come after the relase. + // change is limited to make a revert easier and full cleanup to come after the release. 
// TODO(tylersmalley): cleanup and removal of newServeLegacyCommand as of 2023-10-16 return newServeV2Command(se, serve) } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index 513c0d1ec97d4..b3ebb32a2b4c4 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -1765,7 +1765,7 @@ func TestIsLegacyInvocation(t *testing.T) { } if gotTranslation != tt.translation { - t.Fatalf("expected translaction to be %q but got %q", tt.translation, gotTranslation) + t.Fatalf("expected translation to be %q but got %q", tt.translation, gotTranslation) } }) } diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index bc301122673f7..57d3ca7ca7ae3 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -196,7 +196,7 @@ func TestRetryableErrors(t *testing.T) { {fmt.Errorf("%w: %w", errHTTPPostFailure, errors.New("bad post")), true}, {fmt.Errorf("%w: %w", errNoNodeKey, errors.New("not node key")), true}, {errBadHTTPResponse(429, "too may requests"), true}, - {errBadHTTPResponse(500, "internal server eror"), true}, + {errBadHTTPResponse(500, "internal server error"), true}, {errBadHTTPResponse(502, "bad gateway"), true}, {errBadHTTPResponse(503, "service unavailable"), true}, {errBadHTTPResponse(504, "gateway timeout"), true}, From 8976b34cb80ece41b7e8ed0fb54c554bfca6173b Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 1 Dec 2025 15:02:53 +0000 Subject: [PATCH 0780/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 26 ++++++++++++++------------ licenses/apple.md | 25 +++++++++++++------------ licenses/tailscale.md | 20 ++++++++++---------- licenses/windows.md | 12 ++++++------ 4 files changed, 43 insertions(+), 40 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index f578c17cb19e8..d4d8c9d7b5c5f 100644 --- a/licenses/android.md +++ 
b/licenses/android.md @@ -9,6 +9,7 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) @@ -19,12 +20,13 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) 
([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) + - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) @@ -34,16 +36,16 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.38.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.45.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/b7579e27:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) - - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.24.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.40.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.14.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.33.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.32.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.25.0:LICENSE)) + - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) 
([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.30.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.47.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.18.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.38.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.37.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.31.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.11.0:LICENSE)) - - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.33.0:LICENSE)) + - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.39.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 2a795ddbb9cdf..6bb109f776c06 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -12,22 +12,22 @@ See also the dependencies in the [Tailscale CLI][]. 
- [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.39.6/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.4.13/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.7.13/internal/endpoints/v2/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.39.6/internal/sync/singleflight/LICENSE)) - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.23.2/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.23.2/internal/sync/singleflight/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) @@ -56,6 +56,7 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.22/LICENSE)) + - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) @@ -68,13 +69,13 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.43.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.45.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.46.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.37.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.36.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.30.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.47.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.18.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.38.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) 
([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.37.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.31.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 163a76d404202..85c0f33fc09d2 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -58,9 +58,9 @@ Some packages may only be included on certain architectures or operating systems - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/kballard/go-shellquote](https://pkg.go.dev/github.com/kballard/go-shellquote) ([MIT](https://github.com/kballard/go-shellquote/blob/95032a82bc51/LICENSE)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.17.11/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) + - 
[github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/kr/fs](https://pkg.go.dev/github.com/kr/fs) ([BSD-3-Clause](https://github.com/kr/fs/blob/v0.1.0/LICENSE)) - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) @@ -85,15 +85,15 @@ Some packages may only be included on certain architectures or operating systems - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.38.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.45.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) 
([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/b7579e27:LICENSE)) - [golang.org/x/image](https://pkg.go.dev/golang.org/x/image) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.40.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.47.0:LICENSE)) - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.30.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.14.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.33.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.32.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.25.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.18.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.38.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.37.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.31.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.11.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) 
- [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) diff --git a/licenses/windows.md b/licenses/windows.md index 06a5712ceb509..0b8344b4d66d4 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -51,14 +51,14 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [go.yaml.in/yaml/v2](https://pkg.go.dev/go.yaml.in/yaml/v2) ([Apache-2.0](https://github.com/yaml/go-yaml/blob/v2.4.2/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.43.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.45.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.28.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.46.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.37.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) 
([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.36.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.30.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.47.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.18.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.38.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.37.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.8/LICENSE)) From 22a815b6d2a3d26b23c06f30f0145e5e292b36c9 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 2 Dec 2025 11:32:06 +0000 Subject: [PATCH 0781/1093] tool: bump binaryen wasm optimiser version 111 -> 125 111 is 3 years old, and there have been a lot of speed improvements since then. We run wasm-opt twice as part of the CI wasm job, and it currently takes about 3 minutes each time. With 125, it takes ~40 seconds, a 4.5x speed-up. 
Updates #cleanup Change-Id: I671ae6cefa3997a23cdcab6871896b6b03e83a4f Signed-off-by: Tom Proctor --- tool/binaryen.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tool/binaryen.rev b/tool/binaryen.rev index 58c9bdf9d017f..d136d6a714260 100644 --- a/tool/binaryen.rev +++ b/tool/binaryen.rev @@ -1 +1 @@ -111 +125 From bd5c50909f47380f61b90d0c6c5c7d61d1219271 Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Tue, 2 Dec 2025 09:57:21 -0600 Subject: [PATCH 0782/1093] scripts/installer: add TAILSCALE_VERSION environment variable (#18014) Add support for pinning specific Tailscale versions during installation via the TAILSCALE_VERSION environment variable. Example usage: curl -fsSL https://tailscale.com/install.sh | TAILSCALE_VERSION=1.88.4 sh Fixes #17776 Signed-off-by: Raj Singh --- .github/workflows/installer.yml | 16 ++++++- scripts/installer.sh | 84 ++++++++++++++++++++++++++++----- 2 files changed, 88 insertions(+), 12 deletions(-) diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index bafa9925a647e..3a9ba194d6a61 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -58,6 +58,14 @@ jobs: # Check a few images with wget rather than curl. - { image: "debian:oldstable-slim", deps: "wget" } - { image: "debian:sid-slim", deps: "wget" } + - { image: "debian:stable-slim", deps: "curl" } + - { image: "ubuntu:24.04", deps: "curl" } + - { image: "fedora:latest", deps: "curl" } + # Test TAILSCALE_VERSION pinning on a subset of distros. + # Skip Alpine as community repos don't reliably keep old versions. 
+ - { image: "debian:stable-slim", deps: "curl", version: "1.80.0" } + - { image: "ubuntu:24.04", deps: "curl", version: "1.80.0" } + - { image: "fedora:latest", deps: "curl", version: "1.80.0" } runs-on: ubuntu-latest container: image: ${{ matrix.image }} @@ -94,12 +102,18 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: run installer run: scripts/installer.sh + env: + TAILSCALE_VERSION: ${{ matrix.version }} # Package installation can fail in docker because systemd is not running # as PID 1, so ignore errors at this step. The real check is the # `tailscale --version` command below. continue-on-error: true - name: check tailscale version - run: tailscale --version + run: | + tailscale --version + if [ -n "${{ matrix.version }}" ]; then + tailscale --version | grep -q "^${{ matrix.version }}" || { echo "Version mismatch!"; exit 1; } + fi notify-slack: needs: test runs-on: ubuntu-latest diff --git a/scripts/installer.sh b/scripts/installer.sh index e5b6cd23bc9a7..e21e40e155ca6 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -4,6 +4,15 @@ # # This script detects the current operating system, and installs # Tailscale according to that OS's conventions. +# +# Environment variables: +# TRACK: Set to "stable" or "unstable" (default: stable) +# TAILSCALE_VERSION: Pin to a specific version (e.g., "1.88.4") +# +# Examples: +# curl -fsSL https://tailscale.com/install.sh | sh +# curl -fsSL https://tailscale.com/install.sh | TAILSCALE_VERSION=1.88.4 sh +# curl -fsSL https://tailscale.com/install.sh | TRACK=unstable sh set -eu @@ -25,6 +34,7 @@ main() { APT_KEY_TYPE="" # Only for apt-based distros APT_SYSTEMCTL_START=false # Only needs to be true for Kali TRACK="${TRACK:-stable}" + TAILSCALE_VERSION="${TAILSCALE_VERSION:-}" case "$TRACK" in stable|unstable) @@ -502,7 +512,14 @@ main() { # Step 4: run the installation. 
OSVERSION="$OS" [ "$VERSION" != "" ] && OSVERSION="$OSVERSION $VERSION" - echo "Installing Tailscale for $OSVERSION, using method $PACKAGETYPE" + + # Prepare package name with optional version + PACKAGE_NAME="tailscale" + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Installing Tailscale $TAILSCALE_VERSION for $OSVERSION, using method $PACKAGETYPE" + else + echo "Installing Tailscale for $OSVERSION, using method $PACKAGETYPE" + fi case "$PACKAGETYPE" in apt) export DEBIAN_FRONTEND=noninteractive @@ -527,7 +544,11 @@ main() { ;; esac $SUDO apt-get update - $SUDO apt-get install -y tailscale tailscale-archive-keyring + if [ -n "$TAILSCALE_VERSION" ]; then + $SUDO apt-get install -y "tailscale=$TAILSCALE_VERSION" tailscale-archive-keyring + else + $SUDO apt-get install -y tailscale tailscale-archive-keyring + fi if [ "$APT_SYSTEMCTL_START" = "true" ]; then $SUDO systemctl enable --now tailscaled $SUDO systemctl start tailscaled @@ -538,7 +559,11 @@ main() { set -x $SUDO yum install yum-utils -y $SUDO yum-config-manager -y --add-repo "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" - $SUDO yum install tailscale -y + if [ -n "$TAILSCALE_VERSION" ]; then + $SUDO yum install "tailscale-$TAILSCALE_VERSION" -y + else + $SUDO yum install tailscale -y + fi $SUDO systemctl enable --now tailscaled set +x ;; @@ -578,14 +603,22 @@ main() { echo "unexpected: unknown dnf version $DNF_VERSION" exit 1 fi - $SUDO dnf install -y tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + $SUDO dnf install -y "tailscale-$TAILSCALE_VERSION" + else + $SUDO dnf install -y tailscale + fi $SUDO systemctl enable --now tailscaled set +x ;; tdnf) set -x curl -fsSL "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" > /etc/yum.repos.d/tailscale.repo - $SUDO tdnf install -y tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + $SUDO tdnf install -y "tailscale-$TAILSCALE_VERSION" + else + $SUDO tdnf install -y tailscale + fi $SUDO systemctl enable --now tailscaled set +x ;; @@ 
-594,19 +627,33 @@ main() { $SUDO rpm --import "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/repo.gpg" $SUDO zypper --non-interactive ar -g -r "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" $SUDO zypper --non-interactive --gpg-auto-import-keys refresh - $SUDO zypper --non-interactive install tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + $SUDO zypper --non-interactive install "tailscale=$TAILSCALE_VERSION" + else + $SUDO zypper --non-interactive install tailscale + fi $SUDO systemctl enable --now tailscaled set +x ;; pacman) set -x - $SUDO pacman -S tailscale --noconfirm + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Warning: Arch Linux maintains their own Tailscale package. Version pinning may not work as expected, as the target version may no longer be available." + $SUDO pacman -S "tailscale=$TAILSCALE_VERSION" --noconfirm + else + $SUDO pacman -S tailscale --noconfirm + fi $SUDO systemctl enable --now tailscaled set +x ;; pkg) set -x - $SUDO pkg install --yes tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Warning: FreeBSD maintains their own Tailscale package. Version pinning may not work as expected, as the target version may no longer be available." + $SUDO pkg install --yes "tailscale-$TAILSCALE_VERSION" + else + $SUDO pkg install --yes tailscale + fi $SUDO service tailscaled enable $SUDO service tailscaled start set +x @@ -621,19 +668,34 @@ main() { exit 1 fi fi - $SUDO apk add tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Warning: Alpine Linux maintains their own Tailscale package. Version pinning may not work as expected, as the target version may no longer be available." + $SUDO apk add "tailscale=$TAILSCALE_VERSION" + else + $SUDO apk add tailscale + fi $SUDO rc-update add tailscale $SUDO rc-service tailscale start set +x ;; xbps) set -x - $SUDO xbps-install tailscale -y + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Warning: Void Linux maintains their own Tailscale package. 
Version pinning may not work as expected, as the target version may no longer be available." + $SUDO xbps-install "tailscale-$TAILSCALE_VERSION" -y + else + $SUDO xbps-install tailscale -y + fi set +x ;; emerge) set -x - $SUDO emerge --ask=n net-vpn/tailscale + if [ -n "$TAILSCALE_VERSION" ]; then + echo "Warning: Gentoo maintains their own Tailscale package. Version pinning may not work as expected, as the target version may no longer be available." + $SUDO emerge --ask=n "=net-vpn/tailscale-$TAILSCALE_VERSION" + else + $SUDO emerge --ask=n net-vpn/tailscale + fi set +x ;; appstore) From 957a443b23f6e328a4aa664b168152470eebbc19 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Tue, 2 Dec 2025 09:08:48 -0800 Subject: [PATCH 0783/1093] cmd/netlogfmt: allow empty --resolve-addrs flag (#18103) Updates tailscale/corp#33352 Signed-off-by: Joe Tsai --- cmd/netlogfmt/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/netlogfmt/main.go b/cmd/netlogfmt/main.go index b8aba4aaa6196..0af52f862936c 100644 --- a/cmd/netlogfmt/main.go +++ b/cmd/netlogfmt/main.go @@ -77,6 +77,7 @@ func main() { *resolveAddrs = strings.ReplaceAll(*resolveAddrs, "-", "") // ignore dashes *resolveAddrs = strings.ReplaceAll(*resolveAddrs, "_", "") // ignore underscores switch *resolveAddrs { + case "": case "id", "nodeid": *resolveAddrs = "nodeid" case "name", "hostname": From 536188c1b53b5f1201649d53d6b133d162dcd174 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Tue, 25 Nov 2025 08:45:11 +0000 Subject: [PATCH 0784/1093] tsnet: enable node registration via federated identity Updates: tailscale.com/corp#34148 Signed-off-by: Gesa Stupperich --- cmd/k8s-operator/depaware.txt | 2 + cmd/tsidp/depaware.txt | 4 +- feature/oauthkey/oauthkey.go | 81 ++++++------ feature/oauthkey/oauthkey_test.go | 187 ++++++++++++++++++++++++++++ tsnet/depaware.txt | 4 +- tsnet/tsnet.go | 92 ++++++++++++-- tsnet/tsnet_test.go | 199 ++++++++++++++++++++++++++++++ 7 files changed, 522 insertions(+), 47 deletions(-) 
create mode 100644 feature/oauthkey/oauthkey_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index c76a4236e1105..959a8ca728f90 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -727,9 +727,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/c2n from tailscale.com/tsnet tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister/identityfederation from tailscale.com/tsnet tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet + tailscale.com/feature/identityfederation from tailscale.com/feature/condregister/identityfederation tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 5c6aae5121196..045986aedc4e5 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -146,9 +146,11 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/c2n from tailscale.com/tsnet tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister/identityfederation from tailscale.com/tsnet tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet + tailscale.com/feature/identityfederation from 
tailscale.com/feature/condregister/identityfederation tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy @@ -350,7 +352,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ - golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ diff --git a/feature/oauthkey/oauthkey.go b/feature/oauthkey/oauthkey.go index 5834c33becad6..336340c85109b 100644 --- a/feature/oauthkey/oauthkey.go +++ b/feature/oauthkey/oauthkey.go @@ -33,54 +33,22 @@ func init() { // false. The "baseURL" defaults to https://api.tailscale.com. // The passed in tags are required, and must be non-empty. These will be // set on the authkey generated by the OAuth2 dance. 
-func resolveAuthKey(ctx context.Context, v string, tags []string) (string, error) { - if !strings.HasPrefix(v, "tskey-client-") { - return v, nil +func resolveAuthKey(ctx context.Context, clientSecret string, tags []string) (string, error) { + if !strings.HasPrefix(clientSecret, "tskey-client-") { + return clientSecret, nil } if len(tags) == 0 { return "", errors.New("oauth authkeys require --advertise-tags") } - clientSecret, named, _ := strings.Cut(v, "?") - attrs, err := url.ParseQuery(named) - if err != nil { - return "", err - } - for k := range attrs { - switch k { - case "ephemeral", "preauthorized", "baseURL": - default: - return "", fmt.Errorf("unknown attribute %q", k) - } - } - getBool := func(name string, def bool) (bool, error) { - v := attrs.Get(name) - if v == "" { - return def, nil - } - ret, err := strconv.ParseBool(v) - if err != nil { - return false, fmt.Errorf("invalid attribute boolean attribute %s value %q", name, v) - } - return ret, nil - } - ephemeral, err := getBool("ephemeral", true) - if err != nil { - return "", err - } - preauth, err := getBool("preauthorized", false) + strippedSecret, ephemeral, preauth, baseURL, err := parseOptionalAttributes(clientSecret) if err != nil { return "", err } - baseURL := "https://api.tailscale.com" - if v := attrs.Get("baseURL"); v != "" { - baseURL = v - } - credentials := clientcredentials.Config{ ClientID: "some-client-id", // ignored - ClientSecret: clientSecret, + ClientSecret: strippedSecret, TokenURL: baseURL + "/api/v2/oauth/token", } @@ -106,3 +74,42 @@ func resolveAuthKey(ctx context.Context, v string, tags []string) (string, error } return authkey, nil } + +func parseOptionalAttributes(clientSecret string) (strippedSecret string, ephemeral bool, preauth bool, baseURL string, err error) { + strippedSecret, named, _ := strings.Cut(clientSecret, "?") + attrs, err := url.ParseQuery(named) + if err != nil { + return "", false, false, "", err + } + for k := range attrs { + switch k { + case 
"ephemeral", "preauthorized", "baseURL": + default: + return "", false, false, "", fmt.Errorf("unknown attribute %q", k) + } + } + getBool := func(name string, def bool) (bool, error) { + v := attrs.Get(name) + if v == "" { + return def, nil + } + ret, err := strconv.ParseBool(v) + if err != nil { + return false, fmt.Errorf("invalid attribute boolean attribute %s value %q", name, v) + } + return ret, nil + } + ephemeral, err = getBool("ephemeral", true) + if err != nil { + return "", false, false, "", err + } + preauth, err = getBool("preauthorized", false) + if err != nil { + return "", false, false, "", err + } + baseURL = "https://api.tailscale.com" + if v := attrs.Get("baseURL"); v != "" { + baseURL = v + } + return strippedSecret, ephemeral, preauth, baseURL, nil +} diff --git a/feature/oauthkey/oauthkey_test.go b/feature/oauthkey/oauthkey_test.go new file mode 100644 index 0000000000000..b550d8c2ce77a --- /dev/null +++ b/feature/oauthkey/oauthkey_test.go @@ -0,0 +1,187 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package oauthkey + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResolveAuthKey(t *testing.T) { + tests := []struct { + name string + clientID string + tags []string + wantAuthKey string + wantErr bool + }{ + { + name: "keys without client secret prefix pass through unchanged", + clientID: "tskey-auth-regular", + tags: []string{"tag:test"}, + wantAuthKey: "tskey-auth-regular", + wantErr: false, + }, + { + name: "client secret without advertised tags", + clientID: "tskey-client-abc", + tags: nil, + wantAuthKey: "", + wantErr: true, + }, + { + name: "client secret with default attributes", + clientID: "tskey-client-abc", + tags: []string{"tag:test"}, + wantAuthKey: "tskey-auth-xyz", + wantErr: false, + }, + { + name: "client secret with custom attributes", + clientID: "tskey-client-abc?ephemeral=false&preauthorized=true", + tags: []string{"tag:test"}, + 
wantAuthKey: "tskey-auth-xyz", + wantErr: false, + }, + { + name: "client secret with unknown attribute", + clientID: "tskey-client-abc?unknown=value", + tags: []string{"tag:test"}, + wantAuthKey: "", + wantErr: true, + }, + { + name: "oauth client secret with invalid attribute value", + clientID: "tskey-client-abc?ephemeral=invalid", + tags: []string{"tag:test"}, + wantAuthKey: "", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := mockControlServer(t) + defer srv.Close() + + // resolveAuthKey reads custom control plane URLs off the baseURL attribute + // on the client secret string. Therefore, append the baseURL attribute with + // the mock control server URL to any client secret in order to hit the mock + // server instead of the default control API. + if strings.HasPrefix(tt.clientID, "tskey-client") { + if !strings.Contains(tt.clientID, "?") { + tt.clientID += "?baseURL=" + srv.URL + } else { + tt.clientID += "&baseURL=" + srv.URL + } + } + + got, err := resolveAuthKey(context.Background(), tt.clientID, tt.tags) + + if tt.wantErr { + if err == nil { + t.Error("want error but got none") + return + } + return + } + + if err != nil { + t.Errorf("want no error, got %q", err) + return + } + + if got != tt.wantAuthKey { + t.Errorf("want authKey = %q, got %q", tt.wantAuthKey, got) + } + }) + } +} + +func TestResolveAuthKeyAttributes(t *testing.T) { + tests := []struct { + name string + clientSecret string + wantEphemeral bool + wantPreauth bool + wantBaseURL string + }{ + { + name: "default values", + clientSecret: "tskey-client-abc", + wantEphemeral: true, + wantPreauth: false, + wantBaseURL: "https://api.tailscale.com", + }, + { + name: "ephemeral=false", + clientSecret: "tskey-client-abc?ephemeral=false", + wantEphemeral: false, + wantPreauth: false, + wantBaseURL: "https://api.tailscale.com", + }, + { + name: "preauthorized=true", + clientSecret: "tskey-client-abc?preauthorized=true", + wantEphemeral: true, + 
wantPreauth: true, + wantBaseURL: "https://api.tailscale.com", + }, + { + name: "baseURL=https://api.example.com", + clientSecret: "tskey-client-abc?baseURL=https://api.example.com", + wantEphemeral: true, + wantPreauth: false, + wantBaseURL: "https://api.example.com", + }, + { + name: "all custom values", + clientSecret: "tskey-client-abc?ephemeral=false&preauthorized=true&baseURL=https://api.example.com", + wantEphemeral: false, + wantPreauth: true, + wantBaseURL: "https://api.example.com", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + strippedSecret, ephemeral, preauth, baseURL, err := parseOptionalAttributes(tt.clientSecret) + if err != nil { + t.Fatalf("want no error, got %q", err) + } + if strippedSecret != "tskey-client-abc" { + t.Errorf("want tskey-client-abc, got %q", strippedSecret) + } + if ephemeral != tt.wantEphemeral { + t.Errorf("want ephemeral = %v, got %v", tt.wantEphemeral, ephemeral) + } + if preauth != tt.wantPreauth { + t.Errorf("want preauth = %v, got %v", tt.wantPreauth, preauth) + } + if baseURL != tt.wantBaseURL { + t.Errorf("want baseURL = %v, got %v", tt.wantBaseURL, baseURL) + } + }) + } +} + +func mockControlServer(t *testing.T) *httptest.Server { + t.Helper() + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.Contains(r.URL.Path, "/api/v2/oauth/token"): + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"access-123","token_type":"Bearer","expires_in":3600}`)) + case strings.Contains(r.URL.Path, "/api/v2/tailnet") && strings.Contains(r.URL.Path, "/keys"): + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"key":"tskey-auth-xyz"}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 825a39e34877f..9ef42400f259a 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -142,9 +142,11 @@ tailscale.com/tsnet 
dependencies: (generated by github.com/tailscale/depaware) tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ tailscale.com/feature/c2n from tailscale.com/tsnet tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister/identityfederation from tailscale.com/tsnet tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet + tailscale.com/feature/identityfederation from tailscale.com/feature/condregister/identityfederation tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy @@ -343,7 +345,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ LDW golang.org/x/net/proxy from tailscale.com/net/netns DI golang.org/x/net/route from tailscale.com/net/netmon+ - golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 14747650f42ee..ea165e932e4bc 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -30,6 +30,7 @@ import ( "tailscale.com/control/controlclient" "tailscale.com/envknob" _ "tailscale.com/feature/c2n" + _ "tailscale.com/feature/condregister/identityfederation" _ "tailscale.com/feature/condregister/oauthkey" _ "tailscale.com/feature/condregister/portmapper" _ "tailscale.com/feature/condregister/useproxy" @@ -115,6 +116,29 @@ type Server struct { // used. 
AuthKey string + // ClientSecret, if non-empty, is the OAuth client secret + // that will be used to generate authkeys via OAuth. It + // will be preferred over the TS_CLIENT_SECRET environment + // variable. If the node is already created (from state + // previously stored in Store), then this field is not + // used. + ClientSecret string + + // ClientID, if non-empty, is the client ID used to generate + // authkeys via workload identity federation. It will be + // preferred over the TS_CLIENT_ID environment variable. + // If the node is already created (from state previously + // stored in Store), then this field is not used. + ClientID string + + // IDToken, if non-empty, is the ID token from the identity + // provider to exchange with the control server for workload + // identity federation. It will be preferred over the + // TS_ID_TOKEN environment variable. If the node is already + // created (from state previously stored in Store), then this + // field is not used. + IDToken string + // ControlURL optionally specifies the coordination server URL. // If empty, the Tailscale default is used. 
ControlURL string @@ -517,6 +541,27 @@ func (s *Server) getAuthKey() string { return os.Getenv("TS_AUTH_KEY") } +func (s *Server) getClientSecret() string { + if v := s.ClientSecret; v != "" { + return v + } + return os.Getenv("TS_CLIENT_SECRET") +} + +func (s *Server) getClientID() string { + if v := s.ClientID; v != "" { + return v + } + return os.Getenv("TS_CLIENT_ID") +} + +func (s *Server) getIDToken() string { + if v := s.IDToken; v != "" { + return v + } + return os.Getenv("TS_ID_TOKEN") +} + func (s *Server) start() (reterr error) { var closePool closeOnErrorPool defer closePool.closeAllIfError(&reterr) @@ -684,14 +729,9 @@ func (s *Server) start() (reterr error) { prefs.ControlURL = s.ControlURL prefs.RunWebClient = s.RunWebClient prefs.AdvertiseTags = s.AdvertiseTags - authKey := s.getAuthKey() - // Try to use an OAuth secret to generate an auth key if that functionality - // is available. - if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { - authKey, err = f(s.shutdownCtx, s.getAuthKey(), prefs.AdvertiseTags) - if err != nil { - return fmt.Errorf("resolving auth key: %w", err) - } + authKey, err := s.resolveAuthKey() + if err != nil { + return fmt.Errorf("error resolving auth key: %w", err) } err = lb.Start(ipn.Options{ UpdatePrefs: prefs, @@ -738,6 +778,42 @@ func (s *Server) start() (reterr error) { return nil } +func (s *Server) resolveAuthKey() (string, error) { + authKey := s.getAuthKey() + var err error + // Try to use an OAuth secret to generate an auth key if that functionality + // is available. + resolveViaOAuth, oauthOk := tailscale.HookResolveAuthKey.GetOk() + if oauthOk { + clientSecret := authKey + if authKey == "" { + clientSecret = s.getClientSecret() + } + authKey, err = resolveViaOAuth(s.shutdownCtx, clientSecret, s.AdvertiseTags) + if err != nil { + return "", err + } + } + // Try to resolve the auth key via workload identity federation if that functionality + // is available and no auth key is yet determined. 
+ resolveViaWIF, wifOk := tailscale.HookResolveAuthKeyViaWIF.GetOk() + if wifOk && authKey == "" { + clientID := s.getClientID() + idToken := s.getIDToken() + if clientID != "" && idToken == "" { + return "", fmt.Errorf("client ID for workload identity federation found, but ID token is empty") + } + if clientID == "" && idToken != "" { + return "", fmt.Errorf("ID token for workload identity federation found, but client ID is empty") + } + authKey, err = resolveViaWIF(s.shutdownCtx, s.ControlURL, clientID, idToken, s.AdvertiseTags) + if err != nil { + return "", err + } + } + return authKey, nil +} + func (s *Server) startLogger(closePool *closeOnErrorPool, health *health.Tracker, tsLogf logger.Logf) error { if testenv.InTest() { return nil diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index f1531d013d4b7..838d5f3f5f1a5 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -38,6 +38,7 @@ import ( "golang.org/x/net/proxy" "tailscale.com/client/local" "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/net/netns" @@ -1393,3 +1394,201 @@ func TestDeps(t *testing.T) { }, }.Check(t) } + +func TestResolveAuthKey(t *testing.T) { + tests := []struct { + name string + authKey string + clientSecret string + clientID string + idToken string + oauthAvailable bool + wifAvailable bool + resolveViaOAuth func(ctx context.Context, clientSecret string, tags []string) (string, error) + resolveViaWIF func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) + wantAuthKey string + wantErr bool + wantErrContains string + }{ + { + name: "successful resolution via OAuth client secret", + clientSecret: "tskey-client-secret-123", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + if clientSecret != "tskey-client-secret-123" { + return "", fmt.Errorf("unexpected client 
secret: %s", clientSecret) + } + return "tskey-auth-via-oauth", nil + }, + wantAuthKey: "tskey-auth-via-oauth", + wantErrContains: "", + }, + { + name: "failing resolution via OAuth client secret", + clientSecret: "tskey-client-secret-123", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + return "", fmt.Errorf("resolution failed") + }, + wantErrContains: "resolution failed", + }, + { + name: "successful resolution via federated ID token", + clientID: "client-id-123", + idToken: "id-token-456", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + if clientID != "client-id-123" { + return "", fmt.Errorf("unexpected client ID: %s", clientID) + } + if idToken != "id-token-456" { + return "", fmt.Errorf("unexpected ID token: %s", idToken) + } + return "tskey-auth-via-wif", nil + }, + wantAuthKey: "tskey-auth-via-wif", + wantErrContains: "", + }, + { + name: "failing resolution via federated ID token", + clientID: "client-id-123", + idToken: "id-token-456", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + return "", fmt.Errorf("resolution failed") + }, + wantErrContains: "resolution failed", + }, + { + name: "empty client ID", + clientID: "", + idToken: "id-token-456", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantErrContains: "empty", + }, + { + name: "empty ID token", + clientID: "client-id-123", + idToken: "", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantErrContains: "empty", + }, + { + name: "workload identity resolution skipped if 
resolution via OAuth token succeeds", + clientSecret: "tskey-client-secret-123", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + if clientSecret != "tskey-client-secret-123" { + return "", fmt.Errorf("unexpected client secret: %s", clientSecret) + } + return "tskey-auth-via-oauth", nil + }, + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantAuthKey: "tskey-auth-via-oauth", + wantErrContains: "", + }, + { + name: "workload identity resolution skipped if resolution via OAuth token fails", + clientID: "tskey-client-id-123", + idToken: "", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + return "", fmt.Errorf("resolution failed") + }, + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantErrContains: "failed", + }, + { + name: "authkey set and no resolution available", + authKey: "tskey-auth-123", + oauthAvailable: false, + wifAvailable: false, + wantAuthKey: "tskey-auth-123", + wantErrContains: "", + }, + { + name: "no authkey set and no resolution available", + oauthAvailable: false, + wifAvailable: false, + wantAuthKey: "", + wantErrContains: "", + }, + { + name: "authkey is client secret and resolution via OAuth client secret succeeds", + authKey: "tskey-client-secret-123", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + if clientSecret != "tskey-client-secret-123" { + return "", fmt.Errorf("unexpected client secret: %s", clientSecret) + } + return "tskey-auth-via-oauth", nil + }, + wantAuthKey: "tskey-auth-via-oauth", + wantErrContains: "", + }, + { + name: 
"authkey is client secret but resolution via OAuth client secret fails", + authKey: "tskey-client-secret-123", + oauthAvailable: true, + resolveViaOAuth: func(ctx context.Context, clientSecret string, tags []string) (string, error) { + return "", fmt.Errorf("resolution failed") + }, + wantErrContains: "resolution failed", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.oauthAvailable { + t.Cleanup(tailscale.HookResolveAuthKey.SetForTest(tt.resolveViaOAuth)) + } + + if tt.wifAvailable { + t.Cleanup(tailscale.HookResolveAuthKeyViaWIF.SetForTest(tt.resolveViaWIF)) + } + + s := &Server{ + AuthKey: tt.authKey, + ClientSecret: tt.clientSecret, + ClientID: tt.clientID, + IDToken: tt.idToken, + ControlURL: "https://control.example.com", + } + s.shutdownCtx = context.Background() + + gotAuthKey, err := s.resolveAuthKey() + + if tt.wantErrContains != "" { + if err == nil { + t.Errorf("expected error but got none") + return + } + if !strings.Contains(err.Error(), tt.wantErrContains) { + t.Errorf("expected error containing %q but got error: %v", tt.wantErrContains, err) + } + return + } + + if err != nil { + t.Errorf("resolveAuthKey expected no error but got error: %v", err) + return + } + + if gotAuthKey != tt.wantAuthKey { + t.Errorf("resolveAuthKey() = %q, want %q", gotAuthKey, tt.wantAuthKey) + } + }) + } +} From b8c58ca7c1a49fb772d095c65693cdab06488047 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 2 Dec 2025 10:16:35 -0800 Subject: [PATCH 0785/1093] wgengine: fix TSMP/ICMP callback leak Fixes #18112 Change-Id: I85d5c482b01673799d51faeb6cb0579903597502 Signed-off-by: Brad Fitzpatrick --- wgengine/userspace.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index a369fa343cc76..1b8562d3ffe55 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -451,6 +451,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) cb := 
e.pongCallback[pong.Data] e.logf("wgengine: got TSMP pong %02x, peerAPIPort=%v; cb=%v", pong.Data, pong.PeerAPIPort, cb != nil) if cb != nil { + delete(e.pongCallback, pong.Data) go cb(pong) } } @@ -464,6 +465,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) // We didn't swallow it, so let it flow to the host. return false } + delete(e.icmpEchoResponseCallback, idSeq) e.logf("wgengine: got diagnostic ICMP response %02x", idSeq) go cb() return true From f8cd07fb8afd451de29c7876d2bdef21b512eeb9 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 2 Dec 2025 17:35:15 +0000 Subject: [PATCH 0786/1093] .github: make cigocacher script more robust We got a flake in https://github.com/tailscale/tailscale/actions/runs/19867229792/job/56933249360 but it's not obvious to me where it failed. Make it more robust and print out more useful error messages for next time. Updates tailscale/corp#10808 Change-Id: I9ca08ea1103b9ad968c9cc0c42a493981ea62435 Signed-off-by: Tom Proctor --- .github/actions/go-cache/action.sh | 43 +++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/.github/actions/go-cache/action.sh b/.github/actions/go-cache/action.sh index 84fb878f804a6..58ceabc861458 100755 --- a/.github/actions/go-cache/action.sh +++ b/.github/actions/go-cache/action.sh @@ -17,23 +17,52 @@ if [ -z "${GITHUB_ACTIONS:-}" ]; then exit 1 fi -if [ -z "$URL" ]; then +if [ -z "${URL:-}" ]; then echo "No cigocached URL is set, skipping cigocacher setup" exit 0 fi -JWT="$(curl -sSL -H "Authorization: Bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" "${ACTIONS_ID_TOKEN_REQUEST_URL}&audience=gocached" | jq -r .value)" +curl_and_parse() { + local jq_filter="$1" + local step="$2" + shift 2 + + local response + local curl_exit + response="$(curl -sSL "$@" 2>&1)" || curl_exit="$?" 
+ if [ "${curl_exit:-0}" -ne "0" ]; then + echo "${step}: ${response}" >&2 + return 1 + fi + + local parsed + local jq_exit + parsed=$(echo "${response}" | jq -e -r "${jq_filter}" 2>&1) || jq_exit=$? + if [ "${jq_exit:-0}" -ne "0" ]; then + echo "${step}: Failed to parse JSON response:" >&2 + echo "${response}" >&2 + return 1 + fi + + echo "${parsed}" + return 0 +} + +JWT="$(curl_and_parse ".value" "Fetching GitHub identity JWT" \ + -H "Authorization: Bearer ${ACTIONS_ID_TOKEN_REQUEST_TOKEN}" \ + "${ACTIONS_ID_TOKEN_REQUEST_URL}&audience=gocached")" || exit 0 + # cigocached serves a TLS cert with an FQDN, but DNS is based on VM name. HOST_AND_PORT="${URL#http*://}" FIRST_LABEL="${HOST_AND_PORT/.*/}" # Save CONNECT_TO for later steps to use. echo "CONNECT_TO=${HOST_AND_PORT}:${FIRST_LABEL}:" >> "${GITHUB_ENV}" BODY="$(jq -n --arg jwt "$JWT" '{"jwt": $jwt}')" -CIGOCACHER_TOKEN="$(curl -sSL --connect-to "$HOST_AND_PORT:$FIRST_LABEL:" -H "Content-Type: application/json" "$URL/auth/exchange-token" -d "$BODY" | jq -r .access_token || true)" -if [ -z "$CIGOCACHER_TOKEN" ]; then - echo "Failed token exchange with cigocached, skipping cigocacher setup" - exit 0 -fi +CIGOCACHER_TOKEN="$(curl_and_parse ".access_token" "Exchanging token with cigocached" \ + --connect-to "${HOST_AND_PORT}:${FIRST_LABEL}:" \ + -H "Content-Type: application/json" \ + "$URL/auth/exchange-token" \ + -d "$BODY")" || exit 0 # Wait until we successfully auth before building cigocacher to ensure we know # it's worth building. 
From e33f6aa3ba2d3a6f2ed63f224239db5abf1c4616 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Wed, 3 Dec 2025 13:52:33 +0000 Subject: [PATCH 0787/1093] go.mod: bump the version of setec Updates https://github.com/tailscale/corp/issues/34813 Change-Id: I926f1bad5bf143d82ddb36f51f70deb24fa11e71 Signed-off-by: Alex Chan --- cmd/derper/depaware.txt | 2 +- flake.nix | 2 +- go.mod | 5 ++--- go.mod.sri | 2 +- go.sum | 8 ++++---- shell.nix | 2 +- 6 files changed, 10 insertions(+), 11 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 9c720fa604869..11a6318c30061 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -9,6 +9,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket + github.com/creachadair/msync/throttle from github.com/tailscale/setec/client/setec W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil github.com/dgryski/go-metro from github.com/axiomhq/hyperloglog github.com/fxamacker/cbor/v2 from tailscale.com/tka @@ -190,7 +191,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ - golang.org/x/sync/singleflight from github.com/tailscale/setec/client/setec golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ diff --git a/flake.nix b/flake.nix index 855ce555bb1cc..484b7e0c593fe 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: 
sha256-IkodqRYdueML7U2Hh8vRw6Et7+WII+VXuPJ3jZ2xYx8= +# nix-direnv cache busting line: sha256-DTf2GHnoVXDMA1vWbBzpHA4ipL7UB/n/2Yijj/beBF8= diff --git a/go.mod b/go.mod index bd6fe441d0e0a..51c7c9e3ebf75 100644 --- a/go.mod +++ b/go.mod @@ -91,7 +91,7 @@ require ( github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc - github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb + github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 github.com/tailscale/wireguard-go v0.0.0-20250716170648-1d0488a3d7da @@ -191,7 +191,6 @@ require ( go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/automaxprocs v1.5.3 // indirect golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect - golang.org/x/tools/go/expect v0.1.1-deprecated // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect @@ -417,7 +416,7 @@ require ( golang.org/x/image v0.27.0 // indirect golang.org/x/text v0.31.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/go.mod.sri b/go.mod.sri index 329fe940505e3..b36887eeffbc6 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-IkodqRYdueML7U2Hh8vRw6Et7+WII+VXuPJ3jZ2xYx8= +sha256-DTf2GHnoVXDMA1vWbBzpHA4ipL7UB/n/2Yijj/beBF8= diff --git a/go.sum b/go.sum index 111c99ac909e5..19f16c5cd5ce3 100644 --- a/go.sum +++ b/go.sum @@ -1002,8 +1002,8 @@ github.com/tailscale/netlink 
v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= -github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb h1:Rtklwm6HUlCtf/MR2MB9iY4FoA16acWWlC5pLrTVa90= -github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb/go.mod h1:R8iCVJnbOB05pGexHK/bKHneIRHpZ3jLl7wMQ0OM/jw= +github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a h1:TApskGPim53XY5WRt5hX4DnO8V6CmVoimSklryIoGMM= +github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a/go.mod h1:+6WyG6kub5/5uPsMdYQuSti8i6F5WuKpFWLQnZt/Mms= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M= @@ -1504,8 +1504,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/shell.nix b/shell.nix index 28bdbdafb8e0d..569057dbd3bb1 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-IkodqRYdueML7U2Hh8vRw6Et7+WII+VXuPJ3jZ2xYx8= +# nix-direnv cache busting line: sha256-DTf2GHnoVXDMA1vWbBzpHA4ipL7UB/n/2Yijj/beBF8= From 6a44990b09b79d5e4fea8283f6baaa5ce6cba87a Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 2 Dec 2025 11:38:47 -0800 Subject: [PATCH 0788/1093] net/udprelay: bind multiple sockets per af on Linux This commit uses SO_REUSEPORT (when supported) to bind multiple sockets per address family. Increasing the number of sockets can increase aggregate throughput when serving many peer relay client flows. Benchmarks show 3x improvement in max aggregate bitrate in some environments. 
Updates tailscale/corp#34745 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 169 +++++++++++++++++++++----------- net/udprelay/server_linux.go | 35 +++++++ net/udprelay/server_notlinux.go | 19 ++++ 3 files changed, 165 insertions(+), 58 deletions(-) create mode 100644 net/udprelay/server_linux.go create mode 100644 net/udprelay/server_notlinux.go diff --git a/net/udprelay/server.go b/net/udprelay/server.go index e7ca24960ea1d..26b27bb7f5982 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -15,6 +15,7 @@ import ( "fmt" "net" "net/netip" + "runtime" "slices" "strconv" "sync" @@ -66,10 +67,10 @@ type Server struct { bindLifetime time.Duration steadyStateLifetime time.Duration bus *eventbus.Bus - uc4 batching.Conn // always non-nil - uc4Port uint16 // always nonzero - uc6 batching.Conn // may be nil if IPv6 bind fails during initialization - uc6Port uint16 // may be zero if IPv6 bind fails during initialization + uc4 []batching.Conn // length is always nonzero + uc4Port uint16 // always nonzero + uc6 []batching.Conn // length may be zero if udp6 bind fails + uc6Port uint16 // zero if len(uc6) is zero, otherwise nonzero closeOnce sync.Once wg sync.WaitGroup closeCh chan struct{} @@ -337,37 +338,51 @@ func NewServer(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (s *Serv Logf: logger.WithPrefix(logf, "netcheck: "), SendPacket: func(b []byte, addrPort netip.AddrPort) (int, error) { if addrPort.Addr().Is4() { - return s.uc4.WriteToUDPAddrPort(b, addrPort) - } else if s.uc6 != nil { - return s.uc6.WriteToUDPAddrPort(b, addrPort) + return s.uc4[0].WriteToUDPAddrPort(b, addrPort) + } else if len(s.uc6) > 0 { + return s.uc6[0].WriteToUDPAddrPort(b, addrPort) } else { return 0, errors.New("IPv6 socket is not bound") } }, } - err = s.listenOn(port) + err = s.bindSockets(port) if err != nil { return nil, err } + s.startPacketReaders() if !s.onlyStaticAddrPorts { s.wg.Add(1) go s.addrDiscoveryLoop() } - s.wg.Add(1) - go 
s.packetReadLoop(s.uc4, s.uc6, true) - if s.uc6 != nil { - s.wg.Add(1) - go s.packetReadLoop(s.uc6, s.uc4, false) - } s.wg.Add(1) go s.endpointGCLoop() return s, nil } +func (s *Server) startPacketReaders() { + for i, uc := range s.uc4 { + var other batching.Conn + if len(s.uc6) > 0 { + other = s.uc6[min(len(s.uc6)-1, i)] + } + s.wg.Add(1) + go s.packetReadLoop(uc, other, true) + } + for i, uc := range s.uc6 { + var other batching.Conn + if len(s.uc4) > 0 { + other = s.uc4[min(len(s.uc4)-1, i)] + } + s.wg.Add(1) + go s.packetReadLoop(uc, other, false) + } +} + func (s *Server) addrDiscoveryLoop() { defer s.wg.Done() @@ -514,70 +529,108 @@ func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { } } -// listenOn binds an IPv4 and IPv6 socket to port. We consider it successful if -// we manage to bind the IPv4 socket. +// bindSockets binds udp4 and udp6 sockets to desiredPort. We consider it +// successful if we manage to bind at least one udp4 socket. Multiple sockets +// may be bound per address family, e.g. SO_REUSEPORT, depending on platform. // -// The requested port may be zero, in which case port selection is left up to -// the host networking stack. We make no attempt to bind a consistent port -// across IPv4 and IPv6 if the requested port is zero. +// desiredPort may be zero, in which case port selection is left up to the host +// networking stack. We make no attempt to bind a consistent port between udp4 +// and udp6 if the requested port is zero, but a consistent port is used +// across multiple sockets within a given address family if SO_REUSEPORT is +// supported. // // TODO: make these "re-bindable" in similar fashion to magicsock as a means to // deal with EDR software closing them. http://go/corp/30118. We could re-use // [magicsock.RebindingConn], which would also remove the need for // [singlePacketConn], as [magicsock.RebindingConn] also handles fallback to // single packet syscall operations. 
-func (s *Server) listenOn(port uint16) error { +func (s *Server) bindSockets(desiredPort uint16) error { + // maxSocketsPerAF is a conservative starting point, but is somewhat + // arbitrary. + maxSocketsPerAF := min(16, runtime.NumCPU()) + listenConfig := &net.ListenConfig{ + Control: listenControl, + } for _, network := range []string{"udp4", "udp6"} { - uc, err := net.ListenUDP(network, &net.UDPAddr{Port: int(port)}) - if err != nil { + SocketsLoop: + for i := range maxSocketsPerAF { + if i > 0 { + // Use a consistent port per address family if the user-supplied + // port was zero, and we are binding multiple sockets. + if network == "udp4" { + desiredPort = s.uc4Port + } else { + desiredPort = s.uc6Port + } + } + uc, boundPort, err := s.bindSocketTo(listenConfig, network, desiredPort) + if err != nil { + switch { + case i == 0 && network == "udp4": + // At least one udp4 socket is required. + return err + case i == 0 && network == "udp6": + // A udp6 socket is not required. + s.logf("ignoring IPv6 bind failure: %v", err) + break SocketsLoop + default: // i > 0 + // Reusable sockets are not required. 
+ s.logf("ignoring reusable (index=%d network=%v) socket bind failure: %v", i, network, err) + break SocketsLoop + } + } + pc := batching.TryUpgradeToConn(uc, network, batching.IdealBatchSize) + bc, ok := pc.(batching.Conn) + if !ok { + bc = &singlePacketConn{uc} + } if network == "udp4" { - return err + s.uc4 = append(s.uc4, bc) + s.uc4Port = boundPort } else { - s.logf("ignoring IPv6 bind failure: %v", err) - break + s.uc6 = append(s.uc6, bc) + s.uc6Port = boundPort } - } - trySetUDPSocketOptions(uc, s.logf) - // TODO: set IP_PKTINFO sockopt - _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) - if err != nil { - uc.Close() - if s.uc4 != nil { - s.uc4.Close() - } - return err - } - portUint, err := strconv.ParseUint(boundPortStr, 10, 16) - if err != nil { - uc.Close() - if s.uc4 != nil { - s.uc4.Close() + if !isReusableSocket(uc) { + break } - return err - } - pc := batching.TryUpgradeToConn(uc, network, batching.IdealBatchSize) - bc, ok := pc.(batching.Conn) - if !ok { - bc = &singlePacketConn{uc} - } - if network == "udp4" { - s.uc4 = bc - s.uc4Port = uint16(portUint) - } else { - s.uc6 = bc - s.uc6Port = uint16(portUint) } - s.logf("listening on %s:%d", network, portUint) + } + s.logf("listening on udp4:%d sockets=%d", s.uc4Port, len(s.uc4)) + if len(s.uc6) > 0 { + s.logf("listening on udp6:%d sockets=%d", s.uc6Port, len(s.uc6)) } return nil } +func (s *Server) bindSocketTo(listenConfig *net.ListenConfig, network string, port uint16) (*net.UDPConn, uint16, error) { + lis, err := listenConfig.ListenPacket(context.Background(), network, fmt.Sprintf(":%d", port)) + if err != nil { + return nil, 0, err + } + uc := lis.(*net.UDPConn) + trySetUDPSocketOptions(uc, s.logf) + _, boundPortStr, err := net.SplitHostPort(uc.LocalAddr().String()) + if err != nil { + uc.Close() + return nil, 0, err + } + portUint, err := strconv.ParseUint(boundPortStr, 10, 16) + if err != nil { + uc.Close() + return nil, 0, err + } + return uc, uint16(portUint), nil +} + // 
Close closes the server. func (s *Server) Close() error { s.closeOnce.Do(func() { - s.uc4.Close() - if s.uc6 != nil { - s.uc6.Close() + for _, uc4 := range s.uc4 { + uc4.Close() + } + for _, uc6 := range s.uc6 { + uc6.Close() } close(s.closeCh) s.wg.Wait() diff --git a/net/udprelay/server_linux.go b/net/udprelay/server_linux.go new file mode 100644 index 0000000000000..009ec8cc8bfe9 --- /dev/null +++ b/net/udprelay/server_linux.go @@ -0,0 +1,35 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package udprelay + +import ( + "net" + "syscall" + + "golang.org/x/sys/unix" +) + +func listenControl(_ string, _ string, c syscall.RawConn) error { + c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1) + }) + return nil +} + +func isReusableSocket(uc *net.UDPConn) bool { + rc, err := uc.SyscallConn() + if err != nil { + return false + } + var reusable bool + rc.Control(func(fd uintptr) { + val, err := unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT) + if err == nil && val == 1 { + reusable = true + } + }) + return reusable +} diff --git a/net/udprelay/server_notlinux.go b/net/udprelay/server_notlinux.go new file mode 100644 index 0000000000000..042a6dd68215e --- /dev/null +++ b/net/udprelay/server_notlinux.go @@ -0,0 +1,19 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package udprelay + +import ( + "net" + "syscall" +) + +func listenControl(_ string, _ string, _ syscall.RawConn) error { + return nil +} + +func isReusableSocket(*net.UDPConn) bool { + return false +} From 7bc25f77f4411e0bd8f569063a3efa6650857953 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 3 Dec 2025 11:20:46 -0800 Subject: [PATCH 0789/1093] go.toolchain.rev: update to Go 1.25.5 (#18123) Updates #18122 Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- 
go.toolchain.version | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 51c7c9e3ebf75..08062b220d5ec 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.3 +go 1.25.5 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index 9ea6b37dcbc32..16058a407c704 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -5c01b77ad0d27a8bd4ef89ef7e713fd7043c5a91 +0bab982699fa5903259ba9b4cba3e5fd6cb3baf2 diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index a62a525998ac7..310dcf87fcf1c 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-2TYziJLJrFOW2FehhahKficnDACJEwjuvVYyeQZbrcc= +sha256-fBezkBGRHCnfJiOUmMMqBCPCqjlGC4F6KEt5h1JhsCg= diff --git a/go.toolchain.version b/go.toolchain.version index 5bb76b575e1f5..b45fe310644f7 100644 --- a/go.toolchain.version +++ b/go.toolchain.version @@ -1 +1 @@ -1.25.3 +1.25.5 From d199ecac80083e64d32baf3b473c67b11a6e6936 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Wed, 3 Dec 2025 19:54:52 -0600 Subject: [PATCH 0790/1093] ipn/ipnlocal: shut down old control client synchronously on reset Previously, callers of (*LocalBackend).resetControlClientLocked were supposed to call Shutdown on the returned controlclient.Client after releasing b.mu. In #17804, we started calling Shutdown while holding b.mu, which caused deadlocks during profile switches due to the (*ExecQueue).RunSync implementation. We first patched this in #18053 by calling Shutdown in a new goroutine, which avoided the deadlocks but made TestStateMachine flaky because the shutdown order was no longer guaranteed. In #18070, we updated (*ExecQueue).RunSync to allow shutting down the queue without waiting for RunSync to return. With that change, shutting down the control client while holding b.mu became safe. 
Therefore, this PR updates (*LocalBackend).resetControlClientLocked to shut down the old client synchronously during the reset, instead of returning it and shifting that responsibility to the callers. This fixes the flaky tests and simplifies the code. Fixes #18052 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 39 ++++++++++----------------------------- 1 file changed, 10 insertions(+), 29 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index fbf34aa426cea..ce2acf311fc48 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -948,12 +948,8 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { // down, clients switch over to other replicas whilst the existing connections are kept alive for some period of time. func (b *LocalBackend) DisconnectControl() { b.mu.Lock() - cc := b.resetControlClientLocked() - b.mu.Unlock() - - if cc != nil { - cc.Shutdown() - } + defer b.mu.Unlock() + b.resetControlClientLocked() } // linkChange is our network monitor callback, called whenever the network changes. @@ -2419,14 +2415,6 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { logf := logger.WithPrefix(b.logf, "Start: ") b.startOnce.Do(b.initOnce) - var clientToShutdown controlclient.Client - defer func() { - if clientToShutdown != nil { - // Shutdown outside of b.mu to avoid deadlocks. - b.goTracker.Go(clientToShutdown.Shutdown) - } - }() - if opts.UpdatePrefs != nil { if err := b.checkPrefsLocked(opts.UpdatePrefs); err != nil { return err @@ -2469,7 +2457,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { // into sync with the minimal changes. But that's not how it // is right now, which is a sign that the code is still too // complicated. 
- clientToShutdown = b.resetControlClientLocked() + b.resetControlClientLocked() httpTestClient := b.httpTestClient if b.hostinfo != nil { @@ -5810,13 +5798,12 @@ func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) { b.ignoreControlClientUpdates.Store(cc == nil) } -// resetControlClientLocked sets b.cc to nil and returns the old value. If the -// returned value is non-nil, the caller must call Shutdown on it after -// releasing b.mu. -func (b *LocalBackend) resetControlClientLocked() controlclient.Client { +// resetControlClientLocked sets b.cc to nil and shuts down the previous +// control client, if any. +func (b *LocalBackend) resetControlClientLocked() { syncs.RequiresMutex(&b.mu) if b.cc == nil { - return nil + return } b.resetAuthURLLocked() @@ -5836,7 +5823,7 @@ func (b *LocalBackend) resetControlClientLocked() controlclient.Client { } prev := b.cc b.setControlClientLocked(nil) - return prev + prev.Shutdown() } // resetAuthURLLocked resets authURL, canceling any pending interactive login. @@ -6930,10 +6917,7 @@ func (b *LocalBackend) resetForProfileChangeLocked() error { b.updateFilterLocked(ipn.PrefsView{}) // Reset the NetworkMap in the engine b.e.SetNetworkMap(new(netmap.NetworkMap)) - if prevCC := b.resetControlClientLocked(); prevCC != nil { - // Shutdown outside of b.mu to avoid deadlocks. - b.goTracker.Go(prevCC.Shutdown) - } + b.resetControlClientLocked() // TKA errors should not prevent resetting the backend state. // However, we should still return the error to the caller. tkaErr := b.initTKALocked() @@ -7012,10 +6996,7 @@ func (b *LocalBackend) ResetAuth() error { b.mu.Lock() defer b.mu.Unlock() - if prevCC := b.resetControlClientLocked(); prevCC != nil { - // Shutdown outside of b.mu to avoid deadlocks. 
- b.goTracker.Go(prevCC.Shutdown) - } + b.resetControlClientLocked() if err := b.clearMachineKeyLocked(); err != nil { return err } From 557457f3c2e896a41c123e72278194d9f9f60663 Mon Sep 17 00:00:00 2001 From: Nick Khyl <1761190+nickkhyl@users.noreply.github.com> Date: Thu, 4 Dec 2025 09:13:13 -0600 Subject: [PATCH 0791/1093] ipn/ipnlocal: fix LocalBackend deadlock when packet arrives during profile switch (#18126) If a packet arrives while WireGuard is being reconfigured with b.mu held, such as during a profile switch, calling back into (*LocalBackend).GetPeerAPIPort from (*Wrapper).filterPacketInboundFromWireGuard may deadlock when it tries to acquire b.mu. This occurs because a peer cannot be removed while an inbound packet is being processed. The reconfig and profile switch wait for (*Peer).RoutineSequentialReceiver to return, but it never finishes because GetPeerAPIPort needs b.mu, which the waiting goroutine already holds. In this PR, we make peerAPIPorts a new syncs.AtomicValue field that is written with b.mu held but can be read by GetPeerAPIPort without holding the mutex, which fixes the deadlock. There might be other long-term ways to address the issue, such as moving peer API listeners from LocalBackend to nodeBackend so they can be accessed without holding b.mu, but these changes are too large and risky at this stage in the v1.92 release cycle. Updates #18124 Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ce2acf311fc48..d99dbf8627f70 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -245,6 +245,8 @@ type LocalBackend struct { // to prevent state changes while invoking callbacks. extHost *ExtensionHost + peerAPIPorts syncs.AtomicValue[map[netip.Addr]int] // can be read without b.mu held; TODO(nickkhyl): remove or move to nodeBackend? + // The mutex protects the following elements. 
mu syncs.Mutex @@ -295,8 +297,8 @@ type LocalBackend struct { authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil; TODO(nickkhyl): move to nodeBackend egg bool prevIfState *netmon.State - peerAPIServer *peerAPIServer // or nil - peerAPIListeners []*peerAPIListener + peerAPIServer *peerAPIServer // or nil + peerAPIListeners []*peerAPIListener // TODO(nickkhyl): move to nodeBackend loginFlags controlclient.LoginFlags notifyWatchers map[string]*watchSession // by session ID lastStatusTime time.Time // status.AsOf value of the last processed status update @@ -4710,14 +4712,8 @@ func (b *LocalBackend) GetPeerAPIPort(ip netip.Addr) (port uint16, ok bool) { if !buildfeatures.HasPeerAPIServer { return 0, false } - b.mu.Lock() - defer b.mu.Unlock() - for _, pln := range b.peerAPIListeners { - if pln.ip == ip { - return uint16(pln.port), true - } - } - return 0, false + portInt, ok := b.peerAPIPorts.Load()[ip] + return uint16(portInt), ok } // handlePeerAPIConn serves an already-accepted connection c. @@ -5209,6 +5205,7 @@ func (b *LocalBackend) closePeerAPIListenersLocked() { pln.Close() } b.peerAPIListeners = nil + b.peerAPIPorts.Store(nil) } // peerAPIListenAsync is whether the operating system requires that we @@ -5281,6 +5278,7 @@ func (b *LocalBackend) initPeerAPIListenerLocked() { b.peerAPIServer = ps isNetstack := b.sys.IsNetstack() + peerAPIPorts := make(map[netip.Addr]int) for i, a := range addrs.All() { var ln net.Listener var err error @@ -5313,7 +5311,9 @@ func (b *LocalBackend) initPeerAPIListenerLocked() { b.logf("peerapi: serving on %s", pln.urlStr) go pln.serve() b.peerAPIListeners = append(b.peerAPIListeners, pln) + peerAPIPorts[a.Addr()] = pln.port } + b.peerAPIPorts.Store(peerAPIPorts) b.goTracker.Go(b.doSetHostinfoFilterServices) } From f4d34f38bece35652ec0c4d73e2a9ffaf1fa6823 Mon Sep 17 00:00:00 2001 From: "Peter A." 
Date: Fri, 28 Nov 2025 23:39:41 +0100 Subject: [PATCH 0792/1093] cmd/tailscale,ipn: add Unix socket support for serve Based on PR #16700 by @lox, adapted to current codebase. Adds support for proxying HTTP requests to Unix domain sockets via tailscale serve unix:/path/to/socket, enabling exposure of services like Docker, containerd, PHP-FPM over Tailscale without TCP bridging. The implementation includes reasonable protections against exposure of tailscaled's own socket. Adaptations from original PR: - Use net.Dialer.DialContext instead of net.Dial for context propagation - Use http.Transport with Protocols API (current h2c approach, not http2.Transport) - Resolve conflicts with hasScheme variable in ExpandProxyTargetValue Updates #9771 Signed-off-by: Peter A. Co-authored-by: Lachlan Donald --- cmd/tailscale/cli/serve_v2.go | 7 +- cmd/tailscale/cli/serve_v2_unix_test.go | 86 ++++++++++ cmd/tailscaled/tailscaled.go | 1 + ipn/ipnlocal/serve.go | 71 +++++++- ipn/ipnlocal/serve_unix_test.go | 218 ++++++++++++++++++++++++ ipn/serve.go | 16 ++ ipn/serve_expand_test.go | 82 +++++++++ tsd/tsd.go | 4 + 8 files changed, 482 insertions(+), 3 deletions(-) create mode 100644 cmd/tailscale/cli/serve_v2_unix_test.go create mode 100644 ipn/ipnlocal/serve_unix_test.go create mode 100644 ipn/serve_expand_test.go diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 89d247be9f773..d474696b3bf86 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -138,6 +138,7 @@ var serveHelpCommon = strings.TrimSpace(` can be a file, directory, text, or most commonly the location to a service running on the local machine. The location to the location service can be expressed as a port number (e.g., 3000), a partial URL (e.g., localhost:3000), or a full URL including a path (e.g., http://localhost:3000/foo). +On Unix-like systems, you can also specify a Unix domain socket (e.g., unix:/tmp/myservice.sock). 
EXAMPLES - Expose an HTTP server running at 127.0.0.1:3000 in the foreground: @@ -149,6 +150,9 @@ EXAMPLES - Expose an HTTPS server with invalid or self-signed certificates at https://localhost:8443 $ tailscale %[1]s https+insecure://localhost:8443 + - Expose a service listening on a Unix socket (Linux/macOS/BSD only): + $ tailscale %[1]s unix:/var/run/myservice.sock + For more examples and use cases visit our docs site https://tailscale.com/kb/1247/funnel-serve-use-cases `) @@ -1172,7 +1176,8 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui } h.Path = target default: - t, err := ipn.ExpandProxyTargetValue(target, []string{"http", "https", "https+insecure"}, "http") + // Include unix in supported schemes for HTTP(S) serve + t, err := ipn.ExpandProxyTargetValue(target, []string{"http", "https", "https+insecure", "unix"}, "http") if err != nil { return err } diff --git a/cmd/tailscale/cli/serve_v2_unix_test.go b/cmd/tailscale/cli/serve_v2_unix_test.go new file mode 100644 index 0000000000000..9064655981288 --- /dev/null +++ b/cmd/tailscale/cli/serve_v2_unix_test.go @@ -0,0 +1,86 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build unix + +package cli + +import ( + "path/filepath" + "testing" + + "tailscale.com/ipn" +) + +func TestServeUnixSocketCLI(t *testing.T) { + // Create a temporary directory for our socket path + tmpDir := t.TempDir() + socketPath := filepath.Join(tmpDir, "test.sock") + + // Test that Unix socket targets are accepted by ExpandProxyTargetValue + target := "unix:" + socketPath + result, err := ipn.ExpandProxyTargetValue(target, []string{"http", "https", "https+insecure", "unix"}, "http") + if err != nil { + t.Fatalf("ExpandProxyTargetValue failed: %v", err) + } + + if result != target { + t.Errorf("ExpandProxyTargetValue(%q) = %q, want %q", target, result, target) + } +} + +func TestServeUnixSocketConfigPreserved(t *testing.T) { + // Test that Unix socket URLs are 
preserved in ServeConfig + sc := &ipn.ServeConfig{ + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ + "/": {Proxy: "unix:/tmp/test.sock"}, + }}, + }, + } + + // Verify the proxy value is preserved + handler := sc.Web["foo.test.ts.net:443"].Handlers["/"] + if handler.Proxy != "unix:/tmp/test.sock" { + t.Errorf("proxy = %q, want %q", handler.Proxy, "unix:/tmp/test.sock") + } +} + +func TestServeUnixSocketVariousPaths(t *testing.T) { + tests := []struct { + name string + target string + wantErr bool + }{ + { + name: "absolute-path", + target: "unix:/var/run/docker.sock", + }, + { + name: "tmp-path", + target: "unix:/tmp/myservice.sock", + }, + { + name: "relative-path", + target: "unix:./local.sock", + }, + { + name: "home-path", + target: "unix:/home/user/.local/service.sock", + }, + { + name: "empty-path", + target: "unix:", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := ipn.ExpandProxyTargetValue(tt.target, []string{"http", "https", "unix"}, "http") + if (err != nil) != tt.wantErr { + t.Errorf("ExpandProxyTargetValue(%q) error = %v, wantErr %v", tt.target, err, tt.wantErr) + } + }) + } +} diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index d923ca1edcfad..d9afffbdbd710 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -401,6 +401,7 @@ func run() (err error) { // Install an event bus as early as possible, so that it's // available universally when setting up everything else. sys := tsd.NewSystem() + sys.SocketPath = args.socketpath // Parse config, if specified, to fail early if it's invalid. var conf *conffile.Config diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index ef4e9154557a4..cda742892695b 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -76,6 +76,10 @@ const ( // current etag of a resource. 
var ErrETagMismatch = errors.New("etag mismatch") +// ErrProxyToTailscaledSocket is returned when attempting to proxy +// to the tailscaled socket itself, which would create a loop. +var ErrProxyToTailscaledSocket = errors.New("cannot proxy to tailscaled socket") + var serveHTTPContextKey ctxkey.Key[*serveHTTPContext] type serveHTTPContext struct { @@ -812,6 +816,27 @@ func (b *LocalBackend) getServeHandler(r *http.Request) (_ ipn.HTTPHandlerView, // we serve requests for. `backend` is a HTTPHandler.Proxy string (url, hostport or just port). func (b *LocalBackend) proxyHandlerForBackend(backend string) (http.Handler, error) { targetURL, insecure := expandProxyArg(backend) + + // Handle unix: scheme specially + if strings.HasPrefix(targetURL, "unix:") { + socketPath := strings.TrimPrefix(targetURL, "unix:") + if socketPath == "" { + return nil, fmt.Errorf("empty unix socket path") + } + if b.isTailscaledSocket(socketPath) { + return nil, ErrProxyToTailscaledSocket + } + u, _ := url.Parse("http://localhost") + return &reverseProxy{ + logf: b.logf, + url: u, + insecure: false, + backend: backend, + lb: b, + socketPath: socketPath, + }, nil + } + u, err := url.Parse(targetURL) if err != nil { return nil, fmt.Errorf("invalid url %s: %w", targetURL, err) @@ -826,6 +851,22 @@ func (b *LocalBackend) proxyHandlerForBackend(backend string) (http.Handler, err return p, nil } +// isTailscaledSocket reports whether socketPath refers to the same file +// as the tailscaled socket. It uses os.SameFile to handle symlinks, +// bind mounts, and other path variations. 
+func (b *LocalBackend) isTailscaledSocket(socketPath string) bool { + tailscaledSocket := b.sys.SocketPath + if tailscaledSocket == "" { + return false + } + fi1, err1 := os.Stat(socketPath) + fi2, err2 := os.Stat(tailscaledSocket) + if err1 != nil || err2 != nil { + return false + } + return os.SameFile(fi1, fi2) +} + // reverseProxy is a proxy that forwards a request to a backend host // (preconfigured via ipn.ServeConfig). If the host is configured with // http+insecure prefix, connection between proxy and backend will be over @@ -840,6 +881,7 @@ type reverseProxy struct { insecure bool backend string lb *LocalBackend + socketPath string // path to unix socket, empty for TCP httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends h2cTransport lazy.SyncValue[*http.Transport] // transport for h2c backends // closed tracks whether proxy is closed/currently closing. @@ -880,7 +922,12 @@ func (rp *reverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { r.Out.URL.RawPath = rp.url.RawPath } - r.Out.Host = r.In.Host + // For Unix sockets, use the URL's host (localhost) instead of the incoming host + if rp.socketPath != "" { + r.Out.Host = rp.url.Host + } else { + r.Out.Host = r.In.Host + } addProxyForwardedHeaders(r) rp.lb.addTailscaleIdentityHeaders(r) if err := rp.lb.addAppCapabilitiesHeader(r); err != nil { @@ -905,8 +952,16 @@ func (rp *reverseProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { // to the backend. The Transport gets created lazily, at most once. 
func (rp *reverseProxy) getTransport() *http.Transport { return rp.httpTransport.Get(func() *http.Transport { + dial := rp.lb.dialer.SystemDial + if rp.socketPath != "" { + dial = func(ctx context.Context, _, _ string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, "unix", rp.socketPath) + } + } + return &http.Transport{ - DialContext: rp.lb.dialer.SystemDial, + DialContext: dial, TLSClientConfig: &tls.Config{ InsecureSkipVerify: rp.insecure, }, @@ -929,6 +984,10 @@ func (rp *reverseProxy) getH2CTransport() http.RoundTripper { tr := &http.Transport{ Protocols: &p, DialTLSContext: func(ctx context.Context, network string, addr string) (net.Conn, error) { + if rp.socketPath != "" { + var d net.Dialer + return d.DialContext(ctx, "unix", rp.socketPath) + } return rp.lb.dialer.SystemDial(ctx, "tcp", rp.url.Host) }, } @@ -940,6 +999,10 @@ func (rp *reverseProxy) getH2CTransport() http.RoundTripper { // for a h2c server, but sufficient for our particular use case. func (rp *reverseProxy) shouldProxyViaH2C(r *http.Request) bool { contentType := r.Header.Get(contentTypeHeader) + // For unix sockets, check if it's gRPC content to determine h2c + if rp.socketPath != "" { + return r.ProtoMajor == 2 && isGRPCContentType(contentType) + } return r.ProtoMajor == 2 && strings.HasPrefix(rp.backend, "http://") && isGRPCContentType(contentType) } @@ -1184,6 +1247,10 @@ func expandProxyArg(s string) (targetURL string, insecureSkipVerify bool) { if s == "" { return "", false } + // Unix sockets - return as-is + if strings.HasPrefix(s, "unix:") { + return s, false + } if strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://") { return s, false } diff --git a/ipn/ipnlocal/serve_unix_test.go b/ipn/ipnlocal/serve_unix_test.go new file mode 100644 index 0000000000000..e57aafab212ae --- /dev/null +++ b/ipn/ipnlocal/serve_unix_test.go @@ -0,0 +1,218 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build unix + 
+package ipnlocal + +import ( + "errors" + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "testing" + "time" + + "tailscale.com/tstest" +) + +func TestExpandProxyArgUnix(t *testing.T) { + tests := []struct { + input string + wantURL string + wantInsecure bool + }{ + { + input: "unix:/tmp/test.sock", + wantURL: "unix:/tmp/test.sock", + }, + { + input: "unix:/var/run/docker.sock", + wantURL: "unix:/var/run/docker.sock", + }, + { + input: "unix:./relative.sock", + wantURL: "unix:./relative.sock", + }, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + gotURL, gotInsecure := expandProxyArg(tt.input) + if gotURL != tt.wantURL { + t.Errorf("expandProxyArg(%q) url = %q, want %q", tt.input, gotURL, tt.wantURL) + } + if gotInsecure != tt.wantInsecure { + t.Errorf("expandProxyArg(%q) insecure = %v, want %v", tt.input, gotInsecure, tt.wantInsecure) + } + }) + } +} + +func TestServeUnixSocket(t *testing.T) { + // Create a temporary directory for our socket + tmpDir := t.TempDir() + socketPath := filepath.Join(tmpDir, "test.sock") + + // Create a test HTTP server on Unix socket + listener, err := net.Listen("unix", socketPath) + if err != nil { + t.Fatalf("failed to create unix socket listener: %v", err) + } + defer listener.Close() + + testResponse := "Hello from Unix socket!" 
+ testServer := &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + fmt.Fprint(w, testResponse) + }), + } + + go testServer.Serve(listener) + defer testServer.Close() + + // Wait for server to be ready + time.Sleep(50 * time.Millisecond) + + // Create LocalBackend with test logger + logf := tstest.WhileTestRunningLogger(t) + b := newTestBackend(t) + b.logf = logf + + // Test creating proxy handler for Unix socket + handler, err := b.proxyHandlerForBackend("unix:" + socketPath) + if err != nil { + t.Fatalf("proxyHandlerForBackend failed: %v", err) + } + + // Verify it's a reverseProxy with correct socketPath + rp, ok := handler.(*reverseProxy) + if !ok { + t.Fatalf("expected *reverseProxy, got %T", handler) + } + if rp.socketPath != socketPath { + t.Errorf("socketPath = %q, want %q", rp.socketPath, socketPath) + } + if rp.url.Host != "localhost" { + t.Errorf("url.Host = %q, want %q", rp.url.Host, "localhost") + } +} + +func TestServeUnixSocketErrors(t *testing.T) { + logf := tstest.WhileTestRunningLogger(t) + b := newTestBackend(t) + b.logf = logf + + // Test empty socket path + _, err := b.proxyHandlerForBackend("unix:") + if err == nil { + t.Error("expected error for empty socket path") + } + + // Test non-existent socket - should create handler but fail on request + nonExistentSocket := filepath.Join(t.TempDir(), "nonexistent.sock") + handler, err := b.proxyHandlerForBackend("unix:" + nonExistentSocket) + if err != nil { + t.Fatalf("proxyHandlerForBackend failed: %v", err) + } + + req := httptest.NewRequest("GET", "http://foo.test.ts.net/", nil) + rec := httptest.NewRecorder() + + handler.ServeHTTP(rec, req) + + // Should get a 502 Bad Gateway when socket doesn't exist + if rec.Code != http.StatusBadGateway { + t.Errorf("got status %d, want %d for non-existent socket", rec.Code, http.StatusBadGateway) + } +} + +func TestReverseProxyConfigurationUnix(t *testing.T) { + b := 
newTestBackend(t) + + // Test that Unix socket backend creates proper reverseProxy + backend := "unix:/var/run/test.sock" + handler, err := b.proxyHandlerForBackend(backend) + if err != nil { + t.Fatalf("proxyHandlerForBackend failed: %v", err) + } + + rp, ok := handler.(*reverseProxy) + if !ok { + t.Fatalf("expected *reverseProxy, got %T", handler) + } + + // Verify configuration + if rp.socketPath != "/var/run/test.sock" { + t.Errorf("socketPath = %q, want %q", rp.socketPath, "/var/run/test.sock") + } + if rp.backend != backend { + t.Errorf("backend = %q, want %q", rp.backend, backend) + } + if rp.insecure { + t.Error("insecure should be false for unix sockets") + } + expectedURL := url.URL{Scheme: "http", Host: "localhost"} + if rp.url.Scheme != expectedURL.Scheme || rp.url.Host != expectedURL.Host { + t.Errorf("url = %v, want %v", rp.url, expectedURL) + } +} + +func TestServeBlocksTailscaledSocket(t *testing.T) { + // Use /tmp to avoid macOS socket path length limits + tmpDir, err := os.MkdirTemp("/tmp", "ts-test-*") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + defer os.RemoveAll(tmpDir) + + tailscaledSocket := filepath.Join(tmpDir, "ts.sock") + + // Create actual socket file + listener, err := net.Listen("unix", tailscaledSocket) + if err != nil { + t.Fatalf("failed to create tailscaled socket: %v", err) + } + defer listener.Close() + + b := newTestBackend(t) + b.sys.SocketPath = tailscaledSocket + + // Direct path to tailscaled socket should be blocked + _, err = b.proxyHandlerForBackend("unix:" + tailscaledSocket) + if !errors.Is(err, ErrProxyToTailscaledSocket) { + t.Errorf("direct path: got err=%v, want ErrProxyToTailscaledSocket", err) + } + + // Symlink to tailscaled socket should be blocked + symlinkPath := filepath.Join(tmpDir, "link") + if err := os.Symlink(tailscaledSocket, symlinkPath); err != nil { + t.Fatalf("failed to create symlink: %v", err) + } + + _, err = b.proxyHandlerForBackend("unix:" + symlinkPath) + if 
!errors.Is(err, ErrProxyToTailscaledSocket) { + t.Errorf("symlink: got err=%v, want ErrProxyToTailscaledSocket", err) + } + + // Different socket should work + otherSocket := filepath.Join(tmpDir, "ok.sock") + listener2, err := net.Listen("unix", otherSocket) + if err != nil { + t.Fatalf("failed to create other socket: %v", err) + } + defer listener2.Close() + + handler, err := b.proxyHandlerForBackend("unix:" + otherSocket) + if err != nil { + t.Errorf("legitimate socket should not be blocked: %v", err) + } + if handler == nil { + t.Error("expected valid handler for legitimate socket") + } +} diff --git a/ipn/serve.go b/ipn/serve.go index 1f15578893d84..76823a8464977 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -10,6 +10,7 @@ import ( "net" "net/netip" "net/url" + "runtime" "slices" "strconv" "strings" @@ -713,6 +714,21 @@ func ExpandProxyTargetValue(target string, supportedSchemes []string, defaultSch return fmt.Sprintf("%s://%s:%d", defaultScheme, host, port), nil } + // handle unix: scheme specially - it doesn't use standard URL format + if strings.HasPrefix(target, "unix:") { + if !slices.Contains(supportedSchemes, "unix") { + return "", fmt.Errorf("unix sockets are not supported for this target type") + } + if runtime.GOOS == "windows" { + return "", fmt.Errorf("unix socket serve target is not supported on Windows") + } + path := strings.TrimPrefix(target, "unix:") + if path == "" { + return "", fmt.Errorf("unix socket path cannot be empty") + } + return target, nil + } + hasScheme := true // prepend scheme if not present if !strings.Contains(target, "://") { diff --git a/ipn/serve_expand_test.go b/ipn/serve_expand_test.go new file mode 100644 index 0000000000000..b977238fe32ff --- /dev/null +++ b/ipn/serve_expand_test.go @@ -0,0 +1,82 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipn + +import ( + "runtime" + "testing" +) + +func TestExpandProxyTargetValueUnix(t *testing.T) { + tests := []struct { + name 
string + target string + supportedSchemes []string + defaultScheme string + want string + wantErr bool + skipOnWindows bool + }{ + { + name: "unix-socket-absolute-path", + target: "unix:/tmp/myservice.sock", + supportedSchemes: []string{"http", "https", "unix"}, + defaultScheme: "http", + want: "unix:/tmp/myservice.sock", + skipOnWindows: true, + }, + { + name: "unix-socket-var-run", + target: "unix:/var/run/docker.sock", + supportedSchemes: []string{"http", "https", "unix"}, + defaultScheme: "http", + want: "unix:/var/run/docker.sock", + skipOnWindows: true, + }, + { + name: "unix-socket-relative-path", + target: "unix:./myservice.sock", + supportedSchemes: []string{"http", "https", "unix"}, + defaultScheme: "http", + want: "unix:./myservice.sock", + skipOnWindows: true, + }, + { + name: "unix-socket-empty-path", + target: "unix:", + supportedSchemes: []string{"http", "https", "unix"}, + defaultScheme: "http", + wantErr: true, + }, + { + name: "unix-socket-not-in-supported-schemes", + target: "unix:/tmp/myservice.sock", + supportedSchemes: []string{"http", "https"}, + defaultScheme: "http", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.skipOnWindows && runtime.GOOS == "windows" { + t.Skip("skipping unix socket test on Windows") + } + + // On Windows, unix sockets should always error + if runtime.GOOS == "windows" && !tt.wantErr { + tt.wantErr = true + } + + got, err := ExpandProxyTargetValue(tt.target, tt.supportedSchemes, tt.defaultScheme) + if (err != nil) != tt.wantErr { + t.Errorf("ExpandProxyTargetValue() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && got != tt.want { + t.Errorf("ExpandProxyTargetValue() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/tsd/tsd.go b/tsd/tsd.go index 8223254dae942..8dc0c14278864 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -68,6 +68,10 @@ type System struct { // LocalBackend tracks the current config after any reloads. 
InitialConfig *conffile.Config + // SocketPath is the path to the tailscaled Unix socket. + // It is used to prevent serve from proxying to our own socket. + SocketPath string + // onlyNetstack is whether the Tun value is a fake TUN device // and we're using netstack for everything. onlyNetstack bool From cf40cf5ccb1be1ab931b427f7f83bba3214ace79 Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Fri, 5 Dec 2025 13:33:47 +0000 Subject: [PATCH 0793/1093] ipn/ipnlocal: add peer API endpoints to Hostinfo on initial client creation (#17851) Previously we only set this when it updated, which was fine for the first call to Start(), but after that point future updates would be skipped if nothing had changed. If Start() was called again, it would wipe the peer API endpoints and they wouldn't get added back again, breaking exit nodes (and anything else requiring peer API to be advertised). Updates tailscale/corp#27173 Signed-off-by: James Sanderson --- ipn/ipnlocal/local.go | 20 +++++++-- ipn/ipnlocal/state_test.go | 92 +++++++++++++++++++++++++++++++++++--- 2 files changed, 102 insertions(+), 10 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index d99dbf8627f70..e5fafb5bd911d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2528,7 +2528,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { if inServerMode := prefs.ForceDaemon(); inServerMode || runtime.GOOS == "windows" { logf("serverMode=%v", inServerMode) } - b.applyPrefsToHostinfoLocked(hostinfo, prefs) + b.applyPrefsToHostinfoLocked(b.hostinfo, prefs) b.updateWarnSync(prefs) persistv := prefs.Persist().AsStruct() @@ -2566,7 +2566,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { Persist: *persistv, ServerURL: serverURL, AuthKey: opts.AuthKey, - Hostinfo: hostinfo, + Hostinfo: b.hostInfoWithServicesLocked(), HTTPTestClient: httpTestClient, DiscoPublicKey: discoPublic, DebugFlags: debugFlags, @@ -4830,6 +4830,17 @@ func (b *LocalBackend) 
doSetHostinfoFilterServicesLocked() { b.logf("[unexpected] doSetHostinfoFilterServices with nil hostinfo") return } + + hi := b.hostInfoWithServicesLocked() + + cc.SetHostinfo(hi) +} + +// hostInfoWithServicesLocked returns a shallow clone of b.hostinfo with +// services added. +// +// b.mu must be held. +func (b *LocalBackend) hostInfoWithServicesLocked() *tailcfg.Hostinfo { peerAPIServices := b.peerAPIServicesLocked() if b.egg { peerAPIServices = append(peerAPIServices, tailcfg.Service{Proto: "egg", Port: 1}) @@ -4857,7 +4868,7 @@ func (b *LocalBackend) doSetHostinfoFilterServicesLocked() { b.logf("Hostinfo peerAPI ports changed: expected %v, got %v", expectedPorts, actualPorts) } - cc.SetHostinfo(&hi) + return &hi } type portPair struct { @@ -5257,6 +5268,9 @@ func (b *LocalBackend) initPeerAPIListenerLocked() { if allSame { // Nothing to do. b.logf("[v1] initPeerAPIListener: %d netmap addresses match existing listeners", addrs.Len()) + // TODO(zofrex): This is fragile. It doesn't check what's actually in hostinfo, and if + // peerAPIListeners gets out of sync with hostinfo.Services, we won't get back into a good + // state. E.G. see tailscale/corp#27173. 
return } } diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 152b375b0f7b8..27d53fe01b599 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -136,10 +136,12 @@ type mockControl struct { calls []string authBlocked bool shutdown chan struct{} + + hi *tailcfg.Hostinfo } func newClient(tb testing.TB, opts controlclient.Options) *mockControl { - return &mockControl{ + cc := mockControl{ tb: tb, authBlocked: true, logf: opts.Logf, @@ -148,6 +150,10 @@ func newClient(tb testing.TB, opts controlclient.Options) *mockControl { persist: opts.Persist.Clone(), controlClientID: rand.Int64(), } + if opts.Hostinfo != nil { + cc.SetHostinfoDirect(opts.Hostinfo) + } + return &cc } func (cc *mockControl) assertShutdown(wasPaused bool) { @@ -298,6 +304,11 @@ func (cc *mockControl) AuthCantContinue() bool { func (cc *mockControl) SetHostinfo(hi *tailcfg.Hostinfo) { cc.logf("SetHostinfo: %v", *hi) cc.called("SetHostinfo") + cc.SetHostinfoDirect(hi) +} + +func (cc *mockControl) SetHostinfoDirect(hi *tailcfg.Hostinfo) { + cc.hi = hi } func (cc *mockControl) SetNetInfo(ni *tailcfg.NetInfo) { @@ -1634,7 +1645,7 @@ func runTestSendPreservesAuthURL(t *testing.T, seamless bool) { return cc }) - t.Logf("Start") + t.Log("Start") b.Start(ipn.Options{ UpdatePrefs: &ipn.Prefs{ WantRunning: true, @@ -1642,7 +1653,7 @@ func runTestSendPreservesAuthURL(t *testing.T, seamless bool) { }, }) - t.Logf("LoginFinished") + t.Log("LoginFinished") cc.persist.UserProfile.LoginName = "user1" cc.persist.NodeID = "node1" @@ -1654,13 +1665,13 @@ func runTestSendPreservesAuthURL(t *testing.T, seamless bool) { SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), }}) - t.Logf("Running") + t.Log("Running") b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) - t.Logf("Re-auth (StartLoginInteractive)") + t.Log("Re-auth (StartLoginInteractive)") b.StartLoginInteractive(t.Context()) - t.Logf("Re-auth (receive URL)") + t.Log("Re-auth (receive URL)") 
url1 := "https://localhost:1/1" cc.send(sendOpt{url: url1}) @@ -1668,12 +1679,79 @@ func runTestSendPreservesAuthURL(t *testing.T, seamless bool) { // be set, and once .send has completed, any opportunities for a WG engine // status update to trample it have ended as well. if b.authURL == "" { - t.Fatalf("expected authURL to be set") + t.Fatal("expected authURL to be set") } else { t.Log("authURL was set") } } +func TestServicesNotClearedByStart(t *testing.T) { + connect := &ipn.MaskedPrefs{Prefs: ipn.Prefs{WantRunning: true}, WantRunningSet: true} + node1 := buildNetmapWithPeers( + makePeer(1, withName("node-1"), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))), + ) + + var cc *mockControl + lb := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + cc = newClient(t, opts) + return cc + }) + + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc.assertCalls("Login") + + // Simulate authentication and wait for goroutines to finish (so peer + // listeners have been set up and hostinfo updated) + cc.authenticated(node1) + waitForGoroutinesToStop(lb) + + if cc.hi == nil || len(cc.hi.Services) == 0 { + t.Fatal("test setup bug: services should be present") + } + + mustDo(t)(lb.Start(ipn.Options{})) + + if len(cc.hi.Services) == 0 { + t.Error("services should still be present in hostinfo after no-op Start") + } + + lb.initPeerAPIListenerLocked() + waitForGoroutinesToStop(lb) + + // Clearing out services on Start would be less of a problem if they would at + // least come back after authreconfig or any other change, but they don't if + // the addresses in the netmap haven't changed and still match the stored + // peerAPIListeners. 
+ if len(cc.hi.Services) == 0 { + t.Error("services STILL not present after authreconfig") + } +} + +func waitForGoroutinesToStop(lb *LocalBackend) { + goroutineDone := make(chan struct{}) + removeTrackerCallback := lb.goTracker.AddDoneCallback(func() { + select { + case goroutineDone <- struct{}{}: + default: + } + }) + defer removeTrackerCallback() + + for { + if lb.goTracker.RunningGoroutines() == 0 { + return + } + + select { + case <-time.Tick(1 * time.Second): + continue + case <-goroutineDone: + continue + } + } +} + func buildNetmapWithPeers(self tailcfg.NodeView, peers ...tailcfg.NodeView) *netmap.NetworkMap { const ( firstAutoUserID = tailcfg.UserID(10000) From d349370e5500e6f583a15e38ad945199e5e11ea1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Fri, 5 Dec 2025 11:05:49 -0500 Subject: [PATCH 0794/1093] client/systray: change systray to start after graphical.target (#18138) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The service was starting after systemd itself, and while this surprisingly worked for some situations, it broke for others. Change it to start after a GUI has been initialized. Updates #17656 Signed-off-by: Claus Lensbøl --- client/systray/tailscale-systray.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/systray/tailscale-systray.service b/client/systray/tailscale-systray.service index a4d987563ec0a..01d0b383c0634 100644 --- a/client/systray/tailscale-systray.service +++ b/client/systray/tailscale-systray.service @@ -1,6 +1,6 @@ [Unit] Description=Tailscale System Tray -After=systemd.service +After=graphical.target [Service] Type=simple From d5c893195b0795831cd9ad5ef58676420a5bb3a4 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 8 Dec 2025 11:19:01 +0000 Subject: [PATCH 0795/1093] cmd/k8s-operator: don't log errors on not found objects. 
(#18142) The event queue gets deleted events, which means that sometimes the object that should be reconciled no longer exists. Don't log user facing errors if that is the case. Updates #18141 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/operator.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 816fea5664557..b50be8ce7ba66 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -27,6 +27,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" klabels "k8s.io/apimachinery/pkg/labels" @@ -1018,7 +1019,9 @@ func nodeHandlerForProxyGroup(cl client.Client, defaultProxyClass string, logger proxyClass := &tsapi.ProxyClass{} if err := cl.Get(ctx, types.NamespacedName{Name: pc}, proxyClass); err != nil { - logger.Debugf("error getting ProxyClass %q: %v", pg.Spec.ProxyClass, err) + if !apierrors.IsNotFound(err) { + logger.Debugf("error getting ProxyClass %q: %v", pg.Spec.ProxyClass, err) + } return nil } @@ -1275,7 +1278,9 @@ func ingressSvcFromEps(cl client.Client, logger *zap.SugaredLogger) handler.MapF svc := &corev1.Service{} ns := o.GetNamespace() if err := cl.Get(ctx, types.NamespacedName{Name: svcName, Namespace: ns}, svc); err != nil { - logger.Errorf("failed to get service: %v", err) + if !apierrors.IsNotFound(err) { + logger.Debugf("failed to get service: %v", err) + } return nil } @@ -1450,7 +1455,9 @@ func kubeAPIServerPGsFromSecret(cl client.Client, logger *zap.SugaredLogger) han var pg tsapi.ProxyGroup if err := cl.Get(ctx, types.NamespacedName{Name: secret.ObjectMeta.Labels[LabelParentName]}, &pg); err != nil { - logger.Infof("error getting ProxyGroup %s: %v", 
secret.ObjectMeta.Labels[LabelParentName], err) + if !apierrors.IsNotFound(err) { + logger.Debugf("error getting ProxyGroup %s: %v", secret.ObjectMeta.Labels[LabelParentName], err) + } return nil } From 2a0ddb7897c15670c5faf75190ae4a53fdf8de8e Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 8 Dec 2025 15:19:28 +0000 Subject: [PATCH 0796/1093] cmd/k8s-operator: warn if users attempt to expose a headless Service (#18140) Previously, if users attempted to expose a headless Service to tailnet, this just silently did not work. This PR makes the operator throw a warning event + update Service's status with an error message. Updates #18139 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/operator_test.go | 169 +++++++++++++++--------------- cmd/k8s-operator/svc.go | 5 +- 2 files changed, 88 insertions(+), 86 deletions(-) diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index e11235768dea2..d0f42fe6dfad5 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -38,10 +38,7 @@ import ( func TestLoadBalancerClass(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -220,10 +217,7 @@ func TestLoadBalancerClass(t *testing.T) { func TestTailnetTargetFQDNAnnotation(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) tailnetTargetFQDN := "foo.bar.ts.net." 
clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ @@ -333,10 +327,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { func TestTailnetTargetIPAnnotation(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) tailnetTargetIP := "100.66.66.66" clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ @@ -431,12 +422,12 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { }) expectReconciled(t, sr, "default", "test") - // // synchronous StatefulSet deletion triggers a requeue. But, the StatefulSet - // // didn't create any child resources since this is all faked, so the - // // deletion goes through immediately. + // synchronous StatefulSet deletion triggers a requeue. But, the StatefulSet + // didn't create any child resources since this is all faked, so the + // deletion goes through immediately. expectReconciled(t, sr, "default", "test") expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName) - // // The deletion triggers another reconcile, to finish the cleanup. + // The deletion triggers another reconcile, to finish the cleanup. 
expectReconciled(t, sr, "default", "test") expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName) expectMissing[corev1.Service](t, fc, "operator-ns", shortName) @@ -446,10 +437,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -517,10 +505,7 @@ func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -588,10 +573,7 @@ func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { func TestAnnotations(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -695,10 +677,7 @@ func TestAnnotations(t *testing.T) { func TestAnnotationIntoLB(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -828,10 +807,7 @@ func TestAnnotationIntoLB(t *testing.T) { func TestLBIntoAnnotation(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: 
fc, @@ -966,10 +942,7 @@ func TestLBIntoAnnotation(t *testing.T) { func TestCustomHostname(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -1078,10 +1051,7 @@ func TestCustomHostname(t *testing.T) { func TestCustomPriorityClassName(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -1333,10 +1303,7 @@ func TestProxyClassForService(t *testing.T) { WithStatusSubresource(pc). Build() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -1425,10 +1392,7 @@ func TestProxyClassForService(t *testing.T) { func TestDefaultLoadBalancer(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -1482,10 +1446,7 @@ func TestDefaultLoadBalancer(t *testing.T) { func TestProxyFirewallMode(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -1563,14 +1524,70 @@ func Test_isMagicDNSName(t *testing.T) { } } +func Test_HeadlessService(t *testing.T) { + fc := fake.NewFakeClient() + zl := zap.Must(zap.NewDevelopment()) + clock := tstest.NewClock(tstest.ClockOpts{}) + sr := &ServiceReconciler{ + Client: fc, 
+ ssr: &tailscaleSTSReconciler{ + Client: fc, + }, + logger: zl.Sugar(), + clock: clock, + recorder: record.NewFakeRecorder(100), + } + mustCreate(t, fc, &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + AnnotationExpose: "true", + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Type: corev1.ServiceTypeClusterIP, + }, + }) + + expectReconciled(t, sr, "default", "test") + + t0 := conditionTime(clock) + + want := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + AnnotationExpose: "true", + }, + }, + Spec: corev1.ServiceSpec{ + ClusterIP: "None", + Type: corev1.ServiceTypeClusterIP, + }, + Status: corev1.ServiceStatus{ + Conditions: []metav1.Condition{{ + Type: string(tsapi.ProxyReady), + Status: metav1.ConditionFalse, + LastTransitionTime: t0, + Reason: reasonProxyInvalid, + Message: `unable to provision proxy resources: invalid Service: headless Services are not supported.`, + }}, + }, + } + + expectEqual(t, fc, want) +} + func Test_serviceHandlerForIngress(t *testing.T) { const tailscaleIngressClassName = "tailscale" - fc := fake.NewFakeClient() - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) // 1. An event on a headless Service for a tailscale Ingress results in // the Ingress being reconciled. 
@@ -1700,10 +1717,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { func Test_serviceHandlerForIngress_multipleIngressClasses(t *testing.T) { fc := fake.NewFakeClient() - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "backend", Namespace: "default"}, @@ -1735,10 +1749,7 @@ func Test_serviceHandlerForIngress_multipleIngressClasses(t *testing.T) { } func Test_clusterDomainFromResolverConf(t *testing.T) { - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) tests := []struct { name string conf *resolvconffile.Config @@ -1806,10 +1817,7 @@ func Test_clusterDomainFromResolverConf(t *testing.T) { func Test_authKeyRemoval(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) // 1. A new Service that should be exposed via Tailscale gets created, a Secret with a config that contains auth // key is generated. @@ -1874,10 +1882,7 @@ func Test_authKeyRemoval(t *testing.T) { func Test_externalNameService(t *testing.T) { fc := fake.NewFakeClient() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) // 1. A External name Service that should be exposed via Tailscale gets // created. @@ -1974,10 +1979,7 @@ func Test_metricsResourceCreation(t *testing.T) { WithStatusSubresource(pc). 
Build() ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, @@ -2048,10 +2050,7 @@ func TestIgnorePGService(t *testing.T) { _, _, fc, _, _ := setupServiceTest(t) ft := &fakeTSClient{} - zl, err := zap.NewDevelopment() - if err != nil { - t.Fatal(err) - } + zl := zap.Must(zap.NewDevelopment()) clock := tstest.NewClock(tstest.ClockOpts{}) sr := &ServiceReconciler{ Client: fc, diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index eec1924e7902c..5c163e081f5a6 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -377,6 +377,9 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga func validateService(svc *corev1.Service) []string { violations := make([]string, 0) + if svc.Spec.ClusterIP == "None" { + violations = append(violations, "headless Services are not supported.") + } if svc.Annotations[AnnotationTailnetTargetFQDN] != "" && svc.Annotations[AnnotationTailnetTargetIP] != "" { violations = append(violations, fmt.Sprintf("only one of annotations %s and %s can be set", AnnotationTailnetTargetIP, AnnotationTailnetTargetFQDN)) } @@ -415,7 +418,7 @@ func (a *ServiceReconciler) shouldExposeDNSName(svc *corev1.Service) bool { } func (a *ServiceReconciler) shouldExposeClusterIP(svc *corev1.Service) bool { - if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" { + if svc.Spec.ClusterIP == "" { return false } return isTailscaleLoadBalancerService(svc, a.isDefaultLoadBalancer) || hasExposeAnnotation(svc) From 7d3097d3b552de3d4441b4909bcec75718cf5d3d Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 8 Dec 2025 12:11:04 +0000 Subject: [PATCH 0797/1093] tka: add some more tests for Bootstrap() This improves our test coverage of the Bootstrap() method, especially around catching AUMs that shouldn't pass validation. 
Updates #cleanup Change-Id: Idc61fcbc6daaa98c36d20ec61e45ce48771b85de Signed-off-by: Alex Chan --- tka/tka_test.go | 60 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/tka/tka_test.go b/tka/tka_test.go index 78af7400daff3..cc9ea57ee2f6a 100644 --- a/tka/tka_test.go +++ b/tka/tka_test.go @@ -5,6 +5,7 @@ package tka import ( "bytes" + "strings" "testing" "github.com/google/go-cmp/cmp" @@ -345,6 +346,65 @@ func TestCreateBootstrapAuthority(t *testing.T) { } } +// Trying to bootstrap an already-bootstrapped Chonk is an error. +func TestBootstrapChonkMustBeEmpty(t *testing.T) { + chonk := ChonkMem() + + pub, priv := testingKey25519(t, 1) + key := Key{Kind: Key25519, Public: pub, Votes: 2} + state := State{ + Keys: []Key{key}, + DisablementSecrets: [][]byte{DisablementKDF([]byte{1, 2, 3})}, + } + + // Bootstrap our chonk for the first time, which should succeed. + _, _, err := Create(chonk, state, signer25519(priv)) + if err != nil { + t.Fatalf("Create() failed: %v", err) + } + + // Bootstrap our chonk for the second time, which should fail, because + // it already contains data. 
+ _, _, err = Create(chonk, state, signer25519(priv)) + if wantErr := "tailchonk is not empty"; err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("Create() did not fail with expected error: want %q, got %v", wantErr, err) + } +} + +func TestBootstrapWithInvalidAUMs(t *testing.T) { + for _, tt := range []struct { + Name string + GenesisAUM AUM + WantErr string + }{ + { + Name: "invalid-message-kind", + GenesisAUM: AUM{MessageKind: AUMNoOp}, + WantErr: "bootstrap AUMs must be checkpoint messages", + }, + { + Name: "missing-state", + GenesisAUM: AUM{MessageKind: AUMCheckpoint}, + WantErr: "bootstrap AUM is missing state", + }, + { + Name: "no-disablement-secret", + GenesisAUM: AUM{ + MessageKind: AUMCheckpoint, + State: &State{}, + }, + WantErr: "at least one disablement secret required", + }, + } { + t.Run(tt.Name, func(t *testing.T) { + _, err := Bootstrap(ChonkMem(), tt.GenesisAUM) + if err == nil || !strings.Contains(err.Error(), tt.WantErr) { + t.Fatalf("Bootstrap() did not fail with expected error: want %q, got %v", tt.WantErr, err) + } + }) + } +} + func TestAuthorityInformNonLinear(t *testing.T) { pub, priv := testingKey25519(t, 1) key := Key{Kind: Key25519, Public: pub, Votes: 2} From c7b10cb39f578a2f5e5983ab8b4ddd40e13afaca Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 8 Dec 2025 20:19:40 +0000 Subject: [PATCH 0798/1093] scripts/installer.sh: add SteamOS handling (#18159) Fixes #12943 Signed-off-by: Erisa A --- scripts/installer.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index e21e40e155ca6..db94c26ec508a 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -336,6 +336,11 @@ main() { VERSION="$VERSION_MAJOR" PACKAGETYPE="tdnf" ;; + steamos) + echo "To install Tailscale on SteamOS, please follow the instructions here:" + echo "https://github.com/tailscale-dev/deck-tailscale" + exit 1 + ;; # TODO: wsl? # TODO: synology? qnap? 
From da0ea8ef3e815ada6d424532f71135dfecb96cd2 Mon Sep 17 00:00:00 2001 From: Nick Khyl <1761190+nickkhyl@users.noreply.github.com> Date: Sun, 7 Dec 2025 18:26:45 -0600 Subject: [PATCH 0799/1093] Revert "ipn/ipnlocal: shut down old control client synchronously on reset" It appears (*controlclient.Auto).Shutdown() can still deadlock when called with b.mu held, and therefore the changes in #18127 are unsafe. This reverts #18127 until we figure out what causes it. This reverts commit d199ecac80083e64d32baf3b473c67b11a6e6936. Signed-off-by: Nick Khyl --- ipn/ipnlocal/local.go | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e5fafb5bd911d..51f92656040b7 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -950,8 +950,12 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { // down, clients switch over to other replicas whilst the existing connections are kept alive for some period of time. func (b *LocalBackend) DisconnectControl() { b.mu.Lock() - defer b.mu.Unlock() - b.resetControlClientLocked() + cc := b.resetControlClientLocked() + b.mu.Unlock() + + if cc != nil { + cc.Shutdown() + } } // linkChange is our network monitor callback, called whenever the network changes. @@ -2417,6 +2421,14 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { logf := logger.WithPrefix(b.logf, "Start: ") b.startOnce.Do(b.initOnce) + var clientToShutdown controlclient.Client + defer func() { + if clientToShutdown != nil { + // Shutdown outside of b.mu to avoid deadlocks. + b.goTracker.Go(clientToShutdown.Shutdown) + } + }() + if opts.UpdatePrefs != nil { if err := b.checkPrefsLocked(opts.UpdatePrefs); err != nil { return err @@ -2459,7 +2471,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { // into sync with the minimal changes. But that's not how it // is right now, which is a sign that the code is still too // complicated. 
- b.resetControlClientLocked() + clientToShutdown = b.resetControlClientLocked() httpTestClient := b.httpTestClient if b.hostinfo != nil { @@ -5812,12 +5824,13 @@ func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) { b.ignoreControlClientUpdates.Store(cc == nil) } -// resetControlClientLocked sets b.cc to nil and shuts down the previous -// control client, if any. -func (b *LocalBackend) resetControlClientLocked() { +// resetControlClientLocked sets b.cc to nil and returns the old value. If the +// returned value is non-nil, the caller must call Shutdown on it after +// releasing b.mu. +func (b *LocalBackend) resetControlClientLocked() controlclient.Client { syncs.RequiresMutex(&b.mu) if b.cc == nil { - return + return nil } b.resetAuthURLLocked() @@ -5837,7 +5850,7 @@ func (b *LocalBackend) resetControlClientLocked() { } prev := b.cc b.setControlClientLocked(nil) - prev.Shutdown() + return prev } // resetAuthURLLocked resets authURL, canceling any pending interactive login. @@ -6931,7 +6944,10 @@ func (b *LocalBackend) resetForProfileChangeLocked() error { b.updateFilterLocked(ipn.PrefsView{}) // Reset the NetworkMap in the engine b.e.SetNetworkMap(new(netmap.NetworkMap)) - b.resetControlClientLocked() + if prevCC := b.resetControlClientLocked(); prevCC != nil { + // Shutdown outside of b.mu to avoid deadlocks. + b.goTracker.Go(prevCC.Shutdown) + } // TKA errors should not prevent resetting the backend state. // However, we should still return the error to the caller. tkaErr := b.initTKALocked() @@ -7010,7 +7026,10 @@ func (b *LocalBackend) ResetAuth() error { b.mu.Lock() defer b.mu.Unlock() - b.resetControlClientLocked() + if prevCC := b.resetControlClientLocked(); prevCC != nil { + // Shutdown outside of b.mu to avoid deadlocks. 
+ b.goTracker.Go(prevCC.Shutdown) + } if err := b.clearMachineKeyLocked(); err != nil { return err } From 378ee20b9a06b12da1b0d4c2ef21a168f0aa619f Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Thu, 20 Nov 2025 11:28:18 +0000 Subject: [PATCH 0800/1093] cmd/tailscale/cli: stabilise the output of `tailscale lock status --json` This patch stabilises the JSON output, and improves it in the following ways: * The AUM hash in Head uses the base32-encoded form of an AUM hash, consistent with how it's presented elsewhere * TrustedKeys are the same format as the keys as `tailnet lock log --json` * SigKind, Pubkey and KeyID are all presented consistently with other JSON output in NodeKeySignature * FilteredPeers don't have a NodeKeySignature, because it will always be empty For reference, here's the JSON output from the CLI prior to this change: ```json { "Enabled": true, "Head": [ 196, 69, 63, 243, 213, 133, 123, 46, 183, 203, 143, 34, 184, 85, 80, 1, 221, 92, 49, 213, 93, 106, 5, 206, 176, 250, 58, 165, 155, 136, 11, 13 ], "PublicKey": "nlpub:0f99af5c02216193963ce9304bb4ca418846eddebe237f37a6de1c59097ed0b8", "NodeKey": "nodekey:8abfe98b38151748919f6e346ad16436201c3ecd453b01e9d6d3a38e1826000d", "NodeKeySigned": true, "NodeKeySignature": { "SigKind": 1, "Pubkey": "bnCKv+mLOBUXSJGfbjRq0WQ2IBw+zUU7AenW06OOGCYADQ==", "KeyID": "D5mvXAIhYZOWPOkwS7TKQYhG7d6+I383pt4cWQl+0Lg=", "Signature": "4DPW4v6MyLLwQ8AMDm27BVDGABjeC9gg1EfqRdKgzVXi/mJDwY9PTAoX0+0WTRs5SUksWjY0u1CLxq5xgjFGBA==", "Nested": null, "WrappingPubkey": "D5mvXAIhYZOWPOkwS7TKQYhG7d6+I383pt4cWQl+0Lg=" }, "TrustedKeys": [ { "Key": "nlpub:0f99af5c02216193963ce9304bb4ca418846eddebe237f37a6de1c59097ed0b8", "Metadata": null, "Votes": 1 }, { "Key": "nlpub:de2254c040e728140d92bc967d51284e9daea103a28a97a215694c5bda2128b8", "Metadata": null, "Votes": 1 } ], "VisiblePeers": [ { "Name": "signing2.taila62b.unknown.c.ts.net.", "ID": 7525920332164264, "StableID": "nRX6TbAWm121DEVEL", "TailscaleIPs": [ "100.110.67.20", 
"fd7a:115c:a1e0::9c01:4314" ], "NodeKey": "nodekey:10bf4a5c168051d700a29123cd81568377849da458abef4b328794ca9cae4313", "NodeKeySignature": { "SigKind": 1, "Pubkey": "bnAQv0pcFoBR1wCikSPNgVaDd4SdpFir70syh5TKnK5DEw==", "KeyID": "D5mvXAIhYZOWPOkwS7TKQYhG7d6+I383pt4cWQl+0Lg=", "Signature": "h9fhwHiNdkTqOGVQNdW6AVFoio6MFaFobPiK9ydywgmtYxcExJ38b76Tabdc56aNLxf8IfCaRw2VYPcQG2J/AA==", "Nested": null, "WrappingPubkey": "3iJUwEDnKBQNkryWfVEoTp2uoQOiipeiFWlMW9ohKLg=" } } ], "FilteredPeers": [ { "Name": "node3.taila62b.unknown.c.ts.net.", "ID": 5200614049042386, "StableID": "n3jAr7KNch11DEVEL", "TailscaleIPs": [ "100.95.29.124", "fd7a:115c:a1e0::f901:1d7c" ], "NodeKey": "nodekey:454d2c8602c10574c5ec3a6790f159714802012b7b8bb8d2ab47d637f9df1d7b", "NodeKeySignature": { "SigKind": 0, "Pubkey": null, "KeyID": null, "Signature": null, "Nested": null, "WrappingPubkey": null } } ], "StateID": 16885615198276932820 } ``` Updates https://github.com/tailscale/corp/issues/22355 Updates https://github.com/tailscale/tailscale/issues/17619 Signed-off-by: Alex Chan Change-Id: I65b58ff4520033e6b70fc3b1ba7fc91c1f70a960 --- ...network-lock-v1.go => network-lock-log.go} | 26 +- .../cli/jsonoutput/network-lock-status.go | 249 ++++++++++++++++++ cmd/tailscale/cli/network-lock.go | 16 +- cmd/tailscale/cli/network-lock_test.go | 169 +++++++++++- ipn/ipnlocal/network-lock.go | 1 + ipn/ipnstate/ipnstate.go | 1 + 6 files changed, 441 insertions(+), 21 deletions(-) rename cmd/tailscale/cli/jsonoutput/{network-lock-v1.go => network-lock-log.go} (90%) create mode 100644 cmd/tailscale/cli/jsonoutput/network-lock-status.go diff --git a/cmd/tailscale/cli/jsonoutput/network-lock-v1.go b/cmd/tailscale/cli/jsonoutput/network-lock-log.go similarity index 90% rename from cmd/tailscale/cli/jsonoutput/network-lock-v1.go rename to cmd/tailscale/cli/jsonoutput/network-lock-log.go index 8a2d2de336b3d..88e449db36d2a 100644 --- a/cmd/tailscale/cli/jsonoutput/network-lock-v1.go +++ 
b/cmd/tailscale/cli/jsonoutput/network-lock-log.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package jsonoutput import ( @@ -14,7 +16,7 @@ import ( "tailscale.com/tka" ) -// PrintNetworkLockJSONV1 prints the stored TKA state as a JSON object to the CLI, +// PrintNetworkLockLogJSONV1 prints the stored TKA state as a JSON object to the CLI, // in a stable "v1" format. // // This format includes: @@ -22,7 +24,7 @@ import ( // - the AUM hash as a base32-encoded string // - the raw AUM as base64-encoded bytes // - the expanded AUM, which prints named fields for consumption by other tools -func PrintNetworkLockJSONV1(out io.Writer, updates []ipnstate.NetworkLockUpdate) error { +func PrintNetworkLockLogJSONV1(out io.Writer, updates []ipnstate.NetworkLockUpdate) error { messages := make([]logMessageV1, len(updates)) for i, update := range updates { @@ -64,7 +66,7 @@ func toLogMessageV1(aum tka.AUM, update ipnstate.NetworkLockUpdate) logMessageV1 expandedAUM.PrevAUMHash = aum.PrevAUMHash.String() } if key := aum.Key; key != nil { - expandedAUM.Key = toExpandedKeyV1(key) + expandedAUM.Key = toTKAKeyV1(key) } if keyID := aum.KeyID; keyID != nil { expandedAUM.KeyID = fmt.Sprintf("tlpub:%x", keyID) @@ -78,7 +80,7 @@ func toLogMessageV1(aum tka.AUM, update ipnstate.NetworkLockUpdate) logMessageV1 expandedState.DisablementSecrets = append(expandedState.DisablementSecrets, fmt.Sprintf("%x", secret)) } for _, key := range state.Keys { - expandedState.Keys = append(expandedState.Keys, toExpandedKeyV1(&key)) + expandedState.Keys = append(expandedState.Keys, toTKAKeyV1(&key)) } expandedState.StateID1 = state.StateID1 expandedState.StateID2 = state.StateID2 @@ -102,10 +104,10 @@ func toLogMessageV1(aum tka.AUM, update ipnstate.NetworkLockUpdate) logMessageV1 } } -// toExpandedKeyV1 converts a [tka.Key] to the JSON output returned +// toTKAKeyV1 converts a [tka.Key] to the JSON output returned // 
by the CLI. -func toExpandedKeyV1(key *tka.Key) expandedKeyV1 { - return expandedKeyV1{ +func toTKAKeyV1(key *tka.Key) tkaKeyV1 { + return tkaKeyV1{ Kind: key.Kind.String(), Votes: key.Votes, Public: fmt.Sprintf("tlpub:%x", key.Public), @@ -137,7 +139,7 @@ type expandedAUMV1 struct { // Key encodes a public key to be added to the key authority. // This field is used for AddKey AUMs. - Key expandedKeyV1 `json:"Key,omitzero"` + Key tkaKeyV1 `json:"Key,omitzero"` // KeyID references a public key which is part of the key authority. // This field is used for RemoveKey and UpdateKey AUMs. @@ -156,10 +158,10 @@ type expandedAUMV1 struct { Signatures []expandedSignatureV1 `json:"Signatures,omitzero"` } -// expandedAUMV1 is the expanded version of a [tka.Key], which describes +// tkaKeyV1 is the expanded version of a [tka.Key], which describes // the public components of a key known to network-lock. -type expandedKeyV1 struct { - Kind string +type tkaKeyV1 struct { + Kind string `json:"Kind,omitzero"` // Votes describes the weight applied to signatures using this key. Votes uint @@ -186,7 +188,7 @@ type expandedStateV1 struct { // // 1. The signing nodes currently trusted by the TKA. // 2. Ephemeral keys that were used to generate pre-signed auth keys. - Keys []expandedKeyV1 + Keys []tkaKeyV1 // StateID's are nonce's, generated on enablement and fixed for // the lifetime of the Tailnet Key Authority. 
diff --git a/cmd/tailscale/cli/jsonoutput/network-lock-status.go b/cmd/tailscale/cli/jsonoutput/network-lock-status.go new file mode 100644 index 0000000000000..0c6481093c9d6 --- /dev/null +++ b/cmd/tailscale/cli/jsonoutput/network-lock-status.go @@ -0,0 +1,249 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package jsonoutput + +import ( + "encoding/base64" + jsonv1 "encoding/json" + "fmt" + "io" + + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" +) + +// PrintNetworkLockStatusJSONV1 prints the current Tailnet Lock status +// as a JSON object to the CLI, in a stable "v1" format. +func PrintNetworkLockStatusJSONV1(out io.Writer, status *ipnstate.NetworkLockStatus) error { + responseEnvelope := ResponseEnvelope{ + SchemaVersion: "1", + } + + var result any + if status.Enabled { + result = struct { + ResponseEnvelope + tailnetLockEnabledStatusV1 + }{ + ResponseEnvelope: responseEnvelope, + tailnetLockEnabledStatusV1: toTailnetLockEnabledStatusV1(status), + } + } else { + result = struct { + ResponseEnvelope + tailnetLockDisabledStatusV1 + }{ + ResponseEnvelope: responseEnvelope, + tailnetLockDisabledStatusV1: toTailnetLockDisabledStatusV1(status), + } + } + + enc := jsonv1.NewEncoder(out) + enc.SetIndent("", " ") + return enc.Encode(result) +} + +func toTailnetLockDisabledStatusV1(status *ipnstate.NetworkLockStatus) tailnetLockDisabledStatusV1 { + out := tailnetLockDisabledStatusV1{ + tailnetLockStatusV1Base: tailnetLockStatusV1Base{ + Enabled: status.Enabled, + }, + } + if !status.PublicKey.IsZero() { + out.PublicKey = status.PublicKey.CLIString() + } + if nk := status.NodeKey; nk != nil { + out.NodeKey = nk.String() + } + return out +} + +func toTailnetLockEnabledStatusV1(status *ipnstate.NetworkLockStatus) tailnetLockEnabledStatusV1 { + out := tailnetLockEnabledStatusV1{ + tailnetLockStatusV1Base: tailnetLockStatusV1Base{ + Enabled: status.Enabled, + }, + } + + if status.Head != nil { 
+ var head tka.AUMHash + h := status.Head + copy(head[:], h[:]) + out.Head = head.String() + } + if !status.PublicKey.IsZero() { + out.PublicKey = status.PublicKey.CLIString() + } + if nk := status.NodeKey; nk != nil { + out.NodeKey = nk.String() + } + out.NodeKeySigned = status.NodeKeySigned + if sig := status.NodeKeySignature; sig != nil { + out.NodeKeySignature = toTKANodeKeySignatureV1(sig) + } + for _, key := range status.TrustedKeys { + out.TrustedKeys = append(out.TrustedKeys, ipnTKAKeytoTKAKeyV1(&key)) + } + for _, vp := range status.VisiblePeers { + out.VisiblePeers = append(out.VisiblePeers, tkaTrustedPeerV1{ + tkaPeerV1: toTKAPeerV1(vp), + NodeKeySignature: toTKANodeKeySignatureV1(&vp.NodeKeySignature), + }) + } + for _, fp := range status.FilteredPeers { + out.FilteredPeers = append(out.FilteredPeers, toTKAPeerV1(fp)) + } + out.StateID = status.StateID + + return out +} + +// toTKAKeyV1 converts an [ipnstate.TKAKey] to the JSON output returned +// by the CLI. +func ipnTKAKeytoTKAKeyV1(key *ipnstate.TKAKey) tkaKeyV1 { + return tkaKeyV1{ + Kind: key.Kind, + Votes: key.Votes, + Public: key.Key.CLIString(), + Meta: key.Metadata, + } +} + +type tailnetLockStatusV1Base struct { + // Enabled is true if Tailnet Lock is enabled. + Enabled bool + + // PublicKey describes the node's network-lock public key. + PublicKey string `json:"PublicKey,omitzero"` + + // NodeKey describes the node's current node-key. This field is not + // populated if the node is not operating (i.e. waiting for a login). + NodeKey string `json:"NodeKey,omitzero"` +} + +// tailnetLockDisabledStatusV1 is the JSON representation of the Tailnet Lock status +// when Tailnet Lock is disabled. +type tailnetLockDisabledStatusV1 struct { + tailnetLockStatusV1Base +} + +// tailnetLockEnabledStatusV1 is the JSON representation of the Tailnet Lock status. +type tailnetLockEnabledStatusV1 struct { + tailnetLockStatusV1Base + + // Head describes the AUM hash of the leaf AUM. 
+ Head string `json:"Head,omitzero"` + + // NodeKeySigned is true if our node is authorized by Tailnet Lock. + NodeKeySigned bool + + // NodeKeySignature is the current signature of this node's key. + NodeKeySignature *tkaNodeKeySignatureV1 + + // TrustedKeys describes the keys currently trusted to make changes + // to network-lock. + TrustedKeys []tkaKeyV1 + + // VisiblePeers describes peers which are visible in the netmap that + // have valid Tailnet Lock signatures signatures. + VisiblePeers []tkaTrustedPeerV1 + + // FilteredPeers describes peers which were removed from the netmap + // (i.e. no connectivity) because they failed Tailnet Lock + // checks. + FilteredPeers []tkaPeerV1 + + // StateID is a nonce associated with the Tailnet Lock authority, + // generated upon enablement. This field is empty if Tailnet Lock + // is disabled. + StateID uint64 `json:"State,omitzero"` +} + +// tkaPeerV1 is the JSON representation of an [ipnstate.TKAPeer], which describes +// a peer and its Tailnet Lock details. +type tkaPeerV1 struct { + // Stable ID, i.e. [tailcfg.StableNodeID] + ID string + + // DNS name + DNSName string + + // Tailscale IP(s) assigned to this node + TailscaleIPs []string + + // The node's public key + NodeKey string +} + +// tkaPeerV1 is the JSON representation of a trusted [ipnstate.TKAPeer], which +// has a node key signature. +type tkaTrustedPeerV1 struct { + tkaPeerV1 + + // The node's key signature + NodeKeySignature *tkaNodeKeySignatureV1 `json:"NodeKeySignature,omitzero"` +} + +func toTKAPeerV1(peer *ipnstate.TKAPeer) tkaPeerV1 { + out := tkaPeerV1{ + DNSName: peer.Name, + ID: string(peer.StableID), + } + for _, ip := range peer.TailscaleIPs { + out.TailscaleIPs = append(out.TailscaleIPs, ip.String()) + } + out.NodeKey = peer.NodeKey.String() + + return out +} + +// tkaNodeKeySignatureV1 is the JSON representation of a [tka.NodeKeySignature], +// which describes a signature that authorizes a specific node key. 
+type tkaNodeKeySignatureV1 struct { + // SigKind identifies the variety of signature. + SigKind string + + // PublicKey identifies the key.NodePublic which is being authorized. + // SigCredential signatures do not use this field. + PublicKey string `json:"PublicKey,omitzero"` + + // KeyID identifies which key in the tailnet key authority should + // be used to verify this signature. Only set for SigDirect and + // SigCredential signature kinds. + KeyID string `json:"KeyID,omitzero"` + + // Signature is the packed (R, S) ed25519 signature over all other + // fields of the structure. + Signature string + + // Nested describes a NodeKeySignature which authorizes the node-key + // used as Pubkey. Only used for SigRotation signatures. + Nested *tkaNodeKeySignatureV1 `json:"Nested,omitzero"` + + // WrappingPubkey specifies the ed25519 public key which must be used + // to sign a Signature which embeds this one. + WrappingPublicKey string `json:"WrappingPublicKey,omitzero"` +} + +func toTKANodeKeySignatureV1(sig *tka.NodeKeySignature) *tkaNodeKeySignatureV1 { + out := tkaNodeKeySignatureV1{ + SigKind: sig.SigKind.String(), + } + if len(sig.Pubkey) > 0 { + out.PublicKey = fmt.Sprintf("tlpub:%x", sig.Pubkey) + } + if len(sig.KeyID) > 0 { + out.KeyID = fmt.Sprintf("tlpub:%x", sig.KeyID) + } + out.Signature = base64.URLEncoding.EncodeToString(sig.Signature) + if sig.Nested != nil { + out.Nested = toTKANodeKeySignatureV1(sig.Nested) + } + if len(sig.WrappingPubkey) > 0 { + out.WrappingPublicKey = fmt.Sprintf("tlpub:%x", sig.WrappingPubkey) + } + return &out +} diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index 73b1d62016a75..3b374ece2543f 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -195,7 +195,7 @@ func runNetworkLockInit(ctx context.Context, args []string) error { } var nlStatusArgs struct { - json bool + json jsonoutput.JSONSchemaVersion } var nlStatusCmd = &ffcli.Command{ @@ -205,7 +205,7 
@@ var nlStatusCmd = &ffcli.Command{ Exec: runNetworkLockStatus, FlagSet: (func() *flag.FlagSet { fs := newFlagSet("lock status") - fs.BoolVar(&nlStatusArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)") + fs.Var(&nlStatusArgs.json, "json", "output in JSON format") return fs })(), } @@ -220,10 +220,12 @@ func runNetworkLockStatus(ctx context.Context, args []string) error { return fixTailscaledConnectError(err) } - if nlStatusArgs.json { - enc := jsonv1.NewEncoder(os.Stdout) - enc.SetIndent("", " ") - return enc.Encode(st) + if nlStatusArgs.json.IsSet { + if nlStatusArgs.json.Value == 1 { + return jsonoutput.PrintNetworkLockStatusJSONV1(os.Stdout, st) + } else { + return fmt.Errorf("unrecognised version: %q", nlStatusArgs.json.Value) + } } if st.Enabled { @@ -713,7 +715,7 @@ func runNetworkLockLog(ctx context.Context, args []string) error { func printNetworkLockLog(updates []ipnstate.NetworkLockUpdate, out io.Writer, jsonSchema jsonoutput.JSONSchemaVersion, useColor bool) error { if jsonSchema.IsSet { if jsonSchema.Value == 1 { - return jsonoutput.PrintNetworkLockJSONV1(out, updates) + return jsonoutput.PrintNetworkLockLogJSONV1(out, updates) } else { return fmt.Errorf("unrecognised version: %q", jsonSchema.Value) } diff --git a/cmd/tailscale/cli/network-lock_test.go b/cmd/tailscale/cli/network-lock_test.go index ccd2957ab560e..aa777ff922ba1 100644 --- a/cmd/tailscale/cli/network-lock_test.go +++ b/cmd/tailscale/cli/network-lock_test.go @@ -5,12 +5,16 @@ package cli import ( "bytes" + "net/netip" "testing" "github.com/google/go-cmp/cmp" + "go4.org/mem" "tailscale.com/cmd/tailscale/cli/jsonoutput" "tailscale.com/ipn/ipnstate" + "tailscale.com/tailcfg" "tailscale.com/tka" + "tailscale.com/types/key" "tailscale.com/types/tkatype" ) @@ -183,7 +187,6 @@ KeyID: tlpub:0202 t.Run("json-1", func(t *testing.T) { t.Parallel() - t.Logf("BOOM") var outBuf bytes.Buffer json := jsonoutput.JSONSchemaVersion{ @@ -195,10 +198,172 @@ KeyID: 
tlpub:0202 printNetworkLockLog(updates, &outBuf, json, useColor) want := jsonV1 - t.Logf("%s", outBuf.String()) if diff := cmp.Diff(outBuf.String(), want); diff != "" { t.Fatalf("wrong output (-got, +want):\n%s", diff) } }) } + +func TestNetworkLockStatusOutput(t *testing.T) { + aum := tka.AUM{ + MessageKind: tka.AUMNoOp, + } + h := aum.Hash() + head := [32]byte(h[:]) + + nodeKey1 := key.NodePublicFromRaw32(mem.B(bytes.Repeat([]byte{1}, 32))) + nodeKey2 := key.NodePublicFromRaw32(mem.B(bytes.Repeat([]byte{2}, 32))) + nodeKey3 := key.NodePublicFromRaw32(mem.B(bytes.Repeat([]byte{3}, 32))) + + nlPub := key.NLPublicFromEd25519Unsafe(bytes.Repeat([]byte{4}, 32)) + + trustedNlPub := key.NLPublicFromEd25519Unsafe(bytes.Repeat([]byte{5}, 32)) + + tailnetIPv4_A, tailnetIPv6_A := netip.MustParseAddr("100.99.99.99"), netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") + tailnetIPv4_B, tailnetIPv6_B := netip.MustParseAddr("100.88.88.88"), netip.MustParseAddr("fd7a:115c:a1e0::4101:512f") + + t.Run("json-1", func(t *testing.T) { + for _, tt := range []struct { + Name string + Status ipnstate.NetworkLockStatus + Want string + }{ + { + Name: "tailnet-lock-disabled", + Status: ipnstate.NetworkLockStatus{Enabled: false}, + Want: `{ + "SchemaVersion": "1", + "Enabled": false +} +`, + }, + { + Name: "tailnet-lock-disabled-with-keys", + Status: ipnstate.NetworkLockStatus{ + Enabled: false, + NodeKey: &nodeKey1, + PublicKey: trustedNlPub, + }, + Want: `{ + "SchemaVersion": "1", + "Enabled": false, + "PublicKey": "tlpub:0505050505050505050505050505050505050505050505050505050505050505", + "NodeKey": "nodekey:0101010101010101010101010101010101010101010101010101010101010101" +} +`, + }, + { + Name: "tailnet-lock-enabled", + Status: ipnstate.NetworkLockStatus{ + Enabled: true, + Head: &head, + PublicKey: nlPub, + NodeKey: &nodeKey1, + NodeKeySigned: false, + NodeKeySignature: nil, + TrustedKeys: []ipnstate.TKAKey{ + { + Kind: tka.Key25519.String(), + Votes: 1, + Key: trustedNlPub, + Metadata: 
map[string]string{"en": "one", "de": "eins", "es": "uno"}, + }, + }, + VisiblePeers: []*ipnstate.TKAPeer{ + { + Name: "authentic-associate", + ID: tailcfg.NodeID(1234), + StableID: tailcfg.StableNodeID("1234_AAAA_TEST"), + TailscaleIPs: []netip.Addr{tailnetIPv4_A, tailnetIPv6_A}, + NodeKey: nodeKey2, + NodeKeySignature: tka.NodeKeySignature{ + SigKind: tka.SigDirect, + Pubkey: []byte("22222222222222222222222222222222"), + KeyID: []byte("44444444444444444444444444444444"), + Signature: []byte("1234567890"), + WrappingPubkey: []byte("0987654321"), + }, + }, + }, + FilteredPeers: []*ipnstate.TKAPeer{ + { + Name: "bogus-bandit", + ID: tailcfg.NodeID(5678), + StableID: tailcfg.StableNodeID("5678_BBBB_TEST"), + TailscaleIPs: []netip.Addr{tailnetIPv4_B, tailnetIPv6_B}, + NodeKey: nodeKey3, + }, + }, + StateID: 98989898, + }, + Want: `{ + "SchemaVersion": "1", + "Enabled": true, + "PublicKey": "tlpub:0404040404040404040404040404040404040404040404040404040404040404", + "NodeKey": "nodekey:0101010101010101010101010101010101010101010101010101010101010101", + "Head": "WYIVHDR7JUIXBWAJT5UPSCAILEXB7OMINDFEFEPOPNTUCNXMY2KA", + "NodeKeySigned": false, + "NodeKeySignature": null, + "TrustedKeys": [ + { + "Kind": "25519", + "Votes": 1, + "Public": "tlpub:0505050505050505050505050505050505050505050505050505050505050505", + "Meta": { + "de": "eins", + "en": "one", + "es": "uno" + } + } + ], + "VisiblePeers": [ + { + "ID": "1234_AAAA_TEST", + "DNSName": "authentic-associate", + "TailscaleIPs": [ + "100.99.99.99", + "fd7a:115c:a1e0::701:b62a" + ], + "NodeKey": "nodekey:0202020202020202020202020202020202020202020202020202020202020202", + "NodeKeySignature": { + "SigKind": "direct", + "PublicKey": "tlpub:3232323232323232323232323232323232323232323232323232323232323232", + "KeyID": "tlpub:3434343434343434343434343434343434343434343434343434343434343434", + "Signature": "MTIzNDU2Nzg5MA==", + "WrappingPublicKey": "tlpub:30393837363534333231" + } + } + ], + "FilteredPeers": [ + { + "ID": 
"5678_BBBB_TEST", + "DNSName": "bogus-bandit", + "TailscaleIPs": [ + "100.88.88.88", + "fd7a:115c:a1e0::4101:512f" + ], + "NodeKey": "nodekey:0303030303030303030303030303030303030303030303030303030303030303" + } + ], + "State": 98989898 +} +`, + }, + } { + t.Run(tt.Name, func(t *testing.T) { + t.Parallel() + + var outBuf bytes.Buffer + err := jsonoutput.PrintNetworkLockStatusJSONV1(&outBuf, &tt.Status) + if err != nil { + t.Fatalf("PrintNetworkLockStatusJSONV1: %v", err) + } + + if diff := cmp.Diff(outBuf.String(), tt.Want); diff != "" { + t.Fatalf("wrong output (-got, +want):\n%s", diff) + } + }) + } + }) +} diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index f25c6fa9b5e36..246b26409b2b5 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -563,6 +563,7 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { outKeys := make([]ipnstate.TKAKey, len(keys)) for i, k := range keys { outKeys[i] = ipnstate.TKAKey{ + Kind: k.Kind.String(), Key: key.NLPublicFromEd25519Unsafe(k.Public), Metadata: k.Meta, Votes: k.Votes, diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index e7ae2d62bd6b2..213090b559692 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -89,6 +89,7 @@ type Status struct { // TKAKey describes a key trusted by network lock. type TKAKey struct { + Kind string Key key.NLPublic Metadata map[string]string Votes uint From dd1bb8ee42f90c4636fa4bc984b81cc6e705cfc9 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 8 Dec 2025 15:45:09 +0000 Subject: [PATCH 0801/1093] .github: add cigocacher release workflow To save rebuilding cigocacher on each CI job, build it on-demand, and publish a release similar to how we publish releases for tool/go to consume. Once the first release is done, we can add a new tool/cigocacher script that pins to a specific release for each branch to download. 
Updates tailscale/corp#10808 Change-Id: I7694b2c2240020ba2335eb467522cdd029469b6c Signed-off-by: Tom Proctor --- .github/workflows/cigocacher.yml | 73 ++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 .github/workflows/cigocacher.yml diff --git a/.github/workflows/cigocacher.yml b/.github/workflows/cigocacher.yml new file mode 100644 index 0000000000000..c4dd0c3c509a5 --- /dev/null +++ b/.github/workflows/cigocacher.yml @@ -0,0 +1,73 @@ +name: Build cigocacher + +on: + # Released on-demand. The commit will be used as part of the tag, so generally + # prefer to release from main where the commit is stable in linear history. + workflow_dispatch: + +jobs: + build: + strategy: + matrix: + GOOS: ["linux", "darwin", "windows"] + GOARCH: ["amd64", "arm64"] + runs-on: ubuntu-24.04 + env: + GOOS: "${{ matrix.GOOS }}" + GOARCH: "${{ matrix.GOARCH }}" + CGO_ENABLED: "0" + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - name: Build + run: | + OUT="cigocacher$(./tool/go env GOEXE)" + ./tool/go build -o "${OUT}" ./cmd/cigocacher/ + tar -zcf cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}.tar.gz "${OUT}" + + - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + with: + name: cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }} + path: cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}.tar.gz + + release: + runs-on: ubuntu-24.04 + needs: build + permissions: + contents: write + steps: + - name: Download all artifacts + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + with: + pattern: 'cigocacher-*' + merge-multiple: true + # This step is a simplified version of actions/create-release and + # actions/upload-release-asset, which are archived and unmaintained. 
+ - name: Create release + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + with: + script: | + const fs = require('fs'); + const path = require('path'); + + const { data: release } = await github.rest.repos.createRelease({ + owner: context.repo.owner, + repo: context.repo.repo, + tag_name: `cmd/cigocacher/${{ github.sha }}`, + name: `cigocacher-${{ github.sha }}`, + draft: false, + prerelease: true, + target_commitish: `${{ github.sha }}` + }); + + const files = fs.readdirSync('.').filter(f => f.endsWith('.tar.gz')); + + for (const file of files) { + await github.rest.repos.uploadReleaseAsset({ + owner: context.repo.owner, + repo: context.repo.repo, + release_id: release.id, + name: file, + data: fs.readFileSync(file) + }); + console.log(`Uploaded ${file}`); + } From 076d5c72148f285f68e99ac8fa09a60dd14d88bf Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Mon, 8 Dec 2025 10:47:32 -0800 Subject: [PATCH 0802/1093] appc,feature: add the start of new conn25 app connector When peers request an IP address mapping to be stored, the connector stores it in memory. 
Fixes tailscale/corp#34251 Signed-off-by: Fran Bull --- appc/conn25.go | 110 ++++++++++++++++ appc/conn25_test.go | 188 +++++++++++++++++++++++++++ cmd/tailscaled/depaware-min.txt | 5 +- cmd/tailscaled/depaware-minbox.txt | 5 +- cmd/tailscaled/depaware.txt | 3 +- feature/condregister/maybe_conn25.go | 8 ++ feature/conn25/conn25.go | 84 ++++++++++++ 7 files changed, 398 insertions(+), 5 deletions(-) create mode 100644 appc/conn25.go create mode 100644 appc/conn25_test.go create mode 100644 feature/condregister/maybe_conn25.go create mode 100644 feature/conn25/conn25.go diff --git a/appc/conn25.go b/appc/conn25.go new file mode 100644 index 0000000000000..b4890c26c0268 --- /dev/null +++ b/appc/conn25.go @@ -0,0 +1,110 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package appc + +import ( + "net/netip" + "sync" + + "tailscale.com/tailcfg" +) + +// Conn25 holds the developing state for the as yet nascent next generation app connector. +// There is currently (2025-12-08) no actual app connecting functionality. +type Conn25 struct { + mu sync.Mutex + transitIPs map[tailcfg.NodeID]map[netip.Addr]netip.Addr +} + +const dupeTransitIPMessage = "Duplicate transit address in ConnectorTransitIPRequest" + +// HandleConnectorTransitIPRequest creates a ConnectorTransitIPResponse in response to a ConnectorTransitIPRequest. +// It updates the connectors mapping of TransitIP->DestinationIP per peer (tailcfg.NodeID). +// If a peer has stored this mapping in the connector Conn25 will route traffic to TransitIPs to DestinationIPs for that peer. 
+func (c *Conn25) HandleConnectorTransitIPRequest(nid tailcfg.NodeID, ctipr ConnectorTransitIPRequest) ConnectorTransitIPResponse { + resp := ConnectorTransitIPResponse{} + seen := map[netip.Addr]bool{} + for _, each := range ctipr.TransitIPs { + if seen[each.TransitIP] { + resp.TransitIPs = append(resp.TransitIPs, TransitIPResponse{ + Code: OtherFailure, + Message: dupeTransitIPMessage, + }) + continue + } + tipresp := c.handleTransitIPRequest(nid, each) + seen[each.TransitIP] = true + resp.TransitIPs = append(resp.TransitIPs, tipresp) + } + return resp +} + +func (c *Conn25) handleTransitIPRequest(nid tailcfg.NodeID, tipr TransitIPRequest) TransitIPResponse { + c.mu.Lock() + defer c.mu.Unlock() + if c.transitIPs == nil { + c.transitIPs = make(map[tailcfg.NodeID]map[netip.Addr]netip.Addr) + } + peerMap, ok := c.transitIPs[nid] + if !ok { + peerMap = make(map[netip.Addr]netip.Addr) + c.transitIPs[nid] = peerMap + } + peerMap[tipr.TransitIP] = tipr.DestinationIP + return TransitIPResponse{} +} + +func (c *Conn25) transitIPTarget(nid tailcfg.NodeID, tip netip.Addr) netip.Addr { + c.mu.Lock() + defer c.mu.Unlock() + return c.transitIPs[nid][tip] +} + +// TransitIPRequest details a single TransitIP allocation request from a client to a +// connector. +type TransitIPRequest struct { + // TransitIP is the intermediate destination IP that will be received at this + // connector and will be replaced by DestinationIP when performing DNAT. + TransitIP netip.Addr `json:"transitIP,omitzero"` + + // DestinationIP is the final destination IP that connections to the TransitIP + // should be mapped to when performing DNAT. + DestinationIP netip.Addr `json:"destinationIP,omitzero"` +} + +// ConnectorTransitIPRequest is the request body for a PeerAPI request to +// /connector/transit-ip and can include zero or more TransitIP allocation requests. +type ConnectorTransitIPRequest struct { + // TransitIPs is the list of requested mappings. 
+	TransitIPs []TransitIPRequest `json:"transitIPs,omitempty"`
+}
+
+// TransitIPResponseCode appears in TransitIPResponse and signifies success or failure status.
+type TransitIPResponseCode int
+
+const (
+	// OK indicates that the mapping was created as requested.
+	OK TransitIPResponseCode = 0
+
+	// OtherFailure indicates that the mapping failed for a reason that does not have
+	// another relevant [TransitIPResponseCode].
+	OtherFailure TransitIPResponseCode = 1
+)
+
+// TransitIPResponse is the response to a TransitIPRequest.
+type TransitIPResponse struct {
+	// Code is an error code indicating success or failure of the [TransitIPRequest].
+	Code TransitIPResponseCode `json:"code,omitzero"`
+	// Message is an error message explaining what happened, suitable for logging but
+	// not necessarily suitable for displaying in a UI to non-technical users. It
+	// should be empty when [Code] is [OK].
+	Message string `json:"message,omitzero"`
+}
+
+// ConnectorTransitIPResponse is the response to a ConnectorTransitIPRequest.
+type ConnectorTransitIPResponse struct {
+	// TransitIPs is the list of outcomes for each requested mapping. Elements
+	// correspond to the order of [ConnectorTransitIPRequest.TransitIPs].
+	TransitIPs []TransitIPResponse `json:"transitIPs,omitempty"`
+}
diff --git a/appc/conn25_test.go b/appc/conn25_test.go
new file mode 100644
index 0000000000000..ab6c4be37c592
--- /dev/null
+++ b/appc/conn25_test.go
@@ -0,0 +1,188 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package appc
+
+import (
+	"net/netip"
+	"testing"
+
+	"tailscale.com/tailcfg"
+)
+
+// TestHandleConnectorTransitIPRequestZeroLength tests that if sent a
+// ConnectorTransitIPRequest with 0 TransitIPRequests, we respond with a
+// ConnectorTransitIPResponse with 0 TransitIPResponses.
+func TestHandleConnectorTransitIPRequestZeroLength(t *testing.T) { + c := &Conn25{} + req := ConnectorTransitIPRequest{} + nid := tailcfg.NodeID(1) + + resp := c.HandleConnectorTransitIPRequest(nid, req) + if len(resp.TransitIPs) != 0 { + t.Fatalf("n TransitIPs in response: %d, want 0", len(resp.TransitIPs)) + } +} + +// TestHandleConnectorTransitIPRequestStoresAddr tests that if sent a +// request with a transit addr and a destination addr we store that mapping +// and can retrieve it. If sent another req with a different dst for that transit addr +// we store that instead. +func TestHandleConnectorTransitIPRequestStoresAddr(t *testing.T) { + c := &Conn25{} + nid := tailcfg.NodeID(1) + tip := netip.MustParseAddr("0.0.0.1") + dip := netip.MustParseAddr("1.2.3.4") + dip2 := netip.MustParseAddr("1.2.3.5") + mr := func(t, d netip.Addr) ConnectorTransitIPRequest { + return ConnectorTransitIPRequest{ + TransitIPs: []TransitIPRequest{ + {TransitIP: t, DestinationIP: d}, + }, + } + } + + resp := c.HandleConnectorTransitIPRequest(nid, mr(tip, dip)) + if len(resp.TransitIPs) != 1 { + t.Fatalf("n TransitIPs in response: %d, want 1", len(resp.TransitIPs)) + } + got := resp.TransitIPs[0].Code + if got != TransitIPResponseCode(0) { + t.Fatalf("TransitIP Code: %d, want 0", got) + } + gotAddr := c.transitIPTarget(nid, tip) + if gotAddr != dip { + t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip) + } + + // mapping can be overwritten + resp2 := c.HandleConnectorTransitIPRequest(nid, mr(tip, dip2)) + if len(resp2.TransitIPs) != 1 { + t.Fatalf("n TransitIPs in response: %d, want 1", len(resp2.TransitIPs)) + } + got2 := resp.TransitIPs[0].Code + if got2 != TransitIPResponseCode(0) { + t.Fatalf("TransitIP Code: %d, want 0", got2) + } + gotAddr2 := c.transitIPTarget(nid, tip) + if gotAddr2 != dip2 { + t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip2) + } +} + +// TestHandleConnectorTransitIPRequestMultipleTIP tests that we can +// 
get a req with multiple mappings and we store them all. Including +// multiple transit addrs for the same destination. +func TestHandleConnectorTransitIPRequestMultipleTIP(t *testing.T) { + c := &Conn25{} + nid := tailcfg.NodeID(1) + tip := netip.MustParseAddr("0.0.0.1") + tip2 := netip.MustParseAddr("0.0.0.2") + tip3 := netip.MustParseAddr("0.0.0.3") + dip := netip.MustParseAddr("1.2.3.4") + dip2 := netip.MustParseAddr("1.2.3.5") + req := ConnectorTransitIPRequest{ + TransitIPs: []TransitIPRequest{ + {TransitIP: tip, DestinationIP: dip}, + {TransitIP: tip2, DestinationIP: dip2}, + // can store same dst addr for multiple transit addrs + {TransitIP: tip3, DestinationIP: dip}, + }, + } + resp := c.HandleConnectorTransitIPRequest(nid, req) + if len(resp.TransitIPs) != 3 { + t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs)) + } + + for i := 0; i < 3; i++ { + got := resp.TransitIPs[i].Code + if got != TransitIPResponseCode(0) { + t.Fatalf("i=%d TransitIP Code: %d, want 0", i, got) + } + } + gotAddr1 := c.transitIPTarget(nid, tip) + if gotAddr1 != dip { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip) + } + gotAddr2 := c.transitIPTarget(nid, tip2) + if gotAddr2 != dip2 { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip2) + } + gotAddr3 := c.transitIPTarget(nid, tip3) + if gotAddr3 != dip { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip3, gotAddr3, dip) + } +} + +// TestHandleConnectorTransitIPRequestSameTIP tests that if we get +// a req that has more than one TransitIPRequest for the same transit addr +// only the first is stored, and the subsequent ones get an error code and +// message in the response. 
+func TestHandleConnectorTransitIPRequestSameTIP(t *testing.T) {
+	c := &Conn25{}
+	nid := tailcfg.NodeID(1)
+	tip := netip.MustParseAddr("0.0.0.1")
+	tip2 := netip.MustParseAddr("0.0.0.2")
+	dip := netip.MustParseAddr("1.2.3.4")
+	dip2 := netip.MustParseAddr("1.2.3.5")
+	dip3 := netip.MustParseAddr("1.2.3.6")
+	req := ConnectorTransitIPRequest{
+		TransitIPs: []TransitIPRequest{
+			{TransitIP: tip, DestinationIP: dip},
+			// cannot have dupe TransitIPs in one ConnectorTransitIPRequest
+			{TransitIP: tip, DestinationIP: dip2},
+			{TransitIP: tip2, DestinationIP: dip3},
+		},
+	}
+
+	resp := c.HandleConnectorTransitIPRequest(nid, req)
+	if len(resp.TransitIPs) != 3 {
+		t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs))
+	}
+
+	got := resp.TransitIPs[0].Code
+	if got != TransitIPResponseCode(0) {
+		t.Fatalf("i=0 TransitIP Code: %d, want 0", got)
+	}
+	msg := resp.TransitIPs[0].Message
+	if msg != "" {
+		t.Fatalf("i=0 TransitIP Message: \"%s\", want \"%s\"", msg, "")
+	}
+	got1 := resp.TransitIPs[1].Code
+	if got1 != TransitIPResponseCode(1) {
+		t.Fatalf("i=1 TransitIP Code: %d, want 1", got1)
+	}
+	msg1 := resp.TransitIPs[1].Message
+	if msg1 != dupeTransitIPMessage {
+		t.Fatalf("i=1 TransitIP Message: \"%s\", want \"%s\"", msg1, dupeTransitIPMessage)
+	}
+	got2 := resp.TransitIPs[2].Code
+	if got2 != TransitIPResponseCode(0) {
+		t.Fatalf("i=2 TransitIP Code: %d, want 0", got2)
+	}
+	msg2 := resp.TransitIPs[2].Message
+	if msg2 != "" {
+		t.Fatalf("i=2 TransitIP Message: \"%s\", want \"%s\"", msg, "")
+	}
+
+	gotAddr1 := c.transitIPTarget(nid, tip)
+	if gotAddr1 != dip {
+		t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip)
+	}
+	gotAddr2 := c.transitIPTarget(nid, tip2)
+	if gotAddr2 != dip3 {
+		t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip3)
+	}
+}
+
+// TestTransitIPTargetUnknownTIP tests that unknown transit addresses can be looked up without problem.
+func TestTransitIPTargetUnknownTIP(t *testing.T) { + c := &Conn25{} + nid := tailcfg.NodeID(1) + tip := netip.MustParseAddr("0.0.0.1") + got := c.transitIPTarget(nid, tip) + want := netip.Addr{} + if got != want { + t.Fatalf("Unknown transit addr, want: %v, got %v", want, got) + } +} diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 69e6559a0173b..942c962280fbf 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -35,7 +35,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled @@ -58,13 +58,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister + tailscale.com/feature/conn25 from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ 
tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 55a21c426b5d5..acc4241033411 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -48,7 +48,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale @@ -80,6 +80,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli+ + tailscale.com/feature/conn25 from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ @@ -87,7 +88,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled 
tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 79f92deb92f38..5a5f0a1b31136 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -243,7 +243,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled tailscale.com/client/local from tailscale.com/client/web+ @@ -285,6 +285,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister + tailscale.com/feature/conn25 from tailscale.com/feature/condregister tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister tailscale.com/feature/doctor from tailscale.com/feature/condregister tailscale.com/feature/drive from tailscale.com/feature/condregister diff --git a/feature/condregister/maybe_conn25.go b/feature/condregister/maybe_conn25.go new file mode 100644 index 0000000000000..fb885bfe32fc1 --- /dev/null +++ b/feature/condregister/maybe_conn25.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_conn25 + +package condregister + +import _ "tailscale.com/feature/conn25" diff --git a/feature/conn25/conn25.go b/feature/conn25/conn25.go new file mode 100644 index 0000000000000..e7baca4bd10b7 --- /dev/null +++ 
b/feature/conn25/conn25.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package conn25 registers the conn25 feature and implements its associated ipnext.Extension. +package conn25 + +import ( + "encoding/json" + "net/http" + + "tailscale.com/appc" + "tailscale.com/feature" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/types/logger" +) + +// featureName is the name of the feature implemented by this package. +// It is also the [extension] name and the log prefix. +const featureName = "conn25" + +func init() { + feature.Register(featureName) + newExtension := func(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + e := &extension{ + conn: &appc.Conn25{}, + } + return e, nil + } + ipnext.RegisterExtension(featureName, newExtension) + ipnlocal.RegisterPeerAPIHandler("/v0/connector/transit-ip", handleConnectorTransitIP) +} + +func handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](h.LocalBackend()) + if !ok { + http.Error(w, "miswired", http.StatusInternalServerError) + return + } + e.handleConnectorTransitIP(h, w, r) +} + +// extension is an [ipnext.Extension] managing the connector on platforms +// that import this package. +type extension struct { + conn *appc.Conn25 +} + +// Name implements [ipnext.Extension]. +func (e *extension) Name() string { + return featureName +} + +// Init implements [ipnext.Extension]. +func (e *extension) Init(host ipnext.Host) error { + return nil +} + +// Shutdown implements [ipnlocal.Extension]. 
+func (e *extension) Shutdown() error { + return nil +} + +func (e *extension) handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + const maxBodyBytes = 1024 * 1024 + defer r.Body.Close() + if r.Method != "POST" { + http.Error(w, "Method should be POST", http.StatusMethodNotAllowed) + return + } + var req appc.ConnectorTransitIPRequest + err := json.NewDecoder(http.MaxBytesReader(w, r.Body, maxBodyBytes+1)).Decode(&req) + if err != nil { + http.Error(w, "Error decoding JSON", http.StatusBadRequest) + return + } + resp := e.conn.HandleConnectorTransitIPRequest(h.Peer().ID(), req) + bs, err := json.Marshal(resp) + if err != nil { + http.Error(w, "Error encoding JSON", http.StatusInternalServerError) + return + } + w.Write(bs) +} From 363d882306bf8fe2e6ff72bf928516b756371300 Mon Sep 17 00:00:00 2001 From: Simar Date: Mon, 24 Nov 2025 19:12:02 -0700 Subject: [PATCH 0803/1093] net/udprelay: use `mono.Time` instead of `time.Time` Fixes: https://github.com/tailscale/tailscale/issues/18064 Signed-off-by: Simar --- net/udprelay/server.go | 25 +++++++++++++------------ net/udprelay/server_test.go | 3 ++- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 26b27bb7f5982..cf62e7fbc62df 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -36,6 +36,7 @@ import ( "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" "tailscale.com/tstime" + "tailscale.com/tstime/mono" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/nettype" @@ -78,7 +79,7 @@ type Server struct { mu sync.Mutex // guards the following fields macSecrets [][blake2s.Size]byte // [0] is most recent, max 2 elements - macSecretRotatedAt time.Time + macSecretRotatedAt mono.Time derpMap *tailcfg.DERPMap onlyStaticAddrPorts bool // no dynamic addr port discovery when set staticAddrPorts views.Slice[netip.AddrPort] // static ip:port pairs set with 
[Server.SetStaticAddrPorts] @@ -109,13 +110,13 @@ type serverEndpoint struct { discoSharedSecrets [2]key.DiscoShared inProgressGeneration [2]uint32 // or zero if a handshake has never started, or has just completed boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg - lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time - packetsRx [2]uint64 // num packets received from/sent by each client after they are bound - bytesRx [2]uint64 // num bytes received from/sent by each client after they are bound + lastSeen [2]mono.Time + packetsRx [2]uint64 // num packets received from/sent by each client after they are bound + bytesRx [2]uint64 // num bytes received from/sent by each client after they are bound lamportID uint64 vni uint32 - allocatedAt time.Time + allocatedAt mono.Time } func blakeMACFromBindMsg(blakeKey [blake2s.Size]byte, src netip.AddrPort, msg disco.BindUDPRelayEndpointCommon) ([blake2s.Size]byte, error) { @@ -216,7 +217,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex if bytes.Equal(mac[:], discoMsg.Challenge[:]) { // Handshake complete. Update the binding for this sender. 
e.boundAddrPorts[senderIndex] = from - e.lastSeen[senderIndex] = time.Now() // record last seen as bound time + e.lastSeen[senderIndex] = mono.Now() // record last seen as bound time e.inProgressGeneration[senderIndex] = 0 // reset to zero, which indicates there is no in-progress handshake return nil, netip.AddrPort{} } @@ -263,7 +264,7 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []by return e.handleDiscoControlMsg(from, senderIndex, discoMsg, serverDisco, macSecrets) } -func (e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now time.Time) (write []byte, to netip.AddrPort) { +func (e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now mono.Time) (write []byte, to netip.AddrPort) { if !e.isBound() { // not a control packet, but serverEndpoint isn't bound return nil, netip.AddrPort{} @@ -285,7 +286,7 @@ func (e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now tim } } -func (e *serverEndpoint) isExpired(now time.Time, bindLifetime, steadyStateLifetime time.Duration) bool { +func (e *serverEndpoint) isExpired(now mono.Time, bindLifetime, steadyStateLifetime time.Duration) bool { if !e.isBound() { if now.Sub(e.allocatedAt) > bindLifetime { return true @@ -653,7 +654,7 @@ func (s *Server) endpointGCLoop() { defer ticker.Stop() gc := func() { - now := time.Now() + now := mono.Now() // TODO: consider performance implications of scanning all endpoints and // holding s.mu for the duration. Keep it simple (and slow) for now. 
s.mu.Lock() @@ -700,7 +701,7 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to n return nil, netip.AddrPort{} } - now := time.Now() + now := mono.Now() if gh.Control { if gh.Protocol != packet.GeneveProtocolDisco { // control packet, but not Disco @@ -713,7 +714,7 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to n return e.handleDataPacket(from, b, now) } -func (s *Server) maybeRotateMACSecretLocked(now time.Time) { +func (s *Server) maybeRotateMACSecretLocked(now mono.Time) { if !s.macSecretRotatedAt.IsZero() && now.Sub(s.macSecretRotatedAt) < macSecretRotationInterval { return } @@ -908,7 +909,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv e = &serverEndpoint{ discoPubKeys: pair, lamportID: s.lamportID, - allocatedAt: time.Now(), + allocatedAt: mono.Now(), vni: vni, } e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys.Get()[0]) diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 582d4cf671918..bc76801079edc 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -18,6 +18,7 @@ import ( "golang.org/x/crypto/blake2s" "tailscale.com/disco" "tailscale.com/net/packet" + "tailscale.com/tstime/mono" "tailscale.com/types/key" "tailscale.com/types/views" ) @@ -452,7 +453,7 @@ func Benchmark_blakeMACFromBindMsg(b *testing.B) { func TestServer_maybeRotateMACSecretLocked(t *testing.T) { s := &Server{} - start := time.Now() + start := mono.Now() s.maybeRotateMACSecretLocked(start) qt.Assert(t, len(s.macSecrets), qt.Equals, 1) macSecret := s.macSecrets[0] From a9b37c510ce46dce9ba8459dd30b6cbef0f23e17 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Tue, 9 Dec 2025 11:25:34 -0800 Subject: [PATCH 0804/1093] net/udprelay: re-use mono.Time in control packet handling Fixes tailscale/corp#35100 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git 
a/net/udprelay/server.go b/net/udprelay/server.go index cf62e7fbc62df..d595787805aba 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -141,7 +141,7 @@ func blakeMACFromBindMsg(blakeKey [blake2s.Size]byte, src netip.AddrPort, msg di return out, nil } -func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, serverDisco key.DiscoPublic, macSecrets [][blake2s.Size]byte) (write []byte, to netip.AddrPort) { +func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, serverDisco key.DiscoPublic, macSecrets [][blake2s.Size]byte, now mono.Time) (write []byte, to netip.AddrPort) { if senderIndex != 0 && senderIndex != 1 { return nil, netip.AddrPort{} } @@ -217,7 +217,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex if bytes.Equal(mac[:], discoMsg.Challenge[:]) { // Handshake complete. Update the binding for this sender. e.boundAddrPorts[senderIndex] = from - e.lastSeen[senderIndex] = mono.Now() // record last seen as bound time + e.lastSeen[senderIndex] = now // record last seen as bound time e.inProgressGeneration[senderIndex] = 0 // reset to zero, which indicates there is no in-progress handshake return nil, netip.AddrPort{} } @@ -230,7 +230,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex } } -func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic, macSecrets [][blake2s.Size]byte) (write []byte, to netip.AddrPort) { +func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic, macSecrets [][blake2s.Size]byte, now mono.Time) (write []byte, to netip.AddrPort) { senderRaw, isDiscoMsg := disco.Source(b) if !isDiscoMsg { // Not a Disco message @@ -261,7 +261,7 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []by return nil, netip.AddrPort{} } - 
return e.handleDiscoControlMsg(from, senderIndex, discoMsg, serverDisco, macSecrets) + return e.handleDiscoControlMsg(from, senderIndex, discoMsg, serverDisco, macSecrets, now) } func (e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now mono.Time) (write []byte, to netip.AddrPort) { @@ -709,7 +709,7 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to n } msg := b[packet.GeneveFixedHeaderLength:] s.maybeRotateMACSecretLocked(now) - return e.handleSealedDiscoControlMsg(from, msg, s.discoPublic, s.macSecrets) + return e.handleSealedDiscoControlMsg(from, msg, s.discoPublic, s.macSecrets, now) } return e.handleDataPacket(from, b, now) } From 1dfdee8521e93cd20eda65254728c4230a216a2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Tue, 9 Dec 2025 14:55:26 -0500 Subject: [PATCH 0805/1093] net/dns: retrample resolve.conf when another process has trampled it (#18069) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When using the resolve.conf file for setting DNS, it is possible that some other services will trample the file and overwrite our set DNS server. Experiments has shown this to be a racy error depending on how quickly processes start. Make an attempt to trample back the file a limited number of times if the file is changed. 
Updates #16635 Signed-off-by: Claus Lensbøl --- cmd/tailscaled/tailscaled.go | 4 +- net/dns/direct.go | 38 ++++++++++- net/dns/direct_linux_test.go | 109 ++++++++++++++++++++++++++++++++ net/dns/manager.go | 49 +++++++++++--- net/dns/manager_darwin.go | 5 +- net/dns/manager_default.go | 3 +- net/dns/manager_freebsd.go | 11 ++-- net/dns/manager_linux.go | 7 +- net/dns/manager_openbsd.go | 11 ++-- net/dns/manager_plan9.go | 3 +- net/dns/manager_solaris.go | 5 +- net/dns/manager_tcp_test.go | 4 +- net/dns/manager_test.go | 42 +++++++++++- net/dns/manager_windows.go | 7 +- net/dns/manager_windows_test.go | 4 +- net/dns/wsl_windows.go | 2 +- wgengine/userspace.go | 2 +- 17 files changed, 261 insertions(+), 45 deletions(-) create mode 100644 net/dns/direct_linux_test.go diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index d9afffbdbd710..5c8611c8e41d1 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -772,7 +772,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo // configuration being unavailable (from the noop // manager). More in Issue 4017. // TODO(bradfitz): add a Synology-specific DNS manager. 
- conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name + conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.Bus.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name if err != nil { return false, fmt.Errorf("dns.NewOSConfigurator: %w", err) } @@ -806,7 +806,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo return false, fmt.Errorf("creating router: %w", err) } - d, err := dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) + d, err := dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.Bus.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) if err != nil { dev.Close() r.Close() diff --git a/net/dns/direct.go b/net/dns/direct.go index 59eb0696498e8..78495d4737d1d 100644 --- a/net/dns/direct.go +++ b/net/dns/direct.go @@ -21,6 +21,7 @@ import ( "slices" "strings" "sync" + "sync/atomic" "time" "tailscale.com/feature" @@ -29,6 +30,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/types/logger" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/version/distro" ) @@ -135,6 +137,11 @@ type directManager struct { // but is better than having non-functioning DNS. 
renameBroken bool + trampleCount atomic.Int64 + trampleTimer *time.Timer + eventClient *eventbus.Client + trampleDNSPub *eventbus.Publisher[TrampleDNS] + ctx context.Context // valid until Close ctxClose context.CancelFunc // closes ctx @@ -145,11 +152,13 @@ type directManager struct { } //lint:ignore U1000 used in manager_{freebsd,openbsd}.go -func newDirectManager(logf logger.Logf, health *health.Tracker) *directManager { - return newDirectManagerOnFS(logf, health, directFS{}) +func newDirectManager(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus) *directManager { + return newDirectManagerOnFS(logf, health, bus, directFS{}) } -func newDirectManagerOnFS(logf logger.Logf, health *health.Tracker, fs wholeFileFS) *directManager { +var trampleWatchDuration = 5 * time.Second + +func newDirectManagerOnFS(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, fs wholeFileFS) *directManager { ctx, cancel := context.WithCancel(context.Background()) m := &directManager{ logf: logf, @@ -158,6 +167,13 @@ func newDirectManagerOnFS(logf logger.Logf, health *health.Tracker, fs wholeFile ctx: ctx, ctxClose: cancel, } + if bus != nil { + m.eventClient = bus.Client("dns.directManager") + m.trampleDNSPub = eventbus.Publish[TrampleDNS](m.eventClient) + } + m.trampleTimer = time.AfterFunc(trampleWatchDuration, func() { + m.trampleCount.Store(0) + }) go m.runFileWatcher() return m } @@ -481,10 +497,26 @@ func (m *directManager) checkForFileTrample() { } m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? 
current contents: %q", show) m.health.SetUnhealthy(resolvTrampleWarnable, nil) + if m.trampleDNSPub != nil { + n := m.trampleCount.Add(1) + + if n < 10 { + m.trampleDNSPub.Publish(TrampleDNS{ + LastTrample: time.Now(), + TramplesInTimeout: n, + }) + m.trampleTimer.Reset(trampleWatchDuration) + } else { + m.logf("trample: resolv.conf overwritten %d times, no longer attempting to replace it.", n) + } + } } func (m *directManager) Close() error { m.ctxClose() + if m.eventClient != nil { + m.eventClient.Close() + } // We used to keep a file for the tailscale config and symlinked // to it, but then we stopped because /etc/resolv.conf being a diff --git a/net/dns/direct_linux_test.go b/net/dns/direct_linux_test.go new file mode 100644 index 0000000000000..035763a45f30d --- /dev/null +++ b/net/dns/direct_linux_test.go @@ -0,0 +1,109 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package dns + +import ( + "context" + "fmt" + "net/netip" + "os" + "path/filepath" + "testing" + "testing/synctest" + + "github.com/illarion/gonotify/v3" + + "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" +) + +func TestDNSTrampleRecovery(t *testing.T) { + HookWatchFile.Set(watchFile) + synctest.Test(t, func(t *testing.T) { + tmp := t.TempDir() + if err := os.MkdirAll(filepath.Join(tmp, "etc"), 0700); err != nil { + t.Fatal(err) + } + const resolvPath = "/etc/resolv.conf" + fs := directFS{prefix: tmp} + readFile := func(t *testing.T, path string) string { + t.Helper() + b, err := fs.ReadFile(path) + if err != nil { + t.Errorf("Reading DNS config: %v", err) + } + return string(b) + } + + bus := eventbustest.NewBus(t) + eventbustest.LogAllEvents(t, bus) + m := newDirectManagerOnFS(t.Logf, nil, bus, fs) + defer m.Close() + + if err := m.SetDNS(OSConfig{ + Nameservers: []netip.Addr{netip.MustParseAddr("8.8.8.8"), netip.MustParseAddr("8.8.4.4")}, + SearchDomains: []dnsname.FQDN{"ts.net.", "ts-dns.test."}, + 
MatchDomains: []dnsname.FQDN{"ignored."}, + }); err != nil { + t.Fatal(err) + } + + const want = `# resolv.conf(5) file generated by tailscale +# For more info, see https://tailscale.com/s/resolvconf-overwrite +# DO NOT EDIT THIS FILE BY HAND -- CHANGES WILL BE OVERWRITTEN + +nameserver 8.8.8.8 +nameserver 8.8.4.4 +search ts.net ts-dns.test +` + if got := readFile(t, resolvPath); got != want { + t.Fatalf("resolv.conf:\n%s, want:\n%s", got, want) + } + + tw := eventbustest.NewWatcher(t, bus) + + const trample = "Hvem er det som tramper på min bro?" + if err := fs.WriteFile(resolvPath, []byte(trample), 0644); err != nil { + t.Fatal(err) + } + synctest.Wait() + + if err := eventbustest.Expect(tw, eventbustest.Type[TrampleDNS]()); err != nil { + t.Errorf("did not see trample event: %s", err) + } + }) +} + +// watchFile is generally copied from linuxtrample, but cancels the context +// after the first call to cb() after the first trample to end the test. +func watchFile(ctx context.Context, dir, filename string, cb func()) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + const events = gonotify.IN_ATTRIB | + gonotify.IN_CLOSE_WRITE | + gonotify.IN_CREATE | + gonotify.IN_DELETE | + gonotify.IN_MODIFY | + gonotify.IN_MOVE + + watcher, err := gonotify.NewDirWatcher(ctx, events, dir) + if err != nil { + return fmt.Errorf("NewDirWatcher: %w", err) + } + + for { + select { + case event := <-watcher.C: + if event.Name == filename { + cb() + cancel() + } + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/net/dns/manager.go b/net/dns/manager.go index de99fe646f786..4441c4f69ef70 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -55,6 +55,8 @@ type Manager struct { logf logger.Logf health *health.Tracker + eventClient *eventbus.Client + activeQueriesAtomic int32 ctx context.Context // good until Down @@ -69,10 +71,10 @@ type Manager struct { config *Config // Tracks the last viable DNS configuration set by Set. 
nil on failures other than compilation failures or if set has never been called. } -// NewManagers created a new manager from the given config. +// NewManager created a new manager from the given config. // // knobs may be nil. -func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, dialer *tsdial.Dialer, linkSel resolver.ForwardLinkSelector, knobs *controlknobs.Knobs, goos string) *Manager { +func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, dialer *tsdial.Dialer, linkSel resolver.ForwardLinkSelector, knobs *controlknobs.Knobs, goos string, bus *eventbus.Bus) *Manager { if !buildfeatures.HasDNS { return nil } @@ -96,6 +98,20 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, goos: goos, } + m.eventClient = bus.Client("dns.Manager") + eventbus.SubscribeFunc(m.eventClient, func(trample TrampleDNS) { + m.mu.Lock() + defer m.mu.Unlock() + if m.config == nil { + m.logf("resolve.conf was trampled, but there is no DNS config") + return + } + m.logf("resolve.conf was trampled, setting existing config again") + if err := m.setLocked(*m.config); err != nil { + m.logf("error setting DNS config: %s", err) + } + }) + m.ctx, m.ctxCancel = context.WithCancel(context.Background()) m.logf("using %T", m.os) return m @@ -178,9 +194,7 @@ func (m *Manager) setLocked(cfg Config) error { m.config = nil return err } - if err := m.os.SetDNS(ocfg); err != nil { - m.config = nil - m.health.SetUnhealthy(osConfigurationSetWarnable, health.Args{health.ArgError: err.Error()}) + if err := m.setDNSLocked(ocfg); err != nil { return err } @@ -190,6 +204,15 @@ func (m *Manager) setLocked(cfg Config) error { return nil } +func (m *Manager) setDNSLocked(ocfg OSConfig) error { + if err := m.os.SetDNS(ocfg); err != nil { + m.config = nil + m.health.SetUnhealthy(osConfigurationSetWarnable, health.Args{health.ArgError: err.Error()}) + return err + } + return nil +} + // compileHostEntries creates a list of single-label 
resolutions possible // from the configured hosts and search domains. // The entries are compiled in the order of the search domains, then the hosts. @@ -457,6 +480,13 @@ const ( maxReqSizeTCP = 4096 ) +// TrampleDNS is an an event indicating we detected that DNS config was +// overwritten by another process. +type TrampleDNS struct { + LastTrample time.Time + TramplesInTimeout int64 +} + // dnsTCPSession services DNS requests sent over TCP. type dnsTCPSession struct { m *Manager @@ -585,6 +615,7 @@ func (m *Manager) Down() error { if err := m.os.Close(); err != nil { return err } + m.eventClient.Close() m.resolver.Close() return nil } @@ -605,7 +636,7 @@ func CleanUp(logf logger.Logf, netMon *netmon.Monitor, bus *eventbus.Bus, health if !buildfeatures.HasDNS { return } - oscfg, err := NewOSConfigurator(logf, health, policyclient.Get(), nil, interfaceName) + oscfg, err := NewOSConfigurator(logf, health, bus, policyclient.Get(), nil, interfaceName) if err != nil { logf("creating dns cleanup: %v", err) return @@ -613,12 +644,10 @@ func CleanUp(logf logger.Logf, netMon *netmon.Monitor, bus *eventbus.Bus, health d := &tsdial.Dialer{Logf: logf} d.SetNetMon(netMon) d.SetBus(bus) - dns := NewManager(logf, oscfg, health, d, nil, nil, runtime.GOOS) + dns := NewManager(logf, oscfg, health, d, nil, nil, runtime.GOOS, bus) if err := dns.Down(); err != nil { logf("dns down: %v", err) } } -var ( - metricDNSQueryErrorQueue = clientmetric.NewCounter("dns_query_local_error_queue") -) +var metricDNSQueryErrorQueue = clientmetric.NewCounter("dns_query_local_error_queue") diff --git a/net/dns/manager_darwin.go b/net/dns/manager_darwin.go index d73ad71a829a5..01c920626e466 100644 --- a/net/dns/manager_darwin.go +++ b/net/dns/manager_darwin.go @@ -13,14 +13,15 @@ import ( "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator 
creates a new OS configurator. // -// The health tracker and the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logf logger.Logf, _ *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, ifName string) (OSConfigurator, error) { +// The health tracker, bus and the knobs may be nil and are ignored on this platform. +func NewOSConfigurator(logf logger.Logf, _ *health.Tracker, _ *eventbus.Bus, _ policyclient.Client, _ *controlknobs.Knobs, ifName string) (OSConfigurator, error) { return &darwinConfigurator{logf: logf, ifName: ifName}, nil } diff --git a/net/dns/manager_default.go b/net/dns/manager_default.go index 1a86690c5d829..42e7d295d713f 100644 --- a/net/dns/manager_default.go +++ b/net/dns/manager_default.go @@ -9,12 +9,13 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // // The health tracker and the knobs may be nil and are ignored on this platform. -func NewOSConfigurator(logger.Logf, *health.Tracker, policyclient.Client, *controlknobs.Knobs, string) (OSConfigurator, error) { +func NewOSConfigurator(logger.Logf, *health.Tracker, *eventbus.Bus, policyclient.Client, *controlknobs.Knobs, string) (OSConfigurator, error) { return NewNoopManager() } diff --git a/net/dns/manager_freebsd.go b/net/dns/manager_freebsd.go index 3237fb382fbd3..da3a821ce3cc4 100644 --- a/net/dns/manager_freebsd.go +++ b/net/dns/manager_freebsd.go @@ -10,16 +10,17 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/syspolicy/policyclient" ) // NewOSConfigurator creates a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. 
-func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, _ string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, _ policyclient.Client, _ *controlknobs.Knobs, _ string) (OSConfigurator, error) { bs, err := os.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil } if err != nil { return nil, fmt.Errorf("reading /etc/resolv.conf: %w", err) @@ -29,16 +30,16 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient. case "resolvconf": switch resolvconfStyle() { case "": - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil case "debian": return newDebianResolvconfManager(logf) case "openresolv": return newOpenresolvManager(logf) default: logf("[unexpected] got unknown flavor of resolvconf %q, falling back to direct manager", resolvconfStyle()) - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil } default: - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil } } diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 4304df2616e98..4fbf6a8dbffa2 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -21,6 +21,7 @@ import ( "tailscale.com/net/netaddr" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version/distro" ) @@ -63,7 +64,7 @@ var ( // NewOSConfigurator created a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. 
-func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { if !buildfeatures.HasDNS || distro.Get() == distro.JetKVM { return NewNoopManager() } @@ -100,7 +101,7 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient. logf("dns: using %q mode", mode) switch mode { case "direct": - return newDirectManagerOnFS(logf, health, env.fs), nil + return newDirectManagerOnFS(logf, health, bus, env.fs), nil case "systemd-resolved": if f, ok := optNewResolvedManager.GetOk(); ok { return f(logf, health, interfaceName) @@ -119,7 +120,7 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient. logf("[unexpected] detected unknown DNS mode %q, using direct manager as last resort", mode) } - return newDirectManagerOnFS(logf, health, env.fs), nil + return newDirectManagerOnFS(logf, health, bus, env.fs), nil } // newOSConfigEnv are the funcs newOSConfigurator needs, pulled out for testing. diff --git a/net/dns/manager_openbsd.go b/net/dns/manager_openbsd.go index 6168a9e0818cd..766c82f981218 100644 --- a/net/dns/manager_openbsd.go +++ b/net/dns/manager_openbsd.go @@ -11,6 +11,7 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/syspolicy/policyclient" ) @@ -25,8 +26,8 @@ func (kv kv) String() string { // NewOSConfigurator created a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. 
-func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { - return newOSConfigurator(logf, health, interfaceName, +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { + return newOSConfigurator(logf, health, bus, interfaceName, newOSConfigEnv{ rcIsResolvd: rcIsResolvd, fs: directFS{}, @@ -39,7 +40,7 @@ type newOSConfigEnv struct { rcIsResolvd func(resolvConfContents []byte) bool } -func newOSConfigurator(logf logger.Logf, health *health.Tracker, interfaceName string, env newOSConfigEnv) (ret OSConfigurator, err error) { +func newOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, interfaceName string, env newOSConfigEnv) (ret OSConfigurator, err error) { var debug []kv dbg := func(k, v string) { debug = append(debug, kv{k, v}) @@ -54,7 +55,7 @@ func newOSConfigurator(logf logger.Logf, health *health.Tracker, interfaceName s bs, err := env.fs.ReadFile(resolvConf) if os.IsNotExist(err) { dbg("rc", "missing") - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil } if err != nil { return nil, fmt.Errorf("reading /etc/resolv.conf: %w", err) @@ -66,7 +67,7 @@ func newOSConfigurator(logf logger.Logf, health *health.Tracker, interfaceName s } dbg("resolvd", "missing") - return newDirectManager(logf, health), nil + return newDirectManager(logf, health, bus), nil } func rcIsResolvd(resolvConfContents []byte) bool { diff --git a/net/dns/manager_plan9.go b/net/dns/manager_plan9.go index ef1ceea17787a..47c996dad7cda 100644 --- a/net/dns/manager_plan9.go +++ b/net/dns/manager_plan9.go @@ -20,11 +20,12 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/set" 
"tailscale.com/util/syspolicy/policyclient" ) -func NewOSConfigurator(logf logger.Logf, ht *health.Tracker, _ policyclient.Client, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { +func NewOSConfigurator(logf logger.Logf, ht *health.Tracker, _ *eventbus.Bus, _ policyclient.Client, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { return &plan9DNSManager{ logf: logf, ht: ht, diff --git a/net/dns/manager_solaris.go b/net/dns/manager_solaris.go index de7e72bb52436..dcd8b1fd3951c 100644 --- a/net/dns/manager_solaris.go +++ b/net/dns/manager_solaris.go @@ -7,9 +7,10 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" "tailscale.com/util/syspolicy/policyclient" ) -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, iface string) (OSConfigurator, error) { - return newDirectManager(logf, health), nil +func NewOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, _ policyclient.Client, _ *controlknobs.Knobs, iface string) (OSConfigurator, error) { + return newDirectManager(logf, health, bus), nil } diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go index dcdc88c7a22bf..420efe40405df 100644 --- a/net/dns/manager_tcp_test.go +++ b/net/dns/manager_tcp_test.go @@ -93,7 +93,7 @@ func TestDNSOverTCP(t *testing.T) { bus := eventbustest.NewBus(t) dialer := tsdial.NewDialer(netmon.NewStatic()) dialer.SetBus(bus) - m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, nil, "") + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, nil, "", bus) m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts( @@ -181,7 +181,7 @@ func TestDNSOverTCP_TooLarge(t *testing.T) { bus := eventbustest.NewBus(t) dialer := tsdial.NewDialer(netmon.NewStatic()) dialer.SetBus(bus) - m := NewManager(log, &f, health.NewTracker(bus), dialer, 
nil, nil, "") + m := NewManager(log, &f, health.NewTracker(bus), dialer, nil, nil, "", bus) m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts("andrew.ts.com.", "1.2.3.4"), diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index 92b660007cdd2..18c88df9125c3 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -6,9 +6,11 @@ package dns import ( "errors" "net/netip" + "reflect" "runtime" "strings" "testing" + "testing/synctest" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -936,7 +938,7 @@ func TestManager(t *testing.T) { bus := eventbustest.NewBus(t) dialer := tsdial.NewDialer(netmon.NewStatic()) dialer.SetBus(bus) - m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, knobs, goos) + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, knobs, goos, bus) m.resolver.TestOnlySetHook(f.SetResolver) if err := m.Set(test.in); err != nil { @@ -1045,7 +1047,7 @@ func TestConfigRecompilation(t *testing.T) { bus := eventbustest.NewBus(t) dialer := tsdial.NewDialer(netmon.NewStatic()) dialer.SetBus(bus) - m := NewManager(t.Logf, f, health.NewTracker(bus), dialer, nil, nil, "darwin") + m := NewManager(t.Logf, f, health.NewTracker(bus), dialer, nil, nil, "darwin", bus) var managerConfig *resolver.Config m.resolver.TestOnlySetHook(func(cfg resolver.Config) { @@ -1078,3 +1080,39 @@ func TestConfigRecompilation(t *testing.T) { t.Fatalf("Want non nil managerConfig. 
Got nil") } } + +func TestTrampleRetrample(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + f := &fakeOSConfigurator{} + f.BaseConfig = OSConfig{ + Nameservers: mustIPs("1.1.1.1"), + } + + config := Config{ + Routes: upstreams("ts.net", "69.4.2.0", "foo.ts.net", ""), + SearchDomains: fqdns("foo.ts.net"), + } + + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, f, health.NewTracker(bus), dialer, nil, nil, "linux", bus) + + // Initial set should error out and store the config + if err := m.Set(config); err != nil { + t.Fatalf("Want nil error. Got non-nil") + } + + // Set no config + f.OSConfig = OSConfig{} + + inj := eventbustest.NewInjector(t, bus) + eventbustest.Inject(inj, TrampleDNS{}) + synctest.Wait() + + t.Logf("OSConfig: %+v", f.OSConfig) + if reflect.DeepEqual(f.OSConfig, OSConfig{}) { + t.Errorf("Expected config to be set, got empty config") + } + }) +} diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 5ccadbab2d9ad..1eccb9a16ff1d 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -29,6 +29,7 @@ import ( "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" @@ -57,8 +58,8 @@ type windowsManager struct { // NewOSConfigurator created a new OS configurator. // -// The health tracker and the knobs may be nil. -func NewOSConfigurator(logf logger.Logf, health *health.Tracker, polc policyclient.Client, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { +// The health tracker, eventbus and the knobs may be nil. 
+func NewOSConfigurator(logf logger.Logf, health *health.Tracker, bus *eventbus.Bus, polc policyclient.Client, knobs *controlknobs.Knobs, interfaceName string) (OSConfigurator, error) { if polc == nil { panic("nil policyclient.Client") } @@ -163,7 +164,7 @@ func setTailscaleHosts(logf logger.Logf, prevHostsFile []byte, hosts []*HostEntr header = "# TailscaleHostsSectionStart" footer = "# TailscaleHostsSectionEnd" ) - var comments = []string{ + comments := []string{ "# This section contains MagicDNS entries for Tailscale.", "# Do not edit this section manually.", } diff --git a/net/dns/manager_windows_test.go b/net/dns/manager_windows_test.go index aa538a0f66dcb..5525096b35c55 100644 --- a/net/dns/manager_windows_test.go +++ b/net/dns/manager_windows_test.go @@ -134,7 +134,7 @@ func TestManagerWindowsGPCopy(t *testing.T) { } defer delIfKey() - cfg, err := NewOSConfigurator(logf, nil, policyclient.NoPolicyClient{}, nil, fakeInterface.String()) + cfg, err := NewOSConfigurator(logf, nil, nil, policyclient.NoPolicyClient{}, nil, fakeInterface.String()) if err != nil { t.Fatalf("NewOSConfigurator: %v\n", err) } @@ -263,7 +263,7 @@ func runTest(t *testing.T, isLocal bool) { } defer delIfKey() - cfg, err := NewOSConfigurator(logf, nil, policyclient.NoPolicyClient{}, nil, fakeInterface.String()) + cfg, err := NewOSConfigurator(logf, nil, nil, policyclient.NoPolicyClient{}, nil, fakeInterface.String()) if err != nil { t.Fatalf("NewOSConfigurator: %v\n", err) } diff --git a/net/dns/wsl_windows.go b/net/dns/wsl_windows.go index 8b0780f55e17c..81e8593160c02 100644 --- a/net/dns/wsl_windows.go +++ b/net/dns/wsl_windows.go @@ -76,7 +76,7 @@ func (wm *wslManager) SetDNS(cfg OSConfig) error { } managers := make(map[string]*directManager) for _, distro := range distros { - managers[distro] = newDirectManagerOnFS(wm.logf, wm.health, wslFS{ + managers[distro] = newDirectManagerOnFS(wm.logf, wm.health, nil, wslFS{ user: "root", distro: distro, }) diff --git a/wgengine/userspace.go 
b/wgengine/userspace.go index 1b8562d3ffe55..3db329a37e2a2 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -387,7 +387,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) conf.Dialer.SetTUNName(tunName) conf.Dialer.SetNetMon(e.netMon) conf.Dialer.SetBus(e.eventBus) - e.dns = dns.NewManager(logf, conf.DNS, e.health, conf.Dialer, fwdDNSLinkSelector{e, tunName}, conf.ControlKnobs, runtime.GOOS) + e.dns = dns.NewManager(logf, conf.DNS, e.health, conf.Dialer, fwdDNSLinkSelector{e, tunName}, conf.ControlKnobs, runtime.GOOS, e.eventBus) // TODO: there's probably a better place for this sockstats.SetNetMon(e.netMon) From 8eda947530cebbe3dc7882ace4d9f2829b0448da Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Wed, 10 Dec 2025 04:51:53 +0530 Subject: [PATCH 0806/1093] cmd/derper: add GCP Certificate Manager support (#18161) Add --certmode=gcp for using Google Cloud Certificate Manager's public CA instead of Let's Encrypt. GCP requires External Account Binding (EAB) credentials for ACME registration, so this adds --acme-eab-kid and --acme-eab-key flags. The EAB key accepts both base64url and standard base64 encoding to support both ACME spec format and gcloud output. 
Fixes tailscale/corp#34881 Signed-off-by: Raj Singh Co-authored-by: Brad Fitzpatrick --- cmd/derper/cert.go | 36 ++++++++++++++++++++++++++++++++++-- cmd/derper/cert_test.go | 36 +++++++++++++++++++++++++++++++++++- cmd/derper/depaware.txt | 2 +- cmd/derper/derper.go | 10 ++++++---- 4 files changed, 76 insertions(+), 8 deletions(-) diff --git a/cmd/derper/cert.go b/cmd/derper/cert.go index b95755c64d2a7..d383c82f01157 100644 --- a/cmd/derper/cert.go +++ b/cmd/derper/cert.go @@ -11,6 +11,7 @@ import ( "crypto/tls" "crypto/x509" "crypto/x509/pkix" + "encoding/base64" "encoding/json" "encoding/pem" "errors" @@ -24,6 +25,7 @@ import ( "regexp" "time" + "golang.org/x/crypto/acme" "golang.org/x/crypto/acme/autocert" "tailscale.com/tailcfg" ) @@ -42,17 +44,33 @@ type certProvider interface { HTTPHandler(fallback http.Handler) http.Handler } -func certProviderByCertMode(mode, dir, hostname string) (certProvider, error) { +func certProviderByCertMode(mode, dir, hostname, eabKID, eabKey string) (certProvider, error) { if dir == "" { return nil, errors.New("missing required --certdir flag") } switch mode { - case "letsencrypt": + case "letsencrypt", "gcp": certManager := &autocert.Manager{ Prompt: autocert.AcceptTOS, HostPolicy: autocert.HostWhitelist(hostname), Cache: autocert.DirCache(dir), } + if mode == "gcp" { + if eabKID == "" || eabKey == "" { + return nil, errors.New("--certmode=gcp requires --acme-eab-kid and --acme-eab-key flags") + } + keyBytes, err := decodeEABKey(eabKey) + if err != nil { + return nil, err + } + certManager.Client = &acme.Client{ + DirectoryURL: "https://dv.acme-v02.api.pki.goog/directory", + } + certManager.ExternalAccountBinding = &acme.ExternalAccountBinding{ + KID: eabKID, + Key: keyBytes, + } + } if hostname == "derp.tailscale.com" { certManager.HostPolicy = prodAutocertHostPolicy certManager.Email = "security@tailscale.com" @@ -209,3 +227,17 @@ func createSelfSignedIPCert(crtPath, keyPath, ipStr string) (*tls.Certificate, e } return 
&tlsCert, nil } + +// decodeEABKey decodes a base64-encoded EAB key. +// It accepts both standard base64 (with padding) and base64url (without padding). +func decodeEABKey(s string) ([]byte, error) { + // Try base64url first (no padding), then standard base64 (with padding). + // This handles both ACME spec format and gcloud output format. + if b, err := base64.RawURLEncoding.DecodeString(s); err == nil { + return b, nil + } + if b, err := base64.StdEncoding.DecodeString(s); err == nil { + return b, nil + } + return nil, errors.New("invalid base64 encoding for EAB key") +} diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index c8a3229e9f41c..3a8da46108428 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -91,7 +91,7 @@ func TestCertIP(t *testing.T) { t.Fatalf("Error closing key.pem: %v", err) } - cp, err := certProviderByCertMode("manual", dir, hostname) + cp, err := certProviderByCertMode("manual", dir, hostname, "", "") if err != nil { t.Fatal(err) } @@ -169,3 +169,37 @@ func TestPinnedCertRawIP(t *testing.T) { } defer connClose.Close() } + +func TestGCPCertMode(t *testing.T) { + dir := t.TempDir() + + // Missing EAB credentials + _, err := certProviderByCertMode("gcp", dir, "test.example.com", "", "") + if err == nil { + t.Fatal("expected error when EAB credentials are missing") + } + + // Invalid base64 + _, err = certProviderByCertMode("gcp", dir, "test.example.com", "kid", "not-valid!") + if err == nil { + t.Fatal("expected error for invalid base64") + } + + // Valid base64url (no padding) + cp, err := certProviderByCertMode("gcp", dir, "test.example.com", "kid", "dGVzdC1rZXk") + if err != nil { + t.Fatalf("base64url: %v", err) + } + if cp == nil { + t.Fatal("base64url: nil certProvider") + } + + // Valid standard base64 (with padding, gcloud format) + cp, err = certProviderByCertMode("gcp", dir, "test.example.com", "kid", "dGVzdC1rZXk=") + if err != nil { + t.Fatalf("base64: %v", err) + } + if cp == nil { + t.Fatal("base64: 
nil certProvider") + } +} diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 11a6318c30061..b2465d28de13a 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -171,7 +171,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/version from tailscale.com/cmd/derper+ tailscale.com/version/distro from tailscale.com/envknob+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap - golang.org/x/crypto/acme from golang.org/x/crypto/acme/autocert + golang.org/x/crypto/acme from golang.org/x/crypto/acme/autocert+ golang.org/x/crypto/acme/autocert from tailscale.com/cmd/derper golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index f177986a59f91..aeb2adb5dc61d 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -60,9 +60,11 @@ var ( httpPort = flag.Int("http-port", 80, "The port on which to serve HTTP. Set to -1 to disable. The listener is bound to the same IP (if any) as specified in the -a flag.") stunPort = flag.Int("stun-port", 3478, "The UDP port on which to serve STUN. The listener is bound to the same IP (if any) as specified in the -a flag.") configPath = flag.String("c", "", "config file path") - certMode = flag.String("certmode", "letsencrypt", "mode for getting a cert. possible options: manual, letsencrypt") - certDir = flag.String("certdir", tsweb.DefaultCertDir("derper-certs"), "directory to store LetsEncrypt certs, if addr's port is :443") - hostname = flag.String("hostname", "derp.tailscale.com", "LetsEncrypt host name, if addr's port is :443. When --certmode=manual, this can be an IP address to avoid SNI checks") + certMode = flag.String("certmode", "letsencrypt", "mode for getting a cert. possible options: manual, letsencrypt, gcp") + certDir = flag.String("certdir", tsweb.DefaultCertDir("derper-certs"), "directory to store ACME (e.g. 
LetsEncrypt) certs, if addr's port is :443") + hostname = flag.String("hostname", "derp.tailscale.com", "TLS host name for certs, if addr's port is :443. When --certmode=manual, this can be an IP address to avoid SNI checks") + acmeEABKid = flag.String("acme-eab-kid", "", "ACME External Account Binding (EAB) Key ID (required for --certmode=gcp)") + acmeEABKey = flag.String("acme-eab-key", "", "ACME External Account Binding (EAB) HMAC key, base64-encoded (required for --certmode=gcp)") runSTUN = flag.Bool("stun", true, "whether to run a STUN server. It will bind to the same IP (if any) as the --addr flag value.") runDERP = flag.Bool("derp", true, "whether to run a DERP server. The only reason to set this false is if you're decommissioning a server but want to keep its bootstrap DNS functionality still running.") flagHome = flag.String("home", "", "what to serve at the root path. It may be left empty (the default, for a default homepage), \"blank\" for a blank page, or a URL to redirect to") @@ -343,7 +345,7 @@ func main() { if serveTLS { log.Printf("derper: serving on %s with TLS", *addr) var certManager certProvider - certManager, err = certProviderByCertMode(*certMode, *certDir, *hostname) + certManager, err = certProviderByCertMode(*certMode, *certDir, *hostname, *acmeEABKid, *acmeEABKey) if err != nil { log.Fatalf("derper: can not start cert provider: %v", err) } From 723b9af21a17d3af38bb66c7ad5e3548fd590142 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 10 Dec 2025 18:57:51 +0000 Subject: [PATCH 0807/1093] Dockerfile,Dockerfile.base: link iptables to legacy binary (#18177) Re-instate the linking of iptables installed in Tailscale container to the legacy iptables version. In environments where the legacy iptables is not needed, we should be able to run nftables instead, but this will ensure that Tailscale keeps working in environments that don't support nftables, such as some Synology NAS hosts. 
Updates #17854 Signed-off-by: Irbe Krumina --- Dockerfile | 4 ++-- Dockerfile.base | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index c546cf6574abd..68e7caa3edcb2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -73,8 +73,8 @@ RUN GOARCH=$TARGETARCH go install -ldflags="\ FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables -RUN ln -s /sbin/iptables-legacy /sbin/iptables -RUN ln -s /sbin/ip6tables-legacy /sbin/ip6tables +RUN rm /usr/sbin/iptables && ln -s /usr/sbin/iptables-legacy /usr/sbin/iptables +RUN rm /usr/sbin/ip6tables && ln -s /usr/sbin/ip6tables-legacy /usr/sbin/ip6tables COPY --from=build-env /go/bin/* /usr/local/bin/ # For compat with the previous run.sh, although ideally you should be diff --git a/Dockerfile.base b/Dockerfile.base index 6c3c8ed084fce..bd68e1572259e 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -8,5 +8,5 @@ RUN apk add --no-cache ca-certificates iptables iptables-legacy iproute2 ip6tabl # suppport nftables, so link back to legacy for backwards compatibility reasons. # TODO(irbekrm): add some way how to determine if we still run on nodes that # don't support nftables, so that we can eventually remove these symlinks. -RUN ln -s /sbin/iptables-legacy /sbin/iptables -RUN ln -s /sbin/ip6tables-legacy /sbin/ip6tables +RUN rm /usr/sbin/iptables && ln -s /usr/sbin/iptables-legacy /usr/sbin/iptables +RUN rm /usr/sbin/ip6tables && ln -s /usr/sbin/ip6tables-legacy /usr/sbin/ip6tables From c870d3811da20736bbbecaca56b9266e9a43d575 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Wed, 10 Dec 2025 14:27:20 -0500 Subject: [PATCH 0808/1093] net/{packet,tstun},wgengine: update disco key when receiving via TSMP (#18158) When receiving a TSMPDiscoAdvertisement from peer, update the discokey for said peer. 
// DiscoKeyAdvertisement is a TSMP message used for distributing disco keys.
// This struct is used as an event on the [eventbus.Bus].
Key key.DiscoPublic } diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index c7d0708df85eb..3bc2ff447422d 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -986,7 +986,7 @@ func TestTSMPDisco(t *testing.T) { if tda.Src != src { t.Errorf("Src address did not match, expected %v, got %v", src, tda.Src) } - if !reflect.DeepEqual(tda.Key, discoKey.Public()) { + if tda.Key.Compare(discoKey.Public()) != 0 { t.Errorf("Key did not match, expected %q, got %q", discoKey.Public(), tda.Key) } }) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 064838a2d540c..b8a5f7da2b72f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -4104,6 +4104,11 @@ var ( metricUDPLifetimeCycleCompleteAt10sCliff = newUDPLifetimeCounter("magicsock_udp_lifetime_cycle_complete_at_10s_cliff") metricUDPLifetimeCycleCompleteAt30sCliff = newUDPLifetimeCounter("magicsock_udp_lifetime_cycle_complete_at_30s_cliff") metricUDPLifetimeCycleCompleteAt60sCliff = newUDPLifetimeCounter("magicsock_udp_lifetime_cycle_complete_at_60s_cliff") + + // TSMP disco key exchange + metricTSMPDiscoKeyAdvertisementReceived = clientmetric.NewCounter("magicsock_tsmp_disco_key_advertisement_received") + metricTSMPDiscoKeyAdvertisementApplied = clientmetric.NewCounter("magicsock_tsmp_disco_key_advertisement_applied") + metricTSMPDiscoKeyAdvertisementUnchanged = clientmetric.NewCounter("magicsock_tsmp_disco_key_advertisement_unchanged") ) // newUDPLifetimeCounter returns a new *clientmetric.Metric with the provided @@ -4264,3 +4269,40 @@ func (c *Conn) PeerRelays() set.Set[netip.Addr] { } return servers } + +// HandleDiscoKeyAdvertisement processes a TSMP disco key update. +// The update may be solicited (in response to a request) or unsolicited. +// node is the Tailscale tailcfg.NodeView of the peer that sent the update. 
+func (c *Conn) HandleDiscoKeyAdvertisement(node tailcfg.NodeView, update packet.TSMPDiscoKeyAdvertisement) { + discoKey := update.Key + c.logf("magicsock: received disco key update %v from %v", discoKey.ShortString(), node.StableID()) + metricTSMPDiscoKeyAdvertisementReceived.Add(1) + + c.mu.Lock() + defer c.mu.Unlock() + nodeKey := node.Key() + + ep, ok := c.peerMap.endpointForNodeKey(nodeKey) + if !ok { + c.logf("magicsock: endpoint not found for node %v", nodeKey.ShortString()) + return + } + + oldDiscoKey := key.DiscoPublic{} + if epDisco := ep.disco.Load(); epDisco != nil { + oldDiscoKey = epDisco.key + } + // If the key did not change, count it and return. + if oldDiscoKey.Compare(discoKey) == 0 { + metricTSMPDiscoKeyAdvertisementUnchanged.Add(1) + return + } + c.discoInfoForKnownPeerLocked(discoKey) + ep.disco.Store(&endpointDisco{ + key: discoKey, + short: discoKey.ShortString(), + }) + c.peerMap.upsertEndpoint(ep, oldDiscoKey) + c.logf("magicsock: updated disco key for peer %v to %v", nodeKey.ShortString(), discoKey.ShortString()) + metricTSMPDiscoKeyAdvertisementApplied.Add(1) +} diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 4e10248861500..68ab4dfa012a7 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -64,6 +64,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/types/ptr" + "tailscale.com/types/views" "tailscale.com/util/cibuild" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -4302,3 +4303,47 @@ func TestRotateDiscoKeyMultipleTimes(t *testing.T) { keys = append(keys, newKey) } } + +func TestReceiveTSMPDiscoKeyAdvertisement(t *testing.T) { + conn := newTestConn(t) + t.Cleanup(func() { conn.Close() }) + + peerKey := key.NewNode().Public() + ep := &endpoint{ + nodeID: 1, + publicKey: peerKey, + nodeAddr: netip.MustParseAddr("100.64.0.1"), + } + discoKey := key.NewDisco().Public() + 
ep.disco.Store(&endpointDisco{ + key: discoKey, + short: discoKey.ShortString(), + }) + ep.c = conn + conn.mu.Lock() + nodeView := (&tailcfg.Node{ + Key: ep.publicKey, + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.64.0.1/32"), + }, + }).View() + conn.peers = views.SliceOf([]tailcfg.NodeView{nodeView}) + conn.mu.Unlock() + + conn.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) + + if ep.discoShort() != discoKey.ShortString() { + t.Errorf("Original disco key %s, does not match %s", discoKey.ShortString(), ep.discoShort()) + } + + newDiscoKey := key.NewDisco().Public() + tka := packet.TSMPDiscoKeyAdvertisement{ + Src: netip.MustParseAddr("100.64.0.1"), + Key: newDiscoKey, + } + conn.HandleDiscoKeyAdvertisement(nodeView, tka) + + if ep.disco.Load().short != newDiscoKey.ShortString() { + t.Errorf("New disco key %s, does not match %s", newDiscoKey.ShortString(), ep.disco.Load().short) + } +} diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 3db329a37e2a2..647923775ef10 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -551,6 +551,23 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } e.linkChangeQueue.Add(func() { e.linkChange(&cd) }) }) + eventbus.SubscribeFunc(ec, func(update tstun.DiscoKeyAdvertisement) { + e.logf("wgengine: got TSMP disco key advertisement from %v via eventbus", update.Src) + if e.magicConn == nil { + e.logf("wgengine: no magicConn") + return + } + + pkt := packet.TSMPDiscoKeyAdvertisement{ + Key: update.Key, + } + peer, ok := e.PeerForIP(update.Src) + if !ok { + e.logf("wgengine: no peer found for %v", update.Src) + return + } + e.magicConn.HandleDiscoKeyAdvertisement(peer.Node, pkt) + }) e.eventClient = ec e.logf("Engine created.") return e, nil From 6428ba01ef2d573828652975241e337a16fc4c69 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 10 Dec 2025 15:32:30 -0800 Subject: [PATCH 0809/1093] logtail/filch: rewrite the package (#18143) The filch implementation is fairly 
The re-implementation avoids a lot of
these problems by fundamentally eliminating the need for moveContents.
Note that
the mechanism of dup2 with stderr is inherently racy with
the two-file approach. The order of operations during a rotation is
carefully chosen to reduce the race window to be as short as possible.
Thus, this is slightly less racy than before.
+	// ReplaceStderr specifies whether to filch [os.Stderr] such that
+	// everything written there appears in the [Filch] buffer instead.
+	// To write to stderr instead of writing to [Filch],
+	// use [Filch.OrigStderr].
+	ReplaceStderr bool
+ writeCalls expvar.Int + readCalls expvar.Int + rotateCalls expvar.Int + callErrors expvar.Int + writeBytes expvar.Int + readBytes expvar.Int + filchedBytes expvar.Int + droppedBytes expvar.Int + storedBytes expvar.Int +} + +// ExpVar report metrics about the buffer. +// +// - counter_write_calls: Total number of calls to [Filch.Write] +// (excludes calls when file is closed). +// +// - counter_read_calls: Total number of calls to [Filch.TryReadLine] +// (excludes calls when file is closed or no bytes). +// +// - counter_rotate_calls: Total number of calls to rotate the log files +// (excludes calls when there is nothing to rotate to). +// +// - counter_call_errors: Total number of calls returning errors. +// +// - counter_write_bytes: Total number of bytes written +// (includes bytes filched from stderr). +// +// - counter_read_bytes: Total number of bytes read +// (includes bytes filched from stderr). +// +// - counter_filched_bytes: Total number of bytes filched from stderr. +// +// - counter_dropped_bytes: Total number of bytes dropped +// (includes bytes filched from stderr and lines too long to read). +// +// - gauge_stored_bytes: Current number of bytes stored on disk. 
+// ExpVar reports metrics about the buffer.
-func (f *Filch) TryReadLine() ([]byte, error) { +func (f *Filch) TryReadLine() (b []byte, err error) { f.mu.Lock() defer f.mu.Unlock() + if f.older == nil { + return nil, io.EOF + } - if f.altscan != nil { - if b, err := f.scan(); b != nil || err != nil { - return b, err + var tooLong bool // whether we are in a line that is too long + defer func() { + f.consumeReadBuffer(len(b)) + if tooLong || len(b) > f.maxLineSize { + f.droppedBytes.Add(int64(len(b))) + b, err = nil, cmp.Or(err, errTooLong) + } else { + f.readBytes.Add(int64(len(b))) } - } + if len(b) != 0 || err != nil { + f.readCalls.Add(1) + } + if err != nil { + f.callErrors.Add(1) + } + }() - f.cur, f.alt = f.alt, f.cur - if f.OrigStderr != nil { - if err := dup2Stderr(f.cur); err != nil { + for { + // Check if unread buffer already has the next line. + unread := f.unreadReadBuffer() + if i := bytes.IndexByte(unread, '\n') + len("\n"); i > 0 { + return unread[:i], nil + } + + // Check whether to make space for more data to read. + avail := f.availReadBuffer() + if len(avail) == 0 { + switch { + case len(unread) > f.maxLineSize: + tooLong = true + f.droppedBytes.Add(int64(len(unread))) + f.resetReadBuffer() + case len(unread) < cap(f.rdBuf)/10: + f.moveReadBufferToFront() + default: + f.growReadBuffer() + } + avail = f.availReadBuffer() // invariant: len(avail) > 0 + } + + // Read data into the available buffer. 
+ n, err := f.older.Read(avail) + f.appendReadBuffer(n) + if err != nil { + if err == io.EOF { + unread = f.unreadReadBuffer() + if len(unread) == 0 { + if err := f.rotateLocked(); err != nil { + return nil, err + } + if f.storedBytes.Value() == 0 { + return nil, nil + } + continue + } + return unread, nil + } return nil, err } } - if _, err := f.alt.Seek(0, io.SeekStart); err != nil { - return nil, err - } - f.altscan = bufio.NewScanner(f.alt) - f.altscan.Buffer(f.buf[:], bufio.MaxScanTokenSize) - f.altscan.Split(splitLines) - return f.scan() } -func (f *Filch) scan() ([]byte, error) { - if f.altscan.Scan() { - return f.altscan.Bytes(), nil - } - err := f.altscan.Err() - err2 := f.alt.Truncate(0) - _, err3 := f.alt.Seek(0, io.SeekStart) - f.altscan = nil - if err != nil { - return nil, err - } - if err2 != nil { - return nil, err2 - } - if err3 != nil { - return nil, err3 - } - return nil, nil -} +var alwaysStatForTests bool // Write implements the logtail.Buffer interface. -func (f *Filch) Write(b []byte) (int, error) { +func (f *Filch) Write(b []byte) (n int, err error) { f.mu.Lock() defer f.mu.Unlock() - if f.writeCounter == 100 { - // Check the file size every 100 writes. - f.writeCounter = 0 - fi, err := f.cur.Stat() + if f.newer == nil { + return 0, errClosed + } + + defer func() { + f.writeCalls.Add(1) if err != nil { - return 0, err + f.callErrors.Add(1) } - if fi.Size() >= f.maxFileSize { - // This most likely means we are not draining. - // To limit the amount of space we use, throw away the old logs. - if err := moveContents(f.alt, f.cur); err != nil { + }() + + // To make sure we do not write data to disk unbounded + // (in the event that we are not draining fast enough) + // check whether we exceeded maxFileSize. + // If so, then force a file rotation. 
+ if f.newlyWrittenBytes+f.newlyFilchedBytes > f.maxFileSize || f.writeCalls.Value()%100 == 0 || alwaysStatForTests { + f.statAndUpdateBytes() + if f.newlyWrittenBytes+f.newlyFilchedBytes > f.maxFileSize { + if err := f.rotateLocked(); err != nil { return 0, err } } } - f.writeCounter++ + // Write the log entry (appending a newline character if needed). + var newline string if len(b) == 0 || b[len(b)-1] != '\n' { - bnl := make([]byte, len(b)+1) - copy(bnl, b) - bnl[len(bnl)-1] = '\n' - return f.cur.Write(bnl) + newline = "\n" + f.wrBuf = append(append(f.wrBuf[:0], b...), newline...) + f.wrBufMaxLen = max(f.wrBufMaxLen, len(f.wrBuf)) + b = f.wrBuf + } + if len(b) > f.maxLineSize { + for line := range bytes.Lines(b) { + if len(line) > f.maxLineSize { + return 0, errTooLong + } + } } - return f.cur.Write(b) + n, err = f.newer.Write(b) + f.writeBytes.Add(int64(n)) + f.storedBytes.Add(int64(n)) + f.newlyWrittenBytes += int64(n) + return n - len(newline), err // subtract possibly appended newline } -// Close closes the Filch, releasing all os resources. -func (f *Filch) Close() (err error) { - f.mu.Lock() - defer f.mu.Unlock() +func (f *Filch) statAndUpdateBytes() { + if fi, err := f.newer.Stat(); err == nil { + prevSize := f.newlyWrittenBytes + f.newlyFilchedBytes + filchedBytes := max(0, fi.Size()-prevSize) + f.writeBytes.Add(filchedBytes) + f.filchedBytes.Add(filchedBytes) + f.storedBytes.Add(filchedBytes) + f.newlyFilchedBytes += filchedBytes + } +} +func (f *Filch) storedBytesForTest() int64 { + return must.Get(f.newer.Stat()).Size() + must.Get(f.older.Stat()).Size() +} + +var activeStderrWriteForTest sync.RWMutex + +// stderrWriteForTest calls [os.Stderr.Write], but respects calls to [waitIdleStderrForTest]. +func stderrWriteForTest(b []byte) int { + activeStderrWriteForTest.RLock() + defer activeStderrWriteForTest.RUnlock() + return must.Get(os.Stderr.Write(b)) +} + +// waitIdleStderrForTest waits until there are no active stderrWriteForTest calls. 
+func waitIdleStderrForTest() {
+	activeStderrWriteForTest.Lock()
+	defer activeStderrWriteForTest.Unlock()
+}
+
+// rotateLocked swaps f.newer and f.older such that:
+//
+//   - f.newer will be truncated and future writes will be appended to the end.
+//   - if [Options.ReplaceStderr], then stderr writes will redirect to f.newer
+//   - f.older will contain historical data, reads will consume from the start.
+//   - f.older is guaranteed to be immutable.
+//
+// There are two reasons for rotating:
+//
+//   - The reader finished reading f.older.
+//     No data should be lost under this condition.
+//
+//   - The writer exceeded a limit for f.newer.
+//     Data may be lost under this condition.
+func (f *Filch) rotateLocked() error {
+	f.rotateCalls.Add(1)
+
+	// Truncate the older file.
+	if fi, err := f.older.Stat(); err != nil {
+		return err
+	} else if fi.Size() > 0 {
+		// Update dropped bytes.
+		if pos, err := f.older.Seek(0, io.SeekCurrent); err == nil {
+			rdPos := pos - int64(len(f.unreadReadBuffer())) // adjust for data already read into the read buffer
+			f.droppedBytes.Add(max(0, fi.Size()-rdPos))
+		}
+		f.resetReadBuffer()
+
+		// Truncate the older file and write relative to the start.
+		if err := f.older.Truncate(0); err != nil {
+			return err
+		}
+		if _, err := f.older.Seek(0, io.SeekStart); err != nil {
+			return err
+		}
+	}
+
+	// Swap newer and older.
+	f.newer, f.older = f.older, f.newer
+
+	// If necessary, filch stderr into newer instead of older.
+	// This must be done after truncation otherwise
+	// we might lose some stderr data asynchronously written
+	// right in the middle of a rotation.
+	// Note that mutex does not prevent stderr writes.
+ prevSize := f.newlyWrittenBytes + f.newlyFilchedBytes + f.newlyWrittenBytes, f.newlyFilchedBytes = 0, 0 if f.OrigStderr != nil { - if err2 := unsaveStderr(f.OrigStderr); err == nil { - err = err2 + if err := dup2Stderr(f.newer); err != nil { + return err } - f.OrigStderr = nil } - if err2 := f.cur.Close(); err == nil { - err = err2 + // Update filched bytes and stored bytes metrics. + // This must be done after filching to newer + // so that f.older.Stat is *mostly* stable. + // + // NOTE: Unfortunately, an asynchronous os.Stderr.Write call + // that is already in progress when we called dup2Stderr + // will still write to the previous FD and + // may not be immediately observable by this Stat call. + // This is fundamentally unsolvable with the current design + // as we cannot synchronize all other os.Stderr.Write calls. + // In rare cases, it is possible that [Filch.TryReadLine] consumes + // the entire older file before the write commits, + // leading to dropped stderr lines. + waitIdleStderrForTest() + if fi, err := f.older.Stat(); err != nil { + return err + } else { + filchedBytes := max(0, fi.Size()-prevSize) + f.writeBytes.Add(filchedBytes) + f.filchedBytes.Add(filchedBytes) + f.storedBytes.Set(fi.Size()) // newer has been truncated, so only older matters } - if err2 := f.alt.Close(); err == nil { - err = err2 + + // Start reading from the start of older. + if _, err := f.older.Seek(0, io.SeekStart); err != nil { + return err + } + + // Garbage collect unnecessarily large buffers. + mayGarbageCollect := func(b []byte, maxLen int) ([]byte, int) { + if cap(b)/4 > maxLen { // if less than 25% utilized + b = slices.Grow([]byte(nil), 2*maxLen) + } + maxLen = 3 * (maxLen / 4) // reduce by 25% + return b, maxLen } + f.wrBuf, f.wrBufMaxLen = mayGarbageCollect(f.wrBuf, f.wrBufMaxLen) + f.rdBuf, f.rdBufMaxLen = mayGarbageCollect(f.rdBuf, f.rdBufMaxLen) + + return nil +} - return err +// Close closes the Filch, releasing all resources. 
+func (f *Filch) Close() error { + f.mu.Lock() + defer f.mu.Unlock() + var errUnsave, errCloseNew, errCloseOld error + if f.OrigStderr != nil { + errUnsave = unsaveStderr(f.OrigStderr) + f.OrigStderr = nil + } + if f.newer != nil { + errCloseNew = f.newer.Close() + f.newer = nil + } + if f.older != nil { + errCloseOld = f.older.Close() + f.older = nil + } + return errors.Join(errUnsave, errCloseNew, errCloseOld) } // New creates a new filch around two log files, each starting with filePrefix. @@ -181,14 +453,10 @@ func New(filePrefix string, opts Options) (f *Filch, err error) { return nil, err } - mfs := defaultMaxFileSize - if opts.MaxFileSize > 0 { - mfs = opts.MaxFileSize - } - f = &Filch{ - OrigStderr: os.Stderr, // temporary, for past logs recovery - maxFileSize: int64(mfs), - } + f = new(Filch) + f.maxLineSize = int(cmp.Or(max(0, opts.MaxLineSize), DefaultMaxLineSize)) + f.maxFileSize = int64(cmp.Or(max(0, opts.MaxFileSize), DefaultMaxFileSize)) + f.maxFileSize /= 2 // since there are two log files that combine to equal MaxFileSize // Neither, either, or both files may exist and contain logs from // the last time the process ran. The three cases are: @@ -198,35 +466,22 @@ func New(filePrefix string, opts Options) (f *Filch, err error) { // - both: the files were swapped and were starting to be // read out, while new logs streamed into the other // file, but the read out did not complete - if n := fi1.Size() + fi2.Size(); n > 0 { - f.recovered = n - } switch { case fi1.Size() > 0 && fi2.Size() == 0: - f.cur, f.alt = f2, f1 + f.newer, f.older = f2, f1 // use empty file as newer case fi2.Size() > 0 && fi1.Size() == 0: - f.cur, f.alt = f1, f2 - case fi1.Size() > 0 && fi2.Size() > 0: // both - // We need to pick one of the files to be the elder, - // which we do using the mtime. 
- var older, newer *os.File - if fi1.ModTime().Before(fi2.ModTime()) { - older, newer = f1, f2 - } else { - older, newer = f2, f1 - } - if err := moveContents(older, newer); err != nil { - fmt.Fprintf(f.OrigStderr, "filch: recover move failed: %v\n", err) - fmt.Fprintf(older, "filch: recover move failed: %v\n", err) - } - f.cur, f.alt = newer, older + f.newer, f.older = f1, f2 // use empty file as newer + case fi1.ModTime().Before(fi2.ModTime()): + f.newer, f.older = f2, f1 // use older file as older + case fi2.ModTime().Before(fi1.ModTime()): + f.newer, f.older = f1, f2 // use newer file as newer default: - f.cur, f.alt = f1, f2 // does not matter + f.newer, f.older = f1, f2 // does not matter } - if f.recovered > 0 { - f.altscan = bufio.NewScanner(f.alt) - f.altscan.Buffer(f.buf[:], bufio.MaxScanTokenSize) - f.altscan.Split(splitLines) + f.writeBytes.Set(fi1.Size() + fi2.Size()) + f.storedBytes.Set(fi1.Size() + fi2.Size()) + if fi, err := f.newer.Stat(); err == nil { + f.newlyWrittenBytes = fi.Size() } f.OrigStderr = nil @@ -235,50 +490,10 @@ func New(filePrefix string, opts Options) (f *Filch, err error) { if err != nil { return nil, err } - if err := dup2Stderr(f.cur); err != nil { + if err := dup2Stderr(f.newer); err != nil { return nil, err } } return f, nil } - -func moveContents(dst, src *os.File) (err error) { - defer func() { - _, err2 := src.Seek(0, io.SeekStart) - err3 := src.Truncate(0) - _, err4 := dst.Seek(0, io.SeekStart) - if err == nil { - err = err2 - } - if err == nil { - err = err3 - } - if err == nil { - err = err4 - } - }() - if _, err := src.Seek(0, io.SeekStart); err != nil { - return err - } - if _, err := dst.Seek(0, io.SeekStart); err != nil { - return err - } - if _, err := io.Copy(dst, src); err != nil { - return err - } - return nil -} - -func splitLines(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - if i := bytes.IndexByte(data, '\n'); i >= 0 { - return i + 
1, data[0 : i+1], nil - } - if atEOF { - return len(data), data, nil - } - return 0, nil, nil -} diff --git a/logtail/filch/filch_omit.go b/logtail/filch/filch_omit.go new file mode 100644 index 0000000000000..898978e2152ea --- /dev/null +++ b/logtail/filch/filch_omit.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_logtail + +package filch + +import "os" + +type Options struct { + ReplaceStderr bool + MaxLineSize int + MaxFileSize int +} + +type Filch struct { + OrigStderr *os.File +} + +func (*Filch) TryReadLine() ([]byte, error) { + return nil, nil +} + +func (*Filch) Write(b []byte) (int, error) { + return len(b), nil +} + +func (f *Filch) Close() error { + return nil +} + +func New(string, Options) (*Filch, error) { + return new(Filch), nil +} diff --git a/logtail/filch/filch_stub.go b/logtail/filch/filch_stub.go index 3bb82b1906f17..f2aeeb9b9f819 100644 --- a/logtail/filch/filch_stub.go +++ b/logtail/filch/filch_stub.go @@ -1,13 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build wasm || plan9 || tamago +//go:build !ts_omit_logtail && (wasm || plan9 || tamago) package filch -import ( - "os" -) +import "os" + +const replaceStderrSupportedForTest = false func saveStderr() (*os.File, error) { return os.Stderr, nil diff --git a/logtail/filch/filch_test.go b/logtail/filch/filch_test.go index 6b7b88414a72c..1e33471809dbb 100644 --- a/logtail/filch/filch_test.go +++ b/logtail/filch/filch_test.go @@ -4,207 +4,380 @@ package filch import ( + "bytes" + "encoding/json" "fmt" "io" + "math" + "math/rand/v2" "os" + "path/filepath" "runtime" "strings" + "sync" + "sync/atomic" "testing" - "unicode" - "unsafe" + "time" + jsonv2 "github.com/go-json-experiment/json" "tailscale.com/tstest" + "tailscale.com/util/must" ) +func init() { alwaysStatForTests = true } + type filchTest struct { *Filch + + filePrefix string } -func newFilchTest(t *testing.T, 
filePrefix string, opts Options) *filchTest { +func newForTest(t *testing.T, filePrefix string, opts Options) *filchTest { + t.Helper() + if filePrefix == "" { + filePrefix = filepath.Join(t.TempDir(), "testlog") + } f, err := New(filePrefix, opts) if err != nil { t.Fatal(err) } - return &filchTest{Filch: f} + t.Cleanup(func() { + if err := f.Close(); err != nil { + t.Errorf("Close error: %v", err) + } + }) + return &filchTest{Filch: f, filePrefix: filePrefix} } -func (f *filchTest) write(t *testing.T, s string) { +func (f *filchTest) read(t *testing.T, want []byte) { t.Helper() - if _, err := f.Write([]byte(s)); err != nil { - t.Fatal(err) + if got, err := f.TryReadLine(); err != nil { + t.Fatalf("TryReadLine error: %v", err) + } else if string(got) != string(want) { + t.Errorf("TryReadLine = %q, want %q", got, want) } } -func (f *filchTest) read(t *testing.T, want string) { - t.Helper() - if b, err := f.TryReadLine(); err != nil { - t.Fatalf("r.ReadLine() err=%v", err) - } else if got := strings.TrimRightFunc(string(b), unicode.IsSpace); got != want { - t.Errorf("r.ReadLine()=%q, want %q", got, want) +func TestNew(t *testing.T) { + const want1 = "Lorem\nipsum\ndolor\nsit\namet,\nconsectetur\nadipiscing\nelit,\nsed\n" + const want2 = "do\neiusmod\ntempor\nincididunt\nut\nlabore\net\ndolore\nmagna\naliqua.\n" + filePrefix := filepath.Join(t.TempDir(), "testlog") + checkLinesAndCleanup := func() { + t.Helper() + defer os.Remove(filepath.Join(filePrefix + ".log1.txt")) + defer os.Remove(filepath.Join(filePrefix + ".log2.txt")) + f := newForTest(t, filePrefix, Options{}) + var got []byte + for { + b := must.Get(f.TryReadLine()) + if b == nil { + break + } + got = append(got, b...) 
+ } + if string(got) != want1+want2 { + t.Errorf("got %q\nwant %q", got, want1+want2) + } } -} + now := time.Now() -func (f *filchTest) readEOF(t *testing.T) { - t.Helper() - if b, err := f.TryReadLine(); b != nil || err != nil { - t.Fatalf("r.ReadLine()=%q err=%v, want nil slice", string(b), err) - } + must.Do(os.WriteFile(filePrefix+".log1.txt", []byte(want1+want2), 0600)) + checkLinesAndCleanup() + + must.Do(os.WriteFile(filePrefix+".log2.txt", []byte(want1+want2), 0600)) + checkLinesAndCleanup() + + must.Do(os.WriteFile(filePrefix+".log1.txt", []byte(want1), 0600)) + os.Chtimes(filePrefix+".log1.txt", now.Add(-time.Minute), now.Add(-time.Minute)) + must.Do(os.WriteFile(filePrefix+".log2.txt", []byte(want2), 0600)) + os.Chtimes(filePrefix+".log2.txt", now.Add(+time.Minute), now.Add(+time.Minute)) + checkLinesAndCleanup() + + must.Do(os.WriteFile(filePrefix+".log1.txt", []byte(want2), 0600)) + os.Chtimes(filePrefix+".log1.txt", now.Add(+time.Minute), now.Add(+time.Minute)) + must.Do(os.WriteFile(filePrefix+".log2.txt", []byte(want1), 0600)) + os.Chtimes(filePrefix+".log2.txt", now.Add(-time.Minute), now.Add(-time.Minute)) + checkLinesAndCleanup() } -func (f *filchTest) close(t *testing.T) { +func setupStderr(t *testing.T) { t.Helper() - if err := f.Close(); err != nil { + pipeR, pipeW, err := os.Pipe() + if err != nil { t.Fatal(err) } + t.Cleanup(func() { pipeR.Close() }) + t.Cleanup(func() { + switch b, err := io.ReadAll(pipeR); { + case err != nil: + t.Fatalf("ReadAll error: %v", err) + case len(b) > 0: + t.Errorf("unexpected write to fake stderr: %s", b) + } + }) + t.Cleanup(func() { pipeW.Close() }) + tstest.Replace(t, &stderrFD, int(pipeW.Fd())) + tstest.Replace(t, &os.Stderr, pipeW) } -func TestDropOldLogs(t *testing.T) { - const line1 = "123456789" // 10 bytes (9+newline) - tests := []struct { - write, read int - }{ - {10, 10}, - {100, 100}, - {200, 200}, - {250, 150}, - {500, 200}, - } - for _, tc := range tests { - t.Run(fmt.Sprintf("w%d-r%d", tc.write, 
tc.read), func(t *testing.T) { - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false, MaxFileSize: 1000}) - defer f.close(t) - // Make filch rotate the logs 3 times - for range tc.write { - f.write(t, line1) - } - // We should only be able to read the last 150 lines - for i := range tc.read { - f.read(t, line1) - if t.Failed() { - t.Logf("could only read %d lines", i) - break +func TestConcurrentWriteAndRead(t *testing.T) { + if replaceStderrSupportedForTest { + setupStderr(t) + } + + const numWriters = 10 + const linesPerWriter = 1000 + opts := Options{ReplaceStderr: replaceStderrSupportedForTest, MaxFileSize: math.MaxInt32} + f := newForTest(t, "", opts) + + // Concurrently write many lines. + var draining sync.RWMutex + var group sync.WaitGroup + defer group.Wait() + data := bytes.Repeat([]byte("X"), 1000) + var runningWriters atomic.Int64 + for i := range numWriters { + runningWriters.Add(+1) + group.Go(func() { + defer runningWriters.Add(-1) + var b []byte + for j := range linesPerWriter { + b = fmt.Appendf(b[:0], `{"Index":%d,"Count":%d,"Data":"%s"}`+"\n", i+1, j+1, data[:rand.IntN(len(data))]) + draining.RLock() + if i%2 == 0 && opts.ReplaceStderr { + stderrWriteForTest(b) + } else { + must.Get(f.Write(b)) } + draining.RUnlock() + runtime.Gosched() } - f.readEOF(t) }) } -} -func TestQueue(t *testing.T) { - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - - f.readEOF(t) - const line1 = "Hello, World!" - const line2 = "This is a test." - const line3 = "Of filch." - f.write(t, line1) - f.write(t, line2) - f.read(t, line1) - f.write(t, line3) - f.read(t, line2) - f.read(t, line3) - f.readEOF(t) - f.write(t, line1) - f.read(t, line1) - f.readEOF(t) - f.close(t) + // Verify that we can read back the lines in an ordered manner. 
+ var lines int + var entry struct{ Index, Count int } + state := make(map[int]int) + checkLine := func() (ok bool) { + b := must.Get(f.TryReadLine()) + if len(b) == 0 { + return false + } + entry.Index, entry.Count = 0, 0 + if err := jsonv2.Unmarshal(b, &entry); err != nil { + t.Fatalf("json.Unmarshal error: %v", err) + } + if wantCount := state[entry.Index] + 1; entry.Count != wantCount { + t.Fatalf("Index:%d, Count = %d, want %d", entry.Index, entry.Count, wantCount) + } + state[entry.Index] = entry.Count + lines++ + return true + } + for lines < numWriters*linesPerWriter { + writersDone := runningWriters.Load() == 0 + for range rand.IntN(100) { + runtime.Gosched() // bias towards more writer operations + } + + if rand.IntN(100) == 0 { + // Asynchronous read of a single line. + if !checkLine() && writersDone { + t.Fatal("failed to read all lines after all writers done") + } + } else { + // Synchronous reading of all lines. + draining.Lock() + for checkLine() { + } + draining.Unlock() + } + } } -func TestRecover(t *testing.T) { - t.Run("empty", func(t *testing.T) { - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - f.write(t, "hello") - f.read(t, "hello") - f.readEOF(t) - f.close(t) - - f = newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - f.readEOF(t) - f.close(t) - }) +// Test that the +func TestBufferCapacity(t *testing.T) { + f := newForTest(t, "", Options{}) + b := bytes.Repeat([]byte("X"), 1000) + for range 1000 { + must.Get(f.Write(b[:rand.IntN(len(b))])) + } + for must.Get(f.TryReadLine()) != nil { + } + if !(10*len(b) < cap(f.rdBuf) && cap(f.rdBuf) < 20*len(b)) { + t.Errorf("cap(rdBuf) = %v, want within [%v:%v]", cap(f.rdBuf), 10*len(b), 20*len(b)) + } - t.Run("cur", func(t *testing.T) { - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - f.write(t, "hello") - f.close(t) + must.Get(f.Write(bytes.Repeat([]byte("X"), DefaultMaxLineSize-1))) + 
must.Get(f.TryReadLine()) + wrCap, rdCap := cap(f.wrBuf), cap(f.rdBuf) - f = newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - f.read(t, "hello") - f.readEOF(t) - f.close(t) - }) + // Force another rotation. Buffers should not be GC'd yet. + must.Get(f.TryReadLine()) + if cap(f.wrBuf) != wrCap { + t.Errorf("cap(f.wrBuf) = %v, want %v", cap(f.wrBuf), wrCap) + } + if cap(f.rdBuf) != rdCap { + t.Errorf("cap(f.rdBuf) = %v, want %v", cap(f.rdBuf), rdCap) + } - t.Run("alt", func(t *testing.T) { - t.Skip("currently broken on linux, passes on macOS") - /* --- FAIL: TestRecover/alt (0.00s) - filch_test.go:128: r.ReadLine()="world", want "hello" - filch_test.go:129: r.ReadLine()="hello", want "world" - */ - - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - f.write(t, "hello") - f.read(t, "hello") - f.write(t, "world") - f.close(t) - - f = newFilchTest(t, filePrefix, Options{ReplaceStderr: false}) - // TODO(crawshaw): The "hello" log is replayed in recovery. - // We could reduce replays by risking some logs loss. - // What should our policy here be? - f.read(t, "hello") - f.read(t, "world") - f.readEOF(t) - f.close(t) - }) + // Force many rotations. Buffers should be GC'd. + for range 64 { + t.Logf("cap(f.wrBuf), cap(f.rdBuf) = %d, %d", cap(f.wrBuf), cap(f.rdBuf)) + must.Get(f.TryReadLine()) + } + if cap(f.wrBuf) != 0 { + t.Errorf("cap(f.wrBuf) = %v, want %v", cap(f.wrBuf), 0) + } + if cap(f.rdBuf) != 0 { + t.Errorf("cap(f.rdBuf) = %v, want %v", cap(f.rdBuf), 0) + } } -func TestFilchStderr(t *testing.T) { - if runtime.GOOS == "windows" { - // TODO(bradfitz): this is broken on Windows but not - // fully sure why. Investigate. But notably, the - // stderrFD variable (defined in filch.go) and set - // below is only ever read in filch_unix.go. So just - // skip this for test for now. 
- t.Skip("test broken on Windows") +func TestMaxLineSize(t *testing.T) { + const maxLineSize = 1000 + f := newForTest(t, "", Options{MaxLineSize: maxLineSize}) + + // Test writing. + b0 := []byte(strings.Repeat("X", maxLineSize-len("\n")) + "\n") + must.Get(f.Write(b0)) + b1 := []byte(strings.Repeat("X", maxLineSize)) + if _, err := f.Write(b1); err != errTooLong { + t.Errorf("Write error = %v, want errTooLong", err) } - pipeR, pipeW, err := os.Pipe() - if err != nil { - t.Fatal(err) + b2 := bytes.Repeat(b0, 2) + must.Get(f.Write(b2)) + if f.storedBytesForTest() != int64(len(b0)+len(b2)) { + t.Errorf("storedBytes = %v, want %v", f.storedBytesForTest(), int64(len(b0)+len(b2))) } - defer pipeR.Close() - defer pipeW.Close() - tstest.Replace(t, &stderrFD, int(pipeW.Fd())) + // Test reading. + f.read(t, b0) + f.read(t, b0) + f.read(t, b0) + f.read(t, nil) // should trigger rotate + if f.storedBytesForTest() != 0 { + t.Errorf("storedBytes = %v, want 0", f.storedBytesForTest()) + } - filePrefix := t.TempDir() - f := newFilchTest(t, filePrefix, Options{ReplaceStderr: true}) - f.write(t, "hello") - if _, err := fmt.Fprintf(pipeW, "filch\n"); err != nil { - t.Fatal(err) + // Test writing + must.Get(f.Write([]byte("hello"))) + must.Get(f.Write(b0)) + must.Get(f.Write([]byte("goodbye"))) + + // Test reading. + f.Close() + f = newForTest(t, f.filePrefix, Options{MaxLineSize: 10}) + f.read(t, []byte("hello\n")) + if _, err := f.TryReadLine(); err != errTooLong { + t.Errorf("Write error = %v, want errTooLong", err) } - f.read(t, "hello") - f.read(t, "filch") - f.readEOF(t) - f.close(t) + f.read(t, []byte("goodbye\n")) - pipeW.Close() - b, err := io.ReadAll(pipeR) - if err != nil { - t.Fatal(err) + // Check that the read buffer does not need to be as long + // as the overly long line to skip over it. 
+ if cap(f.rdBuf) >= maxLineSize/2 { + t.Errorf("cap(rdBuf) = %v, want <%v", cap(f.rdBuf), maxLineSize/2) } - if len(b) > 0 { - t.Errorf("unexpected write to fake stderr: %s", b) +} + +func TestMaxFileSize(t *testing.T) { + if replaceStderrSupportedForTest { + t.Run("ReplaceStderr:true", func(t *testing.T) { testMaxFileSize(t, true) }) } + t.Run("ReplaceStderr:false", func(t *testing.T) { testMaxFileSize(t, false) }) } -func TestSizeOf(t *testing.T) { - s := unsafe.Sizeof(Filch{}) - if s > 4096 { - t.Fatalf("Filch{} has size %d on %v, decrease size of buf field", s, runtime.GOARCH) +func testMaxFileSize(t *testing.T, replaceStderr bool) { + if replaceStderr { + setupStderr(t) + } + + opts := Options{ReplaceStderr: replaceStderr, MaxFileSize: 1000} + f := newForTest(t, "", opts) + + // Write lots of data. + const calls = 1000 + var group sync.WaitGroup + var filchedBytes, writeBytes int64 + group.Go(func() { + if !opts.ReplaceStderr { + return + } + var b []byte + for i := range calls { + b = fmt.Appendf(b[:0], `{"FilchIndex":%d}`+"\n", i+1) + filchedBytes += int64(stderrWriteForTest(b)) + } + }) + group.Go(func() { + var b []byte + for i := range calls { + b = fmt.Appendf(b[:0], `{"WriteIndex":%d}`+"\n", i+1) + writeBytes += int64(must.Get(f.Write(b))) + } + }) + group.Wait() + f.statAndUpdateBytes() + droppedBytes := filchedBytes + writeBytes - f.storedBytes.Value() + + switch { + case f.writeCalls.Value() != calls: + t.Errorf("writeCalls = %v, want %d", f.writeCalls.Value(), calls) + case f.readCalls.Value() != 0: + t.Errorf("readCalls = %v, want 0", f.readCalls.Value()) + case f.rotateCalls.Value() == 0: + t.Errorf("rotateCalls = 0, want >0") + case f.callErrors.Value() != 0: + t.Errorf("callErrors = %v, want 0", f.callErrors.Value()) + case f.writeBytes.Value() != writeBytes+filchedBytes: + t.Errorf("writeBytes = %v, want %d", f.writeBytes.Value(), writeBytes+filchedBytes) + case f.readBytes.Value() != 0: + t.Errorf("readBytes = %v, want 0", 
f.readBytes.Value()) + case f.filchedBytes.Value() != filchedBytes: + t.Errorf("filchedBytes = %v, want %d", f.filchedBytes.Value(), filchedBytes) + case f.droppedBytes.Value() != droppedBytes: + t.Errorf("droppedBytes = %v, want %d", f.droppedBytes.Value(), droppedBytes) + case f.droppedBytes.Value() == 0: + t.Errorf("droppedBytes = 0, want >0") + case f.storedBytes.Value() != f.storedBytesForTest(): + t.Errorf("storedBytes = %v, want %d", f.storedBytes.Value(), f.storedBytesForTest()) + case f.storedBytes.Value() > int64(opts.MaxFileSize) && !opts.ReplaceStderr: + // If ReplaceStderr, it is impossible for MaxFileSize to be + // strictly adhered to since asynchronous os.Stderr.Write calls + // do not trigger any checks to enforce maximum file size. + t.Errorf("storedBytes = %v, want <=%d", f.storedBytes.Value(), opts.MaxFileSize) + } + + // Read back the data and verify that the entries are in order. + var readBytes, lastFilchIndex, lastWriteIndex int64 + for { + b := must.Get(f.TryReadLine()) + if len(b) == 0 { + break + } + var entry struct{ FilchIndex, WriteIndex int64 } + must.Do(json.Unmarshal(b, &entry)) + if entry.FilchIndex == 0 && entry.WriteIndex == 0 { + t.Errorf("both indexes are zero") + } + if entry.FilchIndex > 0 { + if entry.FilchIndex <= lastFilchIndex { + t.Errorf("FilchIndex = %d, want >%d", entry.FilchIndex, lastFilchIndex) + } + lastFilchIndex = entry.FilchIndex + } + if entry.WriteIndex > 0 { + if entry.WriteIndex <= lastWriteIndex { + t.Errorf("WriteIndex = %d, want >%d", entry.WriteIndex, lastWriteIndex) + } + lastWriteIndex = entry.WriteIndex + } + readBytes += int64(len(b)) + } + + if f.readBytes.Value() != readBytes { + t.Errorf("readBytes = %v, want %v", f.readBytes.Value(), readBytes) } } diff --git a/logtail/filch/filch_unix.go b/logtail/filch/filch_unix.go index 2eae70aceb187..27f1d02ee86aa 100644 --- a/logtail/filch/filch_unix.go +++ b/logtail/filch/filch_unix.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // 
SPDX-License-Identifier: BSD-3-Clause -//go:build !windows && !wasm && !plan9 && !tamago +//go:build !ts_omit_logtail && !windows && !wasm && !plan9 && !tamago package filch @@ -11,6 +11,8 @@ import ( "golang.org/x/sys/unix" ) +const replaceStderrSupportedForTest = true + func saveStderr() (*os.File, error) { fd, err := unix.Dup(stderrFD) if err != nil { diff --git a/logtail/filch/filch_windows.go b/logtail/filch/filch_windows.go index d60514bf00abe..b08b64db39f61 100644 --- a/logtail/filch/filch_windows.go +++ b/logtail/filch/filch_windows.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail && windows + package filch import ( @@ -9,6 +11,8 @@ import ( "syscall" ) +const replaceStderrSupportedForTest = true + var kernel32 = syscall.MustLoadDLL("kernel32.dll") var procSetStdHandle = kernel32.MustFindProc("SetStdHandle") From 6ace3995f0e2f1abb23266afead89582c8595840 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 10 Dec 2025 18:37:03 -0800 Subject: [PATCH 0810/1093] portlist: skip tests on Linux 6.14.x with /proc/net/tcp bug (#18185) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR #18033 skipped tests for the versions of Linux 6.6 and 6.12 that had a regression in /proc/net/tcp that causes seek operations to fail with “illegal seek”. This PR skips tests for Linux 6.14.0, which is the default Ubuntu kernel, that also contains this regression. 
Updates #16966 Signed-off-by: Simon Law --- portlist/portlist_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/portlist/portlist_test.go b/portlist/portlist_test.go index 791a8b118427f..8503b0fefdf50 100644 --- a/portlist/portlist_test.go +++ b/portlist/portlist_test.go @@ -17,6 +17,7 @@ func maybeSkip(t *testing.T) { "https://github.com/tailscale/tailscale/issues/16966", "6.6.102", "6.6.103", "6.6.104", "6.12.42", "6.12.43", "6.12.44", "6.12.45", + "6.14.0", ) } } From 0df463130889799588b95e63c0040be3501ec8b4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 11 Dec 2025 08:46:53 -0800 Subject: [PATCH 0811/1093] ipn/ipnlocal: avoid ResetAndStop panic Updates #18187 Change-Id: If7375efb7df0452a5e85b742fc4c4eecbbd62717 Signed-off-by: Brad Fitzpatrick --- ipn/ipnlocal/local.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 51f92656040b7..73fa56c18258a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5809,7 +5809,14 @@ func (b *LocalBackend) stateMachineLocked() { func (b *LocalBackend) stopEngineAndWaitLocked() { syncs.RequiresMutex(&b.mu) b.logf("stopEngineAndWait...") - st, _ := b.e.ResetAndStop() // TODO: what should we do if this returns an error? + st, err := b.e.ResetAndStop() + if err != nil { + // TODO(braditz): our caller, popBrowserAuthNowLocked, probably + // should handle this somehow. For now, just log it. + // See tailscale/tailscale#18187 + b.logf("stopEngineAndWait: ResetAndStop error: %v", err) + return + } b.setWgengineStatusLocked(st) b.logf("stopEngineAndWait: done.") } From 9613b4eecca191e156c8195edb56dff4121c4bf9 Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 11 Dec 2025 10:49:48 -0800 Subject: [PATCH 0812/1093] logtail: add metrics (#18184) Add metrics about logtail uploading and underlying buffer. Add metrics to the in-memory buffer implementation. 
Updates tailscale/corp#21363 Signed-off-by: Joe Tsai --- logtail/buffer.go | 43 +++++++++++++++++++++++++++++++++++++++++- logtail/filch/filch.go | 5 +++-- logtail/logtail.go | 40 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+), 3 deletions(-) diff --git a/logtail/buffer.go b/logtail/buffer.go index 82c9b461010b2..6efdbda63ac8e 100644 --- a/logtail/buffer.go +++ b/logtail/buffer.go @@ -8,8 +8,10 @@ package logtail import ( "bytes" "errors" + "expvar" "fmt" + "tailscale.com/metrics" "tailscale.com/syncs" ) @@ -39,12 +41,42 @@ type memBuffer struct { dropMu syncs.Mutex dropCount int + + // Metrics (see [memBuffer.ExpVar] for details). + writeCalls expvar.Int + readCalls expvar.Int + writeBytes expvar.Int + readBytes expvar.Int + droppedBytes expvar.Int + storedBytes expvar.Int +} + +// ExpVar returns a [metrics.Set] with metrics about the buffer. +// +// - counter_write_calls: Total number of write calls. +// - counter_read_calls: Total number of read calls. +// - counter_write_bytes: Total number of bytes written. +// - counter_read_bytes: Total number of bytes read. +// - counter_dropped_bytes: Total number of bytes dropped. +// - gauge_stored_bytes: Current number of bytes stored in memory. 
+func (b *memBuffer) ExpVar() expvar.Var { + m := new(metrics.Set) + m.Set("counter_write_calls", &b.writeCalls) + m.Set("counter_read_calls", &b.readCalls) + m.Set("counter_write_bytes", &b.writeBytes) + m.Set("counter_read_bytes", &b.readBytes) + m.Set("counter_dropped_bytes", &b.droppedBytes) + m.Set("gauge_stored_bytes", &b.storedBytes) + return m } func (m *memBuffer) TryReadLine() ([]byte, error) { + m.readCalls.Add(1) if m.next != nil { msg := m.next m.next = nil + m.readBytes.Add(int64(len(msg))) + m.storedBytes.Add(-int64(len(msg))) return msg, nil } @@ -52,8 +84,13 @@ func (m *memBuffer) TryReadLine() ([]byte, error) { case ent := <-m.pending: if ent.dropCount > 0 { m.next = ent.msg - return fmt.Appendf(nil, "----------- %d logs dropped ----------", ent.dropCount), nil + b := fmt.Appendf(nil, "----------- %d logs dropped ----------", ent.dropCount) + m.writeBytes.Add(int64(len(b))) // indicate pseudo-injected log message + m.readBytes.Add(int64(len(b))) + return b, nil } + m.readBytes.Add(int64(len(ent.msg))) + m.storedBytes.Add(-int64(len(ent.msg))) return ent.msg, nil default: return nil, nil @@ -61,6 +98,7 @@ func (m *memBuffer) TryReadLine() ([]byte, error) { } func (m *memBuffer) Write(b []byte) (int, error) { + m.writeCalls.Add(1) m.dropMu.Lock() defer m.dropMu.Unlock() @@ -70,10 +108,13 @@ func (m *memBuffer) Write(b []byte) (int, error) { } select { case m.pending <- ent: + m.writeBytes.Add(int64(len(b))) + m.storedBytes.Add(+int64(len(b))) m.dropCount = 0 return len(b), nil default: m.dropCount++ + m.droppedBytes.Add(int64(len(b))) return 0, errBufferFull } } diff --git a/logtail/filch/filch.go b/logtail/filch/filch.go index 12ac647c4ec42..88c72f233daab 100644 --- a/logtail/filch/filch.go +++ b/logtail/filch/filch.go @@ -18,6 +18,7 @@ import ( "slices" "sync" + "tailscale.com/metrics" "tailscale.com/util/must" ) @@ -88,7 +89,7 @@ type Filch struct { storedBytes expvar.Int } -// ExpVar report metrics about the buffer. 
+// ExpVar returns a [metrics.Set] with metrics about the buffer. // // - counter_write_calls: Total number of calls to [Filch.Write] // (excludes calls when file is closed). @@ -114,7 +115,7 @@ type Filch struct { // // - gauge_stored_bytes: Current number of bytes stored on disk. func (f *Filch) ExpVar() expvar.Var { - m := new(expvar.Map) + m := new(metrics.Set) m.Set("counter_write_calls", &f.writeCalls) m.Set("counter_read_calls", &f.readCalls) m.Set("counter_rotate_calls", &f.rotateCalls) diff --git a/logtail/logtail.go b/logtail/logtail.go index 2879c6b0d3cf8..91bfed8b183a8 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -12,6 +12,7 @@ import ( "context" "crypto/rand" "encoding/binary" + "expvar" "fmt" "io" "log" @@ -28,6 +29,7 @@ import ( "github.com/creachadair/msync/trigger" "github.com/go-json-experiment/json/jsontext" "tailscale.com/envknob" + "tailscale.com/metrics" "tailscale.com/net/netmon" "tailscale.com/net/sockstats" "tailscale.com/tstime" @@ -180,6 +182,12 @@ type Logger struct { shutdownStartMu sync.Mutex // guards the closing of shutdownStart shutdownStart chan struct{} // closed when shutdown begins shutdownDone chan struct{} // closed when shutdown complete + + // Metrics (see [Logger.ExpVar] for details). + uploadCalls expvar.Int + failedCalls expvar.Int + uploadedBytes expvar.Int + uploadingTime expvar.Int } type atomicSocktatsLabel struct{ p atomic.Uint32 } @@ -477,6 +485,9 @@ func (lg *Logger) awaitInternetUp(ctx context.Context) { // origlen indicates the pre-compression body length. // origlen of -1 indicates that the body is not compressed. 
func (lg *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAfter time.Duration, err error) { + lg.uploadCalls.Add(1) + startUpload := time.Now() + const maxUploadTime = 45 * time.Second ctx = sockstats.WithSockStats(ctx, lg.sockstatsLabel.Load(), lg.Logf) ctx, cancel := context.WithTimeout(ctx, maxUploadTime) @@ -516,15 +527,20 @@ func (lg *Logger) upload(ctx context.Context, body []byte, origlen int) (retryAf lg.httpDoCalls.Add(1) resp, err := lg.httpc.Do(req) if err != nil { + lg.failedCalls.Add(1) return 0, fmt.Errorf("log upload of %d bytes %s failed: %v", len(body), compressedNote, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { + lg.failedCalls.Add(1) n, _ := strconv.Atoi(resp.Header.Get("Retry-After")) b, _ := io.ReadAll(io.LimitReader(resp.Body, 1<<10)) return time.Duration(n) * time.Second, fmt.Errorf("log upload of %d bytes %s failed %d: %s", len(body), compressedNote, resp.StatusCode, bytes.TrimSpace(b)) } + + lg.uploadedBytes.Add(int64(len(body))) + lg.uploadingTime.Add(int64(time.Since(startUpload))) return 0, nil } @@ -546,6 +562,30 @@ func (lg *Logger) StartFlush() { } } +// ExpVar report metrics about the logger. +// +// - counter_upload_calls: Total number of upload attempts. +// +// - counter_upload_errors: Total number of upload attempts that failed. +// +// - counter_uploaded_bytes: Total number of bytes successfully uploaded +// (which is calculated after compression is applied). +// +// - counter_uploading_nsecs: Total number of nanoseconds spent uploading. +// +// - buffer: An optional [metrics.Set] with metrics for the [Buffer]. 
+func (lg *Logger) ExpVar() expvar.Var { + m := new(metrics.Set) + m.Set("counter_upload_calls", &lg.uploadCalls) + m.Set("counter_upload_errors", &lg.failedCalls) + m.Set("counter_uploaded_bytes", &lg.uploadedBytes) + m.Set("counter_uploading_nsecs", &lg.uploadingTime) + if v, ok := lg.buffer.(interface{ ExpVar() expvar.Var }); ok { + m.Set("buffer", v.ExpVar()) + } + return m +} + // logtailDisabled is whether logtail uploads to logcatcher are disabled. var logtailDisabled atomic.Bool From 65182f211950bb9b73bea3b9cb354d4cda3ea84e Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Fri, 12 Dec 2025 02:53:21 +0530 Subject: [PATCH 0813/1093] ipn/ipnlocal: add ProxyProtocol support to VIP service TCP handler (#18175) tcpHandlerForVIPService was missing ProxyProtocol support that tcpHandlerForServe already had. Extract the shared logic into forwardTCPWithProxyProtocol helper and use it in both handlers. Fixes #18172 Signed-off-by: Raj Singh --- ipn/ipnlocal/serve.go | 171 ++++++++++++++++++++---------------------- 1 file changed, 81 insertions(+), 90 deletions(-) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index cda742892695b..69a68f66ee098 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -591,16 +591,7 @@ func (b *LocalBackend) tcpHandlerForVIPService(dstAddr, srcAddr netip.AddrPort) }) } - errc := make(chan error, 1) - go func() { - _, err := io.Copy(backConn, conn) - errc <- err - }() - go func() { - _, err := io.Copy(conn, backConn) - errc <- err - }() - return <-errc + return b.forwardTCPWithProxyProtocol(conn, backConn, tcph.ProxyProtocol(), srcAddr, dport, backDst) } } @@ -678,93 +669,93 @@ func (b *LocalBackend) tcpHandlerForServe(dport uint16, srcAddr netip.AddrPort, }) } - var proxyHeader []byte - if ver := tcph.ProxyProtocol(); ver > 0 { - // backAddr is the final "destination" of the connection, - // which is the connection to the proxied-to backend. 
- backAddr := backConn.RemoteAddr().(*net.TCPAddr) - - // We always want to format the PROXY protocol - // header based on the IPv4 or IPv6-ness of - // the client. The SourceAddr and - // DestinationAddr need to match in type, so we - // need to be careful to not e.g. set a - // SourceAddr of type IPv6 and DestinationAddr - // of type IPv4. - // - // If this is an IPv6-mapped IPv4 address, - // though, unmap it. - proxySrcAddr := srcAddr - if proxySrcAddr.Addr().Is4In6() { - proxySrcAddr = netip.AddrPortFrom( - proxySrcAddr.Addr().Unmap(), - proxySrcAddr.Port(), - ) - } - - is4 := proxySrcAddr.Addr().Is4() + // TODO(bradfitz): do the RegisterIPPortIdentity and + // UnregisterIPPortIdentity stuff that netstack does + return b.forwardTCPWithProxyProtocol(conn, backConn, tcph.ProxyProtocol(), srcAddr, dport, backDst) + } + } - var destAddr netip.Addr - if self := b.currentNode().Self(); self.Valid() { - if is4 { - destAddr = nodeIP(self, netip.Addr.Is4) - } else { - destAddr = nodeIP(self, netip.Addr.Is6) - } - } - if !destAddr.IsValid() { - // Pick a best-effort destination address of localhost. - if is4 { - destAddr = netip.AddrFrom4([4]byte{127, 0, 0, 1}) - } else { - destAddr = netip.IPv6Loopback() - } - } + return nil +} - header := &proxyproto.Header{ - Version: byte(ver), - Command: proxyproto.PROXY, - SourceAddr: net.TCPAddrFromAddrPort(proxySrcAddr), - DestinationAddr: &net.TCPAddr{ - IP: destAddr.AsSlice(), - Port: backAddr.Port, - }, - } - if is4 { - header.TransportProtocol = proxyproto.TCPv4 - } else { - header.TransportProtocol = proxyproto.TCPv6 - } - var err error - proxyHeader, err = header.Format() - if err != nil { - b.logf("localbackend: failed to format proxy protocol header for port %v (from %v) to %s: %v", dport, srcAddr, backDst, err) - } +// forwardTCPWithProxyProtocol forwards TCP traffic between conn and backConn, +// optionally prepending a PROXY protocol header if proxyProtoVer > 0. 
+// The srcAddr is the original client address used to build the PROXY header. +func (b *LocalBackend) forwardTCPWithProxyProtocol(conn, backConn net.Conn, proxyProtoVer int, srcAddr netip.AddrPort, dport uint16, backDst string) error { + var proxyHeader []byte + if proxyProtoVer > 0 { + backAddr := backConn.RemoteAddr().(*net.TCPAddr) + + // We always want to format the PROXY protocol header based on + // the IPv4 or IPv6-ness of the client. The SourceAddr and + // DestinationAddr need to match in type. + // If this is an IPv6-mapped IPv4 address, unmap it. + proxySrcAddr := srcAddr + if proxySrcAddr.Addr().Is4In6() { + proxySrcAddr = netip.AddrPortFrom( + proxySrcAddr.Addr().Unmap(), + proxySrcAddr.Port(), + ) + } + + is4 := proxySrcAddr.Addr().Is4() + + var destAddr netip.Addr + if self := b.currentNode().Self(); self.Valid() { + if is4 { + destAddr = nodeIP(self, netip.Addr.Is4) + } else { + destAddr = nodeIP(self, netip.Addr.Is6) } + } + if !destAddr.IsValid() { + // Unexpected: we couldn't determine the node's IP address. + // Pick a best-effort destination address of localhost. 
+ if is4 { + destAddr = netip.AddrFrom4([4]byte{127, 0, 0, 1}) + } else { + destAddr = netip.IPv6Loopback() + } + } - // TODO(bradfitz): do the RegisterIPPortIdentity and - // UnregisterIPPortIdentity stuff that netstack does - errc := make(chan error, 1) - go func() { - if len(proxyHeader) > 0 { - if _, err := backConn.Write(proxyHeader); err != nil { - errc <- err - backConn.Close() // to ensure that the other side gets EOF - return - } - } - _, err := io.Copy(backConn, conn) - errc <- err - }() - go func() { - _, err := io.Copy(conn, backConn) - errc <- err - }() - return <-errc + header := &proxyproto.Header{ + Version: byte(proxyProtoVer), + Command: proxyproto.PROXY, + SourceAddr: net.TCPAddrFromAddrPort(proxySrcAddr), + DestinationAddr: &net.TCPAddr{ + IP: destAddr.AsSlice(), + Port: backAddr.Port, + }, + } + if is4 { + header.TransportProtocol = proxyproto.TCPv4 + } else { + header.TransportProtocol = proxyproto.TCPv6 + } + var err error + proxyHeader, err = header.Format() + if err != nil { + b.logf("localbackend: failed to format proxy protocol header for port %v (from %v) to %s: %v", dport, srcAddr, backDst, err) } } - return nil + errc := make(chan error, 1) + go func() { + if len(proxyHeader) > 0 { + if _, err := backConn.Write(proxyHeader); err != nil { + errc <- err + backConn.Close() + return + } + } + _, err := io.Copy(backConn, conn) + errc <- err + }() + go func() { + _, err := io.Copy(conn, backConn) + errc <- err + }() + return <-errc } func (b *LocalBackend) getServeHandler(r *http.Request) (_ ipn.HTTPHandlerView, at string, ok bool) { From 3ef9787379bef0c48143535583b0f23233ffb56f Mon Sep 17 00:00:00 2001 From: James 'zofrex' Sanderson Date: Fri, 12 Dec 2025 12:05:05 +0000 Subject: [PATCH 0814/1093] tsweb: add Unwrap to loggingResponseWriter for ResponseController (#18195) The new http.ResponseController type added in Go 1.20: https://go.dev/doc/go1.20#http_responsecontroller requires ResponseWriters that are wrapping the original passed to 
ServeHTTP to implement an Unwrap method: https://pkg.go.dev/net/http#NewResponseController With this in place, it is possible to call methods such as Flush and SetReadDeadline on a loggingResponseWriter without needing to implement them there ourselves. Updates tailscale/corp#34763 Updates tailscale/corp#34813 Signed-off-by: James Sanderson --- tsweb/tsweb.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tsweb/tsweb.go b/tsweb/tsweb.go index 869b4cc8ea566..f6196174b38b2 100644 --- a/tsweb/tsweb.go +++ b/tsweb/tsweb.go @@ -687,6 +687,10 @@ func (lg loggingResponseWriter) Flush() { f.Flush() } +func (lg *loggingResponseWriter) Unwrap() http.ResponseWriter { + return lg.ResponseWriter +} + // errorHandler is an http.Handler that wraps a ReturnHandler to render the // returned errors to the client and pass them back to any logHandlers. type errorHandler struct { From cb5fa35f571cc815b3f9c600e07beb4e37cad019 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 12 Dec 2025 18:10:00 +0000 Subject: [PATCH 0815/1093] .github/workfkows,Dockerfile,Dockerfile.base: add a test for base image (#18180) Test that the base image builds and has the right iptables binary linked. 
Updates #17854 Signed-off-by: Irbe Krumina --- .github/workflows/docker-base.yml | 29 +++++++++++++++++++++++++++++ Dockerfile | 5 +++++ Dockerfile.base | 10 +++++----- 3 files changed, 39 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/docker-base.yml diff --git a/.github/workflows/docker-base.yml b/.github/workflows/docker-base.yml new file mode 100644 index 0000000000000..3c5931f2d8bcd --- /dev/null +++ b/.github/workflows/docker-base.yml @@ -0,0 +1,29 @@ +name: "Validate Docker base image" +on: + workflow_dispatch: + pull_request: + paths: + - "Dockerfile.base" + - ".github/workflows/docker-base.yml" +jobs: + build-and-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: "build and test" + run: | + set -e + IMG="test-base:$(head -c 8 /dev/urandom | xxd -p)" + docker build -t "$IMG" -f Dockerfile.base . + + iptables_version=$(docker run --rm "$IMG" iptables --version) + if [[ "$iptables_version" != *"(legacy)"* ]]; then + echo "ERROR: Docker base image should contain legacy iptables; found ${iptables_version}" + exit 1 + fi + + ip6tables_version=$(docker run --rm "$IMG" ip6tables --version) + if [[ "$ip6tables_version" != *"(legacy)"* ]]; then + echo "ERROR: Docker base image should contain legacy ip6tables; found ${ip6tables_version}" + exit 1 + fi diff --git a/Dockerfile b/Dockerfile index 68e7caa3edcb2..7122f99782fec 100644 --- a/Dockerfile +++ b/Dockerfile @@ -73,6 +73,11 @@ RUN GOARCH=$TARGETARCH go install -ldflags="\ FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables +# Alpine 3.19 replaced legacy iptables with nftables based implementation. +# Tailscale is used on some hosts that don't support nftables, such as Synology +# NAS, so link iptables back to legacy version. Hosts that don't require legacy +# iptables should be able to use Tailscale in nftables mode. 
See +# https://github.com/tailscale/tailscale/issues/17854 RUN rm /usr/sbin/iptables && ln -s /usr/sbin/iptables-legacy /usr/sbin/iptables RUN rm /usr/sbin/ip6tables && ln -s /usr/sbin/ip6tables-legacy /usr/sbin/ip6tables diff --git a/Dockerfile.base b/Dockerfile.base index bd68e1572259e..9b7ae512b9945 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -3,10 +3,10 @@ FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iptables-legacy iproute2 ip6tables iputils -# Alpine 3.19 replaced legacy iptables with nftables based implementation. We -# can't be certain that all hosts that run Tailscale containers currently -# suppport nftables, so link back to legacy for backwards compatibility reasons. -# TODO(irbekrm): add some way how to determine if we still run on nodes that -# don't support nftables, so that we can eventually remove these symlinks. +# Alpine 3.19 replaced legacy iptables with nftables based implementation. +# Tailscale is used on some hosts that don't support nftables, such as Synology +# NAS, so link iptables back to legacy version. Hosts that don't require legacy +# iptables should be able to use Tailscale in nftables mode. 
See +# https://github.com/tailscale/tailscale/issues/17854 RUN rm /usr/sbin/iptables && ln -s /usr/sbin/iptables-legacy /usr/sbin/iptables RUN rm /usr/sbin/ip6tables && ln -s /usr/sbin/ip6tables-legacy /usr/sbin/ip6tables From d7a5624841227071b7b557fdf136b4aa7ff73897 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 15 Dec 2025 10:27:59 +0000 Subject: [PATCH 0816/1093] cmd/k8s-operator: fix statefulset template yaml indentation (#18194) Fixes #17000 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/deploy/manifests/proxy.yaml | 8 ++-- .../deploy/manifests/userspace-proxy.yaml | 8 ++-- cmd/k8s-operator/ingress_test.go | 42 +++++++++++++------ cmd/k8s-operator/sts.go | 12 +++++- cmd/k8s-operator/testutils_test.go | 13 ++++++ 5 files changed, 62 insertions(+), 21 deletions(-) diff --git a/cmd/k8s-operator/deploy/manifests/proxy.yaml b/cmd/k8s-operator/deploy/manifests/proxy.yaml index 3c9a3eaa36c56..74e36cf788c0f 100644 --- a/cmd/k8s-operator/deploy/manifests/proxy.yaml +++ b/cmd/k8s-operator/deploy/manifests/proxy.yaml @@ -16,12 +16,12 @@ spec: privileged: true command: ["/bin/sh", "-c"] args: [sysctl -w net.ipv4.ip_forward=1 && if sysctl net.ipv6.conf.all.forwarding; then sysctl -w net.ipv6.conf.all.forwarding=1; fi] - resources: - requests: - cpu: 1m - memory: 1Mi containers: - name: tailscale + resources: + requests: + cpu: 1m + memory: 1Mi imagePullPolicy: Always env: - name: TS_USERSPACE diff --git a/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml b/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml index 6617f6d4b52fe..f93ab5855e7b2 100644 --- a/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml +++ b/cmd/k8s-operator/deploy/manifests/userspace-proxy.yaml @@ -10,12 +10,12 @@ spec: deletionGracePeriodSeconds: 10 spec: serviceAccountName: proxies - resources: - requests: - cpu: 1m - memory: 1Mi containers: - name: tailscale + resources: + requests: + cpu: 1m + memory: 1Mi imagePullPolicy: Always env: - name: TS_USERSPACE diff --git 
a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index 038c746a97ca3..52afc3be40c50 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -15,6 +15,7 @@ import ( corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -70,7 +71,8 @@ func TestTailscaleIngress(t *testing.T) { Web: map[ipn.HostPort]*ipn.WebServerConfig{ "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://1.2.3.4:8080/"}, - }}}, + }}, + }, }, } @@ -164,7 +166,8 @@ func TestTailscaleIngressHostname(t *testing.T) { Web: map[ipn.HostPort]*ipn.WebServerConfig{ "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://1.2.3.4:8080/"}, - }}}, + }}, + }, }, } @@ -238,7 +241,17 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { Spec: tsapi.ProxyClassSpec{StatefulSet: &tsapi.StatefulSet{ Labels: tsapi.Labels{"foo": "bar"}, Annotations: map[string]string{"bar.io/foo": "some-val"}, - Pod: &tsapi.Pod{Annotations: map[string]string{"foo.io/bar": "some-val"}}, + Pod: &tsapi.Pod{ + Annotations: map[string]string{"foo.io/bar": "some-val"}, + TailscaleContainer: &tsapi.Container{ + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("28Mi"), + }, + }, + }, + }, }}, } fc := fake.NewClientBuilder(). 
@@ -286,13 +299,14 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { Web: map[ipn.HostPort]*ipn.WebServerConfig{ "${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{ "/": {Proxy: "http://1.2.3.4:8080/"}, - }}}, + }}, + }, }, } expectEqual(t, fc, expectedSecret(t, fc, opts)) expectEqual(t, fc, expectedHeadlessService(shortName, "ingress")) - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts)) // 2. Ingress is updated to specify a ProxyClass, ProxyClass is not yet // ready, so proxy resource configuration does not change. @@ -300,7 +314,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { mak.Set(&ing.ObjectMeta.Labels, LabelAnnotationProxyClass, "custom-metadata") }) expectReconciled(t, ingR, "default", "test") - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts)) // 3. ProxyClass is set to Ready by proxy-class reconciler. Ingress get // reconciled and configuration from the ProxyClass is applied to the @@ -316,7 +330,7 @@ func TestTailscaleIngressWithProxyClass(t *testing.T) { }) expectReconciled(t, ingR, "default", "test") opts.proxyClass = pc.Name - expectEqual(t, fc, expectedSTSUserspace(t, fc, opts), removeResourceReqs) + expectEqual(t, fc, expectedSTSUserspace(t, fc, opts)) // 4. 
tailscale.com/proxy-class label is removed from the Ingress, the
 	// Ingress gets reconciled and the custom ProxyClass configuration is
@@ -390,7 +404,8 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) {
 			Web: map[ipn.HostPort]*ipn.WebServerConfig{
 				"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{
 					"/": {Proxy: "http://1.2.3.4:8080/"},
-				}}},
+				}},
+			},
 		},
 		resourceVersion: "1",
 	}
@@ -731,7 +746,8 @@ func TestEmptyPath(t *testing.T) {
 			Web: map[ipn.HostPort]*ipn.WebServerConfig{
 				"${TS_CERT_DOMAIN}:443": {Handlers: map[string]*ipn.HTTPHandler{
 					"/": {Proxy: "http://1.2.3.4:8080/"},
-				}}},
+				}},
+			},
 		},
 	}

@@ -764,9 +780,11 @@ func service() *corev1.Service {
 		},
 		Spec: corev1.ServiceSpec{
 			ClusterIP: "1.2.3.4",
-			Ports: []corev1.ServicePort{{
-				Port: 8080,
-				Name: "http"},
+			Ports: []corev1.ServicePort{
+				{
+					Port: 8080,
+					Name: "http",
+				},
 			},
 		},
 	}
diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go
index 3e4e72696b61b..62f91bf921faa 100644
--- a/cmd/k8s-operator/sts.go
+++ b/cmd/k8s-operator/sts.go
@@ -922,7 +922,17 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet,
 		if overlay.SecurityContext != nil {
 			base.SecurityContext = overlay.SecurityContext
 		}
-		base.Resources = overlay.Resources
+
+		if len(overlay.Resources.Requests) > 0 {
+			base.Resources.Requests = overlay.Resources.Requests
+		}
+		if len(overlay.Resources.Limits) > 0 {
+			base.Resources.Limits = overlay.Resources.Limits
+		}
+		if len(overlay.Resources.Claims) > 0 {
+			base.Resources.Claims = overlay.Resources.Claims
+		}
+
 		for _, e := range overlay.Env {
 			// Env vars configured via ProxyClass might override env
 			// vars that have been specified by the operator, i.e
diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go
index b4c468c8e8e94..9eb06394c092b 100644
--- a/cmd/k8s-operator/testutils_test.go
+++ b/cmd/k8s-operator/testutils_test.go
@@ -23,6 +23,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" @@ -95,6 +96,12 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef SecurityContext: &corev1.SecurityContext{ Privileged: ptr.To(true), }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1m"), + corev1.ResourceMemory: resource.MustParse("1Mi"), + }, + }, ImagePullPolicy: "Always", } if opts.shouldEnableForwardingClusterTrafficViaIngress { @@ -288,6 +295,12 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps {Name: "tailscaledconfig-0", ReadOnly: true, MountPath: path.Join("/etc/tsconfig", opts.secretName)}, {Name: "serve-config-0", ReadOnly: true, MountPath: path.Join("/etc/tailscaled", opts.secretName)}, }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1m"), + corev1.ResourceMemory: resource.MustParse("1Mi"), + }, + }, } if opts.enableMetrics { tsContainer.Env = append(tsContainer.Env, From d0d993f5d6576b5d97d0242c64bbe2de049d6486 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Fri, 12 Dec 2025 13:58:16 +0000 Subject: [PATCH 0817/1093] .github,cmd/cigocacher: add flags --version --stats --cigocached-host Add flags: * --cigocached-host to support alternative host resolution in other environments, like the corp repo. * --stats to reduce the amount of bash script we need. * --version to support a caching tool/cigocacher script that will download from GitHub releases. 
Updates tailscale/corp#10808 Change-Id: Ib2447bc5f79058669a70f2c49cef6aedd7afc049 Signed-off-by: Tom Proctor --- .github/actions/go-cache/action.sh | 57 +++------------- .github/actions/go-cache/action.yml | 4 ++ .github/workflows/test.yml | 7 +- cmd/cigocacher/cigocacher.go | 101 ++++++++++++++++++++++------ cmd/cigocacher/http.go | 6 -- 5 files changed, 99 insertions(+), 76 deletions(-) diff --git a/.github/actions/go-cache/action.sh b/.github/actions/go-cache/action.sh index 58ceabc861458..bd584f6f1270a 100755 --- a/.github/actions/go-cache/action.sh +++ b/.github/actions/go-cache/action.sh @@ -7,6 +7,7 @@ # Usage: ./action.sh # Inputs: # URL: The cigocached server URL. +# HOST: The cigocached server host to dial. # Outputs: # success: Whether cigocacher was set up successfully. @@ -22,57 +23,17 @@ if [ -z "${URL:-}" ]; then exit 0 fi -curl_and_parse() { - local jq_filter="$1" - local step="$2" - shift 2 - - local response - local curl_exit - response="$(curl -sSL "$@" 2>&1)" || curl_exit="$?" - if [ "${curl_exit:-0}" -ne "0" ]; then - echo "${step}: ${response}" >&2 - return 1 - fi - - local parsed - local jq_exit - parsed=$(echo "${response}" | jq -e -r "${jq_filter}" 2>&1) || jq_exit=$? - if [ "${jq_exit:-0}" -ne "0" ]; then - echo "${step}: Failed to parse JSON response:" >&2 - echo "${response}" >&2 - return 1 - fi - - echo "${parsed}" - return 0 -} - -JWT="$(curl_and_parse ".value" "Fetching GitHub identity JWT" \ - -H "Authorization: Bearer ${ACTIONS_ID_TOKEN_REQUEST_TOKEN}" \ - "${ACTIONS_ID_TOKEN_REQUEST_URL}&audience=gocached")" || exit 0 +BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(go env GOEXE)" +go build -o "${BIN_PATH}" ./cmd/cigocacher -# cigocached serves a TLS cert with an FQDN, but DNS is based on VM name. -HOST_AND_PORT="${URL#http*://}" -FIRST_LABEL="${HOST_AND_PORT/.*/}" -# Save CONNECT_TO for later steps to use. 
-echo "CONNECT_TO=${HOST_AND_PORT}:${FIRST_LABEL}:" >> "${GITHUB_ENV}" -BODY="$(jq -n --arg jwt "$JWT" '{"jwt": $jwt}')" -CIGOCACHER_TOKEN="$(curl_and_parse ".access_token" "Exchanging token with cigocached" \ - --connect-to "${HOST_AND_PORT}:${FIRST_LABEL}:" \ - -H "Content-Type: application/json" \ - "$URL/auth/exchange-token" \ - -d "$BODY")" || exit 0 +CIGOCACHER_TOKEN="$("${BIN_PATH}" --auth --cigocached-url "${URL}" --cigocached-host "${HOST}" )" +if [ -z "${CIGOCACHER_TOKEN:-}" ]; then + echo "Failed to fetch cigocacher token, skipping cigocacher setup" + exit 0 +fi -# Wait until we successfully auth before building cigocacher to ensure we know -# it's worth building. -# TODO(tomhjp): bake cigocacher into runner image and use it for auth. echo "Fetched cigocacher token successfully" echo "::add-mask::${CIGOCACHER_TOKEN}" -echo "CIGOCACHER_TOKEN=${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}" - -BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(go env GOEXE)" -go build -o "${BIN_PATH}" ./cmd/cigocacher -echo "GOCACHEPROG=${BIN_PATH} --cache-dir ${CACHE_DIR} --cigocached-url ${URL} --token ${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}" +echo "GOCACHEPROG=${BIN_PATH} --cache-dir ${CACHE_DIR} --cigocached-url ${URL} --cigocached-host ${HOST} --token ${CIGOCACHER_TOKEN}" >> "${GITHUB_ENV}" echo "success=true" >> "${GITHUB_OUTPUT}" diff --git a/.github/actions/go-cache/action.yml b/.github/actions/go-cache/action.yml index a671530f895f9..38bb15b37931e 100644 --- a/.github/actions/go-cache/action.yml +++ b/.github/actions/go-cache/action.yml @@ -5,6 +5,9 @@ inputs: cigocached-url: description: URL of the cigocached server required: true + cigocached-host: + description: Host to dial for the cigocached server + required: true checkout-path: description: Path to cloned repository required: true @@ -25,6 +28,7 @@ runs: shell: bash env: URL: ${{ inputs.cigocached-url }} + HOST: ${{ inputs.cigocached-host }} CACHE_DIR: ${{ inputs.cache-dir }} working-directory: ${{ inputs.checkout-path }} 
run: .github/actions/go-cache/action.sh diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index fd193401d7c7c..27862567f84da 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -263,6 +263,7 @@ jobs: checkout-path: ${{ github.workspace }}/src cache-dir: ${{ github.workspace }}/cigocacher cigocached-url: ${{ vars.CIGOCACHED_AZURE_URL }} + cigocached-host: ${{ vars.CIGOCACHED_AZURE_HOST }} - name: test if: matrix.key != 'win-bench' # skip on bench builder @@ -278,10 +279,12 @@ jobs: run: go test ./... -bench . -benchtime 1x -run "^$" - name: Print stats - shell: bash + shell: pwsh if: steps.cigocacher-setup.outputs.success == 'true' + env: + GOCACHEPROG: ${{ env.GOCACHEPROG }} run: | - curl -sSL --connect-to "${CONNECT_TO}" -H "Authorization: Bearer ${CIGOCACHER_TOKEN}" "${{ vars.CIGOCACHED_AZURE_URL }}/session/stats" | jq . + Invoke-Expression "$env:GOCACHEPROG --stats" | jq . win-tool-go: runs-on: windows-latest diff --git a/cmd/cigocacher/cigocacher.go b/cmd/cigocacher/cigocacher.go index 1ada62b6a660b..872cb195355b5 100644 --- a/cmd/cigocacher/cigocacher.go +++ b/cmd/cigocacher/cigocacher.go @@ -22,8 +22,11 @@ import ( "log" "net" "net/http" + "net/url" "os" "path/filepath" + "runtime/debug" + "strconv" "strings" "sync/atomic" "time" @@ -34,20 +37,56 @@ import ( func main() { var ( - auth = flag.Bool("auth", false, "auth with cigocached and exit, printing the access token as output") - token = flag.String("token", "", "the cigocached access token to use, as created using --auth") - cigocachedURL = flag.String("cigocached-url", "", "optional cigocached URL (scheme, host, and port). 
empty means to not use one.") - dir = flag.String("cache-dir", "", "cache directory; empty means automatic") - verbose = flag.Bool("verbose", false, "enable verbose logging") + version = flag.Bool("version", false, "print version and exit") + auth = flag.Bool("auth", false, "auth with cigocached and exit, printing the access token as output") + stats = flag.Bool("stats", false, "fetch and print cigocached stats and exit") + token = flag.String("token", "", "the cigocached access token to use, as created using --auth") + srvURL = flag.String("cigocached-url", "", "optional cigocached URL (scheme, host, and port). Empty means to not use one.") + srvHostDial = flag.String("cigocached-host", "", "optional cigocached host to dial instead of the host in the provided --cigocached-url. Useful for public TLS certs on private addresses.") + dir = flag.String("cache-dir", "", "cache directory; empty means automatic") + verbose = flag.Bool("verbose", false, "enable verbose logging") ) flag.Parse() + if *version { + info, ok := debug.ReadBuildInfo() + if !ok { + log.Fatal("no build info") + } + var ( + rev string + dirty bool + ) + for _, s := range info.Settings { + switch s.Key { + case "vcs.revision": + rev = s.Value + case "vcs.modified": + dirty, _ = strconv.ParseBool(s.Value) + } + } + if dirty { + rev += "-dirty" + } + fmt.Println(rev) + return + } + + var srvHost string + if *srvHostDial != "" && *srvURL != "" { + u, err := url.Parse(*srvURL) + if err != nil { + log.Fatal(err) + } + srvHost = u.Hostname() + } + if *auth { - if *cigocachedURL == "" { + if *srvURL == "" { log.Print("--cigocached-url is empty, skipping auth") return } - tk, err := fetchAccessToken(httpClient(), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL"), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN"), *cigocachedURL) + tk, err := fetchAccessToken(httpClient(srvHost, *srvHostDial), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL"), os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN"), *srvURL) if err != nil { log.Printf("error 
fetching access token, skipping auth: %v", err) return @@ -56,6 +95,28 @@ func main() { return } + if *stats { + if *srvURL == "" { + log.Fatal("--cigocached-url is empty; cannot fetch stats") + } + tk := *token + if tk == "" { + log.Fatal("--token is empty; cannot fetch stats") + } + c := &gocachedClient{ + baseURL: *srvURL, + cl: httpClient(srvHost, *srvHostDial), + accessToken: tk, + verbose: *verbose, + } + stats, err := c.fetchStats() + if err != nil { + log.Fatalf("error fetching gocached stats: %v", err) + } + fmt.Println(stats) + return + } + if *dir == "" { d, err := os.UserCacheDir() if err != nil { @@ -75,13 +136,13 @@ func main() { }, verbose: *verbose, } - if *cigocachedURL != "" { + if *srvURL != "" { if *verbose { - log.Printf("Using cigocached at %s", *cigocachedURL) + log.Printf("Using cigocached at %s", *srvURL) } c.gocached = &gocachedClient{ - baseURL: *cigocachedURL, - cl: httpClient(), + baseURL: *srvURL, + cl: httpClient(srvHost, *srvHostDial), accessToken: *token, verbose: *verbose, } @@ -104,18 +165,18 @@ func main() { } } -func httpClient() *http.Client { +func httpClient(srvHost, srvHostDial string) *http.Client { + if srvHost == "" || srvHostDial == "" { + return http.DefaultClient + } return &http.Client{ Transport: &http.Transport{ DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { - host, port, err := net.SplitHostPort(addr) - if err == nil { - // This does not run in a tailnet. We serve corp.ts.net - // TLS certs, and override DNS resolution to lookup the - // private IP for the VM by its hostname. - if vm, ok := strings.CutSuffix(host, ".corp.ts.net"); ok { - addr = net.JoinHostPort(vm, port) - } + if host, port, err := net.SplitHostPort(addr); err == nil && host == srvHost { + // This allows us to serve a publicly trusted TLS cert + // while also minimising latency by explicitly using a + // private network address. 
+ addr = net.JoinHostPort(srvHostDial, port) } var d net.Dialer return d.DialContext(ctx, network, addr) diff --git a/cmd/cigocacher/http.go b/cmd/cigocacher/http.go index 57d3bfb45f53e..55735f089655e 100644 --- a/cmd/cigocacher/http.go +++ b/cmd/cigocacher/http.go @@ -32,12 +32,6 @@ func tryReadErrorMessage(res *http.Response) []byte { } func (c *gocachedClient) get(ctx context.Context, actionID string) (outputID string, resp *http.Response, err error) { - // TODO(tomhjp): make sure we timeout if cigocached disappears, but for some - // reason, this seemed to tank network performance. - // // Set a generous upper limit on the time we'll wait for a response. We'll - // // shorten this deadline later once we know the content length. - // ctx, cancel := context.WithTimeout(ctx, time.Minute) - // defer cancel() req, _ := http.NewRequestWithContext(ctx, "GET", c.baseURL+"/action/"+actionID, nil) req.Header.Set("Want-Object", "1") // opt in to single roundtrip protocol if c.accessToken != "" { From 951d711054d71406bd360d180b063e89a0e11b89 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 15 Dec 2025 08:20:45 -0800 Subject: [PATCH 0818/1093] client/systray: add missing deferred unlock for httpCache mutex Updates #cleanup Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/systray/systray.go b/client/systray/systray.go index bc099a1ec23a2..330df8d06a4b1 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -372,6 +372,7 @@ func setRemoteIcon(menu *systray.MenuItem, urlStr string) { } cacheMu.Lock() + defer cacheMu.Unlock() b, ok := httpCache[urlStr] if !ok { resp, err := http.Get(urlStr) @@ -395,7 +396,6 @@ func setRemoteIcon(menu *systray.MenuItem, urlStr string) { resp.Body.Close() } } - cacheMu.Unlock() if len(b) > 0 { menu.SetIcon(b) From a663639bea0252ce0a34b404c72349f7b686d8b1 Mon Sep 17 00:00:00 2001 From: Jordan 
Whited Date: Mon, 15 Dec 2025 12:14:34 -0800 Subject: [PATCH 0819/1093] net/udprelay: replace map+sync.Mutex with sync.Map for VNI lookup This commit also introduces a sync.Mutex for guarding mutatable fields on serverEndpoint, now that it is no longer guarded by the sync.Mutex in Server. These changes reduce lock contention and by effect increase aggregate throughput under high flow count load. A benchmark on Linux with AWS c8gn instances showed a ~30% increase in aggregate throughput (37Gb/s vs 28Gb/s) for 12 tailscaled flows. Updates tailscale/corp#35264 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 150 +++++++++++++++++++----------------- net/udprelay/server_test.go | 25 +++--- 2 files changed, 93 insertions(+), 82 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index d595787805aba..45127dfae6f5b 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -77,8 +77,8 @@ type Server struct { closeCh chan struct{} netChecker *netcheck.Client - mu sync.Mutex // guards the following fields - macSecrets [][blake2s.Size]byte // [0] is most recent, max 2 elements + mu sync.Mutex // guards the following fields + macSecrets views.Slice[[blake2s.Size]byte] // [0] is most recent, max 2 elements macSecretRotatedAt mono.Time derpMap *tailcfg.DERPMap onlyStaticAddrPorts bool // no dynamic addr port discovery when set @@ -87,8 +87,11 @@ type Server struct { closed bool lamportID uint64 nextVNI uint32 - byVNI map[uint32]*serverEndpoint - byDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint + // serverEndpointByVNI is consistent with serverEndpointByDisco while mu is + // held, i.e. mu must be held around write ops. Read ops in performance + // sensitive paths, e.g. packet forwarding, do not need to acquire mu. 
+ serverEndpointByVNI sync.Map // key is uint32 (Geneve VNI), value is [*serverEndpoint] + serverEndpointByDisco map[key.SortedPairOfDiscoPublic]*serverEndpoint } const macSecretRotationInterval = time.Minute * 2 @@ -100,23 +103,23 @@ const ( ) // serverEndpoint contains Server-internal [endpoint.ServerEndpoint] state. -// serverEndpoint methods are not thread-safe. type serverEndpoint struct { // discoPubKeys contains the key.DiscoPublic of the served clients. The // indexing of this array aligns with the following fields, e.g. // discoSharedSecrets[0] is the shared secret to use when sealing // Disco protocol messages for transmission towards discoPubKeys[0]. - discoPubKeys key.SortedPairOfDiscoPublic - discoSharedSecrets [2]key.DiscoShared + discoPubKeys key.SortedPairOfDiscoPublic + discoSharedSecrets [2]key.DiscoShared + lamportID uint64 + vni uint32 + allocatedAt mono.Time + + mu sync.Mutex // guards the following fields inProgressGeneration [2]uint32 // or zero if a handshake has never started, or has just completed boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg lastSeen [2]mono.Time packetsRx [2]uint64 // num packets received from/sent by each client after they are bound bytesRx [2]uint64 // num bytes received from/sent by each client after they are bound - - lamportID uint64 - vni uint32 - allocatedAt mono.Time } func blakeMACFromBindMsg(blakeKey [blake2s.Size]byte, src netip.AddrPort, msg disco.BindUDPRelayEndpointCommon) ([blake2s.Size]byte, error) { @@ -141,7 +144,10 @@ func blakeMACFromBindMsg(blakeKey [blake2s.Size]byte, src netip.AddrPort, msg di return out, nil } -func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, serverDisco key.DiscoPublic, macSecrets [][blake2s.Size]byte, now mono.Time) (write []byte, to netip.AddrPort) { +func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, 
serverDisco key.DiscoPublic, macSecrets views.Slice[[blake2s.Size]byte], now mono.Time) (write []byte, to netip.AddrPort) { + e.mu.Lock() + defer e.mu.Unlock() + if senderIndex != 0 && senderIndex != 1 { return nil, netip.AddrPort{} } @@ -186,7 +192,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex } reply = append(reply, disco.Magic...) reply = serverDisco.AppendTo(reply) - mac, err := blakeMACFromBindMsg(macSecrets[0], from, m.BindUDPRelayEndpointCommon) + mac, err := blakeMACFromBindMsg(macSecrets.At(0), from, m.BindUDPRelayEndpointCommon) if err != nil { return nil, netip.AddrPort{} } @@ -206,7 +212,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex // silently drop return nil, netip.AddrPort{} } - for _, macSecret := range macSecrets { + for _, macSecret := range macSecrets.All() { mac, err := blakeMACFromBindMsg(macSecret, from, discoMsg.BindUDPRelayEndpointCommon) if err != nil { // silently drop @@ -230,7 +236,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex } } -func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic, macSecrets [][blake2s.Size]byte, now mono.Time) (write []byte, to netip.AddrPort) { +func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic, macSecrets views.Slice[[blake2s.Size]byte], now mono.Time) (write []byte, to netip.AddrPort) { senderRaw, isDiscoMsg := disco.Source(b) if !isDiscoMsg { // Not a Disco message @@ -265,7 +271,9 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []by } func (e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now mono.Time) (write []byte, to netip.AddrPort) { - if !e.isBound() { + e.mu.Lock() + defer e.mu.Unlock() + if !e.isBoundLocked() { // not a control packet, but serverEndpoint isn't bound return nil, netip.AddrPort{} } @@ -287,7 +295,9 @@ func 
(e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now mon } func (e *serverEndpoint) isExpired(now mono.Time, bindLifetime, steadyStateLifetime time.Duration) bool { - if !e.isBound() { + e.mu.Lock() + defer e.mu.Unlock() + if !e.isBoundLocked() { if now.Sub(e.allocatedAt) > bindLifetime { return true } @@ -299,9 +309,9 @@ func (e *serverEndpoint) isExpired(now mono.Time, bindLifetime, steadyStateLifet return false } -// isBound returns true if both clients have completed a 3-way handshake, +// isBoundLocked returns true if both clients have completed a 3-way handshake, // otherwise false. -func (e *serverEndpoint) isBound() bool { +func (e *serverEndpoint) isBoundLocked() bool { return e.boundAddrPorts[0].IsValid() && e.boundAddrPorts[1].IsValid() } @@ -313,15 +323,14 @@ func (e *serverEndpoint) isBound() bool { // used. func NewServer(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (s *Server, err error) { s = &Server{ - logf: logf, - disco: key.NewDisco(), - bindLifetime: defaultBindLifetime, - steadyStateLifetime: defaultSteadyStateLifetime, - closeCh: make(chan struct{}), - onlyStaticAddrPorts: onlyStaticAddrPorts, - byDisco: make(map[key.SortedPairOfDiscoPublic]*serverEndpoint), - nextVNI: minVNI, - byVNI: make(map[uint32]*serverEndpoint), + logf: logf, + disco: key.NewDisco(), + bindLifetime: defaultBindLifetime, + steadyStateLifetime: defaultSteadyStateLifetime, + closeCh: make(chan struct{}), + onlyStaticAddrPorts: onlyStaticAddrPorts, + serverEndpointByDisco: make(map[key.SortedPairOfDiscoPublic]*serverEndpoint), + nextVNI: minVNI, } s.discoPublic = s.disco.Public() @@ -640,8 +649,8 @@ func (s *Server) Close() error { // acquire s.mu. s.mu.Lock() defer s.mu.Unlock() - clear(s.byVNI) - clear(s.byDisco) + s.serverEndpointByVNI.Clear() + clear(s.serverEndpointByDisco) s.closed = true s.bus.Close() }) @@ -659,10 +668,10 @@ func (s *Server) endpointGCLoop() { // holding s.mu for the duration. Keep it simple (and slow) for now. 
s.mu.Lock() defer s.mu.Unlock() - for k, v := range s.byDisco { + for k, v := range s.serverEndpointByDisco { if v.isExpired(now, s.bindLifetime, s.steadyStateLifetime) { - delete(s.byDisco, k) - delete(s.byVNI, v.vni) + delete(s.serverEndpointByDisco, k) + s.serverEndpointByVNI.Delete(v.vni) } } } @@ -690,12 +699,7 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to n if err != nil { return nil, netip.AddrPort{} } - // TODO: consider performance implications of holding s.mu for the remainder - // of this method, which does a bunch of disco/crypto work depending. Keep - // it simple (and slow) for now. - s.mu.Lock() - defer s.mu.Unlock() - e, ok := s.byVNI[gh.VNI.Get()] + e, ok := s.serverEndpointByVNI.Load(gh.VNI.Get()) if !ok { // unknown VNI return nil, netip.AddrPort{} @@ -708,27 +712,36 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to n return nil, netip.AddrPort{} } msg := b[packet.GeneveFixedHeaderLength:] - s.maybeRotateMACSecretLocked(now) - return e.handleSealedDiscoControlMsg(from, msg, s.discoPublic, s.macSecrets, now) + secrets := s.getMACSecrets(now) + return e.(*serverEndpoint).handleSealedDiscoControlMsg(from, msg, s.discoPublic, secrets, now) } - return e.handleDataPacket(from, b, now) + return e.(*serverEndpoint).handleDataPacket(from, b, now) +} + +func (s *Server) getMACSecrets(now mono.Time) views.Slice[[blake2s.Size]byte] { + s.mu.Lock() + defer s.mu.Unlock() + s.maybeRotateMACSecretLocked(now) + return s.macSecrets } func (s *Server) maybeRotateMACSecretLocked(now mono.Time) { if !s.macSecretRotatedAt.IsZero() && now.Sub(s.macSecretRotatedAt) < macSecretRotationInterval { return } - switch len(s.macSecrets) { + secrets := s.macSecrets.AsSlice() + switch len(secrets) { case 0: - s.macSecrets = make([][blake2s.Size]byte, 1, 2) + secrets = make([][blake2s.Size]byte, 1, 2) case 1: - s.macSecrets = append(s.macSecrets, [blake2s.Size]byte{}) + secrets = append(secrets, 
[blake2s.Size]byte{}) fallthrough case 2: - s.macSecrets[1] = s.macSecrets[0] + secrets[1] = secrets[0] } - rand.Read(s.macSecrets[0][:]) + rand.Read(secrets[0][:]) s.macSecretRotatedAt = now + s.macSecrets = views.SliceOf(secrets) return } @@ -838,7 +851,7 @@ func (s *Server) getNextVNILocked() (uint32, error) { } else { s.nextVNI++ } - _, ok := s.byVNI[vni] + _, ok := s.serverEndpointByVNI.Load(vni) if !ok { return vni, nil } @@ -877,7 +890,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv } pair := key.NewSortedPairOfDiscoPublic(discoA, discoB) - e, ok := s.byDisco[pair] + e, ok := s.serverEndpointByDisco[pair] if ok { // Return the existing allocation. Clients can resolve duplicate // [endpoint.ServerEndpoint]'s via [endpoint.ServerEndpoint.LamportID]. @@ -915,8 +928,8 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv e.discoSharedSecrets[0] = s.disco.Shared(e.discoPubKeys.Get()[0]) e.discoSharedSecrets[1] = s.disco.Shared(e.discoPubKeys.Get()[1]) - s.byDisco[pair] = e - s.byVNI[e.vni] = e + s.serverEndpointByDisco[pair] = e + s.serverEndpointByVNI.Store(e.vni, e) s.logf("allocated endpoint vni=%d lamportID=%d disco[0]=%v disco[1]=%v", e.vni, e.lamportID, pair.Get()[0].ShortString(), pair.Get()[1].ShortString()) return endpoint.ServerEndpoint{ @@ -930,19 +943,19 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv }, nil } -// extractClientInfo constructs a [status.ClientInfo] for one of the two peer -// relay clients involved in this session. 
-func extractClientInfo(idx int, ep *serverEndpoint) status.ClientInfo { - if idx != 0 && idx != 1 { - panic(fmt.Sprintf("idx passed to extractClientInfo() must be 0 or 1; got %d", idx)) - } - - return status.ClientInfo{ - Endpoint: ep.boundAddrPorts[idx], - ShortDisco: ep.discoPubKeys.Get()[idx].ShortString(), - PacketsTx: ep.packetsRx[idx], - BytesTx: ep.bytesRx[idx], +// extractClientInfo constructs a [status.ClientInfo] for both relay clients +// involved in this session. +func (e *serverEndpoint) extractClientInfo() [2]status.ClientInfo { + e.mu.Lock() + defer e.mu.Unlock() + ret := [2]status.ClientInfo{} + for i := range e.boundAddrPorts { + ret[i].Endpoint = e.boundAddrPorts[i] + ret[i].ShortDisco = e.discoPubKeys.Get()[i].ShortString() + ret[i].PacketsTx = e.packetsRx[i] + ret[i].BytesTx = e.bytesRx[i] } + return ret } // GetSessions returns a slice of peer relay session statuses, with each @@ -955,14 +968,13 @@ func (s *Server) GetSessions() []status.ServerSession { if s.closed { return nil } - var sessions = make([]status.ServerSession, 0, len(s.byDisco)) - for _, se := range s.byDisco { - c1 := extractClientInfo(0, se) - c2 := extractClientInfo(1, se) + var sessions = make([]status.ServerSession, 0, len(s.serverEndpointByDisco)) + for _, se := range s.serverEndpointByDisco { + clientInfos := se.extractClientInfo() sessions = append(sessions, status.ServerSession{ VNI: se.vni, - Client1: c1, - Client2: c2, + Client1: clientInfos[0], + Client2: clientInfos[1], }) } return sessions diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index bc76801079edc..c4b3656417bae 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -339,19 +339,18 @@ func TestServer_getNextVNILocked(t *testing.T) { c := qt.New(t) s := &Server{ nextVNI: minVNI, - byVNI: make(map[uint32]*serverEndpoint), } for i := uint64(0); i < uint64(totalPossibleVNI); i++ { vni, err := s.getNextVNILocked() if err != nil { // using quicktest here triples test 
time t.Fatal(err) } - s.byVNI[vni] = nil + s.serverEndpointByVNI.Store(vni, nil) } c.Assert(s.nextVNI, qt.Equals, minVNI) _, err := s.getNextVNILocked() c.Assert(err, qt.IsNotNil) - delete(s.byVNI, minVNI) + s.serverEndpointByVNI.Delete(minVNI) _, err = s.getNextVNILocked() c.Assert(err, qt.IsNil) } @@ -455,17 +454,17 @@ func TestServer_maybeRotateMACSecretLocked(t *testing.T) { s := &Server{} start := mono.Now() s.maybeRotateMACSecretLocked(start) - qt.Assert(t, len(s.macSecrets), qt.Equals, 1) - macSecret := s.macSecrets[0] + qt.Assert(t, s.macSecrets.Len(), qt.Equals, 1) + macSecret := s.macSecrets.At(0) s.maybeRotateMACSecretLocked(start.Add(macSecretRotationInterval - time.Nanosecond)) - qt.Assert(t, len(s.macSecrets), qt.Equals, 1) - qt.Assert(t, s.macSecrets[0], qt.Equals, macSecret) + qt.Assert(t, s.macSecrets.Len(), qt.Equals, 1) + qt.Assert(t, s.macSecrets.At(0), qt.Equals, macSecret) s.maybeRotateMACSecretLocked(start.Add(macSecretRotationInterval)) - qt.Assert(t, len(s.macSecrets), qt.Equals, 2) - qt.Assert(t, s.macSecrets[1], qt.Equals, macSecret) - qt.Assert(t, s.macSecrets[0], qt.Not(qt.Equals), s.macSecrets[1]) + qt.Assert(t, s.macSecrets.Len(), qt.Equals, 2) + qt.Assert(t, s.macSecrets.At(1), qt.Equals, macSecret) + qt.Assert(t, s.macSecrets.At(0), qt.Not(qt.Equals), s.macSecrets.At(1)) s.maybeRotateMACSecretLocked(s.macSecretRotatedAt.Add(macSecretRotationInterval)) - qt.Assert(t, macSecret, qt.Not(qt.Equals), s.macSecrets[0]) - qt.Assert(t, macSecret, qt.Not(qt.Equals), s.macSecrets[1]) - qt.Assert(t, s.macSecrets[0], qt.Not(qt.Equals), s.macSecrets[1]) + qt.Assert(t, macSecret, qt.Not(qt.Equals), s.macSecrets.At(0)) + qt.Assert(t, macSecret, qt.Not(qt.Equals), s.macSecrets.At(1)) + qt.Assert(t, s.macSecrets.At(0), qt.Not(qt.Equals), s.macSecrets.At(1)) } From f174ecb6fdab1a234d8e6c3ab2cf8d6dc40fd0a9 Mon Sep 17 00:00:00 2001 From: stratself <126093083+stratself@users.noreply.github.com> Date: Tue, 16 Dec 2025 19:20:33 +0700 Subject: [PATCH 
0820/1093] words: 33 tails and 26 scales (#18213) Updates #words Signed-off-by: stratself <126093083+stratself@users.noreply.github.com> --- words/scales.txt | 24 +++++++++++++++++++++++- words/tails.txt | 27 +++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/words/scales.txt b/words/scales.txt index bb623fb6f1ab8..ce749b9dcc368 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -309,7 +309,7 @@ pirate platy pleco powan -pomano +pompano paridae porgy rohu @@ -451,3 +451,25 @@ ph pain temperature wyrm +tilapia +leaffish +gourami +artichoke +fir +larch +lydian +piranha +mackarel +tuatara +balance +massometer +lungfish +bichir +reedfish +tarpon +pomfret +haddock +smelt +rattlesnake +armadillo +bonytongue diff --git a/words/tails.txt b/words/tails.txt index b0119a7563224..9b5ae2ca96164 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -772,3 +772,30 @@ ribbon echo lemming worm +hornbill +crane +mudskipper +leaffish +bagrid +gourami +stomatopod +piranha +seagull +dinosaur +muskellunge +bichir +reedfish +tarpon +egret +pomfret +snakebird +anhinga +gannet +basa +cobbler +haddock +smelt +komodo +rattlesnake +softshell +bonytongue From 0fd1670a592c9c6d03e165b382c8823da313f71b Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 15 Dec 2025 14:01:00 -0800 Subject: [PATCH 0821/1093] client/local: add method to set gauge metric to a value The existing client metric methods only support incrementing (or decrementing) a delta value. This new method allows setting the metric to a specific value. 
Updates tailscale/corp#35327 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/local/local.go | 29 +++++++++++++++++------------ client/systray/systray.go | 4 ++-- cmd/derper/depaware.txt | 2 +- ipn/localapi/localapi.go | 21 +++++++++++---------- util/clientmetric/clientmetric.go | 14 ++++++++++++++ util/clientmetric/omit.go | 7 +++++++ 6 files changed, 52 insertions(+), 25 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 72ddbb55f773a..195a91b1ef4a9 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -43,6 +43,7 @@ import ( "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/key" + "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) @@ -385,18 +386,14 @@ func (lc *Client) IncrementCounter(ctx context.Context, name string, delta int) if !buildfeatures.HasClientMetrics { return nil } - type metricUpdate struct { - Name string `json:"name"` - Type string `json:"type"` - Value int `json:"value"` // amount to increment by - } if delta < 0 { return errors.New("negative delta not allowed") } - _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]metricUpdate{{ + _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]clientmetric.MetricUpdate{{ Name: name, Type: "counter", Value: delta, + Op: "add", }})) return err } @@ -405,15 +402,23 @@ func (lc *Client) IncrementCounter(ctx context.Context, name string, delta int) // metric by the given delta. If the metric has yet to exist, a new gauge // metric is created and initialized to delta. The delta value can be negative. 
func (lc *Client) IncrementGauge(ctx context.Context, name string, delta int) error { - type metricUpdate struct { - Name string `json:"name"` - Type string `json:"type"` - Value int `json:"value"` // amount to increment by - } - _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]metricUpdate{{ + _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]clientmetric.MetricUpdate{{ Name: name, Type: "gauge", Value: delta, + Op: "add", + }})) + return err +} + +// SetGauge sets the value of a Tailscale daemon's gauge metric to the given value. +// If the metric has yet to exist, a new gauge metric is created and initialized to value. +func (lc *Client) SetGauge(ctx context.Context, name string, value int) error { + _, err := lc.send(ctx, "POST", "/localapi/v0/upload-client-metrics", 200, jsonBody([]clientmetric.MetricUpdate{{ + Name: name, + Type: "gauge", + Value: value, + Op: "set", }})) return err } diff --git a/client/systray/systray.go b/client/systray/systray.go index 330df8d06a4b1..b9e8fcc59043c 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -66,8 +66,8 @@ func (menu *Menu) Run(client *local.Client) { case <-menu.bgCtx.Done(): } }() - go menu.lc.IncrementGauge(menu.bgCtx, "systray_running", 1) - defer menu.lc.IncrementGauge(menu.bgCtx, "systray_running", -1) + go menu.lc.SetGauge(menu.bgCtx, "systray_running", 1) + defer menu.lc.SetGauge(menu.bgCtx, "systray_running", 0) systray.Run(menu.onReady, menu.onExit) } diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index b2465d28de13a..7695cf598b694 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -143,7 +143,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/ipn+ tailscale.com/util/cibuild from tailscale.com/health+ - tailscale.com/util/clientmetric from 
tailscale.com/net/netmon + tailscale.com/util/clientmetric from tailscale.com/net/netmon+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/ctxkey from tailscale.com/tsweb+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 7f249fe530e15..4648b2c49e849 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1283,13 +1283,8 @@ func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Reques http.Error(w, "unsupported method", http.StatusMethodNotAllowed) return } - type clientMetricJSON struct { - Name string `json:"name"` - Type string `json:"type"` // one of "counter" or "gauge" - Value int `json:"value"` // amount to increment metric by - } - var clientMetrics []clientMetricJSON + var clientMetrics []clientmetric.MetricUpdate if err := json.NewDecoder(r.Body).Decode(&clientMetrics); err != nil { http.Error(w, "invalid JSON body", http.StatusBadRequest) return @@ -1299,14 +1294,12 @@ func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Reques defer metricsMu.Unlock() for _, m := range clientMetrics { - if metric, ok := metrics[m.Name]; ok { - metric.Add(int64(m.Value)) - } else { + metric, ok := metrics[m.Name] + if !ok { if clientmetric.HasPublished(m.Name) { http.Error(w, "Already have a metric named "+m.Name, http.StatusBadRequest) return } - var metric *clientmetric.Metric switch m.Type { case "counter": metric = clientmetric.NewCounter(m.Name) @@ -1317,7 +1310,15 @@ func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Reques return } metrics[m.Name] = metric + } + switch m.Op { + case "add", "": metric.Add(int64(m.Value)) + case "set": + metric.Set(int64(m.Value)) + default: + http.Error(w, "Unknown metric op "+m.Op, http.StatusBadRequest) + return } } diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 9e6b03a15ce93..50cf3b2960499 
100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -58,6 +58,20 @@ const ( TypeCounter ) +// MetricUpdate requests that a client metric value be updated. +// +// This is the request body sent to /localapi/v0/upload-client-metrics. +type MetricUpdate struct { + Name string `json:"name"` + Type string `json:"type"` // one of "counter" or "gauge" + Value int `json:"value"` // amount to increment by or set + + // Op indicates if Value is added to the existing metric value, + // or if the metric is set to Value. + // One of "add" or "set". If empty, defaults to "add". + Op string `json:"op"` +} + // Metric is an integer metric value that's tracked over time. // // It's safe for concurrent use. diff --git a/util/clientmetric/omit.go b/util/clientmetric/omit.go index 5349fc7244cd7..6d678cf20d1ae 100644 --- a/util/clientmetric/omit.go +++ b/util/clientmetric/omit.go @@ -13,6 +13,13 @@ func (*Metric) Value() int64 { return 0 } func (*Metric) Register(expvarInt any) {} func (*Metric) UnregisterAll() {} +type MetricUpdate struct { + Name string `json:"name"` + Type string `json:"type"` + Value int `json:"value"` + Op string `json:"op"` +} + func HasPublished(string) bool { panic("unreachable") } func EncodeLogTailMetricsDelta() string { return "" } func WritePrometheusExpositionFormat(any) {} From 3e89068792e4f1c1f8d7e87414000168548053c9 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 17 Dec 2025 12:32:40 -0500 Subject: [PATCH 0822/1093] net/netmon, wgengine/userspace: purge ChangeDelta.Major and address TODOs (#17823) updates tailscale/corp#33891 Addresses several of the older TODOs in netmon. This removes the Major flag and precomputes the ChangeDelta state, rather than making consumers of ChangeDeltas sort that out themselves. We're also seeing a lot of ChangeDeltas being flagged as "Major" when they are not interesting, triggering rebinds in wgengine that are not needed. This cleans that up and adds a host of additional tests. 
The dependencies are cleaned, notably removing dependency on netmon itself for calculating what is interesting, and what is not. This includes letting individual platforms set a bespoke global "IsInterestingInterface" function. This is only used on Darwin. RebindRequired now roughly follows how "Major" was historically calculated but includes some additional checks for various uninteresting events such as changes in interface addresses that shouldn't trigger a rebind. This significantly reduces thrashing (by roughly half on Darwin clients when switching between NICs). The individual values that we roll into RebindRequired are also exposed so that components consuming netmap.ChangeDelta can ask more targeted questions. Signed-off-by: Jonathan Nobels --- cmd/tailscaled/debug.go | 6 +- ipn/ipnlocal/local.go | 33 ++- ipn/ipnlocal/peerapi.go | 6 +- ipn/ipnlocal/peerapi_macios_ext.go | 10 +- ipn/ipnlocal/serve.go | 2 +- logtail/logtail.go | 4 +- net/netmon/loghelper.go | 39 +-- net/netmon/loghelper_test.go | 9 +- net/netmon/netmon.go | 408 ++++++++++++++++++----------- net/netmon/netmon_darwin.go | 21 +- net/netmon/netmon_freebsd.go | 2 - net/netmon/netmon_linux.go | 2 - net/netmon/netmon_test.go | 382 ++++++++++++++++++++++++--- net/netmon/netmon_windows.go | 2 - net/netmon/polling.go | 4 - net/netmon/state.go | 33 ++- net/sockstats/sockstats_tsgo.go | 4 +- net/tsdial/tsdial.go | 15 +- wgengine/userspace.go | 45 +++- 19 files changed, 754 insertions(+), 273 deletions(-) diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index b16cb28e0df54..8208a6e3c6354 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -138,12 +138,12 @@ func changeDeltaWatcher(ec *eventbus.Client, ctx context.Context, dump func(st * case <-ec.Done(): return case delta := <-changeSub.Events(): - if !delta.Major { - log.Printf("Network monitor fired; not a major change") + if !delta.RebindLikelyRequired { + log.Printf("Network monitor fired; not a significant change") 
return } log.Printf("Network monitor fired. New state:") - dump(delta.New) + dump(delta.CurrentState()) } } } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 73fa56c18258a..ef89af5af5591 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -296,7 +296,7 @@ type LocalBackend struct { authURLTime time.Time // when the authURL was received from the control server; TODO(nickkhyl): move to nodeBackend authActor ipnauth.Actor // an actor who called [LocalBackend.StartLoginInteractive] last, or nil; TODO(nickkhyl): move to nodeBackend egg bool - prevIfState *netmon.State + interfaceState *netmon.State // latest network interface state or nil peerAPIServer *peerAPIServer // or nil peerAPIListeners []*peerAPIListener // TODO(nickkhyl): move to nodeBackend loginFlags controlclient.LoginFlags @@ -561,10 +561,16 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.e.SetStatusCallback(b.setWgengineStatus) - b.prevIfState = netMon.InterfaceState() + b.interfaceState = netMon.InterfaceState() + // Call our linkChange code once with the current state. // Following changes are triggered via the eventbus. 
- b.linkChange(&netmon.ChangeDelta{New: netMon.InterfaceState()}) + cd, err := netmon.NewChangeDelta(nil, b.interfaceState, false, netMon.TailscaleInterfaceName(), false) + if err != nil { + b.logf("[unexpected] setting initial netmon state failed: %v", err) + } else { + b.linkChange(cd) + } if buildfeatures.HasPeerAPIServer { if tunWrap, ok := b.sys.Tun.GetOK(); ok { @@ -936,7 +942,7 @@ func (b *LocalBackend) pauseOrResumeControlClientLocked() { if b.cc == nil { return } - networkUp := b.prevIfState.AnyInterfaceUp() + networkUp := b.interfaceState.AnyInterfaceUp() pauseForNetwork := (b.state == ipn.Stopped && b.NetMap() != nil) || (!networkUp && !testenv.InTest() && !assumeNetworkUpdateForTest()) prefs := b.pm.CurrentPrefs() @@ -963,24 +969,23 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { b.mu.Lock() defer b.mu.Unlock() - ifst := delta.New - hadPAC := b.prevIfState.HasPAC() - b.prevIfState = ifst + b.interfaceState = delta.CurrentState() + b.pauseOrResumeControlClientLocked() prefs := b.pm.CurrentPrefs() - if delta.Major && prefs.AutoExitNode().IsSet() { + if delta.RebindLikelyRequired && prefs.AutoExitNode().IsSet() { b.refreshAutoExitNode = true } var needReconfig bool // If the network changed and we're using an exit node and allowing LAN access, we may need to reconfigure. - if delta.Major && prefs.ExitNodeID() != "" && prefs.ExitNodeAllowLANAccess() { + if delta.RebindLikelyRequired && prefs.ExitNodeID() != "" && prefs.ExitNodeAllowLANAccess() { b.logf("linkChange: in state %v; updating LAN routes", b.state) needReconfig = true } // If the PAC-ness of the network changed, reconfig wireguard+route to add/remove subnets. 
- if hadPAC != ifst.HasPAC() { - b.logf("linkChange: in state %v; PAC changed from %v->%v", b.state, hadPAC, ifst.HasPAC()) + if delta.HasPACOrProxyConfigChanged { + b.logf("linkChange: in state %v; PAC or proxyConfig changed; updating routes", b.state) needReconfig = true } if needReconfig { @@ -998,7 +1003,7 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { // If the local network configuration has changed, our filter may // need updating to tweak default routes. b.updateFilterLocked(prefs) - updateExitNodeUsageWarning(prefs, delta.New, b.health) + updateExitNodeUsageWarning(prefs, delta.CurrentState(), b.health) if buildfeatures.HasPeerAPIServer { cn := b.currentNode() @@ -5059,7 +5064,7 @@ func (b *LocalBackend) authReconfigLocked() { } prefs := b.pm.CurrentPrefs() - hasPAC := b.prevIfState.HasPAC() + hasPAC := b.interfaceState.HasPAC() disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, version.OS()) @@ -5310,7 +5315,7 @@ func (b *LocalBackend) initPeerAPIListenerLocked() { var err error skipListen := i > 0 && isNetstack if !skipListen { - ln, err = ps.listen(a.Addr(), b.prevIfState) + ln, err = ps.listen(a.Addr(), b.interfaceState.TailscaleInterfaceIndex) if err != nil { if peerAPIListenAsync { b.logf("[v1] possibly transient peerapi listen(%q) error, will try again on linkChange: %v", a.Addr(), err) diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index a045086d468fa..20c61c0ec6c52 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -41,7 +41,7 @@ import ( "tailscale.com/wgengine/filter" ) -var initListenConfig func(*net.ListenConfig, netip.Addr, *netmon.State, string) error +var initListenConfig func(config *net.ListenConfig, addr netip.Addr, tunIfIndex int) error // peerDNSQueryHandler is implemented by tsdns.Resolver. 
type peerDNSQueryHandler interface { @@ -53,7 +53,7 @@ type peerAPIServer struct { resolver peerDNSQueryHandler } -func (s *peerAPIServer) listen(ip netip.Addr, ifState *netmon.State) (ln net.Listener, err error) { +func (s *peerAPIServer) listen(ip netip.Addr, tunIfIndex int) (ln net.Listener, err error) { // Android for whatever reason often has problems creating the peerapi listener. // But since we started intercepting it with netstack, it's not even important that // we have a real kernel-level listener. So just create a dummy listener on Android @@ -69,7 +69,7 @@ func (s *peerAPIServer) listen(ip netip.Addr, ifState *netmon.State) (ln net.Lis // On iOS/macOS, this sets the lc.Control hook to // setsockopt the interface index to bind to, to get // out of the network sandbox. - if err := initListenConfig(&lc, ip, ifState, s.b.dialer.TUNName()); err != nil { + if err := initListenConfig(&lc, ip, tunIfIndex); err != nil { return nil, err } if runtime.GOOS == "darwin" || runtime.GOOS == "ios" { diff --git a/ipn/ipnlocal/peerapi_macios_ext.go b/ipn/ipnlocal/peerapi_macios_ext.go index 15932dfe212fb..f23b877bd663c 100644 --- a/ipn/ipnlocal/peerapi_macios_ext.go +++ b/ipn/ipnlocal/peerapi_macios_ext.go @@ -6,11 +6,9 @@ package ipnlocal import ( - "fmt" "net" "net/netip" - "tailscale.com/net/netmon" "tailscale.com/net/netns" ) @@ -21,10 +19,6 @@ func init() { // initListenConfigNetworkExtension configures nc for listening on IP // through the iOS/macOS Network/System Extension (Packet Tunnel // Provider) sandbox. 
-func initListenConfigNetworkExtension(nc *net.ListenConfig, ip netip.Addr, st *netmon.State, tunIfName string) error { - tunIf, ok := st.Interface[tunIfName] - if !ok { - return fmt.Errorf("no interface with name %q", tunIfName) - } - return netns.SetListenConfigInterfaceIndex(nc, tunIf.Index) +func initListenConfigNetworkExtension(nc *net.ListenConfig, ip netip.Addr, ifaceIndex int) error { + return netns.SetListenConfigInterfaceIndex(nc, ifaceIndex) } diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 69a68f66ee098..4d6055bbd81e8 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -171,7 +171,7 @@ func (s *localListener) Run() { // required by the network sandbox to allow binding to // a specific interface. Without this hook, the system // chooses a default interface to bind to. - if err := initListenConfig(&lc, ip, s.b.prevIfState, s.b.dialer.TUNName()); err != nil { + if err := initListenConfig(&lc, ip, s.b.interfaceState.TailscaleInterfaceIndex); err != nil { s.logf("localListener failed to init listen config %v, backing off: %v", s.ap, err) s.bo.BackOff(s.ctx, err) continue diff --git a/logtail/logtail.go b/logtail/logtail.go index 91bfed8b183a8..ce50c1c0a7f52 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -445,7 +445,7 @@ func (lg *Logger) internetUp() bool { // [netmon.ChangeDelta] events to detect whether the Internet is expected to be // reachable. 
func (lg *Logger) onChangeDelta(delta *netmon.ChangeDelta) { - if delta.New.AnyInterfaceUp() { + if delta.AnyInterfaceUp() { fmt.Fprintf(lg.stderr, "logtail: internet back up\n") lg.networkIsUp.Set() } else { @@ -464,7 +464,7 @@ func (lg *Logger) awaitInternetUp(ctx context.Context) { } upc := make(chan bool, 1) defer lg.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - if delta.New.AnyInterfaceUp() { + if delta.AnyInterfaceUp() { select { case upc <- true: default: diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go index 675762cd10b18..2876e9b12481c 100644 --- a/net/netmon/loghelper.go +++ b/net/netmon/loghelper.go @@ -6,38 +6,43 @@ package netmon import ( "context" "sync" + "time" "tailscale.com/types/logger" "tailscale.com/util/eventbus" ) +const cooldownSeconds = 300 + // LinkChangeLogLimiter returns a new [logger.Logf] that logs each unique -// format string to the underlying logger only once per major LinkChange event. +// format string to the underlying logger only once per major LinkChange event +// with a cooldownSeconds second cooldown. // // The logger stops tracking seen format strings when the provided context is // done. func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) logger.Logf { - var formatSeen sync.Map // map[string]bool - sub := eventbus.SubscribeFunc(nm.b, func(cd ChangeDelta) { - // If we're in a major change or a time jump, clear the seen map. - if cd.Major || cd.TimeJumped { - formatSeen.Clear() + var formatLastSeen sync.Map // map[string]int64 + + sub := eventbus.SubscribeFunc(nm.b, func(cd *ChangeDelta) { + // Any link changes that are flagged as likely require a rebind are + // interesting enough that we should log them. + if cd.RebindLikelyRequired { + formatLastSeen.Clear() } }) context.AfterFunc(ctx, sub.Close) return func(format string, args ...any) { - // We only store 'true' in the map, so if it's present then it - // means we've already logged this format string. 
- _, loaded := formatSeen.LoadOrStore(format, true) - if loaded { - // TODO(andrew-d): we may still want to log this - // message every N minutes (1x/hour?) even if it's been - // seen, so that debugging doesn't require searching - // back in the logs for an unbounded amount of time. - // - // See: https://github.com/tailscale/tailscale/issues/13145 - return + // get the current timestamp + now := time.Now().Unix() + lastSeen, ok := formatLastSeen.Load(format) + if ok { + // if we've seen this format string within the last cooldownSeconds, skip logging + if now-lastSeen.(int64) < cooldownSeconds { + return + } } + // update the last seen timestamp for this format string + formatLastSeen.Store(format, now) logf(format, args...) } diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go index ca3b1284cfa0e..968c2fd41d950 100644 --- a/net/netmon/loghelper_test.go +++ b/net/netmon/loghelper_test.go @@ -64,7 +64,14 @@ func syncTestLinkChangeLogLimiter(t *testing.T) { // InjectEvent doesn't work because it's not a major event, so we // instead inject the event ourselves. injector := eventbustest.NewInjector(t, bus) - eventbustest.Inject(injector, ChangeDelta{Major: true}) + cd, err := NewChangeDelta(nil, &State{}, true, "tailscale0", true) + if err != nil { + t.Fatal(err) + } + if cd.RebindLikelyRequired != true { + t.Fatalf("expected RebindLikelyRequired to be true, got false") + } + eventbustest.Inject(injector, cd) synctest.Wait() logf("hello %s", "world") diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index 657da04d5978c..49fb426ae1993 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -7,10 +7,12 @@ package netmon import ( - "encoding/json" "errors" + "fmt" + "log" "net/netip" "runtime" + "slices" "sync" "time" @@ -45,12 +47,15 @@ type osMon interface { // until the osMon is closed. After a Close, the returned // error is ignored. 
Receive() (message, error) - - // IsInterestingInterface reports whether the provided interface should - // be considered for network change events. - IsInterestingInterface(iface string) bool } +// IsInterestingInterface is the function used to determine whether +// a given interface name is interesting enough to pay attention to +// for network change monitoring purposes. +// +// If nil, all interfaces are considered interesting. +var IsInterestingInterface func(Interface, []netip.Prefix) bool + // Monitor represents a monitoring instance. type Monitor struct { logf logger.Logf @@ -62,10 +67,6 @@ type Monitor struct { stop chan struct{} // closed on Stop static bool // static Monitor that doesn't actually monitor - // Things that must be set early, before use, - // and not change at runtime. - tsIfName string // tailscale interface name, if known/set ("tailscale0", "utun3", ...) - mu syncs.Mutex // guards all following fields cbs set.HandleSet[ChangeFunc] ifState *State @@ -77,7 +78,8 @@ type Monitor struct { goroutines sync.WaitGroup wallTimer *time.Timer // nil until Started; re-armed AfterFunc per tick lastWall time.Time - timeJumped bool // whether we need to send a changed=true after a big time jump + timeJumped bool // whether we need to send a changed=true after a big time jump + tsIfName string // tailscale interface name, if known/set ("tailscale0", "utun3", ...) } // ChangeFunc is a callback function registered with Monitor that's called when the @@ -85,32 +87,225 @@ type Monitor struct { type ChangeFunc func(*ChangeDelta) // ChangeDelta describes the difference between two network states. +// +// Use NewChangeDelta to construct a delta and compute the cached fields. type ChangeDelta struct { - // Old is the old interface state, if known. + // old is the old interface state, if known. // It's nil if the old state is unknown. - // Do not mutate it. - Old *State + old *State // New is the new network state. // It is always non-nil. - // Do not mutate it. 
- New *State
-
- // Major is our legacy boolean of whether the network changed in some major
- // way.
- //
- // Deprecated: do not remove. As of 2023-08-23 we're in a renewed effort to
- // remove it and ask specific qustions of ChangeDelta instead. Look at Old
- // and New (or add methods to ChangeDelta) instead of using Major.
- Major bool
+ new *State

 // TimeJumped is whether there was a big jump in wall time since the last
- // time we checked. This is a hint that a mobile sleeping device might have
+ // time we checked. This is a hint that a sleeping device might have
 // come out of sleep.
 TimeJumped bool

- // TODO(bradfitz): add some lazy cached fields here as needed with methods
- // on *ChangeDelta to let callers ask specific questions
+ // The tailscale interface name, e.g. "tailscale0", "utun3", etc. Not all
+ // platforms know this or set it. Copied from netmon.Monitor.tsIfName.
+ TailscaleIfaceName string
+
+ DefaultRouteInterface string
+
+ // Computed Fields
+
+ DefaultInterfaceChanged bool // whether default route interface changed
+ IsLessExpensive bool // whether new state's default interface is less expensive than old.
+ HasPACOrProxyConfigChanged bool // whether PAC/HTTP proxy config changed
+ InterfaceIPsChanged bool // whether any interface IPs changed in a meaningful way
+ AvailableProtocolsChanged bool // whether we have seen a change in available IPv4/IPv6
+ DefaultInterfaceMaybeViable bool // whether the default interface is potentially viable (has usable IPs, is up and is not the tunnel itself)
+ IsInitialState bool // whether this is the initial state (old == nil, new != nil)
+
+ // RebindLikelyRequired combines the various fields above to report whether this change likely requires us
+ // to rebind sockets. This is a very conservative estimate and covers a number of cases where a rebind
+ // may not be strictly necessary. Consumers of the ChangeDelta should consider checking the individual fields
+ // above or the state of their sockets.
+ RebindLikelyRequired bool +} + +// CurrentState returns the current (new) state after the change. +func (cd *ChangeDelta) CurrentState() *State { + return cd.new +} + +// NewChangeDelta builds a ChangeDelta and eagerly computes the cached fields. +// forceViability, if true, forces DefaultInterfaceMaybeViable to be true regardless of the +// actual state of the default interface. This is useful in testing. +func NewChangeDelta(old, new *State, timeJumped bool, tsIfName string, forceViability bool) (*ChangeDelta, error) { + cd := ChangeDelta{ + old: old, + new: new, + TimeJumped: timeJumped, + TailscaleIfaceName: tsIfName, + } + + if cd.new == nil { + log.Printf("[unexpected] NewChangeDelta called with nil new state") + return nil, errors.New("new state cannot be nil") + } else if cd.old == nil && cd.new != nil { + cd.DefaultInterfaceChanged = cd.new.DefaultRouteInterface != "" + cd.IsLessExpensive = false + cd.HasPACOrProxyConfigChanged = true + cd.InterfaceIPsChanged = true + cd.IsInitialState = true + } else { + cd.AvailableProtocolsChanged = (cd.old.HaveV4 != cd.new.HaveV4) || (cd.old.HaveV6 != cd.new.HaveV6) + cd.DefaultInterfaceChanged = cd.old.DefaultRouteInterface != cd.new.DefaultRouteInterface + cd.IsLessExpensive = cd.old.IsExpensive && !cd.new.IsExpensive + cd.HasPACOrProxyConfigChanged = (cd.old.PAC != cd.new.PAC) || (cd.old.HTTPProxy != cd.new.HTTPProxy) + cd.InterfaceIPsChanged = cd.isInterestingInterfaceChange() + } + + cd.DefaultRouteInterface = new.DefaultRouteInterface + defIf := new.Interface[cd.DefaultRouteInterface] + + // The default interface is not viable if it is down or it is the Tailscale interface itself. + if !forceViability && (!defIf.IsUp() || cd.DefaultRouteInterface == tsIfName) { + cd.DefaultInterfaceMaybeViable = false + } else { + cd.DefaultInterfaceMaybeViable = true + } + + // Compute rebind requirement. The default interface needs to be viable and + // one of the other conditions needs to be true. 
+ cd.RebindLikelyRequired = (cd.old == nil ||
+ cd.TimeJumped ||
+ cd.DefaultInterfaceChanged ||
+ cd.InterfaceIPsChanged ||
+ cd.IsLessExpensive ||
+ cd.HasPACOrProxyConfigChanged ||
+ cd.AvailableProtocolsChanged) &&
+ cd.DefaultInterfaceMaybeViable
+
+ return &cd, nil
+}
+
+// StateDesc returns a description of the old and new states for logging.
+func (cd *ChangeDelta) StateDesc() string {
+ return fmt.Sprintf("old: %v new: %v", cd.old, cd.new)
+}
+
+// InterfaceIPDisappeared reports whether the given IP address exists on any interface
+// in the old state, but not in the new state.
+func (cd *ChangeDelta) InterfaceIPDisappeared(ip netip.Addr) bool {
+ if cd.old == nil {
+ return false
+ }
+ if cd.new == nil && cd.old.HasIP(ip) {
+ return true
+ }
+ // "Disappeared" means the old state had the IP and the new state does not;
+ // the previous expression was inverted and reported IPs that *appeared*.
+ return cd.old.HasIP(ip) && !cd.new.HasIP(ip)
+}
+
+// AnyInterfaceUp reports whether any interfaces are up in the new state.
+func (cd *ChangeDelta) AnyInterfaceUp() bool {
+ if cd.new == nil {
+ return false
+ }
+ for _, ifi := range cd.new.Interface {
+ if ifi.IsUp() {
+ return true
+ }
+ }
+ return false
+}
+
+// isInterestingInterfaceChange reports whether any interfaces have changed in a meaningful way.
+// This excludes interfaces that are not interesting per IsInterestingInterface and
+// filters out changes to interface IPs that are uninteresting (e.g. link-local addresses).
+func (cd *ChangeDelta) isInterestingInterfaceChange() bool {
+ // If there is no old state, everything is considered changed.
+ if cd.old == nil {
+ return true
+ }
+
+ // Compare interfaces in both directions. Old to new and new to old.
+
+ for iname, oldInterface := range cd.old.Interface {
+ if iname == cd.TailscaleIfaceName {
+ // Ignore changes in the Tailscale interface itself.
+ continue + } + oldIps := filterRoutableIPs(cd.old.InterfaceIPs[iname]) + if IsInterestingInterface != nil && !IsInterestingInterface(oldInterface, oldIps) { + continue + } + + // Old interfaces with no routable addresses are not interesting + if len(oldIps) == 0 { + continue + } + + // The old interface doesn't exist in the new interface set and it has + // a global unicast IP. That's considered a change from the perspective + // of anything that may have been bound to it. If it didn't have a global + // unicast IP, it's not interesting. + newInterface, ok := cd.new.Interface[iname] + if !ok { + return true + } + newIps, ok := cd.new.InterfaceIPs[iname] + if !ok { + return true + } + newIps = filterRoutableIPs(newIps) + + if !oldInterface.Equal(newInterface) || !prefixesEqual(oldIps, newIps) { + return true + } + } + + for iname, newInterface := range cd.new.Interface { + if iname == cd.TailscaleIfaceName { + continue + } + newIps := filterRoutableIPs(cd.new.InterfaceIPs[iname]) + if IsInterestingInterface != nil && !IsInterestingInterface(newInterface, newIps) { + continue + } + + // New interfaces with no routable addresses are not interesting + if len(newIps) == 0 { + continue + } + + oldInterface, ok := cd.old.Interface[iname] + if !ok { + return true + } + + oldIps, ok := cd.old.InterfaceIPs[iname] + if !ok { + // Redundant but we can't dig up the "old" IPs for this interface. + return true + } + oldIps = filterRoutableIPs(oldIps) + + // The interface's IPs, Name, MTU, etc have changed. This is definitely interesting. + if !newInterface.Equal(oldInterface) || !prefixesEqual(oldIps, newIps) { + return true + } + } + return false +} + +func filterRoutableIPs(addrs []netip.Prefix) []netip.Prefix { + var filtered []netip.Prefix + for _, pfx := range addrs { + a := pfx.Addr() + // Skip link-local multicast addresses. 
+ if a.IsLinkLocalMulticast() { + continue + } + + if isUsableV4(a) || isUsableV6(a) { + filtered = append(filtered, pfx) + } + } + return filtered } // New instantiates and starts a monitoring instance. @@ -174,9 +369,17 @@ func (m *Monitor) interfaceStateUncached() (*State, error) { // This must be called only early in tailscaled startup before the monitor is // used. func (m *Monitor) SetTailscaleInterfaceName(ifName string) { + m.mu.Lock() + defer m.mu.Unlock() m.tsIfName = ifName } +func (m *Monitor) TailscaleInterfaceName() string { + m.mu.Lock() + defer m.mu.Unlock() + return m.tsIfName +} + // GatewayAndSelfIP returns the current network's default gateway, and // the machine's default IP for that gateway. // @@ -344,17 +547,6 @@ func (m *Monitor) pump() { } } -// isInterestingInterface reports whether the provided interface should be -// considered when checking for network state changes. -// The ips parameter should be the IPs of the provided interface. -func (m *Monitor) isInterestingInterface(i Interface, ips []netip.Prefix) bool { - if !m.om.IsInterestingInterface(i.Name) { - return false - } - - return true -} - // debounce calls the callback function with a delay between events // and exits when a stop is issued. func (m *Monitor) debounce() { @@ -376,7 +568,10 @@ func (m *Monitor) debounce() { select { case <-m.stop: return - case <-time.After(250 * time.Millisecond): + // 1s is reasonable debounce time for network changes. Events such as undocking a laptop + // or roaming onto wifi will often generate multiple events in quick succession as interfaces + // flap. We want to avoid spamming consumers of these events. 
+ case <-time.After(1000 * time.Millisecond): } } } @@ -403,146 +598,51 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { return } - delta := ChangeDelta{ - Old: oldState, - New: newState, - TimeJumped: timeJumped, + delta, err := NewChangeDelta(oldState, newState, timeJumped, m.tsIfName, false) + if err != nil { + m.logf("[unexpected] error creating ChangeDelta: %v", err) + return } - delta.Major = m.IsMajorChangeFrom(oldState, newState) - if delta.Major { + if delta.RebindLikelyRequired { m.gwValid = false - - if s1, s2 := oldState.String(), delta.New.String(); s1 == s2 { - m.logf("[unexpected] network state changed, but stringification didn't: %v", s1) - m.logf("[unexpected] old: %s", jsonSummary(oldState)) - m.logf("[unexpected] new: %s", jsonSummary(newState)) - } } m.ifState = newState // See if we have a queued or new time jump signal. if timeJumped { m.resetTimeJumpedLocked() - if !delta.Major { - // Only log if it wasn't an interesting change. - m.logf("time jumped (probably wake from sleep); synthesizing major change event") - delta.Major = true - } } metricChange.Add(1) - if delta.Major { + if delta.RebindLikelyRequired { metricChangeMajor.Add(1) } if delta.TimeJumped { metricChangeTimeJump.Add(1) } - m.changed.Publish(delta) + m.changed.Publish(*delta) for _, cb := range m.cbs { - go cb(&delta) + go cb(delta) } } -// IsMajorChangeFrom reports whether the transition from s1 to s2 is -// a "major" change, where major roughly means it's worth tearing down -// a bunch of connections and rebinding. -// -// TODO(bradiftz): tigten this definition. -func (m *Monitor) IsMajorChangeFrom(s1, s2 *State) bool { - if s1 == nil && s2 == nil { +// reports whether a and b contain the same set of prefixes regardless of order. 
+func prefixesEqual(a, b []netip.Prefix) bool { + if len(a) != len(b) { return false } - if s1 == nil || s2 == nil { - return true - } - if s1.HaveV6 != s2.HaveV6 || - s1.HaveV4 != s2.HaveV4 || - s1.IsExpensive != s2.IsExpensive || - s1.DefaultRouteInterface != s2.DefaultRouteInterface || - s1.HTTPProxy != s2.HTTPProxy || - s1.PAC != s2.PAC { - return true - } - for iname, i := range s1.Interface { - if iname == m.tsIfName { - // Ignore changes in the Tailscale interface itself. - continue - } - ips := s1.InterfaceIPs[iname] - if !m.isInterestingInterface(i, ips) { - continue - } - i2, ok := s2.Interface[iname] - if !ok { - return true - } - ips2, ok := s2.InterfaceIPs[iname] - if !ok { - return true - } - if !i.Equal(i2) || !prefixesMajorEqual(ips, ips2) { - return true - } - } - // Iterate over s2 in case there is a field in s2 that doesn't exist in s1 - for iname, i := range s2.Interface { - if iname == m.tsIfName { - // Ignore changes in the Tailscale interface itself. - continue - } - ips := s2.InterfaceIPs[iname] - if !m.isInterestingInterface(i, ips) { - continue - } - i1, ok := s1.Interface[iname] - if !ok { - return true - } - ips1, ok := s1.InterfaceIPs[iname] - if !ok { - return true - } - if !i.Equal(i1) || !prefixesMajorEqual(ips, ips1) { - return true - } - } - return false -} -// prefixesMajorEqual reports whether a and b are equal after ignoring -// boring things like link-local, loopback, and multicast addresses. -func prefixesMajorEqual(a, b []netip.Prefix) bool { - // trim returns a subslice of p with link local unicast, - // loopback, and multicast prefixes removed from the front. 
- trim := func(p []netip.Prefix) []netip.Prefix { - for len(p) > 0 { - a := p[0].Addr() - if a.IsLinkLocalUnicast() || a.IsLoopback() || a.IsMulticast() { - p = p[1:] - continue - } - break - } - return p - } - for { - a = trim(a) - b = trim(b) - if len(a) == 0 || len(b) == 0 { - return len(a) == 0 && len(b) == 0 - } - if a[0] != b[0] { - return false - } - a, b = a[1:], b[1:] - } -} + aa := make([]netip.Prefix, len(a)) + bb := make([]netip.Prefix, len(b)) + copy(aa, a) + copy(bb, b) -func jsonSummary(x any) any { - j, err := json.Marshal(x) - if err != nil { - return err + less := func(x, y netip.Prefix) int { + return x.Addr().Compare(y.Addr()) } - return j + + slices.SortFunc(aa, less) + slices.SortFunc(bb, less) + return slices.Equal(aa, bb) } func wallTime() time.Time { diff --git a/net/netmon/netmon_darwin.go b/net/netmon/netmon_darwin.go index 9c5e76475f3fd..042f9a3b750c2 100644 --- a/net/netmon/netmon_darwin.go +++ b/net/netmon/netmon_darwin.go @@ -16,6 +16,12 @@ import ( "tailscale.com/util/eventbus" ) +func init() { + IsInterestingInterface = func(iface Interface, prefixes []netip.Prefix) bool { + return isInterestingInterface(iface.Name) + } +} + const debugRouteMessages = false // unspecifiedMessage is a minimal message implementation that should not @@ -125,11 +131,10 @@ func addrType(addrs []route.Addr, rtaxType int) route.Addr { return nil } -func (m *darwinRouteMon) IsInterestingInterface(iface string) bool { +func isInterestingInterface(iface string) bool { baseName := strings.TrimRight(iface, "0123456789") switch baseName { - // TODO(maisem): figure out what this list should actually be. 
- case "llw", "awdl", "ipsec": + case "llw", "awdl", "ipsec", "gif", "XHC", "anpi", "lo", "utun": return false } return true @@ -137,7 +142,7 @@ func (m *darwinRouteMon) IsInterestingInterface(iface string) bool { func (m *darwinRouteMon) skipInterfaceAddrMessage(msg *route.InterfaceAddrMessage) bool { if la, ok := addrType(msg.Addrs, unix.RTAX_IFP).(*route.LinkAddr); ok { - if !m.IsInterestingInterface(la.Name) { + if !isInterestingInterface(la.Name) { return true } } @@ -150,6 +155,14 @@ func (m *darwinRouteMon) skipRouteMessage(msg *route.RouteMessage) bool { // dst = fe80::b476:66ff:fe30:c8f6%15 return true } + + // We can skip route messages from uninteresting interfaces. We do this upstream + // against the InterfaceMonitor, but skipping them here avoids unnecessary work. + if la, ok := addrType(msg.Addrs, unix.RTAX_IFP).(*route.LinkAddr); ok { + if !isInterestingInterface(la.Name) { + return true + } + } return false } diff --git a/net/netmon/netmon_freebsd.go b/net/netmon/netmon_freebsd.go index 842cbdb0d6476..3a4fb44d8f0a0 100644 --- a/net/netmon/netmon_freebsd.go +++ b/net/netmon/netmon_freebsd.go @@ -34,8 +34,6 @@ func newOSMon(_ *eventbus.Bus, logf logger.Logf, m *Monitor) (osMon, error) { return &devdConn{conn}, nil } -func (c *devdConn) IsInterestingInterface(iface string) bool { return true } - func (c *devdConn) Close() error { return c.conn.Close() } diff --git a/net/netmon/netmon_linux.go b/net/netmon/netmon_linux.go index a1077c2578b14..aa5253f9be28b 100644 --- a/net/netmon/netmon_linux.go +++ b/net/netmon/netmon_linux.go @@ -81,8 +81,6 @@ func newOSMon(bus *eventbus.Bus, logf logger.Logf, m *Monitor) (osMon, error) { }, nil } -func (c *nlConn) IsInterestingInterface(iface string) bool { return true } - func (c *nlConn) Close() error { c.busClient.Close() return c.conn.Close() diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index 6a87cedb8e7ea..8fbf512ddb50f 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go 
@@ -8,6 +8,7 @@ import ( "net" "net/netip" "reflect" + "strings" "sync/atomic" "testing" "time" @@ -138,7 +139,7 @@ func TestMonitorMode(t *testing.T) { n := 0 mon.RegisterChangeCallback(func(d *ChangeDelta) { n++ - t.Logf("cb: changed=%v, ifSt=%v", d.Major, d.New) + t.Logf("cb: changed=%v, ifSt=%v", d.RebindLikelyRequired, d.CurrentState()) }) mon.Start() <-done @@ -149,24 +150,22 @@ func TestMonitorMode(t *testing.T) { mon.Start() eventbustest.Expect(tw, func(event *ChangeDelta) (bool, error) { n++ - t.Logf("cb: changed=%v, ifSt=%v", event.Major, event.New) + t.Logf("cb: changed=%v, ifSt=%v", event.RebindLikelyRequired, event.CurrentState()) return false, nil // Return false, indicating we wanna look for more events }) t.Logf("%v events", n) } } -// tests (*State).IsMajorChangeFrom -func TestIsMajorChangeFrom(t *testing.T) { +// tests (*ChangeDelta).RebindRequired +func TestRebindRequired(t *testing.T) { + // s1 cannot be nil by definition tests := []struct { - name string - s1, s2 *State - want bool + name string + s1, s2 *State + tsIfName string + want bool }{ - { - name: "eq_nil", - want: false, - }, { name: "nil_mix", s2: new(State), @@ -188,6 +187,110 @@ func TestIsMajorChangeFrom(t *testing.T) { }, want: false, }, + { + name: "new-with-no-addr", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + "bar": {}, + }, + }, + want: false, + }, + { + name: "ignore-tailscale-interface-appearing", + tsIfName: "tailscale0", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + "tailscale0": 
{netip.MustParsePrefix("100.69.4.20/32")}, + }, + }, + want: false, + }, + { + name: "ignore-tailscale-interface-disappearing", + tsIfName: "tailscale0", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + "tailscale0": {netip.MustParsePrefix("100.69.4.20/32")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + want: false, + }, + { + name: "new-with-multicast-addr", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + "bar": {netip.MustParsePrefix("224.0.0.1/32")}, + }, + }, + want: false, + }, + { + name: "old-with-addr-dropped", + s1: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + "bar": {netip.MustParsePrefix("192.168.0.1/32")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "bar": {netip.MustParsePrefix("192.168.0.1/32")}, + }, + }, + want: true, + }, + { + name: "old-with-no-addr-dropped", + s1: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {}, + "bar": {netip.MustParsePrefix("192.168.0.1/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "bar": {netip.MustParsePrefix("192.168.0.1/16")}, + }, + }, + want: false, + }, { name: "default-route-changed", s1: &State{ @@ -221,6 +324,8 @@ func TestIsMajorChangeFrom(t *testing.T) { want: true, }, { + // (barnstar) TODO: ULA addresses are only useful in some contexts, + // so maybe this shouldn't trigger rebinds after all? Needs more thought. 
name: "ipv6-ula-addressed-appeared", s1: &State{ DefaultRouteInterface: "foo", @@ -233,15 +338,147 @@ func TestIsMajorChangeFrom(t *testing.T) { InterfaceIPs: map[string][]netip.Prefix{ "foo": { netip.MustParsePrefix("10.0.1.2/16"), - // Brad saw this address coming & going on his home LAN, possibly - // via an Apple TV Thread routing advertisement? (Issue 9040) netip.MustParsePrefix("fd15:bbfa:c583:4fce:f4fb:4ff:fe1a:4148/64"), }, }, }, - want: true, // TODO(bradfitz): want false (ignore the IPv6 ULA address on foo) + want: true, + }, + { + // (barnstar) TODO: ULA addresses are only useful in some contexts, + // so maybe this shouldn't trigger rebinds after all? Needs more thought. + name: "ipv6-ula-addressed-disappeared", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": { + netip.MustParsePrefix("10.0.1.2/16"), + netip.MustParsePrefix("fd15:bbfa:c583:4fce:f4fb:4ff:fe1a:4148/64"), + }, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + want: true, + }, + { + name: "ipv6-link-local-addressed-appeared", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": {netip.MustParsePrefix("10.0.1.2/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": { + netip.MustParsePrefix("10.0.1.2/16"), + netip.MustParsePrefix("fe80::f242:25ff:fe64:b280/64"), + }, + }, + }, + want: false, + }, + { + name: "ipv6-addressed-changed", + s1: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": { + netip.MustParsePrefix("10.0.1.2/16"), + netip.MustParsePrefix("2001::f242:25ff:fe64:b280/64"), + netip.MustParsePrefix("fe80::f242:25ff:fe64:b280/64"), + }, + }, + }, + s2: &State{ + DefaultRouteInterface: "foo", + InterfaceIPs: map[string][]netip.Prefix{ + "foo": { + 
netip.MustParsePrefix("10.0.1.2/16"), + netip.MustParsePrefix("2001::beef:8bad:f00d:b280/64"), + netip.MustParsePrefix("fe80::f242:25ff:fe64:b280/64"), + }, + }, + }, + want: true, + }, + { + name: "have-addr-changed", + s1: &State{ + HaveV6: false, + HaveV4: false, + }, + + s2: &State{ + HaveV6: true, + HaveV4: true, + }, + want: true, + }, + { + name: "have-addr-unchanged", + s1: &State{ + HaveV6: true, + HaveV4: true, + }, + + s2: &State{ + HaveV6: true, + HaveV4: true, + }, + want: false, + }, + { + name: "new-is-less-expensive", + s1: &State{ + IsExpensive: true, + }, + + s2: &State{ + IsExpensive: false, + }, + want: true, + }, + { + name: "new-is-more-expensive", + s1: &State{ + IsExpensive: false, + }, + + s2: &State{ + IsExpensive: true, + }, + want: false, + }, + { + name: "uninteresting-interface-added", + s1: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "bar": {netip.MustParsePrefix("192.168.0.1/16")}, + }, + }, + s2: &State{ + DefaultRouteInterface: "bar", + InterfaceIPs: map[string][]netip.Prefix{ + "bar": {netip.MustParsePrefix("192.168.0.1/16")}, + "boring": {netip.MustParsePrefix("fd7a:115c:a1e0:ab12:4843:cd96:625e:13ce/64")}, + }, + }, + want: false, }, } + + withIsInterestingInterface(t, func(ni Interface, pfxs []netip.Prefix) bool { + return !strings.HasPrefix(ni.Name, "boring") + }) + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { // Populate dummy interfaces where missing. 
@@ -258,16 +495,111 @@ func TestIsMajorChangeFrom(t *testing.T) { } } - var m Monitor - m.om = &testOSMon{ - Interesting: func(name string) bool { return true }, + cd, err := NewChangeDelta(tt.s1, tt.s2, false, tt.tsIfName, true) + if err != nil { + t.Fatalf("NewChangeDelta error: %v", err) } - if got := m.IsMajorChangeFrom(tt.s1, tt.s2); got != tt.want { - t.Errorf("IsMajorChange = %v; want %v", got, tt.want) + _ = cd // in case we need it later + if got := cd.RebindLikelyRequired; got != tt.want { + t.Errorf("RebindRequired = %v; want %v", got, tt.want) } }) } } + +func withIsInterestingInterface(t *testing.T, fn func(Interface, []netip.Prefix) bool) { + t.Helper() + old := IsInterestingInterface + IsInterestingInterface = fn + t.Cleanup(func() { IsInterestingInterface = old }) +} + +func TestIncludesRoutableIP(t *testing.T) { + routable := []netip.Prefix{ + netip.MustParsePrefix("1.2.3.4/32"), + netip.MustParsePrefix("10.0.0.1/24"), // RFC1918 IPv4 (private) + netip.MustParsePrefix("172.16.0.1/12"), // RFC1918 IPv4 (private) + netip.MustParsePrefix("192.168.1.1/24"), // RFC1918 IPv4 (private) + netip.MustParsePrefix("fd15:dead:beef::1/64"), // IPv6 ULA + netip.MustParsePrefix("2001:db8::1/64"), // global IPv6 + } + + nonRoutable := []netip.Prefix{ + netip.MustParsePrefix("ff00::/8"), // multicast IPv6 (should be filtered) + netip.MustParsePrefix("fe80::1/64"), // link-local IPv6 + netip.MustParsePrefix("::1/128"), // loopback IPv6 + netip.MustParsePrefix("::/128"), // unspecified IPv6 + netip.MustParsePrefix("224.0.0.1/32"), // multicast IPv4 + netip.MustParsePrefix("127.0.0.1/32"), // loopback IPv4 + } + + got, want := filterRoutableIPs( + append(nonRoutable, routable...), + ), routable + + if !reflect.DeepEqual(got, want) { + t.Fatalf("filterRoutableIPs returned %v; want %v", got, want) + } +} + +func TestPrefixesEqual(t *testing.T) { + tests := []struct { + name string + a, b []netip.Prefix + want bool + }{ + { + name: "empty", + a: []netip.Prefix{}, + b: 
[]netip.Prefix{}, + want: true, + }, + { + name: "single-equal", + a: []netip.Prefix{netip.MustParsePrefix("10.0.0.1/24")}, + b: []netip.Prefix{netip.MustParsePrefix("10.0.0.1/24")}, + want: true, + }, + { + name: "single-different", + a: []netip.Prefix{netip.MustParsePrefix("10.0.0.1/24")}, + b: []netip.Prefix{netip.MustParsePrefix("10.0.0.2/24")}, + want: false, + }, + { + name: "unordered-equal", + a: []netip.Prefix{ + netip.MustParsePrefix("10.0.0.1/24"), + netip.MustParsePrefix("10.0.2.1/24"), + }, + b: []netip.Prefix{ + netip.MustParsePrefix("10.0.2.1/24"), + netip.MustParsePrefix("10.0.0.1/24"), + }, + want: true, + }, + { + name: "subset", + a: []netip.Prefix{ + netip.MustParsePrefix("10.0.2.1/24"), + }, + b: []netip.Prefix{ + netip.MustParsePrefix("10.0.2.1/24"), + netip.MustParsePrefix("10.0.0.1/24"), + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := prefixesEqual(tt.a, tt.b) + if got != tt.want { + t.Errorf("prefixesEqual(%v, %v) = %v; want %v", tt.a, tt.b, got, tt.want) + } + }) + } +} + func TestForeachInterface(t *testing.T) { tests := []struct { name string @@ -307,15 +639,3 @@ func TestForeachInterface(t *testing.T) { }) } } - -type testOSMon struct { - osMon - Interesting func(name string) bool -} - -func (m *testOSMon) IsInterestingInterface(name string) bool { - if m.Interesting == nil { - return true - } - return m.Interesting(name) -} diff --git a/net/netmon/netmon_windows.go b/net/netmon/netmon_windows.go index 718724b6d3f8d..e8966faf00f46 100644 --- a/net/netmon/netmon_windows.go +++ b/net/netmon/netmon_windows.go @@ -74,8 +74,6 @@ func newOSMon(_ *eventbus.Bus, logf logger.Logf, pm *Monitor) (osMon, error) { return m, nil } -func (m *winMon) IsInterestingInterface(iface string) bool { return true } - func (m *winMon) Close() (ret error) { m.cancel() m.noDeadlockTicker.Stop() diff --git a/net/netmon/polling.go b/net/netmon/polling.go index ce1618ed6c987..2a3e44cba0b9d 100644 --- 
a/net/netmon/polling.go +++ b/net/netmon/polling.go @@ -35,10 +35,6 @@ type pollingMon struct { stop chan struct{} } -func (pm *pollingMon) IsInterestingInterface(iface string) bool { - return true -} - func (pm *pollingMon) Close() error { pm.closeOnce.Do(func() { close(pm.stop) diff --git a/net/netmon/state.go b/net/netmon/state.go index 27e3524e8d7c9..aefbbb22d2830 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -149,12 +149,28 @@ type Interface struct { Desc string // extra description (used on Windows) } -func (i Interface) IsLoopback() bool { return isLoopback(i.Interface) } -func (i Interface) IsUp() bool { return isUp(i.Interface) } +func (i Interface) IsLoopback() bool { + if i.Interface == nil { + return false + } + return isLoopback(i.Interface) +} + +func (i Interface) IsUp() bool { + if i.Interface == nil { + return false + } + return isUp(i.Interface) +} + func (i Interface) Addrs() ([]net.Addr, error) { if i.AltAddrs != nil { return i.AltAddrs, nil } + if i.Interface == nil { + return nil, nil + } + return i.Interface.Addrs() } @@ -271,6 +287,9 @@ type State struct { // PAC is the URL to the Proxy Autoconfig URL, if applicable. PAC string + + // TailscaleInterfaceIndex is the index of the Tailscale interface + TailscaleInterfaceIndex int } func (s *State) String() string { @@ -485,6 +504,16 @@ func getState(optTSInterfaceName string) (*State, error) { ifUp := ni.IsUp() s.Interface[ni.Name] = ni s.InterfaceIPs[ni.Name] = append(s.InterfaceIPs[ni.Name], pfxs...) + + // Skip uninteresting interfaces. 
+ if IsInterestingInterface != nil && !IsInterestingInterface(ni, pfxs) { + return + } + + if isTailscaleInterface(ni.Name, pfxs) { + s.TailscaleInterfaceIndex = ni.Index + } + if !ifUp || isTSInterfaceName || isTailscaleInterface(ni.Name, pfxs) { return } diff --git a/net/sockstats/sockstats_tsgo.go b/net/sockstats/sockstats_tsgo.go index aa875df9aeddd..4e9f4a9666308 100644 --- a/net/sockstats/sockstats_tsgo.go +++ b/net/sockstats/sockstats_tsgo.go @@ -271,10 +271,10 @@ func setNetMon(netMon *netmon.Monitor) { } netMon.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - if !delta.Major { + if !delta.RebindLikelyRequired { return } - state := delta.New + state := delta.CurrentState() ifName := state.DefaultRouteInterface if ifName == "" { return diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index 065c01384ed55..df2d80a619752 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -264,7 +264,7 @@ var ( func (d *Dialer) linkChanged(delta *netmon.ChangeDelta) { // Track how often we see ChangeDeltas with no DefaultRouteInterface. - if delta.New.DefaultRouteInterface == "" { + if delta.DefaultRouteInterface == "" { metricChangeDeltaNoDefaultRoute.Add(1) } @@ -294,22 +294,23 @@ func changeAffectsConn(delta *netmon.ChangeDelta, conn net.Conn) bool { } lip, rip := la.AddrPort().Addr(), ra.AddrPort().Addr() - if delta.Old == nil { + if delta.IsInitialState { return false } - if delta.Old.DefaultRouteInterface != delta.New.DefaultRouteInterface || - delta.Old.HTTPProxy != delta.New.HTTPProxy { + + if delta.DefaultInterfaceChanged || + delta.HasPACOrProxyConfigChanged { return true } // In a few cases, we don't have a new DefaultRouteInterface (e.g. on - // Android; see tailscale/corp#19124); if so, pessimistically assume + // Android and macOS/iOS; see tailscale/corp#19124); if so, pessimistically assume // that all connections are affected. 
- if delta.New.DefaultRouteInterface == "" && runtime.GOOS != "plan9" { + if delta.DefaultRouteInterface == "" && runtime.GOOS != "plan9" { return true } - if !delta.New.HasIP(lip) && delta.Old.HasIP(lip) { + if delta.InterfaceIPDisappeared(lip) { // Our interface with this source IP went away. return true } diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 647923775ef10..875011a9c3e05 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -1349,20 +1349,18 @@ func (e *userspaceEngine) Done() <-chan struct{} { } func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { - changed := delta.Major // TODO(bradfitz): ask more specific questions? - cur := delta.New - up := cur.AnyInterfaceUp() + + up := delta.AnyInterfaceUp() if !up { - e.logf("LinkChange: all links down; pausing: %v", cur) - } else if changed { - e.logf("LinkChange: major, rebinding. New state: %v", cur) + e.logf("LinkChange: all links down; pausing: %v", delta.StateDesc()) + } else if delta.RebindLikelyRequired { + e.logf("LinkChange: major, rebinding: %v", delta.StateDesc()) } else { e.logf("[v1] LinkChange: minor") } e.health.SetAnyInterfaceUp(up) - e.magicConn.SetNetworkUp(up) - if !up || changed { + if !up || delta.RebindLikelyRequired { if err := e.dns.FlushCaches(); err != nil { e.logf("wgengine: dns flush failed after major link change: %v", err) } @@ -1372,9 +1370,20 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { // suspend/resume or whenever NetworkManager is started, it // nukes all systemd-resolved configs. So reapply our DNS // config on major link change. - // TODO: explain why this is ncessary not just on Linux but also android - // and Apple platforms. - if changed { + // + // On Darwin (netext), we reapply the DNS config when the interface flaps + // because the change in interface can potentially change the nameservers + // for the forwarder. 
On Darwin netext clients, magicDNS is ~always the default + // resolver so having no nameserver to forward queries to (or one on a network we + // are not currently on) breaks DNS resolution system-wide. There are notable + // timing issues here with Darwin's network stack. It is not guaranteed that + // the forward resolver will be available immediately after the interface + // comes up. We leave it to the network extension to also poke magicDNS directly + // via [dns.Manager.RecompileDNSConfig] when it detects any change in the + // nameservers. + // + // TODO: On Android, Darwin-tailscaled, and openbsd, why do we need this? + if delta.RebindLikelyRequired && up { switch runtime.GOOS { case "linux", "android", "ios", "darwin", "openbsd": e.wgLock.Lock() @@ -1392,15 +1401,23 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { } } + e.magicConn.SetNetworkUp(up) + why := "link-change-minor" - if changed { + if delta.RebindLikelyRequired { why = "link-change-major" metricNumMajorChanges.Add(1) - e.magicConn.Rebind() } else { metricNumMinorChanges.Add(1) } - e.magicConn.ReSTUN(why) + + // If we're up and it's a minor change, just send a STUN ping + if up { + if delta.RebindLikelyRequired { + e.magicConn.Rebind() + } + e.magicConn.ReSTUN(why) + } } func (e *userspaceEngine) SetNetworkMap(nm *netmap.NetworkMap) { From 323604b76cf8290a3df7cf44eebf8e8667ada902 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Tue, 16 Dec 2025 17:19:16 -0500 Subject: [PATCH 0823/1093] net/dns/resolver: log source IP of forwarded queries When the TS_DEBUG_DNS_FORWARD_SEND envknob is turned on, also log the source IP:port of the query that tailscaled is forwarding. 
Updates tailscale/corp#35374 Signed-off-by: Andrew Dunham --- net/dns/resolver/forwarder.go | 6 ++-- net/dns/resolver/forwarder_test.go | 46 ++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 2 deletions(-) diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 5adc43efca860..797c5272ad651 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -526,9 +526,9 @@ func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDe if f.verboseFwd { id := forwarderCount.Add(1) domain, typ, _ := nameFromQuery(fq.packet) - f.logf("forwarder.send(%q, %d, %v, %d) [%d] ...", rr.name.Addr, fq.txid, typ, len(domain), id) + f.logf("forwarder.send(%q, %d, %v, %d) from %v [%d] ...", rr.name.Addr, fq.txid, typ, len(domain), fq.src, id) defer func() { - f.logf("forwarder.send(%q, %d, %v, %d) [%d] = %v, %v", rr.name.Addr, fq.txid, typ, len(domain), id, len(ret), err) + f.logf("forwarder.send(%q, %d, %v, %d) from %v [%d] = %v, %v", rr.name.Addr, fq.txid, typ, len(domain), fq.src, id, len(ret), err) }() } if strings.HasPrefix(rr.name.Addr, "http://") { @@ -904,6 +904,7 @@ type forwardQuery struct { txid txid packet []byte family string // "tcp" or "udp" + src netip.AddrPort // closeOnCtxDone lets send register values to Close if the // caller's ctx expires. 
This avoids send from allocating its @@ -988,6 +989,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo txid: getTxID(query.bs), packet: query.bs, family: query.family, + src: query.addr, closeOnCtxDone: new(closePool), } defer fq.closeOnCtxDone.Close() diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index ec491c581af99..0b38008c8a9c2 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -864,3 +864,49 @@ func TestNXDOMAINIncludesQuestion(t *testing.T) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", res, response) } } + +func TestForwarderVerboseLogs(t *testing.T) { + const domain = "test.tailscale.com." + response := makeTestResponse(t, domain, dns.RCodeServerFailure) + request := makeTestRequest(t, domain) + + port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { + if !bytes.Equal(request, gotRequest) { + t.Errorf("invalid request\ngot: %+v\nwant: %+v", gotRequest, request) + } + }) + + var ( + mu sync.Mutex // protects following + done bool + logBuf bytes.Buffer + ) + fwdLogf := func(format string, args ...any) { + mu.Lock() + defer mu.Unlock() + if done { + return // no logging after test is done + } + + t.Logf("[forwarder] "+format, args...) + fmt.Fprintf(&logBuf, format+"\n", args...) 
+ } + t.Cleanup(func() { + mu.Lock() + done = true + mu.Unlock() + }) + + _, err := runTestQuery(t, request, func(f *forwarder) { + f.logf = fwdLogf + f.verboseFwd = true + }, port) + if err != nil { + t.Fatal(err) + } + + logStr := logBuf.String() + if !strings.Contains(logStr, "forwarder.send(") { + t.Errorf("expected forwarding log, got:\n%s", logStr) + } +} From b21cba0921dfd4c8ac9cf4fa7210879d0ea7cf34 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Wed, 17 Dec 2025 20:58:47 +0100 Subject: [PATCH 0824/1093] cmd/k8s-operator: fixes helm template for oauth secret volume mount (#18230) Fixes #18228 Signed-off-by: chaosinthecrd --- .../deploy/chart/templates/deployment.yaml | 48 ++++++++++--------- .../deploy/chart/templates/oauth-secret.yaml | 2 +- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index 0f2dc42fc3c3a..df9cb8ce1bcb0 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -34,17 +34,11 @@ spec: securityContext: {{- toYaml . | nindent 8 }} {{- end }} - {{- if or .Values.oauth.clientSecret .Values.oauth.audience }} volumes: - {{- if .Values.oauth.clientSecret }} + {{- if .Values.oauthSecretVolume }} - name: oauth - {{- with .Values.oauthSecretVolume }} - {{- toYaml . 
| nindent 10 }} - {{- else }} - secret: - secretName: operator-oauth - {{- end }} - {{- else }} + {{- toYaml .Values.oauthSecretVolume | nindent 10 }} + {{- else if .Values.oauth.audience }} - name: oidc-jwt projected: defaultMode: 420 @@ -53,8 +47,11 @@ spec: audience: {{ .Values.oauth.audience }} expirationSeconds: 3600 path: token + {{- else }} + - name: oauth + secret: + secretName: operator-oauth {{- end }} - {{- end }} containers: - name: operator {{- with .Values.operatorConfig.securityContext }} @@ -85,7 +82,7 @@ spec: value: {{ .Values.loginServer }} - name: OPERATOR_INGRESS_CLASS_NAME value: {{ .Values.ingressClass.name }} - {{- if .Values.oauth.clientSecret }} + {{- if .Values.oauthSecretVolume }} - name: CLIENT_ID_FILE value: /oauth/client_id - name: CLIENT_SECRET_FILE @@ -93,6 +90,11 @@ spec: {{- else if .Values.oauth.audience }} - name: CLIENT_ID value: {{ .Values.oauth.clientId }} + {{- else }} + - name: CLIENT_ID_FILE + value: /oauth/client_id + - name: CLIENT_SECRET_FILE + value: /oauth/client_secret {{- end }} {{- $proxyTag := printf ":%s" ( .Values.proxyConfig.image.tag | default .Chart.AppVersion )}} - name: PROXY_IMAGE @@ -118,18 +120,20 @@ spec: {{- with .Values.operatorConfig.extraEnv }} {{- toYaml . | nindent 12 }} {{- end }} - {{- if or .Values.oauth.clientSecret .Values.oauth.audience }} volumeMounts: - {{- if .Values.oauth.clientSecret }} - - name: oauth - mountPath: /oauth - readOnly: true - {{- else }} - - name: oidc-jwt - mountPath: /var/run/secrets/tailscale/serviceaccount - readOnly: true - {{- end }} - {{- end }} + {{- if .Values.oauthSecretVolume }} + - name: oauth + mountPath: /oauth + readOnly: true + {{- else if .Values.oauth.audience }} + - name: oidc-jwt + mountPath: /var/run/secrets/tailscale/serviceaccount + readOnly: true + {{- else }} + - name: oauth + mountPath: /oauth + readOnly: true + {{- end }} {{- with .Values.operatorConfig.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} diff --git a/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml b/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml index b85c78915dedc..759ba341a8f21 100644 --- a/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml @@ -1,7 +1,7 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -{{ if and .Values.oauth .Values.oauth.clientId .Values.oauth.clientSecret -}} +{{ if and .Values.oauth .Values.oauth.clientId (not .Values.oauth.audience) -}} apiVersion: v1 kind: Secret metadata: From ce7e1dea45e1e6a3c8c92556a949ee28632af7f9 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 17 Dec 2025 17:27:35 -0800 Subject: [PATCH 0825/1093] types/persist: omit Persist.AttestationKey based on IsZero (#18241) IsZero is required by the interface, so we should use that before trying to serialize the key. Updates #35412 Signed-off-by: Andrew Lytvynov --- types/persist/persist.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/persist/persist.go b/types/persist/persist.go index 4b62c79ddd186..80bac9b5e2741 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -26,7 +26,7 @@ type Persist struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID - AttestationKey key.HardwareAttestationKey `json:",omitempty"` + AttestationKey key.HardwareAttestationKey `json:",omitzero"` // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. 
This is used to From e4847fa77bf669570d2b4242e402ffb8af8f80ac Mon Sep 17 00:00:00 2001 From: Brendan Creane Date: Wed, 17 Dec 2025 18:17:25 -0800 Subject: [PATCH 0826/1093] go.toolchain.rev: update to Go 1.25.5 (#18123) Updates #18122 Signed-off-by: Brendan Creane --- gokrazy/natlabapp/builddir/tailscale.com/go.mod | 2 +- gokrazy/tsapp/builddir/tailscale.com/go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.mod b/gokrazy/natlabapp/builddir/tailscale.com/go.mod index da21a143975e9..53bc11f9bd3f8 100644 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.mod +++ b/gokrazy/natlabapp/builddir/tailscale.com/go.mod @@ -1,6 +1,6 @@ module gokrazy/build/tsapp -go 1.23.1 +go 1.25.5 replace tailscale.com => ../../../.. diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.mod b/gokrazy/tsapp/builddir/tailscale.com/go.mod index da21a143975e9..53bc11f9bd3f8 100644 --- a/gokrazy/tsapp/builddir/tailscale.com/go.mod +++ b/gokrazy/tsapp/builddir/tailscale.com/go.mod @@ -1,6 +1,6 @@ module gokrazy/build/tsapp -go 1.23.1 +go 1.25.5 replace tailscale.com => ../../../.. From b73fb467e45dc501680200e667c84753120a8bbb Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 18 Dec 2025 09:58:13 +0000 Subject: [PATCH 0827/1093] ipn/ipnlocal: log cert renewal failures (#18246) Updates#cleanup Signed-off-by: Irbe Krumina --- ipn/ipnlocal/cert.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index d7133d25e24a2..a78fa5247d840 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -144,7 +144,11 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string if minValidity == 0 { logf("starting async renewal") // Start renewal in the background, return current valid cert. 
- b.goTracker.Go(func() { getCertPEM(context.Background(), b, cs, logf, traceACME, domain, now, minValidity) }) + b.goTracker.Go(func() { + if _, err := getCertPEM(context.Background(), b, cs, logf, traceACME, domain, now, minValidity); err != nil { + logf("async renewal failed: getCertPem: %v", err) + } + }) return pair, nil } // If the caller requested a specific validity duration, fall through From eed5e95e27caf2bc618678040e4b413c74584299 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 17 Dec 2025 20:19:41 +0000 Subject: [PATCH 0828/1093] docs: use -x for cherry-picks Updates #cleanup Change-Id: I5222e23b716b342d7c6d113fc539d2021024348e Signed-off-by: Tom Proctor --- docs/commit-messages.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/commit-messages.md b/docs/commit-messages.md index 79b16e4c6f6f2..aef1035b35b8c 100644 --- a/docs/commit-messages.md +++ b/docs/commit-messages.md @@ -163,6 +163,10 @@ When you use `git revert` to revert a commit, the default commit message will id Don't revert reverts. That gets ugly. Send the change anew but reference the original & earlier revert. +# Cherry picks + +Use `git cherry-pick -x` to include git's standard "cherry picked from..." line in the commit message. Typically you'll only need this for cherry-picking onto release branches. + # Other repos To reference an issue in one repo from a commit in another (for example, fixing an issue in corp with a commit in `tailscale/tailscale`), you need to fully-qualify the issue number with the GitHub org/repo syntax: From bb3529fcd4ba8c16e03a6883a09d81e2bc63baa2 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 18 Dec 2025 17:06:42 +0000 Subject: [PATCH 0829/1093] cmd/containerboot: support egress to Tailscale Service FQDNs (#17493) Adds support for targeting FQDNs that are a Tailscale Service. Uses the same method of searching for Services as the tailscale configure kubeconfig command. 
This fixes using the tailscale.com/tailnet-fqdn annotation for Kubernetes Service when the specified FQDN is a Tailscale Service. Fixes #16534 Change-Id: I422795de76dc83ae30e7e757bc4fbd8eec21cc64 Signed-off-by: Tom Proctor Signed-off-by: Becky Pauley --- cmd/containerboot/egressservices.go | 39 ++++++------- cmd/containerboot/main.go | 89 +++++++++++++++++++++++------ cmd/containerboot/main_test.go | 50 +++++++++++----- cmd/tailscale/cli/configure-kube.go | 4 +- 4 files changed, 125 insertions(+), 57 deletions(-) diff --git a/cmd/containerboot/egressservices.go b/cmd/containerboot/egressservices.go index fe835a69e0b82..21d9f0bcb9a2b 100644 --- a/cmd/containerboot/egressservices.go +++ b/cmd/containerboot/egressservices.go @@ -27,7 +27,6 @@ import ( "tailscale.com/kube/egressservices" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" - "tailscale.com/tailcfg" "tailscale.com/util/httpm" "tailscale.com/util/linuxfw" "tailscale.com/util/mak" @@ -477,30 +476,26 @@ func (ep *egressProxy) tailnetTargetIPsForSvc(svc egressservices.Config, n ipn.N log.Printf("netmap is not available, unable to determine backend addresses for %s", svc.TailnetTarget.FQDN) return addrs, nil } - var ( - node tailcfg.NodeView - nodeFound bool - ) - for _, nn := range n.NetMap.Peers { - if equalFQDNs(nn.Name(), svc.TailnetTarget.FQDN) { - node = nn - nodeFound = true - break - } + egressAddrs, err := resolveTailnetFQDN(n.NetMap, svc.TailnetTarget.FQDN) + if err != nil { + return nil, fmt.Errorf("error fetching backend addresses for %q: %w", svc.TailnetTarget.FQDN, err) } - if nodeFound { - for _, addr := range node.Addresses().AsSlice() { - if addr.Addr().Is6() && !ep.nfr.HasIPV6NAT() { - log.Printf("tailnet target %v is an IPv6 address, but this host does not support IPv6 in the chosen firewall mode, skipping.", addr.Addr().String()) - continue - } - addrs = append(addrs, addr.Addr()) + if len(egressAddrs) == 0 { + log.Printf("tailnet target %q does not have any backend addresses, 
skipping", svc.TailnetTarget.FQDN) + return addrs, nil + } + + for _, addr := range egressAddrs { + if addr.Addr().Is6() && !ep.nfr.HasIPV6NAT() { + log.Printf("tailnet target %v is an IPv6 address, but this host does not support IPv6 in the chosen firewall mode, skipping.", addr.Addr().String()) + continue } - // Egress target endpoints configured via FQDN are stored, so - // that we can determine if a netmap update should trigger a - // resync. - mak.Set(&ep.targetFQDNs, svc.TailnetTarget.FQDN, node.Addresses().AsSlice()) + addrs = append(addrs, addr.Addr()) } + // Egress target endpoints configured via FQDN are stored, so + // that we can determine if a netmap update should trigger a + // resync. + mak.Set(&ep.targetFQDNs, svc.TailnetTarget.FQDN, egressAddrs) return addrs, nil } diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index f056d26f3c2c0..8c9d33c61ccd0 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -127,8 +127,10 @@ import ( "tailscale.com/kube/services" "tailscale.com/tailcfg" "tailscale.com/types/logger" + "tailscale.com/types/netmap" "tailscale.com/types/ptr" "tailscale.com/util/deephash" + "tailscale.com/util/dnsname" "tailscale.com/util/linuxfw" ) @@ -526,27 +528,14 @@ runLoop: } } if cfg.TailnetTargetFQDN != "" { - var ( - egressAddrs []netip.Prefix - newCurentEgressIPs deephash.Sum - egressIPsHaveChanged bool - node tailcfg.NodeView - nodeFound bool - ) - for _, n := range n.NetMap.Peers { - if strings.EqualFold(n.Name(), cfg.TailnetTargetFQDN) { - node = n - nodeFound = true - break - } - } - if !nodeFound { - log.Printf("Tailscale node %q not found; it either does not exist, or not reachable because of ACLs", cfg.TailnetTargetFQDN) + egressAddrs, err := resolveTailnetFQDN(n.NetMap, cfg.TailnetTargetFQDN) + if err != nil { + log.Print(err.Error()) break } - egressAddrs = node.Addresses().AsSlice() - newCurentEgressIPs = deephash.Hash(&egressAddrs) - egressIPsHaveChanged = newCurentEgressIPs != 
currentEgressIPs + + newCurentEgressIPs := deephash.Hash(&egressAddrs) + egressIPsHaveChanged := newCurentEgressIPs != currentEgressIPs // The firewall rules get (re-)installed: // - on startup // - when the tailnet IPs of the tailnet target have changed @@ -892,3 +881,65 @@ func runHTTPServer(mux *http.ServeMux, addr string) (close func() error) { return errors.Join(err, ln.Close()) } } + +// resolveTailnetFQDN resolves a tailnet FQDN to a list of IP prefixes, which +// can be either a peer device or a Tailscale Service. +func resolveTailnetFQDN(nm *netmap.NetworkMap, fqdn string) ([]netip.Prefix, error) { + dnsFQDN, err := dnsname.ToFQDN(fqdn) + if err != nil { + return nil, fmt.Errorf("error parsing %q as FQDN: %w", fqdn, err) + } + + // Check all peer devices first. + for _, p := range nm.Peers { + if strings.EqualFold(p.Name(), dnsFQDN.WithTrailingDot()) { + return p.Addresses().AsSlice(), nil + } + } + + // If not found yet, check for a matching Tailscale Service. + if svcIPs := serviceIPsFromNetMap(nm, dnsFQDN); len(svcIPs) != 0 { + return svcIPs, nil + } + + return nil, fmt.Errorf("could not find Tailscale node or service %q; it either does not exist, or not reachable because of ACLs", fqdn) +} + +// serviceIPsFromNetMap returns all IPs of a Tailscale Service if its FQDN is +// found in the netmap. Note that Tailscale Services are not a first-class +// object in the netmap, so we guess based on DNS ExtraRecords and AllowedIPs. +func serviceIPsFromNetMap(nm *netmap.NetworkMap, fqdn dnsname.FQDN) []netip.Prefix { + var extraRecords []tailcfg.DNSRecord + for _, rec := range nm.DNS.ExtraRecords { + recFQDN, err := dnsname.ToFQDN(rec.Name) + if err != nil { + continue + } + if strings.EqualFold(fqdn.WithTrailingDot(), recFQDN.WithTrailingDot()) { + extraRecords = append(extraRecords, rec) + } + } + + if len(extraRecords) == 0 { + return nil + } + + // Validate we can see a peer advertising the Tailscale Service. 
+ var prefixes []netip.Prefix + for _, extraRecord := range extraRecords { + ip, err := netip.ParseAddr(extraRecord.Value) + if err != nil { + continue + } + ipPrefix := netip.PrefixFrom(ip, ip.BitLen()) + for _, ps := range nm.Peers { + for _, allowedIP := range ps.AllowedIPs().All() { + if allowedIP == ipPrefix { + prefixes = append(prefixes, ipPrefix) + } + } + } + } + + return prefixes +} diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index f92f353334de2..7007cc15202d9 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -46,7 +46,7 @@ func TestContainerBoot(t *testing.T) { if err := exec.Command("go", "build", "-ldflags", "-X main.testSleepDuration=1ms", "-o", boot, "tailscale.com/cmd/containerboot").Run(); err != nil { t.Fatalf("Building containerboot: %v", err) } - egressStatus := egressSvcStatus("foo", "foo.tailnetxyz.ts.net") + egressStatus := egressSvcStatus("foo", "foo.tailnetxyz.ts.net", "100.64.0.2") metricsURL := func(port int) string { return fmt.Sprintf("http://127.0.0.1:%d/metrics", port) @@ -99,7 +99,7 @@ func TestContainerBoot(t *testing.T) { NetMap: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ StableID: tailcfg.StableNodeID("myID"), - Name: "test-node.test.ts.net", + Name: "test-node.test.ts.net.", Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, }).View(), }, @@ -356,7 +356,7 @@ func TestContainerBoot(t *testing.T) { return testCase{ Env: map[string]string{ "TS_AUTHKEY": "tskey-key", - "TS_TAILNET_TARGET_FQDN": "ipv6-node.test.ts.net", // resolves to IPv6 address + "TS_TAILNET_TARGET_FQDN": "ipv6-node.test.ts.net.", // resolves to IPv6 address "TS_USERSPACE": "false", "TS_TEST_FAKE_NETFILTER_6": "false", }, @@ -377,13 +377,13 @@ func TestContainerBoot(t *testing.T) { NetMap: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ StableID: tailcfg.StableNodeID("myID"), - Name: "test-node.test.ts.net", + Name: "test-node.test.ts.net.", Addresses: 
[]netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, }).View(), Peers: []tailcfg.NodeView{ (&tailcfg.Node{ StableID: tailcfg.StableNodeID("ipv6ID"), - Name: "ipv6-node.test.ts.net", + Name: "ipv6-node.test.ts.net.", Addresses: []netip.Prefix{netip.MustParsePrefix("::1/128")}, }).View(), }, @@ -481,7 +481,7 @@ func TestContainerBoot(t *testing.T) { Notify: runningNotify, WantKubeSecret: map[string]string{ "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", + "device_fqdn": "test-node.test.ts.net.", "device_id": "myID", "device_ips": `["100.64.0.1"]`, kubetypes.KeyCapVer: capver, @@ -580,7 +580,7 @@ func TestContainerBoot(t *testing.T) { "/usr/bin/tailscale --socket=/tmp/tailscaled.sock set --accept-dns=false", }, WantKubeSecret: map[string]string{ - "device_fqdn": "test-node.test.ts.net", + "device_fqdn": "test-node.test.ts.net.", "device_id": "myID", "device_ips": `["100.64.0.1"]`, kubetypes.KeyCapVer: capver, @@ -613,7 +613,7 @@ func TestContainerBoot(t *testing.T) { Notify: runningNotify, WantKubeSecret: map[string]string{ "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", + "device_fqdn": "test-node.test.ts.net.", "device_id": "myID", "device_ips": `["100.64.0.1"]`, kubetypes.KeyCapVer: capver, @@ -625,14 +625,14 @@ func TestContainerBoot(t *testing.T) { NetMap: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ StableID: tailcfg.StableNodeID("newID"), - Name: "new-name.test.ts.net", + Name: "new-name.test.ts.net.", Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, }).View(), }, }, WantKubeSecret: map[string]string{ "authkey": "tskey-key", - "device_fqdn": "new-name.test.ts.net", + "device_fqdn": "new-name.test.ts.net.", "device_id": "newID", "device_ips": `["100.64.0.1"]`, kubetypes.KeyCapVer: capver, @@ -927,7 +927,7 @@ func TestContainerBoot(t *testing.T) { Notify: runningNotify, WantKubeSecret: map[string]string{ "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", + "device_fqdn": 
"test-node.test.ts.net.", "device_id": "myID", "device_ips": `["100.64.0.1"]`, "https_endpoint": "no-https", @@ -963,11 +963,27 @@ func TestContainerBoot(t *testing.T) { }, }, { - Notify: runningNotify, + Notify: &ipn.Notify{ + State: ptr.To(ipn.Running), + NetMap: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + StableID: tailcfg.StableNodeID("myID"), + Name: "test-node.test.ts.net.", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")}, + }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + StableID: tailcfg.StableNodeID("fooID"), + Name: "foo.tailnetxyz.ts.net.", + Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")}, + }).View(), + }, + }, + }, WantKubeSecret: map[string]string{ "egress-services": string(mustJSON(t, egressStatus)), "authkey": "tskey-key", - "device_fqdn": "test-node.test.ts.net", + "device_fqdn": "test-node.test.ts.net.", "device_id": "myID", "device_ips": `["100.64.0.1"]`, kubetypes.KeyCapVer: capver, @@ -1338,6 +1354,11 @@ func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { } w.Write([]byte("fake metrics")) return + case "/localapi/v0/prefs": + if r.Method != "GET" { + panic(fmt.Sprintf("unsupported method %q", r.Method)) + } + return default: panic(fmt.Sprintf("unsupported path %q", r.URL.Path)) } @@ -1563,13 +1584,14 @@ func mustJSON(t *testing.T, v any) []byte { } // egress services status given one named tailnet target specified by FQDN. As written by the proxy to its state Secret. 
-func egressSvcStatus(name, fqdn string) egressservices.Status { +func egressSvcStatus(name, fqdn, ip string) egressservices.Status { return egressservices.Status{ Services: map[string]*egressservices.ServiceStatus{ name: { TailnetTarget: egressservices.TailnetTarget{ FQDN: fqdn, }, + TailnetTargetIPs: []netip.Addr{netip.MustParseAddr(ip)}, }, }, } diff --git a/cmd/tailscale/cli/configure-kube.go b/cmd/tailscale/cli/configure-kube.go index e74e8877996fe..bf5624856167a 100644 --- a/cmd/tailscale/cli/configure-kube.go +++ b/cmd/tailscale/cli/configure-kube.go @@ -247,7 +247,7 @@ func nodeOrServiceDNSNameFromArg(st *ipnstate.Status, nm *netmap.NetworkMap, arg } // If not found, check for a Tailscale Service DNS name. - rec, ok := serviceDNSRecordFromNetMap(nm, st.CurrentTailnet.MagicDNSSuffix, arg) + rec, ok := serviceDNSRecordFromNetMap(nm, arg) if !ok { return "", fmt.Errorf("no peer found for %q", arg) } @@ -287,7 +287,7 @@ func getNetMap(ctx context.Context) (*netmap.NetworkMap, error) { return n.NetMap, nil } -func serviceDNSRecordFromNetMap(nm *netmap.NetworkMap, tcd, arg string) (rec tailcfg.DNSRecord, ok bool) { +func serviceDNSRecordFromNetMap(nm *netmap.NetworkMap, arg string) (rec tailcfg.DNSRecord, ok bool) { argIP, _ := netip.ParseAddr(arg) argFQDN, err := dnsname.ToFQDN(arg) argFQDNValid := err == nil From c40f3521032c7687e8b9c85020f7857e211ab693 Mon Sep 17 00:00:00 2001 From: Alex Valiushko Date: Thu, 18 Dec 2025 16:12:50 -0800 Subject: [PATCH 0830/1093] net/udprelay: expose peer relay metrics (#18218) Adding both user and client metrics for peer relay forwarded bytes and packets, and the total endpoints gauge. User metrics: tailscaled_peer_relay_forwarded_packets_total{transport_in, transport_out} tailscaled_peer_relay_forwarded_bytes_total{transport_in, transport_out} tailscaled_peer_relay_endpoints_total{} Where the transport labels can be of "udp4" or "udp6". 
Client metrics: udprelay_forwarded_(packets|bytes)_udp(4|6)_udp(4|6) udprelay_endpoints RELNOTE: Expose tailscaled metrics for peer relay. Updates tailscale/corp#30820 Change-Id: I1a905d15bdc5ee84e28017e0b93210e2d9660259 Signed-off-by: Alex Valiushko --- feature/relayserver/relayserver.go | 2 +- net/udprelay/metrics.go | 153 +++++++++++++++++++++++++++++ net/udprelay/metrics_test.go | 63 ++++++++++++ net/udprelay/server.go | 58 +++++++++-- net/udprelay/server_test.go | 5 +- 5 files changed, 269 insertions(+), 12 deletions(-) create mode 100644 net/udprelay/metrics.go create mode 100644 net/udprelay/metrics_test.go diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 4f23ae18e4248..b29a6abed5336 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -70,7 +70,7 @@ func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r * func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { e := &extension{ newServerFn: func(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (relayServer, error) { - return udprelay.NewServer(logf, port, onlyStaticAddrPorts) + return udprelay.NewServer(logf, port, onlyStaticAddrPorts, sb.Sys().UserMetricsRegistry()) }, logf: logger.WithPrefix(logf, featureName+": "), } diff --git a/net/udprelay/metrics.go b/net/udprelay/metrics.go new file mode 100644 index 0000000000000..45d3c9f34266d --- /dev/null +++ b/net/udprelay/metrics.go @@ -0,0 +1,153 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package udprelay + +import ( + "expvar" + + "tailscale.com/util/clientmetric" + "tailscale.com/util/usermetric" +) + +var ( + // Although we only need one, [clientmetric.AggregateCounter] is the only + // method to embed [expvar.Int] into client metrics. 
+ cMetricForwarded44Packets = clientmetric.NewAggregateCounter("udprelay_forwarded_packets_udp4_udp4") + cMetricForwarded46Packets = clientmetric.NewAggregateCounter("udprelay_forwarded_packets_udp4_udp6") + cMetricForwarded64Packets = clientmetric.NewAggregateCounter("udprelay_forwarded_packets_udp6_udp4") + cMetricForwarded66Packets = clientmetric.NewAggregateCounter("udprelay_forwarded_packets_udp6_udp6") + + cMetricForwarded44Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp4_udp4") + cMetricForwarded46Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp4_udp6") + cMetricForwarded64Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp6_udp4") + cMetricForwarded66Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp6_udp6") + + // [clientmetric.Gauge] does not let us embed existing counters, + // [metrics.addEndpoints] records data into client and user gauges independently. + cMetricEndpoints = clientmetric.NewGauge("udprelay_endpoints") +) + +type transport string + +const ( + transportUDP4 transport = "udp4" + transportUDP6 transport = "udp6" +) + +type forwardedLabel struct { + transportIn transport `prom:"transport_in"` + transportOut transport `prom:"transport_out"` +} + +type endpointLabel struct { +} + +type metrics struct { + forwarded44Packets expvar.Int + forwarded46Packets expvar.Int + forwarded64Packets expvar.Int + forwarded66Packets expvar.Int + + forwarded44Bytes expvar.Int + forwarded46Bytes expvar.Int + forwarded64Bytes expvar.Int + forwarded66Bytes expvar.Int + + endpoints expvar.Int +} + +// registerMetrics publishes user and client metric counters for peer relay server. +// +// It will panic if called twice with the same registry. 
+func registerMetrics(reg *usermetric.Registry) *metrics { + var ( + uMetricForwardedPackets = usermetric.NewMultiLabelMapWithRegistry[forwardedLabel]( + reg, + "tailscaled_peer_relay_forwarded_packets_total", + "counter", + "Number of packets forwarded via Peer Relay", + ) + uMetricForwardedBytes = usermetric.NewMultiLabelMapWithRegistry[forwardedLabel]( + reg, + "tailscaled_peer_relay_forwarded_bytes_total", + "counter", + "Number of bytes forwarded via Peer Relay", + ) + uMetricEndpoints = usermetric.NewMultiLabelMapWithRegistry[endpointLabel]( + reg, + "tailscaled_peer_relay_endpoints_total", + "gauge", + "Number of allocated Peer Relay endpoints", + ) + forwarded44 = forwardedLabel{transportIn: transportUDP4, transportOut: transportUDP4} + forwarded46 = forwardedLabel{transportIn: transportUDP4, transportOut: transportUDP6} + forwarded64 = forwardedLabel{transportIn: transportUDP6, transportOut: transportUDP4} + forwarded66 = forwardedLabel{transportIn: transportUDP6, transportOut: transportUDP6} + m = new(metrics) + ) + + // Publish user metrics. + uMetricForwardedPackets.Set(forwarded44, &m.forwarded44Packets) + uMetricForwardedPackets.Set(forwarded46, &m.forwarded46Packets) + uMetricForwardedPackets.Set(forwarded64, &m.forwarded64Packets) + uMetricForwardedPackets.Set(forwarded66, &m.forwarded66Packets) + + uMetricForwardedBytes.Set(forwarded44, &m.forwarded44Bytes) + uMetricForwardedBytes.Set(forwarded46, &m.forwarded46Bytes) + uMetricForwardedBytes.Set(forwarded64, &m.forwarded64Bytes) + uMetricForwardedBytes.Set(forwarded66, &m.forwarded66Bytes) + + uMetricEndpoints.Set(endpointLabel{}, &m.endpoints) + + // Publish client metrics. 
+ cMetricForwarded44Packets.Register(&m.forwarded44Packets) + cMetricForwarded46Packets.Register(&m.forwarded46Packets) + cMetricForwarded64Packets.Register(&m.forwarded64Packets) + cMetricForwarded66Packets.Register(&m.forwarded66Packets) + cMetricForwarded44Bytes.Register(&m.forwarded44Bytes) + cMetricForwarded46Bytes.Register(&m.forwarded46Bytes) + cMetricForwarded64Bytes.Register(&m.forwarded64Bytes) + cMetricForwarded66Bytes.Register(&m.forwarded66Bytes) + + return m +} + +// addEndpoints updates the total endpoints gauge. Value can be negative. +// It records two gauges independently, see [cMetricEndpoints] doc. +func (m *metrics) addEndpoints(value int64) { + m.endpoints.Add(value) + cMetricEndpoints.Add(value) +} + +// countForwarded records user and client metrics according to the +// inbound and outbound address families. +func (m *metrics) countForwarded(in4, out4 bool, bytes, packets int64) { + if in4 && out4 { + m.forwarded44Packets.Add(packets) + m.forwarded44Bytes.Add(bytes) + } else if in4 && !out4 { + m.forwarded46Packets.Add(packets) + m.forwarded46Bytes.Add(bytes) + } else if !in4 && out4 { + m.forwarded64Packets.Add(packets) + m.forwarded64Bytes.Add(bytes) + } else { + m.forwarded66Packets.Add(packets) + m.forwarded66Bytes.Add(bytes) + } +} + +// deregisterMetrics unregisters the underlying expvar counters +// from clientmetrics. 
+func deregisterMetrics() { + cMetricForwarded44Packets.UnregisterAll() + cMetricForwarded46Packets.UnregisterAll() + cMetricForwarded64Packets.UnregisterAll() + cMetricForwarded66Packets.UnregisterAll() + cMetricForwarded44Bytes.UnregisterAll() + cMetricForwarded46Bytes.UnregisterAll() + cMetricForwarded64Bytes.UnregisterAll() + cMetricForwarded66Bytes.UnregisterAll() + cMetricEndpoints.Set(0) +} diff --git a/net/udprelay/metrics_test.go b/net/udprelay/metrics_test.go new file mode 100644 index 0000000000000..25345dc6b3459 --- /dev/null +++ b/net/udprelay/metrics_test.go @@ -0,0 +1,63 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package udprelay + +import ( + "slices" + "testing" + + qt "github.com/frankban/quicktest" + "tailscale.com/util/usermetric" +) + +func TestMetrics(t *testing.T) { + c := qt.New(t) + deregisterMetrics() + r := &usermetric.Registry{} + m := registerMetrics(r) + + // Expect certain prom names registered. + have := r.MetricNames() + want := []string{ + "tailscaled_peer_relay_forwarded_packets_total", + "tailscaled_peer_relay_forwarded_bytes_total", + "tailscaled_peer_relay_endpoints_total", + } + slices.Sort(have) + slices.Sort(want) + c.Assert(have, qt.CmpEquals(), want) + + // Validate addEndpoints. + m.addEndpoints(1) + c.Assert(m.endpoints.Value(), qt.Equals, int64(1)) + c.Assert(cMetricEndpoints.Value(), qt.Equals, int64(1)) + m.addEndpoints(-1) + c.Assert(m.endpoints.Value(), qt.Equals, int64(0)) + c.Assert(cMetricEndpoints.Value(), qt.Equals, int64(0)) + + // Validate countForwarded. 
+ m.countForwarded(true, true, 1, 1) + c.Assert(m.forwarded44Bytes.Value(), qt.Equals, int64(1)) + c.Assert(m.forwarded44Packets.Value(), qt.Equals, int64(1)) + c.Assert(cMetricForwarded44Bytes.Value(), qt.Equals, int64(1)) + c.Assert(cMetricForwarded44Packets.Value(), qt.Equals, int64(1)) + + m.countForwarded(true, false, 2, 2) + c.Assert(m.forwarded46Bytes.Value(), qt.Equals, int64(2)) + c.Assert(m.forwarded46Packets.Value(), qt.Equals, int64(2)) + c.Assert(cMetricForwarded46Bytes.Value(), qt.Equals, int64(2)) + c.Assert(cMetricForwarded46Packets.Value(), qt.Equals, int64(2)) + + m.countForwarded(false, true, 3, 3) + c.Assert(m.forwarded64Bytes.Value(), qt.Equals, int64(3)) + c.Assert(m.forwarded64Packets.Value(), qt.Equals, int64(3)) + c.Assert(cMetricForwarded64Bytes.Value(), qt.Equals, int64(3)) + c.Assert(cMetricForwarded64Packets.Value(), qt.Equals, int64(3)) + + m.countForwarded(false, false, 4, 4) + c.Assert(m.forwarded66Bytes.Value(), qt.Equals, int64(4)) + c.Assert(m.forwarded66Packets.Value(), qt.Equals, int64(4)) + c.Assert(cMetricForwarded66Bytes.Value(), qt.Equals, int64(4)) + c.Assert(cMetricForwarded66Packets.Value(), qt.Equals, int64(4)) +} diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 45127dfae6f5b..e98fdf7bbad33 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -43,6 +43,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/eventbus" "tailscale.com/util/set" + "tailscale.com/util/usermetric" ) const ( @@ -76,6 +77,7 @@ type Server struct { wg sync.WaitGroup closeCh chan struct{} netChecker *netcheck.Client + metrics *metrics mu sync.Mutex // guards the following fields macSecrets views.Slice[[blake2s.Size]byte] // [0] is most recent, max 2 elements @@ -320,8 +322,8 @@ func (e *serverEndpoint) isBoundLocked() bool { // port selection is left up to the host networking stack. 
If // onlyStaticAddrPorts is true, then dynamic addr:port discovery will be // disabled, and only addr:port's set via [Server.SetStaticAddrPorts] will be -// used. -func NewServer(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (s *Server, err error) { +// used. Metrics must be non-nil. +func NewServer(logf logger.Logf, port uint16, onlyStaticAddrPorts bool, metrics *usermetric.Registry) (s *Server, err error) { s = &Server{ logf: logf, disco: key.NewDisco(), @@ -333,6 +335,7 @@ func NewServer(logf logger.Logf, port uint16, onlyStaticAddrPorts bool) (s *Serv nextVNI: minVNI, } s.discoPublic = s.disco.Public() + s.metrics = registerMetrics(metrics) // TODO(creachadair): Find a way to plumb this in during initialization. // As-written, messages published here will not be seen by other components @@ -670,6 +673,7 @@ func (s *Server) endpointGCLoop() { defer s.mu.Unlock() for k, v := range s.serverEndpointByDisco { if v.isExpired(now, s.bindLifetime, s.steadyStateLifetime) { + s.metrics.addEndpoints(-1) delete(s.serverEndpointByDisco, k) s.serverEndpointByVNI.Delete(v.vni) } @@ -686,36 +690,50 @@ func (s *Server) endpointGCLoop() { } } -func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to netip.AddrPort) { +// handlePacket unwraps headers and dispatches packet handling according to its +// type and destination. If the returned address is valid, write will contain data +// to transmit, and isDataPacket signals whether input was a data packet or OOB +// signaling. +// +// write, to, isDataPacket := s.handlePacket(from, buf) +// if to.IsValid() && isDataPacket { +// // ..handle data transmission +// } + +func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to netip.AddrPort, isDataPacket bool) { if stun.Is(b) && b[1] == 0x01 { // A b[1] value of 0x01 (STUN method binding) is sufficiently // non-overlapping with the Geneve header where the LSB is always 0 // (part of 6 "reserved" bits). 
s.netChecker.ReceiveSTUNPacket(b, from) - return nil, netip.AddrPort{} + return nil, netip.AddrPort{}, false } gh := packet.GeneveHeader{} err := gh.Decode(b) if err != nil { - return nil, netip.AddrPort{} + return nil, netip.AddrPort{}, false } e, ok := s.serverEndpointByVNI.Load(gh.VNI.Get()) if !ok { // unknown VNI - return nil, netip.AddrPort{} + return nil, netip.AddrPort{}, false } now := mono.Now() if gh.Control { if gh.Protocol != packet.GeneveProtocolDisco { // control packet, but not Disco - return nil, netip.AddrPort{} + return nil, netip.AddrPort{}, false } msg := b[packet.GeneveFixedHeaderLength:] secrets := s.getMACSecrets(now) - return e.(*serverEndpoint).handleSealedDiscoControlMsg(from, msg, s.discoPublic, secrets, now) + write, to = e.(*serverEndpoint).handleSealedDiscoControlMsg(from, msg, s.discoPublic, secrets, now) + isDataPacket = false + return } - return e.(*serverEndpoint).handleDataPacket(from, b, now) + write, to = e.(*serverEndpoint).handleDataPacket(from, b, now) + isDataPacket = true + return } func (s *Server) getMACSecrets(now mono.Time) views.Slice[[blake2s.Size]byte] { @@ -783,16 +801,32 @@ func (s *Server) packetReadLoop(readFromSocket, otherSocket batching.Conn, readF return } + // Aggregate counts for the packet batch before writing metrics. 
+ forwardedByOutAF := struct { + bytes4 int64 + packets4 int64 + bytes6 int64 + packets6 int64 + }{} for _, msg := range msgs[:n] { if msg.N == 0 { continue } buf := msg.Buffers[0][:msg.N] from := msg.Addr.(*net.UDPAddr).AddrPort() - write, to := s.handlePacket(from, buf) + write, to, isDataPacket := s.handlePacket(from, buf) if !to.IsValid() { continue } + if isDataPacket { + if to.Addr().Is4() { + forwardedByOutAF.bytes4 += int64(len(write)) + forwardedByOutAF.packets4++ + } else { + forwardedByOutAF.bytes6 += int64(len(write)) + forwardedByOutAF.packets6++ + } + } if from.Addr().Is4() == to.Addr().Is4() || otherSocket != nil { buffs, ok := writeBuffsByDest[to] if !ok { @@ -823,6 +857,9 @@ func (s *Server) packetReadLoop(readFromSocket, otherSocket batching.Conn, readF } delete(writeBuffsByDest, dest) } + + s.metrics.countForwarded(readFromSocketIsIPv4, true, forwardedByOutAF.bytes4, forwardedByOutAF.packets4) + s.metrics.countForwarded(readFromSocketIsIPv4, false, forwardedByOutAF.bytes6, forwardedByOutAF.packets6) } } @@ -932,6 +969,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv s.serverEndpointByVNI.Store(e.vni, e) s.logf("allocated endpoint vni=%d lamportID=%d disco[0]=%v disco[1]=%v", e.vni, e.lamportID, pair.Get()[0].ShortString(), pair.Get()[1].ShortString()) + s.metrics.addEndpoints(1) return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, ClientDisco: pair.Get(), diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index c4b3656417bae..59917e1c6ef52 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -21,6 +21,7 @@ import ( "tailscale.com/tstime/mono" "tailscale.com/types/key" "tailscale.com/types/views" + "tailscale.com/util/usermetric" ) type testClient struct { @@ -209,7 +210,9 @@ func TestServer(t *testing.T) { for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - server, err := NewServer(t.Logf, 0, true) + reg := new(usermetric.Registry) + 
deregisterMetrics() + server, err := NewServer(t.Logf, 0, true, reg) if err != nil { t.Fatal(err) } From 90b4358113d86b4fb06e89d4ae91ef8bcb6f6264 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 19 Dec 2025 15:59:26 +0000 Subject: [PATCH 0831/1093] cmd/k8s-operator,ipn/ipnlocal: allow opting out of ACME order replace extension (#18252) In dynamically changing environments where ACME account keys and certs are stored separately, it can happen that the account key would get deleted (and recreated) between issuances. If that is the case, we currently fail renewals and the only way to recover is for users to delete certs. This adds a config knob to allow opting out of the replaces extension and utilizes it in the Kubernetes operator where there are known user workflows that could end up with this edge case. Updates #18251 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/proxygroup_specs.go | 16 ++++++++++++++++ cmd/k8s-operator/sts.go | 8 ++++++++ cmd/k8s-operator/testutils_test.go | 2 ++ ipn/ipnlocal/cert.go | 5 ++++- 4 files changed, 30 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 34db86db27846..930b7049d8ea9 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -182,6 +182,14 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)", }, + { + // This ensures that cert renewals can succeed if ACME account + // keys have changed since issuance. 
We cannot guarantee or + // validate that the account key has not changed, see + // https://github.com/tailscale/tailscale/issues/18251 + Name: "TS_DEBUG_ACME_FORCE_RENEWAL", + Value: "true", + }, } if port != nil { @@ -347,6 +355,14 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, por Name: "$(POD_NAME)-config", }.String(), }, + { + // This ensures that cert renewals can succeed if ACME account + // keys have changed since issuance. We cannot guarantee or + // validate that the account key has not changed, see + // https://github.com/tailscale/tailscale/issues/18251 + Name: "TS_DEBUG_ACME_FORCE_RENEWAL", + Value: "true", + }, } if port != nil { diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 62f91bf921faa..2b6d1290e53f8 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -671,6 +671,14 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)", }, + corev1.EnvVar{ + // This ensures that cert renewals can succeed if ACME account + // keys have changed since issuance. 
We cannot guarantee or + // validate that the account key has not changed, see + // https://github.com/tailscale/tailscale/issues/18251 + Name: "TS_DEBUG_ACME_FORCE_RENEWAL", + Value: "true", + }, ) if sts.ForwardClusterTrafficViaL7IngressProxy { diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 9eb06394c092b..b0e2cfd734fad 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -92,6 +92,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, + {Name: "TS_DEBUG_ACME_FORCE_RENEWAL", Value: "true"}, }, SecurityContext: &corev1.SecurityContext{ Privileged: ptr.To(true), @@ -287,6 +288,7 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, + {Name: "TS_DEBUG_ACME_FORCE_RENEWAL", Value: "true"}, {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/$(POD_NAME)/serve-config"}, {Name: "TS_INTERNAL_APP", Value: opts.app}, }, diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index a78fa5247d840..8804fcb5ce2e8 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -551,8 +551,11 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l // If we have a previous cert, include it in the order. 
Assuming we're // within the ARI renewal window this should exclude us from LE rate // limits. + // Note that this order extension will fail renewals if the ACME account key has changed + // since the last issuance, see + // https://github.com/tailscale/tailscale/issues/18251 var opts []acme.OrderOption - if previous != nil { + if previous != nil && !envknob.Bool("TS_DEBUG_ACME_FORCE_RENEWAL") { prevCrt, err := previous.parseCertificate() if err == nil { opts = append(opts, acme.WithOrderReplacesCert(prevCrt)) From ee5947027014337784af29919ceed447a41efffc Mon Sep 17 00:00:00 2001 From: Alex Valiushko Date: Fri, 19 Dec 2025 16:15:41 -0800 Subject: [PATCH 0832/1093] net/udprelay: remove tailscaled_peer_relay_endpoints_total (#18254) This gauge will be reworked to include endpoint state in future. Updates tailscale/corp#30820 Change-Id: I66f349d89422b46eec4ecbaf1a99ad656c7301f9 Signed-off-by: Alex Valiushko --- net/udprelay/metrics.go | 25 ------------------------- net/udprelay/metrics_test.go | 9 --------- net/udprelay/server.go | 2 -- 3 files changed, 36 deletions(-) diff --git a/net/udprelay/metrics.go b/net/udprelay/metrics.go index 45d3c9f34266d..b7c0710c2afc1 100644 --- a/net/udprelay/metrics.go +++ b/net/udprelay/metrics.go @@ -22,10 +22,6 @@ var ( cMetricForwarded46Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp4_udp6") cMetricForwarded64Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp6_udp4") cMetricForwarded66Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp6_udp6") - - // [clientmetric.Gauge] does not let us embed existing counters, - // [metrics.addEndpoints] records data into client and user gauges independently. 
- cMetricEndpoints = clientmetric.NewGauge("udprelay_endpoints") ) type transport string @@ -40,9 +36,6 @@ type forwardedLabel struct { transportOut transport `prom:"transport_out"` } -type endpointLabel struct { -} - type metrics struct { forwarded44Packets expvar.Int forwarded46Packets expvar.Int @@ -53,8 +46,6 @@ type metrics struct { forwarded46Bytes expvar.Int forwarded64Bytes expvar.Int forwarded66Bytes expvar.Int - - endpoints expvar.Int } // registerMetrics publishes user and client metric counters for peer relay server. @@ -74,12 +65,6 @@ func registerMetrics(reg *usermetric.Registry) *metrics { "counter", "Number of bytes forwarded via Peer Relay", ) - uMetricEndpoints = usermetric.NewMultiLabelMapWithRegistry[endpointLabel]( - reg, - "tailscaled_peer_relay_endpoints_total", - "gauge", - "Number of allocated Peer Relay endpoints", - ) forwarded44 = forwardedLabel{transportIn: transportUDP4, transportOut: transportUDP4} forwarded46 = forwardedLabel{transportIn: transportUDP4, transportOut: transportUDP6} forwarded64 = forwardedLabel{transportIn: transportUDP6, transportOut: transportUDP4} @@ -98,8 +83,6 @@ func registerMetrics(reg *usermetric.Registry) *metrics { uMetricForwardedBytes.Set(forwarded64, &m.forwarded64Bytes) uMetricForwardedBytes.Set(forwarded66, &m.forwarded66Bytes) - uMetricEndpoints.Set(endpointLabel{}, &m.endpoints) - // Publish client metrics. cMetricForwarded44Packets.Register(&m.forwarded44Packets) cMetricForwarded46Packets.Register(&m.forwarded46Packets) @@ -113,13 +96,6 @@ func registerMetrics(reg *usermetric.Registry) *metrics { return m } -// addEndpoints updates the total endpoints gauge. Value can be negative. -// It records two gauges independently, see [cMetricEndpoints] doc. -func (m *metrics) addEndpoints(value int64) { - m.endpoints.Add(value) - cMetricEndpoints.Add(value) -} - // countForwarded records user and client metrics according to the // inbound and outbound address families. 
func (m *metrics) countForwarded(in4, out4 bool, bytes, packets int64) { @@ -149,5 +125,4 @@ func deregisterMetrics() { cMetricForwarded46Bytes.UnregisterAll() cMetricForwarded64Bytes.UnregisterAll() cMetricForwarded66Bytes.UnregisterAll() - cMetricEndpoints.Set(0) } diff --git a/net/udprelay/metrics_test.go b/net/udprelay/metrics_test.go index 25345dc6b3459..5c6a751134e8b 100644 --- a/net/udprelay/metrics_test.go +++ b/net/udprelay/metrics_test.go @@ -22,20 +22,11 @@ func TestMetrics(t *testing.T) { want := []string{ "tailscaled_peer_relay_forwarded_packets_total", "tailscaled_peer_relay_forwarded_bytes_total", - "tailscaled_peer_relay_endpoints_total", } slices.Sort(have) slices.Sort(want) c.Assert(have, qt.CmpEquals(), want) - // Validate addEndpoints. - m.addEndpoints(1) - c.Assert(m.endpoints.Value(), qt.Equals, int64(1)) - c.Assert(cMetricEndpoints.Value(), qt.Equals, int64(1)) - m.addEndpoints(-1) - c.Assert(m.endpoints.Value(), qt.Equals, int64(0)) - c.Assert(cMetricEndpoints.Value(), qt.Equals, int64(0)) - // Validate countForwarded. 
m.countForwarded(true, true, 1, 1) c.Assert(m.forwarded44Bytes.Value(), qt.Equals, int64(1)) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index e98fdf7bbad33..acdbf5ad6893a 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -673,7 +673,6 @@ func (s *Server) endpointGCLoop() { defer s.mu.Unlock() for k, v := range s.serverEndpointByDisco { if v.isExpired(now, s.bindLifetime, s.steadyStateLifetime) { - s.metrics.addEndpoints(-1) delete(s.serverEndpointByDisco, k) s.serverEndpointByVNI.Delete(v.vni) } @@ -969,7 +968,6 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv s.serverEndpointByVNI.Store(e.vni, e) s.logf("allocated endpoint vni=%d lamportID=%d disco[0]=%v disco[1]=%v", e.vni, e.lamportID, pair.Get()[0].ShortString(), pair.Get()[1].ShortString()) - s.metrics.addEndpoints(1) return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, ClientDisco: pair.Get(), From 9c3a420e158e32e8dcfa8b63a0794d4296d9abe7 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Mon, 22 Dec 2025 13:38:13 +0000 Subject: [PATCH 0833/1093] cmd/tailscale/cli: document why there's no --force-reauth on login Change-Id: Ied799fefbbb4612c7ba57b8369a418b7704eebf8 Updates #18273 Signed-off-by: Alex Chan --- cmd/tailscale/cli/up.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 2a3cbf75ace0c..d6971a6814b7c 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -137,6 +137,9 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { // Some flags are only for "up", not "login". upf.BoolVar(&upArgs.json, "json", false, "output in JSON format (WARNING: format subject to change)") upf.BoolVar(&upArgs.reset, "reset", false, "reset unspecified settings to their default values") + + // There's no --force-reauth flag on "login" because all login commands + // trigger a reauth. 
upf.BoolVar(&upArgs.forceReauth, "force-reauth", false, "force reauthentication (WARNING: this may bring down the Tailscale connection and thus should not be done remotely over SSH or RDP)") registerAcceptRiskFlag(upf, &upArgs.acceptedRisks) } From 2917ea8d0e1b816ea80b4237d2adb25295984d87 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 19 Dec 2025 12:22:19 -0600 Subject: [PATCH 0834/1093] ipn/ipnauth, safesocket: defer named pipe client's token retrieval until ipnserver needs it An error returned by net.Listener.Accept() causes the owning http.Server to shut down. With the deprecation of net.Error.Temporary(), there's no way for the http.Server to test whether the returned error is temporary / retryable or not (see golang/go#66252). Because of that, errors returned by (*safesocket.winIOPipeListener).Accept() cause the LocalAPI server (aka ipnserver.Server) to shut down, and tailscaled process to exit. While this might be acceptable in the case of non-recoverable errors, such as programmer errors, we shouldn't shut down the entire tailscaled process for client- or connection-specific errors, such as when we couldn't obtain the client's access token because the client attempts to connect at the Anonymous impersonation level. Instead, the LocalAPI server should gracefully handle these errors by denying access and returning a 401 Unauthorized to the client. In tailscale/tscert#15, we fixed a known bug where Caddy and other apps using tscert would attempt to connect at the Anonymous impersonation level and fail. However, we should also fix this on the tailscaled side to prevent a potential DoS, where a local app could deliberately open the Tailscale LocalAPI named pipe at the Anonymous impersonation level and cause tailscaled to exit. In this PR, we defer token retrieval until (*WindowsClientConn).Token() is called and propagate the returned token or error via ipnauth.GetConnIdentity() to ipnserver, which handles it the same way as other ipnauth-related errors. 
Fixes #18212 Fixes tailscale/tscert#13 Signed-off-by: Nick Khyl --- ipn/ipnauth/ipnauth_windows.go | 29 +++++-------- safesocket/pipe_windows.go | 74 ++++++++++++++++++++++++++------- safesocket/pipe_windows_test.go | 7 +++- 3 files changed, 76 insertions(+), 34 deletions(-) diff --git a/ipn/ipnauth/ipnauth_windows.go b/ipn/ipnauth/ipnauth_windows.go index 1138bc23d20fa..e3ea448a855e5 100644 --- a/ipn/ipnauth/ipnauth_windows.go +++ b/ipn/ipnauth/ipnauth_windows.go @@ -25,6 +25,12 @@ func GetConnIdentity(logf logger.Logf, c net.Conn) (ci *ConnIdentity, err error) if !ok { return nil, fmt.Errorf("not a WindowsClientConn: %T", c) } + if err := wcc.CheckToken(); err != nil { + // Failure to obtain a token means the client cannot be authenticated. + // We don't care about the exact error, but it typically means the client + // attempted to connect at the Anonymous impersonation level. + return nil, fmt.Errorf("authentication failed: %w", err) + } ci.pid, err = wcc.ClientPID() if err != nil { return nil, err @@ -169,26 +175,13 @@ func (t *token) IsUID(uid ipn.WindowsUserID) bool { // WindowsToken returns the WindowsToken representing the security context // of the connection's client. func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) { - var wcc *safesocket.WindowsClientConn - var ok bool - if wcc, ok = ci.conn.(*safesocket.WindowsClientConn); !ok { + wcc, ok := ci.conn.(*safesocket.WindowsClientConn) + if !ok { return nil, fmt.Errorf("not a WindowsClientConn: %T", ci.conn) } - - // We duplicate the token's handle so that the WindowsToken we return may have - // a lifetime independent from the original connection. 
- var h windows.Handle - if err := windows.DuplicateHandle( - windows.CurrentProcess(), - windows.Handle(wcc.Token()), - windows.CurrentProcess(), - &h, - 0, - false, - windows.DUPLICATE_SAME_ACCESS, - ); err != nil { + token, err := wcc.Token() + if err != nil { return nil, err } - - return newToken(windows.Token(h)), nil + return newToken(token), nil } diff --git a/safesocket/pipe_windows.go b/safesocket/pipe_windows.go index 58283416508da..2968542f2ccf4 100644 --- a/safesocket/pipe_windows.go +++ b/safesocket/pipe_windows.go @@ -10,6 +10,7 @@ import ( "fmt" "net" "runtime" + "sync" "time" "github.com/tailscale/go-winio" @@ -49,7 +50,9 @@ func listen(path string) (net.Listener, error) { // embedded net.Conn must be a go-winio PipeConn. type WindowsClientConn struct { winioPipeConn - token windows.Token + tokenOnce sync.Once + token windows.Token // or zero, if we couldn't obtain the client's token + tokenErr error } // winioPipeConn is a subset of the interface implemented by the go-winio's @@ -79,12 +82,63 @@ func (conn *WindowsClientConn) ClientPID() (int, error) { return int(pid), nil } -// Token returns the Windows access token of the client user. -func (conn *WindowsClientConn) Token() windows.Token { - return conn.token +// CheckToken returns an error if the client user's access token could not be retrieved, +// for example when the client opens the pipe with an anonymous impersonation level. +// +// Deprecated: use [WindowsClientConn.Token] instead. +func (conn *WindowsClientConn) CheckToken() error { + _, err := conn.getToken() + return err +} + +// getToken returns the Windows access token of the client user, +// or an error if the token could not be retrieved, for example +// when the client opens the pipe with an anonymous impersonation level. +// +// The connection retains ownership of the returned token handle; +// the caller must not close it. 
+// +// TODO(nickkhyl): Remove this, along with [WindowsClientConn.CheckToken], +// once [ipnauth.ConnIdentity] is removed in favor of [ipnauth.Actor]. +func (conn *WindowsClientConn) getToken() (windows.Token, error) { + conn.tokenOnce.Do(func() { + conn.token, conn.tokenErr = clientUserAccessToken(conn.winioPipeConn) + }) + return conn.token, conn.tokenErr +} + +// Token returns the Windows access token of the client user, +// or an error if the token could not be retrieved, for example +// when the client opens the pipe with an anonymous impersonation level. +// +// The caller is responsible for closing the returned token handle. +func (conn *WindowsClientConn) Token() (windows.Token, error) { + token, err := conn.getToken() + if err != nil { + return 0, err + } + + var dupToken windows.Handle + if err := windows.DuplicateHandle( + windows.CurrentProcess(), + windows.Handle(token), + windows.CurrentProcess(), + &dupToken, + 0, + false, + windows.DUPLICATE_SAME_ACCESS, + ); err != nil { + return 0, err + } + return windows.Token(dupToken), nil } func (conn *WindowsClientConn) Close() error { + // Either wait for any pending [WindowsClientConn.Token] calls to complete, + // or ensure that the token will never be opened. 
+ conn.tokenOnce.Do(func() { + conn.tokenErr = net.ErrClosed + }) if conn.token != 0 { conn.token.Close() conn.token = 0 @@ -110,17 +164,7 @@ func (lw *winIOPipeListener) Accept() (net.Conn, error) { conn.Close() return nil, fmt.Errorf("unexpected type %T from winio.ListenPipe listener (itself a %T)", conn, lw.Listener) } - - token, err := clientUserAccessToken(pipeConn) - if err != nil { - conn.Close() - return nil, err - } - - return &WindowsClientConn{ - winioPipeConn: pipeConn, - token: token, - }, nil + return &WindowsClientConn{winioPipeConn: pipeConn}, nil } func clientUserAccessToken(pc winioPipeConn) (windows.Token, error) { diff --git a/safesocket/pipe_windows_test.go b/safesocket/pipe_windows_test.go index 054781f235abd..8d9cbd19b5e43 100644 --- a/safesocket/pipe_windows_test.go +++ b/safesocket/pipe_windows_test.go @@ -58,9 +58,14 @@ func TestExpectedWindowsTypes(t *testing.T) { if wcc.winioPipeConn.Fd() == 0 { t.Error("accepted conn had unexpected zero fd") } - if wcc.token == 0 { + tok, err := wcc.Token() + if err != nil { + t.Errorf("failed to retrieve client token: %v", err) + } + if tok == 0 { t.Error("accepted conn had unexpected zero token") } + tok.Close() s.Write([]byte("hello")) From d451cd54a70152a95ad708592a981cb5e37395a8 Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Thu, 25 Dec 2025 01:57:11 -0500 Subject: [PATCH 0835/1093] cmd/derper: add --acme-email flag for GCP cert mode (#18278) GCP Certificate Manager requires an email contact on ACME accounts. Add --acme-email flag that is required for --certmode=gcp and optional for --certmode=letsencrypt. 
Fixes #18277 Signed-off-by: Raj Singh --- cmd/derper/cert.go | 9 ++++++++- cmd/derper/cert_test.go | 16 +++++++++++----- cmd/derper/derper.go | 3 ++- 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/cmd/derper/cert.go b/cmd/derper/cert.go index d383c82f01157..dfd7769905132 100644 --- a/cmd/derper/cert.go +++ b/cmd/derper/cert.go @@ -44,7 +44,7 @@ type certProvider interface { HTTPHandler(fallback http.Handler) http.Handler } -func certProviderByCertMode(mode, dir, hostname, eabKID, eabKey string) (certProvider, error) { +func certProviderByCertMode(mode, dir, hostname, eabKID, eabKey, email string) (certProvider, error) { if dir == "" { return nil, errors.New("missing required --certdir flag") } @@ -59,6 +59,9 @@ func certProviderByCertMode(mode, dir, hostname, eabKID, eabKey string) (certPro if eabKID == "" || eabKey == "" { return nil, errors.New("--certmode=gcp requires --acme-eab-kid and --acme-eab-key flags") } + if email == "" { + return nil, errors.New("--certmode=gcp requires --acme-email flag") + } keyBytes, err := decodeEABKey(eabKey) if err != nil { return nil, err @@ -73,6 +76,10 @@ func certProviderByCertMode(mode, dir, hostname, eabKID, eabKey string) (certPro } if hostname == "derp.tailscale.com" { certManager.HostPolicy = prodAutocertHostPolicy + } + if email != "" { + certManager.Email = email + } else if hostname == "derp.tailscale.com" { certManager.Email = "security@tailscale.com" } return certManager, nil diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index 3a8da46108428..b4e18f6951ae0 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -91,7 +91,7 @@ func TestCertIP(t *testing.T) { t.Fatalf("Error closing key.pem: %v", err) } - cp, err := certProviderByCertMode("manual", dir, hostname, "", "") + cp, err := certProviderByCertMode("manual", dir, hostname, "", "", "") if err != nil { t.Fatal(err) } @@ -174,19 +174,25 @@ func TestGCPCertMode(t *testing.T) { dir := t.TempDir() // Missing EAB 
credentials - _, err := certProviderByCertMode("gcp", dir, "test.example.com", "", "") + _, err := certProviderByCertMode("gcp", dir, "test.example.com", "", "", "test@example.com") if err == nil { t.Fatal("expected error when EAB credentials are missing") } + // Missing email + _, err = certProviderByCertMode("gcp", dir, "test.example.com", "kid", "dGVzdC1rZXk", "") + if err == nil { + t.Fatal("expected error when email is missing") + } + // Invalid base64 - _, err = certProviderByCertMode("gcp", dir, "test.example.com", "kid", "not-valid!") + _, err = certProviderByCertMode("gcp", dir, "test.example.com", "kid", "not-valid!", "test@example.com") if err == nil { t.Fatal("expected error for invalid base64") } // Valid base64url (no padding) - cp, err := certProviderByCertMode("gcp", dir, "test.example.com", "kid", "dGVzdC1rZXk") + cp, err := certProviderByCertMode("gcp", dir, "test.example.com", "kid", "dGVzdC1rZXk", "test@example.com") if err != nil { t.Fatalf("base64url: %v", err) } @@ -195,7 +201,7 @@ func TestGCPCertMode(t *testing.T) { } // Valid standard base64 (with padding, gcloud format) - cp, err = certProviderByCertMode("gcp", dir, "test.example.com", "kid", "dGVzdC1rZXk=") + cp, err = certProviderByCertMode("gcp", dir, "test.example.com", "kid", "dGVzdC1rZXk=", "test@example.com") if err != nil { t.Fatalf("base64: %v", err) } diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index aeb2adb5dc61d..16f531be0ec62 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -65,6 +65,7 @@ var ( hostname = flag.String("hostname", "derp.tailscale.com", "TLS host name for certs, if addr's port is :443. 
When --certmode=manual, this can be an IP address to avoid SNI checks") acmeEABKid = flag.String("acme-eab-kid", "", "ACME External Account Binding (EAB) Key ID (required for --certmode=gcp)") acmeEABKey = flag.String("acme-eab-key", "", "ACME External Account Binding (EAB) HMAC key, base64-encoded (required for --certmode=gcp)") + acmeEmail = flag.String("acme-email", "", "ACME account contact email address (required for --certmode=gcp, optional for letsencrypt)") runSTUN = flag.Bool("stun", true, "whether to run a STUN server. It will bind to the same IP (if any) as the --addr flag value.") runDERP = flag.Bool("derp", true, "whether to run a DERP server. The only reason to set this false is if you're decommissioning a server but want to keep its bootstrap DNS functionality still running.") flagHome = flag.String("home", "", "what to serve at the root path. It may be left empty (the default, for a default homepage), \"blank\" for a blank page, or a URL to redirect to") @@ -345,7 +346,7 @@ func main() { if serveTLS { log.Printf("derper: serving on %s with TLS", *addr) var certManager certProvider - certManager, err = certProviderByCertMode(*certMode, *certDir, *hostname, *acmeEABKid, *acmeEABKey) + certManager, err = certProviderByCertMode(*certMode, *certDir, *hostname, *acmeEABKid, *acmeEABKey, *acmeEmail) if err != nil { log.Fatalf("derper: can not start cert provider: %v", err) } From b7081522e7b90468468b037449ce7c7f9b357e52 Mon Sep 17 00:00:00 2001 From: Vince Liem Date: Mon, 5 Jan 2026 21:10:18 +0100 Subject: [PATCH 0836/1093] scripts/installer.sh: add ultramarine to supported OS list --- scripts/installer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index db94c26ec508a..89d54a4311d01 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -246,7 +246,7 @@ main() { VERSION="" PACKAGETYPE="dnf" ;; - 
rocky|almalinux|nobara|openmandriva|sangoma|risios|cloudlinux|alinux|fedora-asahi-remix) + rocky|almalinux|nobara|openmandriva|sangoma|risios|cloudlinux|alinux|fedora-asahi-remix|ultramarine) OS="fedora" VERSION="" PACKAGETYPE="dnf" From 39a61888b8b39f443c9a97a66ab538ff011f4e36 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Mon, 5 Jan 2026 15:18:23 -0800 Subject: [PATCH 0837/1093] ssh/tailssh: send audit messages on SSH login (Linux) Send LOGIN audit messages to the kernel audit subsystem on Linux when users successfully authenticate to Tailscale SSH. This provides administrators with audit trail integration via auditd or journald, recording details about both the Tailscale user (whois) and the mapped local user account. The implementation uses raw netlink sockets to send AUDIT_USER_LOGIN messages to the kernel audit subsystem. It requires CAP_AUDIT_WRITE capability, which is checked at runtime. If the capability is not present, audit logging is silently skipped. Audit messages are sent to the kernel (pid 0) and consumed by either auditd (written to /var/log/audit/audit.log) or journald (available via journalctl _TRANSPORT=audit), depending on system configuration. Note: This may result in duplicate messages on a system where auditd/journald audit logs are enabled and the system has and supports `login -h`. Sadly Linux login code paths are still an inconsistent wild west so we accept the potential duplication rather than trying to avoid it. 
Fixes #18332 Signed-off-by: James Tucker --- ssh/tailssh/auditd_linux.go | 176 ++++++++++++++++++++++++++++++ ssh/tailssh/auditd_linux_test.go | 180 +++++++++++++++++++++++++++++++ ssh/tailssh/tailssh.go | 10 ++ 3 files changed, 366 insertions(+) create mode 100644 ssh/tailssh/auditd_linux.go create mode 100644 ssh/tailssh/auditd_linux_test.go diff --git a/ssh/tailssh/auditd_linux.go b/ssh/tailssh/auditd_linux.go new file mode 100644 index 0000000000000..e9f551d9e7991 --- /dev/null +++ b/ssh/tailssh/auditd_linux.go @@ -0,0 +1,176 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android + +package tailssh + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "syscall" + + "golang.org/x/sys/unix" + "tailscale.com/types/logger" +) + +const ( + auditUserLogin = 1112 // audit message type for user login (from linux/audit.h) + netlinkAudit = 9 // AF_NETLINK protocol number for audit (from linux/netlink.h) + nlmFRequest = 0x01 // netlink message flag: request (from linux/netlink.h) + + // maxAuditMessageLength is the maximum length of an audit message payload. + // This is derived from MAX_AUDIT_MESSAGE_LENGTH (8970) in the Linux kernel + // (linux/audit.h), minus overhead for the netlink header and safety margin. + maxAuditMessageLength = 8192 +) + +// hasAuditWriteCap checks if the process has CAP_AUDIT_WRITE in its effective capability set. +func hasAuditWriteCap() bool { + var hdr unix.CapUserHeader + var data [2]unix.CapUserData + + hdr.Version = unix.LINUX_CAPABILITY_VERSION_3 + hdr.Pid = int32(os.Getpid()) + + if err := unix.Capget(&hdr, &data[0]); err != nil { + return false + } + + const capBit = uint32(1 << (unix.CAP_AUDIT_WRITE % 32)) + const capIdx = unix.CAP_AUDIT_WRITE / 32 + return (data[capIdx].Effective & capBit) != 0 +} + +// buildAuditNetlinkMessage constructs a netlink audit message. 
+// This is separated from sendAuditMessage to allow testing the message format +// without requiring CAP_AUDIT_WRITE or a netlink socket. +func buildAuditNetlinkMessage(msgType uint16, message string) ([]byte, error) { + msgBytes := []byte(message) + if len(msgBytes) > maxAuditMessageLength { + msgBytes = msgBytes[:maxAuditMessageLength] + } + msgLen := len(msgBytes) + + totalLen := syscall.NLMSG_HDRLEN + msgLen + alignedLen := (totalLen + syscall.NLMSG_ALIGNTO - 1) & ^(syscall.NLMSG_ALIGNTO - 1) + + nlh := syscall.NlMsghdr{ + Len: uint32(totalLen), + Type: msgType, + Flags: nlmFRequest, + Seq: 1, + Pid: uint32(os.Getpid()), + } + + buf := bytes.NewBuffer(make([]byte, 0, alignedLen)) + if err := binary.Write(buf, binary.NativeEndian, nlh); err != nil { + return nil, err + } + buf.Write(msgBytes) + + for buf.Len() < alignedLen { + buf.WriteByte(0) + } + + return buf.Bytes(), nil +} + +// sendAuditMessage sends a message to the audit subsystem using raw netlink. +// It logs errors but does not return them. 
+func sendAuditMessage(logf logger.Logf, msgType uint16, message string) { + if !hasAuditWriteCap() { + return + } + + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, netlinkAudit) + if err != nil { + logf("auditd: failed to create netlink socket: %v", err) + return + } + defer syscall.Close(fd) + + bindAddr := &syscall.SockaddrNetlink{ + Family: syscall.AF_NETLINK, + Pid: uint32(os.Getpid()), + Groups: 0, + } + + if err := syscall.Bind(fd, bindAddr); err != nil { + logf("auditd: failed to bind netlink socket: %v", err) + return + } + + kernelAddr := &syscall.SockaddrNetlink{ + Family: syscall.AF_NETLINK, + Pid: 0, + Groups: 0, + } + + msgBytes, err := buildAuditNetlinkMessage(msgType, message) + if err != nil { + logf("auditd: failed to build audit message: %v", err) + return + } + + if err := syscall.Sendto(fd, msgBytes, 0, kernelAddr); err != nil { + logf("auditd: failed to send audit message: %v", err) + return + } +} + +// logSSHLogin logs an SSH login event to auditd with whois information. +func logSSHLogin(logf logger.Logf, c *conn) { + if c == nil || c.info == nil || c.localUser == nil { + return + } + + exePath := c.srv.tailscaledPath + if exePath == "" { + exePath = "tailscaled" + } + + srcIP := c.info.src.Addr().String() + srcPort := c.info.src.Port() + dstIP := c.info.dst.Addr().String() + dstPort := c.info.dst.Port() + + tailscaleUser := c.info.uprof.LoginName + tailscaleUserID := c.info.uprof.ID + tailscaleDisplayName := c.info.uprof.DisplayName + nodeName := c.info.node.Name() + nodeID := c.info.node.ID() + + localUser := c.localUser.Username + localUID := c.localUser.Uid + localGID := c.localUser.Gid + + hostname, err := os.Hostname() + if err != nil { + hostname = "unknown" + } + + // use principally the same format as ssh / PAM, which come from the audit userspace, i.e. 
+ // https://github.com/linux-audit/audit-userspace/blob/b6f8c208435038df113a9795e3e202720aee6b70/lib/audit_logging.c#L515 + msg := fmt.Sprintf( + "op=login acct=%s uid=%s gid=%s "+ + "src=%s src_port=%d dst=%s dst_port=%d "+ + "hostname=%q exe=%q terminal=ssh res=success "+ + "ts_user=%q ts_user_id=%d ts_display_name=%q ts_node=%q ts_node_id=%d", + localUser, localUID, localGID, + srcIP, srcPort, dstIP, dstPort, + hostname, exePath, + tailscaleUser, tailscaleUserID, tailscaleDisplayName, nodeName, nodeID, + ) + + sendAuditMessage(logf, auditUserLogin, msg) + + logf("audit: SSH login: user=%s uid=%s from=%s ts_user=%s node=%s", + localUser, localUID, srcIP, tailscaleUser, nodeName) +} + +func init() { + hookSSHLoginSuccess.Set(logSSHLogin) +} diff --git a/ssh/tailssh/auditd_linux_test.go b/ssh/tailssh/auditd_linux_test.go new file mode 100644 index 0000000000000..93f5442918a98 --- /dev/null +++ b/ssh/tailssh/auditd_linux_test.go @@ -0,0 +1,180 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android + +package tailssh + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "os" + "os/exec" + "strings" + "syscall" + "testing" + "time" +) + +// maybeWithSudo returns a command with context that may be prefixed with sudo if not running as root. +func maybeWithSudo(ctx context.Context, name string, args ...string) *exec.Cmd { + if os.Geteuid() == 0 { + return exec.CommandContext(ctx, name, args...) + } + sudoArgs := append([]string{name}, args...) + return exec.CommandContext(ctx, "sudo", sudoArgs...) 
+} + +func TestBuildAuditNetlinkMessage(t *testing.T) { + testCases := []struct { + name string + msgType uint16 + message string + wantType uint16 + }{ + { + name: "simple-message", + msgType: auditUserLogin, + message: "op=login acct=test", + wantType: auditUserLogin, + }, + { + name: "message-with-quoted-fields", + msgType: auditUserLogin, + message: `op=login hostname="test-host" exe="/usr/bin/tailscaled" ts_user="user@example.com" ts_node="node.tail-scale.ts.net"`, + wantType: auditUserLogin, + }, + { + name: "message-with-special-chars", + msgType: auditUserLogin, + message: `op=login hostname="host with spaces" ts_user="user name@example.com" ts_display_name="User \"Quote\" Name"`, + wantType: auditUserLogin, + }, + { + name: "long-message-truncated", + msgType: auditUserLogin, + message: string(make([]byte, 2000)), + wantType: auditUserLogin, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + msg, err := buildAuditNetlinkMessage(tc.msgType, tc.message) + if err != nil { + t.Fatalf("buildAuditNetlinkMessage failed: %v", err) + } + + if len(msg) < syscall.NLMSG_HDRLEN { + t.Fatalf("message too short: got %d bytes, want at least %d", len(msg), syscall.NLMSG_HDRLEN) + } + + var nlh syscall.NlMsghdr + buf := bytes.NewReader(msg[:syscall.NLMSG_HDRLEN]) + if err := binary.Read(buf, binary.NativeEndian, &nlh); err != nil { + t.Fatalf("failed to parse netlink header: %v", err) + } + + if nlh.Type != tc.wantType { + t.Errorf("message type: got %d, want %d", nlh.Type, tc.wantType) + } + + if nlh.Flags != nlmFRequest { + t.Errorf("flags: got 0x%x, want 0x%x", nlh.Flags, nlmFRequest) + } + + if len(msg)%syscall.NLMSG_ALIGNTO != 0 { + t.Errorf("message not aligned: len=%d, alignment=%d", len(msg), syscall.NLMSG_ALIGNTO) + } + + payloadLen := int(nlh.Len) - syscall.NLMSG_HDRLEN + if payloadLen < 0 { + t.Fatalf("invalid payload length: %d", payloadLen) + } + + payload := msg[syscall.NLMSG_HDRLEN : syscall.NLMSG_HDRLEN+payloadLen] + + 
expectedMsg := tc.message + if len(expectedMsg) > maxAuditMessageLength { + expectedMsg = expectedMsg[:maxAuditMessageLength] + } + if string(payload) != expectedMsg { + t.Errorf("payload mismatch:\ngot: %q\nwant: %q", string(payload), expectedMsg) + } + + expectedLen := syscall.NLMSG_HDRLEN + len(payload) + if int(nlh.Len) != expectedLen { + t.Errorf("length field: got %d, want %d", nlh.Len, expectedLen) + } + }) + } +} + +func TestAuditIntegration(t *testing.T) { + if !hasAuditWriteCap() { + t.Skip("skipping: CAP_AUDIT_WRITE not in effective capability set") + } + + if _, err := exec.LookPath("journalctl"); err != nil { + t.Skip("skipping: journalctl not available") + } + + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + checkCmd := maybeWithSudo(ctx, "journalctl", "--field", "_TRANSPORT") + var out bytes.Buffer + checkCmd.Stdout = &out + if err := checkCmd.Run(); err != nil { + t.Skipf("skipping: cannot query journalctl transports: %v", err) + } + if !strings.Contains(out.String(), "audit") { + t.Skip("skipping: journald not configured for audit messages, try: systemctl enable systemd-journald-audit.socket && systemctl restart systemd-journald") + } + + testID := fmt.Sprintf("tailscale-test-%d", time.Now().UnixNano()) + testMsg := fmt.Sprintf("op=test-audit test_id=%s res=success", testID) + + followCmd := maybeWithSudo(ctx, "journalctl", "-f", "_TRANSPORT=audit", "--no-pager") + + stdout, err := followCmd.StdoutPipe() + if err != nil { + t.Fatalf("failed to get stdout pipe: %v", err) + } + + if err := followCmd.Start(); err != nil { + t.Fatalf("failed to start journalctl: %v", err) + } + defer followCmd.Process.Kill() + + testLogf := func(format string, args ...any) { + t.Logf(format, args...) 
+ } + sendAuditMessage(testLogf, auditUserLogin, testMsg) + + bs := bufio.NewScanner(stdout) + found := false + for bs.Scan() { + line := bs.Text() + if strings.Contains(line, testID) { + t.Logf("found audit log entry: %s", line) + found = true + break + } + } + + if err := bs.Err(); err != nil && ctx.Err() == nil { + t.Fatalf("error reading journalctl output: %v", err) + } + + if !found { + if ctx.Err() == context.DeadlineExceeded { + t.Errorf("timeout waiting for audit message with test_id=%s", testID) + } else { + t.Errorf("audit message with test_id=%s not found in journald audit log", testID) + } + } +} diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 7d12ab45f8552..91e1779bfd543 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -31,6 +31,7 @@ import ( gossh "golang.org/x/crypto/ssh" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/ipn/ipnlocal" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" @@ -56,6 +57,10 @@ var ( // authentication methods that may proceed), which results in the SSH // server immediately disconnecting the client. errTerminal = &gossh.PartialSuccessError{} + + // hookSSHLoginSuccess is called after successful SSH authentication. + // It is set by platform-specific code (e.g., auditd_linux.go). 
+ hookSSHLoginSuccess feature.Hook[func(logf logger.Logf, c *conn)] ) const ( @@ -647,6 +652,11 @@ func (c *conn) handleSessionPostSSHAuth(s ssh.Session) { ss := c.newSSHSession(s) ss.logf("handling new SSH connection from %v (%v) to ssh-user %q", c.info.uprof.LoginName, c.info.src.Addr(), c.localUser.Username) ss.logf("access granted to %v as ssh-user %q", c.info.uprof.LoginName, c.localUser.Username) + + if f, ok := hookSSHLoginSuccess.GetOk(); ok { + f(c.srv.logf, c) + } + ss.run() } From 2e77b75e96208ccadf7cdf893640d1bf63ef5784 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 5 Jan 2026 16:58:59 -0800 Subject: [PATCH 0838/1093] ipn/ipnlocal: don't fail profile unmarshal due to attestation keys (#18335) Soft-fail on initial unmarshal and try again, ignoring the AttestationKey. This helps in cases where something about the attestation key storage (usually a TPM) is messed up. The old key will be lost, but at least the node can start again. Updates #18302 Updates #15830 Signed-off-by: Andrew Lytvynov --- ipn/ipnlocal/profiles.go | 48 ++++++++++++++++++++++++++++++----- ipn/ipnlocal/profiles_test.go | 38 +++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 7 deletions(-) diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 40a3c9887b2ff..7080e3c3edd50 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -5,10 +5,12 @@ package ipnlocal import ( "cmp" + "crypto" "crypto/rand" "encoding/json" "errors" "fmt" + "io" "runtime" "slices" "strings" @@ -59,6 +61,9 @@ type profileManager struct { // extHost is the bridge between [profileManager] and the registered [ipnext.Extension]s. // It may be nil in tests. A nil pointer is a valid, no-op host. extHost *ExtensionHost + + // Override for key.NewEmptyHardwareAttestationKey used for testing. + newEmptyHardwareAttestationKey func() (key.HardwareAttestationKey, error) } // SetExtensionHost sets the [ExtensionHost] for the [profileManager]. 
@@ -660,13 +665,23 @@ func (pm *profileManager) loadSavedPrefs(k ipn.StateKey) (ipn.PrefsView, error) // if supported by the platform, create an empty hardware attestation key to use when deserializing // to avoid type exceptions from json.Unmarshaling into an interface{}. - hw, _ := key.NewEmptyHardwareAttestationKey() + hw, _ := pm.newEmptyHardwareAttestationKey() savedPrefs.Persist = &persist.Persist{ AttestationKey: hw, } if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { - return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err) + // Try loading again, this time ignoring the AttestationKey contents. + // If that succeeds, there's something wrong with the underlying + // attestation key mechanism (most likely the TPM changed), but we + // should at least proceed with client startup. + origErr := err + savedPrefs.Persist.AttestationKey = &noopAttestationKey{} + if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { + return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %w", err) + } else { + pm.logf("failed to parse savedPrefs with attestation key (error: %v) but parsing without the attestation key succeeded; will proceed without using the old attestation key", origErr) + } } pm.logf("using backend prefs for %q: %v", k, savedPrefs.Pretty()) @@ -912,11 +927,12 @@ func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *healt metricProfileCount.Set(int64(len(knownProfiles))) pm := &profileManager{ - goos: goos, - store: store, - knownProfiles: knownProfiles, - logf: logf, - health: ht, + goos: goos, + store: store, + knownProfiles: knownProfiles, + logf: logf, + health: ht, + newEmptyHardwareAttestationKey: key.NewEmptyHardwareAttestationKey, } var initialProfile ipn.LoginProfileView @@ -985,3 +1001,21 @@ var ( metricMigrationError = clientmetric.NewCounter("profiles_migration_error") metricMigrationSuccess = clientmetric.NewCounter("profiles_migration_success") ) + +// noopAttestationKey is a key.HardwareAttestationKey 
that always successfully +// unmarshals as a zero key. +type noopAttestationKey struct{} + +func (n noopAttestationKey) Public() crypto.PublicKey { + panic("noopAttestationKey.Public should not be called; missing IsZero check somewhere?") +} + +func (n noopAttestationKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + panic("noopAttestationKey.Sign should not be called; missing IsZero check somewhere?") +} + +func (n noopAttestationKey) MarshalJSON() ([]byte, error) { return nil, nil } +func (n noopAttestationKey) UnmarshalJSON([]byte) error { return nil } +func (n noopAttestationKey) Close() error { return nil } +func (n noopAttestationKey) Clone() key.HardwareAttestationKey { return n } +func (n noopAttestationKey) IsZero() bool { return true } diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 95834284e91d5..6be7f0e53f59e 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -4,6 +4,7 @@ package ipnlocal import ( + "errors" "fmt" "os/user" "strconv" @@ -1147,3 +1148,40 @@ func TestProfileStateChangeCallback(t *testing.T) { }) } } + +func TestProfileBadAttestationKey(t *testing.T) { + store := new(mem.Store) + pm, err := newProfileManagerWithGOOS(store, t.Logf, health.NewTracker(eventbustest.NewBus(t)), "linux") + if err != nil { + t.Fatal(err) + } + fk := new(failingHardwareAttestationKey) + pm.newEmptyHardwareAttestationKey = func() (key.HardwareAttestationKey, error) { + return fk, nil + } + sk := ipn.StateKey(t.Name()) + if err := pm.store.WriteState(sk, []byte(`{"Config": {"AttestationKey": {}}}`)); err != nil { + t.Fatal(err) + } + prefs, err := pm.loadSavedPrefs(sk) + if err != nil { + t.Fatal(err) + } + ak := prefs.Persist().AsStruct().AttestationKey + if _, ok := ak.(noopAttestationKey); !ok { + t.Errorf("loaded attestation key of type %T, want noopAttestationKey", ak) + } + if !fk.unmarshalCalled { + t.Error("UnmarshalJSON was not called on 
failingHardwareAttestationKey") + } +} + +type failingHardwareAttestationKey struct { + noopAttestationKey + unmarshalCalled bool +} + +func (k *failingHardwareAttestationKey) UnmarshalJSON([]byte) error { + k.unmarshalCalled = true + return errors.New("failed to unmarshal attestation key!") +} From 68617bb82e3205d9b6eb0a90589e0f3c9033a12f Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 5 Jan 2026 17:05:00 -0800 Subject: [PATCH 0839/1093] cmd/tailscaled: disable state encryption / attestation by default (#18336) TPM-based features have been incredibly painful due to the heterogeneous devices in the wild, and many situations in which the TPM "changes" (is reset or replaced). All of this leads to a lot of customer issues. We hoped to iron out all the kinks and get all users to benefit from state encryption and hardware attestation without manually opting in, but the long tail of kinks is just too long. This change disables TPM-based features on Windows and Linux by default. Node state should get auto-decrypted on update, and old attestation keys will be removed. There's also tailscaled-on-macOS, but it won't have a TPM or Keychain bindings anyway. 
Updates #18302 Updates #15830 Signed-off-by: Andrew Lytvynov --- cmd/tailscaled/tailscaled.go | 18 ++++-------------- ipn/ipnlocal/local.go | 8 +++++++- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 5c8611c8e41d1..6abe0cb797bf5 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -910,13 +910,8 @@ func handleTPMFlags() { log.Fatalf("--hardware-attestation is not supported on this platform or in this build of tailscaled") } case !args.hardwareAttestation.set: - policyHWAttestation, _ := policyclient.Get().GetBoolean(pkey.HardwareAttestation, feature.HardwareAttestationAvailable()) - if !policyHWAttestation { - break - } - if feature.TPMAvailable() { - args.hardwareAttestation.v = true - } + policyHWAttestation, _ := policyclient.Get().GetBoolean(pkey.HardwareAttestation, false) + args.hardwareAttestation.v = policyHWAttestation } switch { @@ -927,13 +922,8 @@ func handleTPMFlags() { log.Fatal(err) } case !args.encryptState.set: - policyEncrypt, _ := policyclient.Get().GetBoolean(pkey.EncryptState, feature.TPMAvailable()) - if !policyEncrypt { - // Default disabled, no need to validate. - return - } - // Default enabled if available. 
- if err := canEncryptState(); err == nil { + policyEncrypt, _ := policyclient.Get().GetBoolean(pkey.EncryptState, false) + if err := canEncryptState(); policyEncrypt && err == nil { args.encryptState.v = true } } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ef89af5af5591..cebb961305a34 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2507,7 +2507,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { // neither UpdatePrefs or reconciliation should change Persist newPrefs.Persist = b.pm.CurrentPrefs().Persist().AsStruct() - if buildfeatures.HasTPM { + if buildfeatures.HasTPM && b.HardwareAttested() { if genKey, ok := feature.HookGenerateAttestationKeyIfEmpty.GetOk(); ok { newKey, err := genKey(newPrefs.Persist, logf) if err != nil { @@ -2519,6 +2519,12 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { } } } + // Remove any existing attestation key if HardwareAttested is false. + if !b.HardwareAttested() && newPrefs.Persist != nil && newPrefs.Persist.AttestationKey != nil && !newPrefs.Persist.AttestationKey.IsZero() { + newPrefs.Persist.AttestationKey = nil + prefsChanged = true + prefsChangedWhy = append(prefsChangedWhy, "removeAttestationKey") + } if prefsChanged { logf("updated prefs: %v, reason: %v", newPrefs.Pretty(), prefsChangedWhy) From 8ea90ba80d640c7197fa80097bd247ea78108a66 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 6 Jan 2026 12:29:46 +0100 Subject: [PATCH 0840/1093] cmd/tailscaled,ipn/{ipnlocal,store/kubestore}: don't create attestation keys for stores that are not bound to a node (#18322) Ensure that hardware attestation keys are not added to tailscaled state stores that are Kubernetes Secrets or AWS SSM as those Tailscale devices should be able to be recreated on different nodes, for example, when moving Pods between nodes. 
Updates tailscale/tailscale#18302 Signed-off-by: Irbe Krumina --- cmd/tailscaled/tailscaled.go | 49 +++++++++++++-- cmd/tailscaled/tailscaled_test.go | 53 ++++++++++++++++ ipn/store/kubestore/store_kube.go | 80 ++++++++++++++++++++---- ipn/store/kubestore/store_kube_test.go | 84 ++++++++++++++++++++++++++ 4 files changed, 251 insertions(+), 15 deletions(-) diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 6abe0cb797bf5..7c19ebb422b87 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -209,7 +209,10 @@ func main() { flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") if buildfeatures.HasTPM { - flag.Var(&args.hardwareAttestation, "hardware-attestation", "use hardware-backed keys to bind node identity to this device when supported by the OS and hardware. Uses TPM 2.0 on Linux and Windows; SecureEnclave on macOS and iOS; and Keystore on Android") + flag.Var(&args.hardwareAttestation, "hardware-attestation", `use hardware-backed keys to bind node identity to this device when supported +by the OS and hardware. Uses TPM 2.0 on Linux and Windows; SecureEnclave on +macOS and iOS; and Keystore on Android. 
Only supported for Tailscale nodes that +store state on filesystem.`) } if f, ok := hookRegisterOutboundProxyFlags.GetOk(); ok { f() @@ -905,13 +908,18 @@ func applyIntegrationTestEnvKnob() { func handleTPMFlags() { switch { case args.hardwareAttestation.v: - if _, err := key.NewEmptyHardwareAttestationKey(); err == key.ErrUnsupported { + if err := canUseHardwareAttestation(); err != nil { log.SetFlags(0) - log.Fatalf("--hardware-attestation is not supported on this platform or in this build of tailscaled") + log.Fatal(err) } case !args.hardwareAttestation.set: policyHWAttestation, _ := policyclient.Get().GetBoolean(pkey.HardwareAttestation, false) - args.hardwareAttestation.v = policyHWAttestation + if err := canUseHardwareAttestation(); err != nil { + log.Printf("[unexpected] policy requires hardware attestation, but device does not support it: %v", err) + args.hardwareAttestation.v = false + } else { + args.hardwareAttestation.v = policyHWAttestation + } } switch { @@ -929,6 +937,39 @@ func handleTPMFlags() { } } +// canUseHardwareAttestation returns an error if hardware attestation can't be +// enabled, either due to availability or compatibility with other settings. +func canUseHardwareAttestation() error { + if _, err := key.NewEmptyHardwareAttestationKey(); err == key.ErrUnsupported { + return errors.New("--hardware-attestation is not supported on this platform or in this build of tailscaled") + } + // Hardware attestation keys are TPM-bound and cannot be migrated between + // machines. Disable when using portable state stores like kube: or arn: + // where state may be loaded on a different machine. 
+ if args.statepath != "" && isPortableStore(args.statepath) { + return errors.New("--hardware-attestation cannot be used with portable state stores (kube:, arn:) because TPM-bound keys cannot be migrated between machines") + } + return nil +} + +// isPortableStore reports whether the given state path refers to a portable +// state store where state may be loaded on different machines. +// All stores apart from file store and TPM store are portable. +func isPortableStore(path string) bool { + if store.HasKnownProviderPrefix(path) && !strings.HasPrefix(path, store.TPMPrefix) { + return true + } + // In most cases Kubernetes Secret and AWS SSM stores would have been caught + // by the earlier check - but that check relies on those stores having been + // registered. This additional check is here to ensure that if we ever + // produce a faulty build that failed to register some store, users who + // upgraded to that don't get hardware keys generated. + if strings.HasPrefix(path, "kube:") || strings.HasPrefix(path, "arn:") { + return true + } + return false +} + // canEncryptState returns an error if state encryption can't be enabled, // either due to availability or compatibility with other settings. 
func canEncryptState() error { diff --git a/cmd/tailscaled/tailscaled_test.go b/cmd/tailscaled/tailscaled_test.go index 1188ad35f3b5b..36327cccc7bc7 100644 --- a/cmd/tailscaled/tailscaled_test.go +++ b/cmd/tailscaled/tailscaled_test.go @@ -88,3 +88,56 @@ func TestStateStoreError(t *testing.T) { } }) } + +func TestIsPortableStore(t *testing.T) { + tests := []struct { + name string + path string + want bool + }{ + { + name: "kube_store", + path: "kube:my-secret", + want: true, + }, + { + name: "aws_arn_store", + path: "arn:aws:ssm:us-east-1:123456789012:parameter/tailscale/state", + want: true, + }, + { + name: "tpm_store", + path: "tpmseal:/var/lib/tailscale/tailscaled.state", + want: false, + }, + { + name: "local_file_store", + path: "/var/lib/tailscale/tailscaled.state", + want: false, + }, + { + name: "empty_path", + path: "", + want: false, + }, + { + name: "mem_store", + path: "mem:", + want: true, + }, + { + name: "windows_file_store", + path: `C:\ProgramData\Tailscale\server-state.conf`, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := isPortableStore(tt.path) + if got != tt.want { + t.Errorf("isPortableStore(%q) = %v, want %v", tt.path, got, tt.want) + } + }) + } +} diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index f48237c057142..ba45409ed7903 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -6,8 +6,8 @@ package kubestore import ( "context" + "encoding/json" "fmt" - "log" "net" "net/http" "os" @@ -57,6 +57,8 @@ type Store struct { certShareMode string // 'ro', 'rw', or empty podName string + logf logger.Logf + // memory holds the latest tailscale state. Writes write state to a kube // Secret and memory, Reads read from memory. 
memory mem.Store @@ -96,6 +98,7 @@ func newWithClient(logf logger.Logf, c kubeclient.Client, secretName string) (*S canPatch: canPatch, secretName: secretName, podName: os.Getenv("POD_NAME"), + logf: logf, } if envknob.IsCertShareReadWriteMode() { s.certShareMode = "rw" @@ -113,11 +116,11 @@ func newWithClient(logf logger.Logf, c kubeclient.Client, secretName string) (*S if err := s.loadCerts(context.Background(), sel); err != nil { // We will attempt to again retrieve the certs from Secrets when a request for an HTTPS endpoint // is received. - log.Printf("[unexpected] error loading TLS certs: %v", err) + s.logf("[unexpected] error loading TLS certs: %v", err) } } if s.certShareMode == "ro" { - go s.runCertReload(context.Background(), logf) + go s.runCertReload(context.Background()) } return s, nil } @@ -147,7 +150,7 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { // of a Tailscale Kubernetes node's state Secret. func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) (err error) { if s.certShareMode == "ro" { - log.Printf("[unexpected] TLS cert and key write in read-only mode") + s.logf("[unexpected] TLS cert and key write in read-only mode") } if err := dnsname.ValidHostname(domain); err != nil { return fmt.Errorf("invalid domain name %q: %w", domain, err) @@ -258,11 +261,11 @@ func (s *Store) updateSecret(data map[string][]byte, secretName string) (err err defer func() { if err != nil { if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateUpdateFailed, err.Error()); err != nil { - log.Printf("kubestore: error creating tailscaled state update Event: %v", err) + s.logf("kubestore: error creating tailscaled state update Event: %v", err) } } else { if err := s.client.Event(ctx, eventTypeNormal, reasonTailscaleStateUpdated, "Successfully updated tailscaled state Secret"); err != nil { - log.Printf("kubestore: error creating tailscaled state Event: %v", err) + s.logf("kubestore: error creating tailscaled state Event: 
%v", err) } } cancel() @@ -342,17 +345,72 @@ func (s *Store) loadState() (err error) { return ipn.ErrStateNotExist } if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateLoadFailed, err.Error()); err != nil { - log.Printf("kubestore: error creating Event: %v", err) + s.logf("kubestore: error creating Event: %v", err) } return err } if err := s.client.Event(ctx, eventTypeNormal, reasonTailscaleStateLoaded, "Successfully loaded tailscaled state from Secret"); err != nil { - log.Printf("kubestore: error creating Event: %v", err) + s.logf("kubestore: error creating Event: %v", err) + } + data, err := s.maybeStripAttestationKeyFromProfile(secret.Data) + if err != nil { + return fmt.Errorf("error attempting to strip attestation data from state Secret: %w", err) } - s.memory.LoadFromMap(secret.Data) + s.memory.LoadFromMap(data) return nil } +// maybeStripAttestationKeyFromProfile removes the hardware attestation key +// field from serialized Tailscale profile. This is done to recover from a bug +// introduced in 1.92, where node-bound hardware attestation keys were added to +// Tailscale states stored in Kubernetes Secrets. +// See https://github.com/tailscale/tailscale/issues/18302 +// TODO(irbekrm): it would be good if we could somehow determine when we no +// longer need to run this check. 
+func (s *Store) maybeStripAttestationKeyFromProfile(data map[string][]byte) (map[string][]byte, error) { + prefsKey := extractPrefsKey(data) + prefsBytes, ok := data[prefsKey] + if !ok { + return data, nil + } + var prefs map[string]any + if err := json.Unmarshal(prefsBytes, &prefs); err != nil { + s.logf("[unexpected]: kube store: failed to unmarshal prefs data") + // don't error as in most cases the state won't have the attestation key + return data, nil + } + + config, ok := prefs["Config"].(map[string]any) + if !ok { + return data, nil + } + if _, hasKey := config["AttestationKey"]; !hasKey { + return data, nil + } + s.logf("kube store: found redundant attestation key, deleting") + delete(config, "AttestationKey") + prefsBytes, err := json.Marshal(prefs) + if err != nil { + return nil, fmt.Errorf("[unexpected] kube store: failed to marshal profile after removing attestation key: %v", err) + } + data[prefsKey] = prefsBytes + if err := s.updateSecret(map[string][]byte{prefsKey: prefsBytes}, s.secretName); err != nil { + // don't error out - this might have been a temporary kube API server + // connection issue. The key will be removed from the in-memory cache + // and we'll retry updating the Secret on the next restart. + s.logf("kube store: error updating Secret after stripping AttestationKey: %v", err) + } + return data, nil +} + +const currentProfileKey = "_current-profile" + +// extractPrefs returns the key at which Tailscale prefs are stored in the +// provided Secret data. +func extractPrefsKey(data map[string][]byte) string { + return string(data[currentProfileKey]) +} + // runCertReload relists and reloads all TLS certs for endpoints shared by this // node from Secrets other than the state Secret to ensure that renewed certs get eventually loaded. 
// It is not critical to reload a cert immediately after @@ -361,7 +419,7 @@ func (s *Store) loadState() (err error) { // Note that if shared certs are not found in memory on an HTTPS request, we // do a Secret lookup, so this mechanism does not need to ensure that newly // added Ingresses' certs get loaded. -func (s *Store) runCertReload(ctx context.Context, logf logger.Logf) { +func (s *Store) runCertReload(ctx context.Context) { ticker := time.NewTicker(time.Hour * 24) defer ticker.Stop() for { @@ -371,7 +429,7 @@ func (s *Store) runCertReload(ctx context.Context, logf logger.Logf) { case <-ticker.C: sel := s.certSecretSelector() if err := s.loadCerts(ctx, sel); err != nil { - logf("[unexpected] error reloading TLS certs: %v", err) + s.logf("[unexpected] error reloading TLS certs: %v", err) } } } diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 8c8e5e87075f0..44a4bbb7fc14d 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -20,6 +20,90 @@ import ( "tailscale.com/kube/kubetypes" ) +func TestKubernetesPodMigrationWithTPMAttestationKey(t *testing.T) { + stateWithAttestationKey := `{ + "Config": { + "NodeID": "nSTABLE123456", + "AttestationKey": { + "tpmPrivate": "c2Vuc2l0aXZlLXRwbS1kYXRhLXRoYXQtb25seS13b3Jrcy1vbi1vcmlnaW5hbC1ub2Rl", + "tpmPublic": "cHVibGljLXRwbS1kYXRhLWZvci1hdHRlc3RhdGlvbi1rZXk=" + } + } + }` + + secretData := map[string][]byte{ + "profile-abc123": []byte(stateWithAttestationKey), + "_current-profile": []byte("profile-abc123"), + } + + client := &kubeclient.FakeClient{ + GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + return &kubeapi.Secret{Data: secretData}, nil + }, + CheckSecretPermissionsImpl: func(ctx context.Context, name string) (bool, bool, error) { + return true, true, nil + }, + JSONPatchResourceImpl: func(ctx context.Context, name, resourceType string, patches []kubeclient.JSONPatch) error { + for _, p := 
range patches { + if p.Op == "add" && p.Path == "/data" { + secretData = p.Value.(map[string][]byte) + } + } + return nil + }, + } + + store := &Store{ + client: client, + canPatch: true, + secretName: "ts-state", + memory: mem.Store{}, + logf: t.Logf, + } + + if err := store.loadState(); err != nil { + t.Fatalf("loadState failed: %v", err) + } + + // Verify we can read the state from the store + stateBytes, err := store.ReadState("profile-abc123") + if err != nil { + t.Fatalf("ReadState failed: %v", err) + } + + // The state should be readable as JSON + var state map[string]json.RawMessage + if err := json.Unmarshal(stateBytes, &state); err != nil { + t.Fatalf("failed to unmarshal state: %v", err) + } + + // Verify the Config field exists + configRaw, ok := state["Config"] + if !ok { + t.Fatal("Config field not found in state") + } + + // Parse the Config to verify fields are preserved + var config map[string]json.RawMessage + if err := json.Unmarshal(configRaw, &config); err != nil { + t.Fatalf("failed to unmarshal Config: %v", err) + } + + // The AttestationKey should be stripped by the kubestore + if _, hasAttestation := config["AttestationKey"]; hasAttestation { + t.Error("AttestationKey should be stripped from state loaded by kubestore") + } + + // Verify other fields are preserved + var nodeID string + if err := json.Unmarshal(config["NodeID"], &nodeID); err != nil { + t.Fatalf("failed to unmarshal NodeID: %v", err) + } + if nodeID != "nSTABLE123456" { + t.Errorf("NodeID mismatch: got %q, want %q", nodeID, "nSTABLE123456") + } +} + func TestWriteState(t *testing.T) { tests := []struct { name string From 7de1b0b33082cc28eb26ab7dd3703d2c018f4c75 Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Tue, 6 Jan 2026 09:10:19 -0700 Subject: [PATCH 0841/1093] cmd/tailscale/cli: remove Services-specific subcommands from funnel (#18225) The funnel command is sort of an alias for the serve command. 
This means that the subcommands added to serve to support Services appear as subcommands for funnel as well, despite having no meaning for funnel. This change removes all such Services-specific subcommands from funnel. Fixes tailscale/corp#34167 Signed-off-by: Harry Harpham --- cmd/tailscale/cli/serve_v2.go | 155 ++++++++++++++++++---------------- 1 file changed, 81 insertions(+), 74 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index d474696b3bf86..6e040819528ba 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -243,87 +243,94 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { fs.UintVar(&e.http, "http", 0, "Expose an HTTP server at the specified port") fs.Var(&acceptAppCapsFlag{Value: &e.acceptAppCaps}, "accept-app-caps", "App capabilities to forward to the server (specify multiple capabilities with a comma-separated list)") fs.Var(&serviceNameFlag{Value: &e.service}, "service", "Serve for a service with distinct virtual IP instead on node itself.") + fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. Refer to docs for more information.") } fs.UintVar(&e.tcp, "tcp", 0, "Expose a TCP forwarder to forward raw TCP packets at the specified port") fs.UintVar(&e.tlsTerminatedTCP, "tls-terminated-tcp", 0, "Expose a TCP forwarder to forward TLS-terminated TCP packets at the specified port") fs.UintVar(&e.proxyProtocol, "proxy-protocol", 0, "PROXY protocol version (1 or 2) for TCP forwarding") fs.BoolVar(&e.yes, "yes", false, "Update without interactive prompts (default false)") - fs.BoolVar(&e.tun, "tun", false, "Forward all traffic to the local machine (default false), only supported for services. 
Refer to docs for more information.") }), UsageFunc: usageFuncNoDefaultValues, - Subcommands: []*ffcli.Command{ - { - Name: "status", - ShortUsage: "tailscale " + info.Name + " status [--json]", - Exec: e.runServeStatus, - ShortHelp: "View current " + info.Name + " configuration", - FlagSet: e.newFlags("serve-status", func(fs *flag.FlagSet) { - fs.BoolVar(&e.json, "json", false, "output JSON") - }), - }, - { - Name: "reset", - ShortUsage: "tailscale " + info.Name + " reset", - ShortHelp: "Reset current " + info.Name + " config", - Exec: e.runServeReset, - FlagSet: e.newFlags("serve-reset", nil), - }, - { - Name: "drain", - ShortUsage: fmt.Sprintf("tailscale %s drain ", info.Name), - ShortHelp: "Drain a service from the current node", - LongHelp: "Make the current node no longer accept new connections for the specified service.\n" + - "Existing connections will continue to work until they are closed, but no new connections will be accepted.\n" + - "Use this command to gracefully remove a service from the current node without disrupting existing connections.\n" + - " should be a service name (e.g., svc:my-service).", - Exec: e.runServeDrain, - }, - { - Name: "clear", - ShortUsage: fmt.Sprintf("tailscale %s clear ", info.Name), - ShortHelp: "Remove all config for a service", - LongHelp: "Remove all handlers configured for the specified service.", - Exec: e.runServeClear, - }, - { - Name: "advertise", - ShortUsage: fmt.Sprintf("tailscale %s advertise ", info.Name), - ShortHelp: "Advertise this node as a service proxy to the tailnet", - LongHelp: "Advertise this node as a service proxy to the tailnet. This command is used\n" + - "to make the current node be considered as a service host for a service. This is\n" + - "useful to bring a service back after it has been drained. (i.e. after running \n" + - "`tailscale serve drain `). 
This is not needed if you are using `tailscale serve` to initialize a service.", - Exec: e.runServeAdvertise, - }, - { - Name: "get-config", - ShortUsage: fmt.Sprintf("tailscale %s get-config [--service=] [--all]", info.Name), - ShortHelp: "Get service configuration to save to a file", - LongHelp: "Get the configuration for services that this node is currently hosting in a\n" + - "format that can later be provided to set-config. This can be used to declaratively set\n" + - "configuration for a service host.", - Exec: e.runServeGetConfig, - FlagSet: e.newFlags("serve-get-config", func(fs *flag.FlagSet) { - fs.BoolVar(&e.allServices, "all", false, "read config from all services") - fs.Var(&serviceNameFlag{Value: &e.service}, "service", "read config from a particular service") - }), - }, - { - Name: "set-config", - ShortUsage: fmt.Sprintf("tailscale %s set-config [--service=] [--all]", info.Name), - ShortHelp: "Define service configuration from a file", - LongHelp: "Read the provided configuration file and use it to declaratively set the configuration\n" + - "for either a single service, or for all services that this node is hosting. If --service is specified,\n" + - "all endpoint handlers for that service are overwritten. 
If --all is specified, all endpoint handlers for\n" + - "all services are overwritten.\n\n" + - "For information on the file format, see tailscale.com/kb/1589/tailscale-services-configuration-file", - Exec: e.runServeSetConfig, - FlagSet: e.newFlags("serve-set-config", func(fs *flag.FlagSet) { - fs.BoolVar(&e.allServices, "all", false, "apply config to all services") - fs.Var(&serviceNameFlag{Value: &e.service}, "service", "apply config to a particular service") - }), - }, - }, + Subcommands: func() []*ffcli.Command { + subcmds := []*ffcli.Command{ + { + Name: "status", + ShortUsage: "tailscale " + info.Name + " status [--json]", + Exec: e.runServeStatus, + ShortHelp: "View current " + info.Name + " configuration", + FlagSet: e.newFlags("serve-status", func(fs *flag.FlagSet) { + fs.BoolVar(&e.json, "json", false, "output JSON") + }), + }, + { + Name: "reset", + ShortUsage: "tailscale " + info.Name + " reset", + ShortHelp: "Reset current " + info.Name + " config", + Exec: e.runServeReset, + FlagSet: e.newFlags("serve-reset", nil), + }, + } + if subcmd == serve { + subcmds = append(subcmds, []*ffcli.Command{ + { + Name: "drain", + ShortUsage: fmt.Sprintf("tailscale %s drain ", info.Name), + ShortHelp: "Drain a service from the current node", + LongHelp: "Make the current node no longer accept new connections for the specified service.\n" + + "Existing connections will continue to work until they are closed, but no new connections will be accepted.\n" + + "Use this command to gracefully remove a service from the current node without disrupting existing connections.\n" + + " should be a service name (e.g., svc:my-service).", + Exec: e.runServeDrain, + }, + { + Name: "clear", + ShortUsage: fmt.Sprintf("tailscale %s clear ", info.Name), + ShortHelp: "Remove all config for a service", + LongHelp: "Remove all handlers configured for the specified service.", + Exec: e.runServeClear, + }, + { + Name: "advertise", + ShortUsage: fmt.Sprintf("tailscale %s advertise ", 
info.Name), + ShortHelp: "Advertise this node as a service proxy to the tailnet", + LongHelp: "Advertise this node as a service proxy to the tailnet. This command is used\n" + + "to make the current node be considered as a service host for a service. This is\n" + + "useful to bring a service back after it has been drained. (i.e. after running \n" + + "`tailscale serve drain `). This is not needed if you are using `tailscale serve` to initialize a service.", + Exec: e.runServeAdvertise, + }, + { + Name: "get-config", + ShortUsage: fmt.Sprintf("tailscale %s get-config [--service=] [--all]", info.Name), + ShortHelp: "Get service configuration to save to a file", + LongHelp: "Get the configuration for services that this node is currently hosting in a\n" + + "format that can later be provided to set-config. This can be used to declaratively set\n" + + "configuration for a service host.", + Exec: e.runServeGetConfig, + FlagSet: e.newFlags("serve-get-config", func(fs *flag.FlagSet) { + fs.BoolVar(&e.allServices, "all", false, "read config from all services") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "read config from a particular service") + }), + }, + { + Name: "set-config", + ShortUsage: fmt.Sprintf("tailscale %s set-config [--service=] [--all]", info.Name), + ShortHelp: "Define service configuration from a file", + LongHelp: "Read the provided configuration file and use it to declaratively set the configuration\n" + + "for either a single service, or for all services that this node is hosting. If --service is specified,\n" + + "all endpoint handlers for that service are overwritten. 
If --all is specified, all endpoint handlers for\n" + + "all services are overwritten.\n\n" + + "For information on the file format, see tailscale.com/kb/1589/tailscale-services-configuration-file", + Exec: e.runServeSetConfig, + FlagSet: e.newFlags("serve-set-config", func(fs *flag.FlagSet) { + fs.BoolVar(&e.allServices, "all", false, "apply config to all services") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "apply config to a particular service") + }), + }, + }...) + } + return subcmds + }(), } } From 9a6282b515b2bf438885025fac0a95bfebf2ce1b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 6 Jan 2026 15:32:48 +0000 Subject: [PATCH 0842/1093] .github: Bump actions/checkout from 4.2.2 to 5.0.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 5.0.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/11bd71901bbe5b1630ceea73d27597364c9af683...08c6903cd8c0fde910a37f88322edcfb5dd907a8) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/checklocks.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/docker-base.yml | 2 +- .github/workflows/docker-file-build.yml | 2 +- .github/workflows/flakehub-publish-tagged.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/govulncheck.yml | 2 +- .github/workflows/installer.yml | 2 +- .github/workflows/kubemanifests.yaml | 2 +- .github/workflows/natlab-integrationtest.yml | 2 +- .github/workflows/pin-github-actions.yml | 2 +- .../workflows/request-dataplane-review.yml | 2 +- .github/workflows/ssh-integrationtest.yml | 2 +- .github/workflows/test.yml | 36 +++++++++---------- .github/workflows/update-flake.yml | 2 +- .../workflows/update-webclient-prebuilt.yml | 2 +- .github/workflows/vet.yml | 2 +- .github/workflows/webclient.yml | 2 +- 18 files changed, 35 insertions(+), 35 deletions(-) diff --git a/.github/workflows/checklocks.yml b/.github/workflows/checklocks.yml index 5957e69258db5..ee950b4fc9212 100644 --- a/.github/workflows/checklocks.yml +++ b/.github/workflows/checklocks.yml @@ -18,7 +18,7 @@ jobs: runs-on: [ ubuntu-latest ] steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Build checklocks run: ./tool/go build -o /tmp/checklocks gvisor.dev/gvisor/tools/checklocks/cmd/checklocks diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2f5ae7d923eb5..e66d6454a9847 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -45,7 +45,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 # Install a more recent Go that understands modern go.mod content. 
- name: Install Go diff --git a/.github/workflows/docker-base.yml b/.github/workflows/docker-base.yml index 3c5931f2d8bcd..a47669f6ade8a 100644 --- a/.github/workflows/docker-base.yml +++ b/.github/workflows/docker-base.yml @@ -9,7 +9,7 @@ jobs: build-and-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: "build and test" run: | set -e diff --git a/.github/workflows/docker-file-build.yml b/.github/workflows/docker-file-build.yml index c61680a343e72..9a56fd05758a9 100644 --- a/.github/workflows/docker-file-build.yml +++ b/.github/workflows/docker-file-build.yml @@ -8,6 +8,6 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: "Build Docker image" run: docker build . diff --git a/.github/workflows/flakehub-publish-tagged.yml b/.github/workflows/flakehub-publish-tagged.yml index 50bb8b9f74de5..8b3f44338026a 100644 --- a/.github/workflows/flakehub-publish-tagged.yml +++ b/.github/workflows/flakehub-publish-tagged.yml @@ -17,7 +17,7 @@ jobs: id-token: "write" contents: "read" steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}" - uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 # v20 diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 098b6f387c239..0b9fb6a4151e2 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -27,7 +27,7 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index c7560983abeb6..c99cb11d3eff7 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Check out code into the Go module directory - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Install govulncheck run: ./tool/go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index 3a9ba194d6a61..d7db30782470b 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -99,7 +99,7 @@ jobs: contains(matrix.image, 'parrotsec') || contains(matrix.image, 'kalilinux') - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: run installer run: scripts/installer.sh env: diff --git a/.github/workflows/kubemanifests.yaml b/.github/workflows/kubemanifests.yaml index 4cffea02fce6b..6812b69d6e702 100644 --- a/.github/workflows/kubemanifests.yaml +++ b/.github/workflows/kubemanifests.yaml @@ -17,7 +17,7 @@ jobs: runs-on: [ ubuntu-latest ] steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Build and lint Helm chart run: | eval `./tool/go run ./cmd/mkversion` diff --git a/.github/workflows/natlab-integrationtest.yml b/.github/workflows/natlab-integrationtest.yml index 99d58717b7beb..3e87ba4345180 100644 --- a/.github/workflows/natlab-integrationtest.yml +++ b/.github/workflows/natlab-integrationtest.yml @@ -15,7 +15,7 @@ jobs: 
runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Install qemu run: | sudo rm /var/lib/man-db/auto-update diff --git a/.github/workflows/pin-github-actions.yml b/.github/workflows/pin-github-actions.yml index cb66739931bf1..7c1816d134cd6 100644 --- a/.github/workflows/pin-github-actions.yml +++ b/.github/workflows/pin-github-actions.yml @@ -22,7 +22,7 @@ jobs: name: pin-github-actions runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: pin run: make pin-github-actions - name: check for changed workflow files diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml index 58f6d3d0b5979..7ca3b98022ce7 100644 --- a/.github/workflows/request-dataplane-review.yml +++ b/.github/workflows/request-dataplane-review.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Get access token uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 id: generate-token diff --git a/.github/workflows/ssh-integrationtest.yml b/.github/workflows/ssh-integrationtest.yml index 463f4bdd4b24f..342b8e9362c30 100644 --- a/.github/workflows/ssh-integrationtest.yml +++ b/.github/workflows/ssh-integrationtest.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Run SSH integration tests run: | make sshintegrationtest \ No newline at end of file diff --git 
a/.github/workflows/test.yml b/.github/workflows/test.yml index 27862567f84da..e99e75b22f8a6 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -48,7 +48,7 @@ jobs: cache-key: ${{ steps.hash.outputs.key }} steps: - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Compute cache key from go.{mod,sum} @@ -88,7 +88,7 @@ jobs: - shard: '4/4' steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -126,7 +126,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -239,7 +239,7 @@ jobs: shard: "2/2" steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: ${{ github.workspace }}/src @@ -292,7 +292,7 @@ jobs: name: Windows (win-tool-go) steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: test-tool-go @@ -307,7 +307,7 @@ jobs: options: --privileged steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -330,7 +330,7 @@ jobs: if: github.repository == 'tailscale/tailscale' steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -386,7 +386,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -447,7 +447,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -485,7 +485,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -541,7 +541,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src # Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed @@ -566,7 +566,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -624,7 +624,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Set GOMODCACHE env run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV - name: Restore Go module cache @@ -709,7 +709,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # 
v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Set GOMODCACHE env @@ -729,7 +729,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -753,7 +753,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -775,7 +775,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache @@ -829,7 +829,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src - name: Restore Go module cache diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 1968c68302d37..cef33dc920372 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Run update-flakes run: ./update-flake.sh diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index 5565b8c86c4bf..4a676de59bc35 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Check out code - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Run go get run: | diff --git a/.github/workflows/vet.yml b/.github/workflows/vet.yml index 7eff6b45fd37b..b7862889daa7f 100644 --- a/.github/workflows/vet.yml +++ b/.github/workflows/vet.yml @@ -25,7 +25,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: src diff --git a/.github/workflows/webclient.yml b/.github/workflows/webclient.yml index bcec1f52d3732..4fc19901d0ef6 100644 --- a/.github/workflows/webclient.yml +++ b/.github/workflows/webclient.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Install deps run: ./tool/yarn --cwd client/web - name: Run lint From a662c541abf9b41a2b6f551beec3ffbdd5d7c467 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Tue, 6 Jan 2026 11:49:43 -0700 Subject: [PATCH 0843/1093] .github/workflows: bump create-pull-request to 8.0.0 Bump peter-evans/create-pull-request to 8.0.0 to ensure compatibility with actions/checkout 6.x. 
Updates #cleanup Signed-off-by: Mario Minardi --- .github/workflows/update-flake.yml | 2 +- .github/workflows/update-webclient-prebuilt.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index cef33dc920372..69c954384e9bc 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -35,7 +35,7 @@ jobs: private-key: ${{ secrets.CODE_UPDATER_APP_PRIVATE_KEY }} - name: Send pull request - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8 + uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 #v8.0.0 with: token: ${{ steps.generate-token.outputs.token }} author: Flakes Updater diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index 4a676de59bc35..c302e4f2091ca 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -32,7 +32,7 @@ jobs: - name: Send pull request id: pull-request - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8 + uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 #v8.0.0 with: token: ${{ steps.generate-token.outputs.token }} author: OSS Updater From 4c3cf8bb110e3d747c7c84b23d88b5d11d204b3b Mon Sep 17 00:00:00 2001 From: Alex Valiushko Date: Tue, 6 Jan 2026 21:58:52 -0800 Subject: [PATCH 0844/1093] wgengine/magicsock: extract IMDS utilities into a standalone package (#18334) Moves magicksock.cloudInfo into util/cloudinfo with minimal changes. 
Updates #17796 Change-Id: I83f32473b9180074d5cdbf00fa31e5b3f579f189 Signed-off-by: Alex Valiushko --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware-min.txt | 1 + cmd/tailscaled/depaware-minbox.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + tsnet/depaware.txt | 1 + .../magicsock => util/cloudinfo}/cloudinfo.go | 19 +++++++++----- util/cloudinfo/cloudinfo_nocloud.go | 26 +++++++++++++++++++ .../cloudinfo}/cloudinfo_test.go | 6 ++--- wgengine/magicsock/cloudinfo_nocloud.go | 23 ---------------- wgengine/magicsock/magicsock.go | 5 ++-- 11 files changed, 50 insertions(+), 35 deletions(-) rename {wgengine/magicsock => util/cloudinfo}/cloudinfo.go (89%) create mode 100644 util/cloudinfo/cloudinfo_nocloud.go rename {wgengine/magicsock => util/cloudinfo}/cloudinfo_test.go (97%) delete mode 100644 wgengine/magicsock/cloudinfo_nocloud.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 959a8ca728f90..ec842651ae7a1 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -856,6 +856,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 942c962280fbf..a2d20dedaf243 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -149,6 +149,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cibuild from tailscale.com/health+ 
tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index acc4241033411..9b761b76d7aa0 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -176,6 +176,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock tailscale.com/util/cmpver from tailscale.com/clientupdate tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ tailscale.com/util/dnsname from tailscale.com/appc+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 5a5f0a1b31136..13c1f5daf574b 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -422,6 +422,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/control/controlclient+ tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 045986aedc4e5..aa5d633468a49 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ 
-260,6 +260,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 9ef42400f259a..7702de69d9725 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -255,6 +255,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting diff --git a/wgengine/magicsock/cloudinfo.go b/util/cloudinfo/cloudinfo.go similarity index 89% rename from wgengine/magicsock/cloudinfo.go rename to util/cloudinfo/cloudinfo.go index 0db56b3f6c514..2c4a32c031d2c 100644 --- a/wgengine/magicsock/cloudinfo.go +++ b/util/cloudinfo/cloudinfo.go @@ -3,7 +3,8 @@ //go:build !(ios || android || js) -package magicsock +// Package cloudinfo provides cloud metadata utilities. +package cloudinfo import ( "context" @@ -24,7 +25,8 @@ import ( const maxCloudInfoWait = 2 * time.Second -type cloudInfo struct { +// CloudInfo holds state used in querying instance metadata (IMDS) endpoints. 
+type CloudInfo struct { client http.Client logf logger.Logf @@ -34,7 +36,8 @@ type cloudInfo struct { endpoint string } -func newCloudInfo(logf logger.Logf) *cloudInfo { +// New constructs a new [*CloudInfo] that will log to the provided logger instance. +func New(logf logger.Logf) *CloudInfo { if !buildfeatures.HasCloud { return nil } @@ -45,7 +48,7 @@ func newCloudInfo(logf logger.Logf) *cloudInfo { }).Dial, } - return &cloudInfo{ + return &CloudInfo{ client: http.Client{Transport: tr}, logf: logf, cloud: cloudenv.Get(), @@ -56,7 +59,9 @@ func newCloudInfo(logf logger.Logf) *cloudInfo { // GetPublicIPs returns any public IPs attached to the current cloud instance, // if the tailscaled process is running in a known cloud and there are any such // IPs present. -func (ci *cloudInfo) GetPublicIPs(ctx context.Context) ([]netip.Addr, error) { +// +// Currently supports only AWS. +func (ci *CloudInfo) GetPublicIPs(ctx context.Context) ([]netip.Addr, error) { if !buildfeatures.HasCloud { return nil, nil } @@ -73,7 +78,7 @@ func (ci *cloudInfo) GetPublicIPs(ctx context.Context) ([]netip.Addr, error) { // getAWSMetadata makes a request to the AWS metadata service at the given // path, authenticating with the provided IMDSv2 token. The returned metadata // is split by newline and returned as a slice. -func (ci *cloudInfo) getAWSMetadata(ctx context.Context, token, path string) ([]string, error) { +func (ci *CloudInfo) getAWSMetadata(ctx context.Context, token, path string) ([]string, error) { req, err := http.NewRequestWithContext(ctx, "GET", ci.endpoint+path, nil) if err != nil { return nil, fmt.Errorf("creating request to %q: %w", path, err) @@ -105,7 +110,7 @@ func (ci *cloudInfo) getAWSMetadata(ctx context.Context, token, path string) ([] } // getAWS returns all public IPv4 and IPv6 addresses present in the AWS instance metadata. 
-func (ci *cloudInfo) getAWS(ctx context.Context) ([]netip.Addr, error) { +func (ci *CloudInfo) getAWS(ctx context.Context) ([]netip.Addr, error) { ctx, cancel := context.WithTimeout(ctx, maxCloudInfoWait) defer cancel() diff --git a/util/cloudinfo/cloudinfo_nocloud.go b/util/cloudinfo/cloudinfo_nocloud.go new file mode 100644 index 0000000000000..6a525cd2a5725 --- /dev/null +++ b/util/cloudinfo/cloudinfo_nocloud.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ios || android || js + +package cloudinfo + +import ( + "context" + "net/netip" + + "tailscale.com/types/logger" +) + +// CloudInfo is not available in mobile and JS targets. +type CloudInfo struct{} + +// New construct a no-op CloudInfo stub. +func New(_ logger.Logf) *CloudInfo { + return &CloudInfo{} +} + +// GetPublicIPs always returns nil slice and error. +func (ci *CloudInfo) GetPublicIPs(_ context.Context) ([]netip.Addr, error) { + return nil, nil +} diff --git a/wgengine/magicsock/cloudinfo_test.go b/util/cloudinfo/cloudinfo_test.go similarity index 97% rename from wgengine/magicsock/cloudinfo_test.go rename to util/cloudinfo/cloudinfo_test.go index 15191aeefea36..38817f47a6e56 100644 --- a/wgengine/magicsock/cloudinfo_test.go +++ b/util/cloudinfo/cloudinfo_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package magicsock +package cloudinfo import ( "context" @@ -44,7 +44,7 @@ func TestCloudInfo_AWS(t *testing.T) { srv := httptest.NewServer(fake) defer srv.Close() - ci := newCloudInfo(t.Logf) + ci := New(t.Logf) ci.cloud = cloudenv.AWS ci.endpoint = srv.URL @@ -76,7 +76,7 @@ func TestCloudInfo_AWSNotPublic(t *testing.T) { srv := httptest.NewServer(returns404) defer srv.Close() - ci := newCloudInfo(t.Logf) + ci := New(t.Logf) ci.cloud = cloudenv.AWS ci.endpoint = srv.URL diff --git a/wgengine/magicsock/cloudinfo_nocloud.go b/wgengine/magicsock/cloudinfo_nocloud.go deleted file 
mode 100644 index b4414d318c7ea..0000000000000 --- a/wgengine/magicsock/cloudinfo_nocloud.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ios || android || js - -package magicsock - -import ( - "context" - "net/netip" - - "tailscale.com/types/logger" -) - -type cloudInfo struct{} - -func newCloudInfo(_ logger.Logf) *cloudInfo { - return &cloudInfo{} -} - -func (ci *cloudInfo) GetPublicIPs(_ context.Context) ([]netip.Addr, error) { - return nil, nil -} diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index b8a5f7da2b72f..a19032fb27cb8 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -60,6 +60,7 @@ import ( "tailscale.com/types/nettype" "tailscale.com/types/views" "tailscale.com/util/clientmetric" + "tailscale.com/util/cloudinfo" "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/ringlog" @@ -213,7 +214,7 @@ type Conn struct { bind *connBind // cloudInfo is used to query cloud metadata services. - cloudInfo *cloudInfo + cloudInfo *cloudinfo.CloudInfo // ============================================================ // Fields that must be accessed via atomic load/stores. @@ -597,7 +598,7 @@ func newConn(logf logger.Logf) *Conn { peerLastDerp: make(map[key.NodePublic]int), peerMap: newPeerMap(), discoInfo: make(map[key.DiscoPublic]*discoInfo), - cloudInfo: newCloudInfo(logf), + cloudInfo: cloudinfo.New(logf), } c.discoAtomic.Set(discoPrivate) c.bind = &connBind{Conn: c, closed: true} From 480ee9fec05a60d00e5b744434243270c8ac60ad Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Wed, 7 Jan 2026 09:31:46 -0500 Subject: [PATCH 0845/1093] ipn,cmd/tailscale/cli: set correct SNI name for TLS-terminated TCP Services (#17752) Fixes #17749. 
Signed-off-by: Naman Sood --- cmd/tailscale/cli/serve_v2.go | 17 +++++++++++--- cmd/tailscale/cli/serve_v2_test.go | 8 ++++--- ipn/serve.go | 37 ++++++++++++++++++++---------- 3 files changed, 44 insertions(+), 18 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 6e040819528ba..6a29074817a59 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -923,7 +923,7 @@ func (e *serveEnv) setServe(sc *ipn.ServeConfig, dnsName string, srvType serveTy if e.setPath != "" { return fmt.Errorf("cannot mount a path for TCP serve") } - err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target, proxyProtocol) + err := e.applyTCPServe(sc, dnsName, srvType, srvPort, target, mds, proxyProtocol) if err != nil { return fmt.Errorf("failed to apply TCP serve: %w", err) } @@ -1203,7 +1203,7 @@ func (e *serveEnv) applyWebServe(sc *ipn.ServeConfig, dnsName string, srvPort ui return nil } -func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string, proxyProtocol int) error { +func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType serveType, srcPort uint16, target string, mds string, proxyProtocol int) error { var terminateTLS bool switch srcType { case serveTypeTCP: @@ -1226,11 +1226,22 @@ func (e *serveEnv) applyTCPServe(sc *ipn.ServeConfig, dnsName string, srcType se return fmt.Errorf("invalid TCP target %q: %v", target, err) } - // TODO: needs to account for multiple configs from foreground mode if sc.IsServingWeb(srcPort, svcName) { return fmt.Errorf("cannot serve TCP; already serving web on %d for %s", srcPort, dnsName) } + // TODO: needs to account for multiple configs from foreground mode + if svcName := tailcfg.AsServiceName(dnsName); svcName != "" { + sc.SetTCPForwardingForService(srcPort, dstURL.Host, terminateTLS, svcName, proxyProtocol, mds) + return nil + } + + // TODO: needs to account for multiple configs from foreground 
mode + if svcName != "" { + sc.SetTCPForwardingForService(srcPort, dstURL.Host, terminateTLS, svcName, proxyProtocol, mds) + return nil + } + sc.SetTCPForwarding(srcPort, dstURL.Host, terminateTLS, proxyProtocol, dnsName) return nil } diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go index b3ebb32a2b4c4..a56fece3e8c59 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -2077,9 +2077,11 @@ func TestSetServe(t *testing.T) { if err == nil && tt.expectErr { t.Fatalf("got no error; expected error.") } - if !tt.expectErr && !reflect.DeepEqual(tt.cfg, tt.expected) { - svcName := tailcfg.ServiceName(tt.dnsName) - t.Fatalf("got: %v; expected: %v", tt.cfg.Services[svcName], tt.expected.Services[svcName]) + if !tt.expectErr { + if diff := cmp.Diff(tt.expected, tt.cfg); diff != "" { + // svcName := tailcfg.ServiceName(tt.dnsName) + t.Fatalf("got diff:\n%s", diff) + } } }) } diff --git a/ipn/serve.go b/ipn/serve.go index 76823a8464977..240308f290edc 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -433,24 +433,37 @@ func (sc *ServeConfig) SetTCPForwarding(port uint16, fwdAddr string, terminateTL if sc == nil { sc = new(ServeConfig) } - tcpPortHandler := &sc.TCP - if svcName := tailcfg.AsServiceName(host); svcName != "" { - svcConfig, ok := sc.Services[svcName] - if !ok { - svcConfig = new(ServiceConfig) - mak.Set(&sc.Services, svcName, svcConfig) - } - tcpPortHandler = &svcConfig.TCP + mak.Set(&sc.TCP, port, &TCPPortHandler{ + TCPForward: fwdAddr, + ProxyProtocol: proxyProtocol, // can be 0 + }) + + if terminateTLS { + sc.TCP[port].TerminateTLS = host } +} - handler := &TCPPortHandler{ +// SetTCPForwardingForService sets the fwdAddr (IP:port form) to which to +// forward connections from the given port on the service. If terminateTLS +// is true, TLS connections are terminated, with only the FQDN that corresponds +// to the given service being permitted, before passing them to the fwdAddr. 
+func (sc *ServeConfig) SetTCPForwardingForService(port uint16, fwdAddr string, terminateTLS bool, svcName tailcfg.ServiceName, proxyProtocol int, magicDNSSuffix string) { + if sc == nil { + sc = new(ServeConfig) + } + svcConfig, ok := sc.Services[svcName] + if !ok { + svcConfig = new(ServiceConfig) + mak.Set(&sc.Services, svcName, svcConfig) + } + mak.Set(&svcConfig.TCP, port, &TCPPortHandler{ TCPForward: fwdAddr, ProxyProtocol: proxyProtocol, // can be 0 - } + }) + if terminateTLS { - handler.TerminateTLS = host + svcConfig.TCP[port].TerminateTLS = fmt.Sprintf("%s.%s", svcName.WithoutPrefix(), magicDNSSuffix) } - mak.Set(tcpPortHandler, port, handler) } // SetFunnel sets the sc.AllowFunnel value for the given host and port. From 6c67deff3805e7c90894c9aced1b594854747b87 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 7 Jan 2026 11:04:14 -0800 Subject: [PATCH 0846/1093] cmd/distsign: add CLI for verifying package signatures (#18239) Updates #35374 Signed-off-by: Andrew Lytvynov --- clientupdate/distsign/distsign.go | 8 +++++- cmd/distsign/distsign.go | 42 +++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 cmd/distsign/distsign.go diff --git a/clientupdate/distsign/distsign.go b/clientupdate/distsign/distsign.go index 270ee4c1f9ace..954403ae0c62c 100644 --- a/clientupdate/distsign/distsign.go +++ b/clientupdate/distsign/distsign.go @@ -332,7 +332,13 @@ func (c *Client) download(ctx context.Context, url, dst string, limit int64) ([] tr := http.DefaultTransport.(*http.Transport).Clone() tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() defer tr.CloseIdleConnections() - hc := &http.Client{Transport: tr} + hc := &http.Client{ + Transport: tr, + CheckRedirect: func(r *http.Request, via []*http.Request) error { + c.logf("Download redirected to %q", r.URL) + return nil + }, + } quickCtx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() diff --git a/cmd/distsign/distsign.go 
b/cmd/distsign/distsign.go new file mode 100644 index 0000000000000..051afabcd0b71 --- /dev/null +++ b/cmd/distsign/distsign.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Command distsign tests downloads and signature validating for packages +// published by Tailscale on pkgs.tailscale.com. +package main + +import ( + "context" + "flag" + "log" + "os" + "path/filepath" + + "tailscale.com/clientupdate/distsign" +) + +var ( + pkgsURL = flag.String("pkgs-url", "https://pkgs.tailscale.com/", "URL of the packages server") + pkgName = flag.String("pkg-name", "", "name of the package on the packages server, including the stable/unstable track prefix") +) + +func main() { + flag.Parse() + + if *pkgName == "" { + log.Fatalf("--pkg-name is required") + } + + c, err := distsign.NewClient(log.Printf, *pkgsURL) + if err != nil { + log.Fatal(err) + } + tempDir := filepath.Join(os.TempDir(), "distsign") + if err := os.MkdirAll(tempDir, 0755); err != nil { + log.Fatal(err) + } + if err := c.Download(context.Background(), *pkgName, filepath.Join(os.TempDir(), "distsign", filepath.Base(*pkgName))); err != nil { + log.Fatal(err) + } + log.Printf("%q ok", *pkgName) +} From e66531041b7d8f6c22a654d5b6e0aabe3e914b92 Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Wed, 7 Jan 2026 16:22:14 -0500 Subject: [PATCH 0847/1093] cmd/containerboot: add OAuth and WIF auth support (#18311) Fixes tailscale/corp#34430 Signed-off-by: Raj Singh --- cmd/containerboot/main.go | 14 ++++- cmd/containerboot/settings.go | 29 +++++++--- cmd/containerboot/settings_test.go | 89 +++++++++++++++++++++++++++++- cmd/containerboot/tailscaled.go | 9 +++ 4 files changed, 131 insertions(+), 10 deletions(-) diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 8c9d33c61ccd0..011c1830a856b 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -11,7 +11,17 @@ // As with most container things, configuration is passed 
through environment // variables. All configuration is optional. // -// - TS_AUTHKEY: the authkey to use for login. +// - TS_AUTHKEY: the authkey to use for login. Also accepts TS_AUTH_KEY. +// If the value begins with "file:", it is treated as a path to a file containing the key. +// - TS_CLIENT_ID: the OAuth client ID. Can be used alone (ID token auto-generated +// in well-known environments), with TS_CLIENT_SECRET, or with TS_ID_TOKEN. +// - TS_CLIENT_SECRET: the OAuth client secret for generating authkeys. +// If the value begins with "file:", it is treated as a path to a file containing the secret. +// - TS_ID_TOKEN: the ID token from the identity provider for workload identity federation. +// Must be used together with TS_CLIENT_ID. If the value begins with "file:", it is +// treated as a path to a file containing the token. +// - Note: TS_AUTHKEY is mutually exclusive with TS_CLIENT_ID, TS_CLIENT_SECRET, and TS_ID_TOKEN. +// TS_CLIENT_SECRET and TS_ID_TOKEN cannot be used together. // - TS_HOSTNAME: the hostname to request for the node. // - TS_ROUTES: subnet routes to advertise. Explicitly setting it to an empty // value will cause containerboot to stop acting as a subnet router for any @@ -67,7 +77,7 @@ // - TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR: if specified, a path to a // directory that containers tailscaled config in file. The config file needs to be // named cap-.hujson. If this is set, TS_HOSTNAME, -// TS_EXTRA_ARGS, TS_AUTHKEY, +// TS_EXTRA_ARGS, TS_AUTHKEY, TS_CLIENT_ID, TS_CLIENT_SECRET, TS_ID_TOKEN, // TS_ROUTES, TS_ACCEPT_DNS env vars must not be set. If this is set, // containerboot only runs `tailscaled --config ` // and not `tailscale up` or `tailscale set`. diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index 5a8be9036b3ca..216dd766e85ee 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -22,9 +22,12 @@ import ( // settings is all the configuration for containerboot. 
type settings struct { - AuthKey string - Hostname string - Routes *string + AuthKey string + ClientID string + ClientSecret string + IDToken string + Hostname string + Routes *string // ProxyTargetIP is the destination IP to which all incoming // Tailscale traffic should be proxied. If empty, no proxying // is done. This is typically a locally reachable IP. @@ -86,6 +89,9 @@ type settings struct { func configFromEnv() (*settings, error) { cfg := &settings{ AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""), + ClientID: defaultEnv("TS_CLIENT_ID", ""), + ClientSecret: defaultEnv("TS_CLIENT_SECRET", ""), + IDToken: defaultEnv("TS_ID_TOKEN", ""), Hostname: defaultEnv("TS_HOSTNAME", ""), Routes: defaultEnvStringPointer("TS_ROUTES"), ServeConfigPath: defaultEnv("TS_SERVE_CONFIG", ""), @@ -241,8 +247,17 @@ func (s *settings) validate() error { if s.TailnetTargetFQDN != "" && s.TailnetTargetIP != "" { return errors.New("Both TS_TAILNET_TARGET_IP and TS_TAILNET_FQDN cannot be set") } - if s.TailscaledConfigFilePath != "" && (s.AcceptDNS != nil || s.AuthKey != "" || s.Routes != nil || s.ExtraArgs != "" || s.Hostname != "") { - return errors.New("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR cannot be set in combination with TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, TS_ROUTES, TS_ACCEPT_DNS.") + if s.TailscaledConfigFilePath != "" && (s.AcceptDNS != nil || s.AuthKey != "" || s.Routes != nil || s.ExtraArgs != "" || s.Hostname != "" || s.ClientID != "" || s.ClientSecret != "" || s.IDToken != "") { + return errors.New("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR cannot be set in combination with TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, TS_ROUTES, TS_ACCEPT_DNS, TS_CLIENT_ID, TS_CLIENT_SECRET, TS_ID_TOKEN.") + } + if s.IDToken != "" && s.ClientID == "" { + return errors.New("TS_ID_TOKEN is set but TS_CLIENT_ID is not set") + } + if s.IDToken != "" && s.ClientSecret != "" { + return errors.New("TS_ID_TOKEN and TS_CLIENT_SECRET cannot both be set") + } + if s.AuthKey != "" && 
(s.ClientID != "" || s.ClientSecret != "" || s.IDToken != "") { + return errors.New("TS_AUTHKEY cannot be used with TS_CLIENT_ID, TS_CLIENT_SECRET, or TS_ID_TOKEN") } if s.AllowProxyingClusterTrafficViaIngress && s.UserspaceMode { return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is not supported in userspace mode") @@ -312,8 +327,8 @@ func (cfg *settings) setupKube(ctx context.Context, kc *kubeClient) error { } } - // Return early if we already have an auth key. - if cfg.AuthKey != "" || isOneStepConfig(cfg) { + // Return early if we already have an auth key or are using OAuth/WIF. + if cfg.AuthKey != "" || cfg.ClientID != "" || cfg.ClientSecret != "" || isOneStepConfig(cfg) { return nil } diff --git a/cmd/containerboot/settings_test.go b/cmd/containerboot/settings_test.go index dbec066c9ab0d..d97e786e6b334 100644 --- a/cmd/containerboot/settings_test.go +++ b/cmd/containerboot/settings_test.go @@ -5,7 +5,10 @@ package main -import "testing" +import ( + "strings" + "testing" +) func Test_parseAcceptDNS(t *testing.T) { tests := []struct { @@ -106,3 +109,87 @@ func Test_parseAcceptDNS(t *testing.T) { }) } } + +func TestValidateAuthMethods(t *testing.T) { + tests := []struct { + name string + authKey string + clientID string + clientSecret string + idToken string + errContains string + }{ + { + name: "no_auth_method", + }, + { + name: "authkey_only", + authKey: "tskey-auth-xxx", + }, + { + name: "client_secret_only", + clientSecret: "tskey-client-xxx", + }, + { + name: "client_id_alone", + clientID: "client-id", + }, + { + name: "oauth_client_id_and_secret", + clientID: "client-id", + clientSecret: "tskey-client-xxx", + }, + { + name: "wif_client_id_and_id_token", + clientID: "client-id", + idToken: "id-token", + }, + { + name: "id_token_without_client_id", + idToken: "id-token", + errContains: "TS_ID_TOKEN is set but TS_CLIENT_ID is not set", + }, + { + name: "authkey_with_client_secret", + authKey: "tskey-auth-xxx", + clientSecret: 
"tskey-client-xxx", + errContains: "TS_AUTHKEY cannot be used with", + }, + { + name: "authkey_with_wif", + authKey: "tskey-auth-xxx", + clientID: "client-id", + idToken: "id-token", + errContains: "TS_AUTHKEY cannot be used with", + }, + { + name: "id_token_with_client_secret", + clientID: "client-id", + clientSecret: "tskey-client-xxx", + idToken: "id-token", + errContains: "TS_ID_TOKEN and TS_CLIENT_SECRET cannot both be set", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &settings{ + AuthKey: tt.authKey, + ClientID: tt.clientID, + ClientSecret: tt.clientSecret, + IDToken: tt.idToken, + } + err := s.validate() + if tt.errContains != "" { + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), tt.errContains) { + t.Errorf("error %q does not contain %q", err.Error(), tt.errContains) + } + } else if err != nil { + t.Fatalf("unexpected error: %v", err) + } + }) + } +} diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index f828c52573089..1374b1802046e 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -120,6 +120,15 @@ func tailscaleUp(ctx context.Context, cfg *settings) error { if cfg.AuthKey != "" { args = append(args, "--authkey="+cfg.AuthKey) } + if cfg.ClientID != "" { + args = append(args, "--client-id="+cfg.ClientID) + } + if cfg.ClientSecret != "" { + args = append(args, "--client-secret="+cfg.ClientSecret) + } + if cfg.IDToken != "" { + args = append(args, "--id-token="+cfg.IDToken) + } // --advertise-routes can be passed an empty string to configure a // device (that might have previously advertised subnet routes) to not // advertise any routes. 
Respect an empty string passed by a user and From 522a6e385ef2624ff3a976ee29594cb2a2669eda Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 7 Jan 2026 18:12:06 -0800 Subject: [PATCH 0848/1093] cmd/tailscale/cli, util/qrcodes: format QR codes on Linux consoles (#18182) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Raw Linux consoles support UTF-8, but we cannot assume that all UTF-8 characters are available. The default Fixed and Terminus fonts don’t contain half-block characters (`▀` and `▄`), but do contain the full-block character (`█`). Sometimes, Linux doesn’t have a framebuffer, so it falls back to VGA. When this happens, the full-block character could be anywhere in extended ASCII block, because we don’t know which code page is active. This PR introduces `--qr-format=auto` which tries to heuristically detect when Tailscale is printing to a raw Linux console, whether UTF-8 is enabled, and which block characters have been mapped in the console font. If Unicode characters are unavailable, the new `--qr-format=ascii` formatter uses `#` characters instead of full-block characters. 
Fixes #12935 Signed-off-by: Simon Law --- cmd/tailscale/cli/up.go | 24 ++--- cmd/tailscale/depaware.txt | 3 +- cmd/tailscaled/depaware-minbox.txt | 3 +- util/qrcodes/qrcodes.go | 75 ++++++++++++++ util/qrcodes/qrcodes_linux.go | 160 +++++++++++++++++++++++++++++ util/qrcodes/qrcodes_notlinux.go | 14 +++ 6 files changed, 259 insertions(+), 20 deletions(-) create mode 100644 util/qrcodes/qrcodes.go create mode 100644 util/qrcodes/qrcodes_linux.go create mode 100644 util/qrcodes/qrcodes_notlinux.go diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index d6971a6814b7c..1d9f7e17c48df 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -23,7 +23,6 @@ import ( shellquote "github.com/kballard/go-shellquote" "github.com/peterbourgon/ff/v3/ffcli" - qrcode "github.com/skip2/go-qrcode" "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister/identityfederation" _ "tailscale.com/feature/condregister/oauthkey" @@ -39,6 +38,7 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/views" "tailscale.com/util/dnsname" + "tailscale.com/util/qrcodes" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version/distro" ) @@ -95,7 +95,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { // When adding new flags, prefer to put them under "tailscale set" instead // of here. Setting preferences via "tailscale up" is deprecated. 
upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs") - upf.StringVar(&upArgs.qrFormat, "qr-format", "small", "QR code formatting (small or large)") + upf.StringVar(&upArgs.qrFormat, "qr-format", string(qrcodes.FormatAuto), fmt.Sprintf("QR code formatting (%s, %s, %s, %s)", qrcodes.FormatAuto, qrcodes.FormatASCII, qrcodes.FormatLarge, qrcodes.FormatSmall)) upf.StringVar(&upArgs.authKeyOrFile, "auth-key", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`) upf.StringVar(&upArgs.clientID, "client-id", "", "Client ID used to generate authkeys via workload identity federation") upf.StringVar(&upArgs.clientSecretOrFile, "client-secret", "", `Client Secret used to generate authkeys via OAuth; if it begins with "file:", then it's a path to a file containing the secret`) @@ -720,12 +720,9 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if upArgs.json { js := &upOutputJSON{AuthURL: authURL, BackendState: st.BackendState} - q, err := qrcode.New(authURL, qrcode.Medium) + png, err := qrcodes.EncodePNG(authURL, 128) if err == nil { - png, err := q.PNG(128) - if err == nil { - js.QR = "data:image/png;base64," + base64.StdEncoding.EncodeToString(png) - } + js.QR = "data:image/png;base64," + base64.StdEncoding.EncodeToString(png) } data, err := json.MarshalIndent(js, "", "\t") @@ -737,18 +734,9 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } else { fmt.Fprintf(Stderr, "\nTo authenticate, visit:\n\n\t%s\n\n", authURL) if upArgs.qr { - q, err := qrcode.New(authURL, qrcode.Medium) + _, err := qrcodes.Fprintln(Stderr, qrcodes.Format(upArgs.qrFormat), authURL) if err != nil { - log.Printf("QR code error: %v", err) - } else { - switch upArgs.qrFormat { - case "large": - fmt.Fprintf(Stderr, "%s\n", q.ToString(false)) - case "small": - fmt.Fprintf(Stderr, "%s\n", q.ToSmallString(false)) - default: - log.Printf("unknown QR code format: %q", 
upArgs.qrFormat) - } + log.Print(err) } } } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 8b576ffc3a4dd..7c89633ac3fe9 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -47,7 +47,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3/internal from github.com/peterbourgon/ff/v3 - github.com/skip2/go-qrcode from tailscale.com/cmd/tailscale/cli + github.com/skip2/go-qrcode from tailscale.com/util/qrcodes github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket @@ -189,6 +189,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli + 💣 tailscale.com/util/qrcodes from tailscale.com/cmd/tailscale/cli tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli tailscale.com/util/rands from tailscale.com/tsweb tailscale.com/util/set from tailscale.com/ipn+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 9b761b76d7aa0..38da380135198 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -33,7 +33,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3/internal from github.com/peterbourgon/ff/v3 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf - github.com/skip2/go-qrcode from tailscale.com/cmd/tailscale/cli + 
github.com/skip2/go-qrcode from tailscale.com/util/qrcodes github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -193,6 +193,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/osshare from tailscale.com/cmd/tailscaled tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli + 💣 tailscale.com/util/qrcodes from tailscale.com/cmd/tailscale/cli tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ diff --git a/util/qrcodes/qrcodes.go b/util/qrcodes/qrcodes.go new file mode 100644 index 0000000000000..14bdf858145b5 --- /dev/null +++ b/util/qrcodes/qrcodes.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package qrcodes provides functions to render or format QR codes. +package qrcodes + +import ( + "fmt" + "io" + "strings" + + qrcode "github.com/skip2/go-qrcode" +) + +// Format selects the text representation used to print QR codes. +type Format string + +const ( + // FormatAuto will format QR codes to best fit the capabilities of the + // [io.Writer]. + FormatAuto Format = "auto" + + // FormatASCII will format QR codes with only ASCII characters. + FormatASCII Format = "ascii" + + // FormatLarge will format QR codes with full block characters. + FormatLarge Format = "large" + + // FormatSmall will format QR codes with full and half block characters. + FormatSmall Format = "small" +) + +// Fprintln formats s according to [Format] and writes a QR code to w, along +// with a newline. It returns the number of bytes written and any write error +// encountered. 
+func Fprintln(w io.Writer, format Format, s string) (n int, err error) { + const inverse = false // Modern scanners can read QR codes of any colour. + + q, err := qrcode.New(s, qrcode.Medium) + if err != nil { + return 0, fmt.Errorf("QR code error: %w", err) + } + + if format == FormatAuto { + format, err = detectFormat(w, inverse) + if err != nil { + return 0, fmt.Errorf("QR code error: %w", err) + } + } + + var out string + switch format { + case FormatASCII: + out = q.ToString(inverse) + out = strings.ReplaceAll(out, "█", "#") + case FormatLarge: + out = q.ToString(inverse) + case FormatSmall: + out = q.ToSmallString(inverse) + default: + return 0, fmt.Errorf("unknown QR code format: %q", format) + } + + return fmt.Fprintln(w, out) +} + +// EncodePNG renders a QR code for s as a PNG, with a width and height of size +// pixels. +func EncodePNG(s string, size int) ([]byte, error) { + q, err := qrcode.New(s, qrcode.Medium) + if err != nil { + return nil, err + } + return q.PNG(size) +} diff --git a/util/qrcodes/qrcodes_linux.go b/util/qrcodes/qrcodes_linux.go new file mode 100644 index 0000000000000..9cc0c09bf0e5d --- /dev/null +++ b/util/qrcodes/qrcodes_linux.go @@ -0,0 +1,160 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package qrcodes + +import ( + "errors" + "fmt" + "io" + "os" + "os/exec" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" + "golang.org/x/sys/unix" +) + +func detectFormat(w io.Writer, inverse bool) (format Format, _ error) { + var zero Format + + // Almost every terminal supports UTF-8, but the Linux + // console may have partial or no support, which is + // especially painful inside VMs. See tailscale/tailscale#12935. + format = FormatSmall + + // Is the locale (LC_CTYPE) set to UTF-8? 
+ locale, err := locale() + if err != nil { + return FormatASCII, fmt.Errorf("QR: %w", err) + } + const utf8 = ".UTF-8" + if !strings.HasSuffix(locale["LC_CTYPE"], utf8) && + !strings.HasSuffix(locale["LANG"], utf8) { + return FormatASCII, nil + } + + // Are we printing to a terminal? + f, ok := w.(*os.File) + if !ok { + return format, nil + } + if !isatty.IsTerminal(f.Fd()) { + return format, nil + } + fd := f.Fd() + + // On a Linux console, check that the current keyboard + // is in Unicode mode. See unicode_start(1). + const K_UNICODE = 0x03 + kbMode, err := ioctlGetKBMode(fd) + if err != nil { + if errors.Is(err, syscall.ENOTTY) { + return format, nil + } + return zero, err + } + if kbMode != K_UNICODE { + return FormatASCII, nil + } + + // On a raw Linux console, detect whether the block + // characters are available in the current font by + // consulting the Unicode-to-font mapping. + unimap, err := ioctlGetUniMap(fd) + if err != nil { + return zero, err + } + if _, ok := unimap['█']; ok { + format = FormatLarge + } + if _, ok := unimap['▀']; ok && inverse { + format = FormatSmall + } + if _, ok := unimap['▄']; ok && !inverse { + format = FormatSmall + } + + return format, nil +} + +func locale() (map[string]string, error) { + locale := map[string]string{ + "LANG": os.Getenv("LANG"), + "LC_CTYPE": os.Getenv("LC_CTYPE"), + } + + cmd := exec.Command("locale") + out, err := cmd.Output() + if err != nil { + if errors.Is(err, exec.ErrNotFound) { + return locale, nil + } + return nil, fmt.Errorf("locale error: %w", err) + } + + for line := range strings.SplitSeq(string(out), "\n") { + if line == "" { + continue + } + k, v, found := strings.Cut(line, "=") + if !found { + continue + } + v, err := strconv.Unquote(v) + if err != nil { + continue + } + locale[k] = v + } + return locale, nil +} + +func ioctlGetKBMode(fd uintptr) (int, error) { + const KDGKBMODE = 0x4b44 + mode, err := unix.IoctlGetInt(int(fd), KDGKBMODE) + if err != nil { + return 0, fmt.Errorf("keyboard 
mode error: %w", err) + } + return mode, nil +} + +func ioctlGetUniMap(fd uintptr) (map[rune]int, error) { + const GIO_UNIMAP = 0x4B66 // get unicode-to-font mapping from kernel + var ud struct { + Count uint16 + Entries uintptr // pointer to unipair array + } + type unipair struct { + Unicode uint16 // Unicode value + FontPos uint16 // Font position in the console font + } + + // First, get the number of entries: + _, _, errno := unix.Syscall(unix.SYS_IOCTL, fd, GIO_UNIMAP, uintptr(unsafe.Pointer(&ud))) + if errno != 0 && !errors.Is(errno, syscall.ENOMEM) { + return nil, fmt.Errorf("unicode mapping error: %w", errno) + } + + // Then allocate enough space and get the entries themselves: + if ud.Count == 0 { + return nil, nil + } + entries := make([]unipair, ud.Count) + ud.Entries = uintptr(unsafe.Pointer(&entries[0])) + _, _, errno = unix.Syscall(unix.SYS_IOCTL, fd, GIO_UNIMAP, uintptr(unsafe.Pointer(&ud))) + if errno != 0 { + return nil, fmt.Errorf("unicode mapping error: %w", errno) + } + + unimap := make(map[rune]int) + for _, e := range entries { + unimap[rune(e.Unicode)] = int(e.FontPos) + } + return unimap, nil +} diff --git a/util/qrcodes/qrcodes_notlinux.go b/util/qrcodes/qrcodes_notlinux.go new file mode 100644 index 0000000000000..a12ce39d11168 --- /dev/null +++ b/util/qrcodes/qrcodes_notlinux.go @@ -0,0 +1,14 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !linux + +package qrcodes + +import "io" + +func detectFormat(w io.Writer, inverse bool) (Format, error) { + // Assume all terminals can support the full set of UTF-8 block + // characters: (█, ▀, ▄). See tailscale/tailscale#12935. 
+ return FormatSmall, nil +} From 73cb3b491e7d60ef57e14312fdec01abe1025da0 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 8 Jan 2026 12:01:12 +0000 Subject: [PATCH 0849/1093] cmd/k8s-operator/e2e: run self-contained e2e tests with devcontrol (#17415) * cmd/k8s-operator/e2e: run self-contained e2e tests with devcontrol Adds orchestration for more of the e2e testing setup requirements to make it easier to run them in CI, but also run them locally in a way that's consistent with CI. Requires running devcontrol, but otherwise supports creating all the scaffolding required to exercise the operator and proxies. Updates tailscale/corp#32085 Change-Id: Ia7bff38af3801fd141ad17452aa5a68b7e724ca6 Signed-off-by: Tom Proctor * cmd/k8s-operator/e2e: being more specific on tmp dir cleanup Signed-off-by: chaosinthecrd --------- Signed-off-by: Tom Proctor Signed-off-by: chaosinthecrd Co-authored-by: chaosinthecrd --- cmd/k8s-operator/depaware.txt | 96 +-- .../crds/tailscale.com_proxyclasses.yaml | 18 +- .../deploy/crds/tailscale.com_recorders.yaml | 14 +- .../deploy/manifests/operator.yaml | 32 +- cmd/k8s-operator/e2e/certs/pebble.minica.crt | 19 + cmd/k8s-operator/e2e/doc.go | 28 + cmd/k8s-operator/e2e/ingress_test.go | 22 +- cmd/k8s-operator/e2e/main_test.go | 74 +- cmd/k8s-operator/e2e/pebble.go | 174 +++++ cmd/k8s-operator/e2e/proxy_test.go | 36 +- cmd/k8s-operator/e2e/setup.go | 680 ++++++++++++++++++ cmd/k8s-operator/e2e/ssh.go | 352 +++++++++ cmd/tailscale/depaware.txt | 2 +- flake.nix | 2 +- go.mod | 118 ++- go.mod.sri | 2 +- go.sum | 340 ++++++--- shell.nix | 2 +- 18 files changed, 1680 insertions(+), 331 deletions(-) create mode 100644 cmd/k8s-operator/e2e/certs/pebble.minica.crt create mode 100644 cmd/k8s-operator/e2e/doc.go create mode 100644 cmd/k8s-operator/e2e/pebble.go create mode 100644 cmd/k8s-operator/e2e/setup.go create mode 100644 cmd/k8s-operator/e2e/ssh.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 
ec842651ae7a1..2f909ee8e0d50 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -25,6 +25,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/evanphx/json-patch/v5 from sigs.k8s.io/controller-runtime/pkg/client github.com/evanphx/json-patch/v5/internal/json from github.com/evanphx/json-patch/v5 💣 github.com/fsnotify/fsnotify from sigs.k8s.io/controller-runtime/pkg/certwatcher + github.com/fsnotify/fsnotify/internal from github.com/fsnotify/fsnotify github.com/fxamacker/cbor/v2 from tailscale.com/tka+ github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ @@ -46,27 +47,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/gogo/protobuf/proto from k8s.io/api/admission/v1+ github.com/gogo/protobuf/sortkeys from k8s.io/api/admission/v1+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/golang/protobuf/proto from k8s.io/client-go/discovery+ github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ github.com/google/gnostic-models/compiler from github.com/google/gnostic-models/openapiv2+ github.com/google/gnostic-models/extensions from github.com/google/gnostic-models/compiler github.com/google/gnostic-models/jsonschema from github.com/google/gnostic-models/compiler github.com/google/gnostic-models/openapiv2 from k8s.io/client-go/discovery+ github.com/google/gnostic-models/openapiv3 from k8s.io/kube-openapi/pkg/handler3+ - 💣 github.com/google/go-cmp/cmp from k8s.io/apimachinery/pkg/util/diff+ - github.com/google/go-cmp/cmp/internal/diff from github.com/google/go-cmp/cmp - github.com/google/go-cmp/cmp/internal/flags from github.com/google/go-cmp/cmp+ - github.com/google/go-cmp/cmp/internal/function from github.com/google/go-cmp/cmp - 💣 github.com/google/go-cmp/cmp/internal/value from github.com/google/go-cmp/cmp - github.com/google/gofuzz from 
k8s.io/apimachinery/pkg/apis/meta/v1+ - github.com/google/gofuzz/bytesource from github.com/google/gofuzz github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/tka - W 💣 github.com/inconshreveable/mousetrap from github.com/spf13/cobra github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink - 💣 github.com/json-iterator/go from sigs.k8s.io/structured-merge-diff/v4/fieldpath+ + 💣 github.com/json-iterator/go from sigs.k8s.io/structured-merge-diff/v6/fieldpath+ github.com/klauspost/compress from github.com/klauspost/compress/zstd github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd @@ -88,6 +80,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/opencontainers/go-digest from github.com/distribution/reference github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal github.com/pkg/errors from github.com/evanphx/json-patch/v5+ + github.com/pmezard/go-difflib/difflib from k8s.io/apimachinery/pkg/util/diff D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header from github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil @@ -103,7 +96,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs L 💣 github.com/safchain/ethtool from 
tailscale.com/net/netkernelconf - github.com/spf13/cobra from k8s.io/component-base/cli/flag github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd+ W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket @@ -131,12 +123,14 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 - go.opentelemetry.io/otel/attribute from go.opentelemetry.io/otel/trace + go.opentelemetry.io/otel/attribute from go.opentelemetry.io/otel/trace+ go.opentelemetry.io/otel/codes from go.opentelemetry.io/otel/trace 💣 go.opentelemetry.io/otel/internal from go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/internal/attribute from go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/semconv/v1.26.0 from go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace from k8s.io/component-base/metrics go.opentelemetry.io/otel/trace/embedded from go.opentelemetry.io/otel/trace + 💣 go.opentelemetry.io/otel/trace/internal/telemetry from go.opentelemetry.io/otel/trace go.uber.org/multierr from go.uber.org/zap+ go.uber.org/zap from github.com/go-logr/zapr+ go.uber.org/zap/buffer from go.uber.org/zap/internal/bufferpool+ @@ -147,19 +141,20 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ go.uber.org/zap/internal/pool from go.uber.org/zap+ go.uber.org/zap/internal/stacktrace from go.uber.org/zap go.uber.org/zap/zapcore from github.com/go-logr/zapr+ + go.yaml.in/yaml/v2 from k8s.io/kube-openapi/pkg/util/proto+ + go.yaml.in/yaml/v3 from github.com/google/gnostic-models/compiler+ 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ W 💣 golang.zx2c4.com/wintun from 
github.com/tailscale/wireguard-go/tun W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/dns+ gomodules.xyz/jsonpatch/v2 from sigs.k8s.io/controller-runtime/pkg/webhook+ google.golang.org/protobuf/encoding/protodelim from github.com/prometheus/common/expfmt - google.golang.org/protobuf/encoding/prototext from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/encoding/protowire from github.com/golang/protobuf/proto+ + google.golang.org/protobuf/encoding/prototext from github.com/prometheus/common/expfmt+ + google.golang.org/protobuf/encoding/protowire from google.golang.org/protobuf/encoding/protodelim+ google.golang.org/protobuf/internal/descfmt from google.golang.org/protobuf/internal/filedesc google.golang.org/protobuf/internal/descopts from google.golang.org/protobuf/internal/filedesc+ google.golang.org/protobuf/internal/detrand from google.golang.org/protobuf/internal/descfmt+ - google.golang.org/protobuf/internal/editiondefaults from google.golang.org/protobuf/internal/filedesc+ - google.golang.org/protobuf/internal/editionssupport from google.golang.org/protobuf/reflect/protodesc + google.golang.org/protobuf/internal/editiondefaults from google.golang.org/protobuf/internal/filedesc google.golang.org/protobuf/internal/encoding/defval from google.golang.org/protobuf/internal/encoding/tag+ google.golang.org/protobuf/internal/encoding/messageset from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/encoding/tag from google.golang.org/protobuf/internal/impl @@ -176,19 +171,17 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext 💣 google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+ google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl - google.golang.org/protobuf/proto from 
github.com/golang/protobuf/proto+ - google.golang.org/protobuf/reflect/protodesc from github.com/golang/protobuf/proto - 💣 google.golang.org/protobuf/reflect/protoreflect from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/reflect/protoregistry from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/runtime/protoiface from github.com/golang/protobuf/proto+ - google.golang.org/protobuf/runtime/protoimpl from github.com/golang/protobuf/proto+ - 💣 google.golang.org/protobuf/types/descriptorpb from github.com/google/gnostic-models/openapiv3+ - 💣 google.golang.org/protobuf/types/gofeaturespb from google.golang.org/protobuf/reflect/protodesc + google.golang.org/protobuf/proto from github.com/google/gnostic-models/compiler+ + 💣 google.golang.org/protobuf/reflect/protoreflect from github.com/google/gnostic-models/extensions+ + google.golang.org/protobuf/reflect/protoregistry from google.golang.org/protobuf/encoding/prototext+ + google.golang.org/protobuf/runtime/protoiface from google.golang.org/protobuf/internal/impl+ + google.golang.org/protobuf/runtime/protoimpl from github.com/google/gnostic-models/extensions+ + 💣 google.golang.org/protobuf/types/descriptorpb from github.com/google/gnostic-models/openapiv3 💣 google.golang.org/protobuf/types/known/anypb from github.com/google/gnostic-models/compiler+ 💣 google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ gopkg.in/evanphx/json-patch.v4 from k8s.io/client-go/testing gopkg.in/inf.v0 from k8s.io/apimachinery/pkg/api/resource - gopkg.in/yaml.v3 from github.com/go-openapi/swag+ + gopkg.in/yaml.v3 from github.com/go-openapi/swag gvisor.dev/gvisor/pkg/atomicbitops from gvisor.dev/gvisor/pkg/buffer+ gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/buffer 💣 gvisor.dev/gvisor/pkg/buffer from gvisor.dev/gvisor/pkg/tcpip+ @@ -269,7 +262,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 
k8s.io/api/flowcontrol/v1beta2 from k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2+ k8s.io/api/flowcontrol/v1beta3 from k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3+ k8s.io/api/networking/v1 from k8s.io/client-go/applyconfigurations/networking/v1+ - k8s.io/api/networking/v1alpha1 from k8s.io/client-go/applyconfigurations/networking/v1alpha1+ k8s.io/api/networking/v1beta1 from k8s.io/client-go/applyconfigurations/networking/v1beta1+ k8s.io/api/node/v1 from k8s.io/client-go/applyconfigurations/node/v1+ k8s.io/api/node/v1alpha1 from k8s.io/client-go/applyconfigurations/node/v1alpha1+ @@ -279,8 +271,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/api/rbac/v1 from k8s.io/client-go/applyconfigurations/rbac/v1+ k8s.io/api/rbac/v1alpha1 from k8s.io/client-go/applyconfigurations/rbac/v1alpha1+ k8s.io/api/rbac/v1beta1 from k8s.io/client-go/applyconfigurations/rbac/v1beta1+ + k8s.io/api/resource/v1 from k8s.io/client-go/applyconfigurations/resource/v1+ k8s.io/api/resource/v1alpha3 from k8s.io/client-go/applyconfigurations/resource/v1alpha3+ k8s.io/api/resource/v1beta1 from k8s.io/client-go/applyconfigurations/resource/v1beta1+ + k8s.io/api/resource/v1beta2 from k8s.io/client-go/applyconfigurations/resource/v1beta2+ k8s.io/api/scheduling/v1 from k8s.io/client-go/applyconfigurations/scheduling/v1+ k8s.io/api/scheduling/v1alpha1 from k8s.io/client-go/applyconfigurations/scheduling/v1alpha1+ k8s.io/api/scheduling/v1beta1 from k8s.io/client-go/applyconfigurations/scheduling/v1beta1+ @@ -294,16 +288,20 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/api/errors from k8s.io/apimachinery/pkg/util/managedfields/internal+ k8s.io/apimachinery/pkg/api/meta from k8s.io/apimachinery/pkg/api/validation+ k8s.io/apimachinery/pkg/api/meta/testrestmapper from k8s.io/client-go/testing + k8s.io/apimachinery/pkg/api/operation from k8s.io/api/extensions/v1beta1+ 
k8s.io/apimachinery/pkg/api/resource from k8s.io/api/autoscaling/v1+ + k8s.io/apimachinery/pkg/api/safe from k8s.io/api/extensions/v1beta1 + k8s.io/apimachinery/pkg/api/validate from k8s.io/api/extensions/v1beta1 + k8s.io/apimachinery/pkg/api/validate/constraints from k8s.io/apimachinery/pkg/api/validate+ + k8s.io/apimachinery/pkg/api/validate/content from k8s.io/apimachinery/pkg/api/validate k8s.io/apimachinery/pkg/api/validation from k8s.io/apimachinery/pkg/util/managedfields/internal+ k8s.io/apimachinery/pkg/api/validation/path from k8s.io/apiserver/pkg/endpoints/request 💣 k8s.io/apimachinery/pkg/apis/meta/internalversion from k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme+ k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme from k8s.io/client-go/metadata+ - k8s.io/apimachinery/pkg/apis/meta/internalversion/validation from k8s.io/client-go/util/watchlist 💣 k8s.io/apimachinery/pkg/apis/meta/v1 from k8s.io/api/admission/v1+ k8s.io/apimachinery/pkg/apis/meta/v1/unstructured from k8s.io/apimachinery/pkg/runtime/serializer/versioning+ k8s.io/apimachinery/pkg/apis/meta/v1/validation from k8s.io/apimachinery/pkg/api/validation+ - 💣 k8s.io/apimachinery/pkg/apis/meta/v1beta1 from k8s.io/apimachinery/pkg/apis/meta/internalversion + 💣 k8s.io/apimachinery/pkg/apis/meta/v1beta1 from k8s.io/apimachinery/pkg/apis/meta/internalversion+ k8s.io/apimachinery/pkg/conversion from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1+ k8s.io/apimachinery/pkg/conversion/queryparams from k8s.io/apimachinery/pkg/runtime+ k8s.io/apimachinery/pkg/fields from k8s.io/apimachinery/pkg/api/equality+ @@ -322,7 +320,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/selection from k8s.io/apimachinery/pkg/apis/meta/v1+ k8s.io/apimachinery/pkg/types from k8s.io/api/admission/v1+ k8s.io/apimachinery/pkg/util/cache from k8s.io/client-go/tools/cache - k8s.io/apimachinery/pkg/util/diff from k8s.io/client-go/tools/cache + 
k8s.io/apimachinery/pkg/util/diff from k8s.io/client-go/tools/cache+ k8s.io/apimachinery/pkg/util/dump from k8s.io/apimachinery/pkg/util/diff+ k8s.io/apimachinery/pkg/util/errors from k8s.io/apimachinery/pkg/api/meta+ k8s.io/apimachinery/pkg/util/framer from k8s.io/apimachinery/pkg/runtime/serializer/json+ @@ -385,7 +383,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/applyconfigurations/internal from k8s.io/client-go/applyconfigurations/admissionregistration/v1+ k8s.io/client-go/applyconfigurations/meta/v1 from k8s.io/client-go/applyconfigurations/admissionregistration/v1+ k8s.io/client-go/applyconfigurations/networking/v1 from k8s.io/client-go/kubernetes/typed/networking/v1 - k8s.io/client-go/applyconfigurations/networking/v1alpha1 from k8s.io/client-go/kubernetes/typed/networking/v1alpha1 k8s.io/client-go/applyconfigurations/networking/v1beta1 from k8s.io/client-go/kubernetes/typed/networking/v1beta1 k8s.io/client-go/applyconfigurations/node/v1 from k8s.io/client-go/kubernetes/typed/node/v1 k8s.io/client-go/applyconfigurations/node/v1alpha1 from k8s.io/client-go/kubernetes/typed/node/v1alpha1 @@ -395,8 +392,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/applyconfigurations/rbac/v1 from k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/applyconfigurations/rbac/v1alpha1 from k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/applyconfigurations/rbac/v1beta1 from k8s.io/client-go/kubernetes/typed/rbac/v1beta1 + k8s.io/client-go/applyconfigurations/resource/v1 from k8s.io/client-go/kubernetes/typed/resource/v1 k8s.io/client-go/applyconfigurations/resource/v1alpha3 from k8s.io/client-go/kubernetes/typed/resource/v1alpha3 k8s.io/client-go/applyconfigurations/resource/v1beta1 from k8s.io/client-go/kubernetes/typed/resource/v1beta1 + k8s.io/client-go/applyconfigurations/resource/v1beta2 from 
k8s.io/client-go/kubernetes/typed/resource/v1beta2 k8s.io/client-go/applyconfigurations/scheduling/v1 from k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/applyconfigurations/scheduling/v1alpha1 from k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/applyconfigurations/scheduling/v1beta1 from k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 @@ -453,7 +452,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/informers/internalinterfaces from k8s.io/client-go/informers+ k8s.io/client-go/informers/networking from k8s.io/client-go/informers k8s.io/client-go/informers/networking/v1 from k8s.io/client-go/informers/networking - k8s.io/client-go/informers/networking/v1alpha1 from k8s.io/client-go/informers/networking k8s.io/client-go/informers/networking/v1beta1 from k8s.io/client-go/informers/networking k8s.io/client-go/informers/node from k8s.io/client-go/informers k8s.io/client-go/informers/node/v1 from k8s.io/client-go/informers/node @@ -467,8 +465,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/informers/rbac/v1alpha1 from k8s.io/client-go/informers/rbac k8s.io/client-go/informers/rbac/v1beta1 from k8s.io/client-go/informers/rbac k8s.io/client-go/informers/resource from k8s.io/client-go/informers + k8s.io/client-go/informers/resource/v1 from k8s.io/client-go/informers/resource k8s.io/client-go/informers/resource/v1alpha3 from k8s.io/client-go/informers/resource k8s.io/client-go/informers/resource/v1beta1 from k8s.io/client-go/informers/resource + k8s.io/client-go/informers/resource/v1beta2 from k8s.io/client-go/informers/resource k8s.io/client-go/informers/scheduling from k8s.io/client-go/informers k8s.io/client-go/informers/scheduling/v1 from k8s.io/client-go/informers/scheduling k8s.io/client-go/informers/scheduling/v1alpha1 from k8s.io/client-go/informers/scheduling @@ -503,8 +503,8 @@ tailscale.com/cmd/k8s-operator 
dependencies: (generated by github.com/tailscale/ k8s.io/client-go/kubernetes/typed/certificates/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/certificates/v1beta1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/coordination/v1 from k8s.io/client-go/kubernetes+ - k8s.io/client-go/kubernetes/typed/coordination/v1alpha2 from k8s.io/client-go/kubernetes+ - k8s.io/client-go/kubernetes/typed/coordination/v1beta1 from k8s.io/client-go/kubernetes + k8s.io/client-go/kubernetes/typed/coordination/v1alpha2 from k8s.io/client-go/kubernetes + k8s.io/client-go/kubernetes/typed/coordination/v1beta1 from k8s.io/client-go/kubernetes+ k8s.io/client-go/kubernetes/typed/core/v1 from k8s.io/client-go/kubernetes+ k8s.io/client-go/kubernetes/typed/discovery/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/discovery/v1beta1 from k8s.io/client-go/kubernetes @@ -516,7 +516,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta3 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/networking/v1 from k8s.io/client-go/kubernetes - k8s.io/client-go/kubernetes/typed/networking/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/networking/v1beta1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/node/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/node/v1alpha1 from k8s.io/client-go/kubernetes @@ -526,8 +525,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/kubernetes/typed/rbac/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/rbac/v1beta1 from k8s.io/client-go/kubernetes + k8s.io/client-go/kubernetes/typed/resource/v1 from 
k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/resource/v1alpha3 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/resource/v1beta1 from k8s.io/client-go/kubernetes + k8s.io/client-go/kubernetes/typed/resource/v1beta2 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/scheduling/v1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 from k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 from k8s.io/client-go/kubernetes @@ -566,7 +567,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/listers/flowcontrol/v1beta2 from k8s.io/client-go/informers/flowcontrol/v1beta2 k8s.io/client-go/listers/flowcontrol/v1beta3 from k8s.io/client-go/informers/flowcontrol/v1beta3 k8s.io/client-go/listers/networking/v1 from k8s.io/client-go/informers/networking/v1 - k8s.io/client-go/listers/networking/v1alpha1 from k8s.io/client-go/informers/networking/v1alpha1 k8s.io/client-go/listers/networking/v1beta1 from k8s.io/client-go/informers/networking/v1beta1 k8s.io/client-go/listers/node/v1 from k8s.io/client-go/informers/node/v1 k8s.io/client-go/listers/node/v1alpha1 from k8s.io/client-go/informers/node/v1alpha1 @@ -576,8 +576,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/listers/rbac/v1 from k8s.io/client-go/informers/rbac/v1 k8s.io/client-go/listers/rbac/v1alpha1 from k8s.io/client-go/informers/rbac/v1alpha1 k8s.io/client-go/listers/rbac/v1beta1 from k8s.io/client-go/informers/rbac/v1beta1 + k8s.io/client-go/listers/resource/v1 from k8s.io/client-go/informers/resource/v1 k8s.io/client-go/listers/resource/v1alpha3 from k8s.io/client-go/informers/resource/v1alpha3 k8s.io/client-go/listers/resource/v1beta1 from k8s.io/client-go/informers/resource/v1beta1 + k8s.io/client-go/listers/resource/v1beta2 from k8s.io/client-go/informers/resource/v1beta2 
k8s.io/client-go/listers/scheduling/v1 from k8s.io/client-go/informers/scheduling/v1 k8s.io/client-go/listers/scheduling/v1alpha1 from k8s.io/client-go/informers/scheduling/v1alpha1 k8s.io/client-go/listers/scheduling/v1beta1 from k8s.io/client-go/informers/scheduling/v1beta1 @@ -616,19 +618,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/util/apply from k8s.io/client-go/dynamic+ k8s.io/client-go/util/cert from k8s.io/client-go/rest+ k8s.io/client-go/util/connrotation from k8s.io/client-go/plugin/pkg/client/auth/exec+ - k8s.io/client-go/util/consistencydetector from k8s.io/client-go/dynamic+ + k8s.io/client-go/util/consistencydetector from k8s.io/client-go/tools/cache k8s.io/client-go/util/flowcontrol from k8s.io/client-go/kubernetes+ k8s.io/client-go/util/homedir from k8s.io/client-go/tools/clientcmd k8s.io/client-go/util/keyutil from k8s.io/client-go/util/cert - k8s.io/client-go/util/watchlist from k8s.io/client-go/dynamic+ k8s.io/client-go/util/workqueue from k8s.io/client-go/transport+ - k8s.io/component-base/cli/flag from k8s.io/component-base/featuregate k8s.io/component-base/featuregate from k8s.io/apiserver/pkg/features+ k8s.io/component-base/metrics from k8s.io/component-base/metrics/legacyregistry+ k8s.io/component-base/metrics/legacyregistry from k8s.io/component-base/metrics/prometheus/feature k8s.io/component-base/metrics/prometheus/feature from k8s.io/component-base/featuregate k8s.io/component-base/metrics/prometheusextension from k8s.io/component-base/metrics k8s.io/component-base/version from k8s.io/component-base/featuregate+ + k8s.io/component-base/zpages/features from k8s.io/apiserver/pkg/features k8s.io/klog/v2 from k8s.io/apimachinery/pkg/api/meta+ k8s.io/klog/v2/internal/buffer from k8s.io/klog/v2 k8s.io/klog/v2/internal/clock from k8s.io/klog/v2 @@ -647,12 +648,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/kube-openapi/pkg/validation/spec 
from k8s.io/apimachinery/pkg/util/managedfields+ k8s.io/utils/buffer from k8s.io/client-go/tools/cache k8s.io/utils/clock from k8s.io/apimachinery/pkg/util/cache+ - k8s.io/utils/clock/testing from k8s.io/client-go/util/flowcontrol k8s.io/utils/internal/third_party/forked/golang/golang-lru from k8s.io/utils/lru k8s.io/utils/internal/third_party/forked/golang/net from k8s.io/utils/net k8s.io/utils/lru from k8s.io/client-go/tools/record k8s.io/utils/net from k8s.io/apimachinery/pkg/util/net+ - k8s.io/utils/pointer from k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1+ k8s.io/utils/ptr from k8s.io/client-go/tools/cache+ k8s.io/utils/trace from k8s.io/client-go/tools/cache sigs.k8s.io/controller-runtime/pkg/builder from tailscale.com/cmd/k8s-operator @@ -696,13 +695,14 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics from sigs.k8s.io/controller-runtime/pkg/webhook+ sigs.k8s.io/json from k8s.io/apimachinery/pkg/runtime/serializer/json+ sigs.k8s.io/json/internal/golang/encoding/json from sigs.k8s.io/json - 💣 sigs.k8s.io/structured-merge-diff/v4/fieldpath from k8s.io/apimachinery/pkg/util/managedfields+ - sigs.k8s.io/structured-merge-diff/v4/merge from k8s.io/apimachinery/pkg/util/managedfields/internal - sigs.k8s.io/structured-merge-diff/v4/schema from k8s.io/apimachinery/pkg/util/managedfields+ - sigs.k8s.io/structured-merge-diff/v4/typed from k8s.io/apimachinery/pkg/util/managedfields+ - sigs.k8s.io/structured-merge-diff/v4/value from k8s.io/apimachinery/pkg/runtime+ + 💣 sigs.k8s.io/randfill from k8s.io/apimachinery/pkg/apis/meta/v1+ + sigs.k8s.io/randfill/bytesource from sigs.k8s.io/randfill + 💣 sigs.k8s.io/structured-merge-diff/v6/fieldpath from k8s.io/apimachinery/pkg/util/managedfields+ + sigs.k8s.io/structured-merge-diff/v6/merge from k8s.io/apimachinery/pkg/util/managedfields/internal + sigs.k8s.io/structured-merge-diff/v6/schema from 
k8s.io/apimachinery/pkg/util/managedfields+ + sigs.k8s.io/structured-merge-diff/v6/typed from k8s.io/apimachinery/pkg/util/managedfields+ + sigs.k8s.io/structured-merge-diff/v6/value from k8s.io/apimachinery/pkg/runtime+ sigs.k8s.io/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json+ - sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml+ tailscale.com from tailscale.com/version tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ @@ -1152,7 +1152,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ math from compress/flate+ math/big from crypto/dsa+ math/bits from compress/flate+ - math/rand from github.com/google/go-cmp/cmp+ + math/rand from github.com/fxamacker/cbor/v2+ math/rand/v2 from crypto/ecdsa+ mime from github.com/prometheus/common/expfmt+ mime/multipart from github.com/go-openapi/swag+ @@ -1191,7 +1191,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ sync/atomic from context+ syscall from crypto/internal/sysrand+ text/tabwriter from k8s.io/apimachinery/pkg/util/diff+ - text/template from html/template+ + text/template from html/template text/template/parse from html/template+ time from compress/gzip+ unicode from bytes+ diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index 516e75f489129..d25915e987760 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -431,7 +431,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
type: array items: type: string @@ -446,7 +445,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -603,7 +601,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -618,7 +615,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -703,8 +699,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. type: array items: @@ -776,7 +772,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -791,7 +786,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -948,7 +942,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -963,7 +956,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -1482,7 +1474,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -1823,7 +1815,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. 
@@ -2238,7 +2230,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2249,7 +2240,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml index 48db3ef4bd84d..3d80c55e10a73 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -380,7 +380,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -395,7 +394,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -552,7 +550,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -567,7 +564,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -652,8 +648,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. type: array items: @@ -725,7 +721,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -740,7 +735,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -897,7 +891,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -912,7 +905,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). type: array items: type: string @@ -1057,7 +1049,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 2757f09e5f36b..c53f5049261e8 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -992,7 +992,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1007,7 +1006,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -1168,7 +1166,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1183,7 +1180,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1272,8 +1268,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) @@ -1337,7 +1333,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1352,7 +1347,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1513,7 +1507,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1528,7 +1521,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -2051,7 +2043,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2392,7 +2384,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2803,7 +2795,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. 
type: string nodeTaintsPolicy: description: |- @@ -2814,7 +2805,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -3648,7 +3638,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3663,7 +3652,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3824,7 +3812,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3839,7 +3826,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -3928,8 +3914,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) @@ -3993,7 +3979,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4008,7 +3993,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4169,7 +4153,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4184,7 +4167,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4333,7 +4315,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. diff --git a/cmd/k8s-operator/e2e/certs/pebble.minica.crt b/cmd/k8s-operator/e2e/certs/pebble.minica.crt new file mode 100644 index 0000000000000..35388ee56db91 --- /dev/null +++ b/cmd/k8s-operator/e2e/certs/pebble.minica.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCTCCAfGgAwIBAgIIJOLbes8sTr4wDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE +AxMVbWluaWNhIHJvb3QgY2EgMjRlMmRiMCAXDTE3MTIwNjE5NDIxMFoYDzIxMTcx +MjA2MTk0MjEwWjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAyNGUyZGIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC5WgZNoVJandj43kkLyU50vzCZ +alozvdRo3OFiKoDtmqKPNWRNO2hC9AUNxTDJco51Yc42u/WV3fPbbhSznTiOOVtn +Ajm6iq4I5nZYltGGZetGDOQWr78y2gWY+SG078MuOO2hyDIiKtVc3xiXYA+8Hluu +9F8KbqSS1h55yxZ9b87eKR+B0zu2ahzBCIHKmKWgc6N13l7aDxxY3D6uq8gtJRU0 +toumyLbdzGcupVvjbjDP11nl07RESDWBLG1/g3ktJvqIa4BWgU2HMh4rND6y8OD3 +Hy3H8MY6CElL+MOCbFJjWqhtOxeFyZZV9q3kYnk9CAuQJKMEGuN4GU6tzhW1AgMB +AAGjRTBDMA4GA1UdDwEB/wQEAwIChDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYB +BQUHAwIwEgYDVR0TAQH/BAgwBgEB/wIBADANBgkqhkiG9w0BAQsFAAOCAQEAF85v +d40HK1ouDAtWeO1PbnWfGEmC5Xa478s9ddOd9Clvp2McYzNlAFfM7kdcj6xeiNhF +WPIfaGAi/QdURSL/6C1KsVDqlFBlTs9zYfh2g0UXGvJtj1maeih7zxFLvet+fqll +xseM4P9EVJaQxwuK/F78YBt0tCNfivC6JNZMgxKF59h0FBpH70ytUSHXdz7FKwix +Mfn3qEb9BXSk0Q3prNV5sOV3vgjEtB4THfDxSz9z3+DepVnW3vbbqwEbkXdk3j82 +2muVldgOUgTwK8eT+XdofVdntzU/kzygSAtAQwLJfn51fS1GvEcYGBc1bDryIqmF +p9BI7gVKtWSZYegicA== +-----END CERTIFICATE----- \ No newline at end of file diff --git a/cmd/k8s-operator/e2e/doc.go b/cmd/k8s-operator/e2e/doc.go new 
file mode 100644 index 0000000000000..40fa1f36a1d82 --- /dev/null +++ b/cmd/k8s-operator/e2e/doc.go @@ -0,0 +1,28 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package e2e runs end-to-end tests for the Tailscale Kubernetes operator. +// +// To run without arguments, it requires: +// +// * Kubernetes cluster with local kubeconfig for it (direct connection, no API server proxy) +// * Tailscale operator installed with --set apiServerProxyConfig.mode="true" +// * ACLs from acl.hujson +// * OAuth client secret in TS_API_CLIENT_SECRET env, with at least auth_keys write scope and tag:k8s tag +// * Default ProxyClass and operator env vars as appropriate to set the desired default proxy images. +// +// It also supports running against devcontrol, using the --devcontrol flag, +// which it expects to reach at http://localhost:31544. Use --cluster to create +// a dedicated kind cluster for the tests, and --build to build and test the +// operator and proxy images for the current checkout. 
+// +// To run with minimal dependencies, use: +// +// go test -count=1 -v ./cmd/k8s-operator/e2e/ --build --cluster --devcontrol --skip-cleanup +// +// Running like this, it requires: +// +// * go +// * container runtime with the docker daemon API available +// * devcontrol: ./tool/go run ./cmd/devcontrol --generate-test-devices=k8s-operator-e2e --scenario-output-dir=/tmp/k8s-operator-e2e --test-dns=http://localhost:8055 +package e2e diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go index 23f0711ec9906..c5b238e852b89 100644 --- a/cmd/k8s-operator/e2e/ingress_test.go +++ b/cmd/k8s-operator/e2e/ingress_test.go @@ -14,8 +14,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" kube "tailscale.com/k8s-operator" "tailscale.com/tstest" "tailscale.com/types/ptr" @@ -24,17 +22,12 @@ import ( // See [TestMain] for test requirements. func TestIngress(t *testing.T) { - if apiClient == nil { - t.Skip("TestIngress requires TS_API_CLIENT_SECRET set") + if tnClient == nil { + t.Skip("TestIngress requires a working tailnet client") } - cfg := config.GetConfigOrDie() - cl, err := client.New(cfg, client.Options{}) - if err != nil { - t.Fatal(err) - } // Apply nginx - createAndCleanup(t, cl, + createAndCleanup(t, kubeClient, &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "nginx", @@ -73,8 +66,7 @@ func TestIngress(t *testing.T) { Name: "test-ingress", Namespace: "default", Annotations: map[string]string{ - "tailscale.com/expose": "true", - "tailscale.com/proxy-class": "prod", + "tailscale.com/expose": "true", }, }, Spec: corev1.ServiceSpec{ @@ -90,12 +82,12 @@ func TestIngress(t *testing.T) { }, }, } - createAndCleanup(t, cl, svc) + createAndCleanup(t, kubeClient, svc) // TODO: instead of timing out only when test times out, cancel context after 60s or so. 
if err := wait.PollUntilContextCancel(t.Context(), time.Millisecond*100, true, func(ctx context.Context) (done bool, err error) { maybeReadySvc := &corev1.Service{ObjectMeta: objectMeta("default", "test-ingress")} - if err := get(ctx, cl, maybeReadySvc); err != nil { + if err := get(ctx, kubeClient, maybeReadySvc); err != nil { return false, err } isReady := kube.SvcIsReady(maybeReadySvc) @@ -118,7 +110,7 @@ func TestIngress(t *testing.T) { } ctx, cancel := context.WithTimeout(t.Context(), time.Second) defer cancel() - resp, err = tailnetClient.HTTPClient().Do(req.WithContext(ctx)) + resp, err = tnClient.HTTPClient().Do(req.WithContext(ctx)) return err }); err != nil { t.Fatalf("error trying to reach Service: %v", err) diff --git a/cmd/k8s-operator/e2e/main_test.go b/cmd/k8s-operator/e2e/main_test.go index fb5e5c8597cef..68f10dbb064cf 100644 --- a/cmd/k8s-operator/e2e/main_test.go +++ b/cmd/k8s-operator/e2e/main_test.go @@ -5,34 +5,22 @@ package e2e import ( "context" - "errors" + "flag" "log" "os" - "strings" "testing" - "time" - "golang.org/x/oauth2/clientcredentials" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - "tailscale.com/internal/client/tailscale" - "tailscale.com/ipn/store/mem" - "tailscale.com/tsnet" -) - -// This test suite is currently not run in CI. -// It requires some setup not handled by this code: -// - Kubernetes cluster with local kubeconfig for it (direct connection, no API server proxy) -// - Tailscale operator installed with --set apiServerProxyConfig.mode="true" -// - ACLs from acl.hujson -// - OAuth client secret in TS_API_CLIENT_SECRET env, with at least auth_keys write scope and tag:k8s tag -var ( - apiClient *tailscale.Client // For API calls to control. - tailnetClient *tsnet.Server // For testing real tailnet traffic. 
) func TestMain(m *testing.M) { + flag.Parse() + if !*fDevcontrol && os.Getenv("TS_API_CLIENT_SECRET") == "" { + log.Printf("Skipping setup: devcontrol is false and TS_API_CLIENT_SECRET is not set") + os.Exit(m.Run()) + } code, err := runTests(m) if err != nil { log.Printf("Error: %v", err) @@ -41,56 +29,6 @@ func TestMain(m *testing.M) { os.Exit(code) } -func runTests(m *testing.M) (int, error) { - secret := os.Getenv("TS_API_CLIENT_SECRET") - if secret != "" { - secretParts := strings.Split(secret, "-") - if len(secretParts) != 4 { - return 0, errors.New("TS_API_CLIENT_SECRET is not valid") - } - ctx := context.Background() - credentials := clientcredentials.Config{ - ClientID: secretParts[2], - ClientSecret: secret, - TokenURL: "https://login.tailscale.com/api/v2/oauth/token", - Scopes: []string{"auth_keys"}, - } - apiClient = tailscale.NewClient("-", nil) - apiClient.HTTPClient = credentials.Client(ctx) - - caps := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - Preauthorized: true, - Ephemeral: true, - Tags: []string{"tag:k8s"}, - }, - }, - } - - authKey, authKeyMeta, err := apiClient.CreateKeyWithExpiry(ctx, caps, 10*time.Minute) - if err != nil { - return 0, err - } - defer apiClient.DeleteKey(context.Background(), authKeyMeta.ID) - - tailnetClient = &tsnet.Server{ - Hostname: "test-proxy", - Ephemeral: true, - Store: &mem.Store{}, - AuthKey: authKey, - } - _, err = tailnetClient.Up(ctx) - if err != nil { - return 0, err - } - defer tailnetClient.Close() - } - - return m.Run(), nil -} - func objectMeta(namespace, name string) metav1.ObjectMeta { return metav1.ObjectMeta{ Namespace: namespace, diff --git a/cmd/k8s-operator/e2e/pebble.go b/cmd/k8s-operator/e2e/pebble.go new file mode 100644 index 0000000000000..a3175a4edc771 --- /dev/null +++ b/cmd/k8s-operator/e2e/pebble.go @@ -0,0 +1,174 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: 
BSD-3-Clause + +package e2e + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + "tailscale.com/types/ptr" +) + +func applyPebbleResources(ctx context.Context, cl client.Client) error { + owner := client.FieldOwner("k8s-test") + + if err := cl.Patch(ctx, pebbleDeployment(pebbleTag), client.Apply, owner); err != nil { + return fmt.Errorf("failed to apply pebble Deployment: %w", err) + } + if err := cl.Patch(ctx, pebbleService(), client.Apply, owner); err != nil { + return fmt.Errorf("failed to apply pebble Service: %w", err) + } + if err := cl.Patch(ctx, tailscaleNamespace(), client.Apply, owner); err != nil { + return fmt.Errorf("failed to apply tailscale Namespace: %w", err) + } + if err := cl.Patch(ctx, pebbleExternalNameService(), client.Apply, owner); err != nil { + return fmt.Errorf("failed to apply pebble ExternalName Service: %w", err) + } + + return nil +} + +func pebbleDeployment(tag string) *appsv1.Deployment { + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "pebble", + Namespace: ns, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "pebble", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "pebble", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "pebble", + Image: fmt.Sprintf("ghcr.io/letsencrypt/pebble:%s", tag), + ImagePullPolicy: corev1.PullIfNotPresent, + Args: []string{ + "-dnsserver=localhost:8053", + "-strict", + }, + Ports: []corev1.ContainerPort{ + { + Name: "acme", + ContainerPort: 14000, + }, + { + Name: "pebble-api", + ContainerPort: 15000, + }, + }, + Env: []corev1.EnvVar{ + 
{ + Name: "PEBBLE_VA_NOSLEEP", + Value: "1", + }, + }, + }, + { + Name: "challtestsrv", + Image: fmt.Sprintf("ghcr.io/letsencrypt/pebble-challtestsrv:%s", tag), + ImagePullPolicy: corev1.PullIfNotPresent, + Args: []string{"-defaultIPv6="}, + Ports: []corev1.ContainerPort{ + { + Name: "mgmt-api", + ContainerPort: 8055, + }, + }, + }, + }, + }, + }, + }, + } +} + +func pebbleService() *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "pebble", + Namespace: ns, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app": "pebble", + }, + Ports: []corev1.ServicePort{ + { + Name: "acme", + Port: 14000, + TargetPort: intstr.FromInt(14000), + }, + { + Name: "pebble-api", + Port: 15000, + TargetPort: intstr.FromInt(15000), + }, + { + Name: "mgmt-api", + Port: 8055, + TargetPort: intstr.FromInt(8055), + }, + }, + }, + } +} + +func tailscaleNamespace() *corev1.Namespace { + return &corev1.Namespace{ + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "tailscale", + }, + } +} + +// pebbleExternalNameService ensures the operator in the tailscale namespace +// can reach pebble on a DNS name (pebble) that matches its TLS cert. 
+func pebbleExternalNameService() *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "pebble", + Namespace: "tailscale", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + Selector: map[string]string{ + "app": "pebble", + }, + ExternalName: "pebble.default.svc.cluster.local", + }, + } +} diff --git a/cmd/k8s-operator/e2e/proxy_test.go b/cmd/k8s-operator/e2e/proxy_test.go index b3010f97e28c8..b61d6d5763810 100644 --- a/cmd/k8s-operator/e2e/proxy_test.go +++ b/cmd/k8s-operator/e2e/proxy_test.go @@ -4,8 +4,10 @@ package e2e import ( + "crypto/tls" "encoding/json" "fmt" + "net/http" "testing" "time" @@ -14,25 +16,18 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" "tailscale.com/ipn" "tailscale.com/tstest" ) // See [TestMain] for test requirements. func TestProxy(t *testing.T) { - if apiClient == nil { - t.Skip("TestIngress requires TS_API_CLIENT_SECRET set") - } - - cfg := config.GetConfigOrDie() - cl, err := client.New(cfg, client.Options{}) - if err != nil { - t.Fatal(err) + if tnClient == nil { + t.Skip("TestProxy requires a working tailnet client") } // Create role and role binding to allow a group we'll impersonate to do stuff. 
- createAndCleanup(t, cl, &rbacv1.Role{ + createAndCleanup(t, kubeClient, &rbacv1.Role{ ObjectMeta: objectMeta("tailscale", "read-secrets"), Rules: []rbacv1.PolicyRule{{ APIGroups: []string{""}, @@ -40,7 +35,7 @@ func TestProxy(t *testing.T) { Resources: []string{"secrets"}, }}, }) - createAndCleanup(t, cl, &rbacv1.RoleBinding{ + createAndCleanup(t, kubeClient, &rbacv1.RoleBinding{ ObjectMeta: objectMeta("tailscale", "read-secrets"), Subjects: []rbacv1.Subject{{ Kind: "Group", @@ -56,16 +51,25 @@ func TestProxy(t *testing.T) { operatorSecret := corev1.Secret{ ObjectMeta: objectMeta("tailscale", "operator"), } - if err := get(t.Context(), cl, &operatorSecret); err != nil { + if err := get(t.Context(), kubeClient, &operatorSecret); err != nil { t.Fatal(err) } // Join tailnet as a client of the API server proxy. proxyCfg := &rest.Config{ Host: fmt.Sprintf("https://%s:443", hostNameFromOperatorSecret(t, operatorSecret)), - Dial: tailnetClient.Dial, } - proxyCl, err := client.New(proxyCfg, client.Options{}) + proxyCl, err := client.New(proxyCfg, client.Options{ + HTTPClient: &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: testCAs, + }, + DialContext: tnClient.Dial, + }, + }, + }) if err != nil { t.Fatal(err) } @@ -77,7 +81,9 @@ func TestProxy(t *testing.T) { // Wait for up to a minute the first time we use the proxy, to give it time // to provision the TLS certs. 
if err := tstest.WaitFor(time.Minute, func() error { - return get(t.Context(), proxyCl, &allowedSecret) + err := get(t.Context(), proxyCl, &allowedSecret) + t.Logf("get Secret via proxy: %v", err) + return err }); err != nil { t.Fatal(err) } diff --git a/cmd/k8s-operator/e2e/setup.go b/cmd/k8s-operator/e2e/setup.go new file mode 100644 index 0000000000000..287ef4969c497 --- /dev/null +++ b/cmd/k8s-operator/e2e/setup.go @@ -0,0 +1,680 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package e2e + +import ( + "context" + "crypto/rand" + "crypto/tls" + "crypto/x509" + _ "embed" + jsonv1 "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "net/url" + "os" + "os/exec" + "os/signal" + "path/filepath" + "slices" + "strings" + "sync" + "syscall" + "testing" + "time" + + "github.com/go-logr/zapr" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/daemon" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "go.uber.org/zap" + "golang.org/x/oauth2/clientcredentials" + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/cli" + "helm.sh/helm/v3/pkg/release" + "helm.sh/helm/v3/pkg/storage/driver" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" + "sigs.k8s.io/controller-runtime/pkg/client" + klog "sigs.k8s.io/controller-runtime/pkg/log" + kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/kind/pkg/cluster" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/cmd" + "tailscale.com/client/tailscale/v2" + "tailscale.com/ipn" + "tailscale.com/ipn/store/mem" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/tsnet" +) + +const ( + pebbleTag = "2.8.0" + ns = "default" + tmp = 
"/tmp/k8s-operator-e2e" + kindClusterName = "k8s-operator-e2e" +) + +var ( + tsClient = &tailscale.Client{Tailnet: "-"} // For API calls to control. + tnClient *tsnet.Server // For testing real tailnet traffic. + kubeClient client.WithWatch // For k8s API calls. + + //go:embed certs/pebble.minica.crt + pebbleMiniCACert []byte + + // Either nil (system) or pebble CAs if pebble is deployed for devcontrol. + // pebble has a static "mini" CA that its ACME directory URL serves a cert + // from, and also dynamically generates a different CA for issuing certs. + testCAs *x509.CertPool + + //go:embed acl.hujson + requiredACLs []byte + + fDevcontrol = flag.Bool("devcontrol", false, "if true, connect to devcontrol at http://localhost:31544. Run devcontrol with "+` + ./tool/go run ./cmd/devcontrol \ + --generate-test-devices=k8s-operator-e2e \ + --dir=/tmp/devcontrol \ + --scenario-output-dir=/tmp/k8s-operator-e2e \ + --test-dns=http://localhost:8055`) + fSkipCleanup = flag.Bool("skip-cleanup", false, "if true, do not delete the kind cluster (if created) or tmp dir on exit") + fCluster = flag.Bool("cluster", false, "if true, create or use a pre-existing kind cluster named k8s-operator-e2e; otherwise assume a usable cluster already exists in kubeconfig") + fBuild = flag.Bool("build", false, "if true, build and deploy the operator and container images from the current checkout; otherwise assume the operator is already set up") +) + +func runTests(m *testing.M) (int, error) { + logger := kzap.NewRaw().Sugar() + klog.SetLogger(zapr.NewLogger(logger.Desugar())) + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT) + defer cancel() + + ossDir, err := gitRootDir() + if err != nil { + return 0, err + } + if err := os.MkdirAll(tmp, 0755); err != nil { + return 0, fmt.Errorf("failed to create temp dir: %w", err) + } + + logger.Infof("temp dir: %q", tmp) + logger.Infof("oss dir: %q", ossDir) + + var ( + kubeconfig string + kindProvider 
*cluster.Provider + ) + if *fCluster { + kubeconfig = filepath.Join(tmp, "kubeconfig") + kindProvider = cluster.NewProvider( + cluster.ProviderWithLogger(cmd.NewLogger()), + ) + clusters, err := kindProvider.List() + if err != nil { + return 0, fmt.Errorf("failed to list kind clusters: %w", err) + } + if !slices.Contains(clusters, kindClusterName) { + if err := kindProvider.Create(kindClusterName, + cluster.CreateWithWaitForReady(5*time.Minute), + cluster.CreateWithKubeconfigPath(kubeconfig), + cluster.CreateWithNodeImage("kindest/node:v1.30.0"), + ); err != nil { + return 0, fmt.Errorf("failed to create kind cluster: %w", err) + } + } + + if !*fSkipCleanup { + defer kindProvider.Delete(kindClusterName, kubeconfig) + defer os.Remove(kubeconfig) + } + } + + // Cluster client setup. + restCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return 0, fmt.Errorf("error loading kubeconfig: %w", err) + } + kubeClient, err = client.NewWithWatch(restCfg, client.Options{Scheme: tsapi.GlobalScheme}) + if err != nil { + return 0, fmt.Errorf("error creating Kubernetes client: %w", err) + } + + var ( + clusterLoginServer string // Login server from cluster Pod point of view. + clientID, clientSecret string // OAuth client for the operator to use. + caPaths []string // Extra CA cert file paths to add to images. + + certsDir string = filepath.Join(tmp, "certs") // Directory containing extra CA certs to add to images. + ) + if *fDevcontrol { + // Deploy pebble and get its certs. 
+ if err := applyPebbleResources(ctx, kubeClient); err != nil { + return 0, fmt.Errorf("failed to apply pebble resources: %w", err) + } + pebblePod, err := waitForPodReady(ctx, logger, kubeClient, ns, client.MatchingLabels{"app": "pebble"}) + if err != nil { + return 0, fmt.Errorf("pebble pod not ready: %w", err) + } + if err := forwardLocalPortToPod(ctx, logger, restCfg, ns, pebblePod, 15000); err != nil { + return 0, fmt.Errorf("failed to set up port forwarding to pebble: %w", err) + } + testCAs = x509.NewCertPool() + if ok := testCAs.AppendCertsFromPEM(pebbleMiniCACert); !ok { + return 0, fmt.Errorf("failed to parse pebble minica cert") + } + var pebbleCAChain []byte + for _, path := range []string{"/intermediates/0", "/roots/0"} { + pem, err := pebbleGet(ctx, 15000, path) + if err != nil { + return 0, err + } + pebbleCAChain = append(pebbleCAChain, pem...) + } + if ok := testCAs.AppendCertsFromPEM(pebbleCAChain); !ok { + return 0, fmt.Errorf("failed to parse pebble ca chain cert") + } + if err := os.MkdirAll(certsDir, 0755); err != nil { + return 0, fmt.Errorf("failed to create certs dir: %w", err) + } + pebbleCAChainPath := filepath.Join(certsDir, "pebble-ca-chain.crt") + if err := os.WriteFile(pebbleCAChainPath, pebbleCAChain, 0644); err != nil { + return 0, fmt.Errorf("failed to write pebble CA chain: %w", err) + } + pebbleMiniCACertPath := filepath.Join(certsDir, "pebble.minica.crt") + if err := os.WriteFile(pebbleMiniCACertPath, pebbleMiniCACert, 0644); err != nil { + return 0, fmt.Errorf("failed to write pebble minica: %w", err) + } + caPaths = []string{pebbleCAChainPath, pebbleMiniCACertPath} + if !*fSkipCleanup { + defer os.RemoveAll(certsDir) + } + + // Set up network connectivity between cluster and devcontrol. + // + // For devcontrol -> pebble (DNS mgmt for ACME challenges): + // * Port forward from localhost port 8055 to in-cluster pebble port 8055. 
+ // + // For Pods -> devcontrol (tailscale clients joining the tailnet): + // * Create ssh-server Deployment in cluster. + // * Create reverse ssh tunnel that goes from ssh-server port 31544 to localhost:31544. + if err := forwardLocalPortToPod(ctx, logger, restCfg, ns, pebblePod, 8055); err != nil { + return 0, fmt.Errorf("failed to set up port forwarding to pebble: %w", err) + } + privateKey, publicKey, err := readOrGenerateSSHKey(tmp) + if err != nil { + return 0, fmt.Errorf("failed to read or generate SSH key: %w", err) + } + if !*fSkipCleanup { + defer os.Remove(privateKeyPath) + } + + sshServiceIP, err := connectClusterToDevcontrol(ctx, logger, kubeClient, restCfg, privateKey, publicKey) + if err != nil { + return 0, fmt.Errorf("failed to set up cluster->devcontrol connection: %w", err) + } + if !*fSkipCleanup { + defer func() { + if err := cleanupSSHResources(context.Background(), kubeClient); err != nil { + logger.Infof("failed to clean up ssh-server resources: %v", err) + } + }() + } + + // Address cluster workloads can reach devcontrol at. Must be a private + // IP to make sure tailscale client code recognises it shouldn't try an + // https fallback. See [controlclient.NewNoiseClient] for details. + clusterLoginServer = fmt.Sprintf("http://%s:31544", sshServiceIP) + + b, err := os.ReadFile(filepath.Join(tmp, "api-key.json")) + if err != nil { + return 0, fmt.Errorf("failed to read api-key.json: %w", err) + } + var apiKeyData struct { + APIKey string `json:"apiKey"` + } + if err := jsonv1.Unmarshal(b, &apiKeyData); err != nil { + return 0, fmt.Errorf("failed to parse api-key.json: %w", err) + } + if apiKeyData.APIKey == "" { + return 0, fmt.Errorf("api-key.json did not contain an API key") + } + + // Finish setting up tsClient. 
+ baseURL, err := url.Parse("http://localhost:31544") + if err != nil { + return 0, fmt.Errorf("parse url: %w", err) + } + tsClient.BaseURL = baseURL + tsClient.APIKey = apiKeyData.APIKey + tsClient.HTTP = &http.Client{} + + // Set ACLs and create OAuth client. + if err := tsClient.PolicyFile().Set(ctx, string(requiredACLs), ""); err != nil { + return 0, fmt.Errorf("failed to set ACLs: %w", err) + } + logger.Infof("ACLs configured") + + key, err := tsClient.Keys().CreateOAuthClient(ctx, tailscale.CreateOAuthClientRequest{ + Scopes: []string{"auth_keys", "devices:core", "services"}, + Tags: []string{"tag:k8s-operator"}, + Description: "k8s-operator client for e2e tests", + }) + if err != nil { + return 0, fmt.Errorf("failed to create OAuth client: %w", err) + } + clientID = key.ID + clientSecret = key.Key + } else { + clientSecret = os.Getenv("TS_API_CLIENT_SECRET") + if clientSecret == "" { + return 0, fmt.Errorf("must use --devcontrol or set TS_API_CLIENT_SECRET to an OAuth client suitable for the operator") + } + // Format is "tskey-client--". + parts := strings.Split(clientSecret, "-") + if len(parts) != 4 { + return 0, fmt.Errorf("TS_API_CLIENT_SECRET is not valid") + } + clientID = parts[2] + credentials := clientcredentials.Config{ + ClientID: clientID, + ClientSecret: clientSecret, + TokenURL: fmt.Sprintf("%s/api/v2/oauth/token", ipn.DefaultControlURL), + Scopes: []string{"auth_keys"}, + } + baseURL, _ := url.Parse(ipn.DefaultControlURL) + tsClient = &tailscale.Client{ + Tailnet: "-", + HTTP: credentials.Client(ctx), + BaseURL: baseURL, + } + } + + var ossTag string + if *fBuild { + // TODO(tomhjp): proper support for --build=false and layering pebble certs on top of existing images. + // TODO(tomhjp): support non-local platform. + // TODO(tomhjp): build tsrecorder as well. + + // Build tailscale/k8s-operator, tailscale/tailscale, tailscale/k8s-proxy, with pebble CAs added. 
+ ossTag, err = tagForRepo(ossDir) + if err != nil { + return 0, err + } + logger.Infof("using OSS image tag: %q", ossTag) + ossImageToTarget := map[string]string{ + "local/k8s-operator": "publishdevoperator", + "local/tailscale": "publishdevimage", + "local/k8s-proxy": "publishdevproxy", + } + for img, target := range ossImageToTarget { + if err := buildImage(ctx, ossDir, img, target, ossTag, caPaths); err != nil { + return 0, err + } + nodes, err := kindProvider.ListInternalNodes(kindClusterName) + if err != nil { + return 0, fmt.Errorf("failed to list kind nodes: %w", err) + } + // TODO(tomhjp): can be made more efficient and portable if we + // stream built image tarballs straight to the node rather than + // going via the daemon. + // TODO(tomhjp): support --build with non-kind clusters. + imgRef, err := name.ParseReference(fmt.Sprintf("%s:%s", img, ossTag)) + if err != nil { + return 0, fmt.Errorf("failed to parse image reference: %w", err) + } + img, err := daemon.Image(imgRef) + if err != nil { + return 0, fmt.Errorf("failed to get image from daemon: %w", err) + } + pr, pw := io.Pipe() + go func() { + defer pw.Close() + if err := tarball.Write(imgRef, img, pw); err != nil { + logger.Infof("failed to write image to pipe: %v", err) + } + }() + for _, n := range nodes { + if err := nodeutils.LoadImageArchive(n, pr); err != nil { + return 0, fmt.Errorf("failed to load image into node %q: %w", n.String(), err) + } + } + } + } + + // Generate CRDs for the helm chart. + cmd := exec.CommandContext(ctx, "go", "run", "tailscale.com/cmd/k8s-operator/generate", "helmcrd") + cmd.Dir = ossDir + out, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("failed to generate CRD: %v: %s", err, out) + } + + // Load and install helm chart. 
+ chart, err := loader.Load(filepath.Join(ossDir, "cmd", "k8s-operator", "deploy", "chart")) + if err != nil { + return 0, fmt.Errorf("failed to load helm chart: %w", err) + } + values := map[string]any{ + "loginServer": clusterLoginServer, + "oauth": map[string]any{ + "clientId": clientID, + "clientSecret": clientSecret, + }, + "apiServerProxyConfig": map[string]any{ + "mode": "true", + }, + "operatorConfig": map[string]any{ + "logging": "debug", + "extraEnv": []map[string]any{ + { + "name": "K8S_PROXY_IMAGE", + "value": "local/k8s-proxy:" + ossTag, + }, + { + "name": "TS_DEBUG_ACME_DIRECTORY_URL", + "value": "https://pebble:14000/dir", + }, + }, + "image": map[string]any{ + "repo": "local/k8s-operator", + "tag": ossTag, + "pullPolicy": "IfNotPresent", + }, + }, + "proxyConfig": map[string]any{ + "defaultProxyClass": "default", + "image": map[string]any{ + "repository": "local/tailscale", + "tag": ossTag, + }, + }, + } + + settings := cli.New() + settings.KubeConfig = kubeconfig + settings.SetNamespace("tailscale") + helmCfg := &action.Configuration{} + if err := helmCfg.Init(settings.RESTClientGetter(), "tailscale", "", logger.Infof); err != nil { + return 0, fmt.Errorf("failed to initialize helm action configuration: %w", err) + } + + const relName = "tailscale-operator" // TODO(tomhjp): maybe configurable if others use a different value. 
+ f := upgraderOrInstaller(helmCfg, relName) + if _, err := f(ctx, relName, chart, values); err != nil { + return 0, fmt.Errorf("failed to install %q via helm: %w", relName, err) + } + + if err := applyDefaultProxyClass(ctx, kubeClient); err != nil { + return 0, fmt.Errorf("failed to apply default ProxyClass: %w", err) + } + + caps := tailscale.KeyCapabilities{} + caps.Devices.Create.Preauthorized = true + caps.Devices.Create.Ephemeral = true + caps.Devices.Create.Tags = []string{"tag:k8s"} + + authKey, err := tsClient.Keys().CreateAuthKey(ctx, tailscale.CreateKeyRequest{ + Capabilities: caps, + ExpirySeconds: 600, + Description: "e2e test authkey", + }) + if err != nil { + return 0, err + } + defer tsClient.Keys().Delete(context.Background(), authKey.ID) + + tnClient = &tsnet.Server{ + ControlURL: tsClient.BaseURL.String(), + Hostname: "test-proxy", + Ephemeral: true, + Store: &mem.Store{}, + AuthKey: authKey.Key, + } + _, err = tnClient.Up(ctx) + if err != nil { + return 0, err + } + defer tnClient.Close() + + return m.Run(), nil +} + +func upgraderOrInstaller(cfg *action.Configuration, releaseName string) helmInstallerFunc { + hist := action.NewHistory(cfg) + hist.Max = 1 + helmVersions, err := hist.Run(releaseName) + if err == driver.ErrReleaseNotFound || (len(helmVersions) > 0 && helmVersions[0].Info.Status == release.StatusUninstalled) { + return helmInstaller(cfg, releaseName) + } else { + return helmUpgrader(cfg) + } +} + +func helmUpgrader(cfg *action.Configuration) helmInstallerFunc { + upgrade := action.NewUpgrade(cfg) + upgrade.Namespace = "tailscale" + upgrade.Install = true + upgrade.Wait = true + upgrade.Timeout = 5 * time.Minute + return upgrade.RunWithContext +} + +func helmInstaller(cfg *action.Configuration, releaseName string) helmInstallerFunc { + install := action.NewInstall(cfg) + install.Namespace = "tailscale" + install.CreateNamespace = true + install.ReleaseName = releaseName + install.Wait = true + install.Timeout = 5 * time.Minute + 
install.Replace = true + return func(ctx context.Context, _ string, chart *chart.Chart, values map[string]any) (*release.Release, error) { + return install.RunWithContext(ctx, chart, values) + } +} + +type helmInstallerFunc func(context.Context, string, *chart.Chart, map[string]any) (*release.Release, error) + +// gitRootDir returns the top-level directory of the current git repo. Expects +// to be run from inside a git repo. +func gitRootDir() (string, error) { + top, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() + if err != nil { + return "", fmt.Errorf("failed to find git top level (not in corp git?): %w", err) + } + return strings.TrimSpace(string(top)), nil +} + +func tagForRepo(dir string) (string, error) { + cmd := exec.Command("git", "rev-parse", "--short", "HEAD") + cmd.Dir = dir + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("failed to get latest git tag for repo %q: %w", dir, err) + } + tag := strings.TrimSpace(string(out)) + + // If dirty, append an extra random tag to ensure unique image tags. 
+ cmd = exec.Command("git", "status", "--porcelain") + cmd.Dir = dir + out, err = cmd.Output() + if err != nil { + return "", fmt.Errorf("failed to check git status for repo %q: %w", dir, err) + } + if strings.TrimSpace(string(out)) != "" { + tag += "-" + strings.ToLower(rand.Text()) + } + + return tag, nil +} + +func applyDefaultProxyClass(ctx context.Context, cl client.Client) error { + pc := &tsapi.ProxyClass{ + TypeMeta: metav1.TypeMeta{ + APIVersion: tsapi.SchemeGroupVersion.String(), + Kind: tsapi.ProxyClassKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Spec: tsapi.ProxyClassSpec{ + StatefulSet: &tsapi.StatefulSet{ + Pod: &tsapi.Pod{ + TailscaleInitContainer: &tsapi.Container{ + ImagePullPolicy: "IfNotPresent", + }, + TailscaleContainer: &tsapi.Container{ + ImagePullPolicy: "IfNotPresent", + }, + }, + }, + }, + } + + owner := client.FieldOwner("k8s-test") + if err := cl.Patch(ctx, pc, client.Apply, owner); err != nil { + return fmt.Errorf("failed to apply default ProxyClass: %w", err) + } + + return nil +} + +// forwardLocalPortToPod sets up port forwarding to the specified Pod and remote port. +// It runs until the provided ctx is done. +func forwardLocalPortToPod(ctx context.Context, logger *zap.SugaredLogger, cfg *rest.Config, ns, podName string, port int) error { + transport, upgrader, err := spdy.RoundTripperFor(cfg) + if err != nil { + return fmt.Errorf("failed to create round tripper: %w", err) + } + + u, err := url.Parse(fmt.Sprintf("%s%s/api/v1/namespaces/%s/pods/%s/portforward", cfg.Host, cfg.APIPath, ns, podName)) + if err != nil { + return fmt.Errorf("failed to parse URL: %w", err) + } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", u) + + stopChan := make(chan struct{}, 1) + readyChan := make(chan struct{}, 1) + + ports := []string{fmt.Sprintf("%d:%d", port, port)} + + // TODO(tomhjp): work out how zap logger can be used instead of stdout/err. 
+ pf, err := portforward.New(dialer, ports, stopChan, readyChan, os.Stdout, os.Stderr) + if err != nil { + return fmt.Errorf("failed to create port forwarder: %w", err) + } + + go func() { + if err := pf.ForwardPorts(); err != nil { + logger.Infof("Port forwarding error: %v\n", err) + } + }() + + var once sync.Once + go func() { + <-ctx.Done() + once.Do(func() { close(stopChan) }) + }() + + // Wait for port forwarding to be ready + select { + case <-readyChan: + logger.Infof("Port forwarding to Pod %s/%s ready", ns, podName) + case <-time.After(10 * time.Second): + once.Do(func() { close(stopChan) }) + return fmt.Errorf("timeout waiting for port forward to be ready") + } + + return nil +} + +// waitForPodReady waits for at least 1 Pod matching the label selector to be +// in Ready state. It returns the name of the first ready Pod it finds. +func waitForPodReady(ctx context.Context, logger *zap.SugaredLogger, cl client.WithWatch, ns string, labelSelector client.MatchingLabels) (string, error) { + pods := &corev1.PodList{} + w, err := cl.Watch(ctx, pods, client.InNamespace(ns), client.MatchingLabels(labelSelector)) + if err != nil { + return "", fmt.Errorf("failed to create pod watcher: %v", err) + } + defer w.Stop() + + for { + select { + case event, ok := <-w.ResultChan(): + if !ok { + return "", fmt.Errorf("watcher channel closed") + } + + switch event.Type { + case watch.Added, watch.Modified: + if pod, ok := event.Object.(*corev1.Pod); ok { + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { + logger.Infof("pod %s is ready", pod.Name) + return pod.Name, nil + } + } + } + case watch.Error: + return "", fmt.Errorf("watch error: %v", event.Object) + } + case <-ctx.Done(): + return "", fmt.Errorf("timeout waiting for pod to be ready") + } + } +} + +func pebbleGet(ctx context.Context, port uint16, path string) ([]byte, error) { + pebbleClient := &http.Client{ + Transport: 
&http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: testCAs, + }, + }, + Timeout: 10 * time.Second, + } + req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("https://localhost:%d%s", port, path), nil) + resp, err := pebbleClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to fetch pebble root CA: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP %d when fetching pebble root CA", resp.StatusCode) + } + b, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read pebble root CA response: %w", err) + } + + return b, nil +} + +func buildImage(ctx context.Context, dir, repo, target, tag string, extraCACerts []string) error { + var files []string + for _, f := range extraCACerts { + files = append(files, fmt.Sprintf("%s:/etc/ssl/certs/%s", f, filepath.Base(f))) + } + cmd := exec.CommandContext(ctx, "make", target, + "PLATFORM=local", + fmt.Sprintf("TAGS=%s", tag), + fmt.Sprintf("REPO=%s", repo), + fmt.Sprintf("FILES=%s", strings.Join(files, ",")), + ) + cmd.Dir = dir + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to build image %q: %w", target, err) + } + + return nil +} diff --git a/cmd/k8s-operator/e2e/ssh.go b/cmd/k8s-operator/e2e/ssh.go new file mode 100644 index 0000000000000..407e4e085b7a9 --- /dev/null +++ b/cmd/k8s-operator/e2e/ssh.go @@ -0,0 +1,352 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package e2e + +import ( + "context" + "crypto/ed25519" + "crypto/rand" + "encoding/hex" + "encoding/pem" + "fmt" + "io" + "net" + "os" + "path/filepath" + "time" + + "go.uber.org/zap" + "golang.org/x/crypto/ssh" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + 
tailscaleroot "tailscale.com" + "tailscale.com/types/ptr" +) + +const ( + keysFilePath = "/root/.ssh/authorized_keys" + sshdConfig = ` +Port 8022 + +# Allow reverse tunnels +GatewayPorts yes +AllowTcpForwarding yes + +# Auth +PermitRootLogin yes +PasswordAuthentication no +PubkeyAuthentication yes +AuthorizedKeysFile ` + keysFilePath +) + +var privateKeyPath = filepath.Join(tmp, "id_ed25519") + +func connectClusterToDevcontrol(ctx context.Context, logger *zap.SugaredLogger, cl client.WithWatch, restConfig *rest.Config, privKey ed25519.PrivateKey, pubKey []byte) (clusterIP string, _ error) { + logger.Info("Setting up SSH reverse tunnel from cluster to devcontrol...") + var err error + if clusterIP, err = applySSHResources(ctx, cl, tailscaleroot.AlpineDockerTag, pubKey); err != nil { + return "", fmt.Errorf("failed to apply ssh-server resources: %w", err) + } + sshPodName, err := waitForPodReady(ctx, logger, cl, ns, client.MatchingLabels{"app": "ssh-server"}) + if err != nil { + return "", fmt.Errorf("ssh-server Pod not ready: %w", err) + } + if err := forwardLocalPortToPod(ctx, logger, restConfig, ns, sshPodName, 8022); err != nil { + return "", fmt.Errorf("failed to set up port forwarding to ssh-server: %w", err) + } + if err := reverseTunnel(ctx, logger, privKey, fmt.Sprintf("localhost:%d", 8022), 31544, "localhost:31544"); err != nil { + return "", fmt.Errorf("failed to set up reverse tunnel: %w", err) + } + + return clusterIP, nil +} + +func reverseTunnel(ctx context.Context, logger *zap.SugaredLogger, privateKey ed25519.PrivateKey, sshHost string, remotePort uint16, fwdTo string) error { + signer, err := ssh.NewSignerFromKey(privateKey) + if err != nil { + return fmt.Errorf("failed to create signer: %w", err) + } + config := &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.PublicKeys(signer), + }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + Timeout: 30 * time.Second, + } + + conn, err := ssh.Dial("tcp", sshHost, config) + if err != nil { 
+ return fmt.Errorf("failed to connect to SSH server: %w", err) + } + logger.Infof("Connected to SSH server at %s\n", sshHost) + + go func() { + defer conn.Close() + + // Start listening on remote port. + remoteAddr := fmt.Sprintf("localhost:%d", remotePort) + remoteLn, err := conn.Listen("tcp", remoteAddr) + if err != nil { + logger.Infof("Failed to listen on remote port %d: %v", remotePort, err) + return + } + defer remoteLn.Close() + logger.Infof("Reverse tunnel ready on remote addr %s -> local addr %s", remoteAddr, fwdTo) + + for { + remoteConn, err := remoteLn.Accept() + if err != nil { + logger.Infof("Failed to accept remote connection: %v", err) + return + } + + go handleConnection(ctx, logger, remoteConn, fwdTo) + } + }() + + return nil +} + +func handleConnection(ctx context.Context, logger *zap.SugaredLogger, remoteConn net.Conn, fwdTo string) { + go func() { + <-ctx.Done() + remoteConn.Close() + }() + + var d net.Dialer + localConn, err := d.DialContext(ctx, "tcp", fwdTo) + if err != nil { + logger.Infof("Failed to connect to local service %s: %v", fwdTo, err) + return + } + go func() { + <-ctx.Done() + localConn.Close() + }() + + go func() { + if _, err := io.Copy(localConn, remoteConn); err != nil { + logger.Infof("Error copying remote->local: %v", err) + } + }() + + go func() { + if _, err := io.Copy(remoteConn, localConn); err != nil { + logger.Infof("Error copying local->remote: %v", err) + } + }() +} + +func readOrGenerateSSHKey(tmp string) (ed25519.PrivateKey, []byte, error) { + var privateKey ed25519.PrivateKey + b, err := os.ReadFile(privateKeyPath) + switch { + case os.IsNotExist(err): + _, privateKey, err = ed25519.GenerateKey(rand.Reader) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate key: %w", err) + } + privKeyPEM, err := ssh.MarshalPrivateKey(privateKey, "") + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal SSH private key: %w", err) + } + f, err := os.OpenFile(privateKeyPath, 
os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return nil, nil, fmt.Errorf("failed to open SSH private key file: %w", err) + } + defer f.Close() + if err := pem.Encode(f, privKeyPEM); err != nil { + return nil, nil, fmt.Errorf("failed to write SSH private key: %w", err) + } + case err != nil: + return nil, nil, fmt.Errorf("failed to read SSH private key: %w", err) + default: + pKey, err := ssh.ParseRawPrivateKey(b) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse SSH private key: %w", err) + } + pKeyPointer, ok := pKey.(*ed25519.PrivateKey) + if !ok { + return nil, nil, fmt.Errorf("SSH private key is not ed25519: %T", pKey) + } + privateKey = *pKeyPointer + } + + sshPublicKey, err := ssh.NewPublicKey(privateKey.Public()) + if err != nil { + return nil, nil, fmt.Errorf("failed to create SSH public key: %w", err) + } + + return privateKey, ssh.MarshalAuthorizedKey(sshPublicKey), nil +} + +func applySSHResources(ctx context.Context, cl client.Client, alpineTag string, pubKey []byte) (string, error) { + owner := client.FieldOwner("k8s-test") + + if err := cl.Patch(ctx, sshDeployment(alpineTag, pubKey), client.Apply, owner); err != nil { + return "", fmt.Errorf("failed to apply ssh-server Deployment: %w", err) + } + if err := cl.Patch(ctx, sshConfigMap(pubKey), client.Apply, owner); err != nil { + return "", fmt.Errorf("failed to apply ssh-server ConfigMap: %w", err) + } + svc := sshService() + if err := cl.Patch(ctx, svc, client.Apply, owner); err != nil { + return "", fmt.Errorf("failed to apply ssh-server Service: %w", err) + } + + return svc.Spec.ClusterIP, nil +} + +func cleanupSSHResources(ctx context.Context, cl client.Client) error { + noGrace := &client.DeleteOptions{ + GracePeriodSeconds: ptr.To[int64](0), + } + if err := cl.Delete(ctx, sshDeployment("", nil), noGrace); err != nil { + return fmt.Errorf("failed to delete ssh-server Deployment: %w", err) + } + if err := cl.Delete(ctx, sshConfigMap(nil), noGrace); err != nil { + return 
fmt.Errorf("failed to delete ssh-server ConfigMap: %w", err) + } + if err := cl.Delete(ctx, sshService(), noGrace); err != nil { + return fmt.Errorf("failed to delete control Service: %w", err) + } + + return nil +} + +func sshDeployment(tag string, pubKey []byte) *appsv1.Deployment { + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ssh-server", + Namespace: ns, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](1), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "ssh-server", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "ssh-server", + }, + Annotations: map[string]string{ + "pubkey": hex.EncodeToString(pubKey), // Ensure new key triggers rollout. + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "ssh-server", + Image: fmt.Sprintf("alpine:%s", tag), + Command: []string{ + "sh", "-c", + "apk add openssh-server; ssh-keygen -A; /usr/sbin/sshd -D -e", + }, + Ports: []corev1.ContainerPort{ + { + Name: "ctrl-port-fwd", + ContainerPort: 31544, + Protocol: corev1.ProtocolTCP, + }, + { + Name: "ssh", + ContainerPort: 8022, + Protocol: corev1.ProtocolTCP, + }, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt(8022), + }, + }, + InitialDelaySeconds: 1, + PeriodSeconds: 1, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "sshd-config", + MountPath: "/etc/ssh/sshd_config.d/reverse-tunnel.conf", + SubPath: "reverse-tunnel.conf", + }, + { + Name: "sshd-config", + MountPath: keysFilePath, + SubPath: "authorized_keys", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "sshd-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: 
"ssh-server-config", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func sshConfigMap(pubKey []byte) *corev1.ConfigMap { + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "ssh-server-config", + Namespace: ns, + }, + Data: map[string]string{ + "reverse-tunnel.conf": sshdConfig, + "authorized_keys": string(pubKey), + }, + } +} + +func sshService() *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "control", + Namespace: ns, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "app": "ssh-server", + }, + Ports: []corev1.ServicePort{ + { + Name: "tunnel", + Port: 31544, + Protocol: corev1.ProtocolTCP, + }, + }, + }, + } +} diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 7c89633ac3fe9..695c8a85e6c76 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -65,12 +65,12 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 + go.yaml.in/yaml/v2 from sigs.k8s.io/yaml 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/net/tsaddr W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+ k8s.io/client-go/util/homedir from tailscale.com/cmd/tailscale/cli sigs.k8s.io/yaml from tailscale.com/cmd/tailscale/cli - sigs.k8s.io/yaml/goyaml.v2 from sigs.k8s.io/yaml software.sslmate.com/src/go-pkcs12 from tailscale.com/cmd/tailscale/cli software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12 tailscale.com from tailscale.com/version diff --git a/flake.nix b/flake.nix index 
484b7e0c593fe..dff1f9e90998a 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-DTf2GHnoVXDMA1vWbBzpHA4ipL7UB/n/2Yijj/beBF8= +# nix-direnv cache busting line: sha256-7Ak8bu6uQV+XmjzgW7yqFdptqocWYJS6grkCUAr1qlo= diff --git a/go.mod b/go.mod index 08062b220d5ec..1d018598df55a 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/evanw/esbuild v0.19.11 github.com/fogleman/gg v1.3.0 github.com/frankban/quicktest v1.14.6 - github.com/fxamacker/cbor/v2 v2.7.0 + github.com/fxamacker/cbor/v2 v2.9.0 github.com/gaissmai/bart v0.18.0 github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced github.com/go-logr/zapr v1.3.0 @@ -108,110 +108,159 @@ require ( golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b golang.org/x/mod v0.30.0 golang.org/x/net v0.47.0 - golang.org/x/oauth2 v0.30.0 + golang.org/x/oauth2 v0.31.0 golang.org/x/sync v0.18.0 golang.org/x/sys v0.38.0 golang.org/x/term v0.37.0 - golang.org/x/time v0.11.0 + golang.org/x/time v0.12.0 golang.org/x/tools v0.39.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 + helm.sh/helm/v3 v3.19.0 honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 - k8s.io/api v0.32.0 - k8s.io/apimachinery v0.32.0 - k8s.io/apiserver v0.32.0 - k8s.io/client-go v0.32.0 + k8s.io/api v0.34.0 + k8s.io/apimachinery v0.34.0 + k8s.io/apiserver v0.34.0 + k8s.io/client-go v0.34.0 sigs.k8s.io/controller-runtime v0.19.4 sigs.k8s.io/controller-tools v0.17.0 - sigs.k8s.io/yaml v1.4.0 + sigs.k8s.io/kind v0.30.0 + sigs.k8s.io/yaml v1.6.0 software.sslmate.com/src/go-pkcs12 v0.4.0 + tailscale.com/client/tailscale/v2 v2.0.0-20250925170215-115deaf34058 ) require ( 9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f // indirect + al.essio.dev/pkg/shellescape v1.5.1 // indirect github.com/4meepo/tagalign v1.3.3 // indirect 
github.com/Antonboom/testifylint v1.2.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/sprig v2.22.0+incompatible // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect github.com/alecthomas/go-check-sumtype v0.1.4 // indirect github.com/alexkohler/nakedret/v2 v2.0.4 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/boltdb/bolt v1.3.1 // indirect github.com/bombsimon/wsl/v4 v4.2.1 // indirect github.com/butuzov/mirror v1.1.0 // indirect github.com/catenacyber/perfsprint v0.7.1 // indirect github.com/ccojocar/zxcvbn-go v1.0.2 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect github.com/ckaznocha/intrange v0.1.0 // indirect + github.com/containerd/containerd v1.7.28 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v1.0.0-rc.1 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/cyphar/filepath-securejoin v0.3.6 // indirect + github.com/cyphar/filepath-securejoin v0.4.1 // indirect github.com/deckarep/golang-set/v2 v2.8.0 // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/evanphx/json-patch v5.9.11+incompatible // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ghostiam/protogetter v0.3.5 // indirect + github.com/go-errors/errors v1.4.2 // indirect + 
github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/goccy/go-yaml v1.12.0 // indirect github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/golangci/plugin-module-register v0.1.1 // indirect - github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-github/v66 v66.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/gorilla/securecookie v1.1.2 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/gosuri/uitable v0.0.4 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-metrics v0.5.4 // indirect github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/jjti/go-spancheck v0.5.3 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect github.com/kylelemons/godebug v1.1.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/macabu/inamedparam v0.1.3 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/moby/buildkit v0.20.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/monochromegane/go-gitignore 
v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/puzpuzpuz/xsync v1.5.2 // indirect + github.com/rubenv/sql-migrate v1.8.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect github.com/stacklok/frizbee v0.1.7 // indirect github.com/xen0n/gosmopolitan v1.2.2 // indirect + github.com/xlab/treeprint v1.2.0 // indirect github.com/ykadowak/zerologlint v0.1.5 // indirect go-simpler.org/musttag v0.9.0 // indirect go-simpler.org/sloglint v0.5.0 // indirect - go.etcd.io/bbolt v1.3.11 // indirect + go.etcd.io/bbolt v1.4.2 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect - go.opentelemetry.io/otel v1.33.0 // indirect - go.opentelemetry.io/otel/metric v1.33.0 // indirect - go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect go.uber.org/automaxprocs v1.5.3 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect + google.golang.org/grpc v1.72.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect - k8s.io/component-base v0.32.0 // indirect + k8s.io/cli-runtime v0.34.0 // indirect + 
k8s.io/component-base v0.34.0 // indirect + k8s.io/kubectl v0.34.0 // indirect + oras.land/oras-go/v2 v2.6.0 // indirect + sigs.k8s.io/kustomize/api v0.20.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect 4d63.com/gochecknoglobals v0.2.1 // indirect - dario.cat/mergo v1.0.0 // indirect + dario.cat/mergo v1.0.1 // indirect filippo.io/edwards25519 v1.1.0 // indirect github.com/Abirdcfly/dupword v0.0.14 // indirect github.com/AlekSi/pointer v1.2.0 github.com/Antonboom/errname v0.1.12 // indirect github.com/Antonboom/nilnil v0.1.7 // indirect - github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect + github.com/BurntSushi/toml v1.5.0 // indirect github.com/Djarvur/go-err113 v0.1.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect + github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/ProtonMail/go-crypto v1.1.3 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect @@ -253,14 +302,14 @@ require ( github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v27.5.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect - github.com/emicklei/go-restful/v3 v3.11.2 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/ettle/strcase v0.2.0 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.18.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect - 
github.com/fsnotify/fsnotify v1.7.0 + github.com/fsnotify/fsnotify v1.9.0 github.com/fzipp/gocyclo v0.6.0 // indirect github.com/go-critic/go-critic v0.11.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect @@ -281,14 +330,12 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect github.com/golangci/misspell v0.4.1 // indirect github.com/golangci/revgrep v0.5.2 // indirect github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/gofuzz v1.2.0 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 // indirect github.com/google/rpmpack v0.5.0 // indirect github.com/gordonklaus/ineffassign v0.1.0 // indirect @@ -340,7 +387,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/moricho/tparallel v0.3.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nakabonne/nestif v0.3.1 // indirect @@ -350,7 +397,7 @@ require ( github.com/nunnatsa/ginkgolinter v0.16.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pelletier/go-toml/v2 v2.2.0 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect 
github.com/pjbgf/sha1cd v0.3.0 // indirect @@ -373,7 +420,7 @@ require ( github.com/securego/gosec/v2 v2.19.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect - github.com/shopspring/decimal v1.3.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/tenv v1.7.1 // indirect @@ -381,15 +428,15 @@ require ( github.com/sonatard/noctx v0.0.2 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.9.1 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cobra v1.10.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/pflag v1.0.9 // indirect github.com/spf13/viper v1.16.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 github.com/subosito/gotenv v1.4.2 // indirect github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 @@ -423,14 +470,13 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 howett.net/plist v1.0.0 // indirect - k8s.io/apiextensions-apiserver v0.32.0 + k8s.io/apiextensions-apiserver v0.34.0 k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 + k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 mvdan.cc/gofumpt v0.6.0 // indirect 
mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect ) tool github.com/stacklok/frizbee diff --git a/go.mod.sri b/go.mod.sri index b36887eeffbc6..898bc8cc8c386 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-DTf2GHnoVXDMA1vWbBzpHA4ipL7UB/n/2Yijj/beBF8= +sha256-7Ak8bu6uQV+XmjzgW7yqFdptqocWYJS6grkCUAr1qlo= diff --git a/go.sum b/go.sum index 19f16c5cd5ce3..af9bca25f18d3 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,8 @@ 4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= 9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f h1:1C7nZuxUMNz7eiQALRfiqNOm04+m3edWlRff/BYHf0Q= 9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f/go.mod h1:hHyrZRryGqVdqrknjq5OWDLGCTJ2NeEvtrpR96mjraM= +al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= +al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -36,8 +38,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= +dario.cat/mergo v1.0.1/go.mod 
h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= @@ -49,6 +51,8 @@ github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= @@ -57,12 +61,14 @@ github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTo github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= github.com/Antonboom/testifylint v1.2.0 h1:015bxD8zc5iY8QwTp4+RG9I4kIbqwvGX9TrBbb7jGdM= github.com/Antonboom/testifylint v1.2.0/go.mod h1:rkmEqjqVnHDRNsinyN6fPSLnoajzFwsCcguJgwADBkw= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= -github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod 
h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= -github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= @@ -72,17 +78,20 @@ github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 h1:sATXp1x6/axKxz2Gjxv8M github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9 h1:1ltqoej5GtaWF8jaiA49HwsZD459jqm9YFz9ZtMFpQA= github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9/go.mod h1:7uhhqiBaR4CpN0k9rMjOtjpcfGd6DG2m04zQxKnWQ0I= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 
h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= +github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= @@ -124,6 +133,8 @@ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+ github.com/armon/go-proxyproto v0.0.0-20210323213023-7e956b284f0a/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 
v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= @@ -196,6 +207,8 @@ github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0= github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= @@ -218,6 +231,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= 
github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= @@ -237,8 +252,14 @@ github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c= +github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= +github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= @@ -247,6 +268,8 @@ github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6 github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf 
h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creachadair/mds v0.25.9 h1:080Hr8laN2h+l3NeVCGMBpXtIPnl9mz8e4HLraGPqtA= github.com/creachadair/mds v0.25.9/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs= @@ -259,8 +282,8 @@ github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= -github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= +github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/daixiang0/gci v0.12.3 h1:yOZI7VAxAGPQmkb1eqt5g/11SUlwoat1fSblGLmdiQc= github.com/daixiang0/gci v0.12.3/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -275,12 +298,18 @@ github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42 github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8= github.com/dgryski/go-metro 
v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= +github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= +github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY= github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= @@ -291,6 +320,10 @@ github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZ github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= 
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= @@ -299,8 +332,8 @@ github.com/elastic/crd-ref-docs v0.0.12 h1:F3seyncbzUz3rT3d+caeYWhumb5ojYQ6Bl0Z+ github.com/elastic/crd-ref-docs v0.0.12/go.mod h1:X83mMBdJt05heJUYiS3T0yJ/JkCuliuhSUNav5Gjo/U= github.com/elazarl/goproxy v1.2.3 h1:xwIyKHbaP5yfT6O9KIeYJR5549MXRQkoQMRXGztz8YQ= github.com/elazarl/goproxy v1.2.3/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= -github.com/emicklei/go-restful/v3 v3.11.2 h1:1onLa9DcsMYO9P+CXaL0dStDqQ2EHHXLiz+BtnqkLAU= -github.com/emicklei/go-restful/v3 v3.11.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -309,12 +342,14 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= github.com/ettle/strcase v0.2.0/go.mod 
h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= +github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/evanw/esbuild v0.19.11 h1:mbPO1VJ/df//jjUd+p/nRLYCpizXxXb2w/zZMShxa2k= github.com/evanw/esbuild v0.19.11/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= @@ -328,12 +363,14 @@ github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod 
h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo= @@ -346,6 +383,8 @@ github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-critic/go-critic v0.11.2 h1:81xH/2muBphEgPtcwH1p6QD+KzXl2tMSi3hXjBSxDnM= github.com/go-critic/go-critic v0.11.2/go.mod h1:OePaicfjsf+KPy33yq4gzv6CO7TEQ9Rom6ns1KsJnl8= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= @@ -357,6 +396,8 @@ github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0q github.com/go-gl/glfw 
v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced h1:Q311OHjMh/u5E2TITc++WlTP5We0xNseRMkHDyvhW7I= github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -388,6 +429,8 @@ github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7a github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= @@ -481,10 +524,10 @@ github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNF github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -533,7 +576,8 @@ github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAx github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/rpmpack v0.5.0 h1:L16KZ3QvkFGpYhmp23iQip+mx1X39foEsqszjMNBm8A= github.com/google/rpmpack v0.5.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -550,8 +594,14 @@ github.com/goreleaser/nfpm/v2 v2.33.1 h1:EkdAzZyVhAI9JC1vjmjjbmnNzyH1J6Cu4JCsA7Y github.com/goreleaser/nfpm/v2 v2.33.1/go.mod h1:8wwWWvJWmn84xo/Sqiv0aMvEGTHlHZTXTEuVSgQpkIM= github.com/gorilla/csrf v1.7.3 h1:BHWt6FTLZAb2HtWT5KDBf6qgpZzvtbp9QWDRKZMXJC0= github.com/gorilla/csrf v1.7.3/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= @@ -564,11 +614,18 @@ github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= +github.com/gosuri/uitable v0.0.4 
h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= @@ -581,6 +638,8 @@ github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9 github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod 
h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -591,6 +650,10 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/raft v1.7.2 h1:pyvxhfJ4R8VIAlHKvLoKQWElZspsCVT6YWuxVxsPAgc= @@ -603,7 +666,6 @@ github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= 
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hugelgupf/vmtest v0.0.0-20240216064925-0561770280a1 h1:jWoR2Yqg8tzM0v6LAiP7i1bikZJu3gxpgvu3g1Lw+a0= @@ -611,7 +673,6 @@ github.com/hugelgupf/vmtest v0.0.0-20240216064925-0561770280a1/go.mod h1:B63hDJM github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk= github.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -637,6 +698,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -694,6 +757,10 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= @@ -702,6 +769,10 @@ github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk= 
@@ -730,6 +801,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= @@ -743,22 +816,24 @@ github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE= github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod 
h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/buildkit v0.20.2 h1:qIeR47eQ1tzI1rwz0on3Xx2enRw/1CKjFhoONVcTlMA= github.com/moby/buildkit v0.20.2/go.mod h1:DhaF82FjwOElTftl0JUAJpH/SUIUx4UvcFncLeOtlDI= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -766,8 +841,11 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 
v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= @@ -776,6 +854,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= @@ -798,8 +878,8 @@ github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest 
v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= @@ -809,10 +889,16 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo= github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod 
h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0= @@ -835,6 +921,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polyfloyd/go-errorlint v1.4.8 h1:jiEjKDH33ouFktyez7sckv6pHWif9B7SuS8cutDXFHw= github.com/polyfloyd/go-errorlint v1.4.8/go.mod h1:NNCxFcFjZcw3xNjVdCchERkEM6Oz7wta2XJVxRftwO4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4= @@ -881,6 +969,12 @@ github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= +github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= +github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod 
h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= +github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -888,6 +982,9 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= +github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.3.1 h1:fH+fUg+ngsQO0ruZXXHnA/2aNllWA1whly4a6UvyzGE= github.com/ryancurrah/gomodguard v1.3.1/go.mod h1:DGFHzEhi6iJ0oIDfMuo3TgrS+L9gZvrEfmjjuelnRU0= @@ -899,6 +996,8 @@ github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/ github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod 
h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= github.com/sashamelentyev/usestdlibvars v1.25.0 h1:IK8SI2QyFzy/2OD2PYnhy84dpfNo9qADrRt6LH8vSzU= @@ -910,9 +1009,8 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -939,16 +1037,15 @@ github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCp github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.3.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= @@ -967,6 +1064,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -974,8 +1072,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/studio-b12/gowebdav v0.9.0 h1:1j1sc9gQnNxbXXM4M/CebPOX4aXYtr7MojAVcN4dHjU= github.com/studio-b12/gowebdav v0.9.0/go.mod h1:bHA7t77X/QFExdeAnDzK6vKM34kEZAcE1OX4MfiwjkE= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= @@ -1064,6 +1162,8 @@ github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HH github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= github.com/yagipy/maintidx 
v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= @@ -1087,8 +1187,8 @@ go-simpler.org/musttag v0.9.0 h1:Dzt6/tyP9ONr5g9h9P3cnYWCxeBFRkd0uJL/w+1Mxos= go-simpler.org/musttag v0.9.0/go.mod h1:gA9nThnalvNSKpEoyp3Ko4/vCX2xTpqKoUtNqXOnVR4= go-simpler.org/sloglint v0.5.0 h1:2YCcd+YMuYpuqthCgubcF5lBSjb6berc5VMOYUHKrpY= go-simpler.org/sloglint v0.5.0/go.mod h1:EUknX5s8iXqf18KQxKnaBHUPVriiPnOrPjjJcsaTcSQ= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= +go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= +go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1096,22 +1196,50 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= +go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= +go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= +go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= -go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 
h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= -go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= -go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= -go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= -go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= -go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= -go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod 
h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= +go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= +go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= +go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -1120,6 +1248,10 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= 
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= @@ -1133,7 +1265,6 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1232,8 +1363,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= +golang.org/x/oauth2 v0.31.0/go.mod 
h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1293,6 +1424,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1338,8 +1470,8 @@ golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= -golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1473,11 +1605,11 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw= -google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= +google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod 
h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1490,8 +1622,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= -google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= +google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1543,6 +1675,8 @@ gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= +helm.sh/helm/v3 v3.19.0 h1:krVyCGa8fa/wzTZgqw0DUiXuRT5BPdeqE/sQXujQ22k= +helm.sh/helm/v3 v3.19.0/go.mod h1:Lk/SfzN0w3a3C3o+TdAKrLwJ0wcZ//t1/SDXAvfgDdc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1554,28 +1688,34 @@ honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 h1:5SXjd4ET5dYijLa honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0/go.mod h1:EPDDhEZqVHhWuPI5zPAsjU0U7v9xNIWjoOVyZ5ZcniQ= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= -k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= -k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= -k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= -k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= -k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs= -k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag= -k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= -k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU= -k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM= +k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= +k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= +k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod 
h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg= +k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ= +k8s.io/cli-runtime v0.34.0 h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= +k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= +k8s.io/client-go v0.34.0 h1:YoWv5r7bsBfb0Hs2jh8SOvFbKzzxyNo0nSb0zC19KZo= +k8s.io/client-go v0.34.0/go.mod h1:ozgMnEKXkRjeMvBZdV1AijMHLTh3pbACPvK7zFR+QQY= +k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= +k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= +k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= +k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= +k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo= mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= mvdan.cc/unparam 
v0.0.0-20240104100049-c549a3470d14 h1:zCr3iRRgdk5eIikZNDphGcM6KGVTx3Yu+/Uu9Es254w= mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14/go.mod h1:ZzZjEpJDOmx8TdVU6umamY3Xy0UAQUI2DHbf05USVbI= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= @@ -1583,11 +1723,21 @@ sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGF sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/controller-tools v0.17.0 h1:KaEQZbhrdY6J3zLBHplt+0aKUp8PeIttlhtF2UDo6bI= sigs.k8s.io/controller-tools v0.17.0/go.mod h1:SKoWY8rwGWDzHtfnhmOwljn6fViG0JF7/xmnxpklgjo= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY= +sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= +sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= +sigs.k8s.io/kustomize/api 
v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= +sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= +sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +tailscale.com/client/tailscale/v2 v2.0.0-20250925170215-115deaf34058 h1:X78yMWHEQLo0iFspwDpdbfNIfAP8thmIBrplbd3/0lk= +tailscale.com/client/tailscale/v2 v2.0.0-20250925170215-115deaf34058/go.mod h1:RkAl+CyJiu437uUelFWW/2wL+EgZ6Vd15S1f+IitGr4= diff --git a/shell.nix b/shell.nix index 569057dbd3bb1..20c6af763ab44 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-DTf2GHnoVXDMA1vWbBzpHA4ipL7UB/n/2Yijj/beBF8= +# nix-direnv cache busting line: sha256-7Ak8bu6uQV+XmjzgW7yqFdptqocWYJS6grkCUAr1qlo= From 5be02ee6f858416c2c494472069029c198258b32 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 8 Jan 2026 14:07:51 +0000 Subject: [PATCH 0850/1093] cmd/k8s-operator/e2e,go.mod: remove client v2 dependency It's not worth adding the v2 client just for these e2e tests. Remove that dependency for now to keep a clear separation, but we should revive the v2 client version if we ever decide to take that dependency for the tailscale/tailscale repo as a whole. 
Updates tailscale/corp#32085 Change-Id: Ic51ce233d5f14ce2d25f31a6c4bb9cf545057dd0 Signed-off-by: Tom Proctor --- cmd/k8s-operator/e2e/setup.go | 81 +++++++++++++++++++++-------------- flake.nix | 2 +- go.mod | 1 - go.mod.sri | 2 +- go.sum | 2 - shell.nix | 2 +- 6 files changed, 53 insertions(+), 37 deletions(-) diff --git a/cmd/k8s-operator/e2e/setup.go b/cmd/k8s-operator/e2e/setup.go index 287ef4969c497..00e75ddd5b3eb 100644 --- a/cmd/k8s-operator/e2e/setup.go +++ b/cmd/k8s-operator/e2e/setup.go @@ -4,12 +4,13 @@ package e2e import ( + "bytes" "context" "crypto/rand" "crypto/tls" "crypto/x509" _ "embed" - jsonv1 "encoding/json" + "encoding/json" "flag" "fmt" "io" @@ -51,7 +52,7 @@ import ( "sigs.k8s.io/kind/pkg/cluster" "sigs.k8s.io/kind/pkg/cluster/nodeutils" "sigs.k8s.io/kind/pkg/cmd" - "tailscale.com/client/tailscale/v2" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" @@ -66,9 +67,9 @@ const ( ) var ( - tsClient = &tailscale.Client{Tailnet: "-"} // For API calls to control. - tnClient *tsnet.Server // For testing real tailnet traffic. - kubeClient client.WithWatch // For k8s API calls. + tsClient *tailscale.Client // For API calls to control. + tnClient *tsnet.Server // For testing real tailnet traffic. + kubeClient client.WithWatch // For k8s API calls. //go:embed certs/pebble.minica.crt pebbleMiniCACert []byte @@ -241,7 +242,7 @@ func runTests(m *testing.M) (int, error) { var apiKeyData struct { APIKey string `json:"apiKey"` } - if err := jsonv1.Unmarshal(b, &apiKeyData); err != nil { + if err := json.Unmarshal(b, &apiKeyData); err != nil { return 0, fmt.Errorf("failed to parse api-key.json: %w", err) } if apiKeyData.APIKey == "" { @@ -249,28 +250,48 @@ func runTests(m *testing.M) (int, error) { } // Finish setting up tsClient. 
- baseURL, err := url.Parse("http://localhost:31544") - if err != nil { - return 0, fmt.Errorf("parse url: %w", err) - } - tsClient.BaseURL = baseURL - tsClient.APIKey = apiKeyData.APIKey - tsClient.HTTP = &http.Client{} + tsClient = tailscale.NewClient("-", tailscale.APIKey(apiKeyData.APIKey)) + tsClient.BaseURL = "http://localhost:31544" // Set ACLs and create OAuth client. - if err := tsClient.PolicyFile().Set(ctx, string(requiredACLs), ""); err != nil { + req, _ := http.NewRequest("POST", tsClient.BuildTailnetURL("acl"), bytes.NewReader(requiredACLs)) + resp, err := tsClient.Do(req) + if err != nil { return 0, fmt.Errorf("failed to set ACLs: %w", err) } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + b, _ := io.ReadAll(resp.Body) + return 0, fmt.Errorf("HTTP %d setting ACLs: %s", resp.StatusCode, string(b)) + } logger.Infof("ACLs configured") - key, err := tsClient.Keys().CreateOAuthClient(ctx, tailscale.CreateOAuthClientRequest{ - Scopes: []string{"auth_keys", "devices:core", "services"}, - Tags: []string{"tag:k8s-operator"}, - Description: "k8s-operator client for e2e tests", + reqBody, err := json.Marshal(map[string]any{ + "keyType": "client", + "scopes": []string{"auth_keys", "devices:core", "services"}, + "tags": []string{"tag:k8s-operator"}, + "description": "k8s-operator client for e2e tests", }) + if err != nil { + return 0, fmt.Errorf("failed to marshal OAuth client creation request: %w", err) + } + req, _ = http.NewRequest("POST", tsClient.BuildTailnetURL("keys"), bytes.NewReader(reqBody)) + resp, err = tsClient.Do(req) if err != nil { return 0, fmt.Errorf("failed to create OAuth client: %w", err) } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + b, _ := io.ReadAll(resp.Body) + return 0, fmt.Errorf("HTTP %d creating OAuth client: %s", resp.StatusCode, string(b)) + } + var key struct { + ID string `json:"id"` + Key string `json:"key"` + } + if err := json.NewDecoder(resp.Body).Decode(&key); err != nil { + return 
0, fmt.Errorf("failed to decode OAuth client creation response: %w", err) + } clientID = key.ID clientSecret = key.Key } else { @@ -290,12 +311,14 @@ func runTests(m *testing.M) (int, error) { TokenURL: fmt.Sprintf("%s/api/v2/oauth/token", ipn.DefaultControlURL), Scopes: []string{"auth_keys"}, } - baseURL, _ := url.Parse(ipn.DefaultControlURL) - tsClient = &tailscale.Client{ - Tailnet: "-", - HTTP: credentials.Client(ctx), - BaseURL: baseURL, + tk, err := credentials.Token(ctx) + if err != nil { + return 0, fmt.Errorf("failed to get OAuth token: %w", err) } + // An access token will last for an hour which is plenty of time for + // the tests to run. No need for token refresh logic. + tsClient = tailscale.NewClient("-", tailscale.APIKey(tk.AccessToken)) + tsClient.BaseURL = "http://localhost:31544" } var ossTag string @@ -422,22 +445,18 @@ func runTests(m *testing.M) (int, error) { caps.Devices.Create.Ephemeral = true caps.Devices.Create.Tags = []string{"tag:k8s"} - authKey, err := tsClient.Keys().CreateAuthKey(ctx, tailscale.CreateKeyRequest{ - Capabilities: caps, - ExpirySeconds: 600, - Description: "e2e test authkey", - }) + authKey, authKeyMeta, err := tsClient.CreateKey(ctx, caps) if err != nil { return 0, err } - defer tsClient.Keys().Delete(context.Background(), authKey.ID) + defer tsClient.DeleteKey(context.Background(), authKeyMeta.ID) tnClient = &tsnet.Server{ - ControlURL: tsClient.BaseURL.String(), + ControlURL: tsClient.BaseURL, Hostname: "test-proxy", Ephemeral: true, Store: &mem.Store{}, - AuthKey: authKey.Key, + AuthKey: authKey, } _, err = tnClient.Up(ctx) if err != nil { diff --git a/flake.nix b/flake.nix index dff1f9e90998a..68aaa15e913be 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-7Ak8bu6uQV+XmjzgW7yqFdptqocWYJS6grkCUAr1qlo= +# nix-direnv cache busting line: sha256-kXdjsA1QIXS13vMBTMbxBJK4tewd6rVz0Csod+HtN10= diff --git a/go.mod b/go.mod index 1d018598df55a..3b3df755446cd 100644 --- 
a/go.mod +++ b/go.mod @@ -129,7 +129,6 @@ require ( sigs.k8s.io/kind v0.30.0 sigs.k8s.io/yaml v1.6.0 software.sslmate.com/src/go-pkcs12 v0.4.0 - tailscale.com/client/tailscale/v2 v2.0.0-20250925170215-115deaf34058 ) require ( diff --git a/go.mod.sri b/go.mod.sri index 898bc8cc8c386..0ac77cb8b9ce4 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-7Ak8bu6uQV+XmjzgW7yqFdptqocWYJS6grkCUAr1qlo= +sha256-kXdjsA1QIXS13vMBTMbxBJK4tewd6rVz0Csod+HtN10= diff --git a/go.sum b/go.sum index af9bca25f18d3..4e2895d9e7dd6 100644 --- a/go.sum +++ b/go.sum @@ -1739,5 +1739,3 @@ sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= -tailscale.com/client/tailscale/v2 v2.0.0-20250925170215-115deaf34058 h1:X78yMWHEQLo0iFspwDpdbfNIfAP8thmIBrplbd3/0lk= -tailscale.com/client/tailscale/v2 v2.0.0-20250925170215-115deaf34058/go.mod h1:RkAl+CyJiu437uUelFWW/2wL+EgZ6Vd15S1f+IitGr4= diff --git a/shell.nix b/shell.nix index 20c6af763ab44..4f2d598517103 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-7Ak8bu6uQV+XmjzgW7yqFdptqocWYJS6grkCUAr1qlo= +# nix-direnv cache busting line: sha256-kXdjsA1QIXS13vMBTMbxBJK4tewd6rVz0Csod+HtN10= From 5019dc8eb2eac7b5ffd15837e6071b7e0589397b Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 8 Jan 2026 15:39:29 +0000 Subject: [PATCH 0851/1093] go.mod: bump mkctr dep (#18365) Brings in tailscale/mkctr#29. 
Updates tailscale/corp#32085 Change-Id: I90160ed1cdc47118ac8fd0712d63a7b590e739d3 Signed-off-by: Tom Proctor --- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.nix b/flake.nix index 68aaa15e913be..9ee1b8f4f4245 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-kXdjsA1QIXS13vMBTMbxBJK4tewd6rVz0Csod+HtN10= +# nix-direnv cache busting line: sha256-PVE4oEwcoTbXbPJnBcVgBeXITWlnkhBf+UT5nqSeANM= diff --git a/go.mod b/go.mod index 3b3df755446cd..68f8de18804f2 100644 --- a/go.mod +++ b/go.mod @@ -88,7 +88,7 @@ require ( github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a - github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830 + github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc github.com/tailscale/setec v0.0.0-20251203133219-2ab774e4129a diff --git a/go.mod.sri b/go.mod.sri index 0ac77cb8b9ce4..a385972677dfc 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-kXdjsA1QIXS13vMBTMbxBJK4tewd6rVz0Csod+HtN10= +sha256-PVE4oEwcoTbXbPJnBcVgBeXITWlnkhBf+UT5nqSeANM= diff --git a/go.sum b/go.sum index 4e2895d9e7dd6..e7fc54e9faa3b 100644 --- a/go.sum +++ b/go.sum @@ -1094,8 +1094,8 @@ github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPx github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= 
-github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830 h1:SwZ72kr1oRzzSPA5PYB4hzPh22UI0nm0dapn3bHaUPs= -github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830/go.mod h1:qTslktI+Qh9hXo7ZP8xLkl5V8AxUMfxG0xLtkCFLxnw= +github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b h1:QKqCnmp0qHWUHySySKjpuhZANzRn7XrTVZWUuUgJ3lQ= +github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b/go.mod h1:4st7fy3NTWcWsQdOC69JcHK4UXnncgcxSOvSR8aD8a0= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= diff --git a/shell.nix b/shell.nix index 4f2d598517103..4f4714fea8475 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-kXdjsA1QIXS13vMBTMbxBJK4tewd6rVz0Csod+HtN10= +# nix-direnv cache busting line: sha256-PVE4oEwcoTbXbPJnBcVgBeXITWlnkhBf+UT5nqSeANM= From 6aac87a84cfdf3f9f6cbe6fd159117e8e2e3be4a Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Thu, 18 Dec 2025 12:22:11 -0500 Subject: [PATCH 0852/1093] net/portmapper, go.mod: unfork our goupnp dependency Updates #7436 Signed-off-by: Andrew Dunham --- cmd/k8s-operator/depaware.txt | 12 +++--- cmd/tailscale/depaware.txt | 14 +++---- cmd/tailscaled/depaware.txt | 12 +++--- cmd/tsidp/depaware.txt | 16 +++---- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 +- licenses/android.md | 2 +- licenses/apple.md | 2 +- net/portmapper/legacy_upnp.go | 52 +++++++++++------------ net/portmapper/select_test.go | 6 +-- net/portmapper/upnp.go | 78 +++++++++++++++++++++++++---------- shell.nix | 2 +- tsnet/depaware.txt | 16 +++---- tstest/jsdeps/jsdeps_test.go | 12 +++--- 16 files changed, 134 insertions(+), 100 deletions(-) diff --git 
a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 2f909ee8e0d50..aa9ad2fb4b40c 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -55,6 +55,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/google/gnostic-models/openapiv3 from k8s.io/kube-openapi/pkg/handler3+ github.com/google/uuid from github.com/prometheus-community/pro-bing+ github.com/hdevalence/ed25519consensus from tailscale.com/tka + github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ + github.com/huin/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/huin/goupnp/httpu from github.com/huin/goupnp+ + github.com/huin/goupnp/scpd from github.com/huin/goupnp + github.com/huin/goupnp/soap from github.com/huin/goupnp+ + github.com/huin/goupnp/ssdp from github.com/huin/goupnp github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -103,12 +109,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ - github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper - github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp - github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile+ LD 
github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 695c8a85e6c76..1a6a1a52cea07 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -35,6 +35,12 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/golang/groupcache/lru from tailscale.com/net/dnscache DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ + github.com/huin/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/huin/goupnp/httpu from github.com/huin/goupnp+ + github.com/huin/goupnp/scpd from github.com/huin/goupnp + github.com/huin/goupnp/soap from github.com/huin/goupnp+ + github.com/huin/goupnp/ssdp from github.com/huin/goupnp L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli @@ -55,12 +61,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ - github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper - github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp - github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ - 
github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ @@ -463,7 +463,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/tailscale/goupnp/httpu+ + regexp from github.com/huin/goupnp/httpu+ regexp/syntax from regexp runtime from archive/tar+ runtime/debug from tailscale.com+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 13c1f5daf574b..ed8f6a5125ece 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -125,6 +125,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ + github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ + github.com/huin/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/huin/goupnp/httpu from github.com/huin/goupnp+ + github.com/huin/goupnp/scpd from github.com/huin/goupnp + github.com/huin/goupnp/soap from github.com/huin/goupnp+ + github.com/huin/goupnp/ssdp from github.com/huin/goupnp L 💣 github.com/illarion/gonotify/v3 from tailscale.com/feature/linuxdnsfight L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap @@ -168,12 +174,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from 
github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ - github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper - github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp - github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index aa5d633468a49..24069551eb890 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -30,6 +30,12 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ D github.com/google/uuid from github.com/prometheus-community/pro-bing github.com/hdevalence/ed25519consensus from tailscale.com/tka + github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ + github.com/huin/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/huin/goupnp/httpu from github.com/huin/goupnp+ + github.com/huin/goupnp/scpd from github.com/huin/goupnp + github.com/huin/goupnp/soap from github.com/huin/goupnp+ + github.com/huin/goupnp/ssdp from github.com/huin/goupnp L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -53,12 +59,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar W 💣 github.com/tailscale/go-winio/internal/socket from 
github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ - github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper - github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp - github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web @@ -471,7 +471,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/tailscale/goupnp+ + encoding/xml from github.com/huin/goupnp+ errors from bufio+ expvar from tailscale.com/health+ flag from tailscale.com/cmd/tsidp+ @@ -562,7 +562,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar path from debug/dwarf+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from github.com/tailscale/goupnp/httpu+ + regexp from github.com/huin/goupnp/httpu+ regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/coder/websocket/internal/xsync+ diff --git a/flake.nix b/flake.nix index 9ee1b8f4f4245..dd8016b4eb362 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-PVE4oEwcoTbXbPJnBcVgBeXITWlnkhBf+UT5nqSeANM= +# nix-direnv cache busting line: sha256-knSIes9pFVkVfK5hcBG9BSR1ueH+yPpx4hv/UsyaW2M= diff --git a/go.mod b/go.mod index 68f8de18804f2..c8be839c39083 
100644 --- a/go.mod +++ b/go.mod @@ -56,6 +56,7 @@ require ( github.com/hashicorp/raft v1.7.2 github.com/hashicorp/raft-boltdb/v2 v2.3.1 github.com/hdevalence/ed25519consensus v0.2.0 + github.com/huin/goupnp v1.3.0 github.com/illarion/gonotify/v3 v3.0.2 github.com/inetaf/tcpproxy v0.0.0-20250203165043-ded522cbd03f github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 @@ -86,7 +87,6 @@ require ( github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 - github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 diff --git a/go.mod.sri b/go.mod.sri index a385972677dfc..fd2ab9d7a3f48 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-PVE4oEwcoTbXbPJnBcVgBeXITWlnkhBf+UT5nqSeANM= +sha256-knSIes9pFVkVfK5hcBG9BSR1ueH+yPpx4hv/UsyaW2M= diff --git a/go.sum b/go.sum index e7fc54e9faa3b..19703e072a593 100644 --- a/go.sum +++ b/go.sum @@ -670,6 +670,8 @@ github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hugelgupf/vmtest v0.0.0-20240216064925-0561770280a1 h1:jWoR2Yqg8tzM0v6LAiP7i1bikZJu3gxpgvu3g1Lw+a0= github.com/hugelgupf/vmtest v0.0.0-20240216064925-0561770280a1/go.mod h1:B63hDJMhTupLWCHwopAyEo7wRFowx9kOc8m8j1sfOqE= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk= 
github.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U= @@ -1090,8 +1092,6 @@ github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 h1:/V2rCMMWcsjY github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41/go.mod h1:/roCdA6gg6lQyw/Oz6gIIGu3ggJKYhF+WC/AQReE5XQ= github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM= github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b h1:QKqCnmp0qHWUHySySKjpuhZANzRn7XrTVZWUuUgJ3lQ= diff --git a/licenses/android.md b/licenses/android.md index d4d8c9d7b5c5f..4dc8e6c6de06c 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -27,7 +27,7 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) + - [github.com/huin/goupnp](https://pkg.go.dev/github.com/huin/goupnp) ([BSD-2-Clause](https://github.com/huin/goupnp/blob/v1.3.0/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 6bb109f776c06..c3f2d3bb7a3c3 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -59,7 +59,7 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) + - [github.com/huin/goupnp](https://pkg.go.dev/github.com/huin/goupnp) ([BSD-2-Clause](https://github.com/huin/goupnp/blob/v1.3.0/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) diff --git a/net/portmapper/legacy_upnp.go b/net/portmapper/legacy_upnp.go index 042ced16cbabe..2ce92dc65d6b3 100644 --- a/net/portmapper/legacy_upnp.go +++ b/net/portmapper/legacy_upnp.go @@ -10,8 +10,8 @@ package portmapper import ( "context" - "github.com/tailscale/goupnp" - "github.com/tailscale/goupnp/soap" + "github.com/huin/goupnp" + "github.com/huin/goupnp/soap" ) const ( @@ -32,8 +32,8 @@ type legacyWANPPPConnection1 struct { goupnp.ServiceClient } -// AddPortMapping implements upnpClient -func (client *legacyWANPPPConnection1) AddPortMapping( +// AddPortMappingCtx implements upnpClient +func (client *legacyWANPPPConnection1) AddPortMappingCtx( ctx context.Context, NewRemoteHost 
string, NewExternalPort uint16, @@ -85,11 +85,11 @@ func (client *legacyWANPPPConnection1) AddPortMapping( response := any(nil) // Perform the SOAP call. - return client.SOAPClient.PerformAction(ctx, urn_LegacyWANPPPConnection_1, "AddPortMapping", request, response) + return client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANPPPConnection_1, "AddPortMapping", request, response) } -// DeletePortMapping implements upnpClient -func (client *legacyWANPPPConnection1) DeletePortMapping(ctx context.Context, NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) { +// DeletePortMappingCtx implements upnpClient +func (client *legacyWANPPPConnection1) DeletePortMappingCtx(ctx context.Context, NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) { // Request structure. request := &struct { NewRemoteHost string @@ -110,11 +110,11 @@ func (client *legacyWANPPPConnection1) DeletePortMapping(ctx context.Context, Ne response := any(nil) // Perform the SOAP call. - return client.SOAPClient.PerformAction(ctx, urn_LegacyWANPPPConnection_1, "DeletePortMapping", request, response) + return client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANPPPConnection_1, "DeletePortMapping", request, response) } -// GetExternalIPAddress implements upnpClient -func (client *legacyWANPPPConnection1) GetExternalIPAddress(ctx context.Context) (NewExternalIPAddress string, err error) { +// GetExternalIPAddressCtx implements upnpClient +func (client *legacyWANPPPConnection1) GetExternalIPAddressCtx(ctx context.Context) (NewExternalIPAddress string, err error) { // Request structure. request := any(nil) @@ -124,7 +124,7 @@ func (client *legacyWANPPPConnection1) GetExternalIPAddress(ctx context.Context) }{} // Perform the SOAP call. 
- if err = client.SOAPClient.PerformAction(ctx, urn_LegacyWANPPPConnection_1, "GetExternalIPAddress", request, response); err != nil { + if err = client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANPPPConnection_1, "GetExternalIPAddress", request, response); err != nil { return } @@ -134,8 +134,8 @@ func (client *legacyWANPPPConnection1) GetExternalIPAddress(ctx context.Context) return } -// GetStatusInfo implements upnpClient -func (client *legacyWANPPPConnection1) GetStatusInfo(ctx context.Context) (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) { +// GetStatusInfoCtx implements upnpClient +func (client *legacyWANPPPConnection1) GetStatusInfoCtx(ctx context.Context) (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) { // Request structure. request := any(nil) @@ -147,7 +147,7 @@ func (client *legacyWANPPPConnection1) GetStatusInfo(ctx context.Context) (NewCo }{} // Perform the SOAP call. - if err = client.SOAPClient.PerformAction(ctx, urn_LegacyWANPPPConnection_1, "GetStatusInfo", request, response); err != nil { + if err = client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANPPPConnection_1, "GetStatusInfo", request, response); err != nil { return } @@ -171,8 +171,8 @@ type legacyWANIPConnection1 struct { goupnp.ServiceClient } -// AddPortMapping implements upnpClient -func (client *legacyWANIPConnection1) AddPortMapping( +// AddPortMappingCtx implements upnpClient +func (client *legacyWANIPConnection1) AddPortMappingCtx( ctx context.Context, NewRemoteHost string, NewExternalPort uint16, @@ -224,11 +224,11 @@ func (client *legacyWANIPConnection1) AddPortMapping( response := any(nil) // Perform the SOAP call. 
- return client.SOAPClient.PerformAction(ctx, urn_LegacyWANIPConnection_1, "AddPortMapping", request, response) + return client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANIPConnection_1, "AddPortMapping", request, response) } -// DeletePortMapping implements upnpClient -func (client *legacyWANIPConnection1) DeletePortMapping(ctx context.Context, NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) { +// DeletePortMappingCtx implements upnpClient +func (client *legacyWANIPConnection1) DeletePortMappingCtx(ctx context.Context, NewRemoteHost string, NewExternalPort uint16, NewProtocol string) (err error) { // Request structure. request := &struct { NewRemoteHost string @@ -249,11 +249,11 @@ func (client *legacyWANIPConnection1) DeletePortMapping(ctx context.Context, New response := any(nil) // Perform the SOAP call. - return client.SOAPClient.PerformAction(ctx, urn_LegacyWANIPConnection_1, "DeletePortMapping", request, response) + return client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANIPConnection_1, "DeletePortMapping", request, response) } -// GetExternalIPAddress implements upnpClient -func (client *legacyWANIPConnection1) GetExternalIPAddress(ctx context.Context) (NewExternalIPAddress string, err error) { +// GetExternalIPAddressCtx implements upnpClient +func (client *legacyWANIPConnection1) GetExternalIPAddressCtx(ctx context.Context) (NewExternalIPAddress string, err error) { // Request structure. request := any(nil) @@ -263,7 +263,7 @@ func (client *legacyWANIPConnection1) GetExternalIPAddress(ctx context.Context) }{} // Perform the SOAP call. 
- if err = client.SOAPClient.PerformAction(ctx, urn_LegacyWANIPConnection_1, "GetExternalIPAddress", request, response); err != nil { + if err = client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANIPConnection_1, "GetExternalIPAddress", request, response); err != nil { return } @@ -273,8 +273,8 @@ func (client *legacyWANIPConnection1) GetExternalIPAddress(ctx context.Context) return } -// GetStatusInfo implements upnpClient -func (client *legacyWANIPConnection1) GetStatusInfo(ctx context.Context) (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) { +// GetStatusInfoCtx implements upnpClient +func (client *legacyWANIPConnection1) GetStatusInfoCtx(ctx context.Context) (NewConnectionStatus string, NewLastConnectionError string, NewUptime uint32, err error) { // Request structure. request := any(nil) @@ -286,7 +286,7 @@ func (client *legacyWANIPConnection1) GetStatusInfo(ctx context.Context) (NewCon }{} // Perform the SOAP call. - if err = client.SOAPClient.PerformAction(ctx, urn_LegacyWANIPConnection_1, "GetStatusInfo", request, response); err != nil { + if err = client.SOAPClient.PerformActionCtx(ctx, urn_LegacyWANIPConnection_1, "GetStatusInfo", request, response); err != nil { return } diff --git a/net/portmapper/select_test.go b/net/portmapper/select_test.go index af2e35cbfb764..cc685bc253d3d 100644 --- a/net/portmapper/select_test.go +++ b/net/portmapper/select_test.go @@ -11,8 +11,8 @@ import ( "strings" "testing" - "github.com/tailscale/goupnp" - "github.com/tailscale/goupnp/dcps/internetgateway2" + "github.com/huin/goupnp" + "github.com/huin/goupnp/dcps/internetgateway2" ) // NOTE: this is in a distinct file because the various string constants are @@ -168,7 +168,7 @@ func TestSelectBestService(t *testing.T) { // Ensure that we're using the HTTP client that talks to our test IGD server ctx := context.Background() - ctx = goupnp.WithHTTPClient(ctx, c.upnpHTTPClientLocked()) + ctx = upnpHTTPClientKey.WithValue(ctx, 
c.upnpHTTPClientLocked()) loc := mustParseURL(igd.ts.URL) rootDev := mustParseRootDev(t, rootDesc, loc) diff --git a/net/portmapper/upnp.go b/net/portmapper/upnp.go index d65d6e94d70fd..34140e9473460 100644 --- a/net/portmapper/upnp.go +++ b/net/portmapper/upnp.go @@ -25,15 +25,47 @@ import ( "sync/atomic" "time" - "github.com/tailscale/goupnp" - "github.com/tailscale/goupnp/dcps/internetgateway2" - "github.com/tailscale/goupnp/soap" + "github.com/huin/goupnp" + "github.com/huin/goupnp/dcps/internetgateway2" + "github.com/huin/goupnp/soap" "tailscale.com/envknob" "tailscale.com/net/netns" "tailscale.com/types/logger" + "tailscale.com/util/ctxkey" "tailscale.com/util/mak" ) +// upnpHTTPClientKey is a context key for storing an HTTP client to use +// for UPnP requests. This allows us to use a custom HTTP client (with custom +// dialer, timeouts, etc.) while using the upstream goupnp library which only +// supports a global HTTPClientDefault. +var upnpHTTPClientKey = ctxkey.New[*http.Client]("portmapper.upnpHTTPClient", nil) + +// delegatingRoundTripper implements http.RoundTripper by delegating to +// the HTTP client stored in the request's context. This allows us to use +// per-request HTTP client configuration with the upstream goupnp library. +type delegatingRoundTripper struct { + inner *http.Client +} + +func (d delegatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if c := upnpHTTPClientKey.Value(req.Context()); c != nil { + return c.Transport.RoundTrip(req) + } + return d.inner.Do(req) +} + +func init() { + // The upstream goupnp library uses a global HTTP client for all + // requests, while we want to be able to use a per-Client + // [http.Client]. We replace its global HTTP client with one that + // delegates to the HTTP client stored in the request's context. 
+ old := goupnp.HTTPClientDefault + goupnp.HTTPClientDefault = &http.Client{ + Transport: delegatingRoundTripper{old}, + } +} + // References: // // WANIP Connection v2: http://upnp.org/specs/gw/UPnP-gw-WANIPConnection-v2-Service.pdf @@ -79,14 +111,17 @@ func (u *upnpMapping) MappingDebug() string { u.loc) } func (u *upnpMapping) Release(ctx context.Context) { - u.client.DeletePortMapping(ctx, "", u.external.Port(), upnpProtocolUDP) + u.client.DeletePortMappingCtx(ctx, "", u.external.Port(), upnpProtocolUDP) } // upnpClient is an interface over the multiple different clients exported by goupnp, // exposing the functions we need for portmapping. Those clients are auto-generated from XML-specs, // which is why they're not very idiomatic. +// +// The method names use the *Ctx suffix to match the upstream goupnp library's convention +// for context-aware methods. type upnpClient interface { - AddPortMapping( + AddPortMappingCtx( ctx context.Context, // remoteHost is the remote device sending packets to this device, in the format of x.x.x.x. @@ -119,9 +154,9 @@ type upnpClient interface { leaseDurationSec uint32, ) error - DeletePortMapping(ctx context.Context, remoteHost string, externalPort uint16, protocol string) error - GetExternalIPAddress(ctx context.Context) (externalIPAddress string, err error) - GetStatusInfo(ctx context.Context) (status string, lastConnError string, uptime uint32, err error) + DeletePortMappingCtx(ctx context.Context, remoteHost string, externalPort uint16, protocol string) error + GetExternalIPAddressCtx(ctx context.Context) (externalIPAddress string, err error) + GetStatusInfoCtx(ctx context.Context) (status string, lastConnError string, uptime uint32, err error) } // tsPortMappingDesc gets sent to UPnP clients as a human-readable label for the portmapping. @@ -171,7 +206,7 @@ func addAnyPortMapping( // First off, try using AddAnyPortMapping; if there's a conflict, the // router will pick another port and return it. 
if upnp, ok := upnp.(*internetgateway2.WANIPConnection2); ok { - return upnp.AddAnyPortMapping( + return upnp.AddAnyPortMappingCtx( ctx, "", externalPort, @@ -186,7 +221,7 @@ func addAnyPortMapping( // Fall back to using AddPortMapping, which requests a mapping to/from // a specific external port. - err = upnp.AddPortMapping( + err = upnp.AddPortMappingCtx( ctx, "", externalPort, @@ -244,7 +279,7 @@ func getUPnPRootDevice(ctx context.Context, logf logger.Logf, debug DebugKnobs, defer cancel() // This part does a network fetch. - root, err := goupnp.DeviceByURL(ctx, u) + root, err := goupnp.DeviceByURLCtx(ctx, u) if err != nil { return nil, nil, err } @@ -257,8 +292,7 @@ func getUPnPRootDevice(ctx context.Context, logf logger.Logf, debug DebugKnobs, // // loc is the parsed location that was used to fetch the given RootDevice. // -// The provided ctx is not retained in the returned upnpClient, but -// its associated HTTP client is (if set via goupnp.WithHTTPClient). +// The provided ctx is not retained in the returned upnpClient. func selectBestService(ctx context.Context, logf logger.Logf, root *goupnp.RootDevice, loc *url.URL) (client upnpClient, err error) { method := "none" defer func() { @@ -274,9 +308,9 @@ func selectBestService(ctx context.Context, logf logger.Logf, root *goupnp.RootD // First, get all available clients from the device, and append to our // list of possible clients. Order matters here; we want to prefer // WANIPConnection2 over WANIPConnection1 or WANPPPConnection. 
- wanIP2, _ := internetgateway2.NewWANIPConnection2ClientsFromRootDevice(ctx, root, loc) - wanIP1, _ := internetgateway2.NewWANIPConnection1ClientsFromRootDevice(ctx, root, loc) - wanPPP, _ := internetgateway2.NewWANPPPConnection1ClientsFromRootDevice(ctx, root, loc) + wanIP2, _ := internetgateway2.NewWANIPConnection2ClientsFromRootDevice(root, loc) + wanIP1, _ := internetgateway2.NewWANIPConnection1ClientsFromRootDevice(root, loc) + wanPPP, _ := internetgateway2.NewWANPPPConnection1ClientsFromRootDevice(root, loc) var clients []upnpClient for _, v := range wanIP2 { @@ -291,12 +325,12 @@ func selectBestService(ctx context.Context, logf logger.Logf, root *goupnp.RootD // These are legacy services that were deprecated in 2015, but are // still in use by older devices; try them just in case. - legacyClients, _ := goupnp.NewServiceClientsFromRootDevice(ctx, root, loc, urn_LegacyWANPPPConnection_1) + legacyClients, _ := goupnp.NewServiceClientsFromRootDevice(root, loc, urn_LegacyWANPPPConnection_1) metricUPnPSelectLegacy.Add(int64(len(legacyClients))) for _, client := range legacyClients { clients = append(clients, &legacyWANPPPConnection1{client}) } - legacyClients, _ = goupnp.NewServiceClientsFromRootDevice(ctx, root, loc, urn_LegacyWANIPConnection_1) + legacyClients, _ = goupnp.NewServiceClientsFromRootDevice(root, loc, urn_LegacyWANIPConnection_1) metricUPnPSelectLegacy.Add(int64(len(legacyClients))) for _, client := range legacyClients { clients = append(clients, &legacyWANIPConnection1{client}) @@ -346,7 +380,7 @@ func selectBestService(ctx context.Context, logf logger.Logf, root *goupnp.RootD } // Check if the device has an external IP address. 
- extIP, err := svc.GetExternalIPAddress(ctx) + extIP, err := svc.GetExternalIPAddressCtx(ctx) if err != nil { continue } @@ -399,7 +433,7 @@ func selectBestService(ctx context.Context, logf logger.Logf, root *goupnp.RootD // serviceIsConnected returns whether a given UPnP service is connected, based // on the NewConnectionStatus field returned from GetStatusInfo. func serviceIsConnected(ctx context.Context, logf logger.Logf, svc upnpClient) bool { - status, _ /* NewLastConnectionError */, _ /* NewUptime */, err := svc.GetStatusInfo(ctx) + status, _ /* NewLastConnectionError */, _ /* NewUptime */, err := svc.GetStatusInfoCtx(ctx) if err != nil { return false } @@ -454,7 +488,7 @@ func (c *Client) getUPnPPortMapping( c.mu.Lock() oldMapping, ok := c.mapping.(*upnpMapping) metas := c.uPnPMetas - ctx = goupnp.WithHTTPClient(ctx, c.upnpHTTPClientLocked()) + ctx = upnpHTTPClientKey.WithValue(ctx, c.upnpHTTPClientLocked()) c.mu.Unlock() // Wrapper for a uPnPDiscoResponse with an optional existing root @@ -629,7 +663,7 @@ func (c *Client) tryUPnPPortmapWithDevice( } // TODO cache this ip somewhere? 
- extIP, err := client.GetExternalIPAddress(ctx) + extIP, err := client.GetExternalIPAddressCtx(ctx) c.vlogf("client.GetExternalIPAddress: %v, %v", extIP, err) if err != nil { return netip.AddrPort{}, nil, err diff --git a/shell.nix b/shell.nix index 4f4714fea8475..c494ce47cce6c 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-PVE4oEwcoTbXbPJnBcVgBeXITWlnkhBf+UT5nqSeANM= +# nix-direnv cache busting line: sha256-knSIes9pFVkVfK5hcBG9BSR1ueH+yPpx4hv/UsyaW2M= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 7702de69d9725..f2b80f2bd3394 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -30,6 +30,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ DI github.com/google/uuid from github.com/prometheus-community/pro-bing github.com/hdevalence/ed25519consensus from tailscale.com/tka + github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ + github.com/huin/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper + github.com/huin/goupnp/httpu from github.com/huin/goupnp+ + github.com/huin/goupnp/scpd from github.com/huin/goupnp + github.com/huin/goupnp/soap from github.com/huin/goupnp+ + github.com/huin/goupnp/ssdp from github.com/huin/goupnp L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -53,12 +59,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - github.com/tailscale/goupnp from 
github.com/tailscale/goupnp/dcps/internetgateway2+ - github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper - github.com/tailscale/goupnp/httpu from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp - github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ - github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp LDW github.com/tailscale/hujson from tailscale.com/ipn/conffile LDAI github.com/tailscale/peercred from tailscale.com/ipn/ipnauth LDW github.com/tailscale/web-client-prebuilt from tailscale.com/client/web @@ -464,7 +464,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/tailscale/goupnp+ + encoding/xml from github.com/huin/goupnp+ errors from bufio+ expvar from tailscale.com/health+ flag from tailscale.com/util/testenv @@ -554,7 +554,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) path from debug/dwarf+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from github.com/tailscale/goupnp/httpu+ + regexp from github.com/huin/goupnp/httpu+ regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/coder/websocket/internal/xsync+ diff --git a/tstest/jsdeps/jsdeps_test.go b/tstest/jsdeps/jsdeps_test.go index eb44df62eda8f..27570fc2676b0 100644 --- a/tstest/jsdeps/jsdeps_test.go +++ b/tstest/jsdeps/jsdeps_test.go @@ -14,12 +14,12 @@ func TestDeps(t *testing.T) { GOOS: "js", GOARCH: "wasm", BadDeps: map[string]string{ - "testing": "do not use testing package in production code", - "runtime/pprof": "bloat", - "golang.org/x/net/http2/h2c": "bloat", - "net/http/pprof": "bloat", - "golang.org/x/net/proxy": "bloat", - "github.com/tailscale/goupnp": "bloat, which can't work anyway in wasm", + "testing": "do not use testing package in 
production code", + "runtime/pprof": "bloat", + "golang.org/x/net/http2/h2c": "bloat", + "net/http/pprof": "bloat", + "golang.org/x/net/proxy": "bloat", + "github.com/huin/goupnp": "bloat, which can't work anyway in wasm", }, }.Check(t) } From 3e45e5b420f23895ccfc51970aea9845c844f160 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Thu, 8 Jan 2026 10:28:40 -0800 Subject: [PATCH 0853/1093] feature/featuretags: make QR codes modular (#18358) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit QR codes are used by `tailscale up --qr` to provide an easy way to open a web-page without transcribing a difficult URI. However, there’s no need for this feature if the client will never be called interactively. So this PR adds the `ts_omit_qrcodes` build tag. Updates #18182 Signed-off-by: Simon Law --- cmd/tailscale/cli/up.go | 16 +++++++++----- cmd/tailscale/deps_test.go | 22 +++++++++++++++++++ cmd/tailscaled/depaware-minbox.txt | 13 ++--------- .../buildfeatures/feature_qrcodes_disabled.go | 13 +++++++++++ .../buildfeatures/feature_qrcodes_enabled.go | 13 +++++++++++ feature/featuretags/featuretags.go | 1 + util/qrcodes/format.go | 22 +++++++++++++++++++ util/qrcodes/qrcodes.go | 20 ++--------------- util/qrcodes/qrcodes_disabled.go | 16 ++++++++++++++ util/qrcodes/qrcodes_linux.go | 2 +- util/qrcodes/qrcodes_notlinux.go | 2 +- 11 files changed, 103 insertions(+), 37 deletions(-) create mode 100644 cmd/tailscale/deps_test.go create mode 100644 feature/buildfeatures/feature_qrcodes_disabled.go create mode 100644 feature/buildfeatures/feature_qrcodes_enabled.go create mode 100644 util/qrcodes/format.go create mode 100644 util/qrcodes/qrcodes_disabled.go diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 1d9f7e17c48df..2a7465de1f03b 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -94,8 +94,10 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { // When adding new flags, prefer 
to put them under "tailscale set" instead // of here. Setting preferences via "tailscale up" is deprecated. - upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs") - upf.StringVar(&upArgs.qrFormat, "qr-format", string(qrcodes.FormatAuto), fmt.Sprintf("QR code formatting (%s, %s, %s, %s)", qrcodes.FormatAuto, qrcodes.FormatASCII, qrcodes.FormatLarge, qrcodes.FormatSmall)) + if buildfeatures.HasQRCodes { + upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs") + upf.StringVar(&upArgs.qrFormat, "qr-format", string(qrcodes.FormatAuto), fmt.Sprintf("QR code formatting (%s, %s, %s, %s)", qrcodes.FormatAuto, qrcodes.FormatASCII, qrcodes.FormatLarge, qrcodes.FormatSmall)) + } upf.StringVar(&upArgs.authKeyOrFile, "auth-key", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`) upf.StringVar(&upArgs.clientID, "client-id", "", "Client ID used to generate authkeys via workload identity federation") upf.StringVar(&upArgs.clientSecretOrFile, "client-secret", "", `Client Secret used to generate authkeys via OAuth; if it begins with "file:", then it's a path to a file containing the secret`) @@ -720,9 +722,11 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if upArgs.json { js := &upOutputJSON{AuthURL: authURL, BackendState: st.BackendState} - png, err := qrcodes.EncodePNG(authURL, 128) - if err == nil { - js.QR = "data:image/png;base64," + base64.StdEncoding.EncodeToString(png) + if buildfeatures.HasQRCodes { + png, err := qrcodes.EncodePNG(authURL, 128) + if err == nil { + js.QR = "data:image/png;base64," + base64.StdEncoding.EncodeToString(png) + } } data, err := json.MarshalIndent(js, "", "\t") @@ -733,7 +737,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } else { fmt.Fprintf(Stderr, "\nTo authenticate, visit:\n\n\t%s\n\n", authURL) - if upArgs.qr { + if upArgs.qr && buildfeatures.HasQRCodes { _, err := 
qrcodes.Fprintln(Stderr, qrcodes.Format(upArgs.qrFormat), authURL) if err != nil { log.Print(err) diff --git a/cmd/tailscale/deps_test.go b/cmd/tailscale/deps_test.go new file mode 100644 index 0000000000000..132940e3cc937 --- /dev/null +++ b/cmd/tailscale/deps_test.go @@ -0,0 +1,22 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import ( + "testing" + + "tailscale.com/tstest/deptest" +) + +func TestOmitQRCodes(t *testing.T) { + const msg = "unexpected with ts_omit_qrcodes" + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_qrcodes", + BadDeps: map[string]string{ + "github.com/skip2/go-qrcode": msg, + }, + }.Check(t) +} diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 38da380135198..4b2f71983d441 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -33,9 +33,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3/internal from github.com/peterbourgon/ff/v3 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf - github.com/skip2/go-qrcode from tailscale.com/util/qrcodes - github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ - github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device @@ -193,7 +190,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/osshare from tailscale.com/cmd/tailscaled tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli - 💣 tailscale.com/util/qrcodes 
from tailscale.com/cmd/tailscale/cli + tailscale.com/util/qrcodes from tailscale.com/cmd/tailscale/cli tailscale.com/util/race from tailscale.com/net/dns/resolver tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ @@ -274,9 +271,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ - compress/flate from compress/gzip+ + compress/flate from compress/gzip compress/gzip from net/http+ - compress/zlib from image/png container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ @@ -355,13 +351,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de flag from tailscale.com/cmd/tailscaled+ fmt from compress/flate+ hash from crypto+ - hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem html from tailscale.com/ipn/ipnlocal+ - image from github.com/skip2/go-qrcode+ - image/color from github.com/skip2/go-qrcode+ - image/png from github.com/skip2/go-qrcode internal/abi from hash/maphash+ internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug @@ -406,7 +398,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - io/ioutil from github.com/skip2/go-qrcode iter from bytes+ log from github.com/klauspost/compress/zstd+ log/internal from log diff --git a/feature/buildfeatures/feature_qrcodes_disabled.go b/feature/buildfeatures/feature_qrcodes_disabled.go new file mode 100644 index 0000000000000..4b992501c969e --- /dev/null +++ b/feature/buildfeatures/feature_qrcodes_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_qrcodes + +package buildfeatures + +// HasQRCodes is whether the binary was built with support for modular feature "QR codes in tailscale CLI". +// Specifically, it's whether the binary was NOT built with the "ts_omit_qrcodes" build tag. +// It's a const so it can be used for dead code elimination. +const HasQRCodes = false diff --git a/feature/buildfeatures/feature_qrcodes_enabled.go b/feature/buildfeatures/feature_qrcodes_enabled.go new file mode 100644 index 0000000000000..5b74e2b3e5cbe --- /dev/null +++ b/feature/buildfeatures/feature_qrcodes_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_qrcodes + +package buildfeatures + +// HasQRCodes is whether the binary was built with support for modular feature "QR codes in tailscale CLI". +// Specifically, it's whether the binary was NOT built with the "ts_omit_qrcodes" build tag. +// It's a const so it can be used for dead code elimination. +const HasQRCodes = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 44b1295769c56..99df18b5a3c3b 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -222,6 +222,7 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Linux NetworkManager integration", Deps: []FeatureTag{"dbus"}, }, + "qrcodes": {Sym: "QRCodes", Desc: "QR codes in tailscale CLI"}, "relayserver": {Sym: "RelayServer", Desc: "Relay server"}, "resolved": { Sym: "Resolved", diff --git a/util/qrcodes/format.go b/util/qrcodes/format.go new file mode 100644 index 0000000000000..dbd565b2ec9d3 --- /dev/null +++ b/util/qrcodes/format.go @@ -0,0 +1,22 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package qrcodes + +// Format selects the text representation used to print QR codes. 
+type Format string + +const ( + // FormatAuto will format QR codes to best fit the capabilities of the + // [io.Writer]. + FormatAuto Format = "auto" + + // FormatASCII will format QR codes with only ASCII characters. + FormatASCII Format = "ascii" + + // FormatLarge will format QR codes with full block characters. + FormatLarge Format = "large" + + // FormatSmall will format QR codes with full and half block characters. + FormatSmall Format = "small" +) diff --git a/util/qrcodes/qrcodes.go b/util/qrcodes/qrcodes.go index 14bdf858145b5..02e06e59b4be3 100644 --- a/util/qrcodes/qrcodes.go +++ b/util/qrcodes/qrcodes.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_qrcodes + // Package qrcodes provides functions to render or format QR codes. package qrcodes @@ -12,24 +14,6 @@ import ( qrcode "github.com/skip2/go-qrcode" ) -// Format selects the text representation used to print QR codes. -type Format string - -const ( - // FormatAuto will format QR codes to best fit the capabilities of the - // [io.Writer]. - FormatAuto Format = "auto" - - // FormatASCII will format QR codes with only ASCII characters. - FormatASCII Format = "ascii" - - // FormatLarge will format QR codes with full block characters. - FormatLarge Format = "large" - - // FormatSmall will format QR codes with full and half block characters. - FormatSmall Format = "small" -) - // Fprintln formats s according to [Format] and writes a QR code to w, along // with a newline. It returns the number of bytes written and any write error // encountered. 
diff --git a/util/qrcodes/qrcodes_disabled.go b/util/qrcodes/qrcodes_disabled.go new file mode 100644 index 0000000000000..fa1b89cf437ef --- /dev/null +++ b/util/qrcodes/qrcodes_disabled.go @@ -0,0 +1,16 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_qrcodes + +package qrcodes + +import "io" + +func Fprintln(w io.Writer, format Format, s string) (n int, err error) { + panic("omitted") +} + +func EncodePNG(s string, size int) ([]byte, error) { + panic("omitted") +} diff --git a/util/qrcodes/qrcodes_linux.go b/util/qrcodes/qrcodes_linux.go index 9cc0c09bf0e5d..8f0d40f0a5e4a 100644 --- a/util/qrcodes/qrcodes_linux.go +++ b/util/qrcodes/qrcodes_linux.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux +//go:build linux && !ts_omit_qrcodes package qrcodes diff --git a/util/qrcodes/qrcodes_notlinux.go b/util/qrcodes/qrcodes_notlinux.go index a12ce39d11168..3149a60605bf3 100644 --- a/util/qrcodes/qrcodes_notlinux.go +++ b/util/qrcodes/qrcodes_notlinux.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux && !ts_omit_qrcodes package qrcodes From 4c37141ab780dbf6c037bd64fe48ab330441ad06 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Wed, 17 Dec 2025 09:49:34 -0700 Subject: [PATCH 0854/1093] cmd,internal,feature: add workload idenity support to gitops pusher Add support for authenticating the gitops-pusher using workload identity federation. 
Updates https://github.com/tailscale/corp/issues/34172 Signed-off-by: Mario Minardi --- cmd/gitops-pusher/gitops-pusher.go | 111 ++++++++++++------ .../identityfederation/identityfederation.go | 1 + .../client/tailscale/identityfederation.go | 12 +- 3 files changed, 84 insertions(+), 40 deletions(-) diff --git a/cmd/gitops-pusher/gitops-pusher.go b/cmd/gitops-pusher/gitops-pusher.go index 690ca287056d3..0cbbda88a18b9 100644 --- a/cmd/gitops-pusher/gitops-pusher.go +++ b/cmd/gitops-pusher/gitops-pusher.go @@ -19,12 +19,15 @@ import ( "os" "regexp" "strings" + "sync" "time" "github.com/peterbourgon/ff/v3/ffcli" "github.com/tailscale/hujson" "golang.org/x/oauth2/clientcredentials" - "tailscale.com/client/tailscale" + tsclient "tailscale.com/client/tailscale" + _ "tailscale.com/feature/condregister/identityfederation" + "tailscale.com/internal/client/tailscale" "tailscale.com/util/httpm" ) @@ -38,6 +41,12 @@ var ( failOnManualEdits = rootFlagSet.Bool("fail-on-manual-edits", false, "fail if manual edits to the ACLs in the admin panel are detected; when set to false (the default) only a warning is printed") ) +var ( + getCredentialsOnce sync.Once + client *http.Client + apiKey string +) + func modifiedExternallyError() error { if *githubSyntax { return fmt.Errorf("::warning file=%s,line=1,col=1,title=Policy File Modified Externally::The policy file was modified externally in the admin console.", *policyFname) @@ -46,9 +55,9 @@ func modifiedExternallyError() error { } } -func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error { +func apply(cache *Cache, tailnet string) func(context.Context, []string) error { return func(ctx context.Context, args []string) error { - controlEtag, err := getACLETag(ctx, client, tailnet, apiKey) + controlEtag, err := getACLETag(ctx, tailnet) if err != nil { return err } @@ -83,7 +92,7 @@ func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(conte } } - if err := 
applyNewACL(ctx, client, tailnet, apiKey, *policyFname, controlEtag); err != nil { + if err := applyNewACL(ctx, tailnet, *policyFname, controlEtag); err != nil { return err } @@ -93,9 +102,9 @@ func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(conte } } -func test(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error { +func test(cache *Cache, tailnet string) func(context.Context, []string) error { return func(ctx context.Context, args []string) error { - controlEtag, err := getACLETag(ctx, client, tailnet, apiKey) + controlEtag, err := getACLETag(ctx, tailnet) if err != nil { return err } @@ -129,16 +138,16 @@ func test(cache *Cache, client *http.Client, tailnet, apiKey string) func(contex } } - if err := testNewACLs(ctx, client, tailnet, apiKey, *policyFname); err != nil { + if err := testNewACLs(ctx, tailnet, *policyFname); err != nil { return err } return nil } } -func getChecksums(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error { +func getChecksums(cache *Cache, tailnet string) func(context.Context, []string) error { return func(ctx context.Context, args []string) error { - controlEtag, err := getACLETag(ctx, client, tailnet, apiKey) + controlEtag, err := getACLETag(ctx, tailnet) if err != nil { return err } @@ -166,28 +175,7 @@ func main() { if !ok { log.Fatal("set envvar TS_TAILNET to your tailnet's name") } - apiKey, ok := os.LookupEnv("TS_API_KEY") - oauthId, oiok := os.LookupEnv("TS_OAUTH_ID") - oauthSecret, osok := os.LookupEnv("TS_OAUTH_SECRET") - if !ok && (!oiok || !osok) { - log.Fatal("set envvar TS_API_KEY to your Tailscale API key or TS_OAUTH_ID and TS_OAUTH_SECRET to your Tailscale OAuth ID and Secret") - } - if apiKey != "" && (oauthId != "" || oauthSecret != "") { - log.Fatal("set either the envvar TS_API_KEY or TS_OAUTH_ID and TS_OAUTH_SECRET") - } - var client *http.Client - if oiok && (oauthId != "" || oauthSecret != "") { - // 
Both should ideally be set, but if either are non-empty it means the user had an intent - // to set _something_, so they should receive the oauth error flow. - oauthConfig := &clientcredentials.Config{ - ClientID: oauthId, - ClientSecret: oauthSecret, - TokenURL: fmt.Sprintf("https://%s/api/v2/oauth/token", *apiServer), - } - client = oauthConfig.Client(context.Background()) - } else { - client = http.DefaultClient - } + cache, err := LoadCache(*cacheFname) if err != nil { if os.IsNotExist(err) { @@ -203,7 +191,7 @@ func main() { ShortUsage: "gitops-pusher [options] apply", ShortHelp: "Pushes changes to CONTROL", LongHelp: `Pushes changes to CONTROL`, - Exec: apply(cache, client, tailnet, apiKey), + Exec: apply(cache, tailnet), } testCmd := &ffcli.Command{ @@ -211,7 +199,7 @@ func main() { ShortUsage: "gitops-pusher [options] test", ShortHelp: "Tests ACL changes", LongHelp: "Tests ACL changes", - Exec: test(cache, client, tailnet, apiKey), + Exec: test(cache, tailnet), } cksumCmd := &ffcli.Command{ @@ -219,7 +207,7 @@ func main() { ShortUsage: "Shows checksums of ACL files", ShortHelp: "Fetch checksum of CONTROL's ACL and the local ACL for comparison", LongHelp: "Fetch checksum of CONTROL's ACL and the local ACL for comparison", - Exec: getChecksums(cache, client, tailnet, apiKey), + Exec: getChecksums(cache, tailnet), } root := &ffcli.Command{ @@ -242,6 +230,47 @@ func main() { } } +func getCredentials() (*http.Client, string) { + getCredentialsOnce.Do(func() { + apiKeyEnv, ok := os.LookupEnv("TS_API_KEY") + oauthId, oiok := os.LookupEnv("TS_OAUTH_ID") + oauthSecret, osok := os.LookupEnv("TS_OAUTH_SECRET") + idToken, idok := os.LookupEnv("TS_ID_TOKEN") + + if !ok && (!oiok || (!osok && !idok)) { + log.Fatal("set envvar TS_API_KEY to your Tailscale API key, TS_OAUTH_ID and TS_OAUTH_SECRET to a Tailscale OAuth ID and Secret, or TS_OAUTH_ID and TS_ID_TOKEN to a Tailscale federated identity Client ID and OIDC identity token") + } + if apiKeyEnv != "" && (oauthId != "" 
|| (oauthSecret != "" && idToken != "")) { + log.Fatal("set either the envvar TS_API_KEY, TS_OAUTH_ID and TS_OAUTH_SECRET, or TS_OAUTH_ID and TS_ID_TOKEN") + } + if oiok && ((oauthId != "" && !idok) || oauthSecret != "") { + // Both should ideally be set, but if either are non-empty it means the user had an intent + // to set _something_, so they should receive the oauth error flow. + oauthConfig := &clientcredentials.Config{ + ClientID: oauthId, + ClientSecret: oauthSecret, + TokenURL: fmt.Sprintf("https://%s/api/v2/oauth/token", *apiServer), + } + client = oauthConfig.Client(context.Background()) + } else if idok { + if exchangeJWTForToken, ok := tailscale.HookExchangeJWTForTokenViaWIF.GetOk(); ok { + var err error + apiKeyEnv, err = exchangeJWTForToken(context.Background(), fmt.Sprintf("https://%s", *apiServer), oauthId, idToken) + if err != nil { + log.Fatal(err) + } + } + client = http.DefaultClient + } else { + client = http.DefaultClient + } + + apiKey = apiKeyEnv + }) + + return client, apiKey +} + func sumFile(fname string) (string, error) { data, err := os.ReadFile(fname) if err != nil { @@ -262,7 +291,9 @@ func sumFile(fname string) (string, error) { return fmt.Sprintf("%x", h.Sum(nil)), nil } -func applyNewACL(ctx context.Context, client *http.Client, tailnet, apiKey, policyFname, oldEtag string) error { +func applyNewACL(ctx context.Context, tailnet, policyFname, oldEtag string) error { + client, apiKey := getCredentials() + fin, err := os.Open(policyFname) if err != nil { return err @@ -299,7 +330,9 @@ func applyNewACL(ctx context.Context, client *http.Client, tailnet, apiKey, poli return nil } -func testNewACLs(ctx context.Context, client *http.Client, tailnet, apiKey, policyFname string) error { +func testNewACLs(ctx context.Context, tailnet, policyFname string) error { + client, apiKey := getCredentials() + data, err := os.ReadFile(policyFname) if err != nil { return err @@ -346,7 +379,7 @@ var lineColMessageSplit = regexp.MustCompile(`line 
([0-9]+), column ([0-9]+): (. // ACLGitopsTestError is redefined here so we can add a custom .Error() response type ACLGitopsTestError struct { - tailscale.ACLTestError + tsclient.ACLTestError } func (ate ACLGitopsTestError) Error() string { @@ -388,7 +421,9 @@ func (ate ACLGitopsTestError) Error() string { return sb.String() } -func getACLETag(ctx context.Context, client *http.Client, tailnet, apiKey string) (string, error) { +func getACLETag(ctx context.Context, tailnet string) (string, error) { + client, apiKey := getCredentials() + req, err := http.NewRequestWithContext(ctx, httpm.GET, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl", *apiServer, tailnet), nil) if err != nil { return "", err diff --git a/feature/identityfederation/identityfederation.go b/feature/identityfederation/identityfederation.go index ab1b65f1217d1..47ebd1349fcf3 100644 --- a/feature/identityfederation/identityfederation.go +++ b/feature/identityfederation/identityfederation.go @@ -24,6 +24,7 @@ import ( func init() { feature.Register("identityfederation") tailscale.HookResolveAuthKeyViaWIF.Set(resolveAuthKey) + tailscale.HookExchangeJWTForTokenViaWIF.Set(exchangeJWTForToken) } // resolveAuthKey uses OIDC identity federation to exchange the provided ID token and client ID for an authkey. diff --git a/internal/client/tailscale/identityfederation.go b/internal/client/tailscale/identityfederation.go index e1fe3559c7b44..b8eb0fc9cfc8e 100644 --- a/internal/client/tailscale/identityfederation.go +++ b/internal/client/tailscale/identityfederation.go @@ -9,11 +9,19 @@ import ( "tailscale.com/feature" ) -// HookResolveAuthKeyViaWIF resolves to [identityfederation.ResolveAuthKey] when the +// HookResolveAuthKeyViaWIF resolves to [identityfederation.resolveAuthKey] when the // corresponding feature tag is enabled in the build process. // // baseURL is the URL of the control server used for token exchange and authkey generation. 
-// clientID is the federated client ID used for token exchange, the format is / +// clientID is the federated client ID used for token exchange // idToken is the Identity token from the identity provider // tags is the list of tags to be associated with the auth key var HookResolveAuthKeyViaWIF feature.Hook[func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error)] + +// HookExchangeJWTForTokenViaWIF resolves to [identityfederation.exchangeJWTForToken] when the +// corresponding feature tag is enabled in the build process. +// +// baseURL is the URL of the control server used for token exchange +// clientID is the federated client ID used for token exchange +// idToken is the Identity token from the identity provider +var HookExchangeJWTForTokenViaWIF feature.Hook[func(ctx context.Context, baseURL, clientID, idToken string) (string, error)] From 5f34f14e144674a563474b6059ef51a2ee35fd0b Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 7 Jan 2026 15:17:38 -0800 Subject: [PATCH 0855/1093] net/udprelay: apply netns Control func to server socket(s) To prevent peer relay servers from sending packets *over* Tailscale. 
Updates tailscale/corp#35651 Signed-off-by: Jordan Whited --- net/udprelay/server.go | 29 ++++++++++++++++++++++++++++- net/udprelay/server_linux.go | 3 +-- net/udprelay/server_notlinux.go | 4 +--- 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index acdbf5ad6893a..5918863a5323f 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -19,6 +19,7 @@ import ( "slices" "strconv" "sync" + "syscall" "time" "go4.org/mem" @@ -29,6 +30,7 @@ import ( "tailscale.com/net/netaddr" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" + "tailscale.com/net/netns" "tailscale.com/net/packet" "tailscale.com/net/sockopts" "tailscale.com/net/stun" @@ -78,6 +80,7 @@ type Server struct { closeCh chan struct{} netChecker *netcheck.Client metrics *metrics + netMon *netmon.Monitor mu sync.Mutex // guards the following fields macSecrets views.Slice[[blake2s.Size]byte] // [0] is most recent, max 2 elements @@ -346,6 +349,7 @@ func NewServer(logf logger.Logf, port uint16, onlyStaticAddrPorts bool, metrics if err != nil { return nil, err } + s.netMon = netMon s.netChecker = &netcheck.Client{ NetMon: netMon, Logf: logger.WithPrefix(logf, "netcheck: "), @@ -542,6 +546,25 @@ func trySetUDPSocketOptions(pconn nettype.PacketConn, logf logger.Logf) { } } +func trySetSOMark(logf logger.Logf, netMon *netmon.Monitor, network, address string, c syscall.RawConn) { + if netns.UseSocketMark() { + // Leverage SO_MARK where available to prevent packets from routing + // *over* Tailscale. Where SO_MARK is unavailable we choose to not set + // SO_BINDTODEVICE as that could prevent handshakes from completing + // where multiple interfaces are in play. + // + // SO_MARK is only used on Linux at the time of writing (2026-01-08), + // and Linux is the popular/common choice for peer relay. 
If we are + // running on Linux and SO_MARK is unavailable (EPERM), chances are + // there is no TUN device, so routing over Tailscale is impossible + // anyway. Both TUN creation and SO_MARK require CAP_NET_ADMIN. + lis := netns.Listener(logf, netMon) + if lis.Control != nil { + lis.Control(network, address, c) + } + } +} + // bindSockets binds udp4 and udp6 sockets to desiredPort. We consider it // successful if we manage to bind at least one udp4 socket. Multiple sockets // may be bound per address family, e.g. SO_REUSEPORT, depending on platform. @@ -562,7 +585,11 @@ func (s *Server) bindSockets(desiredPort uint16) error { // arbitrary. maxSocketsPerAF := min(16, runtime.NumCPU()) listenConfig := &net.ListenConfig{ - Control: listenControl, + Control: func(network, address string, c syscall.RawConn) error { + trySetReusePort(network, address, c) + trySetSOMark(s.logf, s.netMon, network, address, c) + return nil + }, } for _, network := range []string{"udp4", "udp6"} { SocketsLoop: diff --git a/net/udprelay/server_linux.go b/net/udprelay/server_linux.go index 009ec8cc8bfe9..d4cf2a2b16ee9 100644 --- a/net/udprelay/server_linux.go +++ b/net/udprelay/server_linux.go @@ -12,11 +12,10 @@ import ( "golang.org/x/sys/unix" ) -func listenControl(_ string, _ string, c syscall.RawConn) error { +func trySetReusePort(_ string, _ string, c syscall.RawConn) { c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1) }) - return nil } func isReusableSocket(uc *net.UDPConn) bool { diff --git a/net/udprelay/server_notlinux.go b/net/udprelay/server_notlinux.go index 042a6dd68215e..f21020631f76e 100644 --- a/net/udprelay/server_notlinux.go +++ b/net/udprelay/server_notlinux.go @@ -10,9 +10,7 @@ import ( "syscall" ) -func listenControl(_ string, _ string, _ syscall.RawConn) error { - return nil -} +func trySetReusePort(_ string, _ string, _ syscall.RawConn) {} func isReusableSocket(*net.UDPConn) bool { return false From 
f9762064cfcec9ab285750b0e25b48cd31642a31 Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Thu, 8 Jan 2026 20:49:18 -0700 Subject: [PATCH 0856/1093] tsnet: reset serve config only once Prior to this change, we were resetting the tsnet's serve config every time tsnet.Server.Up was run. This is important to do on startup, to prevent messy interactions with stale configuration when the code has changed. However, Up is frequently run as a just-in-case step (for example, by Server.ListenTLS/ListenFunnel and possibly by consumers of tsnet). When the serve config is reset on each of these calls to Up, this creates situations in which the serve config disappears unexpectedly. The solution is to reset the serve config only on the first call to Up. Fixes #8800 Updates tailscale/corp#27200 Signed-off-by: Harry Harpham --- tsnet/tsnet.go | 57 +++++++++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index ea165e932e4bc..61112d4dcaf08 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -160,25 +160,26 @@ type Server struct { getCertForTesting func(*tls.ClientHelloInfo) (*tls.Certificate, error) - initOnce sync.Once - initErr error - lb *ipnlocal.LocalBackend - sys *tsd.System - netstack *netstack.Impl - netMon *netmon.Monitor - rootPath string // the state directory - hostname string - shutdownCtx context.Context - shutdownCancel context.CancelFunc - proxyCred string // SOCKS5 proxy auth for loopbackListener - localAPICred string // basic auth password for loopbackListener - loopbackListener net.Listener // optional loopback for localapi and proxies - localAPIListener net.Listener // in-memory, used by localClient - localClient *local.Client // in-memory - localAPIServer *http.Server - logbuffer *filch.Filch - logtail *logtail.Logger - logid logid.PublicID + initOnce sync.Once + initErr error + lb *ipnlocal.LocalBackend + sys *tsd.System + netstack *netstack.Impl + netMon *netmon.Monitor + 
rootPath string // the state directory + hostname string + shutdownCtx context.Context + shutdownCancel context.CancelFunc + proxyCred string // SOCKS5 proxy auth for loopbackListener + localAPICred string // basic auth password for loopbackListener + loopbackListener net.Listener // optional loopback for localapi and proxies + localAPIListener net.Listener // in-memory, used by localClient + localClient *local.Client // in-memory + localAPIServer *http.Server + resetServeConfigOnce sync.Once + logbuffer *filch.Filch + logtail *logtail.Logger + logid logid.PublicID mu sync.Mutex listeners map[listenKey]*listener @@ -388,8 +389,8 @@ func (s *Server) Up(ctx context.Context) (*ipnstate.Status, error) { if n.ErrMessage != nil { return nil, fmt.Errorf("tsnet.Up: backend: %s", *n.ErrMessage) } - if s := n.State; s != nil { - if *s == ipn.Running { + if st := n.State; st != nil { + if *st == ipn.Running { status, err := lc.Status(ctx) if err != nil { return nil, fmt.Errorf("tsnet.Up: %w", err) @@ -398,11 +399,15 @@ func (s *Server) Up(ctx context.Context) (*ipnstate.Status, error) { return nil, errors.New("tsnet.Up: running, but no ip") } - // Clear the persisted serve config state to prevent stale configuration - // from code changes. This is a temporary workaround until we have a better - // way to handle this. (2023-03-11) - if err := lc.SetServeConfig(ctx, new(ipn.ServeConfig)); err != nil { - return nil, fmt.Errorf("tsnet.Up: %w", err) + // The first time Up is run, clear the persisted serve config. + // We do this to prevent messy interactions with stale config in + // the face of code changes. 
+ var srvResetErr error + s.resetServeConfigOnce.Do(func() { + srvResetErr = lc.SetServeConfig(ctx, new(ipn.ServeConfig)) + }) + if srvResetErr != nil { + return nil, fmt.Errorf("tsnet.Up: clearing serve config: %w", err) } return status, nil From 3c1be083a480e4f55d0224c0b98f90c12257db11 Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Fri, 9 Jan 2026 10:02:12 -0700 Subject: [PATCH 0857/1093] tsnet: ensure funnel listener cleans up after itself when closed Previously the funnel listener would leave artifacts in the serve config. This caused weird out-of-sync effects like the admin panel showing that funnel was enabled for a node, but the node rejecting packets because the listener was closed. This change resolves these synchronization issues by ensuring that funnel listeners clean up the serve config when closed. See also: https://github.com/tailscale/tailscale/commit/e109cf9fdd405153a8d8c0ec52a87d7c8ce8689b Updates #cleanup Signed-off-by: Harry Harpham --- tsnet/tsnet.go | 42 ++++++++++++++++++ tsnet/tsnet_test.go | 101 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 143 insertions(+) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 61112d4dcaf08..d2810c0b25544 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -1228,12 +1228,26 @@ func (s *Server) ListenFunnel(network, addr string, opts ...FunnelOption) (net.L } domain := st.CertDomains[0] hp := ipn.HostPort(domain + ":" + portStr) + var cleanupOnClose func() error if !srvConfig.AllowFunnel[hp] { mak.Set(&srvConfig.AllowFunnel, hp, true) srvConfig.AllowFunnel[hp] = true if err := lc.SetServeConfig(ctx, srvConfig); err != nil { return nil, err } + cleanupOnClose = func() error { + sc, err := lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("cleaning config changes: %w", err) + } + if sc.AllowFunnel != nil { + delete(sc.AllowFunnel, hp) + } + if err := lc.SetServeConfig(ctx, sc); err != nil { + return fmt.Errorf("cleaning config changes: %w", err) + } + return nil + } } // Start a 
funnel listener. @@ -1241,6 +1255,7 @@ func (s *Server) ListenFunnel(network, addr string, opts ...FunnelOption) (net.L if err != nil { return nil, err } + ln = &cleanupListener{Listener: ln, cleanup: cleanupOnClose} return tls.NewListener(ln, tlsConfig), nil } @@ -1449,3 +1464,30 @@ type addr struct{ ln *listener } func (a addr) Network() string { return a.ln.keys[0].network } func (a addr) String() string { return a.ln.addr } + +// cleanupListener wraps a net.Listener with a function to be run on Close. +type cleanupListener struct { + net.Listener + cleanup func() error + cleanupOnce sync.Once +} + +func (cl *cleanupListener) Close() error { + var cleanupErr error + cl.cleanupOnce.Do(func() { + if cl.cleanup != nil { + cleanupErr = cl.cleanup() + } + }) + closeErr := cl.Listener.Close() + switch { + case closeErr != nil && cleanupErr != nil: + return fmt.Errorf("%w; also: %w", closeErr, cleanupErr) + case closeErr != nil: + return closeErr + case cleanupErr != nil: + return cleanupErr + default: + return nil + } +} diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 838d5f3f5f1a5..af8fa765de559 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -13,6 +13,7 @@ import ( "crypto/tls" "crypto/x509" "crypto/x509/pkix" + "encoding/json" "errors" "flag" "fmt" @@ -33,6 +34,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "golang.org/x/net/proxy" @@ -741,6 +743,105 @@ func TestFunnel(t *testing.T) { } } +// TestFunnelClose ensures that the listener returned by ListenFunnel cleans up +// after itself when closed. Specifically, changes made to the serve config +// should be cleared. 
+func TestFunnelClose(t *testing.T) { + marshalServeConfig := func(t *testing.T, sc ipn.ServeConfigView) string { + t.Helper() + return string(must.Get(json.MarshalIndent(sc, "", "\t"))) + } + + t.Run("simple", func(t *testing.T) { + controlURL, _ := startControl(t) + s, _, _ := startServer(t, t.Context(), controlURL, "s") + + before := s.lb.ServeConfig() + + ln := must.Get(s.ListenFunnel("tcp", ":443")) + ln.Close() + + after := s.lb.ServeConfig() + if diff := cmp.Diff(marshalServeConfig(t, after), marshalServeConfig(t, before)); diff != "" { + t.Fatalf("expected serve config to be unchanged after close (-got, +want):\n%s", diff) + } + }) + + // Closing the listener shouldn't clear out config that predates it. + t.Run("no_clobbering", func(t *testing.T) { + controlURL, _ := startControl(t) + s, _, _ := startServer(t, t.Context(), controlURL, "s") + + // To obtain config the listener might want to clobber, we: + // - run a listener + // - grab the config + // - close the listener (clearing config) + ln := must.Get(s.ListenFunnel("tcp", ":443")) + before := s.lb.ServeConfig() + ln.Close() + + // Now we manually write the config to the local backend (it should have + // been cleared), run the listener again, and close it again. + must.Do(s.lb.SetServeConfig(before.AsStruct(), "")) + ln = must.Get(s.ListenFunnel("tcp", ":443")) + ln.Close() + + // The config should not have been cleared this time since it predated + // the most recent run. + after := s.lb.ServeConfig() + if diff := cmp.Diff(marshalServeConfig(t, after), marshalServeConfig(t, before)); diff != "" { + t.Fatalf("expected existing config to remain intact (-got, +want):\n%s", diff) + } + }) + + // Closing one listener shouldn't affect config for another listener. + t.Run("two_listeners", func(t *testing.T) { + controlURL, _ := startControl(t) + s, _, _ := startServer(t, t.Context(), controlURL, "s1") + + // Start a listener on 443. 
+ ln1 := must.Get(s.ListenFunnel("tcp", ":443")) + defer ln1.Close() + + // Save the serve config for this original listener. + before := s.lb.ServeConfig() + + // Now start and close a new listener on a different port. + ln2 := must.Get(s.ListenFunnel("tcp", ":8080")) + ln2.Close() + + // The serve config for the original listener should be intact. + after := s.lb.ServeConfig() + if diff := cmp.Diff(marshalServeConfig(t, after), marshalServeConfig(t, before)); diff != "" { + t.Fatalf("expected existing config to remain intact (-got, +want):\n%s", diff) + } + }) + + // It should be possible to close a listener and free system resources even + // when the Server has been closed (or the listener should be automatically + // closed). + t.Run("after_server_close", func(t *testing.T) { + controlURL, _ := startControl(t) + s, _, _ := startServer(t, t.Context(), controlURL, "s") + + ln := must.Get(s.ListenFunnel("tcp", ":443")) + + // Close the server, then close the listener. + must.Do(s.Close()) + // We don't care whether we get an error from the listener closing. + ln.Close() + + // The listener should immediately return an error indicating closure. 
+ _, err := ln.Accept() + // Looking for a string in the error sucks, but it's supposed to stay + // consistent: + // https://github.com/golang/go/blob/108b333d510c1f60877ac917375d7931791acfe6/src/internal/poll/fd.go#L20-L24 + if err == nil || !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatal("expected listener to be closed, got:", err) + } + }) +} + func TestListenerClose(t *testing.T) { tstest.Shard(t) ctx := context.Background() From 5db95ec376b00a8db90bcb0c61c452f0b49a8633 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Fri, 9 Jan 2026 12:16:53 -0800 Subject: [PATCH 0858/1093] go.mod: bump github.com/containerd/containerd@v1.7.29 (#18374) Updates #cleanup Signed-off-by: Patrick O'Doherty --- cmd/k8s-operator/depaware.txt | 8 +-- flake.nix | 2 +- go.mod | 48 ++++++++------- go.mod.sri | 2 +- go.sum | 111 +++++++++++++++++----------------- shell.nix | 2 +- 6 files changed, 88 insertions(+), 85 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index aa9ad2fb4b40c..b809c85b90c3f 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -7,7 +7,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus github.com/blang/semver/v4 from k8s.io/component-base/metrics - 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus + 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus+ github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -124,10 +124,10 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/tailscale/wireguard-go/tun from 
github.com/tailscale/wireguard-go/device+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 go.opentelemetry.io/otel/attribute from go.opentelemetry.io/otel/trace+ + go.opentelemetry.io/otel/attribute/internal from go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/attribute/internal/xxhash from go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/codes from go.opentelemetry.io/otel/trace - 💣 go.opentelemetry.io/otel/internal from go.opentelemetry.io/otel/attribute - go.opentelemetry.io/otel/internal/attribute from go.opentelemetry.io/otel/attribute - go.opentelemetry.io/otel/semconv/v1.26.0 from go.opentelemetry.io/otel/trace + go.opentelemetry.io/otel/semconv/v1.37.0 from go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace from k8s.io/component-base/metrics go.opentelemetry.io/otel/trace/embedded from go.opentelemetry.io/otel/trace 💣 go.opentelemetry.io/otel/trace/internal/telemetry from go.opentelemetry.io/otel/trace diff --git a/flake.nix b/flake.nix index dd8016b4eb362..6049e069258ea 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-knSIes9pFVkVfK5hcBG9BSR1ueH+yPpx4hv/UsyaW2M= +# nix-direnv cache busting line: sha256-MKMLpGUYzUPYKjVYQSnxDQDdH1oXaM8bCIbhCTuGeV0= diff --git a/go.mod b/go.mod index c8be839c39083..a236aad8bdcd6 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd github.com/bramvdbogaerde/go-scp v1.4.0 - github.com/cilium/ebpf v0.15.0 + github.com/cilium/ebpf v0.16.0 github.com/coder/websocket v1.8.12 github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf @@ -42,7 +42,7 @@ require ( github.com/go-ole/go-ole v1.3.0 github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 github.com/godbus/dbus/v5 
v5.1.1-0.20230522191255-76236955d466 - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 github.com/golang/snappy v0.0.4 github.com/golangci/golangci-lint v1.57.1 github.com/google/go-cmp v0.7.0 @@ -63,7 +63,7 @@ require ( github.com/jellydator/ttlcache/v3 v3.1.0 github.com/jsimonetti/rtnetlink v1.4.0 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/klauspost/compress v1.18.0 + github.com/klauspost/compress v1.18.2 github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.20 @@ -104,14 +104,14 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.45.0 + golang.org/x/crypto v0.46.0 golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b golang.org/x/mod v0.30.0 - golang.org/x/net v0.47.0 - golang.org/x/oauth2 v0.31.0 - golang.org/x/sync v0.18.0 - golang.org/x/sys v0.38.0 - golang.org/x/term v0.37.0 + golang.org/x/net v0.48.0 + golang.org/x/oauth2 v0.32.0 + golang.org/x/sync v0.19.0 + golang.org/x/sys v0.40.0 + golang.org/x/term v0.38.0 golang.org/x/time v0.12.0 golang.org/x/tools v0.39.0 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 @@ -155,15 +155,16 @@ require ( github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/ckaznocha/intrange v0.1.0 // indirect - github.com/containerd/containerd v1.7.28 // indirect + github.com/containerd/containerd v1.7.29 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/log v0.1.0 // indirect - github.com/containerd/platforms v1.0.0-rc.1 // indirect + github.com/containerd/platforms v1.0.0-rc.2 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/cyphar/filepath-securejoin v0.4.1 // indirect + github.com/cyphar/filepath-securejoin 
v0.6.1 // indirect github.com/deckarep/golang-set/v2 v2.8.0 // indirect github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-events v0.0.0-20250808211157-605354379745 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/evanphx/json-patch v5.9.11+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect @@ -221,19 +222,20 @@ require ( go-simpler.org/musttag v0.9.0 // indirect go-simpler.org/sloglint v0.5.0 // indirect go.etcd.io/bbolt v1.4.2 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect go.uber.org/automaxprocs v1.5.3 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/grpc v1.72.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect + google.golang.org/grpc v1.78.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect k8s.io/cli-runtime v0.34.0 // indirect k8s.io/component-base 
v0.34.0 // indirect @@ -314,7 +316,7 @@ require ( github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-git/go-git/v5 v5.13.1 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.23.0 // indirect @@ -410,7 +412,7 @@ require ( github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/ryancurrah/gomodguard v1.3.1 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect @@ -460,9 +462,9 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/image v0.27.0 // indirect - golang.org/x/text v0.31.0 // indirect + golang.org/x/text v0.32.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/go.mod.sri b/go.mod.sri index fd2ab9d7a3f48..bbda9fe49fe5e 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-knSIes9pFVkVfK5hcBG9BSR1ueH+yPpx4hv/UsyaW2M= +sha256-MKMLpGUYzUPYKjVYQSnxDQDdH1oXaM8bCIbhCTuGeV0= diff --git a/go.sum b/go.sum index 19703e072a593..4c2c0bfed1e66 100644 --- a/go.sum +++ b/go.sum @@ -240,8 +240,8 @@ github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+U github.com/chzyer/logex v1.1.10/go.mod 
h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= -github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= +github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= +github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/ckaznocha/intrange v0.1.0 h1:ZiGBhvrdsKpoEfzh9CjBfDSZof6QB0ORY5tXasUtiew= @@ -252,14 +252,14 @@ github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= -github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c= -github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= +github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod 
h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= -github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= +github.com/containerd/platforms v1.0.0-rc.2 h1:0SPgaNZPVWGEi4grZdV8VRYQn78y+nm6acgLGv/QzE4= +github.com/containerd/platforms v1.0.0-rc.2/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= @@ -282,8 +282,8 @@ github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= -github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= +github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/daixiang0/gci v0.12.3 h1:yOZI7VAxAGPQmkb1eqt5g/11SUlwoat1fSblGLmdiQc= github.com/daixiang0/gci v0.12.3/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -320,8 +320,8 @@ github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZ github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections 
v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20250808211157-605354379745 h1:yOn6Ze6IbYI/KAw2lw/83ELYvZh6hvsygTVkD0dzMC4= +github.com/docker/go-events v0.0.0-20250808211157-605354379745/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -407,8 +407,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -479,8 +479,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -707,6 +707,8 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= +github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM= +github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -733,8 +735,8 @@ github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/ github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -982,8 +984,8 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -1194,16 +1196,16 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= 
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= 
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= @@ -1228,16 +1230,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= -go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= -go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= -go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/sdk/metric v1.39.0 
h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= @@ -1265,8 +1267,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1355,16 +1357,16 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net 
v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= -golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1378,8 +1380,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1443,8 +1445,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1453,8 +1455,8 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.37.0 
h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1465,8 +1467,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1605,11 +1607,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2 h1:7LRqPCEdE4TP4/9psdaB7F2nhZFfBiGJomA5sojLWdU= +google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1622,8 +1623,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= -google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= 
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1636,8 +1637,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/shell.nix b/shell.nix index c494ce47cce6c..3c85586b9f188 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-knSIes9pFVkVfK5hcBG9BSR1ueH+yPpx4hv/UsyaW2M= +# nix-direnv cache busting line: sha256-MKMLpGUYzUPYKjVYQSnxDQDdH1oXaM8bCIbhCTuGeV0= From aadc4f2ef4a63ecea4fed696091d5b0bb6918795 Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Fri, 9 Jan 2026 
16:47:56 -0500 Subject: [PATCH 0859/1093] wgengine/magicsock: add home DERP region usermetric (#18062) Expose the node's home DERP region ID as a Prometheus gauge via the usermetrics endpoint. Fixes #18061 Signed-off-by: Raj Singh --- wgengine/magicsock/derp.go | 12 ++++++++++++ wgengine/magicsock/magicsock.go | 7 +++++++ 2 files changed, 19 insertions(+) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 37a4f1a64ee02..1c5225e2249b5 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -216,17 +216,28 @@ func (c *Conn) derpRegionCodeLocked(regionID int) string { return "" } +// setHomeDERPGaugeLocked updates the home DERP gauge metric. +// +// c.mu must be held. +func (c *Conn) setHomeDERPGaugeLocked(derpNum int) { + if c.homeDERPGauge != nil { + c.homeDERPGauge.Set(float64(derpNum)) + } +} + // c.mu must NOT be held. func (c *Conn) setNearestDERP(derpNum int) (wantDERP bool) { c.mu.Lock() defer c.mu.Unlock() if !c.wantDerpLocked() { c.myDerp = 0 + c.setHomeDERPGaugeLocked(0) c.health.SetMagicSockDERPHome(0, c.homeless) return false } if c.homeless { c.myDerp = 0 + c.setHomeDERPGaugeLocked(0) c.health.SetMagicSockDERPHome(0, c.homeless) return false } @@ -238,6 +249,7 @@ func (c *Conn) setNearestDERP(derpNum int) (wantDERP bool) { metricDERPHomeChange.Add(1) } c.myDerp = derpNum + c.setHomeDERPGaugeLocked(derpNum) c.health.SetMagicSockDERPHome(derpNum, c.homeless) if c.privateKey.IsZero() { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index a19032fb27cb8..8fbd07013797d 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -406,6 +406,10 @@ type Conn struct { // metrics contains the metrics for the magicsock instance. metrics *metrics + + // homeDERPGauge is the usermetric gauge for the home DERP region ID. + // This can be nil when [Options.Metrics] are not enabled. 
+ homeDERPGauge *usermetric.Gauge } // SetDebugLoggingEnabled controls whether spammy debug logging is enabled. @@ -744,6 +748,9 @@ func NewConn(opts Options) (*Conn, error) { } c.metrics = registerMetrics(opts.Metrics) + if opts.Metrics != nil { + c.homeDERPGauge = opts.Metrics.NewGauge("tailscaled_home_derp_region_id", "DERP region ID of this node's home relay server") + } if d4, err := c.listenRawDisco("ip4"); err == nil { c.logf("[v1] using BPF disco receiver for IPv4") From 78c8d14254eab4c35dca73af2006ea1eaff19f6b Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Fri, 9 Jan 2026 12:54:39 -0700 Subject: [PATCH 0860/1093] tsnet: use errors.Join and idiomatic field order Updates #18376 (follow up on feedback) Signed-off-by: Harry Harpham --- tsnet/tsnet.go | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index d2810c0b25544..9efad32b3dc66 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -1468,8 +1468,8 @@ func (a addr) String() string { return a.ln.addr } // cleanupListener wraps a net.Listener with a function to be run on Close. 
type cleanupListener struct { net.Listener - cleanup func() error cleanupOnce sync.Once + cleanup func() error // nil if unused } func (cl *cleanupListener) Close() error { @@ -1479,15 +1479,5 @@ func (cl *cleanupListener) Close() error { cleanupErr = cl.cleanup() } }) - closeErr := cl.Listener.Close() - switch { - case closeErr != nil && cleanupErr != nil: - return fmt.Errorf("%w; also: %w", closeErr, cleanupErr) - case closeErr != nil: - return closeErr - case cleanupErr != nil: - return cleanupErr - default: - return nil - } + return errors.Join(cl.Listener.Close(), cleanupErr) } From 87e108e10c84f71341fb4edaaeb06e8e12fe682a Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Mon, 12 Jan 2026 09:09:05 -0700 Subject: [PATCH 0861/1093] docs: add instructions on referencing pull requests in commit messages Updates #cleanup Signed-off-by: Harry Harpham --- docs/commit-messages.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/commit-messages.md b/docs/commit-messages.md index aef1035b35b8c..b617e1fadd425 100644 --- a/docs/commit-messages.md +++ b/docs/commit-messages.md @@ -72,7 +72,7 @@ For the body (the rest of the description): - blank line after the subject (first) line - the text should be wrapped to ~76 characters (to appease git viewing tools, mainly), unless you really need longer lines (e.g. for ASCII art, tables, or long links) -- there must be a `Fixes` or `Updates` line for all non-cleanup commits linking to a tracking bug. This goes after the body with a blank newline separating the two. [Cleanup commits](#is-it-a-cleanup) can use `Updates #cleanup` instead of an issue. +- there must be a `Fixes` or `Updates` line for all non-cleanup commits linking to a tracking bug. This goes after the body with a blank newline separating the two. A pull request may be referenced rather than a tracking bug (using the same format, e.g. `Updates #12345`), though a bug is generally preferred. 
[Cleanup commits](#is-it-a-cleanup) can use `Updates #cleanup` instead of an issue. - `Change-Id` lines should ideally be included in commits in the `corp` repo and are more optional in `tailscale/tailscale`. You can configure Git to do this for you by running `./tool/go run misc/install-git-hooks.go` from the root of the corp repo. This was originally a Gerrit thing and we don't use Gerrit, but it lets us tooling track commits as they're cherry-picked between branches. Also, tools like [git-cleanup](https://github.com/bradfitz/gitutil) use it to clean up your old local branches once they're merged upstream. - we don't use Markdown in commit messages. (Accidental Markdown like bulleted lists or even headings is fine, but not links) - we require `Signed-off-by` lines in public repos (such as `tailscale/tailscale`). Add them using `git commit --signoff` or `git commit -s` for short. You can use them in private repos but do not have to. From 8c17d871b33ade8ebf8e2a6c5e136f06c4019cd2 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 13 Jan 2026 13:43:17 +0100 Subject: [PATCH 0862/1093] ipn/store/kubestore: don't load write replica certs in memory (#18395) Fixes a bug where, for kube HA proxies, TLS certs for the replica responsible for cert issuance where loaded in memory on startup, although the in-memory store was not updated after renewal (to avoid failing re-issuance for re-created Ingresses). Now the 'write' replica always reads certs from the kube Secret. 
Updates tailscale/tailscale#18394 Signed-off-by: Irbe Krumina --- ipn/store/kubestore/store_kube.go | 10 +++++++--- ipn/store/kubestore/store_kube_test.go | 8 ++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index ba45409ed7903..5fbd795c2174d 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -110,8 +110,12 @@ func newWithClient(logf logger.Logf, c kubeclient.Client, secretName string) (*S if err := s.loadState(); err != nil && err != ipn.ErrStateNotExist { return nil, fmt.Errorf("error loading state from kube Secret: %w", err) } - // If we are in cert share mode, pre-load existing shared certs. - if s.certShareMode == "rw" || s.certShareMode == "ro" { + // If we are in read-only cert share mode, pre-load existing shared certs. + // Write replicas never load certs in-memory to avoid a situation where, + // after Ingress recreation (and the associated cert Secret recreation), new + // TLS certs don't get issued because the write replica still has certs + // in-memory. Instead, write replicas fetch certs from Secret on each request. + if s.certShareMode == "ro" { sel := s.certSecretSelector() if err := s.loadCerts(context.Background(), sel); err != nil { // We will attempt to again retrieve the certs from Secrets when a request for an HTTPS endpoint @@ -176,7 +180,7 @@ func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) (err error) // written to memory to avoid out of sync memory state after // Ingress resources have been recreated. This means that TLS // certs for write replicas are retrieved from the Secret on - // each HTTPS request. This is a temporary solution till we + // each HTTPS request. This is a temporary solution till we // implement a Secret watch. 
if s.certShareMode != "rw" { s.memory.WriteState(ipn.StateKey(domain+".crt"), cert) diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 44a4bbb7fc14d..aea39d3bb51f8 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -688,7 +688,7 @@ func TestNewWithClient(t *testing.T) { }, }, { - name: "load_select_certs_in_read_write_mode", + name: "do_not_load_certs_in_read_write_mode", certMode: "rw", stateSecretContents: map[string][]byte{ "foo": []byte("bar"), @@ -704,11 +704,7 @@ func TestNewWithClient(t *testing.T) { }, "4"), }, wantMemoryStoreContents: map[ipn.StateKey][]byte{ - "foo": []byte("bar"), - "app1.tailnetxyz.ts.net.crt": []byte(testCert + "1"), - "app1.tailnetxyz.ts.net.key": []byte(testKey + "1"), - "app2.tailnetxyz.ts.net.crt": []byte(testCert + "2"), - "app2.tailnetxyz.ts.net.key": []byte(testKey + "2"), + "foo": []byte("bar"), }, }, { From 76fb09c6bd8492b9edae5e667930c13008c40091 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 13 Jan 2026 09:56:53 -0800 Subject: [PATCH 0863/1093] .github/workflows: fix timeouts by caching packages for golangci-lint (#18398) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Recently, the golangci-lint workflow has been taking longer and longer to complete, causing it to timeout after the default of 5 minutes. Running error: context loading failed: failed to load packages: failed to load packages: failed to load with go/packages: context deadline exceeded Timeout exceeded: try increasing it by passing --timeout option This PR upgrades actions/setup-go to version 6, the latest, and enables caching for Go modules and build outputs. This should speed up linting because most packages won’t have to be downloaded over and over again. 
Fixes #18366 Signed-off-by: Simon Law --- .github/workflows/golangci-lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 0b9fb6a4151e2..69efcfd5b0839 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -29,10 +29,10 @@ jobs: steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: go-version-file: go.mod - cache: false + cache: true - name: golangci-lint uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0 From 17b0c7bfb384892ed8a6b5c6aa669e866d493fdc Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Mon, 12 Jan 2026 15:09:04 -0500 Subject: [PATCH 0864/1093] metrics: add a NewLabelMap helper to create and register label maps Updates tailscale/corp#31174 Signed-off-by: Anton Tolchanov --- cmd/derper/derper.go | 9 ++------- metrics/metrics.go | 8 ++++++++ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 16f531be0ec62..ddf45747ac9fe 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -99,18 +99,13 @@ var ( ) var ( - tlsRequestVersion = &metrics.LabelMap{Label: "version"} - tlsActiveVersion = &metrics.LabelMap{Label: "version"} + tlsRequestVersion = metrics.NewLabelMap("derper_tls_request_version", "version") + tlsActiveVersion = metrics.NewLabelMap("gauge_derper_tls_active_version", "version") ) const setecMeshKeyName = "meshkey" const meshKeyEnvVar = "TAILSCALE_DERPER_MESH_KEY" -func init() { - expvar.Publish("derper_tls_request_version", tlsRequestVersion) - expvar.Publish("gauge_derper_tls_active_version", tlsActiveVersion) -} - type config struct { PrivateKey key.NodePrivate } diff --git a/metrics/metrics.go 
b/metrics/metrics.go index 19966d395f815..010a32d02feee 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -43,6 +43,14 @@ type LabelMap struct { shardedIntMu syncs.Mutex } +// NewLabelMap creates and publishes a new LabelMap metric with the given +// metric name and label name. +func NewLabelMap(metric, label string) *LabelMap { + m := &LabelMap{Label: label} + expvar.Publish(metric, m) + return m +} + // SetInt64 sets the *Int value stored under the given map key. func (m *LabelMap) SetInt64(key string, v int64) { m.Get(key).Set(v) From 58042e2de39c9c2827fe0bad7c45e8631369325f Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 13 Jan 2026 11:43:03 -0500 Subject: [PATCH 0865/1093] metrics: add a NewSet and Set.NewLabelMap helpers Updates tailscale/corp#31174 Signed-off-by: Anton Tolchanov --- metrics/metrics.go | 15 +++++++++++++++ net/stunserver/stunserver.go | 13 +++---------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/metrics/metrics.go b/metrics/metrics.go index 010a32d02feee..092b56c41b6dc 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -29,6 +29,21 @@ type Set struct { expvar.Map } +// NewSet creates and publishes a new Set with the given name. +func NewSet(name string) *Set { + s := &Set{} + expvar.Publish(name, s) + return s +} + +// NewLabelMap creates a new LabelMap metric with the given +// metric name and label name, and adds it to the Set. +func (s *Set) NewLabelMap(metric, label string) *LabelMap { + m := &LabelMap{Label: label} + s.Set(metric, m) + return m +} + // LabelMap is a string-to-Var map variable that satisfies the // expvar.Var interface. 
// diff --git a/net/stunserver/stunserver.go b/net/stunserver/stunserver.go index b45bb633129fe..7397675ca8dc3 100644 --- a/net/stunserver/stunserver.go +++ b/net/stunserver/stunserver.go @@ -8,7 +8,6 @@ package stunserver import ( "context" "errors" - "expvar" "io" "log" "net" @@ -20,9 +19,9 @@ import ( ) var ( - stats = new(metrics.Set) - stunDisposition = &metrics.LabelMap{Label: "disposition"} - stunAddrFamily = &metrics.LabelMap{Label: "family"} + stats = metrics.NewSet("stun") + stunDisposition = stats.NewLabelMap("counter_requests", "disposition") + stunAddrFamily = stats.NewLabelMap("counter_addrfamily", "family") stunReadError = stunDisposition.Get("read_error") stunNotSTUN = stunDisposition.Get("not_stun") stunWriteError = stunDisposition.Get("write_error") @@ -32,12 +31,6 @@ var ( stunIPv6 = stunAddrFamily.Get("ipv6") ) -func init() { - stats.Set("counter_requests", stunDisposition) - stats.Set("counter_addrfamily", stunAddrFamily) - expvar.Publish("stun", stats) -} - type STUNServer struct { ctx context.Context // ctx signals service shutdown pc *net.UDPConn // pc is the UDP listener From 6a6aa805d61a014aa602501e42320368970eb17d Mon Sep 17 00:00:00 2001 From: Danni Popova Date: Wed, 14 Jan 2026 15:00:59 +0000 Subject: [PATCH 0866/1093] cmd,feature: add identity token auto generation for workload identity (#18373) Adds the ability to detect what provider the client is running on and tries fetch the ID token to use with Workload Identity. 
Updates https://github.com/tailscale/corp/issues/33316 Signed-off-by: Danni Popova --- cmd/k8s-operator/depaware.txt | 72 ++++++ cmd/tailscale/cli/up.go | 8 +- cmd/tailscale/cli/up_test.go | 1 + cmd/tailscale/depaware.txt | 72 ++++++ cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 72 ++++++ .../identityfederation/identityfederation.go | 12 +- .../identityfederation_test.go | 12 +- flake.nix | 2 +- go.mod | 16 +- go.mod.sri | 2 +- go.sum | 28 +- .../client/tailscale/identityfederation.go | 4 +- shell.nix | 2 +- tsnet/depaware.txt | 72 ++++++ tsnet/tsnet.go | 2 +- tsnet/tsnet_test.go | 14 +- wif/wif.go | 242 ++++++++++++++++++ 18 files changed, 592 insertions(+), 42 deletions(-) create mode 100644 wif/wif.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index b809c85b90c3f..d6993465304fd 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -5,6 +5,77 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/aws/retry from 
github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/config from tailscale.com/wif + github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from 
github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints+ + github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sts/types from 
github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware + github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http 
github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus github.com/blang/semver/v4 from k8s.io/component-base/metrics 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus+ @@ -916,6 +987,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine + tailscale.com/wif from tailscale.com/feature/identityfederation golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 2a7465de1f03b..bf0315860fbeb 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -99,6 +99,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { upf.StringVar(&upArgs.qrFormat, "qr-format", string(qrcodes.FormatAuto), fmt.Sprintf("QR code formatting (%s, %s, %s, %s)", qrcodes.FormatAuto, qrcodes.FormatASCII, qrcodes.FormatLarge, qrcodes.FormatSmall)) } upf.StringVar(&upArgs.authKeyOrFile, "auth-key", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`) + upf.StringVar(&upArgs.audience, "audience", "", "Audience used when requesting an ID token from an identity provider for auth keys via workload identity federation") upf.StringVar(&upArgs.clientID, "client-id", "", "Client ID used to generate authkeys via workload identity federation") upf.StringVar(&upArgs.clientSecretOrFile, "client-secret", "", `Client Secret used to generate authkeys via OAuth; if it begins with "file:", then it's a path to a file containing the secret`) upf.StringVar(&upArgs.idTokenOrFile, "id-token", "", `ID token from the identity provider to exchange with the control 
server for workload identity federation; if it begins with "file:", then it's a path to a file containing the token`) @@ -149,7 +150,7 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { return upf } -// notFalseVar is is a flag.Value that can only be "true", if set. +// notFalseVar is a flag.Value that can only be "true", if set. type notFalseVar struct{} func (notFalseVar) IsBoolFlag() bool { return true } @@ -194,6 +195,7 @@ type upArgsT struct { netfilterMode string authKeyOrFile string // "secret" or "file:/path/to/secret" clientID string + audience string clientSecretOrFile string // "secret" or "file:/path/to/secret" idTokenOrFile string // "secret" or "file:/path/to/secret" hostname string @@ -628,7 +630,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE return err } - authKey, err = f(ctx, prefs.ControlURL, upArgs.clientID, idToken, strings.Split(upArgs.advertiseTags, ",")) + authKey, err = f(ctx, prefs.ControlURL, upArgs.clientID, idToken, upArgs.audience, strings.Split(upArgs.advertiseTags, ",")) if err != nil { return err } @@ -905,7 +907,7 @@ func addPrefFlagMapping(flagName string, prefNames ...string) { // correspond to an ipn.Pref. func preflessFlag(flagName string) bool { switch flagName { - case "auth-key", "force-reauth", "reset", "qr", "qr-format", "json", "timeout", "accept-risk", "host-routes", "client-id", "client-secret", "id-token": + case "auth-key", "force-reauth", "reset", "qr", "qr-format", "json", "timeout", "accept-risk", "host-routes", "client-id", "audience", "client-secret", "id-token": return true } return false diff --git a/cmd/tailscale/cli/up_test.go b/cmd/tailscale/cli/up_test.go index fe2f1b555a2bc..bb172f9063f59 100644 --- a/cmd/tailscale/cli/up_test.go +++ b/cmd/tailscale/cli/up_test.go @@ -46,6 +46,7 @@ var validUpFlags = set.Of( "client-id", "client-secret", "id-token", + "audience", ) // TestUpFlagSetIsFrozen complains when new flags are added to tailscale up. 
diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 1a6a1a52cea07..67ffa4fbc0fda 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -10,6 +10,77 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy L github.com/atotto/clipboard from tailscale.com/client/systray + github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/config from tailscale.com/wif + github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from 
github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints+ + github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from 
github.com/aws/aws-sdk-go-v2/aws + github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/smithy-go/encoding/xml from 
github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware + github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -217,6 +288,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/version from tailscale.com/client/web+ tailscale.com/version/distro from tailscale.com/client/web+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap + tailscale.com/wif from tailscale.com/feature/identityfederation golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from tailscale.com/clientupdate/distsign+ diff --git 
a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index ed8f6a5125ece..43165ea36c6d3 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -68,6 +68,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ + L github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 24069551eb890..e29ae93484c95 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -5,6 +5,77 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/ratelimit from 
github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/config from tailscale.com/wif + github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from 
github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints+ + github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from 
github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware + github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ + github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws+ + 
github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -320,6 +391,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine + tailscale.com/wif from tailscale.com/feature/identityfederation golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ diff --git a/feature/identityfederation/identityfederation.go b/feature/identityfederation/identityfederation.go index 47ebd1349fcf3..f75b096a603a2 100644 --- a/feature/identityfederation/identityfederation.go +++ b/feature/identityfederation/identityfederation.go @@ -19,6 +19,7 @@ import ( "tailscale.com/feature" "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" + "tailscale.com/wif" ) func init() { @@ -28,13 +29,20 @@ func init() { } // resolveAuthKey uses OIDC identity federation to exchange the provided ID token and client ID for an authkey. 
-func resolveAuthKey(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { +func resolveAuthKey(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { if clientID == "" { return "", nil // Short-circuit, no client ID means not using identity federation } if idToken == "" { - return "", errors.New("federated identity authkeys require --id-token") + if audience == "" { + return "", errors.New("federated identity requires either an ID token or an audience") + } + providerIdToken, err := wif.ObtainProviderToken(ctx, audience) + if err != nil { + return "", errors.New("federated identity authkeys require --id-token") + } + idToken = providerIdToken } if len(tags) == 0 { return "", errors.New("federated identity authkeys require --advertise-tags") diff --git a/feature/identityfederation/identityfederation_test.go b/feature/identityfederation/identityfederation_test.go index a673a42982706..b050f1a019e38 100644 --- a/feature/identityfederation/identityfederation_test.go +++ b/feature/identityfederation/identityfederation_test.go @@ -16,6 +16,7 @@ func TestResolveAuthKey(t *testing.T) { name string clientID string idToken string + audience string tags []string wantAuthKey string wantErr string @@ -24,6 +25,7 @@ func TestResolveAuthKey(t *testing.T) { name: "success", clientID: "client-123", idToken: "token", + audience: "api://tailscale-wif", tags: []string{"tag:test"}, wantAuthKey: "tskey-auth-xyz", wantErr: "", @@ -32,21 +34,24 @@ func TestResolveAuthKey(t *testing.T) { name: "missing client id short-circuits without error", clientID: "", idToken: "token", + audience: "api://tailscale-wif", tags: []string{"tag:test"}, wantAuthKey: "", wantErr: "", }, { - name: "missing id token", + name: "missing id token and audience", clientID: "client-123", idToken: "", + audience: "", tags: []string{"tag:test"}, - wantErr: "federated identity authkeys require --id-token", + wantErr: "federated identity 
requires either an ID token or an audience", }, { name: "missing tags", clientID: "client-123", idToken: "token", + audience: "api://tailscale-wif", tags: []string{}, wantErr: "federated identity authkeys require --advertise-tags", }, @@ -54,6 +59,7 @@ func TestResolveAuthKey(t *testing.T) { name: "invalid client id attributes", clientID: "client-123?invalid=value", idToken: "token", + audience: "api://tailscale-wif", tags: []string{"tag:test"}, wantErr: `failed to parse optional config attributes: unknown optional config attribute "invalid"`, }, @@ -64,7 +70,7 @@ func TestResolveAuthKey(t *testing.T) { srv := mockedControlServer(t) defer srv.Close() - authKey, err := resolveAuthKey(context.Background(), srv.URL, tt.clientID, tt.idToken, tt.tags) + authKey, err := resolveAuthKey(context.Background(), srv.URL, tt.clientID, tt.idToken, tt.audience, tt.tags) if tt.wantErr != "" { if err == nil { t.Errorf("resolveAuthKey() error = nil, want %q", tt.wantErr) diff --git a/flake.nix b/flake.nix index 6049e069258ea..149223d0aac60 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-MKMLpGUYzUPYKjVYQSnxDQDdH1oXaM8bCIbhCTuGeV0= +# nix-direnv cache busting line: sha256-WeMTOkERj4hvdg4yPaZ1gRgKnhRIBXX55kUVbX/k/xM= diff --git a/go.mod b/go.mod index a236aad8bdcd6..a8ec79e6e014f 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/andybalholm/brotli v1.1.0 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be github.com/atotto/clipboard v0.1.4 - github.com/aws/aws-sdk-go-v2 v1.36.0 + github.com/aws/aws-sdk-go-v2 v1.41.0 github.com/aws/aws-sdk-go-v2/config v1.29.5 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58 github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 @@ -269,19 +269,19 @@ require ( github.com/ashanbrown/makezero v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect - 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 // indirect - github.com/aws/smithy-go v1.22.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 + github.com/aws/smithy-go v1.24.0 github.com/beorn7/perks v1.0.1 // indirect github.com/bkielbasa/cyclop v1.2.1 // indirect github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect diff --git a/go.mod.sri b/go.mod.sri index bbda9fe49fe5e..b533a75654aa6 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-MKMLpGUYzUPYKjVYQSnxDQDdH1oXaM8bCIbhCTuGeV0= +sha256-WeMTOkERj4hvdg4yPaZ1gRgKnhRIBXX55kUVbX/k/xM= diff --git a/go.sum b/go.sum index 4c2c0bfed1e66..541cef6058655 100644 --- a/go.sum +++ b/go.sum @@ -141,8 +141,8 @@ github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5Fc github.com/ashanbrown/makezero v1.1.1/go.mod 
h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= -github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= -github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= +github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4= +github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= @@ -153,20 +153,20 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPd github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58 h1:/BsEGAyMai+KdXS+CMHlLhB5miAO19wOqE6tj8azWPM= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.58/go.mod h1:KHM3lfl/sAJBCoLI1Lsg5w4SD2VDYWwQi7vxbKhw7TI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA= 
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared 
v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw= github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo= @@ -177,10 +177,10 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uU github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk= +github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= +github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 h1:bXAPYSbdYbS5VTy92NIUbeDI1qyggi+JYh5op9IFlcQ= github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= diff --git a/internal/client/tailscale/identityfederation.go b/internal/client/tailscale/identityfederation.go index b8eb0fc9cfc8e..3bb64b270a017 100644 --- a/internal/client/tailscale/identityfederation.go +++ b/internal/client/tailscale/identityfederation.go @@ -16,7 +16,9 @@ import ( // clientID is the federated 
client ID used for token exchange // idToken is the Identity token from the identity provider // tags is the list of tags to be associated with the auth key -var HookResolveAuthKeyViaWIF feature.Hook[func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error)] +// audience is the federated audience acquired by configuring +// the trusted credential in the admin UI +var HookResolveAuthKeyViaWIF feature.Hook[func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error)] // HookExchangeJWTForTokenViaWIF resolves to [identityfederation.exchangeJWTForToken] when the // corresponding feature tag is enabled in the build process. diff --git a/shell.nix b/shell.nix index 3c85586b9f188..ccec5faf538e0 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-MKMLpGUYzUPYKjVYQSnxDQDdH1oXaM8bCIbhCTuGeV0= +# nix-direnv cache busting line: sha256-WeMTOkERj4hvdg4yPaZ1gRgKnhRIBXX55kUVbX/k/xM= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index f2b80f2bd3394..5b08200c97f6d 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -5,6 +5,77 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/sso+ + 
github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ + github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 + github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ + github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/config from tailscale.com/wif + github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds + github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds + github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/aws-sdk-go-v2/internal/endpoints from 
github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints+ + github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config + github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds + github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 + github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws + github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso + github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/aws-sdk-go-v2/service/sts from 
github.com/aws/aws-sdk-go-v2/config+ + github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ + github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ + github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ + github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ + github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts + github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer + github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ + github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ + github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config + github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware + github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/sso+ + github.com/aws/smithy-go/tracing from 
github.com/aws/aws-sdk-go-v2/aws/middleware+ + github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws+ + github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http LDW github.com/coder/websocket from tailscale.com/util/eventbus LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -315,6 +386,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine + tailscale.com/wif from tailscale.com/feature/identityfederation golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 9efad32b3dc66..595b052ab00b9 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -811,7 +811,7 @@ func (s *Server) resolveAuthKey() (string, error) { if clientID == "" && idToken != "" { return "", fmt.Errorf("ID token for workload identity federation found, but client ID is empty") } - authKey, err = resolveViaWIF(s.shutdownCtx, s.ControlURL, clientID, idToken, s.AdvertiseTags) + authKey, err = resolveViaWIF(s.shutdownCtx, s.ControlURL, clientID, idToken, "", s.AdvertiseTags) if err != nil { return "", err } diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index af8fa765de559..18e352c67a30b 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1506,7 +1506,7 @@ func TestResolveAuthKey(t *testing.T) { oauthAvailable bool wifAvailable bool resolveViaOAuth func(ctx context.Context, clientSecret string, tags []string) (string, error) - resolveViaWIF func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) + 
resolveViaWIF func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) wantAuthKey string wantErr bool wantErrContains string @@ -1538,7 +1538,7 @@ func TestResolveAuthKey(t *testing.T) { clientID: "client-id-123", idToken: "id-token-456", wifAvailable: true, - resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { if clientID != "client-id-123" { return "", fmt.Errorf("unexpected client ID: %s", clientID) } @@ -1555,7 +1555,7 @@ func TestResolveAuthKey(t *testing.T) { clientID: "client-id-123", idToken: "id-token-456", wifAvailable: true, - resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { return "", fmt.Errorf("resolution failed") }, wantErrContains: "resolution failed", @@ -1565,7 +1565,7 @@ func TestResolveAuthKey(t *testing.T) { clientID: "", idToken: "id-token-456", wifAvailable: true, - resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { return "", fmt.Errorf("should not be called") }, wantErrContains: "empty", @@ -1575,7 +1575,7 @@ func TestResolveAuthKey(t *testing.T) { clientID: "client-id-123", idToken: "", wifAvailable: true, - resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { return "", fmt.Errorf("should not be called") }, wantErrContains: "empty", @@ -1591,7 +1591,7 @@ func TestResolveAuthKey(t *testing.T) 
{ return "tskey-auth-via-oauth", nil }, wifAvailable: true, - resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { return "", fmt.Errorf("should not be called") }, wantAuthKey: "tskey-auth-via-oauth", @@ -1606,7 +1606,7 @@ func TestResolveAuthKey(t *testing.T) { return "", fmt.Errorf("resolution failed") }, wifAvailable: true, - resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { return "", fmt.Errorf("should not be called") }, wantErrContains: "failed", diff --git a/wif/wif.go b/wif/wif.go new file mode 100644 index 0000000000000..557685c448c0b --- /dev/null +++ b/wif/wif.go @@ -0,0 +1,242 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package wif deals with obtaining ID tokens from provider VMs +// to be used as part of Workload Identity Federation +package wif + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/smithy-go" + "tailscale.com/util/httpm" +) + +type Environment string + +const ( + EnvGitHub Environment = "github" + EnvAWS Environment = "aws" + EnvGCP Environment = "gcp" + EnvNone Environment = "none" +) + +// ObtainProviderToken tries to detect what provider the client is running in +// and then tries to obtain an ID token for the audience that is passed as an argument +// To detect the environment, we do it in the following intentional order: +// 1. 
GitHub Actions (strongest env signals; may run atop any cloud) +// 2. AWS via IMDSv2 token endpoint (does not require env vars) +// 3. GCP via metadata header semantics +// 4. Azure via metadata endpoint +func ObtainProviderToken(ctx context.Context, audience string) (string, error) { + env := detectEnvironment(ctx) + + switch env { + case EnvGitHub: + return acquireGitHubActionsIDToken(ctx, audience) + case EnvAWS: + return acquireAWSWebIdentityToken(ctx, audience) + case EnvGCP: + return acquireGCPMetadataIDToken(ctx, audience) + default: + return "", errors.New("could not detect environment; provide --id-token explicitly") + } +} + +func detectEnvironment(ctx context.Context) Environment { + if os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL") != "" && + os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN") != "" { + return EnvGitHub + } + + client := httpClient() + if detectAWSIMDSv2(ctx, client) { + return EnvAWS + } + if detectGCPMetadata(ctx, client) { + return EnvGCP + } + return EnvNone +} + +func httpClient() *http.Client { + return &http.Client{ + Timeout: time.Second * 5, + } +} + +func detectAWSIMDSv2(ctx context.Context, client *http.Client) bool { + req, err := http.NewRequestWithContext(ctx, httpm.PUT, "http://169.254.169.254/latest/api/token", nil) + if err != nil { + return false + } + req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "1") + + resp, err := client.Do(req) + if err != nil { + return false + } + defer resp.Body.Close() + + return resp.StatusCode == http.StatusOK +} + +func detectGCPMetadata(ctx context.Context, client *http.Client) bool { + req, err := http.NewRequestWithContext(ctx, httpm.GET, "http://metadata.google.internal", nil) + if err != nil { + return false + } + req.Header.Set("Metadata-Flavor", "Google") + + resp, err := client.Do(req) + if err != nil { + return false + } + defer resp.Body.Close() + + return resp.Header.Get("Metadata-Flavor") == "Google" +} + +type githubOIDCResponse struct { + Value string `json:"value"` +} + +func 
acquireGitHubActionsIDToken(ctx context.Context, audience string) (string, error) { + reqURL := os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL") + reqTok := os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN") + if reqURL == "" || reqTok == "" { + return "", errors.New("missing ACTIONS_ID_TOKEN_REQUEST_URL/TOKEN (ensure workflow has permissions: id-token: write)") + } + + u, err := url.Parse(reqURL) + if err != nil { + return "", fmt.Errorf("parse ACTIONS_ID_TOKEN_REQUEST_URL: %w", err) + } + if strings.TrimSpace(audience) != "" { + q := u.Query() + q.Set("audience", strings.TrimSpace(audience)) + u.RawQuery = q.Encode() + } + + req, err := http.NewRequestWithContext(ctx, httpm.GET, u.String(), nil) + if err != nil { + return "", fmt.Errorf("build request: %w", err) + } + req.Header.Set("Authorization", "Bearer "+reqTok) + req.Header.Set("Accept", "application/json") + + client := httpClient() + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("request github oidc token: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode/100 != 2 { + b, _ := io.ReadAll(io.LimitReader(resp.Body, 2048)) + return "", fmt.Errorf("github oidc token endpoint returned %s: %s", resp.Status, strings.TrimSpace(string(b))) + } + + var tr githubOIDCResponse + if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil { + return "", fmt.Errorf("decode github oidc response: %w", err) + } + if strings.TrimSpace(tr.Value) == "" { + return "", errors.New("github oidc response contained empty token") + } + + // GitHub response doesn't provide exp directly; caller can parse JWT if needed. + return tr.Value, nil +} + +func acquireAWSWebIdentityToken(ctx context.Context, audience string) (string, error) { + // LoadDefaultConfig wires up the default credential chain (incl. IMDS). + cfg, err := config.LoadDefaultConfig(ctx) + if err != nil { + return "", fmt.Errorf("load aws config: %w", err) + } + + // Verify credentials are available before proceeding. 
+ if _, err := cfg.Credentials.Retrieve(ctx); err != nil { + return "", fmt.Errorf("AWS credentials unavailable (instance profile/IMDS?): %w", err) + } + + imdsClient := imds.NewFromConfig(cfg) + region, err := imdsClient.GetRegion(ctx, &imds.GetRegionInput{}) + if err != nil { + return "", fmt.Errorf("couldn't get AWS region: %w", err) + } + cfg.Region = region.Region + + stsClient := sts.NewFromConfig(cfg) + in := &sts.GetWebIdentityTokenInput{ + Audience: []string{strings.TrimSpace(audience)}, + SigningAlgorithm: aws.String("ES384"), + DurationSeconds: aws.Int32(300), // 5 minutes + } + + out, err := stsClient.GetWebIdentityToken(ctx, in) + if err != nil { + var apiErr smithy.APIError + if errors.As(err, &apiErr) { + return "", fmt.Errorf("aws sts:GetWebIdentityToken failed (%s): %w", apiErr.ErrorCode(), err) + } + return "", fmt.Errorf("aws sts:GetWebIdentityToken failed: %w", err) + } + + if out.WebIdentityToken == nil || strings.TrimSpace(*out.WebIdentityToken) == "" { + return "", fmt.Errorf("aws sts:GetWebIdentityToken returned empty token") + } + + return *out.WebIdentityToken, nil +} + +func acquireGCPMetadataIDToken(ctx context.Context, audience string) (string, error) { + u := "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/identity" + v := url.Values{} + v.Set("audience", strings.TrimSpace(audience)) + v.Set("format", "full") + fullURL := u + "?" 
+ v.Encode() + + req, err := http.NewRequestWithContext(ctx, httpm.GET, fullURL, nil) + if err != nil { + return "", fmt.Errorf("build request: %w", err) + } + req.Header.Set("Metadata-Flavor", "Google") + + client := httpClient() + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("call gcp metadata identity endpoint: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode/100 != 2 { + b, _ := io.ReadAll(io.LimitReader(resp.Body, 2048)) + return "", fmt.Errorf("gcp metadata identity endpoint returned %s: %s", resp.Status, strings.TrimSpace(string(b))) + } + + b, err := io.ReadAll(io.LimitReader(resp.Body, 1024*1024)) + if err != nil { + return "", fmt.Errorf("read gcp id token: %w", err) + } + jwt := strings.TrimSpace(string(b)) + if jwt == "" { + return "", fmt.Errorf("gcp metadata returned empty token") + } + + return jwt, nil +} From 28f163542cc089078b35a8dc1168c878223aadc5 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 14 Jan 2026 15:15:02 +0000 Subject: [PATCH 0867/1093] .github/actions/go-cache: build cigocacher using remote path, fall back to ./tool/go (#18409) If local tailscale/tailscale checkout is not available, pull cigocacher remotely. Fall back to ./tool/go if no other Go installation is present. Updates tailscale/corp#32493 Signed-off-by: Irbe Krumina --- .github/actions/go-cache/action.sh | 19 +++++++++++++++++-- .github/actions/go-cache/action.yml | 3 ++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/.github/actions/go-cache/action.sh b/.github/actions/go-cache/action.sh index bd584f6f1270a..f49d5bb779f4d 100755 --- a/.github/actions/go-cache/action.sh +++ b/.github/actions/go-cache/action.sh @@ -23,8 +23,23 @@ if [ -z "${URL:-}" ]; then exit 0 fi -BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(go env GOEXE)" -go build -o "${BIN_PATH}" ./cmd/cigocacher +GOPATH=$(command -v go || true) +if [ -z "${GOPATH}" ]; then + if [ ! 
-f "tool/go" ]; then + echo "Go not available, unable to proceed" + exit 1 + fi + GOPATH="./tool/go" +fi + +BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(${GOPATH} env GOEXE)" +if [ -d "cmd/cigocacher" ]; then + echo "cmd/cigocacher found locally, building from local source" + "${GOPATH}" build -o "${BIN_PATH}" ./cmd/cigocacher +else + echo "cmd/cigocacher not found locally, fetching from tailscale.com/cmd/cigocacher" + "${GOPATH}" build -o "${BIN_PATH}" tailscale.com/cmd/cigocacher +fi CIGOCACHER_TOKEN="$("${BIN_PATH}" --auth --cigocached-url "${URL}" --cigocached-host "${HOST}" )" if [ -z "${CIGOCACHER_TOKEN:-}" ]; then diff --git a/.github/actions/go-cache/action.yml b/.github/actions/go-cache/action.yml index 38bb15b37931e..7f5a66de17d0f 100644 --- a/.github/actions/go-cache/action.yml +++ b/.github/actions/go-cache/action.yml @@ -31,4 +31,5 @@ runs: HOST: ${{ inputs.cigocached-host }} CACHE_DIR: ${{ inputs.cache-dir }} working-directory: ${{ inputs.checkout-path }} - run: .github/actions/go-cache/action.sh + # https://github.com/orgs/community/discussions/25910 + run: $GITHUB_ACTION_PATH/action.sh From 02af7c963ce837cc8f90d4142d671a49d09a83d5 Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Tue, 13 Jan 2026 17:06:48 -0700 Subject: [PATCH 0868/1093] tsnet: allow for automatic ID token generation Allow for optionally specifying an audience for tsnet. This is passed to the underlying identity federation logic to allow for tsnet auth to use automatic ID token generation for authentication. Updates https://github.com/tailscale/corp/issues/33316 Signed-off-by: Mario Minardi --- tsnet/tsnet.go | 34 +++++++++++++++++++++++++++++----- tsnet/tsnet_test.go | 42 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 70 insertions(+), 6 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 595b052ab00b9..8b23b7ae3b8d3 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -139,6 +139,14 @@ type Server struct { // field is not used. 
IDToken string + // Audience, if non-empty, is the audience to use when requesting + // an ID token from a well-known identity provider to exchange + // with the control server for workload identity federation. It + // will be preferred over the TS_AUDIENCE environment variable. If + // the node is already created (from state previously stored in Store), + // then this field is not used. + Audience string + // ControlURL optionally specifies the coordination server URL. // If empty, the Tailscale default is used. ControlURL string @@ -567,6 +575,13 @@ func (s *Server) getIDToken() string { return os.Getenv("TS_ID_TOKEN") } +func (s *Server) getAudience() string { + if v := s.Audience; v != "" { + return v + } + return os.Getenv("TS_AUDIENCE") +} + func (s *Server) start() (reterr error) { var closePool closeOnErrorPool defer closePool.closeAllIfError(&reterr) @@ -805,13 +820,22 @@ func (s *Server) resolveAuthKey() (string, error) { if wifOk && authKey == "" { clientID := s.getClientID() idToken := s.getIDToken() - if clientID != "" && idToken == "" { - return "", fmt.Errorf("client ID for workload identity federation found, but ID token is empty") + audience := s.getAudience() + if clientID != "" && idToken == "" && audience == "" { + return "", fmt.Errorf("client ID for workload identity federation found, but ID token and audience are empty") + } + if idToken != "" && audience != "" { + return "", fmt.Errorf("only one of ID token and audience should be for workload identity federation") } - if clientID == "" && idToken != "" { - return "", fmt.Errorf("ID token for workload identity federation found, but client ID is empty") + if clientID == "" { + if idToken != "" { + return "", fmt.Errorf("ID token for workload identity federation found, but client ID is empty") + } + if audience != "" { + return "", fmt.Errorf("audience for workload identity federation found, but client ID is empty") + } } - authKey, err = resolveViaWIF(s.shutdownCtx, s.ControlURL, clientID, 
idToken, "", s.AdvertiseTags) + authKey, err = resolveViaWIF(s.shutdownCtx, s.ControlURL, clientID, idToken, audience, s.AdvertiseTags) if err != nil { return "", err } diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 18e352c67a30b..2c8514cf42d0b 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1503,6 +1503,7 @@ func TestResolveAuthKey(t *testing.T) { clientSecret string clientID string idToken string + audience string oauthAvailable bool wifAvailable bool resolveViaOAuth func(ctx context.Context, clientSecret string, tags []string) (string, error) @@ -1550,6 +1551,23 @@ func TestResolveAuthKey(t *testing.T) { wantAuthKey: "tskey-auth-via-wif", wantErrContains: "", }, + { + name: "successful resolution via federated audience", + clientID: "client-id-123", + audience: "api.tailscale.com", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + if clientID != "client-id-123" { + return "", fmt.Errorf("unexpected client ID: %s", clientID) + } + if audience != "api.tailscale.com" { + return "", fmt.Errorf("unexpected ID token: %s", idToken) + } + return "tskey-auth-via-wif", nil + }, + wantAuthKey: "tskey-auth-via-wif", + wantErrContains: "", + }, { name: "failing resolution via federated ID token", clientID: "client-id-123", @@ -1561,7 +1579,7 @@ func TestResolveAuthKey(t *testing.T) { wantErrContains: "resolution failed", }, { - name: "empty client ID", + name: "empty client ID with ID token", clientID: "", idToken: "id-token-456", wifAvailable: true, @@ -1570,6 +1588,16 @@ func TestResolveAuthKey(t *testing.T) { }, wantErrContains: "empty", }, + { + name: "empty client ID with audience", + clientID: "", + audience: "api.tailscale.com", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantErrContains: 
"empty", + }, { name: "empty ID token", clientID: "client-id-123", @@ -1580,6 +1608,17 @@ func TestResolveAuthKey(t *testing.T) { }, wantErrContains: "empty", }, + { + name: "audience with ID token", + clientID: "client-id-123", + idToken: "id-token-456", + audience: "api.tailscale.com", + wifAvailable: true, + resolveViaWIF: func(ctx context.Context, baseURL, clientID, idToken, audience string, tags []string) (string, error) { + return "", fmt.Errorf("should not be called") + }, + wantErrContains: "only one of ID token and audience", + }, { name: "workload identity resolution skipped if resolution via OAuth token succeeds", clientSecret: "tskey-client-secret-123", @@ -1665,6 +1704,7 @@ func TestResolveAuthKey(t *testing.T) { ClientSecret: tt.clientSecret, ClientID: tt.clientID, IDToken: tt.idToken, + Audience: tt.audience, ControlURL: "https://control.example.com", } s.shutdownCtx = context.Background() From e9d82767e507108ed0f4eb0ff3b46a5625af7b0c Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Tue, 13 Jan 2026 17:30:57 -0700 Subject: [PATCH 0869/1093] cmd/containerboot: allow for automatic ID token generation Allow for optionally specifying an audience for containerboot. This is passed to tailscale up to allow for containerboot to use automatic ID token generation for authentication. Updates https://github.com/tailscale/corp/issues/34430 Signed-off-by: Mario Minardi --- cmd/containerboot/main.go | 10 +++++--- cmd/containerboot/settings.go | 39 +++++++++++++++++++++++++++--- cmd/containerboot/settings_test.go | 35 ++++++++++++++++++++++++++- cmd/containerboot/tailscaled.go | 3 +++ 4 files changed, 79 insertions(+), 8 deletions(-) diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 011c1830a856b..a520b5756ade5 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -20,8 +20,12 @@ // - TS_ID_TOKEN: the ID token from the identity provider for workload identity federation. // Must be used together with TS_CLIENT_ID. 
If the value begins with "file:", it is // treated as a path to a file containing the token. -// - Note: TS_AUTHKEY is mutually exclusive with TS_CLIENT_ID, TS_CLIENT_SECRET, and TS_ID_TOKEN. -// TS_CLIENT_SECRET and TS_ID_TOKEN cannot be used together. +// - TS_AUDIENCE: the audience to use when requesting an ID token from a well-known identity provider +// to exchange with the control server for workload identity federation. Must be used together +// with TS_CLIENT_ID. +// - Note: TS_AUTHKEY is mutually exclusive with TS_CLIENT_ID, TS_CLIENT_SECRET, TS_ID_TOKEN, +// and TS_AUDIENCE. +// TS_CLIENT_SECRET, TS_ID_TOKEN, and TS_AUDIENCE cannot be used together. // - TS_HOSTNAME: the hostname to request for the node. // - TS_ROUTES: subnet routes to advertise. Explicitly setting it to an empty // value will cause containerboot to stop acting as a subnet router for any @@ -78,7 +82,7 @@ // directory that containers tailscaled config in file. The config file needs to be // named cap-.hujson. If this is set, TS_HOSTNAME, // TS_EXTRA_ARGS, TS_AUTHKEY, TS_CLIENT_ID, TS_CLIENT_SECRET, TS_ID_TOKEN, -// TS_ROUTES, TS_ACCEPT_DNS env vars must not be set. If this is set, +// TS_ROUTES, TS_ACCEPT_DNS, TS_AUDIENCE env vars must not be set. If this is set, // containerboot only runs `tailscaled --config ` // and not `tailscale up` or `tailscale set`. // The config file contents are currently read once on container start. 
diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index 216dd766e85ee..aab2b86314e23 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -26,6 +26,7 @@ type settings struct { ClientID string ClientSecret string IDToken string + Audience string Hostname string Routes *string // ProxyTargetIP is the destination IP to which all incoming @@ -92,6 +93,7 @@ func configFromEnv() (*settings, error) { ClientID: defaultEnv("TS_CLIENT_ID", ""), ClientSecret: defaultEnv("TS_CLIENT_SECRET", ""), IDToken: defaultEnv("TS_ID_TOKEN", ""), + Audience: defaultEnv("TS_AUDIENCE", ""), Hostname: defaultEnv("TS_HOSTNAME", ""), Routes: defaultEnvStringPointer("TS_ROUTES"), ServeConfigPath: defaultEnv("TS_SERVE_CONFIG", ""), @@ -247,17 +249,46 @@ func (s *settings) validate() error { if s.TailnetTargetFQDN != "" && s.TailnetTargetIP != "" { return errors.New("Both TS_TAILNET_TARGET_IP and TS_TAILNET_FQDN cannot be set") } - if s.TailscaledConfigFilePath != "" && (s.AcceptDNS != nil || s.AuthKey != "" || s.Routes != nil || s.ExtraArgs != "" || s.Hostname != "" || s.ClientID != "" || s.ClientSecret != "" || s.IDToken != "") { - return errors.New("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR cannot be set in combination with TS_HOSTNAME, TS_EXTRA_ARGS, TS_AUTHKEY, TS_ROUTES, TS_ACCEPT_DNS, TS_CLIENT_ID, TS_CLIENT_SECRET, TS_ID_TOKEN.") + if s.TailscaledConfigFilePath != "" && + (s.AcceptDNS != nil || + s.AuthKey != "" || + s.Routes != nil || + s.ExtraArgs != "" || + s.Hostname != "" || + s.ClientID != "" || + s.ClientSecret != "" || + s.IDToken != "" || + s.Audience != "") { + conflictingArgs := []string{ + "TS_HOSTNAME", + "TS_EXTRA_ARGS", + "TS_AUTHKEY", + "TS_ROUTES", + "TS_ACCEPT_DNS", + "TS_CLIENT_ID", + "TS_CLIENT_SECRET", + "TS_ID_TOKEN", + "TS_AUDIENCE", + } + return fmt.Errorf("TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR cannot be set in combination with %s.", strings.Join(conflictingArgs, ", ")) } if s.IDToken != "" && s.ClientID == "" { 
return errors.New("TS_ID_TOKEN is set but TS_CLIENT_ID is not set") } + if s.Audience != "" && s.ClientID == "" { + return errors.New("TS_AUDIENCE is set but TS_CLIENT_ID is not set") + } if s.IDToken != "" && s.ClientSecret != "" { return errors.New("TS_ID_TOKEN and TS_CLIENT_SECRET cannot both be set") } - if s.AuthKey != "" && (s.ClientID != "" || s.ClientSecret != "" || s.IDToken != "") { - return errors.New("TS_AUTHKEY cannot be used with TS_CLIENT_ID, TS_CLIENT_SECRET, or TS_ID_TOKEN") + if s.IDToken != "" && s.Audience != "" { + return errors.New("TS_ID_TOKEN and TS_AUDIENCE cannot both be set") + } + if s.Audience != "" && s.ClientSecret != "" { + return errors.New("TS_AUDIENCE and TS_CLIENT_SECRET cannot both be set") + } + if s.AuthKey != "" && (s.ClientID != "" || s.ClientSecret != "" || s.IDToken != "" || s.Audience != "") { + return errors.New("TS_AUTHKEY cannot be used with TS_CLIENT_ID, TS_CLIENT_SECRET, TS_ID_TOKEN, or TS_AUDIENCE.") } if s.AllowProxyingClusterTrafficViaIngress && s.UserspaceMode { return errors.New("EXPERIMENTAL_ALLOW_PROXYING_CLUSTER_TRAFFIC_VIA_INGRESS is not supported in userspace mode") diff --git a/cmd/containerboot/settings_test.go b/cmd/containerboot/settings_test.go index d97e786e6b334..576ea7f3eef3e 100644 --- a/cmd/containerboot/settings_test.go +++ b/cmd/containerboot/settings_test.go @@ -117,6 +117,7 @@ func TestValidateAuthMethods(t *testing.T) { clientID string clientSecret string idToken string + audience string errContains string }{ { @@ -144,11 +145,21 @@ func TestValidateAuthMethods(t *testing.T) { clientID: "client-id", idToken: "id-token", }, + { + name: "wif_client_id_and_audience", + clientID: "client-id", + audience: "audience", + }, { name: "id_token_without_client_id", idToken: "id-token", errContains: "TS_ID_TOKEN is set but TS_CLIENT_ID is not set", }, + { + name: "audience_without_client_id", + audience: "audience", + errContains: "TS_AUDIENCE is set but TS_CLIENT_ID is not set", + }, { name: 
"authkey_with_client_secret", authKey: "tskey-auth-xxx", @@ -156,12 +167,19 @@ func TestValidateAuthMethods(t *testing.T) { errContains: "TS_AUTHKEY cannot be used with", }, { - name: "authkey_with_wif", + name: "authkey_with_id_token", authKey: "tskey-auth-xxx", clientID: "client-id", idToken: "id-token", errContains: "TS_AUTHKEY cannot be used with", }, + { + name: "authkey_with_audience", + authKey: "tskey-auth-xxx", + clientID: "client-id", + audience: "audience", + errContains: "TS_AUTHKEY cannot be used with", + }, { name: "id_token_with_client_secret", clientID: "client-id", @@ -169,6 +187,20 @@ func TestValidateAuthMethods(t *testing.T) { idToken: "id-token", errContains: "TS_ID_TOKEN and TS_CLIENT_SECRET cannot both be set", }, + { + name: "id_token_with_audience", + clientID: "client-id", + idToken: "id-token", + audience: "audience", + errContains: "TS_ID_TOKEN and TS_AUDIENCE cannot both be set", + }, + { + name: "audience_with_client_secret", + clientID: "client-id", + clientSecret: "tskey-client-xxx", + audience: "audience", + errContains: "TS_AUDIENCE and TS_CLIENT_SECRET cannot both be set", + }, } for _, tt := range tests { @@ -178,6 +210,7 @@ func TestValidateAuthMethods(t *testing.T) { ClientID: tt.clientID, ClientSecret: tt.clientSecret, IDToken: tt.idToken, + Audience: tt.audience, } err := s.validate() if tt.errContains != "" { diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index 1374b1802046e..e5b0b8b8ed1b1 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -129,6 +129,9 @@ func tailscaleUp(ctx context.Context, cfg *settings) error { if cfg.IDToken != "" { args = append(args, "--id-token="+cfg.IDToken) } + if cfg.Audience != "" { + args = append(args, "--audience="+cfg.Audience) + } // --advertise-routes can be passed an empty string to configure a // device (that might have previously advertised subnet routes) to not // advertise any routes. 
Respect an empty string passed by a user and From c3b7f2405155c39b563b85801724dc8855d1fbdb Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Wed, 14 Jan 2026 18:20:00 +0000 Subject: [PATCH 0870/1093] ipn,ipn/local: always accept routes for Tailscale Services (cgnat range) (#18173) Updates #18198 Signed-off-by: chaosinthecrd Co-authored-by: James Tucker --- ipn/ipnlocal/local.go | 6 +- ipn/ipnlocal/local_test.go | 104 +++++++++++++++++++++++++++++++++- types/netmap/netmap.go | 8 ++- wgengine/wgcfg/nmcfg/nmcfg.go | 4 ++ 4 files changed, 116 insertions(+), 6 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index cebb961305a34..44b12826bcc50 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5383,7 +5383,7 @@ func magicDNSRootDomains(nm *netmap.NetworkMap) []dnsname.FQDN { // peerRoutes returns the routerConfig.Routes to access peers. // If there are over cgnatThreshold CGNAT routes, one big CGNAT route // is used instead. -func peerRoutes(logf logger.Logf, peers []wgcfg.Peer, cgnatThreshold int) (routes []netip.Prefix) { +func peerRoutes(logf logger.Logf, peers []wgcfg.Peer, cgnatThreshold int, routeAll bool) (routes []netip.Prefix) { tsULA := tsaddr.TailscaleULARange() cgNAT := tsaddr.CGNATRange() var didULA bool @@ -5413,7 +5413,7 @@ func peerRoutes(logf logger.Logf, peers []wgcfg.Peer, cgnatThreshold int) (route } if aip.IsSingleIP() && cgNAT.Contains(aip.Addr()) { cgNATIPs = append(cgNATIPs, aip) - } else { + } else if routeAll { routes = append(routes, aip) } } @@ -5461,7 +5461,7 @@ func (b *LocalBackend) routerConfigLocked(cfg *wgcfg.Config, prefs ipn.PrefsView SNATSubnetRoutes: !prefs.NoSNAT(), StatefulFiltering: doStatefulFiltering, NetfilterMode: prefs.NetfilterMode(), - Routes: peerRoutes(b.logf, cfg.Peers, singleRouteThreshold), + Routes: peerRoutes(b.logf, cfg.Peers, singleRouteThreshold, prefs.RouteAll()), NetfilterKind: netfilterKind, } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 
02997a0e12fce..bcc5ebaf26dbf 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -306,7 +306,7 @@ func TestPeerRoutes(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := peerRoutes(t.Logf, tt.peers, 2) + got := peerRoutes(t.Logf, tt.peers, 2, true) if !reflect.DeepEqual(got, tt.want) { t.Errorf("got = %v; want %v", got, tt.want) } @@ -7295,3 +7295,105 @@ func TestStripKeysFromPrefs(t *testing.T) { }) } } + +func TestRouteAllDisabled(t *testing.T) { + pp := netip.MustParsePrefix + + tests := []struct { + name string + peers []wgcfg.Peer + wantEndpoints []netip.Prefix + routeAll bool + }{ + { + name: "route_all_disabled", + routeAll: false, + peers: []wgcfg.Peer{ + { + AllowedIPs: []netip.Prefix{ + // if one ip in the Tailscale ULA range is added, the entire range is added to the router config + pp("fd7a:115c:a1e0::2501:9b83/128"), + pp("100.80.207.38/32"), + pp("100.80.207.56/32"), + pp("100.80.207.40/32"), + pp("100.94.122.93/32"), + pp("100.79.141.115/32"), + + // a /28 range will not be added, since this is not a Service IP range (which is always /32, a single IP) + pp("100.64.0.0/28"), + + // ips outside the tailscale cgnat/ula range are not added to the router config + pp("192.168.0.45/32"), + pp("fd7a:115c:b1e0::2501:9b83/128"), + pp("fdf8:f966:e27c:0:5:0:0:10/128"), + }, + }, + }, + wantEndpoints: []netip.Prefix{ + pp("100.80.207.38/32"), + pp("100.80.207.56/32"), + pp("100.80.207.40/32"), + pp("100.94.122.93/32"), + pp("100.79.141.115/32"), + pp("fd7a:115c:a1e0::/48"), + }, + }, + { + name: "route_all_enabled", + routeAll: true, + peers: []wgcfg.Peer{ + { + AllowedIPs: []netip.Prefix{ + // if one ip in the Tailscale ULA range is added, the entire range is added to the router config + pp("fd7a:115c:a1e0::2501:9b83/128"), + pp("100.80.207.38/32"), + pp("100.80.207.56/32"), + pp("100.80.207.40/32"), + pp("100.94.122.93/32"), + pp("100.79.141.115/32"), + + // ips outside the tailscale cgnat/ula range 
are not added to the router config + pp("192.168.0.45/32"), + pp("fd7a:115c:b1e0::2501:9b83/128"), + pp("fdf8:f966:e27c:0:5:0:0:10/128"), + }, + }, + }, + wantEndpoints: []netip.Prefix{ + pp("100.80.207.38/32"), + pp("100.80.207.56/32"), + pp("100.80.207.40/32"), + pp("100.94.122.93/32"), + pp("100.79.141.115/32"), + pp("192.168.0.45/32"), + pp("fd7a:115c:a1e0::/48"), + pp("fd7a:115c:b1e0::2501:9b83/128"), + pp("fdf8:f966:e27c:0:5:0:0:10/128"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + prefs := ipn.Prefs{RouteAll: tt.routeAll} + lb := newTestLocalBackend(t) + cfg := &wgcfg.Config{ + Peers: tt.peers, + } + + rcfg := lb.routerConfigLocked(cfg, prefs.View(), false) + for _, p := range rcfg.Routes { + found := false + for _, r := range tt.wantEndpoints { + if p.Addr() == r.Addr() { + found = true + break + } + } + if !found { + t.Errorf("unexpected prefix %q in router config", p.String()) + } + } + }) + } +} diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index c54562f4d5b53..18abd1c195024 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/tka" "tailscale.com/types/key" @@ -154,8 +155,11 @@ func (nm *NetworkMap) SelfNodeOrZero() tailcfg.NodeView { // AnyPeersAdvertiseRoutes reports whether any peer is advertising non-exit node routes. 
func (nm *NetworkMap) AnyPeersAdvertiseRoutes() bool { for _, p := range nm.Peers { - if p.PrimaryRoutes().Len() > 0 { - return true + // NOTE: (ChaosInTheCRD) if the peer being advertised is a tailscale ip, we ignore it in this check + for _, r := range p.PrimaryRoutes().All() { + if !tsaddr.IsTailscaleIP(r.Addr()) || !r.IsSingleIP() { + return true + } } } return false diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index 487e78d81218d..a42827337d5c6 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -11,6 +11,7 @@ import ( "net/netip" "strings" + "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -33,6 +34,9 @@ func cidrIsSubnet(node tailcfg.NodeView, cidr netip.Prefix) bool { if !cidr.IsSingleIP() { return true } + if tsaddr.IsTailscaleIP(cidr.Addr()) { + return false + } for _, selfCIDR := range node.Addresses().All() { if cidr == selfCIDR { return false From 5aeee1d8a576b29ddc6b6b0a8c3b526142fa9c9b Mon Sep 17 00:00:00 2001 From: Simon Law Date: Wed, 14 Jan 2026 11:53:14 -0800 Subject: [PATCH 0871/1093] .github/workflows: double the timeout for golangci-lint (#18404) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Recently, the golangci-lint workflow has been taking longer and longer to complete, causing it to timeout after the default of 5 minutes. Running error: context loading failed: failed to load packages: failed to load packages: failed to load with go/packages: context deadline exceeded Timeout exceeded: try increasing it by passing --timeout option Although PR #18398 enabled the Go module cache, bootstrapping with a cold cache still takes too long. This PR doubles the default 5 minute timeout for golangci-lint to 10 minutes so that golangci-lint can finish downloading all of its dependencies. 
Note that this doesn’t affect the 5 minute timeout configured in .golangci.yml, since running golangci-lint on your local instance should still be plenty fast. Fixes #18366 Signed-off-by: Simon Law --- .github/workflows/golangci-lint.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 69efcfd5b0839..684a094e26560 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -35,9 +35,13 @@ jobs: cache: true - name: golangci-lint - uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7.0.0 + uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0 with: version: v2.4.0 # Show only new issues if it's a pull request. only-new-issues: true + + # Loading packages with a cold cache takes a while: + args: --timeout=10m + From 1a79abf5fb0358242e77e8dedfd699a4d7e4e6c5 Mon Sep 17 00:00:00 2001 From: Nick O'Neill Date: Wed, 14 Jan 2026 14:19:17 -0800 Subject: [PATCH 0872/1093] VERSION.txt: this is v1.95.0 (#18414) Signed-off-by: Nick O'Neill --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 95784efddbc41..55f6ae93382d1 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.93.0 +1.95.0 From 54d77898da17c0051384c63ebcab9831586dbd48 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Wed, 14 Jan 2026 14:33:33 -0700 Subject: [PATCH 0873/1093] tool/gocross: update gocross-wrapper.ps1 to use absolute path for resolving tar gocross-wrapper.ps1 is written to use the version of tar that ships with Windows; we want to avoid conflicts with any other tar on the PATH, such as ones installed by MSYS and/or Cygwin.
Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- tool/gocross/gocross-wrapper.ps1 | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tool/gocross/gocross-wrapper.ps1 b/tool/gocross/gocross-wrapper.ps1 index fe0b46996204d..324b220c8319d 100644 --- a/tool/gocross/gocross-wrapper.ps1 +++ b/tool/gocross/gocross-wrapper.ps1 @@ -114,7 +114,12 @@ $bootstrapScriptBlock = { New-Item -Force -Path $toolchain -ItemType Directory | Out-Null Start-ChildScope -ScriptBlock { Set-Location -LiteralPath $toolchain - tar --strip-components=1 -xf "$toolchain.tar.gz" + + # Using an absolute path to the tar that ships with Windows + # to avoid conflicts with others (eg msys2). + $system32 = [System.Environment]::GetFolderPath([System.Environment+SpecialFolder]::System) + $tar = Join-Path $system32 'tar.exe' -Resolve + & $tar --strip-components=1 -xf "$toolchain.tar.gz" if ($LASTEXITCODE -ne 0) { throw "tar failed with exit code $LASTEXITCODE" } From 1cc6f3282e547fd38d77bf90e61d3ac5ebd62420 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Fri, 16 Jan 2026 13:29:12 +0000 Subject: [PATCH 0874/1093] k8s-operator,kube: allowing k8s api request events to be enabled via grants (#18393) Updates #35796 Signed-off-by: chaosinthecrd --- cmd/vet/jsontags_allowlist | 2 + k8s-operator/api-proxy/proxy.go | 126 +++++++++++++------- k8s-operator/api-proxy/proxy_events_test.go | 18 ++- k8s-operator/api-proxy/proxy_test.go | 10 +- k8s-operator/sessionrecording/hijacker.go | 2 + kube/kubetypes/grants.go | 10 +- 6 files changed, 118 insertions(+), 50 deletions(-) diff --git a/cmd/vet/jsontags_allowlist b/cmd/vet/jsontags_allowlist index 9526f44ef9d9a..b9f91d562cb46 100644 --- a/cmd/vet/jsontags_allowlist +++ b/cmd/vet/jsontags_allowlist @@ -221,6 +221,8 @@ OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.Event.Count OmitEmptyUnsupportedInV2 tailscale.com/kube/kubeapi.ObjectMeta.Generation OmitEmptyUnsupportedInV2 
tailscale.com/kube/kubeapi.Status.Code OmitEmptyUnsupportedInV2 tailscale.com/kube/kubetypes.KubernetesCapRule.EnforceRecorder +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubetypes.KubernetesCapRule.EnableEvents +OmitEmptyUnsupportedInV2 tailscale.com/kube/kubetypes.KubernetesCapRule.EnableSessionRecordings OmitEmptyUnsupportedInV2 tailscale.com/log/sockstatlog.event.IsCellularInterface OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.CastHeader.SrcNodeUserID OmitEmptyUnsupportedInV2 tailscale.com/sessionrecording.Source.NodeUserID diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index 762a52f1fdbfc..fcd57cd17e006 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -46,6 +46,10 @@ var ( whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) ) +const ( + eventsEnabledVar = "TS_EXPERIMENTAL_KUBE_API_EVENTS" +) + // NewAPIServerProxy creates a new APIServerProxy that's ready to start once Run // is called. No network traffic will flow until Run is called. // @@ -97,7 +101,7 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn upstreamURL: u, ts: ts, sendEventFunc: sessionrecording.SendEvent, - eventsEnabled: envknob.Bool("TS_EXPERIMENTAL_KUBE_API_EVENTS"), + eventsEnabled: envknob.Bool(eventsEnabledVar), } ap.rp = &httputil.ReverseProxy{ Rewrite: func(pr *httputil.ProxyRequest) { @@ -128,6 +132,10 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), } + if ap.eventsEnabled { + ap.log.Warnf("DEPRECATED: %q environment variable is deprecated, and will be removed in v1.96. See documentation for more detail.", eventsEnabledVar) + } + mode := "noauth" if ap.authMode { mode = "auth" @@ -196,6 +204,7 @@ type APIServerProxy struct { sendEventFunc func(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error // Flag used to enable sending API requests as events to tsrecorder. 
+ // Deprecated: events are now set via ACLs (see https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-your-tailnet-policy-file) eventsEnabled bool } @@ -207,13 +216,34 @@ func (ap *APIServerProxy) serveDefault(w http.ResponseWriter, r *http.Request) { return } - if err = ap.recordRequestAsEvent(r, who); err != nil { - msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) - ap.log.Errorf(msg) - http.Error(w, msg, http.StatusBadGateway) + c, err := determineRecorderConfig(who) + if err != nil { + ap.log.Errorf("error trying to determine whether the kubernetes api request %q needs to be recorded: %v", r.URL.String(), err) return } + if c.failOpen && len(c.recorderAddresses) == 0 { // will not record + ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) + return + } + ksr.CounterKubernetesAPIRequestEventsAttempted.Add(1) // at this point we know that users intended for this request to be recorded + if !c.failOpen && len(c.recorderAddresses) == 0 { + msg := fmt.Sprintf("forbidden: api request %q must be recorded, but no recorders are available.", r.URL.String()) + ap.log.Error(msg) + http.Error(w, msg, http.StatusForbidden) + return + } + + // NOTE: (ChaosInTheCRD) ap.eventsEnabled deprecated, remove in v1.96 + if c.enableEvents || ap.eventsEnabled { + if err = ap.recordRequestAsEvent(r, who, c.recorderAddresses, c.failOpen); err != nil { + msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) + ap.log.Errorf(msg) + http.Error(w, msg, http.StatusBadGateway) + return + } + } + counterNumRequestsProxied.Add(1) ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) @@ -256,35 +286,45 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request return } - if err = ap.recordRequestAsEvent(r, who); err != nil { - msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) - ap.log.Errorf(msg) - http.Error(w, msg, http.StatusBadGateway) - 
return - } - counterNumRequestsProxied.Add(1) - failOpen, addrs, err := determineRecorderConfig(who) + c, err := determineRecorderConfig(who) if err != nil { ap.log.Errorf("error trying to determine whether the 'kubectl %s' session needs to be recorded: %v", sessionType, err) return } - if failOpen && len(addrs) == 0 { // will not record + + if c.failOpen && len(c.recorderAddresses) == 0 { // will not record ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) return } - ksr.CounterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded - if !failOpen && len(addrs) == 0 { + ksr.CounterKubernetesAPIRequestEventsAttempted.Add(1) // at this point we know that users intended for this request to be recorded + if !c.failOpen && len(c.recorderAddresses) == 0 { msg := fmt.Sprintf("forbidden: 'kubectl %s' session must be recorded, but no recorders are available.", sessionType) ap.log.Error(msg) http.Error(w, msg, http.StatusForbidden) return } + // NOTE: (ChaosInTheCRD) ap.eventsEnabled deprecated, remove in v1.96 + if c.enableEvents || ap.eventsEnabled { + if err = ap.recordRequestAsEvent(r, who, c.recorderAddresses, c.failOpen); err != nil { + msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) + ap.log.Errorf(msg) + http.Error(w, msg, http.StatusBadGateway) + return + } + } + + if !c.enableRecordings { + ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) + return + } + ksr.CounterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded + wantsHeader := upgradeHeaderForProto[proto] if h := r.Header.Get(upgradeHeaderKey); h != wantsHeader { msg := fmt.Sprintf("[unexpected] unable to verify that streaming protocol is %s, wants Upgrade header %q, got: %q", proto, wantsHeader, h) - if failOpen { + if c.failOpen { msg = msg + "; failure mode is 'fail open'; continuing session without recording." 
ap.log.Warn(msg) ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) @@ -303,8 +343,8 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request SessionType: sessionType, TS: ap.ts, Who: who, - Addrs: addrs, - FailOpen: failOpen, + Addrs: c.recorderAddresses, + FailOpen: c.failOpen, Pod: r.PathValue(podNameKey), Namespace: r.PathValue(namespaceNameKey), Log: ap.log, @@ -314,21 +354,9 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request ap.rp.ServeHTTP(h, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } -func (ap *APIServerProxy) recordRequestAsEvent(req *http.Request, who *apitype.WhoIsResponse) error { - if !ap.eventsEnabled { - return nil - } - - failOpen, addrs, err := determineRecorderConfig(who) - if err != nil { - return fmt.Errorf("error trying to determine whether the kubernetes api request needs to be recorded: %w", err) - } +func (ap *APIServerProxy) recordRequestAsEvent(req *http.Request, who *apitype.WhoIsResponse, addrs []netip.AddrPort, failOpen bool) error { if len(addrs) == 0 { - if failOpen { - return nil - } else { - return fmt.Errorf("forbidden: kubernetes api request must be recorded, but no recorders are available") - } + return fmt.Errorf("no recorder addresses specified") } factory := &request.RequestInfoFactory{ @@ -537,20 +565,30 @@ func addImpersonationHeaders(r *http.Request, log *zap.SugaredLogger) error { return nil } +type recorderConfig struct { + failOpen bool + enableEvents bool + enableRecordings bool + recorderAddresses []netip.AddrPort +} + // determineRecorderConfig determines recorder config from requester's peer // capabilities. Determines whether a 'kubectl exec' session from this requester // needs to be recorded and what recorders the recording should be sent to. 
-func determineRecorderConfig(who *apitype.WhoIsResponse) (failOpen bool, recorderAddresses []netip.AddrPort, _ error) { +func determineRecorderConfig(who *apitype.WhoIsResponse) (c recorderConfig, _ error) { if who == nil { - return false, nil, errors.New("[unexpected] cannot determine caller") + return c, errors.New("[unexpected] cannot determine caller") } - failOpen = true + + c.failOpen = true + c.enableEvents = false + c.enableRecordings = true rules, err := tailcfg.UnmarshalCapJSON[kubetypes.KubernetesCapRule](who.CapMap, tailcfg.PeerCapabilityKubernetes) if err != nil { - return failOpen, nil, fmt.Errorf("failed to unmarshal Kubernetes capability: %w", err) + return c, fmt.Errorf("failed to unmarshal Kubernetes capability: %w", err) } if len(rules) == 0 { - return failOpen, nil, nil + return c, nil } for _, rule := range rules { @@ -559,13 +597,19 @@ func determineRecorderConfig(who *apitype.WhoIsResponse) (failOpen bool, recorde // recorders behind those addrs are online - else we // spend 30s trying to reach a recorder whose tailscale // status is offline. - recorderAddresses = append(recorderAddresses, rule.RecorderAddrs...) + c.recorderAddresses = append(c.recorderAddresses, rule.RecorderAddrs...) 
} if rule.EnforceRecorder { - failOpen = false + c.failOpen = false + } + if rule.EnableEvents { + c.enableEvents = true + } + if rule.EnableSessionRecordings { + c.enableRecordings = true } } - return failOpen, recorderAddresses, nil + return c, nil } var upgradeHeaderForProto = map[ksr.Protocol]string{ diff --git a/k8s-operator/api-proxy/proxy_events_test.go b/k8s-operator/api-proxy/proxy_events_test.go index 8bcf484368a35..e35be33a0e734 100644 --- a/k8s-operator/api-proxy/proxy_events_test.go +++ b/k8s-operator/api-proxy/proxy_events_test.go @@ -61,7 +61,6 @@ func TestRecordRequestAsEvent(t *testing.T) { log: zl.Sugar(), ts: &tsnet.Server{}, sendEventFunc: sender.Send, - eventsEnabled: true, } defaultWho := &apitype.WhoIsResponse{ @@ -76,7 +75,7 @@ func TestRecordRequestAsEvent(t *testing.T) { CapMap: tailcfg.PeerCapMap{ tailcfg.PeerCapabilityKubernetes: []tailcfg.RawMessage{ tailcfg.RawMessage(`{"recorderAddrs":["127.0.0.1:1234"]}`), - tailcfg.RawMessage(`{"enforceRecorder": true}`), + tailcfg.RawMessage(`{"enforceRecorder": true, "enableEvents": true}`), }, }, } @@ -310,6 +309,7 @@ func TestRecordRequestAsEvent(t *testing.T) { CapMap: tailcfg.PeerCapMap{ tailcfg.PeerCapabilityKubernetes: []tailcfg.RawMessage{ tailcfg.RawMessage(`{"recorderAddrs":["127.0.0.1:1234", "127.0.0.1:5678"]}`), + tailcfg.RawMessage(`{"enforceRecorder": true, "enableEvents": true}`), }, }, }, @@ -398,6 +398,7 @@ func TestRecordRequestAsEvent(t *testing.T) { }, setupSender: func() { sender.Reset() }, wantNumCalls: 0, + wantErr: true, }, { name: "error-sending", @@ -510,8 +511,19 @@ func TestRecordRequestAsEvent(t *testing.T) { tt.setupSender() req := tt.req() - err := ap.recordRequestAsEvent(req, tt.who) + c, err := determineRecorderConfig(tt.who) + if err != nil { + t.Fatalf("error trying to determine whether the kubernetes api request %q needs to be recorded: %v", req.URL.String(), err) + return + } + + if !c.enableEvents && tt.wantEvent != nil { + t.Errorf("expected event but events 
not enabled in CapMap. Want: %#v", tt.wantEvent) + return + } + + err = ap.recordRequestAsEvent(req, tt.who, c.recorderAddresses, c.failOpen) if (err != nil) != tt.wantErr { t.Fatalf("recordRequestAsEvent() error = %v, wantErr %v", err, tt.wantErr) } diff --git a/k8s-operator/api-proxy/proxy_test.go b/k8s-operator/api-proxy/proxy_test.go index 71bf65648931c..14e6554236234 100644 --- a/k8s-operator/api-proxy/proxy_test.go +++ b/k8s-operator/api-proxy/proxy_test.go @@ -166,15 +166,15 @@ func Test_determineRecorderConfig(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gotFailOpen, gotRecorderAddresses, err := determineRecorderConfig(tt.who) + c, err := determineRecorderConfig(tt.who) if err != nil { t.Fatalf("unexpected error: %v", err) } - if gotFailOpen != tt.wantFailOpen { - t.Errorf("determineRecorderConfig() gotFailOpen = %v, want %v", gotFailOpen, tt.wantFailOpen) + if c.failOpen != tt.wantFailOpen { + t.Errorf("determineRecorderConfig() gotFailOpen = %v, want %v", c.failOpen, tt.wantFailOpen) } - if !reflect.DeepEqual(gotRecorderAddresses, tt.wantRecorderAddresses) { - t.Errorf("determineRecorderConfig() gotRecorderAddresses = %v, want %v", gotRecorderAddresses, tt.wantRecorderAddresses) + if !reflect.DeepEqual(c.recorderAddresses, tt.wantRecorderAddresses) { + t.Errorf("determineRecorderConfig() gotRecorderAddresses = %v, want %v", c.recorderAddresses, tt.wantRecorderAddresses) } }) } diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 2d6c94710e866..7345a407c8faa 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -52,6 +52,8 @@ var ( // CounterSessionRecordingsAttempted counts the number of session recording attempts. 
CounterSessionRecordingsAttempted = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_attempted") + CounterKubernetesAPIRequestEventsAttempted = clientmetric.NewCounter("k8s_auth_proxy_api_request_event_recording_attempted") + // counterSessionRecordingsUploaded counts the number of successfully uploaded session recordings. counterSessionRecordingsUploaded = clientmetric.NewCounter("k8s_auth_proxy_session_recordings_uploaded") ) diff --git a/kube/kubetypes/grants.go b/kube/kubetypes/grants.go index 4dc278ff14d4c..d293ae5792e41 100644 --- a/kube/kubetypes/grants.go +++ b/kube/kubetypes/grants.go @@ -38,8 +38,16 @@ type KubernetesCapRule struct { // Default is to fail open. // The field name matches `EnforceRecorder` field with equal semantics for Tailscale SSH // session recorder. - // https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-acls + // https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-your-tailnet-policy-file EnforceRecorder bool `json:"enforceRecorder,omitempty"` + // EnableEvents defines whether kubectl API request events (beta) + // should be recorded or not. + // https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-your-tailnet-policy-file + EnableEvents bool `json:"enableEvents,omitempty"` + // EnableSessionRecordings defines whether kubectl sessions + // (e.g., exec, attach) should be recorded or not. 
+ // https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-your-tailnet-policy-file + EnableSessionRecordings bool `json:"enableSessionRecordings,omitempty"` } // ImpersonateRule defines how a request from the tailnet identity matching From 1478028591283068717b68bbf4ab90ccf01457ab Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 16 Jan 2026 11:21:17 -0600 Subject: [PATCH 0875/1093] docs/windows/policy: use a separate value to track the configuration state of EnableDNSRegistration Policy editors, such as gpedit.msc and gpme.msc, rely on both the presence and the value of the registry value to determine whether a policy is enabled. Unless an enabledValue is specified explicitly, it defaults to REG_DWORD 1. Therefore, we cannot rely on the same registry value to track the policy configuration state when it is already used by a policy option, such as a dropdown. Otherwise, while the policy setting will be written and function correctly, it will appear as Not Configured in the policy editor due to the value mismatch (for example, REG_SZ "always" vs REG_DWORD 1). In this PR, we update the DNSRegistration policy setting to use the DNSRegistrationConfigured registry value for tracking. This change has no effect on the client side and exists solely to satisfy ADMX and policy editor requirements. 
Updates #14917 Signed-off-by: Nick Khyl --- docs/windows/policy/tailscale.admx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/windows/policy/tailscale.admx b/docs/windows/policy/tailscale.admx index 7bd31ac9c597d..7cc174b06a8cd 100644 --- a/docs/windows/policy/tailscale.admx +++ b/docs/windows/policy/tailscale.admx @@ -231,7 +231,7 @@ never - + From 643e91f2eb8b3e3bc7a12b3e79a2df580684e3d0 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Fri, 16 Jan 2026 14:53:23 -0500 Subject: [PATCH 0876/1093] net/netmon: move TailscaleInterfaceIndex out of netmon.State (#18428) fixes tailscale/tailscale#18418 Both Serve and PeerAPI broke when we moved the TailscaleInterfaceName into State, which is updated asynchronously and may not be available when we configure the listeners. This extracts the explicit interface name property from netmon.State and adds it as a static struct with getters that have proper error handling. The bug is only found in sandboxed Darwin clients, where we need to know the Tailscale interface details in order to set up the listeners correctly (they must bind to our interface explicitly to escape the network sandboxing that is applied by NECP). Currently only sandboxed macOS and Plan9 set this but it will also be useful on Windows to simplify interface filtering in netns.
Signed-off-by: Jonathan Nobels --- cmd/tailscaled/tailscaled.go | 3 +- ipn/ipnlocal/local.go | 13 ++++- ipn/ipnlocal/peerapi.go | 9 +++ ipn/ipnlocal/serve.go | 17 ++++-- net/netmon/interfaces.go | 103 +++++++++++++++++++++++++++++++++++ net/netmon/loghelper_test.go | 2 +- net/netmon/netmon.go | 49 +++++------------ net/netmon/netmon_test.go | 16 +++++- net/netmon/state.go | 32 +++++------ 9 files changed, 184 insertions(+), 60 deletions(-) create mode 100644 net/netmon/interfaces.go diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 7c19ebb422b87..410ae00bc0716 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -799,8 +799,9 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo if runtime.GOOS == "plan9" { // TODO(bradfitz): why don't we do this on all platforms? + // TODO(barnstar): we do it on sandboxed darwin now // We should. Doing it just on plan9 for now conservatively. - sys.NetMon.Get().SetTailscaleInterfaceName(devName) + netmon.SetTailscaleInterfaceProps(devName, 0) } r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker.Get(), sys.Bus.Get()) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 44b12826bcc50..066d8ba0a58ef 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -565,7 +565,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo // Call our linkChange code once with the current state. // Following changes are triggered via the eventbus. 
- cd, err := netmon.NewChangeDelta(nil, b.interfaceState, false, netMon.TailscaleInterfaceName(), false) + cd, err := netmon.NewChangeDelta(nil, b.interfaceState, false, false) if err != nil { b.logf("[unexpected] setting initial netmon state failed: %v", err) } else { @@ -5321,7 +5321,11 @@ func (b *LocalBackend) initPeerAPIListenerLocked() { var err error skipListen := i > 0 && isNetstack if !skipListen { - ln, err = ps.listen(a.Addr(), b.interfaceState.TailscaleInterfaceIndex) + // We don't care about the error here. Not all platforms set this. + // If ps.listen needs it, it will check for zero values and error out. + tsIfIndex, _ := netmon.TailscaleInterfaceIndex() + + ln, err = ps.listen(a.Addr(), tsIfIndex) if err != nil { if peerAPIListenAsync { b.logf("[v1] possibly transient peerapi listen(%q) error, will try again on linkChange: %v", a.Addr(), err) @@ -5329,6 +5333,11 @@ func (b *LocalBackend) initPeerAPIListenerLocked() { // ("peerAPIListeners too low"). continue } + // Sandboxed macOS specifically requires the interface index to be non-zero. + if version.IsSandboxedMacOS() && tsIfIndex == 0 { + b.logf("[v1] peerapi listen(%q) error: interface index is 0 on darwin; try restarting tailscaled", a.Addr()) + continue + } b.logf("[unexpected] peerapi listen(%q) error: %v", a.Addr(), err) continue } diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 20c61c0ec6c52..318d9bf6bb72f 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -41,6 +41,8 @@ import ( "tailscale.com/wgengine/filter" ) +// initListenConfig, if non-nil, is called during peerAPIListener setup. It is used only +// on iOS and macOS to set socket options to bind the listener to the Tailscale interface. var initListenConfig func(config *net.ListenConfig, addr netip.Addr, tunIfIndex int) error // peerDNSQueryHandler is implemented by tsdns.Resolver. 
@@ -69,6 +71,13 @@ func (s *peerAPIServer) listen(ip netip.Addr, tunIfIndex int) (ln net.Listener, // On iOS/macOS, this sets the lc.Control hook to // setsockopt the interface index to bind to, to get // out of the network sandbox. + + // A zero tunIfIndex is invalid for peerapi. A zero value will not get us + // out of the network sandbox. Caller should log and retry. + if tunIfIndex == 0 { + return nil, fmt.Errorf("peerapi: cannot listen on %s with tunIfIndex 0", ipStr) + } + if err := initListenConfig(&lc, ip, tunIfIndex); err != nil { return nil, err } diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 4d6055bbd81e8..9fca3db69b540 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -36,6 +36,7 @@ import ( "github.com/pires/go-proxyproto" "go4.org/mem" "tailscale.com/ipn" + "tailscale.com/net/netmon" "tailscale.com/net/netutil" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -166,16 +167,24 @@ func (s *localListener) Run() { var lc net.ListenConfig if initListenConfig != nil { + ifIndex, err := netmon.TailscaleInterfaceIndex() + if err != nil { + s.logf("localListener failed to get Tailscale interface index %v, backing off: %v", s.ap, err) + s.bo.BackOff(s.ctx, err) + continue + } + // On macOS, this sets the lc.Control hook to // setsockopt the interface index to bind to. This is - // required by the network sandbox to allow binding to - // a specific interface. Without this hook, the system - // chooses a default interface to bind to. - if err := initListenConfig(&lc, ip, s.b.interfaceState.TailscaleInterfaceIndex); err != nil { + // required by the network sandbox which will not automatically + // bind to the tailscale interface to prevent routing loops. + // Explicit binding allows us to bypass that restriction. 
+ if err := initListenConfig(&lc, ip, ifIndex); err != nil { s.logf("localListener failed to init listen config %v, backing off: %v", s.ap, err) s.bo.BackOff(s.ctx, err) continue } + // On macOS (AppStore or macsys) and if we're binding to a privileged port, if version.IsSandboxedMacOS() && s.ap.Port() < 1024 { // On macOS, we need to bind to ""/all-interfaces due to diff --git a/net/netmon/interfaces.go b/net/netmon/interfaces.go new file mode 100644 index 0000000000000..4cf93973c6473 --- /dev/null +++ b/net/netmon/interfaces.go @@ -0,0 +1,103 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package netmon + +import ( + "errors" + "net" + + "tailscale.com/syncs" +) + +type ifProps struct { + mu syncs.Mutex + name string // interface name, if known/set + index int // interface index, if known/set +} + +// tsIfProps tracks the properties (name and index) of the tailscale interface. +// There is only one tailscale interface per tailscaled instance. +var tsIfProps ifProps + +func (p *ifProps) tsIfName() string { + p.mu.Lock() + defer p.mu.Unlock() + return p.name +} + +func (p *ifProps) tsIfIndex() int { + p.mu.Lock() + defer p.mu.Unlock() + return p.index +} + +func (p *ifProps) set(ifName string, ifIndex int) { + p.mu.Lock() + defer p.mu.Unlock() + p.name = ifName + p.index = ifIndex +} + +// TODO (barnstar): This doesn't need the Monitor receiver anymore but we're +// keeping it for API compatibility to avoid a breaking change.  This can be +// removed when the various clients have switched to SetTailscaleInterfaceProps +func (m *Monitor) SetTailscaleInterfaceName(ifName string) { + SetTailscaleInterfaceProps(ifName, 0) +} + +// SetTailscaleInterfaceProps sets the name of the Tailscale interface and +// its index for use by various listeners/dialers. If the index is zero, +// an attempt will be made to look it up by name. This makes no attempt +// to validate that the interface exists at the time of calling. 
+// +// If this method is called, it is the responsibility of the caller to +// update the interface name and index if they change. +// +// This should be called as early as possible during tailscaled startup. +func SetTailscaleInterfaceProps(ifName string, ifIndex int) { + if ifIndex != 0 { + tsIfProps.set(ifName, ifIndex) + return + } + + ifaces, err := net.Interfaces() + if err != nil { + return + } + + for _, iface := range ifaces { + if iface.Name == ifName { + ifIndex = iface.Index + break + } + } + + tsIfProps.set(ifName, ifIndex) +} + +// TailscaleInterfaceName returns the name of the Tailscale interface. +// For example, "tailscale0", "tun0", "utun3", etc or an error if unset. +// +// Callers must handle errors, as the Tailscale interface +// name may not be set in some environments. +func TailscaleInterfaceName() (string, error) { + name := tsIfProps.tsIfName() + if name == "" { + return "", errors.New("Tailscale interface name not set") + } + return name, nil +} + +// TailscaleInterfaceIndex returns the index of the Tailscale interface or +// an error if unset. +// +// Callers must handle errors, as the Tailscale interface +// index may not be set in some environments. +func TailscaleInterfaceIndex() (int, error) { + index := tsIfProps.tsIfIndex() + if index == 0 { + return 0, errors.New("Tailscale interface index not set") + } + return index, nil +} diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go index 968c2fd41d950..468a12505f322 100644 --- a/net/netmon/loghelper_test.go +++ b/net/netmon/loghelper_test.go @@ -64,7 +64,7 @@ func syncTestLinkChangeLogLimiter(t *testing.T) { // InjectEvent doesn't work because it's not a major event, so we // instead inject the event ourselves. 
injector := eventbustest.NewInjector(t, bus) - cd, err := NewChangeDelta(nil, &State{}, true, "tailscale0", true) + cd, err := NewChangeDelta(nil, &State{}, true, true) if err != nil { t.Fatal(err) } diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index 49fb426ae1993..e18bc392dd196 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -78,8 +78,7 @@ type Monitor struct { goroutines sync.WaitGroup wallTimer *time.Timer // nil until Started; re-armed AfterFunc per tick lastWall time.Time - timeJumped bool // whether we need to send a changed=true after a big time jump - tsIfName string // tailscale interface name, if known/set ("tailscale0", "utun3", ...) + timeJumped bool // whether we need to send a changed=true after a big time jump } // ChangeFunc is a callback function registered with Monitor that's called when the @@ -103,10 +102,6 @@ type ChangeDelta struct { // come out of sleep. TimeJumped bool - // The tailscale interface name, e.g. "tailscale0", "utun3", etc. Not all - // platforms know this or set it. Copied from netmon.Monitor.tsIfName. - TailscaleIfaceName string - DefaultRouteInterface string // Computed Fields @@ -134,12 +129,11 @@ func (cd *ChangeDelta) CurrentState() *State { // NewChangeDelta builds a ChangeDelta and eagerly computes the cached fields. // forceViability, if true, forces DefaultInterfaceMaybeViable to be true regardless of the // actual state of the default interface. This is useful in testing. 
-func NewChangeDelta(old, new *State, timeJumped bool, tsIfName string, forceViability bool) (*ChangeDelta, error) { +func NewChangeDelta(old, new *State, timeJumped bool, forceViability bool) (*ChangeDelta, error) { cd := ChangeDelta{ - old: old, - new: new, - TimeJumped: timeJumped, - TailscaleIfaceName: tsIfName, + old: old, + new: new, + TimeJumped: timeJumped, } if cd.new == nil { @@ -162,8 +156,10 @@ func NewChangeDelta(old, new *State, timeJumped bool, tsIfName string, forceViab cd.DefaultRouteInterface = new.DefaultRouteInterface defIf := new.Interface[cd.DefaultRouteInterface] + tsIfName, err := TailscaleInterfaceName() + // The default interface is not viable if it is down or it is the Tailscale interface itself. - if !forceViability && (!defIf.IsUp() || cd.DefaultRouteInterface == tsIfName) { + if !forceViability && (!defIf.IsUp() || (err == nil && cd.DefaultRouteInterface == tsIfName)) { cd.DefaultInterfaceMaybeViable = false } else { cd.DefaultInterfaceMaybeViable = true @@ -223,10 +219,11 @@ func (cd *ChangeDelta) isInterestingInterfaceChange() bool { } // Compare interfaces in both directions. Old to new and new to old. + tsIfName, ifNameErr := TailscaleInterfaceName() for iname, oldInterface := range cd.old.Interface { - if iname == cd.TailscaleIfaceName { - // Ignore changes in the Tailscale interface itself. 
+ if ifNameErr == nil && iname == tsIfName { + // Ignore changes in the Tailscale interface itself continue } oldIps := filterRoutableIPs(cd.old.InterfaceIPs[iname]) @@ -259,7 +256,8 @@ func (cd *ChangeDelta) isInterestingInterfaceChange() bool { } for iname, newInterface := range cd.new.Interface { - if iname == cd.TailscaleIfaceName { + if ifNameErr == nil && iname == tsIfName { + // Ignore changes in the Tailscale interface itself continue } newIps := filterRoutableIPs(cd.new.InterfaceIPs[iname]) @@ -360,24 +358,7 @@ func (m *Monitor) InterfaceState() *State { } func (m *Monitor) interfaceStateUncached() (*State, error) { - return getState(m.tsIfName) -} - -// SetTailscaleInterfaceName sets the name of the Tailscale interface. For -// example, "tailscale0", "tun0", "utun3", etc. -// -// This must be called only early in tailscaled startup before the monitor is -// used. -func (m *Monitor) SetTailscaleInterfaceName(ifName string) { - m.mu.Lock() - defer m.mu.Unlock() - m.tsIfName = ifName -} - -func (m *Monitor) TailscaleInterfaceName() string { - m.mu.Lock() - defer m.mu.Unlock() - return m.tsIfName + return getState(tsIfProps.tsIfName()) } // GatewayAndSelfIP returns the current network's default gateway, and @@ -598,7 +579,7 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { return } - delta, err := NewChangeDelta(oldState, newState, timeJumped, m.tsIfName, false) + delta, err := NewChangeDelta(oldState, newState, timeJumped, false) if err != nil { m.logf("[unexpected] error creating ChangeDelta: %v", err) return diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index 8fbf512ddb50f..50519b4a9c531 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -159,7 +159,7 @@ func TestMonitorMode(t *testing.T) { // tests (*ChangeDelta).RebindRequired func TestRebindRequired(t *testing.T) { - // s1 cannot be nil by definition + // s1 must not be nil by definition tests := []struct { name string s1, s2 
*State @@ -478,9 +478,11 @@ func TestRebindRequired(t *testing.T) { withIsInterestingInterface(t, func(ni Interface, pfxs []netip.Prefix) bool { return !strings.HasPrefix(ni.Name, "boring") }) + saveAndRestoreTailscaleIfaceProps(t) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + // Populate dummy interfaces where missing. for _, s := range []*State{tt.s1, tt.s2} { if s == nil { @@ -495,7 +497,8 @@ func TestRebindRequired(t *testing.T) { } } - cd, err := NewChangeDelta(tt.s1, tt.s2, false, tt.tsIfName, true) + SetTailscaleInterfaceProps(tt.tsIfName, 1) + cd, err := NewChangeDelta(tt.s1, tt.s2, false, true) if err != nil { t.Fatalf("NewChangeDelta error: %v", err) } @@ -507,6 +510,15 @@ func TestRebindRequired(t *testing.T) { } } +func saveAndRestoreTailscaleIfaceProps(t *testing.T) { + t.Helper() + index, _ := TailscaleInterfaceIndex() + name, _ := TailscaleInterfaceName() + t.Cleanup(func() { + SetTailscaleInterfaceProps(name, index) + }) +} + func withIsInterestingInterface(t *testing.T, fn func(Interface, []netip.Prefix) bool) { t.Helper() old := IsInterestingInterface diff --git a/net/netmon/state.go b/net/netmon/state.go index aefbbb22d2830..79dd8a01ba9e1 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -287,9 +287,6 @@ type State struct { // PAC is the URL to the Proxy Autoconfig URL, if applicable. PAC string - - // TailscaleInterfaceIndex is the index of the Tailscale interface - TailscaleInterfaceIndex int } func (s *State) String() string { @@ -473,15 +470,22 @@ func hasTailscaleIP(pfxs []netip.Prefix) bool { } func isTailscaleInterface(name string, ips []netip.Prefix) bool { + // Sandboxed macOS and Plan9 (and anything else that explicitly calls SetTailscaleInterfaceProps). + tsIfName, err := TailscaleInterfaceName() + if err == nil { + // If we've been told the Tailscale interface name, use that. 
+ return name == tsIfName + } + + // The sandboxed app should (as of 1.92) set the tun interface name via SetTailscaleInterfaceProps + // early in the startup process. The non-sandboxed app does not. + // TODO (barnstar): If Wireguard created the tun device on darwin, it should know the name and it should + // be explicitly set instead checking addresses here. if runtime.GOOS == "darwin" && strings.HasPrefix(name, "utun") && hasTailscaleIP(ips) { - // On macOS in the sandboxed app (at least as of - // 2021-02-25), we often see two utun devices - // (e.g. utun4 and utun7) with the same IPv4 and IPv6 - // addresses. Just remove all utun devices with - // Tailscale IPs until we know what's happening with - // macOS NetworkExtensions and utun devices. return true } + + // Windows, Linux... return name == "Tailscale" || // as it is on Windows strings.HasPrefix(name, "tailscale") // TODO: use --tun flag value, etc; see TODO in method doc } @@ -505,18 +509,15 @@ func getState(optTSInterfaceName string) (*State, error) { s.Interface[ni.Name] = ni s.InterfaceIPs[ni.Name] = append(s.InterfaceIPs[ni.Name], pfxs...) - // Skip uninteresting interfaces. 
+ // Skip uninteresting interfaces if IsInterestingInterface != nil && !IsInterestingInterface(ni, pfxs) { return } - if isTailscaleInterface(ni.Name, pfxs) { - s.TailscaleInterfaceIndex = ni.Index - } - if !ifUp || isTSInterfaceName || isTailscaleInterface(ni.Name, pfxs) { return } + for _, pfx := range pfxs { if pfx.Addr().IsLoopback() { continue @@ -803,8 +804,7 @@ func (m *Monitor) HasCGNATInterface() (bool, error) { hasCGNATInterface := false cgnatRange := tsaddr.CGNATRange() err := ForeachInterface(func(i Interface, pfxs []netip.Prefix) { - isTSInterfaceName := m.tsIfName != "" && i.Name == m.tsIfName - if hasCGNATInterface || !i.IsUp() || isTSInterfaceName || isTailscaleInterface(i.Name, pfxs) { + if hasCGNATInterface || !i.IsUp() || isTailscaleInterface(i.Name, pfxs) { return } for _, pfx := range pfxs { From 1b88e93ff5e6f984f52bbdbedad45db7287619fd Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Tue, 13 Jan 2026 14:26:20 -0700 Subject: [PATCH 0877/1093] ipn/ipnlocal: allow retrieval of serve config ETags from local API This change adds API to ipn.LocalBackend to retrieve the ETag when querying for the current serve config. This allows consumers of ipn.LocalBackend.SetServeConfig to utilize the concurrency control offered by ETags. Previous to this change, utilizing serve config ETags required copying the local backend's internal ETag calcuation. The local API server was previously copying the local backend's ETag calculation as described above. With this change, the local API server now uses the new ETag retrieval function instead. Serve config ETags are therefore now opaque to clients, in line with best practices. 
Fixes tailscale/corp#35857 Signed-off-by: Harry Harpham --- ipn/ipnlocal/serve.go | 35 +++++++++++++++++++++++++--------- ipn/ipnlocal/serve_test.go | 39 +++++++++++++++++--------------------- ipn/localapi/serve.go | 10 +++++----- 3 files changed, 48 insertions(+), 36 deletions(-) diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 9fca3db69b540..a857147e1adab 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -302,6 +302,15 @@ func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint1 } } +func generateServeConfigETag(sc ipn.ServeConfigView) (string, error) { + j, err := json.Marshal(sc) + if err != nil { + return "", fmt.Errorf("encoding config: %w", err) + } + sum := sha256.Sum256(j) + return hex.EncodeToString(sum[:]), nil +} + // SetServeConfig establishes or replaces the current serve config. // ETag is an optional parameter to enforce Optimistic Concurrency Control. // If it is an empty string, then the config will be overwritten. @@ -336,17 +345,11 @@ func (b *LocalBackend) setServeConfigLocked(config *ipn.ServeConfig, etag string // not changed from the last config. prevConfig := b.serveConfig if etag != "" { - // Note that we marshal b.serveConfig - // and not use b.lastServeConfJSON as that might - // be a Go nil value, which produces a different - // checksum from a JSON "null" value. 
- prevBytes, err := json.Marshal(prevConfig) + prevETag, err := generateServeConfigETag(prevConfig) if err != nil { - return fmt.Errorf("error encoding previous config: %w", err) + return fmt.Errorf("generating ETag for previous config: %w", err) } - sum := sha256.Sum256(prevBytes) - previousEtag := hex.EncodeToString(sum[:]) - if etag != previousEtag { + if etag != prevETag { return ErrETagMismatch } } @@ -401,6 +404,20 @@ func (b *LocalBackend) ServeConfig() ipn.ServeConfigView { return b.serveConfig } +// ServeConfigETag provides a view of the current serve mappings and an ETag, +// which can later be provided to [LocalBackend.SetServeConfig] to implement +// Optimistic Concurrency Control. +// +// If serving is not configured, the returned view is not Valid. +func (b *LocalBackend) ServeConfigETag() (scv ipn.ServeConfigView, etag string, err error) { + sc := b.ServeConfig() + etag, err = generateServeConfigETag(sc) + if err != nil { + return ipn.ServeConfigView{}, "", fmt.Errorf("generating ETag: %w", err) + } + return sc, etag, nil +} + // DeleteForegroundSession deletes a ServeConfig's foreground session // in the LocalBackend if it exists. It also ensures check, delete, and // set operations happen within the same mutex lock to avoid any races. diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 6ee2181a0aaa2..0892545cceec8 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -9,9 +9,7 @@ import ( "bytes" "cmp" "context" - "crypto/sha256" "crypto/tls" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -222,16 +220,6 @@ func TestGetServeHandler(t *testing.T) { } } -func getEtag(t *testing.T, b any) string { - t.Helper() - bts, err := json.Marshal(b) - if err != nil { - t.Fatal(err) - } - sum := sha256.Sum256(bts) - return hex.EncodeToString(sum[:]) -} - // TestServeConfigForeground tests the inter-dependency // between a ServeConfig and a WatchIPNBus: // 1. 
Creating a WatchIPNBus returns a sessionID, that @@ -544,8 +532,14 @@ func TestServeConfigServices(t *testing.T) { func TestServeConfigETag(t *testing.T) { b := newTestBackend(t) - // a nil config with initial etag should succeed - err := b.SetServeConfig(nil, getEtag(t, nil)) + // the etag should be valid even when there is no config + _, emptyStateETag, err := b.ServeConfigETag() + if err != nil { + t.Fatal(err) + } + + // a nil config with the empty-state etag should succeed + err = b.SetServeConfig(nil, emptyStateETag) if err != nil { t.Fatal(err) } @@ -556,7 +550,7 @@ func TestServeConfigETag(t *testing.T) { t.Fatal("expected an error but got nil") } - // a new config with no etag should succeed + // a new config with the empty-state etag should succeed conf := &ipn.ServeConfig{ Web: map[ipn.HostPort]*ipn.WebServerConfig{ "example.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{ @@ -564,15 +558,14 @@ func TestServeConfigETag(t *testing.T) { }}, }, } - err = b.SetServeConfig(conf, getEtag(t, nil)) + err = b.SetServeConfig(conf, emptyStateETag) if err != nil { t.Fatal(err) } - confView := b.ServeConfig() - etag := getEtag(t, confView) - if etag == "" { - t.Fatal("expected to get an etag but got an empty string") + confView, etag, err := b.ServeConfigETag() + if err != nil { + t.Fatal(err) } conf = confView.AsStruct() mak.Set(&conf.AllowFunnel, "example.ts.net:443", true) @@ -596,8 +589,10 @@ func TestServeConfigETag(t *testing.T) { } // replacing an existing config with the new etag should succeed - newCfg := b.ServeConfig() - etag = getEtag(t, newCfg) + _, etag, err = b.ServeConfigETag() + if err != nil { + t.Fatal(err) + } err = b.SetServeConfig(nil, etag) if err != nil { t.Fatal(err) diff --git a/ipn/localapi/serve.go b/ipn/localapi/serve.go index 56c8b486cf93c..efbbde06ff954 100644 --- a/ipn/localapi/serve.go +++ b/ipn/localapi/serve.go @@ -6,8 +6,6 @@ package localapi import ( - "crypto/sha256" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -31,14 
+29,16 @@ func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, "serve config denied", http.StatusForbidden) return } - config := h.b.ServeConfig() + config, etag, err := h.b.ServeConfigETag() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } bts, err := json.Marshal(config) if err != nil { http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError) return } - sum := sha256.Sum256(bts) - etag := hex.EncodeToString(sum[:]) w.Header().Set("Etag", etag) w.Header().Set("Content-Type", "application/json") w.Write(bts) From 3840183be9d0494291ebfaf352b7b1e02a6c26ad Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Tue, 13 Jan 2026 14:36:12 -0700 Subject: [PATCH 0878/1093] tsnet: add support for Services This change allows tsnet nodes to act as Service hosts by adding a new function, tsnet.Server.ListenService. Invoking this function will advertise the node as a host for the Service and create a listener to receive traffic for the Service. 
Fixes #17697 Fixes tailscale/corp#27200 Signed-off-by: Harry Harpham --- ipn/ipnlocal/cert.go | 19 + ipn/ipnlocal/local.go | 4 + .../example/tsnet-services/tsnet-services.go | 82 ++++ ...snet_listen_service_multiple_ports_test.go | 69 +++ tsnet/example_tsnet_test.go | 55 +++ tsnet/tsnet.go | 274 +++++++++++- tsnet/tsnet_test.go | 423 +++++++++++++++++- tstest/integration/testcontrol/testcontrol.go | 92 +++- 8 files changed, 983 insertions(+), 35 deletions(-) create mode 100644 tsnet/example/tsnet-services/tsnet-services.go create mode 100644 tsnet/example_tsnet_listen_service_multiple_ports_test.go diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 8804fcb5ce2e8..b389c93e7e971 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -107,6 +107,15 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK // If a cert is expired, or expires sooner than minValidity, it will be renewed // synchronously. Otherwise it will be renewed asynchronously. func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string, minValidity time.Duration) (*TLSCertKeyPair, error) { + b.mu.Lock() + getCertForTest := b.getCertForTest + b.mu.Unlock() + + if getCertForTest != nil { + testenv.AssertInTest() + return getCertForTest(domain) + } + if !validLookingCertDomain(domain) { return nil, errors.New("invalid domain") } @@ -303,6 +312,16 @@ func (b *LocalBackend) getCertStore() (certStore, error) { return certFileStore{dir: dir, testRoots: testX509Roots}, nil } +// ConfigureCertsForTest sets a certificate retrieval function to be used by +// this local backend, skipping the usual ACME certificate registration. Should +// only be used in tests. +func (b *LocalBackend) ConfigureCertsForTest(getCert func(hostname string) (*TLSCertKeyPair, error)) { + testenv.AssertInTest() + b.mu.Lock() + b.getCertForTest = getCert + b.mu.Unlock() +} + // certFileStore implements certStore by storing the cert & key files in the named directory. 
type certFileStore struct { dir string diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 066d8ba0a58ef..2f05a4dbbc9ba 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -399,6 +399,10 @@ type LocalBackend struct { // hardwareAttested is whether backend should use a hardware-backed key to // bind the node identity to this device. hardwareAttested atomic.Bool + + // getCertForTest is used to retrieve TLS certificates in tests. + // See [LocalBackend.ConfigureCertsForTest]. + getCertForTest func(hostname string) (*TLSCertKeyPair, error) } // SetHardwareAttested enables hardware attestation key signatures in map diff --git a/tsnet/example/tsnet-services/tsnet-services.go b/tsnet/example/tsnet-services/tsnet-services.go new file mode 100644 index 0000000000000..6eb1a76ab5f5c --- /dev/null +++ b/tsnet/example/tsnet-services/tsnet-services.go @@ -0,0 +1,82 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The tsnet-services example demonstrates how to use tsnet with Services. +// +// To run this example yourself: +// +// 1. Add access controls which (i) define a new ACL tag, (ii) allow the demo +// node to host the Service, and (iii) allow peers on the tailnet to reach +// the Service. A sample ACL policy is provided below. +// +// 2. [Generate an auth key] using the Tailscale admin panel. When doing so, add +// your new tag to your key (Service hosts must be tagged nodes). +// +// 3. [Define a Service]. For the purposes of this demo, it must be defined to +// listen on TCP port 443. Note that you only need to follow Step 1 in the +// linked document. +// +// 4. 
Run the demo on the command line:
+//
+//	TS_AUTHKEY= go run tsnet-services.go -service
+//
+// The following is a sample ACL policy for step 1:
+//
+//	"tagOwners": {
+//		"tag:tsnet-demo-host": ["autogroup:member"],
+//	},
+//	"autoApprovers": {
+//		"services": {
+//			"svc:tsnet-demo": ["tag:tsnet-demo-host"],
+//		},
+//	},
+//	"grants": [{
+//		"src": ["*"],
+//		"dst": ["svc:tsnet-demo"],
+//		"ip": ["*"],
+//	}],
+//
+// [Define a Service]: https://tailscale.com/kb/1552/tailscale-services#step-1-define-a-tailscale-service
+// [Generate an auth key]: https://tailscale.com/kb/1085/auth-keys#generate-an-auth-key
+package main
+
+import (
+	"flag"
+	"fmt"
+	"log"
+	"net/http"
+
+	"tailscale.com/tsnet"
+)
+
+var (
+	svcName = flag.String("service", "", "the name of your Service, e.g. svc:tsnet-demo")
+)
+
+func main() {
+	flag.Parse()
+	if *svcName == "" {
+		log.Fatal("a Service name must be provided")
+	}
+
+	s := &tsnet.Server{
+		Hostname: "tsnet-services-demo",
+	}
+	defer s.Close()
+
+	ln, err := s.ListenService(*svcName, tsnet.ServiceModeHTTP{
+		HTTPS: true,
+		Port: 443,
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer ln.Close()
+
+	log.Printf("Listening on https://%v\n", ln.FQDN)
+
+	err = http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintln(w, "

      Hello, tailnet!

      ") + })) + log.Fatal(err) +} diff --git a/tsnet/example_tsnet_listen_service_multiple_ports_test.go b/tsnet/example_tsnet_listen_service_multiple_ports_test.go new file mode 100644 index 0000000000000..04781c2b20d16 --- /dev/null +++ b/tsnet/example_tsnet_listen_service_multiple_ports_test.go @@ -0,0 +1,69 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsnet_test + +import ( + "fmt" + "log" + "net/http" + _ "net/http/pprof" + "strings" + + "tailscale.com/tsnet" +) + +// This example function is in a separate file for the "net/http/pprof" import. + +// ExampleServer_ListenService_multiplePorts demonstrates how to advertise a +// Service on multiple ports. In this example, we run an HTTPS server on 443 and +// an HTTP server handling pprof requests to the same runtime on 6060. +func ExampleServer_ListenService_multiplePorts() { + s := &tsnet.Server{ + Hostname: "tsnet-services-demo", + } + defer s.Close() + + ln, err := s.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ + HTTPS: true, + Port: 443, + }) + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + pprofLn, err := s.ListenService("svc:my-service", tsnet.ServiceModeTCP{ + Port: 6060, + }) + if err != nil { + log.Fatal(err) + } + defer pprofLn.Close() + + go func() { + log.Printf("Listening for pprof requests on http://%v:%d\n", pprofLn.FQDN, 6060) + + handler := func(w http.ResponseWriter, r *http.Request) { + // The pprof listener is separate from our main server, so we can + // allow users to leave off the /debug/pprof prefix. We'll just + // attach it here, then pass along to the pprof handlers, which have + // been added implicitly due to our import of net/http/pprof. 
+			if !strings.HasPrefix(r.URL.Path, "/debug/pprof") {
+				r.URL.Path = "/debug/pprof" + r.URL.Path
+			}
+			http.DefaultServeMux.ServeHTTP(w, r)
+		}
+		if err := http.Serve(pprofLn, http.HandlerFunc(handler)); err != nil {
+			log.Fatal("error serving pprof:", err)
+		}
+	}()
+
+	log.Printf("Listening on https://%v\n", ln.FQDN)
+
+	// Specifying a handler here means pprof endpoints will not be served by
+	// this server (since we are not using http.DefaultServeMux).
+	log.Fatal(http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintln(w, "

      Hello, tailnet!

      ") + }))) +} diff --git a/tsnet/example_tsnet_test.go b/tsnet/example_tsnet_test.go index c5a20ab77fcd5..2a3236b3b6501 100644 --- a/tsnet/example_tsnet_test.go +++ b/tsnet/example_tsnet_test.go @@ -8,6 +8,8 @@ import ( "fmt" "log" "net/http" + "net/http/httputil" + "net/url" "os" "path/filepath" @@ -200,3 +202,56 @@ func ExampleServer_ListenFunnel_funnelOnly() { fmt.Fprintln(w, "Hi there! Welcome to the tailnet!") }))) } + +// ExampleServer_ListenService demonstrates how to advertise an HTTPS Service. +func ExampleServer_ListenService() { + s := &tsnet.Server{ + Hostname: "tsnet-services-demo", + } + defer s.Close() + + ln, err := s.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ + HTTPS: true, + Port: 443, + }) + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + log.Printf("Listening on https://%v\n", ln.FQDN) + log.Fatal(http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "

      Hello, tailnet!

      ") + }))) +} + +// ExampleServer_ListenService_reverseProxy demonstrates how to advertise a +// Service targeting a reverse proxy. This is useful when the backing server is +// external to the tsnet application. +func ExampleServer_ListenService_reverseProxy() { + // targetAddress represents the address of the backing server. + const targetAddress = "1.2.3.4:80" + + // We will use a reverse proxy to direct traffic to the backing server. + reverseProxy := httputil.NewSingleHostReverseProxy(&url.URL{ + Scheme: "http", + Host: targetAddress, + }) + + s := &tsnet.Server{ + Hostname: "tsnet-services-demo", + } + defer s.Close() + + ln, err := s.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ + HTTPS: true, + Port: 443, + }) + if err != nil { + log.Fatal(err) + } + defer ln.Close() + + log.Printf("Listening on https://%v\n", ln.FQDN) + log.Fatal(http.Serve(ln, reverseProxy)) +} diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 8b23b7ae3b8d3..6c840c335535e 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -52,6 +52,7 @@ import ( "tailscale.com/net/proxymux" "tailscale.com/net/socks5" "tailscale.com/net/tsdial" + "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/types/bools" "tailscale.com/types/logger" @@ -166,8 +167,6 @@ type Server struct { // that the control server will allow the node to adopt that tag. AdvertiseTags []string - getCertForTesting func(*tls.ClientHelloInfo) (*tls.Certificate, error) - initOnce sync.Once initErr error lb *ipnlocal.LocalBackend @@ -1130,9 +1129,6 @@ func (s *Server) RegisterFallbackTCPHandler(cb FallbackTCPHandler) func() { // It calls GetCertificate on the localClient, passing in the ClientHelloInfo. // For testing, if s.getCertForTesting is set, it will call that instead. 
func (s *Server) getCert(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - if s.getCertForTesting != nil { - return s.getCertForTesting(hi) - } lc, err := s.LocalClient() if err != nil { return nil, err @@ -1283,6 +1279,259 @@ func (s *Server) ListenFunnel(network, addr string, opts ...FunnelOption) (net.L return tls.NewListener(ln, tlsConfig), nil } +// ServiceMode defines how a Service is run. Currently supported modes are: +// - [ServiceModeTCP] +// - [ServiceModeHTTP] +// +// For more information, see [Server.ListenService]. +type ServiceMode interface { + // network is the network this Service will advertise on. Per Go convention, + // this should be lowercase, e.g. 'tcp'. + network() string +} + +// serviceModeWithPort is a convenience type to extract the port from +// ServiceMode types which have one. +type serviceModeWithPort interface { + ServiceMode + port() uint16 +} + +// ServiceModeTCP is used to configure a TCP Service via [Server.ListenService]. +type ServiceModeTCP struct { + // Port is the TCP port to advertise. If this Service needs to advertise + // multiple ports, call ListenService multiple times. + Port uint16 + + // TerminateTLS means that TLS connections will be terminated before being + // forwarded to the listener. In this case, the only server name indicator + // (SNI) permitted is the Service's fully-qualified domain name. + TerminateTLS bool + + // PROXYProtocolVersion indicates whether to send a PROXY protocol header + // before forwarding the connection to the listener and which version of the + // protocol to use. + // + // For more information, see + // https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt + PROXYProtocolVersion int +} + +func (ServiceModeTCP) network() string { return "tcp" } + +func (m ServiceModeTCP) port() uint16 { return m.Port } + +// ServiceModeHTTP is used to configure an HTTP Service via +// [Server.ListenService]. +type ServiceModeHTTP struct { + // Port is the TCP port to advertise. 
If this Service needs to advertise
+	// multiple ports, call ListenService multiple times.
+	Port uint16
+
+	// HTTPS, if true, means that the listener should handle connections as
+	// HTTPS connections. In this case, the only server name indicator (SNI)
+	// permitted is the Service's fully-qualified domain name.
+	HTTPS bool
+
+	// AcceptAppCaps defines the app capabilities to forward to the server. The
+	// keys in this map are the mount points for each set of capabilities.
+	//
+	// For example,
+	//
+	//	AcceptAppCaps: map[string][]string{
+	//		"/": {"example.com/cap/all-paths"},
+	//		"/foo": {"example.com/cap/all-paths", "example.com/cap/foo"},
+	//	}
+	//
+	// would forward example.com/cap/all-paths to all paths on the server and
+	// example.com/cap/foo only to paths beginning with /foo.
+	//
+	// For more information on app capabilities, see
+	// https://tailscale.com/kb/1537/grants-app-capabilities
+	AcceptAppCaps map[string][]string
+
+	// PROXYProtocol indicates whether to send a PROXY protocol header
+	// before forwarding the connection to the listener and which version of the
+	// protocol to use.
+	//
+	// For more information, see
+	// https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
+	PROXYProtocol int
+}
+
+func (ServiceModeHTTP) network() string { return "tcp" }
+
+func (m ServiceModeHTTP) port() uint16 { return m.Port }
+
+func (m ServiceModeHTTP) capsMap() map[string][]tailcfg.PeerCapability {
+	capsMap := map[string][]tailcfg.PeerCapability{}
+	for path, capNames := range m.AcceptAppCaps {
+		caps := make([]tailcfg.PeerCapability, 0, len(capNames))
+		for _, c := range capNames {
+			caps = append(caps, tailcfg.PeerCapability(c))
+		}
+		capsMap[path] = caps
+	}
+	return capsMap
+}
+
+// A ServiceListener is a network listener for a Tailscale Service. 
For more +// information about Services, see +// https://tailscale.com/kb/1552/tailscale-services +type ServiceListener struct { + net.Listener + addr addr + + // FQDN is the fully-qualifed domain name of this Service. + FQDN string +} + +// Addr returns the listener's network address. This will be the Service's +// fully-qualified domain name (FQDN) and the port. +// +// A hostname is not truly a network address, but Services listen on multiple +// addresses (the IPv4 and IPv6 virtual IPs). +func (sl ServiceListener) Addr() net.Addr { + return sl.addr +} + +// ErrUntaggedServiceHost is returned by ListenService when run on a node +// without any ACL tags. A node must use a tag-based identity to act as a +// Service host. For more information, see: +// https://tailscale.com/kb/1552/tailscale-services#prerequisites +var ErrUntaggedServiceHost = errors.New("service hosts must be tagged nodes") + +// ListenService creates a network listener for a Tailscale Service. This will +// advertise this node as hosting the Service. Note that: +// - Approval must still be granted by an admin or by ACL auto-approval rules. +// - Service hosts must be tagged nodes. +// - A valid Service host must advertise all ports defined for the Service. +// +// To advertise a Service with multiple ports, run ListenService multiple times. 
+// For more information about Services, see +// https://tailscale.com/kb/1552/tailscale-services +func (s *Server) ListenService(name string, mode ServiceMode) (*ServiceListener, error) { + if err := tailcfg.ServiceName(name).Validate(); err != nil { + return nil, err + } + if mode == nil { + return nil, errors.New("mode may not be nil") + } + svcName := name + + // TODO(hwh33,tailscale/corp#35859): support TUN mode + + ctx := context.Background() + _, err := s.Up(ctx) + if err != nil { + return nil, err + } + + st := s.lb.StatusWithoutPeers() + if st.Self.Tags == nil || st.Self.Tags.Len() == 0 { + return nil, ErrUntaggedServiceHost + } + + advertisedServices := s.lb.Prefs().AdvertiseServices().AsSlice() + if !slices.Contains(advertisedServices, svcName) { + // TODO(hwh33,tailscale/corp#35860): clean these prefs up when (a) we + // exit early due to error or (b) when the returned listener is closed. + _, err = s.lb.EditPrefs(&ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: append(advertisedServices, svcName), + }, + }) + if err != nil { + return nil, fmt.Errorf("updating advertised Services: %w", err) + } + } + + srvConfig := new(ipn.ServeConfig) + sc, srvConfigETag, err := s.lb.ServeConfigETag() + if err != nil { + return nil, fmt.Errorf("fetching current serve config: %w", err) + } + if sc.Valid() { + srvConfig = sc.AsStruct() + } + + fqdn := tailcfg.ServiceName(svcName).WithoutPrefix() + "." + st.CurrentTailnet.MagicDNSSuffix + + // svcAddr is used to implement Addr() on the returned listener. + svcAddr := addr{ + network: mode.network(), + // A hostname is not a network address, but Services listen on + // multiple addresses (the IPv4 and IPv6 virtual IPs), and there's + // no clear winner here between the two. Therefore prefer the FQDN. + // + // In the case of TCP or HTTP Services, the port will be added below. 
+ addr: fqdn, + } + if m, ok := mode.(serviceModeWithPort); ok { + if m.port() == 0 { + return nil, errors.New("must specify a port to advertise") + } + svcAddr.addr += ":" + strconv.Itoa(int(m.port())) + } + + // Start listening on a local TCP socket. + ln, err := net.Listen("tcp", "localhost:0") + if err != nil { + return nil, fmt.Errorf("starting local listener: %w", err) + } + + switch m := mode.(type) { + case ServiceModeTCP: + // Forward all connections from service-hostname:port to our socket. + srvConfig.SetTCPForwardingForService( + m.Port, ln.Addr().String(), m.TerminateTLS, + tailcfg.ServiceName(svcName), m.PROXYProtocolVersion, st.CurrentTailnet.MagicDNSSuffix) + case ServiceModeHTTP: + // For HTTP Services, proxy all connections to our socket. + mds := st.CurrentTailnet.MagicDNSSuffix + haveRootHandler := false + // We need to add a separate proxy for each mount point in the caps map. + for path, caps := range m.capsMap() { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + h := ipn.HTTPHandler{ + AcceptAppCaps: caps, + Proxy: ln.Addr().String(), + } + if path == "/" { + haveRootHandler = true + } else { + h.Proxy += path + } + srvConfig.SetWebHandler(&h, svcName, m.Port, path, m.HTTPS, mds) + } + // We always need a root handler. + if !haveRootHandler { + h := ipn.HTTPHandler{Proxy: ln.Addr().String()} + srvConfig.SetWebHandler(&h, svcName, m.Port, "/", m.HTTPS, mds) + } + default: + ln.Close() + return nil, fmt.Errorf("unknown ServiceMode type %T", m) + } + + if err := s.lb.SetServeConfig(srvConfig, srvConfigETag); err != nil { + ln.Close() + return nil, err + } + + // TODO(hwh33,tailscale/corp#35860): clean up state (advertising prefs, + // serve config changes) when the returned listener is closed. 
+ + return &ServiceListener{ + Listener: ln, + FQDN: fqdn, + addr: svcAddr, + }, nil +} + type listenOn string const ( @@ -1444,7 +1693,12 @@ func (ln *listener) Accept() (net.Conn, error) { } } -func (ln *listener) Addr() net.Addr { return addr{ln} } +func (ln *listener) Addr() net.Addr { + return addr{ + network: ln.keys[0].network, + addr: ln.addr, + } +} func (ln *listener) Close() error { ln.s.mu.Lock() @@ -1484,10 +1738,12 @@ func (ln *listener) handle(c net.Conn) { // Server returns the tsnet Server associated with the listener. func (ln *listener) Server() *Server { return ln.s } -type addr struct{ ln *listener } +type addr struct { + network, addr string +} -func (a addr) Network() string { return a.ln.keys[0].network } -func (a addr) String() string { return a.ln.addr } +func (a addr) Network() string { return a.network } +func (a addr) String() string { return a.addr } // cleanupListener wraps a net.Listener with a function to be run on Close. type cleanupListener struct { diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 2c8514cf42d0b..f44bacab08431 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -14,6 +14,7 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/json" + "encoding/pem" "errors" "flag" "fmt" @@ -28,6 +29,7 @@ import ( "path/filepath" "reflect" "runtime" + "slices" "strings" "sync" "sync/atomic" @@ -38,10 +40,12 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "golang.org/x/net/proxy" + "tailscale.com/client/local" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" + "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/net/netns" "tailscale.com/tailcfg" @@ -51,6 +55,8 @@ import ( "tailscale.com/tstest/integration/testcontrol" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/views" + "tailscale.com/util/mak" "tailscale.com/util/must" ) @@ -136,7 +142,7 @@ func 
startControl(t *testing.T) (controlURL string, control *testcontrol.Server) type testCertIssuer struct { mu sync.Mutex - certs map[string]*tls.Certificate + certs map[string]ipnlocal.TLSCertKeyPair // keyed by hostname root *x509.Certificate rootKey *ecdsa.PrivateKey @@ -168,18 +174,18 @@ func newCertIssuer() *testCertIssuer { panic(err) } return &testCertIssuer{ - certs: make(map[string]*tls.Certificate), root: rootCA, rootKey: rootKey, + certs: map[string]ipnlocal.TLSCertKeyPair{}, } } -func (tci *testCertIssuer) getCert(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { +func (tci *testCertIssuer) getCert(hostname string) (*ipnlocal.TLSCertKeyPair, error) { tci.mu.Lock() defer tci.mu.Unlock() - cert, ok := tci.certs[chi.ServerName] + cert, ok := tci.certs[hostname] if ok { - return cert, nil + return &cert, nil } certPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) @@ -188,7 +194,7 @@ func (tci *testCertIssuer) getCert(chi *tls.ClientHelloInfo) (*tls.Certificate, } certTmpl := &x509.Certificate{ SerialNumber: big.NewInt(1), - DNSNames: []string{chi.ServerName}, + DNSNames: []string{hostname}, NotBefore: time.Now(), NotAfter: time.Now().Add(time.Hour), } @@ -196,12 +202,22 @@ func (tci *testCertIssuer) getCert(chi *tls.ClientHelloInfo) (*tls.Certificate, if err != nil { return nil, err } - cert = &tls.Certificate{ - Certificate: [][]byte{certDER, tci.root.Raw}, - PrivateKey: certPrivKey, + keyDER, err := x509.MarshalPKCS8PrivateKey(certPrivKey) + if err != nil { + return nil, err } - tci.certs[chi.ServerName] = cert - return cert, nil + cert = ipnlocal.TLSCertKeyPair{ + CertPEM: pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certDER, + }), + KeyPEM: pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: keyDER, + }), + } + tci.certs[hostname] = cert + return &cert, nil } func (tci *testCertIssuer) Pool() *x509.CertPool { @@ -218,12 +234,11 @@ func startServer(t *testing.T, ctx context.Context, controlURL, hostname string) 
tmp := filepath.Join(t.TempDir(), hostname) os.MkdirAll(tmp, 0755) s := &Server{ - Dir: tmp, - ControlURL: controlURL, - Hostname: hostname, - Store: new(mem.Store), - Ephemeral: true, - getCertForTesting: testCertRoot.getCert, + Dir: tmp, + ControlURL: controlURL, + Hostname: hostname, + Store: new(mem.Store), + Ephemeral: true, } if *verboseNodes { s.Logf = t.Logf @@ -234,6 +249,8 @@ func startServer(t *testing.T, ctx context.Context, controlURL, hostname string) if err != nil { t.Fatal(err) } + s.lb.ConfigureCertsForTest(testCertRoot.getCert) + return s, status.TailscaleIPs[0], status.Self.PublicKey } @@ -259,12 +276,11 @@ func TestDialBlocks(t *testing.T) { tmp := filepath.Join(t.TempDir(), "s2") os.MkdirAll(tmp, 0755) s2 := &Server{ - Dir: tmp, - ControlURL: controlURL, - Hostname: "s2", - Store: new(mem.Store), - Ephemeral: true, - getCertForTesting: testCertRoot.getCert, + Dir: tmp, + ControlURL: controlURL, + Hostname: "s2", + Store: new(mem.Store), + Ephemeral: true, } if *verboseNodes { s2.Logf = log.Printf @@ -842,6 +858,367 @@ func TestFunnelClose(t *testing.T) { }) } +func TestListenService(t *testing.T) { + // First test an error case which doesn't require all of the fancy setup. + t.Run("untagged_node_error", func(t *testing.T) { + ctx := t.Context() + + controlURL, _ := startControl(t) + serviceHost, _, _ := startServer(t, ctx, controlURL, "service-host") + + ln, err := serviceHost.ListenService("svc:foo", ServiceModeTCP{Port: 8080}) + if ln != nil { + ln.Close() + } + if !errors.Is(err, ErrUntaggedServiceHost) { + t.Fatalf("expected %v, got %v", ErrUntaggedServiceHost, err) + } + }) + + // Now on to the fancier tests. 
+ + type dialFn func(context.Context, string, string) (net.Conn, error) + + // TCP helpers + acceptAndEcho := func(t *testing.T, ln net.Listener) { + t.Helper() + conn, err := ln.Accept() + if err != nil { + t.Error("accept error:", err) + return + } + defer conn.Close() + if _, err := io.Copy(conn, conn); err != nil { + t.Error("copy error:", err) + } + } + assertEcho := func(t *testing.T, conn net.Conn) { + t.Helper() + msg := "echo" + buf := make([]byte, 1024) + if _, err := conn.Write([]byte(msg)); err != nil { + t.Fatal("write failed:", err) + } + n, err := conn.Read(buf) + if err != nil { + t.Fatal("read failed:", err) + } + got := string(buf[:n]) + if got != msg { + t.Fatalf("unexpected response:\n\twant: %s\n\tgot: %s", msg, got) + } + } + + // HTTP helpers + checkAndEcho := func(t *testing.T, ln net.Listener, check func(r *http.Request)) { + t.Helper() + if check == nil { + check = func(*http.Request) {} + } + http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + check(r) + if _, err := io.Copy(w, r.Body); err != nil { + t.Error("copy error:", err) + w.WriteHeader(http.StatusInternalServerError) + } + })) + } + assertEchoHTTP := func(t *testing.T, hostname, path string, dial dialFn) { + t.Helper() + c := http.Client{ + Transport: &http.Transport{ + DialContext: dial, + }, + } + msg := "echo" + resp, err := c.Post("http://"+hostname+path, "text/plain", strings.NewReader(msg)) + if err != nil { + t.Fatal("posting request:", err) + } + defer resp.Body.Close() + b, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatal("reading body:", err) + } + got := string(b) + if got != msg { + t.Fatalf("unexpected response:\n\twant: %s\n\tgot: %s", msg, got) + } + } + + tests := []struct { + name string + + // modes is used as input to [Server.ListenService]. + // + // If this slice has multiple modes, then ListenService will be invoked + // multiple times. 
The number of listeners provided to the run function + // (below) will always match the number of elements in this slice. + modes []ServiceMode + + extraSetup func(t *testing.T, control *testcontrol.Server) + + // run executes the test. This function does not need to close any of + // the input resources, but it should close any new resources it opens. + // listeners[i] corresponds to inputs[i]. + run func(t *testing.T, listeners []*ServiceListener, peer *Server) + }{ + { + name: "basic_TCP", + modes: []ServiceMode{ + ServiceModeTCP{Port: 99}, + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + go acceptAndEcho(t, listeners[0]) + + target := fmt.Sprintf("%s:%d", listeners[0].FQDN, 99) + conn := must.Get(peer.Dial(t.Context(), "tcp", target)) + defer conn.Close() + + assertEcho(t, conn) + }, + }, + { + name: "TLS_terminated_TCP", + modes: []ServiceMode{ + ServiceModeTCP{ + TerminateTLS: true, + Port: 443, + }, + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + go acceptAndEcho(t, listeners[0]) + + target := fmt.Sprintf("%s:%d", listeners[0].FQDN, 443) + conn := must.Get(peer.Dial(t.Context(), "tcp", target)) + defer conn.Close() + + assertEcho(t, tls.Client(conn, &tls.Config{ + ServerName: listeners[0].FQDN, + RootCAs: testCertRoot.Pool(), + })) + }, + }, + { + name: "identity_headers", + modes: []ServiceMode{ + ServiceModeHTTP{ + Port: 80, + }, + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + expectHeader := "Tailscale-User-Name" + go checkAndEcho(t, listeners[0], func(r *http.Request) { + if _, ok := r.Header[expectHeader]; !ok { + t.Error("did not see expected header:", expectHeader) + } + }) + assertEchoHTTP(t, listeners[0].FQDN, "", peer.Dial) + }, + }, + { + name: "identity_headers_TLS", + modes: []ServiceMode{ + ServiceModeHTTP{ + HTTPS: true, + Port: 80, + }, + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + expectHeader := "Tailscale-User-Name" 
+ go checkAndEcho(t, listeners[0], func(r *http.Request) { + if _, ok := r.Header[expectHeader]; !ok { + t.Error("did not see expected header:", expectHeader) + } + }) + + dial := func(ctx context.Context, network, addr string) (net.Conn, error) { + tcpConn, err := peer.Dial(ctx, network, addr) + if err != nil { + return nil, err + } + return tls.Client(tcpConn, &tls.Config{ + ServerName: listeners[0].FQDN, + RootCAs: testCertRoot.Pool(), + }), nil + } + + assertEchoHTTP(t, listeners[0].FQDN, "", dial) + }, + }, + { + name: "app_capabilities", + modes: []ServiceMode{ + ServiceModeHTTP{ + Port: 80, + AcceptAppCaps: map[string][]string{ + "/": {"example.com/cap/all-paths"}, + "/foo": {"example.com/cap/all-paths", "example.com/cap/foo"}, + }, + }, + }, + extraSetup: func(t *testing.T, control *testcontrol.Server) { + control.SetGlobalAppCaps(tailcfg.PeerCapMap{ + "example.com/cap/all-paths": []tailcfg.RawMessage{`true`}, + "example.com/cap/foo": []tailcfg.RawMessage{`true`}, + }) + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + allPathsCap := "example.com/cap/all-paths" + fooCap := "example.com/cap/foo" + checkCaps := func(r *http.Request) { + rawCaps, ok := r.Header["Tailscale-App-Capabilities"] + if !ok { + t.Error("no app capabilities header") + return + } + if len(rawCaps) != 1 { + t.Error("expected one app capabilities header value, got", len(rawCaps)) + return + } + var caps map[string][]any + if err := json.Unmarshal([]byte(rawCaps[0]), &caps); err != nil { + t.Error("error unmarshaling app caps:", err) + return + } + if _, ok := caps[allPathsCap]; !ok { + t.Errorf("got app caps, but %v is not present; saw:\n%v", allPathsCap, caps) + } + if strings.HasPrefix(r.URL.Path, "/foo") { + if _, ok := caps[fooCap]; !ok { + t.Errorf("%v should be present for /foo request; saw:\n%v", fooCap, caps) + } + } else { + if _, ok := caps[fooCap]; ok { + t.Errorf("%v should not be present for non-/foo request; saw:\n%v", fooCap, caps) + } + } + } + 
+ go checkAndEcho(t, listeners[0], checkCaps) + assertEchoHTTP(t, listeners[0].FQDN, "", peer.Dial) + assertEchoHTTP(t, listeners[0].FQDN, "/foo", peer.Dial) + assertEchoHTTP(t, listeners[0].FQDN, "/foo/bar", peer.Dial) + }, + }, + { + name: "multiple_ports", + modes: []ServiceMode{ + ServiceModeTCP{ + Port: 99, + }, + ServiceModeHTTP{ + Port: 80, + }, + }, + run: func(t *testing.T, listeners []*ServiceListener, peer *Server) { + go acceptAndEcho(t, listeners[0]) + + target := fmt.Sprintf("%s:%d", listeners[0].FQDN, 99) + conn := must.Get(peer.Dial(t.Context(), "tcp", target)) + defer conn.Close() + assertEcho(t, conn) + + go checkAndEcho(t, listeners[1], nil) + assertEchoHTTP(t, listeners[1].FQDN, "", peer.Dial) + }, + }, + } + + for _, tt := range tests { + // Overview: + // - start test control + // - start 2 tsnet nodes: + // one to act as Service host and a second to act as a peer client + // - configure necessary state on control mock + // - start a Service listener from the host + // - call tt.run with our test bed + // + // This ends up also testing the Service forwarding logic in + // LocalBackend, but that's useful too. + t.Run(tt.name, func(t *testing.T) { + ctx := t.Context() + + controlURL, control := startControl(t) + serviceHost, _, _ := startServer(t, ctx, controlURL, "service-host") + serviceClient, _, _ := startServer(t, ctx, controlURL, "service-client") + + const serviceName = tailcfg.ServiceName("svc:foo") + const serviceVIP = "100.11.22.33" + + // == Set up necessary state in our mock == + + // The Service host must have the 'service-host' capability, which + // is a mapping from the Service name to the Service VIP. 
+ var serviceHostCaps map[tailcfg.ServiceName]views.Slice[netip.Addr] + mak.Set(&serviceHostCaps, serviceName, views.SliceOf([]netip.Addr{netip.MustParseAddr(serviceVIP)})) + j := must.Get(json.Marshal(serviceHostCaps)) + cm := serviceHost.lb.NetMap().SelfNode.CapMap().AsMap() + mak.Set(&cm, tailcfg.NodeAttrServiceHost, []tailcfg.RawMessage{tailcfg.RawMessage(j)}) + control.SetNodeCapMap(serviceHost.lb.NodeKey(), cm) + + // The Service host must be allowed to advertise the Service VIP. + control.SetSubnetRoutes(serviceHost.lb.NodeKey(), []netip.Prefix{ + netip.MustParsePrefix(serviceVIP + `/32`), + }) + + // The Service host must be a tagged node (any tag will do). + serviceHostNode := control.Node(serviceHost.lb.NodeKey()) + serviceHostNode.Tags = append(serviceHostNode.Tags, "some-tag") + control.UpdateNode(serviceHostNode) + + // The service client must accept routes advertised by other nodes + // (RouteAll is equivalent to --accept-routes). + must.Get(serviceClient.localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ + RouteAllSet: true, + Prefs: ipn.Prefs{ + RouteAll: true, + }, + })) + + // Set up DNS for our Service. + control.AddDNSRecords(tailcfg.DNSRecord{ + Name: serviceName.WithoutPrefix() + "." + control.MagicDNSDomain, + Value: serviceVIP, + }) + + if tt.extraSetup != nil { + tt.extraSetup(t, control) + } + + // Force netmap updates to avoid race conditions. The nodes need to + // see our control updates before we can start the test. 
+ must.Do(control.ForceNetmapUpdate(ctx, serviceHost.lb.NodeKey())) + must.Do(control.ForceNetmapUpdate(ctx, serviceClient.lb.NodeKey())) + netmapUpToDate := func(s *Server) bool { + nm := s.lb.NetMap() + return slices.ContainsFunc(nm.DNS.ExtraRecords, func(r tailcfg.DNSRecord) bool { + return r.Value == serviceVIP + }) + } + for !netmapUpToDate(serviceClient) { + time.Sleep(10 * time.Millisecond) + } + for !netmapUpToDate(serviceHost) { + time.Sleep(10 * time.Millisecond) + } + + // == Done setting up mock state == + + // Start the Service listeners. + listeners := make([]*ServiceListener, 0, len(tt.modes)) + for _, input := range tt.modes { + ln := must.Get(serviceHost.ListenService(serviceName.String(), input)) + defer ln.Close() + listeners = append(listeners, ln) + } + + tt.run(t, listeners, serviceClient) + }) + } +} + func TestListenerClose(t *testing.T) { tstest.Shard(t) ctx := context.Background() diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 19964c91ff8a4..447efb0c1b15d 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -110,6 +110,16 @@ type Server struct { // nodeCapMaps overrides the capability map sent down to a client. nodeCapMaps map[key.NodePublic]tailcfg.NodeCapMap + // globalAppCaps configures global app capabilities, equivalent to: + // "grants": [ + // { + // "src": ["*"], + // "dst": ["*"], + // "app": + // } + // ] + globalAppCaps tailcfg.PeerCapMap + // suppressAutoMapResponses is the set of nodes that should not be sent // automatic map responses from serveMap. 
(They should only get manually sent ones) suppressAutoMapResponses set.Set[key.NodePublic] @@ -289,6 +299,43 @@ func (s *Server) addDebugMessage(nodeKeyDst key.NodePublic, msg any) bool { return sendUpdate(oldUpdatesCh, updateDebugInjection) } +// ForceNetmapUpdate waits for the node to get stuck in a map poll and then +// sends the current netmap (which may result in a redundant netmap). The +// intended use case is ensuring state changes propagate before running tests. +// +// This should only be called for nodes connected as streaming clients. Calling +// this with a non-streaming node will result in non-deterministic behavior. +// +// This function cannot guarantee that the node has processed the issued update, +// so tests should confirm processing by querying the node. By example: +// +// if err := s.ForceNetmapUpdate(node.Key()); err != nil { +// // handle error +// } +// for !updatesPresent(node.NetMap()) { +// time.Sleep(10 * time.Millisecond) +// } +func (s *Server) ForceNetmapUpdate(ctx context.Context, nodeKey key.NodePublic) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := s.AwaitNodeInMapRequest(ctx, nodeKey); err != nil { + return fmt.Errorf("waiting for node to poll: %w", err) + } + mr, err := s.MapResponse(&tailcfg.MapRequest{NodeKey: nodeKey}) + if err != nil { + return fmt.Errorf("generating map response: %w", err) + } + if s.addDebugMessage(nodeKey, mr) { + return nil + } + // If we failed to send the map response, loop around and try again. + } +} + // Mark the Node key of every node as expired func (s *Server) SetExpireAllNodes(expired bool) { s.mu.Lock() @@ -531,6 +578,31 @@ func (s *Server) SetNodeCapMap(nodeKey key.NodePublic, capMap tailcfg.NodeCapMap s.updateLocked("SetNodeCapMap", s.nodeIDsLocked(0)) } +// SetGlobalAppCaps configures global app capabilities. 
This is equivalent to +// +// "grants": [ +// { +// "src": ["*"], +// "dst": ["*"], +// "app": +// } +// ] +func (s *Server) SetGlobalAppCaps(appCaps tailcfg.PeerCapMap) { + s.mu.Lock() + s.globalAppCaps = appCaps + s.mu.Unlock() +} + +// AddDNSRecords adds records to the server's DNS config. +func (s *Server) AddDNSRecords(records ...tailcfg.DNSRecord) { + s.mu.Lock() + defer s.mu.Unlock() + if s.DNSConfig == nil { + s.DNSConfig = new(tailcfg.DNSConfig) + } + s.DNSConfig.ExtraRecords = append(s.DNSConfig.ExtraRecords, records...) +} + // nodeIDsLocked returns the node IDs of all nodes in the server, except // for the node with the given ID. func (s *Server) nodeIDsLocked(except tailcfg.NodeID) []tailcfg.NodeID { @@ -838,6 +910,9 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. CapMap: capMap, Capabilities: slices.Collect(maps.Keys(capMap)), } + if s.MagicDNSDomain != "" { + node.Name = node.Name + "." + s.MagicDNSDomain + "." + } s.nodes[nk] = node } requireAuth := s.RequireAuth @@ -1261,9 +1336,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, dns := s.DNSConfig if dns != nil && s.MagicDNSDomain != "" { dns = dns.Clone() - dns.CertDomains = []string{ - node.Hostinfo.Hostname() + "." 
+ s.MagicDNSDomain, - } + dns.CertDomains = append(dns.CertDomains, node.Hostinfo.Hostname()+"."+s.MagicDNSDomain) } res = &tailcfg.MapResponse{ @@ -1279,6 +1352,7 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, s.mu.Lock() nodeMasqs := s.masquerades[node.Key] jailed := maps.Clone(s.peerIsJailed[node.Key]) + globalAppCaps := s.globalAppCaps s.mu.Unlock() for _, p := range s.AllNodes() { if p.StableID == node.StableID { @@ -1330,6 +1404,18 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, v6Prefix, } + if globalAppCaps != nil { + res.PacketFilter = append(res.PacketFilter, tailcfg.FilterRule{ + SrcIPs: []string{"*"}, + CapGrant: []tailcfg.CapGrant{ + { + Dsts: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}, + CapMap: globalAppCaps, + }, + }, + }) + } + // If the server is tracking TKA state, and there's a single TKA head, // add it to the MapResponse. if s.tkaStorage != nil { From 7676030355387c5cc240cdccf02f3781958f7e00 Mon Sep 17 00:00:00 2001 From: Eduardo Sorribas Date: Mon, 19 Jan 2026 15:32:13 +0100 Subject: [PATCH 0879/1093] net/portmapper: Stop replacing the internal port with the upnp external port (#18349) net/portmapper: Stop replacing the internal port with the upnp external port This causes the UPnP mapping to break in the next recreation of the mapping. 
Fixes #18348 Signed-off-by: Eduardo Sorribas --- net/portmapper/upnp.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/portmapper/upnp.go b/net/portmapper/upnp.go index 34140e9473460..46d7ff70215fd 100644 --- a/net/portmapper/upnp.go +++ b/net/portmapper/upnp.go @@ -574,7 +574,7 @@ func (c *Client) getUPnPPortMapping( c.mu.Lock() defer c.mu.Unlock() c.mapping = upnp - c.localPort = externalAddrPort.Port() + c.localPort = internal.Port() return upnp.external, true } From 7213b35d85f006b662eabc2e770321ed93abfaa8 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Mon, 19 Jan 2026 16:06:40 +0000 Subject: [PATCH 0880/1093] k8s-operator,kube: remove enableSessionRecording from Kubernetes Cap Map (#18452) * k8s-operator,kube: removing enableSessionRecordings option. It seems like it is going to create a confusing user experience and it's going to be a very niche use case, so we have decided to defer this for now. Updates tailscale/corp#35796 Signed-off-by: chaosinthecrd * k8s-operator: adding metric for env var deprecation Signed-off-by: chaosinthecrd --------- Signed-off-by: chaosinthecrd --- k8s-operator/api-proxy/proxy.go | 14 ++++---------- kube/kubetypes/grants.go | 4 ---- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index fcd57cd17e006..f5f1da80f1a05 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -43,7 +43,9 @@ import ( var ( // counterNumRequestsproxies counts the number of API server requests proxied via this proxy. 
counterNumRequestsProxied = clientmetric.NewCounter("k8s_auth_proxy_requests_proxied") - whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) + // NOTE: adding this metric so we can keep track of users during deprecation + counterExperimentalEventsVarUsed = clientmetric.NewCounter("ts_experimental_kube_api_events_var_used") + whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) ) const ( @@ -133,6 +135,7 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { } if ap.eventsEnabled { + counterExperimentalEventsVarUsed.Add(1) ap.log.Warnf("DEPRECATED: %q environment variable is deprecated, and will be removed in v1.96. See documentation for more detail.", eventsEnabledVar) } @@ -315,10 +318,6 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request } } - if !c.enableRecordings { - ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) - return - } ksr.CounterSessionRecordingsAttempted.Add(1) // at this point we know that users intended for this session to be recorded wantsHeader := upgradeHeaderForProto[proto] @@ -568,7 +567,6 @@ func addImpersonationHeaders(r *http.Request, log *zap.SugaredLogger) error { type recorderConfig struct { failOpen bool enableEvents bool - enableRecordings bool recorderAddresses []netip.AddrPort } @@ -582,7 +580,6 @@ func determineRecorderConfig(who *apitype.WhoIsResponse) (c recorderConfig, _ er c.failOpen = true c.enableEvents = false - c.enableRecordings = true rules, err := tailcfg.UnmarshalCapJSON[kubetypes.KubernetesCapRule](who.CapMap, tailcfg.PeerCapabilityKubernetes) if err != nil { return c, fmt.Errorf("failed to unmarshal Kubernetes capability: %w", err) @@ -605,9 +602,6 @@ func determineRecorderConfig(who *apitype.WhoIsResponse) (c recorderConfig, _ er if rule.EnableEvents { c.enableEvents = true } - if rule.EnableSessionRecordings { - c.enableRecordings = true - } } return c, nil } diff --git a/kube/kubetypes/grants.go b/kube/kubetypes/grants.go index 
d293ae5792e41..50d7d760ff5a7 100644 --- a/kube/kubetypes/grants.go +++ b/kube/kubetypes/grants.go @@ -44,10 +44,6 @@ type KubernetesCapRule struct { // should be recorded or not. // https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-your-tailnet-policy-file EnableEvents bool `json:"enableEvents,omitempty"` - // EnableSessionRecordings defines whether kubectl sessions - // (e.g., exec, attach) should be recorded or not. - // https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-your-tailnet-policy-file - EnableSessionRecordings bool `json:"enableSessionRecordings,omitempty"` } // ImpersonateRule defines how a request from the tailnet identity matching From 0a5639dcc008d60fe375a6707be1fec1ffc2ec53 Mon Sep 17 00:00:00 2001 From: Alex Valiushko Date: Mon, 19 Jan 2026 18:03:30 -0800 Subject: [PATCH 0881/1093] net/udprelay: advertise addresses from cloud metadata service (#18368) Polls IMDS (currently only AWS) for extra IPs to advertise as udprelay. 
Updates #17796 Change-Id: Iaaa899ef4575dc23b09a5b713ce6693f6a6a6964 Signed-off-by: Alex Valiushko --- cmd/tailscaled/depaware.txt | 2 +- net/udprelay/server.go | 28 ++++++++++++++++++++++++---- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 43165ea36c6d3..da480d1a694e3 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -423,7 +423,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/control/controlclient+ tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+ - tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock + tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock+ tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 5918863a5323f..2b6d389232832 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -43,6 +43,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/nettype" "tailscale.com/types/views" + "tailscale.com/util/cloudinfo" "tailscale.com/util/eventbus" "tailscale.com/util/set" "tailscale.com/util/usermetric" @@ -81,6 +82,7 @@ type Server struct { netChecker *netcheck.Client metrics *metrics netMon *netmon.Monitor + cloudInfo *cloudinfo.CloudInfo // used to query cloud metadata services mu sync.Mutex // guards the following fields macSecrets views.Slice[[blake2s.Size]byte] // [0] is most recent, max 2 elements @@ -336,6 +338,7 @@ func NewServer(logf logger.Logf, port uint16, onlyStaticAddrPorts bool, metrics onlyStaticAddrPorts: onlyStaticAddrPorts, serverEndpointByDisco: make(map[key.SortedPairOfDiscoPublic]*serverEndpoint), nextVNI: minVNI, + cloudInfo: 
cloudinfo.New(logf), } s.discoPublic = s.disco.Public() s.metrics = registerMetrics(metrics) @@ -402,11 +405,13 @@ func (s *Server) startPacketReaders() { func (s *Server) addrDiscoveryLoop() { defer s.wg.Done() - timer := time.NewTimer(0) // fire immediately defer timer.Stop() getAddrPorts := func() ([]netip.AddrPort, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var addrPorts set.Set[netip.AddrPort] addrPorts.Make() @@ -425,6 +430,21 @@ func (s *Server) addrDiscoveryLoop() { } } + // Get cloud metadata service addresses. + // TODO(illotum) Same is done within magicsock, consider caching within cloudInfo + cloudIPs, err := s.cloudInfo.GetPublicIPs(ctx) + if err == nil { // Not handling the err, GetPublicIPs already printed to log. + for _, ip := range cloudIPs { + if ip.IsValid() { + if ip.Is4() { + addrPorts.Add(netip.AddrPortFrom(ip, s.uc4Port)) + } else { + addrPorts.Add(netip.AddrPortFrom(ip, s.uc6Port)) + } + } + } + } + dm := s.getDERPMap() if dm == nil { // We don't have a DERPMap which is required to dynamically @@ -434,9 +454,7 @@ func (s *Server) addrDiscoveryLoop() { } // get addrPorts as visible from DERP - netCheckerCtx, netCheckerCancel := context.WithTimeout(context.Background(), netcheck.ReportTimeout) - defer netCheckerCancel() - rep, err := s.netChecker.GetReport(netCheckerCtx, dm, &netcheck.GetReportOpts{ + rep, err := s.netChecker.GetReport(ctx, dm, &netcheck.GetReportOpts{ OnlySTUN: true, }) if err != nil { @@ -474,6 +492,8 @@ func (s *Server) addrDiscoveryLoop() { // Mirror magicsock behavior for duration between STUN. We consider // 30s a min bound for NAT timeout. timer.Reset(tstime.RandomDurationBetween(20*time.Second, 26*time.Second)) + // TODO(illotum) Pass in context bound to the [s.closeCh] lifetime, + // and do not block on getAddrPorts IO. 
addrPorts, err := getAddrPorts() if err != nil { s.logf("error discovering IP:port candidates: %v", err) From e30626c480535b77a5dc332cc5af2454ac8a5d77 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Tue, 20 Jan 2026 15:05:03 -0500 Subject: [PATCH 0882/1093] version: add support for reporting the mac variant from tailscale --version (#18462) fixes tailscale/corp#27182 tailscale version --json now includes an osVariant field that will report one of macsys, appstore or darwin. We can extend this to other platforms where tailscaled can have multiple personalities. This also adds the concept of a platform-specific callback for querying an explicit application identifier. On Apple, we can use CFBundleGetIdentifier(mainBundle) to get the bundle identifier via cgo. This removes all the ambiguity and lets us remove other less direct methods (like env vars, locations, etc). Signed-off-by: Jonathan Nobels --- version/prop.go | 81 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 74 insertions(+), 7 deletions(-) diff --git a/version/prop.go b/version/prop.go index 0d6a5c00df375..795f3a9127be0 100644 --- a/version/prop.go +++ b/version/prop.go @@ -15,6 +15,22 @@ import ( "tailscale.com/types/lazy" ) +// AppIdentifierFn, if non-nil, is a callback function that returns the +// application identifier of the running process or an empty string if unknown. +// +// tailscale(d) implementations can set an explicit callback to return an identifier +// for the running process if such a concept exists. The Apple bundle identifier, for example. 
+var AppIdentifierFn func() string // or nil + +const ( + macsysBundleID = "io.tailscale.ipn.macsys" // The macsys gui app and CLI + appStoreBundleID = "io.tailscale.ipn.macos" // The App Store gui app and CLI + macsysExtBundleId = "io.tailscale.ipn.macsys.network-extension" // The macsys system extension + appStoreExtBundleId = "io.tailscale.ipn.macos.network-extension" // The App Store network extension + tvOSExtBundleId = "io.tailscale.ipn.ios.network-extension-tvos" // The tvOS network extension + iOSExtBundleId = "io.tailscale.ipn.ios.network-extension" // The iOS network extension +) + // IsMobile reports whether this is a mobile client build. func IsMobile() bool { return runtime.GOOS == "android" || runtime.GOOS == "ios" @@ -52,8 +68,8 @@ func IsMacGUIVariant() bool { // IsSandboxedMacOS reports whether this process is a sandboxed macOS // process (either the app or the extension). It is true for the Mac App Store -// and macsys (System Extension) version on macOS, and false for -// tailscaled-on-macOS. +// and macsys (only its System Extension) variants on macOS, and false for +// tailscaled and the macsys GUI process on macOS. func IsSandboxedMacOS() bool { return IsMacAppStore() || IsMacSysExt() } @@ -73,10 +89,15 @@ func IsMacSysGUI() bool { if runtime.GOOS != "darwin" { return false } - return isMacSysApp.Get(func() bool { + if AppIdentifierFn != nil { + return AppIdentifierFn() == macsysBundleID + } + + // TODO (barnstar): This check should be redundant once all relevant callers + // use AppIdentifierFn. 
return strings.Contains(os.Getenv("HOME"), "/Containers/io.tailscale.ipn.macsys/") || - strings.Contains(os.Getenv("XPC_SERVICE_NAME"), "io.tailscale.ipn.macsys") + strings.Contains(os.Getenv("XPC_SERVICE_NAME"), macsysBundleID) }) } @@ -90,11 +111,17 @@ func IsMacSysExt() bool { return false } return isMacSysExt.Get(func() bool { + if AppIdentifierFn != nil { + return AppIdentifierFn() == macsysExtBundleId + } + + // TODO (barnstar): This check should be redundant once all relevant callers + // use AppIdentifierFn. exe, err := os.Executable() if err != nil { return false } - return filepath.Base(exe) == "io.tailscale.ipn.macsys.network-extension" + return filepath.Base(exe) == macsysExtBundleId }) } @@ -107,11 +134,17 @@ func IsMacAppStore() bool { return false } return isMacAppStore.Get(func() bool { + if AppIdentifierFn != nil { + id := AppIdentifierFn() + return id == appStoreBundleID || id == appStoreExtBundleId + } + // TODO (barnstar): This check should be redundant once all relevant callers + // use AppIdentifierFn. // Both macsys and app store versions can run CLI executable with // suffix /Contents/MacOS/Tailscale. Check $HOME to filter out running // as macsys. return strings.Contains(os.Getenv("HOME"), "/Containers/io.tailscale.ipn.macos/") || - strings.Contains(os.Getenv("XPC_SERVICE_NAME"), "io.tailscale.ipn.macos") + strings.Contains(os.Getenv("XPC_SERVICE_NAME"), appStoreBundleID) }) } @@ -124,6 +157,11 @@ func IsMacAppStoreGUI() bool { return false } return isMacAppStoreGUI.Get(func() bool { + if AppIdentifierFn != nil { + return AppIdentifierFn() == appStoreBundleID + } + // TODO (barnstar): This check should be redundant once all relevant callers + // use AppIdentifierFn. 
exe, err := os.Executable() if err != nil { return false @@ -143,7 +181,13 @@ func IsAppleTV() bool { return false } return isAppleTV.Get(func() bool { - return strings.EqualFold(os.Getenv("XPC_SERVICE_NAME"), "io.tailscale.ipn.ios.network-extension-tvos") + if AppIdentifierFn != nil { + return AppIdentifierFn() == tvOSExtBundleId + } + + // TODO (barnstar): This check should be redundant once all relevant callers + // use AppIdentifierFn. + return strings.EqualFold(os.Getenv("XPC_SERVICE_NAME"), tvOSExtBundleId) }) } @@ -187,6 +231,23 @@ func IsUnstableBuild() bool { }) } +// osVariant returns the OS variant string for systems where we support +// multiple ways of running tailscale(d), if any. +// +// For example: "appstore", "macsys", "darwin". +func osVariant() string { + if IsMacAppStore() { + return "appstore" + } + if IsMacSys() { + return "macsys" + } + if runtime.GOOS == "darwin" { + return "darwin" + } + return "" +} + var isDev = sync.OnceValue(func() bool { return strings.Contains(Short(), "-dev") }) @@ -227,6 +288,11 @@ type Meta struct { // setting "vcs.modified" was true). GitDirty bool `json:"gitDirty,omitempty"` + // OSVariant is specific variant of the binary, if applicable. For example, + // macsys/appstore/darwin for macOS builds. Nil/empty where not supported + // or on oses without variants. + OSVariant string `json:"osVariant,omitempty"` + // ExtraGitCommit, if non-empty, is the git commit of a "supplemental" // repository at which Tailscale was built. Its format is the same as // gitCommit. 
@@ -264,6 +330,7 @@ func GetMeta() Meta { GitCommitTime: getEmbeddedInfo().commitTime, GitCommit: gitCommit(), GitDirty: gitDirty(), + OSVariant: osVariant(), ExtraGitCommit: extraGitCommitStamp, IsDev: isDev(), UnstableBranch: IsUnstableBuild(), From 2cb86cf65eb8908d34f93a7587807cde6ecf979c Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 21 Jan 2026 12:35:44 +0000 Subject: [PATCH 0883/1093] cmd/k8s-operator,k8s-operator: Allow the use of multiple tailnets (#18344) This commit contains the implementation of multi-tailnet support within the Kubernetes Operator Each of our custom resources now expose the `spec.tailnet` field. This field is a string that must match the name of an existing `Tailnet` resource. A `Tailnet` resource looks like this: ```yaml apiVersion: tailscale.com/v1alpha1 kind: Tailnet metadata: name: example # This is the name that must be referenced by other resources spec: credentials: secretName: example-oauth ``` Each `Tailnet` references a `Secret` resource that contains a set of oauth credentials. This secret must be created in the same namespace as the operator: ```yaml apiVersion: v1 kind: Secret metadata: name: example-oauth # This is the name that's referenced by the Tailnet resource. namespace: tailscale stringData: client_id: "client-id" client_secret: "client-secret" ``` When created, the operator performs a basic check that the oauth client has access to all required scopes. This is done using read actions on devices, keys & services. While this doesn't capture a missing "write" permission, it catches completely missing permissions. Once this check passes, the `Tailnet` moves into a ready state and can be referenced. Attempting to use a `Tailnet` in a non-ready state will stall the deployment of `Connector`s, `ProxyGroup`s and `Recorder`s until the `Tailnet` becomes ready. 
The `spec.tailnet` field informs the operator that a `Connector`, `ProxyGroup`, or `Recorder` must be given an auth key generated using the specified oauth client. For backwards compatibility, the set of credentials the operator is configured with are considered the default. That is, where `spec.tailnet` is not set, the resource will be deployed in the same tailnet as the operator. Updates https://github.com/tailscale/corp/issues/34561 --- cmd/k8s-operator/connector.go | 4 +- cmd/k8s-operator/depaware.txt | 6 +- .../deploy/chart/templates/.gitignore | 1 + .../deploy/chart/templates/operator-rbac.yaml | 3 + .../deploy/crds/tailscale.com_connectors.yaml | 8 + .../crds/tailscale.com_proxygroups.yaml | 8 + .../deploy/crds/tailscale.com_recorders.yaml | 8 + .../deploy/crds/tailscale.com_tailnets.yaml | 141 ++++++ .../deploy/manifests/operator.yaml | 176 ++++++++ cmd/k8s-operator/generate/main.go | 3 + cmd/k8s-operator/ingress.go | 2 +- cmd/k8s-operator/operator.go | 12 + cmd/k8s-operator/proxygroup.go | 68 ++- cmd/k8s-operator/sts.go | 37 +- cmd/k8s-operator/svc.go | 3 +- cmd/k8s-operator/tailnet.go | 58 +++ cmd/k8s-operator/tsrecorder.go | 74 ++-- k8s-operator/api.md | 96 ++++ k8s-operator/apis/v1alpha1/register.go | 2 + k8s-operator/apis/v1alpha1/types_connector.go | 6 + .../apis/v1alpha1/types_proxygroup.go | 6 + k8s-operator/apis/v1alpha1/types_recorder.go | 6 + k8s-operator/apis/v1alpha1/types_tailnet.go | 69 +++ .../apis/v1alpha1/zz_generated.deepcopy.go | 112 +++++ k8s-operator/conditions.go | 20 + k8s-operator/reconciler/reconciler.go | 39 ++ k8s-operator/reconciler/reconciler_test.go | 42 ++ k8s-operator/reconciler/tailnet/mocks_test.go | 45 ++ k8s-operator/reconciler/tailnet/tailnet.go | 327 ++++++++++++++ .../reconciler/tailnet/tailnet_test.go | 411 ++++++++++++++++++ kube/kubetypes/types.go | 1 + 31 files changed, 1730 insertions(+), 64 deletions(-) create mode 100644 cmd/k8s-operator/deploy/crds/tailscale.com_tailnets.yaml create mode 100644 
cmd/k8s-operator/tailnet.go create mode 100644 k8s-operator/apis/v1alpha1/types_tailnet.go create mode 100644 k8s-operator/reconciler/reconciler.go create mode 100644 k8s-operator/reconciler/reconciler_test.go create mode 100644 k8s-operator/reconciler/tailnet/mocks_test.go create mode 100644 k8s-operator/reconciler/tailnet/tailnet.go create mode 100644 k8s-operator/reconciler/tailnet/tailnet_test.go diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index 7fa311532238b..f4d518faa3aad 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -25,6 +25,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -207,6 +208,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge ProxyClassName: proxyClass, proxyType: proxyTypeConnector, LoginServer: a.ssr.loginServer, + Tailnet: cn.Spec.Tailnet, } if cn.Spec.SubnetRouter != nil && len(cn.Spec.SubnetRouter.AdvertiseRoutes) > 0 { @@ -276,7 +278,7 @@ func (a *ConnectorReconciler) maybeProvisionConnector(ctx context.Context, logge } func (a *ConnectorReconciler) maybeCleanupConnector(ctx context.Context, logger *zap.SugaredLogger, cn *tsapi.Connector) (bool, error) { - if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(cn.Name, a.tsnamespace, "connector"), proxyTypeConnector); err != nil { + if done, err := a.ssr.Cleanup(ctx, cn.Spec.Tailnet, logger, childResourceLabels(cn.Name, a.tsnamespace, "connector"), proxyTypeConnector); err != nil { return false, fmt.Errorf("failed to cleanup Connector resources: %w", err) } else if !done { logger.Debugf("Connector cleanup not done yet, waiting for next reconcile") diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d6993465304fd..6a6e7d61f9aa3 100644 --- 
a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -725,7 +725,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/utils/net from k8s.io/apimachinery/pkg/util/net+ k8s.io/utils/ptr from k8s.io/client-go/tools/cache+ k8s.io/utils/trace from k8s.io/client-go/tools/cache - sigs.k8s.io/controller-runtime/pkg/builder from tailscale.com/cmd/k8s-operator + sigs.k8s.io/controller-runtime/pkg/builder from tailscale.com/cmd/k8s-operator+ sigs.k8s.io/controller-runtime/pkg/cache from sigs.k8s.io/controller-runtime/pkg/cluster+ sigs.k8s.io/controller-runtime/pkg/cache/internal from sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/certwatcher from sigs.k8s.io/controller-runtime/pkg/metrics/server+ @@ -821,10 +821,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - tailscale.com/k8s-operator from tailscale.com/cmd/k8s-operator + tailscale.com/k8s-operator from tailscale.com/cmd/k8s-operator+ tailscale.com/k8s-operator/api-proxy from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/apis from tailscale.com/k8s-operator/apis/v1alpha1 tailscale.com/k8s-operator/apis/v1alpha1 from tailscale.com/cmd/k8s-operator+ + tailscale.com/k8s-operator/reconciler from tailscale.com/k8s-operator/reconciler/tailnet + tailscale.com/k8s-operator/reconciler/tailnet from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/sessionrecording from tailscale.com/k8s-operator/api-proxy tailscale.com/k8s-operator/sessionrecording/spdy from tailscale.com/k8s-operator/sessionrecording tailscale.com/k8s-operator/sessionrecording/tsrecorder from tailscale.com/k8s-operator/sessionrecording+ diff --git a/cmd/k8s-operator/deploy/chart/templates/.gitignore 
b/cmd/k8s-operator/deploy/chart/templates/.gitignore index ae7c682d9fd15..f480bb57d5f18 100644 --- a/cmd/k8s-operator/deploy/chart/templates/.gitignore +++ b/cmd/k8s-operator/deploy/chart/templates/.gitignore @@ -8,3 +8,4 @@ /proxyclass.yaml /proxygroup.yaml /recorder.yaml +/tailnet.yaml diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 5eb920a6f41c4..930eef852c9ce 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -37,6 +37,9 @@ rules: - apiGroups: ["tailscale.com"] resources: ["dnsconfigs", "dnsconfigs/status"] verbs: ["get", "list", "watch", "update"] +- apiGroups: ["tailscale.com"] + resources: ["tailnets", "tailnets/status"] + verbs: ["get", "list", "watch", "update"] - apiGroups: ["tailscale.com"] resources: ["recorders", "recorders/status"] verbs: ["get", "list", "watch", "update"] diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml index 74d32d53d2199..03c51c7553bf9 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_connectors.yaml @@ -181,6 +181,14 @@ spec: items: type: string pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + tailnet: + description: |- + Tailnet specifies the tailnet this Connector should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Connector tailnet is immutable x-kubernetes-validations: - rule: has(self.subnetRouter) || (has(self.exitNode) && self.exitNode == true) || has(self.appConnector) message: A Connector needs to have at least one of exit node, subnet router or app connector configured. 
diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml index 98ca1c378ab8d..0254f01b8f0bf 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygroups.yaml @@ -139,6 +139,14 @@ spec: items: type: string pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + tailnet: + description: |- + Tailnet specifies the tailnet this ProxyGroup should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: ProxyGroup tailnet is immutable type: description: |- Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml index 3d80c55e10a73..28d2be78e509c 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -1680,6 +1680,14 @@ spec: items: type: string pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ + tailnet: + description: |- + Tailnet specifies the tailnet this Recorder should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. 
+ type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: Recorder tailnet is immutable x-kubernetes-validations: - rule: '!(self.replicas > 1 && (!has(self.storage) || !has(self.storage.s3)))' message: S3 storage must be used when deploying multiple Recorder replicas diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_tailnets.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_tailnets.yaml new file mode 100644 index 0000000000000..200d839431573 --- /dev/null +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_tailnets.yaml @@ -0,0 +1,141 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.0 + name: tailnets.tailscale.com +spec: + group: tailscale.com + names: + kind: Tailnet + listKind: TailnetList + plural: tailnets + shortNames: + - tn + singular: tailnet + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Status of the deployed Tailnet resources. + jsonPath: .status.conditions[?(@.type == "TailnetReady")].reason + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec describes the desired state of the Tailnet. + More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + type: object + required: + - credentials + properties: + credentials: + description: Denotes the location of the OAuth credentials to use for authenticating with this Tailnet. + type: object + required: + - secretName + properties: + secretName: + description: |- + The name of the secret containing the OAuth credentials. This secret must contain two fields "client_id" and + "client_secret". + type: string + loginUrl: + description: URL of the control plane to be used by all resources managed by the operator using this Tailnet. + type: string + status: + description: |- + Status describes the status of the Tailnet. This is set + and managed by the Tailscale operator. + type: object + properties: + conditions: + type: array + items: + description: Condition contains details for one aspect of the current state of this API Resource. + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true + subresources: + status: {} diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index c53f5049261e8..5a64f2c7db307 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -206,6 +206,14 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: string type: array + tailnet: + description: |- + Tailnet specifies the tailnet this Connector should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. 
+ type: string + x-kubernetes-validations: + - message: Connector tailnet is immutable + rule: self == oldSelf type: object x-kubernetes-validations: - message: A Connector needs to have at least one of exit node, subnet router or app connector configured. @@ -3135,6 +3143,14 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: string type: array + tailnet: + description: |- + Tailnet specifies the tailnet this ProxyGroup should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + type: string + x-kubernetes-validations: + - message: ProxyGroup tailnet is immutable + rule: self == oldSelf type: description: |- Type of the ProxyGroup proxies. Supported types are egress, ingress, and kube-apiserver. @@ -4950,6 +4966,14 @@ spec: pattern: ^tag:[a-zA-Z][a-zA-Z0-9-]*$ type: string type: array + tailnet: + description: |- + Tailnet specifies the tailnet this Recorder should join. If blank, the default tailnet is used. When set, this + name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + type: string + x-kubernetes-validations: + - message: Recorder tailnet is immutable + rule: self == oldSelf type: object x-kubernetes-validations: - message: S3 storage must be used when deploying multiple Recorder replicas @@ -5059,6 +5083,148 @@ spec: subresources: status: {} --- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.0 + name: tailnets.tailscale.com +spec: + group: tailscale.com + names: + kind: Tailnet + listKind: TailnetList + plural: tailnets + shortNames: + - tn + singular: tailnet + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Status of the deployed Tailnet resources. 
+ jsonPath: .status.conditions[?(@.type == "TailnetReady")].reason + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec describes the desired state of the Tailnet. + More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + credentials: + description: Denotes the location of the OAuth credentials to use for authenticating with this Tailnet. + properties: + secretName: + description: |- + The name of the secret containing the OAuth credentials. This secret must contain two fields "client_id" and + "client_secret". + type: string + required: + - secretName + type: object + loginUrl: + description: URL of the control plane to be used by all resources managed by the operator using this Tailnet. + type: string + required: + - credentials + type: object + status: + description: |- + Status describes the status of the Tailnet. This is set + and managed by the Tailscale operator. + properties: + conditions: + items: + description: Condition contains details for one aspect of the current state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -5141,6 +5307,16 @@ rules: - list - watch - update + - apiGroups: + - tailscale.com + resources: + - tailnets + - tailnets/status + verbs: + - get + - list + - watch + - update - apiGroups: - tailscale.com resources: diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 08bdc350d500c..ca54e90909954 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -26,12 +26,14 @@ const ( dnsConfigCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_dnsconfigs.yaml" recorderCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_recorders.yaml" proxyGroupCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxygroups.yaml" + tailnetCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_tailnets.yaml" helmTemplatesPath = operatorDeploymentFilesPath + "/chart/templates" connectorCRDHelmTemplatePath = helmTemplatesPath + "/connector.yaml" proxyClassCRDHelmTemplatePath = helmTemplatesPath + "/proxyclass.yaml" dnsConfigCRDHelmTemplatePath = helmTemplatesPath + "/dnsconfig.yaml" recorderCRDHelmTemplatePath = helmTemplatesPath + "/recorder.yaml" proxyGroupCRDHelmTemplatePath = helmTemplatesPath + "/proxygroup.yaml" + tailnetCRDHelmTemplatePath = helmTemplatesPath + "/tailnet.yaml" helmConditionalStart = "{{ if .Values.installCRDs -}}\n" helmConditionalEnd = "{{- end -}}" @@ -154,6 +156,7 @@ func generate(baseDir string) error { {dnsConfigCRDPath, 
dnsConfigCRDHelmTemplatePath}, {recorderCRDPath, recorderCRDHelmTemplatePath}, {proxyGroupCRDPath, proxyGroupCRDHelmTemplatePath}, + {tailnetCRDPath, tailnetCRDHelmTemplatePath}, } { if err := addCRDToHelm(crd.crdPath, crd.templatePath); err != nil { return fmt.Errorf("error adding %s CRD to Helm templates: %w", crd.crdPath, err) diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 050b03f55970f..9ef173ecef746 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -102,7 +102,7 @@ func (a *IngressReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare return nil } - if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress"), proxyTypeIngressResource); err != nil { + if done, err := a.ssr.Cleanup(ctx, operatorTailnet, logger, childResourceLabels(ing.Name, ing.Namespace, "ingress"), proxyTypeIngressResource); err != nil { return fmt.Errorf("failed to cleanup: %w", err) } else if !done { logger.Debugf("cleanup not done yet, waiting for next reconcile") diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index b50be8ce7ba66..7bb8b95f0855f 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -54,6 +54,7 @@ import ( "tailscale.com/ipn/store/kubestore" apiproxy "tailscale.com/k8s-operator/api-proxy" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler/tailnet" "tailscale.com/kube/kubetypes" "tailscale.com/tsnet" "tailscale.com/tstime" @@ -325,6 +326,17 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not create manager: %v", err) } + tailnetOptions := tailnet.ReconcilerOptions{ + Client: mgr.GetClient(), + TailscaleNamespace: opts.tailscaleNamespace, + Clock: tstime.DefaultClock{}, + Logger: opts.log, + } + + if err = tailnet.NewReconciler(tailnetOptions).Register(mgr); err != nil { + startlog.Fatalf("could not register tailnet reconciler: %v", err) + } + svcFilter := 
handler.EnqueueRequestsFromMapFunc(serviceHandler) svcChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("svc")) // If a ProxyClass changes, enqueue all Services labeled with that diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 946e017a26f00..3a50ed8fb4c2b 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -49,11 +49,12 @@ import ( ) const ( - reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed" - reasonProxyGroupReady = "ProxyGroupReady" - reasonProxyGroupAvailable = "ProxyGroupAvailable" - reasonProxyGroupCreating = "ProxyGroupCreating" - reasonProxyGroupInvalid = "ProxyGroupInvalid" + reasonProxyGroupCreationFailed = "ProxyGroupCreationFailed" + reasonProxyGroupReady = "ProxyGroupReady" + reasonProxyGroupAvailable = "ProxyGroupAvailable" + reasonProxyGroupCreating = "ProxyGroupCreating" + reasonProxyGroupInvalid = "ProxyGroupInvalid" + reasonProxyGroupTailnetUnavailable = "ProxyGroupTailnetUnavailable" // Copied from k8s.io/apiserver/pkg/registry/generic/registry/store.go@cccad306d649184bf2a0e319ba830c53f65c445c optimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again" @@ -117,6 +118,23 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } else if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com ProxyGroup: %w", err) } + + tailscaleClient := r.tsClient + if pg.Spec.Tailnet != "" { + tc, err := clientForTailnet(ctx, r.Client, r.tsNamespace, pg.Spec.Tailnet) + if err != nil { + oldPGStatus := pg.Status.DeepCopy() + nrr := ¬ReadyReason{ + reason: reasonProxyGroupTailnetUnavailable, + message: err.Error(), + } + + return reconcile.Result{}, errors.Join(err, r.maybeUpdateStatus(ctx, logger, pg, oldPGStatus, nrr, make(map[string][]netip.AddrPort))) + } + + tailscaleClient = tc + } + if markedForDeletion(pg) { logger.Debugf("ProxyGroup is being 
deleted, cleaning up resources") ix := xslices.Index(pg.Finalizers, FinalizerName) @@ -125,7 +143,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ return reconcile.Result{}, nil } - if done, err := r.maybeCleanup(ctx, pg); err != nil { + if done, err := r.maybeCleanup(ctx, tailscaleClient, pg); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) return reconcile.Result{}, nil @@ -144,7 +162,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } oldPGStatus := pg.Status.DeepCopy() - staticEndpoints, nrr, err := r.reconcilePG(ctx, pg, logger) + staticEndpoints, nrr, err := r.reconcilePG(ctx, tailscaleClient, pg, logger) return reconcile.Result{}, errors.Join(err, r.maybeUpdateStatus(ctx, logger, pg, oldPGStatus, nrr, staticEndpoints)) } @@ -152,7 +170,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ // for deletion. It is separated out from Reconcile to make a clear separation // between reconciling the ProxyGroup, and posting the status of its created // resources onto the ProxyGroup status field. -func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (map[string][]netip.AddrPort, *notReadyReason, error) { +func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (map[string][]netip.AddrPort, *notReadyReason, error) { if !slices.Contains(pg.Finalizers, FinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. 
So, @@ -193,7 +211,7 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, pg *tsapi.ProxyG return notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid ProxyGroup spec: %v", err)) } - staticEndpoints, nrr, err := r.maybeProvision(ctx, pg, proxyClass) + staticEndpoints, nrr, err := r.maybeProvision(ctx, tailscaleClient, pg, proxyClass) if err != nil { return nil, nrr, err } @@ -279,7 +297,7 @@ func (r *ProxyGroupReconciler) validate(ctx context.Context, pg *tsapi.ProxyGrou return errors.Join(errs...) } -func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { logger := r.logger(pg.Name) r.mu.Lock() r.ensureAddedToGaugeForProxyGroup(pg) @@ -302,7 +320,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro } } - staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, pg, proxyClass, svcToNodePorts) + staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, tailscaleClient, pg, proxyClass, svcToNodePorts) if err != nil { var selectorErr *FindStaticEndpointErr if errors.As(err, &selectorErr) { @@ -414,7 +432,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, pg *tsapi.Pro return r.notReadyErrf(pg, logger, "error reconciling metrics resources: %w", err) } - if err := r.cleanupDanglingResources(ctx, pg, proxyClass); err != nil { + if err := r.cleanupDanglingResources(ctx, tailscaleClient, pg, proxyClass); err != nil { return r.notReadyErrf(pg, logger, "error cleaning up dangling resources: %w", err) } @@ -611,7 +629,7 @@ func (r *ProxyGroupReconciler) ensureNodePortServiceCreated(ctx context.Context, // cleanupDanglingResources ensures we don't leak config secrets, state secrets, and // 
tailnet devices when the number of replicas specified is reduced. -func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) error { +func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) error { logger := r.logger(pg.Name) metadata, err := r.getNodeMetadata(ctx, pg) if err != nil { @@ -625,7 +643,7 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg // Dangling resource, delete the config + state Secrets, as well as // deleting the device from the tailnet. - if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil { + if err := r.deleteTailnetDevice(ctx, tailscaleClient, m.tsID, logger); err != nil { return err } if err := r.Delete(ctx, m.stateSecret); err != nil && !apierrors.IsNotFound(err) { @@ -668,7 +686,7 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, pg // maybeCleanup just deletes the device from the tailnet. All the kubernetes // resources linked to a ProxyGroup will get cleaned up via owner references // (which we can use because they are all in the same namespace). 
-func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.ProxyGroup) (bool, error) { +func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup) (bool, error) { logger := r.logger(pg.Name) metadata, err := r.getNodeMetadata(ctx, pg) @@ -677,7 +695,7 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.Proxy } for _, m := range metadata { - if err := r.deleteTailnetDevice(ctx, m.tsID, logger); err != nil { + if err := r.deleteTailnetDevice(ctx, tailscaleClient, m.tsID, logger); err != nil { return false, err } } @@ -698,9 +716,9 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, pg *tsapi.Proxy return true, nil } -func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { +func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, tailscaleClient tsClient, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { logger.Debugf("deleting device %s from control", string(id)) - if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil { + if err := tailscaleClient.DeleteDevice(ctx, string(id)); err != nil { errResp := &tailscale.ErrResponse{} if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) @@ -714,7 +732,13 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, id tailc return nil } -func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass, svcToNodePorts map[string]uint16) (endpoints map[string][]netip.AddrPort, err error) { +func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( + ctx context.Context, + tailscaleClient tsClient, + pg *tsapi.ProxyGroup, + proxyClass *tsapi.ProxyClass, + svcToNodePorts map[string]uint16, +) (endpoints 
map[string][]netip.AddrPort, err error) { logger := r.logger(pg.Name) endpoints = make(map[string][]netip.AddrPort, pgReplicas(pg)) // keyed by Service name. for i := range pgReplicas(pg) { @@ -728,7 +752,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } var existingCfgSecret *corev1.Secret // unmodified copy of secret - if err := r.Get(ctx, client.ObjectKeyFromObject(cfgSecret), cfgSecret); err == nil { + if err = r.Get(ctx, client.ObjectKeyFromObject(cfgSecret), cfgSecret); err == nil { logger.Debugf("Secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) existingCfgSecret = cfgSecret.DeepCopy() } else if !apierrors.IsNotFound(err) { @@ -742,7 +766,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p if len(tags) == 0 { tags = r.defaultTags } - key, err := newAuthKey(ctx, r.tsClient, tags) + key, err := newAuthKey(ctx, tailscaleClient, tags) if err != nil { return nil, err } @@ -757,7 +781,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p Namespace: r.tsNamespace, }, } - if err := r.Get(ctx, client.ObjectKeyFromObject(stateSecret), stateSecret); err != nil && !apierrors.IsNotFound(err) { + if err = r.Get(ctx, client.ObjectKeyFromObject(stateSecret), stateSecret); err != nil && !apierrors.IsNotFound(err) { return nil, err } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 2b6d1290e53f8..2919e535c0dca 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -107,6 +107,7 @@ const ( letsEncryptStagingEndpoint = "https://acme-staging-v02.api.letsencrypt.org/directory" mainContainerName = "tailscale" + operatorTailnet = "" ) var ( @@ -152,6 +153,9 @@ type tailscaleSTSConfig struct { // HostnamePrefix specifies the desired prefix for the device's hostname. The hostname will be suffixed with the // ordinal number generated by the StatefulSet. 
HostnamePrefix string + + // Tailnet specifies the Tailnet resource to use for producing auth keys. + Tailnet string } type connector struct { @@ -194,6 +198,16 @@ func IsHTTPSEnabledOnTailnet(tsnetServer tsnetServer) bool { // Provision ensures that the StatefulSet for the given service is running and // up to date. func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) { + tailscaleClient := a.tsClient + if sts.Tailnet != "" { + tc, err := clientForTailnet(ctx, a.Client, a.operatorNamespace, sts.Tailnet) + if err != nil { + return nil, err + } + + tailscaleClient = tc + } + // Do full reconcile. // TODO (don't create Service for the Connector) hsvc, err := a.reconcileHeadlessService(ctx, logger, sts) @@ -213,7 +227,7 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga } sts.ProxyClass = proxyClass - secretNames, err := a.provisionSecrets(ctx, logger, sts, hsvc) + secretNames, err := a.provisionSecrets(ctx, tailscaleClient, logger, sts, hsvc) if err != nil { return nil, fmt.Errorf("failed to create or get API key secret: %w", err) } @@ -237,7 +251,18 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga // Cleanup removes all resources associated that were created by Provision with // the given labels. It returns true when all resources have been removed, // otherwise it returns false and the caller should retry later. 
-func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) { +func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, tailnet string, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) { + tailscaleClient := a.tsClient + if tailnet != "" { + tc, err := clientForTailnet(ctx, a.Client, a.operatorNamespace, tailnet) + if err != nil { + logger.Errorf("failed to get tailscale client: %v", err) + return false, nil + } + + tailscaleClient = tc + } + // Need to delete the StatefulSet first, and delete it with foreground // cascading deletion. That way, the pod that's writing to the Secret will // stop running before we start looking at the Secret's contents, and @@ -279,7 +304,7 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, logger *zap.Sugare for _, dev := range devices { if dev.id != "" { logger.Debugf("deleting device %s from control", string(dev.id)) - if err = a.tsClient.DeleteDevice(ctx, string(dev.id)); err != nil { + if err = tailscaleClient.DeleteDevice(ctx, string(dev.id)); err != nil { errResp := &tailscale.ErrResponse{} if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) @@ -360,7 +385,7 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) } -func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) ([]string, error) { +func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscaleClient tsClient, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) ([]string, error) { secretNames := make([]string, 
stsC.Replicas) // Start by ensuring we have Secrets for the desired number of replicas. This will handle both creating and scaling @@ -403,7 +428,7 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *z if len(tags) == 0 { tags = a.defaultTags } - authKey, err = newAuthKey(ctx, a.tsClient, tags) + authKey, err = newAuthKey(ctx, tailscaleClient, tags) if err != nil { return nil, err } @@ -477,7 +502,7 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *z if dev != nil && dev.id != "" { var errResp *tailscale.ErrResponse - err = a.tsClient.DeleteDevice(ctx, string(dev.id)) + err = tailscaleClient.DeleteDevice(ctx, string(dev.id)) switch { case errors.As(err, &errResp) && errResp.Status == http.StatusNotFound: // This device has possibly already been deleted in the admin console. So we can ignore this diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 5c163e081f5a6..ec7bb8080dec7 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -23,6 +23,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -167,7 +168,7 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare proxyTyp = proxyTypeIngressService } - if done, err := a.ssr.Cleanup(ctx, logger, childResourceLabels(svc.Name, svc.Namespace, "svc"), proxyTyp); err != nil { + if done, err := a.ssr.Cleanup(ctx, operatorTailnet, logger, childResourceLabels(svc.Name, svc.Namespace, "svc"), proxyTyp); err != nil { return fmt.Errorf("failed to cleanup: %w", err) } else if !done { logger.Debugf("cleanup not done yet, waiting for next reconcile") diff --git a/cmd/k8s-operator/tailnet.go b/cmd/k8s-operator/tailnet.go new file mode 100644 index 0000000000000..8d749545faa46 --- /dev/null +++ 
b/cmd/k8s-operator/tailnet.go @@ -0,0 +1,58 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "fmt" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" + operatorutils "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" +) + +func clientForTailnet(ctx context.Context, cl client.Client, namespace, name string) (tsClient, error) { + var tn tsapi.Tailnet + if err := cl.Get(ctx, client.ObjectKey{Name: name}, &tn); err != nil { + return nil, fmt.Errorf("failed to get tailnet %q: %w", name, err) + } + + if !operatorutils.TailnetIsReady(&tn) { + return nil, fmt.Errorf("tailnet %q is not ready", name) + } + + var secret corev1.Secret + if err := cl.Get(ctx, client.ObjectKey{Name: tn.Spec.Credentials.SecretName, Namespace: namespace}, &secret); err != nil { + return nil, fmt.Errorf("failed to get Secret %q in namespace %q: %w", tn.Spec.Credentials.SecretName, namespace, err) + } + + baseURL := ipn.DefaultControlURL + if tn.Spec.LoginURL != "" { + baseURL = tn.Spec.LoginURL + } + + credentials := clientcredentials.Config{ + ClientID: string(secret.Data["client_id"]), + ClientSecret: string(secret.Data["client_secret"]), + TokenURL: baseURL + "/api/v2/oauth/token", + } + + source := credentials.TokenSource(ctx) + httpClient := oauth2.NewClient(ctx, source) + + ts := tailscale.NewClient(defaultTailnet, nil) + ts.UserAgent = "tailscale-k8s-operator" + ts.HTTPClient = httpClient + ts.BaseURL = baseURL + + return ts, nil +} diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index bfb01fa86de67..3e8608bc8db8e 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -42,10 +42,11 @@ import ( ) const ( - reasonRecorderCreationFailed = 
"RecorderCreationFailed" - reasonRecorderCreating = "RecorderCreating" - reasonRecorderCreated = "RecorderCreated" - reasonRecorderInvalid = "RecorderInvalid" + reasonRecorderCreationFailed = "RecorderCreationFailed" + reasonRecorderCreating = "RecorderCreating" + reasonRecorderCreated = "RecorderCreated" + reasonRecorderInvalid = "RecorderInvalid" + reasonRecorderTailnetUnavailable = "RecorderTailnetUnavailable" currentProfileKey = "_current-profile" ) @@ -84,6 +85,30 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques } else if err != nil { return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com Recorder: %w", err) } + + oldTSRStatus := tsr.Status.DeepCopy() + setStatusReady := func(tsr *tsapi.Recorder, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { + tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, status, reason, message, tsr.Generation, r.clock, logger) + if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) { + // An error encountered here should get returned by the Reconcile function. 
+ if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil { + return reconcile.Result{}, errors.Join(err, updateErr) + } + } + + return reconcile.Result{}, nil + } + + tailscaleClient := r.tsClient + if tsr.Spec.Tailnet != "" { + tc, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tsr.Spec.Tailnet) + if err != nil { + return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderTailnetUnavailable, err.Error()) + } + + tailscaleClient = tc + } + if markedForDeletion(tsr) { logger.Debugf("Recorder is being deleted, cleaning up resources") ix := xslices.Index(tsr.Finalizers, FinalizerName) @@ -92,7 +117,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return reconcile.Result{}, nil } - if done, err := r.maybeCleanup(ctx, tsr); err != nil { + if done, err := r.maybeCleanup(ctx, tsr, tailscaleClient); err != nil { return reconcile.Result{}, err } else if !done { logger.Debugf("Recorder resource cleanup not yet finished, will retry...") @@ -106,19 +131,6 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return reconcile.Result{}, nil } - oldTSRStatus := tsr.Status.DeepCopy() - setStatusReady := func(tsr *tsapi.Recorder, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) { - tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, status, reason, message, tsr.Generation, r.clock, logger) - if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) { - // An error encountered here should get returned by the Reconcile function. - if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil { - return reconcile.Result{}, errors.Join(err, updateErr) - } - } - - return reconcile.Result{}, nil - } - if !slices.Contains(tsr.Finalizers, FinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. 
So, @@ -137,7 +149,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message) } - if err = r.maybeProvision(ctx, tsr); err != nil { + if err = r.maybeProvision(ctx, tailscaleClient, tsr); err != nil { reason := reasonRecorderCreationFailed message := fmt.Sprintf("failed creating Recorder: %s", err) if strings.Contains(err.Error(), optimisticLockErrorMsg) { @@ -155,7 +167,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return setStatusReady(tsr, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated) } -func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Recorder) error { +func (r *RecorderReconciler) maybeProvision(ctx context.Context, tailscaleClient tsClient, tsr *tsapi.Recorder) error { logger := r.logger(tsr.Name) r.mu.Lock() @@ -163,7 +175,7 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco gaugeRecorderResources.Set(int64(r.recorders.Len())) r.mu.Unlock() - if err := r.ensureAuthSecretsCreated(ctx, tsr); err != nil { + if err := r.ensureAuthSecretsCreated(ctx, tailscaleClient, tsr); err != nil { return fmt.Errorf("error creating secrets: %w", err) } @@ -241,13 +253,13 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco // If we have scaled the recorder down, we will have dangling state secrets // that we need to clean up. 
- if err = r.maybeCleanupSecrets(ctx, tsr); err != nil { + if err = r.maybeCleanupSecrets(ctx, tailscaleClient, tsr); err != nil { return fmt.Errorf("error cleaning up Secrets: %w", err) } var devices []tsapi.RecorderTailnetDevice for replica := range replicas { - dev, ok, err := r.getDeviceInfo(ctx, tsr.Name, replica) + dev, ok, err := r.getDeviceInfo(ctx, tailscaleClient, tsr.Name, replica) switch { case err != nil: return fmt.Errorf("failed to get device info: %w", err) @@ -312,7 +324,7 @@ func (r *RecorderReconciler) maybeCleanupServiceAccounts(ctx context.Context, ts return nil } -func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tsr *tsapi.Recorder) error { +func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tailscaleClient tsClient, tsr *tsapi.Recorder) error { options := []client.ListOption{ client.InNamespace(r.tsNamespace), client.MatchingLabels(tsrLabels("recorder", tsr.Name, nil)), @@ -354,7 +366,7 @@ func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tsr *tsapi var errResp *tailscale.ErrResponse r.log.Debugf("deleting device %s", devicePrefs.Config.NodeID) - err = r.tsClient.DeleteDevice(ctx, string(devicePrefs.Config.NodeID)) + err = tailscaleClient.DeleteDevice(ctx, string(devicePrefs.Config.NodeID)) switch { case errors.As(err, &errResp) && errResp.Status == http.StatusNotFound: // This device has possibly already been deleted in the admin console. So we can ignore this @@ -375,7 +387,7 @@ func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tsr *tsapi // maybeCleanup just deletes the device from the tailnet. All the kubernetes // resources linked to a Recorder will get cleaned up via owner references // (which we can use because they are all in the same namespace). 
-func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder) (bool, error) { +func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder, tailscaleClient tsClient) (bool, error) { logger := r.logger(tsr.Name) var replicas int32 = 1 @@ -399,7 +411,7 @@ func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Record nodeID := string(devicePrefs.Config.NodeID) logger.Debugf("deleting device %s from control", nodeID) - if err = r.tsClient.DeleteDevice(ctx, nodeID); err != nil { + if err = tailscaleClient.DeleteDevice(ctx, nodeID); err != nil { errResp := &tailscale.ErrResponse{} if errors.As(err, errResp) && errResp.Status == http.StatusNotFound { logger.Debugf("device %s not found, likely because it has already been deleted from control", nodeID) @@ -425,7 +437,7 @@ func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Record return true, nil } -func (r *RecorderReconciler) ensureAuthSecretsCreated(ctx context.Context, tsr *tsapi.Recorder) error { +func (r *RecorderReconciler) ensureAuthSecretsCreated(ctx context.Context, tailscaleClient tsClient, tsr *tsapi.Recorder) error { var replicas int32 = 1 if tsr.Spec.Replicas != nil { replicas = *tsr.Spec.Replicas @@ -453,7 +465,7 @@ func (r *RecorderReconciler) ensureAuthSecretsCreated(ctx context.Context, tsr * return fmt.Errorf("failed to get Secret %q: %w", key.Name, err) } - authKey, err := newAuthKey(ctx, r.tsClient, tags.Stringify()) + authKey, err := newAuthKey(ctx, tailscaleClient, tags.Stringify()) if err != nil { return err } @@ -555,7 +567,7 @@ func getDevicePrefs(secret *corev1.Secret) (prefs prefs, ok bool, err error) { return prefs, ok, nil } -func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string, replica int32) (d tsapi.RecorderTailnetDevice, ok bool, err error) { +func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tailscaleClient tsClient, tsrName string, replica int32) (d 
tsapi.RecorderTailnetDevice, ok bool, err error) { secret, err := r.getStateSecret(ctx, tsrName, replica) if err != nil || secret == nil { return tsapi.RecorderTailnetDevice{}, false, err @@ -569,7 +581,7 @@ func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string, // TODO(tomhjp): The profile info doesn't include addresses, which is why we // need the API. Should maybe update tsrecorder to write IPs to the state // Secret like containerboot does. - device, err := r.tsClient.Device(ctx, string(prefs.Config.NodeID), nil) + device, err := tailscaleClient.Device(ctx, string(prefs.Config.NodeID), nil) if err != nil { return tsapi.RecorderTailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) } diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 3a4e692d902ec..31f351013164a 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -18,6 +18,8 @@ - [ProxyGroupList](#proxygrouplist) - [Recorder](#recorder) - [RecorderList](#recorderlist) +- [Tailnet](#tailnet) +- [TailnetList](#tailnetlist) @@ -139,6 +141,7 @@ _Appears in:_ | `appConnector` _[AppConnector](#appconnector)_ | AppConnector defines whether the Connector device should act as a Tailscale app connector. A Connector that is
      configured as an app connector cannot be a subnet router or an exit node. If this field is unset, the
      Connector does not act as an app connector.
      Note that you will need to manually configure the permissions and the domains for the app connector via the
      Admin panel.
      Note also that the main tested and supported use case of this config option is to deploy an app connector on
      Kubernetes to access SaaS applications available on the public internet. Using the app connector to expose
      cluster workloads or other internal workloads to tailnet might work, but this is not a use case that we have
      tested or optimised for.
      If you are using the app connector to access SaaS applications because you need a predictable egress IP that
      can be whitelisted, it is also your responsibility to ensure that cluster traffic from the connector flows
      via that predictable IP, for example by enforcing that cluster egress traffic is routed via an egress NAT
      device with a static IP address.
      https://tailscale.com/kb/1281/app-connectors | | | | `exitNode` _boolean_ | ExitNode defines whether the Connector device should act as a Tailscale exit node. Defaults to false.
      This field is mutually exclusive with the appConnector field.
      https://tailscale.com/kb/1103/exit-nodes | | | | `replicas` _integer_ | Replicas specifies how many devices to create. Set this to enable
      high availability for app connectors, subnet routers, or exit nodes.
      https://tailscale.com/kb/1115/high-availability. Defaults to 1. | | Minimum: 0
      | +| `tailnet` _string_ | Tailnet specifies the tailnet this Connector should join. If blank, the default tailnet is used. When set, this
      name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. | | | #### ConnectorStatus @@ -741,6 +744,7 @@ _Appears in:_ | `hostnamePrefix` _[HostnamePrefix](#hostnameprefix)_ | HostnamePrefix is the hostname prefix to use for tailnet devices created
      by the ProxyGroup. Each device will have the integer number from its
      StatefulSet pod appended to this prefix to form the full hostname.
      HostnamePrefix can contain lower case letters, numbers and dashes, it
      must not start with a dash and must be between 1 and 62 characters long. | | Pattern: `^[a-z0-9][a-z0-9-]{0,61}$`
      Type: string
      | | `proxyClass` _string_ | ProxyClass is the name of the ProxyClass custom resource that contains
      configuration options that should be applied to the resources created
      for this ProxyGroup. If unset, and there is no default ProxyClass
      configured, the operator will create resources with the default
      configuration. | | | | `kubeAPIServer` _[KubeAPIServerConfig](#kubeapiserverconfig)_ | KubeAPIServer contains configuration specific to the kube-apiserver
      ProxyGroup type. This field is only used when Type is set to "kube-apiserver". | | | +| `tailnet` _string_ | Tailnet specifies the tailnet this ProxyGroup should join. If blank, the default tailnet is used. When set, this
      name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. | | | #### ProxyGroupStatus @@ -901,6 +905,7 @@ _Appears in:_ | `enableUI` _boolean_ | Set to true to enable the Recorder UI. The UI lists and plays recorded sessions.
      The UI will be served at :443. Defaults to false.
      Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node.
      Required if S3 storage is not set up, to ensure that recordings are accessible. | | | | `storage` _[Storage](#storage)_ | Configure where to store session recordings. By default, recordings will
      be stored in a local ephemeral volume, and will not be persisted past the
      lifetime of a specific pod. | | | | `replicas` _integer_ | Replicas specifies how many instances of tsrecorder to run. Defaults to 1. | | Minimum: 0
      | +| `tailnet` _string_ | Tailnet specifies the tailnet this Recorder should join. If blank, the default tailnet is used. When set, this
      name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. | | | #### RecorderStatefulSet @@ -1154,6 +1159,44 @@ _Appears in:_ +#### Tailnet + + + + + + + +_Appears in:_ +- [TailnetList](#tailnetlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `Tailnet` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
      Servers may infer this from the endpoint the client submits requests to.
      Cannot be updated.
      In CamelCase.
      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
      Servers should convert recognized schemas to the latest internal value, and
      may reject unrecognized values.
      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TailnetSpec](#tailnetspec)_ | Spec describes the desired state of the Tailnet.
      More info:
      https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | +| `status` _[TailnetStatus](#tailnetstatus)_ | Status describes the status of the Tailnet. This is set
      and managed by the Tailscale operator. | | | + + +#### TailnetCredentials + + + + + + + +_Appears in:_ +- [TailnetSpec](#tailnetspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `secretName` _string_ | The name of the secret containing the OAuth credentials. This secret must contain two fields "client_id" and
      "client_secret". | | | + + #### TailnetDevice @@ -1172,6 +1215,59 @@ _Appears in:_ | `staticEndpoints` _string array_ | StaticEndpoints are user configured, 'static' endpoints by which tailnet peers can reach this device. | | | +#### TailnetList + + + + + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `TailnetList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
      Servers may infer this from the endpoint the client submits requests to.
      Cannot be updated.
      In CamelCase.
      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
      Servers should convert recognized schemas to the latest internal value, and
      may reject unrecognized values.
      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Tailnet](#tailnet) array_ | | | | + + +#### TailnetSpec + + + + + + + +_Appears in:_ +- [Tailnet](#tailnet) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `loginUrl` _string_ | URL of the control plane to be used by all resources managed by the operator using this Tailnet. | | | +| `credentials` _[TailnetCredentials](#tailnetcredentials)_ | Denotes the location of the OAuth credentials to use for authenticating with this Tailnet. | | | + + +#### TailnetStatus + + + + + + + +_Appears in:_ +- [Tailnet](#tailnet) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | | | | + + #### TailscaleConfig diff --git a/k8s-operator/apis/v1alpha1/register.go b/k8s-operator/apis/v1alpha1/register.go index 0880ac975732e..993a119fad2eb 100644 --- a/k8s-operator/apis/v1alpha1/register.go +++ b/k8s-operator/apis/v1alpha1/register.go @@ -67,6 +67,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &RecorderList{}, &ProxyGroup{}, &ProxyGroupList{}, + &Tailnet{}, + &TailnetList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index 58457500f6c34..ebedea18f0e98 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -133,6 +133,12 @@ type ConnectorSpec struct { // +optional // +kubebuilder:validation:Minimum=0 Replicas *int32 `json:"replicas,omitempty"` + + // Tailnet specifies the tailnet this Connector should join. 
If blank, the default tailnet is used. When set, this + // name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Connector tailnet is immutable" + Tailnet string `json:"tailnet,omitempty"` } // SubnetRouter defines subnet routes that should be exposed to tailnet via a diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index 28fd9e00973c5..8cbcc2d196e51 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -97,6 +97,12 @@ type ProxyGroupSpec struct { // ProxyGroup type. This field is only used when Type is set to "kube-apiserver". // +optional KubeAPIServer *KubeAPIServerConfig `json:"kubeAPIServer,omitempty"` + + // Tailnet specifies the tailnet this ProxyGroup should join. If blank, the default tailnet is used. When set, this + // name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ProxyGroup tailnet is immutable" + Tailnet string `json:"tailnet,omitempty"` } type ProxyGroupStatus struct { diff --git a/k8s-operator/apis/v1alpha1/types_recorder.go b/k8s-operator/apis/v1alpha1/types_recorder.go index 67cffbf09e969..d5a22e82c2dbc 100644 --- a/k8s-operator/apis/v1alpha1/types_recorder.go +++ b/k8s-operator/apis/v1alpha1/types_recorder.go @@ -81,6 +81,12 @@ type RecorderSpec struct { // +optional // +kubebuilder:validation:Minimum=0 Replicas *int32 `json:"replicas,omitzero"` + + // Tailnet specifies the tailnet this Recorder should join. If blank, the default tailnet is used. When set, this + // name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. 
+ // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Recorder tailnet is immutable" + Tailnet string `json:"tailnet,omitempty"` } type RecorderStatefulSet struct { diff --git a/k8s-operator/apis/v1alpha1/types_tailnet.go b/k8s-operator/apis/v1alpha1/types_tailnet.go new file mode 100644 index 0000000000000..a3a17374be5cd --- /dev/null +++ b/k8s-operator/apis/v1alpha1/types_tailnet.go @@ -0,0 +1,69 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Code comments on these types should be treated as user facing documentation- +// they will appear on the Tailnet CRD i.e. if someone runs kubectl explain tailnet. + +var TailnetKind = "Tailnet" + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,shortName=tn +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "TailnetReady")].reason`,description="Status of the deployed Tailnet resources." + +type Tailnet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitzero"` + + // Spec describes the desired state of the Tailnet. + // More info: + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + Spec TailnetSpec `json:"spec"` + + // Status describes the status of the Tailnet. This is set + // and managed by the Tailscale operator. 
+ // +optional + Status TailnetStatus `json:"status"` +} + +// +kubebuilder:object:root=true + +type TailnetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Tailnet `json:"items"` +} + +type TailnetSpec struct { + // URL of the control plane to be used by all resources managed by the operator using this Tailnet. + // +optional + LoginURL string `json:"loginUrl,omitempty"` + // Denotes the location of the OAuth credentials to use for authenticating with this Tailnet. + Credentials TailnetCredentials `json:"credentials"` +} + +type TailnetCredentials struct { + // The name of the secret containing the OAuth credentials. This secret must contain two fields "client_id" and + // "client_secret". + SecretName string `json:"secretName"` +} + +type TailnetStatus struct { + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions"` +} + +// TailnetReady is set to True if the Tailnet is available for use by operator workloads. +const TailnetReady ConditionType = `TailnetReady` diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index ff0f3f6ace415..4743a5156c16b 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -1365,6 +1365,48 @@ func (in Tags) DeepCopy() Tags { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tailnet) DeepCopyInto(out *Tailnet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tailnet. 
+func (in *Tailnet) DeepCopy() *Tailnet { + if in == nil { + return nil + } + out := new(Tailnet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Tailnet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TailnetCredentials) DeepCopyInto(out *TailnetCredentials) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetCredentials. +func (in *TailnetCredentials) DeepCopy() *TailnetCredentials { + if in == nil { + return nil + } + out := new(TailnetCredentials) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TailnetDevice) DeepCopyInto(out *TailnetDevice) { *out = *in @@ -1390,6 +1432,76 @@ func (in *TailnetDevice) DeepCopy() *TailnetDevice { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TailnetList) DeepCopyInto(out *TailnetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Tailnet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetList. +func (in *TailnetList) DeepCopy() *TailnetList { + if in == nil { + return nil + } + out := new(TailnetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TailnetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TailnetSpec) DeepCopyInto(out *TailnetSpec) { + *out = *in + out.Credentials = in.Credentials +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetSpec. +func (in *TailnetSpec) DeepCopy() *TailnetSpec { + if in == nil { + return nil + } + out := new(TailnetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TailnetStatus) DeepCopyInto(out *TailnetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TailnetStatus. +func (in *TailnetStatus) DeepCopy() *TailnetStatus { + if in == nil { + return nil + } + out := new(TailnetStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TailscaleConfig) DeepCopyInto(out *TailscaleConfig) { *out = *in diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index ae465a728f0ff..bce6e39bdb142 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -13,6 +13,7 @@ import ( xslices "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstime" ) @@ -91,6 +92,14 @@ func SetProxyGroupCondition(pg *tsapi.ProxyGroup, conditionType tsapi.ConditionT pg.Status.Conditions = conds } +// SetTailnetCondition ensures that Tailnet status has a condition with the +// given attributes. LastTransitionTime gets set every time condition's status +// changes. +func SetTailnetCondition(tn *tsapi.Tailnet, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, clock tstime.Clock, logger *zap.SugaredLogger) { + conds := updateCondition(tn.Status.Conditions, conditionType, status, reason, message, tn.Generation, clock, logger) + tn.Status.Conditions = conds +} + func updateCondition(conds []metav1.Condition, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string, gen int64, clock tstime.Clock, logger *zap.SugaredLogger) []metav1.Condition { newCondition := metav1.Condition{ Type: string(conditionType), @@ -187,3 +196,14 @@ func SvcIsReady(svc *corev1.Service) bool { cond := svc.Status.Conditions[idx] return cond.Status == metav1.ConditionTrue } + +func TailnetIsReady(tn *tsapi.Tailnet) bool { + idx := xslices.IndexFunc(tn.Status.Conditions, func(cond metav1.Condition) bool { + return cond.Type == string(tsapi.TailnetReady) + }) + if idx == -1 { + return false + } + cond := tn.Status.Conditions[idx] + return cond.Status == metav1.ConditionTrue && cond.ObservedGeneration == tn.Generation +} diff --git a/k8s-operator/reconciler/reconciler.go b/k8s-operator/reconciler/reconciler.go new file mode 100644 index 
0000000000000..2751790964577 --- /dev/null +++ b/k8s-operator/reconciler/reconciler.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package reconciler provides utilities for working with Kubernetes resources within controller reconciliation +// loops. +package reconciler + +import ( + "slices" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // FinalizerName is the common finalizer used across all Tailscale Kubernetes resources. + FinalizerName = "tailscale.com/finalizer" +) + +// SetFinalizer adds the finalizer to the resource if not already present. +func SetFinalizer(obj client.Object) { + if idx := slices.Index(obj.GetFinalizers(), FinalizerName); idx >= 0 { + return + } + + obj.SetFinalizers(append(obj.GetFinalizers(), FinalizerName)) +} + +// RemoveFinalizer removes the finalizer from the resource if present. +func RemoveFinalizer(obj client.Object) { + idx := slices.Index(obj.GetFinalizers(), FinalizerName) + if idx < 0 { + return + } + + finalizers := obj.GetFinalizers() + obj.SetFinalizers(append(finalizers[:idx], finalizers[idx+1:]...)) +} diff --git a/k8s-operator/reconciler/reconciler_test.go b/k8s-operator/reconciler/reconciler_test.go new file mode 100644 index 0000000000000..573cd4d9db8da --- /dev/null +++ b/k8s-operator/reconciler/reconciler_test.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package reconciler_test + +import ( + "slices" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "tailscale.com/k8s-operator/reconciler" +) + +func TestFinalizers(t *testing.T) { + t.Parallel() + + object := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + StringData: map[string]string{ + "hello": "world", + }, + } + + reconciler.SetFinalizer(object) + + if !slices.Contains(object.Finalizers, 
reconciler.FinalizerName) { + t.Fatalf("object does not have finalizer %q: %v", reconciler.FinalizerName, object.Finalizers) + } + + reconciler.RemoveFinalizer(object) + + if slices.Contains(object.Finalizers, reconciler.FinalizerName) { + t.Fatalf("object still has finalizer %q: %v", reconciler.FinalizerName, object.Finalizers) + } +} diff --git a/k8s-operator/reconciler/tailnet/mocks_test.go b/k8s-operator/reconciler/tailnet/mocks_test.go new file mode 100644 index 0000000000000..7f3f2ddb91085 --- /dev/null +++ b/k8s-operator/reconciler/tailnet/mocks_test.go @@ -0,0 +1,45 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package tailnet_test + +import ( + "context" + "io" + + "tailscale.com/internal/client/tailscale" +) + +type ( + MockTailnetClient struct { + ErrorOnDevices bool + ErrorOnKeys bool + ErrorOnServices bool + } +) + +func (m MockTailnetClient) Devices(_ context.Context, _ *tailscale.DeviceFieldsOpts) ([]*tailscale.Device, error) { + if m.ErrorOnDevices { + return nil, io.EOF + } + + return nil, nil +} + +func (m MockTailnetClient) Keys(_ context.Context) ([]string, error) { + if m.ErrorOnKeys { + return nil, io.EOF + } + + return nil, nil +} + +func (m MockTailnetClient) ListVIPServices(_ context.Context) (*tailscale.VIPServiceList, error) { + if m.ErrorOnServices { + return nil, io.EOF + } + + return nil, nil +} diff --git a/k8s-operator/reconciler/tailnet/tailnet.go b/k8s-operator/reconciler/tailnet/tailnet.go new file mode 100644 index 0000000000000..fe445a36323be --- /dev/null +++ b/k8s-operator/reconciler/tailnet/tailnet.go @@ -0,0 +1,327 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package tailnet provides reconciliation logic for the Tailnet custom resource definition. 
It is responsible for +// ensuring the referenced OAuth credentials are valid and have the required scopes to be able to generate authentication +// keys, manage devices & manage VIP services. +package tailnet + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "go.uber.org/zap" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" + operatorutils "tailscale.com/k8s-operator" + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler" + "tailscale.com/kube/kubetypes" + "tailscale.com/tstime" + "tailscale.com/util/clientmetric" + "tailscale.com/util/set" +) + +type ( + // The Reconciler type is a reconcile.TypedReconciler implementation used to manage the reconciliation of + // Tailnet custom resources. + Reconciler struct { + client.Client + + tailscaleNamespace string + clock tstime.Clock + logger *zap.SugaredLogger + clientFunc func(*tsapi.Tailnet, *corev1.Secret) TailscaleClient + + // Metrics related fields + mu sync.Mutex + tailnets set.Slice[types.UID] + } + + // The ReconcilerOptions type contains configuration values for the Reconciler. + ReconcilerOptions struct { + // The client for interacting with the Kubernetes API. + Client client.Client + // The namespace the operator is installed in. This reconciler expects Tailnet OAuth credentials to be stored + // in Secret resources within this namespace. + TailscaleNamespace string + // Controls which clock to use for performing time-based functions. This is typically modified for use + // in tests. 
+ Clock tstime.Clock + // The logger to use for this Reconciler. + Logger *zap.SugaredLogger + // ClientFunc is a function that takes tailscale credentials and returns an implementation for the Tailscale + // HTTP API. This should generally be nil unless needed for testing. + ClientFunc func(*tsapi.Tailnet, *corev1.Secret) TailscaleClient + } + + // The TailscaleClient interface describes types that interact with the Tailscale HTTP API. + TailscaleClient interface { + Devices(context.Context, *tailscale.DeviceFieldsOpts) ([]*tailscale.Device, error) + Keys(ctx context.Context) ([]string, error) + ListVIPServices(ctx context.Context) (*tailscale.VIPServiceList, error) + } +) + +const reconcilerName = "tailnet-reconciler" + +// NewReconciler returns a new instance of the Reconciler type. It watches specifically for changes to Tailnet custom +// resources. The ReconcilerOptions can be used to modify the behaviour of the Reconciler. +func NewReconciler(options ReconcilerOptions) *Reconciler { + return &Reconciler{ + Client: options.Client, + tailscaleNamespace: options.TailscaleNamespace, + clock: options.Clock, + logger: options.Logger.Named(reconcilerName), + clientFunc: options.ClientFunc, + } +} + +// Register the Reconciler onto the given manager.Manager implementation. +func (r *Reconciler) Register(mgr manager.Manager) error { + return builder. + ControllerManagedBy(mgr). + For(&tsapi.Tailnet{}). + Named(reconcilerName). + Complete(r) +} + +var ( + // gaugeTailnetResources tracks the overall number of Tailnet resources currently managed by this operator instance. + gaugeTailnetResources = clientmetric.NewGauge(kubetypes.MetricTailnetCount) +) + +// Reconcile is invoked when a change occurs to Tailnet resources within the cluster. On create/update, the Tailnet +// resource is validated ensuring that the specified Secret exists and contains valid OAuth credentials that have +// required permissions to perform all necessary functions by the operator. 
+func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + var tailnet tsapi.Tailnet + err := r.Get(ctx, req.NamespacedName, &tailnet) + switch { + case apierrors.IsNotFound(err): + return reconcile.Result{}, nil + case err != nil: + return reconcile.Result{}, fmt.Errorf("failed to get Tailnet %q: %w", req.NamespacedName, err) + } + + if !tailnet.DeletionTimestamp.IsZero() { + return r.delete(ctx, &tailnet) + } + + return r.createOrUpdate(ctx, &tailnet) +} + +func (r *Reconciler) delete(ctx context.Context, tailnet *tsapi.Tailnet) (reconcile.Result, error) { + reconciler.RemoveFinalizer(tailnet) + if err := r.Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to remove finalizer from Tailnet %q: %w", tailnet.Name, err) + } + + r.mu.Lock() + r.tailnets.Remove(tailnet.UID) + r.mu.Unlock() + gaugeTailnetResources.Set(int64(r.tailnets.Len())) + + return reconcile.Result{}, nil +} + +// Constants for condition reasons. +const ( + ReasonInvalidOAuth = "InvalidOAuth" + ReasonInvalidSecret = "InvalidSecret" + ReasonValid = "TailnetValid" +) + +func (r *Reconciler) createOrUpdate(ctx context.Context, tailnet *tsapi.Tailnet) (reconcile.Result, error) { + r.mu.Lock() + r.tailnets.Add(tailnet.UID) + r.mu.Unlock() + gaugeTailnetResources.Set(int64(r.tailnets.Len())) + + name := types.NamespacedName{Name: tailnet.Spec.Credentials.SecretName, Namespace: r.tailscaleNamespace} + + var secret corev1.Secret + err := r.Get(ctx, name, &secret) + + // The referenced Secret does not exist within the tailscale namespace, so we'll mark the Tailnet as not ready + // for use. 
+ if apierrors.IsNotFound(err) { + operatorutils.SetTailnetCondition( + tailnet, + tsapi.TailnetReady, + metav1.ConditionFalse, + ReasonInvalidSecret, + fmt.Sprintf("referenced secret %q does not exist in namespace %q", name.Name, r.tailscaleNamespace), + r.clock, + r.logger, + ) + + if err = r.Status().Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to update Tailnet status for %q: %w", tailnet.Name, err) + } + + return reconcile.Result{}, nil + } + + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to get secret %q: %w", name, err) + } + + // We first ensure that the referenced secret contains the required fields. Otherwise, we set the Tailnet as + // invalid. The operator will not allow the use of this Tailnet while it is in an invalid state. + if ok := r.ensureSecret(tailnet, &secret); !ok { + if err = r.Status().Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to update Tailnet status for %q: %w", tailnet.Name, err) + } + + return reconcile.Result{RequeueAfter: time.Minute / 2}, nil + } + + tsClient := r.createClient(ctx, tailnet, &secret) + + // Second, we ensure the OAuth credentials supplied in the secret are valid and have the required scopes to access + // the various API endpoints required by the operator. + if ok := r.ensurePermissions(ctx, tsClient, tailnet); !ok { + if err = r.Status().Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to update Tailnet status for %q: %w", tailnet.Name, err) + } + + // We provide a requeue duration here as a user will likely want to go and modify their scopes and come back. + // This should save them having to delete and recreate the resource. 
+ return reconcile.Result{RequeueAfter: time.Minute / 2}, nil + } + + operatorutils.SetTailnetCondition( + tailnet, + tsapi.TailnetReady, + metav1.ConditionTrue, + ReasonValid, + ReasonValid, + r.clock, + r.logger, + ) + + if err = r.Status().Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to update Tailnet status for %q: %w", tailnet.Name, err) + } + + reconciler.SetFinalizer(tailnet) + if err = r.Update(ctx, tailnet); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to add finalizer to Tailnet %q: %w", tailnet.Name, err) + } + + return reconcile.Result{}, nil +} + +// Constants for OAuth credential fields within the Secret referenced by the Tailnet. +const ( + clientIDKey = "client_id" + clientSecretKey = "client_secret" +) + +func (r *Reconciler) createClient(ctx context.Context, tailnet *tsapi.Tailnet, secret *corev1.Secret) TailscaleClient { + if r.clientFunc != nil { + return r.clientFunc(tailnet, secret) + } + + baseURL := ipn.DefaultControlURL + if tailnet.Spec.LoginURL != "" { + baseURL = tailnet.Spec.LoginURL + } + + credentials := clientcredentials.Config{ + ClientID: string(secret.Data[clientIDKey]), + ClientSecret: string(secret.Data[clientSecretKey]), + TokenURL: baseURL + "/api/v2/oauth/token", + } + + source := credentials.TokenSource(ctx) + httpClient := oauth2.NewClient(ctx, source) + + tsClient := tailscale.NewClient("-", nil) + tsClient.UserAgent = "tailscale-k8s-operator" + tsClient.HTTPClient = httpClient + tsClient.BaseURL = baseURL + + return tsClient +} + +func (r *Reconciler) ensurePermissions(ctx context.Context, tsClient TailscaleClient, tailnet *tsapi.Tailnet) bool { + // Perform basic list requests here to confirm that the OAuth credentials referenced on the Tailnet resource + // can perform the basic operations required for the operator to function. This has a caveat of only performing + // read actions, as we don't want to create arbitrary keys and VIP services. 
However, it will catch when a user + // has completely forgotten an entire scope that's required. + var errs error + if _, err := tsClient.Devices(ctx, nil); err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to list devices: %w", err)) + } + + if _, err := tsClient.Keys(ctx); err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to list auth keys: %w", err)) + } + + if _, err := tsClient.ListVIPServices(ctx); err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to list tailscale services: %w", err)) + } + + if errs != nil { + operatorutils.SetTailnetCondition( + tailnet, + tsapi.TailnetReady, + metav1.ConditionFalse, + ReasonInvalidOAuth, + errs.Error(), + r.clock, + r.logger, + ) + + return false + } + + return true +} + +func (r *Reconciler) ensureSecret(tailnet *tsapi.Tailnet, secret *corev1.Secret) bool { + var message string + + switch { + case len(secret.Data) == 0: + message = fmt.Sprintf("Secret %q is empty", secret.Name) + case len(secret.Data[clientIDKey]) == 0: + message = fmt.Sprintf("Secret %q is missing the client_id field", secret.Name) + case len(secret.Data[clientSecretKey]) == 0: + message = fmt.Sprintf("Secret %q is missing the client_secret field", secret.Name) + } + + if message == "" { + return true + } + + operatorutils.SetTailnetCondition( + tailnet, + tsapi.TailnetReady, + metav1.ConditionFalse, + ReasonInvalidSecret, + message, + r.clock, + r.logger, + ) + + return false +} diff --git a/k8s-operator/reconciler/tailnet/tailnet_test.go b/k8s-operator/reconciler/tailnet/tailnet_test.go new file mode 100644 index 0000000000000..471752b86080d --- /dev/null +++ b/k8s-operator/reconciler/tailnet/tailnet_test.go @@ -0,0 +1,411 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package tailnet_test + +import ( + "testing" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler/tailnet" + "tailscale.com/tstest" +) + +func TestReconciler_Reconcile(t *testing.T) { + t.Parallel() + clock := tstest.NewClock(tstest.ClockOpts{}) + logger, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + tt := []struct { + Name string + Request reconcile.Request + Tailnet *tsapi.Tailnet + Secret *corev1.Secret + ExpectsError bool + ExpectedConditions []metav1.Condition + ClientFunc func(*tsapi.Tailnet, *corev1.Secret) tailnet.TailscaleClient + }{ + { + Name: "ignores unknown tailnet requests", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + }, + { + Name: "invalid status for missing secret", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidSecret, + Message: `referenced secret "test" does not exist in namespace "tailscale"`, + }, + }, + }, + { + Name: "invalid status for empty secret", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: 
tailnet.ReasonInvalidSecret, + Message: `Secret "test" is empty`, + }, + }, + }, + { + Name: "invalid status for missing client id", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_secret": []byte("test"), + }, + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidSecret, + Message: `Secret "test" is missing the client_id field`, + }, + }, + }, + { + Name: "invalid status for missing client secret", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_id": []byte("test"), + }, + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidSecret, + Message: `Secret "test" is missing the client_secret field`, + }, + }, + }, + { + Name: "invalid status for bad devices scope", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", 
+ }, + Data: map[string][]byte{ + "client_id": []byte("test"), + "client_secret": []byte("test"), + }, + }, + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + return &MockTailnetClient{ErrorOnDevices: true} + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidOAuth, + Message: `failed to list devices: EOF`, + }, + }, + }, + { + Name: "invalid status for bad services scope", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_id": []byte("test"), + "client_secret": []byte("test"), + }, + }, + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + return &MockTailnetClient{ErrorOnServices: true} + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidOAuth, + Message: `failed to list tailscale services: EOF`, + }, + }, + }, + { + Name: "invalid status for bad keys scope", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_id": []byte("test"), + "client_secret": []byte("test"), + }, + }, + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + return 
&MockTailnetClient{ErrorOnKeys: true} + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionFalse, + Reason: tailnet.ReasonInvalidOAuth, + Message: `failed to list auth keys: EOF`, + }, + }, + }, + { + Name: "ready when valid and scopes are correct", + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "default", + }, + }, + Tailnet: &tsapi.Tailnet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Spec: tsapi.TailnetSpec{ + Credentials: tsapi.TailnetCredentials{ + SecretName: "test", + }, + }, + }, + Secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "tailscale", + }, + Data: map[string][]byte{ + "client_id": []byte("test"), + "client_secret": []byte("test"), + }, + }, + ClientFunc: func(_ *tsapi.Tailnet, _ *corev1.Secret) tailnet.TailscaleClient { + return &MockTailnetClient{} + }, + ExpectedConditions: []metav1.Condition{ + { + Type: string(tsapi.TailnetReady), + Status: metav1.ConditionTrue, + Reason: tailnet.ReasonValid, + Message: tailnet.ReasonValid, + }, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.Name, func(t *testing.T) { + builder := fake.NewClientBuilder().WithScheme(tsapi.GlobalScheme) + if tc.Tailnet != nil { + builder = builder.WithObjects(tc.Tailnet).WithStatusSubresource(tc.Tailnet) + } + if tc.Secret != nil { + builder = builder.WithObjects(tc.Secret) + } + + fc := builder.Build() + opts := tailnet.ReconcilerOptions{ + Client: fc, + Clock: clock, + Logger: logger.Sugar(), + ClientFunc: tc.ClientFunc, + TailscaleNamespace: "tailscale", + } + + reconciler := tailnet.NewReconciler(opts) + _, err = reconciler.Reconcile(t.Context(), tc.Request) + if tc.ExpectsError && err == nil { + t.Fatalf("expected error, got none") + } + + if !tc.ExpectsError && err != nil { + t.Fatalf("expected no error, got %v", err) + } + + if len(tc.ExpectedConditions) == 0 { + return + } + + var tn tsapi.Tailnet + if err = fc.Get(t.Context(), 
tc.Request.NamespacedName, &tn); err != nil { + t.Fatal(err) + } + + if len(tn.Status.Conditions) != len(tc.ExpectedConditions) { + t.Fatalf("expected %v condition(s), got %v", len(tc.ExpectedConditions), len(tn.Status.Conditions)) + } + + for i, expected := range tc.ExpectedConditions { + actual := tn.Status.Conditions[i] + + if actual.Type != expected.Type { + t.Errorf("expected %v, got %v", expected.Type, actual.Type) + } + + if actual.Status != expected.Status { + t.Errorf("expected %v, got %v", expected.Status, actual.Status) + } + + if actual.Reason != expected.Reason { + t.Errorf("expected %v, got %v", expected.Reason, actual.Reason) + } + + if actual.Message != expected.Message { + t.Errorf("expected %v, got %v", expected.Message, actual.Message) + } + } + + if err = fc.Delete(t.Context(), &tn); err != nil { + t.Fatal(err) + } + + if _, err = reconciler.Reconcile(t.Context(), tc.Request); err != nil { + t.Fatal(err) + } + + err = fc.Get(t.Context(), tc.Request.NamespacedName, &tn) + if !apierrors.IsNotFound(err) { + t.Fatalf("expected not found error, got %v", err) + } + }) + } +} diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 44b01fe1ad1f5..b8b94a4b21a5d 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -33,6 +33,7 @@ const ( MetricProxyGroupEgressCount = "k8s_proxygroup_egress_resources" MetricProxyGroupIngressCount = "k8s_proxygroup_ingress_resources" MetricProxyGroupAPIServerCount = "k8s_proxygroup_kube_apiserver_resources" + MetricTailnetCount = "k8s_tailnet_resources" // Keys that containerboot writes to state file that can be used to determine its state. // fields set in Tailscale state Secret. 
These are mostly used by the Tailscale Kubernetes operator to determine From 6dc0bd834c0858332aff1579bc4559934f6d242c Mon Sep 17 00:00:00 2001 From: Josh Bleecher Snyder Date: Mon, 12 Jan 2026 11:43:41 -0800 Subject: [PATCH 0884/1093] util/limiter: don't panic when dumping a new Limiter Fixes #18439 Signed-off-by: Josh Bleecher Snyder --- util/limiter/limiter.go | 3 +++ util/limiter/limiter_test.go | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/util/limiter/limiter.go b/util/limiter/limiter.go index b86efdf29cfd0..b5fbb6fa6b2f7 100644 --- a/util/limiter/limiter.go +++ b/util/limiter/limiter.go @@ -187,6 +187,9 @@ func (lm *Limiter[K]) collectDump(now time.Time) []dumpEntry[K] { lm.mu.Lock() defer lm.mu.Unlock() + if lm.cache == nil { + return nil + } ret := make([]dumpEntry[K], 0, lm.cache.Len()) lm.cache.ForEach(func(k K, v *bucket) { lm.updateBucketLocked(v, now) // so stats are accurate diff --git a/util/limiter/limiter_test.go b/util/limiter/limiter_test.go index 77b1d562b23fb..d3f3e307a2b82 100644 --- a/util/limiter/limiter_test.go +++ b/util/limiter/limiter_test.go @@ -5,6 +5,7 @@ package limiter import ( "bytes" + "io" "strings" "testing" "time" @@ -175,6 +176,10 @@ func TestDumpHTML(t *testing.T) { } } +func TestDumpHTMLEmpty(t *testing.T) { + new(Limiter[string]).DumpHTML(io.Discard, false) // should not panic +} + func allowed(t *testing.T, limiter *Limiter[string], key string, count int, now time.Time) { t.Helper() for i := range count { From 4b7585df77e593ec6e57d9f55ce1296dc5bc6aaf Mon Sep 17 00:00:00 2001 From: Alex Valiushko Date: Wed, 21 Jan 2026 21:55:37 -0800 Subject: [PATCH 0885/1093] net/udprelay: add tailscaled_peer_relay_endpoints gauge (#18265) New gauge reflects endpoints state via labels: - open, when both peers are connected and ready to talk, and - connecting. when at least one peer hasn't connected yet. 
Corresponding client metrics are logged as - udprelay_endpoints_connecting - udprelay_endpoints_open Updates tailscale/corp#30820 Change-Id: Idb1baa90a38c97847e14f9b2390093262ad0ea23 Signed-off-by: Alex Valiushko --- net/udprelay/metrics.go | 59 ++++++++++++++++++++++- net/udprelay/metrics_test.go | 57 +++++++++++++++++++++- net/udprelay/server.go | 92 ++++++++++++++++++++++++++++-------- net/udprelay/server_test.go | 74 +++++++++++++++++++++++++++++ 4 files changed, 258 insertions(+), 24 deletions(-) diff --git a/net/udprelay/metrics.go b/net/udprelay/metrics.go index b7c0710c2afc1..235029bf425ce 100644 --- a/net/udprelay/metrics.go +++ b/net/udprelay/metrics.go @@ -22,6 +22,17 @@ var ( cMetricForwarded46Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp4_udp6") cMetricForwarded64Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp6_udp4") cMetricForwarded66Bytes = clientmetric.NewAggregateCounter("udprelay_forwarded_bytes_udp6_udp6") + + // cMetricEndpoints is initialized here with no other writes, making it safe for concurrent reads. + // + // [clientmetric.Gauge] does not let us embed existing counters, so + // [metrics.updateEndpoint] records data into client and user gauges independently. + // + // Transitions to and from [endpointClosed] are not recorded. 
+ cMetricEndpoints = map[endpointState]*clientmetric.Metric{ + endpointConnecting: clientmetric.NewGauge("udprelay_endpoints_connecting"), + endpointOpen: clientmetric.NewGauge("udprelay_endpoints_open"), + } ) type transport string @@ -36,6 +47,10 @@ type forwardedLabel struct { transportOut transport `prom:"transport_out"` } +type endpointLabel struct { + state endpointState `prom:"state"` +} + type metrics struct { forwarded44Packets expvar.Int forwarded46Packets expvar.Int @@ -46,6 +61,11 @@ type metrics struct { forwarded46Bytes expvar.Int forwarded64Bytes expvar.Int forwarded66Bytes expvar.Int + + // endpoints are set in [registerMetrics] and safe for concurrent reads. + // + // Transitions to and from [endpointClosed] are not recorded + endpoints map[endpointState]*expvar.Int } // registerMetrics publishes user and client metric counters for peer relay server. @@ -65,6 +85,12 @@ func registerMetrics(reg *usermetric.Registry) *metrics { "counter", "Number of bytes forwarded via Peer Relay", ) + uMetricEndpoints = usermetric.NewMultiLabelMapWithRegistry[endpointLabel]( + reg, + "tailscaled_peer_relay_endpoints", + "gauge", + "Number of allocated Peer Relay endpoints", + ) forwarded44 = forwardedLabel{transportIn: transportUDP4, transportOut: transportUDP4} forwarded46 = forwardedLabel{transportIn: transportUDP4, transportOut: transportUDP6} forwarded64 = forwardedLabel{transportIn: transportUDP6, transportOut: transportUDP4} @@ -83,6 +109,13 @@ func registerMetrics(reg *usermetric.Registry) *metrics { uMetricForwardedBytes.Set(forwarded64, &m.forwarded64Bytes) uMetricForwardedBytes.Set(forwarded66, &m.forwarded66Bytes) + m.endpoints = map[endpointState]*expvar.Int{ + endpointConnecting: {}, + endpointOpen: {}, + } + uMetricEndpoints.Set(endpointLabel{endpointOpen}, m.endpoints[endpointOpen]) + uMetricEndpoints.Set(endpointLabel{endpointConnecting}, m.endpoints[endpointConnecting]) + // Publish client metrics. 
cMetricForwarded44Packets.Register(&m.forwarded44Packets) cMetricForwarded46Packets.Register(&m.forwarded46Packets) @@ -96,6 +129,26 @@ func registerMetrics(reg *usermetric.Registry) *metrics { return m } +type endpointUpdater interface { + updateEndpoint(before, after endpointState) +} + +// updateEndpoint updates the endpoints gauge according to states left and entered. +// It records client-metric gauges independently, see [cMetricEndpoints] doc. +func (m *metrics) updateEndpoint(before, after endpointState) { + if before == after { + return + } + if uMetricEndpointsBefore, ok := m.endpoints[before]; ok && before != endpointClosed { + uMetricEndpointsBefore.Add(-1) + cMetricEndpoints[before].Add(-1) + } + if uMetricEndpointsAfter, ok := m.endpoints[after]; ok && after != endpointClosed { + uMetricEndpointsAfter.Add(1) + cMetricEndpoints[after].Add(1) + } +} + // countForwarded records user and client metrics according to the // inbound and outbound address families. func (m *metrics) countForwarded(in4, out4 bool, bytes, packets int64) { @@ -114,8 +167,7 @@ func (m *metrics) countForwarded(in4, out4 bool, bytes, packets int64) { } } -// deregisterMetrics unregisters the underlying expvar counters -// from clientmetrics. +// deregisterMetrics clears clientmetrics counters and resets gauges to zero. 
func deregisterMetrics() { cMetricForwarded44Packets.UnregisterAll() cMetricForwarded46Packets.UnregisterAll() @@ -125,4 +177,7 @@ func deregisterMetrics() { cMetricForwarded46Bytes.UnregisterAll() cMetricForwarded64Bytes.UnregisterAll() cMetricForwarded66Bytes.UnregisterAll() + for _, v := range cMetricEndpoints { + v.Set(0) + } } diff --git a/net/udprelay/metrics_test.go b/net/udprelay/metrics_test.go index 5c6a751134e8b..0b7650534f884 100644 --- a/net/udprelay/metrics_test.go +++ b/net/udprelay/metrics_test.go @@ -4,6 +4,7 @@ package udprelay import ( + "fmt" "slices" "testing" @@ -11,7 +12,7 @@ import ( "tailscale.com/util/usermetric" ) -func TestMetrics(t *testing.T) { +func TestMetricsLifecycle(t *testing.T) { c := qt.New(t) deregisterMetrics() r := &usermetric.Registry{} @@ -22,6 +23,7 @@ func TestMetrics(t *testing.T) { want := []string{ "tailscaled_peer_relay_forwarded_packets_total", "tailscaled_peer_relay_forwarded_bytes_total", + "tailscaled_peer_relay_endpoints", } slices.Sort(have) slices.Sort(want) @@ -51,4 +53,57 @@ func TestMetrics(t *testing.T) { c.Assert(m.forwarded66Packets.Value(), qt.Equals, int64(4)) c.Assert(cMetricForwarded66Bytes.Value(), qt.Equals, int64(4)) c.Assert(cMetricForwarded66Packets.Value(), qt.Equals, int64(4)) + + // Validate client metrics deregistration. 
+ m.updateEndpoint(endpointClosed, endpointOpen) + deregisterMetrics() + c.Check(cMetricForwarded44Bytes.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded44Packets.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded46Bytes.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded46Packets.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded64Bytes.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded64Packets.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded66Bytes.Value(), qt.Equals, int64(0)) + c.Check(cMetricForwarded66Packets.Value(), qt.Equals, int64(0)) + for k := range cMetricEndpoints { + c.Check(cMetricEndpoints[k].Value(), qt.Equals, int64(0)) + } +} + +func TestMetricsEndpointTransitions(t *testing.T) { + c := qt.New(t) + var states = []endpointState{ + endpointClosed, + endpointConnecting, + endpointOpen, + } + for _, a := range states { + for _, b := range states { + t.Run(fmt.Sprintf("%s-%s", a, b), func(t *testing.T) { + deregisterMetrics() + r := &usermetric.Registry{} + m := registerMetrics(r) + m.updateEndpoint(a, b) + var wantA, wantB int64 + switch { + case a == b: + wantA, wantB = 0, 0 + case a == endpointClosed: + wantA, wantB = 0, 1 + case b == endpointClosed: + wantA, wantB = -1, 0 + default: + wantA, wantB = -1, 1 + } + if a != endpointClosed { + c.Check(m.endpoints[a].Value(), qt.Equals, wantA) + c.Check(cMetricEndpoints[a].Value(), qt.Equals, wantA) + } + if b != endpointClosed { + c.Check(m.endpoints[b].Value(), qt.Equals, wantB) + c.Check(cMetricEndpoints[b].Value(), qt.Equals, wantB) + } + }) + } + } } diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 2b6d389232832..38ee04df9e1ca 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -122,6 +122,7 @@ type serverEndpoint struct { allocatedAt mono.Time mu sync.Mutex // guards the following fields + closed bool // signals that no new data should be accepted inProgressGeneration [2]uint32 // or zero if a handshake has never started, or 
has just completed boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg lastSeen [2]mono.Time @@ -151,9 +152,15 @@ func blakeMACFromBindMsg(blakeKey [blake2s.Size]byte, src netip.AddrPort, msg di return out, nil } -func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, serverDisco key.DiscoPublic, macSecrets views.Slice[[blake2s.Size]byte], now mono.Time) (write []byte, to netip.AddrPort) { +func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex int, discoMsg disco.Message, serverDisco key.DiscoPublic, macSecrets views.Slice[[blake2s.Size]byte], now mono.Time, m endpointUpdater) (write []byte, to netip.AddrPort) { e.mu.Lock() defer e.mu.Unlock() + lastState := e.stateLocked() + + if lastState == endpointClosed { + // endpoint was closed in [Server.endpointGC] + return nil, netip.AddrPort{} + } if senderIndex != 0 && senderIndex != 1 { return nil, netip.AddrPort{} @@ -230,6 +237,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex if bytes.Equal(mac[:], discoMsg.Challenge[:]) { // Handshake complete. Update the binding for this sender. 
e.boundAddrPorts[senderIndex] = from + m.updateEndpoint(lastState, e.stateLocked()) e.lastSeen[senderIndex] = now // record last seen as bound time e.inProgressGeneration[senderIndex] = 0 // reset to zero, which indicates there is no in-progress handshake return nil, netip.AddrPort{} @@ -243,7 +251,7 @@ func (e *serverEndpoint) handleDiscoControlMsg(from netip.AddrPort, senderIndex } } -func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic, macSecrets views.Slice[[blake2s.Size]byte], now mono.Time) (write []byte, to netip.AddrPort) { +func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []byte, serverDisco key.DiscoPublic, macSecrets views.Slice[[blake2s.Size]byte], now mono.Time, m endpointUpdater) (write []byte, to netip.AddrPort) { senderRaw, isDiscoMsg := disco.Source(b) if !isDiscoMsg { // Not a Disco message @@ -274,7 +282,7 @@ func (e *serverEndpoint) handleSealedDiscoControlMsg(from netip.AddrPort, b []by return nil, netip.AddrPort{} } - return e.handleDiscoControlMsg(from, senderIndex, discoMsg, serverDisco, macSecrets, now) + return e.handleDiscoControlMsg(from, senderIndex, discoMsg, serverDisco, macSecrets, now, m) } func (e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now mono.Time) (write []byte, to netip.AddrPort) { @@ -284,6 +292,10 @@ func (e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now mon // not a control packet, but serverEndpoint isn't bound return nil, netip.AddrPort{} } + if e.stateLocked() == endpointClosed { + // endpoint was closed in [Server.endpointGC] + return nil, netip.AddrPort{} + } switch { case from == e.boundAddrPorts[0]: e.lastSeen[0] = now @@ -301,9 +313,21 @@ func (e *serverEndpoint) handleDataPacket(from netip.AddrPort, b []byte, now mon } } -func (e *serverEndpoint) isExpired(now mono.Time, bindLifetime, steadyStateLifetime time.Duration) bool { +// maybeExpire checks if the endpoint has expired 
according to the provided timeouts and sets its closed state accordingly. +// True is returned if the endpoint was expired and closed. +func (e *serverEndpoint) maybeExpire(now mono.Time, bindLifetime, steadyStateLifetime time.Duration, m endpointUpdater) bool { e.mu.Lock() defer e.mu.Unlock() + before := e.stateLocked() + if e.isExpiredLocked(now, bindLifetime, steadyStateLifetime) { + e.closed = true + m.updateEndpoint(before, e.stateLocked()) + return true + } + return false +} + +func (e *serverEndpoint) isExpiredLocked(now mono.Time, bindLifetime, steadyStateLifetime time.Duration) bool { if !e.isBoundLocked() { if now.Sub(e.allocatedAt) > bindLifetime { return true @@ -323,6 +347,31 @@ func (e *serverEndpoint) isBoundLocked() bool { e.boundAddrPorts[1].IsValid() } +// stateLocked returns current endpointState according to the +// peers handshake status. +func (e *serverEndpoint) stateLocked() endpointState { + switch { + case e == nil, e.closed: + return endpointClosed + case e.boundAddrPorts[0].IsValid() && e.boundAddrPorts[1].IsValid(): + return endpointOpen + default: + return endpointConnecting + } +} + +// endpointState canonicalizes endpoint state names, +// see [serverEndpoint.stateLocked]. +// +// Usermetrics can't handle Stringer, must be a string enum. +type endpointState string + +const ( + endpointClosed endpointState = "closed" // unallocated, not tracked in metrics + endpointConnecting endpointState = "connecting" // at least one peer has not completed handshake + endpointOpen endpointState = "open" // ready to forward +) + // NewServer constructs a [Server] listening on port. If port is zero, then // port selection is left up to the host networking stack. 
If // onlyStaticAddrPorts is true, then dynamic addr:port discovery will be @@ -703,33 +752,33 @@ func (s *Server) Close() error { clear(s.serverEndpointByDisco) s.closed = true s.bus.Close() + deregisterMetrics() }) return nil } +func (s *Server) endpointGC(bindLifetime, steadyStateLifetime time.Duration) { + now := mono.Now() + // TODO: consider performance implications of scanning all endpoints and + // holding s.mu for the duration. Keep it simple (and slow) for now. + s.mu.Lock() + defer s.mu.Unlock() + for k, v := range s.serverEndpointByDisco { + if v.maybeExpire(now, bindLifetime, steadyStateLifetime, s.metrics) { + delete(s.serverEndpointByDisco, k) + s.serverEndpointByVNI.Delete(v.vni) + } + } +} + func (s *Server) endpointGCLoop() { defer s.wg.Done() ticker := time.NewTicker(s.bindLifetime) defer ticker.Stop() - - gc := func() { - now := mono.Now() - // TODO: consider performance implications of scanning all endpoints and - // holding s.mu for the duration. Keep it simple (and slow) for now. 
- s.mu.Lock() - defer s.mu.Unlock() - for k, v := range s.serverEndpointByDisco { - if v.isExpired(now, s.bindLifetime, s.steadyStateLifetime) { - delete(s.serverEndpointByDisco, k) - s.serverEndpointByVNI.Delete(v.vni) - } - } - } - for { select { case <-ticker.C: - gc() + s.endpointGC(s.bindLifetime, s.steadyStateLifetime) case <-s.closeCh: return } @@ -773,7 +822,7 @@ func (s *Server) handlePacket(from netip.AddrPort, b []byte) (write []byte, to n } msg := b[packet.GeneveFixedHeaderLength:] secrets := s.getMACSecrets(now) - write, to = e.(*serverEndpoint).handleSealedDiscoControlMsg(from, msg, s.discoPublic, secrets, now) + write, to = e.(*serverEndpoint).handleSealedDiscoControlMsg(from, msg, s.discoPublic, secrets, now, s.metrics) isDataPacket = false return } @@ -1015,6 +1064,7 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv s.serverEndpointByVNI.Store(e.vni, e) s.logf("allocated endpoint vni=%d lamportID=%d disco[0]=%v disco[1]=%v", e.vni, e.lamportID, pair.Get()[0].ShortString(), pair.Get()[1].ShortString()) + s.metrics.updateEndpoint(endpointClosed, endpointConnecting) return endpoint.ServerEndpoint{ ServerDisco: s.discoPublic, ClientDisco: pair.Get(), diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 59917e1c6ef52..cb6b05eea2108 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -8,6 +8,7 @@ import ( "crypto/rand" "net" "net/netip" + "sync" "testing" "time" @@ -21,6 +22,7 @@ import ( "tailscale.com/tstime/mono" "tailscale.com/types/key" "tailscale.com/types/views" + "tailscale.com/util/mak" "tailscale.com/util/usermetric" ) @@ -471,3 +473,75 @@ func TestServer_maybeRotateMACSecretLocked(t *testing.T) { qt.Assert(t, macSecret, qt.Not(qt.Equals), s.macSecrets.At(1)) qt.Assert(t, s.macSecrets.At(0), qt.Not(qt.Equals), s.macSecrets.At(1)) } + +func TestServer_endpointGC(t *testing.T) { + for _, tc := range []struct { + name string + addrs [2]netip.AddrPort + lastSeen 
[2]mono.Time + allocatedAt mono.Time + wantRemoved bool + }{ + { + name: "unbound_endpoint_expired", + allocatedAt: mono.Now().Add(-2 * defaultBindLifetime), + wantRemoved: true, + }, + { + name: "unbound_endpoint_kept", + allocatedAt: mono.Now(), + wantRemoved: false, + }, + { + name: "bound_endpoint_expired_a", + addrs: [2]netip.AddrPort{netip.MustParseAddrPort("192.0.2.1:1"), netip.MustParseAddrPort("192.0.2.2:1")}, + lastSeen: [2]mono.Time{mono.Now().Add(-2 * defaultSteadyStateLifetime), mono.Now()}, + wantRemoved: true, + }, + { + name: "bound_endpoint_expired_b", + addrs: [2]netip.AddrPort{netip.MustParseAddrPort("192.0.2.1:1"), netip.MustParseAddrPort("192.0.2.2:1")}, + lastSeen: [2]mono.Time{mono.Now(), mono.Now().Add(-2 * defaultSteadyStateLifetime)}, + wantRemoved: true, + }, + { + name: "bound_endpoint_kept", + addrs: [2]netip.AddrPort{netip.MustParseAddrPort("192.0.2.1:1"), netip.MustParseAddrPort("192.0.2.2:1")}, + lastSeen: [2]mono.Time{mono.Now(), mono.Now()}, + wantRemoved: false, + }, + } { + t.Run(tc.name, func(t *testing.T) { + disco1 := key.NewDisco() + disco2 := key.NewDisco() + pair := key.NewSortedPairOfDiscoPublic(disco1.Public(), disco2.Public()) + ep := &serverEndpoint{ + discoPubKeys: pair, + vni: 1, + lastSeen: tc.lastSeen, + boundAddrPorts: tc.addrs, + allocatedAt: tc.allocatedAt, + } + s := &Server{serverEndpointByVNI: sync.Map{}, metrics: &metrics{}} + mak.Set(&s.serverEndpointByDisco, pair, ep) + s.serverEndpointByVNI.Store(ep.vni, ep) + s.endpointGC(defaultBindLifetime, defaultSteadyStateLifetime) + removed := len(s.serverEndpointByDisco) > 0 + if tc.wantRemoved { + if removed { + t.Errorf("expected endpoint to be removed from Server") + } + if !ep.closed { + t.Errorf("expected endpoint to be closed") + } + } else { + if !removed { + t.Errorf("expected endpoint to remain in Server") + } + if ep.closed { + t.Errorf("expected endpoint to remain open") + } + } + }) + } +} From 151644f647d9388bb4cb1ae1c4c155b8d8de4cab Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 22 Jan 2026 14:50:24 -0500 Subject: [PATCH 0886/1093] wgengine: send disco key via TSMP on first contact (#18215) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we have not yet communicated with a peer, send a TSMPDiscoAdvertisement to let the peer know of our disco key. This is in most cases redundant, but will allow us to set up direct connections when the client cannot access control. Some parts taken from: #18073 Updates #12639 Signed-off-by: Claus Lensbøl --- wgengine/magicsock/endpoint.go | 1 + wgengine/magicsock/magicsock.go | 61 ++++++++++++++++++++++++++++++--- wgengine/userspace.go | 9 +++++ 3 files changed, 67 insertions(+), 4 deletions(-) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index eda589e14b1b6..586a2dc75c5cc 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -80,6 +80,7 @@ type endpoint struct { lastSendAny mono.Time // last time there were outgoing packets sent this peer from any trigger, internal or external to magicsock lastFullPing mono.Time // last time we pinged all disco or wireguard only endpoints lastUDPRelayPathDiscovery mono.Time // last time we ran UDP relay path discovery + sentDiscoKeyAdvertisement bool // wether we sent a TSMPDiscoAdvertisement or not to this endpoint derpAddr netip.AddrPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients) bestAddr addrQuality // best non-DERP path; zero if none; mutate via setBestAddrLocked() diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 8fbd07013797d..1c13093478a2e 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -179,9 +179,10 @@ type Conn struct { // A publisher for synchronization points to ensure correct ordering of // config changes between magicsock and wireguard. 
- syncPub *eventbus.Publisher[syncPoint] - allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] - portUpdatePub *eventbus.Publisher[router.PortUpdate] + syncPub *eventbus.Publisher[syncPoint] + allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] + portUpdatePub *eventbus.Publisher[router.PortUpdate] + tsmpDiscoKeyAvailablePub *eventbus.Publisher[NewDiscoKeyAvailable] // pconn4 and pconn6 are the underlying UDP sockets used to // send/receive packets for wireguard and other magicsock @@ -696,6 +697,7 @@ func NewConn(opts Options) (*Conn, error) { c.syncPub = eventbus.Publish[syncPoint](ec) c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](ec) c.portUpdatePub = eventbus.Publish[router.PortUpdate](ec) + c.tsmpDiscoKeyAvailablePub = eventbus.Publish[NewDiscoKeyAvailable](ec) eventbus.SubscribeFunc(ec, c.onPortMapChanged) eventbus.SubscribeFunc(ec, c.onFilterUpdate) eventbus.SubscribeFunc(ec, c.onNodeViewsUpdate) @@ -1249,7 +1251,8 @@ func (c *Conn) DiscoPublicKey() key.DiscoPublic { // RotateDiscoKey generates a new discovery key pair and updates the connection // to use it. This invalidates all existing disco sessions and will cause peers -// to re-establish discovery sessions with the new key. +// to re-establish discovery sessions with the new key. Addtionally, the +// lastTSMPDiscoAdvertisement on all endpoints is reset to 0. 
// // This is primarily for debugging and testing purposes, a future enhancement // should provide a mechanism for seamless rotation by supporting short term use @@ -1263,6 +1266,11 @@ func (c *Conn) RotateDiscoKey() { newShort := c.discoAtomic.Short() c.discoInfo = make(map[key.DiscoPublic]*discoInfo) connCtx := c.connCtx + for _, endpoint := range c.peerMap.byEpAddr { + endpoint.ep.mu.Lock() + endpoint.ep.sentDiscoKeyAdvertisement = false + endpoint.ep.mu.Unlock() + } c.mu.Unlock() c.logf("magicsock: rotated disco key from %v to %v", oldShort, newShort) @@ -2247,6 +2255,7 @@ func (c *Conn) handleDiscoMessage(msg []byte, src epAddr, shouldBeRelayHandshake if debugDisco() { c.logf("magicsock: disco: failed to open naclbox from %v (wrong rcpt?) via %s", sender, via) } + metricRecvDiscoBadKey.Add(1) return } @@ -2654,6 +2663,8 @@ func (c *Conn) enqueueCallMeMaybe(derpAddr netip.AddrPort, de *endpoint) { return } + c.maybeSendTSMPDiscoAdvert(de) + eps := make([]netip.AddrPort, 0, len(c.lastEndpoints)) for _, ep := range c.lastEndpoints { eps = append(eps, ep.Addr) @@ -4314,3 +4325,45 @@ func (c *Conn) HandleDiscoKeyAdvertisement(node tailcfg.NodeView, update packet. c.logf("magicsock: updated disco key for peer %v to %v", nodeKey.ShortString(), discoKey.ShortString()) metricTSMPDiscoKeyAdvertisementApplied.Add(1) } + +// NewDiscoKeyAvailable is an eventbus topic that is emitted when we're sending +// a packet to a node and observe we haven't told it our current DiscoKey before. +// +// The publisher is magicsock, when we're sending a packet. +// The subscriber is userspaceEngine, which sends a TSMP packet, also via +// magicsock. This doesn't recurse infinitely because we only publish it once per +// DiscoKey. +// In the common case, a DiscoKey is not rotated within a process generation +// (as of 2026-01-21), except with debug commands to simulate process restarts. +// +// The address is the first node address (tailscale address) of the node. 
It +// does not matter if the address is v4/v6, the receiver should handle either. +// +// Since we have not yet communicated with the node at the time we are +// sending this event, the resulting TSMPDiscoKeyAdvertisement will with all +// likelihood be transmitted via DERP. +type NewDiscoKeyAvailable struct { + NodeFirstAddr netip.Addr + NodeID tailcfg.NodeID +} + +// maybeSendTSMPDiscoAdvert conditionally emits an event indicating that we +// should send our DiscoKey to the first node address of the magicksock endpoint. +// The event is only emitted if we have not yet contacted that endpoint since +// the DiscoKey changed. +// +// This condition is most likely met only once per endpoint, after the start of +// tailscaled, but not until we contact the endpoint for the first time. +// +// We do not need the Conn to be locked, but the endpoint should be. +func (c *Conn) maybeSendTSMPDiscoAdvert(de *endpoint) { + de.mu.Lock() + defer de.mu.Unlock() + if !de.sentDiscoKeyAdvertisement { + de.sentDiscoKeyAdvertisement = true + c.tsmpDiscoKeyAvailablePub.Publish(NewDiscoKeyAvailable{ + NodeFirstAddr: de.nodeAddr, + NodeID: de.nodeID, + }) + } +} diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 875011a9c3e05..dbc8e8b573c49 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -54,6 +54,7 @@ import ( "tailscale.com/util/execqueue" "tailscale.com/util/mak" "tailscale.com/util/set" + "tailscale.com/util/singleflight" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" "tailscale.com/version" @@ -568,6 +569,14 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } e.magicConn.HandleDiscoKeyAdvertisement(peer.Node, pkt) }) + var tsmpRequestGroup singleflight.Group[netip.Addr, struct{}] + eventbus.SubscribeFunc(ec, func(req magicsock.NewDiscoKeyAvailable) { + go tsmpRequestGroup.Do(req.NodeFirstAddr, func() (struct{}, error) { + e.sendTSMPDiscoAdvertisement(req.NodeFirstAddr) + e.logf("wgengine: sending TSMP 
disco key advertisement to %v", req.NodeFirstAddr) + return struct{}{}, nil + }) + }) e.eventClient = ec e.logf("Engine created.") return e, nil From c062230cce0e0e3d3940578b046d97ceb88128b9 Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Thu, 22 Jan 2026 13:05:37 -0700 Subject: [PATCH 0887/1093] tsnet: clarify that ListenService starts the server if necessary Every other listen method on tsnet.Server makes this clarification, so should ListenService. Fixes tailscale/corp#36207 Signed-off-by: Harry Harpham --- tsnet/tsnet.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 6c840c335535e..bf7e694df28dd 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -1410,6 +1410,8 @@ var ErrUntaggedServiceHost = errors.New("service hosts must be tagged nodes") // To advertise a Service with multiple ports, run ListenService multiple times. // For more information about Services, see // https://tailscale.com/kb/1552/tailscale-services +// +// This function will start the server if it is not already started. func (s *Server) ListenService(name string, mode ServiceMode) (*ServiceListener, error) { if err := tailcfg.ServiceName(name).Validate(); err != nil { return nil, err From 63d563e7340b4712b9f2933f663057ce2dcfa4a4 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 15 Jan 2026 20:35:41 -0800 Subject: [PATCH 0888/1093] tsnet: add support for a user-supplied tun.Device tsnet users can now provide a tun.Device, including any custom implementation that conforms to the interface. netstack has a new option CheckLocalTransportEndpoints that when used alongside a TUN enables netstack listens and dials to correctly capture traffic associated with those sockets. tsnet with a TUN sets this option, while all other builds leave this at false to preserve existing performance. 
Updates #18423 Signed-off-by: James Tucker --- tsnet/tsnet.go | 88 ++++- tsnet/tsnet_test.go | 673 ++++++++++++++++++++++++++++++++++ wgengine/netstack/netstack.go | 86 ++++- 3 files changed, 842 insertions(+), 5 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index bf7e694df28dd..d627d55b37314 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -26,6 +26,7 @@ import ( "sync" "time" + "github.com/tailscale/wireguard-go/tun" "tailscale.com/client/local" "tailscale.com/control/controlclient" "tailscale.com/envknob" @@ -167,6 +168,11 @@ type Server struct { // that the control server will allow the node to adopt that tag. AdvertiseTags []string + // Tun, if non-nil, specifies a custom tun.Device to use for packet I/O. + // + // This field must be set before calling Start. + Tun tun.Device + initOnce sync.Once initErr error lb *ipnlocal.LocalBackend @@ -659,6 +665,7 @@ func (s *Server) start() (reterr error) { s.dialer = &tsdial.Dialer{Logf: tsLogf} // mutated below (before used) s.dialer.SetBus(sys.Bus.Get()) eng, err := wgengine.NewUserspaceEngine(tsLogf, wgengine.Config{ + Tun: s.Tun, EventBus: sys.Bus.Get(), ListenPort: s.Port, NetMon: s.netMon, @@ -682,8 +689,16 @@ func (s *Server) start() (reterr error) { } sys.Tun.Get().Start() sys.Set(ns) - ns.ProcessLocalIPs = true - ns.ProcessSubnets = true + if s.Tun == nil { + // Only process packets in netstack when using the default fake TUN. + // When a TUN is provided, let packets flow through it instead. + ns.ProcessLocalIPs = true + ns.ProcessSubnets = true + } else { + // When using a TUN, check gVisor for registered endpoints to handle + // packets for tsnet listeners and outbound connection replies. 
+ ns.CheckLocalTransportEndpoints = true + } ns.GetTCPHandlerForFlow = s.getTCPHandlerForFlow ns.GetUDPHandlerForFlow = s.getUDPHandlerForFlow s.netstack = ns @@ -1072,10 +1087,34 @@ func (s *Server) ListenPacket(network, addr string) (net.PacketConn, error) { network = "udp6" } } - if err := s.Start(); err != nil { + + netLn, err := s.listen(network, addr, listenOnTailnet) + if err != nil { return nil, err } - return s.netstack.ListenPacket(network, ap.String()) + ln := netLn.(*listener) + + pc, err := s.netstack.ListenPacket(network, ap.String()) + if err != nil { + ln.Close() + return nil, err + } + + return &udpPacketConn{ + PacketConn: pc, + ln: ln, + }, nil +} + +// udpPacketConn wraps a net.PacketConn to unregister from s.listeners on Close. +type udpPacketConn struct { + net.PacketConn + ln *listener +} + +func (c *udpPacketConn) Close() error { + c.ln.Close() + return c.PacketConn.Close() } // ListenTLS announces only on the Tailscale network. @@ -1611,10 +1650,37 @@ func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, erro closedc: make(chan struct{}), conn: make(chan net.Conn), } + + // When using a TUN with TCP, create a gVisor TCP listener. + if s.Tun != nil && (network == "" || network == "tcp" || network == "tcp4" || network == "tcp6") { + var nsNetwork string + nsAddr := host + switch { + case network == "tcp4" || network == "tcp6": + nsNetwork = network + case host.Addr().Is4(): + nsNetwork = "tcp4" + case host.Addr().Is6(): + nsNetwork = "tcp6" + default: + // Wildcard address: use tcp6 for dual-stack (accepts both v4 and v6). 
+ nsNetwork = "tcp6" + nsAddr = netip.AddrPortFrom(netip.IPv6Unspecified(), host.Port()) + } + gonetLn, err := s.netstack.ListenTCP(nsNetwork, nsAddr.String()) + if err != nil { + return nil, fmt.Errorf("tsnet: %w", err) + } + ln.gonetLn = gonetLn + } + s.mu.Lock() for _, key := range keys { if _, ok := s.listeners[key]; ok { s.mu.Unlock() + if ln.gonetLn != nil { + ln.gonetLn.Close() + } return nil, fmt.Errorf("tsnet: listener already open for %s, %s", network, addr) } } @@ -1684,9 +1750,17 @@ type listener struct { conn chan net.Conn // unbuffered, never closed closedc chan struct{} // closed on [listener.Close] closed bool // guarded by s.mu + + // gonetLn, if set, is the gonet.Listener that handles new connections. + // gonetLn is set by [listen] when a TUN is in use and terminates the listener. + // gonetLn is nil when TUN is nil. + gonetLn net.Listener } func (ln *listener) Accept() (net.Conn, error) { + if ln.gonetLn != nil { + return ln.gonetLn.Accept() + } select { case c := <-ln.conn: return c, nil @@ -1696,6 +1770,9 @@ func (ln *listener) Accept() (net.Conn, error) { } func (ln *listener) Addr() net.Addr { + if ln.gonetLn != nil { + return ln.gonetLn.Addr() + } return addr{ network: ln.keys[0].network, addr: ln.addr, @@ -1721,6 +1798,9 @@ func (ln *listener) closeLocked() error { } close(ln.closedc) ln.closed = true + if ln.gonetLn != nil { + ln.gonetLn.Close() + } return nil } diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index f44bacab08431..2c6970fa3b723 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -39,6 +39,7 @@ import ( "github.com/google/go-cmp/cmp" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "github.com/tailscale/wireguard-go/tun" "golang.org/x/net/proxy" "tailscale.com/client/local" @@ -48,11 +49,13 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/net/netns" + "tailscale.com/net/packet" "tailscale.com/tailcfg" "tailscale.com/tstest" 
"tailscale.com/tstest/deptest" "tailscale.com/tstest/integration" "tailscale.com/tstest/integration/testcontrol" + "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/views" @@ -1860,6 +1863,676 @@ func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *local.Client) { t.Error("magicsock did not find a direct path from lc1 to lc2") } +// chanTUN is a tun.Device for testing that uses channels for packet I/O. +// Inbound receives packets written to the TUN (from the perspective of the network stack). +// Outbound is for injecting packets to be read from the TUN. +type chanTUN struct { + Inbound chan []byte // packets written to TUN + Outbound chan []byte // packets to read from TUN + closed chan struct{} + events chan tun.Event +} + +func newChanTUN() *chanTUN { + t := &chanTUN{ + Inbound: make(chan []byte, 10), + Outbound: make(chan []byte, 10), + closed: make(chan struct{}), + events: make(chan tun.Event, 1), + } + t.events <- tun.EventUp + return t +} + +func (t *chanTUN) File() *os.File { panic("not implemented") } + +func (t *chanTUN) Close() error { + select { + case <-t.closed: + default: + close(t.closed) + close(t.Inbound) + } + return nil +} + +func (t *chanTUN) Read(bufs [][]byte, sizes []int, offset int) (int, error) { + select { + case <-t.closed: + return 0, io.EOF + case pkt := <-t.Outbound: + sizes[0] = copy(bufs[0][offset:], pkt) + return 1, nil + } +} + +func (t *chanTUN) Write(bufs [][]byte, offset int) (int, error) { + for _, buf := range bufs { + pkt := buf[offset:] + if len(pkt) == 0 { + continue + } + select { + case <-t.closed: + return 0, errors.New("closed") + case t.Inbound <- slices.Clone(pkt): + } + } + return len(bufs), nil +} + +func (t *chanTUN) MTU() (int, error) { return 1280, nil } +func (t *chanTUN) Name() (string, error) { return "chantun", nil } +func (t *chanTUN) Events() <-chan tun.Event { return t.events } +func (t *chanTUN) BatchSize() int { return 1 } + +// listenTest 
provides common setup for listener and TUN tests. +type listenTest struct { + s1, s2 *Server + s1ip4, s1ip6 netip.Addr + s2ip4, s2ip6 netip.Addr + tun *chanTUN // nil for netstack mode +} + +// setupListenTest creates two tsnet servers for testing. +// If useTUN is true, s2 uses a chanTUN; otherwise it uses netstack only. +func setupListenTest(t *testing.T, useTUN bool) *listenTest { + t.Helper() + tstest.Shard(t) + tstest.ResourceCheck(t) + ctx := t.Context() + controlURL, _ := startControl(t) + s1, _, _ := startServer(t, ctx, controlURL, "s1") + + tmp := filepath.Join(t.TempDir(), "s2") + must.Do(os.MkdirAll(tmp, 0755)) + s2 := &Server{ + Dir: tmp, + ControlURL: controlURL, + Hostname: "s2", + Store: new(mem.Store), + Ephemeral: true, + } + + var tun *chanTUN + if useTUN { + tun = newChanTUN() + s2.Tun = tun + } + + if *verboseNodes { + s2.Logf = t.Logf + } + t.Cleanup(func() { s2.Close() }) + + s2status, err := s2.Up(ctx) + if err != nil { + t.Fatal(err) + } + + s1ip4, s1ip6 := s1.TailscaleIPs() + s2ip4 := s2status.TailscaleIPs[0] + var s2ip6 netip.Addr + if len(s2status.TailscaleIPs) > 1 { + s2ip6 = s2status.TailscaleIPs[1] + } + + lc1 := must.Get(s1.LocalClient()) + must.Get(lc1.Ping(ctx, s2ip4, tailcfg.PingTSMP)) + + return &listenTest{ + s1: s1, + s2: s2, + s1ip4: s1ip4, + s1ip6: s1ip6, + s2ip4: s2ip4, + s2ip6: s2ip6, + tun: tun, + } +} + +// echoUDP returns an IP packet with src/dst and ports swapped, with checksums recomputed. 
+func echoUDP(pkt []byte) []byte { + var p packet.Parsed + p.Decode(pkt) + if p.IPProto != ipproto.UDP { + return nil + } + switch p.IPVersion { + case 4: + h := p.UDP4Header() + h.ToResponse() + return packet.Generate(h, p.Payload()) + case 6: + h := packet.UDP6Header{ + IP6Header: p.IP6Header(), + SrcPort: p.Src.Port(), + DstPort: p.Dst.Port(), + } + h.ToResponse() + return packet.Generate(h, p.Payload()) + } + return nil +} + +func TestTUN(t *testing.T) { + tt := setupListenTest(t, true) + + go func() { + for pkt := range tt.tun.Inbound { + var p packet.Parsed + p.Decode(pkt) + if p.Dst.Port() == 9999 { + tt.tun.Outbound <- echoUDP(pkt) + } + } + }() + + test := func(t *testing.T, s2ip netip.Addr) { + conn, err := tt.s1.Dial(t.Context(), "udp", netip.AddrPortFrom(s2ip, 9999).String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + want := "hello from s1" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatal(err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + t.Fatalf("reading echo response: %v", err) + } + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("IPv4", func(t *testing.T) { test(t, tt.s2ip4) }) + t.Run("IPv6", func(t *testing.T) { test(t, tt.s2ip6) }) +} + +// TestTUNDNS tests that a TUN can send DNS queries to quad-100 and receive +// responses. This verifies that handleLocalPackets intercepts outbound traffic +// to the service IP. 
+func TestTUNDNS(t *testing.T) { + tt := setupListenTest(t, true) + + test := func(t *testing.T, srcIP netip.Addr, serviceIP netip.Addr) { + tt.tun.Outbound <- buildDNSQuery("s2", srcIP) + + ipVersion := uint8(4) + if srcIP.Is6() { + ipVersion = 6 + } + for { + select { + case pkt := <-tt.tun.Inbound: + var p packet.Parsed + p.Decode(pkt) + if p.IPVersion != ipVersion || p.IPProto != ipproto.UDP { + continue + } + if p.Src.Addr() == serviceIP && p.Src.Port() == 53 { + if len(p.Payload()) < 12 { + t.Fatalf("DNS response too short: %d bytes", len(p.Payload())) + } + return // success + } + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for DNS response") + } + } + } + + t.Run("IPv4", func(t *testing.T) { + test(t, tt.s2ip4, netip.MustParseAddr("100.100.100.100")) + }) + t.Run("IPv6", func(t *testing.T) { + test(t, tt.s2ip6, netip.MustParseAddr("fd7a:115c:a1e0::53")) + }) +} + +// TestListenPacket tests UDP listeners (ListenPacket) in both netstack and TUN modes. +func TestListenPacket(t *testing.T) { + testListenPacket := func(t *testing.T, lt *listenTest, listenIP netip.Addr) { + pc, err := lt.s2.ListenPacket("udp", netip.AddrPortFrom(listenIP, 0).String()) + if err != nil { + t.Fatal(err) + } + defer pc.Close() + + echoErr := make(chan error, 1) + go func() { + buf := make([]byte, 1500) + n, addr, err := pc.ReadFrom(buf) + if err != nil { + echoErr <- err + return + } + _, err = pc.WriteTo(buf[:n], addr) + if err != nil { + echoErr <- err + return + } + }() + + conn, err := lt.s1.Dial(t.Context(), "udp", pc.LocalAddr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + want := "hello udp" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatal(err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("echo error: %v; read error: %v", e, err) + default: + t.Fatalf("Read failed: %v", err) + } + } + 
+ if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupListenTest(t, false) + t.Run("IPv4", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip4) }) + t.Run("IPv6", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip6) }) + }) + + t.Run("TUN", func(t *testing.T) { + lt := setupListenTest(t, true) + t.Run("IPv4", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip4) }) + t.Run("IPv6", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip6) }) + }) +} + +// TestListenTCP tests TCP listeners with concrete addresses in both netstack +// and TUN modes. +func TestListenTCP(t *testing.T) { + testListenTCP := func(t *testing.T, lt *listenTest, listenIP netip.Addr) { + ln, err := lt.s2.Listen("tcp", netip.AddrPortFrom(listenIP, 0).String()) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + echoErr := make(chan error, 1) + go func() { + conn, err := ln.Accept() + if err != nil { + echoErr <- err + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + echoErr <- err + return + } + _, err = conn.Write(buf[:n]) + if err != nil { + echoErr <- err + return + } + }() + + conn, err := lt.s1.Dial(t.Context(), "tcp", ln.Addr().String()) + if err != nil { + t.Fatalf("Dial failed: %v", err) + } + defer conn.Close() + + want := "hello tcp" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatalf("Write failed: %v", err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("echo error: %v; read error: %v", e, err) + default: + t.Fatalf("Read failed: %v", err) + } + } + + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupListenTest(t, false) + t.Run("IPv4", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip4) }) + 
t.Run("IPv6", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip6) }) + }) + + t.Run("TUN", func(t *testing.T) { + lt := setupListenTest(t, true) + t.Run("IPv4", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip4) }) + t.Run("IPv6", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip6) }) + }) +} + +// TestListenTCPDualStack tests TCP listeners with wildcard addresses (dual-stack) +// in both netstack and TUN modes. +func TestListenTCPDualStack(t *testing.T) { + testListenTCPDualStack := func(t *testing.T, lt *listenTest, dialIP netip.Addr) { + ln, err := lt.s2.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + _, portStr, err := net.SplitHostPort(ln.Addr().String()) + if err != nil { + t.Fatalf("parsing listener address %q: %v", ln.Addr().String(), err) + } + + echoErr := make(chan error, 1) + go func() { + conn, err := ln.Accept() + if err != nil { + echoErr <- err + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + echoErr <- err + return + } + _, err = conn.Write(buf[:n]) + if err != nil { + echoErr <- err + return + } + }() + + dialAddr := net.JoinHostPort(dialIP.String(), portStr) + conn, err := lt.s1.Dial(t.Context(), "tcp", dialAddr) + if err != nil { + t.Fatalf("Dial(%q) failed: %v", dialAddr, err) + } + defer conn.Close() + + want := "hello tcp dualstack" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatalf("Write failed: %v", err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("echo error: %v; read error: %v", e, err) + default: + t.Fatalf("Read failed: %v", err) + } + } + + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupListenTest(t, false) + t.Run("DialIPv4", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip4) }) + 
t.Run("DialIPv6", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip6) }) + }) + + t.Run("TUN", func(t *testing.T) { + lt := setupListenTest(t, true) + t.Run("DialIPv4", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip4) }) + t.Run("DialIPv6", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip6) }) + }) +} + +// TestDialTCP tests TCP dialing from s2 to s1 in both netstack and TUN modes. +// In TUN mode, this verifies that outbound TCP connections and their replies +// are handled by netstack without packets escaping to the TUN. +func TestDialTCP(t *testing.T) { + testDialTCP := func(t *testing.T, lt *listenTest, listenIP netip.Addr) { + ln, err := lt.s1.Listen("tcp", netip.AddrPortFrom(listenIP, 0).String()) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + echoErr := make(chan error, 1) + go func() { + conn, err := ln.Accept() + if err != nil { + echoErr <- err + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + echoErr <- err + return + } + _, err = conn.Write(buf[:n]) + if err != nil { + echoErr <- err + return + } + }() + + conn, err := lt.s2.Dial(t.Context(), "tcp", ln.Addr().String()) + if err != nil { + t.Fatalf("Dial failed: %v", err) + } + defer conn.Close() + + want := "hello tcp dial" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatalf("Write failed: %v", err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("echo error: %v; read error: %v", e, err) + default: + t.Fatalf("Read failed: %v", err) + } + } + + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupListenTest(t, false) + t.Run("IPv4", func(t *testing.T) { testDialTCP(t, lt, lt.s1ip4) }) + t.Run("IPv6", func(t *testing.T) { testDialTCP(t, lt, lt.s1ip6) }) + }) + + t.Run("TUN", 
func(t *testing.T) { + lt := setupListenTest(t, true) + + var escapedTCPPackets atomic.Int32 + var wg sync.WaitGroup + wg.Go(func() { + for pkt := range lt.tun.Inbound { + var p packet.Parsed + p.Decode(pkt) + if p.IPProto == ipproto.TCP { + escapedTCPPackets.Add(1) + t.Logf("TCP packet escaped to TUN: %v -> %v", p.Src, p.Dst) + } + } + }) + + t.Run("IPv4", func(t *testing.T) { testDialTCP(t, lt, lt.s1ip4) }) + t.Run("IPv6", func(t *testing.T) { testDialTCP(t, lt, lt.s1ip6) }) + + lt.tun.Close() + wg.Wait() + if escaped := escapedTCPPackets.Load(); escaped > 0 { + t.Errorf("%d TCP packets escaped to TUN", escaped) + } + }) +} + +// TestDialUDP tests UDP dialing from s2 to s1 in both netstack and TUN modes. +// In TUN mode, this verifies that outbound UDP connections register endpoints +// with gVisor, allowing reply packets to be routed through netstack instead of +// escaping to the TUN. +func TestDialUDP(t *testing.T) { + testDialUDP := func(t *testing.T, lt *listenTest, listenIP netip.Addr) { + pc, err := lt.s1.ListenPacket("udp", netip.AddrPortFrom(listenIP, 0).String()) + if err != nil { + t.Fatal(err) + } + defer pc.Close() + + echoErr := make(chan error, 1) + go func() { + buf := make([]byte, 1500) + n, addr, err := pc.ReadFrom(buf) + if err != nil { + echoErr <- err + return + } + _, err = pc.WriteTo(buf[:n], addr) + if err != nil { + echoErr <- err + return + } + }() + + conn, err := lt.s2.Dial(t.Context(), "udp", pc.LocalAddr().String()) + if err != nil { + t.Fatalf("Dial failed: %v", err) + } + defer conn.Close() + + want := "hello udp dial" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatalf("Write failed: %v", err) + } + + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("echo error: %v; read error: %v", e, err) + default: + t.Fatalf("Read failed: %v", err) + } + } + + if string(got[:n]) != want { + t.Errorf("got %q, 
want %q", got[:n], want) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupListenTest(t, false) + t.Run("IPv4", func(t *testing.T) { testDialUDP(t, lt, lt.s1ip4) }) + t.Run("IPv6", func(t *testing.T) { testDialUDP(t, lt, lt.s1ip6) }) + }) + + t.Run("TUN", func(t *testing.T) { + lt := setupListenTest(t, true) + + var escapedUDPPackets atomic.Int32 + var wg sync.WaitGroup + wg.Go(func() { + for pkt := range lt.tun.Inbound { + var p packet.Parsed + p.Decode(pkt) + if p.IPProto == ipproto.UDP { + escapedUDPPackets.Add(1) + t.Logf("UDP packet escaped to TUN: %v -> %v", p.Src, p.Dst) + } + } + }) + + t.Run("IPv4", func(t *testing.T) { testDialUDP(t, lt, lt.s1ip4) }) + t.Run("IPv6", func(t *testing.T) { testDialUDP(t, lt, lt.s1ip6) }) + + lt.tun.Close() + wg.Wait() + if escaped := escapedUDPPackets.Load(); escaped > 0 { + t.Errorf("%d UDP packets escaped to TUN", escaped) + } + }) +} + +// buildDNSQuery builds a UDP/IP packet containing a DNS query for name to the +// Tailscale service IP (100.100.100.100 for IPv4, fd7a:115c:a1e0::53 for IPv6). +func buildDNSQuery(name string, srcIP netip.Addr) []byte { + qtype := byte(0x01) // Type A for IPv4 + if srcIP.Is6() { + qtype = 0x1c // Type AAAA for IPv6 + } + dns := []byte{ + 0x12, 0x34, // ID + 0x01, 0x00, // Flags: standard query, recursion desired + 0x00, 0x01, // QDCOUNT: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ANCOUNT, NSCOUNT, ARCOUNT + } + for _, label := range strings.Split(name, ".") { + dns = append(dns, byte(len(label))) + dns = append(dns, label...) 
+ } + dns = append(dns, 0x00, 0x00, qtype, 0x00, 0x01) // null, Type A/AAAA, Class IN + + if srcIP.Is4() { + h := packet.UDP4Header{ + IP4Header: packet.IP4Header{ + Src: srcIP, + Dst: netip.MustParseAddr("100.100.100.100"), + }, + SrcPort: 12345, + DstPort: 53, + } + return packet.Generate(h, dns) + } + h := packet.UDP6Header{ + IP6Header: packet.IP6Header{ + Src: srcIP, + Dst: netip.MustParseAddr("fd7a:115c:a1e0::53"), + }, + SrcPort: 12345, + DstPort: 53, + } + return packet.Generate(h, dns) +} + func TestDeps(t *testing.T) { tstest.Shard(t) deptest.DepChecker{ diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index c2b5d8a3266c7..e05846e150a27 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -165,6 +165,17 @@ type Impl struct { // over the UDP flow. GetUDPHandlerForFlow func(src, dst netip.AddrPort) (handler func(nettype.ConnPacketConn), intercept bool) + // CheckLocalTransportEndpoints, if true, causes netstack to check if gVisor + // has a registered endpoint for incoming packets to local IPs. This is used + // by tsnet to intercept packets for registered listeners and outbound + // connections when ProcessLocalIPs is false (i.e., when using a TUN). + // It can only be set before calling Start. + // TODO(raggi): refactor the way we handle both CheckLocalTransportEndpoints + // and the earlier netstack registrations for serve, funnel, peerAPI and so + // on. Currently this optimizes away cost for tailscaled in TUN mode, while + // enabling extension support when using tsnet in TUN mode. See #18423. + CheckLocalTransportEndpoints bool + // ProcessLocalIPs is whether netstack should handle incoming // traffic directed at the Node.Addresses (local IPs). // It can only be set before calling Start. 
@@ -1109,6 +1120,45 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool { if ns.ProcessSubnets && !isLocal { return true } + if isLocal && ns.CheckLocalTransportEndpoints { + // Handle packets to registered listeners and replies to outbound + // connections by checking if gVisor has a registered endpoint. + // This covers TCP listeners, UDP listeners, and outbound TCP replies. + if p.IPProto == ipproto.TCP || p.IPProto == ipproto.UDP { + var netProto tcpip.NetworkProtocolNumber + var id stack.TransportEndpointID + if p.Dst.Addr().Is4() { + netProto = ipv4.ProtocolNumber + id = stack.TransportEndpointID{ + LocalAddress: tcpip.AddrFrom4(p.Dst.Addr().As4()), + LocalPort: p.Dst.Port(), + RemoteAddress: tcpip.AddrFrom4(p.Src.Addr().As4()), + RemotePort: p.Src.Port(), + } + } else { + netProto = ipv6.ProtocolNumber + id = stack.TransportEndpointID{ + LocalAddress: tcpip.AddrFrom16(p.Dst.Addr().As16()), + LocalPort: p.Dst.Port(), + RemoteAddress: tcpip.AddrFrom16(p.Src.Addr().As16()), + RemotePort: p.Src.Port(), + } + } + var transProto tcpip.TransportProtocolNumber + if p.IPProto == ipproto.TCP { + transProto = tcp.ProtocolNumber + } else { + transProto = udp.ProtocolNumber + } + ep := ns.ipstack.FindTransportEndpoint(netProto, transProto, id, nicID) + if debugNetstack() { + ns.logf("[v2] FindTransportEndpoint: id=%+v found=%v", id, ep != nil) + } + if ep != nil { + return true + } + } + } return false } @@ -1575,7 +1625,7 @@ func (ns *Impl) forwardTCP(getClient func(...tcpip.SettableSocketOption) *gonet. 
func (ns *Impl) ListenPacket(network, address string) (net.PacketConn, error) { ap, err := netip.ParseAddrPort(address) if err != nil { - return nil, fmt.Errorf("netstack: ParseAddrPort(%q): %v", address, err) + return nil, fmt.Errorf("netstack: ParseAddrPort(%q): %w", address, err) } var networkProto tcpip.NetworkProtocolNumber @@ -1612,6 +1662,40 @@ func (ns *Impl) ListenPacket(network, address string) (net.PacketConn, error) { return gonet.NewUDPConn(&wq, ep), nil } +// ListenTCP listens for TCP connections on the given address. +func (ns *Impl) ListenTCP(network, address string) (*gonet.TCPListener, error) { + ap, err := netip.ParseAddrPort(address) + if err != nil { + return nil, fmt.Errorf("netstack: ParseAddrPort(%q): %w", address, err) + } + + var networkProto tcpip.NetworkProtocolNumber + switch network { + case "tcp4": + networkProto = ipv4.ProtocolNumber + if ap.Addr().IsValid() && !ap.Addr().Is4() { + return nil, fmt.Errorf("netstack: tcp4 requires an IPv4 address") + } + case "tcp6": + networkProto = ipv6.ProtocolNumber + if ap.Addr().IsValid() && !ap.Addr().Is6() { + return nil, fmt.Errorf("netstack: tcp6 requires an IPv6 address") + } + default: + return nil, fmt.Errorf("netstack: unsupported network %q", network) + } + + localAddress := tcpip.FullAddress{ + NIC: nicID, + Port: ap.Port(), + } + if ap.Addr().IsValid() && !ap.Addr().IsUnspecified() { + localAddress.Addr = tcpip.AddrFromSlice(ap.Addr().AsSlice()) + } + + return gonet.ListenTCP(ns.ipstack, localAddress, networkProto) +} + func (ns *Impl) acceptUDP(r *udp.ForwarderRequest) { sess := r.ID() if debugNetstack() { From df547517251b8ef6ce68eeb74df6b3d0b3b50360 Mon Sep 17 00:00:00 2001 From: Francois Marier Date: Fri, 23 Jan 2026 08:30:19 -0800 Subject: [PATCH 0889/1093] scripts/installer.sh: allow running dnf5 install script twice (#18492) `dnf config-manager addrepo` will fail if the Tailscale repo is already installed. 
Without the --overwrite flag, the installer will error out instead of succeeding like with dnf3. Fixes #18491 Signed-off-by: Francois Marier --- scripts/installer.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index 89d54a4311d01..76e8943e9931f 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -603,7 +603,7 @@ main() { $SUDO dnf config-manager --add-repo "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" elif [ "$DNF_VERSION" = "5" ]; then # Already installed config-manager, above. - $SUDO dnf config-manager addrepo --from-repofile="https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" + $SUDO dnf config-manager addrepo --overwrite --from-repofile="https://pkgs.tailscale.com/$TRACK/$OS/$VERSION/tailscale.repo" else echo "unexpected: unknown dnf version $DNF_VERSION" exit 1 From ce12863ee5af44fd25d9e9ad84fc56449f87a72f Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 23 Jan 2026 10:09:46 -0800 Subject: [PATCH 0890/1093] ipn/ipnlocal: manage per-profile subdirectories in TailscaleVarRoot (#18485) In order to better manage per-profile data resources on the client, add methods to the LocalBackend to support creation of per-profile directory structures in local storage. These methods build on the existing TailscaleVarRoot config, and have the same limitation (i.e., if no local storage is available, it will report an error when used). The immediate motivation is to support netmap caching, but we can also use this mechanism for other per-profile resources including pending taildrop files and Tailnet Lock authority caches. This commit only adds the directory-management plumbing; later commits will handle migrating taildrop, TKA, etc. to this mechanism, as well as caching network maps. Updates #12639 Change-Id: Ia75741955c7bf885e49c1ad99f856f669a754169 Signed-off-by: M. J. 
Fromberger --- ipn/ipnlocal/local.go | 61 ++++++++++++++++++++++++++++++++++++++ ipn/ipnlocal/local_test.go | 50 +++++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 2f05a4dbbc9ba..bbd2aa2e0e425 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -21,6 +21,7 @@ import ( "net/netip" "net/url" "os" + "path/filepath" "reflect" "runtime" "slices" @@ -165,6 +166,10 @@ var ( // errManagedByPolicy indicates the operation is blocked // because the target state is managed by a GP/MDM policy. errManagedByPolicy = errors.New("managed by policy") + + // ErrProfileStorageUnavailable indicates that profile-specific local data + // storage is not available; see [LocalBackend.ProfileMkdirAll]. + ErrProfileStorageUnavailable = errors.New("profile local data storage unavailable") ) // LocalBackend is the glue between the major pieces of the Tailscale @@ -5228,6 +5233,56 @@ func (b *LocalBackend) TailscaleVarRoot() string { return "" } +// ProfileMkdirAll creates (if necessary) and returns the path of a directory +// specific to the specified login profile, inside Tailscale's writable storage +// area. If subs are provided, they are joined to the base path to form the +// subdirectory path. +// +// It reports [ErrProfileStorageUnavailable] if there's no configured or +// discovered storage location, or if there was an error making the +// subdirectory. +func (b *LocalBackend) ProfileMkdirAll(id ipn.ProfileID, subs ...string) (string, error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.profileMkdirAllLocked(id, subs...) +} + +// profileDataPathLocked returns a path of a profile-specific (sub)directory +// inside the writable storage area for the given profile ID. It does not +// create or verify the existence of the path in the filesystem. +// If b.varRoot == "", it returns "". It panics if id is empty. +// +// The caller must hold b.mu. 
+func (b *LocalBackend) profileDataPathLocked(id ipn.ProfileID, subs ...string) string { + if id == "" { + panic("invalid empty profile ID") + } + vr := b.TailscaleVarRoot() + if vr == "" { + return "" + } + return filepath.Join(append([]string{vr, "profile-data", string(id)}, subs...)...) +} + +// profileMkdirAllLocked implements ProfileMkdirAll. +// The caller must hold b.mu. +func (b *LocalBackend) profileMkdirAllLocked(id ipn.ProfileID, subs ...string) (string, error) { + if id == "" { + return "", errProfileNotFound + } + if vr := b.TailscaleVarRoot(); vr == "" { + return "", ErrProfileStorageUnavailable + } + + // Use the LoginProfile ID rather than the UserProfile ID, as the latter may + // change over time. + dir := b.profileDataPathLocked(id, subs...) + if err := os.MkdirAll(dir, 0700); err != nil { + return "", fmt.Errorf("create profile directory: %w", err) + } + return dir, nil +} + // closePeerAPIListenersLocked closes any existing PeerAPI listeners // and clears out the PeerAPI server state. // @@ -7011,6 +7066,12 @@ func (b *LocalBackend) DeleteProfile(p ipn.ProfileID) error { } return err } + // Make a best-effort to remove the profile-specific data directory, if one exists. 
+ if pd := b.profileDataPathLocked(p); pd != "" { + if err := os.RemoveAll(pd); err != nil { + b.logf("warning: removing profile data for %q: %v", p, err) + } + } if !needToRestart { return nil } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index bcc5ebaf26dbf..23a3161ca47f1 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -2306,6 +2306,56 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { } } +func TestProfileMkdirAll(t *testing.T) { + t.Run("NoVarRoot", func(t *testing.T) { + b := newTestBackend(t) + b.SetVarRoot("") + + got, err := b.ProfileMkdirAll(b.CurrentProfile().ID()) + if got != "" || !errors.Is(err, ErrProfileStorageUnavailable) { + t.Errorf(`ProfileMkdirAll: got %q, %v; want "", %v`, got, err, ErrProfileStorageUnavailable) + } + }) + + t.Run("InvalidProfileID", func(t *testing.T) { + b := newTestBackend(t) + got, err := b.ProfileMkdirAll("") + if got != "" || !errors.Is(err, errProfileNotFound) { + t.Errorf("ProfileMkdirAll: got %q, %v; want %q, %v", got, err, "", errProfileNotFound) + } + }) + + t.Run("ProfileRoot", func(t *testing.T) { + b := newTestBackend(t) + want := filepath.Join(b.TailscaleVarRoot(), "profile-data", "id0") + + got, err := b.ProfileMkdirAll(b.CurrentProfile().ID()) + if err != nil || got != want { + t.Errorf("ProfileMkdirAll: got %q, %v, want %q, nil", got, err, want) + } + if fi, err := os.Stat(got); err != nil { + t.Errorf("Check directory: %v", err) + } else if !fi.IsDir() { + t.Errorf("Path %q is not a directory", got) + } + }) + + t.Run("ProfileSubdir", func(t *testing.T) { + b := newTestBackend(t) + want := filepath.Join(b.TailscaleVarRoot(), "profile-data", "id0", "a", "b") + + got, err := b.ProfileMkdirAll(b.CurrentProfile().ID(), "a", "b") + if err != nil || got != want { + t.Errorf("ProfileMkdirAll: got %q, %v, want %q, nil", got, err, want) + } + if fi, err := os.Stat(got); err != nil { + t.Errorf("Check directory: %v", err) + } else if !fi.IsDir() { + 
t.Errorf("Path %q is not a directory", got) + } + }) +} + func TestOfferingAppConnector(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) From 3ec5be3f510f74738179c1023468343a62a7e00f Mon Sep 17 00:00:00 2001 From: Will Norris Date: Fri, 23 Jan 2026 13:21:57 -0800 Subject: [PATCH 0891/1093] all: remove AUTHORS file and references to it This file was never truly necessary and has never actually been used in the history of Tailscale's open source releases. A Brief History of AUTHORS files --- The AUTHORS file was a pattern developed at Google, originally for Chromium, then adopted by Go and a bunch of other projects. The problem was that Chromium originally had a copyright line only recognizing Google as the copyright holder. Because Google (and most open source projects) do not require copyright assignment for contributions, each contributor maintains their copyright. Some large corporate contributors then tried to add their own name to the copyright line in the LICENSE file or in file headers. This quickly becomes unwieldy, and puts a tremendous burden on anyone building on top of Chromium, since the license requires that they keep all copyright lines intact. The compromise was to create an AUTHORS file that would list all of the copyright holders. The LICENSE file and source file headers would then include that list by reference, listing the copyright holder as "The Chromium Authors". This also became cumbersome to simply keep the file up to date with a high rate of new contributors. Plus it's not always obvious who the copyright holder is. Sometimes it is the individual making the contribution, but many times it may be their employer. There is no way for the project maintainer to know. Eventually, Google changed their policy to no longer recommend trying to keep the AUTHORS file up to date proactively, and instead to only add to it when requested: https://opensource.google/docs/releasing/authors.
They are also clear that: > Adding contributors to the AUTHORS file is entirely within the > project's discretion and has no implications for copyright ownership. It was primarily added to appease a small number of large contributors that insisted that they be recognized as copyright holders (which was entirely their right to do). But it's not truly necessary, and not even the most accurate way of identifying contributors and/or copyright holders. In practice, we've never added anyone to our AUTHORS file. It only lists Tailscale, so it's not really serving any purpose. It also causes confusion because Tailscalars put the "Tailscale Inc & AUTHORS" header in other open source repos which don't actually have an AUTHORS file, so it's ambiguous what that means. Instead, we just acknowledge that the contributors to Tailscale (whoever they are) are copyright holders for their individual contributions. We also have the benefit of using the DCO (developercertificate.org) which provides some additional certification of their right to make the contribution. 
The source file changes were purely mechanical with: git ls-files | xargs sed -i -e 's/\(Tailscale Inc &\) AUTHORS/\1 contributors/g' Updates #cleanup Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- AUTHORS | 17 ----------------- Dockerfile | 2 +- Dockerfile.base | 2 +- LICENSE | 2 +- appc/appconnector.go | 2 +- appc/appconnector_test.go | 2 +- appc/appctest/appctest.go | 2 +- appc/conn25.go | 2 +- appc/conn25_test.go | 2 +- appc/ippool.go | 2 +- appc/ippool_test.go | 2 +- appc/observe.go | 2 +- appc/observe_disabled.go | 2 +- assert_ts_toolchain_match.go | 2 +- atomicfile/atomicfile.go | 2 +- atomicfile/atomicfile_notwindows.go | 2 +- atomicfile/atomicfile_test.go | 2 +- atomicfile/atomicfile_windows.go | 2 +- atomicfile/atomicfile_windows_test.go | 2 +- atomicfile/mksyscall.go | 2 +- chirp/chirp.go | 2 +- chirp/chirp_test.go | 2 +- client/local/cert.go | 2 +- client/local/debugportmapper.go | 2 +- client/local/local.go | 2 +- client/local/local_test.go | 2 +- client/local/serve.go | 2 +- client/local/syspolicy.go | 2 +- client/local/tailnetlock.go | 2 +- client/systray/logo.go | 2 +- client/systray/startup-creator.go | 2 +- client/systray/systray.go | 2 +- client/tailscale/acl.go | 2 +- client/tailscale/apitype/apitype.go | 2 +- client/tailscale/apitype/controltype.go | 2 +- client/tailscale/cert.go | 2 +- client/tailscale/devices.go | 2 +- client/tailscale/dns.go | 2 +- client/tailscale/example/servetls/servetls.go | 2 +- client/tailscale/keys.go | 2 +- client/tailscale/localclient_aliases.go | 2 +- client/tailscale/required_version.go | 2 +- client/tailscale/routes.go | 2 +- client/tailscale/tailnet.go | 2 +- client/tailscale/tailscale.go | 2 +- client/tailscale/tailscale_test.go | 2 +- client/web/assets.go | 2 +- client/web/auth.go | 2 +- client/web/qnap.go | 2 +- client/web/src/api.ts | 2 +- client/web/src/components/acl-tag.tsx | 2 +- client/web/src/components/address-copy-card.tsx | 2 +- client/web/src/components/app.tsx 
| 2 +- .../web/src/components/control-components.tsx | 2 +- .../web/src/components/exit-node-selector.tsx | 2 +- client/web/src/components/login-toggle.tsx | 2 +- client/web/src/components/nice-ip.tsx | 2 +- client/web/src/components/update-available.tsx | 2 +- .../components/views/device-details-view.tsx | 2 +- .../src/components/views/disconnected-view.tsx | 2 +- client/web/src/components/views/home-view.tsx | 2 +- client/web/src/components/views/login-view.tsx | 2 +- client/web/src/components/views/ssh-view.tsx | 2 +- .../src/components/views/subnet-router-view.tsx | 2 +- .../web/src/components/views/updating-view.tsx | 2 +- client/web/src/hooks/auth.ts | 2 +- client/web/src/hooks/exit-nodes.ts | 2 +- client/web/src/hooks/self-update.ts | 2 +- client/web/src/hooks/toaster.ts | 2 +- client/web/src/hooks/ts-web-connected.ts | 2 +- client/web/src/index.tsx | 4 ++-- client/web/src/types.ts | 2 +- client/web/src/ui/badge.tsx | 2 +- client/web/src/ui/button.tsx | 2 +- client/web/src/ui/card.tsx | 2 +- client/web/src/ui/collapsible.tsx | 2 +- client/web/src/ui/dialog.tsx | 2 +- client/web/src/ui/empty-state.tsx | 2 +- client/web/src/ui/input.tsx | 2 +- client/web/src/ui/loading-dots.tsx | 2 +- client/web/src/ui/popover.tsx | 2 +- client/web/src/ui/portal-container-context.tsx | 2 +- client/web/src/ui/profile-pic.tsx | 2 +- client/web/src/ui/quick-copy.tsx | 2 +- client/web/src/ui/search-input.tsx | 2 +- client/web/src/ui/spinner.tsx | 2 +- client/web/src/ui/toaster.tsx | 2 +- client/web/src/ui/toggle.tsx | 2 +- client/web/src/utils/clipboard.ts | 2 +- client/web/src/utils/util.test.ts | 2 +- client/web/src/utils/util.ts | 2 +- client/web/synology.go | 2 +- client/web/web.go | 2 +- client/web/web_test.go | 2 +- clientupdate/clientupdate.go | 2 +- clientupdate/clientupdate_downloads.go | 2 +- clientupdate/clientupdate_not_downloads.go | 2 +- clientupdate/clientupdate_notwindows.go | 2 +- clientupdate/clientupdate_test.go | 2 +- clientupdate/clientupdate_windows.go | 2 +- 
clientupdate/distsign/distsign.go | 2 +- clientupdate/distsign/distsign_test.go | 2 +- clientupdate/distsign/roots.go | 2 +- clientupdate/distsign/roots_test.go | 2 +- cmd/addlicense/main.go | 4 ++-- cmd/build-webclient/build-webclient.go | 2 +- cmd/checkmetrics/checkmetrics.go | 2 +- cmd/cigocacher/cigocacher.go | 2 +- cmd/cigocacher/disk.go | 2 +- cmd/cigocacher/disk_notwindows.go | 2 +- cmd/cigocacher/disk_windows.go | 2 +- cmd/cigocacher/http.go | 2 +- cmd/cloner/cloner.go | 2 +- cmd/cloner/cloner_test.go | 2 +- cmd/cloner/clonerex/clonerex.go | 2 +- cmd/cloner/clonerex/clonerex_clone.go | 2 +- cmd/connector-gen/advertise-routes.go | 2 +- cmd/connector-gen/aws.go | 2 +- cmd/connector-gen/connector-gen.go | 2 +- cmd/connector-gen/github.go | 2 +- cmd/containerboot/egressservices.go | 2 +- cmd/containerboot/egressservices_test.go | 2 +- cmd/containerboot/forwarding.go | 2 +- cmd/containerboot/ingressservices.go | 2 +- cmd/containerboot/ingressservices_test.go | 2 +- cmd/containerboot/kube.go | 2 +- cmd/containerboot/kube_test.go | 2 +- cmd/containerboot/main.go | 2 +- cmd/containerboot/main_test.go | 2 +- cmd/containerboot/serve.go | 2 +- cmd/containerboot/serve_test.go | 2 +- cmd/containerboot/settings.go | 2 +- cmd/containerboot/settings_test.go | 2 +- cmd/containerboot/tailscaled.go | 2 +- cmd/derper/ace.go | 2 +- cmd/derper/bootstrap_dns.go | 2 +- cmd/derper/bootstrap_dns_test.go | 2 +- cmd/derper/cert.go | 2 +- cmd/derper/cert_test.go | 2 +- cmd/derper/derper.go | 2 +- cmd/derper/derper_test.go | 2 +- cmd/derper/mesh.go | 2 +- cmd/derper/websocket.go | 2 +- cmd/derpprobe/derpprobe.go | 2 +- cmd/dist/dist.go | 2 +- cmd/distsign/distsign.go | 2 +- cmd/featuretags/featuretags.go | 2 +- cmd/get-authkey/main.go | 2 +- cmd/gitops-pusher/cache.go | 2 +- cmd/gitops-pusher/gitops-pusher.go | 2 +- cmd/gitops-pusher/gitops-pusher_test.go | 2 +- cmd/hello/hello.go | 2 +- cmd/jsonimports/format.go | 2 +- cmd/jsonimports/format_test.go | 2 +- 
cmd/jsonimports/jsonimports.go | 2 +- cmd/k8s-nameserver/main.go | 2 +- cmd/k8s-nameserver/main_test.go | 2 +- cmd/k8s-operator/api-server-proxy-pg.go | 2 +- cmd/k8s-operator/api-server-proxy-pg_test.go | 2 +- cmd/k8s-operator/api-server-proxy.go | 2 +- cmd/k8s-operator/connector.go | 2 +- cmd/k8s-operator/connector_test.go | 2 +- cmd/k8s-operator/deploy/chart/Chart.yaml | 2 +- .../chart/templates/apiserverproxy-rbac.yaml | 2 +- .../deploy/chart/templates/deployment.yaml | 2 +- .../deploy/chart/templates/oauth-secret.yaml | 2 +- .../deploy/chart/templates/operator-rbac.yaml | 2 +- .../deploy/chart/templates/proxy-rbac.yaml | 2 +- cmd/k8s-operator/deploy/chart/values.yaml | 2 +- .../deploy/manifests/authproxy-rbac.yaml | 2 +- cmd/k8s-operator/deploy/manifests/operator.yaml | 2 +- .../deploy/manifests/templates/01-header.yaml | 2 +- cmd/k8s-operator/dnsrecords.go | 2 +- cmd/k8s-operator/dnsrecords_test.go | 2 +- cmd/k8s-operator/e2e/doc.go | 2 +- cmd/k8s-operator/e2e/ingress_test.go | 2 +- cmd/k8s-operator/e2e/main_test.go | 2 +- cmd/k8s-operator/e2e/pebble.go | 2 +- cmd/k8s-operator/e2e/proxy_test.go | 2 +- cmd/k8s-operator/e2e/setup.go | 2 +- cmd/k8s-operator/e2e/ssh.go | 2 +- cmd/k8s-operator/egress-eps.go | 2 +- cmd/k8s-operator/egress-eps_test.go | 2 +- cmd/k8s-operator/egress-pod-readiness.go | 2 +- cmd/k8s-operator/egress-pod-readiness_test.go | 2 +- cmd/k8s-operator/egress-services-readiness.go | 2 +- .../egress-services-readiness_test.go | 2 +- cmd/k8s-operator/egress-services.go | 2 +- cmd/k8s-operator/egress-services_test.go | 2 +- cmd/k8s-operator/generate/main.go | 2 +- cmd/k8s-operator/generate/main_test.go | 2 +- cmd/k8s-operator/ingress-for-pg.go | 2 +- cmd/k8s-operator/ingress-for-pg_test.go | 2 +- cmd/k8s-operator/ingress.go | 2 +- cmd/k8s-operator/ingress_test.go | 2 +- cmd/k8s-operator/logger.go | 2 +- cmd/k8s-operator/metrics_resources.go | 2 +- cmd/k8s-operator/nameserver.go | 2 +- cmd/k8s-operator/nameserver_test.go | 2 +- 
cmd/k8s-operator/nodeport-service-ports.go | 2 +- .../nodeport-services-ports_test.go | 2 +- cmd/k8s-operator/operator.go | 2 +- cmd/k8s-operator/operator_test.go | 2 +- cmd/k8s-operator/proxyclass.go | 2 +- cmd/k8s-operator/proxyclass_test.go | 2 +- cmd/k8s-operator/proxygroup.go | 2 +- cmd/k8s-operator/proxygroup_specs.go | 2 +- cmd/k8s-operator/proxygroup_test.go | 2 +- cmd/k8s-operator/sts.go | 2 +- cmd/k8s-operator/sts_test.go | 2 +- cmd/k8s-operator/svc-for-pg.go | 2 +- cmd/k8s-operator/svc-for-pg_test.go | 2 +- cmd/k8s-operator/svc.go | 2 +- cmd/k8s-operator/tailnet.go | 2 +- cmd/k8s-operator/testutils_test.go | 2 +- cmd/k8s-operator/tsclient.go | 2 +- cmd/k8s-operator/tsclient_test.go | 2 +- cmd/k8s-operator/tsrecorder.go | 2 +- cmd/k8s-operator/tsrecorder_specs.go | 2 +- cmd/k8s-operator/tsrecorder_specs_test.go | 2 +- cmd/k8s-operator/tsrecorder_test.go | 2 +- cmd/k8s-proxy/internal/config/config.go | 2 +- cmd/k8s-proxy/internal/config/config_test.go | 2 +- cmd/k8s-proxy/k8s-proxy.go | 2 +- cmd/mkmanifest/main.go | 2 +- cmd/mkpkg/main.go | 2 +- cmd/mkversion/mkversion.go | 2 +- cmd/nardump/nardump.go | 2 +- cmd/nardump/nardump_test.go | 2 +- cmd/natc/ippool/consensusippool.go | 2 +- cmd/natc/ippool/consensusippool_test.go | 2 +- cmd/natc/ippool/consensusippoolserialize.go | 2 +- cmd/natc/ippool/ippool.go | 2 +- cmd/natc/ippool/ippool_test.go | 2 +- cmd/natc/ippool/ipx.go | 2 +- cmd/natc/ippool/ipx_test.go | 2 +- cmd/natc/natc.go | 2 +- cmd/natc/natc_test.go | 2 +- cmd/netlogfmt/main.go | 2 +- cmd/nginx-auth/nginx-auth.go | 2 +- cmd/omitsize/omitsize.go | 2 +- cmd/pgproxy/pgproxy.go | 2 +- cmd/printdep/printdep.go | 2 +- cmd/proxy-test-server/proxy-test-server.go | 2 +- cmd/proxy-to-grafana/proxy-to-grafana.go | 2 +- cmd/proxy-to-grafana/proxy-to-grafana_test.go | 2 +- cmd/sniproxy/handlers.go | 2 +- cmd/sniproxy/handlers_test.go | 2 +- cmd/sniproxy/server.go | 2 +- cmd/sniproxy/server_test.go | 2 +- cmd/sniproxy/sniproxy.go | 2 +- 
cmd/sniproxy/sniproxy_test.go | 2 +- cmd/speedtest/speedtest.go | 2 +- cmd/ssh-auth-none-demo/ssh-auth-none-demo.go | 2 +- cmd/stunc/stunc.go | 2 +- cmd/stund/stund.go | 2 +- cmd/stunstamp/stunstamp.go | 2 +- cmd/stunstamp/stunstamp_default.go | 2 +- cmd/stunstamp/stunstamp_linux.go | 2 +- cmd/sync-containers/main.go | 2 +- cmd/systray/systray.go | 2 +- cmd/tailscale/cli/appcroutes.go | 2 +- cmd/tailscale/cli/bugreport.go | 2 +- cmd/tailscale/cli/cert.go | 2 +- cmd/tailscale/cli/cli.go | 2 +- cmd/tailscale/cli/cli_test.go | 2 +- cmd/tailscale/cli/configure-jetkvm.go | 2 +- cmd/tailscale/cli/configure-kube.go | 2 +- cmd/tailscale/cli/configure-kube_omit.go | 2 +- cmd/tailscale/cli/configure-kube_test.go | 2 +- cmd/tailscale/cli/configure-synology-cert.go | 2 +- .../cli/configure-synology-cert_test.go | 2 +- cmd/tailscale/cli/configure-synology.go | 2 +- cmd/tailscale/cli/configure.go | 2 +- cmd/tailscale/cli/configure_apple-all.go | 2 +- cmd/tailscale/cli/configure_apple.go | 2 +- cmd/tailscale/cli/configure_linux-all.go | 2 +- cmd/tailscale/cli/configure_linux.go | 2 +- cmd/tailscale/cli/debug-capture.go | 2 +- cmd/tailscale/cli/debug-peer-relay.go | 2 +- cmd/tailscale/cli/debug-portmap.go | 2 +- cmd/tailscale/cli/debug.go | 2 +- cmd/tailscale/cli/diag.go | 2 +- cmd/tailscale/cli/dns-query.go | 2 +- cmd/tailscale/cli/dns-status.go | 2 +- cmd/tailscale/cli/dns.go | 2 +- cmd/tailscale/cli/down.go | 2 +- cmd/tailscale/cli/drive.go | 2 +- cmd/tailscale/cli/exitnode.go | 2 +- cmd/tailscale/cli/exitnode_test.go | 2 +- cmd/tailscale/cli/ffcomplete/complete.go | 2 +- cmd/tailscale/cli/ffcomplete/complete_omit.go | 2 +- cmd/tailscale/cli/ffcomplete/ffcomplete.go | 2 +- .../cli/ffcomplete/internal/complete.go | 2 +- .../cli/ffcomplete/internal/complete_test.go | 2 +- cmd/tailscale/cli/ffcomplete/scripts.go | 2 +- cmd/tailscale/cli/ffcomplete/scripts_omit.go | 2 +- cmd/tailscale/cli/file.go | 2 +- cmd/tailscale/cli/funnel.go | 2 +- cmd/tailscale/cli/id-token.go | 2 +- 
cmd/tailscale/cli/ip.go | 2 +- cmd/tailscale/cli/jsonoutput/jsonoutput.go | 2 +- .../cli/jsonoutput/network-lock-log.go | 2 +- .../cli/jsonoutput/network-lock-status.go | 2 +- cmd/tailscale/cli/licenses.go | 2 +- cmd/tailscale/cli/login.go | 2 +- cmd/tailscale/cli/logout.go | 2 +- cmd/tailscale/cli/maybe_syspolicy.go | 2 +- cmd/tailscale/cli/metrics.go | 2 +- cmd/tailscale/cli/nc.go | 2 +- cmd/tailscale/cli/netcheck.go | 2 +- cmd/tailscale/cli/network-lock.go | 2 +- cmd/tailscale/cli/network-lock_test.go | 2 +- cmd/tailscale/cli/ping.go | 2 +- cmd/tailscale/cli/risks.go | 2 +- cmd/tailscale/cli/serve_legacy.go | 2 +- cmd/tailscale/cli/serve_legacy_test.go | 2 +- cmd/tailscale/cli/serve_v2.go | 2 +- cmd/tailscale/cli/serve_v2_test.go | 2 +- cmd/tailscale/cli/serve_v2_unix_test.go | 2 +- cmd/tailscale/cli/set.go | 2 +- cmd/tailscale/cli/set_test.go | 2 +- cmd/tailscale/cli/ssh.go | 2 +- cmd/tailscale/cli/ssh_exec.go | 2 +- cmd/tailscale/cli/ssh_exec_js.go | 2 +- cmd/tailscale/cli/ssh_exec_windows.go | 2 +- cmd/tailscale/cli/ssh_unix.go | 2 +- cmd/tailscale/cli/status.go | 2 +- cmd/tailscale/cli/switch.go | 2 +- cmd/tailscale/cli/syspolicy.go | 2 +- cmd/tailscale/cli/systray.go | 2 +- cmd/tailscale/cli/systray_omit.go | 2 +- cmd/tailscale/cli/up.go | 2 +- cmd/tailscale/cli/up_test.go | 2 +- cmd/tailscale/cli/update.go | 2 +- cmd/tailscale/cli/version.go | 2 +- cmd/tailscale/cli/web.go | 2 +- cmd/tailscale/cli/web_test.go | 2 +- cmd/tailscale/cli/whois.go | 2 +- cmd/tailscale/deps_test.go | 2 +- cmd/tailscale/generate.go | 2 +- cmd/tailscale/tailscale.go | 2 +- cmd/tailscale/tailscale_test.go | 2 +- cmd/tailscaled/childproc/childproc.go | 2 +- cmd/tailscaled/debug.go | 2 +- cmd/tailscaled/debug_forcereflect.go | 2 +- cmd/tailscaled/deps_test.go | 2 +- cmd/tailscaled/flag.go | 2 +- cmd/tailscaled/generate.go | 2 +- cmd/tailscaled/install_darwin.go | 2 +- cmd/tailscaled/install_windows.go | 2 +- cmd/tailscaled/netstack.go | 2 +- cmd/tailscaled/proxy.go | 2 +- 
cmd/tailscaled/required_version.go | 2 +- cmd/tailscaled/sigpipe.go | 2 +- cmd/tailscaled/ssh.go | 2 +- cmd/tailscaled/tailscaled.go | 2 +- cmd/tailscaled/tailscaled_bird.go | 2 +- cmd/tailscaled/tailscaled_drive.go | 2 +- cmd/tailscaled/tailscaled_notwindows.go | 2 +- cmd/tailscaled/tailscaled_test.go | 2 +- cmd/tailscaled/tailscaled_windows.go | 2 +- .../tailscaledhooks/tailscaledhooks.go | 2 +- cmd/tailscaled/webclient.go | 2 +- cmd/tailscaled/with_cli.go | 2 +- cmd/testcontrol/testcontrol.go | 2 +- cmd/testwrapper/args.go | 2 +- cmd/testwrapper/args_test.go | 2 +- cmd/testwrapper/flakytest/flakytest.go | 2 +- cmd/testwrapper/flakytest/flakytest_test.go | 2 +- cmd/testwrapper/testwrapper.go | 2 +- cmd/testwrapper/testwrapper_test.go | 2 +- cmd/tl-longchain/tl-longchain.go | 2 +- cmd/tsconnect/build-pkg.go | 2 +- cmd/tsconnect/build.go | 2 +- cmd/tsconnect/common.go | 2 +- cmd/tsconnect/dev-pkg.go | 2 +- cmd/tsconnect/dev.go | 2 +- cmd/tsconnect/package.json.tmpl | 2 +- cmd/tsconnect/serve.go | 2 +- cmd/tsconnect/src/app/app.tsx | 2 +- cmd/tsconnect/src/app/go-panic-display.tsx | 2 +- cmd/tsconnect/src/app/header.tsx | 2 +- cmd/tsconnect/src/app/index.css | 2 +- cmd/tsconnect/src/app/index.ts | 2 +- cmd/tsconnect/src/app/ssh.tsx | 2 +- cmd/tsconnect/src/app/url-display.tsx | 2 +- cmd/tsconnect/src/lib/js-state-store.ts | 2 +- cmd/tsconnect/src/lib/ssh.ts | 2 +- cmd/tsconnect/src/pkg/pkg.css | 2 +- cmd/tsconnect/src/pkg/pkg.ts | 2 +- cmd/tsconnect/src/types/esbuild.d.ts | 2 +- cmd/tsconnect/src/types/wasm_js.d.ts | 2 +- cmd/tsconnect/tsconnect.go | 2 +- cmd/tsconnect/wasm/wasm_js.go | 2 +- cmd/tsidp/tsidp.go | 2 +- cmd/tsidp/tsidp_test.go | 2 +- cmd/tsidp/ui.go | 2 +- cmd/tsshd/tsshd.go | 2 +- cmd/tta/fw_linux.go | 2 +- cmd/tta/tta.go | 2 +- cmd/vet/jsontags/analyzer.go | 2 +- cmd/vet/jsontags/iszero.go | 2 +- cmd/vet/jsontags/report.go | 2 +- cmd/vet/vet.go | 2 +- cmd/viewer/tests/tests.go | 2 +- cmd/viewer/tests/tests_clone.go | 2 +- 
cmd/viewer/tests/tests_view.go | 2 +- cmd/viewer/viewer.go | 2 +- cmd/viewer/viewer_test.go | 2 +- cmd/vnet/vnet-main.go | 2 +- cmd/xdpderper/xdpderper.go | 2 +- control/controlbase/conn.go | 2 +- control/controlbase/conn_test.go | 2 +- control/controlbase/handshake.go | 2 +- control/controlbase/handshake_test.go | 2 +- control/controlbase/interop_test.go | 2 +- control/controlbase/messages.go | 2 +- control/controlclient/auto.go | 2 +- control/controlclient/client.go | 2 +- control/controlclient/controlclient_test.go | 2 +- control/controlclient/direct.go | 2 +- control/controlclient/direct_test.go | 2 +- control/controlclient/errors.go | 2 +- control/controlclient/map.go | 2 +- control/controlclient/map_test.go | 2 +- control/controlclient/sign.go | 2 +- control/controlclient/sign_supported.go | 2 +- control/controlclient/sign_supported_test.go | 2 +- control/controlclient/sign_unsupported.go | 2 +- control/controlclient/status.go | 2 +- control/controlhttp/client.go | 2 +- control/controlhttp/client_common.go | 2 +- control/controlhttp/client_js.go | 2 +- control/controlhttp/constants.go | 2 +- .../controlhttpcommon/controlhttpcommon.go | 2 +- .../controlhttpserver/controlhttpserver.go | 2 +- control/controlhttp/http_test.go | 2 +- control/controlknobs/controlknobs.go | 2 +- control/controlknobs/controlknobs_test.go | 2 +- control/ts2021/client.go | 2 +- control/ts2021/client_test.go | 2 +- control/ts2021/conn.go | 2 +- derp/client_test.go | 2 +- derp/derp.go | 2 +- derp/derp_client.go | 2 +- derp/derp_test.go | 2 +- derp/derpconst/derpconst.go | 2 +- derp/derphttp/derphttp_client.go | 2 +- derp/derphttp/derphttp_test.go | 2 +- derp/derphttp/export_test.go | 2 +- derp/derphttp/mesh_client.go | 2 +- derp/derphttp/websocket.go | 2 +- derp/derphttp/websocket_stub.go | 2 +- derp/derpserver/derpserver.go | 2 +- derp/derpserver/derpserver_default.go | 2 +- derp/derpserver/derpserver_linux.go | 2 +- derp/derpserver/derpserver_test.go | 2 +- derp/derpserver/handler.go | 
2 +- derp/export_test.go | 2 +- derp/xdp/headers/update.go | 2 +- derp/xdp/xdp.go | 2 +- derp/xdp/xdp_default.go | 2 +- derp/xdp/xdp_linux.go | 2 +- derp/xdp/xdp_linux_test.go | 2 +- disco/disco.go | 2 +- disco/disco_fuzzer.go | 2 +- disco/disco_test.go | 2 +- disco/pcap.go | 2 +- docs/k8s/Makefile | 2 +- docs/k8s/proxy.yaml | 2 +- docs/k8s/role.yaml | 2 +- docs/k8s/rolebinding.yaml | 2 +- docs/k8s/sa.yaml | 2 +- docs/k8s/sidecar.yaml | 2 +- docs/k8s/subnet.yaml | 2 +- docs/k8s/userspace-sidecar.yaml | 2 +- docs/sysv/tailscale.init | 2 +- docs/webhooks/example.go | 2 +- doctor/doctor.go | 2 +- doctor/doctor_test.go | 2 +- doctor/ethtool/ethtool.go | 2 +- doctor/ethtool/ethtool_linux.go | 2 +- doctor/ethtool/ethtool_other.go | 2 +- doctor/permissions/permissions.go | 2 +- doctor/permissions/permissions_bsd.go | 2 +- doctor/permissions/permissions_linux.go | 2 +- doctor/permissions/permissions_other.go | 2 +- doctor/permissions/permissions_test.go | 2 +- doctor/routetable/routetable.go | 2 +- drive/drive_clone.go | 2 +- drive/drive_view.go | 2 +- drive/driveimpl/birthtiming.go | 2 +- drive/driveimpl/birthtiming_test.go | 2 +- drive/driveimpl/compositedav/compositedav.go | 2 +- drive/driveimpl/compositedav/rewriting.go | 2 +- drive/driveimpl/compositedav/stat_cache.go | 2 +- drive/driveimpl/compositedav/stat_cache_test.go | 2 +- drive/driveimpl/connlistener.go | 2 +- drive/driveimpl/connlistener_test.go | 2 +- drive/driveimpl/dirfs/dirfs.go | 2 +- drive/driveimpl/dirfs/dirfs_test.go | 2 +- drive/driveimpl/dirfs/mkdir.go | 2 +- drive/driveimpl/dirfs/openfile.go | 2 +- drive/driveimpl/dirfs/removeall.go | 2 +- drive/driveimpl/dirfs/rename.go | 2 +- drive/driveimpl/dirfs/stat.go | 2 +- drive/driveimpl/drive_test.go | 2 +- drive/driveimpl/fileserver.go | 2 +- drive/driveimpl/local_impl.go | 2 +- drive/driveimpl/remote_impl.go | 2 +- drive/driveimpl/shared/pathutil.go | 2 +- drive/driveimpl/shared/pathutil_test.go | 2 +- drive/driveimpl/shared/readonlydir.go | 2 +- 
drive/driveimpl/shared/stat.go | 2 +- drive/driveimpl/shared/xml.go | 2 +- drive/local.go | 2 +- drive/remote.go | 2 +- drive/remote_nonunix.go | 2 +- drive/remote_permissions.go | 2 +- drive/remote_permissions_test.go | 2 +- drive/remote_test.go | 2 +- drive/remote_unix.go | 2 +- envknob/envknob.go | 2 +- envknob/envknob_nottest.go | 2 +- envknob/envknob_testable.go | 2 +- envknob/featureknob/featureknob.go | 2 +- envknob/logknob/logknob.go | 2 +- envknob/logknob/logknob_test.go | 2 +- feature/ace/ace.go | 2 +- feature/appconnectors/appconnectors.go | 2 +- feature/buildfeatures/buildfeatures.go | 2 +- feature/buildfeatures/feature_ace_disabled.go | 2 +- feature/buildfeatures/feature_ace_enabled.go | 2 +- feature/buildfeatures/feature_acme_disabled.go | 2 +- feature/buildfeatures/feature_acme_enabled.go | 2 +- .../feature_advertiseexitnode_disabled.go | 2 +- .../feature_advertiseexitnode_enabled.go | 2 +- .../feature_advertiseroutes_disabled.go | 2 +- .../feature_advertiseroutes_enabled.go | 2 +- .../feature_appconnectors_disabled.go | 2 +- .../feature_appconnectors_enabled.go | 2 +- feature/buildfeatures/feature_aws_disabled.go | 2 +- feature/buildfeatures/feature_aws_enabled.go | 2 +- .../feature_bakedroots_disabled.go | 2 +- .../buildfeatures/feature_bakedroots_enabled.go | 2 +- feature/buildfeatures/feature_bird_disabled.go | 2 +- feature/buildfeatures/feature_bird_enabled.go | 2 +- feature/buildfeatures/feature_c2n_disabled.go | 2 +- feature/buildfeatures/feature_c2n_enabled.go | 2 +- .../feature_cachenetmap_disabled.go | 2 +- .../feature_cachenetmap_enabled.go | 2 +- .../feature_captiveportal_disabled.go | 2 +- .../feature_captiveportal_enabled.go | 2 +- .../buildfeatures/feature_capture_disabled.go | 2 +- .../buildfeatures/feature_capture_enabled.go | 2 +- .../feature_cliconndiag_disabled.go | 2 +- .../feature_cliconndiag_enabled.go | 2 +- .../feature_clientmetrics_disabled.go | 2 +- .../feature_clientmetrics_enabled.go | 2 +- 
.../feature_clientupdate_disabled.go | 2 +- .../feature_clientupdate_enabled.go | 2 +- feature/buildfeatures/feature_cloud_disabled.go | 2 +- feature/buildfeatures/feature_cloud_enabled.go | 2 +- .../feature_completion_disabled.go | 2 +- .../buildfeatures/feature_completion_enabled.go | 2 +- feature/buildfeatures/feature_dbus_disabled.go | 2 +- feature/buildfeatures/feature_dbus_enabled.go | 2 +- feature/buildfeatures/feature_debug_disabled.go | 2 +- feature/buildfeatures/feature_debug_enabled.go | 2 +- .../feature_debugeventbus_disabled.go | 2 +- .../feature_debugeventbus_enabled.go | 2 +- .../feature_debugportmapper_disabled.go | 2 +- .../feature_debugportmapper_enabled.go | 2 +- .../feature_desktop_sessions_disabled.go | 2 +- .../feature_desktop_sessions_enabled.go | 2 +- feature/buildfeatures/feature_dns_disabled.go | 2 +- feature/buildfeatures/feature_dns_enabled.go | 2 +- .../buildfeatures/feature_doctor_disabled.go | 2 +- feature/buildfeatures/feature_doctor_enabled.go | 2 +- feature/buildfeatures/feature_drive_disabled.go | 2 +- feature/buildfeatures/feature_drive_enabled.go | 2 +- feature/buildfeatures/feature_gro_disabled.go | 2 +- feature/buildfeatures/feature_gro_enabled.go | 2 +- .../buildfeatures/feature_health_disabled.go | 2 +- feature/buildfeatures/feature_health_enabled.go | 2 +- .../feature_hujsonconf_disabled.go | 2 +- .../buildfeatures/feature_hujsonconf_enabled.go | 2 +- .../feature_identityfederation_disabled.go | 2 +- .../feature_identityfederation_enabled.go | 2 +- .../buildfeatures/feature_iptables_disabled.go | 2 +- .../buildfeatures/feature_iptables_enabled.go | 2 +- feature/buildfeatures/feature_kube_disabled.go | 2 +- feature/buildfeatures/feature_kube_enabled.go | 2 +- .../buildfeatures/feature_lazywg_disabled.go | 2 +- feature/buildfeatures/feature_lazywg_enabled.go | 2 +- .../buildfeatures/feature_linkspeed_disabled.go | 2 +- .../buildfeatures/feature_linkspeed_enabled.go | 2 +- .../feature_linuxdnsfight_disabled.go | 2 +- 
.../feature_linuxdnsfight_enabled.go | 2 +- .../feature_listenrawdisco_disabled.go | 2 +- .../feature_listenrawdisco_enabled.go | 2 +- .../buildfeatures/feature_logtail_disabled.go | 2 +- .../buildfeatures/feature_logtail_enabled.go | 2 +- .../buildfeatures/feature_netlog_disabled.go | 2 +- feature/buildfeatures/feature_netlog_enabled.go | 2 +- .../buildfeatures/feature_netstack_disabled.go | 2 +- .../buildfeatures/feature_netstack_enabled.go | 2 +- .../feature_networkmanager_disabled.go | 2 +- .../feature_networkmanager_enabled.go | 2 +- .../buildfeatures/feature_oauthkey_disabled.go | 2 +- .../buildfeatures/feature_oauthkey_enabled.go | 2 +- .../buildfeatures/feature_osrouter_disabled.go | 2 +- .../buildfeatures/feature_osrouter_enabled.go | 2 +- .../feature_outboundproxy_disabled.go | 2 +- .../feature_outboundproxy_enabled.go | 2 +- .../feature_peerapiclient_disabled.go | 2 +- .../feature_peerapiclient_enabled.go | 2 +- .../feature_peerapiserver_disabled.go | 2 +- .../feature_peerapiserver_enabled.go | 2 +- .../buildfeatures/feature_portlist_disabled.go | 2 +- .../buildfeatures/feature_portlist_enabled.go | 2 +- .../feature_portmapper_disabled.go | 2 +- .../buildfeatures/feature_portmapper_enabled.go | 2 +- .../buildfeatures/feature_posture_disabled.go | 2 +- .../buildfeatures/feature_posture_enabled.go | 2 +- .../buildfeatures/feature_qrcodes_disabled.go | 2 +- .../buildfeatures/feature_qrcodes_enabled.go | 2 +- .../feature_relayserver_disabled.go | 2 +- .../feature_relayserver_enabled.go | 2 +- .../buildfeatures/feature_resolved_disabled.go | 2 +- .../buildfeatures/feature_resolved_enabled.go | 2 +- .../buildfeatures/feature_sdnotify_disabled.go | 2 +- .../buildfeatures/feature_sdnotify_enabled.go | 2 +- feature/buildfeatures/feature_serve_disabled.go | 2 +- feature/buildfeatures/feature_serve_enabled.go | 2 +- feature/buildfeatures/feature_ssh_disabled.go | 2 +- feature/buildfeatures/feature_ssh_enabled.go | 2 +- .../buildfeatures/feature_synology_disabled.go 
| 2 +- .../buildfeatures/feature_synology_enabled.go | 2 +- .../buildfeatures/feature_syspolicy_disabled.go | 2 +- .../buildfeatures/feature_syspolicy_enabled.go | 2 +- .../buildfeatures/feature_systray_disabled.go | 2 +- .../buildfeatures/feature_systray_enabled.go | 2 +- .../buildfeatures/feature_taildrop_disabled.go | 2 +- .../buildfeatures/feature_taildrop_enabled.go | 2 +- .../feature_tailnetlock_disabled.go | 2 +- .../feature_tailnetlock_enabled.go | 2 +- feature/buildfeatures/feature_tap_disabled.go | 2 +- feature/buildfeatures/feature_tap_enabled.go | 2 +- feature/buildfeatures/feature_tpm_disabled.go | 2 +- feature/buildfeatures/feature_tpm_enabled.go | 2 +- .../feature_unixsocketidentity_disabled.go | 2 +- .../feature_unixsocketidentity_enabled.go | 2 +- .../feature_useexitnode_disabled.go | 2 +- .../feature_useexitnode_enabled.go | 2 +- .../buildfeatures/feature_useproxy_disabled.go | 2 +- .../buildfeatures/feature_useproxy_enabled.go | 2 +- .../feature_usermetrics_disabled.go | 2 +- .../feature_usermetrics_enabled.go | 2 +- .../buildfeatures/feature_useroutes_disabled.go | 2 +- .../buildfeatures/feature_useroutes_enabled.go | 2 +- .../buildfeatures/feature_wakeonlan_disabled.go | 2 +- .../buildfeatures/feature_wakeonlan_enabled.go | 2 +- .../buildfeatures/feature_webclient_disabled.go | 2 +- .../buildfeatures/feature_webclient_enabled.go | 2 +- feature/buildfeatures/gen.go | 4 ++-- feature/c2n/c2n.go | 2 +- feature/capture/capture.go | 2 +- feature/capture/dissector/dissector.go | 2 +- feature/clientupdate/clientupdate.go | 2 +- feature/condlite/expvar/expvar.go | 2 +- feature/condlite/expvar/omit.go | 2 +- feature/condregister/condregister.go | 2 +- feature/condregister/identityfederation/doc.go | 2 +- .../maybe_identityfederation.go | 2 +- feature/condregister/maybe_ace.go | 2 +- feature/condregister/maybe_appconnectors.go | 2 +- feature/condregister/maybe_c2n.go | 2 +- feature/condregister/maybe_capture.go | 2 +- 
feature/condregister/maybe_clientupdate.go | 2 +- feature/condregister/maybe_conn25.go | 2 +- feature/condregister/maybe_debugportmapper.go | 2 +- feature/condregister/maybe_doctor.go | 2 +- feature/condregister/maybe_drive.go | 2 +- feature/condregister/maybe_linkspeed.go | 2 +- feature/condregister/maybe_linuxdnsfight.go | 2 +- feature/condregister/maybe_osrouter.go | 2 +- feature/condregister/maybe_portlist.go | 2 +- feature/condregister/maybe_posture.go | 2 +- feature/condregister/maybe_relayserver.go | 2 +- feature/condregister/maybe_sdnotify.go | 2 +- feature/condregister/maybe_store_aws.go | 2 +- feature/condregister/maybe_store_kube.go | 2 +- feature/condregister/maybe_syspolicy.go | 2 +- feature/condregister/maybe_taildrop.go | 2 +- feature/condregister/maybe_tap.go | 2 +- feature/condregister/maybe_tpm.go | 2 +- feature/condregister/maybe_wakeonlan.go | 2 +- feature/condregister/oauthkey/doc.go | 2 +- feature/condregister/oauthkey/maybe_oauthkey.go | 2 +- feature/condregister/portmapper/doc.go | 2 +- .../condregister/portmapper/maybe_portmapper.go | 2 +- feature/condregister/useproxy/doc.go | 2 +- feature/condregister/useproxy/useproxy.go | 2 +- feature/conn25/conn25.go | 2 +- feature/debugportmapper/debugportmapper.go | 2 +- feature/doctor/doctor.go | 2 +- feature/drive/drive.go | 2 +- feature/feature.go | 2 +- feature/featuretags/featuretags.go | 2 +- feature/featuretags/featuretags_test.go | 2 +- feature/hooks.go | 2 +- .../identityfederation/identityfederation.go | 2 +- .../identityfederation_test.go | 2 +- feature/linkspeed/doc.go | 2 +- feature/linkspeed/linkspeed_linux.go | 2 +- feature/linuxdnsfight/linuxdnsfight.go | 2 +- feature/linuxdnsfight/linuxdnsfight_test.go | 2 +- feature/oauthkey/oauthkey.go | 2 +- feature/oauthkey/oauthkey_test.go | 2 +- feature/portlist/portlist.go | 2 +- feature/portmapper/portmapper.go | 2 +- feature/posture/posture.go | 2 +- feature/relayserver/relayserver.go | 2 +- feature/relayserver/relayserver_test.go | 2 +- 
feature/sdnotify.go | 2 +- feature/sdnotify/sdnotify.go | 2 +- feature/sdnotify/sdnotify_linux.go | 2 +- feature/syspolicy/syspolicy.go | 2 +- feature/taildrop/delete.go | 2 +- feature/taildrop/delete_test.go | 2 +- feature/taildrop/doc.go | 2 +- feature/taildrop/ext.go | 2 +- feature/taildrop/fileops.go | 2 +- feature/taildrop/fileops_fs.go | 2 +- feature/taildrop/integration_test.go | 2 +- feature/taildrop/localapi.go | 2 +- feature/taildrop/paths.go | 2 +- feature/taildrop/peerapi.go | 2 +- feature/taildrop/peerapi_test.go | 2 +- feature/taildrop/resume.go | 2 +- feature/taildrop/resume_test.go | 2 +- feature/taildrop/retrieve.go | 2 +- feature/taildrop/send.go | 2 +- feature/taildrop/send_test.go | 2 +- feature/taildrop/taildrop.go | 2 +- feature/taildrop/taildrop_test.go | 2 +- feature/taildrop/target_test.go | 2 +- feature/tap/tap_linux.go | 2 +- feature/tpm/attestation.go | 2 +- feature/tpm/attestation_test.go | 2 +- feature/tpm/tpm.go | 2 +- feature/tpm/tpm_linux.go | 2 +- feature/tpm/tpm_other.go | 2 +- feature/tpm/tpm_test.go | 2 +- feature/tpm/tpm_windows.go | 2 +- feature/useproxy/useproxy.go | 2 +- feature/wakeonlan/wakeonlan.go | 2 +- gokrazy/build.go | 2 +- gokrazy/tidy-deps.go | 2 +- gomod_test.go | 2 +- header.txt | 2 +- health/args.go | 2 +- health/health.go | 2 +- health/health_test.go | 2 +- health/healthmsg/healthmsg.go | 2 +- health/state.go | 2 +- health/usermetrics.go | 2 +- health/usermetrics_omit.go | 2 +- health/warnings.go | 2 +- hostinfo/hostinfo.go | 2 +- hostinfo/hostinfo_container_linux_test.go | 2 +- hostinfo/hostinfo_darwin.go | 2 +- hostinfo/hostinfo_freebsd.go | 2 +- hostinfo/hostinfo_linux.go | 2 +- hostinfo/hostinfo_linux_test.go | 2 +- hostinfo/hostinfo_plan9.go | 2 +- hostinfo/hostinfo_test.go | 2 +- hostinfo/hostinfo_uname.go | 2 +- hostinfo/hostinfo_windows.go | 2 +- hostinfo/packagetype_container.go | 2 +- internal/client/tailscale/identityfederation.go | 2 +- internal/client/tailscale/oauthkeys.go | 2 +- 
internal/client/tailscale/tailscale.go | 2 +- internal/client/tailscale/vip_service.go | 2 +- internal/tooldeps/tooldeps.go | 2 +- ipn/auditlog/auditlog.go | 2 +- ipn/auditlog/auditlog_test.go | 2 +- ipn/auditlog/extension.go | 2 +- ipn/auditlog/store.go | 2 +- ipn/backend.go | 2 +- ipn/backend_test.go | 2 +- ipn/conf.go | 2 +- ipn/conffile/cloudconf.go | 2 +- ipn/conffile/conffile.go | 2 +- ipn/conffile/conffile_hujson.go | 2 +- ipn/conffile/serveconf.go | 2 +- ipn/desktop/doc.go | 2 +- ipn/desktop/extension.go | 2 +- ipn/desktop/mksyscall.go | 2 +- ipn/desktop/session.go | 2 +- ipn/desktop/sessions.go | 2 +- ipn/desktop/sessions_notwindows.go | 2 +- ipn/desktop/sessions_windows.go | 2 +- ipn/doc.go | 2 +- ipn/ipn_clone.go | 2 +- ipn/ipn_test.go | 2 +- ipn/ipn_view.go | 2 +- ipn/ipnauth/access.go | 2 +- ipn/ipnauth/actor.go | 2 +- ipn/ipnauth/actor_windows.go | 2 +- ipn/ipnauth/ipnauth.go | 2 +- ipn/ipnauth/ipnauth_omit_unixsocketidentity.go | 2 +- ipn/ipnauth/ipnauth_unix_creds.go | 2 +- ipn/ipnauth/ipnauth_windows.go | 2 +- ipn/ipnauth/policy.go | 2 +- ipn/ipnauth/self.go | 2 +- ipn/ipnauth/test_actor.go | 2 +- ipn/ipnext/ipnext.go | 2 +- ipn/ipnlocal/breaktcp_darwin.go | 2 +- ipn/ipnlocal/breaktcp_linux.go | 2 +- ipn/ipnlocal/bus.go | 2 +- ipn/ipnlocal/bus_test.go | 2 +- ipn/ipnlocal/c2n.go | 2 +- ipn/ipnlocal/c2n_pprof.go | 2 +- ipn/ipnlocal/c2n_test.go | 2 +- ipn/ipnlocal/captiveportal.go | 2 +- ipn/ipnlocal/cert.go | 2 +- ipn/ipnlocal/cert_disabled.go | 2 +- ipn/ipnlocal/cert_test.go | 2 +- ipn/ipnlocal/dnsconfig_test.go | 2 +- ipn/ipnlocal/drive.go | 2 +- ipn/ipnlocal/drive_test.go | 2 +- ipn/ipnlocal/drive_tomove.go | 2 +- ipn/ipnlocal/expiry.go | 2 +- ipn/ipnlocal/expiry_test.go | 2 +- ipn/ipnlocal/extension_host.go | 2 +- ipn/ipnlocal/extension_host_test.go | 2 +- ipn/ipnlocal/hwattest.go | 2 +- ipn/ipnlocal/local.go | 2 +- ipn/ipnlocal/local_test.go | 2 +- ipn/ipnlocal/loglines_test.go | 2 +- ipn/ipnlocal/netstack.go | 2 +- ipn/ipnlocal/network-lock.go 
| 2 +- ipn/ipnlocal/network-lock_test.go | 2 +- ipn/ipnlocal/node_backend.go | 2 +- ipn/ipnlocal/node_backend_test.go | 2 +- ipn/ipnlocal/peerapi.go | 2 +- ipn/ipnlocal/peerapi_drive.go | 2 +- ipn/ipnlocal/peerapi_macios_ext.go | 2 +- ipn/ipnlocal/peerapi_test.go | 2 +- ipn/ipnlocal/prefs_metrics.go | 2 +- ipn/ipnlocal/profiles.go | 2 +- ipn/ipnlocal/profiles_notwindows.go | 2 +- ipn/ipnlocal/profiles_test.go | 2 +- ipn/ipnlocal/profiles_windows.go | 2 +- ipn/ipnlocal/serve.go | 2 +- ipn/ipnlocal/serve_disabled.go | 2 +- ipn/ipnlocal/serve_test.go | 2 +- ipn/ipnlocal/serve_unix_test.go | 2 +- ipn/ipnlocal/ssh.go | 2 +- ipn/ipnlocal/ssh_stub.go | 2 +- ipn/ipnlocal/ssh_test.go | 2 +- ipn/ipnlocal/state_test.go | 2 +- ipn/ipnlocal/tailnetlock_disabled.go | 2 +- ipn/ipnlocal/web_client.go | 2 +- ipn/ipnlocal/web_client_stub.go | 2 +- ipn/ipnserver/actor.go | 2 +- ipn/ipnserver/proxyconnect.go | 2 +- ipn/ipnserver/proxyconnect_js.go | 2 +- ipn/ipnserver/server.go | 2 +- ipn/ipnserver/server_fortest.go | 2 +- ipn/ipnserver/server_test.go | 2 +- ipn/ipnserver/waiterset_test.go | 2 +- ipn/ipnstate/ipnstate.go | 2 +- ipn/ipnstate/ipnstate_clone.go | 2 +- ipn/lapitest/backend.go | 2 +- ipn/lapitest/client.go | 2 +- ipn/lapitest/example_test.go | 2 +- ipn/lapitest/opts.go | 2 +- ipn/lapitest/server.go | 2 +- ipn/localapi/cert.go | 2 +- ipn/localapi/debug.go | 2 +- ipn/localapi/debugderp.go | 2 +- ipn/localapi/disabled_stubs.go | 2 +- ipn/localapi/localapi.go | 2 +- ipn/localapi/localapi_drive.go | 2 +- ipn/localapi/localapi_test.go | 2 +- ipn/localapi/pprof.go | 2 +- ipn/localapi/serve.go | 2 +- ipn/localapi/syspolicy_api.go | 2 +- ipn/localapi/tailnetlock.go | 2 +- ipn/policy/policy.go | 2 +- ipn/prefs.go | 2 +- ipn/prefs_test.go | 2 +- ipn/serve.go | 2 +- ipn/serve_expand_test.go | 2 +- ipn/serve_test.go | 2 +- ipn/store.go | 2 +- ipn/store/awsstore/store_aws.go | 2 +- ipn/store/awsstore/store_aws_test.go | 2 +- ipn/store/kubestore/store_kube.go | 2 +- 
ipn/store/kubestore/store_kube_test.go | 2 +- ipn/store/mem/store_mem.go | 2 +- ipn/store/stores.go | 2 +- ipn/store/stores_test.go | 2 +- ipn/store_test.go | 2 +- jsondb/db.go | 2 +- jsondb/db_test.go | 2 +- k8s-operator/api-docs-config.yaml | 2 +- k8s-operator/api-proxy/doc.go | 2 +- k8s-operator/api-proxy/proxy.go | 2 +- k8s-operator/api-proxy/proxy_events_test.go | 2 +- k8s-operator/api-proxy/proxy_test.go | 2 +- k8s-operator/apis/doc.go | 2 +- k8s-operator/apis/v1alpha1/doc.go | 2 +- k8s-operator/apis/v1alpha1/register.go | 2 +- k8s-operator/apis/v1alpha1/types_connector.go | 2 +- k8s-operator/apis/v1alpha1/types_proxyclass.go | 2 +- k8s-operator/apis/v1alpha1/types_proxygroup.go | 2 +- k8s-operator/apis/v1alpha1/types_recorder.go | 2 +- k8s-operator/apis/v1alpha1/types_tailnet.go | 2 +- k8s-operator/apis/v1alpha1/types_tsdnsconfig.go | 2 +- .../apis/v1alpha1/zz_generated.deepcopy.go | 2 +- k8s-operator/conditions.go | 2 +- k8s-operator/conditions_test.go | 2 +- k8s-operator/reconciler/reconciler.go | 2 +- k8s-operator/reconciler/reconciler_test.go | 2 +- k8s-operator/reconciler/tailnet/mocks_test.go | 2 +- k8s-operator/reconciler/tailnet/tailnet.go | 2 +- k8s-operator/reconciler/tailnet/tailnet_test.go | 2 +- k8s-operator/sessionrecording/fakes/fakes.go | 2 +- k8s-operator/sessionrecording/hijacker.go | 2 +- k8s-operator/sessionrecording/hijacker_test.go | 2 +- k8s-operator/sessionrecording/spdy/conn.go | 2 +- k8s-operator/sessionrecording/spdy/conn_test.go | 2 +- k8s-operator/sessionrecording/spdy/frame.go | 2 +- .../sessionrecording/spdy/frame_test.go | 2 +- .../sessionrecording/spdy/zlib-reader.go | 2 +- .../sessionrecording/tsrecorder/tsrecorder.go | 2 +- k8s-operator/sessionrecording/ws/conn.go | 2 +- k8s-operator/sessionrecording/ws/conn_test.go | 2 +- k8s-operator/sessionrecording/ws/message.go | 2 +- .../sessionrecording/ws/message_test.go | 2 +- k8s-operator/utils.go | 2 +- kube/certs/certs.go | 2 +- kube/certs/certs_test.go | 2 +- 
kube/egressservices/egressservices.go | 2 +- kube/egressservices/egressservices_test.go | 2 +- kube/health/healthz.go | 2 +- kube/ingressservices/ingressservices.go | 2 +- kube/k8s-proxy/conf/conf.go | 2 +- kube/k8s-proxy/conf/conf_test.go | 2 +- kube/kubeapi/api.go | 2 +- kube/kubeclient/client.go | 2 +- kube/kubeclient/client_test.go | 2 +- kube/kubeclient/fake_client.go | 2 +- kube/kubetypes/grants.go | 2 +- kube/kubetypes/types.go | 2 +- kube/kubetypes/types_test.go | 2 +- kube/localclient/fake-client.go | 2 +- kube/localclient/local-client.go | 2 +- kube/metrics/metrics.go | 2 +- kube/services/services.go | 2 +- kube/state/state.go | 2 +- kube/state/state_test.go | 2 +- license_test.go | 4 ++-- licenses/licenses.go | 2 +- log/filelogger/log.go | 2 +- log/filelogger/log_test.go | 2 +- log/sockstatlog/logger.go | 2 +- log/sockstatlog/logger_test.go | 2 +- logpolicy/logpolicy.go | 2 +- logpolicy/logpolicy_test.go | 2 +- logpolicy/maybe_syspolicy.go | 2 +- logtail/buffer.go | 2 +- logtail/config.go | 2 +- logtail/example/logadopt/logadopt.go | 2 +- logtail/example/logreprocess/demo.sh | 2 +- logtail/example/logreprocess/logreprocess.go | 2 +- logtail/example/logtail/logtail.go | 2 +- logtail/filch/filch.go | 2 +- logtail/filch/filch_omit.go | 2 +- logtail/filch/filch_stub.go | 2 +- logtail/filch/filch_test.go | 2 +- logtail/filch/filch_unix.go | 2 +- logtail/filch/filch_windows.go | 2 +- logtail/logtail.go | 2 +- logtail/logtail_omit.go | 2 +- logtail/logtail_test.go | 2 +- maths/ewma.go | 2 +- maths/ewma_test.go | 2 +- metrics/fds_linux.go | 2 +- metrics/fds_notlinux.go | 2 +- metrics/metrics.go | 2 +- metrics/metrics_test.go | 2 +- metrics/multilabelmap.go | 2 +- metrics/multilabelmap_test.go | 2 +- net/ace/ace.go | 2 +- net/art/art_test.go | 2 +- net/art/stride_table.go | 2 +- net/art/stride_table_test.go | 2 +- net/art/table.go | 2 +- net/art/table_test.go | 2 +- net/bakedroots/bakedroots.go | 2 +- net/bakedroots/bakedroots_test.go | 2 +- net/batching/conn.go 
| 2 +- net/batching/conn_default.go | 2 +- net/batching/conn_linux.go | 2 +- net/batching/conn_linux_test.go | 2 +- net/captivedetection/captivedetection.go | 2 +- net/captivedetection/captivedetection_test.go | 2 +- net/captivedetection/endpoints.go | 2 +- net/captivedetection/rawconn.go | 2 +- net/captivedetection/rawconn_apple.go | 2 +- net/connectproxy/connectproxy.go | 2 +- net/dns/config.go | 2 +- net/dns/dbus.go | 2 +- net/dns/debian_resolvconf.go | 2 +- net/dns/direct.go | 2 +- net/dns/direct_linux_test.go | 2 +- net/dns/direct_test.go | 2 +- net/dns/direct_unix_test.go | 2 +- net/dns/dns_clone.go | 2 +- net/dns/dns_view.go | 2 +- net/dns/flush_default.go | 2 +- net/dns/flush_windows.go | 2 +- net/dns/ini.go | 2 +- net/dns/ini_test.go | 2 +- net/dns/manager.go | 2 +- net/dns/manager_darwin.go | 2 +- net/dns/manager_default.go | 2 +- net/dns/manager_freebsd.go | 2 +- net/dns/manager_linux.go | 2 +- net/dns/manager_linux_test.go | 2 +- net/dns/manager_openbsd.go | 2 +- net/dns/manager_plan9.go | 2 +- net/dns/manager_plan9_test.go | 2 +- net/dns/manager_solaris.go | 2 +- net/dns/manager_tcp_test.go | 2 +- net/dns/manager_test.go | 2 +- net/dns/manager_windows.go | 2 +- net/dns/manager_windows_test.go | 2 +- net/dns/nm.go | 2 +- net/dns/noop.go | 2 +- net/dns/nrpt_windows.go | 2 +- net/dns/openresolv.go | 2 +- net/dns/osconfig.go | 2 +- net/dns/osconfig_test.go | 2 +- net/dns/publicdns/publicdns.go | 2 +- net/dns/publicdns/publicdns_test.go | 2 +- net/dns/resolvconf-workaround.sh | 2 +- net/dns/resolvconf.go | 2 +- net/dns/resolvconffile/resolvconffile.go | 2 +- net/dns/resolvconffile/resolvconffile_test.go | 2 +- net/dns/resolvconfpath_default.go | 2 +- net/dns/resolvconfpath_gokrazy.go | 2 +- net/dns/resolvd.go | 2 +- net/dns/resolved.go | 2 +- net/dns/resolver/debug.go | 2 +- net/dns/resolver/doh_test.go | 2 +- net/dns/resolver/forwarder.go | 2 +- net/dns/resolver/forwarder_test.go | 2 +- net/dns/resolver/macios_ext.go | 2 +- net/dns/resolver/tsdns.go | 2 +- 
net/dns/resolver/tsdns_server_test.go | 2 +- net/dns/resolver/tsdns_test.go | 2 +- net/dns/utf.go | 2 +- net/dns/utf_test.go | 2 +- net/dns/wsl_windows.go | 2 +- net/dnscache/dnscache.go | 2 +- net/dnscache/dnscache_test.go | 2 +- net/dnscache/messagecache.go | 2 +- net/dnscache/messagecache_test.go | 2 +- net/dnsfallback/dnsfallback.go | 2 +- net/dnsfallback/dnsfallback_test.go | 2 +- net/dnsfallback/update-dns-fallbacks.go | 2 +- net/flowtrack/flowtrack.go | 2 +- net/flowtrack/flowtrack_test.go | 2 +- net/ipset/ipset.go | 2 +- net/ipset/ipset_test.go | 2 +- net/ktimeout/ktimeout.go | 2 +- net/ktimeout/ktimeout_default.go | 2 +- net/ktimeout/ktimeout_linux.go | 2 +- net/ktimeout/ktimeout_linux_test.go | 2 +- net/ktimeout/ktimeout_test.go | 2 +- net/memnet/conn.go | 2 +- net/memnet/conn_test.go | 2 +- net/memnet/listener.go | 2 +- net/memnet/listener_test.go | 2 +- net/memnet/memnet.go | 2 +- net/memnet/memnet_test.go | 2 +- net/memnet/pipe.go | 2 +- net/memnet/pipe_test.go | 2 +- net/netaddr/netaddr.go | 2 +- net/netcheck/captiveportal.go | 2 +- net/netcheck/netcheck.go | 2 +- net/netcheck/netcheck_test.go | 2 +- net/netcheck/standalone.go | 2 +- net/neterror/neterror.go | 2 +- net/neterror/neterror_linux.go | 2 +- net/neterror/neterror_linux_test.go | 2 +- net/neterror/neterror_windows.go | 2 +- net/netkernelconf/netkernelconf.go | 2 +- net/netkernelconf/netkernelconf_default.go | 2 +- net/netkernelconf/netkernelconf_linux.go | 2 +- net/netknob/netknob.go | 2 +- net/netmon/defaultroute_bsd.go | 2 +- net/netmon/defaultroute_darwin.go | 2 +- net/netmon/interfaces.go | 2 +- net/netmon/interfaces_android.go | 2 +- net/netmon/interfaces_bsd.go | 2 +- net/netmon/interfaces_darwin.go | 2 +- net/netmon/interfaces_darwin_test.go | 2 +- net/netmon/interfaces_default_route_test.go | 2 +- net/netmon/interfaces_defaultrouteif_todo.go | 2 +- net/netmon/interfaces_freebsd.go | 2 +- net/netmon/interfaces_linux.go | 2 +- net/netmon/interfaces_linux_test.go | 2 +- 
net/netmon/interfaces_test.go | 2 +- net/netmon/interfaces_windows.go | 2 +- net/netmon/interfaces_windows_test.go | 2 +- net/netmon/loghelper.go | 2 +- net/netmon/loghelper_test.go | 2 +- net/netmon/netmon.go | 2 +- net/netmon/netmon_darwin.go | 2 +- net/netmon/netmon_darwin_test.go | 2 +- net/netmon/netmon_freebsd.go | 2 +- net/netmon/netmon_linux.go | 2 +- net/netmon/netmon_linux_test.go | 2 +- net/netmon/netmon_polling.go | 2 +- net/netmon/netmon_test.go | 2 +- net/netmon/netmon_windows.go | 2 +- net/netmon/polling.go | 2 +- net/netmon/state.go | 2 +- net/netns/mksyscall.go | 2 +- net/netns/netns.go | 2 +- net/netns/netns_android.go | 2 +- net/netns/netns_darwin.go | 2 +- net/netns/netns_darwin_test.go | 2 +- net/netns/netns_default.go | 2 +- net/netns/netns_dw.go | 2 +- net/netns/netns_linux.go | 2 +- net/netns/netns_linux_test.go | 2 +- net/netns/netns_test.go | 2 +- net/netns/netns_windows.go | 2 +- net/netns/netns_windows_test.go | 2 +- net/netns/socks.go | 2 +- net/netstat/netstat.go | 2 +- net/netstat/netstat_noimpl.go | 2 +- net/netstat/netstat_test.go | 2 +- net/netstat/netstat_windows.go | 2 +- net/netutil/default_interface_portable.go | 2 +- net/netutil/default_interface_portable_test.go | 2 +- net/netutil/ip_forward.go | 2 +- net/netutil/netutil.go | 2 +- net/netutil/netutil_test.go | 2 +- net/netutil/routes.go | 2 +- net/netx/netx.go | 2 +- net/packet/capture.go | 2 +- net/packet/checksum/checksum.go | 2 +- net/packet/checksum/checksum_test.go | 2 +- net/packet/doc.go | 2 +- net/packet/geneve.go | 2 +- net/packet/geneve_test.go | 2 +- net/packet/header.go | 2 +- net/packet/icmp.go | 2 +- net/packet/icmp4.go | 2 +- net/packet/icmp6.go | 2 +- net/packet/icmp6_test.go | 2 +- net/packet/ip4.go | 2 +- net/packet/ip6.go | 2 +- net/packet/packet.go | 2 +- net/packet/packet_test.go | 2 +- net/packet/tsmp.go | 2 +- net/packet/tsmp_test.go | 2 +- net/packet/udp4.go | 2 +- net/packet/udp6.go | 2 +- net/ping/ping.go | 2 +- net/ping/ping_test.go | 2 +- 
net/portmapper/disabled_stubs.go | 2 +- net/portmapper/igd_test.go | 2 +- net/portmapper/legacy_upnp.go | 2 +- net/portmapper/pcp.go | 2 +- net/portmapper/pcp_test.go | 2 +- net/portmapper/pcpresultcode_string.go | 2 +- net/portmapper/pmpresultcode_string.go | 2 +- net/portmapper/portmapper.go | 2 +- net/portmapper/portmapper_test.go | 2 +- net/portmapper/portmappertype/portmappertype.go | 2 +- net/portmapper/select_test.go | 2 +- net/portmapper/upnp.go | 2 +- net/portmapper/upnp_test.go | 2 +- net/proxymux/mux.go | 2 +- net/proxymux/mux_test.go | 2 +- net/routetable/routetable.go | 2 +- net/routetable/routetable_bsd.go | 2 +- net/routetable/routetable_bsd_test.go | 2 +- net/routetable/routetable_darwin.go | 2 +- net/routetable/routetable_freebsd.go | 2 +- net/routetable/routetable_linux.go | 2 +- net/routetable/routetable_linux_test.go | 2 +- net/routetable/routetable_other.go | 2 +- net/sockopts/sockopts.go | 2 +- net/sockopts/sockopts_default.go | 2 +- net/sockopts/sockopts_linux.go | 2 +- net/sockopts/sockopts_notwindows.go | 2 +- net/sockopts/sockopts_unix_test.go | 2 +- net/sockopts/sockopts_windows.go | 2 +- net/socks5/socks5.go | 2 +- net/socks5/socks5_test.go | 2 +- net/sockstats/sockstats.go | 2 +- net/sockstats/sockstats_noop.go | 2 +- net/sockstats/sockstats_tsgo.go | 2 +- net/sockstats/sockstats_tsgo_darwin.go | 2 +- net/sockstats/sockstats_tsgo_test.go | 2 +- net/speedtest/speedtest.go | 2 +- net/speedtest/speedtest_client.go | 2 +- net/speedtest/speedtest_server.go | 2 +- net/speedtest/speedtest_test.go | 2 +- net/stun/stun.go | 2 +- net/stun/stun_fuzzer.go | 2 +- net/stun/stun_test.go | 2 +- net/stun/stuntest/stuntest.go | 2 +- net/stunserver/stunserver.go | 2 +- net/stunserver/stunserver_test.go | 2 +- net/tcpinfo/tcpinfo.go | 2 +- net/tcpinfo/tcpinfo_darwin.go | 2 +- net/tcpinfo/tcpinfo_linux.go | 2 +- net/tcpinfo/tcpinfo_other.go | 2 +- net/tcpinfo/tcpinfo_test.go | 2 +- net/tlsdial/blockblame/blockblame.go | 2 +- 
net/tlsdial/blockblame/blockblame_test.go | 2 +- net/tlsdial/deps_test.go | 2 +- net/tlsdial/tlsdial.go | 2 +- net/tlsdial/tlsdial_test.go | 2 +- net/tsaddr/tsaddr.go | 2 +- net/tsaddr/tsaddr_test.go | 2 +- net/tsdial/dnsmap.go | 2 +- net/tsdial/dnsmap_test.go | 2 +- net/tsdial/dohclient.go | 2 +- net/tsdial/dohclient_test.go | 2 +- net/tsdial/peerapi_macios_ext.go | 2 +- net/tsdial/tsdial.go | 2 +- net/tshttpproxy/mksyscall.go | 2 +- net/tshttpproxy/tshttpproxy.go | 2 +- net/tshttpproxy/tshttpproxy_linux.go | 2 +- net/tshttpproxy/tshttpproxy_synology.go | 2 +- net/tshttpproxy/tshttpproxy_synology_test.go | 2 +- net/tshttpproxy/tshttpproxy_test.go | 2 +- net/tshttpproxy/tshttpproxy_windows.go | 2 +- net/tstun/fake.go | 2 +- net/tstun/ifstatus_noop.go | 2 +- net/tstun/ifstatus_windows.go | 2 +- net/tstun/mtu.go | 2 +- net/tstun/mtu_test.go | 2 +- net/tstun/netstack_disabled.go | 2 +- net/tstun/netstack_enabled.go | 2 +- net/tstun/tstun_stub.go | 2 +- net/tstun/tun.go | 2 +- net/tstun/tun_linux.go | 2 +- net/tstun/tun_macos.go | 2 +- net/tstun/tun_notwindows.go | 2 +- net/tstun/tun_windows.go | 2 +- net/tstun/wrap.go | 2 +- net/tstun/wrap_linux.go | 2 +- net/tstun/wrap_noop.go | 2 +- net/tstun/wrap_test.go | 2 +- net/udprelay/endpoint/endpoint.go | 2 +- net/udprelay/endpoint/endpoint_test.go | 2 +- net/udprelay/metrics.go | 2 +- net/udprelay/metrics_test.go | 2 +- net/udprelay/server.go | 2 +- net/udprelay/server_linux.go | 2 +- net/udprelay/server_notlinux.go | 2 +- net/udprelay/server_test.go | 2 +- net/udprelay/status/status.go | 2 +- net/wsconn/wsconn.go | 2 +- omit/aws_def.go | 2 +- omit/aws_omit.go | 2 +- omit/omit.go | 2 +- packages/deb/deb.go | 2 +- packages/deb/deb_test.go | 2 +- paths/migrate.go | 2 +- paths/paths.go | 2 +- paths/paths_unix.go | 2 +- paths/paths_windows.go | 2 +- pkgdoc_test.go | 2 +- portlist/clean.go | 2 +- portlist/clean_test.go | 2 +- portlist/netstat.go | 2 +- portlist/netstat_test.go | 2 +- portlist/poller.go | 2 +- 
portlist/portlist.go | 2 +- portlist/portlist_linux.go | 2 +- portlist/portlist_linux_test.go | 2 +- portlist/portlist_macos.go | 2 +- portlist/portlist_plan9.go | 2 +- portlist/portlist_test.go | 2 +- portlist/portlist_windows.go | 2 +- posture/doc.go | 2 +- posture/hwaddr.go | 2 +- posture/serialnumber_macos.go | 2 +- posture/serialnumber_macos_test.go | 2 +- posture/serialnumber_notmacos.go | 2 +- posture/serialnumber_notmacos_test.go | 2 +- posture/serialnumber_stub.go | 2 +- posture/serialnumber_syspolicy.go | 2 +- posture/serialnumber_test.go | 2 +- prober/derp.go | 2 +- prober/derp_test.go | 2 +- prober/dns.go | 2 +- prober/dns_example_test.go | 2 +- prober/dns_test.go | 2 +- prober/histogram.go | 2 +- prober/histogram_test.go | 2 +- prober/http.go | 2 +- prober/prober.go | 2 +- prober/prober_test.go | 2 +- prober/status.go | 2 +- prober/tcp.go | 2 +- prober/tls.go | 2 +- prober/tls_test.go | 2 +- prober/tun_darwin.go | 2 +- prober/tun_default.go | 2 +- prober/tun_linux.go | 2 +- proxymap/proxymap.go | 2 +- release/dist/cli/cli.go | 2 +- release/dist/dist.go | 2 +- release/dist/memoize.go | 2 +- release/dist/qnap/pkgs.go | 2 +- release/dist/qnap/targets.go | 2 +- release/dist/synology/pkgs.go | 2 +- release/dist/synology/targets.go | 2 +- release/dist/unixpkgs/pkgs.go | 2 +- release/dist/unixpkgs/targets.go | 2 +- release/release.go | 2 +- safesocket/basic_test.go | 2 +- safesocket/pipe_windows.go | 2 +- safesocket/pipe_windows_test.go | 2 +- safesocket/safesocket.go | 2 +- safesocket/safesocket_darwin.go | 2 +- safesocket/safesocket_darwin_test.go | 2 +- safesocket/safesocket_js.go | 2 +- safesocket/safesocket_plan9.go | 2 +- safesocket/safesocket_ps.go | 2 +- safesocket/safesocket_test.go | 2 +- safesocket/unixsocket.go | 2 +- safeweb/http.go | 2 +- safeweb/http_test.go | 2 +- scripts/installer.sh | 2 +- sessionrecording/connect.go | 2 +- sessionrecording/connect_test.go | 2 +- sessionrecording/event.go | 2 +- sessionrecording/header.go | 2 +- 
ssh/tailssh/accept_env.go | 2 +- ssh/tailssh/accept_env_test.go | 2 +- ssh/tailssh/auditd_linux.go | 2 +- ssh/tailssh/auditd_linux_test.go | 2 +- ssh/tailssh/incubator.go | 2 +- ssh/tailssh/incubator_linux.go | 2 +- ssh/tailssh/incubator_plan9.go | 2 +- ssh/tailssh/privs_test.go | 2 +- ssh/tailssh/tailssh.go | 2 +- ssh/tailssh/tailssh_integration_test.go | 2 +- ssh/tailssh/tailssh_test.go | 2 +- ssh/tailssh/user.go | 2 +- syncs/locked.go | 2 +- syncs/locked_test.go | 2 +- syncs/mutex.go | 2 +- syncs/mutex_debug.go | 2 +- syncs/pool.go | 2 +- syncs/pool_test.go | 2 +- syncs/shardedint.go | 2 +- syncs/shardedint_test.go | 2 +- syncs/shardedmap.go | 2 +- syncs/shardedmap_test.go | 2 +- syncs/shardvalue.go | 2 +- syncs/shardvalue_go.go | 2 +- syncs/shardvalue_tailscale.go | 2 +- syncs/shardvalue_test.go | 2 +- syncs/syncs.go | 2 +- syncs/syncs_test.go | 2 +- tailcfg/c2ntypes.go | 2 +- tailcfg/derpmap.go | 2 +- tailcfg/proto_port_range.go | 2 +- tailcfg/proto_port_range_test.go | 2 +- tailcfg/tailcfg.go | 2 +- tailcfg/tailcfg_clone.go | 2 +- tailcfg/tailcfg_test.go | 2 +- tailcfg/tailcfg_view.go | 2 +- tailcfg/tka.go | 2 +- tka/aum.go | 2 +- tka/aum_test.go | 2 +- tka/builder.go | 2 +- tka/builder_test.go | 2 +- tka/chaintest_test.go | 2 +- tka/deeplink.go | 2 +- tka/deeplink_test.go | 2 +- tka/disabled_stub.go | 2 +- tka/key.go | 2 +- tka/key_test.go | 2 +- tka/scenario_test.go | 2 +- tka/sig.go | 2 +- tka/sig_test.go | 2 +- tka/state.go | 2 +- tka/state_test.go | 2 +- tka/sync.go | 2 +- tka/sync_test.go | 2 +- tka/tailchonk.go | 2 +- tka/tailchonk_test.go | 2 +- tka/tka.go | 2 +- tka/tka_clone.go | 2 +- tka/tka_test.go | 2 +- tka/verify.go | 2 +- tka/verify_disabled.go | 2 +- tool/gocross/autoflags.go | 2 +- tool/gocross/autoflags_test.go | 2 +- tool/gocross/env.go | 2 +- tool/gocross/env_test.go | 2 +- tool/gocross/exec_other.go | 2 +- tool/gocross/exec_unix.go | 2 +- tool/gocross/gocross-wrapper.ps1 | 2 +- tool/gocross/gocross-wrapper.sh | 2 +- 
tool/gocross/gocross.go | 2 +- tool/gocross/gocross_test.go | 2 +- tool/gocross/gocross_wrapper_test.go | 2 +- tool/gocross/gocross_wrapper_windows_test.go | 2 +- tool/gocross/goroot.go | 2 +- tool/gocross/toolchain.go | 2 +- tool/listpkgs/listpkgs.go | 2 +- tsconsensus/authorization.go | 2 +- tsconsensus/authorization_test.go | 2 +- tsconsensus/bolt_store.go | 2 +- tsconsensus/bolt_store_no_bolt.go | 2 +- tsconsensus/http.go | 2 +- tsconsensus/monitor.go | 2 +- tsconsensus/tsconsensus.go | 2 +- tsconsensus/tsconsensus_test.go | 2 +- tsconst/health.go | 2 +- tsconst/linuxfw.go | 2 +- tsconst/tsconst.go | 2 +- tsconst/webclient.go | 2 +- tsd/tsd.go | 2 +- tsnet/example/tshello/tshello.go | 2 +- tsnet/example/tsnet-funnel/tsnet-funnel.go | 2 +- .../tsnet-http-client/tsnet-http-client.go | 2 +- tsnet/example/tsnet-services/tsnet-services.go | 2 +- tsnet/example/web-client/web-client.go | 2 +- tsnet/example_tshello_test.go | 2 +- ..._tsnet_listen_service_multiple_ports_test.go | 2 +- tsnet/example_tsnet_test.go | 2 +- tsnet/packet_filter_test.go | 2 +- tsnet/tsnet.go | 2 +- tsnet/tsnet_test.go | 2 +- tstest/allocs.go | 2 +- tstest/archtest/archtest_test.go | 2 +- tstest/archtest/qemu_test.go | 2 +- tstest/chonktest/chonktest.go | 2 +- tstest/chonktest/tailchonk_test.go | 2 +- tstest/clock.go | 2 +- tstest/clock_test.go | 2 +- tstest/deptest/deptest.go | 2 +- tstest/deptest/deptest_test.go | 2 +- tstest/integration/capmap_test.go | 2 +- tstest/integration/gen_deps.go | 4 ++-- tstest/integration/integration.go | 2 +- tstest/integration/integration_test.go | 2 +- tstest/integration/nat/nat_test.go | 2 +- .../integration/tailscaled_deps_test_darwin.go | 2 +- .../integration/tailscaled_deps_test_freebsd.go | 2 +- .../integration/tailscaled_deps_test_linux.go | 2 +- .../integration/tailscaled_deps_test_openbsd.go | 2 +- .../integration/tailscaled_deps_test_windows.go | 2 +- tstest/integration/testcontrol/testcontrol.go | 2 +- tstest/integration/vms/derive_bindhost_test.go | 
2 +- tstest/integration/vms/distros.go | 2 +- tstest/integration/vms/distros_test.go | 2 +- tstest/integration/vms/dns_tester.go | 2 +- tstest/integration/vms/doc.go | 2 +- tstest/integration/vms/harness_test.go | 2 +- tstest/integration/vms/nixos_test.go | 2 +- tstest/integration/vms/top_level_test.go | 2 +- tstest/integration/vms/udp_tester.go | 2 +- tstest/integration/vms/vm_setup_test.go | 2 +- tstest/integration/vms/vms_steps_test.go | 2 +- tstest/integration/vms/vms_test.go | 2 +- tstest/iosdeps/iosdeps.go | 2 +- tstest/iosdeps/iosdeps_test.go | 2 +- tstest/jsdeps/jsdeps.go | 2 +- tstest/jsdeps/jsdeps_test.go | 2 +- tstest/kernel_linux.go | 2 +- tstest/kernel_other.go | 2 +- tstest/log.go | 2 +- tstest/log_test.go | 2 +- tstest/mts/mts.go | 2 +- tstest/natlab/firewall.go | 2 +- tstest/natlab/nat.go | 2 +- tstest/natlab/natlab.go | 2 +- tstest/natlab/natlab_test.go | 2 +- tstest/natlab/vnet/conf.go | 2 +- tstest/natlab/vnet/conf_test.go | 2 +- tstest/natlab/vnet/easyaf.go | 2 +- tstest/natlab/vnet/nat.go | 2 +- tstest/natlab/vnet/pcap.go | 2 +- tstest/natlab/vnet/vip.go | 2 +- tstest/natlab/vnet/vnet.go | 2 +- tstest/natlab/vnet/vnet_test.go | 2 +- tstest/nettest/nettest.go | 2 +- tstest/reflect.go | 2 +- tstest/resource.go | 2 +- tstest/resource_test.go | 2 +- tstest/tailmac/Swift/Common/Config.swift | 2 +- tstest/tailmac/Swift/Common/Notifications.swift | 2 +- .../Swift/Common/TailMacConfigHelper.swift | 2 +- tstest/tailmac/Swift/Host/AppDelegate.swift | 2 +- tstest/tailmac/Swift/Host/HostCli.swift | 2 +- tstest/tailmac/Swift/Host/VMController.swift | 2 +- tstest/tailmac/Swift/TailMac/RestoreImage.swift | 2 +- tstest/tailmac/Swift/TailMac/TailMac.swift | 2 +- tstest/tailmac/Swift/TailMac/VMInstaller.swift | 2 +- tstest/tkatest/tkatest.go | 2 +- tstest/tlstest/tlstest.go | 2 +- tstest/tlstest/tlstest_test.go | 2 +- tstest/tools/tools.go | 2 +- tstest/tstest.go | 2 +- tstest/tstest_test.go | 2 +- tstest/typewalk/typewalk.go | 2 +- tstime/jitter.go | 2 +- 
tstime/jitter_test.go | 2 +- tstime/mono/mono.go | 2 +- tstime/mono/mono_test.go | 2 +- tstime/rate/rate.go | 2 +- tstime/rate/rate_test.go | 2 +- tstime/rate/value.go | 2 +- tstime/rate/value_test.go | 2 +- tstime/tstime.go | 2 +- tstime/tstime_test.go | 2 +- tsweb/debug.go | 2 +- tsweb/debug_test.go | 2 +- tsweb/log.go | 2 +- tsweb/pprof_default.go | 2 +- tsweb/pprof_js.go | 2 +- tsweb/promvarz/promvarz.go | 2 +- tsweb/promvarz/promvarz_test.go | 2 +- tsweb/request_id.go | 2 +- tsweb/tsweb.go | 2 +- tsweb/tsweb_test.go | 2 +- tsweb/varz/varz.go | 2 +- tsweb/varz/varz_test.go | 2 +- types/appctype/appconnector.go | 2 +- types/appctype/appconnector_test.go | 2 +- types/bools/bools.go | 2 +- types/bools/bools_test.go | 2 +- types/dnstype/dnstype.go | 2 +- types/dnstype/dnstype_clone.go | 2 +- types/dnstype/dnstype_test.go | 2 +- types/dnstype/dnstype_view.go | 2 +- types/empty/message.go | 2 +- types/flagtype/flagtype.go | 2 +- types/geo/doc.go | 2 +- types/geo/point.go | 2 +- types/geo/point_test.go | 2 +- types/geo/quantize.go | 2 +- types/geo/quantize_test.go | 2 +- types/geo/units.go | 2 +- types/geo/units_test.go | 2 +- types/iox/io.go | 2 +- types/iox/io_test.go | 2 +- types/ipproto/ipproto.go | 2 +- types/ipproto/ipproto_test.go | 2 +- types/jsonx/json.go | 2 +- types/jsonx/json_test.go | 2 +- types/key/chal.go | 2 +- types/key/control.go | 2 +- types/key/control_test.go | 2 +- types/key/derp.go | 2 +- types/key/derp_test.go | 2 +- types/key/disco.go | 2 +- types/key/disco_test.go | 2 +- types/key/doc.go | 2 +- types/key/hardware_attestation.go | 2 +- types/key/machine.go | 2 +- types/key/machine_test.go | 2 +- types/key/nl.go | 2 +- types/key/nl_test.go | 2 +- types/key/node.go | 2 +- types/key/node_test.go | 2 +- types/key/util.go | 2 +- types/key/util_test.go | 2 +- types/lazy/deferred.go | 2 +- types/lazy/deferred_test.go | 2 +- types/lazy/lazy.go | 2 +- types/lazy/map.go | 2 +- types/lazy/map_test.go | 2 +- types/lazy/sync_test.go | 2 +- 
types/lazy/unsync.go | 2 +- types/lazy/unsync_test.go | 2 +- types/logger/logger.go | 2 +- types/logger/logger_test.go | 2 +- types/logger/rusage.go | 2 +- types/logger/rusage_stub.go | 2 +- types/logger/rusage_syscall.go | 2 +- types/logger/tokenbucket.go | 2 +- types/logid/id.go | 2 +- types/logid/id_test.go | 2 +- types/mapx/ordered.go | 2 +- types/mapx/ordered_test.go | 2 +- types/netlogfunc/netlogfunc.go | 2 +- types/netlogtype/netlogtype.go | 2 +- types/netlogtype/netlogtype_test.go | 2 +- types/netmap/netmap.go | 2 +- types/netmap/netmap_test.go | 2 +- types/netmap/nodemut.go | 2 +- types/netmap/nodemut_test.go | 2 +- types/nettype/nettype.go | 2 +- types/opt/bool.go | 2 +- types/opt/bool_test.go | 2 +- types/opt/value.go | 2 +- types/opt/value_test.go | 2 +- types/persist/persist.go | 2 +- types/persist/persist_clone.go | 2 +- types/persist/persist_test.go | 2 +- types/persist/persist_view.go | 2 +- types/prefs/item.go | 2 +- types/prefs/list.go | 2 +- types/prefs/map.go | 2 +- types/prefs/options.go | 2 +- types/prefs/prefs.go | 2 +- types/prefs/prefs_clone_test.go | 2 +- .../prefs/prefs_example/prefs_example_clone.go | 2 +- types/prefs/prefs_example/prefs_example_view.go | 2 +- types/prefs/prefs_example/prefs_test.go | 2 +- types/prefs/prefs_example/prefs_types.go | 2 +- types/prefs/prefs_test.go | 2 +- types/prefs/prefs_view_test.go | 2 +- types/prefs/struct_list.go | 2 +- types/prefs/struct_map.go | 2 +- types/preftype/netfiltermode.go | 2 +- types/ptr/ptr.go | 2 +- types/result/result.go | 2 +- types/structs/structs.go | 2 +- types/tkatype/tkatype.go | 2 +- types/tkatype/tkatype_test.go | 2 +- types/views/views.go | 2 +- types/views/views_test.go | 2 +- util/backoff/backoff.go | 2 +- util/checkchange/checkchange.go | 2 +- util/cibuild/cibuild.go | 2 +- util/clientmetric/clientmetric.go | 2 +- util/clientmetric/clientmetric_test.go | 2 +- util/clientmetric/omit.go | 2 +- util/cloudenv/cloudenv.go | 2 +- util/cloudenv/cloudenv_test.go | 2 +- 
util/cloudinfo/cloudinfo.go | 2 +- util/cloudinfo/cloudinfo_nocloud.go | 2 +- util/cloudinfo/cloudinfo_test.go | 2 +- util/cmpver/version.go | 2 +- util/cmpver/version_test.go | 2 +- util/codegen/codegen.go | 4 ++-- util/codegen/codegen_test.go | 2 +- util/cstruct/cstruct.go | 2 +- util/cstruct/cstruct_example_test.go | 2 +- util/cstruct/cstruct_test.go | 2 +- util/ctxkey/key.go | 2 +- util/ctxkey/key_test.go | 2 +- util/deephash/debug.go | 2 +- util/deephash/deephash.go | 2 +- util/deephash/deephash_test.go | 2 +- util/deephash/pointer.go | 2 +- util/deephash/pointer_norace.go | 2 +- util/deephash/pointer_race.go | 2 +- util/deephash/tailscale_types_test.go | 2 +- util/deephash/testtype/testtype.go | 2 +- util/deephash/types.go | 2 +- util/deephash/types_test.go | 2 +- util/dirwalk/dirwalk.go | 2 +- util/dirwalk/dirwalk_linux.go | 2 +- util/dirwalk/dirwalk_test.go | 2 +- util/dnsname/dnsname.go | 2 +- util/dnsname/dnsname_test.go | 2 +- util/eventbus/bench_test.go | 2 +- util/eventbus/bus.go | 2 +- util/eventbus/bus_test.go | 2 +- util/eventbus/client.go | 2 +- util/eventbus/debug-demo/main.go | 2 +- util/eventbus/debug.go | 2 +- util/eventbus/debughttp.go | 2 +- util/eventbus/debughttp_off.go | 2 +- util/eventbus/doc.go | 2 +- util/eventbus/eventbustest/doc.go | 2 +- util/eventbus/eventbustest/eventbustest.go | 2 +- util/eventbus/eventbustest/eventbustest_test.go | 2 +- util/eventbus/eventbustest/examples_test.go | 2 +- util/eventbus/fetch-htmx.go | 2 +- util/eventbus/monitor.go | 2 +- util/eventbus/publish.go | 2 +- util/eventbus/queue.go | 2 +- util/eventbus/subscribe.go | 2 +- util/execqueue/execqueue.go | 2 +- util/execqueue/execqueue_test.go | 2 +- util/expvarx/expvarx.go | 2 +- util/expvarx/expvarx_test.go | 2 +- util/goroutines/goroutines.go | 2 +- util/goroutines/goroutines_test.go | 2 +- util/goroutines/tracker.go | 2 +- util/groupmember/groupmember.go | 2 +- util/hashx/block512.go | 2 +- util/hashx/block512_test.go | 2 +- util/httphdr/httphdr.go | 2 +- 
util/httphdr/httphdr_test.go | 2 +- util/httpm/httpm.go | 2 +- util/httpm/httpm_test.go | 2 +- util/limiter/limiter.go | 2 +- util/limiter/limiter_test.go | 2 +- util/lineiter/lineiter.go | 2 +- util/lineiter/lineiter_test.go | 2 +- util/lineread/lineread.go | 2 +- util/linuxfw/detector.go | 2 +- util/linuxfw/fake.go | 2 +- util/linuxfw/fake_netfilter.go | 2 +- util/linuxfw/helpers.go | 2 +- util/linuxfw/iptables.go | 2 +- util/linuxfw/iptables_disabled.go | 2 +- util/linuxfw/iptables_for_svcs.go | 2 +- util/linuxfw/iptables_for_svcs_test.go | 2 +- util/linuxfw/iptables_runner.go | 2 +- util/linuxfw/iptables_runner_test.go | 2 +- util/linuxfw/linuxfw.go | 2 +- util/linuxfw/linuxfwtest/linuxfwtest.go | 2 +- .../linuxfwtest/linuxfwtest_unsupported.go | 2 +- util/linuxfw/nftables.go | 2 +- util/linuxfw/nftables_for_svcs.go | 2 +- util/linuxfw/nftables_for_svcs_test.go | 2 +- util/linuxfw/nftables_runner.go | 2 +- util/linuxfw/nftables_runner_test.go | 2 +- util/linuxfw/nftables_types.go | 2 +- util/lru/lru.go | 2 +- util/lru/lru_test.go | 2 +- util/mak/mak.go | 2 +- util/mak/mak_test.go | 2 +- util/multierr/multierr.go | 2 +- util/multierr/multierr_test.go | 2 +- util/must/must.go | 2 +- util/nocasemaps/nocase.go | 2 +- util/nocasemaps/nocase_test.go | 2 +- util/osdiag/internal/wsc/wsc_windows.go | 2 +- util/osdiag/mksyscall.go | 2 +- util/osdiag/osdiag.go | 2 +- util/osdiag/osdiag_notwindows.go | 2 +- util/osdiag/osdiag_windows.go | 2 +- util/osdiag/osdiag_windows_test.go | 2 +- util/osshare/filesharingstatus_noop.go | 2 +- util/osshare/filesharingstatus_windows.go | 2 +- util/osuser/group_ids.go | 2 +- util/osuser/group_ids_test.go | 2 +- util/osuser/user.go | 2 +- util/pidowner/pidowner.go | 2 +- util/pidowner/pidowner_linux.go | 2 +- util/pidowner/pidowner_noimpl.go | 2 +- util/pidowner/pidowner_test.go | 2 +- util/pidowner/pidowner_windows.go | 2 +- util/pool/pool.go | 2 +- util/pool/pool_test.go | 2 +- util/precompress/precompress.go | 2 +- 
util/progresstracking/progresstracking.go | 2 +- util/prompt/prompt.go | 2 +- util/qrcodes/format.go | 2 +- util/qrcodes/qrcodes.go | 2 +- util/qrcodes/qrcodes_disabled.go | 2 +- util/qrcodes/qrcodes_linux.go | 2 +- util/qrcodes/qrcodes_notlinux.go | 2 +- util/quarantine/quarantine.go | 2 +- util/quarantine/quarantine_darwin.go | 2 +- util/quarantine/quarantine_default.go | 2 +- util/quarantine/quarantine_windows.go | 2 +- util/race/race.go | 2 +- util/race/race_test.go | 2 +- util/racebuild/off.go | 2 +- util/racebuild/on.go | 2 +- util/racebuild/racebuild.go | 2 +- util/rands/cheap.go | 2 +- util/rands/cheap_test.go | 2 +- util/rands/rands.go | 2 +- util/rands/rands_test.go | 2 +- util/reload/reload.go | 2 +- util/reload/reload_test.go | 2 +- util/ringlog/ringlog.go | 2 +- util/ringlog/ringlog_test.go | 2 +- util/safediff/diff.go | 2 +- util/safediff/diff_test.go | 2 +- util/set/handle.go | 2 +- util/set/intset.go | 2 +- util/set/intset_test.go | 2 +- util/set/set.go | 2 +- util/set/set_test.go | 2 +- util/set/slice.go | 2 +- util/set/slice_test.go | 2 +- util/set/smallset.go | 2 +- util/set/smallset_test.go | 2 +- util/singleflight/singleflight.go | 2 +- util/singleflight/singleflight_test.go | 2 +- util/slicesx/slicesx.go | 2 +- util/slicesx/slicesx_test.go | 2 +- util/stringsx/stringsx.go | 2 +- util/stringsx/stringsx_test.go | 2 +- util/syspolicy/internal/internal.go | 2 +- util/syspolicy/internal/loggerx/logger.go | 2 +- util/syspolicy/internal/loggerx/logger_test.go | 2 +- util/syspolicy/internal/metrics/metrics.go | 2 +- util/syspolicy/internal/metrics/metrics_test.go | 2 +- util/syspolicy/internal/metrics/test_handler.go | 2 +- util/syspolicy/pkey/pkey.go | 2 +- util/syspolicy/policy_keys.go | 2 +- util/syspolicy/policy_keys_test.go | 2 +- util/syspolicy/policyclient/policyclient.go | 2 +- util/syspolicy/policytest/policytest.go | 2 +- util/syspolicy/ptype/ptype.go | 2 +- util/syspolicy/ptype/ptype_test.go | 2 +- util/syspolicy/rsop/change_callbacks.go | 
2 +- util/syspolicy/rsop/resultant_policy.go | 2 +- util/syspolicy/rsop/resultant_policy_test.go | 2 +- util/syspolicy/rsop/rsop.go | 2 +- util/syspolicy/rsop/store_registration.go | 2 +- util/syspolicy/setting/errors.go | 2 +- util/syspolicy/setting/origin.go | 2 +- util/syspolicy/setting/policy_scope.go | 2 +- util/syspolicy/setting/policy_scope_test.go | 2 +- util/syspolicy/setting/raw_item.go | 2 +- util/syspolicy/setting/raw_item_test.go | 2 +- util/syspolicy/setting/setting.go | 2 +- util/syspolicy/setting/setting_test.go | 2 +- util/syspolicy/setting/snapshot.go | 2 +- util/syspolicy/setting/snapshot_test.go | 2 +- util/syspolicy/setting/summary.go | 2 +- util/syspolicy/source/env_policy_store.go | 2 +- util/syspolicy/source/env_policy_store_test.go | 2 +- util/syspolicy/source/policy_reader.go | 2 +- util/syspolicy/source/policy_reader_test.go | 2 +- util/syspolicy/source/policy_source.go | 2 +- util/syspolicy/source/policy_store_windows.go | 2 +- .../source/policy_store_windows_test.go | 2 +- util/syspolicy/source/test_store.go | 2 +- util/syspolicy/syspolicy.go | 2 +- util/syspolicy/syspolicy_test.go | 2 +- util/syspolicy/syspolicy_windows.go | 2 +- util/sysresources/memory.go | 2 +- util/sysresources/memory_bsd.go | 2 +- util/sysresources/memory_darwin.go | 2 +- util/sysresources/memory_linux.go | 2 +- util/sysresources/memory_unsupported.go | 2 +- util/sysresources/sysresources.go | 2 +- util/sysresources/sysresources_test.go | 2 +- util/testenv/testenv.go | 2 +- util/testenv/testenv_test.go | 2 +- util/topk/topk.go | 2 +- util/topk/topk_test.go | 2 +- util/truncate/truncate.go | 2 +- util/truncate/truncate_test.go | 2 +- util/usermetric/metrics.go | 2 +- util/usermetric/omit.go | 2 +- util/usermetric/usermetric.go | 2 +- util/usermetric/usermetric_test.go | 2 +- util/vizerror/vizerror.go | 2 +- util/vizerror/vizerror_test.go | 2 +- .../authenticode/authenticode_windows.go | 2 +- util/winutil/authenticode/mksyscall.go | 2 +- 
util/winutil/conpty/conpty_windows.go | 2 +- util/winutil/gp/gp_windows.go | 2 +- util/winutil/gp/gp_windows_test.go | 2 +- util/winutil/gp/mksyscall.go | 2 +- util/winutil/gp/policylock_windows.go | 2 +- util/winutil/gp/watcher_windows.go | 2 +- util/winutil/mksyscall.go | 2 +- util/winutil/policy/policy_windows.go | 2 +- util/winutil/policy/policy_windows_test.go | 2 +- util/winutil/restartmgr_windows.go | 2 +- util/winutil/restartmgr_windows_test.go | 2 +- util/winutil/s4u/lsa_windows.go | 2 +- util/winutil/s4u/mksyscall.go | 2 +- util/winutil/s4u/s4u_windows.go | 2 +- util/winutil/startupinfo_windows.go | 2 +- util/winutil/svcdiag_windows.go | 2 +- .../restartableprocess_windows.go | 2 +- util/winutil/userprofile_windows.go | 2 +- util/winutil/userprofile_windows_test.go | 2 +- util/winutil/winenv/mksyscall.go | 2 +- util/winutil/winenv/winenv_windows.go | 2 +- util/winutil/winutil.go | 2 +- util/winutil/winutil_notwindows.go | 2 +- util/winutil/winutil_windows.go | 2 +- util/winutil/winutil_windows_test.go | 2 +- util/zstdframe/options.go | 2 +- util/zstdframe/zstd.go | 2 +- util/zstdframe/zstd_test.go | 2 +- version-embed.go | 2 +- version/cmdname.go | 2 +- version/cmdname_ios.go | 2 +- version/cmp.go | 2 +- version/cmp_test.go | 2 +- version/distro/distro.go | 2 +- version/distro/distro_test.go | 2 +- version/exename.go | 2 +- version/export_test.go | 2 +- version/mkversion/mkversion.go | 2 +- version/mkversion/mkversion_test.go | 2 +- version/modinfo_test.go | 2 +- version/print.go | 2 +- version/prop.go | 2 +- version/race.go | 2 +- version/race_off.go | 2 +- version/version.go | 2 +- version/version_checkformat.go | 2 +- version/version_internal_test.go | 2 +- version/version_test.go | 2 +- version_tailscale_test.go | 2 +- version_test.go | 2 +- wf/firewall.go | 2 +- wgengine/bench/bench.go | 2 +- wgengine/bench/bench_test.go | 2 +- wgengine/bench/trafficgen.go | 2 +- wgengine/bench/wg.go | 2 +- wgengine/filter/filter.go | 2 +- 
wgengine/filter/filter_test.go | 2 +- wgengine/filter/filtertype/filtertype.go | 2 +- wgengine/filter/filtertype/filtertype_clone.go | 2 +- wgengine/filter/match.go | 2 +- wgengine/filter/tailcfg.go | 2 +- wgengine/magicsock/blockforever_conn.go | 2 +- wgengine/magicsock/debughttp.go | 2 +- wgengine/magicsock/debugknobs.go | 2 +- wgengine/magicsock/debugknobs_stubs.go | 2 +- wgengine/magicsock/derp.go | 2 +- wgengine/magicsock/derp_test.go | 2 +- wgengine/magicsock/disco_atomic.go | 2 +- wgengine/magicsock/disco_atomic_test.go | 2 +- wgengine/magicsock/discopingpurpose_string.go | 2 +- wgengine/magicsock/endpoint.go | 2 +- wgengine/magicsock/endpoint_default.go | 2 +- wgengine/magicsock/endpoint_stub.go | 2 +- wgengine/magicsock/endpoint_test.go | 2 +- wgengine/magicsock/endpoint_tracker.go | 2 +- wgengine/magicsock/endpoint_tracker_test.go | 2 +- wgengine/magicsock/magicsock.go | 2 +- wgengine/magicsock/magicsock_default.go | 2 +- wgengine/magicsock/magicsock_linux.go | 2 +- wgengine/magicsock/magicsock_linux_test.go | 2 +- wgengine/magicsock/magicsock_notplan9.go | 2 +- wgengine/magicsock/magicsock_plan9.go | 2 +- wgengine/magicsock/magicsock_test.go | 2 +- wgengine/magicsock/peermap.go | 2 +- wgengine/magicsock/peermap_test.go | 2 +- wgengine/magicsock/peermtu.go | 2 +- wgengine/magicsock/peermtu_darwin.go | 2 +- wgengine/magicsock/peermtu_linux.go | 2 +- wgengine/magicsock/peermtu_stubs.go | 2 +- wgengine/magicsock/peermtu_unix.go | 2 +- wgengine/magicsock/rebinding_conn.go | 2 +- wgengine/magicsock/relaymanager.go | 2 +- wgengine/magicsock/relaymanager_test.go | 2 +- wgengine/mem_ios.go | 2 +- wgengine/netlog/netlog.go | 2 +- wgengine/netlog/netlog_omit.go | 2 +- wgengine/netlog/netlog_test.go | 2 +- wgengine/netlog/record.go | 2 +- wgengine/netlog/record_test.go | 2 +- wgengine/netstack/gro/gro.go | 2 +- wgengine/netstack/gro/gro_default.go | 2 +- wgengine/netstack/gro/gro_disabled.go | 2 +- wgengine/netstack/gro/gro_test.go | 2 +- 
wgengine/netstack/gro/netstack_disabled.go | 2 +- wgengine/netstack/link_endpoint.go | 2 +- wgengine/netstack/netstack.go | 2 +- wgengine/netstack/netstack_linux.go | 2 +- wgengine/netstack/netstack_tcpbuf_default.go | 2 +- wgengine/netstack/netstack_tcpbuf_ios.go | 2 +- wgengine/netstack/netstack_test.go | 2 +- wgengine/netstack/netstack_userping.go | 2 +- wgengine/netstack/netstack_userping_apple.go | 2 +- wgengine/netstack/netstack_userping_test.go | 2 +- wgengine/pendopen.go | 2 +- wgengine/pendopen_omit.go | 2 +- wgengine/router/callback.go | 2 +- wgengine/router/consolidating_router.go | 2 +- wgengine/router/consolidating_router_test.go | 2 +- .../router/osrouter/ifconfig_windows_test.go | 2 +- wgengine/router/osrouter/osrouter.go | 2 +- wgengine/router/osrouter/osrouter_test.go | 2 +- wgengine/router/osrouter/router_freebsd.go | 2 +- wgengine/router/osrouter/router_linux.go | 2 +- wgengine/router/osrouter/router_linux_test.go | 2 +- wgengine/router/osrouter/router_openbsd.go | 2 +- wgengine/router/osrouter/router_plan9.go | 2 +- .../router/osrouter/router_userspace_bsd.go | 2 +- wgengine/router/osrouter/router_windows.go | 2 +- wgengine/router/osrouter/router_windows_test.go | 2 +- wgengine/router/osrouter/runner.go | 2 +- wgengine/router/router.go | 2 +- wgengine/router/router_fake.go | 2 +- wgengine/router/router_test.go | 2 +- wgengine/userspace.go | 2 +- wgengine/userspace_ext_test.go | 2 +- wgengine/userspace_test.go | 2 +- wgengine/watchdog.go | 2 +- wgengine/watchdog_omit.go | 2 +- wgengine/watchdog_test.go | 2 +- wgengine/wgcfg/config.go | 2 +- wgengine/wgcfg/config_test.go | 2 +- wgengine/wgcfg/device.go | 2 +- wgengine/wgcfg/device_test.go | 2 +- wgengine/wgcfg/nmcfg/nmcfg.go | 2 +- wgengine/wgcfg/parser.go | 2 +- wgengine/wgcfg/parser_test.go | 2 +- wgengine/wgcfg/wgcfg_clone.go | 2 +- wgengine/wgcfg/writer.go | 2 +- wgengine/wgengine.go | 2 +- wgengine/wgint/wgint.go | 2 +- wgengine/wgint/wgint_test.go | 2 +- wgengine/wglog/wglog.go | 2 +- 
wgengine/wglog/wglog_test.go | 2 +- wgengine/winnet/winnet.go | 2 +- wgengine/winnet/winnet_windows.go | 2 +- wif/wif.go | 2 +- words/words.go | 2 +- words/words_test.go | 2 +- 2026 files changed, 2031 insertions(+), 2048 deletions(-) delete mode 100644 AUTHORS diff --git a/AUTHORS b/AUTHORS deleted file mode 100644 index 03d5932c04746..0000000000000 --- a/AUTHORS +++ /dev/null @@ -1,17 +0,0 @@ -# This is the official list of Tailscale -# authors for copyright purposes. -# -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name -# -# Please keep the list sorted. -# -# You do not need to add entries to this list, and we don't actively -# populate this list. If you do want to be acknowledged explicitly as -# a copyright holder, though, then please send a PR referencing your -# earlier contributions and clarifying whether it's you or your -# company that owns the rights to your contribution. - -Tailscale Inc. diff --git a/Dockerfile b/Dockerfile index 7122f99782fec..413a4b8211465 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # Note that this Dockerfile is currently NOT used to build any of the published diff --git a/Dockerfile.base b/Dockerfile.base index 9b7ae512b9945..295950c461339 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause FROM alpine:3.22 diff --git a/LICENSE b/LICENSE index 394db19e4aa5c..ed6e4bb6d6ba6 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2020 Tailscale Inc & AUTHORS. +Copyright (c) 2020 Tailscale Inc & contributors. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/appc/appconnector.go b/appc/appconnector.go index d41f9e8ba6357..ee495bd10f100 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package appc implements App Connectors. diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index 5c362d6fd1217..a860da6a7c737 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package appc diff --git a/appc/appctest/appctest.go b/appc/appctest/appctest.go index 9726a2b97d72b..c5eabf6761ec3 100644 --- a/appc/appctest/appctest.go +++ b/appc/appctest/appctest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package appctest contains code to help test App Connectors. 
diff --git a/appc/conn25.go b/appc/conn25.go index b4890c26c0268..2c3e8c519a976 100644 --- a/appc/conn25.go +++ b/appc/conn25.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package appc diff --git a/appc/conn25_test.go b/appc/conn25_test.go index ab6c4be37c592..76cc6cf8c69f4 100644 --- a/appc/conn25_test.go +++ b/appc/conn25_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package appc diff --git a/appc/ippool.go b/appc/ippool.go index a2e86a7c296a8..702f79ddef8d8 100644 --- a/appc/ippool.go +++ b/appc/ippool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package appc diff --git a/appc/ippool_test.go b/appc/ippool_test.go index 64b76738f661e..8ac457c117475 100644 --- a/appc/ippool_test.go +++ b/appc/ippool_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package appc diff --git a/appc/observe.go b/appc/observe.go index 06dc04f9dcfdf..3cb2db662b564 100644 --- a/appc/observe.go +++ b/appc/observe.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_appconnectors diff --git a/appc/observe_disabled.go b/appc/observe_disabled.go index 45aa285eaa758..743c28a590f8e 100644 --- a/appc/observe_disabled.go +++ b/appc/observe_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_appconnectors diff --git a/assert_ts_toolchain_match.go b/assert_ts_toolchain_match.go index 40b24b334674f..f0760ec039414 100644 --- 
a/assert_ts_toolchain_match.go +++ b/assert_ts_toolchain_match.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go diff --git a/atomicfile/atomicfile.go b/atomicfile/atomicfile.go index 9cae9bb750fa8..1fa4c0641f74e 100644 --- a/atomicfile/atomicfile.go +++ b/atomicfile/atomicfile.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package atomicfile contains code related to writing to filesystems diff --git a/atomicfile/atomicfile_notwindows.go b/atomicfile/atomicfile_notwindows.go index 1ce2bb8acda7a..7104ddd5d9ff6 100644 --- a/atomicfile/atomicfile_notwindows.go +++ b/atomicfile/atomicfile_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/atomicfile/atomicfile_test.go b/atomicfile/atomicfile_test.go index a081c90409788..6dbf4eb430372 100644 --- a/atomicfile/atomicfile_test.go +++ b/atomicfile/atomicfile_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !windows diff --git a/atomicfile/atomicfile_windows.go b/atomicfile/atomicfile_windows.go index c67762df2b56c..d9c6ecf32ac5e 100644 --- a/atomicfile/atomicfile_windows.go +++ b/atomicfile/atomicfile_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package atomicfile diff --git a/atomicfile/atomicfile_windows_test.go b/atomicfile/atomicfile_windows_test.go index 4dec1493e0224..8748fc324f61a 100644 --- a/atomicfile/atomicfile_windows_test.go +++ b/atomicfile/atomicfile_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package atomicfile diff --git a/atomicfile/mksyscall.go b/atomicfile/mksyscall.go index d8951a77c5ac6..2b0e4f9e58939 100644 --- a/atomicfile/mksyscall.go +++ b/atomicfile/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package atomicfile diff --git a/chirp/chirp.go b/chirp/chirp.go index 9653877221778..ed87542bc9a93 100644 --- a/chirp/chirp.go +++ b/chirp/chirp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package chirp implements a client to communicate with the BIRD Internet diff --git a/chirp/chirp_test.go b/chirp/chirp_test.go index c545c277d6e87..eedc17f48afa9 100644 --- a/chirp/chirp_test.go +++ b/chirp/chirp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package chirp diff --git a/client/local/cert.go b/client/local/cert.go index bfaac7303297b..701bfe026ceed 100644 --- a/client/local/cert.go +++ b/client/local/cert.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !ts_omit_acme diff --git a/client/local/debugportmapper.go b/client/local/debugportmapper.go index 04ed1c109a54f..1cbb3ee0a303e 100644 --- a/client/local/debugportmapper.go +++ b/client/local/debugportmapper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_debugportmapper diff --git a/client/local/local.go b/client/local/local.go index 195a91b1ef4a9..465ba0d67c820 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package local contains a Go client for the Tailscale LocalAPI. diff --git a/client/local/local_test.go b/client/local/local_test.go index 0e01e74cd1813..a5377fbd677a9 100644 --- a/client/local/local_test.go +++ b/client/local/local_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/local/serve.go b/client/local/serve.go index 51d15e7e5439b..7f9a16a03f825 100644 --- a/client/local/serve.go +++ b/client/local/serve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_serve diff --git a/client/local/syspolicy.go b/client/local/syspolicy.go index 6eff177833786..49708fa154d9a 100644 --- a/client/local/syspolicy.go +++ b/client/local/syspolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_syspolicy diff --git a/client/local/tailnetlock.go b/client/local/tailnetlock.go index 9d37d2f3553d5..0084cb42e3ab0 100644 --- a/client/local/tailnetlock.go +++ b/client/local/tailnetlock.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/client/systray/logo.go b/client/systray/logo.go index 3467d1b741f93..4cd19778dc3a7 100644 --- a/client/systray/logo.go +++ b/client/systray/logo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo || !darwin diff --git a/client/systray/startup-creator.go b/client/systray/startup-creator.go index 
cb354856d7f97..34d85e6175fc6 100644 --- a/client/systray/startup-creator.go +++ b/client/systray/startup-creator.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo || !darwin diff --git a/client/systray/systray.go b/client/systray/systray.go index b9e8fcc59043c..8c30dbf05ef3e 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo || !darwin diff --git a/client/tailscale/acl.go b/client/tailscale/acl.go index 929ec2b3b1ca9..e69d45a2bff5d 100644 --- a/client/tailscale/acl.go +++ b/client/tailscale/acl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/apitype/apitype.go b/client/tailscale/apitype/apitype.go index 6d239d082cd95..d7d1440be9f8a 100644 --- a/client/tailscale/apitype/apitype.go +++ b/client/tailscale/apitype/apitype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package apitype contains types for the Tailscale LocalAPI and control plane API. 
diff --git a/client/tailscale/apitype/controltype.go b/client/tailscale/apitype/controltype.go index d9d79f0ade38b..2259bb8861aad 100644 --- a/client/tailscale/apitype/controltype.go +++ b/client/tailscale/apitype/controltype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package apitype diff --git a/client/tailscale/cert.go b/client/tailscale/cert.go index 4f351ab990984..797c5535d17f5 100644 --- a/client/tailscale/cert.go +++ b/client/tailscale/cert.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !ts_omit_acme diff --git a/client/tailscale/devices.go b/client/tailscale/devices.go index 0664f9e63edb1..2b2cf7a0cd049 100644 --- a/client/tailscale/devices.go +++ b/client/tailscale/devices.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/dns.go b/client/tailscale/dns.go index bbdc7c56c65f7..427caea0fc593 100644 --- a/client/tailscale/dns.go +++ b/client/tailscale/dns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/example/servetls/servetls.go b/client/tailscale/example/servetls/servetls.go index 0ade420887634..864dafd07b242 100644 --- a/client/tailscale/example/servetls/servetls.go +++ b/client/tailscale/example/servetls/servetls.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The servetls program shows how to run an HTTPS server diff --git a/client/tailscale/keys.go b/client/tailscale/keys.go index 79e19e99880f7..6edbae034a759 100644 --- 
a/client/tailscale/keys.go +++ b/client/tailscale/keys.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscale diff --git a/client/tailscale/localclient_aliases.go b/client/tailscale/localclient_aliases.go index e3492e841b1c9..98a72068a5eba 100644 --- a/client/tailscale/localclient_aliases.go +++ b/client/tailscale/localclient_aliases.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscale diff --git a/client/tailscale/required_version.go b/client/tailscale/required_version.go index d6bca1c6d8ff9..fb994e55fb604 100644 --- a/client/tailscale/required_version.go +++ b/client/tailscale/required_version.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !go1.23 diff --git a/client/tailscale/routes.go b/client/tailscale/routes.go index b72f2743ff9fb..aa6e49e3b7fc2 100644 --- a/client/tailscale/routes.go +++ b/client/tailscale/routes.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/tailnet.go b/client/tailscale/tailnet.go index 9453962c908c8..75ca7dfd60a33 100644 --- a/client/tailscale/tailnet.go +++ b/client/tailscale/tailnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/tailscale.go b/client/tailscale/tailscale.go index 76e44454b2fc2..d5585a052bb99 100644 --- a/client/tailscale/tailscale.go +++ b/client/tailscale/tailscale.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/client/tailscale/tailscale_test.go b/client/tailscale/tailscale_test.go index 67379293bd580..fe2fbe383b679 100644 --- a/client/tailscale/tailscale_test.go +++ b/client/tailscale/tailscale_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscale diff --git a/client/web/assets.go b/client/web/assets.go index c4f4e9e3bcf66..b9e4226299dd1 100644 --- a/client/web/assets.go +++ b/client/web/assets.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package web diff --git a/client/web/auth.go b/client/web/auth.go index 27eb24ee444c5..4e25b049b30ac 100644 --- a/client/web/auth.go +++ b/client/web/auth.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package web diff --git a/client/web/qnap.go b/client/web/qnap.go index 9bde64bf5885b..132b95aed086d 100644 --- a/client/web/qnap.go +++ b/client/web/qnap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // qnap.go contains handlers and logic, such as authentication, diff --git a/client/web/src/api.ts b/client/web/src/api.ts index e780c76459dfd..246f74ff231c2 100644 --- a/client/web/src/api.ts +++ b/client/web/src/api.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useCallback } from "react" diff --git a/client/web/src/components/acl-tag.tsx b/client/web/src/components/acl-tag.tsx index cb34375ed293c..95ab764c4a56d 100644 --- a/client/web/src/components/acl-tag.tsx +++ b/client/web/src/components/acl-tag.tsx @@ -1,4 +1,4 @@ -// Copyright (c) 
Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/address-copy-card.tsx b/client/web/src/components/address-copy-card.tsx index 6b4f25bed73f4..697086f15c58d 100644 --- a/client/web/src/components/address-copy-card.tsx +++ b/client/web/src/components/address-copy-card.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import * as Primitive from "@radix-ui/react-popover" diff --git a/client/web/src/components/app.tsx b/client/web/src/components/app.tsx index 981dd8889c4b2..b885125b7f278 100644 --- a/client/web/src/components/app.tsx +++ b/client/web/src/components/app.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/components/control-components.tsx b/client/web/src/components/control-components.tsx index ffb0a2999558f..42ed25107c986 100644 --- a/client/web/src/components/control-components.tsx +++ b/client/web/src/components/control-components.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/components/exit-node-selector.tsx b/client/web/src/components/exit-node-selector.tsx index c0fd5e730b04c..a564ebbfc56b1 100644 --- a/client/web/src/components/exit-node-selector.tsx +++ b/client/web/src/components/exit-node-selector.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/login-toggle.tsx b/client/web/src/components/login-toggle.tsx index f5c4efe3ce2ac..397cb2ee1f8f1 100644 --- 
a/client/web/src/components/login-toggle.tsx +++ b/client/web/src/components/login-toggle.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/nice-ip.tsx b/client/web/src/components/nice-ip.tsx index f00d763f96db9..1f90d1cd73802 100644 --- a/client/web/src/components/nice-ip.tsx +++ b/client/web/src/components/nice-ip.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/update-available.tsx b/client/web/src/components/update-available.tsx index 763007de889c7..9d678d9073aa7 100644 --- a/client/web/src/components/update-available.tsx +++ b/client/web/src/components/update-available.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/components/views/device-details-view.tsx b/client/web/src/components/views/device-details-view.tsx index fa58e52aea473..e24aacf520743 100644 --- a/client/web/src/components/views/device-details-view.tsx +++ b/client/web/src/components/views/device-details-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/views/disconnected-view.tsx b/client/web/src/components/views/disconnected-view.tsx index 492c69e469ef1..5ec86aae4643b 100644 --- a/client/web/src/components/views/disconnected-view.tsx +++ b/client/web/src/components/views/disconnected-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import 
React from "react" diff --git a/client/web/src/components/views/home-view.tsx b/client/web/src/components/views/home-view.tsx index 8073823466b34..e9051f22ba1cd 100644 --- a/client/web/src/components/views/home-view.tsx +++ b/client/web/src/components/views/home-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/views/login-view.tsx b/client/web/src/components/views/login-view.tsx index f8c15b16dbcaa..a6c4a9ae2c7ab 100644 --- a/client/web/src/components/views/login-view.tsx +++ b/client/web/src/components/views/login-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/components/views/ssh-view.tsx b/client/web/src/components/views/ssh-view.tsx index 1337b9fdd10fd..67c324fa53563 100644 --- a/client/web/src/components/views/ssh-view.tsx +++ b/client/web/src/components/views/ssh-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/views/subnet-router-view.tsx b/client/web/src/components/views/subnet-router-view.tsx index 26369112c1cbf..7f4c682996033 100644 --- a/client/web/src/components/views/subnet-router-view.tsx +++ b/client/web/src/components/views/subnet-router-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/components/views/updating-view.tsx b/client/web/src/components/views/updating-view.tsx index 0c844c7d09faa..d39dc5c63fd27 100644 --- a/client/web/src/components/views/updating-view.tsx +++ 
b/client/web/src/components/views/updating-view.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/hooks/auth.ts b/client/web/src/hooks/auth.ts index 51eb0c400bae9..c3d0cdc877022 100644 --- a/client/web/src/hooks/auth.ts +++ b/client/web/src/hooks/auth.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useCallback, useEffect, useState } from "react" diff --git a/client/web/src/hooks/exit-nodes.ts b/client/web/src/hooks/exit-nodes.ts index 5e47fbc227cd4..78f8a383dbb00 100644 --- a/client/web/src/hooks/exit-nodes.ts +++ b/client/web/src/hooks/exit-nodes.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useMemo } from "react" diff --git a/client/web/src/hooks/self-update.ts b/client/web/src/hooks/self-update.ts index eb10463c1abe1..e63d6eddaeebf 100644 --- a/client/web/src/hooks/self-update.ts +++ b/client/web/src/hooks/self-update.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useCallback, useEffect, useState } from "react" diff --git a/client/web/src/hooks/toaster.ts b/client/web/src/hooks/toaster.ts index 41fb4f42d0918..8c30cab58a6a5 100644 --- a/client/web/src/hooks/toaster.ts +++ b/client/web/src/hooks/toaster.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useRawToasterForHook } from "src/ui/toaster" diff --git a/client/web/src/hooks/ts-web-connected.ts b/client/web/src/hooks/ts-web-connected.ts index 3145663d7654d..bd020c9e9b595 100644 --- a/client/web/src/hooks/ts-web-connected.ts 
+++ b/client/web/src/hooks/ts-web-connected.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useCallback, useEffect, useState } from "react" diff --git a/client/web/src/index.tsx b/client/web/src/index.tsx index 31ac7890f45f2..2b970ebca8ed7 100644 --- a/client/web/src/index.tsx +++ b/client/web/src/index.tsx @@ -1,10 +1,10 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Preserved js license comment for web client app. /** * @license - * Copyright (c) Tailscale Inc & AUTHORS + * Copyright (c) Tailscale Inc & contributors * SPDX-License-Identifier: BSD-3-Clause */ diff --git a/client/web/src/types.ts b/client/web/src/types.ts index 62fa4c59f1fbf..ebf11d442fc52 100644 --- a/client/web/src/types.ts +++ b/client/web/src/types.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { assertNever } from "src/utils/util" diff --git a/client/web/src/ui/badge.tsx b/client/web/src/ui/badge.tsx index c0caa6403b37e..de8c21e3568ab 100644 --- a/client/web/src/ui/badge.tsx +++ b/client/web/src/ui/badge.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/button.tsx b/client/web/src/ui/button.tsx index 18dc2939f1889..e38f58f02bd2e 100644 --- a/client/web/src/ui/button.tsx +++ b/client/web/src/ui/button.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/card.tsx b/client/web/src/ui/card.tsx index 4e17c3df6d29e..7d3c9b89e8202 100644 --- a/client/web/src/ui/card.tsx +++ 
b/client/web/src/ui/card.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/collapsible.tsx b/client/web/src/ui/collapsible.tsx index 6aa8c0b9f5ca1..bd0b0eedad84a 100644 --- a/client/web/src/ui/collapsible.tsx +++ b/client/web/src/ui/collapsible.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import * as Primitive from "@radix-ui/react-collapsible" diff --git a/client/web/src/ui/dialog.tsx b/client/web/src/ui/dialog.tsx index d5af834ce05d2..6b3bb792b8565 100644 --- a/client/web/src/ui/dialog.tsx +++ b/client/web/src/ui/dialog.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import * as DialogPrimitive from "@radix-ui/react-dialog" diff --git a/client/web/src/ui/empty-state.tsx b/client/web/src/ui/empty-state.tsx index 6ac7fd4fa87e6..3964a55590ab4 100644 --- a/client/web/src/ui/empty-state.tsx +++ b/client/web/src/ui/empty-state.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/input.tsx b/client/web/src/ui/input.tsx index 756e0fc2ea4d6..7cff6bf5bf074 100644 --- a/client/web/src/ui/input.tsx +++ b/client/web/src/ui/input.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/loading-dots.tsx b/client/web/src/ui/loading-dots.tsx index 6b47552a95844..83c60da93a937 100644 --- a/client/web/src/ui/loading-dots.tsx +++ b/client/web/src/ui/loading-dots.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/popover.tsx b/client/web/src/ui/popover.tsx index c0f01c833f465..0139894bb5e4a 100644 --- a/client/web/src/ui/popover.tsx +++ b/client/web/src/ui/popover.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import * as PopoverPrimitive from "@radix-ui/react-popover" diff --git a/client/web/src/ui/portal-container-context.tsx b/client/web/src/ui/portal-container-context.tsx index d25b30bae1731..922cd0d14ea52 100644 --- a/client/web/src/ui/portal-container-context.tsx +++ b/client/web/src/ui/portal-container-context.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import React from "react" diff --git a/client/web/src/ui/profile-pic.tsx b/client/web/src/ui/profile-pic.tsx index 343fb29b490e0..4bbdad878ab4a 100644 --- a/client/web/src/ui/profile-pic.tsx +++ b/client/web/src/ui/profile-pic.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/quick-copy.tsx b/client/web/src/ui/quick-copy.tsx index bc8f916c84144..0c51f820ccf33 100644 --- a/client/web/src/ui/quick-copy.tsx +++ b/client/web/src/ui/quick-copy.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/search-input.tsx b/client/web/src/ui/search-input.tsx index debba371caec6..9b99984acc014 100644 --- a/client/web/src/ui/search-input.tsx +++ b/client/web/src/ui/search-input.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale 
Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/spinner.tsx b/client/web/src/ui/spinner.tsx index 51f6e887b836d..be3dc8d5b4fa5 100644 --- a/client/web/src/ui/spinner.tsx +++ b/client/web/src/ui/spinner.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/toaster.tsx b/client/web/src/ui/toaster.tsx index 18f491f3b2552..677ccde4d5d9b 100644 --- a/client/web/src/ui/toaster.tsx +++ b/client/web/src/ui/toaster.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/ui/toggle.tsx b/client/web/src/ui/toggle.tsx index 4922830058f16..83ca92608a4dc 100644 --- a/client/web/src/ui/toggle.tsx +++ b/client/web/src/ui/toggle.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import cx from "classnames" diff --git a/client/web/src/utils/clipboard.ts b/client/web/src/utils/clipboard.ts index f003bc24079ab..3ca5f281ebb93 100644 --- a/client/web/src/utils/clipboard.ts +++ b/client/web/src/utils/clipboard.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { isPromise } from "src/utils/util" diff --git a/client/web/src/utils/util.test.ts b/client/web/src/utils/util.test.ts index 148f6cc365589..2a598d6505654 100644 --- a/client/web/src/utils/util.test.ts +++ b/client/web/src/utils/util.test.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { isTailscaleIPv6, pluralize } from "src/utils/util" diff --git 
a/client/web/src/utils/util.ts b/client/web/src/utils/util.ts index 5f8eda7b77572..81fc904034c08 100644 --- a/client/web/src/utils/util.ts +++ b/client/web/src/utils/util.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause /** diff --git a/client/web/synology.go b/client/web/synology.go index 922489d78af16..e39cbc9c5c82e 100644 --- a/client/web/synology.go +++ b/client/web/synology.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // synology.go contains handlers and logic, such as authentication, diff --git a/client/web/web.go b/client/web/web.go index dbd3d5df0be86..f8a9e7c1769a2 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package web provides the Tailscale client for web. 
diff --git a/client/web/web_test.go b/client/web/web_test.go index 9ba16bccf4884..6b9a51002b33b 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package web diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index 3a0a8d03e0425..e75e425a455b8 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package clientupdate implements tailscale client update for all supported diff --git a/clientupdate/clientupdate_downloads.go b/clientupdate/clientupdate_downloads.go index 18d3176b42afe..9458f88fe8a18 100644 --- a/clientupdate/clientupdate_downloads.go +++ b/clientupdate/clientupdate_downloads.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux && !android) || windows diff --git a/clientupdate/clientupdate_not_downloads.go b/clientupdate/clientupdate_not_downloads.go index 057b4f2cd7574..aaffb76f05b3e 100644 --- a/clientupdate/clientupdate_not_downloads.go +++ b/clientupdate/clientupdate_not_downloads.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !((linux && !android) || windows) diff --git a/clientupdate/clientupdate_notwindows.go b/clientupdate/clientupdate_notwindows.go index edadc210c8a15..12035ff73495a 100644 --- a/clientupdate/clientupdate_notwindows.go +++ b/clientupdate/clientupdate_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git 
a/clientupdate/clientupdate_test.go b/clientupdate/clientupdate_test.go index b265d56411bdc..089936a3120f1 100644 --- a/clientupdate/clientupdate_test.go +++ b/clientupdate/clientupdate_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package clientupdate diff --git a/clientupdate/clientupdate_windows.go b/clientupdate/clientupdate_windows.go index 5faeda6dd70e3..70a3c509121ea 100644 --- a/clientupdate/clientupdate_windows.go +++ b/clientupdate/clientupdate_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Windows-specific stuff that can't go in clientupdate.go because it needs diff --git a/clientupdate/distsign/distsign.go b/clientupdate/distsign/distsign.go index 954403ae0c62c..c804b855cfc1d 100644 --- a/clientupdate/distsign/distsign.go +++ b/clientupdate/distsign/distsign.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package distsign implements signature and validation of arbitrary diff --git a/clientupdate/distsign/distsign_test.go b/clientupdate/distsign/distsign_test.go index 09a701f499198..0d454771fc9a4 100644 --- a/clientupdate/distsign/distsign_test.go +++ b/clientupdate/distsign/distsign_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package distsign diff --git a/clientupdate/distsign/roots.go b/clientupdate/distsign/roots.go index d5b47b7b62e92..2fab3aab90373 100644 --- a/clientupdate/distsign/roots.go +++ b/clientupdate/distsign/roots.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package distsign diff --git 
a/clientupdate/distsign/roots_test.go b/clientupdate/distsign/roots_test.go index 7a94529538ef1..562b06c1c29c1 100644 --- a/clientupdate/distsign/roots_test.go +++ b/clientupdate/distsign/roots_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package distsign diff --git a/cmd/addlicense/main.go b/cmd/addlicense/main.go index 1cd1b0f19354a..35d97b72f70b4 100644 --- a/cmd/addlicense/main.go +++ b/cmd/addlicense/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Program addlicense adds a license header to a file. @@ -67,7 +67,7 @@ func check(err error) { } var license = ` -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause `[1:] diff --git a/cmd/build-webclient/build-webclient.go b/cmd/build-webclient/build-webclient.go index f92c0858fae25..949d9ef349ef1 100644 --- a/cmd/build-webclient/build-webclient.go +++ b/cmd/build-webclient/build-webclient.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The build-webclient tool generates the static resources needed for the diff --git a/cmd/checkmetrics/checkmetrics.go b/cmd/checkmetrics/checkmetrics.go index fb9e8ab4c61ec..5612ffbf512f9 100644 --- a/cmd/checkmetrics/checkmetrics.go +++ b/cmd/checkmetrics/checkmetrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // checkmetrics validates that all metrics in the tailscale client-metrics diff --git a/cmd/cigocacher/cigocacher.go b/cmd/cigocacher/cigocacher.go index 872cb195355b5..b308afd06d688 100644 --- a/cmd/cigocacher/cigocacher.go +++ b/cmd/cigocacher/cigocacher.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // cigocacher is an opinionated-to-Tailscale client for gocached. It connects diff --git a/cmd/cigocacher/disk.go b/cmd/cigocacher/disk.go index 57a9b80d5609e..e04dac0509300 100644 --- a/cmd/cigocacher/disk.go +++ b/cmd/cigocacher/disk.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/cigocacher/disk_notwindows.go b/cmd/cigocacher/disk_notwindows.go index 705ed92e3d8de..353b734ab9ce7 100644 --- a/cmd/cigocacher/disk_notwindows.go +++ b/cmd/cigocacher/disk_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/cmd/cigocacher/disk_windows.go b/cmd/cigocacher/disk_windows.go index 9efae2c632087..686bcf2b0d68b 100644 --- a/cmd/cigocacher/disk_windows.go +++ b/cmd/cigocacher/disk_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/cigocacher/http.go b/cmd/cigocacher/http.go index 55735f089655e..16d0ae899acbc 100644 --- a/cmd/cigocacher/http.go +++ b/cmd/cigocacher/http.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index a81bd10bd5401..a3f0684faa589 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Cloner is a tool to automate the creation of a Clone method. 
diff --git a/cmd/cloner/cloner_test.go b/cmd/cloner/cloner_test.go index 754a4ac49a220..b06f5c4fa5610 100644 --- a/cmd/cloner/cloner_test.go +++ b/cmd/cloner/cloner_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/cloner/clonerex/clonerex.go b/cmd/cloner/clonerex/clonerex.go index b9f6d60dedb35..1007d0c6b646d 100644 --- a/cmd/cloner/clonerex/clonerex.go +++ b/cmd/cloner/clonerex/clonerex.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer,MapWithPointers,DeeplyNestedMap diff --git a/cmd/cloner/clonerex/clonerex_clone.go b/cmd/cloner/clonerex/clonerex_clone.go index 13e1276c4e4b8..5c161239fc992 100644 --- a/cmd/cloner/clonerex/clonerex_clone.go +++ b/cmd/cloner/clonerex/clonerex_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. 
diff --git a/cmd/connector-gen/advertise-routes.go b/cmd/connector-gen/advertise-routes.go index 446f4906a4d65..57c101e27af7c 100644 --- a/cmd/connector-gen/advertise-routes.go +++ b/cmd/connector-gen/advertise-routes.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/connector-gen/aws.go b/cmd/connector-gen/aws.go index bd2632ae27960..b0d6566b915f6 100644 --- a/cmd/connector-gen/aws.go +++ b/cmd/connector-gen/aws.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/connector-gen/connector-gen.go b/cmd/connector-gen/connector-gen.go index 6947f6410a96f..8693a1bf0490f 100644 --- a/cmd/connector-gen/connector-gen.go +++ b/cmd/connector-gen/connector-gen.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // connector-gen is a tool to generate app connector configuration and flags from service provider address data. 
diff --git a/cmd/connector-gen/github.go b/cmd/connector-gen/github.go index def40872d52c1..a0162aa06cae3 100644 --- a/cmd/connector-gen/github.go +++ b/cmd/connector-gen/github.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/containerboot/egressservices.go b/cmd/containerboot/egressservices.go index 21d9f0bcb9a2b..6526c255eeed7 100644 --- a/cmd/containerboot/egressservices.go +++ b/cmd/containerboot/egressservices.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/egressservices_test.go b/cmd/containerboot/egressservices_test.go index 724626b072c2b..0d8504bdad7fd 100644 --- a/cmd/containerboot/egressservices_test.go +++ b/cmd/containerboot/egressservices_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/forwarding.go b/cmd/containerboot/forwarding.go index 04d34836c92d8..0ec9c36c0bd30 100644 --- a/cmd/containerboot/forwarding.go +++ b/cmd/containerboot/forwarding.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/ingressservices.go b/cmd/containerboot/ingressservices.go index 1a2da95675f4e..d76bf86e0b8ec 100644 --- a/cmd/containerboot/ingressservices.go +++ b/cmd/containerboot/ingressservices.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/ingressservices_test.go b/cmd/containerboot/ingressservices_test.go index 228bbb159f463..46330103e343b 100644 --- 
a/cmd/containerboot/ingressservices_test.go +++ b/cmd/containerboot/ingressservices_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index e566fa483447c..4943bddba7ad4 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/kube_test.go b/cmd/containerboot/kube_test.go index c33714ed12ace..bc80e9cdf2cb3 100644 --- a/cmd/containerboot/kube_test.go +++ b/cmd/containerboot/kube_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index a520b5756ade5..9d8d3f02328e8 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 7007cc15202d9..6eeb59c9b2e7e 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 5fa8e580d5828..bc154c7e9f258 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git 
a/cmd/containerboot/serve_test.go b/cmd/containerboot/serve_test.go index fc18f254dad05..0683346f7159a 100644 --- a/cmd/containerboot/serve_test.go +++ b/cmd/containerboot/serve_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index aab2b86314e23..c35fc14079d85 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/settings_test.go b/cmd/containerboot/settings_test.go index 576ea7f3eef3e..5fa0c7dd10724 100644 --- a/cmd/containerboot/settings_test.go +++ b/cmd/containerboot/settings_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index e5b0b8b8ed1b1..9990600c84c65 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/derper/ace.go b/cmd/derper/ace.go index 56fb68c336cd3..ae2d0cbebb413 100644 --- a/cmd/derper/ace.go +++ b/cmd/derper/ace.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // TODO: docs about all this diff --git a/cmd/derper/bootstrap_dns.go b/cmd/derper/bootstrap_dns.go index a58f040bae687..9abc95df56878 100644 --- a/cmd/derper/bootstrap_dns.go +++ b/cmd/derper/bootstrap_dns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/derper/bootstrap_dns_test.go b/cmd/derper/bootstrap_dns_test.go index 9b99103abfe33..5b765f6d37b5f 100644 --- a/cmd/derper/bootstrap_dns_test.go +++ b/cmd/derper/bootstrap_dns_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/derper/cert.go b/cmd/derper/cert.go index dfd7769905132..979c0d671517f 100644 --- a/cmd/derper/cert.go +++ b/cmd/derper/cert.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index b4e18f6951ae0..e111ed76b7a97 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index ddf45747ac9fe..87f9a0bc084e4 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The derper binary is a simple DERP server. 
diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index d27f8cb20144d..0a2fd8787d61d 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/derper/mesh.go b/cmd/derper/mesh.go index 909b5f2ca18c4..34ea7da856220 100644 --- a/cmd/derper/mesh.go +++ b/cmd/derper/mesh.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/derper/websocket.go b/cmd/derper/websocket.go index 82fd30bed165a..1929f16906659 100644 --- a/cmd/derper/websocket.go +++ b/cmd/derper/websocket.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 5d2179b512c23..549364e5e8f6a 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The derpprobe binary probes derpers. diff --git a/cmd/dist/dist.go b/cmd/dist/dist.go index c7406298d8188..88b9e6fba9133 100644 --- a/cmd/dist/dist.go +++ b/cmd/dist/dist.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The dist command builds Tailscale release packages for distribution. 
diff --git a/cmd/distsign/distsign.go b/cmd/distsign/distsign.go index 051afabcd0b71..e0dba27206be9 100644 --- a/cmd/distsign/distsign.go +++ b/cmd/distsign/distsign.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command distsign tests downloads and signature validating for packages diff --git a/cmd/featuretags/featuretags.go b/cmd/featuretags/featuretags.go index 8c8a2ceaf54ff..f3aae68cc8b17 100644 --- a/cmd/featuretags/featuretags.go +++ b/cmd/featuretags/featuretags.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The featuretags command helps other build tools select Tailscale's Go build diff --git a/cmd/get-authkey/main.go b/cmd/get-authkey/main.go index ec7ab5d2c6158..da98decda6ae5 100644 --- a/cmd/get-authkey/main.go +++ b/cmd/get-authkey/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // get-authkey allocates an authkey using an OAuth API client diff --git a/cmd/gitops-pusher/cache.go b/cmd/gitops-pusher/cache.go index 6792e5e63e9cc..af5c4606c0d50 100644 --- a/cmd/gitops-pusher/cache.go +++ b/cmd/gitops-pusher/cache.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/gitops-pusher/gitops-pusher.go b/cmd/gitops-pusher/gitops-pusher.go index 0cbbda88a18b9..39a60d3064432 100644 --- a/cmd/gitops-pusher/gitops-pusher.go +++ b/cmd/gitops-pusher/gitops-pusher.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command gitops-pusher allows users to use a GitOps flow for managing Tailscale ACLs. 
diff --git a/cmd/gitops-pusher/gitops-pusher_test.go b/cmd/gitops-pusher/gitops-pusher_test.go index e08b06c9cd194..bc339ae6a0b84 100644 --- a/cmd/gitops-pusher/gitops-pusher_test.go +++ b/cmd/gitops-pusher/gitops-pusher_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/hello/hello.go b/cmd/hello/hello.go index fa116b28b15ab..710de49cd67a8 100644 --- a/cmd/hello/hello.go +++ b/cmd/hello/hello.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The hello binary runs hello.ts.net. diff --git a/cmd/jsonimports/format.go b/cmd/jsonimports/format.go index 6dbd175583a4d..e990d0e6745c3 100644 --- a/cmd/jsonimports/format.go +++ b/cmd/jsonimports/format.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/jsonimports/format_test.go b/cmd/jsonimports/format_test.go index 28654eb4550ee..fb3d6fa09698d 100644 --- a/cmd/jsonimports/format_test.go +++ b/cmd/jsonimports/format_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/jsonimports/jsonimports.go b/cmd/jsonimports/jsonimports.go index 4be2e10cbe091..6903844e121ca 100644 --- a/cmd/jsonimports/jsonimports.go +++ b/cmd/jsonimports/jsonimports.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The jsonimports tool formats all Go source files in the repository diff --git a/cmd/k8s-nameserver/main.go b/cmd/k8s-nameserver/main.go index 84e65452d2334..1b219fb1ab924 100644 --- a/cmd/k8s-nameserver/main.go +++ b/cmd/k8s-nameserver/main.go @@ -1,4 
+1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-nameserver/main_test.go b/cmd/k8s-nameserver/main_test.go index bca010048664a..0624800836675 100644 --- a/cmd/k8s-nameserver/main_test.go +++ b/cmd/k8s-nameserver/main_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/api-server-proxy-pg.go b/cmd/k8s-operator/api-server-proxy-pg.go index 1a81e4967e5d8..ff04d553a7da3 100644 --- a/cmd/k8s-operator/api-server-proxy-pg.go +++ b/cmd/k8s-operator/api-server-proxy-pg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/api-server-proxy-pg_test.go b/cmd/k8s-operator/api-server-proxy-pg_test.go index dee5057236675..d7e88123fb28b 100644 --- a/cmd/k8s-operator/api-server-proxy-pg_test.go +++ b/cmd/k8s-operator/api-server-proxy-pg_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/k8s-operator/api-server-proxy.go b/cmd/k8s-operator/api-server-proxy.go index 70333d2c48d41..492590c9fecd6 100644 --- a/cmd/k8s-operator/api-server-proxy.go +++ b/cmd/k8s-operator/api-server-proxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/connector.go b/cmd/k8s-operator/connector.go index f4d518faa3aad..0c2d32482e78b 100644 --- a/cmd/k8s-operator/connector.go +++ b/cmd/k8s-operator/connector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index afc7d2d6e3975..7866f3e002921 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/deploy/chart/Chart.yaml b/cmd/k8s-operator/deploy/chart/Chart.yaml index 9db6389d1d944..b16fc4c37fb8a 100644 --- a/cmd/k8s-operator/deploy/chart/Chart.yaml +++ b/cmd/k8s-operator/deploy/chart/Chart.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v2 diff --git a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml index d6e9d1bf48ef8..2ca4f398ad2da 100644 --- a/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/apiserverproxy-rbac.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # If old setting used, enable both old (operator) and new (ProxyGroup) workflows. 
diff --git a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml index df9cb8ce1bcb0..0c0cb64cbb4ed 100644 --- a/cmd/k8s-operator/deploy/chart/templates/deployment.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/deployment.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: apps/v1 diff --git a/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml b/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml index 759ba341a8f21..34928d6dcd6c8 100644 --- a/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/oauth-secret.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause {{ if and .Values.oauth .Values.oauth.clientId (not .Values.oauth.audience) -}} diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 930eef852c9ce..92decef17aab4 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 diff --git a/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml index fa552a7c7e39a..89d6736b790a1 100644 --- a/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/proxy-rbac.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 diff --git a/cmd/k8s-operator/deploy/chart/values.yaml b/cmd/k8s-operator/deploy/chart/values.yaml index 
eb11fc7f27a86..8517d77aa5c84 100644 --- a/cmd/k8s-operator/deploy/chart/values.yaml +++ b/cmd/k8s-operator/deploy/chart/values.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # Operator oauth credentials. If unset a Secret named operator-oauth must be diff --git a/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml b/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml index 5818fa69fff7d..2dc9cad228ffa 100644 --- a/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml +++ b/cmd/k8s-operator/deploy/manifests/authproxy-rbac.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 5a64f2c7db307..4c9822847d677 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 diff --git a/cmd/k8s-operator/deploy/manifests/templates/01-header.yaml b/cmd/k8s-operator/deploy/manifests/templates/01-header.yaml index a96d4c37ee421..800025e90003b 100644 --- a/cmd/k8s-operator/deploy/manifests/templates/01-header.yaml +++ b/cmd/k8s-operator/deploy/manifests/templates/01-header.yaml @@ -1,3 +1,3 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go index 1a9395aa00aa9..e75bcd4c2e1da 100644 --- a/cmd/k8s-operator/dnsrecords.go +++ b/cmd/k8s-operator/dnsrecords.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 13898078fd4ba..0d89c4a863e4d 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ b/cmd/k8s-operator/dnsrecords_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/e2e/doc.go b/cmd/k8s-operator/e2e/doc.go index 40fa1f36a1d82..c0cc363160f70 100644 --- a/cmd/k8s-operator/e2e/doc.go +++ b/cmd/k8s-operator/e2e/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package e2e runs end-to-end tests for the Tailscale Kubernetes operator. diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go index c5b238e852b89..eb05efa0cd1b8 100644 --- a/cmd/k8s-operator/e2e/ingress_test.go +++ b/cmd/k8s-operator/e2e/ingress_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package e2e diff --git a/cmd/k8s-operator/e2e/main_test.go b/cmd/k8s-operator/e2e/main_test.go index 68f10dbb064cf..cb5c35c0054b2 100644 --- a/cmd/k8s-operator/e2e/main_test.go +++ b/cmd/k8s-operator/e2e/main_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package e2e diff --git a/cmd/k8s-operator/e2e/pebble.go b/cmd/k8s-operator/e2e/pebble.go index a3175a4edc771..a3ccb50cd0493 100644 --- a/cmd/k8s-operator/e2e/pebble.go +++ b/cmd/k8s-operator/e2e/pebble.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package e2e diff --git a/cmd/k8s-operator/e2e/proxy_test.go b/cmd/k8s-operator/e2e/proxy_test.go index 
b61d6d5763810..f7d11d278ef77 100644 --- a/cmd/k8s-operator/e2e/proxy_test.go +++ b/cmd/k8s-operator/e2e/proxy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package e2e diff --git a/cmd/k8s-operator/e2e/setup.go b/cmd/k8s-operator/e2e/setup.go index 00e75ddd5b3eb..baf763ac61a60 100644 --- a/cmd/k8s-operator/e2e/setup.go +++ b/cmd/k8s-operator/e2e/setup.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package e2e diff --git a/cmd/k8s-operator/e2e/ssh.go b/cmd/k8s-operator/e2e/ssh.go index 407e4e085b7a9..8000d13267262 100644 --- a/cmd/k8s-operator/e2e/ssh.go +++ b/cmd/k8s-operator/e2e/ssh.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package e2e diff --git a/cmd/k8s-operator/egress-eps.go b/cmd/k8s-operator/egress-eps.go index 88da9935320bf..5181edf49a26c 100644 --- a/cmd/k8s-operator/egress-eps.go +++ b/cmd/k8s-operator/egress-eps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/egress-eps_test.go b/cmd/k8s-operator/egress-eps_test.go index bd80112aeb8a2..47acb64f27458 100644 --- a/cmd/k8s-operator/egress-eps_test.go +++ b/cmd/k8s-operator/egress-eps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index ebab23ed06337..a8f306353d880 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/egress-pod-readiness_test.go b/cmd/k8s-operator/egress-pod-readiness_test.go index 3c35d9043ebe6..baa1442671907 100644 --- a/cmd/k8s-operator/egress-pod-readiness_test.go +++ b/cmd/k8s-operator/egress-pod-readiness_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/egress-services-readiness.go b/cmd/k8s-operator/egress-services-readiness.go index 80f3c7d285141..965dc08f87f1d 100644 --- a/cmd/k8s-operator/egress-services-readiness.go +++ b/cmd/k8s-operator/egress-services-readiness.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/egress-services-readiness_test.go b/cmd/k8s-operator/egress-services-readiness_test.go index fdff4fafa3240..ba89903df2f29 100644 --- a/cmd/k8s-operator/egress-services-readiness_test.go +++ b/cmd/k8s-operator/egress-services-readiness_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index 05be8efed9402..90ab2c88270ee 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go index 202804d3011fd..45861449191cb 100644 --- a/cmd/k8s-operator/egress-services_test.go +++ b/cmd/k8s-operator/egress-services_test.go @@ -1,4 +1,4 @@ 
-// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index ca54e90909954..9a910da3eb945 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/generate/main_test.go b/cmd/k8s-operator/generate/main_test.go index 5ea7fec80971a..775d16ba1e827 100644 --- a/cmd/k8s-operator/generate/main_test.go +++ b/cmd/k8s-operator/generate/main_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 && !windows diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 1b35d853688cd..5966ace3c388e 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 0f5527185a738..f285bd8ee947d 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 9ef173ecef746..4952e789f6a02 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index 52afc3be40c50..aac40897cc88e 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/logger.go b/cmd/k8s-operator/logger.go index 46b1fc0c82d48..45018e37eaf30 100644 --- a/cmd/k8s-operator/logger.go +++ b/cmd/k8s-operator/logger.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/metrics_resources.go b/cmd/k8s-operator/metrics_resources.go index 0579e34661a11..afb055018bb13 100644 --- a/cmd/k8s-operator/metrics_resources.go +++ b/cmd/k8s-operator/metrics_resources.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 39db5f0f9cf16..522b460031530 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index 858cd973d82c2..531190cf21dc2 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/nodeport-service-ports.go b/cmd/k8s-operator/nodeport-service-ports.go index a9504e3e94f88..f8d28860bf84e 100644 
--- a/cmd/k8s-operator/nodeport-service-ports.go +++ b/cmd/k8s-operator/nodeport-service-ports.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/k8s-operator/nodeport-services-ports_test.go b/cmd/k8s-operator/nodeport-services-ports_test.go index 9418bb8446bd8..9c147f79aecbd 100644 --- a/cmd/k8s-operator/nodeport-services-ports_test.go +++ b/cmd/k8s-operator/nodeport-services-ports_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 7bb8b95f0855f..4f48c1812643a 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index d0f42fe6dfad5..53d16fbd225f3 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/proxyclass.go b/cmd/k8s-operator/proxyclass.go index 2d51b351d3907..c0ea46116373b 100644 --- a/cmd/k8s-operator/proxyclass.go +++ b/cmd/k8s-operator/proxyclass.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/proxyclass_test.go b/cmd/k8s-operator/proxyclass_test.go index ae0f63d99ea4d..171cfc5904cd3 100644 --- a/cmd/k8s-operator/proxyclass_test.go +++ b/cmd/k8s-operator/proxyclass_test.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 3a50ed8fb4c2b..13c3d7b715e50 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 930b7049d8ea9..6bce004eaa88d 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 2bcc9fb7a9720..c58bd2bb71dc5 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 2919e535c0dca..e81fe2d66f6ed 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index afe54ed98bc49..81c0d25ec0ba4 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/svc-for-pg.go 
b/cmd/k8s-operator/svc-for-pg.go index 144d3755811da..e0383824a6313 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index baaa07727df06..d01f8e983ad75 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index ec7bb8080dec7..31be22aa12ca3 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/tailnet.go b/cmd/k8s-operator/tailnet.go index 8d749545faa46..57c749bec31ee 100644 --- a/cmd/k8s-operator/tailnet.go +++ b/cmd/k8s-operator/tailnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index b0e2cfd734fad..0e4a3eee40e73 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/tsclient.go b/cmd/k8s-operator/tsclient.go index d22fa1797dd5c..063c2f768c6c6 100644 --- a/cmd/k8s-operator/tsclient.go +++ b/cmd/k8s-operator/tsclient.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/tsclient_test.go b/cmd/k8s-operator/tsclient_test.go index 16de512d5809f..c08705c78ed8b 100644 --- a/cmd/k8s-operator/tsclient_test.go +++ b/cmd/k8s-operator/tsclient_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 3e8608bc8db8e..60ed24a7006b1 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index b4a10f2962ae9..ab06c01f81b7d 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/tsrecorder_specs_test.go b/cmd/k8s-operator/tsrecorder_specs_test.go index 0d78129fc76b3..47997d1d31b0f 100644 --- a/cmd/k8s-operator/tsrecorder_specs_test.go +++ b/cmd/k8s-operator/tsrecorder_specs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index f7ff797b1ebba..bea734d865f66 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git 
a/cmd/k8s-proxy/internal/config/config.go b/cmd/k8s-proxy/internal/config/config.go index 0f0bd1bfcf39d..91b4c54a5c32d 100644 --- a/cmd/k8s-proxy/internal/config/config.go +++ b/cmd/k8s-proxy/internal/config/config.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/k8s-proxy/internal/config/config_test.go b/cmd/k8s-proxy/internal/config/config_test.go index bcb1b9ebd14e6..ac6c6cf93f623 100644 --- a/cmd/k8s-proxy/internal/config/config_test.go +++ b/cmd/k8s-proxy/internal/config/config_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package config diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 9b2bb67494659..e00d43a948dba 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/mkmanifest/main.go b/cmd/mkmanifest/main.go index fb3c729f12d21..d08700341e7dc 100644 --- a/cmd/mkmanifest/main.go +++ b/cmd/mkmanifest/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The mkmanifest command is a simple helper utility to create a '.syso' file diff --git a/cmd/mkpkg/main.go b/cmd/mkpkg/main.go index 5e26b07f8f9f8..6f4de7e299b50 100644 --- a/cmd/mkpkg/main.go +++ b/cmd/mkpkg/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // mkpkg builds the Tailscale rpm and deb packages. 
diff --git a/cmd/mkversion/mkversion.go b/cmd/mkversion/mkversion.go index c8c8bf17930f6..ec9b0bb85ace4 100644 --- a/cmd/mkversion/mkversion.go +++ b/cmd/mkversion/mkversion.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // mkversion gets version info from git and outputs a bunch of shell variables diff --git a/cmd/nardump/nardump.go b/cmd/nardump/nardump.go index f8947b02b852c..c8db24cb6736d 100644 --- a/cmd/nardump/nardump.go +++ b/cmd/nardump/nardump.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // nardump is like nix-store --dump, but in Go, writing a NAR diff --git a/cmd/nardump/nardump_test.go b/cmd/nardump/nardump_test.go index 3b87e7962d638..c1ca825e1e288 100644 --- a/cmd/nardump/nardump_test.go +++ b/cmd/nardump/nardump_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/natc/ippool/consensusippool.go b/cmd/natc/ippool/consensusippool.go index bfa909b69a3b4..d595d3e7ddc7a 100644 --- a/cmd/natc/ippool/consensusippool.go +++ b/cmd/natc/ippool/consensusippool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool diff --git a/cmd/natc/ippool/consensusippool_test.go b/cmd/natc/ippool/consensusippool_test.go index 242cdffaf26d3..fe42b2b223a8b 100644 --- a/cmd/natc/ippool/consensusippool_test.go +++ b/cmd/natc/ippool/consensusippool_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool diff --git a/cmd/natc/ippool/consensusippoolserialize.go b/cmd/natc/ippool/consensusippoolserialize.go index 
97dc02f2c7d7c..be3312d300bad 100644 --- a/cmd/natc/ippool/consensusippoolserialize.go +++ b/cmd/natc/ippool/consensusippoolserialize.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool diff --git a/cmd/natc/ippool/ippool.go b/cmd/natc/ippool/ippool.go index 5a2dcbec911e0..641702f5d31e8 100644 --- a/cmd/natc/ippool/ippool.go +++ b/cmd/natc/ippool/ippool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // ippool implements IP address storage, creation, and retrieval for cmd/natc diff --git a/cmd/natc/ippool/ippool_test.go b/cmd/natc/ippool/ippool_test.go index 8d474f86a97ed..405ec61564ed8 100644 --- a/cmd/natc/ippool/ippool_test.go +++ b/cmd/natc/ippool/ippool_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool diff --git a/cmd/natc/ippool/ipx.go b/cmd/natc/ippool/ipx.go index 8259a56dbf30e..4f52d6ede049a 100644 --- a/cmd/natc/ippool/ipx.go +++ b/cmd/natc/ippool/ipx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool diff --git a/cmd/natc/ippool/ipx_test.go b/cmd/natc/ippool/ipx_test.go index 2e2b9d3d45baf..cb6889b683978 100644 --- a/cmd/natc/ippool/ipx_test.go +++ b/cmd/natc/ippool/ipx_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ippool diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index a4f53d657d98e..11975b7d2e1a6 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // 
The natc command is a work-in-progress implementation of a NAT based diff --git a/cmd/natc/natc_test.go b/cmd/natc/natc_test.go index c0a66deb8a4da..e1cc061234d0e 100644 --- a/cmd/natc/natc_test.go +++ b/cmd/natc/natc_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/netlogfmt/main.go b/cmd/netlogfmt/main.go index 0af52f862936c..212b36fb6b0ae 100644 --- a/cmd/netlogfmt/main.go +++ b/cmd/netlogfmt/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // netlogfmt parses a stream of JSON log messages from stdin and diff --git a/cmd/nginx-auth/nginx-auth.go b/cmd/nginx-auth/nginx-auth.go index 09da74da1d3c8..6b791eb6c35fa 100644 --- a/cmd/nginx-auth/nginx-auth.go +++ b/cmd/nginx-auth/nginx-auth.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go index 35e03d268e186..84863865991bc 100644 --- a/cmd/omitsize/omitsize.go +++ b/cmd/omitsize/omitsize.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The omitsize tool prints out how large the Tailscale binaries are with diff --git a/cmd/pgproxy/pgproxy.go b/cmd/pgproxy/pgproxy.go index e102c8ae47411..ded6fa695fe88 100644 --- a/cmd/pgproxy/pgproxy.go +++ b/cmd/pgproxy/pgproxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The pgproxy server is a proxy for the Postgres wire protocol. 
diff --git a/cmd/printdep/printdep.go b/cmd/printdep/printdep.go index 044283209c08c..c4ba5b79a3357 100644 --- a/cmd/printdep/printdep.go +++ b/cmd/printdep/printdep.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The printdep command is a build system tool for printing out information diff --git a/cmd/proxy-test-server/proxy-test-server.go b/cmd/proxy-test-server/proxy-test-server.go index 9f8c94a384ea5..2c705670446ba 100644 --- a/cmd/proxy-test-server/proxy-test-server.go +++ b/cmd/proxy-test-server/proxy-test-server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The proxy-test-server command is a simple HTTP proxy server for testing diff --git a/cmd/proxy-to-grafana/proxy-to-grafana.go b/cmd/proxy-to-grafana/proxy-to-grafana.go index 27f5e338c8d65..23f2640597d59 100644 --- a/cmd/proxy-to-grafana/proxy-to-grafana.go +++ b/cmd/proxy-to-grafana/proxy-to-grafana.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // proxy-to-grafana is a reverse proxy which identifies users based on their diff --git a/cmd/proxy-to-grafana/proxy-to-grafana_test.go b/cmd/proxy-to-grafana/proxy-to-grafana_test.go index 4831d54364943..be217043f12d3 100644 --- a/cmd/proxy-to-grafana/proxy-to-grafana_test.go +++ b/cmd/proxy-to-grafana/proxy-to-grafana_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sniproxy/handlers.go b/cmd/sniproxy/handlers.go index 1973eecc017a3..157b9b75f885a 100644 --- a/cmd/sniproxy/handlers.go +++ b/cmd/sniproxy/handlers.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sniproxy/handlers_test.go b/cmd/sniproxy/handlers_test.go index 4f9fc6a34b184..ad0637421cecc 100644 --- a/cmd/sniproxy/handlers_test.go +++ b/cmd/sniproxy/handlers_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sniproxy/server.go b/cmd/sniproxy/server.go index b322b6f4b1137..0ff301fe92136 100644 --- a/cmd/sniproxy/server.go +++ b/cmd/sniproxy/server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sniproxy/server_test.go b/cmd/sniproxy/server_test.go index d56f2aa754f85..8e06e8abedf8c 100644 --- a/cmd/sniproxy/server_test.go +++ b/cmd/sniproxy/server_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sniproxy/sniproxy.go b/cmd/sniproxy/sniproxy.go index 2115c8095b351..45503feca8718 100644 --- a/cmd/sniproxy/sniproxy.go +++ b/cmd/sniproxy/sniproxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The sniproxy is an outbound SNI proxy. 
It receives TLS connections over diff --git a/cmd/sniproxy/sniproxy_test.go b/cmd/sniproxy/sniproxy_test.go index 65e059efaa1d4..a404799d29d7d 100644 --- a/cmd/sniproxy/sniproxy_test.go +++ b/cmd/sniproxy/sniproxy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/speedtest/speedtest.go b/cmd/speedtest/speedtest.go index 9a457ed6c7486..2cea97b1edef1 100644 --- a/cmd/speedtest/speedtest.go +++ b/cmd/speedtest/speedtest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Program speedtest provides the speedtest command. The reason to keep it separate from diff --git a/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go b/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go index 39af584ecd481..3c3ade3cd35a3 100644 --- a/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go +++ b/cmd/ssh-auth-none-demo/ssh-auth-none-demo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // ssh-auth-none-demo is a demo SSH server that's meant to run on the diff --git a/cmd/stunc/stunc.go b/cmd/stunc/stunc.go index c4b2eedd39f90..e51cd15ba2248 100644 --- a/cmd/stunc/stunc.go +++ b/cmd/stunc/stunc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command stunc makes a STUN request to a STUN server and prints the result. diff --git a/cmd/stund/stund.go b/cmd/stund/stund.go index 1055d966f42c5..a27e520444464 100644 --- a/cmd/stund/stund.go +++ b/cmd/stund/stund.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The stund binary is a standalone STUN server. 
diff --git a/cmd/stunstamp/stunstamp.go b/cmd/stunstamp/stunstamp.go index 153dc9303bbb0..cfedd82bdd5cc 100644 --- a/cmd/stunstamp/stunstamp.go +++ b/cmd/stunstamp/stunstamp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The stunstamp binary measures round-trip latency with DERPs. diff --git a/cmd/stunstamp/stunstamp_default.go b/cmd/stunstamp/stunstamp_default.go index a244d9aea6410..3f6613cd060ee 100644 --- a/cmd/stunstamp/stunstamp_default.go +++ b/cmd/stunstamp/stunstamp_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/cmd/stunstamp/stunstamp_linux.go b/cmd/stunstamp/stunstamp_linux.go index 387805feff2f1..201e2f83b384c 100644 --- a/cmd/stunstamp/stunstamp_linux.go +++ b/cmd/stunstamp/stunstamp_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/sync-containers/main.go b/cmd/sync-containers/main.go index 63efa54531b10..ab2a38bd66dab 100644 --- a/cmd/sync-containers/main.go +++ b/cmd/sync-containers/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/systray/systray.go b/cmd/systray/systray.go index d35595e258e0f..9dc35f1420bee 100644 --- a/cmd/systray/systray.go +++ b/cmd/systray/systray.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo || !darwin diff --git a/cmd/tailscale/cli/appcroutes.go b/cmd/tailscale/cli/appcroutes.go index 4a1ba87e35bcc..2ea001aec9c84 100644 --- a/cmd/tailscale/cli/appcroutes.go +++ b/cmd/tailscale/cli/appcroutes.go @@ 
-1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/bugreport.go b/cmd/tailscale/cli/bugreport.go index 50e6ffd82bedc..3ffaffa8b1fa5 100644 --- a/cmd/tailscale/cli/bugreport.go +++ b/cmd/tailscale/cli/bugreport.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/cert.go b/cmd/tailscale/cli/cert.go index 171eebe1eafc9..f38ddbacf1804 100644 --- a/cmd/tailscale/cli/cert.go +++ b/cmd/tailscale/cli/cert.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !ts_omit_acme diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 5ebc23a5befea..4d16cfe699537 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cli contains the cmd/tailscale CLI code in a package that can be included diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 8762b7aaeb905..41824701df551 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/configure-jetkvm.go b/cmd/tailscale/cli/configure-jetkvm.go index c80bf673605cf..1956ac836fe74 100644 --- a/cmd/tailscale/cli/configure-jetkvm.go +++ b/cmd/tailscale/cli/configure-jetkvm.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android 
&& arm diff --git a/cmd/tailscale/cli/configure-kube.go b/cmd/tailscale/cli/configure-kube.go index bf5624856167a..3dcec250f01ef 100644 --- a/cmd/tailscale/cli/configure-kube.go +++ b/cmd/tailscale/cli/configure-kube.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_kube diff --git a/cmd/tailscale/cli/configure-kube_omit.go b/cmd/tailscale/cli/configure-kube_omit.go index 130f2870fab44..946fa2294d5aa 100644 --- a/cmd/tailscale/cli/configure-kube_omit.go +++ b/cmd/tailscale/cli/configure-kube_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_kube diff --git a/cmd/tailscale/cli/configure-kube_test.go b/cmd/tailscale/cli/configure-kube_test.go index 0c8b6b2b6cc0e..2df54d5751497 100644 --- a/cmd/tailscale/cli/configure-kube_test.go +++ b/cmd/tailscale/cli/configure-kube_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_kube diff --git a/cmd/tailscale/cli/configure-synology-cert.go b/cmd/tailscale/cli/configure-synology-cert.go index b5168ef92d11f..0f38f2df2941c 100644 --- a/cmd/tailscale/cli/configure-synology-cert.go +++ b/cmd/tailscale/cli/configure-synology-cert.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_acme && !ts_omit_synology diff --git a/cmd/tailscale/cli/configure-synology-cert_test.go b/cmd/tailscale/cli/configure-synology-cert_test.go index c7da5622fb629..08369c135f154 100644 --- a/cmd/tailscale/cli/configure-synology-cert_test.go +++ b/cmd/tailscale/cli/configure-synology-cert_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_acme diff --git a/cmd/tailscale/cli/configure-synology.go b/cmd/tailscale/cli/configure-synology.go index f0f05f75765b9..4cfd4160e066a 100644 --- a/cmd/tailscale/cli/configure-synology.go +++ b/cmd/tailscale/cli/configure-synology.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/configure.go b/cmd/tailscale/cli/configure.go index 20236eb28b5f5..e7a6448e70822 100644 --- a/cmd/tailscale/cli/configure.go +++ b/cmd/tailscale/cli/configure.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/configure_apple-all.go b/cmd/tailscale/cli/configure_apple-all.go index 5f0da9b95420e..95e9259e96cf7 100644 --- a/cmd/tailscale/cli/configure_apple-all.go +++ b/cmd/tailscale/cli/configure_apple-all.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/configure_apple.go b/cmd/tailscale/cli/configure_apple.go index c0d99b90aa2c4..465bc7a47ed2c 100644 --- a/cmd/tailscale/cli/configure_apple.go +++ b/cmd/tailscale/cli/configure_apple.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin diff --git a/cmd/tailscale/cli/configure_linux-all.go b/cmd/tailscale/cli/configure_linux-all.go index e645e9654dfe5..2db970eeef497 100644 --- a/cmd/tailscale/cli/configure_linux-all.go +++ b/cmd/tailscale/cli/configure_linux-all.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause 
package cli diff --git a/cmd/tailscale/cli/configure_linux.go b/cmd/tailscale/cli/configure_linux.go index 4bbde872140ca..ccde06c72ddbc 100644 --- a/cmd/tailscale/cli/configure_linux.go +++ b/cmd/tailscale/cli/configure_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_systray diff --git a/cmd/tailscale/cli/debug-capture.go b/cmd/tailscale/cli/debug-capture.go index a54066fa614cb..ce282b291a587 100644 --- a/cmd/tailscale/cli/debug-capture.go +++ b/cmd/tailscale/cli/debug-capture.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_capture diff --git a/cmd/tailscale/cli/debug-peer-relay.go b/cmd/tailscale/cli/debug-peer-relay.go index bef8b83693aca..1b28c3f6bb1a4 100644 --- a/cmd/tailscale/cli/debug-peer-relay.go +++ b/cmd/tailscale/cli/debug-peer-relay.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_relayserver diff --git a/cmd/tailscale/cli/debug-portmap.go b/cmd/tailscale/cli/debug-portmap.go index d8db1442c7073..a876971ef00b4 100644 --- a/cmd/tailscale/cli/debug-portmap.go +++ b/cmd/tailscale/cli/debug-portmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_debugportmapper diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index ccbfb59de9221..f406b9f226249 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/diag.go 
b/cmd/tailscale/cli/diag.go index 3b2aa504b9ea7..8a244ba8817bb 100644 --- a/cmd/tailscale/cli/diag.go +++ b/cmd/tailscale/cli/diag.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux || windows || darwin) && !ts_omit_cliconndiag diff --git a/cmd/tailscale/cli/dns-query.go b/cmd/tailscale/cli/dns-query.go index 11f64453732fa..88a897f21ed8d 100644 --- a/cmd/tailscale/cli/dns-query.go +++ b/cmd/tailscale/cli/dns-query.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/dns-status.go b/cmd/tailscale/cli/dns-status.go index 8c18622ce45af..f63f418281987 100644 --- a/cmd/tailscale/cli/dns-status.go +++ b/cmd/tailscale/cli/dns-status.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/dns.go b/cmd/tailscale/cli/dns.go index 086abefd6b2bf..d8db5d466d6b2 100644 --- a/cmd/tailscale/cli/dns.go +++ b/cmd/tailscale/cli/dns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/down.go b/cmd/tailscale/cli/down.go index 224198a98deb5..6fecbd76cec12 100644 --- a/cmd/tailscale/cli/down.go +++ b/cmd/tailscale/cli/down.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/drive.go b/cmd/tailscale/cli/drive.go index 131f468477314..280ff3172fb92 100644 --- a/cmd/tailscale/cli/drive.go +++ b/cmd/tailscale/cli/drive.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_drive && !ts_mac_gui diff --git a/cmd/tailscale/cli/exitnode.go b/cmd/tailscale/cli/exitnode.go index b47b9f0bd4949..0445b66ae14ff 100644 --- a/cmd/tailscale/cli/exitnode.go +++ b/cmd/tailscale/cli/exitnode.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/exitnode_test.go b/cmd/tailscale/cli/exitnode_test.go index cc38fd3a4d39e..9a77cf5d7d3fd 100644 --- a/cmd/tailscale/cli/exitnode_test.go +++ b/cmd/tailscale/cli/exitnode_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/ffcomplete/complete.go b/cmd/tailscale/cli/ffcomplete/complete.go index fbd5b9d62823d..7d280f691a407 100644 --- a/cmd/tailscale/cli/ffcomplete/complete.go +++ b/cmd/tailscale/cli/ffcomplete/complete.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 && !ts_omit_completion diff --git a/cmd/tailscale/cli/ffcomplete/complete_omit.go b/cmd/tailscale/cli/ffcomplete/complete_omit.go index bafc059e7b71d..06efa63fcd3a7 100644 --- a/cmd/tailscale/cli/ffcomplete/complete_omit.go +++ b/cmd/tailscale/cli/ffcomplete/complete_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 && ts_omit_completion diff --git a/cmd/tailscale/cli/ffcomplete/ffcomplete.go b/cmd/tailscale/cli/ffcomplete/ffcomplete.go index 4b8207ec60a0c..e6af2515ff26f 100644 --- a/cmd/tailscale/cli/ffcomplete/ffcomplete.go +++ b/cmd/tailscale/cli/ffcomplete/ffcomplete.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors 
// SPDX-License-Identifier: BSD-3-Clause package ffcomplete diff --git a/cmd/tailscale/cli/ffcomplete/internal/complete.go b/cmd/tailscale/cli/ffcomplete/internal/complete.go index b6c39dc837215..911972518d331 100644 --- a/cmd/tailscale/cli/ffcomplete/internal/complete.go +++ b/cmd/tailscale/cli/ffcomplete/internal/complete.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package internal contains internal code for the ffcomplete package. diff --git a/cmd/tailscale/cli/ffcomplete/internal/complete_test.go b/cmd/tailscale/cli/ffcomplete/internal/complete_test.go index c216bdeec500d..2bba72283b044 100644 --- a/cmd/tailscale/cli/ffcomplete/internal/complete_test.go +++ b/cmd/tailscale/cli/ffcomplete/internal/complete_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package internal_test diff --git a/cmd/tailscale/cli/ffcomplete/scripts.go b/cmd/tailscale/cli/ffcomplete/scripts.go index 8218683afa349..bccebed7feec1 100644 --- a/cmd/tailscale/cli/ffcomplete/scripts.go +++ b/cmd/tailscale/cli/ffcomplete/scripts.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 && !ts_omit_completion && !ts_omit_completion_scripts diff --git a/cmd/tailscale/cli/ffcomplete/scripts_omit.go b/cmd/tailscale/cli/ffcomplete/scripts_omit.go index b5d520c3fe1d9..4c082d9d1ed06 100644 --- a/cmd/tailscale/cli/ffcomplete/scripts_omit.go +++ b/cmd/tailscale/cli/ffcomplete/scripts_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 && !ts_omit_completion && ts_omit_completion_scripts diff --git a/cmd/tailscale/cli/file.go b/cmd/tailscale/cli/file.go index 
e0879197e2dbb..94b36f535bcab 100644 --- a/cmd/tailscale/cli/file.go +++ b/cmd/tailscale/cli/file.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_taildrop diff --git a/cmd/tailscale/cli/funnel.go b/cmd/tailscale/cli/funnel.go index 34b0c74c23949..f16f571e09508 100644 --- a/cmd/tailscale/cli/funnel.go +++ b/cmd/tailscale/cli/funnel.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_serve diff --git a/cmd/tailscale/cli/id-token.go b/cmd/tailscale/cli/id-token.go index a4d02c95a82c1..e2707ee84ca42 100644 --- a/cmd/tailscale/cli/id-token.go +++ b/cmd/tailscale/cli/id-token.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/ip.go b/cmd/tailscale/cli/ip.go index 8379329120436..01373a073b169 100644 --- a/cmd/tailscale/cli/ip.go +++ b/cmd/tailscale/cli/ip.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/jsonoutput/jsonoutput.go b/cmd/tailscale/cli/jsonoutput/jsonoutput.go index aa49acc28baae..69e7374d93342 100644 --- a/cmd/tailscale/cli/jsonoutput/jsonoutput.go +++ b/cmd/tailscale/cli/jsonoutput/jsonoutput.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package jsonoutput provides stable and versioned JSON serialisation for CLI output. 
diff --git a/cmd/tailscale/cli/jsonoutput/network-lock-log.go b/cmd/tailscale/cli/jsonoutput/network-lock-log.go index 88e449db36d2a..c3190e6bac9c7 100644 --- a/cmd/tailscale/cli/jsonoutput/network-lock-log.go +++ b/cmd/tailscale/cli/jsonoutput/network-lock-log.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/cmd/tailscale/cli/jsonoutput/network-lock-status.go b/cmd/tailscale/cli/jsonoutput/network-lock-status.go index 0c6481093c9d6..a1d95b871549c 100644 --- a/cmd/tailscale/cli/jsonoutput/network-lock-status.go +++ b/cmd/tailscale/cli/jsonoutput/network-lock-status.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/cmd/tailscale/cli/licenses.go b/cmd/tailscale/cli/licenses.go index bede827edf693..35d636aa2eb84 100644 --- a/cmd/tailscale/cli/licenses.go +++ b/cmd/tailscale/cli/licenses.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/login.go b/cmd/tailscale/cli/login.go index fb5b786920660..bdf97c70f8e1d 100644 --- a/cmd/tailscale/cli/login.go +++ b/cmd/tailscale/cli/login.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/logout.go b/cmd/tailscale/cli/logout.go index fbc39473026a1..90843edc2e299 100644 --- a/cmd/tailscale/cli/logout.go +++ b/cmd/tailscale/cli/logout.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/maybe_syspolicy.go 
b/cmd/tailscale/cli/maybe_syspolicy.go index 937a278334fd9..a66c1a65df5e4 100644 --- a/cmd/tailscale/cli/maybe_syspolicy.go +++ b/cmd/tailscale/cli/maybe_syspolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_syspolicy diff --git a/cmd/tailscale/cli/metrics.go b/cmd/tailscale/cli/metrics.go index dbdedd5a61037..d16ce76d2725f 100644 --- a/cmd/tailscale/cli/metrics.go +++ b/cmd/tailscale/cli/metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/nc.go b/cmd/tailscale/cli/nc.go index 4ea62255412ea..34490ec212557 100644 --- a/cmd/tailscale/cli/nc.go +++ b/cmd/tailscale/cli/nc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index a8a8992f5ba23..c9cbce29a32cf 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index 3b374ece2543f..d8cff4aca402d 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/cmd/tailscale/cli/network-lock_test.go b/cmd/tailscale/cli/network-lock_test.go index aa777ff922ba1..596a51b8a2deb 100644 --- a/cmd/tailscale/cli/network-lock_test.go +++ b/cmd/tailscale/cli/network-lock_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 
Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/ping.go b/cmd/tailscale/cli/ping.go index 8ece7c93d2311..1e8bbd23f15e8 100644 --- a/cmd/tailscale/cli/ping.go +++ b/cmd/tailscale/cli/ping.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index d4572842bf758..058eff1f8501a 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 0e9b7d0227ccf..837d8851368e4 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_serve diff --git a/cmd/tailscale/cli/serve_legacy_test.go b/cmd/tailscale/cli/serve_legacy_test.go index 819017ad81bb5..27cbb5712dd0f 100644 --- a/cmd/tailscale/cli/serve_legacy_test.go +++ b/cmd/tailscale/cli/serve_legacy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 6a29074817a59..06a4ce1bbde3e 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_serve diff --git a/cmd/tailscale/cli/serve_v2_test.go b/cmd/tailscale/cli/serve_v2_test.go 
index a56fece3e8c59..7b27de6f2eb26 100644 --- a/cmd/tailscale/cli/serve_v2_test.go +++ b/cmd/tailscale/cli/serve_v2_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/serve_v2_unix_test.go b/cmd/tailscale/cli/serve_v2_unix_test.go index 9064655981288..671cdfbfbd1bc 100644 --- a/cmd/tailscale/cli/serve_v2_unix_test.go +++ b/cmd/tailscale/cli/serve_v2_unix_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build unix diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 31662392f8437..615900833596c 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/set_test.go b/cmd/tailscale/cli/set_test.go index a2f211f8cdc36..63fa3c05c48b3 100644 --- a/cmd/tailscale/cli/set_test.go +++ b/cmd/tailscale/cli/set_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/ssh.go b/cmd/tailscale/cli/ssh.go index 9275c9a1c2814..bea18f7abf6ac 100644 --- a/cmd/tailscale/cli/ssh.go +++ b/cmd/tailscale/cli/ssh.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/ssh_exec.go b/cmd/tailscale/cli/ssh_exec.go index 10e52903dea64..ecfd3c4e6052a 100644 --- a/cmd/tailscale/cli/ssh_exec.go +++ b/cmd/tailscale/cli/ssh_exec.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause //go:build !js && !windows diff --git a/cmd/tailscale/cli/ssh_exec_js.go b/cmd/tailscale/cli/ssh_exec_js.go index 40effc7cafc7e..bf631c3b82d24 100644 --- a/cmd/tailscale/cli/ssh_exec_js.go +++ b/cmd/tailscale/cli/ssh_exec_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/ssh_exec_windows.go b/cmd/tailscale/cli/ssh_exec_windows.go index e249afe667401..85e1518175609 100644 --- a/cmd/tailscale/cli/ssh_exec_windows.go +++ b/cmd/tailscale/cli/ssh_exec_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/ssh_unix.go b/cmd/tailscale/cli/ssh_unix.go index 71c0caaa69ad5..768d71116cf2c 100644 --- a/cmd/tailscale/cli/ssh_unix.go +++ b/cmd/tailscale/cli/ssh_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !wasm && !windows && !plan9 diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 89b18335b4ee0..ae4df4da9b51b 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index b315a21e7437f..34ed2c7687c67 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/syspolicy.go b/cmd/tailscale/cli/syspolicy.go index 97f3f2122b40c..e44b01d5ffa15 100644 --- a/cmd/tailscale/cli/syspolicy.go +++ 
b/cmd/tailscale/cli/syspolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_syspolicy diff --git a/cmd/tailscale/cli/systray.go b/cmd/tailscale/cli/systray.go index 827e8a9a40a30..ca0840fe9271e 100644 --- a/cmd/tailscale/cli/systray.go +++ b/cmd/tailscale/cli/systray.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_systray diff --git a/cmd/tailscale/cli/systray_omit.go b/cmd/tailscale/cli/systray_omit.go index 8d93fd84b52a9..83ec199a7d895 100644 --- a/cmd/tailscale/cli/systray_omit.go +++ b/cmd/tailscale/cli/systray_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux || ts_omit_systray diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index bf0315860fbeb..cdb1d38234cec 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/up_test.go b/cmd/tailscale/cli/up_test.go index bb172f9063f59..9af8eae7d9994 100644 --- a/cmd/tailscale/cli/up_test.go +++ b/cmd/tailscale/cli/up_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/update.go b/cmd/tailscale/cli/update.go index 7eb0dccace7a8..291bf4330cd63 100644 --- a/cmd/tailscale/cli/update.go +++ b/cmd/tailscale/cli/update.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git 
a/cmd/tailscale/cli/version.go b/cmd/tailscale/cli/version.go index b25502d5a4be5..f23ee0b69f834 100644 --- a/cmd/tailscale/cli/version.go +++ b/cmd/tailscale/cli/version.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/web.go b/cmd/tailscale/cli/web.go index 2713f730bf600..c13cad2d645ce 100644 --- a/cmd/tailscale/cli/web.go +++ b/cmd/tailscale/cli/web.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_webclient diff --git a/cmd/tailscale/cli/web_test.go b/cmd/tailscale/cli/web_test.go index f2470b364c41e..727c5644be0b4 100644 --- a/cmd/tailscale/cli/web_test.go +++ b/cmd/tailscale/cli/web_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/cli/whois.go b/cmd/tailscale/cli/whois.go index 44ff68dec8777..b2ad74149635b 100644 --- a/cmd/tailscale/cli/whois.go +++ b/cmd/tailscale/cli/whois.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cli diff --git a/cmd/tailscale/deps_test.go b/cmd/tailscale/deps_test.go index 132940e3cc937..ea7bb15d3a895 100644 --- a/cmd/tailscale/deps_test.go +++ b/cmd/tailscale/deps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tailscale/generate.go b/cmd/tailscale/generate.go index 5c2e9be915980..36a4fa671dddb 100644 --- a/cmd/tailscale/generate.go +++ b/cmd/tailscale/generate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tailscale/tailscale.go b/cmd/tailscale/tailscale.go index f6adb6c197071..57a51840832b5 100644 --- a/cmd/tailscale/tailscale.go +++ b/cmd/tailscale/tailscale.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tailscale command is the Tailscale command-line client. It interacts diff --git a/cmd/tailscale/tailscale_test.go b/cmd/tailscale/tailscale_test.go index a7a3c2323cb8f..ca064b6b7a28a 100644 --- a/cmd/tailscale/tailscale_test.go +++ b/cmd/tailscale/tailscale_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tailscaled/childproc/childproc.go b/cmd/tailscaled/childproc/childproc.go index cc83a06c6ee7c..7d89b314af820 100644 --- a/cmd/tailscaled/childproc/childproc.go +++ b/cmd/tailscaled/childproc/childproc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package childproc allows other packages to register "tailscaled be-child" diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index 8208a6e3c6354..360075f5b0e2b 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_debug diff --git a/cmd/tailscaled/debug_forcereflect.go b/cmd/tailscaled/debug_forcereflect.go index 7378753ceb64c..088010d7db29a 100644 --- a/cmd/tailscaled/debug_forcereflect.go +++ b/cmd/tailscaled/debug_forcereflect.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_debug_forcereflect diff --git 
a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 64d1beca7cd75..d06924b927a97 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tailscaled/flag.go b/cmd/tailscaled/flag.go index f640aceed45d8..357210a29c426 100644 --- a/cmd/tailscaled/flag.go +++ b/cmd/tailscaled/flag.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tailscaled/generate.go b/cmd/tailscaled/generate.go index 5c2e9be915980..36a4fa671dddb 100644 --- a/cmd/tailscaled/generate.go +++ b/cmd/tailscaled/generate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tailscaled/install_darwin.go b/cmd/tailscaled/install_darwin.go index 05e5eaed8af90..15d9e54621181 100644 --- a/cmd/tailscaled/install_darwin.go +++ b/cmd/tailscaled/install_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/cmd/tailscaled/install_windows.go b/cmd/tailscaled/install_windows.go index 6013660f5aa20..d0f40b37d1156 100644 --- a/cmd/tailscaled/install_windows.go +++ b/cmd/tailscaled/install_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/cmd/tailscaled/netstack.go b/cmd/tailscaled/netstack.go index c0b34ed411c78..d896f384fcc98 100644 --- a/cmd/tailscaled/netstack.go +++ b/cmd/tailscaled/netstack.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_netstack diff --git a/cmd/tailscaled/proxy.go b/cmd/tailscaled/proxy.go index 85c3d91f9de96..ea9f54a479dc5 100644 --- a/cmd/tailscaled/proxy.go +++ b/cmd/tailscaled/proxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_outboundproxy diff --git a/cmd/tailscaled/required_version.go b/cmd/tailscaled/required_version.go index 3acb3d52e4d8c..bfde77cd8474b 100644 --- a/cmd/tailscaled/required_version.go +++ b/cmd/tailscaled/required_version.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !go1.23 diff --git a/cmd/tailscaled/sigpipe.go b/cmd/tailscaled/sigpipe.go index 2fcdab2a4660e..ba69fcd2a0632 100644 --- a/cmd/tailscaled/sigpipe.go +++ b/cmd/tailscaled/sigpipe.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.21 && !plan9 diff --git a/cmd/tailscaled/ssh.go b/cmd/tailscaled/ssh.go index 59a1ddd0df461..e69cbd5dce086 100644 --- a/cmd/tailscaled/ssh.go +++ b/cmd/tailscaled/ssh.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux || darwin || freebsd || openbsd || plan9) && !ts_omit_ssh diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index 410ae00bc0716..df0d68e077b2b 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.23 diff --git a/cmd/tailscaled/tailscaled_bird.go b/cmd/tailscaled/tailscaled_bird.go index c76f77bec6e36..c1c32d2bb493d 
100644 --- a/cmd/tailscaled/tailscaled_bird.go +++ b/cmd/tailscaled/tailscaled_bird.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 && (linux || darwin || freebsd || openbsd) && !ts_omit_bird diff --git a/cmd/tailscaled/tailscaled_drive.go b/cmd/tailscaled/tailscaled_drive.go index 49f35a3811404..6a8590bb82217 100644 --- a/cmd/tailscaled/tailscaled_drive.go +++ b/cmd/tailscaled/tailscaled_drive.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_drive diff --git a/cmd/tailscaled/tailscaled_notwindows.go b/cmd/tailscaled/tailscaled_notwindows.go index d5361cf286d3d..735facc37b861 100644 --- a/cmd/tailscaled/tailscaled_notwindows.go +++ b/cmd/tailscaled/tailscaled_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && go1.19 diff --git a/cmd/tailscaled/tailscaled_test.go b/cmd/tailscaled/tailscaled_test.go index 36327cccc7bc7..7d76e7683a623 100644 --- a/cmd/tailscaled/tailscaled_test.go +++ b/cmd/tailscaled/tailscaled_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main // import "tailscale.com/cmd/tailscaled" diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 3019bbaf9695b..63c8b30c99348 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.19 diff --git a/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go b/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go 
index 6ea662d39230c..42009d02bf6af 100644 --- a/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go +++ b/cmd/tailscaled/tailscaledhooks/tailscaledhooks.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tailscaledhooks provides hooks for optional features diff --git a/cmd/tailscaled/webclient.go b/cmd/tailscaled/webclient.go index 672ba7126d2a7..e031277abfc27 100644 --- a/cmd/tailscaled/webclient.go +++ b/cmd/tailscaled/webclient.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_webclient diff --git a/cmd/tailscaled/with_cli.go b/cmd/tailscaled/with_cli.go index a8554eb8ce9dc..33da1f448e727 100644 --- a/cmd/tailscaled/with_cli.go +++ b/cmd/tailscaled/with_cli.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_include_cli diff --git a/cmd/testcontrol/testcontrol.go b/cmd/testcontrol/testcontrol.go index b05b3128df0ef..49e7e429e63e9 100644 --- a/cmd/testcontrol/testcontrol.go +++ b/cmd/testcontrol/testcontrol.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Program testcontrol runs a simple test control server. 
diff --git a/cmd/testwrapper/args.go b/cmd/testwrapper/args.go index 95157bc34efee..11ed1aeaad0bd 100644 --- a/cmd/testwrapper/args.go +++ b/cmd/testwrapper/args.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/testwrapper/args_test.go b/cmd/testwrapper/args_test.go index 10063d7bcf6e1..25364fb96d6a1 100644 --- a/cmd/testwrapper/args_test.go +++ b/cmd/testwrapper/args_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/testwrapper/flakytest/flakytest.go b/cmd/testwrapper/flakytest/flakytest.go index 856cb28ef275a..b98d739c63620 100644 --- a/cmd/testwrapper/flakytest/flakytest.go +++ b/cmd/testwrapper/flakytest/flakytest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package flakytest contains test helpers for marking a test as flaky. For diff --git a/cmd/testwrapper/flakytest/flakytest_test.go b/cmd/testwrapper/flakytest/flakytest_test.go index 9b744de13d446..54dd2121bd1f3 100644 --- a/cmd/testwrapper/flakytest/flakytest_test.go +++ b/cmd/testwrapper/flakytest/flakytest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package flakytest diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 173edee733f04..df10a53bc1a14 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // testwrapper is a wrapper for retrying flaky tests. 
It is an alternative to diff --git a/cmd/testwrapper/testwrapper_test.go b/cmd/testwrapper/testwrapper_test.go index ace53ccd0e09a..0ca13e854ff7a 100644 --- a/cmd/testwrapper/testwrapper_test.go +++ b/cmd/testwrapper/testwrapper_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main_test diff --git a/cmd/tl-longchain/tl-longchain.go b/cmd/tl-longchain/tl-longchain.go index 384d24222e6d5..33d0df3011018 100644 --- a/cmd/tl-longchain/tl-longchain.go +++ b/cmd/tl-longchain/tl-longchain.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Program tl-longchain prints commands to re-sign Tailscale nodes that have diff --git a/cmd/tsconnect/build-pkg.go b/cmd/tsconnect/build-pkg.go index 047504858ae0c..53aacc02ec8ea 100644 --- a/cmd/tsconnect/build-pkg.go +++ b/cmd/tsconnect/build-pkg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/build.go b/cmd/tsconnect/build.go index 364ebf5366dfe..64b6b3582bd62 100644 --- a/cmd/tsconnect/build.go +++ b/cmd/tsconnect/build.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/common.go b/cmd/tsconnect/common.go index ff10e4efbb5d3..9daa402692c04 100644 --- a/cmd/tsconnect/common.go +++ b/cmd/tsconnect/common.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/dev-pkg.go b/cmd/tsconnect/dev-pkg.go index de534c3b20625..3e1018c00df0e 100644 --- a/cmd/tsconnect/dev-pkg.go +++ b/cmd/tsconnect/dev-pkg.go @@ -1,4 
+1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/dev.go b/cmd/tsconnect/dev.go index 87b10adaf49c8..2d0eb1036a5ad 100644 --- a/cmd/tsconnect/dev.go +++ b/cmd/tsconnect/dev.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/package.json.tmpl b/cmd/tsconnect/package.json.tmpl index 404b896eaf89e..883d794cacb8b 100644 --- a/cmd/tsconnect/package.json.tmpl +++ b/cmd/tsconnect/package.json.tmpl @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Template for the package.json that is generated by the build-pkg command. diff --git a/cmd/tsconnect/serve.go b/cmd/tsconnect/serve.go index d780bdd57c3e3..3e9f097a248a4 100644 --- a/cmd/tsconnect/serve.go +++ b/cmd/tsconnect/serve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/src/app/app.tsx b/cmd/tsconnect/src/app/app.tsx index ee538eaeac506..8d25b227437dd 100644 --- a/cmd/tsconnect/src/app/app.tsx +++ b/cmd/tsconnect/src/app/app.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { render, Component } from "preact" diff --git a/cmd/tsconnect/src/app/go-panic-display.tsx b/cmd/tsconnect/src/app/go-panic-display.tsx index 5dd7095a27c7d..e15c58cd183ff 100644 --- a/cmd/tsconnect/src/app/go-panic-display.tsx +++ b/cmd/tsconnect/src/app/go-panic-display.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause export function 
GoPanicDisplay({ diff --git a/cmd/tsconnect/src/app/header.tsx b/cmd/tsconnect/src/app/header.tsx index 099ff2f8c2f7d..640474090e3a4 100644 --- a/cmd/tsconnect/src/app/header.tsx +++ b/cmd/tsconnect/src/app/header.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause export function Header({ state, ipn }: { state: IPNState; ipn?: IPN }) { diff --git a/cmd/tsconnect/src/app/index.css b/cmd/tsconnect/src/app/index.css index 751b313d9f362..2c9c4c0d3247b 100644 --- a/cmd/tsconnect/src/app/index.css +++ b/cmd/tsconnect/src/app/index.css @@ -1,4 +1,4 @@ -/* Copyright (c) Tailscale Inc & AUTHORS */ +/* Copyright (c) Tailscale Inc & contributors */ /* SPDX-License-Identifier: BSD-3-Clause */ @import "xterm/css/xterm.css"; diff --git a/cmd/tsconnect/src/app/index.ts b/cmd/tsconnect/src/app/index.ts index 24ca4543921ae..bdbcaf3e52d4a 100644 --- a/cmd/tsconnect/src/app/index.ts +++ b/cmd/tsconnect/src/app/index.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import "../wasm_exec" diff --git a/cmd/tsconnect/src/app/ssh.tsx b/cmd/tsconnect/src/app/ssh.tsx index df81745bd3fd7..1faaad6c68220 100644 --- a/cmd/tsconnect/src/app/ssh.tsx +++ b/cmd/tsconnect/src/app/ssh.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useState, useCallback, useMemo, useEffect, useRef } from "preact/hooks" diff --git a/cmd/tsconnect/src/app/url-display.tsx b/cmd/tsconnect/src/app/url-display.tsx index fc82c7fb91b3c..787989ccae018 100644 --- a/cmd/tsconnect/src/app/url-display.tsx +++ b/cmd/tsconnect/src/app/url-display.tsx @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { useState } from 
"preact/hooks" diff --git a/cmd/tsconnect/src/lib/js-state-store.ts b/cmd/tsconnect/src/lib/js-state-store.ts index e57dfd98efabd..a17090fa0921d 100644 --- a/cmd/tsconnect/src/lib/js-state-store.ts +++ b/cmd/tsconnect/src/lib/js-state-store.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause /** @fileoverview Callbacks used by jsStateStore to persist IPN state. */ diff --git a/cmd/tsconnect/src/lib/ssh.ts b/cmd/tsconnect/src/lib/ssh.ts index 9c6f71aee4b41..5fae4a5b7451f 100644 --- a/cmd/tsconnect/src/lib/ssh.ts +++ b/cmd/tsconnect/src/lib/ssh.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import { Terminal, ITerminalOptions } from "xterm" diff --git a/cmd/tsconnect/src/pkg/pkg.css b/cmd/tsconnect/src/pkg/pkg.css index 76ea21f5b53b2..f3b32bb95e808 100644 --- a/cmd/tsconnect/src/pkg/pkg.css +++ b/cmd/tsconnect/src/pkg/pkg.css @@ -1,4 +1,4 @@ -/* Copyright (c) Tailscale Inc & AUTHORS */ +/* Copyright (c) Tailscale Inc & contributors */ /* SPDX-License-Identifier: BSD-3-Clause */ @import "xterm/css/xterm.css"; diff --git a/cmd/tsconnect/src/pkg/pkg.ts b/cmd/tsconnect/src/pkg/pkg.ts index 4d535cb404015..a44c57150ce24 100644 --- a/cmd/tsconnect/src/pkg/pkg.ts +++ b/cmd/tsconnect/src/pkg/pkg.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Type definitions need to be manually imported for dts-bundle-generator to diff --git a/cmd/tsconnect/src/types/esbuild.d.ts b/cmd/tsconnect/src/types/esbuild.d.ts index ef28f7b1cf556..d6bbd9310ead7 100644 --- a/cmd/tsconnect/src/types/esbuild.d.ts +++ b/cmd/tsconnect/src/types/esbuild.d.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause /** 
diff --git a/cmd/tsconnect/src/types/wasm_js.d.ts b/cmd/tsconnect/src/types/wasm_js.d.ts index 492197ccb1a9b..938ec759c7615 100644 --- a/cmd/tsconnect/src/types/wasm_js.d.ts +++ b/cmd/tsconnect/src/types/wasm_js.d.ts @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause /** diff --git a/cmd/tsconnect/tsconnect.go b/cmd/tsconnect/tsconnect.go index ef55593b49268..6de1f26ad389f 100644 --- a/cmd/tsconnect/tsconnect.go +++ b/cmd/tsconnect/tsconnect.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index c7aa00d1d794f..8a0177d1d66f7 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The wasm package builds a WebAssembly module that provides a subset of diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 7093ab9ee193a..d6dfc79009796 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tsidp command is an OpenID Connect Identity Provider server. 
diff --git a/cmd/tsidp/tsidp_test.go b/cmd/tsidp/tsidp_test.go index 4f5af9e598e65..26c906fab216b 100644 --- a/cmd/tsidp/tsidp_test.go +++ b/cmd/tsidp/tsidp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package main tests for tsidp focus on OAuth security boundaries and diff --git a/cmd/tsidp/ui.go b/cmd/tsidp/ui.go index d37b64990cac8..f8717d65e8509 100644 --- a/cmd/tsidp/ui.go +++ b/cmd/tsidp/ui.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tsshd/tsshd.go b/cmd/tsshd/tsshd.go index 950eb661cdb23..51765e2e41526 100644 --- a/cmd/tsshd/tsshd.go +++ b/cmd/tsshd/tsshd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore diff --git a/cmd/tta/fw_linux.go b/cmd/tta/fw_linux.go index a4ceabad8bc05..49d8d41ea4b4d 100644 --- a/cmd/tta/fw_linux.go +++ b/cmd/tta/fw_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/tta/tta.go b/cmd/tta/tta.go index 9f8f002958d61..377d01c9487f7 100644 --- a/cmd/tta/tta.go +++ b/cmd/tta/tta.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tta server is the Tailscale Test Agent. 
diff --git a/cmd/vet/jsontags/analyzer.go b/cmd/vet/jsontags/analyzer.go index d799b66cbb583..c69634ecd3e8a 100644 --- a/cmd/vet/jsontags/analyzer.go +++ b/cmd/vet/jsontags/analyzer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package jsontags checks for incompatible usage of JSON struct tags. diff --git a/cmd/vet/jsontags/iszero.go b/cmd/vet/jsontags/iszero.go index 77520d72c66f3..fd25cc120c530 100644 --- a/cmd/vet/jsontags/iszero.go +++ b/cmd/vet/jsontags/iszero.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package jsontags diff --git a/cmd/vet/jsontags/report.go b/cmd/vet/jsontags/report.go index 8e5869060799c..702de1c4d1c36 100644 --- a/cmd/vet/jsontags/report.go +++ b/cmd/vet/jsontags/report.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package jsontags diff --git a/cmd/vet/vet.go b/cmd/vet/vet.go index 45473af48f0ee..babc30d254719 100644 --- a/cmd/vet/vet.go +++ b/cmd/vet/vet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package vet is a tool to statically check Go source code. diff --git a/cmd/viewer/tests/tests.go b/cmd/viewer/tests/tests.go index d1c753db78710..cbffd38845ec3 100644 --- a/cmd/viewer/tests/tests.go +++ b/cmd/viewer/tests/tests.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tests serves a list of tests for tailscale.com/cmd/viewer. 
diff --git a/cmd/viewer/tests/tests_clone.go b/cmd/viewer/tests/tests_clone.go index 4602b9d887d2b..cbf5ec2653d98 100644 --- a/cmd/viewer/tests/tests_clone.go +++ b/cmd/viewer/tests/tests_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/cmd/viewer/tests/tests_view.go b/cmd/viewer/tests/tests_view.go index 495281c23b3aa..fe073446ea200 100644 --- a/cmd/viewer/tests/tests_view.go +++ b/cmd/viewer/tests/tests_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index 3fae737cde692..56b999f5f50fe 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Viewer is a tool to automate the creation of "view" wrapper types that diff --git a/cmd/viewer/viewer_test.go b/cmd/viewer/viewer_test.go index 1e24b705069d7..8bd18d4806ae2 100644 --- a/cmd/viewer/viewer_test.go +++ b/cmd/viewer/viewer_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/cmd/vnet/vnet-main.go b/cmd/vnet/vnet-main.go index 9dd4d8cfafe94..8a3afe2035a95 100644 --- a/cmd/vnet/vnet-main.go +++ b/cmd/vnet/vnet-main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The vnet binary runs a virtual network stack in userspace for qemu instances diff --git a/cmd/xdpderper/xdpderper.go b/cmd/xdpderper/xdpderper.go index c127baf54e340..ea25550bb1189 100644 
--- a/cmd/xdpderper/xdpderper.go +++ b/cmd/xdpderper/xdpderper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command xdpderper runs the XDP STUN server. diff --git a/control/controlbase/conn.go b/control/controlbase/conn.go index 78ef73f71000b..8f6e5a7717f79 100644 --- a/control/controlbase/conn.go +++ b/control/controlbase/conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package controlbase implements the base transport of the Tailscale diff --git a/control/controlbase/conn_test.go b/control/controlbase/conn_test.go index ed4642d3b179c..a1e2b313de5b6 100644 --- a/control/controlbase/conn_test.go +++ b/control/controlbase/conn_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlbase diff --git a/control/controlbase/handshake.go b/control/controlbase/handshake.go index 765a4620b876f..919920c344239 100644 --- a/control/controlbase/handshake.go +++ b/control/controlbase/handshake.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlbase diff --git a/control/controlbase/handshake_test.go b/control/controlbase/handshake_test.go index 242b1f4d7c658..f6b5409a8904f 100644 --- a/control/controlbase/handshake_test.go +++ b/control/controlbase/handshake_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlbase diff --git a/control/controlbase/interop_test.go b/control/controlbase/interop_test.go index c41fbf4dd4950..87ee7d45876d7 100644 --- a/control/controlbase/interop_test.go +++ b/control/controlbase/interop_test.go @@ -1,4 
+1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlbase diff --git a/control/controlbase/messages.go b/control/controlbase/messages.go index 59073088f5e81..1357432de7ee5 100644 --- a/control/controlbase/messages.go +++ b/control/controlbase/messages.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlbase diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index 336a8d491bc9c..fe227b45e57aa 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlclient/client.go b/control/controlclient/client.go index 41b39622b0199..3bc53ed5a24fc 100644 --- a/control/controlclient/client.go +++ b/control/controlclient/client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package controlclient implements the client for the Tailscale diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 57d3ca7ca7ae3..c7d61f6b2d13d 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index d5cd6a13e5120..eb49cf4ab44fb 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlclient/direct_test.go b/control/controlclient/direct_test.go index 4329fc878ceb3..d10b346ae39a7 100644 --- a/control/controlclient/direct_test.go +++ b/control/controlclient/direct_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlclient/errors.go b/control/controlclient/errors.go index 9b4dab84467b8..a2397cedeaa5c 100644 --- a/control/controlclient/errors.go +++ b/control/controlclient/errors.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 9aa8e37107a99..18bd420ebaae3 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 2be4b6ad70b2d..11d4593f03fae 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlclient/sign.go b/control/controlclient/sign.go index e3a479c283c62..6cee1265fe99b 100644 --- a/control/controlclient/sign.go +++ b/control/controlclient/sign.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlclient/sign_supported.go 
b/control/controlclient/sign_supported.go index 439e6d36b4fe3..ea6fa28e34479 100644 --- a/control/controlclient/sign_supported.go +++ b/control/controlclient/sign_supported.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows diff --git a/control/controlclient/sign_supported_test.go b/control/controlclient/sign_supported_test.go index e20349a4e82c3..9d4abafbd12f6 100644 --- a/control/controlclient/sign_supported_test.go +++ b/control/controlclient/sign_supported_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows && cgo diff --git a/control/controlclient/sign_unsupported.go b/control/controlclient/sign_unsupported.go index f6c4ddc6288fb..ff830282e4496 100644 --- a/control/controlclient/sign_unsupported.go +++ b/control/controlclient/sign_unsupported.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/control/controlclient/status.go b/control/controlclient/status.go index 65afb7a5011f2..46dc8f773f260 100644 --- a/control/controlclient/status.go +++ b/control/controlclient/status.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlclient diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 06a2131fdcb2b..e812091745ea5 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js diff --git a/control/controlhttp/client_common.go b/control/controlhttp/client_common.go index 
dd94e93cdc3cf..5e49b0bfcc295 100644 --- a/control/controlhttp/client_common.go +++ b/control/controlhttp/client_common.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlhttp diff --git a/control/controlhttp/client_js.go b/control/controlhttp/client_js.go index cc05b5b192766..a3ce7ffe5c765 100644 --- a/control/controlhttp/client_js.go +++ b/control/controlhttp/client_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlhttp diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 359410ae9d29c..26ace871c1268 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlhttp diff --git a/control/controlhttp/controlhttpcommon/controlhttpcommon.go b/control/controlhttp/controlhttpcommon/controlhttpcommon.go index a86b7ca04a7f4..21236b09b5574 100644 --- a/control/controlhttp/controlhttpcommon/controlhttpcommon.go +++ b/control/controlhttp/controlhttpcommon/controlhttpcommon.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package controlhttpcommon contains common constants for used diff --git a/control/controlhttp/controlhttpserver/controlhttpserver.go b/control/controlhttp/controlhttpserver/controlhttpserver.go index af320781069d1..7b413829eff78 100644 --- a/control/controlhttp/controlhttpserver/controlhttpserver.go +++ b/control/controlhttp/controlhttpserver/controlhttpserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build 
!ios diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index 648b9e5ed88d5..c02ac758ebf16 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlhttp diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index 09c16b8b12f1e..708840155df45 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package controlknobs contains client options configurable from control which can be turned on diff --git a/control/controlknobs/controlknobs_test.go b/control/controlknobs/controlknobs_test.go index 7618b7121c500..495535b1e2807 100644 --- a/control/controlknobs/controlknobs_test.go +++ b/control/controlknobs/controlknobs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package controlknobs diff --git a/control/ts2021/client.go b/control/ts2021/client.go index ca10b1d1b5bc6..0f0e7598b5591 100644 --- a/control/ts2021/client.go +++ b/control/ts2021/client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ts2021 diff --git a/control/ts2021/client_test.go b/control/ts2021/client_test.go index 72fa1f44264c3..da823fc548593 100644 --- a/control/ts2021/client_test.go +++ b/control/ts2021/client_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ts2021 diff --git a/control/ts2021/conn.go b/control/ts2021/conn.go index 
52d663272a8c6..6832f2df12a4f 100644 --- a/control/ts2021/conn.go +++ b/control/ts2021/conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ts2021 handles the details of the Tailscale 2021 control protocol diff --git a/derp/client_test.go b/derp/client_test.go index a731ad197f1e7..e1bcaba8bf2c8 100644 --- a/derp/client_test.go +++ b/derp/client_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derp diff --git a/derp/derp.go b/derp/derp.go index e19a99b0025ce..a7d0ea80191a8 100644 --- a/derp/derp.go +++ b/derp/derp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package derp implements the Designated Encrypted Relay for Packets (DERP) diff --git a/derp/derp_client.go b/derp/derp_client.go index d28905cd2c8b2..1e9d48e1456c8 100644 --- a/derp/derp_client.go +++ b/derp/derp_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derp diff --git a/derp/derp_test.go b/derp/derp_test.go index 52793f90fa9f5..cff069dd4470c 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derp_test diff --git a/derp/derpconst/derpconst.go b/derp/derpconst/derpconst.go index 74ca09ccb734b..03ef249ce1b28 100644 --- a/derp/derpconst/derpconst.go +++ b/derp/derpconst/derpconst.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package derpconst contains constants used by the DERP client and server. 
diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index db56c4a44c682..3c8408e95e1f1 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package derphttp implements DERP-over-HTTP. diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 5208481ed7258..ae530c93a31c0 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derphttp_test diff --git a/derp/derphttp/export_test.go b/derp/derphttp/export_test.go index 59d8324dcba3e..e3f449277fd6a 100644 --- a/derp/derphttp/export_test.go +++ b/derp/derphttp/export_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derphttp diff --git a/derp/derphttp/mesh_client.go b/derp/derphttp/mesh_client.go index c14a9a7e11111..d8fa7cd9aae03 100644 --- a/derp/derphttp/mesh_client.go +++ b/derp/derphttp/mesh_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derphttp diff --git a/derp/derphttp/websocket.go b/derp/derphttp/websocket.go index 9dd640ee37083..295d0a9bd44de 100644 --- a/derp/derphttp/websocket.go +++ b/derp/derphttp/websocket.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build js || ((linux || darwin) && ts_debug_websockets) diff --git a/derp/derphttp/websocket_stub.go b/derp/derphttp/websocket_stub.go index d84bfba571f80..52d5ed15e3c25 100644 --- a/derp/derphttp/websocket_stub.go +++ 
b/derp/derphttp/websocket_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(js || ((linux || darwin) && ts_debug_websockets)) diff --git a/derp/derpserver/derpserver.go b/derp/derpserver/derpserver.go index 1879e0c536f3d..f311eb25d9817 100644 --- a/derp/derpserver/derpserver.go +++ b/derp/derpserver/derpserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package derpserver implements a DERP server. diff --git a/derp/derpserver/derpserver_default.go b/derp/derpserver/derpserver_default.go index 874e590d3c812..f664e88d1c3dd 100644 --- a/derp/derpserver/derpserver_default.go +++ b/derp/derpserver/derpserver_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux || android diff --git a/derp/derpserver/derpserver_linux.go b/derp/derpserver/derpserver_linux.go index 768e6a2ab6ab7..c6154661c5486 100644 --- a/derp/derpserver/derpserver_linux.go +++ b/derp/derpserver/derpserver_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/derp/derpserver/derpserver_test.go b/derp/derpserver/derpserver_test.go index 1dd86f3146c5c..3a778d59fb009 100644 --- a/derp/derpserver/derpserver_test.go +++ b/derp/derpserver/derpserver_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derpserver diff --git a/derp/derpserver/handler.go b/derp/derpserver/handler.go index 7cd6aa2fd5b95..f639cb7123c73 100644 --- a/derp/derpserver/handler.go +++ b/derp/derpserver/handler.go @@ -1,4 +1,4 @@ -// Copyright (c) 
Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derpserver diff --git a/derp/export_test.go b/derp/export_test.go index 677a4932d2657..9a73dd13e2798 100644 --- a/derp/export_test.go +++ b/derp/export_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package derp diff --git a/derp/xdp/headers/update.go b/derp/xdp/headers/update.go index c41332d077322..a7680c042ae0f 100644 --- a/derp/xdp/headers/update.go +++ b/derp/xdp/headers/update.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The update program fetches the libbpf headers from the libbpf GitHub repository diff --git a/derp/xdp/xdp.go b/derp/xdp/xdp.go index 5b2dbd1c26bcd..5f95b71e50294 100644 --- a/derp/xdp/xdp.go +++ b/derp/xdp/xdp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package xdp contains the XDP STUN program. 
diff --git a/derp/xdp/xdp_default.go b/derp/xdp/xdp_default.go index 99bc30d2c2ddc..187a112295c22 100644 --- a/derp/xdp/xdp_default.go +++ b/derp/xdp/xdp_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/derp/xdp/xdp_linux.go b/derp/xdp/xdp_linux.go index 309d9ee9a92b4..5d22716be4f16 100644 --- a/derp/xdp/xdp_linux.go +++ b/derp/xdp/xdp_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package xdp diff --git a/derp/xdp/xdp_linux_test.go b/derp/xdp/xdp_linux_test.go index 07f11eff65b09..5c75a69ff3fbb 100644 --- a/derp/xdp/xdp_linux_test.go +++ b/derp/xdp/xdp_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/disco/disco.go b/disco/disco.go index f58bc1b8c1ba1..2147529d175d4 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package disco contains the discovery message types. 
diff --git a/disco/disco_fuzzer.go b/disco/disco_fuzzer.go index b9ffabfb00906..99a96ae85e34f 100644 --- a/disco/disco_fuzzer.go +++ b/disco/disco_fuzzer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build gofuzz diff --git a/disco/disco_test.go b/disco/disco_test.go index 71b68338a8c90..07b653ceeb950 100644 --- a/disco/disco_test.go +++ b/disco/disco_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package disco diff --git a/disco/pcap.go b/disco/pcap.go index 71035424868e8..e4a910163f4aa 100644 --- a/disco/pcap.go +++ b/disco/pcap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package disco diff --git a/docs/k8s/Makefile b/docs/k8s/Makefile index 55804c857c049..6397957808e49 100644 --- a/docs/k8s/Makefile +++ b/docs/k8s/Makefile @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause TS_ROUTES ?= "" diff --git a/docs/k8s/proxy.yaml b/docs/k8s/proxy.yaml index 048fd7a5bddf9..bd31b7a97bc83 100644 --- a/docs/k8s/proxy.yaml +++ b/docs/k8s/proxy.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 kind: Pod diff --git a/docs/k8s/role.yaml b/docs/k8s/role.yaml index d7d0846ab29a6..869d71b719118 100644 --- a/docs/k8s/role.yaml +++ b/docs/k8s/role.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/docs/k8s/rolebinding.yaml b/docs/k8s/rolebinding.yaml index 3b18ba8d35e57..1bec3df271e8e 100644 --- 
a/docs/k8s/rolebinding.yaml +++ b/docs/k8s/rolebinding.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/docs/k8s/sa.yaml b/docs/k8s/sa.yaml index edd3944ba8987..e1d61573c5317 100644 --- a/docs/k8s/sa.yaml +++ b/docs/k8s/sa.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 kind: ServiceAccount diff --git a/docs/k8s/sidecar.yaml b/docs/k8s/sidecar.yaml index 520e4379ad9ee..c119c67bbe5f8 100644 --- a/docs/k8s/sidecar.yaml +++ b/docs/k8s/sidecar.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 kind: Pod diff --git a/docs/k8s/subnet.yaml b/docs/k8s/subnet.yaml index ef4e4748c0ceb..556201deb6500 100644 --- a/docs/k8s/subnet.yaml +++ b/docs/k8s/subnet.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 kind: Pod diff --git a/docs/k8s/userspace-sidecar.yaml b/docs/k8s/userspace-sidecar.yaml index ee19b10a5e5dd..32a949593c040 100644 --- a/docs/k8s/userspace-sidecar.yaml +++ b/docs/k8s/userspace-sidecar.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause apiVersion: v1 kind: Pod diff --git a/docs/sysv/tailscale.init b/docs/sysv/tailscale.init index ca21033df7b27..0168adfdb1041 100755 --- a/docs/sysv/tailscale.init +++ b/docs/sysv/tailscale.init @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause ### BEGIN INIT INFO diff --git a/docs/webhooks/example.go b/docs/webhooks/example.go 
index 712028362c53e..53ec1c8b74b52 100644 --- a/docs/webhooks/example.go +++ b/docs/webhooks/example.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command webhooks provides example consumer code for Tailscale diff --git a/doctor/doctor.go b/doctor/doctor.go index 7c3047e12b62d..437df5e756dea 100644 --- a/doctor/doctor.go +++ b/doctor/doctor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package doctor contains more in-depth healthchecks that can be run to aid in diff --git a/doctor/doctor_test.go b/doctor/doctor_test.go index 87250f10ed00a..cd9d00ae6868e 100644 --- a/doctor/doctor_test.go +++ b/doctor/doctor_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package doctor diff --git a/doctor/ethtool/ethtool.go b/doctor/ethtool/ethtool.go index f80b00a51ff65..40f39cc21b49a 100644 --- a/doctor/ethtool/ethtool.go +++ b/doctor/ethtool/ethtool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ethtool provides a doctor.Check that prints diagnostic information diff --git a/doctor/ethtool/ethtool_linux.go b/doctor/ethtool/ethtool_linux.go index f6eaac1df0542..3914158741724 100644 --- a/doctor/ethtool/ethtool_linux.go +++ b/doctor/ethtool/ethtool_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/doctor/ethtool/ethtool_other.go b/doctor/ethtool/ethtool_other.go index 7af74eec8f872..91b5f6fdb9a6f 100644 --- a/doctor/ethtool/ethtool_other.go +++ b/doctor/ethtool/ethtool_other.go @@ -1,4 +1,4 @@ -// Copyright (c) 
Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux || android diff --git a/doctor/permissions/permissions.go b/doctor/permissions/permissions.go index 77fe526262f0c..a98ad1e0826a1 100644 --- a/doctor/permissions/permissions.go +++ b/doctor/permissions/permissions.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package permissions provides a doctor.Check that prints the process diff --git a/doctor/permissions/permissions_bsd.go b/doctor/permissions/permissions_bsd.go index 8b034cfff1af3..c72e4d5d7a65c 100644 --- a/doctor/permissions/permissions_bsd.go +++ b/doctor/permissions/permissions_bsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd || openbsd diff --git a/doctor/permissions/permissions_linux.go b/doctor/permissions/permissions_linux.go index 12bb393d53383..8f8f12161e949 100644 --- a/doctor/permissions/permissions_linux.go +++ b/doctor/permissions/permissions_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/doctor/permissions/permissions_other.go b/doctor/permissions/permissions_other.go index 7e6912b4928cf..e96cf4f16277b 100644 --- a/doctor/permissions/permissions_other.go +++ b/doctor/permissions/permissions_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(linux || darwin || freebsd || openbsd) diff --git a/doctor/permissions/permissions_test.go b/doctor/permissions/permissions_test.go index 941d406ef8318..c7a292f39e783 100644 --- a/doctor/permissions/permissions_test.go +++ 
b/doctor/permissions/permissions_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package permissions diff --git a/doctor/routetable/routetable.go b/doctor/routetable/routetable.go index 76e4ef949b9af..1751d37448411 100644 --- a/doctor/routetable/routetable.go +++ b/doctor/routetable/routetable.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package routetable provides a doctor.Check that dumps the current system's diff --git a/drive/drive_clone.go b/drive/drive_clone.go index 927f3b81c4e2c..724ebc386273d 100644 --- a/drive/drive_clone.go +++ b/drive/drive_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/drive/drive_view.go b/drive/drive_view.go index b481751bb3bff..253a2955b2161 100644 --- a/drive/drive_view.go +++ b/drive/drive_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. 
diff --git a/drive/driveimpl/birthtiming.go b/drive/driveimpl/birthtiming.go index d55ea0b83c322..c71bba5b47c77 100644 --- a/drive/driveimpl/birthtiming.go +++ b/drive/driveimpl/birthtiming.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl diff --git a/drive/driveimpl/birthtiming_test.go b/drive/driveimpl/birthtiming_test.go index a43ffa33db92e..2bb1259224ff2 100644 --- a/drive/driveimpl/birthtiming_test.go +++ b/drive/driveimpl/birthtiming_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // BirthTime is not supported on Linux, so only run the test on windows and Mac. diff --git a/drive/driveimpl/compositedav/compositedav.go b/drive/driveimpl/compositedav/compositedav.go index 7c035912b946d..c6ec797726643 100644 --- a/drive/driveimpl/compositedav/compositedav.go +++ b/drive/driveimpl/compositedav/compositedav.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package compositedav provides an http.Handler that composes multiple WebDAV diff --git a/drive/driveimpl/compositedav/rewriting.go b/drive/driveimpl/compositedav/rewriting.go index 704be93d1bf76..47f020461b77d 100644 --- a/drive/driveimpl/compositedav/rewriting.go +++ b/drive/driveimpl/compositedav/rewriting.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package compositedav diff --git a/drive/driveimpl/compositedav/stat_cache.go b/drive/driveimpl/compositedav/stat_cache.go index 36463fe7e137f..2e53c82419795 100644 --- a/drive/driveimpl/compositedav/stat_cache.go +++ b/drive/driveimpl/compositedav/stat_cache.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package compositedav diff --git a/drive/driveimpl/compositedav/stat_cache_test.go b/drive/driveimpl/compositedav/stat_cache_test.go index baa4fdda2c7f7..b982a3aad1d17 100644 --- a/drive/driveimpl/compositedav/stat_cache_test.go +++ b/drive/driveimpl/compositedav/stat_cache_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package compositedav diff --git a/drive/driveimpl/connlistener.go b/drive/driveimpl/connlistener.go index ff60f73404230..8fcc5a6d262d1 100644 --- a/drive/driveimpl/connlistener.go +++ b/drive/driveimpl/connlistener.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl diff --git a/drive/driveimpl/connlistener_test.go b/drive/driveimpl/connlistener_test.go index 6adf15acbd56f..972791c6e530e 100644 --- a/drive/driveimpl/connlistener_test.go +++ b/drive/driveimpl/connlistener_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl diff --git a/drive/driveimpl/dirfs/dirfs.go b/drive/driveimpl/dirfs/dirfs.go index 50a3330a9d751..3c4297264302d 100644 --- a/drive/driveimpl/dirfs/dirfs.go +++ b/drive/driveimpl/dirfs/dirfs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dirfs provides a webdav.FileSystem that looks like a read-only diff --git a/drive/driveimpl/dirfs/dirfs_test.go b/drive/driveimpl/dirfs/dirfs_test.go index 4d83765d9df23..c5f3aed3a99f0 100644 --- a/drive/driveimpl/dirfs/dirfs_test.go +++ b/drive/driveimpl/dirfs/dirfs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/dirfs/mkdir.go b/drive/driveimpl/dirfs/mkdir.go index 2fb763dd5848a..6ed3ec27ea332 100644 --- a/drive/driveimpl/dirfs/mkdir.go +++ b/drive/driveimpl/dirfs/mkdir.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/dirfs/openfile.go b/drive/driveimpl/dirfs/openfile.go index 9b678719b5b6c..71b55ab206e24 100644 --- a/drive/driveimpl/dirfs/openfile.go +++ b/drive/driveimpl/dirfs/openfile.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/dirfs/removeall.go b/drive/driveimpl/dirfs/removeall.go index 8fafc8c92bb04..a01d1dd0493d1 100644 --- a/drive/driveimpl/dirfs/removeall.go +++ b/drive/driveimpl/dirfs/removeall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/dirfs/rename.go b/drive/driveimpl/dirfs/rename.go index 5049acb895e70..eedb1674318c0 100644 --- a/drive/driveimpl/dirfs/rename.go +++ b/drive/driveimpl/dirfs/rename.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/dirfs/stat.go b/drive/driveimpl/dirfs/stat.go index 2e4243bedcd20..dd0aa976afb8e 100644 --- a/drive/driveimpl/dirfs/stat.go +++ b/drive/driveimpl/dirfs/stat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirfs diff --git a/drive/driveimpl/drive_test.go b/drive/driveimpl/drive_test.go index 818e84990baef..db7bfe60bde19 100644 --- a/drive/driveimpl/drive_test.go 
+++ b/drive/driveimpl/drive_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl diff --git a/drive/driveimpl/fileserver.go b/drive/driveimpl/fileserver.go index d448d83af761d..6aedfef2ce522 100644 --- a/drive/driveimpl/fileserver.go +++ b/drive/driveimpl/fileserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl diff --git a/drive/driveimpl/local_impl.go b/drive/driveimpl/local_impl.go index 871d033431038..ab908c0d31d9c 100644 --- a/drive/driveimpl/local_impl.go +++ b/drive/driveimpl/local_impl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package driveimpl provides an implementation of package drive. diff --git a/drive/driveimpl/remote_impl.go b/drive/driveimpl/remote_impl.go index 2ff98075e3012..df27ba71627df 100644 --- a/drive/driveimpl/remote_impl.go +++ b/drive/driveimpl/remote_impl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package driveimpl diff --git a/drive/driveimpl/shared/pathutil.go b/drive/driveimpl/shared/pathutil.go index fcadcdd5aa0e0..8c0fb179dcc03 100644 --- a/drive/driveimpl/shared/pathutil.go +++ b/drive/driveimpl/shared/pathutil.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package shared diff --git a/drive/driveimpl/shared/pathutil_test.go b/drive/driveimpl/shared/pathutil_test.go index daee695632ff4..b938f4c1c153f 100644 --- a/drive/driveimpl/shared/pathutil_test.go +++ b/drive/driveimpl/shared/pathutil_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc 
& contributors // SPDX-License-Identifier: BSD-3-Clause package shared diff --git a/drive/driveimpl/shared/readonlydir.go b/drive/driveimpl/shared/readonlydir.go index a495a2d5a93d6..b0f958231968f 100644 --- a/drive/driveimpl/shared/readonlydir.go +++ b/drive/driveimpl/shared/readonlydir.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package shared contains types and functions shared by different drive diff --git a/drive/driveimpl/shared/stat.go b/drive/driveimpl/shared/stat.go index d8022894c0888..93aad90abc49d 100644 --- a/drive/driveimpl/shared/stat.go +++ b/drive/driveimpl/shared/stat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package shared diff --git a/drive/driveimpl/shared/xml.go b/drive/driveimpl/shared/xml.go index 79fd0885dd500..ffaeb031b7636 100644 --- a/drive/driveimpl/shared/xml.go +++ b/drive/driveimpl/shared/xml.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package shared diff --git a/drive/local.go b/drive/local.go index 052efb3f97ecf..300d142d4445b 100644 --- a/drive/local.go +++ b/drive/local.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package drive provides a filesystem that allows sharing folders between diff --git a/drive/remote.go b/drive/remote.go index 2c6fba894dbff..5f34d0023e6f7 100644 --- a/drive/remote.go +++ b/drive/remote.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package drive diff --git a/drive/remote_nonunix.go b/drive/remote_nonunix.go index d1153c5925419..4186ec0ad46e7 100644 --- a/drive/remote_nonunix.go +++ 
b/drive/remote_nonunix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !unix diff --git a/drive/remote_permissions.go b/drive/remote_permissions.go index 420eff9a0e743..31ec0caee881d 100644 --- a/drive/remote_permissions.go +++ b/drive/remote_permissions.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package drive diff --git a/drive/remote_permissions_test.go b/drive/remote_permissions_test.go index ff039c80020c8..5d63a503f75d9 100644 --- a/drive/remote_permissions_test.go +++ b/drive/remote_permissions_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package drive diff --git a/drive/remote_test.go b/drive/remote_test.go index e05b23839bade..c0de1723aee59 100644 --- a/drive/remote_test.go +++ b/drive/remote_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package drive diff --git a/drive/remote_unix.go b/drive/remote_unix.go index 0e41524dbd304..4b367ef5ff79a 100644 --- a/drive/remote_unix.go +++ b/drive/remote_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build unix diff --git a/envknob/envknob.go b/envknob/envknob.go index 17a21387ecaea..2b1461f11f308 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package envknob provides access to environment-variable tweakable diff --git a/envknob/envknob_nottest.go b/envknob/envknob_nottest.go index 0dd900cc8104e..4693ceebe746a 100644 --- 
a/envknob/envknob_nottest.go +++ b/envknob/envknob_nottest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_not_in_tests diff --git a/envknob/envknob_testable.go b/envknob/envknob_testable.go index e7f038336c4f3..5f0beea4f962e 100644 --- a/envknob/envknob_testable.go +++ b/envknob/envknob_testable.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_not_in_tests diff --git a/envknob/featureknob/featureknob.go b/envknob/featureknob/featureknob.go index 5a54a1c42978d..049366549fcb3 100644 --- a/envknob/featureknob/featureknob.go +++ b/envknob/featureknob/featureknob.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package featureknob provides a facility to control whether features diff --git a/envknob/logknob/logknob.go b/envknob/logknob/logknob.go index 93302d0d2bd5c..bc6e8c3627077 100644 --- a/envknob/logknob/logknob.go +++ b/envknob/logknob/logknob.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package logknob provides a helpful wrapper that allows enabling logging diff --git a/envknob/logknob/logknob_test.go b/envknob/logknob/logknob_test.go index aa4fb44214e12..9e7ab8aef6368 100644 --- a/envknob/logknob/logknob_test.go +++ b/envknob/logknob/logknob_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logknob diff --git a/feature/ace/ace.go b/feature/ace/ace.go index b6d36543c5281..b99516657ed25 100644 --- a/feature/ace/ace.go +++ b/feature/ace/ace.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ace registers support for Alternate Connectivity Endpoints (ACE). diff --git a/feature/appconnectors/appconnectors.go b/feature/appconnectors/appconnectors.go index 28f5ccde35acb..82d29ce0e6034 100644 --- a/feature/appconnectors/appconnectors.go +++ b/feature/appconnectors/appconnectors.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package appconnectors registers support for Tailscale App Connectors. diff --git a/feature/buildfeatures/buildfeatures.go b/feature/buildfeatures/buildfeatures.go index cdb31dc015673..ca4de74344485 100644 --- a/feature/buildfeatures/buildfeatures.go +++ b/feature/buildfeatures/buildfeatures.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:generate go run gen.go diff --git a/feature/buildfeatures/feature_ace_disabled.go b/feature/buildfeatures/feature_ace_disabled.go index b4808d4976b02..91a7eeb46da0d 100644 --- a/feature/buildfeatures/feature_ace_disabled.go +++ b/feature/buildfeatures/feature_ace_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_ace_enabled.go b/feature/buildfeatures/feature_ace_enabled.go index 4812f9a61cd4c..0d975ec7ffb35 100644 --- a/feature/buildfeatures/feature_ace_enabled.go +++ b/feature/buildfeatures/feature_ace_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_acme_disabled.go b/feature/buildfeatures/feature_acme_disabled.go index 0a7f25a821cc5..0add296a67f0a 100644 --- a/feature/buildfeatures/feature_acme_disabled.go +++ b/feature/buildfeatures/feature_acme_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_acme_enabled.go b/feature/buildfeatures/feature_acme_enabled.go index f074bfb4e1a7e..78182eaa488c2 100644 --- a/feature/buildfeatures/feature_acme_enabled.go +++ b/feature/buildfeatures/feature_acme_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_advertiseexitnode_disabled.go b/feature/buildfeatures/feature_advertiseexitnode_disabled.go index d4fdcec22db3c..aeac607012019 100644 --- a/feature/buildfeatures/feature_advertiseexitnode_disabled.go +++ b/feature/buildfeatures/feature_advertiseexitnode_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_advertiseexitnode_enabled.go b/feature/buildfeatures/feature_advertiseexitnode_enabled.go index 28246143ecb3c..0a7451dc3226f 100644 --- a/feature/buildfeatures/feature_advertiseexitnode_enabled.go +++ b/feature/buildfeatures/feature_advertiseexitnode_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_advertiseroutes_disabled.go b/feature/buildfeatures/feature_advertiseroutes_disabled.go index 59042720f3870..dbb3bb059eb04 100644 --- a/feature/buildfeatures/feature_advertiseroutes_disabled.go +++ b/feature/buildfeatures/feature_advertiseroutes_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_advertiseroutes_enabled.go b/feature/buildfeatures/feature_advertiseroutes_enabled.go index 118fcd55d64e4..3abe33644631d 100644 --- a/feature/buildfeatures/feature_advertiseroutes_enabled.go +++ b/feature/buildfeatures/feature_advertiseroutes_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_appconnectors_disabled.go b/feature/buildfeatures/feature_appconnectors_disabled.go index 64ea8f86b4104..dcb9f24d776e8 100644 --- a/feature/buildfeatures/feature_appconnectors_disabled.go +++ b/feature/buildfeatures/feature_appconnectors_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_appconnectors_enabled.go b/feature/buildfeatures/feature_appconnectors_enabled.go index e00eaffa3e6fc..edbfe5fcf1806 100644 --- a/feature/buildfeatures/feature_appconnectors_enabled.go +++ b/feature/buildfeatures/feature_appconnectors_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_aws_disabled.go b/feature/buildfeatures/feature_aws_disabled.go index 66b670c1fe451..22b611e804a86 100644 --- a/feature/buildfeatures/feature_aws_disabled.go +++ b/feature/buildfeatures/feature_aws_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_aws_enabled.go b/feature/buildfeatures/feature_aws_enabled.go index 30203b2aa6df8..5a640a252f149 100644 --- a/feature/buildfeatures/feature_aws_enabled.go +++ b/feature/buildfeatures/feature_aws_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_bakedroots_disabled.go b/feature/buildfeatures/feature_bakedroots_disabled.go index f203bc1b06d44..c06ebd6ff8c02 100644 --- a/feature/buildfeatures/feature_bakedroots_disabled.go +++ b/feature/buildfeatures/feature_bakedroots_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_bakedroots_enabled.go b/feature/buildfeatures/feature_bakedroots_enabled.go index 69cf2c34ccf6a..8477e00514d84 100644 --- a/feature/buildfeatures/feature_bakedroots_enabled.go +++ b/feature/buildfeatures/feature_bakedroots_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_bird_disabled.go b/feature/buildfeatures/feature_bird_disabled.go index 469aa41f954a9..60ca3eaac61b4 100644 --- a/feature/buildfeatures/feature_bird_disabled.go +++ b/feature/buildfeatures/feature_bird_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_bird_enabled.go b/feature/buildfeatures/feature_bird_enabled.go index 792129f64f567..57203324b2a53 100644 --- a/feature/buildfeatures/feature_bird_enabled.go +++ b/feature/buildfeatures/feature_bird_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_c2n_disabled.go b/feature/buildfeatures/feature_c2n_disabled.go index bc37e9e7bfd23..3fcdd3628cb60 100644 --- a/feature/buildfeatures/feature_c2n_disabled.go +++ b/feature/buildfeatures/feature_c2n_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_c2n_enabled.go b/feature/buildfeatures/feature_c2n_enabled.go index 5950e71571652..41f97157f57f8 100644 --- a/feature/buildfeatures/feature_c2n_enabled.go +++ b/feature/buildfeatures/feature_c2n_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_cachenetmap_disabled.go b/feature/buildfeatures/feature_cachenetmap_disabled.go index 22407fe38a57f..d05e9315f2f8d 100644 --- a/feature/buildfeatures/feature_cachenetmap_disabled.go +++ b/feature/buildfeatures/feature_cachenetmap_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_cachenetmap_enabled.go b/feature/buildfeatures/feature_cachenetmap_enabled.go index 02663c416bcbb..b1cd51a704152 100644 --- a/feature/buildfeatures/feature_cachenetmap_enabled.go +++ b/feature/buildfeatures/feature_cachenetmap_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_captiveportal_disabled.go b/feature/buildfeatures/feature_captiveportal_disabled.go index 367fef81bdc16..7535da5066ae6 100644 --- a/feature/buildfeatures/feature_captiveportal_disabled.go +++ b/feature/buildfeatures/feature_captiveportal_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_captiveportal_enabled.go b/feature/buildfeatures/feature_captiveportal_enabled.go index bd8e1f6a80ff1..90d70ab1d1556 100644 --- a/feature/buildfeatures/feature_captiveportal_enabled.go +++ b/feature/buildfeatures/feature_captiveportal_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_capture_disabled.go b/feature/buildfeatures/feature_capture_disabled.go index 58535958f26e8..8f46b9c244f1b 100644 --- a/feature/buildfeatures/feature_capture_disabled.go +++ b/feature/buildfeatures/feature_capture_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_capture_enabled.go b/feature/buildfeatures/feature_capture_enabled.go index 7120a3d06fa7d..3e1a2d7aaa70f 100644 --- a/feature/buildfeatures/feature_capture_enabled.go +++ b/feature/buildfeatures/feature_capture_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_cliconndiag_disabled.go b/feature/buildfeatures/feature_cliconndiag_disabled.go index 06d8c7935fd4a..d38c4a3d6cf35 100644 --- a/feature/buildfeatures/feature_cliconndiag_disabled.go +++ b/feature/buildfeatures/feature_cliconndiag_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_cliconndiag_enabled.go b/feature/buildfeatures/feature_cliconndiag_enabled.go index d6125ef08051c..88775b24de51f 100644 --- a/feature/buildfeatures/feature_cliconndiag_enabled.go +++ b/feature/buildfeatures/feature_cliconndiag_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_clientmetrics_disabled.go b/feature/buildfeatures/feature_clientmetrics_disabled.go index 721908bb079a2..0345ccc609fdf 100644 --- a/feature/buildfeatures/feature_clientmetrics_disabled.go +++ b/feature/buildfeatures/feature_clientmetrics_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_clientmetrics_enabled.go b/feature/buildfeatures/feature_clientmetrics_enabled.go index deaeb6e69b1c3..2e58155bd5261 100644 --- a/feature/buildfeatures/feature_clientmetrics_enabled.go +++ b/feature/buildfeatures/feature_clientmetrics_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_clientupdate_disabled.go b/feature/buildfeatures/feature_clientupdate_disabled.go index 165c9cc9a409d..6662ca2b9cda7 100644 --- a/feature/buildfeatures/feature_clientupdate_disabled.go +++ b/feature/buildfeatures/feature_clientupdate_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_clientupdate_enabled.go b/feature/buildfeatures/feature_clientupdate_enabled.go index 3c3c7878c53a9..041cdf8a53e79 100644 --- a/feature/buildfeatures/feature_clientupdate_enabled.go +++ b/feature/buildfeatures/feature_clientupdate_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_cloud_disabled.go b/feature/buildfeatures/feature_cloud_disabled.go index 3b877a9c68d40..b2dc2607ffda2 100644 --- a/feature/buildfeatures/feature_cloud_disabled.go +++ b/feature/buildfeatures/feature_cloud_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_cloud_enabled.go b/feature/buildfeatures/feature_cloud_enabled.go index 8fd748de56c7e..5ee91b9ed7c4c 100644 --- a/feature/buildfeatures/feature_cloud_enabled.go +++ b/feature/buildfeatures/feature_cloud_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_completion_disabled.go b/feature/buildfeatures/feature_completion_disabled.go index ea319beb0af3e..aa46c9c6157e5 100644 --- a/feature/buildfeatures/feature_completion_disabled.go +++ b/feature/buildfeatures/feature_completion_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_completion_enabled.go b/feature/buildfeatures/feature_completion_enabled.go index 6db41c97b3e76..561a377edea15 100644 --- a/feature/buildfeatures/feature_completion_enabled.go +++ b/feature/buildfeatures/feature_completion_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_dbus_disabled.go b/feature/buildfeatures/feature_dbus_disabled.go index e6ab896773fd1..c09fa7eeb7a34 100644 --- a/feature/buildfeatures/feature_dbus_disabled.go +++ b/feature/buildfeatures/feature_dbus_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_dbus_enabled.go b/feature/buildfeatures/feature_dbus_enabled.go index 374331cdabe0c..f3cc9f003f0ab 100644 --- a/feature/buildfeatures/feature_dbus_enabled.go +++ b/feature/buildfeatures/feature_dbus_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_debug_disabled.go b/feature/buildfeatures/feature_debug_disabled.go index eb048c0826eb9..4faafbb756559 100644 --- a/feature/buildfeatures/feature_debug_disabled.go +++ b/feature/buildfeatures/feature_debug_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_debug_enabled.go b/feature/buildfeatures/feature_debug_enabled.go index 12a2700a45761..a99dc81044cea 100644 --- a/feature/buildfeatures/feature_debug_enabled.go +++ b/feature/buildfeatures/feature_debug_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_debugeventbus_disabled.go b/feature/buildfeatures/feature_debugeventbus_disabled.go index 2eb59993444af..a7cf3dd72e8c6 100644 --- a/feature/buildfeatures/feature_debugeventbus_disabled.go +++ b/feature/buildfeatures/feature_debugeventbus_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_debugeventbus_enabled.go b/feature/buildfeatures/feature_debugeventbus_enabled.go index df13b6fa23167..caa4ca30a5039 100644 --- a/feature/buildfeatures/feature_debugeventbus_enabled.go +++ b/feature/buildfeatures/feature_debugeventbus_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_debugportmapper_disabled.go b/feature/buildfeatures/feature_debugportmapper_disabled.go index eff85b8baaf50..4b3b03be824f9 100644 --- a/feature/buildfeatures/feature_debugportmapper_disabled.go +++ b/feature/buildfeatures/feature_debugportmapper_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_debugportmapper_enabled.go b/feature/buildfeatures/feature_debugportmapper_enabled.go index 491aa5ed84af1..89250083161e6 100644 --- a/feature/buildfeatures/feature_debugportmapper_enabled.go +++ b/feature/buildfeatures/feature_debugportmapper_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_desktop_sessions_disabled.go b/feature/buildfeatures/feature_desktop_sessions_disabled.go index 1536c886fec25..0df68e9d00704 100644 --- a/feature/buildfeatures/feature_desktop_sessions_disabled.go +++ b/feature/buildfeatures/feature_desktop_sessions_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_desktop_sessions_enabled.go b/feature/buildfeatures/feature_desktop_sessions_enabled.go index 84658de952c86..4f03b9da894fa 100644 --- a/feature/buildfeatures/feature_desktop_sessions_enabled.go +++ b/feature/buildfeatures/feature_desktop_sessions_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_dns_disabled.go b/feature/buildfeatures/feature_dns_disabled.go index 30d7379cb9092..e59e4faef0a9a 100644 --- a/feature/buildfeatures/feature_dns_disabled.go +++ b/feature/buildfeatures/feature_dns_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_dns_enabled.go b/feature/buildfeatures/feature_dns_enabled.go index 962f2596bf5c9..f7c7097143aa9 100644 --- a/feature/buildfeatures/feature_dns_enabled.go +++ b/feature/buildfeatures/feature_dns_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_doctor_disabled.go b/feature/buildfeatures/feature_doctor_disabled.go index 8c15e951e311f..a08af9c837771 100644 --- a/feature/buildfeatures/feature_doctor_disabled.go +++ b/feature/buildfeatures/feature_doctor_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_doctor_enabled.go b/feature/buildfeatures/feature_doctor_enabled.go index a8a0bb7d2056b..502950855dd69 100644 --- a/feature/buildfeatures/feature_doctor_enabled.go +++ b/feature/buildfeatures/feature_doctor_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_drive_disabled.go b/feature/buildfeatures/feature_drive_disabled.go index 07202638952e8..90d2eac1b0d15 100644 --- a/feature/buildfeatures/feature_drive_disabled.go +++ b/feature/buildfeatures/feature_drive_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_drive_enabled.go b/feature/buildfeatures/feature_drive_enabled.go index 9f58836a43fc7..8117585c5ef79 100644 --- a/feature/buildfeatures/feature_drive_enabled.go +++ b/feature/buildfeatures/feature_drive_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_gro_disabled.go b/feature/buildfeatures/feature_gro_disabled.go index ffbd0da2e3e4f..9da12c587851f 100644 --- a/feature/buildfeatures/feature_gro_disabled.go +++ b/feature/buildfeatures/feature_gro_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_gro_enabled.go b/feature/buildfeatures/feature_gro_enabled.go index e2c8024e07815..5ca7aeef52b1c 100644 --- a/feature/buildfeatures/feature_gro_enabled.go +++ b/feature/buildfeatures/feature_gro_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_health_disabled.go b/feature/buildfeatures/feature_health_disabled.go index 2f2bcf240a455..59cb53d8c44ba 100644 --- a/feature/buildfeatures/feature_health_disabled.go +++ b/feature/buildfeatures/feature_health_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_health_enabled.go b/feature/buildfeatures/feature_health_enabled.go index 00ce3684eb6db..56b9a97f0b226 100644 --- a/feature/buildfeatures/feature_health_enabled.go +++ b/feature/buildfeatures/feature_health_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_hujsonconf_disabled.go b/feature/buildfeatures/feature_hujsonconf_disabled.go index cee076bc24527..01c82724abc90 100644 --- a/feature/buildfeatures/feature_hujsonconf_disabled.go +++ b/feature/buildfeatures/feature_hujsonconf_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_hujsonconf_enabled.go b/feature/buildfeatures/feature_hujsonconf_enabled.go index aefeeace5f0b9..d321f78ae9d09 100644 --- a/feature/buildfeatures/feature_hujsonconf_enabled.go +++ b/feature/buildfeatures/feature_hujsonconf_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_identityfederation_disabled.go b/feature/buildfeatures/feature_identityfederation_disabled.go index 94488adc8637c..535a478e078f2 100644 --- a/feature/buildfeatures/feature_identityfederation_disabled.go +++ b/feature/buildfeatures/feature_identityfederation_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_identityfederation_enabled.go b/feature/buildfeatures/feature_identityfederation_enabled.go index 892d62d66c37c..85708da513542 100644 --- a/feature/buildfeatures/feature_identityfederation_enabled.go +++ b/feature/buildfeatures/feature_identityfederation_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_iptables_disabled.go b/feature/buildfeatures/feature_iptables_disabled.go index 8cda5be5d6ae6..d444aedbe5aaf 100644 --- a/feature/buildfeatures/feature_iptables_disabled.go +++ b/feature/buildfeatures/feature_iptables_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_iptables_enabled.go b/feature/buildfeatures/feature_iptables_enabled.go index 44d98473f05f2..edc8f110decd0 100644 --- a/feature/buildfeatures/feature_iptables_enabled.go +++ b/feature/buildfeatures/feature_iptables_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_kube_disabled.go b/feature/buildfeatures/feature_kube_disabled.go index 2b76c57e78b94..c16768dabe17d 100644 --- a/feature/buildfeatures/feature_kube_disabled.go +++ b/feature/buildfeatures/feature_kube_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_kube_enabled.go b/feature/buildfeatures/feature_kube_enabled.go index 7abca1759fc49..97fa18e2eed91 100644 --- a/feature/buildfeatures/feature_kube_enabled.go +++ b/feature/buildfeatures/feature_kube_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_lazywg_disabled.go b/feature/buildfeatures/feature_lazywg_disabled.go index ce81d80bab6a1..af1ad388c03a7 100644 --- a/feature/buildfeatures/feature_lazywg_disabled.go +++ b/feature/buildfeatures/feature_lazywg_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_lazywg_enabled.go b/feature/buildfeatures/feature_lazywg_enabled.go index 259357f7f86ef..f2d6a10f81580 100644 --- a/feature/buildfeatures/feature_lazywg_enabled.go +++ b/feature/buildfeatures/feature_lazywg_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_linkspeed_disabled.go b/feature/buildfeatures/feature_linkspeed_disabled.go index 19e254a740ff7..c579fdbdcea06 100644 --- a/feature/buildfeatures/feature_linkspeed_disabled.go +++ b/feature/buildfeatures/feature_linkspeed_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_linkspeed_enabled.go b/feature/buildfeatures/feature_linkspeed_enabled.go index 939858a162910..a63aabc2a3247 100644 --- a/feature/buildfeatures/feature_linkspeed_enabled.go +++ b/feature/buildfeatures/feature_linkspeed_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_linuxdnsfight_disabled.go b/feature/buildfeatures/feature_linuxdnsfight_disabled.go index 2e5b50ea06af0..801696c5f3bc9 100644 --- a/feature/buildfeatures/feature_linuxdnsfight_disabled.go +++ b/feature/buildfeatures/feature_linuxdnsfight_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_linuxdnsfight_enabled.go b/feature/buildfeatures/feature_linuxdnsfight_enabled.go index b9419fccbfc09..9637bdeebcd29 100644 --- a/feature/buildfeatures/feature_linuxdnsfight_enabled.go +++ b/feature/buildfeatures/feature_linuxdnsfight_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_listenrawdisco_disabled.go b/feature/buildfeatures/feature_listenrawdisco_disabled.go index 2911780636cb7..4bad9d002b7ad 100644 --- a/feature/buildfeatures/feature_listenrawdisco_disabled.go +++ b/feature/buildfeatures/feature_listenrawdisco_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_listenrawdisco_enabled.go b/feature/buildfeatures/feature_listenrawdisco_enabled.go index 4a4f85ae37319..e5cfe687f5716 100644 --- a/feature/buildfeatures/feature_listenrawdisco_enabled.go +++ b/feature/buildfeatures/feature_listenrawdisco_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_logtail_disabled.go b/feature/buildfeatures/feature_logtail_disabled.go index 140092a2eba5b..983055d4742a9 100644 --- a/feature/buildfeatures/feature_logtail_disabled.go +++ b/feature/buildfeatures/feature_logtail_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_logtail_enabled.go b/feature/buildfeatures/feature_logtail_enabled.go index 6e777216bf3cb..f9ce154028832 100644 --- a/feature/buildfeatures/feature_logtail_enabled.go +++ b/feature/buildfeatures/feature_logtail_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_netlog_disabled.go b/feature/buildfeatures/feature_netlog_disabled.go index 60367a12600f3..a274f6aca61b6 100644 --- a/feature/buildfeatures/feature_netlog_disabled.go +++ b/feature/buildfeatures/feature_netlog_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_netlog_enabled.go b/feature/buildfeatures/feature_netlog_enabled.go index f9d2abad30553..1206e7e92b062 100644 --- a/feature/buildfeatures/feature_netlog_enabled.go +++ b/feature/buildfeatures/feature_netlog_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_netstack_disabled.go b/feature/buildfeatures/feature_netstack_disabled.go index acb6e8e76396e..45c86c0e362b6 100644 --- a/feature/buildfeatures/feature_netstack_disabled.go +++ b/feature/buildfeatures/feature_netstack_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_netstack_enabled.go b/feature/buildfeatures/feature_netstack_enabled.go index 04f67118523a0..2fc67164ec0b0 100644 --- a/feature/buildfeatures/feature_netstack_enabled.go +++ b/feature/buildfeatures/feature_netstack_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_networkmanager_disabled.go b/feature/buildfeatures/feature_networkmanager_disabled.go index d0ec6f01796ab..9ac3a928b62e0 100644 --- a/feature/buildfeatures/feature_networkmanager_disabled.go +++ b/feature/buildfeatures/feature_networkmanager_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_networkmanager_enabled.go b/feature/buildfeatures/feature_networkmanager_enabled.go index ec284c3109f75..5dd0431e3a892 100644 --- a/feature/buildfeatures/feature_networkmanager_enabled.go +++ b/feature/buildfeatures/feature_networkmanager_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_oauthkey_disabled.go b/feature/buildfeatures/feature_oauthkey_disabled.go index 72ad1723b1d14..8801a90d6db72 100644 --- a/feature/buildfeatures/feature_oauthkey_disabled.go +++ b/feature/buildfeatures/feature_oauthkey_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_oauthkey_enabled.go b/feature/buildfeatures/feature_oauthkey_enabled.go index 39c52a2b0b46d..e03e437957a22 100644 --- a/feature/buildfeatures/feature_oauthkey_enabled.go +++ b/feature/buildfeatures/feature_oauthkey_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_osrouter_disabled.go b/feature/buildfeatures/feature_osrouter_disabled.go index ccd7192bb8899..8004589b8a466 100644 --- a/feature/buildfeatures/feature_osrouter_disabled.go +++ b/feature/buildfeatures/feature_osrouter_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_osrouter_enabled.go b/feature/buildfeatures/feature_osrouter_enabled.go index a5dacc596bfbc..78ed0ca9d96df 100644 --- a/feature/buildfeatures/feature_osrouter_enabled.go +++ b/feature/buildfeatures/feature_osrouter_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_outboundproxy_disabled.go b/feature/buildfeatures/feature_outboundproxy_disabled.go index bf74db0600927..35de2fdbda3b8 100644 --- a/feature/buildfeatures/feature_outboundproxy_disabled.go +++ b/feature/buildfeatures/feature_outboundproxy_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_outboundproxy_enabled.go b/feature/buildfeatures/feature_outboundproxy_enabled.go index 53bb99d5c6a79..5c20b9458375a 100644 --- a/feature/buildfeatures/feature_outboundproxy_enabled.go +++ b/feature/buildfeatures/feature_outboundproxy_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_peerapiclient_disabled.go b/feature/buildfeatures/feature_peerapiclient_disabled.go index 83cc2bdfeef5c..bcb45e5fe3278 100644 --- a/feature/buildfeatures/feature_peerapiclient_disabled.go +++ b/feature/buildfeatures/feature_peerapiclient_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_peerapiclient_enabled.go b/feature/buildfeatures/feature_peerapiclient_enabled.go index 0bd3f50a869ca..214af9312b68e 100644 --- a/feature/buildfeatures/feature_peerapiclient_enabled.go +++ b/feature/buildfeatures/feature_peerapiclient_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_peerapiserver_disabled.go b/feature/buildfeatures/feature_peerapiserver_disabled.go index 4a4f32b8a4065..60b2df96544bb 100644 --- a/feature/buildfeatures/feature_peerapiserver_disabled.go +++ b/feature/buildfeatures/feature_peerapiserver_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_peerapiserver_enabled.go b/feature/buildfeatures/feature_peerapiserver_enabled.go index 17d0547b80946..9c56c5309d483 100644 --- a/feature/buildfeatures/feature_peerapiserver_enabled.go +++ b/feature/buildfeatures/feature_peerapiserver_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_portlist_disabled.go b/feature/buildfeatures/feature_portlist_disabled.go index 934061fd8328f..9269a7b5e4ba6 100644 --- a/feature/buildfeatures/feature_portlist_disabled.go +++ b/feature/buildfeatures/feature_portlist_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_portlist_enabled.go b/feature/buildfeatures/feature_portlist_enabled.go index c1dc1c163b80e..31a2875363517 100644 --- a/feature/buildfeatures/feature_portlist_enabled.go +++ b/feature/buildfeatures/feature_portlist_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_portmapper_disabled.go b/feature/buildfeatures/feature_portmapper_disabled.go index 212b22d40abfb..dea23f2bd6c3b 100644 --- a/feature/buildfeatures/feature_portmapper_disabled.go +++ b/feature/buildfeatures/feature_portmapper_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_portmapper_enabled.go b/feature/buildfeatures/feature_portmapper_enabled.go index 2f915d277a313..495a5bf207d40 100644 --- a/feature/buildfeatures/feature_portmapper_enabled.go +++ b/feature/buildfeatures/feature_portmapper_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_posture_disabled.go b/feature/buildfeatures/feature_posture_disabled.go index a78b1a95720cf..9987819a84980 100644 --- a/feature/buildfeatures/feature_posture_disabled.go +++ b/feature/buildfeatures/feature_posture_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_posture_enabled.go b/feature/buildfeatures/feature_posture_enabled.go index dcd9595f9ca96..4e601d33b578f 100644 --- a/feature/buildfeatures/feature_posture_enabled.go +++ b/feature/buildfeatures/feature_posture_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_qrcodes_disabled.go b/feature/buildfeatures/feature_qrcodes_disabled.go index 4b992501c969e..64d33cfcc731a 100644 --- a/feature/buildfeatures/feature_qrcodes_disabled.go +++ b/feature/buildfeatures/feature_qrcodes_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_qrcodes_enabled.go b/feature/buildfeatures/feature_qrcodes_enabled.go index 5b74e2b3e5cbe..35fe9741b0a59 100644 --- a/feature/buildfeatures/feature_qrcodes_enabled.go +++ b/feature/buildfeatures/feature_qrcodes_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_relayserver_disabled.go b/feature/buildfeatures/feature_relayserver_disabled.go index 08ced83101f96..cee2d93fea7e4 100644 --- a/feature/buildfeatures/feature_relayserver_disabled.go +++ b/feature/buildfeatures/feature_relayserver_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_relayserver_enabled.go b/feature/buildfeatures/feature_relayserver_enabled.go index 6a35f8305d68f..3886c853d6e6c 100644 --- a/feature/buildfeatures/feature_relayserver_enabled.go +++ b/feature/buildfeatures/feature_relayserver_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_resolved_disabled.go b/feature/buildfeatures/feature_resolved_disabled.go index 283dd20c76aaa..e19576e2a9131 100644 --- a/feature/buildfeatures/feature_resolved_disabled.go +++ b/feature/buildfeatures/feature_resolved_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_resolved_enabled.go b/feature/buildfeatures/feature_resolved_enabled.go index af1b3b41e9358..46e59411784fb 100644 --- a/feature/buildfeatures/feature_resolved_enabled.go +++ b/feature/buildfeatures/feature_resolved_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_sdnotify_disabled.go b/feature/buildfeatures/feature_sdnotify_disabled.go index 7efa2d22ff587..4ae1cd8b021b0 100644 --- a/feature/buildfeatures/feature_sdnotify_disabled.go +++ b/feature/buildfeatures/feature_sdnotify_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_sdnotify_enabled.go b/feature/buildfeatures/feature_sdnotify_enabled.go index 40fec9755dd16..0f6adcaaea901 100644 --- a/feature/buildfeatures/feature_sdnotify_enabled.go +++ b/feature/buildfeatures/feature_sdnotify_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_serve_disabled.go b/feature/buildfeatures/feature_serve_disabled.go index 6d79713500e29..51aa5e4cd7291 100644 --- a/feature/buildfeatures/feature_serve_disabled.go +++ b/feature/buildfeatures/feature_serve_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_serve_enabled.go b/feature/buildfeatures/feature_serve_enabled.go index 57bf2c6b0fc2b..10638f5b47a8a 100644 --- a/feature/buildfeatures/feature_serve_enabled.go +++ b/feature/buildfeatures/feature_serve_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_ssh_disabled.go b/feature/buildfeatures/feature_ssh_disabled.go index 754f50eb6a816..c51d5425d4e3a 100644 --- a/feature/buildfeatures/feature_ssh_disabled.go +++ b/feature/buildfeatures/feature_ssh_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_ssh_enabled.go b/feature/buildfeatures/feature_ssh_enabled.go index dbdc3a89fa027..539173db4366d 100644 --- a/feature/buildfeatures/feature_ssh_enabled.go +++ b/feature/buildfeatures/feature_ssh_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_synology_disabled.go b/feature/buildfeatures/feature_synology_disabled.go index 0cdf084c32d8e..98613f16c13c9 100644 --- a/feature/buildfeatures/feature_synology_disabled.go +++ b/feature/buildfeatures/feature_synology_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_synology_enabled.go b/feature/buildfeatures/feature_synology_enabled.go index dde4123b61eb0..2090dafb51f43 100644 --- a/feature/buildfeatures/feature_synology_enabled.go +++ b/feature/buildfeatures/feature_synology_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_syspolicy_disabled.go b/feature/buildfeatures/feature_syspolicy_disabled.go index 54d32e32e71d8..e7b2b3dad1889 100644 --- a/feature/buildfeatures/feature_syspolicy_disabled.go +++ b/feature/buildfeatures/feature_syspolicy_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_syspolicy_enabled.go b/feature/buildfeatures/feature_syspolicy_enabled.go index f7c403ae9d68b..5c3b2794adc1a 100644 --- a/feature/buildfeatures/feature_syspolicy_enabled.go +++ b/feature/buildfeatures/feature_syspolicy_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_systray_disabled.go b/feature/buildfeatures/feature_systray_disabled.go index 4ae1edb0ab83f..bfd16f7a4acb0 100644 --- a/feature/buildfeatures/feature_systray_disabled.go +++ b/feature/buildfeatures/feature_systray_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_systray_enabled.go b/feature/buildfeatures/feature_systray_enabled.go index 5fd7fd220325a..602e1223d7c81 100644 --- a/feature/buildfeatures/feature_systray_enabled.go +++ b/feature/buildfeatures/feature_systray_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_taildrop_disabled.go b/feature/buildfeatures/feature_taildrop_disabled.go index 8ffe90617839f..8165a68a848f0 100644 --- a/feature/buildfeatures/feature_taildrop_disabled.go +++ b/feature/buildfeatures/feature_taildrop_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_taildrop_enabled.go b/feature/buildfeatures/feature_taildrop_enabled.go index 4f55d2801c516..c07a2a037ffff 100644 --- a/feature/buildfeatures/feature_taildrop_enabled.go +++ b/feature/buildfeatures/feature_taildrop_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_tailnetlock_disabled.go b/feature/buildfeatures/feature_tailnetlock_disabled.go index 6b5a57f24ba4f..5a208babb7797 100644 --- a/feature/buildfeatures/feature_tailnetlock_disabled.go +++ b/feature/buildfeatures/feature_tailnetlock_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_tailnetlock_enabled.go b/feature/buildfeatures/feature_tailnetlock_enabled.go index afedb7faad312..c65151152ea83 100644 --- a/feature/buildfeatures/feature_tailnetlock_enabled.go +++ b/feature/buildfeatures/feature_tailnetlock_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_tap_disabled.go b/feature/buildfeatures/feature_tap_disabled.go index f0b3eec8d7e6f..07605ff3792dd 100644 --- a/feature/buildfeatures/feature_tap_disabled.go +++ b/feature/buildfeatures/feature_tap_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_tap_enabled.go b/feature/buildfeatures/feature_tap_enabled.go index 1363c4b44afb2..0b88a42b6604b 100644 --- a/feature/buildfeatures/feature_tap_enabled.go +++ b/feature/buildfeatures/feature_tap_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_tpm_disabled.go b/feature/buildfeatures/feature_tpm_disabled.go index b9d55815ef5df..b0351c80dfd96 100644 --- a/feature/buildfeatures/feature_tpm_disabled.go +++ b/feature/buildfeatures/feature_tpm_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_tpm_enabled.go b/feature/buildfeatures/feature_tpm_enabled.go index dcfc8a30442ad..0af8a10e9b883 100644 --- a/feature/buildfeatures/feature_tpm_enabled.go +++ b/feature/buildfeatures/feature_tpm_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_unixsocketidentity_disabled.go b/feature/buildfeatures/feature_unixsocketidentity_disabled.go index d64e48b825eac..25320544b2282 100644 --- a/feature/buildfeatures/feature_unixsocketidentity_disabled.go +++ b/feature/buildfeatures/feature_unixsocketidentity_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_unixsocketidentity_enabled.go b/feature/buildfeatures/feature_unixsocketidentity_enabled.go index 463ac2ced3636..9511ba00f4094 100644 --- a/feature/buildfeatures/feature_unixsocketidentity_enabled.go +++ b/feature/buildfeatures/feature_unixsocketidentity_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_useexitnode_disabled.go b/feature/buildfeatures/feature_useexitnode_disabled.go index 51bec8046cb35..51e95fca084bd 100644 --- a/feature/buildfeatures/feature_useexitnode_disabled.go +++ b/feature/buildfeatures/feature_useexitnode_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_useexitnode_enabled.go b/feature/buildfeatures/feature_useexitnode_enabled.go index f7ab414de9477..e6df5c85fd3f4 100644 --- a/feature/buildfeatures/feature_useexitnode_enabled.go +++ b/feature/buildfeatures/feature_useexitnode_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_useproxy_disabled.go b/feature/buildfeatures/feature_useproxy_disabled.go index 9f29a9820eb99..604825ba991ca 100644 --- a/feature/buildfeatures/feature_useproxy_disabled.go +++ b/feature/buildfeatures/feature_useproxy_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_useproxy_enabled.go b/feature/buildfeatures/feature_useproxy_enabled.go index 9195f2fdce784..fe2ecc9ea538a 100644 --- a/feature/buildfeatures/feature_useproxy_enabled.go +++ b/feature/buildfeatures/feature_useproxy_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_usermetrics_disabled.go b/feature/buildfeatures/feature_usermetrics_disabled.go index 092c89c3b543f..96441b5138497 100644 --- a/feature/buildfeatures/feature_usermetrics_disabled.go +++ b/feature/buildfeatures/feature_usermetrics_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_usermetrics_enabled.go b/feature/buildfeatures/feature_usermetrics_enabled.go index 813e3c3477b66..427c6fd397657 100644 --- a/feature/buildfeatures/feature_usermetrics_enabled.go +++ b/feature/buildfeatures/feature_usermetrics_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_useroutes_disabled.go b/feature/buildfeatures/feature_useroutes_disabled.go index ecf9d022bed74..26e2311c6fa17 100644 --- a/feature/buildfeatures/feature_useroutes_disabled.go +++ b/feature/buildfeatures/feature_useroutes_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_useroutes_enabled.go b/feature/buildfeatures/feature_useroutes_enabled.go index c0a59322ecdc1..0dc7089d99165 100644 --- a/feature/buildfeatures/feature_useroutes_enabled.go +++ b/feature/buildfeatures/feature_useroutes_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/feature_wakeonlan_disabled.go b/feature/buildfeatures/feature_wakeonlan_disabled.go index 816ac661f78ce..ca76d0b7f00df 100644 --- a/feature/buildfeatures/feature_wakeonlan_disabled.go +++ b/feature/buildfeatures/feature_wakeonlan_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_wakeonlan_enabled.go b/feature/buildfeatures/feature_wakeonlan_enabled.go index 34b3348a10fef..07bb16fba96fc 100644 --- a/feature/buildfeatures/feature_wakeonlan_enabled.go +++ b/feature/buildfeatures/feature_wakeonlan_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_webclient_disabled.go b/feature/buildfeatures/feature_webclient_disabled.go index a7b24f4ac2dda..9792265c6d559 100644 --- a/feature/buildfeatures/feature_webclient_disabled.go +++ b/feature/buildfeatures/feature_webclient_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. diff --git a/feature/buildfeatures/feature_webclient_enabled.go b/feature/buildfeatures/feature_webclient_enabled.go index e40dad33c6ebb..cb558a5fe7831 100644 --- a/feature/buildfeatures/feature_webclient_enabled.go +++ b/feature/buildfeatures/feature_webclient_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen.go; DO NOT EDIT. 
diff --git a/feature/buildfeatures/gen.go b/feature/buildfeatures/gen.go index e967cb8ff1906..cf8e9d49f1f47 100644 --- a/feature/buildfeatures/gen.go +++ b/feature/buildfeatures/gen.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore @@ -17,7 +17,7 @@ import ( "tailscale.com/util/must" ) -const header = `// Copyright (c) Tailscale Inc & AUTHORS +const header = `// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code g|e|n|e|r|a|t|e|d by gen.go; D|O N|OT E|D|I|T. diff --git a/feature/c2n/c2n.go b/feature/c2n/c2n.go index ae942e31d0d95..331a9af955a07 100644 --- a/feature/c2n/c2n.go +++ b/feature/c2n/c2n.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package c2n registers support for C2N (Control-to-Node) communications. diff --git a/feature/capture/capture.go b/feature/capture/capture.go index e5e150de8e761..d7145e7c1ebd7 100644 --- a/feature/capture/capture.go +++ b/feature/capture/capture.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package capture formats packet logging into a debug pcap stream. diff --git a/feature/capture/dissector/dissector.go b/feature/capture/dissector/dissector.go index ab2f6c2ec1607..dec90e28b11b1 100644 --- a/feature/capture/dissector/dissector.go +++ b/feature/capture/dissector/dissector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dissector contains the Lua dissector for Tailscale packets. 
diff --git a/feature/clientupdate/clientupdate.go b/feature/clientupdate/clientupdate.go index 45fd21129b4e7..d47d048156046 100644 --- a/feature/clientupdate/clientupdate.go +++ b/feature/clientupdate/clientupdate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package clientupdate enables the client update feature. diff --git a/feature/condlite/expvar/expvar.go b/feature/condlite/expvar/expvar.go index edc16ac771b13..68aaf6e2cddf9 100644 --- a/feature/condlite/expvar/expvar.go +++ b/feature/condlite/expvar/expvar.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(ts_omit_debug && ts_omit_clientmetrics && ts_omit_usermetrics) diff --git a/feature/condlite/expvar/omit.go b/feature/condlite/expvar/omit.go index a21d94deb48eb..b5481695c9947 100644 --- a/feature/condlite/expvar/omit.go +++ b/feature/condlite/expvar/omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_debug && ts_omit_clientmetrics && ts_omit_usermetrics diff --git a/feature/condregister/condregister.go b/feature/condregister/condregister.go index 654483d1d7745..e0d72b7ac293c 100644 --- a/feature/condregister/condregister.go +++ b/feature/condregister/condregister.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The condregister package registers all conditional features guarded diff --git a/feature/condregister/identityfederation/doc.go b/feature/condregister/identityfederation/doc.go index 503b2c8f127d5..ee811bdec8064 100644 --- a/feature/condregister/identityfederation/doc.go +++ b/feature/condregister/identityfederation/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale 
Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package identityfederation registers support for authkey resolution diff --git a/feature/condregister/identityfederation/maybe_identityfederation.go b/feature/condregister/identityfederation/maybe_identityfederation.go index b1db42fc3c77a..04c37e36faa47 100644 --- a/feature/condregister/identityfederation/maybe_identityfederation.go +++ b/feature/condregister/identityfederation/maybe_identityfederation.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_identityfederation diff --git a/feature/condregister/maybe_ace.go b/feature/condregister/maybe_ace.go index 07023171144a5..a926f5b0d8810 100644 --- a/feature/condregister/maybe_ace.go +++ b/feature/condregister/maybe_ace.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_ace diff --git a/feature/condregister/maybe_appconnectors.go b/feature/condregister/maybe_appconnectors.go index 70112d7810b10..3b872bc1eb90d 100644 --- a/feature/condregister/maybe_appconnectors.go +++ b/feature/condregister/maybe_appconnectors.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_appconnectors diff --git a/feature/condregister/maybe_c2n.go b/feature/condregister/maybe_c2n.go index c222af533a37d..99258956ad8b2 100644 --- a/feature/condregister/maybe_c2n.go +++ b/feature/condregister/maybe_c2n.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_c2n diff --git a/feature/condregister/maybe_capture.go b/feature/condregister/maybe_capture.go index 0c68331f101cd..991843cb58194 
100644 --- a/feature/condregister/maybe_capture.go +++ b/feature/condregister/maybe_capture.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_capture diff --git a/feature/condregister/maybe_clientupdate.go b/feature/condregister/maybe_clientupdate.go index bc694f970c543..df36d8e67b92e 100644 --- a/feature/condregister/maybe_clientupdate.go +++ b/feature/condregister/maybe_clientupdate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_clientupdate diff --git a/feature/condregister/maybe_conn25.go b/feature/condregister/maybe_conn25.go index fb885bfe32fc1..6ce14b2b3f1ce 100644 --- a/feature/condregister/maybe_conn25.go +++ b/feature/condregister/maybe_conn25.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_conn25 diff --git a/feature/condregister/maybe_debugportmapper.go b/feature/condregister/maybe_debugportmapper.go index 4990d09ea5833..443b21e02d331 100644 --- a/feature/condregister/maybe_debugportmapper.go +++ b/feature/condregister/maybe_debugportmapper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_debugportmapper diff --git a/feature/condregister/maybe_doctor.go b/feature/condregister/maybe_doctor.go index 3dc9ffa539312..41d504c5394df 100644 --- a/feature/condregister/maybe_doctor.go +++ b/feature/condregister/maybe_doctor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_doctor diff --git a/feature/condregister/maybe_drive.go b/feature/condregister/maybe_drive.go 
index cb447ff289a29..4d979e821852b 100644 --- a/feature/condregister/maybe_drive.go +++ b/feature/condregister/maybe_drive.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_drive diff --git a/feature/condregister/maybe_linkspeed.go b/feature/condregister/maybe_linkspeed.go index 46064b39a5935..5e9e9e4004b19 100644 --- a/feature/condregister/maybe_linkspeed.go +++ b/feature/condregister/maybe_linkspeed.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android && !ts_omit_linkspeed diff --git a/feature/condregister/maybe_linuxdnsfight.go b/feature/condregister/maybe_linuxdnsfight.go index 0dae62b00ab8a..2866fd0d7a891 100644 --- a/feature/condregister/maybe_linuxdnsfight.go +++ b/feature/condregister/maybe_linuxdnsfight.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android && !ts_omit_linuxdnsfight diff --git a/feature/condregister/maybe_osrouter.go b/feature/condregister/maybe_osrouter.go index 7ab85add22021..771a86e48bfbf 100644 --- a/feature/condregister/maybe_osrouter.go +++ b/feature/condregister/maybe_osrouter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_osrouter diff --git a/feature/condregister/maybe_portlist.go b/feature/condregister/maybe_portlist.go index 1be56f177daf8..8de98d528ae48 100644 --- a/feature/condregister/maybe_portlist.go +++ b/feature/condregister/maybe_portlist.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_portlist diff --git 
a/feature/condregister/maybe_posture.go b/feature/condregister/maybe_posture.go index 6f14c27137127..ca056eb3612b8 100644 --- a/feature/condregister/maybe_posture.go +++ b/feature/condregister/maybe_posture.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_posture diff --git a/feature/condregister/maybe_relayserver.go b/feature/condregister/maybe_relayserver.go index 3360dd0627cc1..49404bef8a08b 100644 --- a/feature/condregister/maybe_relayserver.go +++ b/feature/condregister/maybe_relayserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_relayserver diff --git a/feature/condregister/maybe_sdnotify.go b/feature/condregister/maybe_sdnotify.go index 647996f881d8f..ac8e180cd791f 100644 --- a/feature/condregister/maybe_sdnotify.go +++ b/feature/condregister/maybe_sdnotify.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_sdnotify diff --git a/feature/condregister/maybe_store_aws.go b/feature/condregister/maybe_store_aws.go index 8358b49f05843..96de819d1ee39 100644 --- a/feature/condregister/maybe_store_aws.go +++ b/feature/condregister/maybe_store_aws.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (ts_aws || (linux && (arm64 || amd64) && !android)) && !ts_omit_aws diff --git a/feature/condregister/maybe_store_kube.go b/feature/condregister/maybe_store_kube.go index bb795b05e2450..a71ed00e2e248 100644 --- a/feature/condregister/maybe_store_kube.go +++ b/feature/condregister/maybe_store_kube.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (ts_kube || (linux && (arm64 || amd64) && !android)) && !ts_omit_kube diff --git a/feature/condregister/maybe_syspolicy.go b/feature/condregister/maybe_syspolicy.go index 49ec5c02c63e1..66d44ea3804e4 100644 --- a/feature/condregister/maybe_syspolicy.go +++ b/feature/condregister/maybe_syspolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_syspolicy diff --git a/feature/condregister/maybe_taildrop.go b/feature/condregister/maybe_taildrop.go index 5fd7b5f8c9a00..264ccff02006f 100644 --- a/feature/condregister/maybe_taildrop.go +++ b/feature/condregister/maybe_taildrop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_taildrop diff --git a/feature/condregister/maybe_tap.go b/feature/condregister/maybe_tap.go index eca4fc3ac84af..fc3997b17dca4 100644 --- a/feature/condregister/maybe_tap.go +++ b/feature/condregister/maybe_tap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_tap diff --git a/feature/condregister/maybe_tpm.go b/feature/condregister/maybe_tpm.go index caa57fef11d73..f46a0996f4feb 100644 --- a/feature/condregister/maybe_tpm.go +++ b/feature/condregister/maybe_tpm.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_tpm diff --git a/feature/condregister/maybe_wakeonlan.go b/feature/condregister/maybe_wakeonlan.go index 14cae605d1468..6fc32bb22fe26 100644 --- a/feature/condregister/maybe_wakeonlan.go +++ b/feature/condregister/maybe_wakeonlan.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_wakeonlan diff --git a/feature/condregister/oauthkey/doc.go b/feature/condregister/oauthkey/doc.go index 4c4ea5e4e3078..af1c931480672 100644 --- a/feature/condregister/oauthkey/doc.go +++ b/feature/condregister/oauthkey/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package oauthkey registers support for OAuth key resolution diff --git a/feature/condregister/oauthkey/maybe_oauthkey.go b/feature/condregister/oauthkey/maybe_oauthkey.go index be8d04b8ec035..9e912f149a3db 100644 --- a/feature/condregister/oauthkey/maybe_oauthkey.go +++ b/feature/condregister/oauthkey/maybe_oauthkey.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_oauthkey diff --git a/feature/condregister/portmapper/doc.go b/feature/condregister/portmapper/doc.go index 5c30538c43a11..21e45c4a676be 100644 --- a/feature/condregister/portmapper/doc.go +++ b/feature/condregister/portmapper/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package portmapper registers support for portmapper diff --git a/feature/condregister/portmapper/maybe_portmapper.go b/feature/condregister/portmapper/maybe_portmapper.go index c306fd3d5a1f0..e1be2b3ced942 100644 --- a/feature/condregister/portmapper/maybe_portmapper.go +++ b/feature/condregister/portmapper/maybe_portmapper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_portmapper diff --git a/feature/condregister/useproxy/doc.go b/feature/condregister/useproxy/doc.go index 1e8abb358fa83..d5fde367082e2 100644 --- 
a/feature/condregister/useproxy/doc.go +++ b/feature/condregister/useproxy/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package useproxy registers support for using proxies diff --git a/feature/condregister/useproxy/useproxy.go b/feature/condregister/useproxy/useproxy.go index bda6e49c0bb95..bca17de88f62e 100644 --- a/feature/condregister/useproxy/useproxy.go +++ b/feature/condregister/useproxy/useproxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_useproxy diff --git a/feature/conn25/conn25.go b/feature/conn25/conn25.go index e7baca4bd10b7..2a2b75a2d8b19 100644 --- a/feature/conn25/conn25.go +++ b/feature/conn25/conn25.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package conn25 registers the conn25 feature and implements its associated ipnext.Extension. 
diff --git a/feature/debugportmapper/debugportmapper.go b/feature/debugportmapper/debugportmapper.go index 2625086c64dcf..45f3c22084fba 100644 --- a/feature/debugportmapper/debugportmapper.go +++ b/feature/debugportmapper/debugportmapper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package debugportmapper registers support for debugging Tailscale's diff --git a/feature/doctor/doctor.go b/feature/doctor/doctor.go index 875b57d14c4f0..db061311b2e1f 100644 --- a/feature/doctor/doctor.go +++ b/feature/doctor/doctor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The doctor package registers the "doctor" problem diagnosis support into the diff --git a/feature/drive/drive.go b/feature/drive/drive.go index 3660a2b959643..1cf616a143d6e 100644 --- a/feature/drive/drive.go +++ b/feature/drive/drive.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package drive registers the Taildrive (file server) feature. diff --git a/feature/feature.go b/feature/feature.go index 48a4aff43b84d..5bd79db45c4ed 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package feature tracks which features are linked into the binary. 
diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 99df18b5a3c3b..c0a72a38d1fdd 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The featuretags package is a registry of all the ts_omit-able build tags. diff --git a/feature/featuretags/featuretags_test.go b/feature/featuretags/featuretags_test.go index 893ab0e6a1c71..b970295779591 100644 --- a/feature/featuretags/featuretags_test.go +++ b/feature/featuretags/featuretags_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package featuretags diff --git a/feature/hooks.go b/feature/hooks.go index 7e31061a7eaac..5cd3c0d818ca6 100644 --- a/feature/hooks.go +++ b/feature/hooks.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package feature diff --git a/feature/identityfederation/identityfederation.go b/feature/identityfederation/identityfederation.go index f75b096a603a2..4b96fd6a2020c 100644 --- a/feature/identityfederation/identityfederation.go +++ b/feature/identityfederation/identityfederation.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package identityfederation registers support for using ID tokens to diff --git a/feature/identityfederation/identityfederation_test.go b/feature/identityfederation/identityfederation_test.go index b050f1a019e38..5e3660dc58725 100644 --- a/feature/identityfederation/identityfederation_test.go +++ b/feature/identityfederation/identityfederation_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors 
// SPDX-License-Identifier: BSD-3-Clause package identityfederation diff --git a/feature/linkspeed/doc.go b/feature/linkspeed/doc.go index 2d2fcf0929808..b3adf88ef76e8 100644 --- a/feature/linkspeed/doc.go +++ b/feature/linkspeed/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package linkspeed registers support for setting the TUN link speed on Linux, diff --git a/feature/linkspeed/linkspeed_linux.go b/feature/linkspeed/linkspeed_linux.go index 90e33d4c9fea4..4e36e281ca80c 100644 --- a/feature/linkspeed/linkspeed_linux.go +++ b/feature/linkspeed/linkspeed_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/feature/linuxdnsfight/linuxdnsfight.go b/feature/linuxdnsfight/linuxdnsfight.go index 02d99a3144246..ea37ed7a5b9ef 100644 --- a/feature/linuxdnsfight/linuxdnsfight.go +++ b/feature/linuxdnsfight/linuxdnsfight.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/feature/linuxdnsfight/linuxdnsfight_test.go b/feature/linuxdnsfight/linuxdnsfight_test.go index bd3463666d46b..661ba7f6f3a00 100644 --- a/feature/linuxdnsfight/linuxdnsfight_test.go +++ b/feature/linuxdnsfight/linuxdnsfight_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/feature/oauthkey/oauthkey.go b/feature/oauthkey/oauthkey.go index 336340c85109b..532f6ec73bab9 100644 --- a/feature/oauthkey/oauthkey.go +++ b/feature/oauthkey/oauthkey.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause // Package oauthkey registers support for using OAuth client secrets to diff --git a/feature/oauthkey/oauthkey_test.go b/feature/oauthkey/oauthkey_test.go index b550d8c2ce77a..f8027e45a922e 100644 --- a/feature/oauthkey/oauthkey_test.go +++ b/feature/oauthkey/oauthkey_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package oauthkey diff --git a/feature/portlist/portlist.go b/feature/portlist/portlist.go index 7d69796ffd5d2..b651c64cb6afa 100644 --- a/feature/portlist/portlist.go +++ b/feature/portlist/portlist.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package portlist contains code to poll the local system for open ports diff --git a/feature/portmapper/portmapper.go b/feature/portmapper/portmapper.go index d1b903cb69c20..3d2004993a510 100644 --- a/feature/portmapper/portmapper.go +++ b/feature/portmapper/portmapper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package portmapper registers support for NAT-PMP, PCP, and UPnP port diff --git a/feature/posture/posture.go b/feature/posture/posture.go index 977e7429571a8..d8db1ac1933fb 100644 --- a/feature/posture/posture.go +++ b/feature/posture/posture.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package posture registers support for device posture checking, diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index b29a6abed5336..45d6abcc1d3d6 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause // Package relayserver registers the relay server feature and implements its diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 807306c707bc1..730e25a00d0d3 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package relayserver diff --git a/feature/sdnotify.go b/feature/sdnotify.go index 7a786dfabd519..45f280c8116f2 100644 --- a/feature/sdnotify.go +++ b/feature/sdnotify.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package feature diff --git a/feature/sdnotify/sdnotify.go b/feature/sdnotify/sdnotify.go index d13aa63f23c15..d74eafd52d1d7 100644 --- a/feature/sdnotify/sdnotify.go +++ b/feature/sdnotify/sdnotify.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause /* diff --git a/feature/sdnotify/sdnotify_linux.go b/feature/sdnotify/sdnotify_linux.go index 2b13e24bbe509..6a3df879638c9 100644 --- a/feature/sdnotify/sdnotify_linux.go +++ b/feature/sdnotify/sdnotify_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/feature/syspolicy/syspolicy.go b/feature/syspolicy/syspolicy.go index 08c3cf3736b29..dce2eb0b18bbb 100644 --- a/feature/syspolicy/syspolicy.go +++ b/feature/syspolicy/syspolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package syspolicy provides an interface for system-wide policy management. 
diff --git a/feature/taildrop/delete.go b/feature/taildrop/delete.go index 8b03a125f445e..dc5036006a2f6 100644 --- a/feature/taildrop/delete.go +++ b/feature/taildrop/delete.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/delete_test.go b/feature/taildrop/delete_test.go index 36950f58288cb..2b740a3fb2d6c 100644 --- a/feature/taildrop/delete_test.go +++ b/feature/taildrop/delete_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/doc.go b/feature/taildrop/doc.go index 8980a217096c0..c394ebe82e18a 100644 --- a/feature/taildrop/doc.go +++ b/feature/taildrop/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package taildrop registers the taildrop (file sending) feature. 
diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index 6bdb375ccfe63..3a4ed456d2269 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/fileops.go b/feature/taildrop/fileops.go index 14f76067a8094..beac0c375c574 100644 --- a/feature/taildrop/fileops.go +++ b/feature/taildrop/fileops.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/fileops_fs.go b/feature/taildrop/fileops_fs.go index 4fecbe4af6bbb..4a5b3e71a0f55 100644 --- a/feature/taildrop/fileops_fs.go +++ b/feature/taildrop/fileops_fs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !android diff --git a/feature/taildrop/integration_test.go b/feature/taildrop/integration_test.go index 75896a95b2b54..ad66aa827faa0 100644 --- a/feature/taildrop/integration_test.go +++ b/feature/taildrop/integration_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop_test diff --git a/feature/taildrop/localapi.go b/feature/taildrop/localapi.go index 8a3904f9f0198..2af057ae8d88c 100644 --- a/feature/taildrop/localapi.go +++ b/feature/taildrop/localapi.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/paths.go b/feature/taildrop/paths.go index 79dc37d8f0699..76054ef4d58b3 100644 --- a/feature/taildrop/paths.go +++ b/feature/taildrop/paths.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/peerapi.go b/feature/taildrop/peerapi.go index b75ce33b864b4..8b92c8c85b0cd 100644 --- a/feature/taildrop/peerapi.go +++ b/feature/taildrop/peerapi.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/peerapi_test.go b/feature/taildrop/peerapi_test.go index 254d8794e8273..65f881be906b7 100644 --- a/feature/taildrop/peerapi_test.go +++ b/feature/taildrop/peerapi_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/resume.go b/feature/taildrop/resume.go index 20ef527a6da55..208d61de3767a 100644 --- a/feature/taildrop/resume.go +++ b/feature/taildrop/resume.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/resume_test.go b/feature/taildrop/resume_test.go index 4e59d401dcc53..69dd547a123e0 100644 --- a/feature/taildrop/resume_test.go +++ b/feature/taildrop/resume_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/retrieve.go b/feature/taildrop/retrieve.go index e767bac324684..dd1b75b174db7 100644 --- a/feature/taildrop/retrieve.go +++ b/feature/taildrop/retrieve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/send.go b/feature/taildrop/send.go index 32ba5f6f0d644..668166d4409cf 100644 --- a/feature/taildrop/send.go +++ 
b/feature/taildrop/send.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/send_test.go b/feature/taildrop/send_test.go index 9ffa5fccc0a36..c1def2afdd106 100644 --- a/feature/taildrop/send_test.go +++ b/feature/taildrop/send_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/taildrop.go b/feature/taildrop/taildrop.go index 6c3deaed1b538..7042ca97aa7ef 100644 --- a/feature/taildrop/taildrop.go +++ b/feature/taildrop/taildrop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package taildrop contains the implementation of the Taildrop diff --git a/feature/taildrop/taildrop_test.go b/feature/taildrop/taildrop_test.go index 0d77273f0aab0..5c48412e044ba 100644 --- a/feature/taildrop/taildrop_test.go +++ b/feature/taildrop/taildrop_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/taildrop/target_test.go b/feature/taildrop/target_test.go index 57c96a77a4802..7948c488293bf 100644 --- a/feature/taildrop/target_test.go +++ b/feature/taildrop/target_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package taildrop diff --git a/feature/tap/tap_linux.go b/feature/tap/tap_linux.go index 53dcabc364d6b..e66f74ba4d1e4 100644 --- a/feature/tap/tap_linux.go +++ b/feature/tap/tap_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tap 
registers Tailscale's experimental (demo) Linux TAP (Layer 2) support. diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go index 197a8d6b8798a..8955d5b98f605 100644 --- a/feature/tpm/attestation.go +++ b/feature/tpm/attestation.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tpm diff --git a/feature/tpm/attestation_test.go b/feature/tpm/attestation_test.go index e7ff729871230..06518b63a9059 100644 --- a/feature/tpm/attestation_test.go +++ b/feature/tpm/attestation_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tpm diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 8df269b95bc2e..e257aa7bc2174 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tpm implements support for TPM 2.0 devices. 
diff --git a/feature/tpm/tpm_linux.go b/feature/tpm/tpm_linux.go index 3f05c9a8c38ad..e7c214c0be6a5 100644 --- a/feature/tpm/tpm_linux.go +++ b/feature/tpm/tpm_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tpm diff --git a/feature/tpm/tpm_other.go b/feature/tpm/tpm_other.go index 108b2c057e4bd..c34d8edb24617 100644 --- a/feature/tpm/tpm_other.go +++ b/feature/tpm/tpm_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux && !windows diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index afce570fc250d..02fb13f58c908 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tpm diff --git a/feature/tpm/tpm_windows.go b/feature/tpm/tpm_windows.go index 429d20cb879f7..168c7dca135a4 100644 --- a/feature/tpm/tpm_windows.go +++ b/feature/tpm/tpm_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tpm diff --git a/feature/useproxy/useproxy.go b/feature/useproxy/useproxy.go index a18e60577af85..96be23710caa1 100644 --- a/feature/useproxy/useproxy.go +++ b/feature/useproxy/useproxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package useproxy registers support for using system proxies. 
diff --git a/feature/wakeonlan/wakeonlan.go b/feature/wakeonlan/wakeonlan.go index 96c424084dcc6..5a567ad44237d 100644 --- a/feature/wakeonlan/wakeonlan.go +++ b/feature/wakeonlan/wakeonlan.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wakeonlan registers the Wake-on-LAN feature. diff --git a/gokrazy/build.go b/gokrazy/build.go index c1ee1cbeb1974..ea54cc829d1f1 100644 --- a/gokrazy/build.go +++ b/gokrazy/build.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This program builds the Tailscale Appliance Gokrazy image. diff --git a/gokrazy/tidy-deps.go b/gokrazy/tidy-deps.go index 104156e473642..8f99f333302b2 100644 --- a/gokrazy/tidy-deps.go +++ b/gokrazy/tidy-deps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build for_go_mod_tidy diff --git a/gomod_test.go b/gomod_test.go index f984b5d6f27a5..a9a3c437d9341 100644 --- a/gomod_test.go +++ b/gomod_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscaleroot diff --git a/header.txt b/header.txt index 8111cb74e25c7..5a058566ba4b8 100644 --- a/header.txt +++ b/header.txt @@ -1,2 +1,2 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause \ No newline at end of file diff --git a/health/args.go b/health/args.go index 01a75aa2d79f3..e89f7676f0b64 100644 --- a/health/args.go +++ b/health/args.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package health diff --git a/health/health.go b/health/health.go index 
f0f6a6ffbb162..0cfe570c4296a 100644 --- a/health/health.go +++ b/health/health.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package health is a registry for other packages to report & check diff --git a/health/health_test.go b/health/health_test.go index af7d06c8fe258..953c4dca26ea3 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package health diff --git a/health/healthmsg/healthmsg.go b/health/healthmsg/healthmsg.go index 5ea1c736d8851..3de885d53a61a 100644 --- a/health/healthmsg/healthmsg.go +++ b/health/healthmsg/healthmsg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package healthmsg contains some constants for health messages. 
diff --git a/health/state.go b/health/state.go index e6d937b6a8f02..91e30b75e796d 100644 --- a/health/state.go +++ b/health/state.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package health diff --git a/health/usermetrics.go b/health/usermetrics.go index 110c57b57971c..b1af0c5852010 100644 --- a/health/usermetrics.go +++ b/health/usermetrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_health && !ts_omit_usermetrics diff --git a/health/usermetrics_omit.go b/health/usermetrics_omit.go index 9d5e35b861681..8a37d8ad8a311 100644 --- a/health/usermetrics_omit.go +++ b/health/usermetrics_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_health || ts_omit_usermetrics diff --git a/health/warnings.go b/health/warnings.go index a9c4b34a0f849..fc9099af2ecc7 100644 --- a/health/warnings.go +++ b/health/warnings.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package health diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index 3e8f2f994791e..f91f52ec0c3d8 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package hostinfo answers questions about the host environment that Tailscale is diff --git a/hostinfo/hostinfo_container_linux_test.go b/hostinfo/hostinfo_container_linux_test.go index 594a5f5120a6a..0c14776e712b7 100644 --- a/hostinfo/hostinfo_container_linux_test.go +++ b/hostinfo/hostinfo_container_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android && ts_package_container diff --git a/hostinfo/hostinfo_darwin.go b/hostinfo/hostinfo_darwin.go index 0b1774e7712d7..bce99d7003406 100644 --- a/hostinfo/hostinfo_darwin.go +++ b/hostinfo/hostinfo_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin diff --git a/hostinfo/hostinfo_freebsd.go b/hostinfo/hostinfo_freebsd.go index 3661b13229ac5..3a214ed2463cb 100644 --- a/hostinfo/hostinfo_freebsd.go +++ b/hostinfo/hostinfo_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build freebsd diff --git a/hostinfo/hostinfo_linux.go b/hostinfo/hostinfo_linux.go index 66484a3588027..bb9a5c58c1bb0 100644 --- a/hostinfo/hostinfo_linux.go +++ b/hostinfo/hostinfo_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/hostinfo/hostinfo_linux_test.go b/hostinfo/hostinfo_linux_test.go index 0286fadf329ab..ebf285e94baeb 100644 --- a/hostinfo/hostinfo_linux_test.go +++ b/hostinfo/hostinfo_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android && !ts_package_container diff --git a/hostinfo/hostinfo_plan9.go b/hostinfo/hostinfo_plan9.go index f9aa30e51769f..27a2543b38ff0 100644 --- a/hostinfo/hostinfo_plan9.go +++ b/hostinfo/hostinfo_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package hostinfo diff --git a/hostinfo/hostinfo_test.go b/hostinfo/hostinfo_test.go 
index 15b6971b6ccd0..6508d5d995e3b 100644 --- a/hostinfo/hostinfo_test.go +++ b/hostinfo/hostinfo_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package hostinfo diff --git a/hostinfo/hostinfo_uname.go b/hostinfo/hostinfo_uname.go index 32b733a03bcb3..b358c0e2cb108 100644 --- a/hostinfo/hostinfo_uname.go +++ b/hostinfo/hostinfo_uname.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || openbsd || darwin diff --git a/hostinfo/hostinfo_windows.go b/hostinfo/hostinfo_windows.go index f0422f5a001c5..5e0b340919e34 100644 --- a/hostinfo/hostinfo_windows.go +++ b/hostinfo/hostinfo_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package hostinfo diff --git a/hostinfo/packagetype_container.go b/hostinfo/packagetype_container.go index 9bd14493cb34f..8db7df8616e7a 100644 --- a/hostinfo/packagetype_container.go +++ b/hostinfo/packagetype_container.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && ts_package_container diff --git a/internal/client/tailscale/identityfederation.go b/internal/client/tailscale/identityfederation.go index 3bb64b270a017..8c60c1c3c59a5 100644 --- a/internal/client/tailscale/identityfederation.go +++ b/internal/client/tailscale/identityfederation.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscale diff --git a/internal/client/tailscale/oauthkeys.go b/internal/client/tailscale/oauthkeys.go index 21102ce0b5fc8..43d5b0744fefe 100644 --- a/internal/client/tailscale/oauthkeys.go +++ 
b/internal/client/tailscale/oauthkeys.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscale diff --git a/internal/client/tailscale/tailscale.go b/internal/client/tailscale/tailscale.go index 0e603bf792562..1e6b576fb0cc1 100644 --- a/internal/client/tailscale/tailscale.go +++ b/internal/client/tailscale/tailscale.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tailscale provides a minimal control plane API client for internal diff --git a/internal/client/tailscale/vip_service.go b/internal/client/tailscale/vip_service.go index 48c59ce4569da..5c35a0c299621 100644 --- a/internal/client/tailscale/vip_service.go +++ b/internal/client/tailscale/vip_service.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscale diff --git a/internal/tooldeps/tooldeps.go b/internal/tooldeps/tooldeps.go index 22940c54d8c33..4ea1cf5084a55 100644 --- a/internal/tooldeps/tooldeps.go +++ b/internal/tooldeps/tooldeps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build for_go_mod_tidy_only diff --git a/ipn/auditlog/auditlog.go b/ipn/auditlog/auditlog.go index 0460bc4e2c655..cc6b43cbdba08 100644 --- a/ipn/auditlog/auditlog.go +++ b/ipn/auditlog/auditlog.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package auditlog provides a mechanism for logging audit events. 
diff --git a/ipn/auditlog/auditlog_test.go b/ipn/auditlog/auditlog_test.go index 041cab3546bd0..5e499cc674fcb 100644 --- a/ipn/auditlog/auditlog_test.go +++ b/ipn/auditlog/auditlog_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package auditlog diff --git a/ipn/auditlog/extension.go b/ipn/auditlog/extension.go index ae2a296b2c420..293d3742c97f9 100644 --- a/ipn/auditlog/extension.go +++ b/ipn/auditlog/extension.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package auditlog diff --git a/ipn/auditlog/store.go b/ipn/auditlog/store.go index 3b58ffa9318a2..07c9717726147 100644 --- a/ipn/auditlog/store.go +++ b/ipn/auditlog/store.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package auditlog diff --git a/ipn/backend.go b/ipn/backend.go index b4ba958c5dd1e..3183c8b5e7a4e 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/backend_test.go b/ipn/backend_test.go index d72b966152ca3..bfb6d6bc41540 100644 --- a/ipn/backend_test.go +++ b/ipn/backend_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/conf.go b/ipn/conf.go index 2c9fb2fd15f9e..ef753a0b48544 100644 --- a/ipn/conf.go +++ b/ipn/conf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/conffile/cloudconf.go b/ipn/conffile/cloudconf.go index 4475a2d7b799e..ac9f640c34275 100644 --- 
a/ipn/conffile/cloudconf.go +++ b/ipn/conffile/cloudconf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package conffile diff --git a/ipn/conffile/conffile.go b/ipn/conffile/conffile.go index 3a2aeffb3a0c6..c221a3d8c8d94 100644 --- a/ipn/conffile/conffile.go +++ b/ipn/conffile/conffile.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package conffile contains code to load, manipulate, and access config file diff --git a/ipn/conffile/conffile_hujson.go b/ipn/conffile/conffile_hujson.go index 1e967f1bdcca2..4f4613dafac4f 100644 --- a/ipn/conffile/conffile_hujson.go +++ b/ipn/conffile/conffile_hujson.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !android && !ts_omit_hujsonconf diff --git a/ipn/conffile/serveconf.go b/ipn/conffile/serveconf.go index bb63c1ac5571a..0d336029246f4 100644 --- a/ipn/conffile/serveconf.go +++ b/ipn/conffile/serveconf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_serve diff --git a/ipn/desktop/doc.go b/ipn/desktop/doc.go index 64a332792a5a4..3aa60619d913a 100644 --- a/ipn/desktop/doc.go +++ b/ipn/desktop/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package desktop facilitates interaction with the desktop environment diff --git a/ipn/desktop/extension.go b/ipn/desktop/extension.go index 0277726714512..3e96a9c4c7929 100644 --- a/ipn/desktop/extension.go +++ b/ipn/desktop/extension.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause // Both the desktop session manager and multi-user support diff --git a/ipn/desktop/mksyscall.go b/ipn/desktop/mksyscall.go index b7af12366b64e..dd4e2acbf554d 100644 --- a/ipn/desktop/mksyscall.go +++ b/ipn/desktop/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package desktop diff --git a/ipn/desktop/session.go b/ipn/desktop/session.go index c95378914321d..b5656d99bd4f2 100644 --- a/ipn/desktop/session.go +++ b/ipn/desktop/session.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package desktop diff --git a/ipn/desktop/sessions.go b/ipn/desktop/sessions.go index 8bf7a75e2dc3a..e7026c1434f44 100644 --- a/ipn/desktop/sessions.go +++ b/ipn/desktop/sessions.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package desktop diff --git a/ipn/desktop/sessions_notwindows.go b/ipn/desktop/sessions_notwindows.go index da3230a456480..396d57eff39b2 100644 --- a/ipn/desktop/sessions_notwindows.go +++ b/ipn/desktop/sessions_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/ipn/desktop/sessions_windows.go b/ipn/desktop/sessions_windows.go index 83b884228c1f8..6128548a51216 100644 --- a/ipn/desktop/sessions_windows.go +++ b/ipn/desktop/sessions_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package desktop diff --git a/ipn/doc.go b/ipn/doc.go index c98c7e8b3599f..b39310cf51b4b 100644 --- a/ipn/doc.go +++ b/ipn/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright 
(c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:generate go run tailscale.com/cmd/viewer -type=LoginProfile,Prefs,ServeConfig,ServiceConfig,TCPPortHandler,HTTPHandler,WebServerConfig diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 4bf78b40b022b..94aebefdfd73d 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/ipn/ipn_test.go b/ipn/ipn_test.go index cba70bccd658a..2689faa8e37cd 100644 --- a/ipn/ipn_test.go +++ b/ipn/ipn_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 4157ec76e61a8..90560cec0e195 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. 
diff --git a/ipn/ipnauth/access.go b/ipn/ipnauth/access.go index 74c66392221b2..3d320585e9637 100644 --- a/ipn/ipnauth/access.go +++ b/ipn/ipnauth/access.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go index 108bdd341ae6a..0fa4735f9fe69 100644 --- a/ipn/ipnauth/actor.go +++ b/ipn/ipnauth/actor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnauth/actor_windows.go b/ipn/ipnauth/actor_windows.go index 90d3bdd362bbf..0345bc5fdb2fe 100644 --- a/ipn/ipnauth/actor_windows.go +++ b/ipn/ipnauth/actor_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnauth/ipnauth.go b/ipn/ipnauth/ipnauth.go index 497f30f8c198e..d48ec1f140c58 100644 --- a/ipn/ipnauth/ipnauth.go +++ b/ipn/ipnauth/ipnauth.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipnauth controls access to the LocalAPI. 
diff --git a/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go b/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go index defe7d89c409b..fce0143e43c38 100644 --- a/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go +++ b/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && ts_omit_unixsocketidentity diff --git a/ipn/ipnauth/ipnauth_unix_creds.go b/ipn/ipnauth/ipnauth_unix_creds.go index 89a9ceaa99388..d031d3b6e09c2 100644 --- a/ipn/ipnauth/ipnauth_unix_creds.go +++ b/ipn/ipnauth/ipnauth_unix_creds.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !ts_omit_unixsocketidentity diff --git a/ipn/ipnauth/ipnauth_windows.go b/ipn/ipnauth/ipnauth_windows.go index e3ea448a855e5..b4591ea3a03f8 100644 --- a/ipn/ipnauth/ipnauth_windows.go +++ b/ipn/ipnauth/ipnauth_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index eeee324352387..692005e5516d0 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnauth/self.go b/ipn/ipnauth/self.go index adee0696458d6..0379aede4076c 100644 --- a/ipn/ipnauth/self.go +++ b/ipn/ipnauth/self.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnauth/test_actor.go b/ipn/ipnauth/test_actor.go index 80c5fcc8a6328..667fb84137ca6 100644 --- a/ipn/ipnauth/test_actor.go +++ 
b/ipn/ipnauth/test_actor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnauth diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index fc93cc8760a0b..275e28c85bddc 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipnext defines types and interfaces used for extending the core LocalBackend diff --git a/ipn/ipnlocal/breaktcp_darwin.go b/ipn/ipnlocal/breaktcp_darwin.go index 13566198ce9fc..732c375f7f5af 100644 --- a/ipn/ipnlocal/breaktcp_darwin.go +++ b/ipn/ipnlocal/breaktcp_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/breaktcp_linux.go b/ipn/ipnlocal/breaktcp_linux.go index b82f6521246f0..0ba9ed6d78f19 100644 --- a/ipn/ipnlocal/breaktcp_linux.go +++ b/ipn/ipnlocal/breaktcp_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/bus.go b/ipn/ipnlocal/bus.go index 910e4e774c958..6061f7223988d 100644 --- a/ipn/ipnlocal/bus.go +++ b/ipn/ipnlocal/bus.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/bus_test.go b/ipn/ipnlocal/bus_test.go index 5c75ac54d688d..27ffebcdd570e 100644 --- a/ipn/ipnlocal/bus_test.go +++ b/ipn/ipnlocal/bus_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 
b5e722b97c4a4..ccce2a65d99e6 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/c2n_pprof.go b/ipn/ipnlocal/c2n_pprof.go index 13237cc4fad2f..783ba16ff93c5 100644 --- a/ipn/ipnlocal/c2n_pprof.go +++ b/ipn/ipnlocal/c2n_pprof.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !wasm && !ts_omit_debug diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index 86cc6a5490865..810d6765b45e2 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/captiveportal.go b/ipn/ipnlocal/captiveportal.go index 14f8b799eb6dd..ae310c7cc785a 100644 --- a/ipn/ipnlocal/captiveportal.go +++ b/ipn/ipnlocal/captiveportal.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_captiveportal diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index b389c93e7e971..764634d30c276 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !ts_omit_acme diff --git a/ipn/ipnlocal/cert_disabled.go b/ipn/ipnlocal/cert_disabled.go index 17d446c11af39..0caab6bc32b27 100644 --- a/ipn/ipnlocal/cert_disabled.go +++ b/ipn/ipnlocal/cert_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build js || 
ts_omit_acme diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index e2398f670b5ad..ec7be570c78f7 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !android && !js diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index e23d8a057546f..52cc533ff29f6 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index 456cd45441ba9..485114eae9d27 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_drive diff --git a/ipn/ipnlocal/drive_test.go b/ipn/ipnlocal/drive_test.go index 323c3821499ed..aca05432b8276 100644 --- a/ipn/ipnlocal/drive_test.go +++ b/ipn/ipnlocal/drive_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_drive diff --git a/ipn/ipnlocal/drive_tomove.go b/ipn/ipnlocal/drive_tomove.go index 290fe097022fd..ccea48f7a5106 100644 --- a/ipn/ipnlocal/drive_tomove.go +++ b/ipn/ipnlocal/drive_tomove.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This is the Taildrive stuff that should ideally be registered in init only when diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index 8ea63d21a4fb0..461dce2fda055 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/expiry_test.go b/ipn/ipnlocal/expiry_test.go index 2c646ca724efd..1d85d81d1819d 100644 --- a/ipn/ipnlocal/expiry_test.go +++ b/ipn/ipnlocal/expiry_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index ca802ab89f747..125a2329447a3 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index f5c081a5bdb3e..3bd302aeab93d 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/hwattest.go b/ipn/ipnlocal/hwattest.go index 2c93cad4c97ff..07c09dc7fe043 100644 --- a/ipn/ipnlocal/hwattest.go +++ b/ipn/ipnlocal/hwattest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tpm diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bbd2aa2e0e425..0fc26cd041cb6 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipnlocal is the heart of the Tailscale node agent that controls diff --git a/ipn/ipnlocal/local_test.go 
b/ipn/ipnlocal/local_test.go index 23a3161ca47f1..53607cfaaa737 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/loglines_test.go b/ipn/ipnlocal/loglines_test.go index d831aa8b075dc..733c7381b2016 100644 --- a/ipn/ipnlocal/loglines_test.go +++ b/ipn/ipnlocal/loglines_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/netstack.go b/ipn/ipnlocal/netstack.go index f7ffd03058879..b331d93e329de 100644 --- a/ipn/ipnlocal/netstack.go +++ b/ipn/ipnlocal/netstack.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_netstack diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 246b26409b2b5..242fec0287c65 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index e5df38bdb6d76..8aa0a877b8dd3 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index efef57ea492e7..a252f20fe2074 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright 
(c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/node_backend_test.go b/ipn/ipnlocal/node_backend_test.go index f6698bd4bc920..f1f38dae6aee1 100644 --- a/ipn/ipnlocal/node_backend_test.go +++ b/ipn/ipnlocal/node_backend_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 318d9bf6bb72f..aa4c1ef527c6c 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/peerapi_drive.go b/ipn/ipnlocal/peerapi_drive.go index 8dffacd9a2513..d42843577059b 100644 --- a/ipn/ipnlocal/peerapi_drive.go +++ b/ipn/ipnlocal/peerapi_drive.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_drive diff --git a/ipn/ipnlocal/peerapi_macios_ext.go b/ipn/ipnlocal/peerapi_macios_ext.go index f23b877bd663c..70c1fb850e249 100644 --- a/ipn/ipnlocal/peerapi_macios_ext.go +++ b/ipn/ipnlocal/peerapi_macios_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_macext && (darwin || ios) diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 3c9f57f1fcf6a..63abf089c2abc 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/prefs_metrics.go b/ipn/ipnlocal/prefs_metrics.go index 
34c5f5504fac4..7e7a2a5e3a1d0 100644 --- a/ipn/ipnlocal/prefs_metrics.go +++ b/ipn/ipnlocal/prefs_metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 7080e3c3edd50..430fa63152a77 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/profiles_notwindows.go b/ipn/ipnlocal/profiles_notwindows.go index 0ca8f439cf9f4..389dedc9e7015 100644 --- a/ipn/ipnlocal/profiles_notwindows.go +++ b/ipn/ipnlocal/profiles_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 6be7f0e53f59e..ec92673e50b29 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/profiles_windows.go b/ipn/ipnlocal/profiles_windows.go index c4beb22f9d42f..a0b5bbfdd49fb 100644 --- a/ipn/ipnlocal/profiles_windows.go +++ b/ipn/ipnlocal/profiles_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index a857147e1adab..d25251accd797 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause //go:build !ts_omit_serve diff --git a/ipn/ipnlocal/serve_disabled.go b/ipn/ipnlocal/serve_disabled.go index a97112941d844..e9d2678a80d8b 100644 --- a/ipn/ipnlocal/serve_disabled.go +++ b/ipn/ipnlocal/serve_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_serve diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index 0892545cceec8..b3f48b105c8f7 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_serve diff --git a/ipn/ipnlocal/serve_unix_test.go b/ipn/ipnlocal/serve_unix_test.go index e57aafab212ae..2d1f0a1e34af8 100644 --- a/ipn/ipnlocal/serve_unix_test.go +++ b/ipn/ipnlocal/serve_unix_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build unix diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index e2c2f50671386..52b3066584e08 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ((linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9) && !ts_omit_ssh diff --git a/ipn/ipnlocal/ssh_stub.go b/ipn/ipnlocal/ssh_stub.go index 6b2e36015c2d7..9a997c9143f7b 100644 --- a/ipn/ipnlocal/ssh_stub.go +++ b/ipn/ipnlocal/ssh_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_ssh || ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9) diff --git a/ipn/ipnlocal/ssh_test.go b/ipn/ipnlocal/ssh_test.go index 
b24cd6732f605..bb293d10ac4d6 100644 --- a/ipn/ipnlocal/ssh_test.go +++ b/ipn/ipnlocal/ssh_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || (darwin && !ios) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 27d53fe01b599..97c2c4d8f9daf 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnlocal diff --git a/ipn/ipnlocal/tailnetlock_disabled.go b/ipn/ipnlocal/tailnetlock_disabled.go index 85cf4bd3f4ea5..0668437b163c6 100644 --- a/ipn/ipnlocal/tailnetlock_disabled.go +++ b/ipn/ipnlocal/tailnetlock_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_tailnetlock diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index a3c9387e46fce..37dba93d0a49b 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !android && !ts_omit_webclient diff --git a/ipn/ipnlocal/web_client_stub.go b/ipn/ipnlocal/web_client_stub.go index 787867b4f450e..02798b4f709e9 100644 --- a/ipn/ipnlocal/web_client_stub.go +++ b/ipn/ipnlocal/web_client_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios || android || ts_omit_webclient diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 628e3c37cfc0b..c9a4c6e891f86 100644 --- a/ipn/ipnserver/actor.go +++ b/ipn/ipnserver/actor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS 
+// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnserver diff --git a/ipn/ipnserver/proxyconnect.go b/ipn/ipnserver/proxyconnect.go index 7d41273bdc52a..c8348a76c2b6a 100644 --- a/ipn/ipnserver/proxyconnect.go +++ b/ipn/ipnserver/proxyconnect.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js diff --git a/ipn/ipnserver/proxyconnect_js.go b/ipn/ipnserver/proxyconnect_js.go index 368221e2269c8..b4a6aef3a43bd 100644 --- a/ipn/ipnserver/proxyconnect_js.go +++ b/ipn/ipnserver/proxyconnect_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnserver diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index d473252e134a8..1f8abf0e20128 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipnserver runs the LocalAPI HTTP server that communicates diff --git a/ipn/ipnserver/server_fortest.go b/ipn/ipnserver/server_fortest.go index 9aab3b276d31f..70148f030e6b0 100644 --- a/ipn/ipnserver/server_fortest.go +++ b/ipn/ipnserver/server_fortest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnserver diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index 713db9e50085e..9aa9c4c015f23 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnserver_test diff --git a/ipn/ipnserver/waiterset_test.go b/ipn/ipnserver/waiterset_test.go 
index b7d5ea144c408..b8a143212c1a3 100644 --- a/ipn/ipnserver/waiterset_test.go +++ b/ipn/ipnserver/waiterset_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipnserver diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index 213090b559692..4d219d131d528 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipnstate captures the entire state of the Tailscale network. diff --git a/ipn/ipnstate/ipnstate_clone.go b/ipn/ipnstate/ipnstate_clone.go index 20ae43c5fb73e..9af066832b27f 100644 --- a/ipn/ipnstate/ipnstate_clone.go +++ b/ipn/ipnstate/ipnstate_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. 
diff --git a/ipn/lapitest/backend.go b/ipn/lapitest/backend.go index 7a1c276a7b229..b622d098f4f55 100644 --- a/ipn/lapitest/backend.go +++ b/ipn/lapitest/backend.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lapitest diff --git a/ipn/lapitest/client.go b/ipn/lapitest/client.go index 6d22e938b210e..c2c07dfbaf689 100644 --- a/ipn/lapitest/client.go +++ b/ipn/lapitest/client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lapitest diff --git a/ipn/lapitest/example_test.go b/ipn/lapitest/example_test.go index 57479199a8123..648c97880cdb8 100644 --- a/ipn/lapitest/example_test.go +++ b/ipn/lapitest/example_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lapitest diff --git a/ipn/lapitest/opts.go b/ipn/lapitest/opts.go index 6eb1594da2607..5ed2f97573b90 100644 --- a/ipn/lapitest/opts.go +++ b/ipn/lapitest/opts.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lapitest diff --git a/ipn/lapitest/server.go b/ipn/lapitest/server.go index 457a338ab9f5a..8fd3c8cdd361f 100644 --- a/ipn/lapitest/server.go +++ b/ipn/lapitest/server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package lapitest provides utilities for black-box testing of LocalAPI ([ipnserver]). 
diff --git a/ipn/localapi/cert.go b/ipn/localapi/cert.go index 2313631cc3229..cd8afa03bf599 100644 --- a/ipn/localapi/cert.go +++ b/ipn/localapi/cert.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !android && !js && !ts_omit_acme diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go index ae9cb01e02fe9..fe936db6ab78d 100644 --- a/ipn/localapi/debug.go +++ b/ipn/localapi/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_debug diff --git a/ipn/localapi/debugderp.go b/ipn/localapi/debugderp.go index 3edbc0856c8a3..52987ee0a18e6 100644 --- a/ipn/localapi/debugderp.go +++ b/ipn/localapi/debugderp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_debug diff --git a/ipn/localapi/disabled_stubs.go b/ipn/localapi/disabled_stubs.go index c744f34d5f5c5..0d16de880cf41 100644 --- a/ipn/localapi/disabled_stubs.go +++ b/ipn/localapi/disabled_stubs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios || android || js diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 4648b2c49e849..248c0377ec968 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package localapi contains the HTTP server handlers for tailscaled's API server. 
diff --git a/ipn/localapi/localapi_drive.go b/ipn/localapi/localapi_drive.go index eb765ec2eabba..e1dee441e0fde 100644 --- a/ipn/localapi/localapi_drive.go +++ b/ipn/localapi/localapi_drive.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_drive diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 5d228ffd69343..47e33457188ab 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package localapi diff --git a/ipn/localapi/pprof.go b/ipn/localapi/pprof.go index 9476f721fb1ce..fabdb18e24d87 100644 --- a/ipn/localapi/pprof.go +++ b/ipn/localapi/pprof.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !android && !js && !ts_omit_debug diff --git a/ipn/localapi/serve.go b/ipn/localapi/serve.go index efbbde06ff954..1f677f7ab3a05 100644 --- a/ipn/localapi/serve.go +++ b/ipn/localapi/serve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_serve diff --git a/ipn/localapi/syspolicy_api.go b/ipn/localapi/syspolicy_api.go index edb82e042f2ce..9962f342bd884 100644 --- a/ipn/localapi/syspolicy_api.go +++ b/ipn/localapi/syspolicy_api.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_syspolicy diff --git a/ipn/localapi/tailnetlock.go b/ipn/localapi/tailnetlock.go index e5f999bb8847e..445f705056cf7 100644 --- a/ipn/localapi/tailnetlock.go +++ b/ipn/localapi/tailnetlock.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/ipn/policy/policy.go b/ipn/policy/policy.go index 494a0dc408819..bbc78a254e141 100644 --- a/ipn/policy/policy.go +++ b/ipn/policy/policy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package policy contains various policy decisions that need to be diff --git a/ipn/prefs.go b/ipn/prefs.go index 9f98465d2d883..72e0cf8b78424 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index aa152843a5af9..347a91e50739c 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/serve.go b/ipn/serve.go index 240308f290edc..911b408b65026 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/serve_expand_test.go b/ipn/serve_expand_test.go index b977238fe32ff..33f808d62ec05 100644 --- a/ipn/serve_expand_test.go +++ b/ipn/serve_expand_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/serve_test.go b/ipn/serve_test.go index 5e0f4a43a38e7..8be39a1ed81ce 100644 --- a/ipn/serve_test.go +++ b/ipn/serve_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git 
a/ipn/store.go b/ipn/store.go index 2034ae09a92f9..1bd3e5a3b4e6b 100644 --- a/ipn/store.go +++ b/ipn/store.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 78b72d0bc8f45..e06e00eb3d3dd 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_aws diff --git a/ipn/store/awsstore/store_aws_test.go b/ipn/store/awsstore/store_aws_test.go index 3cc23e48d4b12..ba2274bf1c09e 100644 --- a/ipn/store/awsstore/store_aws_test.go +++ b/ipn/store/awsstore/store_aws_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_aws diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 5fbd795c2174d..f7d1b90cd1e2c 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package kubestore contains an ipn.StateStore implementation using Kubernetes Secrets. 
diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index aea39d3bb51f8..1e6f711d686e2 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package kubestore diff --git a/ipn/store/mem/store_mem.go b/ipn/store/mem/store_mem.go index 6f474ce993b43..247714c9a2b47 100644 --- a/ipn/store/mem/store_mem.go +++ b/ipn/store/mem/store_mem.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mem provides an in-memory ipn.StateStore implementation. diff --git a/ipn/store/stores.go b/ipn/store/stores.go index bf175da41d8aa..fd51f8c38540d 100644 --- a/ipn/store/stores.go +++ b/ipn/store/stores.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package store provides various implementation of ipn.StateStore. 
diff --git a/ipn/store/stores_test.go b/ipn/store/stores_test.go index 1f0fc0fef1bff..345b1c10376d9 100644 --- a/ipn/store/stores_test.go +++ b/ipn/store/stores_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package store diff --git a/ipn/store_test.go b/ipn/store_test.go index 4dd7321b9048d..fc42fdbec3610 100644 --- a/ipn/store_test.go +++ b/ipn/store_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipn diff --git a/jsondb/db.go b/jsondb/db.go index 68bb05af45e8e..c45ab4cd39913 100644 --- a/jsondb/db.go +++ b/jsondb/db.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package jsondb provides a trivial "database": a Go object saved to diff --git a/jsondb/db_test.go b/jsondb/db_test.go index 655754f38e1a9..18797ebd174e6 100644 --- a/jsondb/db_test.go +++ b/jsondb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package jsondb diff --git a/k8s-operator/api-docs-config.yaml b/k8s-operator/api-docs-config.yaml index 214171ca35c0d..0bfb32be92f27 100644 --- a/k8s-operator/api-docs-config.yaml +++ b/k8s-operator/api-docs-config.yaml @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause processor: {} diff --git a/k8s-operator/api-proxy/doc.go b/k8s-operator/api-proxy/doc.go index 89d8909595fd3..a685b9907d710 100644 --- a/k8s-operator/api-proxy/doc.go +++ b/k8s-operator/api-proxy/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git 
a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index f5f1da80f1a05..c4c651b1fb029 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/api-proxy/proxy_events_test.go b/k8s-operator/api-proxy/proxy_events_test.go index e35be33a0e734..1426f170c5207 100644 --- a/k8s-operator/api-proxy/proxy_events_test.go +++ b/k8s-operator/api-proxy/proxy_events_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/api-proxy/proxy_test.go b/k8s-operator/api-proxy/proxy_test.go index 14e6554236234..5d1606d764e45 100644 --- a/k8s-operator/api-proxy/proxy_test.go +++ b/k8s-operator/api-proxy/proxy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/doc.go b/k8s-operator/apis/doc.go index 0a1145ca8a0dc..3fea3d6e80e12 100644 --- a/k8s-operator/apis/doc.go +++ b/k8s-operator/apis/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/doc.go b/k8s-operator/apis/v1alpha1/doc.go index 467e73e17cb21..0fc5bb8ece622 100644 --- a/k8s-operator/apis/v1alpha1/doc.go +++ b/k8s-operator/apis/v1alpha1/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/register.go b/k8s-operator/apis/v1alpha1/register.go index 993a119fad2eb..ebdd2bae1f3ea 100644 --- 
a/k8s-operator/apis/v1alpha1/register.go +++ b/k8s-operator/apis/v1alpha1/register.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/types_connector.go b/k8s-operator/apis/v1alpha1/types_connector.go index ebedea18f0e98..af2df58af2fd9 100644 --- a/k8s-operator/apis/v1alpha1/types_connector.go +++ b/k8s-operator/apis/v1alpha1/types_connector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index 670df3b95097e..3c2fe76868ae3 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/types_proxygroup.go b/k8s-operator/apis/v1alpha1/types_proxygroup.go index 8cbcc2d196e51..00c196628ab86 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygroup.go +++ b/k8s-operator/apis/v1alpha1/types_proxygroup.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/types_recorder.go b/k8s-operator/apis/v1alpha1/types_recorder.go index d5a22e82c2dbc..6cc5e3dd572c9 100644 --- a/k8s-operator/apis/v1alpha1/types_recorder.go +++ b/k8s-operator/apis/v1alpha1/types_recorder.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/types_tailnet.go 
b/k8s-operator/apis/v1alpha1/types_tailnet.go index a3a17374be5cd..b11b2cf17658c 100644 --- a/k8s-operator/apis/v1alpha1/types_tailnet.go +++ b/k8s-operator/apis/v1alpha1/types_tailnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 7991003b82dff..c1a2e7906fcd8 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 4743a5156c16b..1081c162c81bc 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -1,6 +1,6 @@ //go:build !ignore_autogenerated && !plan9 -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by controller-gen. DO NOT EDIT. 
diff --git a/k8s-operator/conditions.go b/k8s-operator/conditions.go index bce6e39bdb142..89b83dd5f83cc 100644 --- a/k8s-operator/conditions.go +++ b/k8s-operator/conditions.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/conditions_test.go b/k8s-operator/conditions_test.go index 7eb65257d3414..940a300d88ba8 100644 --- a/k8s-operator/conditions_test.go +++ b/k8s-operator/conditions_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/reconciler/reconciler.go b/k8s-operator/reconciler/reconciler.go index 2751790964577..fcad7201e31e4 100644 --- a/k8s-operator/reconciler/reconciler.go +++ b/k8s-operator/reconciler/reconciler.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/reconciler/reconciler_test.go b/k8s-operator/reconciler/reconciler_test.go index 573cd4d9db8da..2db77e7aad419 100644 --- a/k8s-operator/reconciler/reconciler_test.go +++ b/k8s-operator/reconciler/reconciler_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/reconciler/tailnet/mocks_test.go b/k8s-operator/reconciler/tailnet/mocks_test.go index 7f3f2ddb91085..4342556885013 100644 --- a/k8s-operator/reconciler/tailnet/mocks_test.go +++ b/k8s-operator/reconciler/tailnet/mocks_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/reconciler/tailnet/tailnet.go 
b/k8s-operator/reconciler/tailnet/tailnet.go index fe445a36323be..2e7004b698c93 100644 --- a/k8s-operator/reconciler/tailnet/tailnet.go +++ b/k8s-operator/reconciler/tailnet/tailnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/reconciler/tailnet/tailnet_test.go b/k8s-operator/reconciler/tailnet/tailnet_test.go index 471752b86080d..0ed2ca598d720 100644 --- a/k8s-operator/reconciler/tailnet/tailnet_test.go +++ b/k8s-operator/reconciler/tailnet/tailnet_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/fakes/fakes.go b/k8s-operator/sessionrecording/fakes/fakes.go index 94853df195f7c..26f57e4eb4b69 100644 --- a/k8s-operator/sessionrecording/fakes/fakes.go +++ b/k8s-operator/sessionrecording/fakes/fakes.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 7345a407c8faa..cdbeeddb43ac8 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go index fb45820a71b86..ac243c2e8bc82 100644 --- a/k8s-operator/sessionrecording/hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause 
//go:build !plan9 diff --git a/k8s-operator/sessionrecording/spdy/conn.go b/k8s-operator/sessionrecording/spdy/conn.go index 9fefca11fc2b8..682003055acb8 100644 --- a/k8s-operator/sessionrecording/spdy/conn.go +++ b/k8s-operator/sessionrecording/spdy/conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/spdy/conn_test.go b/k8s-operator/sessionrecording/spdy/conn_test.go index 3c1cb8427d822..232fa8e2c2227 100644 --- a/k8s-operator/sessionrecording/spdy/conn_test.go +++ b/k8s-operator/sessionrecording/spdy/conn_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/spdy/frame.go b/k8s-operator/sessionrecording/spdy/frame.go index 54b29d33a9622..7087db3c32166 100644 --- a/k8s-operator/sessionrecording/spdy/frame.go +++ b/k8s-operator/sessionrecording/spdy/frame.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/spdy/frame_test.go b/k8s-operator/sessionrecording/spdy/frame_test.go index 4896cdcbf78a5..1b7e54f4cc1fb 100644 --- a/k8s-operator/sessionrecording/spdy/frame_test.go +++ b/k8s-operator/sessionrecording/spdy/frame_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/spdy/zlib-reader.go b/k8s-operator/sessionrecording/spdy/zlib-reader.go index 1eb654be35632..2b63f7e2b592b 100644 --- a/k8s-operator/sessionrecording/spdy/zlib-reader.go +++ b/k8s-operator/sessionrecording/spdy/zlib-reader.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale 
Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go index a5bdf7ddddeeb..40a96d6d29ac7 100644 --- a/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go +++ b/k8s-operator/sessionrecording/tsrecorder/tsrecorder.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go index a618f85fb7822..4762630ca7522 100644 --- a/k8s-operator/sessionrecording/ws/conn.go +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/ws/conn_test.go b/k8s-operator/sessionrecording/ws/conn_test.go index 87205c4e6f610..0b4353698cd9f 100644 --- a/k8s-operator/sessionrecording/ws/conn_test.go +++ b/k8s-operator/sessionrecording/ws/conn_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/ws/message.go b/k8s-operator/sessionrecording/ws/message.go index 35667ae21a5d0..36359996a7c12 100644 --- a/k8s-operator/sessionrecording/ws/message.go +++ b/k8s-operator/sessionrecording/ws/message.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/sessionrecording/ws/message_test.go b/k8s-operator/sessionrecording/ws/message_test.go index f634f86dc55c2..07d55ce4dcbd5 100644 --- 
a/k8s-operator/sessionrecording/ws/message_test.go +++ b/k8s-operator/sessionrecording/ws/message_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go index 2acbf338dbdd3..043a9d7b54c7a 100644 --- a/k8s-operator/utils.go +++ b/k8s-operator/utils.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/kube/certs/certs.go b/kube/certs/certs.go index 8e2e5fb43a8ac..dd8fd7d799ac6 100644 --- a/kube/certs/certs.go +++ b/kube/certs/certs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package certs implements logic to help multiple Kubernetes replicas share TLS diff --git a/kube/certs/certs_test.go b/kube/certs/certs_test.go index 8434f21ae6976..91196f5760f72 100644 --- a/kube/certs/certs_test.go +++ b/kube/certs/certs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package certs diff --git a/kube/egressservices/egressservices.go b/kube/egressservices/egressservices.go index 56c874f31dbb1..3828760afccf4 100644 --- a/kube/egressservices/egressservices.go +++ b/kube/egressservices/egressservices.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package egressservices contains shared types for exposing tailnet services to diff --git a/kube/egressservices/egressservices_test.go b/kube/egressservices/egressservices_test.go index 806ad91be61cd..27d818cab970a 100644 --- a/kube/egressservices/egressservices_test.go +++ b/kube/egressservices/egressservices_test.go @@ -1,4 +1,4 
@@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package egressservices diff --git a/kube/health/healthz.go b/kube/health/healthz.go index c8cfcc7ec01b4..53888922bb940 100644 --- a/kube/health/healthz.go +++ b/kube/health/healthz.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/kube/ingressservices/ingressservices.go b/kube/ingressservices/ingressservices.go index f79410761af02..440582666a0a1 100644 --- a/kube/ingressservices/ingressservices.go +++ b/kube/ingressservices/ingressservices.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ingressservices contains shared types for exposing Kubernetes Services to tailnet. diff --git a/kube/k8s-proxy/conf/conf.go b/kube/k8s-proxy/conf/conf.go index 5294952438896..62ef67feecb16 100644 --- a/kube/k8s-proxy/conf/conf.go +++ b/kube/k8s-proxy/conf/conf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/kube/k8s-proxy/conf/conf_test.go b/kube/k8s-proxy/conf/conf_test.go index 3082be1ba9dcd..4034bf3cb7752 100644 --- a/kube/k8s-proxy/conf/conf_test.go +++ b/kube/k8s-proxy/conf/conf_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/kube/kubeapi/api.go b/kube/kubeapi/api.go index e62bd6e2b2eb1..c3ed1a3b7e917 100644 --- a/kube/kubeapi/api.go +++ b/kube/kubeapi/api.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package kubeapi contains Kubernetes API 
types for internal consumption. diff --git a/kube/kubeclient/client.go b/kube/kubeclient/client.go index 0ed960f4ddcd4..5f5ab138eb65e 100644 --- a/kube/kubeclient/client.go +++ b/kube/kubeclient/client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package kubeclient provides a client to interact with Kubernetes. diff --git a/kube/kubeclient/client_test.go b/kube/kubeclient/client_test.go index 8599e7e3c19e2..9778c7e6fa1ad 100644 --- a/kube/kubeclient/client_test.go +++ b/kube/kubeclient/client_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package kubeclient diff --git a/kube/kubeclient/fake_client.go b/kube/kubeclient/fake_client.go index 15ebb5f443f2a..7fb102764a939 100644 --- a/kube/kubeclient/fake_client.go +++ b/kube/kubeclient/fake_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package kubeclient diff --git a/kube/kubetypes/grants.go b/kube/kubetypes/grants.go index 50d7d760ff5a7..8f17a28546d94 100644 --- a/kube/kubetypes/grants.go +++ b/kube/kubetypes/grants.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package kubetypes contains types and constants related to the Tailscale diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index b8b94a4b21a5d..187f54f3481f8 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package kubetypes diff --git a/kube/kubetypes/types_test.go b/kube/kubetypes/types_test.go index ea1846b3253e8..86b3962ef1b01 100644 --- 
a/kube/kubetypes/types_test.go +++ b/kube/kubetypes/types_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package kubetypes diff --git a/kube/localclient/fake-client.go b/kube/localclient/fake-client.go index 7f0a08316634e..1bce4bef00d6f 100644 --- a/kube/localclient/fake-client.go +++ b/kube/localclient/fake-client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package localclient diff --git a/kube/localclient/local-client.go b/kube/localclient/local-client.go index 550b3ae742c34..8cc0d41ffe473 100644 --- a/kube/localclient/local-client.go +++ b/kube/localclient/local-client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package localclient provides an interface for all the local.Client methods diff --git a/kube/metrics/metrics.go b/kube/metrics/metrics.go index 0db683008f91e..062f18b8b95b5 100644 --- a/kube/metrics/metrics.go +++ b/kube/metrics/metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/kube/services/services.go b/kube/services/services.go index a9e50975ca9f1..36566c2855a9f 100644 --- a/kube/services/services.go +++ b/kube/services/services.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package services manages graceful shutdown of Tailscale Services advertised diff --git a/kube/state/state.go b/kube/state/state.go index 2605f0952f708..ebedb2f725b3d 100644 --- a/kube/state/state.go +++ b/kube/state/state.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors 
// SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/kube/state/state_test.go b/kube/state/state_test.go index 8701aa1b7fa65..9b2ce69be5599 100644 --- a/kube/state/state_test.go +++ b/kube/state/state_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/license_test.go b/license_test.go index 9b62c48ed218e..cac195c49c5a4 100644 --- a/license_test.go +++ b/license_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscaleroot @@ -23,7 +23,7 @@ func normalizeLineEndings(b []byte) []byte { // directory tree have a correct-looking Tailscale license header. func TestLicenseHeaders(t *testing.T) { want := normalizeLineEndings([]byte(strings.TrimLeft(` -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause `, "\n"))) diff --git a/licenses/licenses.go b/licenses/licenses.go index 5e59edb9f7b75..a4bf51befe773 100644 --- a/licenses/licenses.go +++ b/licenses/licenses.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package licenses provides utilities for working with open source licenses. 
diff --git a/log/filelogger/log.go b/log/filelogger/log.go index 599e5237b3e22..268cf1bba7583 100644 --- a/log/filelogger/log.go +++ b/log/filelogger/log.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package filelogger provides localdisk log writing & rotation, primarily for Windows diff --git a/log/filelogger/log_test.go b/log/filelogger/log_test.go index dfa489637f720..32c3d0e90bf1b 100644 --- a/log/filelogger/log_test.go +++ b/log/filelogger/log_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package filelogger diff --git a/log/sockstatlog/logger.go b/log/sockstatlog/logger.go index 8ddfabb866745..30d16fbcc8c6c 100644 --- a/log/sockstatlog/logger.go +++ b/log/sockstatlog/logger.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package sockstatlog provides a logger for capturing network socket stats for debugging. 
diff --git a/log/sockstatlog/logger_test.go b/log/sockstatlog/logger_test.go index e5c2feb2986d8..66228731e368e 100644 --- a/log/sockstatlog/logger_test.go +++ b/log/sockstatlog/logger_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sockstatlog diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index f7491783ad781..7a0027dad74ea 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package logpolicy manages the creation or reuse of logtail loggers, diff --git a/logpolicy/logpolicy_test.go b/logpolicy/logpolicy_test.go index c09e590bb8399..64e54467c496b 100644 --- a/logpolicy/logpolicy_test.go +++ b/logpolicy/logpolicy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logpolicy diff --git a/logpolicy/maybe_syspolicy.go b/logpolicy/maybe_syspolicy.go index 8b2836c97411c..7cdaabcc7c77a 100644 --- a/logpolicy/maybe_syspolicy.go +++ b/logpolicy/maybe_syspolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_syspolicy diff --git a/logtail/buffer.go b/logtail/buffer.go index 6efdbda63ac8e..bc39783ea768a 100644 --- a/logtail/buffer.go +++ b/logtail/buffer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_logtail diff --git a/logtail/config.go b/logtail/config.go index bf47dd8aa7b52..c504047a3f2bf 100644 --- a/logtail/config.go +++ b/logtail/config.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause package logtail diff --git a/logtail/example/logadopt/logadopt.go b/logtail/example/logadopt/logadopt.go index eba3f93112d62..f9104231644b4 100644 --- a/logtail/example/logadopt/logadopt.go +++ b/logtail/example/logadopt/logadopt.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Command logadopt is a CLI tool to adopt a machine into a logtail collection. diff --git a/logtail/example/logreprocess/demo.sh b/logtail/example/logreprocess/demo.sh index 583929c12b4fe..89ff476c248cb 100755 --- a/logtail/example/logreprocess/demo.sh +++ b/logtail/example/logreprocess/demo.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # diff --git a/logtail/example/logreprocess/logreprocess.go b/logtail/example/logreprocess/logreprocess.go index aae65df9f1321..d434da1b187db 100644 --- a/logtail/example/logreprocess/logreprocess.go +++ b/logtail/example/logreprocess/logreprocess.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The logreprocess program tails a log and reprocesses it. diff --git a/logtail/example/logtail/logtail.go b/logtail/example/logtail/logtail.go index 0c9e442584410..24f98090f57c8 100644 --- a/logtail/example/logtail/logtail.go +++ b/logtail/example/logtail/logtail.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The logtail program logs stdin. 
diff --git a/logtail/filch/filch.go b/logtail/filch/filch.go index 88c72f233daab..32b0b88b15990 100644 --- a/logtail/filch/filch.go +++ b/logtail/filch/filch.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_logtail diff --git a/logtail/filch/filch_omit.go b/logtail/filch/filch_omit.go index 898978e2152ea..c4edc1bcd392f 100644 --- a/logtail/filch/filch_omit.go +++ b/logtail/filch/filch_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_logtail diff --git a/logtail/filch/filch_stub.go b/logtail/filch/filch_stub.go index f2aeeb9b9f819..0bb2c306c05dc 100644 --- a/logtail/filch/filch_stub.go +++ b/logtail/filch/filch_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_logtail && (wasm || plan9 || tamago) diff --git a/logtail/filch/filch_test.go b/logtail/filch/filch_test.go index 1e33471809dbb..0975a2d11f8a3 100644 --- a/logtail/filch/filch_test.go +++ b/logtail/filch/filch_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package filch diff --git a/logtail/filch/filch_unix.go b/logtail/filch/filch_unix.go index 27f1d02ee86aa..0817e131190bb 100644 --- a/logtail/filch/filch_unix.go +++ b/logtail/filch/filch_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_logtail && !windows && !wasm && !plan9 && !tamago diff --git a/logtail/filch/filch_windows.go b/logtail/filch/filch_windows.go index b08b64db39f61..3bffe8662396d 100644 --- a/logtail/filch/filch_windows.go +++ 
b/logtail/filch/filch_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_logtail && windows diff --git a/logtail/logtail.go b/logtail/logtail.go index ce50c1c0a7f52..ef296568da957 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_logtail diff --git a/logtail/logtail_omit.go b/logtail/logtail_omit.go index 814fd3be90d8e..21f18c980cce4 100644 --- a/logtail/logtail_omit.go +++ b/logtail/logtail_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_logtail diff --git a/logtail/logtail_test.go b/logtail/logtail_test.go index b618fc0d7bc65..67250ae0db03f 100644 --- a/logtail/logtail_test.go +++ b/logtail/logtail_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logtail diff --git a/maths/ewma.go b/maths/ewma.go index 0897b73e4727f..1946081cf6d08 100644 --- a/maths/ewma.go +++ b/maths/ewma.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package maths contains additional mathematical functions or structures not diff --git a/maths/ewma_test.go b/maths/ewma_test.go index 307078a38ebdf..9fddf34e17193 100644 --- a/maths/ewma_test.go +++ b/maths/ewma_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package maths diff --git a/metrics/fds_linux.go b/metrics/fds_linux.go index 34740c2bb1c74..b0abf946b7ef6 100644 --- a/metrics/fds_linux.go +++ 
b/metrics/fds_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics diff --git a/metrics/fds_notlinux.go b/metrics/fds_notlinux.go index 2dae97cad86b9..1877830134ec4 100644 --- a/metrics/fds_notlinux.go +++ b/metrics/fds_notlinux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/metrics/metrics.go b/metrics/metrics.go index 092b56c41b6dc..6540dcc13d66d 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package metrics contains expvar & Prometheus types and code used by diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go index a808d5a73eb3e..8478cdedc8de8 100644 --- a/metrics/metrics_test.go +++ b/metrics/metrics_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics diff --git a/metrics/multilabelmap.go b/metrics/multilabelmap.go index 223a55a75bf1b..54d41bbae9ef2 100644 --- a/metrics/multilabelmap.go +++ b/metrics/multilabelmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics diff --git a/metrics/multilabelmap_test.go b/metrics/multilabelmap_test.go index 195696234e545..70554c63e50a0 100644 --- a/metrics/multilabelmap_test.go +++ b/metrics/multilabelmap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics diff --git a/net/ace/ace.go b/net/ace/ace.go index 47e780313cadd..9b1264dc0927b 100644 --- a/net/ace/ace.go +++ 
b/net/ace/ace.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ace implements a Dialer that dials via a Tailscale ACE (CONNECT) diff --git a/net/art/art_test.go b/net/art/art_test.go index daf8553ca020d..004f31b8ba3b5 100644 --- a/net/art/art_test.go +++ b/net/art/art_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package art diff --git a/net/art/stride_table.go b/net/art/stride_table.go index 5050df24500ce..6cdb0b5a0a1e9 100644 --- a/net/art/stride_table.go +++ b/net/art/stride_table.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package art diff --git a/net/art/stride_table_test.go b/net/art/stride_table_test.go index 4ccef1fe083cb..e797f40ee0ddc 100644 --- a/net/art/stride_table_test.go +++ b/net/art/stride_table_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package art diff --git a/net/art/table.go b/net/art/table.go index fa397577868a8..447a56b394182 100644 --- a/net/art/table.go +++ b/net/art/table.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package art provides a routing table that implements the Allotment Routing diff --git a/net/art/table_test.go b/net/art/table_test.go index a129c8484ddcd..5c35ac7dafd77 100644 --- a/net/art/table_test.go +++ b/net/art/table_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package art diff --git a/net/bakedroots/bakedroots.go b/net/bakedroots/bakedroots.go index b268b1546caac..70d947c6b725d 
100644 --- a/net/bakedroots/bakedroots.go +++ b/net/bakedroots/bakedroots.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package bakedroots contains WebPKI CA roots we bake into the tailscaled binary, diff --git a/net/bakedroots/bakedroots_test.go b/net/bakedroots/bakedroots_test.go index 8ba502a7827e0..12a656d4edae9 100644 --- a/net/bakedroots/bakedroots_test.go +++ b/net/bakedroots/bakedroots_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package bakedroots diff --git a/net/batching/conn.go b/net/batching/conn.go index 77cdf8c849ca4..1631c33cfe448 100644 --- a/net/batching/conn.go +++ b/net/batching/conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package batching implements a socket optimized for increased throughput. 
diff --git a/net/batching/conn_default.go b/net/batching/conn_default.go index 37d644f50624c..0d208578b6d06 100644 --- a/net/batching/conn_default.go +++ b/net/batching/conn_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/net/batching/conn_linux.go b/net/batching/conn_linux.go index bd7ac25be2a4d..373625b772738 100644 --- a/net/batching/conn_linux.go +++ b/net/batching/conn_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package batching diff --git a/net/batching/conn_linux_test.go b/net/batching/conn_linux_test.go index c2cc463ebc6ad..a15de4f671ec6 100644 --- a/net/batching/conn_linux_test.go +++ b/net/batching/conn_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package batching diff --git a/net/captivedetection/captivedetection.go b/net/captivedetection/captivedetection.go index 3ec820b794400..dfd4bbd875608 100644 --- a/net/captivedetection/captivedetection.go +++ b/net/captivedetection/captivedetection.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package captivedetection provides a way to detect if the system is connected to a network that has diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go index 0778e07df393a..2aa660d88d0a4 100644 --- a/net/captivedetection/captivedetection_test.go +++ b/net/captivedetection/captivedetection_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package captivedetection diff --git 
a/net/captivedetection/endpoints.go b/net/captivedetection/endpoints.go index 57b3e53351a1a..5c1d31d0c35a4 100644 --- a/net/captivedetection/endpoints.go +++ b/net/captivedetection/endpoints.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package captivedetection diff --git a/net/captivedetection/rawconn.go b/net/captivedetection/rawconn.go index a7197d9df2577..3c6f65f84692e 100644 --- a/net/captivedetection/rawconn.go +++ b/net/captivedetection/rawconn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(ios || darwin) diff --git a/net/captivedetection/rawconn_apple.go b/net/captivedetection/rawconn_apple.go index 12b4446e62eb8..ee8e7c4c3f6b9 100644 --- a/net/captivedetection/rawconn_apple.go +++ b/net/captivedetection/rawconn_apple.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios || darwin diff --git a/net/connectproxy/connectproxy.go b/net/connectproxy/connectproxy.go index 4bf6875029554..a63c6acf7b7c8 100644 --- a/net/connectproxy/connectproxy.go +++ b/net/connectproxy/connectproxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package connectproxy contains some CONNECT proxy code. 
diff --git a/net/dns/config.go b/net/dns/config.go index 2425b304dffd8..2b5505fc9734c 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:generate go run tailscale.com/cmd/viewer --type=Config --clonefunc diff --git a/net/dns/dbus.go b/net/dns/dbus.go index c53e8b7205949..f80136196376a 100644 --- a/net/dns/dbus.go +++ b/net/dns/dbus.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android && !ts_omit_dbus diff --git a/net/dns/debian_resolvconf.go b/net/dns/debian_resolvconf.go index 63fd80c1274e8..128b26f2aceca 100644 --- a/net/dns/debian_resolvconf.go +++ b/net/dns/debian_resolvconf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux && !android) || freebsd || openbsd diff --git a/net/dns/direct.go b/net/dns/direct.go index 78495d4737d1d..ec2e42e75176f 100644 --- a/net/dns/direct.go +++ b/net/dns/direct.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !android && !ios diff --git a/net/dns/direct_linux_test.go b/net/dns/direct_linux_test.go index 035763a45f30d..8199b41f3b973 100644 --- a/net/dns/direct_linux_test.go +++ b/net/dns/direct_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/net/dns/direct_test.go b/net/dns/direct_test.go index 07202502e231e..c96323571c1f9 100644 --- a/net/dns/direct_test.go +++ b/net/dns/direct_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/direct_unix_test.go b/net/dns/direct_unix_test.go index bffa6ade943c8..068c5633645a5 100644 --- a/net/dns/direct_unix_test.go +++ b/net/dns/direct_unix_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build unix diff --git a/net/dns/dns_clone.go b/net/dns/dns_clone.go index 807bfce23df8b..de08be8a27b8e 100644 --- a/net/dns/dns_clone.go +++ b/net/dns/dns_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/net/dns/dns_view.go b/net/dns/dns_view.go index c7ce376cba8db..b10861cca8821 100644 --- a/net/dns/dns_view.go +++ b/net/dns/dns_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. 
diff --git a/net/dns/flush_default.go b/net/dns/flush_default.go index eb6d9da417104..006373a513a76 100644 --- a/net/dns/flush_default.go +++ b/net/dns/flush_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/net/dns/flush_windows.go b/net/dns/flush_windows.go index d7c7b7fce515d..a3b1f6b3116af 100644 --- a/net/dns/flush_windows.go +++ b/net/dns/flush_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/ini.go b/net/dns/ini.go index 1e47d606e970f..623362bf56fe2 100644 --- a/net/dns/ini.go +++ b/net/dns/ini.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows diff --git a/net/dns/ini_test.go b/net/dns/ini_test.go index 3afe7009caa27..0e5b966a8674f 100644 --- a/net/dns/ini_test.go +++ b/net/dns/ini_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows diff --git a/net/dns/manager.go b/net/dns/manager.go index 4441c4f69ef70..0b7ae465f59eb 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/manager_darwin.go b/net/dns/manager_darwin.go index 01c920626e466..bb590aa4e7c14 100644 --- a/net/dns/manager_darwin.go +++ b/net/dns/manager_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/manager_default.go b/net/dns/manager_default.go index 
42e7d295d713f..b514a741799a4 100644 --- a/net/dns/manager_default.go +++ b/net/dns/manager_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (!linux || android) && !freebsd && !openbsd && !windows && !darwin && !illumos && !solaris && !plan9 diff --git a/net/dns/manager_freebsd.go b/net/dns/manager_freebsd.go index da3a821ce3cc4..deea462ed049d 100644 --- a/net/dns/manager_freebsd.go +++ b/net/dns/manager_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 4fbf6a8dbffa2..e68b2e7f9e266 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/net/dns/manager_linux_test.go b/net/dns/manager_linux_test.go index 605344c062de9..d48fe23e70a8b 100644 --- a/net/dns/manager_linux_test.go +++ b/net/dns/manager_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/manager_openbsd.go b/net/dns/manager_openbsd.go index 766c82f981218..fe4641bbd1ee1 100644 --- a/net/dns/manager_openbsd.go +++ b/net/dns/manager_openbsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/manager_plan9.go b/net/dns/manager_plan9.go index 47c996dad7cda..d7619f414d45f 100644 --- a/net/dns/manager_plan9.go +++ b/net/dns/manager_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause // TODO: man 6 ndb | grep -e 'suffix.*same line' diff --git a/net/dns/manager_plan9_test.go b/net/dns/manager_plan9_test.go index 806fdb68ed6ba..cc09b360e0c09 100644 --- a/net/dns/manager_plan9_test.go +++ b/net/dns/manager_plan9_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build plan9 diff --git a/net/dns/manager_solaris.go b/net/dns/manager_solaris.go index dcd8b1fd3951c..3324e2b07af23 100644 --- a/net/dns/manager_solaris.go +++ b/net/dns/manager_solaris.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go index 420efe40405df..bdd5cc7bb314b 100644 --- a/net/dns/manager_tcp_test.go +++ b/net/dns/manager_tcp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index 18c88df9125c3..679f81cd5d8a2 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 1eccb9a16ff1d..118dd18dde14b 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/manager_windows_test.go b/net/dns/manager_windows_test.go index 5525096b35c55..d1c65ed2bffd6 100644 --- a/net/dns/manager_windows_test.go +++ b/net/dns/manager_windows_test.go @@ -1,4 +1,4 @@ -// Copyright 
(c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/nm.go b/net/dns/nm.go index a88d29b374ebb..99f032431ce57 100644 --- a/net/dns/nm.go +++ b/net/dns/nm.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android && !ts_omit_networkmanager diff --git a/net/dns/noop.go b/net/dns/noop.go index 9466b57a0f477..70dd93ed22220 100644 --- a/net/dns/noop.go +++ b/net/dns/noop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/nrpt_windows.go b/net/dns/nrpt_windows.go index 261ca337558ef..1e1462e9ef908 100644 --- a/net/dns/nrpt_windows.go +++ b/net/dns/nrpt_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/openresolv.go b/net/dns/openresolv.go index c9562b6a91d13..c3aaf3a6948c8 100644 --- a/net/dns/openresolv.go +++ b/net/dns/openresolv.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux && !android) || freebsd || openbsd diff --git a/net/dns/osconfig.go b/net/dns/osconfig.go index af4c0f01fc75b..f871335ade38c 100644 --- a/net/dns/osconfig.go +++ b/net/dns/osconfig.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/osconfig_test.go b/net/dns/osconfig_test.go index c19db299f4b54..93b13c57d482b 100644 --- a/net/dns/osconfig_test.go +++ b/net/dns/osconfig_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/publicdns/publicdns.go b/net/dns/publicdns/publicdns.go index b8a7f88091617..e3148a5ae8a98 100644 --- a/net/dns/publicdns/publicdns.go +++ b/net/dns/publicdns/publicdns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package publicdns contains mapping and helpers for working with diff --git a/net/dns/publicdns/publicdns_test.go b/net/dns/publicdns/publicdns_test.go index 6efeb2c6f96c8..4f494930b21e9 100644 --- a/net/dns/publicdns/publicdns_test.go +++ b/net/dns/publicdns/publicdns_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package publicdns diff --git a/net/dns/resolvconf-workaround.sh b/net/dns/resolvconf-workaround.sh index aec6708a06da1..e0bb250207952 100644 --- a/net/dns/resolvconf-workaround.sh +++ b/net/dns/resolvconf-workaround.sh @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # # This script is a workaround for a vpn-unfriendly behavior of the diff --git a/net/dns/resolvconf.go b/net/dns/resolvconf.go index ca584ffcc5f1f..990a25f2909ac 100644 --- a/net/dns/resolvconf.go +++ b/net/dns/resolvconf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || openbsd diff --git a/net/dns/resolvconffile/resolvconffile.go b/net/dns/resolvconffile/resolvconffile.go index 753000f6d33da..7a3b90474b5d0 100644 --- a/net/dns/resolvconffile/resolvconffile.go +++ b/net/dns/resolvconffile/resolvconffile.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // 
Package resolvconffile parses & serializes /etc/resolv.conf-style files. diff --git a/net/dns/resolvconffile/resolvconffile_test.go b/net/dns/resolvconffile/resolvconffile_test.go index 4f5ddd599899a..21d9e493d56df 100644 --- a/net/dns/resolvconffile/resolvconffile_test.go +++ b/net/dns/resolvconffile/resolvconffile_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolvconffile diff --git a/net/dns/resolvconfpath_default.go b/net/dns/resolvconfpath_default.go index 57e82c4c773ea..00caf30b24a64 100644 --- a/net/dns/resolvconfpath_default.go +++ b/net/dns/resolvconfpath_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !gokrazy diff --git a/net/dns/resolvconfpath_gokrazy.go b/net/dns/resolvconfpath_gokrazy.go index f0759b0e31a0f..ec382ae82e966 100644 --- a/net/dns/resolvconfpath_gokrazy.go +++ b/net/dns/resolvconfpath_gokrazy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build gokrazy diff --git a/net/dns/resolvd.go b/net/dns/resolvd.go index ad1a99c111997..56fedfef94daf 100644 --- a/net/dns/resolvd.go +++ b/net/dns/resolvd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build openbsd diff --git a/net/dns/resolved.go b/net/dns/resolved.go index d8f63c9d66006..754570fdc1779 100644 --- a/net/dns/resolved.go +++ b/net/dns/resolved.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android && !ts_omit_resolved diff --git a/net/dns/resolver/debug.go b/net/dns/resolver/debug.go index a41462e185e24..8d700ce54a143 100644 
--- a/net/dns/resolver/debug.go +++ b/net/dns/resolver/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver diff --git a/net/dns/resolver/doh_test.go b/net/dns/resolver/doh_test.go index a9c28476166fc..b7045c3211e6b 100644 --- a/net/dns/resolver/doh_test.go +++ b/net/dns/resolver/doh_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 797c5272ad651..0a3daa3bc46ca 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index 0b38008c8a9c2..3165bb9783faa 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver diff --git a/net/dns/resolver/macios_ext.go b/net/dns/resolver/macios_ext.go index e3f979c194d91..c9b6626523d84 100644 --- a/net/dns/resolver/macios_ext.go +++ b/net/dns/resolver/macios_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_macext && (darwin || ios) diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 3185cbe2b35ff..a6f05c4702550 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // 
Package resolver implements a stub DNS resolver that can also serve diff --git a/net/dns/resolver/tsdns_server_test.go b/net/dns/resolver/tsdns_server_test.go index 82fd3bebf232c..9e18918b9d6e4 100644 --- a/net/dns/resolver/tsdns_server_test.go +++ b/net/dns/resolver/tsdns_server_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index f0dbb48b33f6e..5597c2cf2d921 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package resolver diff --git a/net/dns/utf.go b/net/dns/utf.go index 0c1db69acb33b..b18cdebb4eb56 100644 --- a/net/dns/utf.go +++ b/net/dns/utf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/utf_test.go b/net/dns/utf_test.go index b5fd372622519..7ae5a6854d34c 100644 --- a/net/dns/utf_test.go +++ b/net/dns/utf_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dns/wsl_windows.go b/net/dns/wsl_windows.go index 81e8593160c02..c2400746b8a2d 100644 --- a/net/dns/wsl_windows.go +++ b/net/dns/wsl_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dns diff --git a/net/dnscache/dnscache.go b/net/dnscache/dnscache.go index e222b983f0287..8300917248773 100644 --- a/net/dnscache/dnscache.go +++ b/net/dnscache/dnscache.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause // Package dnscache contains a minimal DNS cache that makes a bunch of diff --git a/net/dnscache/dnscache_test.go b/net/dnscache/dnscache_test.go index 58bb6cd7f594c..9306c62cc90ee 100644 --- a/net/dnscache/dnscache_test.go +++ b/net/dnscache/dnscache_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnscache diff --git a/net/dnscache/messagecache.go b/net/dnscache/messagecache.go index 040706b9c7746..9bdedf19e6308 100644 --- a/net/dnscache/messagecache.go +++ b/net/dnscache/messagecache.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnscache diff --git a/net/dnscache/messagecache_test.go b/net/dnscache/messagecache_test.go index 0bedfa5ad78e7..79fa49360a281 100644 --- a/net/dnscache/messagecache_test.go +++ b/net/dnscache/messagecache_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnscache diff --git a/net/dnsfallback/dnsfallback.go b/net/dnsfallback/dnsfallback.go index 74b625970302b..5467127762ecd 100644 --- a/net/dnsfallback/dnsfallback.go +++ b/net/dnsfallback/dnsfallback.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dnsfallback contains a DNS fallback mechanism diff --git a/net/dnsfallback/dnsfallback_test.go b/net/dnsfallback/dnsfallback_test.go index 7f881057450e7..4e816c3405e3a 100644 --- a/net/dnsfallback/dnsfallback_test.go +++ b/net/dnsfallback/dnsfallback_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnsfallback diff --git 
a/net/dnsfallback/update-dns-fallbacks.go b/net/dnsfallback/update-dns-fallbacks.go index 384e77e104cdc..173b464582257 100644 --- a/net/dnsfallback/update-dns-fallbacks.go +++ b/net/dnsfallback/update-dns-fallbacks.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore diff --git a/net/flowtrack/flowtrack.go b/net/flowtrack/flowtrack.go index 8b3d799f7bbf4..5df34a5095219 100644 --- a/net/flowtrack/flowtrack.go +++ b/net/flowtrack/flowtrack.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // // Original implementation (from same author) from which this was derived was: diff --git a/net/flowtrack/flowtrack_test.go b/net/flowtrack/flowtrack_test.go index 1a13f7753a547..21e2021e1216f 100644 --- a/net/flowtrack/flowtrack_test.go +++ b/net/flowtrack/flowtrack_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package flowtrack diff --git a/net/ipset/ipset.go b/net/ipset/ipset.go index 27c1e27ed4180..92cec9d0be854 100644 --- a/net/ipset/ipset.go +++ b/net/ipset/ipset.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipset provides code for creating efficient IP-in-set lookup functions diff --git a/net/ipset/ipset_test.go b/net/ipset/ipset_test.go index 2df4939cb99ad..291416f380ef6 100644 --- a/net/ipset/ipset_test.go +++ b/net/ipset/ipset_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipset diff --git a/net/ktimeout/ktimeout.go b/net/ktimeout/ktimeout.go index 7cd4391435ed0..abe049b06380a 100644 --- a/net/ktimeout/ktimeout.go +++ 
b/net/ktimeout/ktimeout.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ktimeout configures kernel TCP stack timeouts via the provided diff --git a/net/ktimeout/ktimeout_default.go b/net/ktimeout/ktimeout_default.go index f1b11661b1335..2304245b3fe21 100644 --- a/net/ktimeout/ktimeout_default.go +++ b/net/ktimeout/ktimeout_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/net/ktimeout/ktimeout_linux.go b/net/ktimeout/ktimeout_linux.go index 84286b647bcba..634c32119b569 100644 --- a/net/ktimeout/ktimeout_linux.go +++ b/net/ktimeout/ktimeout_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ktimeout diff --git a/net/ktimeout/ktimeout_linux_test.go b/net/ktimeout/ktimeout_linux_test.go index 0330923a96c13..dc3dbe12b9363 100644 --- a/net/ktimeout/ktimeout_linux_test.go +++ b/net/ktimeout/ktimeout_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ktimeout diff --git a/net/ktimeout/ktimeout_test.go b/net/ktimeout/ktimeout_test.go index b534f046caddb..f361d35cbbbe5 100644 --- a/net/ktimeout/ktimeout_test.go +++ b/net/ktimeout/ktimeout_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ktimeout diff --git a/net/memnet/conn.go b/net/memnet/conn.go index a9e1fd39901a0..8cab63403bc5e 100644 --- a/net/memnet/conn.go +++ b/net/memnet/conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package 
memnet diff --git a/net/memnet/conn_test.go b/net/memnet/conn_test.go index 743ce5248cb9d..340c4c6ee00eb 100644 --- a/net/memnet/conn_test.go +++ b/net/memnet/conn_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet diff --git a/net/memnet/listener.go b/net/memnet/listener.go index dded97995bbc1..5d751b12e7f1a 100644 --- a/net/memnet/listener.go +++ b/net/memnet/listener.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet diff --git a/net/memnet/listener_test.go b/net/memnet/listener_test.go index b6ceb3dfa94cf..6c767ed57be7a 100644 --- a/net/memnet/listener_test.go +++ b/net/memnet/listener_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet diff --git a/net/memnet/memnet.go b/net/memnet/memnet.go index db9e3872f6f26..25b1062a19cec 100644 --- a/net/memnet/memnet.go +++ b/net/memnet/memnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package memnet implements an in-memory network implementation. 
diff --git a/net/memnet/memnet_test.go b/net/memnet/memnet_test.go index 38086cec05f3c..d5a53ba81cb9f 100644 --- a/net/memnet/memnet_test.go +++ b/net/memnet/memnet_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet diff --git a/net/memnet/pipe.go b/net/memnet/pipe.go index 47163508353a6..8caca57df2f81 100644 --- a/net/memnet/pipe.go +++ b/net/memnet/pipe.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet diff --git a/net/memnet/pipe_test.go b/net/memnet/pipe_test.go index a86d65388e27d..ebd9dd8c2323f 100644 --- a/net/memnet/pipe_test.go +++ b/net/memnet/pipe_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package memnet diff --git a/net/netaddr/netaddr.go b/net/netaddr/netaddr.go index a04acd57aa670..7057a8eec58e8 100644 --- a/net/netaddr/netaddr.go +++ b/net/netaddr/netaddr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netaddr is a transitional package while we finish migrating from inet.af/netaddr diff --git a/net/netcheck/captiveportal.go b/net/netcheck/captiveportal.go index ad11f19a05b6b..310e98ce73a79 100644 --- a/net/netcheck/captiveportal.go +++ b/net/netcheck/captiveportal.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_captiveportal diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index c5a3d2392007e..ebcdc4eaca4e3 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause // Package netcheck checks the network conditions from the current host. diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 6830e7f27075c..ab7f58febcb3b 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netcheck diff --git a/net/netcheck/standalone.go b/net/netcheck/standalone.go index b4523a832d463..88d5b4cc5a7f1 100644 --- a/net/netcheck/standalone.go +++ b/net/netcheck/standalone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netcheck diff --git a/net/neterror/neterror.go b/net/neterror/neterror.go index e2387440d33d5..43b96999841a1 100644 --- a/net/neterror/neterror.go +++ b/net/neterror/neterror.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package neterror classifies network errors. 
diff --git a/net/neterror/neterror_linux.go b/net/neterror/neterror_linux.go index 857367fe8ebb5..9add4fd1d213c 100644 --- a/net/neterror/neterror_linux.go +++ b/net/neterror/neterror_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package neterror diff --git a/net/neterror/neterror_linux_test.go b/net/neterror/neterror_linux_test.go index 5b99060741351..d8846219afad2 100644 --- a/net/neterror/neterror_linux_test.go +++ b/net/neterror/neterror_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package neterror diff --git a/net/neterror/neterror_windows.go b/net/neterror/neterror_windows.go index bf112f5ed7ab7..4b0b2c8024c31 100644 --- a/net/neterror/neterror_windows.go +++ b/net/neterror/neterror_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package neterror diff --git a/net/netkernelconf/netkernelconf.go b/net/netkernelconf/netkernelconf.go index 3ea502b377fdf..7840074c9fbd2 100644 --- a/net/netkernelconf/netkernelconf.go +++ b/net/netkernelconf/netkernelconf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netkernelconf contains code for checking kernel netdev config. 
diff --git a/net/netkernelconf/netkernelconf_default.go b/net/netkernelconf/netkernelconf_default.go index 3e160e5edf5b0..8e3f2061d999c 100644 --- a/net/netkernelconf/netkernelconf_default.go +++ b/net/netkernelconf/netkernelconf_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux || android diff --git a/net/netkernelconf/netkernelconf_linux.go b/net/netkernelconf/netkernelconf_linux.go index 2a4f0a049f56d..b8c165ac558cf 100644 --- a/net/netkernelconf/netkernelconf_linux.go +++ b/net/netkernelconf/netkernelconf_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/net/netknob/netknob.go b/net/netknob/netknob.go index 53171f4243f8d..a870af824a3ed 100644 --- a/net/netknob/netknob.go +++ b/net/netknob/netknob.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netknob has Tailscale network knobs. diff --git a/net/netmon/defaultroute_bsd.go b/net/netmon/defaultroute_bsd.go index 9195ae0730ebc..88f2c8ea54be1 100644 --- a/net/netmon/defaultroute_bsd.go +++ b/net/netmon/defaultroute_bsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Common code for FreeBSD. 
This might also work on other diff --git a/net/netmon/defaultroute_darwin.go b/net/netmon/defaultroute_darwin.go index 57f7e22b7ddce..121535937bc22 100644 --- a/net/netmon/defaultroute_darwin.go +++ b/net/netmon/defaultroute_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || ios diff --git a/net/netmon/interfaces.go b/net/netmon/interfaces.go index 4cf93973c6473..c7a2cb213e893 100644 --- a/net/netmon/interfaces.go +++ b/net/netmon/interfaces.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/interfaces_android.go b/net/netmon/interfaces_android.go index 26104e879a393..2cd7f23f6f164 100644 --- a/net/netmon/interfaces_android.go +++ b/net/netmon/interfaces_android.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/interfaces_bsd.go b/net/netmon/interfaces_bsd.go index 86bc5615d2321..d53e2cfc18f99 100644 --- a/net/netmon/interfaces_bsd.go +++ b/net/netmon/interfaces_bsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Common code for FreeBSD and Darwin. 
This might also work on other diff --git a/net/netmon/interfaces_darwin.go b/net/netmon/interfaces_darwin.go index 126040350bdb2..c0f588fd20c1b 100644 --- a/net/netmon/interfaces_darwin.go +++ b/net/netmon/interfaces_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/interfaces_darwin_test.go b/net/netmon/interfaces_darwin_test.go index c3d40a6f0e34e..e4b84a144a432 100644 --- a/net/netmon/interfaces_darwin_test.go +++ b/net/netmon/interfaces_darwin_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/interfaces_default_route_test.go b/net/netmon/interfaces_default_route_test.go index e231eea9ac794..76424aef7af2f 100644 --- a/net/netmon/interfaces_default_route_test.go +++ b/net/netmon/interfaces_default_route_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || (darwin && !ts_macext) diff --git a/net/netmon/interfaces_defaultrouteif_todo.go b/net/netmon/interfaces_defaultrouteif_todo.go index df0820fa94eb4..e428f16a1f946 100644 --- a/net/netmon/interfaces_defaultrouteif_todo.go +++ b/net/netmon/interfaces_defaultrouteif_todo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux && !windows && !darwin && !freebsd && !android diff --git a/net/netmon/interfaces_freebsd.go b/net/netmon/interfaces_freebsd.go index 654eb5316384a..5573643ca7370 100644 --- a/net/netmon/interfaces_freebsd.go +++ b/net/netmon/interfaces_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause // This might work on other BSDs, but only tested on FreeBSD. diff --git a/net/netmon/interfaces_linux.go b/net/netmon/interfaces_linux.go index a9b93c0a1ff49..64cb0b9af2ce6 100644 --- a/net/netmon/interfaces_linux.go +++ b/net/netmon/interfaces_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !android diff --git a/net/netmon/interfaces_linux_test.go b/net/netmon/interfaces_linux_test.go index 4f740ac28ba08..5a29c4b8b3ada 100644 --- a/net/netmon/interfaces_linux_test.go +++ b/net/netmon/interfaces_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/interfaces_test.go b/net/netmon/interfaces_test.go index e4274819f90df..bd81eb96a42bf 100644 --- a/net/netmon/interfaces_test.go +++ b/net/netmon/interfaces_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/interfaces_windows.go b/net/netmon/interfaces_windows.go index d6625ead3cd05..070b08ba658e2 100644 --- a/net/netmon/interfaces_windows.go +++ b/net/netmon/interfaces_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/interfaces_windows_test.go b/net/netmon/interfaces_windows_test.go index 91db7bcc5266c..13526612eb477 100644 --- a/net/netmon/interfaces_windows_test.go +++ b/net/netmon/interfaces_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go index 2876e9b12481c..bddbd4d616462 
100644 --- a/net/netmon/loghelper.go +++ b/net/netmon/loghelper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go index 468a12505f322..aec5206443aa4 100644 --- a/net/netmon/loghelper_test.go +++ b/net/netmon/loghelper_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index e18bc392dd196..c30010ee407da 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package monitor provides facilities for monitoring network diff --git a/net/netmon/netmon_darwin.go b/net/netmon/netmon_darwin.go index 042f9a3b750c2..588cbf6161845 100644 --- a/net/netmon/netmon_darwin.go +++ b/net/netmon/netmon_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/netmon_darwin_test.go b/net/netmon/netmon_darwin_test.go index 84c67cf6fa3e2..e57b5ca84c146 100644 --- a/net/netmon/netmon_darwin_test.go +++ b/net/netmon/netmon_darwin_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/netmon_freebsd.go b/net/netmon/netmon_freebsd.go index 3a4fb44d8f0a0..8e99532c589b6 100644 --- a/net/netmon/netmon_freebsd.go +++ b/net/netmon/netmon_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git 
a/net/netmon/netmon_linux.go b/net/netmon/netmon_linux.go index aa5253f9be28b..b7d87f995634f 100644 --- a/net/netmon/netmon_linux.go +++ b/net/netmon/netmon_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !android diff --git a/net/netmon/netmon_linux_test.go b/net/netmon/netmon_linux_test.go index 75d7c646559f1..c6c12e850f3fe 100644 --- a/net/netmon/netmon_linux_test.go +++ b/net/netmon/netmon_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/net/netmon/netmon_polling.go b/net/netmon/netmon_polling.go index 3b5ef6fe9206f..bdeb43005782b 100644 --- a/net/netmon/netmon_polling.go +++ b/net/netmon/netmon_polling.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (!linux && !freebsd && !windows && !darwin) || android diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index 50519b4a9c531..97c203274cd8f 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/netmon_windows.go b/net/netmon/netmon_windows.go index e8966faf00f46..91c137de0e328 100644 --- a/net/netmon/netmon_windows.go +++ b/net/netmon/netmon_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netmon/polling.go b/net/netmon/polling.go index 2a3e44cba0b9d..806f0b0451fe1 100644 --- a/net/netmon/polling.go +++ b/net/netmon/polling.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !darwin diff --git a/net/netmon/state.go b/net/netmon/state.go index 79dd8a01ba9e1..10d68ab785edc 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmon diff --git a/net/netns/mksyscall.go b/net/netns/mksyscall.go index ff2c0b8610657..2a8a2176b9c84 100644 --- a/net/netns/mksyscall.go +++ b/net/netns/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netns diff --git a/net/netns/netns.go b/net/netns/netns.go index 81ab5e2a212a6..5d692c787eae8 100644 --- a/net/netns/netns.go +++ b/net/netns/netns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netns contains the common code for using the Go net package diff --git a/net/netns/netns_android.go b/net/netns/netns_android.go index 162e5c79a62fa..e747f61f40e50 100644 --- a/net/netns/netns_android.go +++ b/net/netns/netns_android.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build android diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go index ff05a3f3139c3..e5d01542edfb4 100644 --- a/net/netns/netns_darwin.go +++ b/net/netns/netns_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin diff --git a/net/netns/netns_darwin_test.go b/net/netns/netns_darwin_test.go index 2030c169ef68b..768b095b82739 100644 --- a/net/netns/netns_darwin_test.go +++ b/net/netns/netns_darwin_test.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netns diff --git a/net/netns/netns_default.go b/net/netns/netns_default.go index 58c5936640e4f..4087e40488e60 100644 --- a/net/netns/netns_default.go +++ b/net/netns/netns_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux && !windows && !darwin diff --git a/net/netns/netns_dw.go b/net/netns/netns_dw.go index b9f750e8a6657..82494737130b6 100644 --- a/net/netns/netns_dw.go +++ b/net/netns/netns_dw.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || windows diff --git a/net/netns/netns_linux.go b/net/netns/netns_linux.go index 609f524b5cc01..02b2dd89b197f 100644 --- a/net/netns/netns_linux.go +++ b/net/netns/netns_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/net/netns/netns_linux_test.go b/net/netns/netns_linux_test.go index a5000f37f0a44..e467ee41405d6 100644 --- a/net/netns/netns_linux_test.go +++ b/net/netns/netns_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netns diff --git a/net/netns/netns_test.go b/net/netns/netns_test.go index 82f919b946d4a..9ecc19b424f95 100644 --- a/net/netns/netns_test.go +++ b/net/netns/netns_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netns contains the common code for using the Go net package diff --git a/net/netns/netns_windows.go b/net/netns/netns_windows.go index 
afbda0f47ece6..686c813f6b1d1 100644 --- a/net/netns/netns_windows.go +++ b/net/netns/netns_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netns diff --git a/net/netns/netns_windows_test.go b/net/netns/netns_windows_test.go index 390604f465041..67e7b3de86c09 100644 --- a/net/netns/netns_windows_test.go +++ b/net/netns/netns_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netns diff --git a/net/netns/socks.go b/net/netns/socks.go index 9a137db7f5b18..7746e91778353 100644 --- a/net/netns/socks.go +++ b/net/netns/socks.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !js && !android && !ts_omit_useproxy diff --git a/net/netstat/netstat.go b/net/netstat/netstat.go index 53c7d7757eac6..44b421d5d15b5 100644 --- a/net/netstat/netstat.go +++ b/net/netstat/netstat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netstat returns the local machine's network connection table. 
diff --git a/net/netstat/netstat_noimpl.go b/net/netstat/netstat_noimpl.go index e455c8ce931de..78bb018f213ad 100644 --- a/net/netstat/netstat_noimpl.go +++ b/net/netstat/netstat_noimpl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/net/netstat/netstat_test.go b/net/netstat/netstat_test.go index 38827df5ef65a..8407db778f001 100644 --- a/net/netstat/netstat_test.go +++ b/net/netstat/netstat_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstat diff --git a/net/netstat/netstat_windows.go b/net/netstat/netstat_windows.go index 24191a50eadab..4b3edbdf8134b 100644 --- a/net/netstat/netstat_windows.go +++ b/net/netstat/netstat_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstat diff --git a/net/netutil/default_interface_portable.go b/net/netutil/default_interface_portable.go index d75cefb7aec74..2a80553715aec 100644 --- a/net/netutil/default_interface_portable.go +++ b/net/netutil/default_interface_portable.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netutil diff --git a/net/netutil/default_interface_portable_test.go b/net/netutil/default_interface_portable_test.go index 03dce340505a5..b54733747524f 100644 --- a/net/netutil/default_interface_portable_test.go +++ b/net/netutil/default_interface_portable_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netutil diff --git a/net/netutil/ip_forward.go b/net/netutil/ip_forward.go index c64a9e4269ae0..0711953f52e68 100644 --- 
a/net/netutil/ip_forward.go +++ b/net/netutil/ip_forward.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netutil diff --git a/net/netutil/netutil.go b/net/netutil/netutil.go index 5c42f51c64837..13882988594d1 100644 --- a/net/netutil/netutil.go +++ b/net/netutil/netutil.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netutil contains misc shared networking code & types. diff --git a/net/netutil/netutil_test.go b/net/netutil/netutil_test.go index 0523946e63c9b..a512238d5f5ee 100644 --- a/net/netutil/netutil_test.go +++ b/net/netutil/netutil_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netutil diff --git a/net/netutil/routes.go b/net/netutil/routes.go index 7d67d3695e10d..c8212b9af66dd 100644 --- a/net/netutil/routes.go +++ b/net/netutil/routes.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netutil diff --git a/net/netx/netx.go b/net/netx/netx.go index 014daa9a795cb..fba6567c4c312 100644 --- a/net/netx/netx.go +++ b/net/netx/netx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netx contains types to describe and abstract over how dialing and diff --git a/net/packet/capture.go b/net/packet/capture.go index dd0ca411f2051..630a4b1610c2b 100644 --- a/net/packet/capture.go +++ b/net/packet/capture.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/checksum/checksum.go 
b/net/packet/checksum/checksum.go index 4b5b82174a22f..e6918e7ae1c9f 100644 --- a/net/packet/checksum/checksum.go +++ b/net/packet/checksum/checksum.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package checksum provides functions for updating checksums in parsed packets. diff --git a/net/packet/checksum/checksum_test.go b/net/packet/checksum/checksum_test.go index bf818743d3dbf..ab7c783b3e96a 100644 --- a/net/packet/checksum/checksum_test.go +++ b/net/packet/checksum/checksum_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package checksum diff --git a/net/packet/doc.go b/net/packet/doc.go index ce6c0c30716c6..4a62b8aa77727 100644 --- a/net/packet/doc.go +++ b/net/packet/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package packet contains packet parsing and marshaling utilities. 
diff --git a/net/packet/geneve.go b/net/packet/geneve.go index 71b365ae89414..bed54f641425f 100644 --- a/net/packet/geneve.go +++ b/net/packet/geneve.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/geneve_test.go b/net/packet/geneve_test.go index be9784998adf2..bd673cd0d963a 100644 --- a/net/packet/geneve_test.go +++ b/net/packet/geneve_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/header.go b/net/packet/header.go index fa66a8641c6c4..44b99e520d717 100644 --- a/net/packet/header.go +++ b/net/packet/header.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/icmp.go b/net/packet/icmp.go index 89a7aaa32bec4..8f9cd0e2bb4a1 100644 --- a/net/packet/icmp.go +++ b/net/packet/icmp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/icmp4.go b/net/packet/icmp4.go index 06780e0bb48ff..492a0e9dfee98 100644 --- a/net/packet/icmp4.go +++ b/net/packet/icmp4.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/icmp6.go b/net/packet/icmp6.go index f78db1f4a8c3c..a91db53c9e50c 100644 --- a/net/packet/icmp6.go +++ b/net/packet/icmp6.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/icmp6_test.go b/net/packet/icmp6_test.go index f34883ca41e7e..0348824b62296 100644 --- 
a/net/packet/icmp6_test.go +++ b/net/packet/icmp6_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/ip4.go b/net/packet/ip4.go index 967a8dba7f57b..1964acf1b7900 100644 --- a/net/packet/ip4.go +++ b/net/packet/ip4.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/ip6.go b/net/packet/ip6.go index d26b9a1619b31..eb92f1450f523 100644 --- a/net/packet/ip6.go +++ b/net/packet/ip6.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/packet.go b/net/packet/packet.go index 34b63aadd2c2e..b41e0dcd93301 100644 --- a/net/packet/packet.go +++ b/net/packet/packet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/packet_test.go b/net/packet/packet_test.go index 09c2c101d66d9..4dbf88009b20a 100644 --- a/net/packet/packet_test.go +++ b/net/packet/packet_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/tsmp.go b/net/packet/tsmp.go index 9881299b7d13e..ad1db311a64c2 100644 --- a/net/packet/tsmp.go +++ b/net/packet/tsmp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // TSMP is our ICMP-like "Tailscale Message Protocol" for signaling diff --git a/net/packet/tsmp_test.go b/net/packet/tsmp_test.go index d8f1d38d57180..01bb836d76971 100644 --- a/net/packet/tsmp_test.go +++ b/net/packet/tsmp_test.go @@ 
-1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/udp4.go b/net/packet/udp4.go index 0d5bca73e8c89..a42222f785292 100644 --- a/net/packet/udp4.go +++ b/net/packet/udp4.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/packet/udp6.go b/net/packet/udp6.go index 10fdcb99e525c..8d7f380884cbb 100644 --- a/net/packet/udp6.go +++ b/net/packet/udp6.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package packet diff --git a/net/ping/ping.go b/net/ping/ping.go index 8e16a692a8136..de79da51c5c48 100644 --- a/net/ping/ping.go +++ b/net/ping/ping.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ping allows sending ICMP echo requests to a host in order to diff --git a/net/ping/ping_test.go b/net/ping/ping_test.go index bbedbcad80e44..9fe12de7e9a54 100644 --- a/net/ping/ping_test.go +++ b/net/ping/ping_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ping diff --git a/net/portmapper/disabled_stubs.go b/net/portmapper/disabled_stubs.go index a1324c20be9e1..dea4ef0d3e630 100644 --- a/net/portmapper/disabled_stubs.go +++ b/net/portmapper/disabled_stubs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build js diff --git a/net/portmapper/igd_test.go b/net/portmapper/igd_test.go index 77015f5bfb189..9426790639563 100644 --- a/net/portmapper/igd_test.go +++ b/net/portmapper/igd_test.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper diff --git a/net/portmapper/legacy_upnp.go b/net/portmapper/legacy_upnp.go index 2ce92dc65d6b3..ed2c23a04a975 100644 --- a/net/portmapper/legacy_upnp.go +++ b/net/portmapper/legacy_upnp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js diff --git a/net/portmapper/pcp.go b/net/portmapper/pcp.go index d0752734e8752..0332295b8cfa0 100644 --- a/net/portmapper/pcp.go +++ b/net/portmapper/pcp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper diff --git a/net/portmapper/pcp_test.go b/net/portmapper/pcp_test.go index 8f8eef3ef8399..ef2621f7dc401 100644 --- a/net/portmapper/pcp_test.go +++ b/net/portmapper/pcp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper diff --git a/net/portmapper/pcpresultcode_string.go b/net/portmapper/pcpresultcode_string.go index 45eb70d39fa06..8ffd5beae0604 100644 --- a/net/portmapper/pcpresultcode_string.go +++ b/net/portmapper/pcpresultcode_string.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by "stringer -type=pcpResultCode -trimprefix=pcpCode"; DO NOT EDIT. 
diff --git a/net/portmapper/pmpresultcode_string.go b/net/portmapper/pmpresultcode_string.go index 18d911d944126..f32626328fdec 100644 --- a/net/portmapper/pmpresultcode_string.go +++ b/net/portmapper/pmpresultcode_string.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by "stringer -type=pmpResultCode -trimprefix=pmpCode"; DO NOT EDIT. diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index 16a981d1d8336..37d7730c51f0d 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package portmapper is a UDP port mapping client. It currently allows for mapping over diff --git a/net/portmapper/portmapper_test.go b/net/portmapper/portmapper_test.go index a697a39089635..beb14cb8074eb 100644 --- a/net/portmapper/portmapper_test.go +++ b/net/portmapper/portmapper_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper diff --git a/net/portmapper/portmappertype/portmappertype.go b/net/portmapper/portmappertype/portmappertype.go index cc8358a4aed12..3b756e0ed04b2 100644 --- a/net/portmapper/portmappertype/portmappertype.go +++ b/net/portmapper/portmappertype/portmappertype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package portmappertype defines the net/portmapper interface, which may or may not be diff --git a/net/portmapper/select_test.go b/net/portmapper/select_test.go index cc685bc253d3d..b7370c24139f1 100644 --- a/net/portmapper/select_test.go +++ b/net/portmapper/select_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper diff --git a/net/portmapper/upnp.go b/net/portmapper/upnp.go index 46d7ff70215fd..e3971a2ae0f97 100644 --- a/net/portmapper/upnp.go +++ b/net/portmapper/upnp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js diff --git a/net/portmapper/upnp_test.go b/net/portmapper/upnp_test.go index a954b2beac094..15b03517708e4 100644 --- a/net/portmapper/upnp_test.go +++ b/net/portmapper/upnp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portmapper diff --git a/net/proxymux/mux.go b/net/proxymux/mux.go index ff5aaff3b975f..d9c57cd76ecf5 100644 --- a/net/proxymux/mux.go +++ b/net/proxymux/mux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package proxymux splits a net.Listener in two, routing SOCKS5 diff --git a/net/proxymux/mux_test.go b/net/proxymux/mux_test.go index 29166f9966bbc..6e84e57d8ef80 100644 --- a/net/proxymux/mux_test.go +++ b/net/proxymux/mux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package proxymux diff --git a/net/routetable/routetable.go b/net/routetable/routetable.go index 2884706f109a1..bfa62af7b3ce3 100644 --- a/net/routetable/routetable.go +++ b/net/routetable/routetable.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package routetable provides functions that operate on the system's route diff --git a/net/routetable/routetable_bsd.go b/net/routetable/routetable_bsd.go index 1de1a2734ce6c..7a6bf48cc96e8 100644 --- 
a/net/routetable/routetable_bsd.go +++ b/net/routetable/routetable_bsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd diff --git a/net/routetable/routetable_bsd_test.go b/net/routetable/routetable_bsd_test.go index 29493d59bdc36..df515c5788681 100644 --- a/net/routetable/routetable_bsd_test.go +++ b/net/routetable/routetable_bsd_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd diff --git a/net/routetable/routetable_darwin.go b/net/routetable/routetable_darwin.go index 7f525ae32807a..5c143f0c1d7eb 100644 --- a/net/routetable/routetable_darwin.go +++ b/net/routetable/routetable_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin diff --git a/net/routetable/routetable_freebsd.go b/net/routetable/routetable_freebsd.go index 8e57a330246ed..313febf3ca94d 100644 --- a/net/routetable/routetable_freebsd.go +++ b/net/routetable/routetable_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build freebsd diff --git a/net/routetable/routetable_linux.go b/net/routetable/routetable_linux.go index 0b2cb305d7154..479aa8fd8f0af 100644 --- a/net/routetable/routetable_linux.go +++ b/net/routetable/routetable_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/net/routetable/routetable_linux_test.go b/net/routetable/routetable_linux_test.go index bbf7790e787ca..4d03b7f9d5466 100644 --- a/net/routetable/routetable_linux_test.go +++ 
b/net/routetable/routetable_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/net/routetable/routetable_other.go b/net/routetable/routetable_other.go index e547ab0ac769a..da162c3f8e191 100644 --- a/net/routetable/routetable_other.go +++ b/net/routetable/routetable_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build android || (!linux && !darwin && !freebsd) diff --git a/net/sockopts/sockopts.go b/net/sockopts/sockopts.go index 0c0ee7692cf6a..aa10d977f6468 100644 --- a/net/sockopts/sockopts.go +++ b/net/sockopts/sockopts.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package sockopts contains logic for applying socket options. diff --git a/net/sockopts/sockopts_default.go b/net/sockopts/sockopts_default.go index 3cc8679b512c1..6b728d34c6a42 100644 --- a/net/sockopts/sockopts_default.go +++ b/net/sockopts/sockopts_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/net/sockopts/sockopts_linux.go b/net/sockopts/sockopts_linux.go index 5d778d380f5c9..216c589225d39 100644 --- a/net/sockopts/sockopts_linux.go +++ b/net/sockopts/sockopts_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/net/sockopts/sockopts_notwindows.go b/net/sockopts/sockopts_notwindows.go index f1bc7fd442ee1..880860a58036f 100644 --- a/net/sockopts/sockopts_notwindows.go +++ b/net/sockopts/sockopts_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright 
(c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/net/sockopts/sockopts_unix_test.go b/net/sockopts/sockopts_unix_test.go index ebb4354ac1385..d474326a14df8 100644 --- a/net/sockopts/sockopts_unix_test.go +++ b/net/sockopts/sockopts_unix_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build unix diff --git a/net/sockopts/sockopts_windows.go b/net/sockopts/sockopts_windows.go index 1e6c3f69d3af5..9533fd2a4ca9f 100644 --- a/net/sockopts/sockopts_windows.go +++ b/net/sockopts/sockopts_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows diff --git a/net/socks5/socks5.go b/net/socks5/socks5.go index 2e277147bc50d..729fc8e882cf1 100644 --- a/net/socks5/socks5.go +++ b/net/socks5/socks5.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package socks5 is a SOCKS5 server implementation. 
diff --git a/net/socks5/socks5_test.go b/net/socks5/socks5_test.go index bc6fac79fdcf9..9fbc11f8c0dfb 100644 --- a/net/socks5/socks5_test.go +++ b/net/socks5/socks5_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package socks5 diff --git a/net/sockstats/sockstats.go b/net/sockstats/sockstats.go index 715c1ee06e9a9..14a58d19d800d 100644 --- a/net/sockstats/sockstats.go +++ b/net/sockstats/sockstats.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package sockstats collects statistics about network sockets used by diff --git a/net/sockstats/sockstats_noop.go b/net/sockstats/sockstats_noop.go index 96723111ade7a..b586a04cbee29 100644 --- a/net/sockstats/sockstats_noop.go +++ b/net/sockstats/sockstats_noop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !tailscale_go || !(darwin || ios || android || ts_enable_sockstats) diff --git a/net/sockstats/sockstats_tsgo.go b/net/sockstats/sockstats_tsgo.go index 4e9f4a9666308..46ac75c990d48 100644 --- a/net/sockstats/sockstats_tsgo.go +++ b/net/sockstats/sockstats_tsgo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go && (darwin || ios || android || ts_enable_sockstats) diff --git a/net/sockstats/sockstats_tsgo_darwin.go b/net/sockstats/sockstats_tsgo_darwin.go index 321d32e04e5f0..e79ff9ee3a02c 100644 --- a/net/sockstats/sockstats_tsgo_darwin.go +++ b/net/sockstats/sockstats_tsgo_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go && (darwin || ios) diff --git 
a/net/sockstats/sockstats_tsgo_test.go b/net/sockstats/sockstats_tsgo_test.go index c467c8a70ff79..b06ffa8946c44 100644 --- a/net/sockstats/sockstats_tsgo_test.go +++ b/net/sockstats/sockstats_tsgo_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go && (darwin || ios || android || ts_enable_sockstats) diff --git a/net/speedtest/speedtest.go b/net/speedtest/speedtest.go index a462dbeece42b..8b887a8ef8b0b 100644 --- a/net/speedtest/speedtest.go +++ b/net/speedtest/speedtest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package speedtest contains both server and client code for diff --git a/net/speedtest/speedtest_client.go b/net/speedtest/speedtest_client.go index 299a12a8dfaec..099eb48549975 100644 --- a/net/speedtest/speedtest_client.go +++ b/net/speedtest/speedtest_client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package speedtest diff --git a/net/speedtest/speedtest_server.go b/net/speedtest/speedtest_server.go index 72f85fa15b019..6b6f53b7da5a9 100644 --- a/net/speedtest/speedtest_server.go +++ b/net/speedtest/speedtest_server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package speedtest diff --git a/net/speedtest/speedtest_test.go b/net/speedtest/speedtest_test.go index bb8f2676af8c3..1fbd0915b219f 100644 --- a/net/speedtest/speedtest_test.go +++ b/net/speedtest/speedtest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package speedtest diff --git a/net/stun/stun.go b/net/stun/stun.go index eeac23cbbd45d..7d75e79b8e732 
100644 --- a/net/stun/stun.go +++ b/net/stun/stun.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package STUN generates STUN request packets and parses response packets. diff --git a/net/stun/stun_fuzzer.go b/net/stun/stun_fuzzer.go index 6f0c9e3b0beae..b7e3198df873d 100644 --- a/net/stun/stun_fuzzer.go +++ b/net/stun/stun_fuzzer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build gofuzz diff --git a/net/stun/stun_test.go b/net/stun/stun_test.go index 05fc4d2ba727f..7f754324e7597 100644 --- a/net/stun/stun_test.go +++ b/net/stun/stun_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package stun_test diff --git a/net/stun/stuntest/stuntest.go b/net/stun/stuntest/stuntest.go index 09684160055fb..0d3988ce800a9 100644 --- a/net/stun/stuntest/stuntest.go +++ b/net/stun/stuntest/stuntest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package stuntest provides a STUN test server. diff --git a/net/stunserver/stunserver.go b/net/stunserver/stunserver.go index 7397675ca8dc3..97df8cb4d79e9 100644 --- a/net/stunserver/stunserver.go +++ b/net/stunserver/stunserver.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package stunserver implements a STUN server. 
The package publishes a number of stats diff --git a/net/stunserver/stunserver_test.go b/net/stunserver/stunserver_test.go index 24a7bb570b6bd..c96aea4d15973 100644 --- a/net/stunserver/stunserver_test.go +++ b/net/stunserver/stunserver_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package stunserver diff --git a/net/tcpinfo/tcpinfo.go b/net/tcpinfo/tcpinfo.go index a757add9f8f46..3e2d76a9529fa 100644 --- a/net/tcpinfo/tcpinfo.go +++ b/net/tcpinfo/tcpinfo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tcpinfo provides platform-agnostic accessors to information about a diff --git a/net/tcpinfo/tcpinfo_darwin.go b/net/tcpinfo/tcpinfo_darwin.go index 53fa22fbf5bed..3e53cd4ed0c8d 100644 --- a/net/tcpinfo/tcpinfo_darwin.go +++ b/net/tcpinfo/tcpinfo_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tcpinfo diff --git a/net/tcpinfo/tcpinfo_linux.go b/net/tcpinfo/tcpinfo_linux.go index 885d462c95e35..1ff0c1bc4f539 100644 --- a/net/tcpinfo/tcpinfo_linux.go +++ b/net/tcpinfo/tcpinfo_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tcpinfo diff --git a/net/tcpinfo/tcpinfo_other.go b/net/tcpinfo/tcpinfo_other.go index be45523aeb00d..c7d0f9177af2f 100644 --- a/net/tcpinfo/tcpinfo_other.go +++ b/net/tcpinfo/tcpinfo_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux && !darwin diff --git a/net/tcpinfo/tcpinfo_test.go b/net/tcpinfo/tcpinfo_test.go index bb3d224ec1beb..6baac934a00f0 100644 --- 
a/net/tcpinfo/tcpinfo_test.go +++ b/net/tcpinfo/tcpinfo_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tcpinfo diff --git a/net/tlsdial/blockblame/blockblame.go b/net/tlsdial/blockblame/blockblame.go index 5b48dc009b980..f2d7db27c1a5e 100644 --- a/net/tlsdial/blockblame/blockblame.go +++ b/net/tlsdial/blockblame/blockblame.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package blockblame blames specific firewall manufacturers for blocking Tailscale, diff --git a/net/tlsdial/blockblame/blockblame_test.go b/net/tlsdial/blockblame/blockblame_test.go index 6d3592c60a3de..3d08bf811601c 100644 --- a/net/tlsdial/blockblame/blockblame_test.go +++ b/net/tlsdial/blockblame/blockblame_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package blockblame diff --git a/net/tlsdial/deps_test.go b/net/tlsdial/deps_test.go index 7a93899c2f126..3600af537cd85 100644 --- a/net/tlsdial/deps_test.go +++ b/net/tlsdial/deps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build for_go_mod_tidy_only diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index ee4771d8db613..ffc8c90a80f96 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tlsdial generates tls.Config values and does x509 validation of diff --git a/net/tlsdial/tlsdial_test.go b/net/tlsdial/tlsdial_test.go index a288d765306e1..9ef0f76884c53 100644 --- a/net/tlsdial/tlsdial_test.go +++ b/net/tlsdial/tlsdial_test.go @@ -1,4 +1,4 
@@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tlsdial diff --git a/net/tsaddr/tsaddr.go b/net/tsaddr/tsaddr.go index 06e6a26ddb721..1eac9eb77cfde 100644 --- a/net/tsaddr/tsaddr.go +++ b/net/tsaddr/tsaddr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsaddr handles Tailscale-specific IPs and ranges. diff --git a/net/tsaddr/tsaddr_test.go b/net/tsaddr/tsaddr_test.go index 9ac1ce3036299..ac5a07fff94f5 100644 --- a/net/tsaddr/tsaddr_test.go +++ b/net/tsaddr/tsaddr_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsaddr diff --git a/net/tsdial/dnsmap.go b/net/tsdial/dnsmap.go index 37fedd14c899d..d7204463f66ed 100644 --- a/net/tsdial/dnsmap.go +++ b/net/tsdial/dnsmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsdial diff --git a/net/tsdial/dnsmap_test.go b/net/tsdial/dnsmap_test.go index 41a957f186f4a..b2a50fa0c4549 100644 --- a/net/tsdial/dnsmap_test.go +++ b/net/tsdial/dnsmap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsdial diff --git a/net/tsdial/dohclient.go b/net/tsdial/dohclient.go index d830398cdfb9c..59b0da04d25f4 100644 --- a/net/tsdial/dohclient.go +++ b/net/tsdial/dohclient.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsdial diff --git a/net/tsdial/dohclient_test.go b/net/tsdial/dohclient_test.go index 23255769f4847..63e5dbd997826 100644 --- a/net/tsdial/dohclient_test.go +++ 
b/net/tsdial/dohclient_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsdial diff --git a/net/tsdial/peerapi_macios_ext.go b/net/tsdial/peerapi_macios_ext.go index 3ebead3db439f..fa40feef04524 100644 --- a/net/tsdial/peerapi_macios_ext.go +++ b/net/tsdial/peerapi_macios_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file's built on iOS and on two of three macOS build variants: diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index df2d80a619752..ebbafa52b01e9 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsdial provides a Dialer type that can dial out of tailscaled. diff --git a/net/tshttpproxy/mksyscall.go b/net/tshttpproxy/mksyscall.go index f8fdae89b55f0..37824c84653de 100644 --- a/net/tshttpproxy/mksyscall.go +++ b/net/tshttpproxy/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tshttpproxy diff --git a/net/tshttpproxy/tshttpproxy.go b/net/tshttpproxy/tshttpproxy.go index 0456009ed9a81..1ea444c8f5e99 100644 --- a/net/tshttpproxy/tshttpproxy.go +++ b/net/tshttpproxy/tshttpproxy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tshttpproxy contains Tailscale additions to httpproxy not available diff --git a/net/tshttpproxy/tshttpproxy_linux.go b/net/tshttpproxy/tshttpproxy_linux.go index 7e086e4929bc7..30096e214a982 100644 --- a/net/tshttpproxy/tshttpproxy_linux.go +++ b/net/tshttpproxy/tshttpproxy_linux.go @@ -1,4 +1,4 @@ -// Copyright 
(c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/net/tshttpproxy/tshttpproxy_synology.go b/net/tshttpproxy/tshttpproxy_synology.go index e28844f7dbf67..a632753f7bc1b 100644 --- a/net/tshttpproxy/tshttpproxy_synology.go +++ b/net/tshttpproxy/tshttpproxy_synology.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/net/tshttpproxy/tshttpproxy_synology_test.go b/net/tshttpproxy/tshttpproxy_synology_test.go index b6e8b948c3ae9..a57ac1558d4f4 100644 --- a/net/tshttpproxy/tshttpproxy_synology_test.go +++ b/net/tshttpproxy/tshttpproxy_synology_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/net/tshttpproxy/tshttpproxy_test.go b/net/tshttpproxy/tshttpproxy_test.go index 97f8c1f8b049a..da847429d4bd4 100644 --- a/net/tshttpproxy/tshttpproxy_test.go +++ b/net/tshttpproxy/tshttpproxy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tshttpproxy diff --git a/net/tshttpproxy/tshttpproxy_windows.go b/net/tshttpproxy/tshttpproxy_windows.go index 7163c786307ac..1a80be3ff370f 100644 --- a/net/tshttpproxy/tshttpproxy_windows.go +++ b/net/tshttpproxy/tshttpproxy_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tshttpproxy diff --git a/net/tstun/fake.go b/net/tstun/fake.go index 3d86bb3df4ca9..f7925116e80bd 100644 --- a/net/tstun/fake.go +++ b/net/tstun/fake.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause 
package tstun diff --git a/net/tstun/ifstatus_noop.go b/net/tstun/ifstatus_noop.go index 8cf569f982010..420326c2fda38 100644 --- a/net/tstun/ifstatus_noop.go +++ b/net/tstun/ifstatus_noop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/net/tstun/ifstatus_windows.go b/net/tstun/ifstatus_windows.go index fd9fc2112524c..64c898fd3aef2 100644 --- a/net/tstun/ifstatus_windows.go +++ b/net/tstun/ifstatus_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/mtu.go b/net/tstun/mtu.go index 004529c205f9e..6eceb6833b964 100644 --- a/net/tstun/mtu.go +++ b/net/tstun/mtu.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/mtu_test.go b/net/tstun/mtu_test.go index ec31e45ce73f5..6129e0c140a85 100644 --- a/net/tstun/mtu_test.go +++ b/net/tstun/mtu_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/netstack_disabled.go b/net/tstun/netstack_disabled.go index c1266b30559d4..6425668a36c87 100644 --- a/net/tstun/netstack_disabled.go +++ b/net/tstun/netstack_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_netstack diff --git a/net/tstun/netstack_enabled.go b/net/tstun/netstack_enabled.go index 8fc1a2e20e35a..440013c7e9510 100644 --- a/net/tstun/netstack_enabled.go +++ b/net/tstun/netstack_enabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_netstack diff --git a/net/tstun/tstun_stub.go b/net/tstun/tstun_stub.go index d21eda6b07a57..27d530bc8b95c 100644 --- a/net/tstun/tstun_stub.go +++ b/net/tstun/tstun_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build aix || solaris || illumos diff --git a/net/tstun/tun.go b/net/tstun/tun.go index 19b0a53f5be6c..42b0d239c39d4 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !wasm && !tamago && !aix && !solaris && !illumos diff --git a/net/tstun/tun_linux.go b/net/tstun/tun_linux.go index 05cf58c17df8a..028e0a14b5bd8 100644 --- a/net/tstun/tun_linux.go +++ b/net/tstun/tun_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/tun_macos.go b/net/tstun/tun_macos.go index 3506f05b1e4c9..fb8eb9450fb7e 100644 --- a/net/tstun/tun_macos.go +++ b/net/tstun/tun_macos.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !ios diff --git a/net/tstun/tun_notwindows.go b/net/tstun/tun_notwindows.go index 087fcd4eec784..73f80fea12ce8 100644 --- a/net/tstun/tun_notwindows.go +++ b/net/tstun/tun_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/net/tstun/tun_windows.go b/net/tstun/tun_windows.go index 2b1d3054e5ecb..96721021b2022 100644 --- a/net/tstun/tun_windows.go +++ b/net/tstun/tun_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright 
(c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index fe1bc31b812b4..d463948a208fa 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/tstun/wrap_linux.go b/net/tstun/wrap_linux.go index 7498f107b5fda..a4e76de5a9d20 100644 --- a/net/tstun/wrap_linux.go +++ b/net/tstun/wrap_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_gro diff --git a/net/tstun/wrap_noop.go b/net/tstun/wrap_noop.go index 8ad04bafe94c1..8f5b62d0cbcfe 100644 --- a/net/tstun/wrap_noop.go +++ b/net/tstun/wrap_noop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux || ts_omit_gro diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index 3bc2ff447422d..8515cb8f0a4c0 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstun diff --git a/net/udprelay/endpoint/endpoint.go b/net/udprelay/endpoint/endpoint.go index 0d2a14e965a4a..7b8368b615e52 100644 --- a/net/udprelay/endpoint/endpoint.go +++ b/net/udprelay/endpoint/endpoint.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package endpoint contains types relating to UDP relay server endpoints. 
It diff --git a/net/udprelay/endpoint/endpoint_test.go b/net/udprelay/endpoint/endpoint_test.go index f12a6e2f62240..eaef289de6725 100644 --- a/net/udprelay/endpoint/endpoint_test.go +++ b/net/udprelay/endpoint/endpoint_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package endpoint diff --git a/net/udprelay/metrics.go b/net/udprelay/metrics.go index 235029bf425ce..6e22acd03ce70 100644 --- a/net/udprelay/metrics.go +++ b/net/udprelay/metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package udprelay diff --git a/net/udprelay/metrics_test.go b/net/udprelay/metrics_test.go index 0b7650534f884..da90550b88920 100644 --- a/net/udprelay/metrics_test.go +++ b/net/udprelay/metrics_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package udprelay diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 38ee04df9e1ca..3d870904493ec 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package udprelay contains constructs for relaying Disco and WireGuard packets diff --git a/net/udprelay/server_linux.go b/net/udprelay/server_linux.go index d4cf2a2b16ee9..3a734f9c75505 100644 --- a/net/udprelay/server_linux.go +++ b/net/udprelay/server_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/net/udprelay/server_notlinux.go b/net/udprelay/server_notlinux.go index f21020631f76e..027ffb7658aa2 100644 --- a/net/udprelay/server_notlinux.go +++ b/net/udprelay/server_notlinux.go 
@@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index cb6b05eea2108..66de0d88a7d0d 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package udprelay diff --git a/net/udprelay/status/status.go b/net/udprelay/status/status.go index 9ed9a0d2a8def..d64792ab6032e 100644 --- a/net/udprelay/status/status.go +++ b/net/udprelay/status/status.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package status contains types relating to the status of peer relay sessions diff --git a/net/wsconn/wsconn.go b/net/wsconn/wsconn.go index 9e44da59ca1d7..fed734cf5ffd8 100644 --- a/net/wsconn/wsconn.go +++ b/net/wsconn/wsconn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wsconn contains an adapter type that turns diff --git a/omit/aws_def.go b/omit/aws_def.go index 8ae539736b28c..7f48881c10bcf 100644 --- a/omit/aws_def.go +++ b/omit/aws_def.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_aws diff --git a/omit/aws_omit.go b/omit/aws_omit.go index 5b6957d5b639b..f077041158f0d 100644 --- a/omit/aws_omit.go +++ b/omit/aws_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_aws diff --git a/omit/omit.go b/omit/omit.go index 018cfba94545c..e59d4fc3c61f7 100644 --- a/omit/omit.go +++ 
b/omit/omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package omit provides consts to access Tailscale ts_omit_FOO build tags. diff --git a/packages/deb/deb.go b/packages/deb/deb.go index cab0fea075e74..63f30fc9d7d4f 100644 --- a/packages/deb/deb.go +++ b/packages/deb/deb.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package deb extracts metadata from Debian packages. diff --git a/packages/deb/deb_test.go b/packages/deb/deb_test.go index 1a25f67ad4875..fb8a6454c3ab7 100644 --- a/packages/deb/deb_test.go +++ b/packages/deb/deb_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deb diff --git a/paths/migrate.go b/paths/migrate.go index 3a23ecca34fdc..22f947611f4cd 100644 --- a/paths/migrate.go +++ b/paths/migrate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package paths diff --git a/paths/paths.go b/paths/paths.go index 6c9c3fa6c9dea..398d8b23d8988 100644 --- a/paths/paths.go +++ b/paths/paths.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package paths returns platform and user-specific default paths to diff --git a/paths/paths_unix.go b/paths/paths_unix.go index d317921d59cd9..b1556b233104f 100644 --- a/paths/paths_unix.go +++ b/paths/paths_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !wasm && !plan9 && !tamago diff --git a/paths/paths_windows.go b/paths/paths_windows.go index 4705400655212..850a1c97b52a0 
100644 --- a/paths/paths_windows.go +++ b/paths/paths_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package paths diff --git a/pkgdoc_test.go b/pkgdoc_test.go index 0f4a455288950..b3a902bf41f4b 100644 --- a/pkgdoc_test.go +++ b/pkgdoc_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscaleroot diff --git a/portlist/clean.go b/portlist/clean.go index 7e137de948e99..f6c3f4a6b3587 100644 --- a/portlist/clean.go +++ b/portlist/clean.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/clean_test.go b/portlist/clean_test.go index 5a1e34405eed0..e7a5f6a0ca4ac 100644 --- a/portlist/clean_test.go +++ b/portlist/clean_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/netstat.go b/portlist/netstat.go index 5fdef675d0e2a..de625afb52170 100644 --- a/portlist/netstat.go +++ b/portlist/netstat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !ios diff --git a/portlist/netstat_test.go b/portlist/netstat_test.go index 023b75b794426..7048e90b2ffea 100644 --- a/portlist/netstat_test.go +++ b/portlist/netstat_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !ios diff --git a/portlist/poller.go b/portlist/poller.go index 423bad3be33ba..a8e611054eb1b 100644 --- a/portlist/poller.go +++ b/portlist/poller.go @@ -1,4 +1,4 @@ -// Copyright (c) 
Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file contains the code related to the Poller type and its methods. diff --git a/portlist/portlist.go b/portlist/portlist.go index 9f7af40d08dc1..9430e2562268b 100644 --- a/portlist/portlist.go +++ b/portlist/portlist.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file is just the types. The bulk of the code is in poller.go. diff --git a/portlist/portlist_linux.go b/portlist/portlist_linux.go index 94f843746c29d..159c4beb3a74a 100644 --- a/portlist/portlist_linux.go +++ b/portlist/portlist_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/portlist_linux_test.go b/portlist/portlist_linux_test.go index 24635fae26577..4b541f8e7dd70 100644 --- a/portlist/portlist_linux_test.go +++ b/portlist/portlist_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/portlist_macos.go b/portlist/portlist_macos.go index e67b2c9b8c064..d210fdd946de1 100644 --- a/portlist/portlist_macos.go +++ b/portlist/portlist_macos.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !ios diff --git a/portlist/portlist_plan9.go b/portlist/portlist_plan9.go index 77f8619f97ffa..62ed61fb3ddea 100644 --- a/portlist/portlist_plan9.go +++ b/portlist/portlist_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/portlist_test.go 
b/portlist/portlist_test.go index 8503b0fefdf50..5e0964b248882 100644 --- a/portlist/portlist_test.go +++ b/portlist/portlist_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/portlist/portlist_windows.go b/portlist/portlist_windows.go index f449973599247..bd603dbfd8f91 100644 --- a/portlist/portlist_windows.go +++ b/portlist/portlist_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package portlist diff --git a/posture/doc.go b/posture/doc.go index d061065235b99..14fd21998647e 100644 --- a/posture/doc.go +++ b/posture/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package posture contains functions to query the local system diff --git a/posture/hwaddr.go b/posture/hwaddr.go index dd0b6d8be77ce..2075331f16727 100644 --- a/posture/hwaddr.go +++ b/posture/hwaddr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package posture diff --git a/posture/serialnumber_macos.go b/posture/serialnumber_macos.go index 18c929107a768..fed0d4111fb83 100644 --- a/posture/serialnumber_macos.go +++ b/posture/serialnumber_macos.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo && darwin && !ios diff --git a/posture/serialnumber_macos_test.go b/posture/serialnumber_macos_test.go index 9d9b9f578da55..5f1aec5cd790b 100644 --- a/posture/serialnumber_macos_test.go +++ b/posture/serialnumber_macos_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause //go:build cgo && darwin && !ios diff --git a/posture/serialnumber_notmacos.go b/posture/serialnumber_notmacos.go index 132fa08f6a56e..e076b8f3dcfdf 100644 --- a/posture/serialnumber_notmacos.go +++ b/posture/serialnumber_notmacos.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Build on Windows, Linux and *BSD diff --git a/posture/serialnumber_notmacos_test.go b/posture/serialnumber_notmacos_test.go index da5aada8509e3..1009ea6b4a208 100644 --- a/posture/serialnumber_notmacos_test.go +++ b/posture/serialnumber_notmacos_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Build on Windows, Linux and *BSD diff --git a/posture/serialnumber_stub.go b/posture/serialnumber_stub.go index 854a0014bd1bf..e040aacfb30e2 100644 --- a/posture/serialnumber_stub.go +++ b/posture/serialnumber_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // js: not implemented diff --git a/posture/serialnumber_syspolicy.go b/posture/serialnumber_syspolicy.go index 64a154a2cae0b..448fdb677abef 100644 --- a/posture/serialnumber_syspolicy.go +++ b/posture/serialnumber_syspolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build android || ios diff --git a/posture/serialnumber_test.go b/posture/serialnumber_test.go index 6db3651e21cd7..20e726d9ff530 100644 --- a/posture/serialnumber_test.go +++ b/posture/serialnumber_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package posture diff --git a/prober/derp.go b/prober/derp.go index 22843b53a4049..73ea02cf5ad4f 100644 
--- a/prober/derp.go +++ b/prober/derp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/derp_test.go b/prober/derp_test.go index 08a65d6978f13..364d57481ae20 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/dns.go b/prober/dns.go index 77e22ea3f89ba..cfef252716ae9 100644 --- a/prober/dns.go +++ b/prober/dns.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/dns_example_test.go b/prober/dns_example_test.go index 089816919489a..625ecec0c1411 100644 --- a/prober/dns_example_test.go +++ b/prober/dns_example_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober_test diff --git a/prober/dns_test.go b/prober/dns_test.go index 1b6c31b554877..4eaea199ae289 100644 --- a/prober/dns_test.go +++ b/prober/dns_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/histogram.go b/prober/histogram.go index c544a5f79bb17..5c52894f9eb02 100644 --- a/prober/histogram.go +++ b/prober/histogram.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/histogram_test.go b/prober/histogram_test.go index dbb5eda6741a5..2c7deea354a89 100644 --- a/prober/histogram_test.go +++ b/prober/histogram_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright 
(c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/http.go b/prober/http.go index e4b0b26fd3e7d..144ed3fb55195 100644 --- a/prober/http.go +++ b/prober/http.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/prober.go b/prober/prober.go index 6b904dd97d231..16c262bc81c0d 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package prober implements a simple blackbox prober. Each probe runs diff --git a/prober/prober_test.go b/prober/prober_test.go index 1e045fa8971b0..c945f617a6633 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/status.go b/prober/status.go index 20fbeec58a77e..a06d3d55c4e6c 100644 --- a/prober/status.go +++ b/prober/status.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/tcp.go b/prober/tcp.go index 22d05461652a4..f932be44553ab 100644 --- a/prober/tcp.go +++ b/prober/tcp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/tls.go b/prober/tls.go index 3ce5354357d71..1247f9502e8f6 100644 --- a/prober/tls.go +++ b/prober/tls.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/tls_test.go b/prober/tls_test.go index 
86fba91b98836..a32693762f291 100644 --- a/prober/tls_test.go +++ b/prober/tls_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prober diff --git a/prober/tun_darwin.go b/prober/tun_darwin.go index 0ef22e41e4076..45c5415acd8d0 100644 --- a/prober/tun_darwin.go +++ b/prober/tun_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin diff --git a/prober/tun_default.go b/prober/tun_default.go index 93a5b07fd442a..2094e19933c06 100644 --- a/prober/tun_default.go +++ b/prober/tun_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux && !darwin diff --git a/prober/tun_linux.go b/prober/tun_linux.go index 52a31efbbf66a..7a28a4b3f829e 100644 --- a/prober/tun_linux.go +++ b/prober/tun_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/proxymap/proxymap.go b/proxymap/proxymap.go index 20dc96c848307..2407371513d89 100644 --- a/proxymap/proxymap.go +++ b/proxymap/proxymap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package proxymap contains a mapping table for ephemeral localhost ports used diff --git a/release/dist/cli/cli.go b/release/dist/cli/cli.go index f4480cbdbdfa4..ca4977f5d2cbe 100644 --- a/release/dist/cli/cli.go +++ b/release/dist/cli/cli.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cli provides the skeleton of a CLI for building release packages. 
diff --git a/release/dist/dist.go b/release/dist/dist.go index 6fb0102993cbd..094d0a0e04c46 100644 --- a/release/dist/dist.go +++ b/release/dist/dist.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dist is a release artifact builder library. diff --git a/release/dist/memoize.go b/release/dist/memoize.go index 0927ac0a81540..bdf0f68ff9fd6 100644 --- a/release/dist/memoize.go +++ b/release/dist/memoize.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dist diff --git a/release/dist/qnap/pkgs.go b/release/dist/qnap/pkgs.go index 5062011f06ea6..1d69b3eaf3500 100644 --- a/release/dist/qnap/pkgs.go +++ b/release/dist/qnap/pkgs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package qnap contains dist Targets for building QNAP Tailscale packages. diff --git a/release/dist/qnap/targets.go b/release/dist/qnap/targets.go index 0a02139548b17..3eef3cbbe693d 100644 --- a/release/dist/qnap/targets.go +++ b/release/dist/qnap/targets.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package qnap diff --git a/release/dist/synology/pkgs.go b/release/dist/synology/pkgs.go index ab89dbee3e19f..c2fe6528e4dfe 100644 --- a/release/dist/synology/pkgs.go +++ b/release/dist/synology/pkgs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package synology contains dist Targets for building Synology Tailscale packages. 
diff --git a/release/dist/synology/targets.go b/release/dist/synology/targets.go index bc7b20afca5d3..2f08510557d8d 100644 --- a/release/dist/synology/targets.go +++ b/release/dist/synology/targets.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package synology diff --git a/release/dist/unixpkgs/pkgs.go b/release/dist/unixpkgs/pkgs.go index bad6ce572e675..d251ff621f98a 100644 --- a/release/dist/unixpkgs/pkgs.go +++ b/release/dist/unixpkgs/pkgs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package unixpkgs contains dist Targets for building unix Tailscale packages. diff --git a/release/dist/unixpkgs/targets.go b/release/dist/unixpkgs/targets.go index 42bab6d3b2685..b5f96fc38b3f0 100644 --- a/release/dist/unixpkgs/targets.go +++ b/release/dist/unixpkgs/targets.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package unixpkgs diff --git a/release/release.go b/release/release.go index a8d0e6b62e8d7..314bb0d8e7473 100644 --- a/release/release.go +++ b/release/release.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package release provides functionality for building client releases. 
diff --git a/safesocket/basic_test.go b/safesocket/basic_test.go index 292a3438a0e75..9cef300497422 100644 --- a/safesocket/basic_test.go +++ b/safesocket/basic_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/pipe_windows.go b/safesocket/pipe_windows.go index 2968542f2ccf4..0ffee762f8840 100644 --- a/safesocket/pipe_windows.go +++ b/safesocket/pipe_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/pipe_windows_test.go b/safesocket/pipe_windows_test.go index 8d9cbd19b5e43..5d4e68cc251c9 100644 --- a/safesocket/pipe_windows_test.go +++ b/safesocket/pipe_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/safesocket.go b/safesocket/safesocket.go index 287cdca599f77..6be8ae5b8fac3 100644 --- a/safesocket/safesocket.go +++ b/safesocket/safesocket.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package safesocket creates either a Unix socket, if possible, or diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index e2b3ea4581059..8cbabff63364e 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/safesocket_darwin_test.go b/safesocket/safesocket_darwin_test.go index e52959ad58dcf..d828a80f5dfe2 100644 --- a/safesocket/safesocket_darwin_test.go +++ b/safesocket/safesocket_darwin_test.go 
@@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/safesocket_js.go b/safesocket/safesocket_js.go index 38e615da43535..746fea51115c4 100644 --- a/safesocket/safesocket_js.go +++ b/safesocket/safesocket_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/safesocket_plan9.go b/safesocket/safesocket_plan9.go index c8a5e3b05bbef..921e758748d5c 100644 --- a/safesocket/safesocket_plan9.go +++ b/safesocket/safesocket_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build plan9 diff --git a/safesocket/safesocket_ps.go b/safesocket/safesocket_ps.go index d3f409df58d15..6130ca5b0d574 100644 --- a/safesocket/safesocket_ps.go +++ b/safesocket/safesocket_ps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ((linux && !android) || windows || (darwin && !ios) || freebsd) && !ts_omit_cliconndiag diff --git a/safesocket/safesocket_test.go b/safesocket/safesocket_test.go index 3f36a1cf6ca1f..be2dd193d8e32 100644 --- a/safesocket/safesocket_test.go +++ b/safesocket/safesocket_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safesocket diff --git a/safesocket/unixsocket.go b/safesocket/unixsocket.go index ec8635bbbf0d7..6fe3883c32c9d 100644 --- a/safesocket/unixsocket.go +++ b/safesocket/unixsocket.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !js && !plan9 
diff --git a/safeweb/http.go b/safeweb/http.go index d085fcb8819d8..f76591cbd0e16 100644 --- a/safeweb/http.go +++ b/safeweb/http.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package safeweb provides a wrapper around an http.Server that applies diff --git a/safeweb/http_test.go b/safeweb/http_test.go index 852ce326ba374..cbac7210a4807 100644 --- a/safeweb/http_test.go +++ b/safeweb/http_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safeweb diff --git a/scripts/installer.sh b/scripts/installer.sh index 76e8943e9931f..8ffd3f5720a2d 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -1,5 +1,5 @@ #!/bin/sh -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # # This script detects the current operating system, and installs diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index 9d20b41f9b31a..6135688ca0153 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package sessionrecording contains session recording utils shared amongst diff --git a/sessionrecording/connect_test.go b/sessionrecording/connect_test.go index e834828f5a6cc..64bcb1c3185d3 100644 --- a/sessionrecording/connect_test.go +++ b/sessionrecording/connect_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sessionrecording diff --git a/sessionrecording/event.go b/sessionrecording/event.go index 8f8172cc4b303..0597048a2113f 100644 --- a/sessionrecording/event.go +++ b/sessionrecording/event.go @@ -1,4 
+1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sessionrecording diff --git a/sessionrecording/header.go b/sessionrecording/header.go index 2208522168dec..95b70962cefd8 100644 --- a/sessionrecording/header.go +++ b/sessionrecording/header.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sessionrecording diff --git a/ssh/tailssh/accept_env.go b/ssh/tailssh/accept_env.go index 6461a79a3408b..6354d41d76a6b 100644 --- a/ssh/tailssh/accept_env.go +++ b/ssh/tailssh/accept_env.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailssh diff --git a/ssh/tailssh/accept_env_test.go b/ssh/tailssh/accept_env_test.go index b54c980978ece..25787db302357 100644 --- a/ssh/tailssh/accept_env_test.go +++ b/ssh/tailssh/accept_env_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailssh diff --git a/ssh/tailssh/auditd_linux.go b/ssh/tailssh/auditd_linux.go index e9f551d9e7991..bddb901d5cebe 100644 --- a/ssh/tailssh/auditd_linux.go +++ b/ssh/tailssh/auditd_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/ssh/tailssh/auditd_linux_test.go b/ssh/tailssh/auditd_linux_test.go index 93f5442918a98..c3c2302fe9e66 100644 --- a/ssh/tailssh/auditd_linux_test.go +++ b/ssh/tailssh/auditd_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go 
index f75646771057a..b414ce3fbf42a 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file contains the code for the incubator process. Tailscaled diff --git a/ssh/tailssh/incubator_linux.go b/ssh/tailssh/incubator_linux.go index 4dfb9f27cc097..cff46160758f3 100644 --- a/ssh/tailssh/incubator_linux.go +++ b/ssh/tailssh/incubator_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/ssh/tailssh/incubator_plan9.go b/ssh/tailssh/incubator_plan9.go index 61b6a54ebdc94..69112635f5c11 100644 --- a/ssh/tailssh/incubator_plan9.go +++ b/ssh/tailssh/incubator_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file contains the plan9-specific version of the incubator. 
Tailscaled diff --git a/ssh/tailssh/privs_test.go b/ssh/tailssh/privs_test.go index 32b219a7798ca..f0ec66c64e581 100644 --- a/ssh/tailssh/privs_test.go +++ b/ssh/tailssh/privs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 91e1779bfd543..9d5a7d2a880db 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 diff --git a/ssh/tailssh/tailssh_integration_test.go b/ssh/tailssh/tailssh_integration_test.go index 9ab26e169665b..1135bebbc2a5b 100644 --- a/ssh/tailssh/tailssh_integration_test.go +++ b/ssh/tailssh/tailssh_integration_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build integrationtest diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 3b6d3c52c391c..f91cbafe72213 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || darwin diff --git a/ssh/tailssh/user.go b/ssh/tailssh/user.go index ac92c762a875e..7da6bb4eb387f 100644 --- a/ssh/tailssh/user.go +++ b/ssh/tailssh/user.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 diff --git a/syncs/locked.go b/syncs/locked.go index 
d2e9edef7a9dd..5c94e6336fb7a 100644 --- a/syncs/locked.go +++ b/syncs/locked.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/locked_test.go b/syncs/locked_test.go index 90b36e8321d82..94481f9cb2205 100644 --- a/syncs/locked_test.go +++ b/syncs/locked_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build go1.13 && !go1.19 diff --git a/syncs/mutex.go b/syncs/mutex.go index 8034e17121717..cb60c3432b5cc 100644 --- a/syncs/mutex.go +++ b/syncs/mutex.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_mutex_debug diff --git a/syncs/mutex_debug.go b/syncs/mutex_debug.go index 55a9b1231092f..7af1e9abfe12d 100644 --- a/syncs/mutex_debug.go +++ b/syncs/mutex_debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_mutex_debug diff --git a/syncs/pool.go b/syncs/pool.go index 46ffd2e521783..9a13dd526f55d 100644 --- a/syncs/pool.go +++ b/syncs/pool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/pool_test.go b/syncs/pool_test.go index 798b18cbabfd8..34ca9973ff334 100644 --- a/syncs/pool_test.go +++ b/syncs/pool_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/shardedint.go b/syncs/shardedint.go index 28c4168d54c79..c0fda341fb564 100644 --- a/syncs/shardedint.go +++ b/syncs/shardedint.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/shardedint_test.go b/syncs/shardedint_test.go index 815a739d13842..8c3f7ef7bd915 100644 --- a/syncs/shardedint_test.go +++ b/syncs/shardedint_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs_test diff --git a/syncs/shardedmap.go b/syncs/shardedmap.go index 12edf5bfce475..6f53522360464 100644 --- a/syncs/shardedmap.go +++ b/syncs/shardedmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/shardedmap_test.go b/syncs/shardedmap_test.go index 993ffdff875c2..0491bf3dd1fbf 100644 --- a/syncs/shardedmap_test.go +++ b/syncs/shardedmap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/shardvalue.go b/syncs/shardvalue.go index b1474477c7082..fcb5d3c73207e 100644 --- a/syncs/shardvalue.go +++ b/syncs/shardvalue.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/shardvalue_go.go b/syncs/shardvalue_go.go index 9b9d252a796d4..8531993319d1e 100644 --- a/syncs/shardvalue_go.go +++ b/syncs/shardvalue_go.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !tailscale_go diff --git a/syncs/shardvalue_tailscale.go b/syncs/shardvalue_tailscale.go index 8ef778ff3e669..6b03d7d0dd58e 100644 --- a/syncs/shardvalue_tailscale.go +++ b/syncs/shardvalue_tailscale.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause // TODO(raggi): update build tag after toolchain update diff --git a/syncs/shardvalue_test.go b/syncs/shardvalue_test.go index 8f6ac6414dee7..1dd0a542e60c2 100644 --- a/syncs/shardvalue_test.go +++ b/syncs/shardvalue_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/syncs/syncs.go b/syncs/syncs.go index 3b37bca085c89..d447b2e7bc4e0 100644 --- a/syncs/syncs.go +++ b/syncs/syncs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package syncs contains additional sync types and functionality. diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index a546b8d0a2343..81fcccbf63aca 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syncs diff --git a/tailcfg/c2ntypes.go b/tailcfg/c2ntypes.go index d78baef1c29a4..d3f5755e81ba1 100644 --- a/tailcfg/c2ntypes.go +++ b/tailcfg/c2ntypes.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // c2n (control-to-node) API types. 
diff --git a/tailcfg/derpmap.go b/tailcfg/derpmap.go index e05559f3ed7f1..c18b04ea11342 100644 --- a/tailcfg/derpmap.go +++ b/tailcfg/derpmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailcfg diff --git a/tailcfg/proto_port_range.go b/tailcfg/proto_port_range.go index 03505dbd131e7..63012e93b2b8e 100644 --- a/tailcfg/proto_port_range.go +++ b/tailcfg/proto_port_range.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailcfg diff --git a/tailcfg/proto_port_range_test.go b/tailcfg/proto_port_range_test.go index 59ccc9be4a1a8..c0c5ff5d5cb76 100644 --- a/tailcfg/proto_port_range_test.go +++ b/tailcfg/proto_port_range_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailcfg diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 8468aa09efb3e..535c42b212b2e 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tailcfg contains types used by the Tailscale protocol with between diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 751b7c288f274..483746145b6e1 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. 
diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 6691263eb997a..4e9909db09f89 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailcfg_test diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index dbd29a87a354e..b2734d8af36c9 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. diff --git a/tailcfg/tka.go b/tailcfg/tka.go index 97fdcc0db687a..29c17b7567198 100644 --- a/tailcfg/tka.go +++ b/tailcfg/tka.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailcfg diff --git a/tka/aum.go b/tka/aum.go index b8c4b6c9e14d4..44d289906566c 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/tka/aum_test.go b/tka/aum_test.go index 833a026544f54..4f32e91a1964f 100644 --- a/tka/aum_test.go +++ b/tka/aum_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/builder.go b/tka/builder.go index ab2364d856ee2..1e7b130151876 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/tka/builder_test.go b/tka/builder_test.go index 3fd32f64eac12..edca1e95a516e 100644 --- 
a/tka/builder_test.go +++ b/tka/builder_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/chaintest_test.go b/tka/chaintest_test.go index a3122b5d19da8..c370bf60a2e4c 100644 --- a/tka/chaintest_test.go +++ b/tka/chaintest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/deeplink.go b/tka/deeplink.go index 5570a19d7371b..34f80be034bb0 100644 --- a/tka/deeplink.go +++ b/tka/deeplink.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/tka/deeplink_test.go b/tka/deeplink_test.go index 03523202fed8b..6d85b158589ac 100644 --- a/tka/deeplink_test.go +++ b/tka/deeplink_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/disabled_stub.go b/tka/disabled_stub.go index 4c4afa3706d98..d14473e5ec1ac 100644 --- a/tka/disabled_stub.go +++ b/tka/disabled_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_tailnetlock diff --git a/tka/key.go b/tka/key.go index dca1b4416560b..bc946156eb9be 100644 --- a/tka/key.go +++ b/tka/key.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/key_test.go b/tka/key_test.go index 327de1a0e2851..799accc857e1c 100644 --- a/tka/key_test.go +++ b/tka/key_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/scenario_test.go b/tka/scenario_test.go index a0361a130dcc6..cf4ee2d5b2582 100644 --- a/tka/scenario_test.go +++ b/tka/scenario_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/sig.go b/tka/sig.go index 46d598ad97b47..9d107c98ff64c 100644 --- a/tka/sig.go +++ b/tka/sig.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/tka/sig_test.go b/tka/sig_test.go index c5c03ef2e0055..efec62b7d791f 100644 --- a/tka/sig_test.go +++ b/tka/sig_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/state.go b/tka/state.go index 95a319bd9bd7d..06fdc65048b59 100644 --- a/tka/state.go +++ b/tka/state.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/tka/state_test.go b/tka/state_test.go index 32b6563145ee7..337e3c3ceff85 100644 --- a/tka/state_test.go +++ b/tka/state_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/tka/sync.go b/tka/sync.go index 2dbfb7ac435b2..27e1c0e633329 100644 --- a/tka/sync.go +++ b/tka/sync.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/tka/sync_test.go b/tka/sync_test.go index ea14a37e57e9b..158f73c46cb01 100644 --- a/tka/sync_test.go +++ b/tka/sync_test.go @@ 
-1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 13bdf6aac86d4..256faaea2b8b9 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index eeb6edfff3018..d40e4b09da769 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/tka.go b/tka/tka.go index ed029c82e0592..e3862c29d3264 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/tka/tka_clone.go b/tka/tka_clone.go index 323a824fe5a63..9c7a6eeb3350d 100644 --- a/tka/tka_clone.go +++ b/tka/tka_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. 
diff --git a/tka/tka_test.go b/tka/tka_test.go index cc9ea57ee2f6a..f2ce73d357343 100644 --- a/tka/tka_test.go +++ b/tka/tka_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tka diff --git a/tka/verify.go b/tka/verify.go index ed0ecea669817..1ef4fbbb19308 100644 --- a/tka/verify.go +++ b/tka/verify.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/tka/verify_disabled.go b/tka/verify_disabled.go index ba72f93e27d8f..a4b3136d2ffea 100644 --- a/tka/verify_disabled.go +++ b/tka/verify_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_tailnetlock diff --git a/tool/gocross/autoflags.go b/tool/gocross/autoflags.go index b28d3bc5dd26e..405cad8b3b68e 100644 --- a/tool/gocross/autoflags.go +++ b/tool/gocross/autoflags.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/autoflags_test.go b/tool/gocross/autoflags_test.go index a0f3edfd2bb68..7363e452ed635 100644 --- a/tool/gocross/autoflags_test.go +++ b/tool/gocross/autoflags_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/env.go b/tool/gocross/env.go index 9d8a4f1b390b4..6b22f9365d255 100644 --- a/tool/gocross/env.go +++ b/tool/gocross/env.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/env_test.go b/tool/gocross/env_test.go index 
001487bb8e1a6..39af579eb15f1 100644 --- a/tool/gocross/env_test.go +++ b/tool/gocross/env_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/exec_other.go b/tool/gocross/exec_other.go index 4dd74f84d7d2b..20e52aa8f9496 100644 --- a/tool/gocross/exec_other.go +++ b/tool/gocross/exec_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !unix diff --git a/tool/gocross/exec_unix.go b/tool/gocross/exec_unix.go index 79cbf764ad2f6..2d9fd72ba046b 100644 --- a/tool/gocross/exec_unix.go +++ b/tool/gocross/exec_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build unix diff --git a/tool/gocross/gocross-wrapper.ps1 b/tool/gocross/gocross-wrapper.ps1 index 324b220c8319d..df00d36641ad7 100644 --- a/tool/gocross/gocross-wrapper.ps1 +++ b/tool/gocross/gocross-wrapper.ps1 @@ -1,4 +1,4 @@ -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause #Requires -Version 7.4 diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index d93b137aab6f5..352d639b75530 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright (c) Tailscale Inc & AUTHORS +# Copyright (c) Tailscale Inc & contributors # SPDX-License-Identifier: BSD-3-Clause # # gocross-wrapper.sh is a wrapper that can be aliased to 'go', which diff --git a/tool/gocross/gocross.go b/tool/gocross/gocross.go index 41fab3d584260..67d4bfceee74f 100644 --- a/tool/gocross/gocross.go +++ b/tool/gocross/gocross.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors 
// SPDX-License-Identifier: BSD-3-Clause // gocross is a wrapper around the `go` tool that invokes `go` from Tailscale's diff --git a/tool/gocross/gocross_test.go b/tool/gocross/gocross_test.go index 82afd268c6d8f..2737432e2d0dc 100644 --- a/tool/gocross/gocross_test.go +++ b/tool/gocross/gocross_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/gocross_wrapper_test.go b/tool/gocross/gocross_wrapper_test.go index 6937ccec7188f..7fc81207f6379 100644 --- a/tool/gocross/gocross_wrapper_test.go +++ b/tool/gocross/gocross_wrapper_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || darwin diff --git a/tool/gocross/gocross_wrapper_windows_test.go b/tool/gocross/gocross_wrapper_windows_test.go index aa4277425d442..ed565e15ad677 100644 --- a/tool/gocross/gocross_wrapper_windows_test.go +++ b/tool/gocross/gocross_wrapper_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/goroot.go b/tool/gocross/goroot.go index 00e629fdeba63..8ff771889747a 100644 --- a/tool/gocross/goroot.go +++ b/tool/gocross/goroot.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/gocross/toolchain.go b/tool/gocross/toolchain.go index 9cf7f892b9b17..2eb675861bbce 100644 --- a/tool/gocross/toolchain.go +++ b/tool/gocross/toolchain.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/tool/listpkgs/listpkgs.go b/tool/listpkgs/listpkgs.go index 
400bf90c18315..e2c286efc0f7d 100644 --- a/tool/listpkgs/listpkgs.go +++ b/tool/listpkgs/listpkgs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // listpkgs prints the import paths that match the Go package patterns diff --git a/tsconsensus/authorization.go b/tsconsensus/authorization.go index bd8e2f39a014b..6261a8f1debb6 100644 --- a/tsconsensus/authorization.go +++ b/tsconsensus/authorization.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconsensus diff --git a/tsconsensus/authorization_test.go b/tsconsensus/authorization_test.go index e0023f4ff24d2..0f7a4e5958595 100644 --- a/tsconsensus/authorization_test.go +++ b/tsconsensus/authorization_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconsensus diff --git a/tsconsensus/bolt_store.go b/tsconsensus/bolt_store.go index ca347cfc049b2..e8dbb5a227505 100644 --- a/tsconsensus/bolt_store.go +++ b/tsconsensus/bolt_store.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !loong64 diff --git a/tsconsensus/bolt_store_no_bolt.go b/tsconsensus/bolt_store_no_bolt.go index 33b3bd6c7a29f..f799cc5938d10 100644 --- a/tsconsensus/bolt_store_no_bolt.go +++ b/tsconsensus/bolt_store_no_bolt.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build loong64 diff --git a/tsconsensus/http.go b/tsconsensus/http.go index d2a44015f8f68..a7e3af35d94f6 100644 --- a/tsconsensus/http.go +++ b/tsconsensus/http.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause package tsconsensus diff --git a/tsconsensus/monitor.go b/tsconsensus/monitor.go index c84e83454f3f7..cc5ac812c49d9 100644 --- a/tsconsensus/monitor.go +++ b/tsconsensus/monitor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconsensus diff --git a/tsconsensus/tsconsensus.go b/tsconsensus/tsconsensus.go index 1f7dc1b7b6a5e..27cbf964e7207 100644 --- a/tsconsensus/tsconsensus.go +++ b/tsconsensus/tsconsensus.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsconsensus implements a consensus algorithm for a group of tsnet.Servers diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index 796c8f51b76a9..2199a0c6b9441 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconsensus diff --git a/tsconst/health.go b/tsconst/health.go index 5db9b1fc286ec..93c6550efaba4 100644 --- a/tsconst/health.go +++ b/tsconst/health.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconst diff --git a/tsconst/linuxfw.go b/tsconst/linuxfw.go index ce571e40239ed..3a7a4cf2e0b5e 100644 --- a/tsconst/linuxfw.go +++ b/tsconst/linuxfw.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconst diff --git a/tsconst/tsconst.go b/tsconst/tsconst.go index d17aa356d25fe..85f05e54905f9 100644 --- a/tsconst/tsconst.go +++ b/tsconst/tsconst.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright 
(c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsconst exports some constants used elsewhere in the diff --git a/tsconst/webclient.go b/tsconst/webclient.go index d4b3c8db51b2a..705931159d24f 100644 --- a/tsconst/webclient.go +++ b/tsconst/webclient.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsconst diff --git a/tsd/tsd.go b/tsd/tsd.go index 8dc0c14278864..4284a8cd3bade 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsd (short for "Tailscale Daemon") contains a System type that diff --git a/tsnet/example/tshello/tshello.go b/tsnet/example/tshello/tshello.go index 0cadcdd837d99..d45d209dc7abc 100644 --- a/tsnet/example/tshello/tshello.go +++ b/tsnet/example/tshello/tshello.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tshello server demonstrates how to use Tailscale as a library. diff --git a/tsnet/example/tsnet-funnel/tsnet-funnel.go b/tsnet/example/tsnet-funnel/tsnet-funnel.go index 1dac57a1ebf86..27c3e1e5cdf2e 100644 --- a/tsnet/example/tsnet-funnel/tsnet-funnel.go +++ b/tsnet/example/tsnet-funnel/tsnet-funnel.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tsnet-funnel server demonstrates how to use tsnet with Funnel. 
diff --git a/tsnet/example/tsnet-http-client/tsnet-http-client.go b/tsnet/example/tsnet-http-client/tsnet-http-client.go index 9666fe9992745..e61c512a0b085 100644 --- a/tsnet/example/tsnet-http-client/tsnet-http-client.go +++ b/tsnet/example/tsnet-http-client/tsnet-http-client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tshello server demonstrates how to use Tailscale as a library. diff --git a/tsnet/example/tsnet-services/tsnet-services.go b/tsnet/example/tsnet-services/tsnet-services.go index 6eb1a76ab5f5c..d72fd68fd412a 100644 --- a/tsnet/example/tsnet-services/tsnet-services.go +++ b/tsnet/example/tsnet-services/tsnet-services.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The tsnet-services example demonstrates how to use tsnet with Services. diff --git a/tsnet/example/web-client/web-client.go b/tsnet/example/web-client/web-client.go index 541efbaedf3d3..e64eb47e6e14f 100644 --- a/tsnet/example/web-client/web-client.go +++ b/tsnet/example/web-client/web-client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The web-client command demonstrates serving the Tailscale web client over tsnet. 
diff --git a/tsnet/example_tshello_test.go b/tsnet/example_tshello_test.go index d534bcfd1f1d4..62b6737fd5245 100644 --- a/tsnet/example_tshello_test.go +++ b/tsnet/example_tshello_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsnet_test diff --git a/tsnet/example_tsnet_listen_service_multiple_ports_test.go b/tsnet/example_tsnet_listen_service_multiple_ports_test.go index 04781c2b20d16..0c7b3899955e1 100644 --- a/tsnet/example_tsnet_listen_service_multiple_ports_test.go +++ b/tsnet/example_tsnet_listen_service_multiple_ports_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsnet_test diff --git a/tsnet/example_tsnet_test.go b/tsnet/example_tsnet_test.go index 2a3236b3b6501..dbaa8111fb623 100644 --- a/tsnet/example_tsnet_test.go +++ b/tsnet/example_tsnet_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsnet_test diff --git a/tsnet/packet_filter_test.go b/tsnet/packet_filter_test.go index 455400eaa0c8a..ca776436e7085 100644 --- a/tsnet/packet_filter_test.go +++ b/tsnet/packet_filter_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsnet diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index d627d55b37314..ccea22d1619f1 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsnet provides Tailscale as a library. 
diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 2c6970fa3b723..aeee43646cb0a 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsnet diff --git a/tstest/allocs.go b/tstest/allocs.go index f15a00508d87f..6c2a1a22bec6a 100644 --- a/tstest/allocs.go +++ b/tstest/allocs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/archtest/archtest_test.go b/tstest/archtest/archtest_test.go index 1aeca5c109073..1523baf7b2044 100644 --- a/tstest/archtest/archtest_test.go +++ b/tstest/archtest/archtest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package archtest diff --git a/tstest/archtest/qemu_test.go b/tstest/archtest/qemu_test.go index 68ec38851069e..400f8bc4f9ea0 100644 --- a/tstest/archtest/qemu_test.go +++ b/tstest/archtest/qemu_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && amd64 && !race diff --git a/tstest/chonktest/chonktest.go b/tstest/chonktest/chonktest.go index 404f1ec47f16c..b0b32e6151c82 100644 --- a/tstest/chonktest/chonktest.go +++ b/tstest/chonktest/chonktest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package chonktest contains a shared set of tests for the Chonk diff --git a/tstest/chonktest/tailchonk_test.go b/tstest/chonktest/tailchonk_test.go index d9343e9160ea9..99b57f54f5900 100644 --- a/tstest/chonktest/tailchonk_test.go +++ b/tstest/chonktest/tailchonk_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package chonktest diff --git a/tstest/clock.go b/tstest/clock.go index ee7523430ff54..f11187a4a69e5 100644 --- a/tstest/clock.go +++ b/tstest/clock.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/clock_test.go b/tstest/clock_test.go index 2ebaf752a1963..cdfc2319ac115 100644 --- a/tstest/clock_test.go +++ b/tstest/clock_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go index c0b6d8b8cffb5..3117af2fffa01 100644 --- a/tstest/deptest/deptest.go +++ b/tstest/deptest/deptest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The deptest package contains a shared implementation of negative diff --git a/tstest/deptest/deptest_test.go b/tstest/deptest/deptest_test.go index ebafa56849efb..1b83d46d3cc31 100644 --- a/tstest/deptest/deptest_test.go +++ b/tstest/deptest/deptest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deptest diff --git a/tstest/integration/capmap_test.go b/tstest/integration/capmap_test.go index 0ee05be2f57d7..aea4a210b44e1 100644 --- a/tstest/integration/capmap_test.go +++ b/tstest/integration/capmap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package integration diff --git a/tstest/integration/gen_deps.go b/tstest/integration/gen_deps.go index 23bb95ee56a9f..7e668266bbb78 100644 --- a/tstest/integration/gen_deps.go +++ 
b/tstest/integration/gen_deps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore @@ -35,7 +35,7 @@ func generate(goos string) { log.Fatal(err) } var out bytes.Buffer - out.WriteString(`// Copyright (c) Tailscale Inc & AUTHORS + out.WriteString(`// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index a62173ae3e353..a98df81808097 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package integration contains Tailscale integration tests. diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index fc891ad722b28..779cba6290cfe 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package integration diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go index 15f1269858947..2aea7c296701b 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package nat diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 9f92839d8cde7..112f04767c89d 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 9f92839d8cde7..112f04767c89d 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 9f92839d8cde7..112f04767c89d 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 9f92839d8cde7..112f04767c89d 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 82f8097c8bc36..cabac744a5c6c 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by gen_deps.go; DO NOT EDIT. 
diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 447efb0c1b15d..4607665924c45 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package testcontrol contains a minimal control plane server for testing purposes. diff --git a/tstest/integration/vms/derive_bindhost_test.go b/tstest/integration/vms/derive_bindhost_test.go index 728f60c01e465..079308055da3a 100644 --- a/tstest/integration/vms/derive_bindhost_test.go +++ b/tstest/integration/vms/derive_bindhost_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vms diff --git a/tstest/integration/vms/distros.go b/tstest/integration/vms/distros.go index ca2bf53ba66a7..94f11c77aac5d 100644 --- a/tstest/integration/vms/distros.go +++ b/tstest/integration/vms/distros.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vms diff --git a/tstest/integration/vms/distros_test.go b/tstest/integration/vms/distros_test.go index 462aa2a6bc825..8cc15aa7297fc 100644 --- a/tstest/integration/vms/distros_test.go +++ b/tstest/integration/vms/distros_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vms diff --git a/tstest/integration/vms/dns_tester.go b/tstest/integration/vms/dns_tester.go index 50b39bb5f1fa1..8a0ca5afaf366 100644 --- a/tstest/integration/vms/dns_tester.go +++ b/tstest/integration/vms/dns_tester.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause //go:build ignore diff --git a/tstest/integration/vms/doc.go b/tstest/integration/vms/doc.go index 6093b53ac8ed5..0c9eced92d686 100644 --- a/tstest/integration/vms/doc.go +++ b/tstest/integration/vms/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package vms does VM-based integration/functional tests by using diff --git a/tstest/integration/vms/harness_test.go b/tstest/integration/vms/harness_test.go index 256227d6c64cc..ccff6e81e0f33 100644 --- a/tstest/integration/vms/harness_test.go +++ b/tstest/integration/vms/harness_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 diff --git a/tstest/integration/vms/nixos_test.go b/tstest/integration/vms/nixos_test.go index 02b040fedfaff..7d7a104363761 100644 --- a/tstest/integration/vms/nixos_test.go +++ b/tstest/integration/vms/nixos_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 diff --git a/tstest/integration/vms/top_level_test.go b/tstest/integration/vms/top_level_test.go index 5db237b6e33b7..849abfd2469ed 100644 --- a/tstest/integration/vms/top_level_test.go +++ b/tstest/integration/vms/top_level_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 diff --git a/tstest/integration/vms/udp_tester.go b/tstest/integration/vms/udp_tester.go index be44aa9636103..46bc1261f1765 100644 --- a/tstest/integration/vms/udp_tester.go +++ b/tstest/integration/vms/udp_tester.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause 
//go:build ignore diff --git a/tstest/integration/vms/vm_setup_test.go b/tstest/integration/vms/vm_setup_test.go index 0c6901014bb74..690c89dcf487b 100644 --- a/tstest/integration/vms/vm_setup_test.go +++ b/tstest/integration/vms/vm_setup_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 diff --git a/tstest/integration/vms/vms_steps_test.go b/tstest/integration/vms/vms_steps_test.go index 94e4114f01e78..940c92ddac63c 100644 --- a/tstest/integration/vms/vms_steps_test.go +++ b/tstest/integration/vms/vms_steps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 diff --git a/tstest/integration/vms/vms_test.go b/tstest/integration/vms/vms_test.go index c3a3775de9407..5ebb12b71032b 100644 --- a/tstest/integration/vms/vms_test.go +++ b/tstest/integration/vms/vms_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !plan9 diff --git a/tstest/iosdeps/iosdeps.go b/tstest/iosdeps/iosdeps.go index f414f53dfd0b6..f6290af676e97 100644 --- a/tstest/iosdeps/iosdeps.go +++ b/tstest/iosdeps/iosdeps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package iosdeps is a just a list of the packages we import on iOS, to let us diff --git a/tstest/iosdeps/iosdeps_test.go b/tstest/iosdeps/iosdeps_test.go index b533724eb4b3d..870088e38db9a 100644 --- a/tstest/iosdeps/iosdeps_test.go +++ b/tstest/iosdeps/iosdeps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package iosdeps diff --git 
a/tstest/jsdeps/jsdeps.go b/tstest/jsdeps/jsdeps.go index 1d188152f73b1..964ca51e10fb6 100644 --- a/tstest/jsdeps/jsdeps.go +++ b/tstest/jsdeps/jsdeps.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package jsdeps is a just a list of the packages we import in the diff --git a/tstest/jsdeps/jsdeps_test.go b/tstest/jsdeps/jsdeps_test.go index 27570fc2676b0..ba6dad6badf3d 100644 --- a/tstest/jsdeps/jsdeps_test.go +++ b/tstest/jsdeps/jsdeps_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package jsdeps diff --git a/tstest/kernel_linux.go b/tstest/kernel_linux.go index 664fe9bdd7b9f..ab7c0d529fc13 100644 --- a/tstest/kernel_linux.go +++ b/tstest/kernel_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/tstest/kernel_other.go b/tstest/kernel_other.go index bf69be6df4b27..3dfc3c239576f 100644 --- a/tstest/kernel_other.go +++ b/tstest/kernel_other.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux diff --git a/tstest/log.go b/tstest/log.go index d081c819d8ce2..73e973d238d66 100644 --- a/tstest/log.go +++ b/tstest/log.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/log_test.go b/tstest/log_test.go index 51a5743c2c7f2..34aab000d4ad3 100644 --- a/tstest/log_test.go +++ b/tstest/log_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/mts/mts.go 
b/tstest/mts/mts.go index c10d69d8daca4..c91e0ce996f44 100644 --- a/tstest/mts/mts.go +++ b/tstest/mts/mts.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux || darwin diff --git a/tstest/natlab/firewall.go b/tstest/natlab/firewall.go index c427d6692a29c..e9192cfbd8ab7 100644 --- a/tstest/natlab/firewall.go +++ b/tstest/natlab/firewall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package natlab diff --git a/tstest/natlab/nat.go b/tstest/natlab/nat.go index d756c5bf11833..67e84f44e7b7a 100644 --- a/tstest/natlab/nat.go +++ b/tstest/natlab/nat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package natlab diff --git a/tstest/natlab/natlab.go b/tstest/natlab/natlab.go index ffa02eee46e06..add812d8fe6e3 100644 --- a/tstest/natlab/natlab.go +++ b/tstest/natlab/natlab.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package natlab lets us simulate different types of networks all diff --git a/tstest/natlab/natlab_test.go b/tstest/natlab/natlab_test.go index 84388373236be..d604907017a84 100644 --- a/tstest/natlab/natlab_test.go +++ b/tstest/natlab/natlab_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package natlab diff --git a/tstest/natlab/vnet/conf.go b/tstest/natlab/vnet/conf.go index 07b181540838c..3f83e35c09ba3 100644 --- a/tstest/natlab/vnet/conf.go +++ b/tstest/natlab/vnet/conf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff 
--git a/tstest/natlab/vnet/conf_test.go b/tstest/natlab/vnet/conf_test.go index 6566ac8cf4610..5716a503e4007 100644 --- a/tstest/natlab/vnet/conf_test.go +++ b/tstest/natlab/vnet/conf_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/natlab/vnet/easyaf.go b/tstest/natlab/vnet/easyaf.go index 0901bbdffdd7d..1edc9b3cd16b2 100644 --- a/tstest/natlab/vnet/easyaf.go +++ b/tstest/natlab/vnet/easyaf.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/natlab/vnet/nat.go b/tstest/natlab/vnet/nat.go index ad6f29b3adb58..172e19767b179 100644 --- a/tstest/natlab/vnet/nat.go +++ b/tstest/natlab/vnet/nat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/natlab/vnet/pcap.go b/tstest/natlab/vnet/pcap.go index 41a443e30b6c5..3a766b3759a52 100644 --- a/tstest/natlab/vnet/pcap.go +++ b/tstest/natlab/vnet/pcap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/natlab/vnet/vip.go b/tstest/natlab/vnet/vip.go index 190c9e75f1a62..9d7aa56a3d2a0 100644 --- a/tstest/natlab/vnet/vip.go +++ b/tstest/natlab/vnet/vip.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 49d47f02937ae..357fe213c8c28 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause // Package vnet simulates a virtual Internet containing a set of networks with various diff --git a/tstest/natlab/vnet/vnet_test.go b/tstest/natlab/vnet/vnet_test.go index 5ffa2b1049c88..93f208c29ca0a 100644 --- a/tstest/natlab/vnet/vnet_test.go +++ b/tstest/natlab/vnet/vnet_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package vnet diff --git a/tstest/nettest/nettest.go b/tstest/nettest/nettest.go index c78677dd45c59..0ceef463d8160 100644 --- a/tstest/nettest/nettest.go +++ b/tstest/nettest/nettest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package nettest contains additional test helpers related to network state diff --git a/tstest/reflect.go b/tstest/reflect.go index 125391349a941..22903e7e9fca2 100644 --- a/tstest/reflect.go +++ b/tstest/reflect.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/resource.go b/tstest/resource.go index f50bb3330e846..867925b7ddeb1 100644 --- a/tstest/resource.go +++ b/tstest/resource.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/resource_test.go b/tstest/resource_test.go index 7199ac5d11cbf..ecef91cf60b08 100644 --- a/tstest/resource_test.go +++ b/tstest/resource_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/tailmac/Swift/Common/Config.swift b/tstest/tailmac/Swift/Common/Config.swift index 18b68ae9b9d14..53d7680205a00 100644 --- a/tstest/tailmac/Swift/Common/Config.swift 
+++ b/tstest/tailmac/Swift/Common/Config.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tailmac/Swift/Common/Notifications.swift b/tstest/tailmac/Swift/Common/Notifications.swift index de2216e227eb7..b91741a463c8e 100644 --- a/tstest/tailmac/Swift/Common/Notifications.swift +++ b/tstest/tailmac/Swift/Common/Notifications.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift b/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift index c0961c883fdbb..fc7f2d89dc0e2 100644 --- a/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift +++ b/tstest/tailmac/Swift/Common/TailMacConfigHelper.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tailmac/Swift/Host/AppDelegate.swift b/tstest/tailmac/Swift/Host/AppDelegate.swift index 63c0192da236e..378a524d13c37 100644 --- a/tstest/tailmac/Swift/Host/AppDelegate.swift +++ b/tstest/tailmac/Swift/Host/AppDelegate.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Cocoa diff --git a/tstest/tailmac/Swift/Host/HostCli.swift b/tstest/tailmac/Swift/Host/HostCli.swift index c31478cc39d45..9c9ae6fa0476e 100644 --- a/tstest/tailmac/Swift/Host/HostCli.swift +++ b/tstest/tailmac/Swift/Host/HostCli.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Cocoa diff --git a/tstest/tailmac/Swift/Host/VMController.swift b/tstest/tailmac/Swift/Host/VMController.swift index 
fe4a3828b18fe..a19d7222e1e9e 100644 --- a/tstest/tailmac/Swift/Host/VMController.swift +++ b/tstest/tailmac/Swift/Host/VMController.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Cocoa diff --git a/tstest/tailmac/Swift/TailMac/RestoreImage.swift b/tstest/tailmac/Swift/TailMac/RestoreImage.swift index c2b8b3dd6a878..8346cbe26c408 100644 --- a/tstest/tailmac/Swift/TailMac/RestoreImage.swift +++ b/tstest/tailmac/Swift/TailMac/RestoreImage.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tailmac/Swift/TailMac/TailMac.swift b/tstest/tailmac/Swift/TailMac/TailMac.swift index 84aa5e498a008..3859b9b0b0aeb 100644 --- a/tstest/tailmac/Swift/TailMac/TailMac.swift +++ b/tstest/tailmac/Swift/TailMac/TailMac.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tailmac/Swift/TailMac/VMInstaller.swift b/tstest/tailmac/Swift/TailMac/VMInstaller.swift index 568b6efc4bfe0..7e90079b596c2 100644 --- a/tstest/tailmac/Swift/TailMac/VMInstaller.swift +++ b/tstest/tailmac/Swift/TailMac/VMInstaller.swift @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause import Foundation diff --git a/tstest/tkatest/tkatest.go b/tstest/tkatest/tkatest.go index fb157a1a19315..2726b4deca249 100644 --- a/tstest/tkatest/tkatest.go +++ b/tstest/tkatest/tkatest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // tkatest has functions for creating a mock control server that responds diff --git a/tstest/tlstest/tlstest.go 
b/tstest/tlstest/tlstest.go index 76ec0e7e2dfad..3ab08c61fb211 100644 --- a/tstest/tlstest/tlstest.go +++ b/tstest/tlstest/tlstest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tlstest contains code to help test Tailscale's TLS support without diff --git a/tstest/tlstest/tlstest_test.go b/tstest/tlstest/tlstest_test.go index 8497b872ec7c5..7f3583c8af15a 100644 --- a/tstest/tlstest/tlstest_test.go +++ b/tstest/tlstest/tlstest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tlstest diff --git a/tstest/tools/tools.go b/tstest/tools/tools.go index 4d810483b78b5..439acc053250a 100644 --- a/tstest/tools/tools.go +++ b/tstest/tools/tools.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tools diff --git a/tstest/tstest.go b/tstest/tstest.go index d0828f508a46c..4e00fbaa38ae8 100644 --- a/tstest/tstest.go +++ b/tstest/tstest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tstest provides utilities for use in unit tests. 
diff --git a/tstest/tstest_test.go b/tstest/tstest_test.go index ce59bde538b9a..0c281d2352f7b 100644 --- a/tstest/tstest_test.go +++ b/tstest/tstest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstest diff --git a/tstest/typewalk/typewalk.go b/tstest/typewalk/typewalk.go index b22505351b1a2..f989b4c180394 100644 --- a/tstest/typewalk/typewalk.go +++ b/tstest/typewalk/typewalk.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package typewalk provides utilities to walk Go types using reflection. diff --git a/tstime/jitter.go b/tstime/jitter.go index c5095c15d87ae..987680f3c0ba7 100644 --- a/tstime/jitter.go +++ b/tstime/jitter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstime diff --git a/tstime/jitter_test.go b/tstime/jitter_test.go index 579287bda0bc2..149ed3fa5d6d8 100644 --- a/tstime/jitter_test.go +++ b/tstime/jitter_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstime diff --git a/tstime/mono/mono.go b/tstime/mono/mono.go index 260e02b0fb0f3..8975c2480c748 100644 --- a/tstime/mono/mono.go +++ b/tstime/mono/mono.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mono provides fast monotonic time. 
diff --git a/tstime/mono/mono_test.go b/tstime/mono/mono_test.go index 67a8614baf2ef..dfa6fe1f078a3 100644 --- a/tstime/mono/mono_test.go +++ b/tstime/mono/mono_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package mono diff --git a/tstime/rate/rate.go b/tstime/rate/rate.go index f0473862a2890..3f2f5c9be55f5 100644 --- a/tstime/rate/rate.go +++ b/tstime/rate/rate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This is a modified, simplified version of code from golang.org/x/time/rate. diff --git a/tstime/rate/rate_test.go b/tstime/rate/rate_test.go index dc3f9e84bb851..3486371be565a 100644 --- a/tstime/rate/rate_test.go +++ b/tstime/rate/rate_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This is a modified, simplified version of code from golang.org/x/time/rate. 
diff --git a/tstime/rate/value.go b/tstime/rate/value.go index 610f06bbd7991..8a627ff36119e 100644 --- a/tstime/rate/value.go +++ b/tstime/rate/value.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rate diff --git a/tstime/rate/value_test.go b/tstime/rate/value_test.go index a26442650cf94..e6d60798407f1 100644 --- a/tstime/rate/value_test.go +++ b/tstime/rate/value_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rate diff --git a/tstime/tstime.go b/tstime/tstime.go index 6e5b7f9f47146..8c52a4652beca 100644 --- a/tstime/tstime.go +++ b/tstime/tstime.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tstime defines Tailscale-specific time utilities. diff --git a/tstime/tstime_test.go b/tstime/tstime_test.go index 556ad4e8bb1d0..80d4e318e66bc 100644 --- a/tstime/tstime_test.go +++ b/tstime/tstime_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tstime diff --git a/tsweb/debug.go b/tsweb/debug.go index 4c0fabaff4aea..e4ac7a55909dd 100644 --- a/tsweb/debug.go +++ b/tsweb/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsweb diff --git a/tsweb/debug_test.go b/tsweb/debug_test.go index 2a68ab6fb27b9..b46a3a3f37c32 100644 --- a/tsweb/debug_test.go +++ b/tsweb/debug_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsweb diff --git a/tsweb/log.go b/tsweb/log.go index 51f95e95f5d07..1cb5f28eff29f 100644 --- 
a/tsweb/log.go +++ b/tsweb/log.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsweb diff --git a/tsweb/pprof_default.go b/tsweb/pprof_default.go index 7d22a61619855..a4ac86cdba161 100644 --- a/tsweb/pprof_default.go +++ b/tsweb/pprof_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !wasm diff --git a/tsweb/pprof_js.go b/tsweb/pprof_js.go index 1212b37e86f5a..5635fbb2ca7e9 100644 --- a/tsweb/pprof_js.go +++ b/tsweb/pprof_js.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build js && wasm diff --git a/tsweb/promvarz/promvarz.go b/tsweb/promvarz/promvarz.go index 1d978c7677328..4fdf394d0891b 100644 --- a/tsweb/promvarz/promvarz.go +++ b/tsweb/promvarz/promvarz.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package promvarz combines Prometheus metrics exported by our expvar converter diff --git a/tsweb/promvarz/promvarz_test.go b/tsweb/promvarz/promvarz_test.go index cffbbec2273c8..123330d6e5831 100644 --- a/tsweb/promvarz/promvarz_test.go +++ b/tsweb/promvarz/promvarz_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package promvarz diff --git a/tsweb/request_id.go b/tsweb/request_id.go index 46e52385240ca..351ed1710802b 100644 --- a/tsweb/request_id.go +++ b/tsweb/request_id.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsweb diff --git a/tsweb/tsweb.go b/tsweb/tsweb.go index f6196174b38b2..f464e7af2141e 100644 --- 
a/tsweb/tsweb.go +++ b/tsweb/tsweb.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tsweb contains code used in various Tailscale webservers. diff --git a/tsweb/tsweb_test.go b/tsweb/tsweb_test.go index d4c9721e97215..af8e52420bd50 100644 --- a/tsweb/tsweb_test.go +++ b/tsweb/tsweb_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tsweb diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index b1c66b859e8cf..d6100672c6c56 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package varz contains code to export metrics in Prometheus format. diff --git a/tsweb/varz/varz_test.go b/tsweb/varz/varz_test.go index 5bbacbe356940..6505ba985160e 100644 --- a/tsweb/varz/varz_test.go +++ b/tsweb/varz/varz_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package varz diff --git a/types/appctype/appconnector.go b/types/appctype/appconnector.go index 567ab755f0598..5442e8290cb8a 100644 --- a/types/appctype/appconnector.go +++ b/types/appctype/appconnector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package appcfg contains an experimental configuration structure for diff --git a/types/appctype/appconnector_test.go b/types/appctype/appconnector_test.go index 390d1776a3280..f411faec5bef0 100644 --- a/types/appctype/appconnector_test.go +++ b/types/appctype/appconnector_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package appctype diff --git a/types/bools/bools.go b/types/bools/bools.go index e64068746ed9e..d271b8c28e19a 100644 --- a/types/bools/bools.go +++ b/types/bools/bools.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package bools contains the [Int], [Compare], and [IfElse] functions. diff --git a/types/bools/bools_test.go b/types/bools/bools_test.go index 67faf3bcc92d8..70fcd0fbcb1a2 100644 --- a/types/bools/bools_test.go +++ b/types/bools/bools_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package bools diff --git a/types/dnstype/dnstype.go b/types/dnstype/dnstype.go index a3ba1b0a981e2..1cd38d38385ba 100644 --- a/types/dnstype/dnstype.go +++ b/types/dnstype/dnstype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dnstype defines types for working with DNS. diff --git a/types/dnstype/dnstype_clone.go b/types/dnstype/dnstype_clone.go index 3985704aa0638..e690ebaec3865 100644 --- a/types/dnstype/dnstype_clone.go +++ b/types/dnstype/dnstype_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. 
diff --git a/types/dnstype/dnstype_test.go b/types/dnstype/dnstype_test.go index ada5f687def9f..cf20f4f7f6618 100644 --- a/types/dnstype/dnstype_test.go +++ b/types/dnstype/dnstype_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnstype diff --git a/types/dnstype/dnstype_view.go b/types/dnstype/dnstype_view.go index a983864d0ce42..c91feb6b8aa38 100644 --- a/types/dnstype/dnstype_view.go +++ b/types/dnstype/dnstype_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. diff --git a/types/empty/message.go b/types/empty/message.go index dc8eb4cc2dc37..bee653038be33 100644 --- a/types/empty/message.go +++ b/types/empty/message.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package empty defines an empty struct type. diff --git a/types/flagtype/flagtype.go b/types/flagtype/flagtype.go index be160dee82a21..1e45b04f453ed 100644 --- a/types/flagtype/flagtype.go +++ b/types/flagtype/flagtype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package flagtype defines flag.Value types. 
diff --git a/types/geo/doc.go b/types/geo/doc.go index 749c6308093f6..61c78f78cb653 100644 --- a/types/geo/doc.go +++ b/types/geo/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package geo provides functionality to represent and process geographical diff --git a/types/geo/point.go b/types/geo/point.go index d7160ac593338..820582b0ff6b3 100644 --- a/types/geo/point.go +++ b/types/geo/point.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package geo diff --git a/types/geo/point_test.go b/types/geo/point_test.go index 308c1a1834377..f0d0cb3abba3e 100644 --- a/types/geo/point_test.go +++ b/types/geo/point_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package geo_test diff --git a/types/geo/quantize.go b/types/geo/quantize.go index 18ec11f9f119c..f07562424f96e 100644 --- a/types/geo/quantize.go +++ b/types/geo/quantize.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package geo diff --git a/types/geo/quantize_test.go b/types/geo/quantize_test.go index bc1f62c9be32f..59d5587e565dc 100644 --- a/types/geo/quantize_test.go +++ b/types/geo/quantize_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package geo_test diff --git a/types/geo/units.go b/types/geo/units.go index 76a4c02f79f34..74df9624d6bd9 100644 --- a/types/geo/units.go +++ b/types/geo/units.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package geo diff --git a/types/geo/units_test.go 
b/types/geo/units_test.go index b6f724ce0d9b3..cfbb7ae6a6685 100644 --- a/types/geo/units_test.go +++ b/types/geo/units_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package geo_test diff --git a/types/iox/io.go b/types/iox/io.go index a5ca1be43f737..f78328a10a969 100644 --- a/types/iox/io.go +++ b/types/iox/io.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package iox provides types to implement [io] functionality. diff --git a/types/iox/io_test.go b/types/iox/io_test.go index 9fba39605d28d..7a902841b75fe 100644 --- a/types/iox/io_test.go +++ b/types/iox/io_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package iox diff --git a/types/ipproto/ipproto.go b/types/ipproto/ipproto.go index b5333eb56ace0..a08985b3aba26 100644 --- a/types/ipproto/ipproto.go +++ b/types/ipproto/ipproto.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ipproto contains IP Protocol constants. 
diff --git a/types/ipproto/ipproto_test.go b/types/ipproto/ipproto_test.go index 102b79cffae5b..8bfeb13fa4246 100644 --- a/types/ipproto/ipproto_test.go +++ b/types/ipproto/ipproto_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ipproto diff --git a/types/jsonx/json.go b/types/jsonx/json.go index 3f01ea358df30..36516f495380b 100644 --- a/types/jsonx/json.go +++ b/types/jsonx/json.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package jsonx contains helper types and functionality to use with diff --git a/types/jsonx/json_test.go b/types/jsonx/json_test.go index 0f2a646c40d6d..5c302d9746c3e 100644 --- a/types/jsonx/json_test.go +++ b/types/jsonx/json_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package jsonx diff --git a/types/key/chal.go b/types/key/chal.go index 742ac5479e4a1..50827d28ed0fe 100644 --- a/types/key/chal.go +++ b/types/key/chal.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/control.go b/types/key/control.go index 96021249ba047..384be160265fa 100644 --- a/types/key/control.go +++ b/types/key/control.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/control_test.go b/types/key/control_test.go index a98a586f3ba5a..928be4283bc3a 100644 --- a/types/key/control_test.go +++ b/types/key/control_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git 
a/types/key/derp.go b/types/key/derp.go index 1466b85bc5288..a85611d241765 100644 --- a/types/key/derp.go +++ b/types/key/derp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/derp_test.go b/types/key/derp_test.go index b91cbbf8c4e01..ab98671e5bd29 100644 --- a/types/key/derp_test.go +++ b/types/key/derp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/disco.go b/types/key/disco.go index 52b40c766fbbf..f46347c919ebb 100644 --- a/types/key/disco.go +++ b/types/key/disco.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/disco_test.go b/types/key/disco_test.go index 131fe350f508a..fb22fa82f5400 100644 --- a/types/key/disco_test.go +++ b/types/key/disco_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/doc.go b/types/key/doc.go index b2aad72d612bb..cbee21e5f5732 100644 --- a/types/key/doc.go +++ b/types/key/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package key contains types for different types of public and private keys diff --git a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go index 9d4a21ee42706..5ca7e936b3d72 100644 --- a/types/key/hardware_attestation.go +++ b/types/key/hardware_attestation.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/machine.go 
b/types/key/machine.go index a05f3cc1f5735..9ad73bec1a434 100644 --- a/types/key/machine.go +++ b/types/key/machine.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/machine_test.go b/types/key/machine_test.go index 157df9e4356b1..3db92ed406bd6 100644 --- a/types/key/machine_test.go +++ b/types/key/machine_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/nl.go b/types/key/nl.go index 50caed98c2d0b..fc11d5b20ff64 100644 --- a/types/key/nl.go +++ b/types/key/nl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/nl_test.go b/types/key/nl_test.go index 75b7765a19ea1..84fa920569f08 100644 --- a/types/key/nl_test.go +++ b/types/key/nl_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/node.go b/types/key/node.go index 11ee1fa3cfd41..1402aad361870 100644 --- a/types/key/node.go +++ b/types/key/node.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/node_test.go b/types/key/node_test.go index 80a2dadf90f5f..77eef2b28d2f5 100644 --- a/types/key/node_test.go +++ b/types/key/node_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/util.go b/types/key/util.go index 50fac827556aa..c336d38792a25 100644 --- a/types/key/util.go +++ b/types/key/util.go @@ -1,4 +1,4 @@ -// Copyright 
(c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/key/util_test.go b/types/key/util_test.go index 4d6f8242280ad..3323e0e574684 100644 --- a/types/key/util_test.go +++ b/types/key/util_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package key diff --git a/types/lazy/deferred.go b/types/lazy/deferred.go index 973082914c48c..582090ab93112 100644 --- a/types/lazy/deferred.go +++ b/types/lazy/deferred.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy diff --git a/types/lazy/deferred_test.go b/types/lazy/deferred_test.go index 98cacbfce7088..61cc8f8ac6c27 100644 --- a/types/lazy/deferred_test.go +++ b/types/lazy/deferred_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy diff --git a/types/lazy/lazy.go b/types/lazy/lazy.go index f537758fa6415..915ae2002c135 100644 --- a/types/lazy/lazy.go +++ b/types/lazy/lazy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package lazy provides types for lazily initialized values. 
diff --git a/types/lazy/map.go b/types/lazy/map.go index 75a1dd739d3bc..4718c5b873c4b 100644 --- a/types/lazy/map.go +++ b/types/lazy/map.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy diff --git a/types/lazy/map_test.go b/types/lazy/map_test.go index ec1152b0b802c..5f09da5aea1f1 100644 --- a/types/lazy/map_test.go +++ b/types/lazy/map_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy diff --git a/types/lazy/sync_test.go b/types/lazy/sync_test.go index 4d1278253955b..b517594d0a8e3 100644 --- a/types/lazy/sync_test.go +++ b/types/lazy/sync_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy diff --git a/types/lazy/unsync.go b/types/lazy/unsync.go index 0f89ce4f6935a..75d7be23f1e04 100644 --- a/types/lazy/unsync.go +++ b/types/lazy/unsync.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy diff --git a/types/lazy/unsync_test.go b/types/lazy/unsync_test.go index f0d2494d12b6e..c3fcf27acad65 100644 --- a/types/lazy/unsync_test.go +++ b/types/lazy/unsync_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lazy diff --git a/types/logger/logger.go b/types/logger/logger.go index 6c4edf6336005..71086e87dbd83 100644 --- a/types/logger/logger.go +++ b/types/logger/logger.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package logger defines a type for writing to logs. 
It's just a diff --git a/types/logger/logger_test.go b/types/logger/logger_test.go index 52c1d3900e1c5..f55a9484d344e 100644 --- a/types/logger/logger_test.go +++ b/types/logger/logger_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logger diff --git a/types/logger/rusage.go b/types/logger/rusage.go index 3943636d6e255..c1bbbaa5378f7 100644 --- a/types/logger/rusage.go +++ b/types/logger/rusage.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logger diff --git a/types/logger/rusage_stub.go b/types/logger/rusage_stub.go index f646f1e1eee7f..e94478ef7c59b 100644 --- a/types/logger/rusage_stub.go +++ b/types/logger/rusage_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows || wasm || plan9 || tamago diff --git a/types/logger/rusage_syscall.go b/types/logger/rusage_syscall.go index 2871b66c6bb24..25b026994afe8 100644 --- a/types/logger/rusage_syscall.go +++ b/types/logger/rusage_syscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !wasm && !plan9 && !tamago diff --git a/types/logger/tokenbucket.go b/types/logger/tokenbucket.go index 83d4059c2af00..fdee56237757e 100644 --- a/types/logger/tokenbucket.go +++ b/types/logger/tokenbucket.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logger diff --git a/types/logid/id.go b/types/logid/id.go index fd46a7bef735c..94e363879d324 100644 --- a/types/logid/id.go +++ b/types/logid/id.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package logid contains ID types for interacting with the log service. diff --git a/types/logid/id_test.go b/types/logid/id_test.go index c93d1f1c1adc0..86a736bd877e6 100644 --- a/types/logid/id_test.go +++ b/types/logid/id_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package logid diff --git a/types/mapx/ordered.go b/types/mapx/ordered.go index 1991f039d7726..caaa4d098f8fb 100644 --- a/types/mapx/ordered.go +++ b/types/mapx/ordered.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mapx contains extra map types and functions. diff --git a/types/mapx/ordered_test.go b/types/mapx/ordered_test.go index 7dcb7e40558c3..9bf0be6410e80 100644 --- a/types/mapx/ordered_test.go +++ b/types/mapx/ordered_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package mapx diff --git a/types/netlogfunc/netlogfunc.go b/types/netlogfunc/netlogfunc.go index 6185fcb715c65..db856f0cf49f1 100644 --- a/types/netlogfunc/netlogfunc.go +++ b/types/netlogfunc/netlogfunc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netlogfunc defines types for network logging. diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index cc38684a30dbf..24fb32ab0bb5f 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netlogtype defines types for network logging. 
diff --git a/types/netlogtype/netlogtype_test.go b/types/netlogtype/netlogtype_test.go index 00f89b228aa96..8271f0ae04144 100644 --- a/types/netlogtype/netlogtype_test.go +++ b/types/netlogtype/netlogtype_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_tailnetlock diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index 18abd1c195024..d809cbab4ad5d 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package netmap contains the netmap.NetworkMap type. diff --git a/types/netmap/netmap_test.go b/types/netmap/netmap_test.go index ee4fecdb4ff4e..e68b243b37f42 100644 --- a/types/netmap/netmap_test.go +++ b/types/netmap/netmap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmap diff --git a/types/netmap/nodemut.go b/types/netmap/nodemut.go index 4f93be21c6d68..5c9000d56ef38 100644 --- a/types/netmap/nodemut.go +++ b/types/netmap/nodemut.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmap diff --git a/types/netmap/nodemut_test.go b/types/netmap/nodemut_test.go index 374f8623ad564..f7302d48df097 100644 --- a/types/netmap/nodemut_test.go +++ b/types/netmap/nodemut_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netmap diff --git a/types/nettype/nettype.go b/types/nettype/nettype.go index 5d3d303c38a0d..e44daa0c709f3 100644 --- a/types/nettype/nettype.go +++ b/types/nettype/nettype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package nettype defines an interface that doesn't exist in the Go net package. diff --git a/types/opt/bool.go b/types/opt/bool.go index fbc39e1dc3754..cecbf5eac1c68 100644 --- a/types/opt/bool.go +++ b/types/opt/bool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package opt defines optional types. diff --git a/types/opt/bool_test.go b/types/opt/bool_test.go index e61d66dbe9e96..de4da3788d1e7 100644 --- a/types/opt/bool_test.go +++ b/types/opt/bool_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package opt diff --git a/types/opt/value.go b/types/opt/value.go index c71c53e511aca..1ccdd75a47c72 100644 --- a/types/opt/value.go +++ b/types/opt/value.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package opt diff --git a/types/opt/value_test.go b/types/opt/value_test.go index 890f9a5795cb3..0b73182996ad2 100644 --- a/types/opt/value_test.go +++ b/types/opt/value_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package opt diff --git a/types/persist/persist.go b/types/persist/persist.go index 80bac9b5e2741..2a8c2fb824d36 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package persist contains the Persist type. 
diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index 9dbe7e0f6fa6d..f5fa36b6da0fc 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index 713114b74dcd5..b25af5a0b2066 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package persist diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index dbf8294ef5a7a..b18634917c651 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. 
diff --git a/types/prefs/item.go b/types/prefs/item.go index 717a0c76cf291..fdb9301f9fdf8 100644 --- a/types/prefs/item.go +++ b/types/prefs/item.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs diff --git a/types/prefs/list.go b/types/prefs/list.go index ae6b2fae335db..20e4dad463135 100644 --- a/types/prefs/list.go +++ b/types/prefs/list.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs diff --git a/types/prefs/map.go b/types/prefs/map.go index 4b64690ed1351..6bf1948b87ab4 100644 --- a/types/prefs/map.go +++ b/types/prefs/map.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs diff --git a/types/prefs/options.go b/types/prefs/options.go index 3769b784b731a..bc0123a526084 100644 --- a/types/prefs/options.go +++ b/types/prefs/options.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs diff --git a/types/prefs/prefs.go b/types/prefs/prefs.go index a6caf12838b79..3f18886a724bc 100644 --- a/types/prefs/prefs.go +++ b/types/prefs/prefs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package prefs contains types and functions to work with arbitrary diff --git a/types/prefs/prefs_clone_test.go b/types/prefs/prefs_clone_test.go index 2a03fba8b092c..07dc24fdc7361 100644 --- a/types/prefs/prefs_clone_test.go +++ b/types/prefs/prefs_clone_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by 
tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/types/prefs/prefs_example/prefs_example_clone.go b/types/prefs/prefs_example/prefs_example_clone.go index 5c707b46343e1..c5fdc49fc3b9b 100644 --- a/types/prefs/prefs_example/prefs_example_clone.go +++ b/types/prefs/prefs_example/prefs_example_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/types/prefs/prefs_example/prefs_example_view.go b/types/prefs/prefs_example/prefs_example_view.go index 6a1a36865fe00..67a284bb5b4bb 100644 --- a/types/prefs/prefs_example/prefs_example_view.go +++ b/types/prefs/prefs_example/prefs_example_view.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. diff --git a/types/prefs/prefs_example/prefs_test.go b/types/prefs/prefs_example/prefs_test.go index aefbae9f2873a..93ed5b4fea27b 100644 --- a/types/prefs/prefs_example/prefs_test.go +++ b/types/prefs/prefs_example/prefs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs_example diff --git a/types/prefs/prefs_example/prefs_types.go b/types/prefs/prefs_example/prefs_types.go index c35f1f62fde3d..d0764c64b6efc 100644 --- a/types/prefs/prefs_example/prefs_types.go +++ b/types/prefs/prefs_example/prefs_types.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package prefs_example contains a [Prefs] type, which is like [tailscale.com/ipn.Prefs], diff --git a/types/prefs/prefs_test.go b/types/prefs/prefs_test.go index dc1213adb27ab..ccc37b0a74df7 100644 --- a/types/prefs/prefs_test.go +++ 
b/types/prefs/prefs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs diff --git a/types/prefs/prefs_view_test.go b/types/prefs/prefs_view_test.go index 8993cb535bd67..ce4dee726badc 100644 --- a/types/prefs/prefs_view_test.go +++ b/types/prefs/prefs_view_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale/cmd/viewer; DO NOT EDIT. diff --git a/types/prefs/struct_list.go b/types/prefs/struct_list.go index ba145e2cf7086..09aa808ccc37e 100644 --- a/types/prefs/struct_list.go +++ b/types/prefs/struct_list.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs diff --git a/types/prefs/struct_map.go b/types/prefs/struct_map.go index 83cc7447baedd..2f2715a62a94a 100644 --- a/types/prefs/struct_map.go +++ b/types/prefs/struct_map.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package prefs diff --git a/types/preftype/netfiltermode.go b/types/preftype/netfiltermode.go index 273e173444365..f108bebc35688 100644 --- a/types/preftype/netfiltermode.go +++ b/types/preftype/netfiltermode.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package preftype is a leaf package containing types for various diff --git a/types/ptr/ptr.go b/types/ptr/ptr.go index beb17bee8ee0e..5b65a0e1c13e7 100644 --- a/types/ptr/ptr.go +++ b/types/ptr/ptr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ptr contains the ptr.To function. 
diff --git a/types/result/result.go b/types/result/result.go index 6bd1c2ea62004..4d537b084ea54 100644 --- a/types/result/result.go +++ b/types/result/result.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package result contains the Of result type, which is diff --git a/types/structs/structs.go b/types/structs/structs.go index 47c359f0caa0f..dd0cd809b8928 100644 --- a/types/structs/structs.go +++ b/types/structs/structs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package structs contains the Incomparable type. diff --git a/types/tkatype/tkatype.go b/types/tkatype/tkatype.go index 6ad51f6a90240..e315f4422ae98 100644 --- a/types/tkatype/tkatype.go +++ b/types/tkatype/tkatype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tkatype defines types for working with the tka package. 
diff --git a/types/tkatype/tkatype_test.go b/types/tkatype/tkatype_test.go index c81891b9ce103..337167a7d3bd8 100644 --- a/types/tkatype/tkatype_test.go +++ b/types/tkatype/tkatype_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tkatype diff --git a/types/views/views.go b/types/views/views.go index 252f126a79f57..9260311edc29a 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package views provides read-only accessors for commonly used diff --git a/types/views/views_test.go b/types/views/views_test.go index 5a30c11a13c86..7cdd1ab020312 100644 --- a/types/views/views_test.go +++ b/types/views/views_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package views diff --git a/util/backoff/backoff.go b/util/backoff/backoff.go index 95089fc2479ff..2edb1e7712e65 100644 --- a/util/backoff/backoff.go +++ b/util/backoff/backoff.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package backoff provides a back-off timer type. 
diff --git a/util/checkchange/checkchange.go b/util/checkchange/checkchange.go index 8ba64720d7e14..45e3c0bf54660 100644 --- a/util/checkchange/checkchange.go +++ b/util/checkchange/checkchange.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package checkchange defines a utility for determining whether a value diff --git a/util/cibuild/cibuild.go b/util/cibuild/cibuild.go index c1e337f9a142a..4a4e241ac2cf0 100644 --- a/util/cibuild/cibuild.go +++ b/util/cibuild/cibuild.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cibuild reports runtime CI information. diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 50cf3b2960499..b67cbbd39aa1e 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_clientmetrics diff --git a/util/clientmetric/clientmetric_test.go b/util/clientmetric/clientmetric_test.go index 555d7a71170a4..db1cfe1893512 100644 --- a/util/clientmetric/clientmetric_test.go +++ b/util/clientmetric/clientmetric_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package clientmetric diff --git a/util/clientmetric/omit.go b/util/clientmetric/omit.go index 6d678cf20d1ae..725b18fe48d3c 100644 --- a/util/clientmetric/omit.go +++ b/util/clientmetric/omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_clientmetrics diff --git a/util/cloudenv/cloudenv.go b/util/cloudenv/cloudenv.go index 
f55f7dfb0794a..aee4bac723f2d 100644 --- a/util/cloudenv/cloudenv.go +++ b/util/cloudenv/cloudenv.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cloudenv reports which known cloud environment we're running in. diff --git a/util/cloudenv/cloudenv_test.go b/util/cloudenv/cloudenv_test.go index c4486b2841ec1..c928fe660ee72 100644 --- a/util/cloudenv/cloudenv_test.go +++ b/util/cloudenv/cloudenv_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cloudenv diff --git a/util/cloudinfo/cloudinfo.go b/util/cloudinfo/cloudinfo.go index 2c4a32c031d2c..5f6a54ebdcb95 100644 --- a/util/cloudinfo/cloudinfo.go +++ b/util/cloudinfo/cloudinfo.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(ios || android || js) diff --git a/util/cloudinfo/cloudinfo_nocloud.go b/util/cloudinfo/cloudinfo_nocloud.go index 6a525cd2a5725..b7ea210c15c6f 100644 --- a/util/cloudinfo/cloudinfo_nocloud.go +++ b/util/cloudinfo/cloudinfo_nocloud.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios || android || js diff --git a/util/cloudinfo/cloudinfo_test.go b/util/cloudinfo/cloudinfo_test.go index 38817f47a6e56..721eca25f960d 100644 --- a/util/cloudinfo/cloudinfo_test.go +++ b/util/cloudinfo/cloudinfo_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cloudinfo diff --git a/util/cmpver/version.go b/util/cmpver/version.go index 972c7b95f9a5e..69b01c48b1d4b 100644 --- a/util/cmpver/version.go +++ b/util/cmpver/version.go @@ -1,4 +1,4 @@ -// Copyright (c) 
Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cmpver implements a variant of debian version number diff --git a/util/cmpver/version_test.go b/util/cmpver/version_test.go index 8a3e470d1d37f..5688aa037927b 100644 --- a/util/cmpver/version_test.go +++ b/util/cmpver/version_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cmpver_test diff --git a/util/codegen/codegen.go b/util/codegen/codegen.go index ec02d652b8760..2023c8d9b1e08 100644 --- a/util/codegen/codegen.go +++ b/util/codegen/codegen.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package codegen contains shared utilities for generating code. @@ -69,7 +69,7 @@ func HasNoClone(structTag string) bool { return false } -const copyrightHeader = `// Copyright (c) Tailscale Inc & AUTHORS +const copyrightHeader = `// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause ` diff --git a/util/codegen/codegen_test.go b/util/codegen/codegen_test.go index 74715eecae6ef..49656401a9b18 100644 --- a/util/codegen/codegen_test.go +++ b/util/codegen/codegen_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package codegen diff --git a/util/cstruct/cstruct.go b/util/cstruct/cstruct.go index 4d1d0a98b8032..afb0150bb1e77 100644 --- a/util/cstruct/cstruct.go +++ b/util/cstruct/cstruct.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package cstruct provides a helper for decoding binary data that is in the diff --git a/util/cstruct/cstruct_example_test.go b/util/cstruct/cstruct_example_test.go index 
17032267b9dc6..a665abe355f6a 100644 --- a/util/cstruct/cstruct_example_test.go +++ b/util/cstruct/cstruct_example_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Only built on 64-bit platforms to avoid complexity diff --git a/util/cstruct/cstruct_test.go b/util/cstruct/cstruct_test.go index 5a75f338502bc..95d4876ca9256 100644 --- a/util/cstruct/cstruct_test.go +++ b/util/cstruct/cstruct_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package cstruct diff --git a/util/ctxkey/key.go b/util/ctxkey/key.go index e2b0e9d4ce0bf..982c65f04c8d7 100644 --- a/util/ctxkey/key.go +++ b/util/ctxkey/key.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // ctxkey provides type-safe key-value pairs for use with [context.Context]. 
diff --git a/util/ctxkey/key_test.go b/util/ctxkey/key_test.go index 20d85a3c0d2ae..413c3eacdd847 100644 --- a/util/ctxkey/key_test.go +++ b/util/ctxkey/key_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ctxkey diff --git a/util/deephash/debug.go b/util/deephash/debug.go index 50b3d5605f327..70c7a965551a4 100644 --- a/util/deephash/debug.go +++ b/util/deephash/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build deephash_debug diff --git a/util/deephash/deephash.go b/util/deephash/deephash.go index 29f47e3386ebd..ae082ef35e019 100644 --- a/util/deephash/deephash.go +++ b/util/deephash/deephash.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package deephash hashes a Go value recursively, in a predictable order, diff --git a/util/deephash/deephash_test.go b/util/deephash/deephash_test.go index 413893ff967d2..c50d70bc6ed7f 100644 --- a/util/deephash/deephash_test.go +++ b/util/deephash/deephash_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deephash diff --git a/util/deephash/pointer.go b/util/deephash/pointer.go index aafae47a23673..448f12108eeee 100644 --- a/util/deephash/pointer.go +++ b/util/deephash/pointer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deephash diff --git a/util/deephash/pointer_norace.go b/util/deephash/pointer_norace.go index f98a70f6a18e5..dc77bbeaaf4a6 100644 --- a/util/deephash/pointer_norace.go +++ b/util/deephash/pointer_norace.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !race diff --git a/util/deephash/pointer_race.go b/util/deephash/pointer_race.go index c638c7d39f393..15fe45b9113b3 100644 --- a/util/deephash/pointer_race.go +++ b/util/deephash/pointer_race.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build race diff --git a/util/deephash/tailscale_types_test.go b/util/deephash/tailscale_types_test.go index eeb7fdf84d11f..7e803c841c1f5 100644 --- a/util/deephash/tailscale_types_test.go +++ b/util/deephash/tailscale_types_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file contains tests and benchmarks that use types from other packages diff --git a/util/deephash/testtype/testtype.go b/util/deephash/testtype/testtype.go index 3c90053d6dfd5..b5775c62a0af1 100644 --- a/util/deephash/testtype/testtype.go +++ b/util/deephash/testtype/testtype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package testtype contains types for testing deephash. 
diff --git a/util/deephash/types.go b/util/deephash/types.go index 54edcbffc3fe8..ef19207bf68b8 100644 --- a/util/deephash/types.go +++ b/util/deephash/types.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deephash diff --git a/util/deephash/types_test.go b/util/deephash/types_test.go index 78b40d88e5094..7a0a43b27e6d3 100644 --- a/util/deephash/types_test.go +++ b/util/deephash/types_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package deephash diff --git a/util/dirwalk/dirwalk.go b/util/dirwalk/dirwalk.go index 811766892896a..38f58d517df77 100644 --- a/util/dirwalk/dirwalk.go +++ b/util/dirwalk/dirwalk.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dirwalk contains code to walk a directory. 
diff --git a/util/dirwalk/dirwalk_linux.go b/util/dirwalk/dirwalk_linux.go index 256467ebd8ac5..4a12f8ebe075b 100644 --- a/util/dirwalk/dirwalk_linux.go +++ b/util/dirwalk/dirwalk_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirwalk diff --git a/util/dirwalk/dirwalk_test.go b/util/dirwalk/dirwalk_test.go index 15ebc13dd404d..f9ba842977d6b 100644 --- a/util/dirwalk/dirwalk_test.go +++ b/util/dirwalk/dirwalk_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dirwalk diff --git a/util/dnsname/dnsname.go b/util/dnsname/dnsname.go index ef898ebbd842f..09b44e73e2faa 100644 --- a/util/dnsname/dnsname.go +++ b/util/dnsname/dnsname.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package dnsname contains string functions for working with DNS names. 
diff --git a/util/dnsname/dnsname_test.go b/util/dnsname/dnsname_test.go index b038bb1bd10e1..35e04de2ebb35 100644 --- a/util/dnsname/dnsname_test.go +++ b/util/dnsname/dnsname_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package dnsname diff --git a/util/eventbus/bench_test.go b/util/eventbus/bench_test.go index 25f5b80020880..7cd7a424184d2 100644 --- a/util/eventbus/bench_test.go +++ b/util/eventbus/bench_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus_test diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 880e075ccaf3c..1bc8aaed63dfc 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 88e11e7199aee..e7fa7577f2bdd 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus_test diff --git a/util/eventbus/client.go b/util/eventbus/client.go index a7a5ab673bdfd..f405146ce4a82 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus diff --git a/util/eventbus/debug-demo/main.go b/util/eventbus/debug-demo/main.go index 71894d2eab94e..64b51a0fa4fac 100644 --- a/util/eventbus/debug-demo/main.go +++ b/util/eventbus/debug-demo/main.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause // debug-demo is a program that serves a bus's debug interface over diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index 0453defb1a77e..7a37aeac8b6f3 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go index 9e03676d07128..1c5a64074e441 100644 --- a/util/eventbus/debughttp.go +++ b/util/eventbus/debughttp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !android && !ts_omit_debugeventbus diff --git a/util/eventbus/debughttp_off.go b/util/eventbus/debughttp_off.go index 332525262aa29..4b31bd6b78a79 100644 --- a/util/eventbus/debughttp_off.go +++ b/util/eventbus/debughttp_off.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios || android || ts_omit_debugeventbus diff --git a/util/eventbus/doc.go b/util/eventbus/doc.go index f95f9398c8de9..89af076f9b637 100644 --- a/util/eventbus/doc.go +++ b/util/eventbus/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package eventbus provides an in-process event bus. diff --git a/util/eventbus/eventbustest/doc.go b/util/eventbus/eventbustest/doc.go index 1e9928b9d7cf9..504d40d9546fe 100644 --- a/util/eventbus/eventbustest/doc.go +++ b/util/eventbus/eventbustest/doc.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package eventbustest provides helper methods for testing an [eventbus.Bus]. 
diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index fd8a150812e0d..b3ef6c884d89f 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbustest diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index ac454023c9c47..810312fcb411a 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbustest_test diff --git a/util/eventbus/eventbustest/examples_test.go b/util/eventbus/eventbustest/examples_test.go index c848113173bc6..87a0efe31c6e9 100644 --- a/util/eventbus/eventbustest/examples_test.go +++ b/util/eventbus/eventbustest/examples_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbustest_test diff --git a/util/eventbus/fetch-htmx.go b/util/eventbus/fetch-htmx.go index f80d5025727fd..6a780d3025681 100644 --- a/util/eventbus/fetch-htmx.go +++ b/util/eventbus/fetch-htmx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ignore diff --git a/util/eventbus/monitor.go b/util/eventbus/monitor.go index db6fe1be44737..0d3056e206664 100644 --- a/util/eventbus/monitor.go +++ b/util/eventbus/monitor.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go 
index 348bb9dff950c..f6fd029b7902e 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus diff --git a/util/eventbus/queue.go b/util/eventbus/queue.go index 2589b75cef999..0483a05a68d5c 100644 --- a/util/eventbus/queue.go +++ b/util/eventbus/queue.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index b0348e125c393..3edf6deb44bb2 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package eventbus diff --git a/util/execqueue/execqueue.go b/util/execqueue/execqueue.go index 87616a6b50a45..b2c7014377e13 100644 --- a/util/execqueue/execqueue.go +++ b/util/execqueue/execqueue.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package execqueue implements an ordered asynchronous queue for executing functions. 
diff --git a/util/execqueue/execqueue_test.go b/util/execqueue/execqueue_test.go index 1bce69556e1f7..c9f3a449ef67e 100644 --- a/util/execqueue/execqueue_test.go +++ b/util/execqueue/execqueue_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package execqueue diff --git a/util/expvarx/expvarx.go b/util/expvarx/expvarx.go index bcdc4a91a7982..6dc2379b961a5 100644 --- a/util/expvarx/expvarx.go +++ b/util/expvarx/expvarx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package expvarx provides some extensions to the [expvar] package. diff --git a/util/expvarx/expvarx_test.go b/util/expvarx/expvarx_test.go index 9ed2e8f209115..f8d2139d3ecb1 100644 --- a/util/expvarx/expvarx_test.go +++ b/util/expvarx/expvarx_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package expvarx diff --git a/util/goroutines/goroutines.go b/util/goroutines/goroutines.go index d40cbecb10876..fd0a4dd7eb321 100644 --- a/util/goroutines/goroutines.go +++ b/util/goroutines/goroutines.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The goroutines package contains utilities for tracking and getting active goroutines. 
diff --git a/util/goroutines/goroutines_test.go b/util/goroutines/goroutines_test.go index ae17c399ca274..97adccf1c2ab1 100644 --- a/util/goroutines/goroutines_test.go +++ b/util/goroutines/goroutines_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package goroutines diff --git a/util/goroutines/tracker.go b/util/goroutines/tracker.go index c2a0cb8c3a3ed..b0513ef4efa3f 100644 --- a/util/goroutines/tracker.go +++ b/util/goroutines/tracker.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package goroutines diff --git a/util/groupmember/groupmember.go b/util/groupmember/groupmember.go index d604168169022..090e1561dcded 100644 --- a/util/groupmember/groupmember.go +++ b/util/groupmember/groupmember.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package groupmember verifies group membership of the provided user on the diff --git a/util/hashx/block512.go b/util/hashx/block512.go index e637c0c030653..5f32f33a6c8d2 100644 --- a/util/hashx/block512.go +++ b/util/hashx/block512.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package hashx provides a concrete implementation of [hash.Hash] diff --git a/util/hashx/block512_test.go b/util/hashx/block512_test.go index ca3ee0d784514..91d5d9ee67749 100644 --- a/util/hashx/block512_test.go +++ b/util/hashx/block512_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package hashx diff --git a/util/httphdr/httphdr.go b/util/httphdr/httphdr.go index 852e28b8fae03..01e8eddc67ac1 100644 --- a/util/httphdr/httphdr.go +++ 
b/util/httphdr/httphdr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package httphdr implements functionality for parsing and formatting diff --git a/util/httphdr/httphdr_test.go b/util/httphdr/httphdr_test.go index 81feeaca080d8..37906a5bf6603 100644 --- a/util/httphdr/httphdr_test.go +++ b/util/httphdr/httphdr_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package httphdr diff --git a/util/httpm/httpm.go b/util/httpm/httpm.go index a9a691b8a69e2..f15912ecb772a 100644 --- a/util/httpm/httpm.go +++ b/util/httpm/httpm.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package httpm has shorter names for HTTP method constants. diff --git a/util/httpm/httpm_test.go b/util/httpm/httpm_test.go index 0c71edc2f3c42..4e7f7b5ab277c 100644 --- a/util/httpm/httpm_test.go +++ b/util/httpm/httpm_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package httpm diff --git a/util/limiter/limiter.go b/util/limiter/limiter.go index b5fbb6fa6b2f7..f48114d531fa4 100644 --- a/util/limiter/limiter.go +++ b/util/limiter/limiter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package limiter provides a keyed token bucket rate limiter. 
diff --git a/util/limiter/limiter_test.go b/util/limiter/limiter_test.go index d3f3e307a2b82..5210322bbd3b0 100644 --- a/util/limiter/limiter_test.go +++ b/util/limiter/limiter_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package limiter diff --git a/util/lineiter/lineiter.go b/util/lineiter/lineiter.go index 5cb1eeef3ee1d..06d35909022b8 100644 --- a/util/lineiter/lineiter.go +++ b/util/lineiter/lineiter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package lineiter iterates over lines in things. diff --git a/util/lineiter/lineiter_test.go b/util/lineiter/lineiter_test.go index 3373d5fe7b122..6e9e285501fab 100644 --- a/util/lineiter/lineiter_test.go +++ b/util/lineiter/lineiter_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lineiter diff --git a/util/lineread/lineread.go b/util/lineread/lineread.go index 6b01d2b69ffd7..25cc63247e953 100644 --- a/util/lineread/lineread.go +++ b/util/lineread/lineread.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package lineread reads lines from files. It's not fancy, but it got repetitive. 
diff --git a/util/linuxfw/detector.go b/util/linuxfw/detector.go index 149e0c96049c8..a3a1c1ddaa547 100644 --- a/util/linuxfw/detector.go +++ b/util/linuxfw/detector.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/fake.go b/util/linuxfw/fake.go index d01849a2e5c9d..1886e25429537 100644 --- a/util/linuxfw/fake.go +++ b/util/linuxfw/fake.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/fake_netfilter.go b/util/linuxfw/fake_netfilter.go index a998ed765fd63..d760edfcf757e 100644 --- a/util/linuxfw/fake_netfilter.go +++ b/util/linuxfw/fake_netfilter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/helpers.go b/util/linuxfw/helpers.go index a4b9fdf402558..a369b6a8841b3 100644 --- a/util/linuxfw/helpers.go +++ b/util/linuxfw/helpers.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index 76c5400becff8..f054e7abe1718 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_iptables diff --git a/util/linuxfw/iptables_disabled.go b/util/linuxfw/iptables_disabled.go index 538e33647381a..c986fe7c206ea 100644 --- a/util/linuxfw/iptables_disabled.go +++ b/util/linuxfw/iptables_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build linux && ts_omit_iptables diff --git a/util/linuxfw/iptables_for_svcs.go b/util/linuxfw/iptables_for_svcs.go index 2cd8716e4622b..acc2baf6c6fcf 100644 --- a/util/linuxfw/iptables_for_svcs.go +++ b/util/linuxfw/iptables_for_svcs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/iptables_for_svcs_test.go b/util/linuxfw/iptables_for_svcs_test.go index 0e56d70ba7078..b4dfe19c84c14 100644 --- a/util/linuxfw/iptables_for_svcs_test.go +++ b/util/linuxfw/iptables_for_svcs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index 4443a907107d6..ed55960b36d7c 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/iptables_runner_test.go b/util/linuxfw/iptables_runner_test.go index ce905aef3f75b..0dcade35188fc 100644 --- a/util/linuxfw/iptables_runner_test.go +++ b/util/linuxfw/iptables_runner_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/linuxfw.go b/util/linuxfw/linuxfw.go index ec73aaceea03a..325a5809f8586 100644 --- a/util/linuxfw/linuxfw.go +++ b/util/linuxfw/linuxfw.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/linuxfwtest/linuxfwtest.go b/util/linuxfw/linuxfwtest/linuxfwtest.go index 
ee2cbd1b227f4..bf1477ad9b994 100644 --- a/util/linuxfw/linuxfwtest/linuxfwtest.go +++ b/util/linuxfw/linuxfwtest/linuxfwtest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build cgo && linux diff --git a/util/linuxfw/linuxfwtest/linuxfwtest_unsupported.go b/util/linuxfw/linuxfwtest/linuxfwtest_unsupported.go index 6e95699001d4b..ec2d24d3521c9 100644 --- a/util/linuxfw/linuxfwtest/linuxfwtest_unsupported.go +++ b/util/linuxfw/linuxfwtest/linuxfwtest_unsupported.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !cgo || !linux diff --git a/util/linuxfw/nftables.go b/util/linuxfw/nftables.go index 94ce51a1405a4..6059128a97c2f 100644 --- a/util/linuxfw/nftables.go +++ b/util/linuxfw/nftables.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // TODO(#8502): add support for more architectures diff --git a/util/linuxfw/nftables_for_svcs.go b/util/linuxfw/nftables_for_svcs.go index 474b980869691..c2425e2ff285b 100644 --- a/util/linuxfw/nftables_for_svcs.go +++ b/util/linuxfw/nftables_for_svcs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/nftables_for_svcs_test.go b/util/linuxfw/nftables_for_svcs_test.go index 73472ce20cbe5..c3be3fc3b7bee 100644 --- a/util/linuxfw/nftables_for_svcs_test.go +++ b/util/linuxfw/nftables_for_svcs_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/nftables_runner.go b/util/linuxfw/nftables_runner.go index faa02f7c75956..2c44a6218e76e 100644 --- 
a/util/linuxfw/nftables_runner.go +++ b/util/linuxfw/nftables_runner.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/nftables_runner_test.go b/util/linuxfw/nftables_runner_test.go index 6fb180ed67ce6..dc4d3194a23ba 100644 --- a/util/linuxfw/nftables_runner_test.go +++ b/util/linuxfw/nftables_runner_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/linuxfw/nftables_types.go b/util/linuxfw/nftables_types.go index b6e24d2a67b5b..27c5ee5981e13 100644 --- a/util/linuxfw/nftables_types.go +++ b/util/linuxfw/nftables_types.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // TODO(#8502): add support for more architectures diff --git a/util/lru/lru.go b/util/lru/lru.go index 8e4dd417b98d2..7fb191535dcce 100644 --- a/util/lru/lru.go +++ b/util/lru/lru.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package lru contains a typed Least-Recently-Used cache. diff --git a/util/lru/lru_test.go b/util/lru/lru_test.go index 04de2e5070c87..5fbc718b1decd 100644 --- a/util/lru/lru_test.go +++ b/util/lru/lru_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package lru diff --git a/util/mak/mak.go b/util/mak/mak.go index fbdb40b0afd21..97daab98a7650 100644 --- a/util/mak/mak.go +++ b/util/mak/mak.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mak helps make maps. 
It contains generic helpers to make/assign diff --git a/util/mak/mak_test.go b/util/mak/mak_test.go index e47839a3c8fe9..7a4090c20292c 100644 --- a/util/mak/mak_test.go +++ b/util/mak/mak_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mak contains code to help make things. diff --git a/util/multierr/multierr.go b/util/multierr/multierr.go index 93ca068f56532..3acdb7d773222 100644 --- a/util/multierr/multierr.go +++ b/util/multierr/multierr.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package multierr provides a simple multiple-error type. diff --git a/util/multierr/multierr_test.go b/util/multierr/multierr_test.go index de7721a665f40..35195b3770db1 100644 --- a/util/multierr/multierr_test.go +++ b/util/multierr/multierr_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package multierr_test diff --git a/util/must/must.go b/util/must/must.go index a292da2268c27..6a4b519361f2f 100644 --- a/util/must/must.go +++ b/util/must/must.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package must assists in calling functions that must succeed. 
diff --git a/util/nocasemaps/nocase.go b/util/nocasemaps/nocase.go index 2d91d8fe96a7a..737ab5de7c3bb 100644 --- a/util/nocasemaps/nocase.go +++ b/util/nocasemaps/nocase.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // nocasemaps provides efficient functions to set and get entries in Go maps diff --git a/util/nocasemaps/nocase_test.go b/util/nocasemaps/nocase_test.go index 5275b3ee6ef23..cae36242c3040 100644 --- a/util/nocasemaps/nocase_test.go +++ b/util/nocasemaps/nocase_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package nocasemaps diff --git a/util/osdiag/internal/wsc/wsc_windows.go b/util/osdiag/internal/wsc/wsc_windows.go index b402946eda4d2..8bc43ac54bbb9 100644 --- a/util/osdiag/internal/wsc/wsc_windows.go +++ b/util/osdiag/internal/wsc/wsc_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by 'go generate'; DO NOT EDIT. diff --git a/util/osdiag/mksyscall.go b/util/osdiag/mksyscall.go index bcbe113b051cd..688e0a31a7cc7 100644 --- a/util/osdiag/mksyscall.go +++ b/util/osdiag/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osdiag diff --git a/util/osdiag/osdiag.go b/util/osdiag/osdiag.go index 2ebecbdbf74a2..9845bd3f8be46 100644 --- a/util/osdiag/osdiag.go +++ b/util/osdiag/osdiag.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package osdiag provides loggers for OS-specific diagnostic information. 
diff --git a/util/osdiag/osdiag_notwindows.go b/util/osdiag/osdiag_notwindows.go index 0e46c97e50803..72237438b480b 100644 --- a/util/osdiag/osdiag_notwindows.go +++ b/util/osdiag/osdiag_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/util/osdiag/osdiag_windows.go b/util/osdiag/osdiag_windows.go index 5dcce3beaf76e..d6ba1d30bb674 100644 --- a/util/osdiag/osdiag_windows.go +++ b/util/osdiag/osdiag_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osdiag diff --git a/util/osdiag/osdiag_windows_test.go b/util/osdiag/osdiag_windows_test.go index b29b602ccb73c..f285f80feac43 100644 --- a/util/osdiag/osdiag_windows_test.go +++ b/util/osdiag/osdiag_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osdiag diff --git a/util/osshare/filesharingstatus_noop.go b/util/osshare/filesharingstatus_noop.go index 7f2b131904ea9..22f0a33785131 100644 --- a/util/osshare/filesharingstatus_noop.go +++ b/util/osshare/filesharingstatus_noop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/util/osshare/filesharingstatus_windows.go b/util/osshare/filesharingstatus_windows.go index c125de15990c3..d21c394d0a27c 100644 --- a/util/osshare/filesharingstatus_windows.go +++ b/util/osshare/filesharingstatus_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package osshare provides utilities for enabling/disabling Taildrop file diff --git a/util/osuser/group_ids.go 
b/util/osuser/group_ids.go index 7c2b5b090cbcc..2a1f147d87b00 100644 --- a/util/osuser/group_ids.go +++ b/util/osuser/group_ids.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osuser diff --git a/util/osuser/group_ids_test.go b/util/osuser/group_ids_test.go index 69e8336ea6872..79e189ed8c866 100644 --- a/util/osuser/group_ids_test.go +++ b/util/osuser/group_ids_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osuser diff --git a/util/osuser/user.go b/util/osuser/user.go index 8b96194d716ce..2de3da762739d 100644 --- a/util/osuser/user.go +++ b/util/osuser/user.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package osuser implements OS user lookup. It's a wrapper around os/user that diff --git a/util/pidowner/pidowner.go b/util/pidowner/pidowner.go index 56bb640b785dd..cec92ba367e49 100644 --- a/util/pidowner/pidowner.go +++ b/util/pidowner/pidowner.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package pidowner handles lookups from process ID to its owning user. 
diff --git a/util/pidowner/pidowner_linux.go b/util/pidowner/pidowner_linux.go index a07f512427062..f3f5cd97ddcb2 100644 --- a/util/pidowner/pidowner_linux.go +++ b/util/pidowner/pidowner_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package pidowner diff --git a/util/pidowner/pidowner_noimpl.go b/util/pidowner/pidowner_noimpl.go index 50add492fda76..4bc665d61071e 100644 --- a/util/pidowner/pidowner_noimpl.go +++ b/util/pidowner/pidowner_noimpl.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows && !linux diff --git a/util/pidowner/pidowner_test.go b/util/pidowner/pidowner_test.go index 19c9ab46dff01..2774a8ab0fe36 100644 --- a/util/pidowner/pidowner_test.go +++ b/util/pidowner/pidowner_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package pidowner diff --git a/util/pidowner/pidowner_windows.go b/util/pidowner/pidowner_windows.go index dbf13ac8135f1..8edd7698d4207 100644 --- a/util/pidowner/pidowner_windows.go +++ b/util/pidowner/pidowner_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package pidowner diff --git a/util/pool/pool.go b/util/pool/pool.go index 7014751e7ab77..7042fb893a59e 100644 --- a/util/pool/pool.go +++ b/util/pool/pool.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package pool contains a generic type for managing a pool of resources; for diff --git a/util/pool/pool_test.go b/util/pool/pool_test.go index 9d8eacbcb9d0b..ac7cf86be3ef7 100644 --- a/util/pool/pool_test.go +++ b/util/pool/pool_test.go @@ -1,4 +1,4 
@@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package pool diff --git a/util/precompress/precompress.go b/util/precompress/precompress.go index 6d1a26efdd767..80aed36821b2e 100644 --- a/util/precompress/precompress.go +++ b/util/precompress/precompress.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package precompress provides build- and serving-time support for diff --git a/util/progresstracking/progresstracking.go b/util/progresstracking/progresstracking.go index a9411fb46f7fd..21cbfa52ba3ff 100644 --- a/util/progresstracking/progresstracking.go +++ b/util/progresstracking/progresstracking.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package progresstracking provides wrappers around io.Reader and io.Writer diff --git a/util/prompt/prompt.go b/util/prompt/prompt.go index a6d86fb481769..b84993a0aeed9 100644 --- a/util/prompt/prompt.go +++ b/util/prompt/prompt.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package prompt provides a simple way to prompt the user for input. 
diff --git a/util/qrcodes/format.go b/util/qrcodes/format.go index dbd565b2ec9d3..99b58ff747fde 100644 --- a/util/qrcodes/format.go +++ b/util/qrcodes/format.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package qrcodes diff --git a/util/qrcodes/qrcodes.go b/util/qrcodes/qrcodes.go index 02e06e59b4be3..dc16fee8cfd2a 100644 --- a/util/qrcodes/qrcodes.go +++ b/util/qrcodes/qrcodes.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_qrcodes diff --git a/util/qrcodes/qrcodes_disabled.go b/util/qrcodes/qrcodes_disabled.go index fa1b89cf437ef..bda8957573780 100644 --- a/util/qrcodes/qrcodes_disabled.go +++ b/util/qrcodes/qrcodes_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_qrcodes diff --git a/util/qrcodes/qrcodes_linux.go b/util/qrcodes/qrcodes_linux.go index 8f0d40f0a5e4a..474e231e23aff 100644 --- a/util/qrcodes/qrcodes_linux.go +++ b/util/qrcodes/qrcodes_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_qrcodes diff --git a/util/qrcodes/qrcodes_notlinux.go b/util/qrcodes/qrcodes_notlinux.go index 3149a60605bf3..4a7b493ff55a9 100644 --- a/util/qrcodes/qrcodes_notlinux.go +++ b/util/qrcodes/qrcodes_notlinux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux && !ts_omit_qrcodes diff --git a/util/quarantine/quarantine.go b/util/quarantine/quarantine.go index 7ad65a81d69ee..48c032d06cfcb 100644 --- a/util/quarantine/quarantine.go +++ b/util/quarantine/quarantine.go @@ -1,4 +1,4 @@ 
-// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package quarantine sets platform specific "quarantine" attributes on files diff --git a/util/quarantine/quarantine_darwin.go b/util/quarantine/quarantine_darwin.go index 35405d9cc7a87..de1bbf70df985 100644 --- a/util/quarantine/quarantine_darwin.go +++ b/util/quarantine/quarantine_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package quarantine diff --git a/util/quarantine/quarantine_default.go b/util/quarantine/quarantine_default.go index 65954a4d25415..5158bda54314b 100644 --- a/util/quarantine/quarantine_default.go +++ b/util/quarantine/quarantine_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !windows diff --git a/util/quarantine/quarantine_windows.go b/util/quarantine/quarantine_windows.go index 6fdf4e699b75b..886b2202a4beb 100644 --- a/util/quarantine/quarantine_windows.go +++ b/util/quarantine/quarantine_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package quarantine diff --git a/util/race/race.go b/util/race/race.go index 26c8e13eb468e..8e339dad2fd03 100644 --- a/util/race/race.go +++ b/util/race/race.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package race contains a helper to "race" two functions, returning the first diff --git a/util/race/race_test.go b/util/race/race_test.go index d3838271226ac..90b049909ce3c 100644 --- a/util/race/race_test.go +++ b/util/race/race_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause package race diff --git a/util/racebuild/off.go b/util/racebuild/off.go index 8f4fe998fb4bb..2ffe9fd5370e5 100644 --- a/util/racebuild/off.go +++ b/util/racebuild/off.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !race diff --git a/util/racebuild/on.go b/util/racebuild/on.go index 69ae2bcae4239..794171c55a792 100644 --- a/util/racebuild/on.go +++ b/util/racebuild/on.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build race diff --git a/util/racebuild/racebuild.go b/util/racebuild/racebuild.go index d061276cb8a0a..9dc0fb9f77ce9 100644 --- a/util/racebuild/racebuild.go +++ b/util/racebuild/racebuild.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package racebuild exports a constant about whether the current binary diff --git a/util/rands/cheap.go b/util/rands/cheap.go index 69785e086e664..f3b931d34662b 100644 --- a/util/rands/cheap.go +++ b/util/rands/cheap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Copyright 2009 The Go Authors. All rights reserved. 
diff --git a/util/rands/cheap_test.go b/util/rands/cheap_test.go index 756b55b4e0ddc..874592a1b647e 100644 --- a/util/rands/cheap_test.go +++ b/util/rands/cheap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rands diff --git a/util/rands/rands.go b/util/rands/rands.go index d83e1e55898dc..94c6e6f4a1c29 100644 --- a/util/rands/rands.go +++ b/util/rands/rands.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package rands contains utility functions for randomness. diff --git a/util/rands/rands_test.go b/util/rands/rands_test.go index 5813f2bb46763..81cdf3bec02e0 100644 --- a/util/rands/rands_test.go +++ b/util/rands/rands_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rands diff --git a/util/reload/reload.go b/util/reload/reload.go index f18f9ebd1028c..edcb90c12a3f2 100644 --- a/util/reload/reload.go +++ b/util/reload/reload.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package reload contains functions that allow periodically reloading a value diff --git a/util/reload/reload_test.go b/util/reload/reload_test.go index f6a38168659cd..7e7963c3f7a9e 100644 --- a/util/reload/reload_test.go +++ b/util/reload/reload_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package reload diff --git a/util/ringlog/ringlog.go b/util/ringlog/ringlog.go index 62dfbae5bd5c3..d8197dda84deb 100644 --- a/util/ringlog/ringlog.go +++ b/util/ringlog/ringlog.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause // Package ringlog contains a limited-size concurrency-safe generic ring log. diff --git a/util/ringlog/ringlog_test.go b/util/ringlog/ringlog_test.go index d6776e181a4f8..8ecf99cd0f3f1 100644 --- a/util/ringlog/ringlog_test.go +++ b/util/ringlog/ringlog_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ringlog diff --git a/util/safediff/diff.go b/util/safediff/diff.go index cf8add94b21dd..c9a2c60bea1a8 100644 --- a/util/safediff/diff.go +++ b/util/safediff/diff.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package safediff computes the difference between two lists. diff --git a/util/safediff/diff_test.go b/util/safediff/diff_test.go index e580bd9222dd9..4251d788b10fe 100644 --- a/util/safediff/diff_test.go +++ b/util/safediff/diff_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package safediff diff --git a/util/set/handle.go b/util/set/handle.go index 9c6b6dab0549b..1ad86d1fd307e 100644 --- a/util/set/handle.go +++ b/util/set/handle.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/set/intset.go b/util/set/intset.go index d325246914488..04f614742e796 100644 --- a/util/set/intset.go +++ b/util/set/intset.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/set/intset_test.go b/util/set/intset_test.go index d838215c97848..6cbf5a0bb472b 100644 --- a/util/set/intset_test.go +++ b/util/set/intset_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale 
Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/set/set.go b/util/set/set.go index eb0697536f73b..df4b1fa3a24ac 100644 --- a/util/set/set.go +++ b/util/set/set.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package set contains set types. diff --git a/util/set/set_test.go b/util/set/set_test.go index 85913ad24a216..4afaeea5747fc 100644 --- a/util/set/set_test.go +++ b/util/set/set_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/set/slice.go b/util/set/slice.go index 2fc65b82d1c6e..921da4fa21622 100644 --- a/util/set/slice.go +++ b/util/set/slice.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/set/slice_test.go b/util/set/slice_test.go index 9134c296292d3..468ba686c5772 100644 --- a/util/set/slice_test.go +++ b/util/set/slice_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/set/smallset.go b/util/set/smallset.go index 1b77419d27dc9..da52dd265a939 100644 --- a/util/set/smallset.go +++ b/util/set/smallset.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set diff --git a/util/set/smallset_test.go b/util/set/smallset_test.go index d6f446df08e81..019a9d24d1f5c 100644 --- a/util/set/smallset_test.go +++ b/util/set/smallset_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package set 
diff --git a/util/singleflight/singleflight.go b/util/singleflight/singleflight.go index 9df47448b70ab..23cf7e21fec15 100644 --- a/util/singleflight/singleflight.go +++ b/util/singleflight/singleflight.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Copyright 2013 The Go Authors. All rights reserved. diff --git a/util/singleflight/singleflight_test.go b/util/singleflight/singleflight_test.go index 031922736fab6..9f0ca7f1de853 100644 --- a/util/singleflight/singleflight_test.go +++ b/util/singleflight/singleflight_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Copyright 2013 The Go Authors. All rights reserved. diff --git a/util/slicesx/slicesx.go b/util/slicesx/slicesx.go index ff9d473759fb0..660110a3c4f28 100644 --- a/util/slicesx/slicesx.go +++ b/util/slicesx/slicesx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package slicesx contains some helpful generic slice functions. 
diff --git a/util/slicesx/slicesx_test.go b/util/slicesx/slicesx_test.go index 34644928465d8..d5c87a3727748 100644 --- a/util/slicesx/slicesx_test.go +++ b/util/slicesx/slicesx_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package slicesx diff --git a/util/stringsx/stringsx.go b/util/stringsx/stringsx.go index 6c7a8d20d4221..5afea98a6a7c6 100644 --- a/util/stringsx/stringsx.go +++ b/util/stringsx/stringsx.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package stringsx provides additional string manipulation functions diff --git a/util/stringsx/stringsx_test.go b/util/stringsx/stringsx_test.go index 8575c0b278fca..afce987c08a53 100644 --- a/util/stringsx/stringsx_test.go +++ b/util/stringsx/stringsx_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package stringsx diff --git a/util/syspolicy/internal/internal.go b/util/syspolicy/internal/internal.go index 6ab147de6d096..4179f26c82cb2 100644 --- a/util/syspolicy/internal/internal.go +++ b/util/syspolicy/internal/internal.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package internal contains miscellaneous functions and types diff --git a/util/syspolicy/internal/loggerx/logger.go b/util/syspolicy/internal/loggerx/logger.go index d1f48cbb428fe..412616cb132cd 100644 --- a/util/syspolicy/internal/loggerx/logger.go +++ b/util/syspolicy/internal/loggerx/logger.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package loggerx provides logging functions to the rest of the syspolicy packages. 
diff --git a/util/syspolicy/internal/loggerx/logger_test.go b/util/syspolicy/internal/loggerx/logger_test.go index 9735b5d30c20b..5c8fb7e2860d5 100644 --- a/util/syspolicy/internal/loggerx/logger_test.go +++ b/util/syspolicy/internal/loggerx/logger_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package loggerx diff --git a/util/syspolicy/internal/metrics/metrics.go b/util/syspolicy/internal/metrics/metrics.go index 8f27456735ca6..8a3b5327fbab8 100644 --- a/util/syspolicy/internal/metrics/metrics.go +++ b/util/syspolicy/internal/metrics/metrics.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package metrics provides logging and reporting for policy settings and scopes. diff --git a/util/syspolicy/internal/metrics/metrics_test.go b/util/syspolicy/internal/metrics/metrics_test.go index a99938769712f..ce9dea98b86d7 100644 --- a/util/syspolicy/internal/metrics/metrics_test.go +++ b/util/syspolicy/internal/metrics/metrics_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics diff --git a/util/syspolicy/internal/metrics/test_handler.go b/util/syspolicy/internal/metrics/test_handler.go index 36c3f2cad876a..1ec0c9f4c3015 100644 --- a/util/syspolicy/internal/metrics/test_handler.go +++ b/util/syspolicy/internal/metrics/test_handler.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package metrics diff --git a/util/syspolicy/pkey/pkey.go b/util/syspolicy/pkey/pkey.go index e450625cd1710..9ed1d5b210b8d 100644 --- a/util/syspolicy/pkey/pkey.go +++ b/util/syspolicy/pkey/pkey.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package pkey defines the keys used to store system policies in the registry. diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index 3a54f9dde5dd7..2a4599cb85869 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syspolicy diff --git a/util/syspolicy/policy_keys_test.go b/util/syspolicy/policy_keys_test.go index c2b8d5741831d..17e2e7a9b8f92 100644 --- a/util/syspolicy/policy_keys_test.go +++ b/util/syspolicy/policy_keys_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syspolicy diff --git a/util/syspolicy/policyclient/policyclient.go b/util/syspolicy/policyclient/policyclient.go index 728a16718e8e4..e6ad208b02f65 100644 --- a/util/syspolicy/policyclient/policyclient.go +++ b/util/syspolicy/policyclient/policyclient.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package policyclient contains the minimal syspolicy interface as needed by diff --git a/util/syspolicy/policytest/policytest.go b/util/syspolicy/policytest/policytest.go index e5c1c7856d0a3..ef5ce889dd2de 100644 --- a/util/syspolicy/policytest/policytest.go +++ b/util/syspolicy/policytest/policytest.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package policytest contains test helpers for the syspolicy packages. 
diff --git a/util/syspolicy/ptype/ptype.go b/util/syspolicy/ptype/ptype.go index 65ca9e63108eb..ea8b03ad7db22 100644 --- a/util/syspolicy/ptype/ptype.go +++ b/util/syspolicy/ptype/ptype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package ptype contains types used by syspolicy. diff --git a/util/syspolicy/ptype/ptype_test.go b/util/syspolicy/ptype/ptype_test.go index 7c963398b41b1..ba7eab471b5ea 100644 --- a/util/syspolicy/ptype/ptype_test.go +++ b/util/syspolicy/ptype/ptype_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package ptype diff --git a/util/syspolicy/rsop/change_callbacks.go b/util/syspolicy/rsop/change_callbacks.go index 71135bb2ac788..0b6cd10c4cc1a 100644 --- a/util/syspolicy/rsop/change_callbacks.go +++ b/util/syspolicy/rsop/change_callbacks.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rsop diff --git a/util/syspolicy/rsop/resultant_policy.go b/util/syspolicy/rsop/resultant_policy.go index bdda909763008..5f8081a677a79 100644 --- a/util/syspolicy/rsop/resultant_policy.go +++ b/util/syspolicy/rsop/resultant_policy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rsop diff --git a/util/syspolicy/rsop/resultant_policy_test.go b/util/syspolicy/rsop/resultant_policy_test.go index 3ff1421197b1f..60132eae7a1d8 100644 --- a/util/syspolicy/rsop/resultant_policy_test.go +++ b/util/syspolicy/rsop/resultant_policy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rsop diff --git a/util/syspolicy/rsop/rsop.go 
b/util/syspolicy/rsop/rsop.go index 333dca64343c1..a57a4b34825ac 100644 --- a/util/syspolicy/rsop/rsop.go +++ b/util/syspolicy/rsop/rsop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package rsop facilitates [source.Store] registration via [RegisterStore] diff --git a/util/syspolicy/rsop/store_registration.go b/util/syspolicy/rsop/store_registration.go index a7c354b6d5678..99dbc7096fc56 100644 --- a/util/syspolicy/rsop/store_registration.go +++ b/util/syspolicy/rsop/store_registration.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package rsop diff --git a/util/syspolicy/setting/errors.go b/util/syspolicy/setting/errors.go index 38dc6a88c7f1d..655018d4b5aff 100644 --- a/util/syspolicy/setting/errors.go +++ b/util/syspolicy/setting/errors.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/origin.go b/util/syspolicy/setting/origin.go index 4c7cc7025cc48..8ed629e72a322 100644 --- a/util/syspolicy/setting/origin.go +++ b/util/syspolicy/setting/origin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/policy_scope.go b/util/syspolicy/setting/policy_scope.go index c2039fdda15b8..4162614929dd2 100644 --- a/util/syspolicy/setting/policy_scope.go +++ b/util/syspolicy/setting/policy_scope.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/policy_scope_test.go b/util/syspolicy/setting/policy_scope_test.go index 
e1b6cf7ea0a78..a2f6328151d05 100644 --- a/util/syspolicy/setting/policy_scope_test.go +++ b/util/syspolicy/setting/policy_scope_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go index ea97865f5a396..4bfb50faa1b62 100644 --- a/util/syspolicy/setting/raw_item.go +++ b/util/syspolicy/setting/raw_item.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/raw_item_test.go b/util/syspolicy/setting/raw_item_test.go index 05562d78c41f3..1a40bc829c351 100644 --- a/util/syspolicy/setting/raw_item_test.go +++ b/util/syspolicy/setting/raw_item_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/setting.go b/util/syspolicy/setting/setting.go index 97362b1dca8e0..4384e64c234f9 100644 --- a/util/syspolicy/setting/setting.go +++ b/util/syspolicy/setting/setting.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package setting contains types for defining and representing policy settings. 
diff --git a/util/syspolicy/setting/setting_test.go b/util/syspolicy/setting/setting_test.go index 9d99884f6436f..3ccd2ef606c50 100644 --- a/util/syspolicy/setting/setting_test.go +++ b/util/syspolicy/setting/setting_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 94c7ecadb2533..74cadd0be7296 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/snapshot_test.go b/util/syspolicy/setting/snapshot_test.go index 762a9681c6d7e..0385e4aefc8b4 100644 --- a/util/syspolicy/setting/snapshot_test.go +++ b/util/syspolicy/setting/snapshot_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/setting/summary.go b/util/syspolicy/setting/summary.go index 9864822f7a235..4cb15c7c4d078 100644 --- a/util/syspolicy/setting/summary.go +++ b/util/syspolicy/setting/summary.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package setting diff --git a/util/syspolicy/source/env_policy_store.go b/util/syspolicy/source/env_policy_store.go index be363b79a84eb..9b7cebfbf19e5 100644 --- a/util/syspolicy/source/env_policy_store.go +++ b/util/syspolicy/source/env_policy_store.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source diff --git a/util/syspolicy/source/env_policy_store_test.go 
b/util/syspolicy/source/env_policy_store_test.go index 3255095b2d286..5cda0f32a2984 100644 --- a/util/syspolicy/source/env_policy_store_test.go +++ b/util/syspolicy/source/env_policy_store_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source diff --git a/util/syspolicy/source/policy_reader.go b/util/syspolicy/source/policy_reader.go index 33ef22912f172..177985322d318 100644 --- a/util/syspolicy/source/policy_reader.go +++ b/util/syspolicy/source/policy_reader.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source diff --git a/util/syspolicy/source/policy_reader_test.go b/util/syspolicy/source/policy_reader_test.go index 32e8c51a6d3c9..e5a893f56877a 100644 --- a/util/syspolicy/source/policy_reader_test.go +++ b/util/syspolicy/source/policy_reader_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source diff --git a/util/syspolicy/source/policy_source.go b/util/syspolicy/source/policy_source.go index c4774217c09ac..3dfa83fd17a30 100644 --- a/util/syspolicy/source/policy_source.go +++ b/util/syspolicy/source/policy_source.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package source defines interfaces for policy stores, diff --git a/util/syspolicy/source/policy_store_windows.go b/util/syspolicy/source/policy_store_windows.go index f97b17f3afee6..edcdcae69b408 100644 --- a/util/syspolicy/source/policy_store_windows.go +++ b/util/syspolicy/source/policy_store_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source diff 
--git a/util/syspolicy/source/policy_store_windows_test.go b/util/syspolicy/source/policy_store_windows_test.go index 4ab1da805d6c8..b3ca5083d2a05 100644 --- a/util/syspolicy/source/policy_store_windows_test.go +++ b/util/syspolicy/source/policy_store_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source diff --git a/util/syspolicy/source/test_store.go b/util/syspolicy/source/test_store.go index ddec9efbb2d01..1baa138319337 100644 --- a/util/syspolicy/source/test_store.go +++ b/util/syspolicy/source/test_store.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package source diff --git a/util/syspolicy/syspolicy.go b/util/syspolicy/syspolicy.go index 48e430b674e35..7451bde758d4f 100644 --- a/util/syspolicy/syspolicy.go +++ b/util/syspolicy/syspolicy.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package syspolicy contains the implementation of system policy management. 
diff --git a/util/syspolicy/syspolicy_test.go b/util/syspolicy/syspolicy_test.go index 10f8da48657d3..532cd03b8b9a7 100644 --- a/util/syspolicy/syspolicy_test.go +++ b/util/syspolicy/syspolicy_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syspolicy diff --git a/util/syspolicy/syspolicy_windows.go b/util/syspolicy/syspolicy_windows.go index ca0fd329aca04..80c84b4570b9f 100644 --- a/util/syspolicy/syspolicy_windows.go +++ b/util/syspolicy/syspolicy_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package syspolicy diff --git a/util/sysresources/memory.go b/util/sysresources/memory.go index 7363155cdb2ae..3c6b9ae852e47 100644 --- a/util/sysresources/memory.go +++ b/util/sysresources/memory.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sysresources diff --git a/util/sysresources/memory_bsd.go b/util/sysresources/memory_bsd.go index 26850dce652ff..945f86ea35ec9 100644 --- a/util/sysresources/memory_bsd.go +++ b/util/sysresources/memory_bsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd || dragonfly || netbsd diff --git a/util/sysresources/memory_darwin.go b/util/sysresources/memory_darwin.go index e07bac0cd7f9b..165f12eb3b808 100644 --- a/util/sysresources/memory_darwin.go +++ b/util/sysresources/memory_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin diff --git a/util/sysresources/memory_linux.go b/util/sysresources/memory_linux.go index 0239b0e80d62a..3885a8aa6c66e 100644 --- 
a/util/sysresources/memory_linux.go +++ b/util/sysresources/memory_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/util/sysresources/memory_unsupported.go b/util/sysresources/memory_unsupported.go index 0fde256e0543d..c88e9ed5201e9 100644 --- a/util/sysresources/memory_unsupported.go +++ b/util/sysresources/memory_unsupported.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !(linux || darwin || freebsd || openbsd || dragonfly || netbsd) diff --git a/util/sysresources/sysresources.go b/util/sysresources/sysresources.go index 32d972ab15513..33d0d5d96a96e 100644 --- a/util/sysresources/sysresources.go +++ b/util/sysresources/sysresources.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package sysresources provides OS-independent methods of determining the diff --git a/util/sysresources/sysresources_test.go b/util/sysresources/sysresources_test.go index 331ad913bfba1..7fea1bf0f5b32 100644 --- a/util/sysresources/sysresources_test.go +++ b/util/sysresources/sysresources_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package sysresources diff --git a/util/testenv/testenv.go b/util/testenv/testenv.go index aa6660411c91b..1ae1fe8a8a0f1 100644 --- a/util/testenv/testenv.go +++ b/util/testenv/testenv.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package testenv provides utility functions for tests. 
It does not depend on diff --git a/util/testenv/testenv_test.go b/util/testenv/testenv_test.go index c647d9aec1ea4..3001d19eb2722 100644 --- a/util/testenv/testenv_test.go +++ b/util/testenv/testenv_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package testenv diff --git a/util/topk/topk.go b/util/topk/topk.go index d3bbb2c6d1055..95ebd895d05aa 100644 --- a/util/topk/topk.go +++ b/util/topk/topk.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package topk defines a count-min sketch and a cheap probabilistic top-K data diff --git a/util/topk/topk_test.go b/util/topk/topk_test.go index d30342e90de7b..06656c4204fe6 100644 --- a/util/topk/topk_test.go +++ b/util/topk/topk_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package topk diff --git a/util/truncate/truncate.go b/util/truncate/truncate.go index 310b81dd07100..7b98013f0bd59 100644 --- a/util/truncate/truncate.go +++ b/util/truncate/truncate.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package truncate provides a utility function for safely truncating UTF-8 diff --git a/util/truncate/truncate_test.go b/util/truncate/truncate_test.go index c0d9e6e14df99..6a99a0efc4706 100644 --- a/util/truncate/truncate_test.go +++ b/util/truncate/truncate_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package truncate_test diff --git a/util/usermetric/metrics.go b/util/usermetric/metrics.go index be425fb87fd6c..14c2fabbec1f5 100644 --- a/util/usermetric/metrics.go +++ b/util/usermetric/metrics.go @@ -1,4 
+1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // This file contains user-facing metrics that are used by multiple packages. diff --git a/util/usermetric/omit.go b/util/usermetric/omit.go index 0611990abe89e..c2681ebdaa3b4 100644 --- a/util/usermetric/omit.go +++ b/util/usermetric/omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_usermetrics diff --git a/util/usermetric/usermetric.go b/util/usermetric/usermetric.go index 1805a5dbee626..f435f3ec23da3 100644 --- a/util/usermetric/usermetric.go +++ b/util/usermetric/usermetric.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_usermetrics diff --git a/util/usermetric/usermetric_test.go b/util/usermetric/usermetric_test.go index e92db5bfce130..cdbb44ec057bc 100644 --- a/util/usermetric/usermetric_test.go +++ b/util/usermetric/usermetric_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package usermetric diff --git a/util/vizerror/vizerror.go b/util/vizerror/vizerror.go index 919d765d0ef2d..479bd2de9e7c8 100644 --- a/util/vizerror/vizerror.go +++ b/util/vizerror/vizerror.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package vizerror provides types and utility funcs for handling visible errors diff --git a/util/vizerror/vizerror_test.go b/util/vizerror/vizerror_test.go index 242ca6462f37b..10e8376030beb 100644 --- a/util/vizerror/vizerror_test.go +++ b/util/vizerror/vizerror_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package vizerror diff --git a/util/winutil/authenticode/authenticode_windows.go b/util/winutil/authenticode/authenticode_windows.go index 27c09b8cbb758..46f60caf76f79 100644 --- a/util/winutil/authenticode/authenticode_windows.go +++ b/util/winutil/authenticode/authenticode_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package authenticode contains Windows Authenticode signature verification code. diff --git a/util/winutil/authenticode/mksyscall.go b/util/winutil/authenticode/mksyscall.go index 8b7cabe6e4d7f..198081fce185a 100644 --- a/util/winutil/authenticode/mksyscall.go +++ b/util/winutil/authenticode/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package authenticode diff --git a/util/winutil/conpty/conpty_windows.go b/util/winutil/conpty/conpty_windows.go index 0a35759b49136..1071493f529b0 100644 --- a/util/winutil/conpty/conpty_windows.go +++ b/util/winutil/conpty/conpty_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package conpty implements support for Windows pseudo-consoles. diff --git a/util/winutil/gp/gp_windows.go b/util/winutil/gp/gp_windows.go index dd0e695eb08f2..dd13a27012d18 100644 --- a/util/winutil/gp/gp_windows.go +++ b/util/winutil/gp/gp_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package gp contains [Group Policy]-related functions and types. 
diff --git a/util/winutil/gp/gp_windows_test.go b/util/winutil/gp/gp_windows_test.go index f892068835bce..dfad029302c47 100644 --- a/util/winutil/gp/gp_windows_test.go +++ b/util/winutil/gp/gp_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package gp diff --git a/util/winutil/gp/mksyscall.go b/util/winutil/gp/mksyscall.go index 3f3682d64d07e..22fd0c137894f 100644 --- a/util/winutil/gp/mksyscall.go +++ b/util/winutil/gp/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package gp diff --git a/util/winutil/gp/policylock_windows.go b/util/winutil/gp/policylock_windows.go index 6c3ca0baf6d21..6e6f63f820489 100644 --- a/util/winutil/gp/policylock_windows.go +++ b/util/winutil/gp/policylock_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package gp diff --git a/util/winutil/gp/watcher_windows.go b/util/winutil/gp/watcher_windows.go index ae66c391ff595..8a6538e8bb619 100644 --- a/util/winutil/gp/watcher_windows.go +++ b/util/winutil/gp/watcher_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package gp diff --git a/util/winutil/mksyscall.go b/util/winutil/mksyscall.go index afee739986cda..f3b61f2dda4ad 100644 --- a/util/winutil/mksyscall.go +++ b/util/winutil/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/policy/policy_windows.go b/util/winutil/policy/policy_windows.go index 89142951f8bd5..c831066034608 100644 --- a/util/winutil/policy/policy_windows.go +++ 
b/util/winutil/policy/policy_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package policy contains higher-level abstractions for accessing Windows enterprise policies. diff --git a/util/winutil/policy/policy_windows_test.go b/util/winutil/policy/policy_windows_test.go index cf2390c568cce..881c08356b99a 100644 --- a/util/winutil/policy/policy_windows_test.go +++ b/util/winutil/policy/policy_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package policy diff --git a/util/winutil/restartmgr_windows.go b/util/winutil/restartmgr_windows.go index 6f549de557653..3ef8a0383b2ba 100644 --- a/util/winutil/restartmgr_windows.go +++ b/util/winutil/restartmgr_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/restartmgr_windows_test.go b/util/winutil/restartmgr_windows_test.go index 6b2d75c3c5459..eb11ffc9ce51f 100644 --- a/util/winutil/restartmgr_windows_test.go +++ b/util/winutil/restartmgr_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/s4u/lsa_windows.go b/util/winutil/s4u/lsa_windows.go index 3276b26766c08..a26a7bcf094fa 100644 --- a/util/winutil/s4u/lsa_windows.go +++ b/util/winutil/s4u/lsa_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package s4u diff --git a/util/winutil/s4u/mksyscall.go b/util/winutil/s4u/mksyscall.go index 8925c0209b124..b8ab33672c563 100644 --- a/util/winutil/s4u/mksyscall.go +++ b/util/winutil/s4u/mksyscall.go @@ -1,4 
+1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package s4u diff --git a/util/winutil/s4u/s4u_windows.go b/util/winutil/s4u/s4u_windows.go index 8c8e02dbe83bc..a5b543cab5ea1 100644 --- a/util/winutil/s4u/s4u_windows.go +++ b/util/winutil/s4u/s4u_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package s4u is an API for accessing Service-For-User (S4U) functionality on Windows. diff --git a/util/winutil/startupinfo_windows.go b/util/winutil/startupinfo_windows.go index edf48fa651cb5..5ded67c7c78e1 100644 --- a/util/winutil/startupinfo_windows.go +++ b/util/winutil/startupinfo_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/svcdiag_windows.go b/util/winutil/svcdiag_windows.go index 372377cf93217..e28f9b6af58d6 100644 --- a/util/winutil/svcdiag_windows.go +++ b/util/winutil/svcdiag_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/testdata/testrestartableprocesses/restartableprocess_windows.go b/util/winutil/testdata/testrestartableprocesses/restartableprocess_windows.go index 8a4e1b7f72c79..3ef834fd54546 100644 --- a/util/winutil/testdata/testrestartableprocesses/restartableprocess_windows.go +++ b/util/winutil/testdata/testrestartableprocesses/restartableprocess_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // The testrestartableprocesses is a program for a test. 
diff --git a/util/winutil/userprofile_windows.go b/util/winutil/userprofile_windows.go index d2e6067c7a93f..c7fb028966ba6 100644 --- a/util/winutil/userprofile_windows.go +++ b/util/winutil/userprofile_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/userprofile_windows_test.go b/util/winutil/userprofile_windows_test.go index 09dcfd59627aa..0a21cea6ac10f 100644 --- a/util/winutil/userprofile_windows_test.go +++ b/util/winutil/userprofile_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/winenv/mksyscall.go b/util/winutil/winenv/mksyscall.go index 9737c40c470bb..77d8ec66ab2a2 100644 --- a/util/winutil/winenv/mksyscall.go +++ b/util/winutil/winenv/mksyscall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winenv diff --git a/util/winutil/winenv/winenv_windows.go b/util/winutil/winenv/winenv_windows.go index 81fe4202633fb..eb7e87cedc320 100644 --- a/util/winutil/winenv/winenv_windows.go +++ b/util/winutil/winenv/winenv_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package winenv provides information about the current Windows environment. diff --git a/util/winutil/winutil.go b/util/winutil/winutil.go index ca231363acf1b..84ac2b1e341ef 100644 --- a/util/winutil/winutil.go +++ b/util/winutil/winutil.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package winutil contains misc Windows/Win32 helper functions. 
diff --git a/util/winutil/winutil_notwindows.go b/util/winutil/winutil_notwindows.go index caa415e08a513..774a0dad7af08 100644 --- a/util/winutil/winutil_notwindows.go +++ b/util/winutil/winutil_notwindows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !windows diff --git a/util/winutil/winutil_windows.go b/util/winutil/winutil_windows.go index c935b210e9e6a..cab0dabdf3694 100644 --- a/util/winutil/winutil_windows.go +++ b/util/winutil/winutil_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/winutil/winutil_windows_test.go b/util/winutil/winutil_windows_test.go index ead10a45d7ee8..955006789bc43 100644 --- a/util/winutil/winutil_windows_test.go +++ b/util/winutil/winutil_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winutil diff --git a/util/zstdframe/options.go b/util/zstdframe/options.go index b4b0f2b85304c..67ab27169166d 100644 --- a/util/zstdframe/options.go +++ b/util/zstdframe/options.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package zstdframe diff --git a/util/zstdframe/zstd.go b/util/zstdframe/zstd.go index b207984182b15..69fe3ee4017df 100644 --- a/util/zstdframe/zstd.go +++ b/util/zstdframe/zstd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package zstdframe provides functionality for encoding and decoding diff --git a/util/zstdframe/zstd_test.go b/util/zstdframe/zstd_test.go index 120fd3508460f..302090b9951b8 100644 --- a/util/zstdframe/zstd_test.go +++ 
b/util/zstdframe/zstd_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package zstdframe diff --git a/version-embed.go b/version-embed.go index 17bf578dd33f1..9f48d1384ff67 100644 --- a/version-embed.go +++ b/version-embed.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package tailscaleroot embeds VERSION.txt into the binary. diff --git a/version/cmdname.go b/version/cmdname.go index c38544ce1642c..8a4040f9718b9 100644 --- a/version/cmdname.go +++ b/version/cmdname.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios diff --git a/version/cmdname_ios.go b/version/cmdname_ios.go index 6bfed38b64226..1e6ec9dec4b23 100644 --- a/version/cmdname_ios.go +++ b/version/cmdname_ios.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios diff --git a/version/cmp.go b/version/cmp.go index 494a7ea72947f..4af0aec69ea6e 100644 --- a/version/cmp.go +++ b/version/cmp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version diff --git a/version/cmp_test.go b/version/cmp_test.go index e244d5e16fe22..10fc130b768eb 100644 --- a/version/cmp_test.go +++ b/version/cmp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version_test diff --git a/version/distro/distro.go b/version/distro/distro.go index 0e88bdd2fa297..03c02ccab91cf 100644 --- a/version/distro/distro.go +++ b/version/distro/distro.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & 
AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package distro reports which distro we're running on. diff --git a/version/distro/distro_test.go b/version/distro/distro_test.go index 4d61c720581c7..f3460a180edc2 100644 --- a/version/distro/distro_test.go +++ b/version/distro/distro_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package distro diff --git a/version/exename.go b/version/exename.go index d5047c2038ffe..adb5236177e31 100644 --- a/version/exename.go +++ b/version/exename.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version diff --git a/version/export_test.go b/version/export_test.go index 8e8ce5ecb2129..ec43ad33248a7 100644 --- a/version/export_test.go +++ b/version/export_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version diff --git a/version/mkversion/mkversion.go b/version/mkversion/mkversion.go index 2fa84480dd144..f42b3ad036de3 100644 --- a/version/mkversion/mkversion.go +++ b/version/mkversion/mkversion.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package mkversion gets version info from git and provides a bunch of diff --git a/version/mkversion/mkversion_test.go b/version/mkversion/mkversion_test.go index 210d3053a14a3..2f1a922c98924 100644 --- a/version/mkversion/mkversion_test.go +++ b/version/mkversion/mkversion_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package mkversion diff --git a/version/modinfo_test.go b/version/modinfo_test.go index 
746e6296de795..ef75ce0771a47 100644 --- a/version/modinfo_test.go +++ b/version/modinfo_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version_test diff --git a/version/print.go b/version/print.go index 43ee2b5591410..ca62226ee2b6d 100644 --- a/version/print.go +++ b/version/print.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version diff --git a/version/prop.go b/version/prop.go index 795f3a9127be0..36d7699176f1e 100644 --- a/version/prop.go +++ b/version/prop.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version diff --git a/version/race.go b/version/race.go index e1dc76591ebf4..1cea65e7111e4 100644 --- a/version/race.go +++ b/version/race.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build race diff --git a/version/race_off.go b/version/race_off.go index 6db901974bb77..cbe7c198b2754 100644 --- a/version/race_off.go +++ b/version/race_off.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !race diff --git a/version/version.go b/version/version.go index 2add25689e1dd..1171ed2ffe722 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package version provides the version that the binary was built at. 
diff --git a/version/version_checkformat.go b/version/version_checkformat.go index 05a97d1912dbe..970010ddf21d6 100644 --- a/version/version_checkformat.go +++ b/version/version_checkformat.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go && android diff --git a/version/version_internal_test.go b/version/version_internal_test.go index b3b848276e820..c78df4ff81a70 100644 --- a/version/version_internal_test.go +++ b/version/version_internal_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version diff --git a/version/version_test.go b/version/version_test.go index a515650586cc4..ebae7f177613a 100644 --- a/version/version_test.go +++ b/version/version_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package version_test diff --git a/version_tailscale_test.go b/version_tailscale_test.go index 0a690e312202f..60a8d54f48093 100644 --- a/version_tailscale_test.go +++ b/version_tailscale_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build tailscale_go diff --git a/version_test.go b/version_test.go index 3d983a19d51db..6fb3ddef9d52b 100644 --- a/version_test.go +++ b/version_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package tailscaleroot diff --git a/wf/firewall.go b/wf/firewall.go index 07e160eb36071..5209c2293b10c 100644 --- a/wf/firewall.go +++ b/wf/firewall.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build 
windows diff --git a/wgengine/bench/bench.go b/wgengine/bench/bench.go index 8695f18d15899..7ce673b488e4a 100644 --- a/wgengine/bench/bench.go +++ b/wgengine/bench/bench.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Create two wgengine instances and pass data through them, measuring diff --git a/wgengine/bench/bench_test.go b/wgengine/bench/bench_test.go index 4fae86c0580ba..8788f4721a0e0 100644 --- a/wgengine/bench/bench_test.go +++ b/wgengine/bench/bench_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Create two wgengine instances and pass data through them, measuring diff --git a/wgengine/bench/trafficgen.go b/wgengine/bench/trafficgen.go index ce79c616f86ed..3be398d5348d1 100644 --- a/wgengine/bench/trafficgen.go +++ b/wgengine/bench/trafficgen.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index ce6add866f9e8..7b35a089aebcc 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package main diff --git a/wgengine/filter/filter.go b/wgengine/filter/filter.go index 987fcee0153a6..63a7aee1e461f 100644 --- a/wgengine/filter/filter.go +++ b/wgengine/filter/filter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package filter is a stateful packet filter. 
diff --git a/wgengine/filter/filter_test.go b/wgengine/filter/filter_test.go index ae39eeb08692f..4b364d30e85cb 100644 --- a/wgengine/filter/filter_test.go +++ b/wgengine/filter/filter_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package filter diff --git a/wgengine/filter/filtertype/filtertype.go b/wgengine/filter/filtertype/filtertype.go index 212eda43f1404..aab5fe8eef046 100644 --- a/wgengine/filter/filtertype/filtertype.go +++ b/wgengine/filter/filtertype/filtertype.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package filtertype defines the types used by wgengine/filter. diff --git a/wgengine/filter/filtertype/filtertype_clone.go b/wgengine/filter/filtertype/filtertype_clone.go index 63709188ea5c1..094063a5d1305 100644 --- a/wgengine/filter/filtertype/filtertype_clone.go +++ b/wgengine/filter/filtertype/filtertype_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. 
diff --git a/wgengine/filter/match.go b/wgengine/filter/match.go index 6292c49714a49..eee6ddf258fa1 100644 --- a/wgengine/filter/match.go +++ b/wgengine/filter/match.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package filter diff --git a/wgengine/filter/tailcfg.go b/wgengine/filter/tailcfg.go index ff81077f727b7..e7e71526a43bc 100644 --- a/wgengine/filter/tailcfg.go +++ b/wgengine/filter/tailcfg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package filter diff --git a/wgengine/magicsock/blockforever_conn.go b/wgengine/magicsock/blockforever_conn.go index 272a12513b353..a215826b751f7 100644 --- a/wgengine/magicsock/blockforever_conn.go +++ b/wgengine/magicsock/blockforever_conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/debughttp.go b/wgengine/magicsock/debughttp.go index 9aecab74b4278..68019d0a76cbb 100644 --- a/wgengine/magicsock/debughttp.go +++ b/wgengine/magicsock/debughttp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/debugknobs.go b/wgengine/magicsock/debugknobs.go index b0a47ff87f31b..39cec25e64885 100644 --- a/wgengine/magicsock/debugknobs.go +++ b/wgengine/magicsock/debugknobs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !js diff --git a/wgengine/magicsock/debugknobs_stubs.go b/wgengine/magicsock/debugknobs_stubs.go index 7dee1d6b0b91c..c156ff8a7d92b 100644 --- a/wgengine/magicsock/debugknobs_stubs.go +++ 
b/wgengine/magicsock/debugknobs_stubs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios || js diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 1c5225e2249b5..b3cc5c2ce4927 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/derp_test.go b/wgengine/magicsock/derp_test.go index ffb230789e4c8..084f710d8526d 100644 --- a/wgengine/magicsock/derp_test.go +++ b/wgengine/magicsock/derp_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/disco_atomic.go b/wgengine/magicsock/disco_atomic.go index 5b765fbc2c9a0..e17ce2f97eb30 100644 --- a/wgengine/magicsock/disco_atomic.go +++ b/wgengine/magicsock/disco_atomic.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/disco_atomic_test.go b/wgengine/magicsock/disco_atomic_test.go index a1de9b843379f..cec4b1133b274 100644 --- a/wgengine/magicsock/disco_atomic_test.go +++ b/wgengine/magicsock/disco_atomic_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/discopingpurpose_string.go b/wgengine/magicsock/discopingpurpose_string.go index 8eebf97a2dbd9..4cfbc751cf81c 100644 --- a/wgengine/magicsock/discopingpurpose_string.go +++ b/wgengine/magicsock/discopingpurpose_string.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// 
Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by "stringer -type=discoPingPurpose -trimprefix=ping"; DO NOT EDIT. diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 586a2dc75c5cc..1f99f57ec2d16 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/endpoint_default.go b/wgengine/magicsock/endpoint_default.go index 1ed6e5e0e2399..59a47a98602bc 100644 --- a/wgengine/magicsock/endpoint_default.go +++ b/wgengine/magicsock/endpoint_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !wasm && !plan9 diff --git a/wgengine/magicsock/endpoint_stub.go b/wgengine/magicsock/endpoint_stub.go index a209c352bfe5e..da153abe57152 100644 --- a/wgengine/magicsock/endpoint_stub.go +++ b/wgengine/magicsock/endpoint_stub.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build wasm || plan9 diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index f1dab924f5d3b..43ff012c73d61 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/endpoint_tracker.go b/wgengine/magicsock/endpoint_tracker.go index e95852d2491b7..372f346853723 100644 --- a/wgengine/magicsock/endpoint_tracker.go +++ b/wgengine/magicsock/endpoint_tracker.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) 
Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/endpoint_tracker_test.go b/wgengine/magicsock/endpoint_tracker_test.go index 6fccdfd576878..b3b1a63d94e29 100644 --- a/wgengine/magicsock/endpoint_tracker_test.go +++ b/wgengine/magicsock/endpoint_tracker_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 1c13093478a2e..7c5442d0b996c 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package magicsock implements a socket that can change its communication path while diff --git a/wgengine/magicsock/magicsock_default.go b/wgengine/magicsock/magicsock_default.go index 88759d3acc2e3..fc3e656b245f3 100644 --- a/wgengine/magicsock/magicsock_default.go +++ b/wgengine/magicsock/magicsock_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !linux || ts_omit_listenrawdisco diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index f37e19165141f..522341e721317 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !ts_omit_listenrawdisco diff --git a/wgengine/magicsock/magicsock_linux_test.go b/wgengine/magicsock/magicsock_linux_test.go index 28ccd220ee784..b670fa6bab601 100644 --- a/wgengine/magicsock/magicsock_linux_test.go +++ b/wgengine/magicsock/magicsock_linux_test.go @@ 
-1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/magicsock_notplan9.go b/wgengine/magicsock/magicsock_notplan9.go index 86d099ee7f48c..db2c5fca052b9 100644 --- a/wgengine/magicsock/magicsock_notplan9.go +++ b/wgengine/magicsock/magicsock_notplan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !plan9 diff --git a/wgengine/magicsock/magicsock_plan9.go b/wgengine/magicsock/magicsock_plan9.go index 65714c3e13c33..a234beecf6f8a 100644 --- a/wgengine/magicsock/magicsock_plan9.go +++ b/wgengine/magicsock/magicsock_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build plan9 diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 68ab4dfa012a7..3b7ceeaa23323 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/peermap.go b/wgengine/magicsock/peermap.go index 136353563e2bd..b6e9b08a360ed 100644 --- a/wgengine/magicsock/peermap.go +++ b/wgengine/magicsock/peermap.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/peermap_test.go b/wgengine/magicsock/peermap_test.go index 171e22a6d5795..7fcd09384e540 100644 --- a/wgengine/magicsock/peermap_test.go +++ b/wgengine/magicsock/peermap_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/peermtu.go b/wgengine/magicsock/peermtu.go index b675bf409cfa4..6f3df50a3c435 100644 --- a/wgengine/magicsock/peermtu.go +++ b/wgengine/magicsock/peermtu.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (darwin && !ios) || (linux && !android) diff --git a/wgengine/magicsock/peermtu_darwin.go b/wgengine/magicsock/peermtu_darwin.go index a0a1aacb55f5f..007c413f5efc5 100644 --- a/wgengine/magicsock/peermtu_darwin.go +++ b/wgengine/magicsock/peermtu_darwin.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin && !ios diff --git a/wgengine/magicsock/peermtu_linux.go b/wgengine/magicsock/peermtu_linux.go index b76f30f081042..5a6b5a64e9bc9 100644 --- a/wgengine/magicsock/peermtu_linux.go +++ b/wgengine/magicsock/peermtu_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux && !android diff --git a/wgengine/magicsock/peermtu_stubs.go b/wgengine/magicsock/peermtu_stubs.go index e4f8038a42f21..a7fc5c99ba869 100644 --- a/wgengine/magicsock/peermtu_stubs.go +++ b/wgengine/magicsock/peermtu_stubs.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build (!linux && !darwin) || android || ios diff --git a/wgengine/magicsock/peermtu_unix.go b/wgengine/magicsock/peermtu_unix.go index eec3d744f3ded..7c394e98e0fa5 100644 --- a/wgengine/magicsock/peermtu_unix.go +++ b/wgengine/magicsock/peermtu_unix.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build 
(darwin && !ios) || (linux && !android) diff --git a/wgengine/magicsock/rebinding_conn.go b/wgengine/magicsock/rebinding_conn.go index c98e645705b46..e00eed1f5c88c 100644 --- a/wgengine/magicsock/rebinding_conn.go +++ b/wgengine/magicsock/rebinding_conn.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 69831a4df19f8..e4cd5eb9ff537 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index e8fddfd91b46e..7d773e381a7c4 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package magicsock diff --git a/wgengine/mem_ios.go b/wgengine/mem_ios.go index cc266ea3aadc8..f278359a8809b 100644 --- a/wgengine/mem_ios.go +++ b/wgengine/mem_ios.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgengine diff --git a/wgengine/netlog/netlog.go b/wgengine/netlog/netlog.go index 12fe9c797641a..e03a520f29bce 100644 --- a/wgengine/netlog/netlog.go +++ b/wgengine/netlog/netlog.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_netlog && !ts_omit_logtail diff --git a/wgengine/netlog/netlog_omit.go b/wgengine/netlog/netlog_omit.go index 03610a1ef017a..041a183769554 100644 --- 
a/wgengine/netlog/netlog_omit.go +++ b/wgengine/netlog/netlog_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_netlog || ts_omit_logtail diff --git a/wgengine/netlog/netlog_test.go b/wgengine/netlog/netlog_test.go index b4758c7ec7beb..cc6968547cdbf 100644 --- a/wgengine/netlog/netlog_test.go +++ b/wgengine/netlog/netlog_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_netlog && !ts_omit_logtail diff --git a/wgengine/netlog/record.go b/wgengine/netlog/record.go index 25b6b1148793a..62c3a6866da2a 100644 --- a/wgengine/netlog/record.go +++ b/wgengine/netlog/record.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_netlog && !ts_omit_logtail diff --git a/wgengine/netlog/record_test.go b/wgengine/netlog/record_test.go index ec0229534f244..1edae7450c842 100644 --- a/wgengine/netlog/record_test.go +++ b/wgengine/netlog/record_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_netlog && !ts_omit_logtail diff --git a/wgengine/netstack/gro/gro.go b/wgengine/netstack/gro/gro.go index c8e5e56e1acb5..152b252951c80 100644 --- a/wgengine/netstack/gro/gro.go +++ b/wgengine/netstack/gro/gro.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_netstack diff --git a/wgengine/netstack/gro/gro_default.go b/wgengine/netstack/gro/gro_default.go index c70e19f7c5861..ac9d672ab8a6e 100644 --- a/wgengine/netstack/gro/gro_default.go +++ b/wgengine/netstack/gro/gro_default.go @@ -1,4 +1,4 @@ -// 
Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios && !ts_omit_gro diff --git a/wgengine/netstack/gro/gro_disabled.go b/wgengine/netstack/gro/gro_disabled.go index d7ffbd9139d99..9b2ae69955c97 100644 --- a/wgengine/netstack/gro/gro_disabled.go +++ b/wgengine/netstack/gro/gro_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios || ts_omit_gro diff --git a/wgengine/netstack/gro/gro_test.go b/wgengine/netstack/gro/gro_test.go index 1eb200a05134c..49171b78c97aa 100644 --- a/wgengine/netstack/gro/gro_test.go +++ b/wgengine/netstack/gro/gro_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package gro diff --git a/wgengine/netstack/gro/netstack_disabled.go b/wgengine/netstack/gro/netstack_disabled.go index a0f56fa4499cf..a61b90b48ed91 100644 --- a/wgengine/netstack/gro/netstack_disabled.go +++ b/wgengine/netstack/gro/netstack_disabled.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_netstack diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index c5a9dbcbca538..4800ed1673d20 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstack diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index e05846e150a27..59fc0e0694bcc 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors 
// SPDX-License-Identifier: BSD-3-Clause // Package netstack wires up gVisor's netstack into Tailscale. diff --git a/wgengine/netstack/netstack_linux.go b/wgengine/netstack/netstack_linux.go index a0bfb44567da7..a0f431cf6fdb4 100644 --- a/wgengine/netstack/netstack_linux.go +++ b/wgengine/netstack/netstack_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstack diff --git a/wgengine/netstack/netstack_tcpbuf_default.go b/wgengine/netstack/netstack_tcpbuf_default.go index 3640964ffe399..ed93175c4fed1 100644 --- a/wgengine/netstack/netstack_tcpbuf_default.go +++ b/wgengine/netstack/netstack_tcpbuf_default.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ios diff --git a/wgengine/netstack/netstack_tcpbuf_ios.go b/wgengine/netstack/netstack_tcpbuf_ios.go index a4210c9ac7517..a5368da8633d7 100644 --- a/wgengine/netstack/netstack_tcpbuf_ios.go +++ b/wgengine/netstack/netstack_tcpbuf_ios.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ios diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 93022811ce409..f9903c0c210d5 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstack diff --git a/wgengine/netstack/netstack_userping.go b/wgengine/netstack/netstack_userping.go index b35a6eca9e11b..d42c8fbe7c271 100644 --- a/wgengine/netstack/netstack_userping.go +++ b/wgengine/netstack/netstack_userping.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // 
SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !ios diff --git a/wgengine/netstack/netstack_userping_apple.go b/wgengine/netstack/netstack_userping_apple.go index 52fb7a24a4c41..a82b81e99e827 100644 --- a/wgengine/netstack/netstack_userping_apple.go +++ b/wgengine/netstack/netstack_userping_apple.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || ios diff --git a/wgengine/netstack/netstack_userping_test.go b/wgengine/netstack/netstack_userping_test.go index a179f74673469..cba298d453698 100644 --- a/wgengine/netstack/netstack_userping_test.go +++ b/wgengine/netstack/netstack_userping_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package netstack diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 7eaf43e52a816..77cb4a7b9b451 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !ts_omit_debug diff --git a/wgengine/pendopen_omit.go b/wgengine/pendopen_omit.go index 013425d357f26..01d33306b291e 100644 --- a/wgengine/pendopen_omit.go +++ b/wgengine/pendopen_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build ts_omit_debug diff --git a/wgengine/router/callback.go b/wgengine/router/callback.go index c1838539ba2a3..11ce832f4f5e4 100644 --- a/wgengine/router/callback.go +++ b/wgengine/router/callback.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package router diff --git a/wgengine/router/consolidating_router.go b/wgengine/router/consolidating_router.go 
index 10c4825d8856a..14283330b124c 100644 --- a/wgengine/router/consolidating_router.go +++ b/wgengine/router/consolidating_router.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package router diff --git a/wgengine/router/consolidating_router_test.go b/wgengine/router/consolidating_router_test.go index ba2e4d07a746a..1bf79a29d614d 100644 --- a/wgengine/router/consolidating_router_test.go +++ b/wgengine/router/consolidating_router_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package router diff --git a/wgengine/router/osrouter/ifconfig_windows_test.go b/wgengine/router/osrouter/ifconfig_windows_test.go index b858ef4f60d19..f272a59f899f6 100644 --- a/wgengine/router/osrouter/ifconfig_windows_test.go +++ b/wgengine/router/osrouter/ifconfig_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osrouter diff --git a/wgengine/router/osrouter/osrouter.go b/wgengine/router/osrouter/osrouter.go index 281454b069984..ac4e48c7268c7 100644 --- a/wgengine/router/osrouter/osrouter.go +++ b/wgengine/router/osrouter/osrouter.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package osrouter contains OS-specific router implementations. 
diff --git a/wgengine/router/osrouter/osrouter_test.go b/wgengine/router/osrouter/osrouter_test.go index d0cb3db6968c1..5e81d6297d035 100644 --- a/wgengine/router/osrouter/osrouter_test.go +++ b/wgengine/router/osrouter/osrouter_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osrouter diff --git a/wgengine/router/osrouter/router_freebsd.go b/wgengine/router/osrouter/router_freebsd.go index a142e7a84e14a..c1e1a389b8537 100644 --- a/wgengine/router/osrouter/router_freebsd.go +++ b/wgengine/router/osrouter/router_freebsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osrouter diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 7442c045ee079..8ca38f9ecd15d 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !android diff --git a/wgengine/router/osrouter/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go index 68ed8dbb2bb64..bce0ea09275e3 100644 --- a/wgengine/router/osrouter/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osrouter diff --git a/wgengine/router/osrouter/router_openbsd.go b/wgengine/router/osrouter/router_openbsd.go index 55b485f0e7a9e..8807a32d5b860 100644 --- a/wgengine/router/osrouter/router_openbsd.go +++ b/wgengine/router/osrouter/router_openbsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: 
BSD-3-Clause package osrouter diff --git a/wgengine/router/osrouter/router_plan9.go b/wgengine/router/osrouter/router_plan9.go index a5b461a6fff67..1436ee8a2191a 100644 --- a/wgengine/router/osrouter/router_plan9.go +++ b/wgengine/router/osrouter/router_plan9.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osrouter diff --git a/wgengine/router/osrouter/router_userspace_bsd.go b/wgengine/router/osrouter/router_userspace_bsd.go index 70ef2b6bf3ca9..272594d7c427b 100644 --- a/wgengine/router/osrouter/router_userspace_bsd.go +++ b/wgengine/router/osrouter/router_userspace_bsd.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd diff --git a/wgengine/router/osrouter/router_windows.go b/wgengine/router/osrouter/router_windows.go index a1acbe3b67287..ef9eb04a147a7 100644 --- a/wgengine/router/osrouter/router_windows.go +++ b/wgengine/router/osrouter/router_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osrouter diff --git a/wgengine/router/osrouter/router_windows_test.go b/wgengine/router/osrouter/router_windows_test.go index 119b6a77867f9..abbbdf93b1fcb 100644 --- a/wgengine/router/osrouter/router_windows_test.go +++ b/wgengine/router/osrouter/router_windows_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package osrouter diff --git a/wgengine/router/osrouter/runner.go b/wgengine/router/osrouter/runner.go index 7afb7fdc2088f..bdc710a8d369a 100644 --- a/wgengine/router/osrouter/runner.go +++ b/wgengine/router/osrouter/runner.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & 
contributors // SPDX-License-Identifier: BSD-3-Clause //go:build linux diff --git a/wgengine/router/router.go b/wgengine/router/router.go index 04cc898876557..6868acb43ee2b 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package router presents an interface to manipulate the host network diff --git a/wgengine/router/router_fake.go b/wgengine/router/router_fake.go index db35fc9eebe15..6b3bc044aea6d 100644 --- a/wgengine/router/router_fake.go +++ b/wgengine/router/router_fake.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package router diff --git a/wgengine/router/router_test.go b/wgengine/router/router_test.go index fd17b8c5d5297..28750e115a9e3 100644 --- a/wgengine/router/router_test.go +++ b/wgengine/router/router_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package router diff --git a/wgengine/userspace.go b/wgengine/userspace.go index dbc8e8b573c49..e69712061f5c9 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgengine diff --git a/wgengine/userspace_ext_test.go b/wgengine/userspace_ext_test.go index 8e7bbb7a9c5c9..2d41a2df08dd2 100644 --- a/wgengine/userspace_ext_test.go +++ b/wgengine/userspace_ext_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgengine_test diff --git a/wgengine/userspace_test.go b/wgengine/userspace_test.go index 0a1d2924d593b..b06ea527b27ba 100644 --- a/wgengine/userspace_test.go +++ 
b/wgengine/userspace_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgengine diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go index 9cc4ed3b594c3..18b36e0039d6d 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build !js && !ts_omit_debug diff --git a/wgengine/watchdog_omit.go b/wgengine/watchdog_omit.go index 1d175b41a87eb..b4ed4344292e6 100644 --- a/wgengine/watchdog_omit.go +++ b/wgengine/watchdog_omit.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build js || ts_omit_debug diff --git a/wgengine/watchdog_test.go b/wgengine/watchdog_test.go index 35fd8f33105e6..47f133373c445 100644 --- a/wgengine/watchdog_test.go +++ b/wgengine/watchdog_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgengine diff --git a/wgengine/wgcfg/config.go b/wgengine/wgcfg/config.go index 2734f6c6ea969..7828121390fba 100644 --- a/wgengine/wgcfg/config.go +++ b/wgengine/wgcfg/config.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wgcfg has types and a parser for representing WireGuard config. 
diff --git a/wgengine/wgcfg/config_test.go b/wgengine/wgcfg/config_test.go index 5ac3b7cd56376..b15b8cbf56f8b 100644 --- a/wgengine/wgcfg/config_test.go +++ b/wgengine/wgcfg/config_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg diff --git a/wgengine/wgcfg/device.go b/wgengine/wgcfg/device.go index ee7eb91c93b66..ba29cfbdca8c0 100644 --- a/wgengine/wgcfg/device.go +++ b/wgengine/wgcfg/device.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg diff --git a/wgengine/wgcfg/device_test.go b/wgengine/wgcfg/device_test.go index 9138d6e5a0f47..a0443147db80d 100644 --- a/wgengine/wgcfg/device_test.go +++ b/wgengine/wgcfg/device_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg diff --git a/wgengine/wgcfg/nmcfg/nmcfg.go b/wgengine/wgcfg/nmcfg/nmcfg.go index a42827337d5c6..f99b7b007a564 100644 --- a/wgengine/wgcfg/nmcfg/nmcfg.go +++ b/wgengine/wgcfg/nmcfg/nmcfg.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package nmcfg converts a controlclient.NetMap into a wgcfg config. 
diff --git a/wgengine/wgcfg/parser.go b/wgengine/wgcfg/parser.go index ec3d008f7de97..8fb9214091a42 100644 --- a/wgengine/wgcfg/parser.go +++ b/wgengine/wgcfg/parser.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg diff --git a/wgengine/wgcfg/parser_test.go b/wgengine/wgcfg/parser_test.go index a5d7ad44f2e39..8c38ec0251b21 100644 --- a/wgengine/wgcfg/parser_test.go +++ b/wgengine/wgcfg/parser_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg diff --git a/wgengine/wgcfg/wgcfg_clone.go b/wgengine/wgcfg/wgcfg_clone.go index 9f3cabde182f9..5c771a2288fce 100644 --- a/wgengine/wgcfg/wgcfg_clone.go +++ b/wgengine/wgcfg/wgcfg_clone.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. diff --git a/wgengine/wgcfg/writer.go b/wgengine/wgcfg/writer.go index 9cdd31df2e38c..f4981e3e9185b 100644 --- a/wgengine/wgcfg/writer.go +++ b/wgengine/wgcfg/writer.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgcfg diff --git a/wgengine/wgengine.go b/wgengine/wgengine.go index be78731474bc9..9dd782e4ab44f 100644 --- a/wgengine/wgengine.go +++ b/wgengine/wgengine.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wgengine provides the Tailscale WireGuard engine interface. 
diff --git a/wgengine/wgint/wgint.go b/wgengine/wgint/wgint.go index 309113df71d41..88c48486e3fc6 100644 --- a/wgengine/wgint/wgint.go +++ b/wgengine/wgint/wgint.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wgint provides somewhat shady access to wireguard-go diff --git a/wgengine/wgint/wgint_test.go b/wgengine/wgint/wgint_test.go index 714d2044b1806..3409a7fde2d15 100644 --- a/wgengine/wgint/wgint_test.go +++ b/wgengine/wgint/wgint_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wgint diff --git a/wgengine/wglog/wglog.go b/wgengine/wglog/wglog.go index dabd4562ad704..174babb91a933 100644 --- a/wgengine/wglog/wglog.go +++ b/wgengine/wglog/wglog.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wglog contains logging helpers for wireguard-go. 
diff --git a/wgengine/wglog/wglog_test.go b/wgengine/wglog/wglog_test.go index 9e9850f39ef59..2e82f9312a5dc 100644 --- a/wgengine/wglog/wglog_test.go +++ b/wgengine/wglog/wglog_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package wglog_test diff --git a/wgengine/winnet/winnet.go b/wgengine/winnet/winnet.go index e04e6f5c5b1a0..a5a84b04bd25c 100644 --- a/wgengine/winnet/winnet.go +++ b/wgengine/winnet/winnet.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause //go:build windows diff --git a/wgengine/winnet/winnet_windows.go b/wgengine/winnet/winnet_windows.go index 283ce5ad17b68..6ce298f8165ab 100644 --- a/wgengine/winnet/winnet_windows.go +++ b/wgengine/winnet/winnet_windows.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package winnet diff --git a/wif/wif.go b/wif/wif.go index 557685c448c0b..bb2e760f2c7b7 100644 --- a/wif/wif.go +++ b/wif/wif.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package wif deals with obtaining ID tokens from provider VMs diff --git a/words/words.go b/words/words.go index b373ffef6541f..ebac1cc0a571e 100644 --- a/words/words.go +++ b/words/words.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause // Package words contains accessors for some nice words. 
diff --git a/words/words_test.go b/words/words_test.go index a9691792a5c00..3547411a1e160 100644 --- a/words/words_test.go +++ b/words/words_test.go @@ -1,4 +1,4 @@ -// Copyright (c) Tailscale Inc & AUTHORS +// Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause package words From 2a69f48541e0ed7fdf81fc88b079474331eeee76 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 23 Jan 2026 17:53:00 -0600 Subject: [PATCH 0892/1093] wf: allow limited broadcast to/from permitted interfaces when using an exit node on Windows Similarly to allowing link-local multicast in #13661, we should also allow broadcast traffic on permitted interfaces when the killswitch is enabled due to exit node usage on Windows. This always includes internal interfaces, such as Hyper-V/WSL2, and also the LAN when "Allow local network access" is enabled in the client. Updates #18504 Signed-off-by: Nick Khyl --- tstest/test-wishlist.md | 3 ++ wf/firewall.go | 82 ++++++++++++++++++++++++++++++++++++++--- 2 files changed, 79 insertions(+), 6 deletions(-) diff --git a/tstest/test-wishlist.md b/tstest/test-wishlist.md index eb4601b929650..39b4da6c05453 100644 --- a/tstest/test-wishlist.md +++ b/tstest/test-wishlist.md @@ -18,3 +18,6 @@ reference to an issue or PR about the feature. When the option is disabled, we should still permit it for internal interfaces, such as Hyper-V/WSL2 on Windows. +- Inbound and outbound broadcasts when an exit node is used, both with and without + the "Allow local network access" option enabled. When the option is disabled, + we should still permit traffic on internal interfaces, such as Hyper-V/WSL2 on Windows. 
\ No newline at end of file diff --git a/wf/firewall.go b/wf/firewall.go index 5209c2293b10c..995a60c3e3356 100644 --- a/wf/firewall.go +++ b/wf/firewall.go @@ -25,6 +25,8 @@ var ( linkLocalMulticastIPv4Range = netip.MustParsePrefix("224.0.0.0/24") linkLocalMulticastIPv6Range = netip.MustParsePrefix("ff02::/16") + + limitedBroadcast = netip.MustParsePrefix("255.255.255.255/32") ) type direction int @@ -233,26 +235,41 @@ func (f *Firewall) UpdatePermittedRoutes(newRoutes []netip.Prefix) error { return err } - name = "link-local multicast - " + r.String() - conditions = matchLinkLocalMulticast(r, false) - multicastRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionOutbound) + multicastRules, err := f.addLinkLocalMulticastRules(p, r) if err != nil { return err } rules = append(rules, multicastRules...) - conditions = matchLinkLocalMulticast(r, true) - multicastRules, err = f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionInbound) + broadcastRules, err := f.addLimitedBroadcastRules(p, r) if err != nil { return err } - rules = append(rules, multicastRules...) + rules = append(rules, broadcastRules...) f.permittedRoutes[r] = rules } return nil } +// addLinkLocalMulticastRules adds rules to allow inbound and outbound +// link-local multicast traffic to or from the specified network. +// It returns the added rules, or an error. 
+func (f *Firewall) addLinkLocalMulticastRules(p protocol, r netip.Prefix) ([]*wf.Rule, error) { + name := "link-local multicast - " + r.String() + conditions := matchLinkLocalMulticast(r, false) + outboundRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionOutbound) + if err != nil { + return nil, err + } + conditions = matchLinkLocalMulticast(r, true) + inboundRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionInbound) + if err != nil { + return nil, err + } + return append(outboundRules, inboundRules...), nil +} + // matchLinkLocalMulticast returns a list of conditions that match // outbound or inbound link-local multicast traffic to or from the // specified network. @@ -288,6 +305,59 @@ func matchLinkLocalMulticast(pfx netip.Prefix, inbound bool) []*wf.Match { } } +// addLimitedBroadcastRules adds rules to allow inbound and outbound +// limited broadcast traffic to or from the specified network, +// if the network is IPv4. It returns the added rules, or an error. +func (f *Firewall) addLimitedBroadcastRules(p protocol, r netip.Prefix) ([]*wf.Rule, error) { + if !r.Addr().Is4() { + return nil, nil + } + name := "broadcast - " + r.String() + conditions := matchLimitedBroadcast(r, false) + outboundRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionOutbound) + if err != nil { + return nil, err + } + conditions = matchLimitedBroadcast(r, true) + inboundRules, err := f.addRules(name, weightKnownTraffic, conditions, wf.ActionPermit, p, directionInbound) + if err != nil { + return nil, err + } + return append(outboundRules, inboundRules...), nil +} + +// matchLimitedBroadcast returns a list of conditions that match +// outbound or inbound limited broadcast traffic to or from the +// specified network. It panics if the pfx is not IPv4. 
+func matchLimitedBroadcast(pfx netip.Prefix, inbound bool) []*wf.Match { + if !pfx.Addr().Is4() { + panic("limited broadcast is only applicable to IPv4") + } + var localAddr, remoteAddr netip.Prefix + if inbound { + localAddr, remoteAddr = limitedBroadcast, pfx + } else { + localAddr, remoteAddr = pfx, limitedBroadcast + } + return []*wf.Match{ + { + Field: wf.FieldIPProtocol, + Op: wf.MatchTypeEqual, + Value: wf.IPProtoUDP, + }, + { + Field: wf.FieldIPLocalAddress, + Op: wf.MatchTypeEqual, + Value: localAddr, + }, + { + Field: wf.FieldIPRemoteAddress, + Op: wf.MatchTypeEqual, + Value: remoteAddr, + }, + } +} + func (f *Firewall) newRule(name string, w weight, layer wf.LayerID, conditions []*wf.Match, action wf.Action) (*wf.Rule, error) { id, err := windows.GenerateGUID() if err != nil { From bfa90ea9b38e1b20c9944abade6258db6e3d4157 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 23 Jan 2026 17:08:46 -0800 Subject: [PATCH 0893/1093] go.toolchain.rev: update to Go 1.25.6 (#18507) Updates #18506 Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- go.toolchain.version | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index a8ec79e6e014f..7fd487382e574 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.5 +go 1.25.6 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index 16058a407c704..dbf37cef1af47 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -0bab982699fa5903259ba9b4cba3e5fd6cb3baf2 +0c028efa1dac96fbb046b793877061645d01ed74 diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index 310dcf87fcf1c..26fe3501b5d26 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-fBezkBGRHCnfJiOUmMMqBCPCqjlGC4F6KEt5h1JhsCg= +sha256-1AG7yXAbDsBdKUNe5FQ45YXWJ3eLekD4t9mwKrqxiOY= diff --git a/go.toolchain.version b/go.toolchain.version index b45fe310644f7..198ec23ccfcc9 100644 --- 
a/go.toolchain.version +++ b/go.toolchain.version @@ -1 +1 @@ -1.25.5 +1.25.6 From 76839587ebd51507df41532eba474c5fd68134b7 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 19 Jan 2026 15:04:12 +0000 Subject: [PATCH 0894/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 28 ++++++++++---------- licenses/apple.md | 59 ++++++++++++++++++++++--------------------- licenses/tailscale.md | 52 +++++++++++++++++++------------------- licenses/windows.md | 26 +++++++++---------- 4 files changed, 83 insertions(+), 82 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index 4dc8e6c6de06c..5c46b3cb13340 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -11,23 +11,23 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) 
([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) + - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) + - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.3/LICENSE)) - [github.com/google/go-tpm](https://pkg.go.dev/github.com/google/go-tpm) ([Apache-2.0](https://github.com/google/go-tpm/blob/v0.9.4/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) + - [github.com/huin/goupnp](https://pkg.go.dev/github.com/huin/goupnp) ([BSD-2-Clause](https://github.com/huin/goupnp/blob/v1.3.0/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) + - 
[github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.2/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.2/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.2/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - - [github.com/huin/goupnp](https://pkg.go.dev/github.com/huin/goupnp) ([BSD-2-Clause](https://github.com/huin/goupnp/blob/v1.3.0/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) @@ -36,16 +36,16 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.45.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.46.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/b7579e27:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.30.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.47.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.18.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.38.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.37.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.31.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.11.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) 
([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.48.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.38.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.32.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.39.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index c3f2d3bb7a3c3..d51d67190b1fa 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -12,43 +12,45 @@ See also the dependencies in the [Tailscale CLI][]. 
- [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.39.6/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.4.13/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.7.13/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.39.6/internal/sync/singleflight/LICENSE)) - - 
[github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.41.0/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.32.5/config/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.19.5/credentials/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.18.16/feature/ec2/imds/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.4.16/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.7.16/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.4/internal/ini/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.41.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.13.4/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.13.16/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/signin](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/signin) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/signin/v1.0.4/service/signin/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.23.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.23.2/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.30.7/service/sso/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.35.12/service/ssooidc/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.41.5/service/sts/LICENSE.txt)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.24.0/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.24.0/internal/sync/singleflight/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) 
([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.8.1/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/cc2cfa0554c3/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/4849db3c2f7e/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) + - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.3/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - 
[github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) + - [github.com/huin/goupnp](https://pkg.go.dev/github.com/huin/goupnp) ([BSD-2-Clause](https://github.com/huin/goupnp/blob/v1.3.0/LICENSE)) - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/15c9b8791914/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) 
([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.2/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.2/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.2/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/genetlink](https://pkg.go.dev/github.com/mdlayher/genetlink) ([MIT](https://github.com/mdlayher/genetlink/blob/v1.3.2/LICENSE.md)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) @@ -59,7 +61,6 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) - - [github.com/huin/goupnp](https://pkg.go.dev/github.com/huin/goupnp) ([BSD-2-Clause](https://github.com/huin/goupnp/blob/v1.3.0/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) @@ -69,13 +70,13 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.45.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.46.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.47.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.18.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.38.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.37.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.31.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.48.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) 
([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.38.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.32.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 85c0f33fc09d2..28eb73db42cc6 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -20,22 +20,22 @@ Some packages may only be included on certain architectures or operating systems - [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/anmitsu/go-shlex](https://pkg.go.dev/github.com/anmitsu/go-shlex) ([MIT](https://github.com/anmitsu/go-shlex/blob/38f4b401e2be/LICENSE)) - [github.com/atotto/clipboard](https://pkg.go.dev/github.com/atotto/clipboard) ([BSD-3-Clause](https://github.com/atotto/clipboard/blob/v0.1.4/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.41.0/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.4.16/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.7.16/internal/endpoints/v2/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.41.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.13.4/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.13.16/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.44.7/service/ssm/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - 
[github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.41.5/service/sts/LICENSE.txt)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.24.0/LICENSE)) + - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.24.0/internal/sync/singleflight/LICENSE)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.23/LICENSE)) @@ -43,24 +43,24 @@ Some packages may only be included on certain architectures or operating systems - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) 
([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fogleman/gg](https://pkg.go.dev/github.com/fogleman/gg) ([MIT](https://github.com/fogleman/gg/blob/v1.3.0/LICENSE.md)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - [github.com/go-ole/go-ole](https://pkg.go.dev/github.com/go-ole/go-ole) ([MIT](https://github.com/go-ole/go-ole/blob/v1.3.0/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/freetype/raster](https://pkg.go.dev/github.com/golang/freetype/raster) ([Unknown](Unknown)) - [github.com/golang/freetype/truetype](https://pkg.go.dev/github.com/golang/freetype/truetype) ([Unknown](Unknown)) - - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) + - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) + - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.3/LICENSE)) - 
[github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/kballard/go-shellquote](https://pkg.go.dev/github.com/kballard/go-shellquote) ([MIT](https://github.com/kballard/go-shellquote/blob/95032a82bc51/LICENSE)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.2/LICENSE)) + - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.2/internal/snapref/LICENSE)) + - 
[github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.2/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/kr/fs](https://pkg.go.dev/github.com/kr/fs) ([BSD-3-Clause](https://github.com/kr/fs/blob/v0.1.0/LICENSE)) - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) @@ -83,24 +83,24 @@ Some packages may only be included on certain architectures or operating systems - [github.com/u-root/u-root/pkg/termios](https://pkg.go.dev/github.com/u-root/u-root/pkg/termios) ([BSD-3-Clause](https://github.com/u-root/u-root/blob/v0.14.0/LICENSE)) - [github.com/u-root/uio](https://pkg.go.dev/github.com/u-root/uio) ([BSD-3-Clause](https://github.com/u-root/uio/blob/d2acac8f3701/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) + - [go.yaml.in/yaml/v2](https://pkg.go.dev/go.yaml.in/yaml/v2) ([Apache-2.0](https://github.com/yaml/go-yaml/blob/v2.4.2/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.45.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.46.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/b7579e27:LICENSE)) - 
[golang.org/x/image](https://pkg.go.dev/golang.org/x/image) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.47.0:LICENSE)) - - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.30.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.18.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.38.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.37.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.31.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.11.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.48.0:LICENSE)) + - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.32.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.38.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.32.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) 
([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.32.0/LICENSE)) - - [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE)) - - [sigs.k8s.io/yaml/goyaml.v2](https://pkg.go.dev/sigs.k8s.io/yaml/goyaml.v2) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE)) + - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.34.0/LICENSE)) + - [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.6.0/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) - [tailscale.com/tempfork/gliderlabs/ssh](https://pkg.go.dev/tailscale.com/tempfork/gliderlabs/ssh) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/tempfork/gliderlabs/ssh/LICENSE)) - [tailscale.com/tempfork/spf13/cobra](https://pkg.go.dev/tailscale.com/tempfork/spf13/cobra) ([Apache-2.0](https://github.com/tailscale/tailscale/blob/HEAD/tempfork/spf13/cobra/LICENSE.txt)) diff --git a/licenses/windows.md b/licenses/windows.md index 0b8344b4d66d4..902d0f2a1f5a8 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -15,22 
+15,22 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) - [github.com/cespare/xxhash/v2](https://pkg.go.dev/github.com/cespare/xxhash/v2) ([MIT](https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) + - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.8.1/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/cc2cfa0554c3/LICENSE)) + - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/4849db3c2f7e/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - - 
[github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) + - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.3/LICENSE)) - [github.com/google/go-cmp/cmp](https://pkg.go.dev/github.com/google/go-cmp/cmp) ([BSD-3-Clause](https://github.com/google/go-cmp/blob/v0.7.0/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/gregjones/httpcache](https://pkg.go.dev/github.com/gregjones/httpcache) ([MIT](https://github.com/gregjones/httpcache/blob/901d90724c79/LICENSE.txt)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) - - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) - - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) + - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.2/LICENSE)) 
+ - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.2/internal/snapref/LICENSE)) + - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.2/zstd/internal/xxhash/LICENSE.txt)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) @@ -51,17 +51,17 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [go.yaml.in/yaml/v2](https://pkg.go.dev/go.yaml.in/yaml/v2) ([Apache-2.0](https://github.com/yaml/go-yaml/blob/v2.4.2/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.45.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.46.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) 
([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.30.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.47.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.18.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.38.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.37.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.48.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.38.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.8/LICENSE)) + - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.11/LICENSE)) - [gopkg.in/Knetic/govaluate.v3](https://pkg.go.dev/gopkg.in/Knetic/govaluate.v3) ([MIT](https://github.com/Knetic/govaluate/blob/v3.0.0/LICENSE)) - 
[gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) ([MIT](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) From 1183f7a191739040c7e1abf77d9c555e82767b54 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 23 Jan 2026 15:07:50 -0800 Subject: [PATCH 0895/1093] tstest/integration/testcontrol: fix unguarded read of DNS config Fixes #18498 Signed-off-by: James Tucker --- tstest/integration/testcontrol/testcontrol.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 4607665924c45..f61d1b53a6d99 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -1327,16 +1327,19 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, s.mu.Lock() nodeCapMap := maps.Clone(s.nodeCapMaps[nk]) + var dns *tailcfg.DNSConfig + if s.DNSConfig != nil { + dns = s.DNSConfig.Clone() + } + magicDNSDomain := s.MagicDNSDomain s.mu.Unlock() node.CapMap = nodeCapMap node.Capabilities = append(node.Capabilities, tailcfg.NodeAttrDisableUPnP) t := time.Date(2020, 8, 3, 0, 0, 0, 1, time.UTC) - dns := s.DNSConfig - if dns != nil && s.MagicDNSDomain != "" { - dns = dns.Clone() - dns.CertDomains = append(dns.CertDomains, node.Hostinfo.Hostname()+"."+s.MagicDNSDomain) + if dns != nil && magicDNSDomain != "" { + dns.CertDomains = append(dns.CertDomains, node.Hostinfo.Hostname()+"."+magicDNSDomain) } res = &tailcfg.MapResponse{ From 9d13a6df9c4d84f2db700960ee5e64f9b272fa34 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 14 Jan 2026 11:53:14 -0800 Subject: [PATCH 0896/1093] appc,ipn/ipnlocal: Add split DNS entries for conn25 peers If conn25 config is sent in the netmap: add split DNS entries to use appropriately tagged peers' PeerAPI to resolve DNS requests 
for those domains. This will enable future work where we use the peers as connectors for the configured domains. Updates tailscale/corp#34252 Signed-off-by: Fran Bull --- appc/conn25.go | 63 +++++++++++++++++ appc/conn25_test.go | 123 +++++++++++++++++++++++++++++++++ ipn/ipnlocal/dnsconfig_test.go | 91 ++++++++++++++++++++++++ ipn/ipnlocal/node_backend.go | 21 ++++++ 4 files changed, 298 insertions(+) diff --git a/appc/conn25.go b/appc/conn25.go index 2c3e8c519a976..08ca651fda7e9 100644 --- a/appc/conn25.go +++ b/appc/conn25.go @@ -4,10 +4,15 @@ package appc import ( + "cmp" "net/netip" + "slices" "sync" "tailscale.com/tailcfg" + "tailscale.com/types/appctype" + "tailscale.com/util/mak" + "tailscale.com/util/set" ) // Conn25 holds the developing state for the as yet nascent next generation app connector. @@ -108,3 +113,61 @@ type ConnectorTransitIPResponse struct { // correspond to the order of [ConnectorTransitIPRequest.TransitIPs]. TransitIPs []TransitIPResponse `json:"transitIPs,omitempty"` } + +const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental" + +// PickSplitDNSPeers looks at the netmap peers capabilities and finds which peers +// want to be connectors for which domains. +func PickSplitDNSPeers(hasCap func(c tailcfg.NodeCapability) bool, self tailcfg.NodeView, peers map[tailcfg.NodeID]tailcfg.NodeView) map[string][]tailcfg.NodeView { + var m map[string][]tailcfg.NodeView + if !hasCap(AppConnectorsExperimentalAttrName) { + return m + } + apps, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.AppConnectorAttr](self.CapMap(), AppConnectorsExperimentalAttrName) + if err != nil { + return m + } + tagToDomain := make(map[string][]string) + for _, app := range apps { + for _, tag := range app.Connectors { + tagToDomain[tag] = append(tagToDomain[tag], app.Domains...) + } + } + // NodeIDs are Comparable, and we have a map of NodeID to NodeView anyway, so + // use a Set of NodeIDs to deduplicate, and populate into a []NodeView later. 
+ var work map[string]set.Set[tailcfg.NodeID] + for _, peer := range peers { + if !peer.Valid() || !peer.Hostinfo().Valid() { + continue + } + if isConn, _ := peer.Hostinfo().AppConnector().Get(); !isConn { + continue + } + for _, t := range peer.Tags().All() { + domains := tagToDomain[t] + for _, domain := range domains { + if work[domain] == nil { + mak.Set(&work, domain, set.Set[tailcfg.NodeID]{}) + } + work[domain].Add(peer.ID()) + } + } + } + + // Populate m. Make a []tailcfg.NodeView from []tailcfg.NodeID using the peers map. + // And sort it to our preference. + for domain, ids := range work { + nodes := make([]tailcfg.NodeView, 0, ids.Len()) + for id := range ids { + nodes = append(nodes, peers[id]) + } + // The ordering of the nodes in the map vals is semantic (dnsConfigForNetmap uses the first node it can + // get a peer api url for as its split dns target). We can think of it as a preference order, except that + // we don't (currently 2026-01-14) have any preference over which node is chosen. 
+ slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int { + return cmp.Compare(a.ID(), b.ID()) + }) + mak.Set(&m, domain, nodes) + } + return m +} diff --git a/appc/conn25_test.go b/appc/conn25_test.go index 76cc6cf8c69f4..33f89749ca748 100644 --- a/appc/conn25_test.go +++ b/appc/conn25_test.go @@ -4,10 +4,14 @@ package appc import ( + "encoding/json" "net/netip" + "reflect" "testing" "tailscale.com/tailcfg" + "tailscale.com/types/appctype" + "tailscale.com/types/opt" ) // TestHandleConnectorTransitIPRequestZeroLength tests that if sent a @@ -186,3 +190,122 @@ func TestTransitIPTargetUnknownTIP(t *testing.T) { t.Fatalf("Unknown transit addr, want: %v, got %v", want, got) } } + +func TestPickSplitDNSPeers(t *testing.T) { + getBytesForAttr := func(name string, domains []string, tags []string) []byte { + attr := appctype.AppConnectorAttr{ + Name: name, + Domains: domains, + Connectors: tags, + } + bs, err := json.Marshal(attr) + if err != nil { + t.Fatalf("test setup: %v", err) + } + return bs + } + appOneBytes := getBytesForAttr("app1", []string{"example.com"}, []string{"tag:one"}) + appTwoBytes := getBytesForAttr("app2", []string{"a.example.com"}, []string{"tag:two"}) + appThreeBytes := getBytesForAttr("app3", []string{"woo.b.example.com", "hoo.b.example.com"}, []string{"tag:three1", "tag:three2"}) + appFourBytes := getBytesForAttr("app4", []string{"woo.b.example.com", "c.example.com"}, []string{"tag:four1", "tag:four2"}) + + makeNodeView := func(id tailcfg.NodeID, name string, tags []string) tailcfg.NodeView { + return (&tailcfg.Node{ + ID: id, + Name: name, + Tags: tags, + Hostinfo: (&tailcfg.Hostinfo{AppConnector: opt.NewBool(true)}).View(), + }).View() + } + nvp1 := makeNodeView(1, "p1", []string{"tag:one"}) + nvp2 := makeNodeView(2, "p2", []string{"tag:four1", "tag:four2"}) + nvp3 := makeNodeView(3, "p3", []string{"tag:two", "tag:three1"}) + nvp4 := makeNodeView(4, "p4", []string{"tag:two", "tag:three2", "tag:four2"}) + + for _, tt := range []struct { + name 
string + want map[string][]tailcfg.NodeView + peers []tailcfg.NodeView + config []tailcfg.RawMessage + }{ + { + name: "empty", + }, + { + name: "bad-config", // bad config should return a nil map rather than error. + config: []tailcfg.RawMessage{tailcfg.RawMessage(`hey`)}, + }, + { + name: "no-peers", + config: []tailcfg.RawMessage{tailcfg.RawMessage(appOneBytes)}, + }, + { + name: "peers-that-are-not-connectors", + config: []tailcfg.RawMessage{tailcfg.RawMessage(appOneBytes)}, + peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 5, + Name: "p5", + Tags: []string{"tag:one"}, + }).View(), + (&tailcfg.Node{ + ID: 6, + Name: "p6", + Tags: []string{"tag:one"}, + }).View(), + }, + }, + { + name: "peers-that-dont-match-tags", + config: []tailcfg.RawMessage{tailcfg.RawMessage(appOneBytes)}, + peers: []tailcfg.NodeView{ + makeNodeView(5, "p5", []string{"tag:seven"}), + makeNodeView(6, "p6", nil), + }, + }, + { + name: "matching-tagged-connector-peers", + config: []tailcfg.RawMessage{ + tailcfg.RawMessage(appOneBytes), + tailcfg.RawMessage(appTwoBytes), + tailcfg.RawMessage(appThreeBytes), + tailcfg.RawMessage(appFourBytes), + }, + peers: []tailcfg.NodeView{ + nvp1, + nvp2, + nvp3, + nvp4, + makeNodeView(5, "p5", nil), + }, + want: map[string][]tailcfg.NodeView{ + // p5 has no matching tags and so doesn't appear + "example.com": {nvp1}, + "a.example.com": {nvp3, nvp4}, + "woo.b.example.com": {nvp2, nvp3, nvp4}, + "hoo.b.example.com": {nvp3, nvp4}, + "c.example.com": {nvp2, nvp4}, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + selfNode := &tailcfg.Node{} + if tt.config != nil { + selfNode.CapMap = tailcfg.NodeCapMap{ + tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): tt.config, + } + } + selfView := selfNode.View() + peers := map[tailcfg.NodeID]tailcfg.NodeView{} + for _, p := range tt.peers { + peers[p.ID()] = p + } + got := PickSplitDNSPeers(func(_ tailcfg.NodeCapability) bool { + return true + }, selfView, peers) + if !reflect.DeepEqual(got, tt.want) { 
+ t.Fatalf("got %v, want %v", got, tt.want) + } + }) + } +} diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index 52cc533ff29f6..594d2c5476177 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -10,14 +10,17 @@ import ( "reflect" "testing" + "tailscale.com/appc" "tailscale.com/ipn" "tailscale.com/net/dns" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/dnstype" "tailscale.com/types/netmap" + "tailscale.com/types/opt" "tailscale.com/util/cloudenv" "tailscale.com/util/dnsname" + "tailscale.com/util/set" ) func ipps(ippStrs ...string) (ipps []netip.Prefix) { @@ -349,6 +352,94 @@ func TestDNSConfigForNetmap(t *testing.T) { prefs: &ipn.Prefs{}, want: &dns.Config{}, }, + { + name: "conn25-split-dns", + nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "a", + Addresses: ipps("100.101.101.101"), + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName): []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"name":"app1","connectors":["tag:woo"],"domains":["example.com"]}`), + }, + }, + }).View(), + AllCaps: set.Of(tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName)), + }, + peers: nodeViews([]*tailcfg.Node{ + { + ID: 1, + Name: "p1", + Addresses: ipps("100.102.0.1"), + Tags: []string{"tag:woo"}, + Hostinfo: (&tailcfg.Hostinfo{ + Services: []tailcfg.Service{ + { + Proto: tailcfg.PeerAPI4, + Port: 1234, + }, + }, + AppConnector: opt.NewBool(true), + }).View(), + }, + }), + prefs: &ipn.Prefs{ + CorpDNS: true, + }, + want: &dns.Config{ + Hosts: map[dnsname.FQDN][]netip.Addr{ + "a.": ips("100.101.101.101"), + "p1.": ips("100.102.0.1"), + }, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{ + dnsname.FQDN("example.com."): { + {Addr: "http://100.102.0.1:1234/dns-query"}, + }, + }, + }, + }, + { + name: "conn25-split-dns-no-matching-peers", + nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "a", + Addresses: ipps("100.101.101.101"), + CapMap: 
tailcfg.NodeCapMap{ + tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName): []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"name":"app1","connectors":["tag:woo"],"domains":["example.com"]}`), + }, + }, + }).View(), + AllCaps: set.Of(tailcfg.NodeCapability(appc.AppConnectorsExperimentalAttrName)), + }, + peers: nodeViews([]*tailcfg.Node{ + { + ID: 1, + Name: "p1", + Addresses: ipps("100.102.0.1"), + Tags: []string{"tag:nomatch"}, + Hostinfo: (&tailcfg.Hostinfo{ + Services: []tailcfg.Service{ + { + Proto: tailcfg.PeerAPI4, + Port: 1234, + }, + }, + AppConnector: opt.NewBool(true), + }).View(), + }, + }), + prefs: &ipn.Prefs{ + CorpDNS: true, + }, + want: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: map[dnsname.FQDN][]netip.Addr{ + "a.": ips("100.101.101.101"), + "p1.": ips("100.102.0.1"), + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index a252f20fe2074..4a32b14dd49dc 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -6,12 +6,14 @@ package ipnlocal import ( "cmp" "context" + "fmt" "net/netip" "slices" "sync" "sync/atomic" "go4.org/netipx" + "tailscale.com/appc" "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/dns" @@ -842,6 +844,25 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. // Add split DNS routes, with no regard to exit node configuration. 
addSplitDNSRoutes(nm.DNS.Routes) + // Add split DNS routes for conn25 + conn25DNSTargets := appc.PickSplitDNSPeers(nm.HasCap, nm.SelfNode, peers) + if conn25DNSTargets != nil { + var m map[string][]*dnstype.Resolver + for domain, candidateSplitDNSPeers := range conn25DNSTargets { + for _, peer := range candidateSplitDNSPeers { + base := peerAPIBase(nm, peer) + if base == "" { + continue + } + mak.Set(&m, domain, []*dnstype.Resolver{{Addr: fmt.Sprintf("%s/dns-query", base)}}) + break // Just make one resolver for the first peer we can get a peerAPIBase for. + } + } + if m != nil { + addSplitDNSRoutes(m) + } + } + // Set FallbackResolvers as the default resolvers in the // scenarios that can't handle a purely split-DNS config. See // https://github.com/tailscale/tailscale/issues/1743 for From 0e1b2b15f1a9a609213d99d527ca448711775b13 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Mon, 26 Jan 2026 12:36:02 -0500 Subject: [PATCH 0897/1093] net/dns/publicdns: support CIRA Canadian Shield RELNOTE=Add DNS-over-HTTPS support for CIRA Canadian Shield Fixes #18524 Signed-off-by: Andrew Dunham --- net/dns/publicdns/publicdns.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/net/dns/publicdns/publicdns.go b/net/dns/publicdns/publicdns.go index e3148a5ae8a98..7ceaf1813db6c 100644 --- a/net/dns/publicdns/publicdns.go +++ b/net/dns/publicdns/publicdns.go @@ -275,6 +275,26 @@ func populate() { addDoH("76.76.10.4", "https://freedns.controld.com/family") addDoH("2606:1a40::4", "https://freedns.controld.com/family") addDoH("2606:1a40:1::4", "https://freedns.controld.com/family") + + // CIRA Canadian Shield: https://www.cira.ca/en/canadian-shield/configure/summary-cira-canadian-shield-dns-resolver-addresses/ + + // CIRA Canadian Shield Private (DNS resolution only) + addDoH("149.112.121.10", "https://private.canadianshield.cira.ca/dns-query") + addDoH("149.112.122.10", "https://private.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bb::10", 
"https://private.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bc::10", "https://private.canadianshield.cira.ca/dns-query") + + // CIRA Canadian Shield Protected (Malware and phishing protection) + addDoH("149.112.121.20", "https://protected.canadianshield.cira.ca/dns-query") + addDoH("149.112.122.20", "https://protected.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bb::20", "https://protected.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bc::20", "https://protected.canadianshield.cira.ca/dns-query") + + // CIRA Canadian Shield Family (Protected + blocking adult content) + addDoH("149.112.121.30", "https://family.canadianshield.cira.ca/dns-query") + addDoH("149.112.122.30", "https://family.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bb::30", "https://family.canadianshield.cira.ca/dns-query") + addDoH("2620:10a:80bc::30", "https://family.canadianshield.cira.ca/dns-query") } var ( From 8d875a301c8bdaceb5814eab100a90cb725b2018 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Mon, 26 Jan 2026 12:43:24 -0500 Subject: [PATCH 0898/1093] net/dns: add test for DoH upgrade of system DNS Someone asked me if we use DNS-over-HTTPS if the system's resolver is an IP address that supports DoH and there's no global nameserver set (i.e. no "Override DNS servers" set). I didn't know the answer offhand, and it took a while for me to figure it out. The answer is yes, in cases where we take over the system's DNS configuration and read the base config, we do upgrade any DoH-capable resolver to use DoH. Here's a test that verifies this behaviour (and hopefully helps as documentation the next time someone has this question). 
Updates #cleanup Signed-off-by: Andrew Dunham --- net/dns/manager_test.go | 204 +++++++++++++++++++++++++++++++++ net/dns/publicdns/publicdns.go | 38 ++++++ 2 files changed, 242 insertions(+) diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index 679f81cd5d8a2..cf0c2458e395f 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -4,24 +4,36 @@ package dns import ( + "bytes" + "context" "errors" + "io" + "net/http" + "net/http/httptest" "net/netip" "reflect" "runtime" + "slices" "strings" + "sync" "testing" "testing/synctest" + "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/health" + "tailscale.com/net/dns/publicdns" "tailscale.com/net/dns/resolver" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" + "tailscale.com/tstest" "tailscale.com/types/dnstype" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus/eventbustest" + "tailscale.com/util/httpm" ) type fakeOSConfigurator struct { @@ -1116,3 +1128,195 @@ func TestTrampleRetrample(t *testing.T) { } }) } + +// TestSystemDNSDoHUpgrade tests that if the user doesn't configure DNS servers +// in their tailnet, and the system DNS happens to be a known DoH provider, +// queries will use DNS-over-HTTPS. +func TestSystemDNSDoHUpgrade(t *testing.T) { + var ( + // This is a non-routable TEST-NET-2 IP (RFC 5737). + testDoHResolverIP = netip.MustParseAddr("198.51.100.1") + // This is a non-routable TEST-NET-1 IP (RFC 5737). + testResponseIP = netip.MustParseAddr("192.0.2.1") + ) + const testDomain = "test.example.com." 
+ + var ( + mu sync.Mutex + dohRequestSeen bool + receivedQuery []byte + ) + dohServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Logf("[DoH Server] received request: %v %v", r.Method, r.URL) + + if r.Method != httpm.POST { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + if r.Header.Get("Content-Type") != "application/dns-message" { + http.Error(w, "bad content type", http.StatusBadRequest) + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "read error", http.StatusInternalServerError) + return + } + + mu.Lock() + defer mu.Unlock() + + dohRequestSeen = true + receivedQuery = body + + // Build a DNS response + response := buildTestDNSResponse(t, testDomain, testResponseIP) + w.Header().Set("Content-Type", "application/dns-message") + w.Write(response) + })) + t.Cleanup(dohServer.Close) + + // Register the test IP to route to our mock DoH server + cleanup := publicdns.RegisterTestDoHEndpoint(testDoHResolverIP, dohServer.URL) + t.Cleanup(cleanup) + + // This simulates a system with the single DoH-capable DNS server + // configured. + f := &fakeOSConfigurator{ + SplitDNS: false, // non-split DNS required to use the forwarder + BaseConfig: OSConfig{ + Nameservers: []netip.Addr{testDoHResolverIP}, + }, + } + + logf := tstest.WhileTestRunningLogger(t) + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(logf, f, health.NewTracker(bus), dialer, nil, &controlknobs.Knobs{}, "linux", bus) + t.Cleanup(func() { m.Down() }) + + // Set up hook to capture the resolver config + m.resolver.TestOnlySetHook(f.SetResolver) + + // Configure the manager with routes but no default resolvers, which + // reads BaseConfig from the OS configurator. 
+ config := Config{ + Routes: upstreams("tailscale.com.", "10.0.0.1"), + SearchDomains: fqdns("tailscale.com."), + } + if err := m.Set(config); err != nil { + t.Fatal(err) + } + + // Verify the resolver config has our test IP in Routes["."] + if f.ResolverConfig.Routes == nil { + t.Fatal("ResolverConfig.Routes is nil (SetResolver hook not called)") + } + + const defaultRouteKey = "." + defaultRoute, ok := f.ResolverConfig.Routes[defaultRouteKey] + if !ok { + t.Fatalf("ResolverConfig.Routes[%q] not found", defaultRouteKey) + } + if !slices.ContainsFunc(defaultRoute, func(r *dnstype.Resolver) bool { + return r.Addr == testDoHResolverIP.String() + }) { + t.Errorf("test IP %v not found in Routes[%q], got: %v", testDoHResolverIP, defaultRouteKey, defaultRoute) + } + + // Build a DNS query to something not handled by our split DNS route + // (tailscale.com) above. + query := buildTestDNSQuery(t, testDomain) + + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + resp, err := m.Query(ctx, query, "udp", netip.MustParseAddrPort("127.0.0.1:12345")) + if err != nil { + t.Fatal(err) + } + if len(resp) == 0 { + t.Fatal("empty response") + } + + // Parse the response to verify we get our test IP back. + var parser dns.Parser + if _, err := parser.Start(resp); err != nil { + t.Fatalf("parsing response header: %v", err) + } + if err := parser.SkipAllQuestions(); err != nil { + t.Fatalf("skipping questions: %v", err) + } + answers, err := parser.AllAnswers() + if err != nil { + t.Fatalf("parsing answers: %v", err) + } + if len(answers) == 0 { + t.Fatal("no answers in response") + } + aRecord, ok := answers[0].Body.(*dns.AResource) + if !ok { + t.Fatalf("first answer is not A record: %T", answers[0].Body) + } + gotIP := netip.AddrFrom4(aRecord.A) + if gotIP != testResponseIP { + t.Errorf("wrong A record IP: got %v, want %v", gotIP, testResponseIP) + } + + // Also verify that our DoH server received the query. 
+ mu.Lock() + defer mu.Unlock() + if !dohRequestSeen { + t.Error("DoH server never received request") + } + if !bytes.Equal(receivedQuery, query) { + t.Errorf("DoH server received wrong query:\ngot: %x\nwant: %x", receivedQuery, query) + } +} + +// buildTestDNSQuery builds a simple DNS A query for the given domain. +func buildTestDNSQuery(t *testing.T, domain string) []byte { + t.Helper() + + builder := dns.NewBuilder(nil, dns.Header{}) + builder.StartQuestions() + builder.Question(dns.Question{ + Name: dns.MustNewName(domain), + Type: dns.TypeA, + Class: dns.ClassINET, + }) + msg, err := builder.Finish() + if err != nil { + t.Fatal(err) + } + + return msg +} + +// buildTestDNSResponse builds a DNS response for the given query with the specified IP. +func buildTestDNSResponse(t *testing.T, domain string, ip netip.Addr) []byte { + t.Helper() + + builder := dns.NewBuilder(nil, dns.Header{Response: true}) + builder.StartQuestions() + builder.Question(dns.Question{ + Name: dns.MustNewName(domain), + Type: dns.TypeA, + Class: dns.ClassINET, + }) + + builder.StartAnswers() + builder.AResource(dns.ResourceHeader{ + Name: dns.MustNewName(domain), + Class: dns.ClassINET, + TTL: 300, + }, dns.AResource{A: ip.As4()}) + + msg, err := builder.Finish() + if err != nil { + t.Fatal(err) + } + + return msg +} diff --git a/net/dns/publicdns/publicdns.go b/net/dns/publicdns/publicdns.go index 7ceaf1813db6c..3666bd77847c9 100644 --- a/net/dns/publicdns/publicdns.go +++ b/net/dns/publicdns/publicdns.go @@ -13,12 +13,14 @@ import ( "log" "math/big" "net/netip" + "slices" "sort" "strconv" "strings" "sync" "tailscale.com/feature/buildfeatures" + "tailscale.com/util/testenv" ) // dohOfIP maps from public DNS IPs to their DoH base URL. 
@@ -367,3 +369,39 @@ func IPIsDoHOnlyServer(ip netip.Addr) bool { controlDv6RangeA.Contains(ip) || controlDv6RangeB.Contains(ip) || ip == controlDv4One || ip == controlDv4Two } + +var testMu sync.Mutex + +// RegisterTestDoHEndpoint registers a test DoH endpoint mapping for use in tests. +// It maps the given IP to the DoH base URL, and the URL back to the IP. +// +// This function panics if called outside of tests, and cannot be called +// concurrently with any usage of this package (i.e. before any DNS forwarders +// are created). It is safe to call concurrently with itself. +// +// It returns a cleanup function that removes the registration. +func RegisterTestDoHEndpoint(ip netip.Addr, dohBase string) func() { + if !testenv.InTest() { + panic("RegisterTestDoHEndpoint called outside of tests") + } + populateOnce.Do(populate) + + testMu.Lock() + defer testMu.Unlock() + + dohOfIP[ip] = dohBase + dohIPsOfBase[dohBase] = append(dohIPsOfBase[dohBase], ip) + + return func() { + testMu.Lock() + defer testMu.Unlock() + + delete(dohOfIP, ip) + dohIPsOfBase[dohBase] = slices.DeleteFunc(dohIPsOfBase[dohBase], func(addr netip.Addr) bool { + return addr == ip + }) + if len(dohIPsOfBase[dohBase]) == 0 { + delete(dohIPsOfBase, dohBase) + } + } +} From 6e44cb6ab30404ef03e16aedd0ccd476431d843b Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Mon, 26 Jan 2026 14:34:01 -0700 Subject: [PATCH 0899/1093] tsnet: make ListenService examples consistent with other tsnet examples Fixes tailscale/corp#36365 Signed-off-by: Harry Harpham --- ...e_tsnet_listen_service_multiple_ports_test.go | 10 ++++------ tsnet/example_tsnet_test.go | 16 ++++++---------- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/tsnet/example_tsnet_listen_service_multiple_ports_test.go b/tsnet/example_tsnet_listen_service_multiple_ports_test.go index 0c7b3899955e1..5fe86a9ecf9fe 100644 --- a/tsnet/example_tsnet_listen_service_multiple_ports_test.go +++ 
b/tsnet/example_tsnet_listen_service_multiple_ports_test.go @@ -19,21 +19,19 @@ import ( // Service on multiple ports. In this example, we run an HTTPS server on 443 and // an HTTP server handling pprof requests to the same runtime on 6060. func ExampleServer_ListenService_multiplePorts() { - s := &tsnet.Server{ - Hostname: "tsnet-services-demo", + srv := &tsnet.Server{ + Hostname: "shu", } - defer s.Close() - ln, err := s.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ + ln, err := srv.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ HTTPS: true, Port: 443, }) if err != nil { log.Fatal(err) } - defer ln.Close() - pprofLn, err := s.ListenService("svc:my-service", tsnet.ServiceModeTCP{ + pprofLn, err := srv.ListenService("svc:my-service", tsnet.ServiceModeTCP{ Port: 6060, }) if err != nil { diff --git a/tsnet/example_tsnet_test.go b/tsnet/example_tsnet_test.go index dbaa8111fb623..2af31a76f787f 100644 --- a/tsnet/example_tsnet_test.go +++ b/tsnet/example_tsnet_test.go @@ -205,19 +205,17 @@ func ExampleServer_ListenFunnel_funnelOnly() { // ExampleServer_ListenService demonstrates how to advertise an HTTPS Service. 
func ExampleServer_ListenService() { - s := &tsnet.Server{ - Hostname: "tsnet-services-demo", + srv := &tsnet.Server{ + Hostname: "atum", } - defer s.Close() - ln, err := s.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ + ln, err := srv.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ HTTPS: true, Port: 443, }) if err != nil { log.Fatal(err) } - defer ln.Close() log.Printf("Listening on https://%v\n", ln.FQDN) log.Fatal(http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -238,19 +236,17 @@ func ExampleServer_ListenService_reverseProxy() { Host: targetAddress, }) - s := &tsnet.Server{ - Hostname: "tsnet-services-demo", + srv := &tsnet.Server{ + Hostname: "tefnut", } - defer s.Close() - ln, err := s.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ + ln, err := srv.ListenService("svc:my-service", tsnet.ServiceModeHTTP{ HTTPS: true, Port: 443, }) if err != nil { log.Fatal(err) } - defer ln.Close() log.Printf("Listening on https://%v\n", ln.FQDN) log.Fatal(http.Serve(ln, reverseProxy)) From 9385dfe7f654d74c177ffc3e7f4b6fe428562022 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Mon, 26 Jan 2026 14:55:30 -0800 Subject: [PATCH 0900/1093] ipn/ipnlocal/netmapcache: add a package to split and cache network maps (#18497) This commit is based on part of #17925, reworked as a separate package. Add a package that can store and load netmap.NetworkMap values in persistent storage, using a basic columnar representation. This commit includes a default storage interface based on plain files, but the interface can be implemented with more structured storage if we want to later. The tests are set up to require that all the fields of the NetworkMap are handled, except those explicitly designated as not-cached, and check that a fully-populated value can round-trip correctly through the cache. 
Adding or removing fields, either in the NetworkMap or in the cached representation, will trigger either build failures (e.g., for type mismatch) or test failures (e.g., for representation changes or missing fields). This isn't quite as nice as automatically updating the representation, which I also prototyped, but is much simpler to maintain and less code. This commit does not yet hook up the cache to the backend, that will be a subsequent change. Updates #12639 Change-Id: Icb48639e1d61f2aec59904ecd172c73e05ba7bf9 Signed-off-by: M. J. Fromberger --- flake.nix | 2 +- go.mod | 1 + go.mod.sri | 2 +- ipn/ipnlocal/netmapcache/netmapcache.go | 351 +++++++++++++++++++ ipn/ipnlocal/netmapcache/netmapcache_test.go | 298 ++++++++++++++++ ipn/ipnlocal/netmapcache/types.go | 52 +++ shell.nix | 2 +- types/netmap/netmap.go | 2 + 8 files changed, 707 insertions(+), 3 deletions(-) create mode 100644 ipn/ipnlocal/netmapcache/netmapcache.go create mode 100644 ipn/ipnlocal/netmapcache/netmapcache_test.go create mode 100644 ipn/ipnlocal/netmapcache/types.go diff --git a/flake.nix b/flake.nix index 149223d0aac60..76e68e4acd57f 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-WeMTOkERj4hvdg4yPaZ1gRgKnhRIBXX55kUVbX/k/xM= +# nix-direnv cache busting line: sha256-+tOYqRV8ZUA95dfVyRpjnJvwuSMobu/EhtXxq4bwvio= diff --git a/go.mod b/go.mod index 7fd487382e574..bcdc7e19d3162 100644 --- a/go.mod +++ b/go.mod @@ -23,6 +23,7 @@ require ( github.com/coder/websocket v1.8.12 github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf + github.com/creachadair/mds v0.25.9 github.com/creachadair/msync v0.7.1 github.com/creachadair/taskgroup v0.13.2 github.com/creack/pty v1.1.23 diff --git a/go.mod.sri b/go.mod.sri index b533a75654aa6..d46c84a110095 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-WeMTOkERj4hvdg4yPaZ1gRgKnhRIBXX55kUVbX/k/xM= 
+sha256-+tOYqRV8ZUA95dfVyRpjnJvwuSMobu/EhtXxq4bwvio= diff --git a/ipn/ipnlocal/netmapcache/netmapcache.go b/ipn/ipnlocal/netmapcache/netmapcache.go new file mode 100644 index 0000000000000..6992e0691f125 --- /dev/null +++ b/ipn/ipnlocal/netmapcache/netmapcache.go @@ -0,0 +1,351 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package netmapcache implements a persistent cache for [netmap.NetworkMap] +// values, allowing a client to start up using stale but previously-valid state +// even if a connection to the control plane is not immediately available. +package netmapcache + +import ( + "cmp" + "context" + "crypto/sha256" + "encoding/hex" + jsonv1 "encoding/json" + "errors" + "fmt" + "io/fs" + "iter" + "os" + "path/filepath" + "slices" + "strings" + "time" + + "tailscale.com/feature/buildfeatures" + "tailscale.com/tailcfg" + "tailscale.com/types/netmap" + "tailscale.com/util/mak" + "tailscale.com/util/set" +) + +var ( + // ErrKeyNotFound is a sentinel error reported by implementations of the [Store] + // interface when loading a key that is not found in the store. + ErrKeyNotFound = errors.New("storage key not found") + + // ErrCacheNotAvailable is a sentinel error reported by cache methods when + // the netmap caching feature is not enabled in the build. + ErrCacheNotAvailable = errors.New("netmap cache is not available") +) + +// A Cache manages a columnar cache of a [netmap.NetworkMap]. Each Cache holds +// a single netmap value; use [Cache.Store] to update or replace the cached +// value and [Cache.Load] to read the cached value. +type Cache struct { + store Store + + // wantKeys records the storage keys from the last write or load of a cached + // netmap. This is used to prune keys that are no longer referenced after an + // update. + wantKeys set.Set[string] + + // lastWrote records the last values written to each stored key. 
+ // + // TODO(creachadair): This is meant to avoid disk writes, but I'm not + // convinced we need it. Or maybe just track hashes of the content rather + // than caching a complete copy. + lastWrote map[string]lastWrote +} + +// NewCache constructs a new empty [Cache] from the given [Store]. +// It will panic if s == nil. +func NewCache(s Store) *Cache { + if s == nil { + panic("a non-nil Store is required") + } + return &Cache{ + store: s, + wantKeys: make(set.Set[string]), + lastWrote: make(map[string]lastWrote), + } +} + +type lastWrote struct { + digest string + at time.Time +} + +func (c *Cache) writeJSON(ctx context.Context, key string, v any) error { + j, err := jsonv1.Marshal(v) + if err != nil { + return fmt.Errorf("JSON marshalling %q: %w", key, err) + } + + // TODO(creachadair): Maybe use a hash instead of the contents? Do we need + // this at all? + last, ok := c.lastWrote[key] + if ok && cacheDigest(j) == last.digest { + return nil + } + + if err := c.store.Store(ctx, key, j); err != nil { + return err + } + + // Track the storage keys the current map is using, for storage GC. + c.wantKeys.Add(key) + c.lastWrote[key] = lastWrote{ + digest: cacheDigest(j), + at: time.Now(), + } + return nil +} + +func (c *Cache) removeUnwantedKeys(ctx context.Context) error { + var errs []error + for key, err := range c.store.List(ctx, "") { + if err != nil { + errs = append(errs, err) + break + } + if !c.wantKeys.Contains(key) { + if err := c.store.Remove(ctx, key); err != nil { + errs = append(errs, fmt.Errorf("remove key %q: %w", key, err)) + } + delete(c.lastWrote, key) // even if removal failed, we don't want it + } + } + return errors.Join(errs...) +} + +// FileStore implements the [Store] interface using a directory of files, in +// which each key is encoded as a filename in the directory. +// The caller is responsible to ensure the directory path exists before +// using the store methods. 
+type FileStore string + +// List implements part of the [Store] interface. +func (s FileStore) List(ctx context.Context, prefix string) iter.Seq2[string, error] { + return func(yield func(string, error) bool) { + des, err := os.ReadDir(string(s)) + if os.IsNotExist(err) { + return // nothing to read + } else if err != nil { + yield("", err) + return + } + + // os.ReadDir reports entries already sorted, and the encoding preserves that. + for _, de := range des { + key, err := hex.DecodeString(de.Name()) + if err != nil { + yield("", err) + return + } + name := string(key) + if !strings.HasPrefix(name, prefix) { + continue + } else if !yield(name, nil) { + return + } + } + } +} + +// Load implements part of the [Store] interface. +func (s FileStore) Load(ctx context.Context, key string) ([]byte, error) { + return os.ReadFile(filepath.Join(string(s), hex.EncodeToString([]byte(key)))) +} + +// Store implements part of the [Store] interface. +func (s FileStore) Store(ctx context.Context, key string, value []byte) error { + return os.WriteFile(filepath.Join(string(s), hex.EncodeToString([]byte(key))), value, 0600) +} + +// Remove implements part of the [Store] interface. +func (s FileStore) Remove(ctx context.Context, key string) error { + err := os.Remove(filepath.Join(string(s), hex.EncodeToString([]byte(key)))) + if errors.Is(err, fs.ErrNotExist) { + return nil + } + return err +} + +// Store records nm in the cache, replacing any previously-cached values. 
+func (c *Cache) Store(ctx context.Context, nm *netmap.NetworkMap) error { + if !buildfeatures.HasCacheNetMap || nm == nil || nm.Cached { + return nil + } + if selfID := nm.User(); selfID == 0 { + return errors.New("no user in netmap") + } + + clear(c.wantKeys) + if err := c.writeJSON(ctx, "misc", netmapMisc{ + MachineKey: &nm.MachineKey, + CollectServices: &nm.CollectServices, + DisplayMessages: &nm.DisplayMessages, + TKAEnabled: &nm.TKAEnabled, + TKAHead: &nm.TKAHead, + Domain: &nm.Domain, + DomainAuditLogID: &nm.DomainAuditLogID, + }); err != nil { + return err + } + if err := c.writeJSON(ctx, "dns", netmapDNS{DNS: &nm.DNS}); err != nil { + return err + } + if err := c.writeJSON(ctx, "derpmap", netmapDERPMap{DERPMap: &nm.DERPMap}); err != nil { + return err + } + if err := c.writeJSON(ctx, "self", netmapNode{Node: &nm.SelfNode}); err != nil { + return err + + // N.B. The NodeKey and AllCaps fields can be recovered from SelfNode on + // load, and do not need to be stored separately. + } + for _, p := range nm.Peers { + key := fmt.Sprintf("peer-%s", p.StableID()) + if err := c.writeJSON(ctx, key, netmapNode{Node: &p}); err != nil { + return err + } + } + for uid, u := range nm.UserProfiles { + key := fmt.Sprintf("user-%d", uid) + if err := c.writeJSON(ctx, key, netmapUserProfile{UserProfile: &u}); err != nil { + return err + } + } + + if buildfeatures.HasSSH && nm.SSHPolicy != nil { + if err := c.writeJSON(ctx, "ssh", netmapSSH{SSHPolicy: &nm.SSHPolicy}); err != nil { + return err + } + } + + return c.removeUnwantedKeys(ctx) +} + +// Load loads the cached [netmap.NetworkMap] value stored in c, if one is available. +// It reports [ErrCacheNotAvailable] if no cached data are available. +// On success, the Cached field of the returned network map is true. 
+func (c *Cache) Load(ctx context.Context) (*netmap.NetworkMap, error) { + if !buildfeatures.HasCacheNetMap { + return nil, ErrCacheNotAvailable + } + + nm := netmap.NetworkMap{Cached: true} + + // At minimum, we require that the cache contain a "self" node, or the data + // are not usable. + if self, err := c.store.Load(ctx, "self"); errors.Is(err, ErrKeyNotFound) { + return nil, ErrCacheNotAvailable + } else if err := jsonv1.Unmarshal(self, &netmapNode{Node: &nm.SelfNode}); err != nil { + return nil, err + } + c.wantKeys.Add("self") + + // If we successfully recovered a SelfNode, pull out its related fields. + if s := nm.SelfNode; s.Valid() { + nm.NodeKey = s.Key() + nm.AllCaps = make(set.Set[tailcfg.NodeCapability]) + for _, c := range s.Capabilities().All() { + nm.AllCaps.Add(c) + } + for c := range s.CapMap().All() { + nm.AllCaps.Add(c) + } + } + + // Unmarshal the contents of each specified cache entry directly into the + // fields of the output. See the comment in types.go for more detail. 
+ + if err := c.readJSON(ctx, "misc", &netmapMisc{ + MachineKey: &nm.MachineKey, + CollectServices: &nm.CollectServices, + DisplayMessages: &nm.DisplayMessages, + TKAEnabled: &nm.TKAEnabled, + TKAHead: &nm.TKAHead, + Domain: &nm.Domain, + DomainAuditLogID: &nm.DomainAuditLogID, + }); err != nil { + return nil, err + } + + if err := c.readJSON(ctx, "dns", &netmapDNS{DNS: &nm.DNS}); err != nil { + return nil, err + } + if err := c.readJSON(ctx, "derpmap", &netmapDERPMap{DERPMap: &nm.DERPMap}); err != nil { + return nil, err + } + + for key, err := range c.store.List(ctx, "peer-") { + if err != nil { + return nil, err + } + var peer tailcfg.NodeView + if err := c.readJSON(ctx, key, &netmapNode{Node: &peer}); err != nil { + return nil, err + } + nm.Peers = append(nm.Peers, peer) + } + slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { return cmp.Compare(a.ID(), b.ID()) }) + for key, err := range c.store.List(ctx, "user-") { + if err != nil { + return nil, err + } + var up tailcfg.UserProfileView + if err := c.readJSON(ctx, key, &netmapUserProfile{UserProfile: &up}); err != nil { + return nil, err + } + mak.Set(&nm.UserProfiles, up.ID(), up) + } + if err := c.readJSON(ctx, "ssh", &netmapSSH{SSHPolicy: &nm.SSHPolicy}); err != nil { + return nil, err + } + + return &nm, nil +} + +func (c *Cache) readJSON(ctx context.Context, key string, value any) error { + data, err := c.store.Load(ctx, key) + if errors.Is(err, ErrKeyNotFound) { + return nil + } else if err != nil { + return err + } + if err := jsonv1.Unmarshal(data, value); err != nil { + return err + } + c.wantKeys.Add(key) + c.lastWrote[key] = lastWrote{digest: cacheDigest(data), at: time.Now()} + return nil +} + +// Store is the interface to persistent key-value storage used by a [Cache]. +type Store interface { + // List lists all the stored keys having the specified prefixes, in + // lexicographic order. 
+ // + // Each pair yielded by the iterator is either a valid storage key and a nil + // error, or an empty key and a non-nil error. After reporting an error, the + // iterator must immediately return. + List(ctx context.Context, prefix string) iter.Seq2[string, error] + + // Load fetches the contents of the specified key. + // If the key is not found in the store, Load must report [ErrKeyNotFound]. + Load(ctx context.Context, key string) ([]byte, error) + + // Store marshals and stores the contents of the specified value under key. + // If the key already exists, its contents are replaced. + Store(ctx context.Context, key string, value []byte) error + + // Remove removes the specified key from the store. If the key does not exist, + // Remove reports success (nil). + Remove(ctx context.Context, key string) error +} + +// cacheDigest computes a string digest of the specified data, for use in +// detecting cache hits. +func cacheDigest(data []byte) string { h := sha256.Sum256(data); return string(h[:]) } diff --git a/ipn/ipnlocal/netmapcache/netmapcache_test.go b/ipn/ipnlocal/netmapcache/netmapcache_test.go new file mode 100644 index 0000000000000..1f7d9b3bf6f07 --- /dev/null +++ b/ipn/ipnlocal/netmapcache/netmapcache_test.go @@ -0,0 +1,298 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package netmapcache_test + +import ( + "context" + "errors" + "flag" + "fmt" + "iter" + "os" + "reflect" + "slices" + "strings" + "testing" + + "github.com/creachadair/mds/mtest" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/ipn/ipnlocal/netmapcache" + "tailscale.com/tailcfg" + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/netmap" + "tailscale.com/types/views" + "tailscale.com/util/set" +) + +// Input values for valid-looking placeholder values for keys, hashes, etc. 
+const ( + testNodeKeyString = "nodekey:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + testMachineKeyString = "mkey:fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210" + testAUMHashString = "APPLEPEARPLUMCHERRYAPPLEPEARPLUMCHERRYAPPLEPEARPLUMA" // base32, no padding +) + +var keepTestOutput = flag.String("keep-output", "", "directory to keep test output (if empty, use a test temp)") + +var ( + testNode1 = (&tailcfg.Node{ + ID: 99001, + StableID: "n99001FAKE", + Name: "test1.example.com.", + }).View() + testNode2 = (&tailcfg.Node{ + ID: 99002, + StableID: "n99002FAKE", + Name: "test2.example.com.", + }).View() + + // The following fields are set in init. + testNodeKey key.NodePublic + testMachineKey key.MachinePublic + testAUMHash tka.AUMHash + testMap *netmap.NetworkMap +) + +func init() { + if err := testNodeKey.UnmarshalText([]byte(testNodeKeyString)); err != nil { + panic(fmt.Sprintf("invalid test nodekey %q: %v", testNodeKeyString, err)) + } + if err := testMachineKey.UnmarshalText([]byte(testMachineKeyString)); err != nil { + panic(fmt.Sprintf("invalid test machine key %q: %v", testMachineKeyString, err)) + } + if err := testAUMHash.UnmarshalText([]byte(testAUMHashString)); err != nil { + panic(fmt.Sprintf("invalid test AUM hash %q: %v", testAUMHashString, err)) + } + + // The following network map must have a non-zero non-empty value for every + // field that is to be stored in the cache. The test checks for this using + // reflection, as a way to ensure that new fields added to the type are + // covered by a test (see checkFieldCoverage). + // + // The exact values are unimportant, except that they should be values that + // give us confidence that a network map round-tripped through the cache and + // compared will accurately reflect the information we care about. 
+ testMap = &netmap.NetworkMap{ + Cached: false, // not cached, this is metadata for the cache machinery + + PacketFilter: nil, // not cached + PacketFilterRules: views.Slice[tailcfg.FilterRule]{}, // not cached + + // Fields stored under the "self" key. + // Note that SelfNode must have a valid user in order to be considered + // cacheable. Moreover, it must mention all the capabilities we expect + // to see advertised in the AllCaps set, and its public key must match the + // one advertised in the NodeKey field. + SelfNode: (&tailcfg.Node{ + ID: 12345, + StableID: "n12345FAKE", + User: 30337, + Name: "test.example.com.", + Key: testNodeKey, + Capabilities: []tailcfg.NodeCapability{"cap1"}, + CapMap: map[tailcfg.NodeCapability][]tailcfg.RawMessage{ + "cap2": nil, + }, + }).View(), + AllCaps: set.Of[tailcfg.NodeCapability]("cap1", "cap2"), + NodeKey: testNodeKey, + + DNS: tailcfg.DNSConfig{Domains: []string{"example1.com", "example2.ac.uk"}}, // "dns" + + SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{{ // "ssh" + SSHUsers: map[string]string{"amelie": "ubuntu"}, + Action: &tailcfg.SSHAction{Message: "hello", Accept: true}, + AcceptEnv: []string{"MAGIC_SSH_*"}, + }}}, + + DERPMap: &tailcfg.DERPMap{ // "derp" + HomeParams: &tailcfg.DERPHomeParams{ + RegionScore: map[int]float64{10: 0.31, 20: 0.141, 30: 0.592}, + }, + OmitDefaultRegions: true, + }, + + // Peers stored under "peer-" keys. + Peers: []tailcfg.NodeView{testNode1, testNode2}, + + // Profiles stored under "user-" keys. 
+ UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + 12345: (&tailcfg.UserProfile{ID: 12345, DisplayName: "me"}).View(), + 67890: (&tailcfg.UserProfile{ID: 67890, DisplayName: "you"}).View(), + }, + + // Fields stored under "misc" + MachineKey: testMachineKey, + CollectServices: true, + DisplayMessages: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "test-message-1": {Title: "hello", Text: "this is your wakeup call"}, + "test-message-2": {Title: "goodbye", Text: "good night", ImpactsConnectivity: true}, + }, + TKAEnabled: true, + TKAHead: testAUMHash, + Domain: "example.com", + DomainAuditLogID: "0f1e2d3c4b5a67890f1e2d3c4b5a67890f1e2d3c4b5a67890f1e2d3c4b5a6789", + } +} + +func TestNewStore(t *testing.T) { + mtest.MustPanicf(t, func() { netmapcache.NewCache(nil) }, "NewCache should panic for a nil store") +} + +func TestRoundTrip(t *testing.T) { + checkFieldCoverage(t, testMap) + + dir := *keepTestOutput + if dir == "" { + dir = t.TempDir() + } else if err := os.MkdirAll(dir, 0700); err != nil { + t.Fatalf("Create --keep-output directory: %v", err) + } + + tests := []struct { + name string + store netmapcache.Store + }{ + {"MemStore", make(testStore)}, + {"FileStore", netmapcache.FileStore(dir)}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := netmapcache.NewCache(tt.store) + if err := c.Store(t.Context(), testMap); err != nil { + t.Fatalf("Store netmap failed; %v", err) + } + + cmap, err := c.Load(t.Context()) + if err != nil { + t.Fatalf("Load netmap failed: %v", err) + } + + if !cmap.Cached { + t.Error("Cached map is not marked as such") + } + + opts := []cmp.Option{ + cmpopts.IgnoreFields(netmap.NetworkMap{}, skippedMapFields...), + cmpopts.EquateComparable(key.NodePublic{}, key.MachinePublic{}), + } + if diff := cmp.Diff(cmap, testMap, opts...); diff != "" { + t.Fatalf("Cached map differs (-got, +want):\n%s", diff) + } + + }) + } +} + +func TestInvalidCache(t *testing.T) { + t.Run("Empty", func(t *testing.T) { + c 
:= netmapcache.NewCache(make(testStore)) + got, err := c.Load(t.Context()) + if !errors.Is(err, netmapcache.ErrCacheNotAvailable) { + t.Errorf("Load from empty cache: got %+v, %v; want nil, %v", got, err, netmapcache.ErrCacheNotAvailable) + } + }) + + t.Run("Incomplete", func(t *testing.T) { + s := make(testStore) + c := netmapcache.NewCache(s) + + if err := c.Store(t.Context(), testMap); err != nil { + t.Fatalf("Store initial netmap: %v", err) + } + + // Drop the "self" node from the cache, and verify it makes the results + // unloadable. + if err := s.Remove(t.Context(), "self"); err != nil { + t.Fatalf("Remove self: %v", err) + } + + got, err := c.Load(t.Context()) + if !errors.Is(err, netmapcache.ErrCacheNotAvailable) { + t.Errorf("Load from invalid cache: got %+v, %v; want nil, %v", got, err, netmapcache.ErrCacheNotAvailable) + } + }) +} + +// skippedMapFields are the names of fields that should not be considered by +// network map caching, and thus skipped when comparing test results. +var skippedMapFields = []string{ + "Cached", "PacketFilter", "PacketFilterRules", +} + +// checkFieldCoverage logs an error in t if any of the fields of nm are zero +// valued, except those listed in skippedMapFields. +// +// This ensures if any new fields are added to the [netmap.NetworkMap] type in +// the future, the test will fail until non-trivial test data are added to this +// test, or the fields are recorded as skipped. It also helps ensure that +// changing the field types or deleting fields will make compilation fail, so +// the tests get updated. +func checkFieldCoverage(t *testing.T, nm *netmap.NetworkMap) { + t.Helper() + + mt := reflect.TypeOf(nm).Elem() + mv := reflect.ValueOf(nm).Elem() + for i := 0; i < mt.NumField(); i++ { + f := mt.Field(i) + if slices.Contains(skippedMapFields, f.Name) { + continue + } + fv := mv.Field(i) + if fv.IsZero() { + t.Errorf("Field %d (%q) of test value is zero (%+v). 
"+ + "A non-zero value is required for each cached field in the test value.", + i, f.Name, fv.Interface()) + } + } + + // Verify that skip-listed fields exist on the type. FieldByName thwarts the + // linker, but it's OK in a test. + for _, skip := range skippedMapFields { + if _, ok := mt.FieldByName(skip); !ok { + t.Errorf("Skipped field %q not found on type %T. "+ + "If a field was deleted from the type, you may need to update skippedMapFields.", + skip, nm) + } + } + if t.Failed() { + t.FailNow() + } +} + +// testStore is an in-memory implementation of the [netmapcache.Store] interface. +type testStore map[string][]byte + +func (t testStore) List(_ context.Context, prefix string) iter.Seq2[string, error] { + var matching []string + for key := range t { + if strings.HasPrefix(key, prefix) { + matching = append(matching, key) + } + } + slices.Sort(matching) + return func(yield func(string, error) bool) { + for _, key := range matching { + if !yield(key, nil) { + return + } + } + } +} + +func (t testStore) Load(_ context.Context, key string) ([]byte, error) { + val, ok := t[key] + if !ok { + return nil, netmapcache.ErrKeyNotFound + } + return val, nil +} + +func (t testStore) Store(_ context.Context, key string, value []byte) error { + t[key] = value + return nil +} + +func (t testStore) Remove(_ context.Context, key string) error { delete(t, key); return nil } diff --git a/ipn/ipnlocal/netmapcache/types.go b/ipn/ipnlocal/netmapcache/types.go new file mode 100644 index 0000000000000..2fb5a1575f1b3 --- /dev/null +++ b/ipn/ipnlocal/netmapcache/types.go @@ -0,0 +1,52 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package netmapcache + +import ( + "tailscale.com/tailcfg" + "tailscale.com/tka" + "tailscale.com/types/key" +) + +// The fields in the following wrapper types are all pointers, even when their +// target type is also a pointer, so that they can be used to unmarshal +// directly into the fields of another value. 
These wrappers intentionally do +// not omit zero or empty values, since we want the cache to reflect the value +// the object had at the time it was written, even if the default changes +// later. +// +// Moreover, these are all struct types so that each cached record will be a +// JSON object even if the underlying value marshals to an array or primitive +// type, and so that we have a seam if we want to replace or version the cached +// representation separately from the default JSON layout. + +type netmapMisc struct { + MachineKey *key.MachinePublic + CollectServices *bool + DisplayMessages *map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage + TKAEnabled *bool + TKAHead *tka.AUMHash + Domain *string + DomainAuditLogID *string +} + +type netmapSSH struct { + SSHPolicy **tailcfg.SSHPolicy +} + +type netmapDNS struct { + DNS *tailcfg.DNSConfig +} + +type netmapDERPMap struct { + DERPMap **tailcfg.DERPMap +} + +type netmapNode struct { + Node *tailcfg.NodeView +} + +type netmapUserProfile struct { + UserProfile *tailcfg.UserProfileView +} diff --git a/shell.nix b/shell.nix index ccec5faf538e0..3accd73c55ffb 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-WeMTOkERj4hvdg4yPaZ1gRgKnhRIBXX55kUVbX/k/xM= +# nix-direnv cache busting line: sha256-+tOYqRV8ZUA95dfVyRpjnJvwuSMobu/EhtXxq4bwvio= diff --git a/types/netmap/netmap.go b/types/netmap/netmap.go index d809cbab4ad5d..ac95254daee1d 100644 --- a/types/netmap/netmap.go +++ b/types/netmap/netmap.go @@ -27,6 +27,8 @@ import ( // The fields should all be considered read-only. They might // alias parts of previous NetworkMap values. 
type NetworkMap struct { + Cached bool // whether this NetworkMap was loaded from disk cache (as opposed to live from network) + SelfNode tailcfg.NodeView AllCaps set.Set[tailcfg.NodeCapability] // set version of SelfNode.Capabilities + SelfNode.CapMap NodeKey key.NodePublic From 6de5b01e04beeb8504c1644e26b7b239a8a12e8c Mon Sep 17 00:00:00 2001 From: Amal Bansode Date: Mon, 26 Jan 2026 16:41:03 -0800 Subject: [PATCH 0901/1093] ipn/localapi: stop logging "broken pipe" errors (#18487) The Tailscale CLI has some methods to watch the IPN bus for messages, say, the current netmap (`tailscale debug netmap`). The Tailscale daemon supports this using a streaming HTTP response. Sometimes, the client can close its connection abruptly -- due to an interruption, or in the case of `debug netmap`, intentionally after consuming one message. If the server daemon is writing a response as the client closes its end of the socket, the daemon typically encounters a "broken pipe" error. The "Watch IPN Bus" handler currently logs such errors after they're propagated by a JSON encoding/writer helper. Since the Tailscale CLI nominally closes its socket with the daemon in this slightly ungraceful way (viz. `debug netmap`), stop logging these broken pipe errors as far as possible. This will help avoid confounding users when they scan backend logs. 
Updates #18477 Signed-off-by: Amal Bansode --- ipn/localapi/localapi.go | 5 +++- net/neterror/neterror_js.go | 20 +++++++++++++++ net/neterror/neterror_plan9.go | 24 ++++++++++++++++++ net/neterror/neterror_posix.go | 32 ++++++++++++++++++++++++ wgengine/magicsock/magicsock_notplan9.go | 4 ++- 5 files changed, 83 insertions(+), 2 deletions(-) create mode 100644 net/neterror/neterror_js.go create mode 100644 net/neterror/neterror_plan9.go create mode 100644 net/neterror/neterror_posix.go diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 248c0377ec968..dc558b36e61d9 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -35,6 +35,7 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" "tailscale.com/logtail" + "tailscale.com/net/neterror" "tailscale.com/net/netns" "tailscale.com/net/netutil" "tailscale.com/tailcfg" @@ -913,7 +914,9 @@ func (h *Handler) serveWatchIPNBus(w http.ResponseWriter, r *http.Request) { h.b.WatchNotificationsAs(ctx, h.Actor, mask, f.Flush, func(roNotify *ipn.Notify) (keepGoing bool) { err := enc.Encode(roNotify) if err != nil { - h.logf("json.Encode: %v", err) + if !neterror.IsClosedPipeError(err) { + h.logf("json.Encode: %v", err) + } return false } f.Flush() diff --git a/net/neterror/neterror_js.go b/net/neterror/neterror_js.go new file mode 100644 index 0000000000000..591367120fd85 --- /dev/null +++ b/net/neterror/neterror_js.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build js || wasip1 || wasm + +package neterror + +import ( + "errors" + "io" + "io/fs" +) + +// Reports whether err resulted from reading or writing to a closed or broken pipe. +func IsClosedPipeError(err error) bool { + // Libraries may also return root errors like fs.ErrClosed/io.ErrClosedPipe + // due to a closed socket. 
+ return errors.Is(err, fs.ErrClosed) || + errors.Is(err, io.ErrClosedPipe) +} diff --git a/net/neterror/neterror_plan9.go b/net/neterror/neterror_plan9.go new file mode 100644 index 0000000000000..a60c4dd6496fa --- /dev/null +++ b/net/neterror/neterror_plan9.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build plan9 + +package neterror + +import ( + "errors" + "io" + "io/fs" + "strings" +) + +// Reports whether err resulted from reading or writing to a closed or broken pipe. +func IsClosedPipeError(err error) bool { + // Libraries may also return root errors like fs.ErrClosed/io.ErrClosedPipe + // due to a closed socket. + // For a raw syscall error, check for error string containing "closed pipe", + // per the note set by the system: https://9p.io/magic/man2html/2/pipe + return errors.Is(err, fs.ErrClosed) || + errors.Is(err, io.ErrClosedPipe) || + strings.Contains(err.Error(), "closed pipe") +} diff --git a/net/neterror/neterror_posix.go b/net/neterror/neterror_posix.go new file mode 100644 index 0000000000000..71dda6b4cf3a8 --- /dev/null +++ b/net/neterror/neterror_posix.go @@ -0,0 +1,32 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 && !js && !wasip1 && !wasm + +package neterror + +import ( + "errors" + "io" + "io/fs" + "runtime" + "syscall" +) + +// Reports whether err resulted from reading or writing to a closed or broken pipe. +func IsClosedPipeError(err error) bool { + // 232 is Windows error code ERROR_NO_DATA, "The pipe is being closed". + if runtime.GOOS == "windows" && errors.Is(err, syscall.Errno(232)) { + return true + } + + // EPIPE/ENOTCONN are common errors when a send fails due to a closed + // socket. There is some platform and version inconsistency in which + // error is returned, but the meaning is the same. 
+ // Libraries may also return root errors like fs.ErrClosed/io.ErrClosedPipe + // due to a closed socket. + return errors.Is(err, syscall.EPIPE) || + errors.Is(err, syscall.ENOTCONN) || + errors.Is(err, fs.ErrClosed) || + errors.Is(err, io.ErrClosedPipe) +} diff --git a/wgengine/magicsock/magicsock_notplan9.go b/wgengine/magicsock/magicsock_notplan9.go index db2c5fca052b9..6bb9db5d7f1d6 100644 --- a/wgengine/magicsock/magicsock_notplan9.go +++ b/wgengine/magicsock/magicsock_notplan9.go @@ -8,6 +8,8 @@ package magicsock import ( "errors" "syscall" + + "tailscale.com/net/neterror" ) // shouldRebind returns if the error is one that is known to be healed by a @@ -17,7 +19,7 @@ func shouldRebind(err error) (ok bool, reason string) { // EPIPE/ENOTCONN are common errors when a send fails due to a closed // socket. There is some platform and version inconsistency in which // error is returned, but the meaning is the same. - case errors.Is(err, syscall.EPIPE), errors.Is(err, syscall.ENOTCONN): + case neterror.IsClosedPipeError(err): return true, "broken-pipe" // EPERM is typically caused by EDR software, and has been observed to be From ae625691597787dab7fc6aa04c17ac78fcebc9af Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Tue, 27 Jan 2026 14:25:27 +0000 Subject: [PATCH 0902/1093] hostinfo: retrieve OS version for Macs running the OSS client Updates #18520 Change-Id: If86a1f702c704b003002aa7e2f5a6b1418b469cc Signed-off-by: Alex Chan --- hostinfo/hostinfo_darwin.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/hostinfo/hostinfo_darwin.go b/hostinfo/hostinfo_darwin.go index bce99d7003406..cd551ca425790 100644 --- a/hostinfo/hostinfo_darwin.go +++ b/hostinfo/hostinfo_darwin.go @@ -8,14 +8,31 @@ package hostinfo import ( "os" "path/filepath" + + "golang.org/x/sys/unix" + "tailscale.com/types/ptr" ) func init() { + osVersion = lazyOSVersion.Get packageType = packageTypeDarwin } +var ( + lazyOSVersion = &lazyAtomicValue[string]{f: ptr.To(osVersionDarwin)} 
+) + func packageTypeDarwin() string { // Using tailscaled or IPNExtension? exe, _ := os.Executable() return filepath.Base(exe) } + +// osVersionDarwin returns the marketing version (e.g., "15.0.1" or "26.0.0"). +func osVersionDarwin() string { + version, err := unix.Sysctl("kern.osproductversion") + if err != nil { + return "" + } + return version +} From aac12ba799f1af9021cac5dfbdcc0e4df4601626 Mon Sep 17 00:00:00 2001 From: Cameron Stokes Date: Tue, 27 Jan 2026 13:42:04 -0800 Subject: [PATCH 0903/1093] cmd/tailscale/cli: add json output option to `switch --list` (#18501) * cmd/tailscale/cli: add json output option to `switch --list` Closes #14783 Signed-off-by: Cameron Stokes --- cmd/tailscale/cli/switch.go | 50 ++++++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index 34ed2c7687c67..bd90c522e3393 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -5,6 +5,7 @@ package cli import ( "context" + "encoding/json" "flag" "fmt" "os" @@ -18,9 +19,12 @@ import ( ) var switchCmd = &ffcli.Command{ - Name: "switch", - ShortUsage: "tailscale switch ", - ShortHelp: "Switch to a different Tailscale account", + Name: "switch", + ShortUsage: strings.Join([]string{ + "tailscale switch ", + "tailscale switch --list [--json]", + }, "\n"), + ShortHelp: "Switch to a different Tailscale account", LongHelp: `"tailscale switch" switches between logged in accounts. You can use the ID that's returned from 'tailnet switch -list' to pick which profile you want to switch to.
Alternatively, you @@ -31,6 +35,7 @@ This command is currently in alpha and may change in the future.`, FlagSet: func() *flag.FlagSet { fs := flag.NewFlagSet("switch", flag.ExitOnError) fs.BoolVar(&switchArgs.list, "list", false, "list available accounts") + fs.BoolVar(&switchArgs.json, "json", false, "list available accounts in JSON format") return fs }(), Exec: switchProfile, @@ -82,6 +87,7 @@ func init() { var switchArgs struct { list bool + json bool } func listProfiles(ctx context.Context) error { @@ -109,10 +115,48 @@ func listProfiles(ctx context.Context) error { return nil } +type switchProfileJSON struct { + ID string `json:"id"` + Nickname string `json:"nickname"` + Tailnet string `json:"tailnet"` + Account string `json:"account"` + Selected bool `json:"selected"` +} + +func listProfilesJSON(ctx context.Context) error { + curP, all, err := localClient.ProfileStatus(ctx) + if err != nil { + return err + } + profiles := make([]switchProfileJSON, 0, len(all)) + for _, prof := range all { + profiles = append(profiles, switchProfileJSON{ + ID: string(prof.ID), + Tailnet: prof.NetworkProfile.DisplayNameOrDefault(), + Account: prof.UserProfile.LoginName, + Nickname: prof.Name, + Selected: prof.ID == curP.ID, + }) + } + profilesJSON, err := json.MarshalIndent(profiles, "", " ") + if err != nil { + return err + } + printf("%s\n", profilesJSON) + return nil +} + func switchProfile(ctx context.Context, args []string) error { if switchArgs.list { + if switchArgs.json { + return listProfilesJSON(ctx) + } return listProfiles(ctx) } + if switchArgs.json { + outln("--json argument cannot be used with tailscale switch NAME") + os.Exit(1) + } if len(args) != 1 { outln("usage: tailscale switch NAME") os.Exit(1) From a374cc344e48067a64cacf5bebd49fbe99596688 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 26 Jan 2026 17:21:08 -0800 Subject: [PATCH 0904/1093] tool/gocross, pull-toolchain.sh: support a "next" Go toolchain When TS_GO_NEXT=1 is set, update/use the 
go.toolchain.next.{branch,rev} files instead. This lets us do test deploys of Go release candidates on some backends, without affecting all backends. Updates tailscale/corp#36382 Change-Id: I00dbde87b219b720be5ea142325c4711f101a364 Signed-off-by: Brad Fitzpatrick --- go.toolchain.next.branch | 1 + go.toolchain.next.rev | 1 + pull-toolchain.sh | 27 ++++++++++++++++++++------- tool/gocross/gocross-wrapper.sh | 12 +++++++++--- 4 files changed, 31 insertions(+), 10 deletions(-) create mode 100644 go.toolchain.next.branch create mode 100644 go.toolchain.next.rev diff --git a/go.toolchain.next.branch b/go.toolchain.next.branch new file mode 100644 index 0000000000000..6022b95593bbe --- /dev/null +++ b/go.toolchain.next.branch @@ -0,0 +1 @@ +tailscale.go1.26 diff --git a/go.toolchain.next.rev b/go.toolchain.next.rev new file mode 100644 index 0000000000000..ee8816b6ff3dd --- /dev/null +++ b/go.toolchain.next.rev @@ -0,0 +1 @@ +07d023ba9bb6d17a84b492f1524fabfa69a31bda diff --git a/pull-toolchain.sh b/pull-toolchain.sh index eb8febf6bb32d..b10e3cd68cf11 100755 --- a/pull-toolchain.sh +++ b/pull-toolchain.sh @@ -1,20 +1,33 @@ #!/bin/sh # Retrieve the latest Go toolchain. +# Set TS_GO_NEXT=1 to update go.toolchain.next.rev instead. # set -eu cd "$(dirname "$0")" -read -r go_branch go.toolchain.rev + echo "$upstream" >"$go_toolchain_rev_file" fi -./tool/go version 2>/dev/null | awk '{print $3}' | sed 's/^go//' > go.toolchain.version - -./update-flake.sh +# Only update go.toolchain.version and go.toolchain.rev.sri for the main toolchain, +# skipping it if TS_GO_NEXT=1. Those two files are only used by Nix, and as of 2026-01-26 +# don't yet support TS_GO_NEXT=1 with flake.nix or in our corp CI. 
+if [ "${TS_GO_NEXT:-}" != "1" ]; then + ./tool/go version 2>/dev/null | awk '{print $3}' | sed 's/^go//' > go.toolchain.version + ./update-flake.sh +fi -if [ -n "$(git diff-index --name-only HEAD -- go.toolchain.rev go.toolchain.rev.sri go.toolchain.version)" ]; then +if [ -n "$(git diff-index --name-only HEAD -- "$go_toolchain_rev_file" go.toolchain.rev.sri go.toolchain.version)" ]; then echo "pull-toolchain.sh: changes imported. Use git commit to make them permanent." >&2 fi diff --git a/tool/gocross/gocross-wrapper.sh b/tool/gocross/gocross-wrapper.sh index 352d639b75530..05a35ba424cc2 100755 --- a/tool/gocross/gocross-wrapper.sh +++ b/tool/gocross/gocross-wrapper.sh @@ -4,7 +4,7 @@ # # gocross-wrapper.sh is a wrapper that can be aliased to 'go', which # transparently runs the version of github.com/tailscale/go as specified repo's -# go.toolchain.rev file. +# go.toolchain.rev file (or go.toolchain.next.rev if TS_GO_NEXT=1). # # It also conditionally (if TS_USE_GOCROSS=1) builds gocross and uses it as a go # wrapper to inject certain go flags. @@ -21,6 +21,12 @@ if [[ "${OSTYPE:-}" == "cygwin" || "${OSTYPE:-}" == "msys" ]]; then exit fi +if [[ "${TS_GO_NEXT:-}" == "1" ]]; then + go_toolchain_rev_file="go.toolchain.next.rev" +else + go_toolchain_rev_file="go.toolchain.rev" +fi + # Locate a bootstrap toolchain and (re)build gocross if necessary. We run all of # this in a subshell because posix shell semantics make it very easy to # accidentally mutate the input environment that will get passed to gocross at @@ -45,7 +51,7 @@ cd "$repo_root" # https://github.com/tailscale/go release artifact to download. 
toolchain="" -read -r REV Date: Tue, 27 Jan 2026 14:44:32 -0800 Subject: [PATCH 0905/1093] cmd/printdep: add --next flag to use rc Go build hash instead Updates tailscale/corp#36382 Change-Id: Ib7474b0aab901e98f0fe22761e26fd181650743c Signed-off-by: Brad Fitzpatrick --- assert_ts_toolchain_match.go | 3 +++ cmd/printdep/printdep.go | 9 +++++++-- version-embed.go | 6 ++++++ 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/assert_ts_toolchain_match.go b/assert_ts_toolchain_match.go index f0760ec039414..901dbb8ec83a1 100644 --- a/assert_ts_toolchain_match.go +++ b/assert_ts_toolchain_match.go @@ -17,6 +17,9 @@ func init() { panic("binary built with tailscale_go build tag but failed to read build info or find tailscale.toolchain.rev in build info") } want := strings.TrimSpace(GoToolchainRev) + if os.Getenv("TS_GO_NEXT") == "1" { + want = strings.TrimSpace(GoToolchainNextRev) + } if tsRev != want { if os.Getenv("TS_PERMIT_TOOLCHAIN_MISMATCH") == "1" { fmt.Fprintf(os.Stderr, "tailscale.toolchain.rev = %q, want %q; but ignoring due to TS_PERMIT_TOOLCHAIN_MISMATCH=1\n", tsRev, want) diff --git a/cmd/printdep/printdep.go b/cmd/printdep/printdep.go index c4ba5b79a3357..f5aeab7a561b6 100644 --- a/cmd/printdep/printdep.go +++ b/cmd/printdep/printdep.go @@ -19,6 +19,7 @@ var ( goToolchain = flag.Bool("go", false, "print the supported Go toolchain git hash (a github.com/tailscale/go commit)") goToolchainURL = flag.Bool("go-url", false, "print the URL to the tarball of the Tailscale Go toolchain") alpine = flag.Bool("alpine", false, "print the tag of alpine docker image") + next = flag.Bool("next", false, "if set, modifies --go or --go-url to use the upcoming/unreleased/rc Go release version instead") ) func main() { @@ -27,8 +28,12 @@ func main() { fmt.Println(strings.TrimSpace(ts.AlpineDockerTag)) return } + goRev := strings.TrimSpace(ts.GoToolchainRev) + if *next { + goRev = strings.TrimSpace(ts.GoToolchainNextRev) + } if *goToolchain { - 
fmt.Println(strings.TrimSpace(ts.GoToolchainRev)) + fmt.Println(goRev) } if *goToolchainURL { switch runtime.GOOS { @@ -36,6 +41,6 @@ default: log.Fatalf("unsupported GOOS %q", runtime.GOOS) } - fmt.Printf("https://github.com/tailscale/go/releases/download/build-%s/%s-%s.tar.gz\n", strings.TrimSpace(ts.GoToolchainRev), runtime.GOOS, runtime.GOARCH) + fmt.Printf("https://github.com/tailscale/go/releases/download/build-%s/%s-%s.tar.gz\n", goRev, runtime.GOOS, runtime.GOARCH) } } diff --git a/version-embed.go b/version-embed.go index 9f48d1384ff67..c368186ab3a7f 100644 --- a/version-embed.go +++ b/version-embed.go @@ -26,6 +26,12 @@ var AlpineDockerTag string //go:embed go.toolchain.rev var GoToolchainRev string +// GoToolchainNextRev is like GoToolchainRev, but uses the +// "go.toolchain.next.rev" file when TS_GO_NEXT=1 is set in the environment. +// +//go:embed go.toolchain.next.rev +var GoToolchainNextRev string + //lint:ignore U1000 used by tests + assert_ts_toolchain_match.go w/ right build tags func tailscaleToolchainRev() (gitHash string, ok bool) { bi, ok := debug.ReadBuildInfo() From d7d12761ba8c9fc029ef4fae5e5644eb6cdae2d7 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 27 Jan 2026 16:15:17 -0800 Subject: [PATCH 0906/1093] Add .stignore for syncthing (#18540) This symlink tells syncthing to ignore stuff that's in .gitignore. Updates https://github.com/tailscale/corp/issues/36250 Signed-off-by: Andrew Lytvynov --- .gitignore | 3 +++ .stignore | 1 + 2 files changed, 4 insertions(+) create mode 120000 .stignore diff --git a/.gitignore b/.gitignore index 3941fd06ef6d5..4bfabc80f0415 100644 --- a/.gitignore +++ b/.gitignore @@ -52,3 +52,6 @@ client/web/build/assets # Ignore personal IntelliJ settings .idea/ + +# Ignore syncthing state directory.
+/.stfolder diff --git a/.stignore b/.stignore new file mode 120000 index 0000000000000..3e4e48b0b5fe6 --- /dev/null +++ b/.stignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file From 72f736134d741b5825b8952a1e33f37a79e4acfb Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 28 Jan 2026 08:41:38 -0800 Subject: [PATCH 0907/1093] cmd/testwrapper/flakytest: skip flaky tests if TS_SKIP_FLAKY_TESTS set This is for a future test scheduler, so it can run potentially flaky tests separately, doing all the non-flaky ones together in one batch. Updates tailscale/corp#28679 Change-Id: Ic4a11f9bf394528ef75792fd622f17bc01a4ec8a Signed-off-by: Brad Fitzpatrick --- cmd/testwrapper/flakytest/flakytest.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cmd/testwrapper/flakytest/flakytest.go b/cmd/testwrapper/flakytest/flakytest.go index b98d739c63620..5e1591e817e00 100644 --- a/cmd/testwrapper/flakytest/flakytest.go +++ b/cmd/testwrapper/flakytest/flakytest.go @@ -11,6 +11,7 @@ import ( "os" "path" "regexp" + "strconv" "sync" "testing" @@ -60,6 +61,10 @@ func Mark(t testing.TB, issue string) { // And then remove this Logf a month or so after that. t.Logf("flakytest: issue tracking this flaky test: %s", issue) + if boolEnv("TS_SKIP_FLAKY_TESTS") { + t.Skipf("skipping due to TS_SKIP_FLAKY_TESTS") + } + // Record the root test name as flakey. rootFlakesMu.Lock() defer rootFlakesMu.Unlock() @@ -80,3 +85,12 @@ func Marked(t testing.TB) bool { } return false } + +func boolEnv(k string) bool { + s := os.Getenv(k) + if s == "" { + return false + } + v, _ := strconv.ParseBool(s) + return v +} From aca1b5da0f91729c6cde1d634ef65a4f7f74d278 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 28 Jan 2026 10:12:32 -0800 Subject: [PATCH 0908/1093] go.toolchain.rev: bump for cmd/go caching work This pulls in tailscale/go#151, which we want to begin experimenting with. 
Updates tailscale/go#150 Change-Id: I69aa2631ecf36356430969f423ea3943643a144a Signed-off-by: Brad Fitzpatrick --- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index dbf37cef1af47..db7deab6f2baa 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -0c028efa1dac96fbb046b793877061645d01ed74 +485c68998494d1343d75389bd493d4dca20df644 diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index 26fe3501b5d26..3e92fc65dd7e0 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-1AG7yXAbDsBdKUNe5FQ45YXWJ3eLekD4t9mwKrqxiOY= +sha256-JXvC+IS+n+0y7gWDKUv1iAO+6ihm9tviNqyHpSK3cGs= From 99584b26aee31597d40c9e6e1949ef23cef83e13 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 28 Jan 2026 14:32:40 -0800 Subject: [PATCH 0909/1093] ipn/ipnlocal/netmapcache: report the correct error for a missing column (#18547) The file-based cache implementation was not reporting the correct error when attempting to load a missing column key. Make it do so, and update the tests to cover that case. Updates #12639 Change-Id: Ie2c45a0a7e528d4125f857859c92df807116a56e Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/netmapcache/netmapcache.go | 6 +- ipn/ipnlocal/netmapcache/netmapcache_test.go | 64 ++++++++++++++++++-- 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/ipn/ipnlocal/netmapcache/netmapcache.go b/ipn/ipnlocal/netmapcache/netmapcache.go index 6992e0691f125..d5706f9b773ac 100644 --- a/ipn/ipnlocal/netmapcache/netmapcache.go +++ b/ipn/ipnlocal/netmapcache/netmapcache.go @@ -155,7 +155,11 @@ func (s FileStore) List(ctx context.Context, prefix string) iter.Seq2[string, er // Load implements part of the [Store] interface. 
func (s FileStore) Load(ctx context.Context, key string) ([]byte, error) { - return os.ReadFile(filepath.Join(string(s), hex.EncodeToString([]byte(key)))) + data, err := os.ReadFile(filepath.Join(string(s), hex.EncodeToString([]byte(key)))) + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("key %q not found: %w", key, ErrKeyNotFound) + } + return data, err } // Store implements part of the [Store] interface. diff --git a/ipn/ipnlocal/netmapcache/netmapcache_test.go b/ipn/ipnlocal/netmapcache/netmapcache_test.go index 1f7d9b3bf6f07..437015ccc53e8 100644 --- a/ipn/ipnlocal/netmapcache/netmapcache_test.go +++ b/ipn/ipnlocal/netmapcache/netmapcache_test.go @@ -5,6 +5,7 @@ package netmapcache_test import ( "context" + jsonv1 "encoding/json" "errors" "flag" "fmt" @@ -174,11 +175,7 @@ func TestRoundTrip(t *testing.T) { t.Error("Cached map is not marked as such") } - opts := []cmp.Option{ - cmpopts.IgnoreFields(netmap.NetworkMap{}, skippedMapFields...), - cmpopts.EquateComparable(key.NodePublic{}, key.MachinePublic{}), - } - if diff := cmp.Diff(cmap, testMap, opts...); diff != "" { + if diff := diffNetMaps(cmap, testMap); diff != "" { t.Fatalf("Cached map differs (-got, +want):\n%s", diff) } @@ -262,6 +259,56 @@ func checkFieldCoverage(t *testing.T, nm *netmap.NetworkMap) { } } +func TestPartial(t *testing.T) { + t.Run("Empty", func(t *testing.T) { + c := netmapcache.NewCache(make(testStore)) // empty + nm, err := c.Load(t.Context()) + if !errors.Is(err, netmapcache.ErrCacheNotAvailable) { + t.Errorf("Load empty cache: got %+v, %v; want %v", nm, err, netmapcache.ErrCacheNotAvailable) + } + }) + + t.Run("SelfOnly", func(t *testing.T) { + self := (&tailcfg.Node{ + ID: 24680, + StableID: "u24680FAKE", + User: 6174, + Name: "test.example.com.", + Key: testNodeKey, + }).View() + + // A cached netmap must at least have a self node to be loaded without error, + // but other parts can be omitted without error. 
+ // + // Set up a cache store with only the self node populated, and verify we + // can load that back into something with the right shape. + data, err := jsonv1.Marshal(struct { + Node tailcfg.NodeView + }{Node: self}) + if err != nil { + t.Fatalf("Marshal test node: %v", err) + } + + s := netmapcache.FileStore(t.TempDir()) + if err := s.Store(t.Context(), "self", data); err != nil { + t.Fatalf("Write test cache: %v", err) + } + + c := netmapcache.NewCache(s) + got, err := c.Load(t.Context()) + if err != nil { + t.Fatalf("Load cached netmap: %v", err) + } + if diff := diffNetMaps(got, &netmap.NetworkMap{ + Cached: true, // because we loaded it + SelfNode: self, // what we originally stored + NodeKey: testNodeKey, // the self-related field is populated + }); diff != "" { + t.Errorf("Cached map differs (-got, +want):\n%s", diff) + } + }) +} + // testStore is an in-memory implementation of the [netmapcache.Store] interface. type testStore map[string][]byte @@ -296,3 +343,10 @@ func (t testStore) Store(_ context.Context, key string, value []byte) error { } func (t testStore) Remove(_ context.Context, key string) error { delete(t, key); return nil } + +func diffNetMaps(got, want *netmap.NetworkMap) string { + return cmp.Diff(got, want, + cmpopts.IgnoreFields(netmap.NetworkMap{}, skippedMapFields...), + cmpopts.EquateComparable(key.NodePublic{}, key.MachinePublic{}), + ) +} From e39a7305942e958ba4b9333cc2d3222023e33f0d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 28 Jan 2026 14:52:10 -0800 Subject: [PATCH 0910/1093] go.toolchain.rev: bump for cmd/go caching work This pulls in tailscale/go#153, which we want to begin experimenting with. 
Updates tailscale/go#150 Change-Id: Id3e03558ee69e74361431650530e8227dfdef978 Signed-off-by: Brad Fitzpatrick --- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index db7deab6f2baa..930ed5ad251a9 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -485c68998494d1343d75389bd493d4dca20df644 +799b25336eeb52e2f8b4521fba5870c2ad2d9f43 diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index 3e92fc65dd7e0..ae95ed0ff869d 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-JXvC+IS+n+0y7gWDKUv1iAO+6ihm9tviNqyHpSK3cGs= +sha256-27ymqBnopujAGo02TZ5IPX8bVkp+rLTuVSn/QzZufJc= From 9e7f536a7c145fd54a36e508b272d0312c8b7dad Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 28 Jan 2026 16:39:26 -0800 Subject: [PATCH 0911/1093] cmd/testwrapper: show "(cached)" for packages that hit the cache We weren't parsing that out previously, making it look like tests were re-running even though they were cached. 
Updates tailscale/go#150 Updates tailscale/corp#28679 Updates tailscale/corp#34696 Change-Id: I6254362852a82ccc86ac464a805379d941408dad Signed-off-by: Brad Fitzpatrick --- cmd/testwrapper/testwrapper.go | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index df10a53bc1a14..d9d3cc7db60a7 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -36,6 +36,7 @@ type testAttempt struct { pkg string // "tailscale.com/types/key" testName string // "TestFoo" outcome string // "pass", "fail", "skip" + cached bool // whether package-level (non-testName specific) was pass due to being cached logs bytes.Buffer start, end time.Time isMarkedFlaky bool // set if the test is marked as flaky @@ -108,6 +109,8 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te os.Exit(1) } + pkgCached := map[string]bool{} + s := bufio.NewScanner(r) resultMap := make(map[string]map[string]*testAttempt) // pkg -> test -> testAttempt for s.Scan() { @@ -127,6 +130,9 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te resultMap[pkg] = pkgTests } if goOutput.Test == "" { + if strings.HasSuffix(goOutput.Output, "\t(cached)\n") && goOutput.Package != "" { + pkgCached[goOutput.Package] = true + } switch goOutput.Action { case "start": pkgTests[""].start = goOutput.Time @@ -151,6 +157,7 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te end: goOutput.Time, logs: pkgTests[""].logs, pkgFinished: true, + cached: pkgCached[goOutput.Package], } case "output": // Capture all output from the package except for the final @@ -235,7 +242,7 @@ func main() { firstRun.tests = append(firstRun.tests, &packageTests{Pattern: pkg}) } toRun := []*nextRun{firstRun} - printPkgOutcome := func(pkg, outcome string, attempt int, runtime time.Duration) { + printPkgOutcome := func(pkg, outcome string, cached 
bool, attempt int, testDur time.Duration) { if pkg == "" { return // We reach this path on a build error. } @@ -250,10 +257,16 @@ outcome = "FAIL" } if attempt > 1 { - fmt.Printf("%s\t%s\t%.3fs\t[attempt=%d]\n", outcome, pkg, runtime.Seconds(), attempt) + fmt.Printf("%s\t%s\t%.3fs\t[attempt=%d]\n", outcome, pkg, testDur.Seconds(), attempt) return } - fmt.Printf("%s\t%s\t%.3fs\n", outcome, pkg, runtime.Seconds()) + var lastCol string + if cached { + lastCol = "(cached)" + } else { + lastCol = fmt.Sprintf("%.3f", testDur.Seconds()) + } + fmt.Printf("%s\t%s\t%v\n", outcome, pkg, lastCol) } for len(toRun) > 0 { @@ -300,7 +313,7 @@ // panics outside tests will be printed io.Copy(os.Stdout, &tr.logs) } - printPkgOutcome(tr.pkg, tr.outcome, thisRun.attempt, tr.end.Sub(tr.start)) + printPkgOutcome(tr.pkg, tr.outcome, tr.cached, thisRun.attempt, tr.end.Sub(tr.start)) continue } if testingVerbose || tr.outcome == "fail" { From 6f55309f348bc545b80ddf036a5cb1cac86a719b Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Wed, 28 Jan 2026 18:28:25 -0800 Subject: [PATCH 0912/1093] logtail/filch: fix panic in concurrent file access (#18555) In the event of multiple Filch instances being backed by the same file, it is possible that concurrent rotateLocked calls occur. One operation might clear the file, resulting in another skipping the call to resetReadBuffer, resulting in a later panic because the read index is invalid. To at least avoid the panic, always call resetReadBuffer. Note that the behavior of Filch is undefined when using the same file. While this avoids the panic, we may still experience data corruption or loss.
Fixes #18552 Signed-off-by: Joe Tsai --- logtail/filch/filch.go | 4 ++-- logtail/filch/filch_test.go | 23 +++++++++++++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/logtail/filch/filch.go b/logtail/filch/filch.go index 32b0b88b15990..1bd82d8c41e8a 100644 --- a/logtail/filch/filch.go +++ b/logtail/filch/filch.go @@ -316,7 +316,7 @@ func waitIdleStderrForTest() { // No data should be lost under this condition. // // - The writer exceeded a limit for f.newer. -// Data may be lost under this cxondition. +// Data may be lost under this condition. func (f *Filch) rotateLocked() error { f.rotateCalls.Add(1) @@ -329,7 +329,6 @@ func (f *Filch) rotateLocked() error { rdPos := pos - int64(len(f.unreadReadBuffer())) // adjust for data already read into the read buffer f.droppedBytes.Add(max(0, fi.Size()-rdPos)) } - f.resetReadBuffer() // Truncate the older file and write relative to the start. if err := f.older.Truncate(0); err != nil { @@ -339,6 +338,7 @@ func (f *Filch) rotateLocked() error { return err } } + f.resetReadBuffer() // Swap newer and older. f.newer, f.older = f.older, f.newer diff --git a/logtail/filch/filch_test.go b/logtail/filch/filch_test.go index 0975a2d11f8a3..3c7ba03ca3358 100644 --- a/logtail/filch/filch_test.go +++ b/logtail/filch/filch_test.go @@ -381,3 +381,26 @@ func testMaxFileSize(t *testing.T, replaceStderr bool) { t.Errorf("readBytes = %v, want %v", f.readBytes.Value(), readBytes) } } + +// TestConcurrentSameFile tests that concurrent Filch operations on the same +// set of log files does not result in a panic. +// The exact behavior is undefined, but we should at least avoid a panic. 
+func TestConcurrentSameFile(t *testing.T) { + filePrefix := filepath.Join(t.TempDir(), "testlog") + f1 := must.Get(New(filePrefix, Options{MaxFileSize: 1000})) + f2 := must.Get(New(filePrefix, Options{MaxFileSize: 1000})) + var group sync.WaitGroup + for _, f := range []*Filch{f1, f2} { + group.Go(func() { + for range 1000 { + for range rand.IntN(10) { + f.Write([]byte("hello, world")) + } + for range rand.IntN(10) { + f.TryReadLine() + } + } + }) + } + group.Wait() +} From 2d2d5e6cc7ab097c43516c5db4372cec8b63c81a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 28 Jan 2026 17:04:50 -0800 Subject: [PATCH 0913/1093] .github/workflows: set CMD_GO_USE_GIT_HASH=true for our cmd/go Updates tailscale/go#150 Updates tailscale/corp#28679 Change-Id: Ieb4780f157451f5c6660c96c6efaec9ddcfcb415 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 1 + .github/workflows/vet.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e99e75b22f8a6..a6906e53ef680 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -19,6 +19,7 @@ env: # toplevel directories "src" (for the checked out source code), and "gomodcache" # and other caches as siblings to follow. GOMODCACHE: ${{ github.workspace }}/gomodcache + CMD_GO_USE_GIT_HASH: "true" on: push: diff --git a/.github/workflows/vet.yml b/.github/workflows/vet.yml index b7862889daa7f..c85e3ec86a67f 100644 --- a/.github/workflows/vet.yml +++ b/.github/workflows/vet.yml @@ -6,6 +6,7 @@ env: # toplevel directories "src" (for the checked out source code), and "gomodcache" # and other caches as siblings to follow. 
GOMODCACHE: ${{ github.workspace }}/gomodcache + CMD_GO_USE_GIT_HASH: "true" on: push: From afc90ce804e0f0684d3887c1bcf56498ede399a9 Mon Sep 17 00:00:00 2001 From: Paul Scott <408401+icio@users.noreply.github.com> Date: Thu, 29 Jan 2026 10:45:13 +0000 Subject: [PATCH 0914/1093] control/controlclient: add PersistView.Valid() check in NetmapFromMapResponseForDebug (#17878) We were seeing some panics from nodes: panic: runtime error: invalid memory address or nil pointer dereference [signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0xd42570] goroutine 362555 [running]: tailscale.com/types/persist.PersistView.PrivateNodeKey(...) tailscale.com@v1.89.0-pre.0.20250926180200-7cbf56345bb3/types/persist/persist_view.go:89 tailscale.com/control/controlclient.NetmapFromMapResponseForDebug({0x1bac2e0, 0xc0a8692380}, {0xc0de5da0c0?}, 0xc0de66fd40) tailscale.com@v1.89.0-pre.0.20250926180200-7cbf56345bb3/control/controlclient/direct.go:1175 +0x90 tailscale.com/ipn/ipnlocal.handleC2NDebugNetMap(0xc0b3f5af08, {0x1baa520, 0xc0a887b0c0}, 0xc0a869a280) tailscale.com@v1.89.0-pre.0.20250926180200-7cbf56345bb3/ipn/ipnlocal/c2n.go:186 +0x405 tailscale.com/ipn/ipnlocal.(*LocalBackend).handleC2N(0xc0b3f5af08, {0x1baa520, 0xc0a887b0c0}, 0xc0a869a280) tailscale.com@v1.89.0-pre.0.20250926180200-7cbf56345bb3/ipn/ipnlocal/c2n.go:121 +0x155 net/http.HandlerFunc.ServeHTTP(0x1bac150?, {0x1baa520?, 0xc0a887b0c0?}, 0xc049d47b20?) 
net/http/server.go:2322 +0x29 tailscale.com/control/controlclient.answerC2NPing(0xc0d9808f20, {0x1b90f40, 0xc0c3bd0db0}, 0xc0b1c84ea0, 0xc0a29b3c80) tailscale.com@v1.89.0-pre.0.20250926180200-7cbf56345bb3/control/controlclient/direct.go:1454 +0x455 tailscale.com/control/controlclient.(*Direct).answerPing(0xc09b173b88, 0xc0a29b3c80) tailscale.com@v1.89.0-pre.0.20250926180200-7cbf56345bb3/control/controlclient/direct.go:1398 +0x127 created by tailscale.com/control/controlclient.(*Direct).sendMapRequest in goroutine 361922 tailscale.com@v1.89.0-pre.0.20250926180200-7cbf56345bb3/control/controlclient/direct.go:1104 +0x20e5 Updates tailscale/corp#31367 Updates tailscale/corp#32095 Signed-off-by: Paul Scott <408401+icio@users.noreply.github.com> --- control/controlclient/direct.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index eb49cf4ab44fb..a368d6f858384 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1230,6 +1230,9 @@ func NetmapFromMapResponseForDebug(ctx context.Context, pr persist.PersistView, if resp.Node == nil { return nil, errors.New("MapResponse lacks Node") } + if !pr.Valid() { + return nil, errors.New("PersistView invalid") + } nu := &rememberLastNetmapUpdater{} sess := newMapSession(pr.PrivateNodeKey(), nu, nil) From ce5c08e4cbdb03b9652e15142e4a596e1f054ef1 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 29 Jan 2026 16:09:19 +0000 Subject: [PATCH 0915/1093] cmd/testwrapper: detect cached tests with coverage output (#18559) Using -coverprofile was breaking the (cached) detection logic because that adds extra information to the end of the line. 
Updates tailscale/go#150 Change-Id: Ie1bf4e1e04e21db00a6829695098fb61d80a2641 Signed-off-by: Tom Proctor --- cmd/testwrapper/testwrapper.go | 5 ++- cmd/testwrapper/testwrapper_test.go | 58 +++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index d9d3cc7db60a7..e35b83407bbb8 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -130,7 +130,10 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te resultMap[pkg] = pkgTests } if goOutput.Test == "" { - if strings.HasSuffix(goOutput.Output, "\t(cached)\n") && goOutput.Package != "" { + // Detect output lines like: + // ok \ttailscale.com/cmd/testwrapper\t(cached) + // ok \ttailscale.com/cmd/testwrapper\t(cached)\tcoverage: 17.0% of statements + if goOutput.Package != "" && strings.Contains(goOutput.Output, fmt.Sprintf("%s\t(cached)", goOutput.Package)) { pkgCached[goOutput.Package] = true } switch goOutput.Action { diff --git a/cmd/testwrapper/testwrapper_test.go b/cmd/testwrapper/testwrapper_test.go index 0ca13e854ff7a..cf023f4367483 100644 --- a/cmd/testwrapper/testwrapper_test.go +++ b/cmd/testwrapper/testwrapper_test.go @@ -11,6 +11,7 @@ import ( "os/exec" "path/filepath" "regexp" + "runtime" "strings" "sync" "testing" @@ -214,6 +215,63 @@ func TestTimeout(t *testing.T) { } } +func TestCached(t *testing.T) { + t.Parallel() + + // Construct our trivial package. 
+ pkgDir := t.TempDir() + goMod := fmt.Sprintf(`module example.com + +go %s +`, runtime.Version()[2:]) // strip leading "go" + + test := `package main +import "testing" + +func TestCached(t *testing.T) {} +` + + for f, c := range map[string]string{ + "go.mod": goMod, + "cached_test.go": test, + } { + err := os.WriteFile(filepath.Join(pkgDir, f), []byte(c), 0o644) + if err != nil { + t.Fatalf("writing package: %s", err) + } + } + + for name, args := range map[string][]string{ + "without_flags": {"./..."}, + "with_short": {"./...", "-short"}, + "with_coverprofile": {"./...", "-coverprofile=" + filepath.Join(t.TempDir(), "coverage.out")}, + } { + t.Run(name, func(t *testing.T) { + var ( + out []byte + err error + ) + for range 2 { + cmd := cmdTestwrapper(t, args...) + cmd.Dir = pkgDir + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("testwrapper ./...: expected no error but got: %v; output was:\n%s", err, out) + } + } + + want := []byte("ok\texample.com\t(cached)") + if !bytes.Contains(out, want) { + t.Fatalf("wanted output containing %q but got:\n%s", want, out) + } + + if testing.Verbose() { + t.Logf("success - output:\n%s", out) + } + }) + } +} + func errExitCode(err error) (int, bool) { var exit *exec.ExitError if errors.As(err, &exit) { From 65d6793204893983e89824797253e349ff114558 Mon Sep 17 00:00:00 2001 From: License Updater Date: Thu, 29 Jan 2026 17:21:00 +0000 Subject: [PATCH 0916/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/apple.md | 10 +++++----- licenses/windows.md | 14 +++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index d51d67190b1fa..f61291c943cb8 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -70,13 +70,13 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.46.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.48.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.47.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/a4bb9ffd:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.49.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.38.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.32.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.39.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) 
([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.33.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 902d0f2a1f5a8..03d0ce40ef717 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -39,7 +39,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/peterbourgon/diskv](https://pkg.go.dev/github.com/peterbourgon/diskv) ([MIT](https://github.com/peterbourgon/diskv/blob/v2.0.1/LICENSE)) - [github.com/prometheus/client_golang/prometheus](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus) ([Apache-2.0](https://github.com/prometheus/client_golang/blob/v1.23.2/LICENSE)) - [github.com/prometheus/client_model/go](https://pkg.go.dev/github.com/prometheus/client_model/go) ([Apache-2.0](https://github.com/prometheus/client_model/blob/v0.6.2/LICENSE)) - - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.66.1/LICENSE)) + - [github.com/prometheus/common](https://pkg.go.dev/github.com/prometheus/common) ([Apache-2.0](https://github.com/prometheus/common/blob/v0.67.5/LICENSE)) - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) 
([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) @@ -48,17 +48,17 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - - [go.yaml.in/yaml/v2](https://pkg.go.dev/go.yaml.in/yaml/v2) ([Apache-2.0](https://github.com/yaml/go-yaml/blob/v2.4.2/LICENSE)) + - [go.yaml.in/yaml/v2](https://pkg.go.dev/go.yaml.in/yaml/v2) ([Apache-2.0](https://github.com/yaml/go-yaml/blob/v2.4.3/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.46.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.47.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/a4bb9ffd:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.30.0:LICENSE)) - - 
[golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.48.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.32.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.49.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.38.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.39.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.11/LICENSE)) From bcceef36825278a7406dd38d2832f20540d698a0 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Wed, 14 Jan 2026 02:29:06 -0500 Subject: [PATCH 0917/1093] cmd/tailscale/cli: allow fetching keys from AWS Parameter Store This allows fetching auth keys, OAuth client secrets, and ID tokens (for workload identity federation) from AWS Parameter Store by passing an ARN as the value. This is a relatively low-overhead mechanism for fetching these values from an external secret store without needing to run a secret service. 
Usage examples: # Auth key tailscale up \ --auth-key=arn:aws:ssm:us-east-1:123456789012:parameter/tailscale/auth-key # OAuth client secret tailscale up \ --client-secret=arn:aws:ssm:us-east-1:123456789012:parameter/tailscale/oauth-secret \ --advertise-tags=tag:server # ID token (for workload identity federation) tailscale up \ --client-id=my-client \ --id-token=arn:aws:ssm:us-east-1:123456789012:parameter/tailscale/id-token \ --advertise-tags=tag:server Updates tailscale/corp#28792 Signed-off-by: Andrew Dunham --- cmd/tailscale/cli/cli_test.go | 76 ++++++++++++++++ cmd/tailscale/cli/up.go | 42 +++++++-- cmd/tailscale/depaware.txt | 14 ++- cmd/tailscaled/depaware-minbox.txt | 1 + feature/awsparamstore/awsparamstore.go | 88 +++++++++++++++++++ feature/awsparamstore/awsparamstore_test.go | 83 +++++++++++++++++ feature/condregister/awsparamstore/doc.go | 6 ++ .../awsparamstore/maybe_awsparamstore.go | 8 ++ internal/client/tailscale/awsparamstore.go | 21 +++++ 9 files changed, 327 insertions(+), 12 deletions(-) create mode 100644 feature/awsparamstore/awsparamstore.go create mode 100644 feature/awsparamstore/awsparamstore_test.go create mode 100644 feature/condregister/awsparamstore/doc.go create mode 100644 feature/condregister/awsparamstore/maybe_awsparamstore.go create mode 100644 internal/client/tailscale/awsparamstore.go diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 41824701df551..370b730af8f35 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -6,11 +6,14 @@ package cli import ( "bytes" stdcmp "cmp" + "context" "encoding/json" "flag" "fmt" "io" "net/netip" + "os" + "path/filepath" "reflect" "strings" "testing" @@ -20,6 +23,7 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/envknob" "tailscale.com/health/healthmsg" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" @@ -1696,6 +1700,78 @@ func TestDocs(t *testing.T) { 
walk(t, root) } +func TestUpResolves(t *testing.T) { + const testARN = "arn:aws:ssm:us-east-1:123456789012:parameter/my-parameter" + undo := tailscale.HookResolveValueFromParameterStore.SetForTest(func(_ context.Context, valueOrARN string) (string, error) { + if valueOrARN == testARN { + return "resolved-value", nil + } + return valueOrARN, nil + }) + defer undo() + + const content = "file-content" + fpath := filepath.Join(t.TempDir(), "testfile") + if err := os.WriteFile(fpath, []byte(content), 0600); err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + arg string + want string + }{ + {"parameter_store", testARN, "resolved-value"}, + {"file", "file:" + fpath, "file-content"}, + } + + for _, tt := range testCases { + t.Run(tt.name+"_auth_key", func(t *testing.T) { + args := upArgsT{authKeyOrFile: tt.arg} + got, err := args.getAuthKey(t.Context()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != tt.want { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + + t.Run(tt.name+"_client_secret", func(t *testing.T) { + args := upArgsT{clientSecretOrFile: tt.arg} + got, err := args.getClientSecret(t.Context()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != tt.want { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + + t.Run(tt.name+"_id_token", func(t *testing.T) { + args := upArgsT{idTokenOrFile: tt.arg} + got, err := args.getIDToken(t.Context()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != tt.want { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + } + + t.Run("passthrough", func(t *testing.T) { + args := upArgsT{authKeyOrFile: "tskey-abcd1234"} + got, err := args.getAuthKey(t.Context()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != "tskey-abcd1234" { + t.Errorf("got %q, want %q", got, "tskey-abcd1234") + } + }) +} + func TestDeps(t *testing.T) { deptest.DepChecker{ GOOS: "linux", diff --git a/cmd/tailscale/cli/up.go 
b/cmd/tailscale/cli/up.go index cdb1d38234cec..79f7cc3f44a88 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -24,6 +24,7 @@ import ( shellquote "github.com/kballard/go-shellquote" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/feature/buildfeatures" + _ "tailscale.com/feature/condregister/awsparamstore" _ "tailscale.com/feature/condregister/identityfederation" _ "tailscale.com/feature/condregister/oauthkey" "tailscale.com/health/healthmsg" @@ -220,16 +221,39 @@ func resolveValueFromFile(v string) (string, error) { return v, nil } -func (a upArgsT) getAuthKey() (string, error) { - return resolveValueFromFile(a.authKeyOrFile) +// resolveValueFromParameterStore resolves a value from AWS Parameter Store if +// the value looks like an SSM ARN. If the hook is not available or the value +// is not an SSM ARN, it returns the value unchanged. +func resolveValueFromParameterStore(ctx context.Context, v string) (string, error) { + if f, ok := tailscale.HookResolveValueFromParameterStore.GetOk(); ok { + return f(ctx, v) + } + return v, nil +} + +// resolveValue will take the given value (e.g. as passed to --auth-key), and +// depending on the prefix, resolve the value from either a file or AWS +// Parameter Store. Values with an unknown prefix are returned as-is. 
+func resolveValue(ctx context.Context, v string) (string, error) { + switch { + case strings.HasPrefix(v, "file:"): + return resolveValueFromFile(v) + case strings.HasPrefix(v, tailscale.ResolvePrefixAWSParameterStore): + return resolveValueFromParameterStore(ctx, v) + } + return v, nil +} + +func (a upArgsT) getAuthKey(ctx context.Context) (string, error) { + return resolveValue(ctx, a.authKeyOrFile) } -func (a upArgsT) getClientSecret() (string, error) { - return resolveValueFromFile(a.clientSecretOrFile) +func (a upArgsT) getClientSecret(ctx context.Context) (string, error) { + return resolveValue(ctx, a.clientSecretOrFile) } -func (a upArgsT) getIDToken() (string, error) { - return resolveValueFromFile(a.idTokenOrFile) +func (a upArgsT) getIDToken(ctx context.Context) (string, error) { + return resolveValue(ctx, a.idTokenOrFile) } var upArgsGlobal upArgsT @@ -602,7 +626,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE return err } - authKey, err := upArgs.getAuthKey() + authKey, err := upArgs.getAuthKey(ctx) if err != nil { return err } @@ -611,7 +635,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { clientSecret := authKey // the authkey argument accepts client secrets, if both arguments are provided authkey has precedence if clientSecret == "" { - clientSecret, err = upArgs.getClientSecret() + clientSecret, err = upArgs.getClientSecret(ctx) if err != nil { return err } @@ -625,7 +649,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE // Try to resolve the auth key via workload identity federation if that functionality // is available and no auth key is yet determined. 
if f, ok := tailscale.HookResolveAuthKeyViaWIF.GetOk(); ok && authKey == "" { - idToken, err := upArgs.getIDToken() + idToken, err := upArgs.getIDToken(ctx) if err != nil { return err } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 67ffa4fbc0fda..b148423750b97 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -11,6 +11,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy L github.com/atotto/clipboard from tailscale.com/client/systray github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ + L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/feature/awsparamstore github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/sso+ github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts @@ -21,7 +22,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - github.com/aws/aws-sdk-go-v2/config from tailscale.com/wif + github.com/aws/aws-sdk-go-v2/config from tailscale.com/wif+ github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config @@ -49,6 +50,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/aws/aws-sdk-go-v2/internal/timeconv from 
github.com/aws/aws-sdk-go-v2/aws/retry github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts + L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/feature/awsparamstore + L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm + L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso @@ -65,7 +69,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/sso+ github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssooidc + github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssooidc+ github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/sso+ github.com/aws/smithy-go/endpoints/private/rulesfn from github.com/aws/aws-sdk-go-v2/service/sts @@ -76,11 +80,12 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - 
github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware + github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/sso+ github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws+ github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http + L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket @@ -112,6 +117,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/huin/goupnp/scpd from github.com/huin/goupnp github.com/huin/goupnp/soap from github.com/huin/goupnp+ github.com/huin/goupnp/ssdp from github.com/huin/goupnp + L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli @@ -168,8 +174,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web tailscale.com/feature from tailscale.com/tsweb+ + L tailscale.com/feature/awsparamstore from tailscale.com/feature/condregister/awsparamstore tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli+ tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/awsparamstore from tailscale.com/cmd/tailscale/cli 
tailscale.com/feature/condregister/identityfederation from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 4b2f71983d441..083db4c5af1d4 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -73,6 +73,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/awsparamstore from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/identityfederation from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ diff --git a/feature/awsparamstore/awsparamstore.go b/feature/awsparamstore/awsparamstore.go new file mode 100644 index 0000000000000..f63f546ed70ed --- /dev/null +++ b/feature/awsparamstore/awsparamstore.go @@ -0,0 +1,88 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_aws + +// Package awsparamstore registers support for fetching secret values from AWS +// Parameter Store. 
+package awsparamstore + +import ( + "context" + "fmt" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ssm" + "tailscale.com/feature" + "tailscale.com/internal/client/tailscale" +) + +func init() { + feature.Register("awsparamstore") + tailscale.HookResolveValueFromParameterStore.Set(ResolveValue) +} + +// parseARN parses and verifies that the input string is an +// ARN for AWS Parameter Store, returning the region and parameter name if so. +// +// If the input is not a valid Parameter Store ARN, it returns ok==false. +func parseARN(s string) (region, parameterName string, ok bool) { + parsed, err := arn.Parse(s) + if err != nil { + return "", "", false + } + + if parsed.Service != "ssm" { + return "", "", false + } + parameterName, ok = strings.CutPrefix(parsed.Resource, "parameter/") + if !ok { + return "", "", false + } + + // NOTE: parameter names must have a leading slash + return parsed.Region, "/" + parameterName, true +} + +// ResolveValue fetches a value from AWS Parameter Store if the input +// looks like an SSM ARN (e.g., arn:aws:ssm:us-east-1:123456789012:parameter/my-secret). +// +// If the input is not a Parameter Store ARN, it returns the value unchanged. +// +// If the input is a Parameter Store ARN and fetching the parameter fails, it +// returns an error. 
+func ResolveValue(ctx context.Context, valueOrARN string) (string, error) { + // If it doesn't look like an ARN, return as-is + region, parameterName, ok := parseARN(valueOrARN) + if !ok { + return valueOrARN, nil + } + + // Load AWS config with the region from the ARN + cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region)) + if err != nil { + return "", fmt.Errorf("loading AWS config in region %q: %w", region, err) + } + + // Create SSM client and fetch the parameter + client := ssm.NewFromConfig(cfg) + output, err := client.GetParameter(ctx, &ssm.GetParameterInput{ + // The parameter to fetch. + Name: aws.String(parameterName), + + // If the parameter is a SecureString, decrypt it. + WithDecryption: aws.Bool(true), + }) + if err != nil { + return "", fmt.Errorf("getting SSM parameter %q: %w", parameterName, err) + } + + if output.Parameter == nil || output.Parameter.Value == nil { + return "", fmt.Errorf("SSM parameter %q has no value", parameterName) + } + + return strings.TrimSpace(*output.Parameter.Value), nil +} diff --git a/feature/awsparamstore/awsparamstore_test.go b/feature/awsparamstore/awsparamstore_test.go new file mode 100644 index 0000000000000..9ccea63ec11e1 --- /dev/null +++ b/feature/awsparamstore/awsparamstore_test.go @@ -0,0 +1,83 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_aws + +package awsparamstore + +import ( + "testing" +) + +func TestParseARN(t *testing.T) { + tests := []struct { + name string + input string + wantOk bool + wantRegion string + wantParamName string + }{ + { + name: "non-arn-passthrough", + input: "tskey-abcd1234", + wantOk: false, + }, + { + name: "file-prefix-passthrough", + input: "file:/path/to/key", + wantOk: false, + }, + { + name: "empty-passthrough", + input: "", + wantOk: false, + }, + { + name: "non-ssm-arn-passthrough", + input: "arn:aws:s3:::my-bucket", + wantOk: false, + }, + { + name: "invalid-arn-passthrough", + input: 
"arn:invalid", + wantOk: false, + }, + { + name: "arn-invalid-resource-passthrough", + input: "arn:aws:ssm:us-east-1:123456789012:document/myDoc", + wantOk: false, + }, + { + name: "valid-arn", + input: "arn:aws:ssm:us-west-2:123456789012:parameter/my-secret", + wantOk: true, + wantRegion: "us-west-2", + wantParamName: "/my-secret", + }, + { + name: "valid-arn-with-path", + input: "arn:aws:ssm:eu-central-1:123456789012:parameter/path/to/secret", + wantOk: true, + wantRegion: "eu-central-1", + wantParamName: "/path/to/secret", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotRegion, gotParamName, gotOk := parseARN(tt.input) + if gotOk != tt.wantOk { + t.Errorf("parseARN(%q) got ok=%v, want %v", tt.input, gotOk, tt.wantOk) + } + if !tt.wantOk { + return + } + if gotRegion != tt.wantRegion { + t.Errorf("parseARN(%q) got region=%q, want %q", tt.input, gotRegion, tt.wantRegion) + } + if gotParamName != tt.wantParamName { + t.Errorf("parseARN(%q) got paramName=%q, want %q", tt.input, gotParamName, tt.wantParamName) + } + }) + } +} diff --git a/feature/condregister/awsparamstore/doc.go b/feature/condregister/awsparamstore/doc.go new file mode 100644 index 0000000000000..93a26e3c22fae --- /dev/null +++ b/feature/condregister/awsparamstore/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package awsparamstore conditionally registers the awsparamstore feature for +// resolving secrets from AWS Parameter Store. 
+package awsparamstore diff --git a/feature/condregister/awsparamstore/maybe_awsparamstore.go b/feature/condregister/awsparamstore/maybe_awsparamstore.go new file mode 100644 index 0000000000000..78c3f31006765 --- /dev/null +++ b/feature/condregister/awsparamstore/maybe_awsparamstore.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (ts_aws || (linux && (arm64 || amd64) && !android)) && !ts_omit_aws + +package awsparamstore + +import _ "tailscale.com/feature/awsparamstore" diff --git a/internal/client/tailscale/awsparamstore.go b/internal/client/tailscale/awsparamstore.go new file mode 100644 index 0000000000000..bb0a31d45cc8a --- /dev/null +++ b/internal/client/tailscale/awsparamstore.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "context" + + "tailscale.com/feature" +) + +// ResolvePrefixAWSParameterStore is the string prefix for values that can be +// resolved from AWS Parameter Store. +const ResolvePrefixAWSParameterStore = "arn:aws:ssm:" + +// HookResolveValueFromParameterStore resolves to [awsparamstore.ResolveValue] when +// the corresponding feature tag is enabled in the build process. +// +// It fetches a value from AWS Parameter Store given an ARN. If the provided +// value is not a Parameter Store ARN, it returns the value unchanged. 
+var HookResolveValueFromParameterStore feature.Hook[func(ctx context.Context, valueOrARN string) (string, error)] From db96e52d6f82e594f93eb44431a1b7fc732299be Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 30 Jan 2026 09:00:46 -0800 Subject: [PATCH 0918/1093] cmd/tailscale/cli: redact auth keys in FlagSet output (#18563) Running a command like `tailscale up --auth-key tskey-foo --auth-key tskey-bar` used to print ``` invalid value "tskey-bar" for flag -auth-key: flag provided multiple times ``` but now we print ``` invalid value "tskey-REDACTED" for flag -auth-key: flag provided multiple times ``` Fixes #18562 Signed-off-by: Andrew Lytvynov --- cmd/tailscale/cli/cli.go | 22 ++++++++++++++++++++++ cmd/tailscaled/depaware-minbox.txt | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 4d16cfe699537..b8ac768746d27 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -14,6 +14,7 @@ import ( "io" "log" "os" + "regexp" "runtime" "strings" "sync" @@ -294,6 +295,10 @@ change in the future. if w.UsageFunc == nil { w.UsageFunc = usageFunc } + if w.FlagSet != nil { + // If flags cannot be parsed, redact any keys in the error output . 
+ w.FlagSet.SetOutput(sanitizeOutput(w.FlagSet.Output())) + } return true }) @@ -566,3 +571,20 @@ func fixTailscaledConnectError(origErr error) error { } return origErr } + +func sanitizeOutput(w io.Writer) io.Writer { + return sanitizeWriter{w} +} + +type sanitizeWriter struct { + w io.Writer +} + +var reTskey = regexp.MustCompile(`tskey-\w+`) + +func (w sanitizeWriter) Write(buf []byte) (int, error) { + sanitized := reTskey.ReplaceAll(buf, []byte("tskey-REDACTED")) + diff := len(buf) - len(sanitized) + n, err := w.w.Write(sanitized) + return n - diff, err +} diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 083db4c5af1d4..5121b56d0d281 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -428,7 +428,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de path from io/fs+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from tailscale.com/clientupdate + regexp from tailscale.com/clientupdate+ regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ From 214b70cc1aeeee7205a22a25cce261de40e2c0d9 Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Fri, 30 Jan 2026 12:14:47 -0500 Subject: [PATCH 0919/1093] net/dns: skip DNS base config when using userspace networking (#18355) When tailscaled gets started with userspace networking, it won't modify your system's network configuration. For this, it creates a noopManager for DNS management. noopManager correctly observes that there's no real OS DNS to send queries to. 
This leads to us completely dropping any DNS internal resolution from `dns query` This change alters this so that even without a base config we'll still allow the internal resolver to handle internal DNS queries Fixes #18354 Signed-off-by: Fernando Serboncini --- net/dns/manager.go | 6 +++--- net/dns/noop.go | 5 +++++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/net/dns/manager.go b/net/dns/manager.go index 0b7ae465f59eb..0d74febffe7ca 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -388,9 +388,9 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig cfg, err := m.os.GetBaseConfig() if err == nil { baseCfg = &cfg - } else if isApple && err == ErrGetBaseConfigNotSupported { - // This is currently (2022-10-13) expected on certain iOS and macOS - // builds. + } else if (isApple || isNoopManager(m.os)) && err == ErrGetBaseConfigNotSupported { + // Expected when using noopManager (userspace networking) or on + // certain iOS/macOS builds. Continue without base config. } else { m.health.SetUnhealthy(osConfigurationReadWarnable, health.Args{health.ArgError: err.Error()}) return resolver.Config{}, OSConfig{}, err diff --git a/net/dns/noop.go b/net/dns/noop.go index 70dd93ed22220..aaf3a56ed68eb 100644 --- a/net/dns/noop.go +++ b/net/dns/noop.go @@ -15,3 +15,8 @@ func (m noopManager) GetBaseConfig() (OSConfig, error) { func NewNoopManager() (noopManager, error) { return noopManager{}, nil } + +func isNoopManager(c OSConfigurator) bool { + _, ok := c.(noopManager) + return ok +} From f48cd466624e06b2110eb6171c712eccfd0b4abe Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Fri, 30 Jan 2026 13:32:34 -0500 Subject: [PATCH 0920/1093] net/dns,ipn/ipnlocal: add nodecap to resolve subdomains (#18258) This adds a new node capability 'dns-subdomain-resolve' that signals that all of hosts' subdomains should resolve to the same IP address. It allows wildcard matching on any node marked with this capability. 
This change also includes a util/dnsname utility function that lets us access the parent of a fully qualified domain name. MagicDNS takes this function and recursively searches for a matching real node name. One important thing to observe is that, in this context, a subdomain can have multiple sub labels. This means that for a given node named machine, both my.machine and be.my.machine will be a positive match. Updates #1196 Signed-off-by: Fernando Serboncini --- ipn/ipnlocal/dnsconfig_test.go | 33 ++++++++++++++++++++++ ipn/ipnlocal/node_backend.go | 12 ++++++++ net/dns/config.go | 6 ++++ net/dns/dns_clone.go | 4 +++ net/dns/dns_view.go | 10 +++++++ net/dns/manager.go | 1 + net/dns/resolver/tsdns.go | 26 ++++++++++++++--- net/dns/resolver/tsdns_test.go | 51 ++++++++++++++++++++++++++++++++++ tailcfg/tailcfg.go | 7 +++++ util/dnsname/dnsname.go | 12 ++++++++ util/dnsname/dnsname_test.go | 28 +++++++++++++++++++ 11 files changed, 186 insertions(+), 4 deletions(-) diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index 594d2c5476177..ab00b47404216 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -106,6 +106,39 @@ func TestDNSConfigForNetmap(t *testing.T) { }, }, }, + { + name: "subdomain_resolve_capability", + nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "myname.net.", + Addresses: ipps("100.101.101.101"), + }).View(), + AllCaps: set.SetOf([]tailcfg.NodeCapability{tailcfg.NodeAttrDNSSubdomainResolve}), + }, + peers: nodeViews([]*tailcfg.Node{ + { + ID: 1, + Name: "peer-with-cap.net.", + Addresses: ipps("100.102.0.1"), + CapMap: tailcfg.NodeCapMap{tailcfg.NodeAttrDNSSubdomainResolve: nil}, + }, + { + ID: 2, + Name: "peer-without-cap.net.", + Addresses: ipps("100.102.0.2"), + }, + }), + prefs: &ipn.Prefs{}, + want: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: map[dnsname.FQDN][]netip.Addr{ + "myname.net.": ips("100.101.101.101"), + "peer-with-cap.net.": 
ips("100.102.0.1"), + "peer-without-cap.net.": ips("100.102.0.2"), + }, + SubdomainHosts: set.Of[dnsname.FQDN]("myname.net.", "peer-with-cap.net."), + }, + }, { // An ephemeral node with only an IPv6 address // should get IPv6 records for all its peers, diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 4a32b14dd49dc..170dae9569c8c 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -751,8 +751,20 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. dcfg.Hosts[fqdn] = ips } set(nm.SelfName(), nm.GetAddresses()) + if nm.AllCaps.Contains(tailcfg.NodeAttrDNSSubdomainResolve) { + if fqdn, err := dnsname.ToFQDN(nm.SelfName()); err == nil { + dcfg.SubdomainHosts.Make() + dcfg.SubdomainHosts.Add(fqdn) + } + } for _, peer := range peers { set(peer.Name(), peer.Addresses()) + if peer.CapMap().Contains(tailcfg.NodeAttrDNSSubdomainResolve) { + if fqdn, err := dnsname.ToFQDN(peer.Name()); err == nil { + dcfg.SubdomainHosts.Make() + dcfg.SubdomainHosts.Add(fqdn) + } + } } for _, rec := range nm.DNS.ExtraRecords { switch rec.Type { diff --git a/net/dns/config.go b/net/dns/config.go index 2b5505fc9734c..f776d1af04443 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -21,6 +21,7 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/types/dnstype" "tailscale.com/util/dnsname" + "tailscale.com/util/set" ) // Config is a DNS configuration. @@ -48,6 +49,11 @@ type Config struct { // it to resolve, you also need to add appropriate routes to // Routes. Hosts map[dnsname.FQDN][]netip.Addr + // SubdomainHosts is a set of FQDNs from Hosts that should also + // resolve subdomain queries to the same IPs. For example, if + // "node.tailnet.ts.net" is in SubdomainHosts, then queries for + // "anything.node.tailnet.ts.net" will resolve to node's IPs. + SubdomainHosts set.Set[dnsname.FQDN] // OnlyIPv6, if true, uses the IPv6 service IP (for MagicDNS) // instead of the IPv4 version (100.100.100.100). 
OnlyIPv6 bool diff --git a/net/dns/dns_clone.go b/net/dns/dns_clone.go index de08be8a27b8e..ea5e5299beb7d 100644 --- a/net/dns/dns_clone.go +++ b/net/dns/dns_clone.go @@ -6,10 +6,12 @@ package dns import ( + "maps" "net/netip" "tailscale.com/types/dnstype" "tailscale.com/util/dnsname" + "tailscale.com/util/set" ) // Clone makes a deep copy of Config. @@ -43,6 +45,7 @@ func (src *Config) Clone() *Config { dst.Hosts[k] = append([]netip.Addr{}, src.Hosts[k]...) } } + dst.SubdomainHosts = maps.Clone(src.SubdomainHosts) return dst } @@ -52,6 +55,7 @@ var _ConfigCloneNeedsRegeneration = Config(struct { Routes map[dnsname.FQDN][]*dnstype.Resolver SearchDomains []dnsname.FQDN Hosts map[dnsname.FQDN][]netip.Addr + SubdomainHosts set.Set[dnsname.FQDN] OnlyIPv6 bool }{}) diff --git a/net/dns/dns_view.go b/net/dns/dns_view.go index b10861cca8821..313621c86e85b 100644 --- a/net/dns/dns_view.go +++ b/net/dns/dns_view.go @@ -15,6 +15,7 @@ import ( "tailscale.com/types/dnstype" "tailscale.com/types/views" "tailscale.com/util/dnsname" + "tailscale.com/util/set" ) //go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=Config @@ -123,6 +124,14 @@ func (v ConfigView) Hosts() views.MapSlice[dnsname.FQDN, netip.Addr] { return views.MapSliceOf(v.ж.Hosts) } +// SubdomainHosts is a set of FQDNs from Hosts that should also +// resolve subdomain queries to the same IPs. For example, if +// "node.tailnet.ts.net" is in SubdomainHosts, then queries for +// "anything.node.tailnet.ts.net" will resolve to node's IPs. +func (v ConfigView) SubdomainHosts() views.Map[dnsname.FQDN, struct{}] { + return views.MapOf(v.ж.SubdomainHosts) +} + // OnlyIPv6, if true, uses the IPv6 service IP (for MagicDNS) // instead of the IPv4 version (100.100.100.100). 
func (v ConfigView) OnlyIPv6() bool { return v.ж.OnlyIPv6 } @@ -134,5 +143,6 @@ var _ConfigViewNeedsRegeneration = Config(struct { Routes map[dnsname.FQDN][]*dnstype.Resolver SearchDomains []dnsname.FQDN Hosts map[dnsname.FQDN][]netip.Addr + SubdomainHosts set.Set[dnsname.FQDN] OnlyIPv6 bool }{}) diff --git a/net/dns/manager.go b/net/dns/manager.go index 0d74febffe7ca..faca1053cf852 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -291,6 +291,7 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig // authoritative suffixes, even if we don't propagate MagicDNS to // the OS. rcfg.Hosts = cfg.Hosts + rcfg.SubdomainHosts = cfg.SubdomainHosts routes := map[dnsname.FQDN][]*dnstype.Resolver{} // assigned conditionally to rcfg.Routes below. var propagateHostsToOS bool for suffix, resolvers := range cfg.Routes { diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index a6f05c4702550..f71c1b7708b4c 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -39,6 +39,7 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/cloudenv" "tailscale.com/util/dnsname" + "tailscale.com/util/set" ) const dnsSymbolicFQDN = "magicdns.localhost-tailscale-daemon." @@ -79,6 +80,12 @@ type Config struct { // LocalDomains is a list of DNS name suffixes that should not be // routed to upstream resolvers. LocalDomains []dnsname.FQDN + // SubdomainHosts is a set of FQDNs from Hosts that should also + // resolve subdomain queries to the same IPs. If a query like + // "sub.node.tailnet.ts.net" doesn't match Hosts directly, and + // "node.tailnet.ts.net" is in SubdomainHosts, the query resolves + // to the IPs for "node.tailnet.ts.net". + SubdomainHosts set.Set[dnsname.FQDN] } // WriteToBufioWriter write a debug version of c for logs to w, omitting @@ -214,10 +221,11 @@ type Resolver struct { closed chan struct{} // mu guards the following fields from being updated while used. 
- mu syncs.Mutex - localDomains []dnsname.FQDN - hostToIP map[dnsname.FQDN][]netip.Addr - ipToHost map[netip.Addr]dnsname.FQDN + mu syncs.Mutex + localDomains []dnsname.FQDN + hostToIP map[dnsname.FQDN][]netip.Addr + ipToHost map[netip.Addr]dnsname.FQDN + subdomainHosts set.Set[dnsname.FQDN] } type ForwardLinkSelector interface { @@ -278,6 +286,7 @@ func (r *Resolver) SetConfig(cfg Config) error { r.localDomains = cfg.LocalDomains r.hostToIP = cfg.Hosts r.ipToHost = reverse + r.subdomainHosts = cfg.SubdomainHosts return nil } @@ -642,9 +651,18 @@ func (r *Resolver) resolveLocal(domain dnsname.FQDN, typ dns.Type) (netip.Addr, r.mu.Lock() hosts := r.hostToIP localDomains := r.localDomains + subdomainHosts := r.subdomainHosts r.mu.Unlock() addrs, found := hosts[domain] + if !found { + for parent := domain.Parent(); parent != ""; parent = parent.Parent() { + if subdomainHosts.Contains(parent) { + addrs, found = hosts[parent] + break + } + } + } if !found { for _, suffix := range localDomains { if suffix.Contains(domain) { diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index 5597c2cf2d921..712fa88dcad82 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -32,6 +32,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus/eventbustest" + "tailscale.com/util/set" ) var ( @@ -429,6 +430,56 @@ func TestResolveLocal(t *testing.T) { } } +func TestResolveLocalSubdomain(t *testing.T) { + r := newResolver(t) + defer r.Close() + + // Configure with SubdomainHosts set for test1.ipn.dev + cfg := Config{ + Hosts: map[dnsname.FQDN][]netip.Addr{ + "test1.ipn.dev.": {testipv4}, + "test2.ipn.dev.": {testipv6}, + }, + LocalDomains: []dnsname.FQDN{"ipn.dev."}, + SubdomainHosts: set.Of[dnsname.FQDN]("test1.ipn.dev."), + } + r.SetConfig(cfg) + + tests := []struct { + name string + qname dnsname.FQDN + qtype dns.Type + ip netip.Addr + code dns.RCode + }{ + // Exact matches still work + 
{"exact-ipv4", "test1.ipn.dev.", dns.TypeA, testipv4, dns.RCodeSuccess}, + {"exact-ipv6", "test2.ipn.dev.", dns.TypeAAAA, testipv6, dns.RCodeSuccess}, + + // Subdomain of test1 resolves (test1 has SubdomainHosts set) + {"subdomain-ipv4", "foo.test1.ipn.dev.", dns.TypeA, testipv4, dns.RCodeSuccess}, + {"subdomain-deep", "bar.foo.test1.ipn.dev.", dns.TypeA, testipv4, dns.RCodeSuccess}, // Multi-level subdomain + + // Subdomain of test2 does NOT resolve (test2 lacks SubdomainHosts) + {"subdomain-no-cap", "foo.test2.ipn.dev.", dns.TypeAAAA, netip.Addr{}, dns.RCodeNameError}, + + // Non-existent parent still returns NXDOMAIN + {"subdomain-no-parent", "foo.test3.ipn.dev.", dns.TypeA, netip.Addr{}, dns.RCodeNameError}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ip, code := r.resolveLocal(tt.qname, tt.qtype) + if code != tt.code { + t.Errorf("code = %v; want %v", code, tt.code) + } + if ip != tt.ip { + t.Errorf("ip = %v; want %v", ip, tt.ip) + } + }) + } +} + func TestResolveLocalReverse(t *testing.T) { r := newResolver(t) defer r.Close() diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 535c42b212b2e..f76eb8f55d241 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2707,6 +2707,13 @@ const ( // server to answer AAAA queries about its peers. See tailscale/tailscale#1152. NodeAttrMagicDNSPeerAAAA NodeCapability = "magicdns-aaaa" + // NodeAttrDNSSubdomainResolve, when set on Self or a Peer node, indicates + // that the subdomains of that node's MagicDNS name should resolve to the + // same IP addresses as the node itself. + // For example, if node "myserver.tailnet.ts.net" has this capability, + // then "anything.myserver.tailnet.ts.net" will resolve to myserver's IPs. + NodeAttrDNSSubdomainResolve NodeCapability = "dns-subdomain-resolve" + // NodeAttrTrafficSteering configures the node to use the traffic // steering subsystem for via routes. See tailscale/corp#29966. 
NodeAttrTrafficSteering NodeCapability = "traffic-steering" diff --git a/util/dnsname/dnsname.go b/util/dnsname/dnsname.go index 09b44e73e2faa..263c376aac674 100644 --- a/util/dnsname/dnsname.go +++ b/util/dnsname/dnsname.go @@ -94,6 +94,18 @@ func (f FQDN) Contains(other FQDN) bool { return strings.HasSuffix(other.WithTrailingDot(), cmp) } +// Parent returns the parent domain by stripping the first label. +// For "foo.bar.baz.", it returns "bar.baz." +// It returns an empty FQDN for root or single-label domains. +func (f FQDN) Parent() FQDN { + s := f.WithTrailingDot() + _, rest, ok := strings.Cut(s, ".") + if !ok || rest == "" { + return "" + } + return FQDN(rest) +} + // ValidLabel reports whether label is a valid DNS label. All errors are // [vizerror.Error]. func ValidLabel(label string) error { diff --git a/util/dnsname/dnsname_test.go b/util/dnsname/dnsname_test.go index 35e04de2ebb35..e349e51c7ad99 100644 --- a/util/dnsname/dnsname_test.go +++ b/util/dnsname/dnsname_test.go @@ -123,6 +123,34 @@ func TestFQDNContains(t *testing.T) { } } +func TestFQDNParent(t *testing.T) { + tests := []struct { + in string + want FQDN + }{ + {"", ""}, + {".", ""}, + {"com.", ""}, + {"foo.com.", "com."}, + {"www.foo.com.", "foo.com."}, + {"a.b.c.d.", "b.c.d."}, + {"sub.node.tailnet.ts.net.", "node.tailnet.ts.net."}, + } + + for _, test := range tests { + t.Run(test.in, func(t *testing.T) { + in, err := ToFQDN(test.in) + if err != nil { + t.Fatalf("ToFQDN(%q): %v", test.in, err) + } + got := in.Parent() + if got != test.want { + t.Errorf("ToFQDN(%q).Parent() = %q, want %q", test.in, got, test.want) + } + }) + } +} + func TestSanitizeLabel(t *testing.T) { tests := []struct { name string From 698e92a761d7a0d95f6b9929c2654f565b219793 Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Fri, 30 Jan 2026 14:18:45 -0500 Subject: [PATCH 0921/1093] logtail/filch: close Filch instances in TestConcurrentSameFile (#18571) On Windows, TempDir cleanup fails if file handles are still 
open. TestConcurrentSameFile wasn't closing Filch instances before exit Fixes #18570 Signed-off-by: Fernando Serboncini --- logtail/filch/filch_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/logtail/filch/filch_test.go b/logtail/filch/filch_test.go index 3c7ba03ca3358..2538233cfd84c 100644 --- a/logtail/filch/filch_test.go +++ b/logtail/filch/filch_test.go @@ -388,7 +388,9 @@ func testMaxFileSize(t *testing.T, replaceStderr bool) { func TestConcurrentSameFile(t *testing.T) { filePrefix := filepath.Join(t.TempDir(), "testlog") f1 := must.Get(New(filePrefix, Options{MaxFileSize: 1000})) + defer f1.Close() f2 := must.Get(New(filePrefix, Options{MaxFileSize: 1000})) + defer f2.Close() var group sync.WaitGroup for _, f := range []*Filch{f1, f2} { group.Go(func() { From 3ce13eb2b9c0a654da964e29ff8d2d145f3d396b Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 30 Jan 2026 12:35:00 -0800 Subject: [PATCH 0922/1093] cmd/testwrapper: add support for the -vet test flag So callers can run testwrapper with -vet=off if they're already running vet explicitly in a concurrent test job. Updates tailscale/corp#28679 Change-Id: I74ad56e560076d187f5e3a7d7381e1dac89d860c Signed-off-by: Brad Fitzpatrick --- cmd/testwrapper/args.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/testwrapper/args.go b/cmd/testwrapper/args.go index 11ed1aeaad0bd..350197d4f1271 100644 --- a/cmd/testwrapper/args.go +++ b/cmd/testwrapper/args.go @@ -89,6 +89,7 @@ func newTestFlagSet() *flag.FlagSet { // TODO(maisem): figure out what other flags we need to register explicitly. 
fs.String("exec", "", "Command to run tests with") fs.Bool("race", false, "build with race detector") + fs.String("vet", "", "vet checks to run, or 'off' or 'all'") return fs } From 3b6d542923cc1e53fa304a5b366c94789662e260 Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 29 Jan 2026 15:41:55 -0800 Subject: [PATCH 0923/1093] wgengine/magicsock: make debugNeverDirectUDP influence remote peer decisions By dropping inbound disco.Ping messages received over direct UDP paths. Fixes #18560 Signed-off-by: Jordan Whited --- wgengine/magicsock/debugknobs.go | 3 ++- wgengine/magicsock/magicsock.go | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/debugknobs.go b/wgengine/magicsock/debugknobs.go index 39cec25e64885..580d954c0bc40 100644 --- a/wgengine/magicsock/debugknobs.go +++ b/wgengine/magicsock/debugknobs.go @@ -62,7 +62,8 @@ var ( // //lint:ignore U1000 used on Linux/Darwin only debugPMTUD = envknob.RegisterBool("TS_DEBUG_PMTUD") - // debugNeverDirectUDP disables the use of direct UDP connections, forcing + // debugNeverDirectUDP disables the use of direct UDP connections by + // suppressing/dropping inbound/outbound [disco.Ping] messages, forcing // all peer communication over DERP or peer relay. debugNeverDirectUDP = envknob.RegisterBool("TS_DEBUG_NEVER_DIRECT_UDP") // Hey you! Adding a new debugknob? Make sure to stub it out in the diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 7c5442d0b996c..d6f411f4ac2dc 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -2555,6 +2555,10 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // This is a naked [disco.Ping] without a VNI. 
+ if debugNeverDirectUDP() && !isDerp { + return + } + // If we can figure out with certainty which node key this disco // message is for, eagerly update our [epAddr]<>node and disco<>node // mappings to make p2p path discovery faster in simple From 03461ea7fb9c2c318a355498811481ad9c74b119 Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Fri, 30 Jan 2026 16:46:03 -0500 Subject: [PATCH 0924/1093] wgengine/netstack: add local tailscale service IPs to route and terminate locally (#18461) * wgengine/netstack: add local tailscale service IPs to route and terminate locally This commit adds the tailscales service IPs served locally to OS routes, and make interception to packets so that the traffic terminates locally without making affects to the HA traffics. Fixes tailscale/corp#34048 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * fix test Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * add ready field to avoid accessing lb before netstack starts Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * wgengine/netstack: store values from lb to avoid acquiring a lock Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * add active services to netstack on starts with stored prefs. 
Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * fix comments Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> * update comments Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --------- Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- ipn/ipnlocal/local.go | 48 +++++++++++++++++-- ipn/ipnlocal/local_test.go | 25 +++++++++- tsd/tsd.go | 3 ++ wgengine/netstack/netstack.go | 77 +++++++++++++++++++++++++++++- wgengine/netstack/netstack_test.go | 62 ++++++++++++++++++++++-- 5 files changed, 205 insertions(+), 10 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 0fc26cd041cb6..300f7a4c3186d 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -922,6 +922,22 @@ func (b *LocalBackend) setStateLocked(state ipn.State) { } } +func (b *LocalBackend) IPServiceMappings() netmap.IPServiceMappings { + b.mu.Lock() + defer b.mu.Unlock() + return b.ipVIPServiceMap +} + +func (b *LocalBackend) SetIPServiceMappingsForTest(m netmap.IPServiceMappings) { + b.mu.Lock() + defer b.mu.Unlock() + testenv.AssertInTest() + b.ipVIPServiceMap = m + if ns, ok := b.sys.Netstack.GetOK(); ok { + ns.UpdateIPServiceMappings(m) + } +} + // setConfigLocked uses the provided config to update the backend's prefs // and other state. func (b *LocalBackend) setConfigLocked(conf *conffile.Config) error { @@ -4502,6 +4518,12 @@ func (b *LocalBackend) onEditPrefsLocked(_ ipnauth.Actor, mp *ipn.MaskedPrefs, o } } + if mp.AdvertiseServicesSet { + if ns, ok := b.sys.Netstack.GetOK(); ok { + ns.UpdateActiveVIPServices(newPrefs.AdvertiseServices()) + } + } + // This is recorded here in the EditPrefs path, not the setPrefs path on purpose. // recordForEdit records metrics related to edits and changes, not the final state. 
// If, in the future, we want to record gauge-metrics related to the state of prefs, @@ -5125,7 +5147,7 @@ func (b *LocalBackend) authReconfigLocked() { } oneCGNATRoute := shouldUseOneCGNATRoute(b.logf, b.sys.NetMon.Get(), b.sys.ControlKnobs(), version.OS()) - rcfg := b.routerConfigLocked(cfg, prefs, oneCGNATRoute) + rcfg := b.routerConfigLocked(cfg, prefs, nm, oneCGNATRoute) err = b.e.Reconfig(cfg, rcfg, dcfg) if err == wgengine.ErrNoChanges { @@ -5500,7 +5522,7 @@ func peerRoutes(logf logger.Logf, peers []wgcfg.Peer, cgnatThreshold int, routeA // routerConfig produces a router.Config from a wireguard config and IPN prefs. // // b.mu must be held. -func (b *LocalBackend) routerConfigLocked(cfg *wgcfg.Config, prefs ipn.PrefsView, oneCGNATRoute bool) *router.Config { +func (b *LocalBackend) routerConfigLocked(cfg *wgcfg.Config, prefs ipn.PrefsView, nm *netmap.NetworkMap, oneCGNATRoute bool) *router.Config { singleRouteThreshold := 10_000 if oneCGNATRoute { singleRouteThreshold = 1 @@ -5585,11 +5607,23 @@ func (b *LocalBackend) routerConfigLocked(cfg *wgcfg.Config, prefs ipn.PrefsView } } + // Get the VIPs for VIP services this node hosts. We will add all locally served VIPs to routes then + // we terminate these connection locally in netstack instead of routing to peer. 
+ vipServiceIPs := nm.GetIPVIPServiceMap() + v4, v6 := false, false + if slices.ContainsFunc(rs.LocalAddrs, tsaddr.PrefixIs4) { rs.Routes = append(rs.Routes, netip.PrefixFrom(tsaddr.TailscaleServiceIP(), 32)) + v4 = true } if slices.ContainsFunc(rs.LocalAddrs, tsaddr.PrefixIs6) { rs.Routes = append(rs.Routes, netip.PrefixFrom(tsaddr.TailscaleServiceIPv6(), 128)) + v6 = true + } + for vip := range vipServiceIPs { + if (vip.Is4() && v4) || (vip.Is6() && v6) { + rs.Routes = append(rs.Routes, netip.PrefixFrom(vip, vip.BitLen())) + } } return rs @@ -6267,7 +6301,15 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) if buildfeatures.HasServe { - b.ipVIPServiceMap = nm.GetIPVIPServiceMap() + m := nm.GetIPVIPServiceMap() + b.ipVIPServiceMap = m + if ns, ok := b.sys.Netstack.GetOK(); ok { + ns.UpdateIPServiceMappings(m) + // In case the prefs reloaded from Profile Manager but didn't change, + // we still need to load the active VIP services into netstack. 
+ ns.UpdateActiveVIPServices(b.pm.CurrentPrefs().AdvertiseServices()) + } + } if !oldSelf.Equal(nm.SelfNodeOrZero()) { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 53607cfaaa737..cd44acdd1fecf 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -7430,8 +7430,31 @@ func TestRouteAllDisabled(t *testing.T) { cfg := &wgcfg.Config{ Peers: tt.peers, } + ServiceIPMappings := tailcfg.ServiceIPMappings{ + "svc:test-service": []netip.Addr{ + netip.MustParseAddr("100.64.1.2"), + netip.MustParseAddr("fd7a:abcd:1234::1"), + }, + } + svcIPMapJSON, err := json.Marshal(ServiceIPMappings) + if err != nil { + t.Fatalf("failed to marshal ServiceIPMappings: %v", err) + } + nm := &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "test-node", + Addresses: []netip.Prefix{ + pp("100.64.1.1/32"), + }, + CapMap: tailcfg.NodeCapMap{ + tailcfg.NodeAttrServiceHost: []tailcfg.RawMessage{ + tailcfg.RawMessage(svcIPMapJSON), + }, + }, + }).View(), + } - rcfg := lb.routerConfigLocked(cfg, prefs.View(), false) + rcfg := lb.routerConfigLocked(cfg, prefs.View(), nm, false) for _, p := range rcfg.Routes { found := false for _, r := range tt.wantEndpoints { diff --git a/tsd/tsd.go b/tsd/tsd.go index 4284a8cd3bade..9d79334d68e2b 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -32,6 +32,7 @@ import ( "tailscale.com/net/tstun" "tailscale.com/proxymap" "tailscale.com/types/netmap" + "tailscale.com/types/views" "tailscale.com/util/eventbus" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/usermetric" @@ -111,6 +112,8 @@ type LocalBackend = any type NetstackImpl interface { Start(LocalBackend) error UpdateNetstackIPs(*netmap.NetworkMap) + UpdateIPServiceMappings(netmap.IPServiceMappings) + UpdateActiveVIPServices(views.Slice[string]) } // Set is a convenience method to set a subsystem value. 
diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 59fc0e0694bcc..42ac0ab1e4dba 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -51,6 +51,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/nettype" + "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/set" "tailscale.com/version" @@ -200,6 +201,10 @@ type Impl struct { lb *ipnlocal.LocalBackend // or nil dns *dns.Manager + // Before Start is called, there can IPv6 Neighbor Discovery from the + // OS landing on netstack. We need to drop those packets until Start. + ready atomic.Bool // set to true once Start has been called + // loopbackPort, if non-nil, will enable Impl to loop back (dnat to // :loopbackPort) TCP & UDP flows originally // destined to serviceIP{v6}:loopbackPort. @@ -216,6 +221,10 @@ type Impl struct { atomicIsVIPServiceIPFunc syncs.AtomicValue[func(netip.Addr) bool] + atomicIPVIPServiceMap syncs.AtomicValue[netmap.IPServiceMappings] + // make this a set of strings for faster lookup + atomicActiveVIPServices syncs.AtomicValue[set.Set[tailcfg.ServiceName]] + // forwardDialFunc, if non-nil, is the net.Dialer.DialContext-style // function that is used to make outgoing connections when forwarding a // TCP connection to another host (e.g. in subnet router mode). @@ -608,6 +617,9 @@ func (ns *Impl) Start(b LocalBackend) error { ns.ipstack.SetTransportProtocolHandler(tcp.ProtocolNumber, ns.wrapTCPProtocolHandler(tcpFwd.HandlePacket)) ns.ipstack.SetTransportProtocolHandler(udp.ProtocolNumber, ns.wrapUDPProtocolHandler(udpFwd.HandlePacket)) go ns.inject() + if ns.ready.Swap(true) { + panic("already started") + } return nil } @@ -765,6 +777,25 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { } } +// UpdateIPServiceMappings updates the IPServiceMappings when there is a change +// in this value in localbackend. This is usually triggered from a netmap update. 
+func (ns *Impl) UpdateIPServiceMappings(mappings netmap.IPServiceMappings) { + ns.mu.Lock() + defer ns.mu.Unlock() + ns.atomicIPVIPServiceMap.Store(mappings) +} + +// UpdateActiveVIPServices updates the set of active VIP services names. +func (ns *Impl) UpdateActiveVIPServices(activeServices views.Slice[string]) { + ns.mu.Lock() + defer ns.mu.Unlock() + activeServicesSet := make(set.Set[tailcfg.ServiceName], activeServices.Len()) + for _, s := range activeServices.All() { + activeServicesSet.Add(tailcfg.AsServiceName(s)) + } + ns.atomicActiveVIPServices.Store(activeServicesSet) +} + func (ns *Impl) isLoopbackPort(port uint16) bool { if ns.loopbackPort != nil && int(port) == *ns.loopbackPort { return true @@ -775,13 +806,15 @@ func (ns *Impl) isLoopbackPort(port uint16) bool { // handleLocalPackets is hooked into the tun datapath for packets leaving // the host and arriving at tailscaled. This method returns filter.DropSilently // to intercept a packet for handling, for instance traffic to quad-100. +// Caution: can be called before Start func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { - if ns.ctx.Err() != nil { + if !ns.ready.Load() || ns.ctx.Err() != nil { return filter.DropSilently, gro } // Determine if we care about this local packet. dst := p.Dst.Addr() + serviceName, isVIPServiceIP := ns.atomicIPVIPServiceMap.Load()[dst] switch { case dst == serviceIP || dst == serviceIPv6: // We want to intercept some traffic to the "service IP" (e.g. @@ -798,6 +831,25 @@ func (ns *Impl) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper, gro *gro. return filter.Accept, gro } } + case isVIPServiceIP: + // returns all active VIP services in a set, since the IPVIPServiceMap + // contains inactive service IPs when node hosts the service, we need to + // check the service is active or not before dropping the packet. 
+ activeServices := ns.atomicActiveVIPServices.Load() + if !activeServices.Contains(serviceName) { + // Other host might have the service active, so we let the packet go through. + return filter.Accept, gro + } + if p.IPProto != ipproto.TCP { + // We currenly only support VIP services over TCP. If service is in Tun mode, + // it's up to the service host to set up local packet handling which shouldn't + // arrive here. + return filter.DropSilently, gro + } + if debugNetstack() { + ns.logf("netstack: intercepting local VIP service packet: proto=%v dst=%v src=%v", + p.IPProto, p.Dst, p.Src) + } case viaRange.Contains(dst): // We need to handle 4via6 packets leaving the host if the via // route is for this host; otherwise the packet will be dropped @@ -1009,12 +1061,32 @@ func (ns *Impl) shouldSendToHost(pkt *stack.PacketBuffer) bool { return true } + if ns.isVIPServiceIP(srcIP) { + dstIP := netip.AddrFrom4(v.DestinationAddress().As4()) + if ns.isLocalIP(dstIP) { + if debugNetstack() { + ns.logf("netstack: sending VIP service packet to host: src=%v dst=%v", srcIP, dstIP) + } + return true + } + } + case header.IPv6: srcIP := netip.AddrFrom16(v.SourceAddress().As16()) if srcIP == serviceIPv6 { return true } + if ns.isVIPServiceIP(srcIP) { + dstIP := netip.AddrFrom16(v.DestinationAddress().As16()) + if ns.isLocalIP(dstIP) { + if debugNetstack() { + ns.logf("netstack: sending VIP service packet to host: src=%v dst=%v", srcIP, dstIP) + } + return true + } + } + if viaRange.Contains(srcIP) { // Only send to the host if this 4via6 route is // something this node handles. @@ -1233,8 +1305,9 @@ func (ns *Impl) userPing(dstIP netip.Addr, pingResPkt []byte, direction userPing // continue normally (typically being delivered to the host networking stack), // whereas returning filter.DropSilently is done when netstack intercepts the // packet and no further processing towards to host should be done. 
+// Caution: can be called before Start func (ns *Impl) injectInbound(p *packet.Parsed, t *tstun.Wrapper, gro *gro.GRO) (filter.Response, *gro.GRO) { - if ns.ctx.Err() != nil { + if !ns.ready.Load() || ns.ctx.Err() != nil { return filter.DropSilently, gro } diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index f9903c0c210d5..eea598937e4cf 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -31,6 +31,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/ipproto" "tailscale.com/types/logid" + "tailscale.com/types/netmap" "tailscale.com/wgengine" "tailscale.com/wgengine/filter" ) @@ -125,6 +126,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { tb.Fatal(err) } tb.Cleanup(func() { ns.Close() }) + sys.Set(ns) lb, err := ipnlocal.NewLocalBackend(logf, logid.PublicID{}, sys, 0) if err != nil { @@ -741,13 +743,20 @@ func TestHandleLocalPackets(t *testing.T) { // fd7a:115c:a1e0:b1a:0:7:a01:100/120 netip.MustParsePrefix("fd7a:115c:a1e0:b1a:0:7:a01:100/120"), } + prefs.AdvertiseServices = []string{"svc:test-service"} _, err := impl.lb.EditPrefs(&ipn.MaskedPrefs{ - Prefs: *prefs, - AdvertiseRoutesSet: true, + Prefs: *prefs, + AdvertiseRoutesSet: true, + AdvertiseServicesSet: true, }) if err != nil { t.Fatalf("EditPrefs: %v", err) } + IPServiceMap := netmap.IPServiceMappings{ + netip.MustParseAddr("100.99.55.111"): "svc:test-service", + netip.MustParseAddr("fd7a:115c:a1e0::abcd"): "svc:test-service", + } + impl.lb.SetIPServiceMappingsForTest(IPServiceMap) t.Run("ShouldHandleServiceIP", func(t *testing.T) { pkt := &packet.Parsed{ @@ -784,6 +793,19 @@ func TestHandleLocalPackets(t *testing.T) { t.Errorf("got filter outcome %v, want filter.DropSilently", resp) } }) + t.Run("ShouldHandleLocalTailscaleServices", func(t *testing.T) { + pkt := &packet.Parsed{ + IPVersion: 4, + IPProto: ipproto.TCP, + Src: netip.MustParseAddrPort("127.0.0.1:9999"), + Dst: 
netip.MustParseAddrPort("100.99.55.111:80"), + TCPFlags: packet.TCPSyn, + } + resp, _ := impl.handleLocalPackets(pkt, impl.tundev, nil) + if resp != filter.DropSilently { + t.Errorf("got filter outcome %v, want filter.DropSilently", resp) + } + }) t.Run("OtherNonHandled", func(t *testing.T) { pkt := &packet.Parsed{ IPVersion: 6, @@ -809,8 +831,10 @@ func TestHandleLocalPackets(t *testing.T) { func TestShouldSendToHost(t *testing.T) { var ( - selfIP4 = netip.MustParseAddr("100.64.1.2") - selfIP6 = netip.MustParseAddr("fd7a:115c:a1e0::123") + selfIP4 = netip.MustParseAddr("100.64.1.2") + selfIP6 = netip.MustParseAddr("fd7a:115c:a1e0::123") + tailscaleServiceIP4 = netip.MustParseAddr("100.99.55.111") + tailscaleServiceIP6 = netip.MustParseAddr("fd7a:115c:a1e0::abcd") ) makeTestNetstack := func(tb testing.TB) *Impl { @@ -820,6 +844,9 @@ func TestShouldSendToHost(t *testing.T) { impl.atomicIsLocalIPFunc.Store(func(addr netip.Addr) bool { return addr == selfIP4 || addr == selfIP6 }) + impl.atomicIsVIPServiceIPFunc.Store(func(addr netip.Addr) bool { + return addr == tailscaleServiceIP4 || addr == tailscaleServiceIP6 + }) }) prefs := ipn.NewPrefs() @@ -919,6 +946,33 @@ func TestShouldSendToHost(t *testing.T) { dst: netip.MustParseAddrPort("[fd7a:115:a1e0::99]:7777"), want: false, }, + // After accessing the Tailscale service from host, replies from Tailscale Service IPs + // to the local Tailscale IPs should be sent to the host. + { + name: "from_service_ip_to_local_ip", + src: netip.AddrPortFrom(tailscaleServiceIP4, 80), + dst: netip.AddrPortFrom(selfIP4, 12345), + want: true, + }, + { + name: "from_service_ip_to_local_ip_v6", + src: netip.AddrPortFrom(tailscaleServiceIP6, 80), + dst: netip.AddrPortFrom(selfIP6, 12345), + want: true, + }, + // Traffic from remote IPs to Tailscale Service IPs should be sent over WireGuard. 
+ { + name: "from_service_ip_to_remote", + src: netip.AddrPortFrom(tailscaleServiceIP4, 80), + dst: netip.MustParseAddrPort("173.201.32.56:54321"), + want: false, + }, + { + name: "from_service_ip_to_remote_v6", + src: netip.AddrPortFrom(tailscaleServiceIP6, 80), + dst: netip.MustParseAddrPort("[2001:4860:4860::8888]:54321"), + want: false, + }, } for _, tt := range testCases { From b4d39e2fd92538384aa7388fdbeda0ec51973bfc Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Fri, 30 Jan 2026 17:03:17 -0700 Subject: [PATCH 0925/1093] cmd/gitops-pusher: fix precedence when id token env var is empty Fix precedence logic to skip federated identity logic when the associated environment variables are empty. Updates https://github.com/tailscale/gitops-acl-action/issues/71 Signed-off-by: Mario Minardi --- cmd/gitops-pusher/gitops-pusher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/gitops-pusher/gitops-pusher.go b/cmd/gitops-pusher/gitops-pusher.go index 39a60d3064432..11448e30da1aa 100644 --- a/cmd/gitops-pusher/gitops-pusher.go +++ b/cmd/gitops-pusher/gitops-pusher.go @@ -252,7 +252,7 @@ func getCredentials() (*http.Client, string) { TokenURL: fmt.Sprintf("https://%s/api/v2/oauth/token", *apiServer), } client = oauthConfig.Client(context.Background()) - } else if idok { + } else if idok && idToken != "" && oiok && oauthId != "" { if exchangeJWTForToken, ok := tailscale.HookExchangeJWTForTokenViaWIF.GetOk(); ok { var err error apiKeyEnv, err = exchangeJWTForToken(context.Background(), fmt.Sprintf("https://%s", *apiServer), oauthId, idToken) From 8cac8b117b0b2da307369fe3dd0b5cd6b9ed3711 Mon Sep 17 00:00:00 2001 From: Brendan Creane Date: Fri, 30 Jan 2026 17:52:54 -0800 Subject: [PATCH 0926/1093] net/dns/resolver: set TC flag when UDP responses exceed size limits (#18157) The forwarder was not setting the Truncated (TC) flag when UDP DNS responses exceeded either the EDNS buffer size (if present) or the RFC 1035 default 512-byte limit. 
This affected DoH, TCP fallback, and UDP response paths. The fix ensures checkResponseSizeAndSetTC is called in all code paths that return UDP responses, enforcing both EDNS and default UDP size limits. Added comprehensive unit tests and consolidated duplicate test helpers. Updates #18107 Signed-off-by: Brendan Creane --- net/dns/resolver/forwarder.go | 137 +++++++-- net/dns/resolver/forwarder_test.go | 475 ++++++++++++++++++++++++++--- net/dns/resolver/tsdns.go | 7 +- net/dns/resolver/tsdns_test.go | 99 ++++++ 4 files changed, 654 insertions(+), 64 deletions(-) diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 0a3daa3bc46ca..189911ee24c0a 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -63,6 +63,17 @@ func truncatedFlagSet(pkt []byte) bool { return (binary.BigEndian.Uint16(pkt[2:4]) & dnsFlagTruncated) != 0 } +// setTCFlag sets the TC (truncated) flag in the DNS packet header. +// The packet must be at least headerBytes in length. +func setTCFlag(packet []byte) { + if len(packet) < headerBytes { + return + } + flags := binary.BigEndian.Uint16(packet[2:4]) + flags |= dnsFlagTruncated + binary.BigEndian.PutUint16(packet[2:4], flags) +} + const ( // dohIdleConnTimeout is how long to keep idle HTTP connections // open to DNS-over-HTTPS servers. 10 seconds is a sensible @@ -131,47 +142,59 @@ func getRCode(packet []byte) dns.RCode { return dns.RCode(packet[3] & 0x0F) } -// clampEDNSSize attempts to limit the maximum EDNS response size. This is not -// an exhaustive solution, instead only easy cases are currently handled in the -// interest of speed and reduced complexity. Only OPT records at the very end of -// the message with no option codes are addressed. -// TODO: handle more situations if we discover that they happen often -func clampEDNSSize(packet []byte, maxSize uint16) { - // optFixedBytes is the size of an OPT record with no option codes. 
- const optFixedBytes = 11 - const edns0Version = 0 +// findOPTRecord finds and validates the EDNS OPT record at the end of a DNS packet. +// Returns the requested buffer size and a pointer to the OPT record bytes if valid, +// or (0, nil) if no valid OPT record is found. +// The OPT record must be at the very end of the packet with no option codes. +func findOPTRecord(packet []byte) (requestedSize uint16, opt []byte) { + const optFixedBytes = 11 // size of an OPT record with no option codes + const edns0Version = 0 // EDNS version number (currently only version 0 is defined) if len(packet) < headerBytes+optFixedBytes { - return + return 0, nil } arCount := binary.BigEndian.Uint16(packet[10:12]) if arCount == 0 { // OPT shows up in an AR, so there must be no OPT - return + return 0, nil } // https://datatracker.ietf.org/doc/html/rfc6891#section-6.1.2 - opt := packet[len(packet)-optFixedBytes:] + opt = packet[len(packet)-optFixedBytes:] if opt[0] != 0 { // OPT NAME must be 0 (root domain) - return + return 0, nil } if dns.Type(binary.BigEndian.Uint16(opt[1:3])) != dns.TypeOPT { // Not an OPT record - return + return 0, nil } - requestedSize := binary.BigEndian.Uint16(opt[3:5]) + requestedSize = binary.BigEndian.Uint16(opt[3:5]) // Ignore extended RCODE in opt[5] if opt[6] != edns0Version { // Be conservative and don't touch unknown versions. - return + return 0, nil } // Ignore flags in opt[6:9] if binary.BigEndian.Uint16(opt[9:11]) != 0 { // RDLEN must be 0 (no variable length data). We're at the end of the - // packet so this should be 0 anyway).. + // packet so this should be 0 anyway. + return 0, nil + } + + return requestedSize, opt +} + +// clampEDNSSize attempts to limit the maximum EDNS response size. This is not +// an exhaustive solution, instead only easy cases are currently handled in the +// interest of speed and reduced complexity. Only OPT records at the very end of +// the message with no option codes are addressed. 
+// TODO: handle more situations if we discover that they happen often +func clampEDNSSize(packet []byte, maxSize uint16) { + requestedSize, opt := findOPTRecord(packet) + if opt == nil { return } @@ -183,6 +206,57 @@ func clampEDNSSize(packet []byte, maxSize uint16) { binary.BigEndian.PutUint16(opt[3:5], maxSize) } +// getEDNSBufferSize extracts the EDNS buffer size from a DNS request packet. +// Returns (bufferSize, true) if a valid EDNS OPT record is found, +// or (0, false) if no EDNS OPT record is found or if there's an error. +func getEDNSBufferSize(packet []byte) (uint16, bool) { + requestedSize, opt := findOPTRecord(packet) + return requestedSize, opt != nil +} + +// checkResponseSizeAndSetTC sets the TC (truncated) flag in the DNS header when +// the response exceeds the maximum UDP size. If no EDNS OPT record is present +// in the request, it sets the TC flag when the response is bigger than 512 bytes +// per RFC 1035. If an EDNS OPT record is present, it sets the TC flag when the +// response is bigger than the EDNS buffer size. The response buffer is not +// truncated; only the TC flag is set. Returns the response unchanged except for +// the TC flag being set if needed. 
+func checkResponseSizeAndSetTC(response []byte, request []byte, family string, logf logger.Logf) []byte { + const defaultUDPSize = 512 // default maximum UDP DNS packet size per RFC 1035 + + // Only check for UDP queries; TCP can handle larger responses + if family != "udp" { + return response + } + + // Check if TC flag is already set + if len(response) < headerBytes { + return response + } + if truncatedFlagSet(response) { + // TC flag already set, nothing to do + return response + } + + ednsSize, hasEDNS := getEDNSBufferSize(request) + + // Determine maximum allowed size + var maxSize int + if hasEDNS { + maxSize = int(ednsSize) + } else { + // No EDNS: enforce default UDP size limit per RFC 1035 + maxSize = defaultUDPSize + } + + // Check if response exceeds maximum size + if len(response) > maxSize { + setTCFlag(response) + } + + return response +} + // dnsForwarderFailing should be raised when the forwarder is unable to reach the // upstream resolvers. This is a high severity warning as it results in "no internet". // This warning must be cleared when the forwarder is working again. @@ -535,7 +609,13 @@ func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDe if !buildfeatures.HasPeerAPIClient { return nil, feature.ErrUnavailable } - return f.sendDoH(ctx, rr.name.Addr, f.dialer.PeerAPIHTTPClient(), fq.packet) + res, err := f.sendDoH(ctx, rr.name.Addr, f.dialer.PeerAPIHTTPClient(), fq.packet) + if err != nil { + return nil, err + } + // Check response size and set TC flag if needed (only for UDP queries) + res = checkResponseSizeAndSetTC(res, fq.packet, fq.family, f.logf) + return res, nil } if strings.HasPrefix(rr.name.Addr, "https://") { // Only known DoH providers are supported currently. Specifically, we @@ -546,7 +626,13 @@ func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDe // them. 
urlBase := rr.name.Addr if hc, ok := f.getKnownDoHClientForProvider(urlBase); ok { - return f.sendDoH(ctx, urlBase, hc, fq.packet) + res, err := f.sendDoH(ctx, urlBase, hc, fq.packet) + if err != nil { + return nil, err + } + // Check response size and set TC flag if needed (only for UDP queries) + res = checkResponseSizeAndSetTC(res, fq.packet, fq.family, f.logf) + return res, nil } metricDNSFwdErrorType.Add(1) return nil, fmt.Errorf("arbitrary https:// resolvers not supported yet") @@ -710,12 +796,15 @@ func (f *forwarder) sendUDP(ctx context.Context, fq *forwardQuery, rr resolverAn f.logf("recv: packet too small (%d bytes)", n) } out = out[:n] + tcFlagAlreadySet := truncatedFlagSet(out) + txid := getTxID(out) if txid != fq.txid { metricDNSFwdUDPErrorTxID.Add(1) return nil, errTxIDMismatch } rcode := getRCode(out) + // don't forward transient errors back to the client when the server fails if rcode == dns.RCodeServerFailure { f.logf("recv: response code indicating server failure: %d", rcode) @@ -723,11 +812,9 @@ func (f *forwarder) sendUDP(ctx context.Context, fq *forwardQuery, rr resolverAn return nil, errServerFailure } - if truncated { - // Set the truncated bit if it wasn't already. - flags := binary.BigEndian.Uint16(out[2:4]) - flags |= dnsFlagTruncated - binary.BigEndian.PutUint16(out[2:4], flags) + // Set the truncated bit if buffer was truncated during read and the flag isn't already set + if truncated && !tcFlagAlreadySet { + setTCFlag(out) // TODO(#2067): Remove any incomplete records? RFC 1035 section 6.2 // states that truncation should head drop so that the authority @@ -736,6 +823,8 @@ func (f *forwarder) sendUDP(ctx context.Context, fq *forwardQuery, rr resolverAn // best we can do. 
} + out = checkResponseSizeAndSetTC(out, fq.packet, fq.family, f.logf) + if truncatedFlagSet(out) { metricDNSFwdTruncated.Add(1) } diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index 3165bb9783faa..6c7459b1f619c 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -34,6 +34,46 @@ func (rr resolverAndDelay) String() string { return fmt.Sprintf("%v+%v", rr.name, rr.startDelay) } +// setTCFlagInPacket sets the TC flag in a DNS packet (for testing). +func setTCFlagInPacket(packet []byte) { + if len(packet) < headerBytes { + return + } + flags := binary.BigEndian.Uint16(packet[2:4]) + flags |= dnsFlagTruncated + binary.BigEndian.PutUint16(packet[2:4], flags) +} + +// clearTCFlagInPacket clears the TC flag in a DNS packet (for testing). +func clearTCFlagInPacket(packet []byte) { + if len(packet) < headerBytes { + return + } + flags := binary.BigEndian.Uint16(packet[2:4]) + flags &^= dnsFlagTruncated + binary.BigEndian.PutUint16(packet[2:4], flags) +} + +// verifyEDNSBufferSize verifies a request has the expected EDNS buffer size. +func verifyEDNSBufferSize(t *testing.T, request []byte, expectedSize uint16) { + t.Helper() + ednsSize, hasEDNS := getEDNSBufferSize(request) + if !hasEDNS { + t.Fatalf("request should have EDNS OPT record") + } + if ednsSize != expectedSize { + t.Fatalf("request EDNS size = %d, want %d", ednsSize, expectedSize) + } +} + +// setupForwarderWithTCPRetriesDisabled returns a forwarder modifier that disables TCP retries. 
+func setupForwarderWithTCPRetriesDisabled() func(*forwarder) { + return func(fwd *forwarder) { + fwd.controlKnobs = &controlknobs.Knobs{} + fwd.controlKnobs.DisableDNSForwarderTCPRetries.Store(true) + } +} + func TestResolversWithDelays(t *testing.T) { // query q := func(ss ...string) (ipps []*dnstype.Resolver) { @@ -428,22 +468,16 @@ func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) } // Our request is a single A query for the domain in the answer, above. - builder = dns.NewBuilder(nil, dns.Header{}) - builder.StartQuestions() - builder.Question(dns.Question{ - Name: dns.MustNewName(domain), - Type: dns.TypeA, - Class: dns.ClassINET, - }) - request, err = builder.Finish() - if err != nil { - tb.Fatal(err) - } + request = makeTestRequest(tb, domain, dns.TypeA, 0) return } func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports ...uint16) ([]byte, error) { + return runTestQueryWithFamily(tb, request, "udp", modify, ports...) +} + +func runTestQueryWithFamily(tb testing.TB, request []byte, family string, modify func(*forwarder), ports ...uint16) ([]byte, error) { logf := tstest.WhileTestRunningLogger(tb) bus := eventbustest.NewBus(tb) netMon, err := netmon.New(bus, logf) @@ -467,7 +501,7 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports rpkt := packet{ bs: request, - family: "tcp", + family: family, addr: netip.MustParseAddrPort("127.0.0.1:12345"), } @@ -483,17 +517,29 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports } } -// makeTestRequest returns a new TypeA request for the given domain. -func makeTestRequest(tb testing.TB, domain string) []byte { +// makeTestRequest returns a new DNS request for the given domain. +// If queryType is 0, it defaults to TypeA. If ednsSize > 0, it adds an EDNS OPT record. 
+func makeTestRequest(tb testing.TB, domain string, queryType dns.Type, ednsSize uint16) []byte { tb.Helper() + if queryType == 0 { + queryType = dns.TypeA + } name := dns.MustNewName(domain) builder := dns.NewBuilder(nil, dns.Header{}) builder.StartQuestions() builder.Question(dns.Question{ Name: name, - Type: dns.TypeA, + Type: queryType, Class: dns.ClassINET, }) + if ednsSize > 0 { + builder.StartAdditionals() + builder.OPTResource(dns.ResourceHeader{ + Name: dns.MustNewName("."), + Type: dns.TypeOPT, + Class: dns.Class(ednsSize), + }, dns.OPTResource{}) + } request, err := builder.Finish() if err != nil { tb.Fatal(err) @@ -549,6 +595,371 @@ func beVerbose(f *forwarder) { f.verboseFwd = true } +// makeTestRequestWithEDNS returns a new TypeTXT request for the given domain with EDNS buffer size. +// Deprecated: Use makeTestRequest with queryType and ednsSize parameters instead. +func makeTestRequestWithEDNS(tb testing.TB, domain string, ednsSize uint16) []byte { + return makeTestRequest(tb, domain, dns.TypeTXT, ednsSize) +} + +// makeEDNSResponse creates a DNS response of approximately the specified size +// with TXT records and an OPT record. The response will NOT have the TC flag set +// (simulating a non-compliant server that doesn't set TC when response exceeds EDNS buffer). +// The actual size may vary significantly due to DNS packet structure constraints. 
+func makeEDNSResponse(tb testing.TB, domain string, targetSize int) []byte { + tb.Helper() + // Use makeResponseOfSize with includeOPT=true + // Allow significant variance since DNS packet sizes are hard to predict exactly + // Use a combination of fixed tolerance (200 bytes) and percentage (25%) for larger targets + response := makeResponseOfSize(tb, domain, targetSize, true) + actualSize := len(response) + maxVariance := 200 + if targetSize > 400 { + // For larger targets, allow 25% variance + maxVariance = targetSize * 25 / 100 + } + if actualSize < targetSize-maxVariance || actualSize > targetSize+maxVariance { + tb.Fatalf("response size = %d, want approximately %d (variance: %d, allowed: ±%d)", + actualSize, targetSize, actualSize-targetSize, maxVariance) + } + return response +} + +func TestEDNSBufferSizeTruncation(t *testing.T) { + const domain = "edns-test.example.com." + const ednsBufferSize = 500 // Small EDNS buffer + const responseSize = 800 // Response exceeds EDNS but < maxResponseBytes + + // Create a response that exceeds EDNS buffer size but doesn't have TC flag set + response := makeEDNSResponse(t, domain, responseSize) + + // Create a request with EDNS buffer size + request := makeTestRequest(t, domain, dns.TypeTXT, ednsBufferSize) + verifyEDNSBufferSize(t, request, ednsBufferSize) + + // Verify response doesn't have TC flag set initially + if truncatedFlagSet(response) { + t.Fatal("test response should not have TC flag set initially") + } + + // Set up test DNS server + port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { + verifyEDNSBufferSize(t, gotRequest, ednsBufferSize) + }) + + // Disable TCP retries to ensure we test UDP path + resp := mustRunTestQuery(t, request, setupForwarderWithTCPRetriesDisabled(), port) + + // Verify the response has TC flag set by forwarder + if !truncatedFlagSet(resp) { + t.Errorf("TC flag not set in response (response size=%d, EDNS=%d)", len(resp), ednsBufferSize) + } + + // Verify 
response size is preserved (not truncated by buffer) + if len(resp) != len(response) { + t.Errorf("response size = %d, want %d (response should not be truncated by buffer)", len(resp), len(response)) + } + + // Verify response size exceeds EDNS buffer + if len(resp) <= int(ednsBufferSize) { + t.Errorf("response size = %d, should exceed EDNS buffer size %d", len(resp), ednsBufferSize) + } +} + +// makeResponseOfSize creates a DNS response of approximately the specified size +// with TXT records. The response will NOT have the TC flag set initially. +// If includeOPT is true, an OPT record is added to the response. +func makeResponseOfSize(tb testing.TB, domain string, targetSize int, includeOPT bool) []byte { + tb.Helper() + name := dns.MustNewName(domain) + + // Estimate how many TXT records we need + // Each TXT record with ~200 bytes of data adds roughly 220-230 bytes to the packet + // (including DNS headers, name compression, etc.) + bytesPerRecord := 220 + baseSize := 50 // Approximate base packet size (header + question) + if includeOPT { + baseSize += 11 // OPT record adds ~11 bytes + } + estimatedRecords := (targetSize - baseSize) / bytesPerRecord + if estimatedRecords < 1 { + estimatedRecords = 1 + } + + // Start with estimated records and adjust + txtLen := 200 + var response []byte + var err error + + for attempt := 0; attempt < 10; attempt++ { + testBuilder := dns.NewBuilder(nil, dns.Header{ + Response: true, + Authoritative: true, + RCode: dns.RCodeSuccess, + }) + testBuilder.StartQuestions() + testBuilder.Question(dns.Question{ + Name: name, + Type: dns.TypeTXT, + Class: dns.ClassINET, + }) + testBuilder.StartAnswers() + + for i := 0; i < estimatedRecords; i++ { + txtValue := strings.Repeat("x", txtLen) + testBuilder.TXTResource(dns.ResourceHeader{ + Name: name, + Type: dns.TypeTXT, + Class: dns.ClassINET, + TTL: 300, + }, dns.TXTResource{ + TXT: []string{txtValue}, + }) + } + + // Optionally add OPT record + if includeOPT { + 
testBuilder.StartAdditionals() + testBuilder.OPTResource(dns.ResourceHeader{ + Name: dns.MustNewName("."), + Type: dns.TypeOPT, + Class: dns.Class(4096), + }, dns.OPTResource{}) + } + + response, err = testBuilder.Finish() + if err != nil { + tb.Fatal(err) + } + + actualSize := len(response) + // Stop if we've reached or slightly exceeded the target + // Allow up to 20% overshoot to avoid excessive iterations + if actualSize >= targetSize && actualSize <= targetSize*120/100 { + break + } + // If we've overshot significantly, we're done (better than undershooting) + if actualSize > targetSize*120/100 { + break + } + + // Adjust for next attempt + needed := targetSize - actualSize + additionalRecords := (needed / bytesPerRecord) + 1 + estimatedRecords += additionalRecords + if estimatedRecords > 200 { + // If we need too many records, increase TXT length instead + txtLen = 255 // Max single TXT string length + bytesPerRecord = 280 // Adjusted estimate + estimatedRecords = (targetSize - baseSize) / bytesPerRecord + if estimatedRecords < 1 { + estimatedRecords = 1 + } + } + } + + // Ensure TC flag is NOT set initially + clearTCFlagInPacket(response) + + return response +} + +func TestCheckResponseSizeAndSetTC(t *testing.T) { + const domain = "test.example.com." 
+ logf := func(format string, args ...any) { + // Silent logger for tests + } + + tests := []struct { + name string + responseSize int + requestHasEDNS bool + ednsSize uint16 + family string + responseTCSet bool // Whether response has TC flag set initially + wantTCSet bool // Whether TC flag should be set after function call + skipIfNotExact bool // Skip test if we can't hit exact size (for edge cases) + }{ + // Default UDP size (512 bytes) without EDNS + { + name: "UDP_noEDNS_small_should_not_set_TC", + responseSize: 400, + requestHasEDNS: false, + family: "udp", + wantTCSet: false, + }, + { + name: "UDP_noEDNS_512bytes_should_not_set_TC", + responseSize: 512, + requestHasEDNS: false, + family: "udp", + wantTCSet: false, + skipIfNotExact: true, + }, + { + name: "UDP_noEDNS_513bytes_should_set_TC", + responseSize: 513, + requestHasEDNS: false, + family: "udp", + wantTCSet: true, + skipIfNotExact: true, + }, + { + name: "UDP_noEDNS_large_should_set_TC", + responseSize: 600, + requestHasEDNS: false, + family: "udp", + wantTCSet: true, + }, + + // EDNS edge cases + { + name: "UDP_EDNS_small_under_limit_should_not_set_TC", + responseSize: 450, + requestHasEDNS: true, + ednsSize: 500, + family: "udp", + wantTCSet: false, + }, + { + name: "UDP_EDNS_at_limit_should_not_set_TC", + responseSize: 500, + requestHasEDNS: true, + ednsSize: 500, + family: "udp", + wantTCSet: false, + }, + { + name: "UDP_EDNS_over_limit_should_set_TC", + responseSize: 550, + requestHasEDNS: true, + ednsSize: 500, + family: "udp", + wantTCSet: true, + }, + { + name: "UDP_EDNS_large_over_limit_should_set_TC", + responseSize: 1500, + requestHasEDNS: true, + ednsSize: 1200, + family: "udp", + wantTCSet: true, + }, + + // Early return paths + { + name: "TCP_query_should_skip", + responseSize: 1000, + family: "tcp", + wantTCSet: false, + }, + { + name: "response_too_small_should_skip", + responseSize: headerBytes - 1, + family: "udp", + wantTCSet: false, + }, + { + name: 
"response_exactly_headerBytes_should_not_set_TC", + responseSize: headerBytes, + family: "udp", + wantTCSet: false, + }, + { + name: "response_TC_already_set_should_skip", + responseSize: 600, + family: "udp", + responseTCSet: true, + wantTCSet: true, // Should remain set + }, + { + name: "UDP_noEDNS_large_TC_already_set_should_skip", + responseSize: 600, + requestHasEDNS: false, + family: "udp", + responseTCSet: true, + wantTCSet: true, // Should remain set + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var response []byte + + // Create response of specified size + if tt.responseSize < headerBytes { + // For too-small test, create minimal invalid packet + response = make([]byte, tt.responseSize) + // Don't set any flags, just make it too small + } else { + response = makeResponseOfSize(t, domain, tt.responseSize, false) + actualSize := len(response) + + // Only adjust expectations for UDP queries that go through size checking + // TCP queries and other early-return cases should keep their original expectations + if tt.family == "udp" && !tt.responseTCSet && actualSize >= headerBytes { + // Determine the maximum allowed size based on request + var maxSize int + if tt.requestHasEDNS { + maxSize = int(tt.ednsSize) + } else { + maxSize = 512 // default UDP size per RFC 1035 + } + + // For edge cases where exact size matters, verify we're close enough + if tt.skipIfNotExact { + // For 512/513 byte tests, we need to be very close + if actualSize < tt.responseSize-10 || actualSize > tt.responseSize+10 { + t.Skipf("skipping: could not create response close to target size %d (got %d)", tt.responseSize, actualSize) + } + // Function sets TC if response > maxSize, so adjust expectation based on actual size + tt.wantTCSet = actualSize > maxSize + } else { + // For non-exact tests, adjust expectation based on actual response size + // The function sets TC if actualSize > maxSize + tt.wantTCSet = actualSize > maxSize + } + } + } + + // Set TC 
flag initially if requested + if tt.responseTCSet { + setTCFlagInPacket(response) + } + + // Create request with or without EDNS + var ednsSize uint16 + if tt.requestHasEDNS { + ednsSize = tt.ednsSize + } + request := makeTestRequest(t, domain, dns.TypeTXT, ednsSize) + + // Call the function + result := checkResponseSizeAndSetTC(response, request, tt.family, logf) + + // Verify response size is preserved (function should not truncate, only set flag) + if len(result) != len(response) { + t.Errorf("response size changed: got %d, want %d", len(result), len(response)) + } + + // Verify TC flag state + if len(result) >= headerBytes { + hasTC := truncatedFlagSet(result) + if hasTC != tt.wantTCSet { + t.Errorf("TC flag: got %v, want %v (response size=%d)", hasTC, tt.wantTCSet, len(result)) + } + } else if tt.responseSize >= headerBytes { + // If we expected a valid response but got too small, that's unexpected + t.Errorf("response too small (%d bytes) but expected valid response", len(result)) + } + + // Verify response pointer is same (should be in-place modification) + if &result[0] != &response[0] { + t.Errorf("function should modify response in place, but got new slice") + } + }) + } +} + func TestForwarderTCPFallback(t *testing.T) { const domain = "large-dns-response.tailscale.com." @@ -569,7 +980,10 @@ func TestForwarderTCPFallback(t *testing.T) { } }) - resp := mustRunTestQuery(t, request, beVerbose, port) + resp, err := runTestQueryWithFamily(t, request, "tcp", beVerbose, port) + if err != nil { + t.Fatalf("error making request: %v", err) + } if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -636,17 +1050,13 @@ func TestForwarderTCPFallbackDisabled(t *testing.T) { resp := mustRunTestQuery(t, request, func(fwd *forwarder) { fwd.verboseFwd = true - // Disable retries for this test. 
- fwd.controlKnobs = &controlknobs.Knobs{} - fwd.controlKnobs.DisableDNSForwarderTCPRetries.Store(true) + setupForwarderWithTCPRetriesDisabled()(fwd) }, port) wantResp := append([]byte(nil), largeResponse[:maxResponseBytes]...) // Set the truncated flag on the expected response, since that's what we expect. - flags := binary.BigEndian.Uint16(wantResp[2:4]) - flags |= dnsFlagTruncated - binary.BigEndian.PutUint16(wantResp[2:4], flags) + setTCFlagInPacket(wantResp) if !bytes.Equal(resp, wantResp) { t.Errorf("invalid response\ngot (%d): %+v\nwant (%d): %+v", len(resp), resp, len(wantResp), wantResp) @@ -664,7 +1074,7 @@ func TestForwarderTCPFallbackError(t *testing.T) { response := makeTestResponse(t, domain, dns.RCodeServerFailure) // Our request is a single A query for the domain in the answer, above. - request := makeTestRequest(t, domain) + request := makeTestRequest(t, domain, dns.TypeA, 0) var sawRequest atomic.Bool port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { @@ -695,7 +1105,7 @@ func TestForwarderTCPFallbackError(t *testing.T) { // returns a successful response, we propagate it. func TestForwarderWithManyResolvers(t *testing.T) { const domain = "example.com." - request := makeTestRequest(t, domain) + request := makeTestRequest(t, domain, dns.TypeA, 0) tests := []struct { name string @@ -837,20 +1247,7 @@ func TestNXDOMAINIncludesQuestion(t *testing.T) { }() // Our request is a single PTR query for the domain in the answer, above. 
- request := func() []byte { - builder := dns.NewBuilder(nil, dns.Header{}) - builder.StartQuestions() - builder.Question(dns.Question{ - Name: dns.MustNewName(domain), - Type: dns.TypePTR, - Class: dns.ClassINET, - }) - request, err := builder.Finish() - if err != nil { - t.Fatal(err) - } - return request - }() + request := makeTestRequest(t, domain, dns.TypePTR, 0) port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { }) @@ -868,7 +1265,7 @@ func TestNXDOMAINIncludesQuestion(t *testing.T) { func TestForwarderVerboseLogs(t *testing.T) { const domain = "test.tailscale.com." response := makeTestResponse(t, domain, dns.RCodeServerFailure) - request := makeTestRequest(t, domain) + request := makeTestRequest(t, domain, dns.TypeA, 0) port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { if !bytes.Equal(request, gotRequest) { diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index f71c1b7708b4c..5b44f6c2d586f 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -337,7 +337,12 @@ func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from net return (<-responses).bs, nil } - return out, err + if err != nil { + return out, err + } + + out = checkResponseSizeAndSetTC(out, bs, family, r.logf) + return out, nil } // GetUpstreamResolvers returns the resolvers that would be used to resolve diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index 712fa88dcad82..8ee22dd1384c0 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -1572,3 +1572,102 @@ func TestServfail(t *testing.T) { t.Errorf("response was %X, want %X", pkt, wantPkt) } } + +// TestLocalResponseTCFlagIntegration tests that checkResponseSizeAndSetTC is +// correctly applied to local DNS responses through the Resolver.Query integration path. +// This complements the unit test in forwarder_test.go by verifying the end-to-end behavior. 
+func TestLocalResponseTCFlagIntegration(t *testing.T) { + r := newResolver(t) + defer r.Close() + + r.SetConfig(dnsCfg) + + tests := []struct { + name string + query []byte + family string + wantTCSet bool + desc string + }{ + { + name: "UDP_small_local_response_no_TC", + query: dnspacket("test1.ipn.dev.", dns.TypeA, noEdns), + family: "udp", + wantTCSet: false, + desc: "Small local response (< 512 bytes) should not have TC flag set", + }, + { + name: "TCP_local_response_no_TC", + query: dnspacket("test1.ipn.dev.", dns.TypeA, noEdns), + family: "tcp", + wantTCSet: false, + desc: "TCP queries should skip TC flag setting (even for large responses)", + }, + { + name: "UDP_EDNS_request_small_response", + query: dnspacket("test1.ipn.dev.", dns.TypeA, 1500), + family: "udp", + wantTCSet: false, + desc: "Small response with EDNS request should not have TC flag set", + }, + { + name: "UDP_IPv6_response_no_TC", + query: dnspacket("test2.ipn.dev.", dns.TypeAAAA, noEdns), + family: "udp", + wantTCSet: false, + desc: "Small IPv6 local response should not have TC flag set", + }, + { + name: "UDP_reverse_lookup_no_TC", + query: dnspacket("4.3.2.1.in-addr.arpa.", dns.TypePTR, noEdns), + family: "udp", + wantTCSet: false, + desc: "Small reverse lookup response should not have TC flag set", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + response, err := r.Query(context.Background(), tt.query, tt.family, netip.AddrPort{}) + if err != nil { + t.Fatalf("Query failed: %v", err) + } + + if len(response) < headerBytes { + t.Fatalf("Response too small: %d bytes", len(response)) + } + + hasTC := truncatedFlagSet(response) + if hasTC != tt.wantTCSet { + t.Errorf("%s: TC flag = %v, want %v (response size=%d bytes)", tt.desc, hasTC, tt.wantTCSet, len(response)) + } + + // Verify response is valid by parsing it (if possible) + // Note: unpackResponse may not support all record types (e.g., PTR) + parsed, err := unpackResponse(response) + if err == nil { + // 
Verify the truncated field in parsed response matches the flag + if parsed.truncated != hasTC { + t.Errorf("Parsed truncated field (%v) doesn't match TC flag (%v)", parsed.truncated, hasTC) + } + } else { + // For unsupported types, just verify we can parse the header + var parser dns.Parser + h, err := parser.Start(response) + if err != nil { + t.Errorf("Failed to parse DNS header: %v", err) + } else { + // Verify header truncated flag matches + if h.Truncated != hasTC { + t.Errorf("Header truncated field (%v) doesn't match TC flag (%v)", h.Truncated, hasTC) + } + } + } + + // Verify response size is reasonable (local responses are typically small) + if len(response) > 1000 { + t.Logf("Warning: Local response is unusually large: %d bytes", len(response)) + } + }) + } +} From 274ab995d29a9c0a9160b0534a20b9f6db29726e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 1 Feb 2026 13:09:33 -0800 Subject: [PATCH 0927/1093] go.toolchain.*: bump our Go 1.25 and Go 1.26 toolchains Go1.25 for tailscale/go#149 Go1.26 for tailscale/go#149 + upstream release-branch.go1.26 work since rc2. 
Updates tailscale/go#149 Change-Id: Ib56b5b5119f181c4a81d4b599b8bbdb405ee6704 Signed-off-by: Brad Fitzpatrick --- go.toolchain.next.rev | 2 +- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.toolchain.next.rev b/go.toolchain.next.rev index ee8816b6ff3dd..be0f53a9c5a18 100644 --- a/go.toolchain.next.rev +++ b/go.toolchain.next.rev @@ -1 +1 @@ -07d023ba9bb6d17a84b492f1524fabfa69a31bda +64a6cb4cba579e2865654747d4d672ead07b8375 diff --git a/go.toolchain.rev b/go.toolchain.rev index 930ed5ad251a9..cb3fa64623175 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -799b25336eeb52e2f8b4521fba5870c2ad2d9f43 +779d878b6a943cecd2f359699001a03d7cedf222 diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index ae95ed0ff869d..3b8ce70f3ac31 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-27ymqBnopujAGo02TZ5IPX8bVkp+rLTuVSn/QzZufJc= +sha256-e081DbI45vGMmi3drwqz2UOxRwffEuEDSVZupDtOVuk= From abdbca47af098469fba238c408dd1f4b373d254c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Neal=20Gompa=20=28=E3=83=8B=E3=83=BC=E3=83=AB=E3=83=BB?= =?UTF-8?q?=E3=82=B4=E3=83=B3=E3=83=91=29?= Date: Mon, 2 Feb 2026 11:49:44 -0500 Subject: [PATCH 0928/1093] client/systray: Update systemd unit to use correct dependencies (#18457) This ensures that D-Bus is active for the unit and will correctly shut down when the default target ends. 
Fixes: https://github.com/tailscale/tailscale/issues/18458 Signed-off-by: Neal Gompa --- client/systray/tailscale-systray.service | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/client/systray/tailscale-systray.service b/client/systray/tailscale-systray.service index 01d0b383c0634..bc4b470043bf7 100644 --- a/client/systray/tailscale-systray.service +++ b/client/systray/tailscale-systray.service @@ -1,6 +1,9 @@ [Unit] Description=Tailscale System Tray -After=graphical.target +Documentation=https://tailscale.com/kb/1597/linux-systray +Requires=dbus.service +After=dbus.service +PartOf=default.target [Service] Type=simple From 8736fbb754e7f6ce1cc391b7013ce7e184504faa Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 30 Jan 2026 13:59:09 -0800 Subject: [PATCH 0929/1093] cmd/tailscale/cli: add 'wait' listening subcommand and ip --assert= This provides a mechanism to block, waiting for Tailscale's IP to be ready for a bind/listen, to gate the starting of other services. It also adds a new --assert=[IP] option to "tailscale ip", for services that want extra paranoia about what IP is in use, if they're worried about having switched to the wrong tailnet prior to reboot or something. Updates #3340 Updates #11504 ... and many more, IIRC Change-Id: I88ab19ac5fae58fd8c516065bab685e292395565 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli.go | 1 + cmd/tailscale/cli/ip.go | 16 +++- cmd/tailscale/cli/wait.go | 157 +++++++++++++++++++++++++++++++++++++ cmd/tailscale/depaware.txt | 1 + 4 files changed, 172 insertions(+), 3 deletions(-) create mode 100644 cmd/tailscale/cli/wait.go diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index b8ac768746d27..07c7656dfa875 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -278,6 +278,7 @@ change in the future. 
configureHostCmd(), systrayCmd, appcRoutesCmd, + waitCmd, ), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { diff --git a/cmd/tailscale/cli/ip.go b/cmd/tailscale/cli/ip.go index 01373a073b169..7159904c732d6 100644 --- a/cmd/tailscale/cli/ip.go +++ b/cmd/tailscale/cli/ip.go @@ -25,14 +25,16 @@ var ipCmd = &ffcli.Command{ fs.BoolVar(&ipArgs.want1, "1", false, "only print one IP address") fs.BoolVar(&ipArgs.want4, "4", false, "only print IPv4 address") fs.BoolVar(&ipArgs.want6, "6", false, "only print IPv6 address") + fs.StringVar(&ipArgs.assert, "assert", "", "assert that one of the node's IP(s) matches this IP address") return fs })(), } var ipArgs struct { - want1 bool - want4 bool - want6 bool + want1 bool + want4 bool + want6 bool + assert string } func runIP(ctx context.Context, args []string) error { @@ -62,6 +64,14 @@ func runIP(ctx context.Context, args []string) error { return err } ips := st.TailscaleIPs + if ipArgs.assert != "" { + for _, ip := range ips { + if ip.String() == ipArgs.assert { + return nil + } + } + return fmt.Errorf("assertion failed: IP %q not found among %v", ipArgs.assert, ips) + } if of != "" { ip, _, err := tailscaleIPFromArg(ctx, of) if err != nil { diff --git a/cmd/tailscale/cli/wait.go b/cmd/tailscale/cli/wait.go new file mode 100644 index 0000000000000..ce9c6aba65b40 --- /dev/null +++ b/cmd/tailscale/cli/wait.go @@ -0,0 +1,157 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "flag" + "fmt" + "net" + "net/netip" + "strings" + "time" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/ipn" + "tailscale.com/types/logger" + "tailscale.com/util/backoff" +) + +var waitCmd = &ffcli.Command{ + Name: "wait", + ShortHelp: "Wait for Tailscale interface/IPs to be ready for binding", + LongHelp: strings.TrimSpace(` +Wait for Tailscale resources to be available. 
As of 2026-01-02, the only +resource that's available to wait for by is the Tailscale interface and its +IPs. + +With no arguments, this command will block until tailscaled is up, its backend is running, +and the Tailscale interface is up and has a Tailscale IP address assigned to it. + +If running in userspace-networking mode, this command only waits for tailscaled and +the Running state, as no physical network interface exists. + +A future version of this command may support waiting for other types of resources. + +The command returns exit code 0 on success, and non-zero on failure or timeout. + +To wait on a specific type of IP address, use 'tailscale ip' in combination with +the 'tailscale wait' command. For example, to wait for an IPv4 address: + + tailscale wait && tailscale ip --assert= + +Linux systemd users can wait for the "tailscale-online.target" target, which runs +this command. + +More generally, a service that wants to bind to (listen on) a Tailscale interface or IP address +can run it like 'tailscale wait && /path/to/service [...]' to ensure that Tailscale is ready +before the program starts. 
+`), + + ShortUsage: "tailscale wait", + Exec: runWait, + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("wait") + fs.DurationVar(&waitArgs.timeout, "timeout", 0, "how long to wait before giving up (0 means wait indefinitely)") + return fs + })(), +} + +var waitArgs struct { + timeout time.Duration +} + +func runWait(ctx context.Context, args []string) error { + if len(args) > 0 { + return fmt.Errorf("unexpected arguments: %q", args) + } + if waitArgs.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, waitArgs.timeout) + defer cancel() + } + + bo := backoff.NewBackoff("wait", logger.Discard, 2*time.Second) + for { + _, err := localClient.StatusWithoutPeers(ctx) + bo.BackOff(ctx, err) + if err == nil { + break + } + if ctx.Err() != nil { + return ctx.Err() + } + } + + watcher, err := localClient.WatchIPNBus(ctx, ipn.NotifyInitialState) + if err != nil { + return err + } + defer watcher.Close() + var firstIP netip.Addr + for { + not, err := watcher.Next() + if err != nil { + return err + } + if not.State != nil && *not.State == ipn.Running { + + st, err := localClient.StatusWithoutPeers(ctx) + if err != nil { + return err + } + if len(st.TailscaleIPs) > 0 { + firstIP = st.TailscaleIPs[0] + break + } + } + } + + st, err := localClient.StatusWithoutPeers(ctx) + if err != nil { + return err + } + if !st.TUN { + // No TUN; nothing more to wait for. + return nil + } + + // Verify we have an interface using that IP. 
+ for { + err := checkForInterfaceIP(firstIP) + if err == nil { + return nil + } + bo.BackOff(ctx, err) + if ctx.Err() != nil { + return ctx.Err() + } + } +} + +func checkForInterfaceIP(ip netip.Addr) error { + ifs, err := net.Interfaces() + if err != nil { + return err + } + for _, ifi := range ifs { + addrs, err := ifi.Addrs() + if err != nil { + return err + } + for _, addr := range addrs { + var aip netip.Addr + switch v := addr.(type) { + case *net.IPNet: + aip, _ = netip.AddrFromSlice(v.IP) + case *net.IPAddr: + aip, _ = netip.AddrFromSlice(v.IP) + } + if aip.Unmap() == ip { + return nil + } + } + } + return fmt.Errorf("no interface has IP %v", ip) +} diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index b148423750b97..85bf2312a5f0f 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -251,6 +251,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/types/key+ tailscale.com/types/views from tailscale.com/tailcfg+ + tailscale.com/util/backoff from tailscale.com/cmd/tailscale/cli tailscale.com/util/cibuild from tailscale.com/health+ tailscale.com/util/clientmetric from tailscale.com/net/netcheck+ tailscale.com/util/cloudenv from tailscale.com/net/dnscache+ From ae95d8d22231a6e24e11bf44eadaf48b9a62aa3d Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Mon, 2 Feb 2026 15:38:40 -0800 Subject: [PATCH 0930/1093] cmd/tailscale: fix sanitizeOutput and add a test (#18589) Follow up from https://github.com/tailscale/tailscale/pull/18563 which I totally botched. 
Updates #18562 Signed-off-by: Andrew Lytvynov --- cmd/tailscale/cli/cli.go | 6 +++--- cmd/tailscale/cli/cli_test.go | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 07c7656dfa875..dca7559cf1923 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -581,11 +581,11 @@ type sanitizeWriter struct { w io.Writer } -var reTskey = regexp.MustCompile(`tskey-\w+`) +var rxTskey = regexp.MustCompile(`tskey-[\w-]+`) func (w sanitizeWriter) Write(buf []byte) (int, error) { - sanitized := reTskey.ReplaceAll(buf, []byte("tskey-REDACTED")) - diff := len(buf) - len(sanitized) + sanitized := rxTskey.ReplaceAll(buf, []byte("tskey-REDACTED")) + diff := len(sanitized) - len(buf) n, err := w.w.Write(sanitized) return n - diff, err } diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 370b730af8f35..ac6a94d52f88d 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -1799,3 +1799,21 @@ func TestDepsNoCapture(t *testing.T) { }.Check(t) } + +func TestSanitizeWriter(t *testing.T) { + buf := new(bytes.Buffer) + w := sanitizeOutput(buf) + + in := []byte(`my auth key is tskey-auth-abc123-def456, what's yours?`) + want := []byte(`my auth key is tskey-REDACTED, what's yours?`) + n, err := w.Write(in) + if err != nil { + t.Fatal(err) + } + if n != len(in) { + t.Errorf("unexpected write length %d, want %d", n, len(in)) + } + if got := buf.Bytes(); !bytes.Equal(got, want) { + t.Errorf("unexpected sanitized content\ngot: %q\nwant: %q", got, want) + } +} From f2b4d7065dbf18a2171205965dfde06b8051a034 Mon Sep 17 00:00:00 2001 From: David Bond Date: Tue, 3 Feb 2026 11:16:59 +0000 Subject: [PATCH 0931/1093] cmd/containerboot: handle v6 pod ips that are missing square brackets (#18519) This commit fixes an issue within containerboot that arose from the kubernetes operator. 
When users enable metrics on custom resources that are running on dual stack or ipv6 only clusters, they end up with an error as we pass the hostport combination using $(POD_IP):PORT. In Go, `netip.ParseAddrPort` expects square brackets `[]` to wrap the host portion of an ipv6 address and would naturally crash. When loading the containerboot configuration from the environment we now check if the `TS_LOCAL_ADDR_PORT` value contains the pod's v6 ip address. If it does & does not already contain brackets, we add the brackets in. Closes: #15762 Closes: #15467 Signed-off-by: David Bond --- cmd/containerboot/settings.go | 12 ++++++++++++ cmd/containerboot/settings_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index c35fc14079d85..181a94dd71114 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -126,6 +126,7 @@ func configFromEnv() (*settings, error) { IngressProxiesCfgPath: defaultEnv("TS_INGRESS_PROXIES_CONFIG_PATH", ""), PodUID: defaultEnv("POD_UID", ""), } + podIPs, ok := os.LookupEnv("POD_IPS") if ok { ips := strings.Split(podIPs, ",") @@ -144,6 +145,7 @@ func configFromEnv() (*settings, error) { cfg.PodIPv6 = parsed.String() } } + // If cert share is enabled, set the replica as read or write. Only 0th // replica should be able to write. isInCertShareMode := defaultBool("TS_EXPERIMENTAL_CERT_SHARE", false) @@ -165,9 +167,19 @@ func configFromEnv() (*settings, error) { cfg.AcceptDNS = &acceptDNSNew } + // In Kubernetes clusters, people like to use the "$(POD_IP):PORT" combination to configure the TS_LOCAL_ADDR_PORT + // environment variable (we even do this by default in the operator when enabling metrics), leading to a v6 address + // and port combo we cannot parse, as netip.ParseAddrPort expects the host segment to be enclosed in square brackets. 
+ // We perform a check here to see if TS_LOCAL_ADDR_PORT is using the pod's IPv6 address and is not using brackets, + // adding the brackets in if need be. + if cfg.PodIPv6 != "" && strings.Contains(cfg.LocalAddrPort, cfg.PodIPv6) && !strings.ContainsAny(cfg.LocalAddrPort, "[]") { + cfg.LocalAddrPort = strings.Replace(cfg.LocalAddrPort, cfg.PodIPv6, "["+cfg.PodIPv6+"]", 1) + } + if err := cfg.validate(); err != nil { return nil, fmt.Errorf("invalid configuration: %v", err) } + return cfg, nil } diff --git a/cmd/containerboot/settings_test.go b/cmd/containerboot/settings_test.go index 5fa0c7dd10724..eca50101b6c70 100644 --- a/cmd/containerboot/settings_test.go +++ b/cmd/containerboot/settings_test.go @@ -6,6 +6,7 @@ package main import ( + "net/netip" "strings" "testing" ) @@ -226,3 +227,30 @@ func TestValidateAuthMethods(t *testing.T) { }) } } + +func TestHandlesKubeIPV6(t *testing.T) { + t.Setenv("TS_LOCAL_ADDR_PORT", "fd7a:115c:a1e0::6c34:352:9002") + t.Setenv("POD_IPS", "fd7a:115c:a1e0::6c34:352") + + cfg, err := configFromEnv() + if err != nil { + t.Fatal(err) + } + + if cfg.LocalAddrPort != "[fd7a:115c:a1e0::6c34:352]:9002" { + t.Errorf("LocalAddrPort is not set correctly") + } + + parsed, err := netip.ParseAddrPort(cfg.LocalAddrPort) + if err != nil { + t.Fatal(err) + } + + if !parsed.Addr().Is6() { + t.Errorf("expected v6 address but got %s", parsed) + } + + if parsed.Port() != 9002 { + t.Errorf("expected port 9002 but got %d", parsed.Port()) + } +} From 77f5200164378213da4a9eda6de3a5801472a297 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Tue, 3 Feb 2026 14:12:38 +0000 Subject: [PATCH 0932/1093] cmd/k8s-operator,k8s-operator:ensure that recorder replicas default to 1 (#18375) Updates #17965 Signed-off-by: chaosinthecrd --- cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml | 1 + cmd/k8s-operator/deploy/manifests/operator.yaml | 1 + k8s-operator/api.md | 2 +- k8s-operator/apis/v1alpha1/types_recorder.go | 1 + 4 files changed, 4 insertions(+), 1 
deletion(-) diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml index 28d2be78e509c..ca43a72a557f2 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_recorders.yaml @@ -72,6 +72,7 @@ spec: description: Replicas specifies how many instances of tsrecorder to run. Defaults to 1. type: integer format: int32 + default: 1 minimum: 0 statefulSet: description: |- diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 4c9822847d677..b31e45eb7befc 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -3355,6 +3355,7 @@ spec: Required if S3 storage is not set up, to ensure that recordings are accessible. type: boolean replicas: + default: 1 description: Replicas specifies how many instances of tsrecorder to run. Defaults to 1. format: int32 minimum: 0 diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 31f351013164a..51a354b925574 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -904,7 +904,7 @@ _Appears in:_ | `tags` _[Tags](#tags)_ | Tags that the Tailscale device will be tagged with. Defaults to [tag:k8s].
      If you specify custom tags here, make sure you also make the operator
      an owner of these tags.
      See https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator.
      Tags cannot be changed once a Recorder node has been created.
      Tag values must be in form ^tag:[a-zA-Z][a-zA-Z0-9-]*$. | | Pattern: `^tag:[a-zA-Z][a-zA-Z0-9-]*$`
      Type: string
      | | `enableUI` _boolean_ | Set to true to enable the Recorder UI. The UI lists and plays recorded sessions.
      The UI will be served at :443. Defaults to false.
      Corresponds to --ui tsrecorder flag https://tailscale.com/kb/1246/tailscale-ssh-session-recording#deploy-a-recorder-node.
      Required if S3 storage is not set up, to ensure that recordings are accessible. | | | | `storage` _[Storage](#storage)_ | Configure where to store session recordings. By default, recordings will
      be stored in a local ephemeral volume, and will not be persisted past the
      lifetime of a specific pod. | | | -| `replicas` _integer_ | Replicas specifies how many instances of tsrecorder to run. Defaults to 1. | | Minimum: 0
      | +| `replicas` _integer_ | Replicas specifies how many instances of tsrecorder to run. Defaults to 1. | 1 | Minimum: 0
      | | `tailnet` _string_ | Tailnet specifies the tailnet this Recorder should join. If blank, the default tailnet is used. When set, this
      name must match that of a valid Tailnet resource. This field is immutable and cannot be changed once set. | | | diff --git a/k8s-operator/apis/v1alpha1/types_recorder.go b/k8s-operator/apis/v1alpha1/types_recorder.go index 6cc5e3dd572c9..284c3b0ae48f4 100644 --- a/k8s-operator/apis/v1alpha1/types_recorder.go +++ b/k8s-operator/apis/v1alpha1/types_recorder.go @@ -80,6 +80,7 @@ type RecorderSpec struct { // Replicas specifies how many instances of tsrecorder to run. Defaults to 1. // +optional // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=1 Replicas *int32 `json:"replicas,omitzero"` // Tailnet specifies the tailnet this Recorder should join. If blank, the default tailnet is used. When set, this From 14322713a5847176dcc29b8970d71a9ef4361713 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 3 Feb 2026 07:55:41 -0800 Subject: [PATCH 0933/1093] ipn/ipnlocal/netmapcache: ensure cache updates preserve unchanged data (#18590) Found by @cmol. When rewriting the same value into the cache, we were dropping the unchanged keys, resulting in the cache being pruned incorrectly. Also update the tests to catch this. Updates #12639 Change-Id: Iab67e444eb7ddc22ccc680baa2f6a741a00eb325 Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/netmapcache/netmapcache.go | 1 + ipn/ipnlocal/netmapcache/netmapcache_test.go | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/ipn/ipnlocal/netmapcache/netmapcache.go b/ipn/ipnlocal/netmapcache/netmapcache.go index d5706f9b773ac..b12443b99f473 100644 --- a/ipn/ipnlocal/netmapcache/netmapcache.go +++ b/ipn/ipnlocal/netmapcache/netmapcache.go @@ -86,6 +86,7 @@ func (c *Cache) writeJSON(ctx context.Context, key string, v any) error { // this at all? 
last, ok := c.lastWrote[key] if ok && cacheDigest(j) == last.digest { + c.wantKeys.Add(key) return nil } diff --git a/ipn/ipnlocal/netmapcache/netmapcache_test.go b/ipn/ipnlocal/netmapcache/netmapcache_test.go index 437015ccc53e8..b31db2d5eb8b5 100644 --- a/ipn/ipnlocal/netmapcache/netmapcache_test.go +++ b/ipn/ipnlocal/netmapcache/netmapcache_test.go @@ -10,6 +10,7 @@ import ( "flag" "fmt" "iter" + "maps" "os" "reflect" "slices" @@ -181,6 +182,24 @@ func TestRoundTrip(t *testing.T) { }) } + + t.Run("Twice", func(t *testing.T) { + // Verify that storing the same network map twice results in no change. + + s := make(testStore) + c := netmapcache.NewCache(s) + if err := c.Store(t.Context(), testMap); err != nil { + t.Fatalf("Store 1 netmap failed: %v", err) + } + scp := maps.Clone(s) // for comparison, see below + + if err := c.Store(t.Context(), testMap); err != nil { + t.Fatalf("Store 2 netmap failed; %v", err) + } + if diff := cmp.Diff(s, scp); diff != "" { + t.Errorf("Updated store (-got, +want):\n%s", diff) + } + }) } func TestInvalidCache(t *testing.T) { From 7b96c4c23e76beaf4c78ff1d8f96e11e0b07863a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 2 Feb 2026 17:11:01 -0800 Subject: [PATCH 0934/1093] cmd/testwrapper: support experimental -cachelink Updates tailscale/go#149 Change-Id: If0483466eb1fc2196838c75f6d53925b1809abff Signed-off-by: Brad Fitzpatrick --- cmd/testwrapper/args.go | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/cmd/testwrapper/args.go b/cmd/testwrapper/args.go index 350197d4f1271..22e5d4c902f8e 100644 --- a/cmd/testwrapper/args.go +++ b/cmd/testwrapper/args.go @@ -8,6 +8,7 @@ import ( "io" "os" "slices" + "strconv" "strings" "testing" ) @@ -60,6 +61,9 @@ func splitArgs(args []string) (pre, pkgs, post []string, _ error) { return nil, nil, nil, err } fs.Visit(func(f *flag.Flag) { + if f.Name == "cachelink" && !cacheLink.enabled { + return + } if f.Value.String() != f.DefValue && f.DefValue != 
"false" { pre = append(pre, "-"+f.Name, f.Value.String()) } else { @@ -79,6 +83,37 @@ func splitArgs(args []string) (pre, pkgs, post []string, _ error) { return pre, pkgs, post, nil } +// cacheLink is whether the -cachelink flag is enabled. +// +// The -cachelink flag is Tailscale-specific addition to the "go test" command; +// see https://github.com/tailscale/go/issues/149 and +// https://github.com/golang/go/issues/77349. +// +// In that PR, it's only a boolean, but we implement a custom flag type +// so we can support -cachelink=auto, which enables cachelink if GOCACHEPROG +// is set, which is a behavior we want in our CI environment. +var cacheLink cacheLinkVal + +type cacheLinkVal struct { + enabled bool +} + +func (c *cacheLinkVal) String() string { + return strconv.FormatBool(c.enabled) +} + +func (c *cacheLinkVal) Set(s string) error { + if s == "auto" { + c.enabled = os.Getenv("GOCACHEPROG") != "" + return nil + } + var err error + c.enabled, err = strconv.ParseBool(s) + return err +} + +func (*cacheLinkVal) IsBoolFlag() bool { return true } + func newTestFlagSet() *flag.FlagSet { fs := flag.NewFlagSet("testwrapper", flag.ContinueOnError) fs.SetOutput(io.Discard) @@ -90,6 +125,8 @@ func newTestFlagSet() *flag.FlagSet { fs.String("exec", "", "Command to run tests with") fs.Bool("race", false, "build with race detector") fs.String("vet", "", "vet checks to run, or 'off' or 'all'") + + fs.Var(&cacheLink, "cachelink", "Go -cachelink value (bool); or 'auto' to enable if GOCACHEPROG is set") return fs } From 54d70c831226010cf12a388ec69dee333c4eb915 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 3 Feb 2026 12:57:05 -0800 Subject: [PATCH 0935/1093] clientupdate: best-effort restart of tailscaled on init.d systems (#18568) Not all Linux distros use systemd yet, for example GL.iNet KVM devices use busybox's init, which is similar to SysV init. This is a best-effort restart attempt after the update, it probably won't cover 100% of init.d setups out there. 
Fixes #18567 Signed-off-by: Andrew Lytvynov --- clientupdate/clientupdate.go | 58 +++++++++++++++++++++++++++++++----- 1 file changed, 51 insertions(+), 7 deletions(-) diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index e75e425a455b8..09f9d0be1787d 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -11,11 +11,11 @@ import ( "bufio" "bytes" "compress/gzip" - "context" "encoding/json" "errors" "fmt" "io" + "io/fs" "maps" "net/http" "os" @@ -27,6 +27,7 @@ import ( "strconv" "strings" + "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/types/lazy" @@ -288,6 +289,10 @@ func Update(args Arguments) error { } func (up *Updater) confirm(ver string) bool { + if envknob.Bool("TS_UPDATE_SKIP_VERSION_CHECK") { + up.Logf("current version: %v, latest version %v; forcing an update due to TS_UPDATE_SKIP_VERSION_CHECK", up.currentVersion, ver) + return true + } // Only check version when we're not switching tracks. 
if up.Track == "" || up.Track == CurrentTrack { switch c := cmpver.Compare(up.currentVersion, ver); { @@ -865,12 +870,17 @@ func (up *Updater) updateLinuxBinary() error { if err := os.Remove(dlPath); err != nil { up.Logf("failed to clean up %q: %v", dlPath, err) } - if err := restartSystemdUnit(context.Background()); err != nil { + + err = restartSystemdUnit(up.Logf) + if errors.Is(err, errors.ErrUnsupported) { + err = restartInitD() if errors.Is(err, errors.ErrUnsupported) { - up.Logf("Tailscale binaries updated successfully.\nPlease restart tailscaled to finish the update.") - } else { - up.Logf("Tailscale binaries updated successfully, but failed to restart tailscaled: %s.\nPlease restart tailscaled to finish the update.", err) + err = errors.New("tailscaled is not running under systemd or init.d") } + } + if err != nil { + up.Logf("Tailscale binaries updated successfully, but failed to restart tailscaled: %s.\nPlease restart tailscaled to finish the update.", err) + } else { up.Logf("Success") } @@ -878,13 +888,13 @@ func (up *Updater) updateLinuxBinary() error { return nil } -func restartSystemdUnit(ctx context.Context) error { +func restartSystemdUnit(logf logger.Logf) error { if _, err := exec.LookPath("systemctl"); err != nil { // Likely not a systemd-managed distro. return errors.ErrUnsupported } if out, err := exec.Command("systemctl", "daemon-reload").CombinedOutput(); err != nil { - return fmt.Errorf("systemctl daemon-reload failed: %w\noutput: %s", err, out) + logf("systemctl daemon-reload failed: %w\noutput: %s", err, out) } if out, err := exec.Command("systemctl", "restart", "tailscaled.service").CombinedOutput(); err != nil { return fmt.Errorf("systemctl restart failed: %w\noutput: %s", err, out) @@ -892,6 +902,40 @@ func restartSystemdUnit(ctx context.Context) error { return nil } +// restartInitD attempts best-effort restart of tailscale on init.d systems +// (for example, GL.iNet KVM devices running busybox). 
It returns +// errors.ErrUnsupported if the expected service script is not found. +// +// There's probably a million variations of init.d configs out there, and this +// function does not intend to support all of them. +func restartInitD() error { + const initDir = "/etc/init.d/" + files, err := os.ReadDir(initDir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return errors.ErrUnsupported + } + return err + } + for _, f := range files { + // Skip anything other than regular files. + if !f.Type().IsRegular() { + continue + } + // The script will be called something like /etc/init.d/tailscale or + // /etc/init.d/S99tailscale. + if n := f.Name(); strings.HasSuffix(n, "tailscale") { + path := filepath.Join(initDir, n) + if out, err := exec.Command(path, "restart").CombinedOutput(); err != nil { + return fmt.Errorf("%q failed: %w\noutput: %s", path+" restart", err, out) + } + return nil + } + } + // Init script for tailscale not found. + return errors.ErrUnsupported +} + func (up *Updater) downloadLinuxTarball(ver string) (string, error) { dlDir, err := os.UserCacheDir() if err != nil { From 5edfa6f9a8b409908861172882de03e9a67f0c2f Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Tue, 3 Feb 2026 16:08:36 -0500 Subject: [PATCH 0936/1093] ipn/ipnlocal: add wildcard TLS certificate support for subdomains (#18356) When the NodeAttrDNSSubdomainResolve capability is present, enable wildcard certificate issuance to cover all single-level subdomains of a node's CertDomain. Without the capability, only exact CertDomain matches are allowed, so node.ts.net yields a cert for node.ts.net. With the capability, we now generate wildcard certificates. Wildcard certs include both the wildcard and base domain in their SANs, and ACME authorization requests both identifiers. The cert filenames are kept still based on the base domain with the wildcard prefix stripped, so we aren't creating separate files. 
DNS challenges still used the base domain The checkCertDomain function is replaced by resolveCertDomain that both validates and returns the appropriate cert domain to request. Name validation is now moved earlier into GetCertPEMWithValidity() Fixes #1196 Signed-off-by: Fernando Serboncini --- ipn/ipnlocal/cert.go | 135 ++++++++++++++++++------ ipn/ipnlocal/cert_test.go | 217 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 312 insertions(+), 40 deletions(-) diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 764634d30c276..027e7c810778b 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -37,7 +37,6 @@ import ( "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/ipn" - "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/net/bakedroots" @@ -106,6 +105,13 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK // // If a cert is expired, or expires sooner than minValidity, it will be renewed // synchronously. Otherwise it will be renewed asynchronously. +// +// The domain must be one of: +// +// - An exact CertDomain (e.g., "node.ts.net") +// - A wildcard domain (e.g., "*.node.ts.net") +// +// The wildcard format requires the NodeAttrDNSSubdomainResolve capability. 
func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string, minValidity time.Duration) (*TLSCertKeyPair, error) { b.mu.Lock() getCertForTest := b.getCertForTest @@ -119,6 +125,13 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string if !validLookingCertDomain(domain) { return nil, errors.New("invalid domain") } + + certDomain, err := b.resolveCertDomain(domain) + if err != nil { + return nil, err + } + storageKey := strings.TrimPrefix(certDomain, "*.") + logf := logger.WithPrefix(b.logf, fmt.Sprintf("cert(%q): ", domain)) now := b.clock.Now() traceACME := func(v any) { @@ -134,13 +147,13 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string return nil, err } - if pair, err := getCertPEMCached(cs, domain, now); err == nil { + if pair, err := getCertPEMCached(cs, storageKey, now); err == nil { if envknob.IsCertShareReadOnlyMode() { return pair, nil } // If we got here, we have a valid unexpired cert. // Check whether we should start an async renewal. - shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, pair, minValidity) + shouldRenew, err := b.shouldStartDomainRenewal(cs, storageKey, now, pair, minValidity) if err != nil { logf("error checking for certificate renewal: %v", err) // Renewal check failed, but the current cert is valid and not @@ -154,7 +167,7 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string logf("starting async renewal") // Start renewal in the background, return current valid cert. 
b.goTracker.Go(func() { - if _, err := getCertPEM(context.Background(), b, cs, logf, traceACME, domain, now, minValidity); err != nil { + if _, err := getCertPEM(context.Background(), b, cs, logf, traceACME, certDomain, now, minValidity); err != nil { logf("async renewal failed: getCertPem: %v", err) } }) @@ -169,7 +182,7 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string return nil, fmt.Errorf("retrieving cached TLS certificate failed and cert store is configured in read-only mode, not attempting to issue a new certificate: %w", err) } - pair, err := getCertPEM(ctx, b, cs, logf, traceACME, domain, now, minValidity) + pair, err := getCertPEM(ctx, b, cs, logf, traceACME, certDomain, now, minValidity) if err != nil { logf("getCertPEM: %v", err) return nil, err @@ -506,19 +519,24 @@ func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKey } // getCertPem checks if a cert needs to be renewed and if so, renews it. +// domain is the resolved cert domain (e.g., "*.node.ts.net" for wildcards). // It can be overridden in tests. var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) { acmeMu.Lock() defer acmeMu.Unlock() + // storageKey is used for file storage and renewal tracking. + // For wildcards, "*.node.ts.net" -> "node.ts.net" + storageKey, isWildcard := strings.CutPrefix(domain, "*.") + // In case this method was triggered multiple times in parallel (when // serving incoming requests), check whether one of the other goroutines // already renewed the cert before us. - previous, err := getCertPEMCached(cs, domain, now) + previous, err := getCertPEMCached(cs, storageKey, now) if err == nil { // shouldStartDomainRenewal caches its result so it's OK to call this // frequently. 
- shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, previous, minValidity) + shouldRenew, err := b.shouldStartDomainRenewal(cs, storageKey, now, previous, minValidity) if err != nil { logf("error checking for certificate renewal: %v", err) } else if !shouldRenew { @@ -561,12 +579,6 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l return nil, fmt.Errorf("unexpected ACME account status %q", a.Status) } - // Before hitting LetsEncrypt, see if this is a domain that Tailscale will do DNS challenges for. - st := b.StatusWithoutPeers() - if err := checkCertDomain(st, domain); err != nil { - return nil, err - } - // If we have a previous cert, include it in the order. Assuming we're // within the ARI renewal window this should exclude us from LE rate // limits. @@ -580,7 +592,18 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l opts = append(opts, acme.WithOrderReplacesCert(prevCrt)) } } - order, err := ac.AuthorizeOrder(ctx, []acme.AuthzID{{Type: "dns", Value: domain}}, opts...) + + // For wildcards, we need to authorize both the wildcard and base domain. + var authzIDs []acme.AuthzID + if isWildcard { + authzIDs = []acme.AuthzID{ + {Type: "dns", Value: domain}, + {Type: "dns", Value: storageKey}, + } + } else { + authzIDs = []acme.AuthzID{{Type: "dns", Value: domain}} + } + order, err := ac.AuthorizeOrder(ctx, authzIDs, opts...) if err != nil { return nil, err } @@ -598,7 +621,9 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l if err != nil { return nil, err } - key := "_acme-challenge." + domain + // For wildcards, the challenge is on the base domain. + // e.g., "*.node.ts.net" -> "_acme-challenge.node.ts.net" + key := "_acme-challenge." + strings.TrimPrefix(az.Identifier.Value, "*.") // Do a best-effort lookup to see if we've already created this DNS name // in a previous attempt. Don't burn too much time on it, though. 
Worst @@ -608,14 +633,14 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l txts, _ := resolver.LookupTXT(lookupCtx, key) lookupCancel() if slices.Contains(txts, rec) { - logf("TXT record already existed") + logf("TXT record already existed for %s", key) } else { - logf("starting SetDNS call...") + logf("starting SetDNS call for %s...", key) err = b.SetDNS(ctx, key, rec) if err != nil { return nil, fmt.Errorf("SetDNS %q => %q: %w", key, rec, err) } - logf("did SetDNS") + logf("did SetDNS for %s", key) } chal, err := ac.Accept(ctx, ch) @@ -672,19 +697,27 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l return nil, err } } - if err := cs.WriteTLSCertAndKey(domain, certPEM.Bytes(), privPEM.Bytes()); err != nil { + if err := cs.WriteTLSCertAndKey(storageKey, certPEM.Bytes(), privPEM.Bytes()); err != nil { return nil, err } - b.domainRenewed(domain) + b.domainRenewed(storageKey) return &TLSCertKeyPair{CertPEM: certPEM.Bytes(), KeyPEM: privPEM.Bytes()}, nil } -// certRequest generates a CSR for the given common name cn and optional SANs. -func certRequest(key crypto.Signer, name string, ext []pkix.Extension) ([]byte, error) { +// certRequest generates a CSR for the given domain and optional SANs. +func certRequest(key crypto.Signer, domain string, ext []pkix.Extension) ([]byte, error) { + dnsNames := []string{domain} + if base, ok := strings.CutPrefix(domain, "*."); ok { + // Wildcard cert must also include the base domain as a SAN. + // This is load-bearing: getCertPEMCached validates certs using + // the storage key (base domain), which only passes x509 verification + // if the base domain is in DNSNames. 
+ dnsNames = append(dnsNames, base) + } req := &x509.CertificateRequest{ - Subject: pkix.Name{CommonName: name}, - DNSNames: []string{name}, + Subject: pkix.Name{CommonName: domain}, + DNSNames: dnsNames, ExtraExtensions: ext, } return x509.CreateCertificateRequest(rand.Reader, req, key) @@ -844,7 +877,7 @@ func isDefaultDirectoryURL(u string) bool { // we might be able to get a cert for. // // It's a light check primarily for double checking before it's used -// as part of a filesystem path. The actual validation happens in checkCertDomain. +// as part of a filesystem path. The actual validation happens in resolveCertDomain. func validLookingCertDomain(name string) bool { if name == "" || strings.Contains(name, "..") || @@ -852,22 +885,56 @@ func validLookingCertDomain(name string) bool { !strings.Contains(name, ".") { return false } + // Only allow * as a wildcard prefix "*.domain.tld" + if rest, ok := strings.CutPrefix(name, "*."); ok { + if strings.Contains(rest, "*") || !strings.Contains(rest, ".") { + return false + } + } else if strings.Contains(name, "*") { + return false + } return true } -func checkCertDomain(st *ipnstate.Status, domain string) error { +// resolveCertDomain validates a domain and returns the cert domain to use. +// +// - "node.ts.net" -> "node.ts.net" (exact CertDomain match) +// - "*.node.ts.net" -> "*.node.ts.net" (explicit wildcard, requires NodeAttrDNSSubdomainResolve) +// +// Subdomain requests like "app.node.ts.net" are rejected; callers should +// request "*.node.ts.net" explicitly for subdomain coverage. +func (b *LocalBackend) resolveCertDomain(domain string) (string, error) { if domain == "" { - return errors.New("missing domain name") + return "", errors.New("missing domain name") } - for _, d := range st.CertDomains { - if d == domain { - return nil + + // Read the netmap once to get both CertDomains and capabilities atomically. 
+ nm := b.NetMap() + if nm == nil { + return "", errors.New("no netmap available") + } + certDomains := nm.DNS.CertDomains + if len(certDomains) == 0 { + return "", errors.New("your Tailscale account does not support getting TLS certs") + } + + // Wildcard request like "*.node.ts.net". + if base, ok := strings.CutPrefix(domain, "*."); ok { + if !nm.AllCaps.Contains(tailcfg.NodeAttrDNSSubdomainResolve) { + return "", fmt.Errorf("wildcard certificates are not enabled for this node") } + if !slices.Contains(certDomains, base) { + return "", fmt.Errorf("invalid domain %q; parent domain must be one of %q", domain, certDomains) + } + return domain, nil } - if len(st.CertDomains) == 0 { - return errors.New("your Tailscale account does not support getting TLS certs") + + // Exact CertDomain match. + if slices.Contains(certDomains, domain) { + return domain, nil } - return fmt.Errorf("invalid domain %q; must be one of %q", domain, st.CertDomains) + + return "", fmt.Errorf("invalid domain %q; must be one of %q", domain, certDomains) } // handleC2NTLSCertStatus returns info about the last TLS certificate issued for the @@ -884,7 +951,7 @@ func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Requ return } - domain := r.FormValue("domain") + domain := strings.TrimPrefix(r.FormValue("domain"), "*.") if domain == "" { http.Error(w, "no 'domain'", http.StatusBadRequest) return diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index ec7be570c78f7..b8acb710ac7d3 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -17,17 +17,205 @@ import ( "math/big" "os" "path/filepath" + "slices" "testing" "time" "github.com/google/go-cmp/cmp" "tailscale.com/envknob" "tailscale.com/ipn/store/mem" + "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/logger" + "tailscale.com/types/netmap" "tailscale.com/util/must" + "tailscale.com/util/set" ) +func TestCertRequest(t *testing.T) { + key, err := 
ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("GenerateKey: %v", err) + } + + tests := []struct { + domain string + wantSANs []string + }{ + { + domain: "example.com", + wantSANs: []string{"example.com"}, + }, + { + domain: "*.example.com", + wantSANs: []string{"*.example.com", "example.com"}, + }, + { + domain: "*.foo.bar.com", + wantSANs: []string{"*.foo.bar.com", "foo.bar.com"}, + }, + } + + for _, tt := range tests { + t.Run(tt.domain, func(t *testing.T) { + csrDER, err := certRequest(key, tt.domain, nil) + if err != nil { + t.Fatalf("certRequest: %v", err) + } + csr, err := x509.ParseCertificateRequest(csrDER) + if err != nil { + t.Fatalf("ParseCertificateRequest: %v", err) + } + if csr.Subject.CommonName != tt.domain { + t.Errorf("CommonName = %q, want %q", csr.Subject.CommonName, tt.domain) + } + if !slices.Equal(csr.DNSNames, tt.wantSANs) { + t.Errorf("DNSNames = %v, want %v", csr.DNSNames, tt.wantSANs) + } + }) + } +} + +func TestResolveCertDomain(t *testing.T) { + tests := []struct { + name string + domain string + certDomains []string + hasCap bool + skipNetmap bool + want string + wantErr string + }{ + { + name: "exact_match", + domain: "node.ts.net", + certDomains: []string{"node.ts.net"}, + want: "node.ts.net", + }, + { + name: "exact_match_with_cap", + domain: "node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + want: "node.ts.net", + }, + { + name: "wildcard_with_cap", + domain: "*.node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + want: "*.node.ts.net", + }, + { + name: "wildcard_without_cap", + domain: "*.node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: false, + wantErr: "wildcard certificates are not enabled for this node", + }, + { + name: "subdomain_with_cap_rejected", + domain: "app.node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + wantErr: `invalid domain "app.node.ts.net"; must be one of ["node.ts.net"]`, + }, + { + name: 
"subdomain_without_cap_rejected", + domain: "app.node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: false, + wantErr: `invalid domain "app.node.ts.net"; must be one of ["node.ts.net"]`, + }, + { + name: "multi_level_subdomain_rejected", + domain: "a.b.node.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + wantErr: `invalid domain "a.b.node.ts.net"; must be one of ["node.ts.net"]`, + }, + { + name: "wildcard_no_matching_parent", + domain: "*.unrelated.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + wantErr: `invalid domain "*.unrelated.ts.net"; parent domain must be one of ["node.ts.net"]`, + }, + { + name: "subdomain_unrelated_rejected", + domain: "app.unrelated.ts.net", + certDomains: []string{"node.ts.net"}, + hasCap: true, + wantErr: `invalid domain "app.unrelated.ts.net"; must be one of ["node.ts.net"]`, + }, + { + name: "no_cert_domains", + domain: "node.ts.net", + certDomains: nil, + wantErr: "your Tailscale account does not support getting TLS certs", + }, + { + name: "wildcard_no_cert_domains", + domain: "*.foo.ts.net", + certDomains: nil, + hasCap: true, + wantErr: "your Tailscale account does not support getting TLS certs", + }, + { + name: "empty_domain", + domain: "", + certDomains: []string{"node.ts.net"}, + wantErr: "missing domain name", + }, + { + name: "nil_netmap", + domain: "node.ts.net", + skipNetmap: true, + wantErr: "no netmap available", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := newTestLocalBackend(t) + + if !tt.skipNetmap { + // Set up netmap with CertDomains and capability + var allCaps set.Set[tailcfg.NodeCapability] + if tt.hasCap { + allCaps = set.Of(tailcfg.NodeAttrDNSSubdomainResolve) + } + b.mu.Lock() + b.currentNode().SetNetMap(&netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{}).View(), + DNS: tailcfg.DNSConfig{ + CertDomains: tt.certDomains, + }, + AllCaps: allCaps, + }) + b.mu.Unlock() + } + + got, err := b.resolveCertDomain(tt.domain) + if 
tt.wantErr != "" { + if err == nil { + t.Errorf("resolveCertDomain(%q) = %q, want error %q", tt.domain, got, tt.wantErr) + } else if err.Error() != tt.wantErr { + t.Errorf("resolveCertDomain(%q) error = %q, want %q", tt.domain, err.Error(), tt.wantErr) + } + return + } + if err != nil { + t.Errorf("resolveCertDomain(%q) error = %v, want nil", tt.domain, err) + return + } + if got != tt.want { + t.Errorf("resolveCertDomain(%q) = %q, want %q", tt.domain, got, tt.want) + } + }) + } +} + func TestValidLookingCertDomain(t *testing.T) { tests := []struct { in string @@ -40,6 +228,16 @@ func TestValidLookingCertDomain(t *testing.T) { {"", false}, {"foo\\bar.com", false}, {"foo\x00bar.com", false}, + // Wildcard tests + {"*.foo.com", true}, + {"*.foo.bar.com", true}, + {"*foo.com", false}, // must be *. + {"*.com", false}, // must have domain after *. + {"*.", false}, // must have domain after *. + {"*.*.foo.com", false}, // no nested wildcards + {"foo.*.bar.com", false}, // no wildcard mid-string + {"app.foo.com", true}, // regular subdomain + {"*", false}, // bare asterisk } for _, tt := range tests { if got := validLookingCertDomain(tt.in); got != tt.want { @@ -231,12 +429,19 @@ func TestDebugACMEDirectoryURL(t *testing.T) { func TestGetCertPEMWithValidity(t *testing.T) { const testDomain = "example.com" - b := &LocalBackend{ - store: &mem.Store{}, - varRoot: t.TempDir(), - ctx: context.Background(), - logf: t.Logf, - } + b := newTestLocalBackend(t) + b.varRoot = t.TempDir() + + // Set up netmap with CertDomains so resolveCertDomain works + b.mu.Lock() + b.currentNode().SetNetMap(&netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{}).View(), + DNS: tailcfg.DNSConfig{ + CertDomains: []string{testDomain}, + }, + }) + b.mu.Unlock() + certDir, err := b.certDir() if err != nil { t.Fatalf("certDir error: %v", err) From 569caefeb55dd3af2d86d558d46cb2d2d1ef4258 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Thu, 29 Jan 2026 14:25:32 -0800 Subject: [PATCH 0937/1093] tsnet: add 
tests to TestListenService for user-supplied TUN devices This resolves a gap in test coverage, ensuring Server.ListenService functions as expected in combination with user-supplied TUN devices Fixes tailscale/corp#36603 Co-authored-by: Harry Harpham Signed-off-by: Harry Harpham --- tsnet/tsnet_test.go | 195 +++++++++++++++++++++++--------------------- 1 file changed, 103 insertions(+), 92 deletions(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index aeee43646cb0a..41d239e3b91be 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1141,83 +1141,91 @@ func TestListenService(t *testing.T) { // This ends up also testing the Service forwarding logic in // LocalBackend, but that's useful too. t.Run(tt.name, func(t *testing.T) { - ctx := t.Context() - - controlURL, control := startControl(t) - serviceHost, _, _ := startServer(t, ctx, controlURL, "service-host") - serviceClient, _, _ := startServer(t, ctx, controlURL, "service-client") - - const serviceName = tailcfg.ServiceName("svc:foo") - const serviceVIP = "100.11.22.33" - - // == Set up necessary state in our mock == - - // The Service host must have the 'service-host' capability, which - // is a mapping from the Service name to the Service VIP. - var serviceHostCaps map[tailcfg.ServiceName]views.Slice[netip.Addr] - mak.Set(&serviceHostCaps, serviceName, views.SliceOf([]netip.Addr{netip.MustParseAddr(serviceVIP)})) - j := must.Get(json.Marshal(serviceHostCaps)) - cm := serviceHost.lb.NetMap().SelfNode.CapMap().AsMap() - mak.Set(&cm, tailcfg.NodeAttrServiceHost, []tailcfg.RawMessage{tailcfg.RawMessage(j)}) - control.SetNodeCapMap(serviceHost.lb.NodeKey(), cm) - - // The Service host must be allowed to advertise the Service VIP. - control.SetSubnetRoutes(serviceHost.lb.NodeKey(), []netip.Prefix{ - netip.MustParsePrefix(serviceVIP + `/32`), - }) - - // The Service host must be a tagged node (any tag will do). 
- serviceHostNode := control.Node(serviceHost.lb.NodeKey()) - serviceHostNode.Tags = append(serviceHostNode.Tags, "some-tag") - control.UpdateNode(serviceHostNode) - - // The service client must accept routes advertised by other nodes - // (RouteAll is equivalent to --accept-routes). - must.Get(serviceClient.localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ - RouteAllSet: true, - Prefs: ipn.Prefs{ - RouteAll: true, - }, - })) - - // Set up DNS for our Service. - control.AddDNSRecords(tailcfg.DNSRecord{ - Name: serviceName.WithoutPrefix() + "." + control.MagicDNSDomain, - Value: serviceVIP, - }) + // We run each test with and without a TUN device ([Server.Tun]). + // Note that this TUN device is distinct from TUN mode for Services. + doTest := func(t *testing.T, withTUNDevice bool) { + ctx := t.Context() + + lt := setupTwoClientTest(t, withTUNDevice) + serviceHost := lt.s2 + serviceClient := lt.s1 + control := lt.control + + const serviceName = tailcfg.ServiceName("svc:foo") + const serviceVIP = "100.11.22.33" + + // == Set up necessary state in our mock == + + // The Service host must have the 'service-host' capability, which + // is a mapping from the Service name to the Service VIP. + var serviceHostCaps map[tailcfg.ServiceName]views.Slice[netip.Addr] + mak.Set(&serviceHostCaps, serviceName, views.SliceOf([]netip.Addr{netip.MustParseAddr(serviceVIP)})) + j := must.Get(json.Marshal(serviceHostCaps)) + cm := serviceHost.lb.NetMap().SelfNode.CapMap().AsMap() + mak.Set(&cm, tailcfg.NodeAttrServiceHost, []tailcfg.RawMessage{tailcfg.RawMessage(j)}) + control.SetNodeCapMap(serviceHost.lb.NodeKey(), cm) + + // The Service host must be allowed to advertise the Service VIP. + control.SetSubnetRoutes(serviceHost.lb.NodeKey(), []netip.Prefix{ + netip.MustParsePrefix(serviceVIP + `/32`), + }) - if tt.extraSetup != nil { - tt.extraSetup(t, control) - } + // The Service host must be a tagged node (any tag will do). 
+ serviceHostNode := control.Node(serviceHost.lb.NodeKey()) + serviceHostNode.Tags = append(serviceHostNode.Tags, "some-tag") + control.UpdateNode(serviceHostNode) + + // The service client must accept routes advertised by other nodes + // (RouteAll is equivalent to --accept-routes). + must.Get(serviceClient.localClient.EditPrefs(ctx, &ipn.MaskedPrefs{ + RouteAllSet: true, + Prefs: ipn.Prefs{ + RouteAll: true, + }, + })) - // Force netmap updates to avoid race conditions. The nodes need to - // see our control updates before we can start the test. - must.Do(control.ForceNetmapUpdate(ctx, serviceHost.lb.NodeKey())) - must.Do(control.ForceNetmapUpdate(ctx, serviceClient.lb.NodeKey())) - netmapUpToDate := func(s *Server) bool { - nm := s.lb.NetMap() - return slices.ContainsFunc(nm.DNS.ExtraRecords, func(r tailcfg.DNSRecord) bool { - return r.Value == serviceVIP + // Set up DNS for our Service. + control.AddDNSRecords(tailcfg.DNSRecord{ + Name: serviceName.WithoutPrefix() + "." + control.MagicDNSDomain, + Value: serviceVIP, }) - } - for !netmapUpToDate(serviceClient) { - time.Sleep(10 * time.Millisecond) - } - for !netmapUpToDate(serviceHost) { - time.Sleep(10 * time.Millisecond) - } - // == Done setting up mock state == + if tt.extraSetup != nil { + tt.extraSetup(t, control) + } + + // Force netmap updates to avoid race conditions. The nodes need to + // see our control updates before we can start the test. + must.Do(control.ForceNetmapUpdate(ctx, serviceHost.lb.NodeKey())) + must.Do(control.ForceNetmapUpdate(ctx, serviceClient.lb.NodeKey())) + netmapUpToDate := func(s *Server) bool { + nm := s.lb.NetMap() + return slices.ContainsFunc(nm.DNS.ExtraRecords, func(r tailcfg.DNSRecord) bool { + return r.Value == serviceVIP + }) + } + for !netmapUpToDate(serviceClient) { + time.Sleep(10 * time.Millisecond) + } + for !netmapUpToDate(serviceHost) { + time.Sleep(10 * time.Millisecond) + } - // Start the Service listeners. 
- listeners := make([]*ServiceListener, 0, len(tt.modes)) - for _, input := range tt.modes { - ln := must.Get(serviceHost.ListenService(serviceName.String(), input)) - defer ln.Close() - listeners = append(listeners, ln) + // == Done setting up mock state == + + // Start the Service listeners. + listeners := make([]*ServiceListener, 0, len(tt.modes)) + for _, input := range tt.modes { + ln := must.Get(serviceHost.ListenService(serviceName.String(), input)) + defer ln.Close() + listeners = append(listeners, ln) + } + + tt.run(t, listeners, serviceClient) } - tt.run(t, listeners, serviceClient) + t.Run("TUN", func(t *testing.T) { doTest(t, true) }) + t.Run("netstack", func(t *testing.T) { doTest(t, false) }) }) } } @@ -1928,20 +1936,21 @@ func (t *chanTUN) BatchSize() int { return 1 } // listenTest provides common setup for listener and TUN tests. type listenTest struct { + control *testcontrol.Server s1, s2 *Server s1ip4, s1ip6 netip.Addr s2ip4, s2ip6 netip.Addr tun *chanTUN // nil for netstack mode } -// setupListenTest creates two tsnet servers for testing. +// setupTwoClientTest creates two tsnet servers for testing. // If useTUN is true, s2 uses a chanTUN; otherwise it uses netstack only. 
-func setupListenTest(t *testing.T, useTUN bool) *listenTest { +func setupTwoClientTest(t *testing.T, useTUN bool) *listenTest { t.Helper() tstest.Shard(t) tstest.ResourceCheck(t) ctx := t.Context() - controlURL, _ := startControl(t) + controlURL, control := startControl(t) s1, _, _ := startServer(t, ctx, controlURL, "s1") tmp := filepath.Join(t.TempDir(), "s2") @@ -1969,6 +1978,7 @@ func setupListenTest(t *testing.T, useTUN bool) *listenTest { if err != nil { t.Fatal(err) } + s2.lb.ConfigureCertsForTest(testCertRoot.getCert) s1ip4, s1ip6 := s1.TailscaleIPs() s2ip4 := s2status.TailscaleIPs[0] @@ -1981,13 +1991,14 @@ func setupListenTest(t *testing.T, useTUN bool) *listenTest { must.Get(lc1.Ping(ctx, s2ip4, tailcfg.PingTSMP)) return &listenTest{ - s1: s1, - s2: s2, - s1ip4: s1ip4, - s1ip6: s1ip6, - s2ip4: s2ip4, - s2ip6: s2ip6, - tun: tun, + control: control, + s1: s1, + s2: s2, + s1ip4: s1ip4, + s1ip6: s1ip6, + s2ip4: s2ip4, + s2ip6: s2ip6, + tun: tun, } } @@ -2016,7 +2027,7 @@ func echoUDP(pkt []byte) []byte { } func TestTUN(t *testing.T) { - tt := setupListenTest(t, true) + tt := setupTwoClientTest(t, true) go func() { for pkt := range tt.tun.Inbound { @@ -2059,7 +2070,7 @@ func TestTUN(t *testing.T) { // responses. This verifies that handleLocalPackets intercepts outbound traffic // to the service IP. 
func TestTUNDNS(t *testing.T) { - tt := setupListenTest(t, true) + tt := setupTwoClientTest(t, true) test := func(t *testing.T, srcIP netip.Addr, serviceIP netip.Addr) { tt.tun.Outbound <- buildDNSQuery("s2", srcIP) @@ -2149,13 +2160,13 @@ func TestListenPacket(t *testing.T) { } t.Run("Netstack", func(t *testing.T) { - lt := setupListenTest(t, false) + lt := setupTwoClientTest(t, false) t.Run("IPv4", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip4) }) t.Run("IPv6", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip6) }) }) t.Run("TUN", func(t *testing.T) { - lt := setupListenTest(t, true) + lt := setupTwoClientTest(t, true) t.Run("IPv4", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip4) }) t.Run("IPv6", func(t *testing.T) { testListenPacket(t, lt, lt.s2ip6) }) }) @@ -2221,13 +2232,13 @@ func TestListenTCP(t *testing.T) { } t.Run("Netstack", func(t *testing.T) { - lt := setupListenTest(t, false) + lt := setupTwoClientTest(t, false) t.Run("IPv4", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip4) }) t.Run("IPv6", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip6) }) }) t.Run("TUN", func(t *testing.T) { - lt := setupListenTest(t, true) + lt := setupTwoClientTest(t, true) t.Run("IPv4", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip4) }) t.Run("IPv6", func(t *testing.T) { testListenTCP(t, lt, lt.s2ip6) }) }) @@ -2299,13 +2310,13 @@ func TestListenTCPDualStack(t *testing.T) { } t.Run("Netstack", func(t *testing.T) { - lt := setupListenTest(t, false) + lt := setupTwoClientTest(t, false) t.Run("DialIPv4", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip4) }) t.Run("DialIPv6", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip6) }) }) t.Run("TUN", func(t *testing.T) { - lt := setupListenTest(t, true) + lt := setupTwoClientTest(t, true) t.Run("DialIPv4", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip4) }) t.Run("DialIPv6", func(t *testing.T) { testListenTCPDualStack(t, lt, lt.s2ip6) }) }) @@ -2372,13 +2383,13 @@ func 
TestDialTCP(t *testing.T) { } t.Run("Netstack", func(t *testing.T) { - lt := setupListenTest(t, false) + lt := setupTwoClientTest(t, false) t.Run("IPv4", func(t *testing.T) { testDialTCP(t, lt, lt.s1ip4) }) t.Run("IPv6", func(t *testing.T) { testDialTCP(t, lt, lt.s1ip6) }) }) t.Run("TUN", func(t *testing.T) { - lt := setupListenTest(t, true) + lt := setupTwoClientTest(t, true) var escapedTCPPackets atomic.Int32 var wg sync.WaitGroup @@ -2460,13 +2471,13 @@ func TestDialUDP(t *testing.T) { } t.Run("Netstack", func(t *testing.T) { - lt := setupListenTest(t, false) + lt := setupTwoClientTest(t, false) t.Run("IPv4", func(t *testing.T) { testDialUDP(t, lt, lt.s1ip4) }) t.Run("IPv6", func(t *testing.T) { testDialUDP(t, lt, lt.s1ip6) }) }) t.Run("TUN", func(t *testing.T) { - lt := setupListenTest(t, true) + lt := setupTwoClientTest(t, true) var escapedUDPPackets atomic.Int32 var wg sync.WaitGroup From 40cd54daf73a154c3f8b60c020d70b11c1b5aa85 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 4 Feb 2026 10:30:55 -0800 Subject: [PATCH 0938/1093] cmd/tailscale: remove dep on clientupdate package if feature is omitted We already had a featuretag for clientupdate, but the CLI wasn't using it, making the "minbox" build (minimal combined tailscaled + CLI build) larger than necessary. Updates #12614 Change-Id: Idd7546c67dece7078f25b8f2ae9886f58d599002 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli.go | 3 ++- cmd/tailscale/cli/update.go | 10 ++++++++++ cmd/tailscale/cli/version.go | 10 ++++++++-- cmd/tailscaled/depaware-minbox.txt | 9 +-------- cmd/tailscaled/deps_test.go | 11 ++++++++--- 5 files changed, 29 insertions(+), 14 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index dca7559cf1923..cde9d341c43c7 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -219,6 +219,7 @@ var ( maybeFunnelCmd, maybeServeCmd, maybeCertCmd, + maybeUpdateCmd, _ func() *ffcli.Command ) @@ -270,7 +271,7 @@ change in the future. 
nilOrCall(maybeNetlockCmd), licensesCmd, exitNodeCmd(), - updateCmd, + nilOrCall(maybeUpdateCmd), whoisCmd, debugCmd(), nilOrCall(maybeDriveCmd), diff --git a/cmd/tailscale/cli/update.go b/cmd/tailscale/cli/update.go index 291bf4330cd63..6d57e6d41f110 100644 --- a/cmd/tailscale/cli/update.go +++ b/cmd/tailscale/cli/update.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_clientupdate + package cli import ( @@ -17,6 +19,14 @@ import ( "tailscale.com/version/distro" ) +func init() { + maybeUpdateCmd = func() *ffcli.Command { return updateCmd } + + clientupdateLatestTailscaleVersion.Set(func() (string, error) { + return clientupdate.LatestTailscaleVersion(clientupdate.CurrentTrack) + }) +} + var updateCmd = &ffcli.Command{ Name: "update", ShortUsage: "tailscale update", diff --git a/cmd/tailscale/cli/version.go b/cmd/tailscale/cli/version.go index f23ee0b69f834..2c6a3738bd36a 100644 --- a/cmd/tailscale/cli/version.go +++ b/cmd/tailscale/cli/version.go @@ -10,7 +10,7 @@ import ( "fmt" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/clientupdate" + "tailscale.com/feature" "tailscale.com/ipn/ipnstate" "tailscale.com/version" ) @@ -35,6 +35,8 @@ var versionArgs struct { upstream bool } +var clientupdateLatestTailscaleVersion feature.Hook[func() (string, error)] + func runVersion(ctx context.Context, args []string) error { if len(args) > 0 { return fmt.Errorf("too many non-flag arguments: %q", args) @@ -51,7 +53,11 @@ func runVersion(ctx context.Context, args []string) error { var upstreamVer string if versionArgs.upstream { - upstreamVer, err = clientupdate.LatestTailscaleVersion(clientupdate.CurrentTrack) + f, ok := clientupdateLatestTailscaleVersion.GetOk() + if !ok { + return fmt.Errorf("fetching latest version not supported in this build") + } + upstreamVer, err = f() if err != nil { return err } diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 
5121b56d0d281..938df6bb46be8 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -1,7 +1,5 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) - filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus - filippo.io/edwards25519/field from filippo.io/edwards25519 github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart @@ -12,7 +10,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli @@ -50,8 +47,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ - tailscale.com/clientupdate from tailscale.com/cmd/tailscale/cli - tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscaled tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete @@ -175,7 +170,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 
tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ tailscale.com/util/cloudinfo from tailscale.com/wgengine/magicsock - tailscale.com/util/cmpver from tailscale.com/clientupdate tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/client/local+ @@ -268,7 +262,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna - archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ @@ -428,7 +421,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de path from io/fs+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from tailscale.com/clientupdate+ + regexp from tailscale.com/cmd/tailscale/cli regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index d06924b927a97..9a6f532c1f803 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -285,9 +285,14 @@ func TestMinTailscaledWithCLI(t *testing.T) { } }, BadDeps: map[string]string{ - "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", - "expvar": "unexpected expvar dep", - "github.com/mdlayher/genetlink": "unexpected genetlink dep", + "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", + "expvar": "unexpected expvar dep", + "github.com/mdlayher/genetlink": "unexpected genetlink dep", + "tailscale.com/clientupdate": "unexpected clientupdate dep", + "filippo.io/edwards25519": 
"unexpected edwards25519 dep", + "github.com/hdevalence/ed25519consensus": "unexpected ed25519consensus dep", + "tailscale.com/clientupdate/distsign": "unexpected distsign dep", + "archive/tar": "unexpected archive/tar dep", }, }.Check(t) } From 642d1aaa6096993e0a3640dd6afccc6b74f083be Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 4 Feb 2026 12:11:00 -0800 Subject: [PATCH 0939/1093] cmd/tailscaled,feature/conn25,feature/featuretags: add conn25 to featuretags Package feature/conn25 is excludeable from a build via the featuretag. Test it is excluded for minimal builds. Updates #12614 Signed-off-by: Fran Bull --- cmd/tailscaled/depaware-min.txt | 5 ++--- cmd/tailscaled/depaware-minbox.txt | 5 ++--- cmd/tailscaled/deps_test.go | 1 + feature/buildfeatures/feature_conn25_disabled.go | 13 +++++++++++++ feature/buildfeatures/feature_conn25_enabled.go | 13 +++++++++++++ feature/conn25/conn25.go | 3 +++ feature/featuretags/featuretags.go | 1 + 7 files changed, 35 insertions(+), 6 deletions(-) create mode 100644 feature/buildfeatures/feature_conn25_disabled.go create mode 100644 feature/buildfeatures/feature_conn25_enabled.go diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index a2d20dedaf243..e536ac59dde37 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -35,7 +35,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled @@ -58,14 +58,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 
tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister - tailscale.com/feature/conn25 from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 938df6bb46be8..41bf4d9842084 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -42,7 +42,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/tailscale+ tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale @@ -73,7 +73,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ 
tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli+ - tailscale.com/feature/conn25 from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ @@ -81,7 +80,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 9a6f532c1f803..118913848a52b 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -293,6 +293,7 @@ func TestMinTailscaledWithCLI(t *testing.T) { "github.com/hdevalence/ed25519consensus": "unexpected ed25519consensus dep", "tailscale.com/clientupdate/distsign": "unexpected distsign dep", "archive/tar": "unexpected archive/tar dep", + "tailscale.com/feature/conn25": "unexpected conn25 dep", }, }.Check(t) } diff --git a/feature/buildfeatures/feature_conn25_disabled.go b/feature/buildfeatures/feature_conn25_disabled.go new file mode 100644 index 0000000000000..29d6452400cb3 --- /dev/null +++ b/feature/buildfeatures/feature_conn25_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_conn25 + +package buildfeatures + +// HasConn25 is whether the binary was built with support for modular feature "Route traffic for configured domains through connector devices". +// Specifically, it's whether the binary was NOT built with the "ts_omit_conn25" build tag. +// It's a const so it can be used for dead code elimination. +const HasConn25 = false diff --git a/feature/buildfeatures/feature_conn25_enabled.go b/feature/buildfeatures/feature_conn25_enabled.go new file mode 100644 index 0000000000000..a0d95477cf2d1 --- /dev/null +++ b/feature/buildfeatures/feature_conn25_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_conn25 + +package buildfeatures + +// HasConn25 is whether the binary was built with support for modular feature "Route traffic for configured domains through connector devices". +// Specifically, it's whether the binary was NOT built with the "ts_omit_conn25" build tag. +// It's a const so it can be used for dead code elimination. +const HasConn25 = true diff --git a/feature/conn25/conn25.go b/feature/conn25/conn25.go index 2a2b75a2d8b19..33ba0e486abe3 100644 --- a/feature/conn25/conn25.go +++ b/feature/conn25/conn25.go @@ -2,6 +2,9 @@ // SPDX-License-Identifier: BSD-3-Clause // Package conn25 registers the conn25 feature and implements its associated ipnext.Extension. +// conn25 will be an app connector like feature that routes traffic for configured domains via +// connector devices and avoids the "too many routes" pitfall of app connector. It is currently +// (2026-02-04) some peer API routes for clients to tell connectors about their desired routing. 
package conn25 import ( diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index c0a72a38d1fdd..5f72e3dda98b4 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -138,6 +138,7 @@ var Features = map[FeatureTag]FeatureMeta{ Deps: []FeatureTag{"c2n"}, }, "completion": {Sym: "Completion", Desc: "CLI shell completion"}, + "conn25": {Sym: "Conn25", Desc: "Route traffic for configured domains through connector devices"}, "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, "dbus": { Sym: "DBus", From 036b6a12621306da8368b167deb9858d4a8d6ce9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 4 Feb 2026 12:10:46 -0800 Subject: [PATCH 0940/1093] feature/featuretags: add test that all ts_omit_foo tags are declared Updates #12614 Change-Id: I49351fe0c463af0b8d940e8088d4748906a8aec3 Signed-off-by: Brad Fitzpatrick --- .../feature_completion_scripts_disabled.go | 13 ++++++ .../feature_completion_scripts_enabled.go | 13 ++++++ feature/featuretags/featuretags.go | 6 ++- feature/featuretags/featuretags_test.go | 40 +++++++++++++++++++ 4 files changed, 71 insertions(+), 1 deletion(-) create mode 100644 feature/buildfeatures/feature_completion_scripts_disabled.go create mode 100644 feature/buildfeatures/feature_completion_scripts_enabled.go diff --git a/feature/buildfeatures/feature_completion_scripts_disabled.go b/feature/buildfeatures/feature_completion_scripts_disabled.go new file mode 100644 index 0000000000000..e22ce69fc708a --- /dev/null +++ b/feature/buildfeatures/feature_completion_scripts_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_completion_scripts + +package buildfeatures + +// HasCompletionScripts is whether the binary was built with support for modular feature "embed CLI shell completion scripts". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_completion_scripts" build tag. +// It's a const so it can be used for dead code elimination. +const HasCompletionScripts = false diff --git a/feature/buildfeatures/feature_completion_scripts_enabled.go b/feature/buildfeatures/feature_completion_scripts_enabled.go new file mode 100644 index 0000000000000..c3ecd83cac661 --- /dev/null +++ b/feature/buildfeatures/feature_completion_scripts_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_completion_scripts + +package buildfeatures + +// HasCompletionScripts is whether the binary was built with support for modular feature "embed CLI shell completion scripts". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion_scripts" build tag. +// It's a const so it can be used for dead code elimination. +const HasCompletionScripts = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 5f72e3dda98b4..45daaec5ec29f 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -139,7 +139,11 @@ var Features = map[FeatureTag]FeatureMeta{ }, "completion": {Sym: "Completion", Desc: "CLI shell completion"}, "conn25": {Sym: "Conn25", Desc: "Route traffic for configured domains through connector devices"}, - "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, + "completion_scripts": { + Sym: "CompletionScripts", Desc: "embed CLI shell completion scripts", + Deps: []FeatureTag{"completion"}, + }, + "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, "dbus": { Sym: "DBus", Desc: "Linux DBus support", diff --git a/feature/featuretags/featuretags_test.go b/feature/featuretags/featuretags_test.go index b970295779591..19c9722a6f300 100644 --- 
a/feature/featuretags/featuretags_test.go +++ b/feature/featuretags/featuretags_test.go @@ -5,7 +5,12 @@ package featuretags import ( "maps" + "os" + "os/exec" + "path/filepath" + "regexp" "slices" + "strings" "testing" "tailscale.com/util/set" @@ -83,3 +88,38 @@ func TestRequiredBy(t *testing.T) { } } } + +// Verify that all "ts_omit_foo" build tags are declared in featuretags.go +func TestAllOmitBuildTagsDeclared(t *testing.T) { + dir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + root := filepath.Join(dir, "..", "..") + + cmd := exec.Command("git", "grep", "ts_omit_") + cmd.Dir = root + out, err := cmd.CombinedOutput() + if err != nil { + if _, err := exec.LookPath("git"); err != nil { + t.Skipf("git not found in PATH; skipping test") + } + t.Fatalf("git grep failed: %v\nOutput:\n%s", err, out) + } + rx := regexp.MustCompile(`\bts_omit_[\w_]+\b`) + found := set.Set[string]{} + rx.ReplaceAllFunc(out, func(tag []byte) []byte { + tagStr := string(tag) + found.Add(tagStr) + return tag + }) + for tag := range found { + if strings.EqualFold(tag, "ts_omit_foo") { + continue + } + ft := FeatureTag(strings.TrimPrefix(tag, "ts_omit_")) + if _, ok := Features[ft]; !ok { + t.Errorf("found undeclared ts_omit_* build tags: %v", tag) + } + } +} From 6587cafb3fa3c59b81c92e566f851b2efd65524b Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Thu, 5 Feb 2026 10:45:24 -0700 Subject: [PATCH 0941/1093] cmd/tailscale: use advertise tags from prefs for OAuth and id federation Use the parsed and validated advertise tags value from prefs instead of doing a strings.Split on the raw tags value as an input to the OAuth and identity federation auth key generation methods. The previous strings.Split method would return an array with a single empty string element which would pass downstream length checks on the tags argument before eventually failing with a confusing message when hitting the API. 
Fixes https://github.com/tailscale/tailscale/issues/18617 Signed-off-by: Mario Minardi --- cmd/tailscale/cli/up.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 79f7cc3f44a88..d78cb2d44bfb2 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -641,7 +641,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } - authKey, err = f(ctx, clientSecret, strings.Split(upArgs.advertiseTags, ",")) + authKey, err = f(ctx, clientSecret, prefs.AdvertiseTags) if err != nil { return err } @@ -654,7 +654,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE return err } - authKey, err = f(ctx, prefs.ControlURL, upArgs.clientID, idToken, upArgs.audience, strings.Split(upArgs.advertiseTags, ",")) + authKey, err = f(ctx, prefs.ControlURL, upArgs.clientID, idToken, upArgs.audience, prefs.AdvertiseTags) if err != nil { return err } From 058cc3f82bfcaa8d5b49d00d5e9c46fdcd289bbd Mon Sep 17 00:00:00 2001 From: Will Hannah Date: Fri, 6 Feb 2026 09:40:55 -0500 Subject: [PATCH 0942/1093] ipn/ipnlocal: skip AuthKey use if profiles exist (#18619) If any profiles exist and an Authkey is provided via syspolicy, the AuthKey is ignored on backend start, preventing re-auth attempts. This is useful for one-time device provisioning scenarios, skipping authKey use after initial setup when the authKey may no longer be valid. 
updates #18618 Signed-off-by: Will Hannah --- ipn/ipnlocal/local.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 300f7a4c3186d..821f79abfbcec 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -2478,7 +2478,9 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { if b.state != ipn.Running && b.conf == nil && opts.AuthKey == "" { sysak, _ := b.polc.GetString(pkey.AuthKey, "") - if sysak != "" { + if sysak != "" && len(b.pm.Profiles()) > 0 && b.state != ipn.NeedsLogin { + logf("not setting opts.AuthKey from syspolicy; login profiles exist, state=%v", b.state) + } else if sysak != "" { logf("setting opts.AuthKey by syspolicy, len=%v", len(sysak)) opts.AuthKey = strings.TrimSpace(sysak) } From 0c5b17c1d34ce1a67d3af7fbc0e7908e8b74cf09 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 4 Feb 2026 11:12:47 -0800 Subject: [PATCH 0943/1093] cmd/tailscale: don't depend on regexp in minbox builds Updates #12614 Updates #18562 Change-Id: Ife4f10c55d1d68569938ffd68ffe72eef889e200 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli.go | 35 ++++++++++++++++++++++++------ cmd/tailscale/cli/cli_test.go | 4 ++-- cmd/tailscaled/depaware-minbox.txt | 2 -- cmd/tailscaled/deps_test.go | 1 + 4 files changed, 31 insertions(+), 11 deletions(-) diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index cde9d341c43c7..1ba66531a7cb5 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -6,6 +6,7 @@ package cli import ( + "bytes" "context" "encoding/json" "errors" @@ -14,7 +15,6 @@ import ( "io" "log" "os" - "regexp" "runtime" "strings" "sync" @@ -582,11 +582,32 @@ type sanitizeWriter struct { w io.Writer } -var rxTskey = regexp.MustCompile(`tskey-[\w-]+`) - +// Write logically replaces /tskey-[A-Za-z0-9-]+/ with /tskey-XXXX.../ in buf +// before writing to the underlying writer. 
+// +// We avoid the "regexp" package to not bloat the minbox build, and without +// making this a featuretag-omittable protection. func (w sanitizeWriter) Write(buf []byte) (int, error) { - sanitized := rxTskey.ReplaceAll(buf, []byte("tskey-REDACTED")) - diff := len(sanitized) - len(buf) - n, err := w.w.Write(sanitized) - return n - diff, err + const prefix = "tskey-" + scrub := buf + for { + i := bytes.Index(scrub, []byte(prefix)) + if i == -1 { + break + } + scrub = scrub[i+len(prefix):] + + for i, b := range scrub { + if (b >= 'a' && b <= 'z') || + (b >= 'A' && b <= 'Z') || + (b >= '0' && b <= '9') || + b == '-' { + scrub[i] = 'X' + } else { + break + } + } + } + + return w.w.Write(buf) } diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index ac6a94d52f88d..537e641fc4160 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -1804,8 +1804,8 @@ func TestSanitizeWriter(t *testing.T) { buf := new(bytes.Buffer) w := sanitizeOutput(buf) - in := []byte(`my auth key is tskey-auth-abc123-def456, what's yours?`) - want := []byte(`my auth key is tskey-REDACTED, what's yours?`) + in := []byte(`my auth key is tskey-auth-abc123-def456 and tskey-foo, what's yours?`) + want := []byte(`my auth key is tskey-XXXXXXXXXXXXXXXXXX and tskey-XXX, what's yours?`) n, err := w.Write(in) if err != nil { t.Fatal(err) diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 41bf4d9842084..f087e68096750 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -420,8 +420,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de path from io/fs+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from tailscale.com/cmd/tailscale/cli - regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ slices from crypto/tls+ diff --git a/cmd/tailscaled/deps_test.go 
b/cmd/tailscaled/deps_test.go index 118913848a52b..5969850435b41 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -294,6 +294,7 @@ func TestMinTailscaledWithCLI(t *testing.T) { "tailscale.com/clientupdate/distsign": "unexpected distsign dep", "archive/tar": "unexpected archive/tar dep", "tailscale.com/feature/conn25": "unexpected conn25 dep", + "regexp": "unexpected regexp dep; bloats binary", }, }.Check(t) } From de4a8dbcfcca4b3cdc4b437d7d2aceacfb89f0d0 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 6 Feb 2026 09:07:33 -0800 Subject: [PATCH 0944/1093] control/controlclient: fix canSkipStatus online conditions concurrent netmaps that if the first is logged in, it is never skipped. This should have been covered by the skip test case, but that case wasn't updated to include level set state. Updates #12639 Updates #17869 Signed-off-by: James Tucker --- control/controlclient/auto.go | 15 +++++++-------- control/controlclient/controlclient_test.go | 5 +++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index fe227b45e57aa..783ca36c4f45d 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -674,17 +674,16 @@ func canSkipStatus(s1, s2 *Status) bool { // we can't skip it. return false } - if s1.Err != nil || s1.URL != "" || s1.LoggedIn { - // If s1 has an error, a URL, or LoginFinished set, we shouldn't skip it, - // lest the error go away in s2 or in-between. We want to make sure all - // the subsystems see it. Plus there aren't many of these, so not worth - // skipping. + if s1.Err != nil || s1.URL != "" { + // If s1 has an error or an URL, we shouldn't skip it, lest the error go + // away in s2 or in-between. We want to make sure all the subsystems see + // it. Plus there aren't many of these, so not worth skipping.
return false } if !s1.Persist.Equals(s2.Persist) || s1.LoggedIn != s2.LoggedIn || s1.InMapPoll != s2.InMapPoll || s1.URL != s2.URL { - // If s1 has a different Persist, LoginFinished, Synced, or URL than s2, - // don't skip it. We only care about skipping the typical - // entries where the only difference is the NetMap. + // If s1 has a different Persist, has changed login state, changed map + // poll state, or has a new login URL, don't skip it. We only care about + // skipping the typical entries where the only difference is the NetMap. return false } // If nothing above precludes it, and both s1 and s2 have NetMaps, then diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index c7d61f6b2d13d..dca1d8ddf2f8b 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -98,6 +98,7 @@ func TestCanSkipStatus(t *testing.T) { nm1 := &netmap.NetworkMap{} nm2 := &netmap.NetworkMap{} + commonPersist := new(persist.Persist).View() tests := []struct { name string s1, s2 *Status @@ -165,8 +166,8 @@ func TestCanSkipStatus(t *testing.T) { }, { name: "skip", - s1: &Status{NetMap: nm1}, - s2: &Status{NetMap: nm2}, + s1: &Status{NetMap: nm1, LoggedIn: true, InMapPoll: true, Persist: commonPersist}, + s2: &Status{NetMap: nm2, LoggedIn: true, InMapPoll: true, Persist: commonPersist}, want: true, }, } From 826fd544cc6ae1a995dd0bd47c6b4f8f82caa448 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Fri, 6 Feb 2026 16:55:25 +0000 Subject: [PATCH 0945/1093] tsweb/varz: only export numeric expvar.Map values Currently the expvar exporter attempts to write expvar.String, which breaks the Prometheus metric page. 
Updates tailscale/corp#36552 Signed-off-by: Anton Tolchanov --- tsweb/varz/varz.go | 14 ++++++++++++-- tsweb/varz/varz_test.go | 43 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index d6100672c6c56..a2286c7603be3 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -245,11 +245,21 @@ func writePromExpVar(w io.Writer, prefix string, kv expvar.KeyValue) { if label != "" && typ != "" { fmt.Fprintf(w, "# TYPE %s %s\n", name, typ) v.Do(func(kv expvar.KeyValue) { - fmt.Fprintf(w, "%s{%s=%q} %v\n", name, label, kv.Key, kv.Value) + switch kv.Value.(type) { + case *expvar.Int, *expvar.Float: + fmt.Fprintf(w, "%s{%s=%q} %v\n", name, label, kv.Key, kv.Value) + default: + fmt.Fprintf(w, "# skipping %q expvar map key %q with unknown value type %T\n", name, kv.Key, kv.Value) + } }) } else { v.Do(func(kv expvar.KeyValue) { - fmt.Fprintf(w, "%s_%s %v\n", name, kv.Key, kv.Value) + switch kv.Value.(type) { + case *expvar.Int, *expvar.Float: + fmt.Fprintf(w, "%s_%s %v\n", name, kv.Key, kv.Value) + default: + fmt.Fprintf(w, "# skipping %q expvar map key %q with unknown value type %T\n", name, kv.Key, kv.Value) + } }) } } diff --git a/tsweb/varz/varz_test.go b/tsweb/varz/varz_test.go index 6505ba985160e..770144016b7fc 100644 --- a/tsweb/varz/varz_test.go +++ b/tsweb/varz/varz_test.go @@ -180,6 +180,43 @@ func TestVarzHandler(t *testing.T) { }, "# TYPE m counter\nm{label=\"bar\"} 2\nm{label=\"foo\"} 1\n", }, + { + "metrics_label_map_float", + "float_map", + func() *expvar.Map { + m := new(expvar.Map) + m.Init() + f := new(expvar.Float) + f.Set(1.5) + m.Set("a", f) + return m + }(), + "float_map_a 1.5\n", + }, + { + "metrics_label_map_int", + "int_map", + func() *expvar.Map { + m := new(expvar.Map) + m.Init() + f := new(expvar.Int) + f.Set(55) + m.Set("a", f) + return m + }(), + "int_map_a 55\n", + }, + { + "metrics_label_map_string", + "string_map", + func() *expvar.Map { + m := 
new(expvar.Map) + m.Init() + m.Set("a", expvar.NewString("foo")) + return m + }(), + "# skipping \"string_map\" expvar map key \"a\" with unknown value type *expvar.String\n", + }, { "metrics_label_map_untyped", "control_save_config", @@ -298,6 +335,12 @@ foo_foo_b 1 api_status_code 42 `) + "\n", }, + { + "string_expvar_is_not_exported", + "foo_string", + new(expvar.String), + "# skipping expvar \"foo_string\" (Go type *expvar.String) with undeclared Prometheus type\n", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 35e656a35f7b8166358cc4ba230f2853c0f616bb Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Fri, 6 Feb 2026 17:48:50 +0000 Subject: [PATCH 0946/1093] tsweb/varz: remove unnecessary Map.Init() calls in tests Updates #cleanup Signed-off-by: Anton Tolchanov --- tsweb/varz/varz_test.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tsweb/varz/varz_test.go b/tsweb/varz/varz_test.go index 770144016b7fc..d041edb4b93d4 100644 --- a/tsweb/varz/varz_test.go +++ b/tsweb/varz/varz_test.go @@ -113,7 +113,6 @@ func TestVarzHandler(t *testing.T) { &metrics.Set{ Map: *(func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Add("foo", 1) m.Add("bar", 2) return m @@ -127,7 +126,6 @@ func TestVarzHandler(t *testing.T) { &metrics.Set{ Map: *(func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Add("foo", 1) m.Add("bar", 2) return m @@ -140,7 +138,6 @@ func TestVarzHandler(t *testing.T) { "api_status_code", func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Add("2xx", 100) m.Add("5xx", 2) return m @@ -172,7 +169,6 @@ func TestVarzHandler(t *testing.T) { Label: "label", Map: *(func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Add("foo", 1) m.Add("bar", 2) return m @@ -185,7 +181,6 @@ func TestVarzHandler(t *testing.T) { "float_map", func() *expvar.Map { m := new(expvar.Map) - m.Init() f := new(expvar.Float) f.Set(1.5) m.Set("a", f) @@ -198,7 +193,6 @@ func TestVarzHandler(t *testing.T) { "int_map", func() *expvar.Map { m := 
new(expvar.Map) - m.Init() f := new(expvar.Int) f.Set(55) m.Set("a", f) @@ -211,7 +205,6 @@ func TestVarzHandler(t *testing.T) { "string_map", func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Set("a", expvar.NewString("foo")) return m }(), @@ -256,7 +249,6 @@ func TestVarzHandler(t *testing.T) { "counter_labelmap_keyname_m", func() *expvar.Map { m := new(expvar.Map) - m.Init() m.Add("foo", 1) m.Add("bar", 2) return m From fe69b7f0e50e9015d451ba61c8279b0eae993116 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 6 Feb 2026 01:06:41 -0800 Subject: [PATCH 0947/1093] cmd/tailscale: add event bus queue depth debugging Under extremely high load it appears we may have some retention issues as a result of queue depth build up, but there is currently no direct way to observe this. The scenario does not trigger the slow subscriber log message, and the event stream debugging endpoint produces a saturating volume of information. Updates tailscale/corp#36904 Signed-off-by: James Tucker --- client/local/local.go | 5 ++++ cmd/tailscale/cli/debug.go | 15 ++++++++++ ipn/localapi/debug.go | 58 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 78 insertions(+) diff --git a/client/local/local.go b/client/local/local.go index 465ba0d67c820..5794734f27133 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -446,6 +446,11 @@ func (lc *Client) EventBusGraph(ctx context.Context) ([]byte, error) { return lc.get200(ctx, "/localapi/v0/debug-bus-graph") } +// EventBusQueues returns a JSON snapshot of event bus queue depths per client. +func (lc *Client) EventBusQueues(ctx context.Context) ([]byte, error) { + return lc.get200(ctx, "/localapi/v0/debug-bus-queues") +} + // StreamBusEvents returns an iterator of Tailscale bus events as they arrive. // Each pair is a valid event and a nil error, or a zero event a non-nil error. // In case of error, the iterator ends after the pair reporting the error. 
diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index f406b9f226249..629c694c0c6b4 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -124,6 +124,12 @@ func debugCmd() *ffcli.Command { return fs })(), }, + { + Name: "daemon-bus-queues", + ShortUsage: "tailscale debug daemon-bus-queues", + Exec: runDaemonBusQueues, + ShortHelp: "Print event bus queue depths per client", + }, { Name: "metrics", ShortUsage: "tailscale debug metrics", @@ -840,6 +846,15 @@ func runDaemonBusGraph(ctx context.Context, args []string) error { return nil } +func runDaemonBusQueues(ctx context.Context, args []string) error { + data, err := localClient.EventBusQueues(ctx) + if err != nil { + return err + } + fmt.Print(string(data)) + return nil +} + // generateDOTGraph generates the DOT graph format based on the events func generateDOTGraph(topics []eventbus.DebugTopic) string { var sb strings.Builder diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go index fe936db6ab78d..d1348abaafef5 100644 --- a/ipn/localapi/debug.go +++ b/ipn/localapi/debug.go @@ -6,6 +6,7 @@ package localapi import ( + "cmp" "context" "encoding/json" "fmt" @@ -35,6 +36,7 @@ func init() { Register("dev-set-state-store", (*Handler).serveDevSetStateStore) Register("debug-bus-events", (*Handler).serveDebugBusEvents) Register("debug-bus-graph", (*Handler).serveEventBusGraph) + Register("debug-bus-queues", (*Handler).serveDebugBusQueues) Register("debug-derp-region", (*Handler).serveDebugDERPRegion) Register("debug-dial-types", (*Handler).serveDebugDialTypes) Register("debug-log", (*Handler).serveDebugLog) @@ -424,6 +426,62 @@ func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(topics) } +func (h *Handler) serveDebugBusQueues(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if 
!ok { + http.Error(w, "event bus not running", http.StatusPreconditionFailed) + return + } + + debugger := bus.Debugger() + + type clientQueue struct { + Name string `json:"name"` + SubscribeDepth int `json:"subscribeDepth"` + SubscribeTypes []string `json:"subscribeTypes,omitempty"` + PublishTypes []string `json:"publishTypes,omitempty"` + } + + publishQueue := debugger.PublishQueue() + clients := debugger.Clients() + result := struct { + PublishQueueDepth int `json:"publishQueueDepth"` + Clients []clientQueue `json:"clients"` + }{ + PublishQueueDepth: len(publishQueue), + } + + for _, c := range clients { + sq := debugger.SubscribeQueue(c) + cq := clientQueue{ + Name: c.Name(), + SubscribeDepth: len(sq), + } + for _, t := range debugger.SubscribeTypes(c) { + cq.SubscribeTypes = append(cq.SubscribeTypes, t.String()) + } + for _, t := range debugger.PublishTypes(c) { + cq.PublishTypes = append(cq.PublishTypes, t.String()) + } + result.Clients = append(result.Clients, cq) + } + + slices.SortFunc(result.Clients, func(a, b clientQueue) int { + if a.SubscribeDepth != b.SubscribeDepth { + return b.SubscribeDepth - a.SubscribeDepth + } + return cmp.Compare(a.Name, b.Name) + }) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(result) +} + func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { if !buildfeatures.HasLogTail { http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) From 9ba2a80ab64f5507ca6e6cbba4e91d082ec2d8df Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 6 Feb 2026 12:54:11 -0800 Subject: [PATCH 0948/1093] go.toolchain.{rev,next.rev}: update to Go 1.25.7 / Go 1.26rc3 (#18633) Updates #18629 Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.toolchain.next.rev | 2 +- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- go.toolchain.version | 2 +- pull-toolchain.sh | 1 + 6 files changed, 6 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 
bcdc7e19d3162..12ffd3ebd5b2b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.6 +go 1.25.7 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.next.rev b/go.toolchain.next.rev index be0f53a9c5a18..abdc21022aa19 100644 --- a/go.toolchain.next.rev +++ b/go.toolchain.next.rev @@ -1 +1 @@ -64a6cb4cba579e2865654747d4d672ead07b8375 +5ba287c89a4cef2f4a419aed4e6bc3121c5c4dad diff --git a/go.toolchain.rev b/go.toolchain.rev index cb3fa64623175..05e37f312da5c 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -779d878b6a943cecd2f359699001a03d7cedf222 +692441891e061f8ae2cb2f8f2c898f86bb1c5dca diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index 3b8ce70f3ac31..b7a7163f79f68 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-e081DbI45vGMmi3drwqz2UOxRwffEuEDSVZupDtOVuk= +sha256-gWKrpBTXfsQmgOWoMrbvCaWGsBXCt5X12BAcwfAPMQY= diff --git a/go.toolchain.version b/go.toolchain.version index 198ec23ccfcc9..f1968aa8818d5 100644 --- a/go.toolchain.version +++ b/go.toolchain.version @@ -1 +1 @@ -1.25.6 +1.25.7 diff --git a/pull-toolchain.sh b/pull-toolchain.sh index b10e3cd68cf11..c80c913bb17b2 100755 --- a/pull-toolchain.sh +++ b/pull-toolchain.sh @@ -25,6 +25,7 @@ fi # don't yet support TS_GO_NEXT=1 with flake.nix or in our corp CI. if [ "${TS_GO_NEXT:-}" != "1" ]; then ./tool/go version 2>/dev/null | awk '{print $3}' | sed 's/^go//' > go.toolchain.version + ./tool/go mod edit -go "$(cat go.toolchain.version)" ./update-flake.sh fi From 5eaaf9786b84931ac3cba16cc4bc737e4b60502a Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Wed, 4 Feb 2026 10:30:07 -0500 Subject: [PATCH 0949/1093] tailcfg: add peerRelay bool to hostinfo This commit adds a bool named PeerRelay to Hostinfo, to identify the host's status of acting as a peer relay. Considering the RelayServerPort number can be 0, I just made this a bool in stead of a port number. 
If the port info is needed in the future, this would also help indicate whether the port was set to 0 (meaning any port in the peer relay context).
// Location represents geographical location data about a diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 483746145b6e1..a60f301d763c7 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -186,6 +186,7 @@ var _HostinfoCloneNeedsRegeneration = Hostinfo(struct { UserspaceRouter opt.Bool AppConnector opt.Bool ServicesHash string + PeerRelay bool ExitNodeID StableNodeID Location *Location TPM *TPMInfo diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 4e9909db09f89..f649e43ab57b8 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -67,6 +67,7 @@ func TestHostinfoEqual(t *testing.T) { "UserspaceRouter", "AppConnector", "ServicesHash", + "PeerRelay", "ExitNodeID", "Location", "TPM", @@ -244,6 +245,16 @@ func TestHostinfoEqual(t *testing.T) { &Hostinfo{AppConnector: opt.Bool("false")}, false, }, + { + &Hostinfo{PeerRelay: true}, + &Hostinfo{PeerRelay: true}, + true, + }, + { + &Hostinfo{PeerRelay: true}, + &Hostinfo{PeerRelay: false}, + false, + }, { &Hostinfo{ServicesHash: "73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049"}, &Hostinfo{ServicesHash: "73475cb40a568e8da8a045ced110137e159f890ac4da883b6b17dc651b3a8049"}, diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index b2734d8af36c9..7960000fd3d6a 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -606,6 +606,9 @@ func (v HostinfoView) AppConnector() opt.Bool { return v.ж.AppConnector } // opaque hash of the most recent list of tailnet services, change in hash indicates config should be fetched via c2n func (v HostinfoView) ServicesHash() string { return v.ж.ServicesHash } +// if the client is willing to relay traffic for other peers +func (v HostinfoView) PeerRelay() bool { return v.ж.PeerRelay } + // the client’s selected exit node, empty when unselected. 
func (v HostinfoView) ExitNodeID() StableNodeID { return v.ж.ExitNodeID } @@ -664,6 +667,7 @@ var _HostinfoViewNeedsRegeneration = Hostinfo(struct { UserspaceRouter opt.Bool AppConnector opt.Bool ServicesHash string + PeerRelay bool ExitNodeID StableNodeID Location *Location TPM *TPMInfo From a3215f1f9d3afd4a35973e4df12dc5fca87a3056 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 8 Feb 2026 04:46:09 +0000 Subject: [PATCH 0950/1093] cmd/tailscale,feature/featuretags: make webbrowser and colorable deps omittable Add new "webbrowser" and "colorable" feature tags so that the github.com/toqueteos/webbrowser and mattn/go-colorable packages can be excluded from minbox builds. Updates #12614 Change-Id: Iabd38b242f5a56aa10ef2050113785283f4e1fe8 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/cli.go | 16 ----------- cmd/tailscale/cli/colorable.go | 28 +++++++++++++++++++ cmd/tailscale/cli/colorable_omit.go | 12 ++++++++ cmd/tailscale/cli/open_browser.go | 12 ++++++++ cmd/tailscale/cli/status.go | 7 +++-- cmd/tailscaled/depaware-minbox.txt | 4 +-- cmd/tailscaled/deps_test.go | 2 ++ .../feature_colorable_disabled.go | 13 +++++++++ .../feature_colorable_enabled.go | 13 +++++++++ .../feature_webbrowser_disabled.go | 13 +++++++++ .../feature_webbrowser_enabled.go | 13 +++++++++ feature/featuretags/featuretags.go | 9 ++++-- 12 files changed, 119 insertions(+), 23 deletions(-) create mode 100644 cmd/tailscale/cli/colorable.go create mode 100644 cmd/tailscale/cli/colorable_omit.go create mode 100644 cmd/tailscale/cli/open_browser.go create mode 100644 feature/buildfeatures/feature_colorable_disabled.go create mode 100644 feature/buildfeatures/feature_colorable_enabled.go create mode 100644 feature/buildfeatures/feature_webbrowser_disabled.go create mode 100644 feature/buildfeatures/feature_webbrowser_enabled.go diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 1ba66531a7cb5..fda6b4546324a 100644 --- a/cmd/tailscale/cli/cli.go +++ 
b/cmd/tailscale/cli/cli.go @@ -21,8 +21,6 @@ import ( "text/tabwriter" "time" - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/local" "tailscale.com/cmd/tailscale/cli/ffcomplete" @@ -484,20 +482,6 @@ func countFlags(fs *flag.FlagSet) (n int) { return n } -// colorableOutput returns a colorable writer if stdout is a terminal (not, say, -// redirected to a file or pipe), the Stdout writer is os.Stdout (we're not -// embedding the CLI in wasm or a mobile app), and NO_COLOR is not set (see -// https://no-color.org/). If any of those is not the case, ok is false -// and w is Stdout. -func colorableOutput() (w io.Writer, ok bool) { - if Stdout != os.Stdout || - os.Getenv("NO_COLOR") != "" || - !isatty.IsTerminal(os.Stdout.Fd()) { - return Stdout, false - } - return colorable.NewColorableStdout(), true -} - type commandDoc struct { Name string Desc string diff --git a/cmd/tailscale/cli/colorable.go b/cmd/tailscale/cli/colorable.go new file mode 100644 index 0000000000000..6ecd36b1a409f --- /dev/null +++ b/cmd/tailscale/cli/colorable.go @@ -0,0 +1,28 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_colorable + +package cli + +import ( + "io" + "os" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +// colorableOutput returns a colorable writer if stdout is a terminal (not, say, +// redirected to a file or pipe), the Stdout writer is os.Stdout (we're not +// embedding the CLI in wasm or a mobile app), and NO_COLOR is not set (see +// https://no-color.org/). If any of those is not the case, ok is false +// and w is Stdout. 
+func colorableOutput() (w io.Writer, ok bool) { + if Stdout != os.Stdout || + os.Getenv("NO_COLOR") != "" || + !isatty.IsTerminal(os.Stdout.Fd()) { + return Stdout, false + } + return colorable.NewColorableStdout(), true +} diff --git a/cmd/tailscale/cli/colorable_omit.go b/cmd/tailscale/cli/colorable_omit.go new file mode 100644 index 0000000000000..a821bdbbdc92e --- /dev/null +++ b/cmd/tailscale/cli/colorable_omit.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_colorable + +package cli + +import "io" + +func colorableOutput() (w io.Writer, ok bool) { + return Stdout, false +} diff --git a/cmd/tailscale/cli/open_browser.go b/cmd/tailscale/cli/open_browser.go new file mode 100644 index 0000000000000..a006b9765da7a --- /dev/null +++ b/cmd/tailscale/cli/open_browser.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_webbrowser + +package cli + +import "github.com/toqueteos/webbrowser" + +func init() { + hookOpenURL.Set(webbrowser.Open) +} diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index ae4df4da9b51b..49c565febb9cc 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -18,7 +18,6 @@ import ( "text/tabwriter" "github.com/peterbourgon/ff/v3/ffcli" - "github.com/toqueteos/webbrowser" "golang.org/x/net/idna" "tailscale.com/feature" "tailscale.com/ipn" @@ -113,7 +112,9 @@ func runStatus(ctx context.Context, args []string) error { ln.Close() }() if statusArgs.browser { - go webbrowser.Open(statusURL) + if f, ok := hookOpenURL.GetOk(); ok { + go f(statusURL) + } } err = http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.RequestURI != "/" { @@ -252,6 +253,8 @@ func runStatus(ctx context.Context, args []string) error { return nil } +var hookOpenURL feature.Hook[func(string) error] + var hookPrintFunnelStatus 
feature.Hook[func(context.Context)] // isRunningOrStarting reports whether st is in state Running or Starting. diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index f087e68096750..15ba39dbacb0d 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -21,8 +21,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - github.com/mattn/go-colorable from tailscale.com/cmd/tailscale/cli - github.com/mattn/go-isatty from github.com/mattn/go-colorable+ + github.com/mattn/go-isatty from tailscale.com/util/prompt 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink @@ -38,7 +37,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli 💣 go4.org/mem from tailscale.com/control/controlbase+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ tailscale.com from tailscale.com/version diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index 5969850435b41..c7ab01298f223 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -295,6 +295,8 @@ func TestMinTailscaledWithCLI(t *testing.T) { "archive/tar": "unexpected archive/tar dep", "tailscale.com/feature/conn25": "unexpected conn25 dep", "regexp": 
"unexpected regexp dep; bloats binary", + "github.com/toqueteos/webbrowser": "unexpected webbrowser dep with ts_omit_webbrowser", + "github.com/mattn/go-colorable": "unexpected go-colorable dep with ts_omit_colorable", }, }.Check(t) } diff --git a/feature/buildfeatures/feature_colorable_disabled.go b/feature/buildfeatures/feature_colorable_disabled.go new file mode 100644 index 0000000000000..3a7bc54234fe5 --- /dev/null +++ b/feature/buildfeatures/feature_colorable_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_colorable + +package buildfeatures + +// HasColorable is whether the binary was built with support for modular feature "Colorized terminal output". +// Specifically, it's whether the binary was NOT built with the "ts_omit_colorable" build tag. +// It's a const so it can be used for dead code elimination. +const HasColorable = false diff --git a/feature/buildfeatures/feature_colorable_enabled.go b/feature/buildfeatures/feature_colorable_enabled.go new file mode 100644 index 0000000000000..b6a08366eba32 --- /dev/null +++ b/feature/buildfeatures/feature_colorable_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_colorable + +package buildfeatures + +// HasColorable is whether the binary was built with support for modular feature "Colorized terminal output". +// Specifically, it's whether the binary was NOT built with the "ts_omit_colorable" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasColorable = true diff --git a/feature/buildfeatures/feature_webbrowser_disabled.go b/feature/buildfeatures/feature_webbrowser_disabled.go new file mode 100644 index 0000000000000..e6484479c979b --- /dev/null +++ b/feature/buildfeatures/feature_webbrowser_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_webbrowser + +package buildfeatures + +// HasWebBrowser is whether the binary was built with support for modular feature "Open URLs in the user's web browser". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webbrowser" build tag. +// It's a const so it can be used for dead code elimination. +const HasWebBrowser = false diff --git a/feature/buildfeatures/feature_webbrowser_enabled.go b/feature/buildfeatures/feature_webbrowser_enabled.go new file mode 100644 index 0000000000000..68d80b49f5444 --- /dev/null +++ b/feature/buildfeatures/feature_webbrowser_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_webbrowser + +package buildfeatures + +// HasWebBrowser is whether the binary was built with support for modular feature "Open URLs in the user's web browser". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webbrowser" build tag. +// It's a const so it can be used for dead code elimination. +const HasWebBrowser = true diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go index 45daaec5ec29f..4220c02b75fa2 100644 --- a/feature/featuretags/featuretags.go +++ b/feature/featuretags/featuretags.go @@ -84,7 +84,7 @@ type FeatureMeta struct { Deps []FeatureTag // other features this feature requires // ImplementationDetail is whether the feature is an internal implementation - // detail. 
That is, it's not something a user wuold care about having or not + // detail. That is, it's not something a user would care about having or not // having, but we'd like to able to omit from builds if no other // user-visible features depend on it. ImplementationDetail bool @@ -130,6 +130,7 @@ var Features = map[FeatureTag]FeatureMeta{ "captiveportal": {Sym: "CaptivePortal", Desc: "Captive portal detection"}, "capture": {Sym: "Capture", Desc: "Packet capture"}, "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, + "colorable": {Sym: "Colorable", Desc: "Colorized terminal output"}, "cliconndiag": {Sym: "CLIConnDiag", Desc: "CLI connection error diagnostics"}, "clientmetrics": {Sym: "ClientMetrics", Desc: "Client metrics support"}, "clientupdate": { @@ -256,7 +257,7 @@ var Features = map[FeatureTag]FeatureMeta{ "systray": { Sym: "SysTray", Desc: "Linux system tray", - Deps: []FeatureTag{"dbus"}, + Deps: []FeatureTag{"dbus", "webbrowser"}, }, "taildrop": { Sym: "Taildrop", @@ -290,6 +291,10 @@ var Features = map[FeatureTag]FeatureMeta{ Desc: "Usermetrics (documented, stable) metrics support", }, "wakeonlan": {Sym: "WakeOnLAN", Desc: "Wake-on-LAN support"}, + "webbrowser": { + Sym: "WebBrowser", + Desc: "Open URLs in the user's web browser", + }, "webclient": { Sym: "WebClient", Desc: "Web client support", Deps: []FeatureTag{"serve"}, From dfba01ca9bd8c4df02c3c32f400d9aeb897c5fc7 Mon Sep 17 00:00:00 2001 From: Tim Walters Date: Sun, 8 Feb 2026 10:56:32 -0500 Subject: [PATCH 0951/1093] cmd/tailscaled: update documentation url This updates the URL shown by systemd to the new URL used by the docs after the recent migration. 
Fixes #18646 Signed-off-by: Tim Walters --- cmd/tailscaled/tailscaled.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tailscaled/tailscaled.service b/cmd/tailscaled/tailscaled.service index 719a3c0c96398..9950891a35fa4 100644 --- a/cmd/tailscaled/tailscaled.service +++ b/cmd/tailscaled/tailscaled.service @@ -1,6 +1,6 @@ [Unit] Description=Tailscale node agent -Documentation=https://tailscale.com/kb/ +Documentation=https://tailscale.com/docs/ Wants=network-pre.target After=network-pre.target NetworkManager.service systemd-resolved.service From fff623206ef14ca65e3436d94a4e7d8d4dadc905 Mon Sep 17 00:00:00 2001 From: faukah Date: Mon, 9 Feb 2026 21:39:28 +0100 Subject: [PATCH 0952/1093] flake.nix: update NixOS wiki link (#18662) wiki.nixos.org is and has been the official wiki for quite some time now. Signed-off-by: faukah --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index 76e68e4acd57f..d4a10bcacf769 100644 --- a/flake.nix +++ b/flake.nix @@ -4,7 +4,7 @@ # environment for working on tailscale, for use with "nix develop". 
# # For more information about this and why this file is useful, see: -# https://nixos.wiki/wiki/Flakes +# https://wiki.nixos.org/wiki/Flakes # # Also look into direnv: https://direnv.net/, this can make it so that you can # automatically get your environment set up when you change folders into the From 00e180cdd79e1c82a77c29c7cac3e0116a427fa5 Mon Sep 17 00:00:00 2001 From: Amal Bansode Date: Mon, 9 Feb 2026 13:24:25 -0800 Subject: [PATCH 0953/1093] go.mod: update bart dep to v0.26.1 (#18659) bart has gained a bunch of purported performance and usability improvements since the current version we are using (0.18.0, from 1y ago) Updates tailscale/corp#36982 Signed-off-by: Amal Bansode --- cmd/k8s-operator/depaware.txt | 9 +++++++-- cmd/tailscale/depaware.txt | 7 ++++++- cmd/tailscaled/depaware-min.txt | 7 ++++++- cmd/tailscaled/depaware-minbox.txt | 7 ++++++- cmd/tailscaled/depaware.txt | 7 ++++++- cmd/tsidp/depaware.txt | 7 ++++++- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- tsnet/depaware.txt | 7 ++++++- 12 files changed, 49 insertions(+), 14 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 6a6e7d61f9aa3..5565ec01921bb 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -99,8 +99,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/fsnotify/fsnotify/internal from github.com/fsnotify/fsnotify github.com/fxamacker/cbor/v2 from tailscale.com/tka+ github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from 
github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json/internal/jsonflags+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json/internal/jsonopts+ @@ -1060,7 +1065,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ - cmp from github.com/gaissmai/bart+ + cmp from encoding/json+ compress/flate from compress/gzip+ compress/gzip from github.com/emicklei/go-restful/v3+ compress/zlib from github.com/emicklei/go-restful/v3+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 85bf2312a5f0f..58f9e1c0bfb83 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -95,8 +95,13 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L github.com/fogleman/gg from tailscale.com/client/systray github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/tsdial + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from 
tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index e536ac59dde37..b7df3a48a6b1e 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -1,8 +1,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/drive+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 15ba39dbacb0d..ca029194c101e 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -1,8 +1,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from 
github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/drive+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index da480d1a694e3..71a1df1d4c6c2 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -98,8 +98,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 github.com/djherbis/times from tailscale.com/drive/driveimpl github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/tstun+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json/internal/jsonflags+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json/internal/jsonopts+ diff --git 
a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index e29ae93484c95..4dfb831b59f43 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -88,8 +88,13 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ diff --git a/flake.nix b/flake.nix index d4a10bcacf769..b29d45aacf43b 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-+tOYqRV8ZUA95dfVyRpjnJvwuSMobu/EhtXxq4bwvio= +# nix-direnv cache busting line: sha256-5A6EShJ33yHQdr6tgsNCRFLvNUUjIKXDv5DvzsiUwFI= diff --git a/go.mod b/go.mod index 12ffd3ebd5b2b..c69f4f1edc5aa 100644 --- a/go.mod +++ b/go.mod @@ -37,7 +37,7 @@ require ( github.com/fogleman/gg v1.3.0 github.com/frankban/quicktest v1.14.6 github.com/fxamacker/cbor/v2 v2.9.0 - github.com/gaissmai/bart v0.18.0 + github.com/gaissmai/bart v0.26.1 github.com/go-json-experiment/json v0.0.0-20250813024750-ebf49471dced github.com/go-logr/zapr v1.3.0 
github.com/go-ole/go-ole v1.3.0 diff --git a/go.mod.sri b/go.mod.sri index d46c84a110095..4edd4e7acabad 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-+tOYqRV8ZUA95dfVyRpjnJvwuSMobu/EhtXxq4bwvio= +sha256-5A6EShJ33yHQdr6tgsNCRFLvNUUjIKXDv5DvzsiUwFI= diff --git a/go.sum b/go.sum index 541cef6058655..e925fcc3d4371 100644 --- a/go.sum +++ b/go.sum @@ -373,8 +373,8 @@ github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sa github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo= -github.com/gaissmai/bart v0.18.0/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY= +github.com/gaissmai/bart v0.26.1 h1:+w4rnLGNlA2GDVn382Tfe3jOsK5vOr5n4KmigJ9lbTo= +github.com/gaissmai/bart v0.26.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c= github.com/ghostiam/protogetter v0.3.5 h1:+f7UiF8XNd4w3a//4DnusQ2SZjPkUjxkMEfjbxOK4Ug= github.com/ghostiam/protogetter v0.3.5/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I= diff --git a/shell.nix b/shell.nix index 3accd73c55ffb..ff44b9b89631b 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-+tOYqRV8ZUA95dfVyRpjnJvwuSMobu/EhtXxq4bwvio= +# nix-direnv cache busting line: sha256-5A6EShJ33yHQdr6tgsNCRFLvNUUjIKXDv5DvzsiUwFI= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 5b08200c97f6d..46acadd1dd750 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -88,8 +88,13 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ github.com/fxamacker/cbor/v2 from 
tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ - github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/lpm from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/nodes from github.com/gaissmai/bart + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart/internal/nodes + github.com/gaissmai/bart/internal/value from github.com/gaissmai/bart+ github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ From 5a5572e48acc4eee0ddbb6680d47881efe807177 Mon Sep 17 00:00:00 2001 From: Michael Ben-Ami Date: Thu, 11 Dec 2025 15:31:15 -0500 Subject: [PATCH 0954/1093] tstun,wgengine: add new datapath hooks for intercepting Connectors 2025 app connector packets We introduce the Conn25PacketHooks interface to be used as a nil-able field in userspaceEngine. The engine then plumbs through the functions to the corresponding tstun.Wrapper intercepts. The new intercepts run pre-filter when egressing toward WireGuard, and post-filter when ingressing from WireGuard. This is preserve the design invariant that the filter recognizes the traffic as interesting app connector traffic. This commit does not plumb through implementation of the interface, so should be a functional no-op. 
Fixes tailscale/corp#35985 Signed-off-by: Michael Ben-Ami --- net/tstun/wrap.go | 20 +++++++++++++++ wgengine/userspace.go | 59 ++++++++++++++++++++++++++++++++----------- 2 files changed, 64 insertions(+), 15 deletions(-) diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index d463948a208fa..3c1315437f510 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -171,6 +171,9 @@ type Wrapper struct { // PreFilterPacketInboundFromWireGuard is the inbound filter function that runs before the main filter // and therefore sees the packets that may be later dropped by it. PreFilterPacketInboundFromWireGuard FilterFunc + // PostFilterPacketInboundFromWireGuardAppConnector runs after the filter, but before PostFilterPacketInboundFromWireGuard. + // Non-app connector traffic is passed along. Invalid app connector traffic is dropped. + PostFilterPacketInboundFromWireGuardAppConnector FilterFunc // PostFilterPacketInboundFromWireGuard is the inbound filter function that runs after the main filter. PostFilterPacketInboundFromWireGuard GROFilterFunc // PreFilterPacketOutboundToWireGuardNetstackIntercept is a filter function that runs before the main filter @@ -183,6 +186,10 @@ type Wrapper struct { // packets which it handles internally. If both this and PreFilterFromTunToNetstack // filter functions are non-nil, this filter runs second. PreFilterPacketOutboundToWireGuardEngineIntercept FilterFunc + // PreFilterPacketOutboundToWireGuardAppConnectorIntercept runs after PreFilterPacketOutboundToWireGuardEngineIntercept + // for app connector specific traffic. Non-app connector traffic is passed along. Invalid app connector traffic is + // dropped. + PreFilterPacketOutboundToWireGuardAppConnectorIntercept FilterFunc // PostFilterPacketOutboundToWireGuard is the outbound filter function that runs after the main filter. 
PostFilterPacketOutboundToWireGuard FilterFunc @@ -872,6 +879,12 @@ func (t *Wrapper) filterPacketOutboundToWireGuard(p *packet.Parsed, pc *peerConf return res, gro } } + if t.PreFilterPacketOutboundToWireGuardAppConnectorIntercept != nil { + if res := t.PreFilterPacketOutboundToWireGuardAppConnectorIntercept(p, t); res.IsDrop() { + // Handled by userspaceEngine's configured hook for Connectors 2025 app connectors. + return res, gro + } + } // If the outbound packet is to a jailed peer, use our jailed peer // packet filter. @@ -1234,6 +1247,13 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook pa return filter.Drop, gro } + if t.PostFilterPacketInboundFromWireGuardAppConnector != nil { + if res := t.PostFilterPacketInboundFromWireGuardAppConnector(p, t); res.IsDrop() { + // Handled by userspaceEngine's configured hook for Connectors 2025 app connectors. + return res, gro + } + } + if t.PostFilterPacketInboundFromWireGuard != nil { var res filter.Response res, gro = t.PostFilterPacketInboundFromWireGuard(p, t, gro) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index e69712061f5c9..245ce421fbe5a 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -143,8 +143,9 @@ type userspaceEngine struct { trimmedNodes map[key.NodePublic]bool // set of node keys of peers currently excluded from wireguard config sentActivityAt map[netip.Addr]*mono.Time // value is accessed atomically destIPActivityFuncs map[netip.Addr]func() - lastStatusPollTime mono.Time // last time we polled the engine status - reconfigureVPN func() error // or nil + lastStatusPollTime mono.Time // last time we polled the engine status + reconfigureVPN func() error // or nil + conn25PacketHooks Conn25PacketHooks // or nil mu sync.Mutex // guards following; see lock order comment below netMap *netmap.NetworkMap // or nil @@ -175,6 +176,19 @@ type BIRDClient interface { Close() error } +// Conn25PacketHooks are hooks for Connectors 2025 app connectors. 
+// They are meant to be wired into to corresponding hooks in the +// [tstun.Wrapper]. They may modify the packet (e.g., NAT), or drop +// invalid app connector traffic. +type Conn25PacketHooks interface { + // HandlePacketsFromTunDevice sends packets originating from the tun device + // for further Connectors 2025 app connectors processing. + HandlePacketsFromTunDevice(*packet.Parsed) filter.Response + // HandlePacketsFromWireguard sends packets originating from WireGuard + // for further Connectors 2025 app connectors processing. + HandlePacketsFromWireGuard(*packet.Parsed) filter.Response +} + // Config is the engine configuration. type Config struct { // Tun is the device used by the Engine to exchange packets with @@ -247,6 +261,10 @@ type Config struct { // TODO(creachadair): As of 2025-03-19 this is optional, but is intended to // become required non-nil. EventBus *eventbus.Bus + + // Conn25PacketHooks, if non-nil, is used to hook packets for Connectors 2025 + // app connector handling logic. + Conn25PacketHooks Conn25PacketHooks } // NewFakeUserspaceEngine returns a new userspace engine for testing. 
@@ -348,19 +366,20 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } e := &userspaceEngine{ - eventBus: conf.EventBus, - timeNow: mono.Now, - logf: logf, - reqCh: make(chan struct{}, 1), - waitCh: make(chan struct{}), - tundev: tsTUNDev, - router: rtr, - dialer: conf.Dialer, - confListenPort: conf.ListenPort, - birdClient: conf.BIRDClient, - controlKnobs: conf.ControlKnobs, - reconfigureVPN: conf.ReconfigureVPN, - health: conf.HealthTracker, + eventBus: conf.EventBus, + timeNow: mono.Now, + logf: logf, + reqCh: make(chan struct{}, 1), + waitCh: make(chan struct{}), + tundev: tsTUNDev, + router: rtr, + dialer: conf.Dialer, + confListenPort: conf.ListenPort, + birdClient: conf.BIRDClient, + controlKnobs: conf.ControlKnobs, + reconfigureVPN: conf.ReconfigureVPN, + health: conf.HealthTracker, + conn25PacketHooks: conf.Conn25PacketHooks, } if e.birdClient != nil { @@ -434,6 +453,16 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } e.tundev.PreFilterPacketOutboundToWireGuardEngineIntercept = e.handleLocalPackets + if e.conn25PacketHooks != nil { + e.tundev.PreFilterPacketOutboundToWireGuardAppConnectorIntercept = func(p *packet.Parsed, _ *tstun.Wrapper) filter.Response { + return e.conn25PacketHooks.HandlePacketsFromTunDevice(p) + } + + e.tundev.PostFilterPacketInboundFromWireGuardAppConnector = func(p *packet.Parsed, _ *tstun.Wrapper) filter.Response { + return e.conn25PacketHooks.HandlePacketsFromWireGuard(p) + } + } + if buildfeatures.HasDebug && envknob.BoolDefaultTrue("TS_DEBUG_CONNECT_FAILURES") { if e.tundev.PreFilterPacketInboundFromWireGuard != nil { return nil, errors.New("unexpected PreFilterIn already set") From d26d3fcb95b75e9fdc3acb53529e97a1a14cc3c6 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 9 Feb 2026 13:25:07 -0800 Subject: [PATCH 0955/1093] .github/workflows: add macos runner Fixes #18118 Change-Id: I118fcc6537af9ccbdc7ce6b78134e8059b0b5ccf Signed-off-by: Brad 
Fitzpatrick --- .github/workflows/test.yml | 62 ++++++++++++++++++++++++++++++++++++- logtail/filch/filch_test.go | 14 +++++++++ ssh/tailssh/tailssh_test.go | 3 ++ 3 files changed, 78 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a6906e53ef680..152ef7bce9008 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -300,6 +300,63 @@ jobs: working-directory: src run: ./tool/go version + macos: + runs-on: macos-latest + needs: gomod-cache + steps: + - name: checkout + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + path: src + - name: Restore Go module cache + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + with: + path: gomodcache + key: ${{ needs.gomod-cache.outputs.cache-key }} + enableCrossOsArchive: true + - name: Restore Cache + id: restore-cache + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + with: + path: ~/Library/Caches/go-build + key: ${{ runner.os }}-go-test-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} + restore-keys: | + ${{ runner.os }}-go-test-${{ hashFiles('**/go.sum') }}-${{ github.job }}- + ${{ runner.os }}-go-test-${{ hashFiles('**/go.sum') }}- + ${{ runner.os }}-go-test- + - name: build test wrapper + working-directory: src + run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper + - name: test all + working-directory: src + run: PATH=$PWD/tool:$PATH /tmp/testwrapper ./... + - name: check that no tracked files changed + working-directory: src + run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1) + - name: check that no new files were added + working-directory: src + run: | + # Note: The "error: pathspec..." you see below is normal! + # In the success case in which there are no new untracked files, + # git ls-files complains about the pathspec not matching anything. + # That's OK. 
It's not worth the effort to suppress. Please ignore it. + if git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*' + then + echo "Build/test created untracked files in the repo (file names above)." + exit 1 + fi + - name: Tidy cache + working-directory: src + run: | + find $(./tool/go env GOCACHE) -type f -mmin +90 -delete + - name: Save Cache + # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. + if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' + uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + with: + path: ~/Library/Caches/go-build + key: ${{ runner.os }}-go-test-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} + privileged: needs: gomod-cache runs-on: ubuntu-24.04 @@ -851,10 +908,11 @@ jobs: notify_slack: if: always() # Any of these jobs failing causes a slack notification. - needs: + needs: - android - test - windows + - macos - vm - cross - ios @@ -900,6 +958,7 @@ jobs: - android - test - windows + - macos - vm - cross - ios @@ -949,6 +1008,7 @@ jobs: - check_mergeability_strict - test - windows + - macos - vm - wasm - fuzz diff --git a/logtail/filch/filch_test.go b/logtail/filch/filch_test.go index 2538233cfd84c..f2f9e9e3bcd6b 100644 --- a/logtail/filch/filch_test.go +++ b/logtail/filch/filch_test.go @@ -5,6 +5,7 @@ package filch import ( "bytes" + "crypto/sha256" "encoding/json" "fmt" "io" @@ -120,7 +121,19 @@ func setupStderr(t *testing.T) { tstest.Replace(t, &os.Stderr, pipeW) } +func skipDarwin(t testing.TB) { + if runtime.GOOS != "darwin" { + return + } + src := must.Get(os.ReadFile("filch.go")) + if fmt.Sprintf("%x", sha256.Sum256(src)) != "a32da5e22034823c19ac7f29960e3646f540d67f85a0028832cab1f1557fc693" { + t.Errorf("filch.go has changed since this test was skipped; please delete this skip") + } + t.Skip("skipping known failing test on darwin; fixed in progress by 
https://github.com/tailscale/tailscale/pull/18660") +} + func TestConcurrentWriteAndRead(t *testing.T) { + skipDarwin(t) if replaceStderrSupportedForTest { setupStderr(t) } @@ -283,6 +296,7 @@ func TestMaxLineSize(t *testing.T) { } func TestMaxFileSize(t *testing.T) { + skipDarwin(t) if replaceStderrSupportedForTest { t.Run("ReplaceStderr:true", func(t *testing.T) { testMaxFileSize(t, true) }) } diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index f91cbafe72213..44db0cc000beb 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -495,6 +495,9 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS) } + if runtime.GOOS == "darwin" && cibuild.On() { + t.Skipf("this fails on CI on macOS; see https://github.com/tailscale/tailscale/issues/7707") + } var handler http.HandlerFunc recordingServer := mockRecordingServer(t, func(w http.ResponseWriter, r *http.Request) { From 770bf000de965697ae3a194448994f015586c509 Mon Sep 17 00:00:00 2001 From: Aaron Klotz Date: Mon, 9 Feb 2026 16:16:44 -0700 Subject: [PATCH 0956/1093] tool/gocross: replace use of Start-Process -Wait flag with WaitForExit -Wait does not just wait for the created process; it waits for the entire process tree rooted at that process! This can cause the shell to wait indefinitely if something in that tree fired up any background processes. Instead we call WaitForExit on the returned process. 
Updates https://github.com/tailscale/corp/issues/29940 Signed-off-by: Aaron Klotz --- tool/gocross/gocross-wrapper.ps1 | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tool/gocross/gocross-wrapper.ps1 b/tool/gocross/gocross-wrapper.ps1 index df00d36641ad7..23bd6eb2771dd 100644 --- a/tool/gocross/gocross-wrapper.ps1 +++ b/tool/gocross/gocross-wrapper.ps1 @@ -190,7 +190,8 @@ $bootstrapScriptBlock = { $goBuildEnv['GOROOT'] = $null $procExe = Join-Path $toolchain 'bin' 'go.exe' -Resolve - $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $goBuildEnv -ArgumentList 'build', '-o', $gocrossPath, "-ldflags=-X=tailscale.com/version.gitCommitStamp=$wantVer", 'tailscale.com/tool/gocross' -NoNewWindow -Wait -PassThru + $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $goBuildEnv -ArgumentList 'build', '-o', $gocrossPath, "-ldflags=-X=tailscale.com/version.gitCommitStamp=$wantVer", 'tailscale.com/tool/gocross' -NoNewWindow -PassThru + $proc.WaitForExit() if ($proc.ExitCode -ne 0) { throw 'error building gocross' } @@ -222,10 +223,12 @@ if ($Env:TS_USE_GOCROSS -ne '1') { } $procExe = Join-Path $toolchain 'bin' 'go.exe' -Resolve - $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $execEnv -ArgumentList $argList -NoNewWindow -Wait -PassThru + $proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $execEnv -ArgumentList $argList -NoNewWindow -PassThru + $proc.WaitForExit() exit $proc.ExitCode } $procExe = Join-Path $repoRoot 'gocross.exe' -Resolve -$proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $execEnv -ArgumentList $argList -NoNewWindow -Wait -PassThru +$proc = Start-Process -FilePath $procExe -WorkingDirectory $repoRoot -Environment $execEnv -ArgumentList $argList -NoNewWindow -PassThru +$proc.WaitForExit() exit $proc.ExitCode From e4008d1994db0ea71d1c64f3d5d8db9e67f5d427 Mon Sep 17 00:00:00 2001 
From: BeckyPauley <64131207+BeckyPauley@users.noreply.github.com> Date: Tue, 10 Feb 2026 10:19:06 +0000 Subject: [PATCH 0957/1093] cmd/containerboot: fix error handling for egress (#18657) Fixes #18631 Signed-off-by: Becky Pauley --- cmd/containerboot/egressservices.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/containerboot/egressservices.go b/cmd/containerboot/egressservices.go index 6526c255eeed7..e60d65c047f95 100644 --- a/cmd/containerboot/egressservices.go +++ b/cmd/containerboot/egressservices.go @@ -478,7 +478,8 @@ func (ep *egressProxy) tailnetTargetIPsForSvc(svc egressservices.Config, n ipn.N } egressAddrs, err := resolveTailnetFQDN(n.NetMap, svc.TailnetTarget.FQDN) if err != nil { - return nil, fmt.Errorf("error fetching backend addresses for %q: %w", svc.TailnetTarget.FQDN, err) + log.Printf("error fetching backend addresses for %q: %v", svc.TailnetTarget.FQDN, err) + return addrs, nil } if len(egressAddrs) == 0 { log.Printf("tailnet target %q does not have any backend addresses, skipping", svc.TailnetTarget.FQDN) From 086968c15b5b000f3533ab981ec0201678ca78f3 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Tue, 10 Feb 2026 09:29:14 -0500 Subject: [PATCH 0958/1093] net/dns, ipn/local: skip health warnings in dns forwarder when accept-dns is false (#18572) fixes tailscale/tailscale#18436 Queries can still make their way to the forwarder when accept-dns is disabled. Since we have not configured the forwarder if --accept-dns is false, this errors out (correctly) but it also generates a persistent health warning. This forwards the Pref setting all the way through the stack to the forwarder so that we can be more judicious about when we decide that the forward path is unintentionally missing, vs simply not configured. Testing: tailscale set --accept-dns=false. (or from the GUI) dig @100.100.100.100 example.com tailscale status No dns related health warnings should be surfaced. 
Signed-off-by: Jonathan Nobels --- ipn/ipnlocal/dnsconfig_test.go | 20 ++++++++++++------- ipn/ipnlocal/node_backend.go | 5 +++-- ipn/ipnlocal/state_test.go | 35 ++++++++++++++++++++-------------- net/dns/config.go | 4 ++++ net/dns/dns_clone.go | 1 + net/dns/dns_view.go | 6 ++++++ net/dns/manager.go | 1 + net/dns/resolver/forwarder.go | 21 ++++++++++++++++---- net/dns/resolver/tsdns.go | 5 ++++- 9 files changed, 70 insertions(+), 28 deletions(-) diff --git a/ipn/ipnlocal/dnsconfig_test.go b/ipn/ipnlocal/dnsconfig_test.go index ab00b47404216..9d30029ff8659 100644 --- a/ipn/ipnlocal/dnsconfig_test.go +++ b/ipn/ipnlocal/dnsconfig_test.go @@ -219,7 +219,8 @@ func TestDNSConfigForNetmap(t *testing.T) { CorpDNS: true, }, want: &dns.Config{ - Hosts: map[dnsname.FQDN][]netip.Addr{}, + AcceptDNS: true, + Hosts: map[dnsname.FQDN][]netip.Addr{}, Routes: map[dnsname.FQDN][]*dnstype.Resolver{ "0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.": nil, "100.100.in-addr.arpa.": nil, @@ -319,7 +320,8 @@ func TestDNSConfigForNetmap(t *testing.T) { CorpDNS: true, }, want: &dns.Config{ - Hosts: map[dnsname.FQDN][]netip.Addr{}, + AcceptDNS: true, + Hosts: map[dnsname.FQDN][]netip.Addr{}, DefaultResolvers: []*dnstype.Resolver{ {Addr: "8.8.8.8"}, }, @@ -342,8 +344,9 @@ func TestDNSConfigForNetmap(t *testing.T) { ExitNodeID: "some-id", }, want: &dns.Config{ - Hosts: map[dnsname.FQDN][]netip.Addr{}, - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + AcceptDNS: true, + Hosts: map[dnsname.FQDN][]netip.Addr{}, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, DefaultResolvers: []*dnstype.Resolver{ {Addr: "8.8.4.4"}, }, @@ -362,8 +365,9 @@ func TestDNSConfigForNetmap(t *testing.T) { CorpDNS: true, }, want: &dns.Config{ - Hosts: map[dnsname.FQDN][]netip.Addr{}, - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + AcceptDNS: true, + Hosts: map[dnsname.FQDN][]netip.Addr{}, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, }, }, { @@ -420,6 +424,7 @@ func TestDNSConfigForNetmap(t *testing.T) { CorpDNS: true, }, 
want: &dns.Config{ + AcceptDNS: true, Hosts: map[dnsname.FQDN][]netip.Addr{ "a.": ips("100.101.101.101"), "p1.": ips("100.102.0.1"), @@ -466,7 +471,8 @@ func TestDNSConfigForNetmap(t *testing.T) { CorpDNS: true, }, want: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, Hosts: map[dnsname.FQDN][]netip.Addr{ "a.": ips("100.101.101.101"), "p1.": ips("100.102.0.1"), diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 170dae9569c8c..929ef34a48881 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -696,8 +696,9 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. } dcfg := &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: map[dnsname.FQDN][]netip.Addr{}, + AcceptDNS: prefs.CorpDNS(), + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: map[dnsname.FQDN][]netip.Addr{}, } // selfV6Only is whether we only have IPv6 addresses ourselves. 
diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 97c2c4d8f9daf..ed6ad06ef191e 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1300,8 +1300,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Routes: routesWithQuad100(), }, wantDNSCfg: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: hostsFor(node1), + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), }, }, { @@ -1356,8 +1357,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Routes: routesWithQuad100(), }, wantDNSCfg: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: hostsFor(node2), + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node2), }, }, { @@ -1404,8 +1406,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Routes: routesWithQuad100(), }, wantDNSCfg: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: hostsFor(node1), + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), }, }, { @@ -1436,8 +1439,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Routes: routesWithQuad100(), }, wantDNSCfg: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: hostsFor(node3), + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node3), }, }, { @@ -1500,8 +1504,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Routes: routesWithQuad100(), }, wantDNSCfg: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: hostsFor(node1), + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), }, }, { @@ -1529,8 +1534,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Routes: routesWithQuad100(), }, wantDNSCfg: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: hostsFor(node1), + AcceptDNS: true, + Routes: 
map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), }, }, { @@ -1560,8 +1566,9 @@ func TestEngineReconfigOnStateChange(t *testing.T) { Routes: routesWithQuad100(), }, wantDNSCfg: &dns.Config{ - Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, - Hosts: hostsFor(node1), + AcceptDNS: true, + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), }, }, { diff --git a/net/dns/config.go b/net/dns/config.go index f776d1af04443..47fac83c2df48 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -26,6 +26,10 @@ import ( // Config is a DNS configuration. type Config struct { + // AcceptDNS true if [Prefs.CorpDNS] is enabled (or --accept-dns=true). + // This should be used for error handling and health reporting + // purposes only. + AcceptDNS bool // DefaultResolvers are the DNS resolvers to use for DNS names // which aren't covered by more specific per-domain routes below. // If empty, the OS's default resolvers (the ones that predate diff --git a/net/dns/dns_clone.go b/net/dns/dns_clone.go index ea5e5299beb7d..291f96ec2b51f 100644 --- a/net/dns/dns_clone.go +++ b/net/dns/dns_clone.go @@ -51,6 +51,7 @@ func (src *Config) Clone() *Config { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _ConfigCloneNeedsRegeneration = Config(struct { + AcceptDNS bool DefaultResolvers []*dnstype.Resolver Routes map[dnsname.FQDN][]*dnstype.Resolver SearchDomains []dnsname.FQDN diff --git a/net/dns/dns_view.go b/net/dns/dns_view.go index 313621c86e85b..70cb89dcaf128 100644 --- a/net/dns/dns_view.go +++ b/net/dns/dns_view.go @@ -87,6 +87,11 @@ func (v *ConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { return nil } +// AcceptDNS true if [Prefs.CorpDNS] is enabled (or --accept-dns=true). +// This should be used for error handling and health reporting +// purposes only. 
+func (v ConfigView) AcceptDNS() bool { return v.ж.AcceptDNS } + // DefaultResolvers are the DNS resolvers to use for DNS names // which aren't covered by more specific per-domain routes below. // If empty, the OS's default resolvers (the ones that predate @@ -139,6 +144,7 @@ func (v ConfigView) Equal(v2 ConfigView) bool { return v.ж.Equal(v2.ж) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _ConfigViewNeedsRegeneration = Config(struct { + AcceptDNS bool DefaultResolvers []*dnstype.Resolver Routes map[dnsname.FQDN][]*dnstype.Resolver SearchDomains []dnsname.FQDN diff --git a/net/dns/manager.go b/net/dns/manager.go index faca1053cf852..c052055654f1d 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -292,6 +292,7 @@ func (m *Manager) compileConfig(cfg Config) (rcfg resolver.Config, ocfg OSConfig // the OS. rcfg.Hosts = cfg.Hosts rcfg.SubdomainHosts = cfg.SubdomainHosts + rcfg.AcceptDNS = cfg.AcceptDNS routes := map[dnsname.FQDN][]*dnstype.Resolver{} // assigned conditionally to rcfg.Routes below. var propagateHostsToOS bool for suffix, resolvers := range cfg.Routes { diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 189911ee24c0a..6fec32d6a2685 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -323,6 +323,12 @@ type forwarder struct { // /etc/resolv.conf is missing/corrupt, and the peerapi ExitDNS stub // resolver lookup. cloudHostFallback []resolverAndDelay + + // acceptDNS tracks the CorpDNS pref (--accept-dns) + // This lets us skip health warnings if the forwarder receives inbound + // queries directly - but we didn't configure it with any upstream resolvers. + // That's an error, but not a health error if the user has disabled CorpDNS. 
+ acceptDNS bool } func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, health *health.Tracker, knobs *controlknobs.Knobs) *forwarder { @@ -434,7 +440,7 @@ func cloudResolvers() []resolverAndDelay { // Resolver.SetConfig on reconfig. // // The memory referenced by routesBySuffix should not be modified. -func (f *forwarder) setRoutes(routesBySuffix map[dnsname.FQDN][]*dnstype.Resolver) { +func (f *forwarder) setRoutes(routesBySuffix map[dnsname.FQDN][]*dnstype.Resolver, acceptDNS bool) { routes := make([]route, 0, len(routesBySuffix)) cloudHostFallback := cloudResolvers() @@ -468,6 +474,7 @@ func (f *forwarder) setRoutes(routesBySuffix map[dnsname.FQDN][]*dnstype.Resolve f.mu.Lock() defer f.mu.Unlock() + f.acceptDNS = acceptDNS f.routes = routes f.cloudHostFallback = cloudHostFallback } @@ -1056,7 +1063,9 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo resolvers = f.resolvers(domain) if len(resolvers) == 0 { metricDNSFwdErrorNoUpstream.Add(1) - f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: ""}) + if f.acceptDNS { + f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: ""}) + } f.logf("no upstream resolvers set, returning SERVFAIL") res, err := servfailResponse(query) @@ -1156,7 +1165,9 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo for _, rr := range resolvers { resolverAddrs = append(resolverAddrs, rr.name.Addr) } - f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) + if f.acceptDNS { + f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) + } case responseChan <- res: if f.verboseFwd { f.logf("forwarder response(%d, %v, %d) = %d, %v", fq.txid, typ, len(domain), len(res.bs), firstErr) @@ -1181,7 +1192,9 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, 
query packet, respo for _, rr := range resolvers { resolverAddrs = append(resolverAddrs, rr.name.Addr) } - f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) + if f.acceptDNS { + f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) + } return fmt.Errorf("waiting for response or error from %v: %w", resolverAddrs, ctx.Err()) } } diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 5b44f6c2d586f..d0601de7bfe25 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -70,6 +70,9 @@ type packet struct { // Else forward the query to the most specific matching entry in Routes. // Else return SERVFAIL. type Config struct { + // True if [Prefs.CorpDNS] is true or --accept-dns=true was specified. + // This should only be used for error handling and health reporting. + AcceptDNS bool // Routes is a map of DNS name suffix to the resolvers to use for // queries within that suffix. // Queries only match the most specific suffix. @@ -279,7 +282,7 @@ func (r *Resolver) SetConfig(cfg Config) error { } } - r.forwarder.setRoutes(cfg.Routes) + r.forwarder.setRoutes(cfg.Routes, cfg.AcceptDNS) r.mu.Lock() defer r.mu.Unlock() From dc1d811d4838cb73216244ecaf7be923f005548e Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 8 Feb 2026 18:07:33 +0000 Subject: [PATCH 0959/1093] magicsock, ipnlocal: revert eventbus-based node/filter updates, remove Synchronize hack Restore synchronous method calls from LocalBackend to magicsock.Conn for node views, filter, and delta mutations. The eventbus delivery introduced in 8e6f63cf1 was invalid for these updates because subsequent operations in the same call chain depend on magicsock already having the current state. The Synchronize/settleEventBus workaround was fragile and kept requiring more workarounds and introducing new mystery bugs. 
Since eventbus was added, we've learned more about when to use it, and this wasn't one of those cases. We can take another swing at using eventbus for netmap changes in a future change.
See https://github.com/tailscale/tailscale/issues/16369 - b.settleEventBus() - defer b.settleEventBus() - b.mu.Lock() defer b.mu.Unlock() cn := b.currentNode() cn.UpdateNetmapDelta(muts) + if ms, ok := b.sys.MagicSock.GetOK(); ok { + ms.UpdateNetmapDelta(muts) + } + // If auto exit nodes are enabled and our exit node went offline, // we need to schedule picking a new one. // TODO(nickkhyl): move the auto exit node logic to a feature package. @@ -2440,7 +2424,6 @@ func (b *LocalBackend) initOnce() { // actually a supported operation (it should be, but it's very unclear // from the following whether or not that is a safe transition). func (b *LocalBackend) Start(opts ipn.Options) error { - defer b.settleEventBus() // with b.mu unlocked b.mu.Lock() defer b.mu.Unlock() return b.startLocked(opts) @@ -2936,6 +2919,9 @@ func packetFilterPermitsUnlockedNodes(peers map[tailcfg.NodeID]tailcfg.NodeView, func (b *LocalBackend) setFilter(f *filter.Filter) { b.currentNode().setFilter(f) b.e.SetFilter(f) + if ms, ok := b.sys.MagicSock.GetOK(); ok { + ms.SetFilter(f) + } } var removeFromDefaultRoute = []netip.Prefix{ @@ -4352,7 +4338,6 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip if mp.SetsInternal() { return ipn.PrefsView{}, errors.New("can't set Internal fields") } - defer b.settleEventBus() b.mu.Lock() defer b.mu.Unlock() @@ -6264,6 +6249,13 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { login = cmp.Or(profileFromView(nm.UserProfiles[nm.User()]).LoginName, "") } b.currentNode().SetNetMap(nm) + if ms, ok := b.sys.MagicSock.GetOK(); ok { + if nm != nil { + ms.SetNetworkMap(nm.SelfNode, nm.Peers) + } else { + ms.SetNetworkMap(tailcfg.NodeView{}, nil) + } + } if login != b.activeLogin { b.logf("active login: %v", login) b.activeLogin = login diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 929ef34a48881..b70d71cb934f2 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go 
@@ -31,7 +31,6 @@ import ( "tailscale.com/util/mak" "tailscale.com/util/slicesx" "tailscale.com/wgengine/filter" - "tailscale.com/wgengine/magicsock" ) // nodeBackend is node-specific [LocalBackend] state. It is usually the current node. @@ -79,9 +78,6 @@ type nodeBackend struct { // initialized once and immutable eventClient *eventbus.Client - filterPub *eventbus.Publisher[magicsock.FilterUpdate] - nodeViewsPub *eventbus.Publisher[magicsock.NodeViewsUpdate] - nodeMutsPub *eventbus.Publisher[magicsock.NodeMutationsUpdate] derpMapViewPub *eventbus.Publisher[tailcfg.DERPMapView] // TODO(nickkhyl): maybe use sync.RWMutex? @@ -122,11 +118,7 @@ func newNodeBackend(ctx context.Context, logf logger.Logf, bus *eventbus.Bus) *n // Default filter blocks everything and logs nothing. noneFilter := filter.NewAllowNone(logger.Discard, &netipx.IPSet{}) nb.filterAtomic.Store(noneFilter) - nb.filterPub = eventbus.Publish[magicsock.FilterUpdate](nb.eventClient) - nb.nodeViewsPub = eventbus.Publish[magicsock.NodeViewsUpdate](nb.eventClient) - nb.nodeMutsPub = eventbus.Publish[magicsock.NodeMutationsUpdate](nb.eventClient) nb.derpMapViewPub = eventbus.Publish[tailcfg.DERPMapView](nb.eventClient) - nb.filterPub.Publish(magicsock.FilterUpdate{Filter: nb.filterAtomic.Load()}) return nb } @@ -436,15 +428,11 @@ func (nb *nodeBackend) SetNetMap(nm *netmap.NetworkMap) { nb.netMap = nm nb.updateNodeByAddrLocked() nb.updatePeersLocked() - nv := magicsock.NodeViewsUpdate{} if nm != nil { - nv.SelfNode = nm.SelfNode - nv.Peers = nm.Peers nb.derpMapViewPub.Publish(nm.DERPMap.View()) } else { nb.derpMapViewPub.Publish(tailcfg.DERPMapView{}) } - nb.nodeViewsPub.Publish(nv) } func (nb *nodeBackend) updateNodeByAddrLocked() { @@ -520,9 +508,6 @@ func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo // call (e.g. 
its endpoints + online status both change) var mutableNodes map[tailcfg.NodeID]*tailcfg.Node - update := magicsock.NodeMutationsUpdate{ - Mutations: make([]netmap.NodeMutation, 0, len(muts)), - } for _, m := range muts { n, ok := mutableNodes[m.NodeIDBeingMutated()] if !ok { @@ -533,14 +518,12 @@ func (nb *nodeBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo } n = nv.AsStruct() mak.Set(&mutableNodes, nv.ID(), n) - update.Mutations = append(update.Mutations, m) } m.Apply(n) } for nid, n := range mutableNodes { nb.peers[nid] = n.View() } - nb.nodeMutsPub.Publish(update) return true } @@ -562,7 +545,6 @@ func (nb *nodeBackend) filter() *filter.Filter { func (nb *nodeBackend) setFilter(f *filter.Filter) { nb.filterAtomic.Store(f) - nb.filterPub.Publish(magicsock.FilterUpdate{Filter: f}) } func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, versionOS string) *dns.Config { diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index ed6ad06ef191e..39796ec325367 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -1600,11 +1600,6 @@ func TestEngineReconfigOnStateChange(t *testing.T) { tt.steps(t, lb, cc) } - // TODO(bradfitz): this whole event bus settling thing - // should be unnecessary once the bogus uses of eventbus - // are removed. 
(https://github.com/tailscale/tailscale/issues/16369) - lb.settleEventBus() - if gotState := lb.State(); gotState != tt.wantState { t.Errorf("State: got %v; want %v", gotState, tt.wantState) } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index d6f411f4ac2dc..b2852d2e2fdbc 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -177,9 +177,6 @@ type Conn struct { connCtxCancel func() // closes connCtx donec <-chan struct{} // connCtx.Done()'s to avoid context.cancelCtx.Done()'s mutex per call - // A publisher for synchronization points to ensure correct ordering of - // config changes between magicsock and wireguard. - syncPub *eventbus.Publisher[syncPoint] allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] portUpdatePub *eventbus.Publisher[router.PortUpdate] tsmpDiscoKeyAvailablePub *eventbus.Publisher[NewDiscoKeyAvailable] @@ -362,11 +359,11 @@ type Conn struct { netInfoLast *tailcfg.NetInfo derpMap *tailcfg.DERPMap // nil (or zero regions/nodes) means DERP is disabled - self tailcfg.NodeView // from last onNodeViewsUpdate - peers views.Slice[tailcfg.NodeView] // from last onNodeViewsUpdate, sorted by Node.ID; Note: [netmap.NodeMutation]'s rx'd in onNodeMutationsUpdate are never applied - filt *filter.Filter // from last onFilterUpdate + self tailcfg.NodeView // from last SetNetworkMap + peers views.Slice[tailcfg.NodeView] // from last SetNetworkMap, sorted by Node.ID; Note: [netmap.NodeMutation]'s rx'd in UpdateNetmapDelta are never applied + filt *filter.Filter // from last SetFilter relayClientEnabled bool // whether we can allocate UDP relay endpoints on UDP relay servers or receive CallMeMaybeVia messages from peers - lastFlags debugFlags // at time of last onNodeViewsUpdate + lastFlags debugFlags // at time of last SetNetworkMap privateKey key.NodePrivate // WireGuard private key for this node everHadKey bool // whether we ever had a non-zero private key myDerp int // nearest DERP 
region ID; 0 means none/unknown @@ -521,47 +518,6 @@ func (o *Options) derpActiveFunc() func() { return o.DERPActiveFunc } -// NodeViewsUpdate represents an update event of [tailcfg.NodeView] for all -// nodes. This event is published over an [eventbus.Bus]. It may be published -// with an invalid SelfNode, and/or zero/nil Peers. [magicsock.Conn] is the sole -// subscriber as of 2025-06. If you are adding more subscribers consider moving -// this type out of magicsock. -type NodeViewsUpdate struct { - SelfNode tailcfg.NodeView - Peers []tailcfg.NodeView // sorted by Node.ID -} - -// NodeMutationsUpdate represents an update event of one or more -// [netmap.NodeMutation]. This event is published over an [eventbus.Bus]. -// [magicsock.Conn] is the sole subscriber as of 2025-06. If you are adding more -// subscribers consider moving this type out of magicsock. -type NodeMutationsUpdate struct { - Mutations []netmap.NodeMutation -} - -// FilterUpdate represents an update event for a [*filter.Filter]. This event is -// signaled over an [eventbus.Bus]. [magicsock.Conn] is the sole subscriber as -// of 2025-06. If you are adding more subscribers consider moving this type out -// of magicsock. -type FilterUpdate struct { - *filter.Filter -} - -// syncPoint is an event published over an [eventbus.Bus] by [Conn.Synchronize]. -// It serves as a synchronization point, allowing to wait until magicsock -// has processed all pending events. -type syncPoint chan struct{} - -// Wait blocks until [syncPoint.Signal] is called. -func (s syncPoint) Wait() { - <-s -} - -// Signal signals the sync point, unblocking the [syncPoint.Wait] call. -func (s syncPoint) Signal() { - close(s) -} - // UDPRelayAllocReq represents a [*disco.AllocateUDPRelayEndpointRequest] // reception event. This is signaled over an [eventbus.Bus] from // [magicsock.Conn] towards [relayserver.extension]. 
@@ -654,21 +610,6 @@ func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { } } -// Synchronize waits for all [eventbus] events published -// prior to this call to be processed by the receiver. -func (c *Conn) Synchronize() { - if c.syncPub == nil { - // Eventbus is not used; no need to synchronize (in certain tests). - return - } - sp := syncPoint(make(chan struct{})) - c.syncPub.Publish(sp) - select { - case <-sp: - case <-c.donec: - } -} - // NewConn creates a magic Conn listening on opts.Port. // As the set of possible endpoints for a Conn changes, the // callback opts.EndpointsFunc is called. @@ -694,18 +635,10 @@ func NewConn(opts Options) (*Conn, error) { // NewConn otherwise published events can be missed. ec := c.eventBus.Client("magicsock.Conn") c.eventClient = ec - c.syncPub = eventbus.Publish[syncPoint](ec) c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](ec) c.portUpdatePub = eventbus.Publish[router.PortUpdate](ec) c.tsmpDiscoKeyAvailablePub = eventbus.Publish[NewDiscoKeyAvailable](ec) eventbus.SubscribeFunc(ec, c.onPortMapChanged) - eventbus.SubscribeFunc(ec, c.onFilterUpdate) - eventbus.SubscribeFunc(ec, c.onNodeViewsUpdate) - eventbus.SubscribeFunc(ec, c.onNodeMutationsUpdate) - eventbus.SubscribeFunc(ec, func(sp syncPoint) { - c.dlogf("magicsock: received sync point after reconfig") - sp.Signal() - }) eventbus.SubscribeFunc(ec, c.onUDPRelayAllocResp) c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) @@ -2907,11 +2840,12 @@ func capVerIsRelayCapable(version tailcfg.CapabilityVersion) bool { return version >= 121 } -// onFilterUpdate is called when a [FilterUpdate] is received over the -// [eventbus.Bus]. -func (c *Conn) onFilterUpdate(f FilterUpdate) { +// SetFilter updates the packet filter used by the connection. +// It must be called synchronously from the caller's goroutine to ensure +// magicsock has the current filter before subsequent operations proceed. 
+func (c *Conn) SetFilter(f *filter.Filter) { c.mu.Lock() - c.filt = f.Filter + c.filt = f self := c.self peers := c.peers relayClientEnabled := c.relayClientEnabled @@ -2924,7 +2858,7 @@ func (c *Conn) onFilterUpdate(f FilterUpdate) { // The filter has changed, and we are operating as a relay server client. // Re-evaluate it in order to produce an updated relay server set. - c.updateRelayServersSet(f.Filter, self, peers) + c.updateRelayServersSet(f, self, peers) } // updateRelayServersSet iterates all peers and self, evaluating filt for each @@ -3015,21 +2949,24 @@ func (c *candidatePeerRelay) isValid() bool { return !c.nodeKey.IsZero() && !c.discoKey.IsZero() } -// onNodeViewsUpdate is called when a [NodeViewsUpdate] is received over the -// [eventbus.Bus]. -func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { - peersChanged := c.updateNodes(update) +// SetNetworkMap updates the network map with the given self node and peers. +// It must be called synchronously from the caller's goroutine to ensure +// magicsock has the current state before subsequent operations proceed. +// +// self may be invalid if there's no network map. 
+func (c *Conn) SetNetworkMap(self tailcfg.NodeView, peers []tailcfg.NodeView) { + peersChanged := c.updateNodes(self, peers) - relayClientEnabled := update.SelfNode.Valid() && - !update.SelfNode.HasCap(tailcfg.NodeAttrDisableRelayClient) && - !update.SelfNode.HasCap(tailcfg.NodeAttrOnlyTCP443) + relayClientEnabled := self.Valid() && + !self.HasCap(tailcfg.NodeAttrDisableRelayClient) && + !self.HasCap(tailcfg.NodeAttrOnlyTCP443) c.mu.Lock() relayClientChanged := c.relayClientEnabled != relayClientEnabled c.relayClientEnabled = relayClientEnabled filt := c.filt - self := c.self - peers := c.peers + selfView := c.self + peersView := c.peers isClosed := c.closed c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) @@ -3042,15 +2979,14 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { c.relayManager.handleRelayServersSet(nil) c.hasPeerRelayServers.Store(false) } else { - c.updateRelayServersSet(filt, self, peers) + c.updateRelayServersSet(filt, selfView, peersView) } } } -// updateNodes updates [Conn] to reflect the [tailcfg.NodeView]'s contained -// in update. It returns true if update.Peers was unequal to c.peers, otherwise -// false. -func (c *Conn) updateNodes(update NodeViewsUpdate) (peersChanged bool) { +// updateNodes updates [Conn] to reflect the given self node and peers. +// It reports whether the peers were changed from before. +func (c *Conn) updateNodes(self tailcfg.NodeView, peers []tailcfg.NodeView) (peersChanged bool) { c.mu.Lock() defer c.mu.Unlock() @@ -3059,11 +2995,11 @@ func (c *Conn) updateNodes(update NodeViewsUpdate) (peersChanged bool) { } priorPeers := c.peers - metricNumPeers.Set(int64(len(update.Peers))) + metricNumPeers.Set(int64(len(peers))) // Update c.self & c.peers regardless, before the following early return. 
- c.self = update.SelfNode - curPeers := views.SliceOf(update.Peers) + c.self = self + curPeers := views.SliceOf(peers) c.peers = curPeers // [debugFlags] are mutable in [Conn.SetSilentDisco] & @@ -3072,7 +3008,7 @@ func (c *Conn) updateNodes(update NodeViewsUpdate) (peersChanged bool) { // [controlknobs.Knobs] are simply self [tailcfg.NodeCapability]'s. They are // useful as a global view of notable feature toggles, but the magicsock // setters are completely unnecessary as we have the same values right here - // (update.SelfNode.Capabilities) at a time they are considered most + // (self.Capabilities) at a time they are considered most // up-to-date. // TODO: mutate [debugFlags] here instead of in various [Conn] setters. flags := c.debugFlagsLocked() @@ -3088,16 +3024,16 @@ func (c *Conn) updateNodes(update NodeViewsUpdate) (peersChanged bool) { c.lastFlags = flags - c.logf("[v1] magicsock: got updated network map; %d peers", len(update.Peers)) + c.logf("[v1] magicsock: got updated network map; %d peers", len(peers)) - entriesPerBuffer := debugRingBufferSize(len(update.Peers)) + entriesPerBuffer := debugRingBufferSize(len(peers)) // Try a pass of just upserting nodes and creating missing // endpoints. If the set of nodes is the same, this is an // efficient alloc-free update. If the set of nodes is different, // we'll fall through to the next pass, which allocates but can // handle full set updates. - for _, n := range update.Peers { + for _, n := range peers { if n.ID() == 0 { devPanicf("node with zero ID") continue @@ -3197,14 +3133,14 @@ func (c *Conn) updateNodes(update NodeViewsUpdate) (peersChanged bool) { c.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) } - // If the set of nodes changed since the last onNodeViewsUpdate, the + // If the set of nodes changed since the last SetNetworkMap, the // upsert loop just above made c.peerMap contain the union of the // old and new peers - which will be larger than the set from the // current netmap. 
If that happens, go through the allocful // deletion path to clean up moribund nodes. - if c.peerMap.nodeCount() != len(update.Peers) { + if c.peerMap.nodeCount() != len(peers) { keep := set.Set[key.NodePublic]{} - for _, n := range update.Peers { + for _, n := range peers { keep.Add(n.Key()) } c.peerMap.forEachEndpoint(func(ep *endpoint) { @@ -3739,13 +3675,15 @@ func simpleDur(d time.Duration) time.Duration { return d.Round(time.Minute) } -// onNodeMutationsUpdate is called when a [NodeMutationsUpdate] is received over -// the [eventbus.Bus]. Note: It does not apply these mutations to c.peers. -func (c *Conn) onNodeMutationsUpdate(update NodeMutationsUpdate) { +// UpdateNetmapDelta applies the given node mutations to the connection's peer +// state. It must be called synchronously from the caller's goroutine to ensure +// magicsock has the current state before subsequent operations proceed. +// Note: It does not apply these mutations to c.peers. +func (c *Conn) UpdateNetmapDelta(muts []netmap.NodeMutation) { c.mu.Lock() defer c.mu.Unlock() - for _, m := range update.Mutations { + for _, m := range muts { nodeID := m.NodeIDBeingMutated() ep, ok := c.peerMap.endpointForNodeID(nodeID) if !ok { diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 3b7ceeaa23323..5fa177b3bce8b 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -171,7 +171,7 @@ type magicStack struct { } // newMagicStack builds and initializes an idle magicsock and -// friends. You need to call conn.onNodeViewsUpdate and dev.Reconfig +// friends. You need to call conn.SetNetworkMap and dev.Reconfig // before anything interesting happens. 
func newMagicStack(t testing.TB, logf logger.Logf, ln nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack { privateKey := key.NewNode() @@ -346,13 +346,9 @@ func meshStacks(logf logger.Logf, mutateNetmap func(idx int, nm *netmap.NetworkM for i, m := range ms { nm := buildNetmapLocked(i) - nv := NodeViewsUpdate{ - SelfNode: nm.SelfNode, - Peers: nm.Peers, - } - m.conn.onNodeViewsUpdate(nv) - peerSet := make(set.Set[key.NodePublic], len(nv.Peers)) - for _, peer := range nv.Peers { + m.conn.SetNetworkMap(nm.SelfNode, nm.Peers) + peerSet := make(set.Set[key.NodePublic], len(nm.Peers)) + for _, peer := range nm.Peers { peerSet.Add(peer.Key()) } m.conn.UpdatePeers(peerSet) @@ -1388,16 +1384,14 @@ func addTestEndpoint(tb testing.TB, conn *Conn, sendConn net.PacketConn) (key.No // codepath. discoKey := key.DiscoPublicFromRaw32(mem.B([]byte{31: 1})) nodeKey := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 31: 0})) - conn.onNodeViewsUpdate(NodeViewsUpdate{ - Peers: nodeViews([]*tailcfg.Node{ - { - ID: 1, - Key: nodeKey, - DiscoKey: discoKey, - Endpoints: eps(sendConn.LocalAddr().String()), - }, - }), - }) + conn.SetNetworkMap(tailcfg.NodeView{}, nodeViews([]*tailcfg.Node{ + { + ID: 1, + Key: nodeKey, + DiscoKey: discoKey, + Endpoints: eps(sendConn.LocalAddr().String()), + }, + })) conn.SetPrivateKey(key.NodePrivateFromRaw32(mem.B([]byte{0: 1, 31: 0}))) _, err := conn.ParseEndpoint(nodeKey.UntypedHexString()) if err != nil { @@ -1581,7 +1575,7 @@ func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView { // doesn't change its disco key doesn't result in a broken state. 
// // https://github.com/tailscale/tailscale/issues/1391 -func TestOnNodeViewsUpdateChangingNodeKey(t *testing.T) { +func TestSetNetworkMapChangingNodeKey(t *testing.T) { conn := newTestConn(t) t.Cleanup(func() { conn.Close() }) var buf tstest.MemLogger @@ -1593,32 +1587,28 @@ func TestOnNodeViewsUpdateChangingNodeKey(t *testing.T) { nodeKey1 := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 2: '1', 31: 0})) nodeKey2 := key.NodePublicFromRaw32(mem.B([]byte{0: 'N', 1: 'K', 2: '2', 31: 0})) - conn.onNodeViewsUpdate(NodeViewsUpdate{ - Peers: nodeViews([]*tailcfg.Node{ - { - ID: 1, - Key: nodeKey1, - DiscoKey: discoKey, - Endpoints: eps("192.168.1.2:345"), - }, - }), - }) + conn.SetNetworkMap(tailcfg.NodeView{}, nodeViews([]*tailcfg.Node{ + { + ID: 1, + Key: nodeKey1, + DiscoKey: discoKey, + Endpoints: eps("192.168.1.2:345"), + }, + })) _, err := conn.ParseEndpoint(nodeKey1.UntypedHexString()) if err != nil { t.Fatal(err) } for range 3 { - conn.onNodeViewsUpdate(NodeViewsUpdate{ - Peers: nodeViews([]*tailcfg.Node{ - { - ID: 2, - Key: nodeKey2, - DiscoKey: discoKey, - Endpoints: eps("192.168.1.2:345"), - }, - }), - }) + conn.SetNetworkMap(tailcfg.NodeView{}, nodeViews([]*tailcfg.Node{ + { + ID: 2, + Key: nodeKey2, + DiscoKey: discoKey, + Endpoints: eps("192.168.1.2:345"), + }, + })) } de, ok := conn.peerMap.endpointForNodeKey(nodeKey2) @@ -1932,7 +1922,7 @@ func eps(s ...string) []netip.AddrPort { return eps } -func TestStressOnNodeViewsUpdate(t *testing.T) { +func TestStressSetNetworkMap(t *testing.T) { t.Parallel() conn := newTestConn(t) @@ -1988,9 +1978,7 @@ func TestStressOnNodeViewsUpdate(t *testing.T) { } } // Set the node views. - conn.onNodeViewsUpdate(NodeViewsUpdate{ - Peers: nodeViews(peers), - }) + conn.SetNetworkMap(tailcfg.NodeView{}, nodeViews(peers)) // Check invariants. 
if err := conn.peerMap.validate(); err != nil { t.Error(err) @@ -2113,10 +2101,10 @@ func TestRebindingUDPConn(t *testing.T) { } // https://github.com/tailscale/tailscale/issues/6680: don't ignore -// onNodeViewsUpdate calls when there are no peers. (A too aggressive fast path was +// SetNetworkMap calls when there are no peers. (A too aggressive fast path was // previously bailing out early, thinking there were no changes since all zero // peers didn't change, but the node views has non-peer info in it too we shouldn't discard) -func TestOnNodeViewsUpdateWithNoPeers(t *testing.T) { +func TestSetNetworkMapWithNoPeers(t *testing.T) { var c Conn knobs := &controlknobs.Knobs{} c.logf = logger.Discard @@ -2125,9 +2113,7 @@ func TestOnNodeViewsUpdateWithNoPeers(t *testing.T) { for i := 1; i <= 3; i++ { v := !debugEnableSilentDisco() envknob.Setenv("TS_DEBUG_ENABLE_SILENT_DISCO", fmt.Sprint(v)) - nv := NodeViewsUpdate{} - c.onNodeViewsUpdate(nv) - t.Logf("ptr %d: %p", i, nv) + c.SetNetworkMap(tailcfg.NodeView{}, nil) if c.lastFlags.heartbeatDisabled != v { t.Fatalf("call %d: didn't store netmap", i) } @@ -2215,11 +2201,7 @@ func TestIsWireGuardOnlyPeer(t *testing.T) { }, }), } - nv := NodeViewsUpdate{ - SelfNode: nm.SelfNode, - Peers: nm.Peers, - } - m.conn.onNodeViewsUpdate(nv) + m.conn.SetNetworkMap(nm.SelfNode, nm.Peers) cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { @@ -2280,11 +2262,7 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { }, }), } - nv := NodeViewsUpdate{ - SelfNode: nm.SelfNode, - Peers: nm.Peers, - } - m.conn.onNodeViewsUpdate(nv) + m.conn.SetNetworkMap(nm.SelfNode, nm.Peers) cfg, err := nmcfg.WGCfg(m.privateKey, nm, t.Logf, netmap.AllowSubnetRoutes, "") if err != nil { @@ -2321,11 +2299,7 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { // configures WG. 
func applyNetworkMap(t *testing.T, m *magicStack, nm *netmap.NetworkMap) { t.Helper() - nv := NodeViewsUpdate{ - SelfNode: nm.SelfNode, - Peers: nm.Peers, - } - m.conn.onNodeViewsUpdate(nv) + m.conn.SetNetworkMap(nm.SelfNode, nm.Peers) // Make sure we can't use v6 to avoid test failures. m.conn.noV6.Store(true) @@ -3590,7 +3564,7 @@ func Test_nodeHasCap(t *testing.T) { } } -func TestConn_onNodeViewsUpdate_updateRelayServersSet(t *testing.T) { +func TestConn_SetNetworkMap_updateRelayServersSet(t *testing.T) { peerNodeCandidateRelay := &tailcfg.Node{ Cap: 121, ID: 1, @@ -3752,10 +3726,7 @@ func TestConn_onNodeViewsUpdate_updateRelayServersSet(t *testing.T) { c.hasPeerRelayServers.Store(true) } - c.onNodeViewsUpdate(NodeViewsUpdate{ - SelfNode: tt.self, - Peers: tt.peers, - }) + c.SetNetworkMap(tt.self, tt.peers) got := c.relayManager.getServers() if !got.Equal(tt.wantRelayServers) { t.Fatalf("got: %v != want: %v", got, tt.wantRelayServers) From 6cbfc2f3babe5e6e55ddc589dee413801f663797 Mon Sep 17 00:00:00 2001 From: James Scott Date: Tue, 10 Feb 2026 13:24:00 -0800 Subject: [PATCH 0960/1093] logtail/filch: fix filch test panic (#18660) Updates rotateLocked so that we hold the activeStderrWriteForTest write lock around the dup2Stderr call, rather than acquiring it only after dup2 was already complete. This ensures no stderrWriteForTest calls can race with the dup2 syscall. The now unused waitIdleStderrForTest has been removed. On macOS, dup2 and write on the same file descriptor are not atomic with respect to each other. When rotateLocked called dup2Stderr to redirect the stderr fd to a new file, concurrent goroutines calling stderrWriteForTest could observe the fd in a transiently invalid state, resulting in the bad file descriptor.
Fixes tailscale/corp#36953 Signed-off-by: James Scott --- logtail/filch/filch.go | 27 ++++++++++++++------------- logtail/filch/filch_test.go | 14 -------------- 2 files changed, 14 insertions(+), 27 deletions(-) diff --git a/logtail/filch/filch.go b/logtail/filch/filch.go index 1bd82d8c41e8a..8ae9123060e73 100644 --- a/logtail/filch/filch.go +++ b/logtail/filch/filch.go @@ -297,12 +297,6 @@ func stderrWriteForTest(b []byte) int { return must.Get(os.Stderr.Write(b)) } -// waitIdleStderrForTest waits until there are no active stderrWriteForTest calls. -func waitIdleStderrForTest() { - activeStderrWriteForTest.Lock() - defer activeStderrWriteForTest.Unlock() -} - // rotateLocked swaps f.newer and f.older such that: // // - f.newer will be truncated and future writes will be appended to the end. @@ -350,8 +344,15 @@ func (f *Filch) rotateLocked() error { // Note that mutex does not prevent stderr writes. prevSize := f.newlyWrittenBytes + f.newlyFilchedBytes f.newlyWrittenBytes, f.newlyFilchedBytes = 0, 0 + + // Hold the write lock around dup2 to prevent concurrent + // stderrWriteForTest calls from racing with dup2 on the same fd. + // On macOS, dup2 and write are not atomic with respect to each other, + // so a concurrent write can observe a bad file descriptor. + activeStderrWriteForTest.Lock() if f.OrigStderr != nil { if err := dup2Stderr(f.newer); err != nil { + activeStderrWriteForTest.Unlock() return err } } @@ -369,15 +370,15 @@ func (f *Filch) rotateLocked() error { // In rare cases, it is possible that [Filch.TryReadLine] consumes // the entire older file before the write commits, // leading to dropped stderr lines. 
- waitIdleStderrForTest() - if fi, err := f.older.Stat(); err != nil { + fi, err := f.older.Stat() + activeStderrWriteForTest.Unlock() + if err != nil { return err - } else { - filchedBytes := max(0, fi.Size()-prevSize) - f.writeBytes.Add(filchedBytes) - f.filchedBytes.Add(filchedBytes) - f.storedBytes.Set(fi.Size()) // newer has been truncated, so only older matters } + filchedBytes := max(0, fi.Size()-prevSize) + f.writeBytes.Add(filchedBytes) + f.filchedBytes.Add(filchedBytes) + f.storedBytes.Set(fi.Size()) // newer has been truncated, so only older matters // Start reading from the start of older. if _, err := f.older.Seek(0, io.SeekStart); err != nil { diff --git a/logtail/filch/filch_test.go b/logtail/filch/filch_test.go index f2f9e9e3bcd6b..2538233cfd84c 100644 --- a/logtail/filch/filch_test.go +++ b/logtail/filch/filch_test.go @@ -5,7 +5,6 @@ package filch import ( "bytes" - "crypto/sha256" "encoding/json" "fmt" "io" @@ -121,19 +120,7 @@ func setupStderr(t *testing.T) { tstest.Replace(t, &os.Stderr, pipeW) } -func skipDarwin(t testing.TB) { - if runtime.GOOS != "darwin" { - return - } - src := must.Get(os.ReadFile("filch.go")) - if fmt.Sprintf("%x", sha256.Sum256(src)) != "a32da5e22034823c19ac7f29960e3646f540d67f85a0028832cab1f1557fc693" { - t.Errorf("filch.go has changed since this test was skipped; please delete this skip") - } - t.Skip("skipping known failing test on darwin; fixed in progress by https://github.com/tailscale/tailscale/pull/18660") -} - func TestConcurrentWriteAndRead(t *testing.T) { - skipDarwin(t) if replaceStderrSupportedForTest { setupStderr(t) } @@ -296,7 +283,6 @@ func TestMaxLineSize(t *testing.T) { } func TestMaxFileSize(t *testing.T) { - skipDarwin(t) if replaceStderrSupportedForTest { t.Run("ReplaceStderr:true", func(t *testing.T) { testMaxFileSize(t, true) }) } From 1172b2febd005af4545bfa2d0778001e9a873b8e Mon Sep 17 00:00:00 2001 From: License Updater Date: Wed, 11 Feb 2026 01:11:27 +0000 Subject: [PATCH 0961/1093] licenses: 
update license notices Signed-off-by: License Updater --- licenses/apple.md | 2 +- licenses/tailscale.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index f61291c943cb8..4170a4c8bac49 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -34,7 +34,7 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.26.1/LICENSE)) - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/4849db3c2f7e/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 28eb73db42cc6..9ccc37adb22cc 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -44,7 +44,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) 
([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fogleman/gg](https://pkg.go.dev/github.com/fogleman/gg) ([MIT](https://github.com/fogleman/gg/blob/v1.3.0/LICENSE.md)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.26.1/LICENSE)) - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - [github.com/go-ole/go-ole](https://pkg.go.dev/github.com/go-ole/go-ole) ([MIT](https://github.com/go-ole/go-ole/blob/v1.3.0/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) From 12188c0ade65d5617abd674c7010a5cca9f8519c Mon Sep 17 00:00:00 2001 From: Simon Law Date: Tue, 10 Feb 2026 18:14:32 -0800 Subject: [PATCH 0962/1093] ipn/ipnlocal: log traffic steering scores and suggested exit nodes (#18681) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When traffic steering is enabled, some users are suggested an exit node that is inappropriately far from their location. This seems to happen right when the client connects to the control plane and the client eventually fixes itself. But whenever an affected client reconnects, its suggested exit node flaps, and this happens often enough to be noticeable because connections drop whenever the exit node is switched. This should not happen, since the map response that contains the list of suggested exit nodes that the client picks from, also contains the scores for those nodes. 
Since our current logging and diagnostic tools don’t give us enough insight into what is happening, this PR adds additional logging when: - traffic steering scores are used to suggest an exit node - an exit node is suggested, no matter how it was determined Updates: tailscale/corp#29964 Updates: tailscale/corp#36446 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 981e2df73a83b..27858484a7a0e 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6,6 +6,7 @@ package ipnlocal import ( + "bufio" "cmp" "context" "crypto/sha256" @@ -7484,13 +7485,16 @@ func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion ta switch { case nb.SelfHasCap(tailcfg.NodeAttrTrafficSteering): // The traffic-steering feature flag is enabled on this tailnet. - return suggestExitNodeUsingTrafficSteering(nb, allowList) + res, err = suggestExitNodeUsingTrafficSteering(nb, allowList) default: // The control plane will always strip the `traffic-steering` // node attribute if it isn’t enabled for this tailnet, even if // it is set in the policy file: tailscale/corp#34401 - return suggestExitNodeUsingDERP(report, nb, prevSuggestion, selectRegion, selectNode, allowList) + res, err = suggestExitNodeUsingDERP(report, nb, prevSuggestion, selectRegion, selectNode, allowList) } + name, _, _ := strings.Cut(res.Name, ".") + nb.logf("netmap: suggested exit node: %s (%s)", name, res.ID) + return res, err } // suggestExitNodeUsingDERP is the classic algorithm used to suggest exit nodes, @@ -7723,6 +7727,21 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcf pick = nodes[0] } + nb.logf("netmap: traffic steering: exit node scores: %v", logger.ArgWriter(func(bw *bufio.Writer) { + const max = 10 + for i, n := range nodes { + if i == max { + fmt.Fprintf(bw, "... 
+%d", len(nodes)-max) + return + } + if i > 0 { + bw.WriteString(", ") + } + name, _, _ := strings.Cut(n.Name(), ".") + fmt.Fprintf(bw, "%d:%s", score(n), name) + } + })) + if !pick.Valid() { return apitype.ExitNodeSuggestionResponse{}, nil } From 8e39a0aa0fd68a5cccccf2e984a118120d489aeb Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 10 Feb 2026 19:44:14 -0800 Subject: [PATCH 0963/1093] go.toolchain.next.rev: update to final Go 1.26.0 [next] This updates the TS_GO_NEXT=1 (testing) toolchain to Go 1.26.0 The default one is still Go 1.25.x. Updates #18682 Change-Id: I99747798c166ce162ee9eee74baa9ff6744a62f6 Signed-off-by: Brad Fitzpatrick --- go.toolchain.next.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.next.rev b/go.toolchain.next.rev index abdc21022aa19..ea3d3c773f779 100644 --- a/go.toolchain.next.rev +++ b/go.toolchain.next.rev @@ -1 +1 @@ -5ba287c89a4cef2f4a419aed4e6bc3121c5c4dad +5b5cb0db47535a0a8d2f450cb1bf83af8e70f164 From 45db3691b9dcd4008157e3b7443bcd21fef85f1a Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Tue, 10 Feb 2026 19:25:50 +0000 Subject: [PATCH 0964/1093] prober: export a metric with the number of in-flight probes Updates tailscale/corp#37049 Signed-off-by: Anton Tolchanov --- prober/prober.go | 7 +++++++ prober/prober_test.go | 19 +++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/prober/prober.go b/prober/prober.go index 16c262bc81c0d..3a43401a14ac3 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -161,6 +161,7 @@ func newProbe(p *Prober, name string, interval time.Duration, lg prometheus.Labe mEndTime: prometheus.NewDesc("end_secs", "Latest probe end time (seconds since epoch)", nil, lg), mLatency: prometheus.NewDesc("latency_millis", "Latest probe latency (ms)", nil, lg), mResult: prometheus.NewDesc("result", "Latest probe result (1 = success, 0 = failure)", nil, lg), + mInFlight: prometheus.NewDesc("in_flight", "Number of probes currently running", 
nil, lg), mAttempts: prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "attempts_total", Help: "Total number of probing attempts", ConstLabels: lg, }, []string{"status"}), @@ -261,10 +262,12 @@ type Probe struct { mEndTime *prometheus.Desc mLatency *prometheus.Desc mResult *prometheus.Desc + mInFlight *prometheus.Desc mAttempts *prometheus.CounterVec mSeconds *prometheus.CounterVec mu sync.Mutex + inFlight int // number of currently running probes start time.Time // last time doProbe started end time.Time // last time doProbe returned latency time.Duration // last successful probe latency @@ -392,11 +395,13 @@ func (p *Probe) run() (pi ProbeInfo, err error) { func (p *Probe) recordStart() { p.mu.Lock() p.start = p.prober.now() + p.inFlight++ p.mu.Unlock() } func (p *Probe) recordEndLocked(err error) { end := p.prober.now() + p.inFlight-- p.end = end p.succeeded = err == nil p.lastErr = err @@ -649,6 +654,7 @@ func (p *Probe) Describe(ch chan<- *prometheus.Desc) { ch <- p.mStartTime ch <- p.mEndTime ch <- p.mResult + ch <- p.mInFlight ch <- p.mLatency p.mAttempts.Describe(ch) p.mSeconds.Describe(ch) @@ -664,6 +670,7 @@ func (p *Probe) Collect(ch chan<- prometheus.Metric) { p.mu.Lock() defer p.mu.Unlock() ch <- prometheus.MustNewConstMetric(p.mInterval, prometheus.GaugeValue, p.interval.Seconds()) + ch <- prometheus.MustNewConstMetric(p.mInFlight, prometheus.GaugeValue, float64(p.inFlight)) if !p.start.IsZero() { ch <- prometheus.MustNewConstMetric(p.mStartTime, prometheus.GaugeValue, float64(p.start.Unix())) } diff --git a/prober/prober_test.go b/prober/prober_test.go index c945f617a6633..8da5127875859 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -213,6 +213,14 @@ func TestProberConcurrency(t *testing.T) { if got, want := ran.Load(), int64(3); got != want { return fmt.Errorf("expected %d probes to run concurrently, got %d", want, got) } + wantMetrics := ` + # HELP prober_in_flight Number of probes currently running + # TYPE prober_in_flight 
gauge + prober_in_flight{class="",name="foo"} 3 + ` + if err := testutil.GatherAndCompare(p.metrics, strings.NewReader(wantMetrics), "prober_in_flight"); err != nil { + return fmt.Errorf("unexpected metrics: %w", err) + } return nil }); err != nil { t.Fatal(err) @@ -308,9 +316,12 @@ probe_end_secs{class="",label="value",name="testprobe"} %d # HELP probe_result Latest probe result (1 = success, 0 = failure) # TYPE probe_result gauge probe_result{class="",label="value",name="testprobe"} 0 +# HELP probe_in_flight Number of probes currently running +# TYPE probe_in_flight gauge +probe_in_flight{class="",label="value",name="testprobe"} 0 `, probeInterval.Seconds(), epoch.Unix(), epoch.Add(aFewMillis).Unix()) return testutil.GatherAndCompare(p.metrics, strings.NewReader(want), - "probe_interval_secs", "probe_start_secs", "probe_end_secs", "probe_result") + "probe_interval_secs", "probe_start_secs", "probe_end_secs", "probe_result", "probe_in_flight") }) if err != nil { t.Fatal(err) @@ -338,9 +349,13 @@ probe_latency_millis{class="",label="value",name="testprobe"} %d # HELP probe_result Latest probe result (1 = success, 0 = failure) # TYPE probe_result gauge probe_result{class="",label="value",name="testprobe"} 1 +# HELP probe_in_flight Number of probes currently running +# TYPE probe_in_flight gauge +probe_in_flight{class="",label="value",name="testprobe"} 0 `, probeInterval.Seconds(), start.Unix(), end.Unix(), aFewMillis.Milliseconds()) return testutil.GatherAndCompare(p.metrics, strings.NewReader(want), - "probe_interval_secs", "probe_start_secs", "probe_end_secs", "probe_latency_millis", "probe_result") + "probe_interval_secs", "probe_start_secs", "probe_end_secs", + "probe_latency_millis", "probe_result", "probe_in_flight") }) if err != nil { t.Fatal(err) From 73d09316e20fe1ccf8a9613c34d80c0a82c5a490 Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Wed, 11 Feb 2026 13:47:48 -0500 Subject: [PATCH 0965/1093] tstest: update clock to always use UTC (#18663) 
Instead of relying on the local timezone, which may cause non-deterministic behavior in some CIs, we force timezone to be UTC on default created clocks. Fixes: tailscale/corp#37005 Signed-off-by: Fernando Serboncini --- tstest/clock.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tstest/clock.go b/tstest/clock.go index f11187a4a69e5..5742c6e5aeda1 100644 --- a/tstest/clock.go +++ b/tstest/clock.go @@ -17,8 +17,11 @@ import ( type ClockOpts struct { // Start is the starting time for the Clock. When FollowRealTime is false, // Start is also the value that will be returned by the first call - // to Clock.Now. + // to Clock.Now. If you are passing a value here, set an explicit + // timezone, otherwise the test may be non-deterministic when TZ environment + // variable is set to different values. The default time is in UTC. Start time.Time + // Step is the amount of time the Clock will advance whenever Clock.Now is // called. If set to zero, the Clock will only advance when Clock.Advance is // called and/or if FollowRealTime is true. @@ -119,7 +122,7 @@ func (c *Clock) init() { } if c.start.IsZero() { if c.realTime.IsZero() { - c.start = time.Now() + c.start = time.Now().UTC() } else { c.start = c.realTime } From 84ee5b640b2101af610a2a554808ec77adbf070e Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Mon, 9 Feb 2026 16:34:46 -0700 Subject: [PATCH 0966/1093] testcontrol: send updates for new DNS records or app capabilities Two methods were recently added to the testcontrol.Server type: AddDNSRecords and SetGlobalAppCaps. These two methods should trigger netmap updates for all nodes connected to the Server instance, the way that other state-change methods do (see SetNodeCapMap, for example). This will also allow us to get rid of Server.ForceNetmapUpdate, which was a band-aid fix to force the netmap updates which should have been triggered by the aforementioned methods. 
Fixes tailscale/corp#37102 Signed-off-by: Harry Harpham --- tsnet/tsnet_test.go | 6 +-- tstest/integration/testcontrol/testcontrol.go | 41 ++----------------- 2 files changed, 5 insertions(+), 42 deletions(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 41d239e3b91be..0b6b61bd10061 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1194,10 +1194,8 @@ func TestListenService(t *testing.T) { tt.extraSetup(t, control) } - // Force netmap updates to avoid race conditions. The nodes need to - // see our control updates before we can start the test. - must.Do(control.ForceNetmapUpdate(ctx, serviceHost.lb.NodeKey())) - must.Do(control.ForceNetmapUpdate(ctx, serviceClient.lb.NodeKey())) + // Wait until both nodes have up-to-date netmaps before + // proceeding with the test. netmapUpToDate := func(s *Server) bool { nm := s.lb.NetMap() return slices.ContainsFunc(nm.DNS.ExtraRecords, func(r tailcfg.DNSRecord) bool { diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index f61d1b53a6d99..56664ba746204 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -299,43 +299,6 @@ func (s *Server) addDebugMessage(nodeKeyDst key.NodePublic, msg any) bool { return sendUpdate(oldUpdatesCh, updateDebugInjection) } -// ForceNetmapUpdate waits for the node to get stuck in a map poll and then -// sends the current netmap (which may result in a redundant netmap). The -// intended use case is ensuring state changes propagate before running tests. -// -// This should only be called for nodes connected as streaming clients. Calling -// this with a non-streaming node will result in non-deterministic behavior. -// -// This function cannot guarantee that the node has processed the issued update, -// so tests should confirm processing by querying the node. 
By example: -// -// if err := s.ForceNetmapUpdate(node.Key()); err != nil { -// // handle error -// } -// for !updatesPresent(node.NetMap()) { -// time.Sleep(10 * time.Millisecond) -// } -func (s *Server) ForceNetmapUpdate(ctx context.Context, nodeKey key.NodePublic) error { - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if err := s.AwaitNodeInMapRequest(ctx, nodeKey); err != nil { - return fmt.Errorf("waiting for node to poll: %w", err) - } - mr, err := s.MapResponse(&tailcfg.MapRequest{NodeKey: nodeKey}) - if err != nil { - return fmt.Errorf("generating map response: %w", err) - } - if s.addDebugMessage(nodeKey, mr) { - return nil - } - // If we failed to send the map response, loop around and try again. - } -} - // Mark the Node key of every node as expired func (s *Server) SetExpireAllNodes(expired bool) { s.mu.Lock() @@ -589,8 +552,9 @@ func (s *Server) SetNodeCapMap(nodeKey key.NodePublic, capMap tailcfg.NodeCapMap // ] func (s *Server) SetGlobalAppCaps(appCaps tailcfg.PeerCapMap) { s.mu.Lock() + defer s.mu.Unlock() s.globalAppCaps = appCaps - s.mu.Unlock() + s.updateLocked("SetGlobalAppCaps", s.nodeIDsLocked(0)) } // AddDNSRecords adds records to the server's DNS config. @@ -601,6 +565,7 @@ func (s *Server) AddDNSRecords(records ...tailcfg.DNSRecord) { s.DNSConfig = new(tailcfg.DNSConfig) } s.DNSConfig.ExtraRecords = append(s.DNSConfig.ExtraRecords, records...) 
+ s.updateLocked("AddDNSRecords", s.nodeIDsLocked(0)) } // nodeIDsLocked returns the node IDs of all nodes in the server, except From 0bac4223d140778ec8408d65882151371f76f2cd Mon Sep 17 00:00:00 2001 From: Michael Ben-Ami Date: Tue, 10 Feb 2026 12:58:21 -0500 Subject: [PATCH 0967/1093] tstun: add test for intercept ordering Fixes tailscale/corp#36999 Signed-off-by: Michael Ben-Ami --- net/tstun/wrap_test.go | 65 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index 8515cb8f0a4c0..1744fc30266a9 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -41,6 +41,7 @@ import ( "tailscale.com/util/must" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/netstack/gro" "tailscale.com/wgengine/wgcfg" ) @@ -991,3 +992,67 @@ func TestTSMPDisco(t *testing.T) { } }) } + +func TestInterceptOrdering(t *testing.T) { + bus := eventbustest.NewBus(t) + chtun, tun := newChannelTUN(t.Logf, bus, true) + defer tun.Close() + + var seq uint8 + orderedFilterFn := func(expected uint8) FilterFunc { + return func(_ *packet.Parsed, _ *Wrapper) filter.Response { + seq++ + if expected != seq { + t.Errorf("got sequence %d; want %d", seq, expected) + } + return filter.Accept + } + } + + ordereredGROFilterFn := func(expected uint8) GROFilterFunc { + return func(_ *packet.Parsed, _ *Wrapper, _ *gro.GRO) (filter.Response, *gro.GRO) { + seq++ + if expected != seq { + t.Errorf("got sequence %d; want %d", seq, expected) + } + return filter.Accept, nil + } + } + + // As the number of inbound intercepts change, + // this value should change. + numInboundIntercepts := uint8(3) + + tun.PreFilterPacketInboundFromWireGuard = orderedFilterFn(1) + tun.PostFilterPacketInboundFromWireGuardAppConnector = orderedFilterFn(2) + tun.PostFilterPacketInboundFromWireGuard = ordereredGROFilterFn(3) + + // Write the packet. 
+ go func() { <-chtun.Inbound }() // Simulate tun device receiving. + packet := [][]byte{udp4("5.6.7.8", "1.2.3.4", 89, 89)} + tun.Write(packet, 0) + + if seq != numInboundIntercepts { + t.Errorf("got number of intercepts run in Write(): %d; want: %d", seq, numInboundIntercepts) + } + + // As the number of inbound intercepts change, + // this value should change. + numOutboundIntercepts := uint8(4) + + seq = 0 + tun.PreFilterPacketOutboundToWireGuardNetstackIntercept = ordereredGROFilterFn(1) + tun.PreFilterPacketOutboundToWireGuardEngineIntercept = orderedFilterFn(2) + tun.PreFilterPacketOutboundToWireGuardAppConnectorIntercept = orderedFilterFn(3) + tun.PostFilterPacketOutboundToWireGuard = orderedFilterFn(4) + + // Read the packet. + var buf [MaxPacketSize]byte + sizes := make([]int, 1) + chtun.Outbound <- udp4("1.2.3.4", "5.6.7.8", 98, 98) // Simulate tun device sending. + tun.Read([][]byte{buf[:]}, sizes, 0) + + if seq != numOutboundIntercepts { + t.Errorf("got number of intercepts run in Read(): %d; want: %d", seq, numOutboundIntercepts) + } +} From 36d359e585374358ef04ed28f54c5f1667b0c170 Mon Sep 17 00:00:00 2001 From: Will Hannah Date: Thu, 12 Feb 2026 14:49:52 -0500 Subject: [PATCH 0968/1093] clientupdate, cmd/tailscale/cli: support updating to release-candidates (#18632) Adds a new track for release candidates. Supports querying by track in version and updating to RCs in update for supported platforms. 
updates #18193 Signed-off-by: Will Hannah --- clientupdate/clientupdate.go | 23 ++++---- clientupdate/clientupdate_test.go | 95 +++++++++++++++++-------------- cmd/tailscale/cli/update.go | 9 ++- cmd/tailscale/cli/version.go | 6 +- 4 files changed, 74 insertions(+), 59 deletions(-) diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index 09f9d0be1787d..1ed7894bf3d43 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -38,8 +38,9 @@ import ( ) const ( - StableTrack = "stable" - UnstableTrack = "unstable" + StableTrack = "stable" + UnstableTrack = "unstable" + ReleaseCandidateTrack = "release-candidate" ) var CurrentTrack = func() string { @@ -80,6 +81,8 @@ type Arguments struct { // running binary // - StableTrack and UnstableTrack will use the latest versions of the // corresponding tracks + // - ReleaseCandidateTrack will use the newest version from StableTrack + // and ReleaseCandidateTrack. // // Leaving this empty will use Version or fall back to CurrentTrack if both // Track and Version are empty. @@ -114,7 +117,7 @@ func (args Arguments) validate() error { return fmt.Errorf("only one of Version(%q) or Track(%q) can be set", args.Version, args.Track) } switch args.Track { - case StableTrack, UnstableTrack, "": + case StableTrack, UnstableTrack, ReleaseCandidateTrack, "": // All valid values. default: return fmt.Errorf("unsupported track %q", args.Track) @@ -496,10 +499,10 @@ func (up *Updater) updateDebLike() error { const aptSourcesFile = "/etc/apt/sources.list.d/tailscale.list" // updateDebianAptSourcesList updates the /etc/apt/sources.list.d/tailscale.list -// file to make sure it has the provided track (stable or unstable) in it. +// file to make sure it has the provided track (stable, unstable, or release-candidate) in it. // -// If it already has the right track (including containing both stable and -// unstable), it does nothing. 
+// If it already has the right track (including containing both stable, +// unstable, and release-candidate), it does nothing. func updateDebianAptSourcesList(dstTrack string) (rewrote bool, err error) { was, err := os.ReadFile(aptSourcesFile) if err != nil { @@ -522,7 +525,7 @@ func updateDebianAptSourcesListBytes(was []byte, dstTrack string) (newContent [] bs := bufio.NewScanner(bytes.NewReader(was)) hadCorrect := false commentLine := regexp.MustCompile(`^\s*\#`) - pkgsURL := regexp.MustCompile(`\bhttps://pkgs\.tailscale\.com/((un)?stable)/`) + pkgsURL := regexp.MustCompile(`\bhttps://pkgs\.tailscale\.com/(stable|unstable|release-candidate)/`) for bs.Scan() { line := bs.Bytes() if !commentLine.Match(line) { @@ -616,15 +619,15 @@ func (up *Updater) updateFedoraLike(packageManager string) func() error { } // updateYUMRepoTrack updates the repoFile file to make sure it has the -// provided track (stable or unstable) in it. +// provided track (stable, unstable, or release-candidate) in it. 
func updateYUMRepoTrack(repoFile, dstTrack string) (rewrote bool, err error) { was, err := os.ReadFile(repoFile) if err != nil { return false, err } - urlRe := regexp.MustCompile(`^(baseurl|gpgkey)=https://pkgs\.tailscale\.com/(un)?stable/`) - urlReplacement := fmt.Sprintf("$1=https://pkgs.tailscale.com/%s/", dstTrack) + urlRe := regexp.MustCompile(`^(baseurl|gpgkey)=https://pkgs\.tailscale\.com/(stable|unstable|release-candidate)`) + urlReplacement := fmt.Sprintf("$1=https://pkgs.tailscale.com/%s", dstTrack) s := bufio.NewScanner(bytes.NewReader(was)) newContent := bytes.NewBuffer(make([]byte, 0, len(was))) diff --git a/clientupdate/clientupdate_test.go b/clientupdate/clientupdate_test.go index 089936a3120f1..7487026355326 100644 --- a/clientupdate/clientupdate_test.go +++ b/clientupdate/clientupdate_test.go @@ -86,29 +86,8 @@ func TestUpdateDebianAptSourcesListBytes(t *testing.T) { } } -func TestUpdateYUMRepoTrack(t *testing.T) { - tests := []struct { - desc string - before string - track string - after string - rewrote bool - wantErr bool - }{ - { - desc: "same track", - before: ` -[tailscale-stable] -name=Tailscale stable -baseurl=https://pkgs.tailscale.com/stable/fedora/$basearch -enabled=1 -type=rpm -repo_gpgcheck=1 -gpgcheck=0 -gpgkey=https://pkgs.tailscale.com/stable/fedora/repo.gpg -`, - track: StableTrack, - after: ` +var YUMRepos = map[string]string{ + StableTrack: ` [tailscale-stable] name=Tailscale stable baseurl=https://pkgs.tailscale.com/stable/fedora/$basearch @@ -118,35 +97,30 @@ repo_gpgcheck=1 gpgcheck=0 gpgkey=https://pkgs.tailscale.com/stable/fedora/repo.gpg `, - }, - { - desc: "change track", - before: ` -[tailscale-stable] -name=Tailscale stable -baseurl=https://pkgs.tailscale.com/stable/fedora/$basearch + + UnstableTrack: ` +[tailscale-unstable] +name=Tailscale unstable +baseurl=https://pkgs.tailscale.com/unstable/fedora/$basearch enabled=1 type=rpm repo_gpgcheck=1 gpgcheck=0 -gpgkey=https://pkgs.tailscale.com/stable/fedora/repo.gpg 
+gpgkey=https://pkgs.tailscale.com/unstable/fedora/repo.gpg `, - track: UnstableTrack, - after: ` -[tailscale-unstable] -name=Tailscale unstable -baseurl=https://pkgs.tailscale.com/unstable/fedora/$basearch + + ReleaseCandidateTrack: ` +[tailscale-release-candidate] +name=Tailscale release-candidate +baseurl=https://pkgs.tailscale.com/release-candidate/fedora/$basearch enabled=1 type=rpm repo_gpgcheck=1 gpgcheck=0 -gpgkey=https://pkgs.tailscale.com/unstable/fedora/repo.gpg +gpgkey=https://pkgs.tailscale.com/release-candidate/fedora/repo.gpg `, - rewrote: true, - }, - { - desc: "non-tailscale repo file", - before: ` + + "FakeRepo": ` [fedora] name=Fedora $releasever - $basearch #baseurl=http://download.example/pub/fedora/linux/releases/$releasever/Everything/$basearch/os/ @@ -158,8 +132,41 @@ repo_gpgcheck=0 type=rpm gpgcheck=1 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch -skip_if_unavailable=False -`, +skip_if_unavailable=False`, +} + +func TestUpdateYUMRepoTrack(t *testing.T) { + tests := []struct { + desc string + before string + track string + after string + rewrote bool + wantErr bool + }{ + { + desc: "same track", + before: YUMRepos[StableTrack], + track: StableTrack, + after: YUMRepos[StableTrack], + }, + { + desc: "change track", + before: YUMRepos[StableTrack], + track: UnstableTrack, + after: YUMRepos[UnstableTrack], + rewrote: true, + }, + { + desc: "change track RC", + before: YUMRepos[StableTrack], + track: ReleaseCandidateTrack, + after: YUMRepos[ReleaseCandidateTrack], + rewrote: true, + }, + { + desc: "non-tailscale repo file", + before: YUMRepos["FakeRepo"], track: StableTrack, wantErr: true, }, diff --git a/cmd/tailscale/cli/update.go b/cmd/tailscale/cli/update.go index 6d57e6d41f110..47177347dba85 100644 --- a/cmd/tailscale/cli/update.go +++ b/cmd/tailscale/cli/update.go @@ -22,8 +22,11 @@ import ( func init() { maybeUpdateCmd = func() *ffcli.Command { return updateCmd } - clientupdateLatestTailscaleVersion.Set(func() 
(string, error) { - return clientupdate.LatestTailscaleVersion(clientupdate.CurrentTrack) + clientupdateLatestTailscaleVersion.Set(func(track string) (string, error) { + if track == "" { + return clientupdate.LatestTailscaleVersion(clientupdate.CurrentTrack) + } + return clientupdate.LatestTailscaleVersion(track) }) } @@ -50,7 +53,7 @@ var updateCmd = &ffcli.Command{ distro.Get() != distro.Synology && runtime.GOOS != "freebsd" && runtime.GOOS != "darwin" { - fs.StringVar(&updateArgs.track, "track", "", `which track to check for updates: "stable" or "unstable" (dev); empty means same as current`) + fs.StringVar(&updateArgs.track, "track", "", `which track to check for updates: "stable", "release-candidate", or "unstable" (dev); empty means same as current`) fs.StringVar(&updateArgs.version, "version", "", `explicit version to update/downgrade to`) } return fs diff --git a/cmd/tailscale/cli/version.go b/cmd/tailscale/cli/version.go index 2c6a3738bd36a..3d6590a39bf2e 100644 --- a/cmd/tailscale/cli/version.go +++ b/cmd/tailscale/cli/version.go @@ -24,6 +24,7 @@ var versionCmd = &ffcli.Command{ fs.BoolVar(&versionArgs.daemon, "daemon", false, "also print local node's daemon version") fs.BoolVar(&versionArgs.json, "json", false, "output in JSON format") fs.BoolVar(&versionArgs.upstream, "upstream", false, "fetch and print the latest upstream release version from pkgs.tailscale.com") + fs.StringVar(&versionArgs.track, "track", "", `which track to check for updates: "stable", "release-candidate", or "unstable" (dev); empty means same as current`) return fs })(), Exec: runVersion, @@ -33,9 +34,10 @@ var versionArgs struct { daemon bool // also check local node's daemon version json bool upstream bool + track string } -var clientupdateLatestTailscaleVersion feature.Hook[func() (string, error)] +var clientupdateLatestTailscaleVersion feature.Hook[func(string) (string, error)] func runVersion(ctx context.Context, args []string) error { if len(args) > 0 { @@ -57,7 +59,7 @@ func 
runVersion(ctx context.Context, args []string) error { if !ok { return fmt.Errorf("fetching latest version not supported in this build") } - upstreamVer, err = f() + upstreamVer, err = f(versionArgs.track) if err != nil { return err } From 068074c109a5a78de746b47b0f1bd2915936809c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 12 Feb 2026 13:03:22 -0800 Subject: [PATCH 0969/1093] portlist: also tb.Skip benchmarks (not just tests) on bad Linux kernels Updates #16966 Change-Id: I0269927bdf8e6c4e949fcf755ce7e5fd21386d7d Signed-off-by: Brad Fitzpatrick --- portlist/portlist_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/portlist/portlist_test.go b/portlist/portlist_test.go index 5e0964b248882..922cb7a1ef562 100644 --- a/portlist/portlist_test.go +++ b/portlist/portlist_test.go @@ -11,7 +11,7 @@ import ( "tailscale.com/tstest" ) -func maybeSkip(t *testing.T) { +func maybeSkip(t testing.TB) { if runtime.GOOS == "linux" { tstest.SkipOnKernelVersions(t, "https://github.com/tailscale/tailscale/issues/16966", @@ -214,6 +214,7 @@ func BenchmarkGetListIncremental(b *testing.B) { } func benchmarkGetList(b *testing.B, incremental bool) { + maybeSkip(b) b.ReportAllocs() var p Poller p.init() From d46887031097858c79b102a9fe3f2345bcea084a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 12 Feb 2026 13:15:24 -0800 Subject: [PATCH 0970/1093] .github/workflows: bump oss-fuzz builder hash Fixes #18710 Change-Id: I2ebad48b1227321233172beb9801087963ece4fa Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 152ef7bce9008..cdf8f3f5f69d1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -711,9 +711,9 @@ jobs: steps: - name: build fuzzers id: build - # As of 21 October 2025, this repo doesn't tag releases, so this commit + # As of 12 February 2026, this repo doesn't tag 
releases, so this commit # hash is just the tip of master. - uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@1242ccb5b6352601e73c00f189ac2ae397242264 + uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@f277aafb36f358582fdb24a41a9a52f2e097a2fd # continue-on-error makes steps.build.conclusion be 'success' even if # steps.build.outcome is 'failure'. This means this step does not # contribute to the job's overall pass/fail evaluation. @@ -743,9 +743,9 @@ jobs: # report a failure because TS_FUZZ_CURRENTLY_BROKEN is set to the wrong # value. if: steps.build.outcome == 'success' - # As of 21 October 2025, this repo doesn't tag releases, so this commit + # As of 12 February 2026, this repo doesn't tag releases, so this commit # hash is just the tip of master. - uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@1242ccb5b6352601e73c00f189ac2ae397242264 + uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@f277aafb36f358582fdb24a41a9a52f2e097a2fd with: oss-fuzz-project-name: 'tailscale' fuzz-seconds: 150 From a341eea00bc2f102dcf1bbd19db2fd4a1bebefac Mon Sep 17 00:00:00 2001 From: David Bond Date: Fri, 13 Feb 2026 16:04:34 +0000 Subject: [PATCH 0971/1093] k8s-operator,cmd/k8s-operator: define ProxyGroupPolicy CRD (#18614) This commit adds a new custom resource definition to the kubernetes operator named `ProxyGroupPolicy`. This resource is namespace scoped and is used as an allow list for which `ProxyGroup` resources can be used within its namespace. The `spec` contains two fields, `ingress` and `egress`. These should contain the names of `ProxyGroup` resources to denote which can be used as values in the `tailscale.com/proxy-group` annotation within `Service` and `Ingress` resources. The intention is for these policies to be merged within a namespace and produce a `ValidatingAdmissionPolicy` and `ValidatingAdmissionPolicyBinding` for both ingress and egress that prevents users from using names of `ProxyGroup` resources in those annotations. 
Closes: https://github.com/tailscale/corp/issues/36829 Signed-off-by: David Bond --- .../tailscale.com_proxygrouppolicies.yaml | 139 ++++++++++++++++++ k8s-operator/api.md | 77 ++++++++++ k8s-operator/apis/v1alpha1/register.go | 2 + .../apis/v1alpha1/types_proxygrouppolicy.go | 67 +++++++++ .../apis/v1alpha1/zz_generated.deepcopy.go | 106 +++++++++++++ 5 files changed, 391 insertions(+) create mode 100644 cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml create mode 100644 k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml new file mode 100644 index 0000000000000..51edcb56f039b --- /dev/null +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml @@ -0,0 +1,139 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.0 + name: proxygrouppolicies.tailscale.com +spec: + group: tailscale.com + names: + kind: ProxyGroupPolicy + listKind: ProxyGroupPolicyList + plural: proxygrouppolicies + shortNames: + - pgp + singular: proxygrouppolicy + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Status of the deployed ProxyGroupPolicy resources. + jsonPath: .status.conditions[?(@.type == "ProxyGroupPolicyReady")].reason + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec describes the desired state of the ProxyGroupPolicy. + More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + type: object + properties: + egress: + description: |- + Names of ProxyGroup resources that can be used by Service resources within this namespace. An empty list + denotes that no egress via ProxyGroups is allowed within this namespace. + type: array + items: + type: string + ingress: + description: |- + Names of ProxyGroup resources that can be used by Ingress resources within this namespace. An empty list + denotes that no ingress via ProxyGroups is allowed within this namespace. + type: array + items: + type: string + status: + description: |- + Status describes the status of the ProxyGroupPolicy. This is set + and managed by the Tailscale operator. + type: object + properties: + conditions: + type: array + items: + description: Condition contains details for one aspect of the current state of this API Resource. + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ type: string + format: date-time + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true + subresources: + status: {} diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 51a354b925574..5a60f66e039d0 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -16,6 +16,8 @@ - [ProxyClassList](#proxyclasslist) - [ProxyGroup](#proxygroup) - [ProxyGroupList](#proxygrouplist) +- [ProxyGroupPolicy](#proxygrouppolicy) +- [ProxyGroupPolicyList](#proxygrouppolicylist) - [Recorder](#recorder) - [RecorderList](#recorderlist) - [Tailnet](#tailnet) @@ -725,6 +727,81 @@ _Appears in:_ | `items` _[ProxyGroup](#proxygroup) array_ | | | | +#### ProxyGroupPolicy + + + + + + + +_Appears in:_ +- [ProxyGroupPolicyList](#proxygrouppolicylist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `ProxyGroupPolicy` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
      Servers may infer this from the endpoint the client submits requests to.
      Cannot be updated.
      In CamelCase.
      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
      Servers should convert recognized schemas to the latest internal value, and
      may reject unrecognized values.
      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ProxyGroupPolicySpec](#proxygrouppolicyspec)_ | Spec describes the desired state of the ProxyGroupPolicy.
      More info:
      https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status | | | +| `status` _[ProxyGroupPolicyStatus](#proxygrouppolicystatus)_ | Status describes the status of the ProxyGroupPolicy. This is set
      and managed by the Tailscale operator. | | | + + +#### ProxyGroupPolicyList + + + + + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `tailscale.com/v1alpha1` | | | +| `kind` _string_ | `ProxyGroupPolicyList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
      Servers may infer this from the endpoint the client submits requests to.
      Cannot be updated.
      In CamelCase.
      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
      Servers should convert recognized schemas to the latest internal value, and
      may reject unrecognized values.
      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[ProxyGroupPolicy](#proxygrouppolicy) array_ | | | | + + +#### ProxyGroupPolicySpec + + + + + + + +_Appears in:_ +- [ProxyGroupPolicy](#proxygrouppolicy) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `ingress` _string array_ | Names of ProxyGroup resources that can be used by Ingress resources within this namespace. An empty list
      denotes that no ingress via ProxyGroups is allowed within this namespace. | | | +| `egress` _string array_ | Names of ProxyGroup resources that can be used by Service resources within this namespace. An empty list
      denotes that no egress via ProxyGroups is allowed within this namespace. | | | + + +#### ProxyGroupPolicyStatus + + + + + + + +_Appears in:_ +- [ProxyGroupPolicy](#proxygrouppolicy) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#condition-v1-meta) array_ | | | | + + #### ProxyGroupSpec diff --git a/k8s-operator/apis/v1alpha1/register.go b/k8s-operator/apis/v1alpha1/register.go index ebdd2bae1f3ea..125d7419866ea 100644 --- a/k8s-operator/apis/v1alpha1/register.go +++ b/k8s-operator/apis/v1alpha1/register.go @@ -69,6 +69,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ProxyGroupList{}, &Tailnet{}, &TailnetList{}, + &ProxyGroupPolicy{}, + &ProxyGroupPolicyList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go b/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go new file mode 100644 index 0000000000000..dd06380c271f5 --- /dev/null +++ b/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go @@ -0,0 +1,67 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Code comments on these types should be treated as user facing documentation- +// they will appear on the ProxyGroupPolicy CRD i.e. if someone runs kubectl explain tailnet. + +var ProxyGroupPolicyKind = "ProxyGroupPolicy" + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced,shortName=pgp +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyGroupPolicyReady")].reason`,description="Status of the deployed ProxyGroupPolicy resources." 
+ +type ProxyGroupPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitzero"` + + // Spec describes the desired state of the ProxyGroupPolicy. + // More info: + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + Spec ProxyGroupPolicySpec `json:"spec"` + + // Status describes the status of the ProxyGroupPolicy. This is set + // and managed by the Tailscale operator. + // +optional + Status ProxyGroupPolicyStatus `json:"status"` +} + +// +kubebuilder:object:root=true + +type ProxyGroupPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ProxyGroupPolicy `json:"items"` +} + +type ProxyGroupPolicySpec struct { + // Names of ProxyGroup resources that can be used by Ingress resources within this namespace. An empty list + // denotes that no ingress via ProxyGroups is allowed within this namespace. + // +optional + Ingress []string `json:"ingress,omitempty"` + + // Names of ProxyGroup resources that can be used by Service resources within this namespace. An empty list + // denotes that no egress via ProxyGroups is allowed within this namespace. + // +optional + Egress []string `json:"egress,omitempty"` +} + +type ProxyGroupPolicyStatus struct { + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions"` +} + +// ProxyGroupPolicyReady is set to True if the ProxyGroupPolicy is available for use by operator workloads. 
+const ProxyGroupPolicyReady ConditionType = "ProxyGroupPolicyReady" diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index 1081c162c81bc..2528c89f364d6 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -832,6 +832,112 @@ func (in *ProxyGroupList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupPolicy) DeepCopyInto(out *ProxyGroupPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupPolicy. +func (in *ProxyGroupPolicy) DeepCopy() *ProxyGroupPolicy { + if in == nil { + return nil + } + out := new(ProxyGroupPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxyGroupPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupPolicyList) DeepCopyInto(out *ProxyGroupPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProxyGroupPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupPolicyList. 
+func (in *ProxyGroupPolicyList) DeepCopy() *ProxyGroupPolicyList { + if in == nil { + return nil + } + out := new(ProxyGroupPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxyGroupPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupPolicySpec) DeepCopyInto(out *ProxyGroupPolicySpec) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupPolicySpec. +func (in *ProxyGroupPolicySpec) DeepCopy() *ProxyGroupPolicySpec { + if in == nil { + return nil + } + out := new(ProxyGroupPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyGroupPolicyStatus) DeepCopyInto(out *ProxyGroupPolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyGroupPolicyStatus. +func (in *ProxyGroupPolicyStatus) DeepCopy() *ProxyGroupPolicyStatus { + if in == nil { + return nil + } + out := new(ProxyGroupPolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ProxyGroupSpec) DeepCopyInto(out *ProxyGroupSpec) { *out = *in From afb065fb6842ccbe6efd2a26096b609e73a3f41b Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 12 Feb 2026 22:37:41 -0600 Subject: [PATCH 0972/1093] net/dns: write MagicDNS host names to the hosts file on domain-joined Windows machines On domain-joined Windows devices the primary search domain (the one the device is joined to) always takes precedence over other search domains. This breaks MagicDNS when we are the primary resolver on the device (see #18712). To work around this Windows behavior, we should write MagicDNS host names the hosts file just as we do when we're not the primary resolver. This commit does exactly that. Fixes #18712 Signed-off-by: Nick Khyl --- net/dns/manager_windows.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index 118dd18dde14b..bc1e645606402 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -399,7 +399,15 @@ func (m *windowsManager) SetDNS(cfg OSConfig) error { if err := m.setSplitDNS(resolvers, domains); err != nil { return err } - if err := m.setHosts(nil); err != nil { + var hosts []*HostEntry + if winenv.IsDomainJoined() { + // On domain-joined Windows devices the primary search domain (the one the device is joined to) + // always takes precedence over other search domains. This breaks MagicDNS when we are the primary + // resolver on the device (see #18712). To work around this Windows behavior, we should write MagicDNS + // host names the hosts file just as we do when we're not the primary resolver. 
+ hosts = cfg.Hosts + } + if err := m.setHosts(hosts); err != nil { return err } if err := m.setPrimaryDNS(cfg.Nameservers, cfg.SearchDomains); err != nil { From 9741c1e846d0812f5b63c2ad9f6825b0f6753dcd Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Thu, 12 Feb 2026 22:38:54 -0600 Subject: [PATCH 0973/1093] control/controlknobs,net/dns,tailcfg: add a control knob that disables hosts file updates on Windows In the absence of a better mechanism, writing unqualified hostnames to the hosts file may be required for MagicDNS to work on some Windows environments, such as domain-joined machines. It can also improve MagicDNS performance on non-domain joined devices when we are not the device's primary DNS resolver. At the same time, updating the hosts file can be slow and expensive, especially when it already contains many entries, as was previously reported in #14327. It may also have negative side effects, such as interfering with the system's DNS resolution policies. Additionally, to fix #18712, we had to extend hosts file usage to domain-joined machines when we are not the primary DNS resolver. For the reasons above, this change may introduce risk. To allow customers to disable hosts file updates remotely without disabling MagicDNS entirely, whether on domain-joined machines or not, this PR introduces the `disable-hosts-file-updates` node attribute. Updates #18712 Updates #14327 Signed-off-by: Nick Khyl --- control/controlknobs/controlknobs.go | 8 ++++++++ net/dns/manager_windows.go | 21 ++++++++++++++------- tailcfg/tailcfg.go | 10 +++++++++- 3 files changed, 31 insertions(+), 8 deletions(-) diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index 708840155df45..0f85e82368dd9 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -107,6 +107,12 @@ type Knobs struct { // of queued netmap.NetworkMap between the controlclient and LocalBackend. // See tailscale/tailscale#14768. 
DisableSkipStatusQueue atomic.Bool + + // DisableHostsFileUpdates indicates that the node's DNS manager should not create + // hosts file entries when it normally would, such as when we're not the primary + // resolver on Windows or when the host is domain-joined and its primary domain + // takes precedence over MagicDNS. As of 2026-02-13, it is only used on Windows. + DisableHostsFileUpdates atomic.Bool } // UpdateFromNodeAttributes updates k (if non-nil) based on the provided self @@ -137,6 +143,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { disableLocalDNSOverrideViaNRPT = has(tailcfg.NodeAttrDisableLocalDNSOverrideViaNRPT) disableCaptivePortalDetection = has(tailcfg.NodeAttrDisableCaptivePortalDetection) disableSkipStatusQueue = has(tailcfg.NodeAttrDisableSkipStatusQueue) + disableHostsFileUpdates = has(tailcfg.NodeAttrDisableHostsFileUpdates) ) if has(tailcfg.NodeAttrOneCGNATEnable) { @@ -163,6 +170,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT) k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection) k.DisableSkipStatusQueue.Store(disableSkipStatusQueue) + k.DisableHostsFileUpdates.Store(disableHostsFileUpdates) // If both attributes are present, then "enable" should win. This reflects // the history of seamless key renewal. 
diff --git a/net/dns/manager_windows.go b/net/dns/manager_windows.go index bc1e645606402..1e412b2d20617 100644 --- a/net/dns/manager_windows.go +++ b/net/dns/manager_windows.go @@ -34,6 +34,7 @@ import ( "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/winutil" + "tailscale.com/util/winutil/winenv" ) const ( @@ -354,6 +355,10 @@ func (m *windowsManager) disableLocalDNSOverrideViaNRPT() bool { return m.knobs != nil && m.knobs.DisableLocalDNSOverrideViaNRPT.Load() } +func (m *windowsManager) disableHostsFileUpdates() bool { + return m.knobs != nil && m.knobs.DisableHostsFileUpdates.Load() +} + func (m *windowsManager) SetDNS(cfg OSConfig) error { // We can configure Windows DNS in one of two ways: // @@ -400,7 +405,7 @@ func (m *windowsManager) SetDNS(cfg OSConfig) error { return err } var hosts []*HostEntry - if winenv.IsDomainJoined() { + if !m.disableHostsFileUpdates() && winenv.IsDomainJoined() { // On domain-joined Windows devices the primary search domain (the one the device is joined to) // always takes precedence over other search domains. This breaks MagicDNS when we are the primary // resolver on the device (see #18712). To work around this Windows behavior, we should write MagicDNS @@ -429,12 +434,14 @@ func (m *windowsManager) SetDNS(cfg OSConfig) error { return err } - // As we are not the primary resolver in this setup, we need to - // explicitly set some single name hosts to ensure that we can resolve - // them quickly and get around the 2.3s delay that otherwise occurs due - // to multicast timeouts. - if err := m.setHosts(cfg.Hosts); err != nil { - return err + if !m.disableHostsFileUpdates() { + // As we are not the primary resolver in this setup, we need to + // explicitly set some single name hosts to ensure that we can resolve + // them quickly and get around the 2.3s delay that otherwise occurs due + // to multicast timeouts. 
+ if err := m.setHosts(cfg.Hosts); err != nil { + return err + } } } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 171f88fd77b5c..69ca20a947735 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -178,7 +178,8 @@ type CapabilityVersion int // - 129: 2025-10-04: Fixed sleep/wake deadlock in magicsock when using peer relay (PR #17449) // - 130: 2025-10-06: client can send key.HardwareAttestationPublic and key.HardwareAttestationKeySignature in MapRequest // - 131: 2025-11-25: client respects [NodeAttrDefaultAutoUpdate] -const CurrentCapabilityVersion CapabilityVersion = 131 +// - 132: 2026-02-13: client respects [NodeAttrDisableHostsFileUpdates] +const CurrentCapabilityVersion CapabilityVersion = 132 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2740,6 +2741,13 @@ const ( // // The value of the key in [NodeCapMap] is a JSON boolean. NodeAttrDefaultAutoUpdate NodeCapability = "default-auto-update" + + // NodeAttrDisableHostsFileUpdates indicates that the node's DNS manager should + // not create hosts file entries when it normally would, such as when we're not + // the primary resolver on Windows or when the host is domain-joined and its + // primary domain takes precedence over MagicDNS. As of 2026-02-12, it is only + // used on Windows. + NodeAttrDisableHostsFileUpdates NodeCapability = "disable-hosts-file-updates" ) // SetDNSRequest is a request to add a DNS record. From be4449f6e08af0dd67bf702ce362cc6a895d3f9b Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Fri, 13 Feb 2026 13:30:48 -0500 Subject: [PATCH 0974/1093] util/clientmetric, wgengine/watchdog: report watchdog errors in user/client metrics (#18591) fixes tailscale/corp#36708 Sets up a set of metrics to report watchdog timeouts for wgengine and reports an event for any watchdog timeout. 
Signed-off-by: Jonathan Nobels --- wgengine/watchdog.go | 99 ++++++++++++++++++++++++++++++++------- wgengine/watchdog_test.go | 95 +++++++++++++++++++++++++++++++++++++ 2 files changed, 176 insertions(+), 18 deletions(-) diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go index 18b36e0039d6d..f12b1c19e2764 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -22,12 +22,47 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/netmap" + "tailscale.com/util/clientmetric" "tailscale.com/wgengine/filter" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" "tailscale.com/wgengine/wgint" ) +type watchdogEvent string + +const ( + Any watchdogEvent = "Any" + Reconfig watchdogEvent = "Reconfig" + ResetAndStop watchdogEvent = "ResetAndStop" + SetFilter watchdogEvent = "SetFilter" + SetJailedFilter watchdogEvent = "SetJailedFilter" + SetStatusCallback watchdogEvent = "SetStatusCallback" + UpdateStatus watchdogEvent = "UpdateStatus" + RequestStatus watchdogEvent = "RequestStatus" + SetNetworkMap watchdogEvent = "SetNetworkMap" + Ping watchdogEvent = "Ping" + Close watchdogEvent = "Close" + PeerForIPEvent watchdogEvent = "PeerForIP" +) + +var ( + watchdogMetrics = map[watchdogEvent]*clientmetric.Metric{ + Any: clientmetric.NewCounter("watchdog_timeout_any_total"), + Reconfig: clientmetric.NewCounter("watchdog_timeout_reconfig"), + ResetAndStop: clientmetric.NewCounter("watchdog_timeout_resetandstop"), + SetFilter: clientmetric.NewCounter("watchdog_timeout_setfilter"), + SetJailedFilter: clientmetric.NewCounter("watchdog_timeout_setjailedfilter"), + SetStatusCallback: clientmetric.NewCounter("watchdog_timeout_setstatuscallback"), + UpdateStatus: clientmetric.NewCounter("watchdog_timeout_updatestatus"), + RequestStatus: clientmetric.NewCounter("watchdog_timeout_requeststatus"), + SetNetworkMap: clientmetric.NewCounter("watchdog_timeout_setnetworkmap"), + Ping: clientmetric.NewCounter("watchdog_timeout_ping"), + Close: 
clientmetric.NewCounter("watchdog_timeout_close"), + PeerForIPEvent: clientmetric.NewCounter("watchdog_timeout_peerforipevent"), + } +) + // NewWatchdog wraps an Engine and makes sure that all methods complete // within a reasonable amount of time. // @@ -46,7 +81,7 @@ func NewWatchdog(e Engine) Engine { } type inFlightKey struct { - op string + op watchdogEvent ctr uint64 } @@ -62,12 +97,13 @@ type watchdogEngine struct { inFlightCtr uint64 } -func (e *watchdogEngine) watchdogErr(name string, fn func() error) error { +func (e *watchdogEngine) watchdogErr(event watchdogEvent, fn func() error) error { // Track all in-flight operations so we can print more useful error // messages on watchdog failure e.inFlightMu.Lock() + key := inFlightKey{ - op: name, + op: event, ctr: e.inFlightCtr, } e.inFlightCtr++ @@ -93,7 +129,6 @@ func (e *watchdogEngine) watchdogErr(name string, fn func() error) error { buf := new(strings.Builder) pprof.Lookup("goroutine").WriteTo(buf, 1) e.logf("wgengine watchdog stacks:\n%s", buf.String()) - // Collect the list of in-flight operations for debugging. var ( b []byte @@ -104,64 +139,92 @@ func (e *watchdogEngine) watchdogErr(name string, fn func() error) error { dur := now.Sub(t).Round(time.Millisecond) b = fmt.Appendf(b, "in-flight[%d]: name=%s duration=%v start=%s\n", k.ctr, k.op, dur, t.Format(time.RFC3339Nano)) } + e.recordEvent(event) e.inFlightMu.Unlock() // Print everything as a single string to avoid log // rate limits. 
e.logf("wgengine watchdog in-flight:\n%s", b) - e.fatalf("wgengine: watchdog timeout on %s", name) + e.fatalf("wgengine: watchdog timeout on %s", event) return nil } } -func (e *watchdogEngine) watchdog(name string, fn func()) { - e.watchdogErr(name, func() error { +func (e *watchdogEngine) recordEvent(event watchdogEvent) { + if watchdogMetrics == nil { + return + } + + mEvent, ok := watchdogMetrics[event] + if ok { + mEvent.Add(1) + } + mAny, ok := watchdogMetrics[Any] + if ok { + mAny.Add(1) + } +} + +func (e *watchdogEngine) watchdog(event watchdogEvent, fn func()) { + e.watchdogErr(event, func() error { fn() return nil }) } func (e *watchdogEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, dnsCfg *dns.Config) error { - return e.watchdogErr("Reconfig", func() error { return e.wrap.Reconfig(cfg, routerCfg, dnsCfg) }) + return e.watchdogErr(Reconfig, func() error { return e.wrap.Reconfig(cfg, routerCfg, dnsCfg) }) } + func (e *watchdogEngine) ResetAndStop() (st *Status, err error) { - e.watchdog("ResetAndStop", func() { + e.watchdog(ResetAndStop, func() { st, err = e.wrap.ResetAndStop() }) return st, err } + func (e *watchdogEngine) GetFilter() *filter.Filter { return e.wrap.GetFilter() } + func (e *watchdogEngine) SetFilter(filt *filter.Filter) { - e.watchdog("SetFilter", func() { e.wrap.SetFilter(filt) }) + e.watchdog(SetFilter, func() { e.wrap.SetFilter(filt) }) } + func (e *watchdogEngine) GetJailedFilter() *filter.Filter { return e.wrap.GetJailedFilter() } + func (e *watchdogEngine) SetJailedFilter(filt *filter.Filter) { - e.watchdog("SetJailedFilter", func() { e.wrap.SetJailedFilter(filt) }) + e.watchdog(SetJailedFilter, func() { e.wrap.SetJailedFilter(filt) }) } + func (e *watchdogEngine) SetStatusCallback(cb StatusCallback) { - e.watchdog("SetStatusCallback", func() { e.wrap.SetStatusCallback(cb) }) + e.watchdog(SetStatusCallback, func() { e.wrap.SetStatusCallback(cb) }) } + func (e *watchdogEngine) UpdateStatus(sb *ipnstate.StatusBuilder) { - 
e.watchdog("UpdateStatus", func() { e.wrap.UpdateStatus(sb) }) + e.watchdog(UpdateStatus, func() { e.wrap.UpdateStatus(sb) }) } + func (e *watchdogEngine) RequestStatus() { - e.watchdog("RequestStatus", func() { e.wrap.RequestStatus() }) + e.watchdog(RequestStatus, func() { e.wrap.RequestStatus() }) } + func (e *watchdogEngine) SetNetworkMap(nm *netmap.NetworkMap) { - e.watchdog("SetNetworkMap", func() { e.wrap.SetNetworkMap(nm) }) + e.watchdog(SetNetworkMap, func() { e.wrap.SetNetworkMap(nm) }) } + func (e *watchdogEngine) Ping(ip netip.Addr, pingType tailcfg.PingType, size int, cb func(*ipnstate.PingResult)) { - e.watchdog("Ping", func() { e.wrap.Ping(ip, pingType, size, cb) }) + e.watchdog(Ping, func() { e.wrap.Ping(ip, pingType, size, cb) }) } + func (e *watchdogEngine) Close() { - e.watchdog("Close", e.wrap.Close) + e.watchdog(Close, e.wrap.Close) } + func (e *watchdogEngine) PeerForIP(ip netip.Addr) (ret PeerForIP, ok bool) { - e.watchdog("PeerForIP", func() { ret, ok = e.wrap.PeerForIP(ip) }) + e.watchdog(PeerForIPEvent, func() { ret, ok = e.wrap.PeerForIP(ip) }) return ret, ok } diff --git a/wgengine/watchdog_test.go b/wgengine/watchdog_test.go index 47f133373c445..8032339573e90 100644 --- a/wgengine/watchdog_test.go +++ b/wgengine/watchdog_test.go @@ -1,10 +1,13 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause +//go:build !js + package wgengine import ( "runtime" + "sync" "testing" "time" @@ -44,3 +47,95 @@ func TestWatchdog(t *testing.T) { e.Close() }) } + +func TestWatchdogMetrics(t *testing.T) { + tests := []struct { + name string + events []watchdogEvent + wantCounts map[watchdogEvent]int64 + }{ + { + name: "single event types", + events: []watchdogEvent{RequestStatus, PeerForIPEvent, Ping}, + wantCounts: map[watchdogEvent]int64{ + RequestStatus: 1, + PeerForIPEvent: 1, + Ping: 1, + }, + }, + { + name: "repeated events", + events: []watchdogEvent{RequestStatus, RequestStatus, Ping, RequestStatus}, + wantCounts: 
map[watchdogEvent]int64{ + RequestStatus: 3, + Ping: 1, + }, + }, + } + + // For swallowing fatalf calls and stack traces + logf := func(format string, args ...any) {} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + clearMetrics(t) + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) + reg := new(usermetric.Registry) + e, err := NewFakeUserspaceEngine(logf, 0, ht, reg, bus) + if err != nil { + t.Fatal(err) + } + e = NewWatchdog(e) + w := e.(*watchdogEngine) + w.maxWait = 1 * time.Microsecond + w.logf = logf + w.fatalf = logf + + var wg sync.WaitGroup + wg.Add(len(tt.events)) + + for _, ev := range tt.events { + blocked := make(chan struct{}) + w.watchdog(ev, func() { + defer wg.Done() + <-blocked + }) + close(blocked) + } + wg.Wait() + + // Check individual event counts + for ev, want := range tt.wantCounts { + m, ok := watchdogMetrics[ev] + if !ok { + t.Fatalf("no metric found for event %q", ev) + } + got := m.Value() + if got != want { + t.Errorf("got %d metric events for %q, want %d", got, ev, want) + } + } + + // Check total count for Any + m, ok := watchdogMetrics[Any] + if !ok { + t.Fatalf("no Any metric found") + } + got := m.Value() + if got != int64(len(tt.events)) { + t.Errorf("got %d metric events for Any, want %d", got, len(tt.events)) + } + }) + } +} + +func clearMetrics(t *testing.T) { + t.Helper() + if watchdogMetrics == nil { + return + } + for _, m := range watchdogMetrics { + m.Set(0) + } +} From 4f1406f05ae7a1ea4c79d12587bdb3156bb2e12e Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Fri, 13 Feb 2026 10:59:43 -0800 Subject: [PATCH 0975/1093] ipn/ipnlocal/netmapcache: include packet filters in the cache (#18715) Store packet filter rules in the cache. The match expressions are derived from the filter rules, so these do not need to be stored explicitly, but ensure they are properly reconstructed when the cache is read back. Update the tests to include these fields, and provide representative values. 
Updates #12639 Change-Id: I9bdb972a86d2c6387177d393ada1f54805a2448b Signed-off-by: M. J. Fromberger --- ipn/ipnlocal/netmapcache/netmapcache.go | 86 +++++++++++++------- ipn/ipnlocal/netmapcache/netmapcache_test.go | 55 ++++++++++++- ipn/ipnlocal/netmapcache/types.go | 7 ++ 3 files changed, 115 insertions(+), 33 deletions(-) diff --git a/ipn/ipnlocal/netmapcache/netmapcache.go b/ipn/ipnlocal/netmapcache/netmapcache.go index b12443b99f473..1b8347f0b8d18 100644 --- a/ipn/ipnlocal/netmapcache/netmapcache.go +++ b/ipn/ipnlocal/netmapcache/netmapcache.go @@ -27,6 +27,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/util/mak" "tailscale.com/util/set" + "tailscale.com/wgengine/filter" ) var ( @@ -45,17 +46,17 @@ var ( type Cache struct { store Store - // wantKeys records the storage keys from the last write or load of a cached + // wantKeys records the cache keys from the last write or load of a cached // netmap. This is used to prune keys that are no longer referenced after an // update. - wantKeys set.Set[string] + wantKeys set.Set[cacheKey] // lastWrote records the last values written to each stored key. // // TODO(creachadair): This is meant to avoid disk writes, but I'm not // convinced we need it. Or maybe just track hashes of the content rather // than caching a complete copy. - lastWrote map[string]lastWrote + lastWrote map[cacheKey]lastWrote } // NewCache constructs a new empty [Cache] from the given [Store]. 
@@ -66,8 +67,8 @@ func NewCache(s Store) *Cache { } return &Cache{ store: s, - wantKeys: make(set.Set[string]), - lastWrote: make(map[string]lastWrote), + wantKeys: make(set.Set[cacheKey]), + lastWrote: make(map[cacheKey]lastWrote), } } @@ -76,7 +77,7 @@ type lastWrote struct { at time.Time } -func (c *Cache) writeJSON(ctx context.Context, key string, v any) error { +func (c *Cache) writeJSON(ctx context.Context, key cacheKey, v any) error { j, err := jsonv1.Marshal(v) if err != nil { return fmt.Errorf("JSON marshalling %q: %w", key, err) @@ -90,7 +91,7 @@ return nil } - if err := c.store.Store(ctx, key, j); err != nil { + if err := c.store.Store(ctx, string(key), j); err != nil { return err } @@ -110,11 +111,12 @@ errs = append(errs, err) break } - if !c.wantKeys.Contains(key) { + ckey := cacheKey(key) + if !c.wantKeys.Contains(ckey) { if err := c.store.Remove(ctx, key); err != nil { errs = append(errs, fmt.Errorf("remove key %q: %w", key, err)) } - delete(c.lastWrote, key) // even if removal failed, we don't want it + delete(c.lastWrote, ckey) // even if removal failed, we don't want it } } return errors.Join(errs...) @@ -177,6 +179,20 @@ func (s FileStore) Remove(ctx context.Context, key string) error { return err } +// cacheKey is a type wrapper for strings used as cache keys. +type cacheKey string + +const ( + selfKey cacheKey = "self" + miscKey cacheKey = "misc" + dnsKey cacheKey = "dns" + derpMapKey cacheKey = "derpmap" + peerKeyPrefix cacheKey = "peer-" // + stable ID + userKeyPrefix cacheKey = "user-" // + profile ID + sshPolicyKey cacheKey = "ssh" + packetFilterKey cacheKey = "filter" +) + // Store records nm in the cache, replacing any previously-cached values.
func (c *Cache) Store(ctx context.Context, nm *netmap.NetworkMap) error { if !buildfeatures.HasCacheNetMap || nm == nil || nm.Cached { @@ -187,7 +203,7 @@ func (c *Cache) Store(ctx context.Context, nm *netmap.NetworkMap) error { } clear(c.wantKeys) - if err := c.writeJSON(ctx, "misc", netmapMisc{ + if err := c.writeJSON(ctx, miscKey, netmapMisc{ MachineKey: &nm.MachineKey, CollectServices: &nm.CollectServices, DisplayMessages: &nm.DisplayMessages, @@ -198,33 +214,36 @@ func (c *Cache) Store(ctx context.Context, nm *netmap.NetworkMap) error { }); err != nil { return err } - if err := c.writeJSON(ctx, "dns", netmapDNS{DNS: &nm.DNS}); err != nil { + if err := c.writeJSON(ctx, dnsKey, netmapDNS{DNS: &nm.DNS}); err != nil { return err } - if err := c.writeJSON(ctx, "derpmap", netmapDERPMap{DERPMap: &nm.DERPMap}); err != nil { + if err := c.writeJSON(ctx, derpMapKey, netmapDERPMap{DERPMap: &nm.DERPMap}); err != nil { return err } - if err := c.writeJSON(ctx, "self", netmapNode{Node: &nm.SelfNode}); err != nil { + if err := c.writeJSON(ctx, selfKey, netmapNode{Node: &nm.SelfNode}); err != nil { return err // N.B. The NodeKey and AllCaps fields can be recovered from SelfNode on // load, and do not need to be stored separately. 
} for _, p := range nm.Peers { - key := fmt.Sprintf("peer-%s", p.StableID()) + key := peerKeyPrefix + cacheKey(p.StableID()) if err := c.writeJSON(ctx, key, netmapNode{Node: &p}); err != nil { return err } } for uid, u := range nm.UserProfiles { - key := fmt.Sprintf("user-%d", uid) - if err := c.writeJSON(ctx, key, netmapUserProfile{UserProfile: &u}); err != nil { + key := fmt.Sprintf("%s%d", userKeyPrefix, uid) + if err := c.writeJSON(ctx, cacheKey(key), netmapUserProfile{UserProfile: &u}); err != nil { return err } } + if err := c.writeJSON(ctx, packetFilterKey, netmapPacketFilter{Rules: &nm.PacketFilterRules}); err != nil { + return err + } if buildfeatures.HasSSH && nm.SSHPolicy != nil { - if err := c.writeJSON(ctx, "ssh", netmapSSH{SSHPolicy: &nm.SSHPolicy}); err != nil { + if err := c.writeJSON(ctx, sshPolicyKey, netmapSSH{SSHPolicy: &nm.SSHPolicy}); err != nil { return err } } @@ -244,12 +263,12 @@ func (c *Cache) Load(ctx context.Context) (*netmap.NetworkMap, error) { // At minimum, we require that the cache contain a "self" node, or the data // are not usable. - if self, err := c.store.Load(ctx, "self"); errors.Is(err, ErrKeyNotFound) { + if self, err := c.store.Load(ctx, string(selfKey)); errors.Is(err, ErrKeyNotFound) { return nil, ErrCacheNotAvailable } else if err := jsonv1.Unmarshal(self, &netmapNode{Node: &nm.SelfNode}); err != nil { return nil, err } - c.wantKeys.Add("self") + c.wantKeys.Add(selfKey) // If we successfully recovered a SelfNode, pull out its related fields. if s := nm.SelfNode; s.Valid() { @@ -266,7 +285,7 @@ func (c *Cache) Load(ctx context.Context) (*netmap.NetworkMap, error) { // Unmarshal the contents of each specified cache entry directly into the // fields of the output. See the comment in types.go for more detail. 
- if err := c.readJSON(ctx, "misc", &netmapMisc{ + if err := c.readJSON(ctx, miscKey, &netmapMisc{ MachineKey: &nm.MachineKey, CollectServices: &nm.CollectServices, DisplayMessages: &nm.DisplayMessages, @@ -278,43 +297,52 @@ func (c *Cache) Load(ctx context.Context) (*netmap.NetworkMap, error) { return nil, err } - if err := c.readJSON(ctx, "dns", &netmapDNS{DNS: &nm.DNS}); err != nil { + if err := c.readJSON(ctx, dnsKey, &netmapDNS{DNS: &nm.DNS}); err != nil { return nil, err } - if err := c.readJSON(ctx, "derpmap", &netmapDERPMap{DERPMap: &nm.DERPMap}); err != nil { + if err := c.readJSON(ctx, derpMapKey, &netmapDERPMap{DERPMap: &nm.DERPMap}); err != nil { return nil, err } - for key, err := range c.store.List(ctx, "peer-") { + for key, err := range c.store.List(ctx, string(peerKeyPrefix)) { if err != nil { return nil, err } var peer tailcfg.NodeView - if err := c.readJSON(ctx, key, &netmapNode{Node: &peer}); err != nil { + if err := c.readJSON(ctx, cacheKey(key), &netmapNode{Node: &peer}); err != nil { return nil, err } nm.Peers = append(nm.Peers, peer) } slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { return cmp.Compare(a.ID(), b.ID()) }) - for key, err := range c.store.List(ctx, "user-") { + for key, err := range c.store.List(ctx, string(userKeyPrefix)) { if err != nil { return nil, err } var up tailcfg.UserProfileView - if err := c.readJSON(ctx, key, &netmapUserProfile{UserProfile: &up}); err != nil { + if err := c.readJSON(ctx, cacheKey(key), &netmapUserProfile{UserProfile: &up}); err != nil { return nil, err } mak.Set(&nm.UserProfiles, up.ID(), up) } - if err := c.readJSON(ctx, "ssh", &netmapSSH{SSHPolicy: &nm.SSHPolicy}); err != nil { + if err := c.readJSON(ctx, sshPolicyKey, &netmapSSH{SSHPolicy: &nm.SSHPolicy}); err != nil { return nil, err } + if err := c.readJSON(ctx, packetFilterKey, &netmapPacketFilter{Rules: &nm.PacketFilterRules}); err != nil { + return nil, err + } else if r := nm.PacketFilterRules; r.Len() != 0 { + // Reconstitute 
packet match expressions from the filter rules, + nm.PacketFilter, err = filter.MatchesFromFilterRules(r.AsSlice()) + if err != nil { + return nil, err + } + } return &nm, nil } -func (c *Cache) readJSON(ctx context.Context, key string, value any) error { - data, err := c.store.Load(ctx, key) +func (c *Cache) readJSON(ctx context.Context, key cacheKey, value any) error { + data, err := c.store.Load(ctx, string(key)) if errors.Is(err, ErrKeyNotFound) { return nil } else if err != nil { diff --git a/ipn/ipnlocal/netmapcache/netmapcache_test.go b/ipn/ipnlocal/netmapcache/netmapcache_test.go index b31db2d5eb8b5..b5a46d2982a04 100644 --- a/ipn/ipnlocal/netmapcache/netmapcache_test.go +++ b/ipn/ipnlocal/netmapcache/netmapcache_test.go @@ -11,6 +11,7 @@ import ( "fmt" "iter" "maps" + "net/netip" "os" "reflect" "slices" @@ -23,10 +24,13 @@ import ( "tailscale.com/ipn/ipnlocal/netmapcache" "tailscale.com/tailcfg" "tailscale.com/tka" + "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/views" "tailscale.com/util/set" + "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/filter/filtertype" ) // Input values for valid-looking placeholder values for keys, hashes, etc. 
@@ -68,6 +72,27 @@ func init() { panic(fmt.Sprintf("invalid test AUM hash %q: %v", testAUMHashString, err)) } + pfRules := []tailcfg.FilterRule{ + { + SrcIPs: []string{"192.168.0.0/16"}, + DstPorts: []tailcfg.NetPortRange{{ + IP: "*", + Ports: tailcfg.PortRange{First: 2000, Last: 9999}, + }}, + IPProto: []int{1, 6, 17}, // ICMPv4, TCP, UDP + CapGrant: []tailcfg.CapGrant{{ + Dsts: []netip.Prefix{netip.MustParsePrefix("192.168.4.0/24")}, + CapMap: tailcfg.PeerCapMap{ + "tailscale.com/testcap": []tailcfg.RawMessage{`"apple"`, `"pear"`}, + }, + }}, + }, + } + pfMatch, err := filter.MatchesFromFilterRules(pfRules) + if err != nil { + panic(fmt.Sprintf("invalid packet filter rules: %v", err)) + } + // The following network map must have a non-zero non-empty value for every // field that is to be stored in the cache. The test checks for this using // reflection, as a way to ensure that new fields added to the type are @@ -79,8 +104,9 @@ func init() { testMap = &netmap.NetworkMap{ Cached: false, // not cached, this is metadata for the cache machinery - PacketFilter: nil, // not cached - PacketFilterRules: views.Slice[tailcfg.FilterRule]{}, // not cached + // These two fields must contain compatible data. + PacketFilterRules: views.SliceOf(pfRules), + PacketFilter: pfMatch, // Fields stored under the "self" key. // Note that SelfNode must have a valid user in order to be considered @@ -235,7 +261,7 @@ func TestInvalidCache(t *testing.T) { // skippedMapFields are the names of fields that should not be considered by // network map caching, and thus skipped when comparing test results. 
var skippedMapFields = []string{ - "Cached", "PacketFilter", "PacketFilterRules", + "Cached", } // checkFieldCoverage logs an error in t if any of the fields of nm are zero @@ -366,6 +392,27 @@ func (t testStore) Remove(_ context.Context, key string) error { delete(t, key); func diffNetMaps(got, want *netmap.NetworkMap) string { return cmp.Diff(got, want, cmpopts.IgnoreFields(netmap.NetworkMap{}, skippedMapFields...), - cmpopts.EquateComparable(key.NodePublic{}, key.MachinePublic{}), + cmpopts.IgnoreFields(filtertype.Match{}, "SrcsContains"), // function pointer + cmpopts.EquateComparable(key.NodePublic{}, key.MachinePublic{}, netip.Prefix{}), + cmp.Comparer(eqViewsSlice(eqFilterRules)), + cmp.Comparer(eqViewsSlice(func(a, b ipproto.Proto) bool { return a == b })), ) } + +func eqViewsSlice[T any](eqVal func(x, y T) bool) func(a, b views.Slice[T]) bool { + return func(a, b views.Slice[T]) bool { + if a.Len() != b.Len() { + return false + } + for i := range a.Len() { + if !eqVal(a.At(i), b.At(i)) { + return false + } + } + return true + } +} + +func eqFilterRules(a, b tailcfg.FilterRule) bool { + return cmp.Equal(a, b, cmpopts.EquateComparable(netip.Prefix{})) +} diff --git a/ipn/ipnlocal/netmapcache/types.go b/ipn/ipnlocal/netmapcache/types.go index 2fb5a1575f1b3..c9f9efc1e3b21 100644 --- a/ipn/ipnlocal/netmapcache/types.go +++ b/ipn/ipnlocal/netmapcache/types.go @@ -7,6 +7,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tka" "tailscale.com/types/key" + "tailscale.com/types/views" ) // The fields in the following wrapper types are all pointers, even when their @@ -50,3 +51,9 @@ type netmapNode struct { type netmapUserProfile struct { UserProfile *tailcfg.UserProfileView } + +type netmapPacketFilter struct { + Rules *views.Slice[tailcfg.FilterRule] + + // Match expressions are derived from the rules. 
+} From 3cc7f897d3dec55b87dc290d22e76f50139f59d7 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Fri, 13 Feb 2026 12:51:51 -0600 Subject: [PATCH 0976/1093] health: always include control health messages in the current state (*health.Tracker).CurrentState() returns an empty state when there are no client-side warnables, even when there are control-health messages, which is incorrect. This fixes it. Updates tailscale/corp#37275 Signed-off-by: Nick Khyl --- health/state.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/health/state.go b/health/state.go index 91e30b75e796d..61d36797ceaf0 100644 --- a/health/state.go +++ b/health/state.go @@ -11,6 +11,7 @@ import ( "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" + "tailscale.com/util/mak" ) // State contains the health status of the backend, and is @@ -128,11 +129,7 @@ func (t *Tracker) CurrentState() *State { t.mu.Lock() defer t.mu.Unlock() - if t.warnableVal == nil || len(t.warnableVal) == 0 { - return &State{} - } - - wm := map[WarnableCode]UnhealthyState{} + var wm map[WarnableCode]UnhealthyState for w, ws := range t.warnableVal { if !w.IsVisible(ws, t.now) { @@ -145,7 +142,7 @@ func (t *Tracker) CurrentState() *State { continue } state := w.unhealthyState(ws) - wm[w.Code] = state.withETag() + mak.Set(&wm, w.Code, state.withETag()) } for id, msg := range t.lastNotifiedControlMessages { @@ -165,7 +162,7 @@ func (t *Tracker) CurrentState() *State { } } - wm[state.WarnableCode] = state.withETag() + mak.Set(&wm, state.WarnableCode, state.withETag()) } return &State{ From 371d6369cd25afb7e3dd170873f232ad6dba9a02 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Sun, 8 Feb 2026 02:13:45 +0000 Subject: [PATCH 0977/1093] gokrazy: use monorepo for gokrazy appliance builds (monogok) This switches our gokrazy builds to use a new variant of cmd/gok called opinionated about using monorepos: https://github.com/bradfitz/monogok And with that, we can get rid of all the go.mod files 
and builddir forests under gokrazy/**. Updates #13038 Updates gokrazy/gokrazy#361 Change-Id: I9f18fbe59b8792286abc1e563d686ea9472c622d Signed-off-by: Brad Fitzpatrick --- .github/workflows/natlab-integrationtest.yml | 2 +- flake.nix | 2 +- go.mod | 20 +- go.mod.sri | 2 +- go.sum | 49 +++- gokrazy/build.go | 13 +- gokrazy/go.mod | 19 -- gokrazy/go.sum | 33 --- gokrazy/gok | 8 - gokrazy/monogok | 8 + .../gokrazy/gokrazy/cmd/dhcp/go.mod | 18 -- .../gokrazy/gokrazy/cmd/dhcp/go.sum | 39 --- .../github.com/gokrazy/gokrazy/go.mod | 15 - .../github.com/gokrazy/gokrazy/go.sum | 23 -- .../github.com/gokrazy/kernel.arm64/go.mod | 5 - .../github.com/gokrazy/kernel.arm64/go.sum | 2 - .../github.com/gokrazy/serial-busybox/go.mod | 5 - .../github.com/gokrazy/serial-busybox/go.sum | 26 -- .../tailscale/gokrazy-kernel/go.mod | 5 - .../tailscale/gokrazy-kernel/go.sum | 4 - .../builddir/tailscale.com/go.mod | 7 - .../builddir/tailscale.com/go.sum | 266 ----------------- .../gokrazy/gokrazy/cmd/dhcp/go.mod | 18 -- .../gokrazy/gokrazy/cmd/dhcp/go.sum | 39 --- .../github.com/gokrazy/gokrazy/go.mod | 15 - .../github.com/gokrazy/gokrazy/go.sum | 23 -- .../github.com/gokrazy/serial-busybox/go.mod | 5 - .../github.com/gokrazy/serial-busybox/go.sum | 26 -- .../tailscale/gokrazy-kernel/go.mod | 5 - .../tailscale/gokrazy-kernel/go.sum | 4 - .../natlabapp/builddir/tailscale.com/go.mod | 7 - .../natlabapp/builddir/tailscale.com/go.sum | 268 ------------------ gokrazy/natlabapp/gokrazydeps.go | 16 ++ gokrazy/tidy-deps.go | 2 +- .../github.com/gokrazy/breakglass/go.mod | 19 -- .../github.com/gokrazy/breakglass/go.sum | 46 --- .../gokrazy/gokrazy/cmd/dhcp/go.mod | 18 -- .../gokrazy/gokrazy/cmd/dhcp/go.sum | 39 --- .../github.com/gokrazy/gokrazy/cmd/ntp/go.mod | 5 - .../github.com/gokrazy/gokrazy/cmd/ntp/go.sum | 8 - .../github.com/gokrazy/gokrazy/go.mod | 15 - .../github.com/gokrazy/gokrazy/go.sum | 23 -- .../github.com/gokrazy/rpi-eeprom/go.mod | 5 - .../github.com/gokrazy/rpi-eeprom/go.sum | 3 - 
.../github.com/gokrazy/serial-busybox/go.mod | 5 - .../github.com/gokrazy/serial-busybox/go.sum | 26 -- .../tailscale/gokrazy-kernel/go.mod | 5 - .../tailscale/gokrazy-kernel/go.sum | 4 - gokrazy/tsapp/builddir/tailscale.com/go.mod | 7 - gokrazy/tsapp/builddir/tailscale.com/go.sum | 262 ----------------- gokrazy/tsapp/gokrazydeps.go | 18 ++ pkgdoc_test.go | 4 + shell.nix | 2 +- tstest/integration/nat/nat_test.go | 2 +- 54 files changed, 124 insertions(+), 1391 deletions(-) delete mode 100644 gokrazy/go.mod delete mode 100644 gokrazy/go.sum delete mode 100755 gokrazy/gok create mode 100755 gokrazy/monogok delete mode 100644 gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod delete mode 100644 gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum delete mode 100644 gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.mod delete mode 100644 gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.sum delete mode 100644 gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod delete mode 100644 gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.sum delete mode 100644 gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.mod delete mode 100644 gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.sum delete mode 100644 gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.mod delete mode 100644 gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.sum delete mode 100644 gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod delete mode 100644 gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum delete mode 100644 gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod delete mode 100644 gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum delete mode 100644 gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.mod delete mode 100644 
gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.sum delete mode 100644 gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.mod delete mode 100644 gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.sum delete mode 100644 gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod delete mode 100644 gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum delete mode 100644 gokrazy/natlabapp/builddir/tailscale.com/go.mod delete mode 100644 gokrazy/natlabapp/builddir/tailscale.com/go.sum create mode 100644 gokrazy/natlabapp/gokrazydeps.go delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.mod delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.sum delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.mod delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.sum delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.mod delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.sum delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.mod delete mode 100644 gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.sum delete mode 100644 gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod delete mode 100644 gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum delete mode 100644 gokrazy/tsapp/builddir/tailscale.com/go.mod delete mode 100644 gokrazy/tsapp/builddir/tailscale.com/go.sum create mode 100644 gokrazy/tsapp/gokrazydeps.go diff --git a/.github/workflows/natlab-integrationtest.yml 
b/.github/workflows/natlab-integrationtest.yml index 3e87ba4345180..e10d879c3daa5 100644 --- a/.github/workflows/natlab-integrationtest.yml +++ b/.github/workflows/natlab-integrationtest.yml @@ -18,7 +18,7 @@ jobs: uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Install qemu run: | - sudo rm /var/lib/man-db/auto-update + sudo rm -f /var/lib/man-db/auto-update sudo apt-get -y update sudo apt-get -y remove man-db sudo apt-get install -y qemu-system-x86 qemu-utils diff --git a/flake.nix b/flake.nix index b29d45aacf43b..bbd1f8b48be0c 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-5A6EShJ33yHQdr6tgsNCRFLvNUUjIKXDv5DvzsiUwFI= +# nix-direnv cache busting line: sha256-e5fAO7gye8B5FGBTxLNVTKq6dp8By9iDEw72M1/y4ZE= diff --git a/go.mod b/go.mod index c69f4f1edc5aa..bc356a19c9b59 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd + github.com/bradfitz/monogok v0.0.0-20260208031948-2219c393d032 github.com/bramvdbogaerde/go-scp v1.4.0 github.com/cilium/ebpf v0.16.0 github.com/coder/websocket v1.8.12 @@ -43,6 +44,9 @@ require ( github.com/go-ole/go-ole v1.3.0 github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 + github.com/gokrazy/breakglass v0.0.0-20251229072214-9dbc0478d486 + github.com/gokrazy/gokrazy v0.0.0-20260123094004-294c93fa173c + github.com/gokrazy/serial-busybox v0.0.0-20250119153030-ac58ba7574e7 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 github.com/golang/snappy v0.0.4 github.com/golangci/golangci-lint v1.57.1 @@ -87,6 +91,7 @@ require ( github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f 
github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 + github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a github.com/tailscale/mkctr v0.0.0-20260107121656-ea857e3e500b @@ -148,6 +153,7 @@ require ( github.com/alexkohler/nakedret/v2 v2.0.4 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/beevik/ntp v0.3.0 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/boltdb/bolt v1.3.1 // indirect github.com/bombsimon/wsl/v4 v4.2.1 // indirect @@ -177,11 +183,15 @@ require ( github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/goccy/go-yaml v1.12.0 // indirect + github.com/gokrazy/gokapi v0.0.0-20250222071133-506fdb322775 // indirect + github.com/gokrazy/internal v0.0.0-20251208203110-3c1aa9087c82 // indirect github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/golangci/plugin-module-register v0.1.1 // indirect github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-github/v66 v66.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect + github.com/google/renameio/v2 v2.0.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect @@ -194,13 +204,18 @@ require ( github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/jjti/go-spancheck v0.5.3 // indirect github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/josharian/native v1.1.0 // indirect github.com/karamaru-alpha/copyloopvar v1.0.8 // indirect + github.com/kenshaw/evdev v0.1.0 // indirect + github.com/kr/pty 
v1.1.8 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/macabu/inamedparam v0.1.3 // indirect + github.com/mdlayher/packet v1.1.2 // indirect + github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/moby/buildkit v0.20.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect @@ -212,11 +227,13 @@ require ( github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/puzpuzpuz/xsync v1.5.2 // indirect + github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 // indirect github.com/rubenv/sql-migrate v1.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect github.com/stacklok/frizbee v0.1.7 // indirect + github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 // indirect github.com/xen0n/gosmopolitan v1.2.2 // indirect github.com/xlab/treeprint v1.2.0 // indirect github.com/ykadowak/zerologlint v0.1.5 // indirect @@ -231,6 +248,7 @@ require ( go.uber.org/automaxprocs v1.5.3 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto/x509roots/fallback v0.0.0-20260113154411-7d0074ccc6f1 // indirect golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect @@ -431,7 +449,7 @@ require ( github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // 
indirect github.com/spf13/cast v1.7.0 // indirect - github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/cobra v1.10.2 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.9 // indirect github.com/spf13/viper v1.16.0 // indirect diff --git a/go.mod.sri b/go.mod.sri index 4edd4e7acabad..de11cbc71e1b7 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-5A6EShJ33yHQdr6tgsNCRFLvNUUjIKXDv5DvzsiUwFI= +sha256-e5fAO7gye8B5FGBTxLNVTKq6dp8By9iDEw72M1/y4ZE= diff --git a/go.sum b/go.sum index e925fcc3d4371..c924e5e6e1a12 100644 --- a/go.sum +++ b/go.sum @@ -183,6 +183,9 @@ github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 h1:bXAPYSbdYbS5VTy92NIUbeDI1qyggi+JYh5op9IFlcQ= github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c= +github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw= +github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -201,6 +204,8 @@ github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFi github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd h1:1Df3FBmfyUCIQ4eKzAPXIWTfewY89L0fWPWO56zWCyI= github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd/go.mod h1:2+xptBAd0m2kZ1wLO4AYZhldLEFPy+KeGwmnlXLvy+w= 
+github.com/bradfitz/monogok v0.0.0-20260208031948-2219c393d032 h1:xDomVqO85ss/98Ky5zxM/g86bXDNBLebM2I9G/fu6uA= +github.com/bradfitz/monogok v0.0.0-20260208031948-2219c393d032/go.mod h1:TG1HbU9fRVDnNgXncVkKz9GdvjIvqquXjH6QZSEVmY4= github.com/bramvdbogaerde/go-scp v1.4.0 h1:jKMwpwCbcX1KyvDbm/PDJuXcMuNVlLGi0Q0reuzjyKY= github.com/bramvdbogaerde/go-scp v1.4.0/go.mod h1:on2aH5AxaFb2G0N5Vsdy6B0Ml7k9HuHSwfo1y0QzAbQ= github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= @@ -277,6 +282,7 @@ github.com/creachadair/msync v0.7.1 h1:SeZmuEBXQPe5GqV/C94ER7QIZPwtvFbeQiykzt/7u github.com/creachadair/msync v0.7.1/go.mod h1:8CcFlLsSujfHE5wWm19uUBLHIPDAUr6LXDwneVMO008= github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc= github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= @@ -473,6 +479,18 @@ github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeH github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gokrazy/breakglass v0.0.0-20251229072214-9dbc0478d486 h1:QBELQyXGy+eCEcWtvSfslJk3y7nUPZldOwBqIz1tkXc= +github.com/gokrazy/breakglass v0.0.0-20251229072214-9dbc0478d486/go.mod h1:PFPkRFcazBmCZKo+sBaGjsWouTtfDvg13nCDm0tFOCA= +github.com/gokrazy/gokapi v0.0.0-20250222071133-506fdb322775 h1:f5+2UMRRbr3+e/gdWCBNn48chS/KMMljfbmlSSHfRBA= +github.com/gokrazy/gokapi v0.0.0-20250222071133-506fdb322775/go.mod 
h1:q9mIV8al0wqmqFXJhKiO3SOHkL9/7Q4kIMynqUQWhgU= +github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904/go.mod h1:pq6rGHqxMRPSaTXaCMzIZy0wLDusAJyoVNyNo05RLs0= +github.com/gokrazy/gokrazy v0.0.0-20260123094004-294c93fa173c h1:grjqEMf6dPJzZxf+gdo8rjx6bcyseO5p9hierlVkhXQ= +github.com/gokrazy/gokrazy v0.0.0-20260123094004-294c93fa173c/go.mod h1:NtMkrFeDGnwldKLi0dLdd2ipNwoVa7TI4HTxsy7lFRg= +github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9/go.mod h1:LA5TQy7LcvYGQOy75tkrYkFUhbV2nl5qEBP47PSi2JA= +github.com/gokrazy/internal v0.0.0-20251208203110-3c1aa9087c82 h1:4ghNfD9NaZLpFrqQiBF6mPVFeMYXJSky38ubVA4ic2E= +github.com/gokrazy/internal v0.0.0-20251208203110-3c1aa9087c82/go.mod h1:dQY4EMkD4L5ZjYJ0SPtpgYbV7MIUMCxNIXiOfnZ6jP4= +github.com/gokrazy/serial-busybox v0.0.0-20250119153030-ac58ba7574e7 h1:gurTGc4sL7Ik+IKZ29rhGgHNZQTXPtEXLw+aM9E+/HE= +github.com/gokrazy/serial-busybox v0.0.0-20250119153030-ac58ba7574e7/go.mod h1:OYcG5tSb+QrelmUOO4EZVUFcIHyyZb0QDbEbZFUp1TA= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -556,6 +574,7 @@ github.com/google/go-tpm-tools v0.3.13-0.20230620182252-4639ecce2aba/go.mod h1:E github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.16/go.mod h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/goterm 
v0.0.0-20200907032337-555d40f16ae2 h1:CVuJwN34x4xM2aT4sIKhmeib40NeBPhRihNjQmpJsA4= @@ -574,6 +593,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= +github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= github.com/google/rpmpack v0.5.0 h1:L16KZ3QvkFGpYhmp23iQip+mx1X39foEsqszjMNBm8A= github.com/google/rpmpack v0.5.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= @@ -704,6 +725,8 @@ github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= @@ -727,6 +750,8 @@ github.com/karamaru-alpha/copyloopvar v1.0.8 h1:gieLARwuByhEMxRwM3GRS/juJqFbLraf github.com/karamaru-alpha/copyloopvar v1.0.8/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k= github.com/kballard/go-shellquote 
v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= +github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -750,6 +775,8 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -812,10 +839,15 @@ github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy5 github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= +github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= +github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= 
+github.com/mdlayher/raw v0.0.0-20190303161257-764d452d77af/go.mod h1:rC/yE65s/DoHB6BzVOUBNYBGTg772JVytyAytffIZkY= github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= +github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5 h1:80FAK3TW5lVymfHu3kvB1QvTZvy9Kmx1lx6sT5Ep16s= +github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5/go.mod h1:z0QjVpjpK4jksEkffQwS3+abQ3XFTm1bnimyDzWyUk0= github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE= github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= @@ -986,6 +1018,9 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rtr7/dhcp4 v0.0.0-20181120124042-778e8c2e24a5/go.mod h1:FwstIpm6vX98QgtR8KEwZcVjiRn2WP76LjXAHj84fK0= +github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 h1:3psQveH4RUiv5yc3p7kRySilf1nSXLQhAvJFwg4fgnE= +github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46/go.mod h1:Ng1F/s+z0zCMsbEFEneh+30LJa9DrTfmA+REbEqcTPk= github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -1043,8 +1078,8 @@ github.com/spf13/afero v1.11.0 
h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -1092,6 +1127,8 @@ github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8 github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 h1:/V2rCMMWcsjYaYO2MeovLw+ClP63OtXgCF2Y1eb8+Ns= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41/go.mod h1:/roCdA6gg6lQyw/Oz6gIIGu3ggJKYhF+WC/AQReE5XQ= +github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= +github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM= github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= @@ -1153,7 +1190,10 @@ 
github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvni github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= +github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 h1:w5OI+kArIBVksl8UGn6ARQshtPCQvDsbuA9NQie3GIg= +github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -1263,12 +1303,15 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod 
h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto/x509roots/fallback v0.0.0-20260113154411-7d0074ccc6f1 h1:EBHQuS9qI8xJ96+YRgVV2ahFLUYbWpt1rf3wPfXN2wQ= +golang.org/x/crypto/x509roots/fallback v0.0.0-20260113154411-7d0074ccc6f1/go.mod h1:MEIPiCnxvQEjA4astfaKItNwEVZA5Ki+3+nyGbJ5N18= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1409,6 +1452,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1445,6 +1489,7 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= diff --git a/gokrazy/build.go b/gokrazy/build.go index ea54cc829d1f1..f92edb1a34abb 100644 --- a/gokrazy/build.go +++ b/gokrazy/build.go @@ -137,25 +137,24 @@ func buildImage() error { // Build the tsapp.img var buf bytes.Buffer cmd := exec.Command("go", "run", - "github.com/gokrazy/tools/cmd/gok", - "--parent_dir="+dir, - "--instance="+*app, + "github.com/bradfitz/monogok/cmd/monogok", "overwrite", - "--full", *app+".img", + "--full", filepath.Join(dir, *app+".img"), "--target_storage_bytes=1258299392") + cmd.Dir = filepath.Join(dir, *app) cmd.Stdout = io.MultiWriter(os.Stdout, &buf) cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { return err } - // gok overwrite emits a line of text saying how to run mkfs.ext4 + // monogok overwrite emits a line of text saying how to run mkfs.ext4 // to create the ext4 /perm filesystem. Parse that and run it. // The regexp is tight to avoid matching if the command changes, // to force us to check it's still correct/safe. But it shouldn't - // change on its own because we pin the gok version in our go.mod. + // change on its own because we pin the monogok version in our go.mod. // - // TODO(bradfitz): emit this in a machine-readable way from gok. + // TODO(bradfitz): emit this in a machine-readable way from monogok. 
rx := regexp.MustCompile(`(?m)/mkfs.ext4 (-F) (-E) (offset=\d+) (\S+) (\d+)\s*?$`) m := rx.FindStringSubmatch(buf.String()) if m == nil { diff --git a/gokrazy/go.mod b/gokrazy/go.mod deleted file mode 100644 index f7483f41d5d46..0000000000000 --- a/gokrazy/go.mod +++ /dev/null @@ -1,19 +0,0 @@ -module tailscale.com/gokrazy - -go 1.23 - -require github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c - -require ( - github.com/breml/rootcerts v0.2.10 // indirect - github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 // indirect - github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57 // indirect - github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/spf13/cobra v1.6.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/mod v0.11.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.28.0 // indirect -) diff --git a/gokrazy/go.sum b/gokrazy/go.sum deleted file mode 100644 index 170d15b3db19c..0000000000000 --- a/gokrazy/go.sum +++ /dev/null @@ -1,33 +0,0 @@ -github.com/breml/rootcerts v0.2.10 h1:UGVZ193UTSUASpGtg6pbDwzOd7XQP+at0Ssg1/2E4h8= -github.com/breml/rootcerts v0.2.10/go.mod h1:24FDtzYMpqIeYC7QzaE8VPRQaFZU5TIUDlyk8qwjD88= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 h1:C7t6eeMaEQVy6e8CarIhscYQlNmw5e3G36y7l7Y21Ao= -github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0/go.mod h1:56wL82FO0bfMU5RvfXoIwSOP2ggqqxT+tAfNEIyxuHw= -github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57 h1:f5bEvO4we3fbfiBkECrrUgWQ8OH6J3SdB2Dwxid/Yx4= -github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57/go.mod h1:SJG1KwuJQXFEoBgryaNCkMbdISyovDgZd0xmXJRZmiw= -github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c 
h1:iEbS8GrNOn671ze8J/AfrYFEVzf8qMx8aR5K0VxPK2w= -github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c/go.mod h1:f2vZhnaPzy92+Bjpx1iuZHK7VuaJx6SNCWQWmu23HZA= -github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2 h1:kBY5R1tSf+EYZ+QaSrofLaVJtBqYsVNVBWkdMq3Smcg= -github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2/go.mod h1:PYOvzGOL4nlBmuxu7IyKQTFLaxr61+WPRNRzVtuYOHw= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/tools v0.1.12 
h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/gokrazy/gok b/gokrazy/gok deleted file mode 100755 index 13111dab28f6a..0000000000000 --- a/gokrazy/gok +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash - -# This is a wrapper around gok that sets --parent_dir. - -dir=$(dirname "${BASH_SOURCE[0]}") - -cd $dir -$dir/../tool/go run github.com/gokrazy/tools/cmd/gok --parent_dir="$dir" "$@" diff --git a/gokrazy/monogok b/gokrazy/monogok new file mode 100755 index 0000000000000..2e09a6918fb5a --- /dev/null +++ b/gokrazy/monogok @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# This is a wrapper around monogok that sets the working directory. + +dir=$(dirname "${BASH_SOURCE[0]}") + +cd $dir +$dir/../tool/go run github.com/bradfitz/monogok/cmd/monogok "$@" diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod deleted file mode 100644 index c56dede46ed65..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect - github.com/google/gopacket v1.1.19 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/josharian/native v1.0.0 // indirect - github.com/mdlayher/packet v1.0.0 // indirect - github.com/mdlayher/socket v0.2.3 // indirect - github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 // indirect - github.com/vishvananda/netlink v1.1.0 // indirect - github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect - golang.org/x/net v0.23.0 // indirect - 
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.20.0 // indirect -) diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum deleted file mode 100644 index 3cd002ae782b1..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum +++ /dev/null @@ -1,39 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= -github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= -github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8= -github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= -github.com/mdlayher/socket v0.2.3 h1:XZA2X2TjdOwNoNPVPclRCURoX/hokBY8nkTmRZFEheM= -github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 h1:3psQveH4RUiv5yc3p7kRySilf1nSXLQhAvJFwg4fgnE= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46/go.mod h1:Ng1F/s+z0zCMsbEFEneh+30LJa9DrTfmA+REbEqcTPk= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns 
v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.mod deleted file mode 100644 index 33656efeea7d7..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240802144848-676865a4e84f // indirect - github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/kenshaw/evdev v0.1.0 // indirect - github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b // indirect - github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/sys v0.20.0 // indirect -) - -replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.sum deleted file mode 100644 index 479eb1cef1ca7..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/gokrazy/go.sum +++ /dev/null @@ -1,23 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a h1:FKeN678rNpKTpWRdFbAhYL9mWzPu57R5XPXCR3WmXdI= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a/go.mod 
h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= -github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b h1:7tUBfsEEBWfFeHOB7CUfoOamak+Gx/BlirfXyPk1WjI= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b/go.mod h1:bmoJUS6qOA3uKFvF3KVuhf7mU1KQirzQMeHXtPyKEqg= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a h1:7dnA8x14JihQmKbPr++Y5CCN/XSyDmOB6cXUxcIj6VQ= -github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f h1:ZSAGWpgs+6dK2oIz5OR+HUul3oJbnhFn8YNgcZ3d9SQ= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 h1:2B8/FbIRqmVgRUulQ4iu1EojniufComYe5Yj4BtIn1c= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -golang.org/x/sys v0.0.0-20201005065044-765f4ea38db3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git 
a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod deleted file mode 100644 index d4708bf4628ff..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/natlabapp.arm64 - -go 1.23.1 - -require github.com/gokrazy/kernel.arm64 v0.0.0-20240830035047-cdba87a9eb0e // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.sum deleted file mode 100644 index 5084da5c5990c..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/kernel.arm64/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/gokrazy/kernel.arm64 v0.0.0-20240830035047-cdba87a9eb0e h1:D9QYleJ7CI4p7gpgUT1mPgAlWMi5au6yOiE8/qC5PhE= -github.com/gokrazy/kernel.arm64 v0.0.0-20240830035047-cdba87a9eb0e/go.mod h1:WWx72LXHEesuJxbopusRfSoKJQ6ffdwkT0DZditdrLo= diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.mod deleted file mode 100644 index de52e181b9c3c..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.sum deleted file mode 100644 index 8135f60c3e791..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/gokrazy/serial-busybox/go.sum +++ /dev/null @@ -1,26 +0,0 @@ -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
-github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904 h1:eqfH4A/LLgxv5RvqEXwVoFvfmpRa8+TokRjB5g6xBkk= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904/go.mod h1:pq6rGHqxMRPSaTXaCMzIZy0wLDusAJyoVNyNo05RLs0= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9 h1:x5jR/nNo4/kMSoNo/nwa2xbL7PN1an8S3oIn4OZJdec= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9/go.mod h1:LA5TQy7LcvYGQOy75tkrYkFUhbV2nl5qEBP47PSi2JA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca h1:x0eSjuFy8qsRctVHeWm3EC474q3xm4h3OOOrYpcqyyA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca/go.mod h1:OYcG5tSb+QrelmUOO4EZVUFcIHyyZb0QDbEbZFUp1TA= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gopacket v1.1.16/go.mod h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= -github.com/mdlayher/raw v0.0.0-20190303161257-764d452d77af/go.mod h1:rC/yE65s/DoHB6BzVOUBNYBGTg772JVytyAytffIZkY= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rtr7/dhcp4 v0.0.0-20181120124042-778e8c2e24a5/go.mod h1:FwstIpm6vX98QgtR8KEwZcVjiRn2WP76LjXAHj84fK0= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4 
h1:c1Sgqkh8v6ZxafNGG64r8C8UisIW2TKMJN8P86tKjr0= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.mod b/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.mod deleted file mode 100644 index ec4d9c64fc93e..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.sum b/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.sum deleted file mode 100644 index d32d5460bf29c..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/github.com/tailscale/gokrazy-kernel/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2 h1:xzf+cMvBJBcA/Av7OTWBa0Tjrbfcy00TeatJeJt6zrY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= diff --git a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod deleted file mode 100644 index 
da21a143975e9..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module gokrazy/build/tsapp - -go 1.23.1 - -replace tailscale.com => ../../../.. - -require tailscale.com v0.0.0-00010101000000-000000000000 // indirect diff --git a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum deleted file mode 100644 index ae814f31698f4..0000000000000 --- a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum +++ /dev/null @@ -1,266 +0,0 @@ -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= -github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= -github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= -github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= -github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= -github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= 
-github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= 
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= 
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= -github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= -github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= -github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= -github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= -github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= -github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= 
-github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= -github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= -github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= -github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= -github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= -github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= -github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= -github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= -github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= -github.com/illarion/gonotify/v2 v2.0.2 h1:oDH5yvxq9oiQGWUeut42uShcWzOy/hsT9E7pvO95+kQ= -github.com/illarion/gonotify/v2 v2.0.2/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= -github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= -github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= -github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= -github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/josharian/native 
v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= -github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= -github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= -github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= -github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= 
-github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= -github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= -github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= -github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= -github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= -github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= -github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= -github.com/tailscale/golang-x-crypto 
v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 
h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= -github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8= -github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= -github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= -github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e 
h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= -go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= -golang.org/x/crypto 
v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab 
h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod 
h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod deleted file mode 100644 index c56dede46ed65..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect - github.com/google/gopacket v1.1.19 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/josharian/native v1.0.0 // indirect - github.com/mdlayher/packet v1.0.0 // indirect - github.com/mdlayher/socket v0.2.3 // indirect - github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 // indirect - github.com/vishvananda/netlink v1.1.0 // indirect - github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.20.0 // indirect -) diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum 
b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum deleted file mode 100644 index 3cd002ae782b1..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum +++ /dev/null @@ -1,39 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= -github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= -github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8= -github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= -github.com/mdlayher/socket v0.2.3 h1:XZA2X2TjdOwNoNPVPclRCURoX/hokBY8nkTmRZFEheM= -github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 h1:3psQveH4RUiv5yc3p7kRySilf1nSXLQhAvJFwg4fgnE= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46/go.mod h1:Ng1F/s+z0zCMsbEFEneh+30LJa9DrTfmA+REbEqcTPk= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= 
-github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.mod b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.mod deleted file mode 100644 index 33656efeea7d7..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240802144848-676865a4e84f // indirect - github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/kenshaw/evdev v0.1.0 // indirect - github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b // indirect - github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/sys v0.20.0 // indirect -) - -replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.sum b/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.sum deleted file mode 100644 index 479eb1cef1ca7..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/gokrazy/go.sum +++ /dev/null @@ -1,23 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a h1:FKeN678rNpKTpWRdFbAhYL9mWzPu57R5XPXCR3WmXdI= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod 
h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= -github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b h1:7tUBfsEEBWfFeHOB7CUfoOamak+Gx/BlirfXyPk1WjI= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b/go.mod h1:bmoJUS6qOA3uKFvF3KVuhf7mU1KQirzQMeHXtPyKEqg= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a h1:7dnA8x14JihQmKbPr++Y5CCN/XSyDmOB6cXUxcIj6VQ= -github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f h1:ZSAGWpgs+6dK2oIz5OR+HUul3oJbnhFn8YNgcZ3d9SQ= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 h1:2B8/FbIRqmVgRUulQ4iu1EojniufComYe5Yj4BtIn1c= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -golang.org/x/sys v0.0.0-20201005065044-765f4ea38db3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.mod b/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.mod deleted file mode 100644 index de52e181b9c3c..0000000000000 --- 
a/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca // indirect diff --git a/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.sum b/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.sum deleted file mode 100644 index 8135f60c3e791..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/gokrazy/serial-busybox/go.sum +++ /dev/null @@ -1,26 +0,0 @@ -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904 h1:eqfH4A/LLgxv5RvqEXwVoFvfmpRa8+TokRjB5g6xBkk= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904/go.mod h1:pq6rGHqxMRPSaTXaCMzIZy0wLDusAJyoVNyNo05RLs0= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9 h1:x5jR/nNo4/kMSoNo/nwa2xbL7PN1an8S3oIn4OZJdec= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9/go.mod h1:LA5TQy7LcvYGQOy75tkrYkFUhbV2nl5qEBP47PSi2JA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca h1:x0eSjuFy8qsRctVHeWm3EC474q3xm4h3OOOrYpcqyyA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca/go.mod h1:OYcG5tSb+QrelmUOO4EZVUFcIHyyZb0QDbEbZFUp1TA= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gopacket v1.1.16/go.mod h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= -github.com/mdlayher/raw v0.0.0-20190303161257-764d452d77af/go.mod h1:rC/yE65s/DoHB6BzVOUBNYBGTg772JVytyAytffIZkY= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rtr7/dhcp4 v0.0.0-20181120124042-778e8c2e24a5/go.mod h1:FwstIpm6vX98QgtR8KEwZcVjiRn2WP76LjXAHj84fK0= -github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4 h1:c1Sgqkh8v6ZxafNGG64r8C8UisIW2TKMJN8P86tKjr0= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod b/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod deleted file mode 100644 index ec4d9c64fc93e..0000000000000 --- a/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e // indirect diff --git a/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum b/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum deleted file mode 100644 index d32d5460bf29c..0000000000000 --- 
a/gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2 h1:xzf+cMvBJBcA/Av7OTWBa0Tjrbfcy00TeatJeJt6zrY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.mod b/gokrazy/natlabapp/builddir/tailscale.com/go.mod deleted file mode 100644 index 53bc11f9bd3f8..0000000000000 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module gokrazy/build/tsapp - -go 1.25.5 - -replace tailscale.com => ../../../.. - -require tailscale.com v0.0.0-00010101000000-000000000000 // indirect diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.sum b/gokrazy/natlabapp/builddir/tailscale.com/go.sum deleted file mode 100644 index 25f15059d3af6..0000000000000 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.sum +++ /dev/null @@ -1,268 +0,0 @@ -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= -github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod 
h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= -github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= -github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= -github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= -github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 
v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= -github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= -github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= 
-github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= -github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= -github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= -github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= -github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= -github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= -github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= 
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= -github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= -github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= -github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= -github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= -github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= -github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= -github.com/illarion/gonotify/v2 v2.0.2 h1:oDH5yvxq9oiQGWUeut42uShcWzOy/hsT9E7pvO95+kQ= -github.com/illarion/gonotify/v2 v2.0.2/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= -github.com/illarion/gonotify/v2 v2.0.3 
h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= -github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= -github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= -github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= -github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= -github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= 
-github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= -github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= -github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= -github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= -github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= -github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= -github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= -github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= -github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= -github.com/pierrec/lz4/v4 v4.1.21 
h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= -github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 
h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc h1:cezaQN9pvKVaw56Ma5qr/G646uKIYP0yQf+OyWN/okc= -github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= 
-github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= -github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8= -github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= -github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= -github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= -go4.org/mem 
v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= 
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/gokrazy/natlabapp/gokrazydeps.go b/gokrazy/natlabapp/gokrazydeps.go new file mode 100644 index 0000000000000..c5d2b32a3d543 --- /dev/null +++ b/gokrazy/natlabapp/gokrazydeps.go @@ 
-0,0 +1,16 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build for_go_mod_tidy_only + +package gokrazydeps + +import ( + _ "github.com/gokrazy/gokrazy" + _ "github.com/gokrazy/gokrazy/cmd/dhcp" + _ "github.com/gokrazy/serial-busybox" + _ "github.com/tailscale/gokrazy-kernel" + _ "tailscale.com/cmd/tailscale" + _ "tailscale.com/cmd/tailscaled" + _ "tailscale.com/cmd/tta" +) diff --git a/gokrazy/tidy-deps.go b/gokrazy/tidy-deps.go index 8f99f333302b2..3b9dbea7c73af 100644 --- a/gokrazy/tidy-deps.go +++ b/gokrazy/tidy-deps.go @@ -6,5 +6,5 @@ package gokrazy import ( - _ "github.com/gokrazy/tools/cmd/gok" + _ "github.com/bradfitz/monogok/cmd/monogok" ) diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.mod deleted file mode 100644 index fc809b8f7c130..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.mod +++ /dev/null @@ -1,19 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/creack/pty v1.1.18 // indirect - github.com/gokrazy/breakglass v0.0.0-20240604170121-09eeab3321d6 // indirect - github.com/gokrazy/gokrazy v0.0.0-20230812092215-346db1998f83 // indirect - github.com/gokrazy/internal v0.0.0-20230211171410-9608422911d0 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/kenshaw/evdev v0.1.0 // indirect - github.com/kr/fs v0.1.0 // indirect - github.com/kr/pty v1.1.8 // indirect - github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5 // indirect - github.com/pkg/sftp v1.13.5 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/sys v0.15.0 // indirect -) diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.sum deleted file mode 100644 index 99e0622742caf..0000000000000 --- 
a/gokrazy/tsapp/builddir/github.com/gokrazy/breakglass/go.sum +++ /dev/null @@ -1,46 +0,0 @@ -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gokrazy/breakglass v0.0.0-20240529175905-44b3fe64f19c h1:cWzgXJIluB6jAQ0HcnvA1yExLawmtDSssk9H4fLv3yM= -github.com/gokrazy/breakglass v0.0.0-20240529175905-44b3fe64f19c/go.mod h1:4Yffo2Z5w3q2eDvo3HDR8eDnmkDpMAkX0Tn7b/9upgs= -github.com/gokrazy/breakglass v0.0.0-20240604170121-09eeab3321d6 h1:38JB1lVPx+ihCzlWZdbH1LoNmu0KR+jRSmNFR7aMVTg= -github.com/gokrazy/breakglass v0.0.0-20240604170121-09eeab3321d6/go.mod h1:4Yffo2Z5w3q2eDvo3HDR8eDnmkDpMAkX0Tn7b/9upgs= -github.com/gokrazy/gokrazy v0.0.0-20230812092215-346db1998f83 h1:Y4sADvUYd/c0eqnqebipHHl0GMpAxOQeTzPnwI4ievM= -github.com/gokrazy/gokrazy v0.0.0-20230812092215-346db1998f83/go.mod h1:9q5Tg+q+YvRjC3VG0gfMFut46dhbhtAnvUEp4lPjc6c= -github.com/gokrazy/internal v0.0.0-20230211171410-9608422911d0 h1:QTi0skQ/OM7he/5jEWA9k/DYgdwGAhw3hrUoiPGGZHM= -github.com/gokrazy/internal v0.0.0-20230211171410-9608422911d0/go.mod h1:ddHcxXZ/VVQOSAWcRBbkYY58+QOw4L145ye6phyDmRA= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= -github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= -github.com/kr/fs v0.1.0 
h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5 h1:80FAK3TW5lVymfHu3kvB1QvTZvy9Kmx1lx6sT5Ep16s= -github.com/mdlayher/watchdog v0.0.0-20221003142519-49be0df7b3b5/go.mod h1:z0QjVpjpK4jksEkffQwS3+abQ3XFTm1bnimyDzWyUk0= -github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= -github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tailscale/breakglass v0.0.0-20240529174846-0d8ebfc2c652 h1:36TB+ZuYaA8OTdMoPnygC9CJuQmTWxMEmn+a+9XTOgk= -github.com/tailscale/breakglass v0.0.0-20240529174846-0d8ebfc2c652/go.mod h1:4Yffo2Z5w3q2eDvo3HDR8eDnmkDpMAkX0Tn7b/9upgs= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod deleted file mode 100644 index c56dede46ed65..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.mod +++ /dev/null @@ -1,18 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect - github.com/google/gopacket v1.1.19 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/josharian/native v1.0.0 // indirect - github.com/mdlayher/packet v1.0.0 // indirect - github.com/mdlayher/socket v0.2.3 // indirect - github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 // indirect - github.com/vishvananda/netlink v1.1.0 // indirect - github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.20.0 // indirect -) diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum deleted file mode 100644 index 3cd002ae782b1..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/dhcp/go.sum +++ /dev/null 
@@ -1,39 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= -github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk= -github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= -github.com/mdlayher/packet v1.0.0 h1:InhZJbdShQYt6XV2GPj5XHxChzOfhJJOMbvnGAmOfQ8= -github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU= -github.com/mdlayher/socket v0.2.3 h1:XZA2X2TjdOwNoNPVPclRCURoX/hokBY8nkTmRZFEheM= -github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46 h1:3psQveH4RUiv5yc3p7kRySilf1nSXLQhAvJFwg4fgnE= -github.com/rtr7/dhcp4 v0.0.0-20220302171438-18c84d089b46/go.mod h1:Ng1F/s+z0zCMsbEFEneh+30LJa9DrTfmA+REbEqcTPk= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= -github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.mod 
b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.mod deleted file mode 100644 index d851081bbc660..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 // indirect diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.sum deleted file mode 100644 index d3dc288edf218..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/cmd/ntp/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw= -github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod deleted file mode 100644 index 33656efeea7d7..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require ( - github.com/gokrazy/gokrazy v0.0.0-20240802144848-676865a4e84f // indirect - github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect - github.com/google/renameio/v2 v2.0.0 // indirect - github.com/kenshaw/evdev v0.1.0 // indirect - github.com/mdlayher/watchdog 
v0.0.0-20201005150459-8bdc4f41966b // indirect - github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/sys v0.20.0 // indirect -) - -replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum deleted file mode 100644 index 479eb1cef1ca7..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/gokrazy/go.sum +++ /dev/null @@ -1,23 +0,0 @@ -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803 h1:gdGRW/wXHPJuZgZD931Lh75mdJfzEEXrL+Dvi97Ck3A= -github.com/gokrazy/gokrazy v0.0.0-20240525065858-dedadaf38803/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a h1:FKeN678rNpKTpWRdFbAhYL9mWzPu57R5XPXCR3WmXdI= -github.com/gokrazy/internal v0.0.0-20240510165500-68dd68393b7a/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= -github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= -github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/kenshaw/evdev v0.1.0 h1:wmtceEOFfilChgdNT+c/djPJ2JineVsQ0N14kGzFRUo= -github.com/kenshaw/evdev v0.1.0/go.mod h1:B/fErKCihUyEobz0mjn2qQbHgyJKFQAxkXSvkeeA/Wo= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b h1:7tUBfsEEBWfFeHOB7CUfoOamak+Gx/BlirfXyPk1WjI= -github.com/mdlayher/watchdog v0.0.0-20201005150459-8bdc4f41966b/go.mod h1:bmoJUS6qOA3uKFvF3KVuhf7mU1KQirzQMeHXtPyKEqg= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/tailscale/gokrazy 
v0.0.0-20240602215456-7b9b6bbf726a h1:7dnA8x14JihQmKbPr++Y5CCN/XSyDmOB6cXUxcIj6VQ= -github.com/tailscale/gokrazy v0.0.0-20240602215456-7b9b6bbf726a/go.mod h1:NHROeDlzn0icUl3f+tEYvGGpcyBDMsr3AvKLHOWRe5M= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f h1:ZSAGWpgs+6dK2oIz5OR+HUul3oJbnhFn8YNgcZ3d9SQ= -github.com/tailscale/gokrazy v0.0.0-20240802144848-676865a4e84f/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 h1:2B8/FbIRqmVgRUulQ4iu1EojniufComYe5Yj4BtIn1c= -github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678/go.mod h1:+/WWMckeuQt+DG6690A6H8IgC+HpBFq2fmwRKcSbxdk= -golang.org/x/sys v0.0.0-20201005065044-765f4ea38db3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.mod deleted file mode 100644 index 613104a7f6469..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/rpi-eeprom v0.0.0-20240518032756-37da22ee9608 // indirect diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.sum deleted file mode 100644 index b037c105633fb..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/rpi-eeprom/go.sum +++ /dev/null @@ -1,3 +0,0 @@ -github.com/gokrazy/rpi-eeprom v0.0.0-20240518032756-37da22ee9608 h1:8uderKR+8eXR0nRcyBugql1YPoJQjpjoltHqX9yl2DI= -github.com/gokrazy/rpi-eeprom v0.0.0-20240518032756-37da22ee9608/go.mod h1:vabxV1M+i6S3rGuWoFieHxCJW3jlob3rqe0KV82j+0o= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git 
a/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.mod b/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.mod deleted file mode 100644 index de52e181b9c3c..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca // indirect diff --git a/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.sum b/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.sum deleted file mode 100644 index 8135f60c3e791..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/gokrazy/serial-busybox/go.sum +++ /dev/null @@ -1,26 +0,0 @@ -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904 h1:eqfH4A/LLgxv5RvqEXwVoFvfmpRa8+TokRjB5g6xBkk= -github.com/gokrazy/gokrazy v0.0.0-20200501080617-f3445e01a904/go.mod h1:pq6rGHqxMRPSaTXaCMzIZy0wLDusAJyoVNyNo05RLs0= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9 h1:x5jR/nNo4/kMSoNo/nwa2xbL7PN1an8S3oIn4OZJdec= -github.com/gokrazy/internal v0.0.0-20200407075822-660ad467b7c9/go.mod h1:LA5TQy7LcvYGQOy75tkrYkFUhbV2nl5qEBP47PSi2JA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca h1:x0eSjuFy8qsRctVHeWm3EC474q3xm4h3OOOrYpcqyyA= -github.com/gokrazy/serial-busybox v0.0.0-20220918193710-d728912733ca/go.mod h1:OYcG5tSb+QrelmUOO4EZVUFcIHyyZb0QDbEbZFUp1TA= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gopacket v1.1.16/go.mod h1:UCLx9mCmAwsVbn6qQl1WIEt2SO7Nd2fD0th1TBAsqBw= -github.com/mdlayher/raw v0.0.0-20190303161257-764d452d77af/go.mod h1:rC/yE65s/DoHB6BzVOUBNYBGTg772JVytyAytffIZkY= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
-github.com/rtr7/dhcp4 v0.0.0-20181120124042-778e8c2e24a5/go.mod h1:FwstIpm6vX98QgtR8KEwZcVjiRn2WP76LjXAHj84fK0= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4 h1:c1Sgqkh8v6ZxafNGG64r8C8UisIW2TKMJN8P86tKjr0= -golang.org/x/sys v0.0.0-20200406155108-e3b113bbe6a4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod b/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod deleted file mode 100644 index ec4d9c64fc93e..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module gokrazy/build/tsapp - -go 1.22.2 - -require github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e // indirect diff --git a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum 
b/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum deleted file mode 100644 index d32d5460bf29c..0000000000000 --- a/gokrazy/tsapp/builddir/github.com/tailscale/gokrazy-kernel/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2 h1:xzf+cMvBJBcA/Av7OTWBa0Tjrbfcy00TeatJeJt6zrY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240530042707-3f95c886bcf2/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e h1:tyUUgeRPGHjCZWycRnhdx8Lx9DRkjl3WsVUxYMrVBOw= -github.com/tailscale/gokrazy-kernel v0.0.0-20240728225134-3d23beabda2e/go.mod h1:7Mth+m9bq2IHusSsexMNyupHWPL8RxwOuSvBlSGtgDY= diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.mod b/gokrazy/tsapp/builddir/tailscale.com/go.mod deleted file mode 100644 index 53bc11f9bd3f8..0000000000000 --- a/gokrazy/tsapp/builddir/tailscale.com/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module gokrazy/build/tsapp - -go 1.25.5 - -replace tailscale.com => ../../../.. 
- -require tailscale.com v0.0.0-00010101000000-000000000000 // indirect diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.sum b/gokrazy/tsapp/builddir/tailscale.com/go.sum deleted file mode 100644 index 2ffef7bf7ba22..0000000000000 --- a/gokrazy/tsapp/builddir/tailscale.com/go.sum +++ /dev/null @@ -1,262 +0,0 @@ -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= -github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= -github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= -github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= -github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= -github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= -github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= -github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= 
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= 
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= -github.com/aws/aws-sdk-go-v2/service/sts 
v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= -github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= -github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= -github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= -github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= -github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= -github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= -github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= -github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= -github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= -github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= -github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= -github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= -github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/gaissmai/bart v0.11.1 
h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= -github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= -github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= -github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI= -github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= -github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= -github.com/gorilla/csrf 
v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= -github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= -github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= -github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= -github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= -github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= -github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= -github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= -github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= -github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= -github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= -github.com/jellydator/ttlcache/v3 v3.1.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk= -github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8= -github.com/jsimonetti/rtnetlink v1.4.0 h1:Z1BF0fRgcETPEa0Kt0MRk3yV5+kF1FWTni6KUFKrq2I= -github.com/jsimonetti/rtnetlink v1.4.0/go.mod h1:5W1jDvWdnthFJ7fxYX1GMK07BUpI4oskfOqvPteYS6E= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= 
-github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= -github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= -github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= -github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= -github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= -github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= -github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= -github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= 
-github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= -github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= -github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= -github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= -github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw= -github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio= -github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a 
h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= -github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= -github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= -github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= -github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= -github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= -github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= -github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= -github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= 
-github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= -github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= -github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= -github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= -github.com/tcnksm/go-httpstat v0.2.0 h1:rP7T5e5U2HfmOBmZzGgGZjBQ5/GluWUylujl0tJ04I0= -github.com/tcnksm/go-httpstat v0.2.0/go.mod h1:s3JVJFtQxtBEBC9dwcdTTXS9xFnM3SXAZwPG41aurT8= -github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= -github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= -github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= -github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= -github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= -github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= -github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= 
-github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= -go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= -go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= -go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= -golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= -golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net 
v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= -golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod 
h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= -gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= -gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= -k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= -k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= -k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= -nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= -nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= -sigs.k8s.io/yaml v1.4.0 
h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/gokrazy/tsapp/gokrazydeps.go b/gokrazy/tsapp/gokrazydeps.go new file mode 100644 index 0000000000000..931080647f8e5 --- /dev/null +++ b/gokrazy/tsapp/gokrazydeps.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build for_go_mod_tidy_only + +package gokrazydeps + +import ( + _ "github.com/gokrazy/breakglass" + _ "github.com/gokrazy/gokrazy" + _ "github.com/gokrazy/gokrazy/cmd/dhcp" + _ "github.com/gokrazy/gokrazy/cmd/ntp" + _ "github.com/gokrazy/gokrazy/cmd/randomd" + _ "github.com/gokrazy/serial-busybox" + _ "github.com/tailscale/gokrazy-kernel" + _ "tailscale.com/cmd/tailscale" + _ "tailscale.com/cmd/tailscaled" +) diff --git a/pkgdoc_test.go b/pkgdoc_test.go index b3a902bf41f4b..60b2d4856d6c7 100644 --- a/pkgdoc_test.go +++ b/pkgdoc_test.go @@ -71,6 +71,10 @@ func TestPackageDocs(t *testing.T) { t.Logf("multiple files with package doc in %s: %q", dir, ff) } if len(ff) == 0 { + if strings.HasPrefix(dir, "gokrazy/") { + // Ignore gokrazy appliances. Their *.go file is only for deps. 
+ continue + } t.Errorf("no package doc in %s", dir) } } diff --git a/shell.nix b/shell.nix index ff44b9b89631b..0c51f59c00d3b 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-5A6EShJ33yHQdr6tgsNCRFLvNUUjIKXDv5DvzsiUwFI= +# nix-direnv cache busting line: sha256-e5fAO7gye8B5FGBTxLNVTKq6dp8By9iDEw72M1/y4ZE= diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go index 2aea7c296701b..56d602222cbe9 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -81,7 +81,7 @@ func newNatTest(tb testing.TB) *natTest { } } - nt.kernel, err = findKernelPath(filepath.Join(modRoot, "gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod")) + nt.kernel, err = findKernelPath(filepath.Join(modRoot, "go.mod")) if err != nil { tb.Skipf("skipping test; kernel not found: %v", err) } From 6854d2982b0619e6bfe0dcff0b20f12fe3a72d01 Mon Sep 17 00:00:00 2001 From: Simon Law Date: Fri, 13 Feb 2026 18:19:27 -0800 Subject: [PATCH 0978/1093] ipn/ipnlocal: log errors when suggesting exit nodes (#18728) In PR #18681, we started logging which exit nodes were being suggested. However, we did not log if there were errors encountered. This patch corrects this oversight. 
Updates: tailscale/corp#29964 Updates: tailscale/corp#36446 Signed-off-by: Simon Law --- ipn/ipnlocal/local.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 27858484a7a0e..e9222cde35b50 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -7492,8 +7492,12 @@ func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion ta // it is set in the policy file: tailscale/corp#34401 res, err = suggestExitNodeUsingDERP(report, nb, prevSuggestion, selectRegion, selectNode, allowList) } - name, _, _ := strings.Cut(res.Name, ".") - nb.logf("netmap: suggested exit node: %s (%s)", name, res.ID) + if err != nil { + nb.logf("netmap: suggested exit node: %v", err) + } else { + name, _, _ := strings.Cut(res.Name, ".") + nb.logf("netmap: suggested exit node: %s (%s)", name, res.ID) + } return res, err } From 3f3af841afe5637996b45272e3460a8f095fb151 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 16 Feb 2026 01:05:32 +0000 Subject: [PATCH 0979/1093] tool/gocross: respect TS_GO_NEXT=1 in gocross too The gocross-wrapper.sh bash script already checks TS_GO_NEXT (as of a374cc344e48) to select go.toolchain.next.rev over go.toolchain.rev, but when TS_USE_GOCROSS=1 the Go binary itself was hardcoded to read go.toolchain.rev. This makes gocross also respect the TS_GO_NEXT=1 environment variable. 
Updates tailscale/corp#36382 Change-Id: I04bef25a34e7ed3ccb1bfdb33a3a1f896236c6ee Signed-off-by: Brad Fitzpatrick --- tool/gocross/toolchain.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tool/gocross/toolchain.go b/tool/gocross/toolchain.go index 2eb675861bbce..8086d96976e92 100644 --- a/tool/gocross/toolchain.go +++ b/tool/gocross/toolchain.go @@ -43,7 +43,11 @@ findTopLevel: } } - return readRevFile(filepath.Join(d, "go.toolchain.rev")) + revFile := "go.toolchain.rev" + if os.Getenv("TS_GO_NEXT") == "1" { + revFile = "go.toolchain.next.rev" + } + return readRevFile(filepath.Join(d, revFile)) } func readRevFile(path string) (string, error) { From bfc15cb57c0ce2ee809434db77f1c3ac7c107b29 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 16 Feb 2026 06:26:39 -1000 Subject: [PATCH 0980/1093] cmd/cigocacher: remove Windows-specific disk code moved upstream (#18697) Updates tailscale/corp#10808 Updates bradfitz/go-tool-cache#27 Change-Id: I27a2af63d882d916998933521f17e410692255ca Signed-off-by: Brad Fitzpatrick Signed-off-by: Tom Proctor --- cmd/cigocacher/cigocacher.go | 134 ++++++++++-------------------- cmd/cigocacher/disk.go | 88 -------------------- cmd/cigocacher/disk_notwindows.go | 44 ---------- cmd/cigocacher/disk_windows.go | 102 ----------------------- cmd/cigocacher/http.go | 109 ------------------------ flake.nix | 2 +- go.mod | 4 +- go.mod.sri | 2 +- go.sum | 8 +- shell.nix | 2 +- 10 files changed, 55 insertions(+), 440 deletions(-) delete mode 100644 cmd/cigocacher/disk.go delete mode 100644 cmd/cigocacher/disk_notwindows.go delete mode 100644 cmd/cigocacher/disk_windows.go delete mode 100644 cmd/cigocacher/http.go diff --git a/cmd/cigocacher/cigocacher.go b/cmd/cigocacher/cigocacher.go index b308afd06d688..1e4326ebcb6be 100644 --- a/cmd/cigocacher/cigocacher.go +++ b/cmd/cigocacher/cigocacher.go @@ -12,10 +12,8 @@ package main import ( - "bytes" "context" jsonv1 "encoding/json" - "errors" "flag" "fmt" "io" @@ -103,13 
+101,7 @@ func main() { if tk == "" { log.Fatal("--token is empty; cannot fetch stats") } - c := &gocachedClient{ - baseURL: *srvURL, - cl: httpClient(srvHost, *srvHostDial), - accessToken: tk, - verbose: *verbose, - } - stats, err := c.fetchStats() + stats, err := fetchStats(httpClient(srvHost, *srvHostDial), *srvURL, tk) if err != nil { log.Fatalf("error fetching gocached stats: %v", err) } @@ -140,11 +132,13 @@ func main() { if *verbose { log.Printf("Using cigocached at %s", *srvURL) } - c.gocached = &gocachedClient{ - baseURL: *srvURL, - cl: httpClient(srvHost, *srvHostDial), - accessToken: *token, - verbose: *verbose, + c.remote = &cachers.HTTPClient{ + BaseURL: *srvURL, + Disk: c.disk, + HTTPClient: httpClient(srvHost, *srvHostDial), + AccessToken: *token, + Verbose: *verbose, + BestEffortHTTP: true, } } var p *cacheproc.Process @@ -186,9 +180,9 @@ func httpClient(srvHost, srvHostDial string) *http.Client { } type cigocacher struct { - disk *cachers.DiskCache - gocached *gocachedClient - verbose bool + disk *cachers.DiskCache + remote *cachers.HTTPClient // nil if no remote server + verbose bool getNanos atomic.Int64 // total nanoseconds spent in gets putNanos atomic.Int64 // total nanoseconds spent in puts @@ -209,39 +203,33 @@ func (c *cigocacher) get(ctx context.Context, actionID string) (outputID, diskPa defer func() { c.getNanos.Add(time.Since(t0).Nanoseconds()) }() - if c.gocached == nil { - return c.disk.Get(ctx, actionID) - } outputID, diskPath, err = c.disk.Get(ctx, actionID) - if err == nil && outputID != "" { - return outputID, diskPath, nil + if c.remote == nil || (err == nil && outputID != "") { + return outputID, diskPath, err } + // Disk miss; try remote. HTTPClient.Get handles the HTTP fetch + // (including lz4 decompression) and writes to disk for us. 
c.getHTTP.Add(1) t0HTTP := time.Now() defer func() { c.getHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds()) }() - outputID, res, err := c.gocached.get(ctx, actionID) + outputID, diskPath, err = c.remote.Get(ctx, actionID) if err != nil { c.getHTTPErrors.Add(1) return "", "", nil } - if outputID == "" || res == nil { + if outputID == "" { c.getHTTPMisses.Add(1) return "", "", nil } - defer res.Body.Close() - - diskPath, err = put(c.disk, actionID, outputID, res.ContentLength, res.Body) - if err != nil { - return "", "", fmt.Errorf("error filling disk cache from HTTP: %w", err) - } - c.getHTTPHits.Add(1) - c.getHTTPBytes.Add(res.ContentLength) + if fi, err := os.Stat(diskPath); err == nil { + c.getHTTPBytes.Add(fi.Size()) + } return outputID, diskPath, nil } @@ -250,56 +238,25 @@ func (c *cigocacher) put(ctx context.Context, actionID, outputID string, size in defer func() { c.putNanos.Add(time.Since(t0).Nanoseconds()) }() - if c.gocached == nil { - return put(c.disk, actionID, outputID, size, r) - } - c.putHTTP.Add(1) - var diskReader, httpReader io.Reader - tee := &bestEffortTeeReader{r: r} - if size == 0 { - // Special case the empty file so NewRequest sets "Content-Length: 0", - // as opposed to thinking we didn't set it and not being able to sniff its size - // from the type. - diskReader, httpReader = bytes.NewReader(nil), bytes.NewReader(nil) - } else { - pr, pw := io.Pipe() - defer pw.Close() - // The diskReader is in the driving seat. We will try to forward data - // to httpReader as well, but only best-effort. 
- diskReader = tee - tee.w = pw - httpReader = pr + if c.remote == nil { + return c.disk.Put(ctx, actionID, outputID, size, r) } - httpErrCh := make(chan error) - go func() { - t0HTTP := time.Now() - defer func() { - c.putHTTPNanos.Add(time.Since(t0HTTP).Nanoseconds()) - }() - httpErrCh <- c.gocached.put(ctx, actionID, outputID, size, httpReader) - }() - diskPath, err = put(c.disk, actionID, outputID, size, diskReader) + c.putHTTP.Add(1) + diskPath, err = c.remote.Put(ctx, actionID, outputID, size, r) + c.putHTTPNanos.Add(time.Since(t0).Nanoseconds()) if err != nil { - return "", fmt.Errorf("error writing to disk cache: %w", errors.Join(err, tee.err)) - } - - select { - case err := <-httpErrCh: - if err != nil { - c.putHTTPErrors.Add(1) - } else { - c.putHTTPBytes.Add(size) - } - case <-ctx.Done(): + c.putHTTPErrors.Add(1) + } else { + c.putHTTPBytes.Add(size) } - return diskPath, nil + return diskPath, err } func (c *cigocacher) close() error { - if !c.verbose || c.gocached == nil { + if !c.verbose || c.remote == nil { return nil } @@ -307,7 +264,7 @@ func (c *cigocacher) close() error { c.getHTTP.Load(), float64(c.getHTTPBytes.Load())/float64(1<<20), float64(c.getHTTPNanos.Load())/float64(time.Second), c.getHTTPHits.Load(), c.getHTTPMisses.Load(), c.getHTTPErrors.Load(), c.putHTTP.Load(), float64(c.putHTTPBytes.Load())/float64(1<<20), float64(c.putHTTPNanos.Load())/float64(time.Second), c.putHTTPErrors.Load()) - stats, err := c.gocached.fetchStats() + stats, err := fetchStats(c.remote.HTTPClient, c.remote.BaseURL, c.remote.AccessToken) if err != nil { log.Printf("error fetching gocached stats: %v", err) } else { @@ -354,19 +311,20 @@ func fetchAccessToken(cl *http.Client, idTokenURL, idTokenRequestToken, gocached return accessToken.AccessToken, nil } -type bestEffortTeeReader struct { - r io.Reader - w io.WriteCloser - err error -} - -func (t *bestEffortTeeReader) Read(p []byte) (int, error) { - n, err := t.r.Read(p) - if n > 0 && t.w != nil { - if _, err := 
t.w.Write(p[:n]); err != nil { - t.err = errors.Join(err, t.w.Close()) - t.w = nil - } +func fetchStats(cl *http.Client, baseURL, accessToken string) (string, error) { + req, _ := http.NewRequest("GET", baseURL+"/session/stats", nil) + req.Header.Set("Authorization", "Bearer "+accessToken) + resp, err := cl.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("fetching stats: %s", resp.Status) + } + b, err := io.ReadAll(resp.Body) + if err != nil { + return "", err } - return n, err + return string(b), nil } diff --git a/cmd/cigocacher/disk.go b/cmd/cigocacher/disk.go deleted file mode 100644 index e04dac0509300..0000000000000 --- a/cmd/cigocacher/disk.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) Tailscale Inc & contributors -// SPDX-License-Identifier: BSD-3-Clause - -package main - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "log" - "os" - "path/filepath" - "time" - - "github.com/bradfitz/go-tool-cache/cachers" -) - -// indexEntry is the metadata that DiskCache stores on disk for an ActionID. -type indexEntry struct { - Version int `json:"v"` - OutputID string `json:"o"` - Size int64 `json:"n"` - TimeNanos int64 `json:"t"` -} - -func validHex(x string) bool { - if len(x) < 4 || len(x) > 100 { - return false - } - for _, b := range x { - if b >= '0' && b <= '9' || b >= 'a' && b <= 'f' { - continue - } - return false - } - return true -} - -// put is like dc.Put but refactored to support safe concurrent writes on Windows. -// TODO(tomhjp): upstream these changes to go-tool-cache once they look stable. 
-func put(dc *cachers.DiskCache, actionID, outputID string, size int64, body io.Reader) (diskPath string, _ error) { - if len(actionID) < 4 || len(outputID) < 4 { - return "", fmt.Errorf("actionID and outputID must be at least 4 characters long") - } - if !validHex(actionID) { - log.Printf("diskcache: got invalid actionID %q", actionID) - return "", errors.New("actionID must be hex") - } - if !validHex(outputID) { - log.Printf("diskcache: got invalid outputID %q", outputID) - return "", errors.New("outputID must be hex") - } - - actionFile := dc.ActionFilename(actionID) - outputFile := dc.OutputFilename(outputID) - actionDir := filepath.Dir(actionFile) - outputDir := filepath.Dir(outputFile) - - if err := os.MkdirAll(actionDir, 0755); err != nil { - return "", fmt.Errorf("failed to create action directory: %w", err) - } - if err := os.MkdirAll(outputDir, 0755); err != nil { - return "", fmt.Errorf("failed to create output directory: %w", err) - } - - wrote, err := writeOutputFile(outputFile, body, size, outputID) - if err != nil { - return "", err - } - if wrote != size { - return "", fmt.Errorf("wrote %d bytes, expected %d", wrote, size) - } - - ij, err := json.Marshal(indexEntry{ - Version: 1, - OutputID: outputID, - Size: size, - TimeNanos: time.Now().UnixNano(), - }) - if err != nil { - return "", err - } - if err := writeActionFile(dc.ActionFilename(actionID), ij); err != nil { - return "", fmt.Errorf("atomic write failed: %w", err) - } - return outputFile, nil -} diff --git a/cmd/cigocacher/disk_notwindows.go b/cmd/cigocacher/disk_notwindows.go deleted file mode 100644 index 353b734ab9ce7..0000000000000 --- a/cmd/cigocacher/disk_notwindows.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) Tailscale Inc & contributors -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !windows - -package main - -import ( - "bytes" - "io" - "os" - "path/filepath" -) - -func writeActionFile(dest string, b []byte) error { - _, err := writeAtomic(dest, bytes.NewReader(b)) - 
return err -} - -func writeOutputFile(dest string, r io.Reader, _ int64, _ string) (int64, error) { - return writeAtomic(dest, r) -} - -func writeAtomic(dest string, r io.Reader) (int64, error) { - tf, err := os.CreateTemp(filepath.Dir(dest), filepath.Base(dest)+".*") - if err != nil { - return 0, err - } - size, err := io.Copy(tf, r) - if err != nil { - tf.Close() - os.Remove(tf.Name()) - return 0, err - } - if err := tf.Close(); err != nil { - os.Remove(tf.Name()) - return 0, err - } - if err := os.Rename(tf.Name(), dest); err != nil { - os.Remove(tf.Name()) - return 0, err - } - return size, nil -} diff --git a/cmd/cigocacher/disk_windows.go b/cmd/cigocacher/disk_windows.go deleted file mode 100644 index 686bcf2b0d68b..0000000000000 --- a/cmd/cigocacher/disk_windows.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) Tailscale Inc & contributors -// SPDX-License-Identifier: BSD-3-Clause - -package main - -import ( - "crypto/sha256" - "errors" - "fmt" - "io" - "os" -) - -// The functions in this file are based on go's own cache in -// cmd/go/internal/cache/cache.go, particularly putIndexEntry and copyFile. - -// writeActionFile writes the indexEntry metadata for an ActionID to disk. It -// may be called for the same actionID concurrently from multiple processes, -// and the outputID for a specific actionID may change from time to time due -// to non-deterministic builds. It makes a best-effort to delete the file if -// anything goes wrong. -func writeActionFile(dest string, b []byte) (retErr error) { - f, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, 0o666) - if err != nil { - return err - } - defer func() { - cerr := f.Close() - if retErr != nil || cerr != nil { - retErr = errors.Join(retErr, cerr, os.Remove(dest)) - } - }() - - _, err = f.Write(b) - if err != nil { - return err - } - - // Truncate the file only *after* writing it. - // (This should be a no-op, but truncate just in case of previous corruption.) 
- // - // This differs from os.WriteFile, which truncates to 0 *before* writing - // via os.O_TRUNC. Truncating only after writing ensures that a second write - // of the same content to the same file is idempotent, and does not - even - // temporarily! - undo the effect of the first write. - return f.Truncate(int64(len(b))) -} - -// writeOutputFile writes content to be cached to disk. The outputID is the -// sha256 hash of the content, and each file should only be written ~once, -// assuming no sha256 hash collisions. It may be written multiple times if -// concurrent processes are both populating the same output. The file is opened -// with FILE_SHARE_READ|FILE_SHARE_WRITE, which means both processes can write -// the same contents concurrently without conflict. -// -// It makes a best effort to clean up if anything goes wrong, but the file may -// be left in an inconsistent state in the event of disk-related errors such as -// another process taking file locks, or power loss etc. -func writeOutputFile(dest string, r io.Reader, size int64, outputID string) (_ int64, retErr error) { - info, err := os.Stat(dest) - if err == nil && info.Size() == size { - // Already exists, check the hash. - if f, err := os.Open(dest); err == nil { - h := sha256.New() - io.Copy(h, f) - f.Close() - if fmt.Sprintf("%x", h.Sum(nil)) == outputID { - // Still drain the reader to ensure associated resources are released. - return io.Copy(io.Discard, r) - } - } - } - - // Didn't successfully find the pre-existing file, write it. - mode := os.O_WRONLY | os.O_CREATE - if err == nil && info.Size() > size { - mode |= os.O_TRUNC // Should never happen, but self-heal. 
- } - f, err := os.OpenFile(dest, mode, 0644) - if err != nil { - return 0, fmt.Errorf("failed to open output file %q: %w", dest, err) - } - defer func() { - cerr := f.Close() - if retErr != nil || cerr != nil { - retErr = errors.Join(retErr, cerr, os.Remove(dest)) - } - }() - - // Copy file to f, but also into h to double-check hash. - h := sha256.New() - w := io.MultiWriter(f, h) - n, err := io.Copy(w, r) - if err != nil { - return 0, err - } - if fmt.Sprintf("%x", h.Sum(nil)) != outputID { - return 0, errors.New("file content changed underfoot") - } - - return n, nil -} diff --git a/cmd/cigocacher/http.go b/cmd/cigocacher/http.go deleted file mode 100644 index 16d0ae899acbc..0000000000000 --- a/cmd/cigocacher/http.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (c) Tailscale Inc & contributors -// SPDX-License-Identifier: BSD-3-Clause - -package main - -import ( - "context" - "fmt" - "io" - "log" - "net/http" -) - -type gocachedClient struct { - baseURL string // base URL of the cacher server, like "http://localhost:31364". - cl *http.Client // http.Client to use. - accessToken string // Bearer token to use in the Authorization header. - verbose bool -} - -// drainAndClose reads and throws away a small bounded amount of data. This is a -// best-effort attempt to allow connection reuse; Go's HTTP/1 Transport won't -// reuse a TCP connection unless you fully consume HTTP responses. 
-func drainAndClose(body io.ReadCloser) { - io.CopyN(io.Discard, body, 4<<10) - body.Close() -} - -func tryReadErrorMessage(res *http.Response) []byte { - msg, _ := io.ReadAll(io.LimitReader(res.Body, 4<<10)) - return msg -} - -func (c *gocachedClient) get(ctx context.Context, actionID string) (outputID string, resp *http.Response, err error) { - req, _ := http.NewRequestWithContext(ctx, "GET", c.baseURL+"/action/"+actionID, nil) - req.Header.Set("Want-Object", "1") // opt in to single roundtrip protocol - if c.accessToken != "" { - req.Header.Set("Authorization", "Bearer "+c.accessToken) - } - - res, err := c.cl.Do(req) - if err != nil { - return "", nil, err - } - defer func() { - if resp == nil { - drainAndClose(res.Body) - } - }() - if res.StatusCode == http.StatusNotFound { - return "", nil, nil - } - if res.StatusCode != http.StatusOK { - msg := tryReadErrorMessage(res) - if c.verbose { - log.Printf("error GET /action/%s: %v, %s", actionID, res.Status, msg) - } - return "", nil, fmt.Errorf("unexpected GET /action/%s status %v", actionID, res.Status) - } - - outputID = res.Header.Get("Go-Output-Id") - if outputID == "" { - return "", nil, fmt.Errorf("missing Go-Output-Id header in response") - } - if res.ContentLength == -1 { - return "", nil, fmt.Errorf("no Content-Length from server") - } - return outputID, res, nil -} - -func (c *gocachedClient) put(ctx context.Context, actionID, outputID string, size int64, body io.Reader) error { - req, _ := http.NewRequestWithContext(ctx, "PUT", c.baseURL+"/"+actionID+"/"+outputID, body) - req.ContentLength = size - if c.accessToken != "" { - req.Header.Set("Authorization", "Bearer "+c.accessToken) - } - res, err := c.cl.Do(req) - if err != nil { - if c.verbose { - log.Printf("error PUT /%s/%s: %v", actionID, outputID, err) - } - return err - } - defer res.Body.Close() - if res.StatusCode != http.StatusNoContent { - msg := tryReadErrorMessage(res) - if c.verbose { - log.Printf("error PUT /%s/%s: %v, %s", actionID, 
outputID, res.Status, msg) - } - return fmt.Errorf("unexpected PUT /%s/%s status %v", actionID, outputID, res.Status) - } - - return nil -} - -func (c *gocachedClient) fetchStats() (string, error) { - req, _ := http.NewRequest("GET", c.baseURL+"/session/stats", nil) - req.Header.Set("Authorization", "Bearer "+c.accessToken) - resp, err := c.cl.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - return string(b), nil -} diff --git a/flake.nix b/flake.nix index bbd1f8b48be0c..6fc0ff28a906f 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-e5fAO7gye8B5FGBTxLNVTKq6dp8By9iDEw72M1/y4ZE= +# nix-direnv cache busting line: sha256-JD1PZPZT5clhRWIAQO8skBRN59QPiyfTc7nPYTvGbd8= diff --git a/go.mod b/go.mod index bc356a19c9b59..7b062afbf4ccf 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 github.com/axiomhq/hyperloglog v0.0.0-20240319100328-84253e514e02 - github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd + github.com/bradfitz/go-tool-cache v0.0.0-20260216153636-9e5201344fe5 github.com/bradfitz/monogok v0.0.0-20260208031948-2219c393d032 github.com/bramvdbogaerde/go-scp v1.4.0 github.com/cilium/ebpf v0.16.0 @@ -419,7 +419,7 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pelletier/go-toml/v2 v2.2.0 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pierrec/lz4/v4 v4.1.25 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect diff --git a/go.mod.sri b/go.mod.sri index de11cbc71e1b7..e5d18033ae976 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ 
-sha256-e5fAO7gye8B5FGBTxLNVTKq6dp8By9iDEw72M1/y4ZE= +sha256-JD1PZPZT5clhRWIAQO8skBRN59QPiyfTc7nPYTvGbd8= diff --git a/go.sum b/go.sum index c924e5e6e1a12..299fe95cd84ad 100644 --- a/go.sum +++ b/go.sum @@ -202,8 +202,8 @@ github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFiM= github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= -github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd h1:1Df3FBmfyUCIQ4eKzAPXIWTfewY89L0fWPWO56zWCyI= -github.com/bradfitz/go-tool-cache v0.0.0-20251113223507-0124e698e0bd/go.mod h1:2+xptBAd0m2kZ1wLO4AYZhldLEFPy+KeGwmnlXLvy+w= +github.com/bradfitz/go-tool-cache v0.0.0-20260216153636-9e5201344fe5 h1:0sG3c7afYdBNlc3QyhckvZ4bV9iqlfqCQM1i+mWm0eE= +github.com/bradfitz/go-tool-cache v0.0.0-20260216153636-9e5201344fe5/go.mod h1:78ZLITnBUCDJeU01+wYYJKaPYYgsDzJPRfxeI8qFh5g= github.com/bradfitz/monogok v0.0.0-20260208031948-2219c393d032 h1:xDomVqO85ss/98Ky5zxM/g86bXDNBLebM2I9G/fu6uA= github.com/bradfitz/monogok v0.0.0-20260208031948-2219c393d032/go.mod h1:TG1HbU9fRVDnNgXncVkKz9GdvjIvqquXjH6QZSEVmY4= github.com/bramvdbogaerde/go-scp v1.4.0 h1:jKMwpwCbcX1KyvDbm/PDJuXcMuNVlLGi0Q0reuzjyKY= @@ -935,8 +935,8 @@ github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkM github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.25 
h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0= +github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0= github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= diff --git a/shell.nix b/shell.nix index 0c51f59c00d3b..9fab641722dca 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-e5fAO7gye8B5FGBTxLNVTKq6dp8By9iDEw72M1/y4ZE= +# nix-direnv cache busting line: sha256-JD1PZPZT5clhRWIAQO8skBRN59QPiyfTc7nPYTvGbd8= From 4044e05dfdd6445a2baabea2c35e34b49c1d37df Mon Sep 17 00:00:00 2001 From: Will Norris Date: Mon, 16 Feb 2026 11:41:53 -0800 Subject: [PATCH 0981/1093] client/systray: set consistent ID for StatusNotifierItem Fixes #18736 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- client/systray/systray.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/client/systray/systray.go b/client/systray/systray.go index 8c30dbf05ef3e..7018f0f3be2be 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -171,6 +171,11 @@ tailscale systray See https://tailscale.com/kb/1597/linux-systray for more information.`) } setAppIcon(disconnected) + + // set initial title, which is used by the systray package as the ID of the StatusNotifierItem. + // This value will get overwritten later as the client status changes. 
+ systray.SetTitle("tailscale") + menu.rebuild() menu.mu.Lock() From a8204568d88897292d7146d3ceda03071f6067fb Mon Sep 17 00:00:00 2001 From: Will Norris Date: Thu, 16 Feb 2023 10:46:52 -0800 Subject: [PATCH 0982/1093] all: replace UserVisibleError with vizerror package Updates tailscale/corp#9025 Signed-off-by: Will Norris --- cmd/tailscaled/depaware.txt | 6 +++--- control/controlclient/client.go | 6 ------ control/controlclient/direct.go | 3 ++- ipn/ipnlocal/local.go | 6 +++--- 4 files changed, 8 insertions(+), 13 deletions(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 71a1df1d4c6c2..7f2e6f7681751 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -150,7 +150,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress from github.com/klauspost/compress/zstd github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd - github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/zstd+ 💣 github.com/klauspost/compress/internal/le from github.com/klauspost/compress/huff0+ github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe @@ -472,7 +472,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ - tailscale.com/util/vizerror from tailscale.com/tailcfg+ + tailscale.com/util/vizerror from tailscale.com/tsweb+ 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ W 💣 
tailscale.com/util/winutil/gp from tailscale.com/net/dns+ @@ -480,7 +480,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ tailscale.com/util/zstdframe from tailscale.com/control/controlclient+ tailscale.com/version from tailscale.com/client/web+ - tailscale.com/version/distro from tailscale.com/client/web+ + tailscale.com/version/distro from tailscale.com/hostinfo+ W tailscale.com/wf from tailscale.com/cmd/tailscaled tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ diff --git a/control/controlclient/client.go b/control/controlclient/client.go index 3bc53ed5a24fc..a57c6940a88c4 100644 --- a/control/controlclient/client.go +++ b/control/controlclient/client.go @@ -91,9 +91,3 @@ type Client interface { // distinguish one client from another. ClientID() int64 } - -// UserVisibleError is an error that should be shown to users. 
-type UserVisibleError string - -func (e UserVisibleError) Error() string { return string(e) } -func (e UserVisibleError) UserVisibleError() string { return string(e) } diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index a368d6f858384..6f3393b18dfdf 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -59,6 +59,7 @@ import ( "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/testenv" + "tailscale.com/util/vizerror" "tailscale.com/util/zstdframe" ) @@ -743,7 +744,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new resp.NodeKeyExpired, resp.MachineAuthorized, resp.AuthURL != "") if resp.Error != "" { - return false, "", nil, UserVisibleError(resp.Error) + return false, "", nil, vizerror.New(resp.Error) } if len(resp.NodeKeySignature) > 0 { return true, "", resp.NodeKeySignature, nil diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e9222cde35b50..bf0651ac97bd1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -99,6 +99,7 @@ import ( "tailscale.com/util/syspolicy/ptype" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" + "tailscale.com/util/vizerror" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" @@ -1583,9 +1584,8 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control return } b.logf("Received error: %v", st.Err) - var uerr controlclient.UserVisibleError - if errors.As(st.Err, &uerr) { - s := uerr.UserVisibleError() + if vizerr, ok := vizerror.As(st.Err); ok { + s := vizerr.Error() b.sendLocked(ipn.Notify{ErrMessage: &s}) } return From a6390ca008b580ef41e65d10bb1dfc811ebf3aa9 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 13 Feb 2026 05:07:45 +0000 Subject: [PATCH 0983/1093] ssh/tailssh: fix data race on conn auth state in OnPolicyChange OnPolicyChange can observe a conn in activeConns before authentication 
completes. The previous `c.info == nil` guard was itself a data race against clientAuth writing c.info, and even when c.info appeared non-nil, c.localUser could still be nil, causing a nil pointer dereference at c.localUser.Username. Add an authCompleted atomic.Bool to conn, stored true after all auth fields are written in clientAuth. OnPolicyChange checks this atomic instead of c.info, which provides the memory barrier guaranteeing all prior writes are visible to the concurrent reader. Updates tailscale/corp#36268 (fixes, but we might want to cherry-pick) Co-authored-by: Gesa Stupperich Change-Id: I4c69843541f5f9f04add9bf431e320c65a203a39 Signed-off-by: Brad Fitzpatrick --- ssh/tailssh/tailssh.go | 21 +++++++++-- ssh/tailssh/tailssh_test.go | 75 +++++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 4 deletions(-) diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 9d5a7d2a880db..cb56f701b5e68 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -192,9 +192,9 @@ func (srv *server) OnPolicyChange() { srv.mu.Lock() defer srv.mu.Unlock() for c := range srv.activeConns { - if c.info == nil { - // c.info is nil when the connection hasn't been authenticated yet. - // In that case, the connection will be terminated when it is. + if !c.authCompleted.Load() { + // The connection hasn't completed authentication yet. + // In that case, the connection will be terminated when it does. continue } go c.checkStillValid() @@ -236,14 +236,26 @@ type conn struct { // Banners cannot be sent after auth completes. spac gossh.ServerPreAuthConn + // The following fields are set during clientAuth and are used for policy + // evaluation and session management. They are immutable after clientAuth + // completes. They must not be read from other goroutines until + // authCompleted is set to true. 
+ action0 *tailcfg.SSHAction // set by clientAuth finalAction *tailcfg.SSHAction // set by clientAuth - info *sshConnInfo // set by setInfo + info *sshConnInfo // set by setInfo (during clientAuth) localUser *userMeta // set by clientAuth userGroupIDs []string // set by clientAuth acceptEnv []string + // authCompleted is set to true after clientAuth has finished writing + // all authentication state fields (info, localUser, action0, + // finalAction, userGroupIDs, acceptEnv). It provides a memory + // barrier so that concurrent readers (e.g. OnPolicyChange) see + // fully-initialized values. + authCompleted atomic.Bool + // mu protects the following fields. // // srv.mu should be acquired prior to mu. @@ -369,6 +381,7 @@ func (c *conn) clientAuth(cm gossh.ConnMetadata) (perms *gossh.Permissions, retE } } c.finalAction = action + c.authCompleted.Store(true) return &gossh.Permissions{}, nil case action.Reject: metricTerminalReject.Add(1) diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 44db0cc000beb..6d9d859a22d91 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -31,6 +31,7 @@ import ( "sync" "sync/atomic" "testing" + "testing/synctest" "time" gossh "golang.org/x/crypto/ssh" @@ -1111,6 +1112,7 @@ func TestSSH(t *testing.T) { } sc.action0 = &tailcfg.SSHAction{Accept: true} sc.finalAction = sc.action0 + sc.authCompleted.Store(true) sc.Handler = func(s ssh.Session) { sc.newSSHSession(s).run() @@ -1320,6 +1322,79 @@ func TestStdOsUserUserAssumptions(t *testing.T) { } } +func TestOnPolicyChangeSkipsPreAuthConns(t *testing.T) { + tests := []struct { + name string + sshRule *tailcfg.SSHRule + wantCancel bool + }{ + { + name: "accept-after-auth", + sshRule: newSSHRule(&tailcfg.SSHAction{Accept: true}), + wantCancel: false, + }, + { + name: "reject-after-auth", + sshRule: newSSHRule(&tailcfg.SSHAction{Reject: true}), + wantCancel: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + 
synctest.Test(t, func(t *testing.T) { + srv := &server{ + logf: tstest.WhileTestRunningLogger(t), + lb: &localState{ + sshEnabled: true, + matchingRule: tt.sshRule, + }, + } + c := &conn{ + srv: srv, + info: &sshConnInfo{ + sshUser: "alice", + src: netip.MustParseAddrPort("1.2.3.4:30343"), + dst: netip.MustParseAddrPort("100.100.100.102:22"), + }, + localUser: &userMeta{User: user.User{Username: currentUser}}, + } + srv.activeConns = map[*conn]bool{c: true} + ctx, cancel := context.WithCancelCause(context.Background()) + ss := &sshSession{ctx: ctx, cancelCtx: cancel} + c.sessions = []*sshSession{ss} + + // Before authCompleted is set, OnPolicyChange should skip + // the conn entirely — no goroutine spawned. + srv.OnPolicyChange() + synctest.Wait() + select { + case <-ctx.Done(): + t.Fatal("session canceled before auth completed") + default: + } + + // Mark auth as completed. Now OnPolicyChange should + // evaluate the policy and act accordingly. + c.authCompleted.Store(true) + + srv.OnPolicyChange() + synctest.Wait() + select { + case <-ctx.Done(): + if !tt.wantCancel { + t.Fatal("valid session should not have been canceled") + } + default: + if tt.wantCancel { + t.Fatal("invalid session should have been canceled") + } + } + }) + }) + } +} + func mockRecordingServer(t *testing.T, handleRecord http.HandlerFunc) *httptest.Server { t.Helper() mux := http.NewServeMux() From a7a864419d3238756c4c15a532408fa475c9f992 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 16 Feb 2026 18:56:51 -1000 Subject: [PATCH 0984/1093] net/dns: make MagicDNS IPv6 registration opt-out now, not opt-in This adds a new ControlKnob to make MagicDNS IPv6 registration (telling systemd/etc) opt-out rather than opt-in. 
Updates #15404 Change-Id: If008e1cb046b792c6aff7bb1d7c58638f7d650b1 Signed-off-by: Brad Fitzpatrick --- control/controlknobs/controlknobs.go | 16 +++++ net/dns/config.go | 8 +-- net/dns/manager_tcp_test.go | 4 +- net/dns/manager_test.go | 88 +++++++++++++++++++--------- tailcfg/tailcfg.go | 9 ++- 5 files changed, 91 insertions(+), 34 deletions(-) diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index 0f85e82368dd9..1861a122e2f9e 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -113,6 +113,14 @@ type Knobs struct { // resolver on Windows or when the host is domain-joined and its primary domain // takes precedence over MagicDNS. As of 2026-02-13, it is only used on Windows. DisableHostsFileUpdates atomic.Bool + + // ForceRegisterMagicDNSIPv4Only is whether the node should only register + // its IPv4 MagicDNS service IP and not its IPv6 one. The IPv6 one, + // tsaddr.TailscaleServiceIPv6String, still works in either case. This knob + // controls only whether we tell systemd/etc about the IPv6 one. + // See https://github.com/tailscale/tailscale/issues/15404. + // TODO(bradfitz): remove this a few releases after 2026-02-16. 
+ ForceRegisterMagicDNSIPv4Only atomic.Bool } // UpdateFromNodeAttributes updates k (if non-nil) based on the provided self @@ -144,6 +152,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { disableCaptivePortalDetection = has(tailcfg.NodeAttrDisableCaptivePortalDetection) disableSkipStatusQueue = has(tailcfg.NodeAttrDisableSkipStatusQueue) disableHostsFileUpdates = has(tailcfg.NodeAttrDisableHostsFileUpdates) + forceRegisterMagicDNSIPv4Only = has(tailcfg.NodeAttrForceRegisterMagicDNSIPv4Only) ) if has(tailcfg.NodeAttrOneCGNATEnable) { @@ -171,6 +180,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection) k.DisableSkipStatusQueue.Store(disableSkipStatusQueue) k.DisableHostsFileUpdates.Store(disableHostsFileUpdates) + k.ForceRegisterMagicDNSIPv4Only.Store(forceRegisterMagicDNSIPv4Only) // If both attributes are present, then "enable" should win. This reflects // the history of seamless key renewal. @@ -210,3 +220,9 @@ func (k *Knobs) AsDebugJSON() map[string]any { } return ret } + +// ShouldForceRegisterMagicDNSIPv4Only reports the value of +// ForceRegisterMagicDNSIPv4Only, or false if k is nil. +func (k *Knobs) ShouldForceRegisterMagicDNSIPv4Only() bool { + return k != nil && k.ForceRegisterMagicDNSIPv4Only.Load() +} diff --git a/net/dns/config.go b/net/dns/config.go index 47fac83c2df48..0b09fe1a8f609 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -73,11 +73,9 @@ func (c *Config) serviceIPs(knobs *controlknobs.Knobs) []netip.Addr { return []netip.Addr{tsaddr.TailscaleServiceIPv6()} } - // TODO(bradfitz,mikeodr,raggi): include IPv6 here too; tailscale/tailscale#15404 - // And add a controlknobs knob to disable dual stack. - // - // For now, opt-in for testing. - if magicDNSDualStack() { + // See https://github.com/tailscale/tailscale/issues/15404 for the background + // on the opt-in debug knob and the controlknob opt-out. 
+ if magicDNSDualStack() || !knobs.ShouldForceRegisterMagicDNSIPv4Only() { return []netip.Addr{ tsaddr.TailscaleServiceIP(), tsaddr.TailscaleServiceIPv6(), diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go index bdd5cc7bb314b..67d6d15cd42ed 100644 --- a/net/dns/manager_tcp_test.go +++ b/net/dns/manager_tcp_test.go @@ -15,6 +15,7 @@ import ( "github.com/google/go-cmp/cmp" dns "golang.org/x/net/dns/dnsmessage" + "tailscale.com/control/controlknobs" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" @@ -93,7 +94,8 @@ func TestDNSOverTCP(t *testing.T) { bus := eventbustest.NewBus(t) dialer := tsdial.NewDialer(netmon.NewStatic()) dialer.SetBus(bus) - m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, nil, "", bus) + cknobs := &controlknobs.Knobs{} + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, cknobs, "", bus) m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts( diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index cf0c2458e395f..8a67aca5cd545 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -28,6 +28,7 @@ import ( "tailscale.com/net/dns/publicdns" "tailscale.com/net/dns/resolver" "tailscale.com/net/netmon" + "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/tstest" "tailscale.com/types/dnstype" @@ -172,6 +173,8 @@ func TestCompileHostEntries(t *testing.T) { } } +var serviceAddr46 = []netip.Addr{tsaddr.TailscaleServiceIP(), tsaddr.TailscaleServiceIPv6()} + func TestManager(t *testing.T) { if runtime.GOOS == "windows" { t.Skipf("test's assumptions break because of https://github.com/tailscale/corp/issues/1662") @@ -189,6 +192,7 @@ func TestManager(t *testing.T) { split bool bs OSConfig os OSConfig + knobs *controlknobs.Knobs rs resolver.Config goos string // empty means "linux" }{ @@ -231,7 +235,7 @@ func TestManager(t *testing.T) { "bar.tld.", "2.3.4.5"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + 
Nameservers: serviceAddr46, }, rs: resolver.Config{ Hosts: hosts( @@ -317,7 +321,7 @@ func TestManager(t *testing.T) { "bradfitz.ts.com.", "2.3.4.5"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -340,7 +344,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -359,7 +363,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("tailscale.com", "universe.tf"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -377,7 +381,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -386,6 +390,33 @@ func TestManager(t *testing.T) { "corp.com.", "2.2.2.2"), }, }, + { + name: "controlknob-disable-v6-registration", + in: Config{ + DefaultResolvers: mustRes("1.1.1.1", "9.9.9.9"), + SearchDomains: fqdns("tailscale.com", "universe.tf"), + Routes: upstreams("ts.com", ""), + Hosts: hosts( + "dave.ts.com.", "1.2.3.4", + "bradfitz.ts.com.", "2.3.4.5"), + }, + knobs: (func() *controlknobs.Knobs { + k := new(controlknobs.Knobs) + k.ForceRegisterMagicDNSIPv4Only.Store(true) + return k + })(), + os: OSConfig{ + Nameservers: mustIPs("100.100.100.100"), // without IPv6 + SearchDomains: fqdns("tailscale.com", "universe.tf"), + }, + rs: resolver.Config{ + Routes: upstreams(".", "1.1.1.1", "9.9.9.9"), + Hosts: hosts( + "dave.ts.com.", "1.2.3.4", + "bradfitz.ts.com.", "2.3.4.5"), + LocalDomains: fqdns("ts.com."), + }, + }, { name: "routes", in: Config{ @@ -397,7 +428,7 @@ func TestManager(t *testing.T) { 
SearchDomains: fqdns("coffee.shop"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf", "coffee.shop"), }, rs: resolver.Config{ @@ -432,7 +463,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf", "coffee.shop"), }, rs: resolver.Config{ @@ -452,7 +483,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), MatchDomains: fqdns("bigco.net", "corp.com"), }, @@ -477,7 +508,7 @@ func TestManager(t *testing.T) { }, split: false, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -502,7 +533,7 @@ func TestManager(t *testing.T) { }, split: false, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -527,7 +558,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf", "coffee.shop"), }, rs: resolver.Config{ @@ -549,7 +580,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), MatchDomains: fqdns("ts.com"), }, @@ -575,7 +606,7 @@ func TestManager(t *testing.T) { }, split: false, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -601,7 +632,7 @@ func 
TestManager(t *testing.T) { }, split: false, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -627,7 +658,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf", "coffee.shop"), }, rs: resolver.Config{ @@ -653,7 +684,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), MatchDomains: fqdns("corp.com", "ts.com"), }, @@ -683,7 +714,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -715,7 +746,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -740,7 +771,7 @@ func TestManager(t *testing.T) { SearchDomains: fqdns("tailscale.com", "universe.tf"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("tailscale.com", "universe.tf"), }, rs: resolver.Config{ @@ -768,7 +799,7 @@ func TestManager(t *testing.T) { DefaultResolvers: mustRes("2a07:a8c0::c3:a884"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, }, rs: resolver.Config{ Routes: upstreams(".", "2a07:a8c0::c3:a884"), @@ -780,7 +811,7 @@ func TestManager(t *testing.T) { DefaultResolvers: mustRes("https://dns.nextdns.io/c3a884"), }, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, }, rs: resolver.Config{ Routes: upstreams(".", 
"https://dns.nextdns.io/c3a884"), @@ -796,7 +827,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("optimistic-display.ts.net"), MatchDomains: fqdns("ts.net"), }, @@ -821,7 +852,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("optimistic-display.ts.net"), }, rs: resolver.Config{ @@ -844,7 +875,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("optimistic-display.ts.net"), }, rs: resolver.Config{ @@ -885,7 +916,7 @@ func TestManager(t *testing.T) { }, }, }, - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("ts.com", "universe.tf"), MatchDomains: fqdns("corp.com", "ts.com"), }, @@ -912,7 +943,7 @@ func TestManager(t *testing.T) { }, split: true, os: OSConfig{ - Nameservers: mustIPs("100.100.100.100"), + Nameservers: serviceAddr46, SearchDomains: fqdns("ts.com", "universe.tf"), MatchDomains: fqdns("corp.com", "ts.com"), }, @@ -946,7 +977,10 @@ func TestManager(t *testing.T) { if goos == "" { goos = "linux" } - knobs := &controlknobs.Knobs{} + knobs := test.knobs + if knobs == nil { + knobs = &controlknobs.Knobs{} + } bus := eventbustest.NewBus(t) dialer := tsdial.NewDialer(netmon.NewStatic()) dialer.SetBus(bus) diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 69ca20a947735..b49791be6fb39 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -179,7 +179,8 @@ type CapabilityVersion int // - 130: 2025-10-06: client can send key.HardwareAttestationPublic and key.HardwareAttestationKeySignature in MapRequest // - 131: 2025-11-25: client respects [NodeAttrDefaultAutoUpdate] // - 132: 2026-02-13: client respects [NodeAttrDisableHostsFileUpdates] -const CurrentCapabilityVersion 
CapabilityVersion = 132 +// - 133: 2026-02-17: client understands [NodeAttrForceRegisterMagicDNSIPv4Only]; MagicDNS IPv6 registered w/ OS by default +const CurrentCapabilityVersion CapabilityVersion = 133 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2748,6 +2749,12 @@ const ( // primary domain takes precedence over MagicDNS. As of 2026-02-12, it is only // used on Windows. NodeAttrDisableHostsFileUpdates NodeCapability = "disable-hosts-file-updates" + + // NodeAttrForceRegisterMagicDNSIPv4Only forces the client to only register + // its MagicDNS IPv4 address with systemd/etc, and not both its IPv4 and IPv6 addresses. + // See https://github.com/tailscale/tailscale/issues/15404. + // TODO(bradfitz): remove this a few releases after 2026-02-16. + NodeAttrForceRegisterMagicDNSIPv4Only NodeCapability = "force-register-magicdns-ipv4-only" ) // SetDNSRequest is a request to add a DNS record. From fbbf0d6669fe2b305f5bad5dd638e8b5db5c14bc Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Fri, 13 Feb 2026 09:11:15 -0700 Subject: [PATCH 0985/1093] tsconsensus: fix race condition in TestOnlyTaggedPeersCanBeDialed TestOnlyTaggedPeersCanBeDialed has a race condition: - The test untags ps[2] and waits until ps[0] sees this tag dropped from ps[2] in the netmap. - Later the test tries to dial ps[2] from ps[0] and expects the dial to fail as authorization to dial relies on the presence of the tag, now removed from ps[2]. - However, the authorization layer caches the status used to consult peer tags. When the dial happens before the cache times out, the test fails. - Due to a bug in testcontrol.Server.UpdateNode, which the test uses to remove the tag, netmap updates are not immediately triggered. The test has to wait for the next natural set of netmap updates, which on my machine takes about 22 seconds. As a result, the cache in the authorization layer times out and the test passes. 
- If one fixes the bug in UpdateNode, then netmap updates happen immediately, the cache is no longer timed out when the dial occurs, and the test fails. Fixes #18720 Updates #18703 Signed-off-by: Harry Harpham --- tsconsensus/authorization.go | 21 +++++++++++++++++++-- tsconsensus/tsconsensus_test.go | 6 ++++-- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/tsconsensus/authorization.go b/tsconsensus/authorization.go index 6261a8f1debb6..017c9e80721b9 100644 --- a/tsconsensus/authorization.go +++ b/tsconsensus/authorization.go @@ -17,6 +17,10 @@ import ( "tailscale.com/util/set" ) +// defaultStatusCacheTimeout is the duration after which cached status will be +// disregarded. See tailscaleStatusGetter.cacheTimeout. +const defaultStatusCacheTimeout = time.Second + type statusGetter interface { getStatus(context.Context) (*ipnstate.Status, error) } @@ -24,6 +28,10 @@ type statusGetter interface { type tailscaleStatusGetter struct { ts *tsnet.Server + // cacheTimeout is used to determine when the cached status should be + // disregarded and a new status fetched. Zero means ignore the cache. 
+ cacheTimeout time.Duration + mu sync.Mutex // protects the following lastStatus *ipnstate.Status lastStatusTime time.Time @@ -40,7 +48,7 @@ func (sg *tailscaleStatusGetter) fetchStatus(ctx context.Context) (*ipnstate.Sta func (sg *tailscaleStatusGetter) getStatus(ctx context.Context) (*ipnstate.Status, error) { sg.mu.Lock() defer sg.mu.Unlock() - if sg.lastStatus != nil && time.Since(sg.lastStatusTime) < 1*time.Second { + if sg.lastStatus != nil && time.Since(sg.lastStatusTime) < sg.cacheTimeout { return sg.lastStatus, nil } status, err := sg.fetchStatus(ctx) @@ -61,14 +69,23 @@ type authorization struct { } func newAuthorization(ts *tsnet.Server, tag string) *authorization { + return newAuthorizationWithCacheTimeout(ts, tag, defaultStatusCacheTimeout) +} + +func newAuthorizationWithCacheTimeout(ts *tsnet.Server, tag string, cacheTimeout time.Duration) *authorization { return &authorization{ sg: &tailscaleStatusGetter{ - ts: ts, + ts: ts, + cacheTimeout: cacheTimeout, }, tag: tag, } } +func newAuthorizationForTest(ts *tsnet.Server, tag string) *authorization { + return newAuthorizationWithCacheTimeout(ts, tag, 0) +} + func (a *authorization) Refresh(ctx context.Context) error { tStatus, err := a.sg.getStatus(ctx) if err != nil { diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index 2199a0c6b9441..8897db119c467 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -642,7 +642,7 @@ func TestOnlyTaggedPeersCanBeDialed(t *testing.T) { // make a StreamLayer for ps[0] ts := ps[0].ts - auth := newAuthorization(ts, clusterTag) + auth := newAuthorizationForTest(ts, clusterTag) port := 19841 lns := make([]net.Listener, 3) @@ -692,10 +692,12 @@ func TestOnlyTaggedPeersCanBeDialed(t *testing.T) { conn.Close() _, err = sl.Dial(a2, 2*time.Second) + if err == nil { + t.Fatal("expected dial error to untagged node, got none") + } if err.Error() != "dial: peer is not allowed" { t.Fatalf("expected dial: peer is not 
allowed, got: %v", err) } - } func TestOnlyTaggedPeersCanJoin(t *testing.T) { From f4aea70f7a199e5ac155e225fe7ad4de374e9dea Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Tue, 17 Feb 2026 14:51:54 -0800 Subject: [PATCH 0986/1093] ipn/ipnlocal: add basic support for netmap caching (#18530) This commit is based on ff0978ab, and extends #18497 to connect network map caching to the LocalBackend. As implemented, only "whole" netmap values are stored, and we do not yet handle incremental updates. As-written, the feature must be explicitly enabled via the TS_USE_CACHED_NETMAP envknob, and must be considered experimental. Updates #12639 Co-Authored-by: Brad Fitzpatrick Change-Id: I48a1e92facfbf7fb3a8e67cff7f2c9ab4ed62c83 Signed-off-by: M. J. Fromberger --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware-min.txt | 1 + cmd/tailscaled/depaware-minbox.txt | 1 + cmd/tailscaled/depaware.txt | 1 + cmd/tsidp/depaware.txt | 1 + ipn/ipnlocal/diskcache.go | 56 ++++++++++++++++ ipn/ipnlocal/local.go | 38 ++++++++--- ipn/ipnlocal/local_test.go | 100 +++++++++++++++++++++++++++++ tsnet/depaware.txt | 1 + 9 files changed, 192 insertions(+), 8 deletions(-) create mode 100644 ipn/ipnlocal/diskcache.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 5565ec01921bb..677891ad71d3f 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -821,6 +821,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware-min.txt 
b/cmd/tailscaled/depaware-min.txt index b7df3a48a6b1e..f97e0368c0d12 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -71,6 +71,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index ca029194c101e..8dfa00af75a68 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -85,6 +85,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 7f2e6f7681751..aa25fd75f9e9e 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -318,6 +318,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnext from tailscale.com/ipn/auditlog+ tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal 
tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 4dfb831b59f43..ae13b20449b41 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -240,6 +240,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ diff --git a/ipn/ipnlocal/diskcache.go b/ipn/ipnlocal/diskcache.go new file mode 100644 index 0000000000000..0b1b7b4487bd1 --- /dev/null +++ b/ipn/ipnlocal/diskcache.go @@ -0,0 +1,56 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package ipnlocal + +import ( + "tailscale.com/feature/buildfeatures" + "tailscale.com/ipn/ipnlocal/netmapcache" + "tailscale.com/types/netmap" +) + +// diskCache is the state netmap caching to disk. 
+type diskCache struct { + // all fields guarded by LocalBackend.mu + + dir string // active profile cache directory + cache *netmapcache.Cache +} + +func (b *LocalBackend) writeNetmapToDiskLocked(nm *netmap.NetworkMap) error { + if !buildfeatures.HasCacheNetMap || nm == nil || nm.Cached { + return nil + } + b.logf("writing netmap to disk cache") + + dir, err := b.profileMkdirAllLocked(b.pm.CurrentProfile().ID(), "netmap-cache") + if err != nil { + return err + } + if c := b.diskCache; c.cache == nil || c.dir != dir { + b.diskCache.cache = netmapcache.NewCache(netmapcache.FileStore(dir)) + b.diskCache.dir = dir + } + return b.diskCache.cache.Store(b.currentNode().Context(), nm) +} + +func (b *LocalBackend) loadDiskCacheLocked() (om *netmap.NetworkMap, ok bool) { + if !buildfeatures.HasCacheNetMap { + return nil, false + } + dir, err := b.profileMkdirAllLocked(b.pm.CurrentProfile().ID(), "netmap-cache") + if err != nil { + b.logf("profile data directory: %v", err) + return nil, false + } + if c := b.diskCache; c.cache == nil || c.dir != dir { + b.diskCache.cache = netmapcache.NewCache(netmapcache.FileStore(dir)) + b.diskCache.dir = dir + } + nm, err := b.diskCache.cache.Load(b.currentNode().Context()) + if err != nil { + b.logf("load netmap from cache: %v", err) + return nil, false + } + return nm, true +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bf0651ac97bd1..4221b45e5615a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -271,6 +271,7 @@ type LocalBackend struct { // of [LocalBackend]'s own state that is not tied to the node context. 
currentNodeAtomic atomic.Pointer[nodeBackend] + diskCache diskCache conf *conffile.Config // latest parsed config, or nil if not in declarative mode pm *profileManager // mu guards access lastFilterInputs *filterInputs @@ -1573,7 +1574,13 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control } b.mu.Lock() defer b.mu.Unlock() + b.setControlClientStatusLocked(c, st) +} +// setControlClientStatusLocked is the locked version of SetControlClientStatus. +// +// b.mu must be held. +func (b *LocalBackend) setControlClientStatusLocked(c controlclient.Client, st controlclient.Status) { if b.cc != c { b.logf("Ignoring SetControlClientStatus from old client") return @@ -2414,6 +2421,14 @@ func (b *LocalBackend) initOnce() { b.extHost.Init() } +func (b *LocalBackend) controlDebugFlags() []string { + debugFlags := controlDebugFlags + if b.sys.IsNetstackRouter() { + return append([]string{"netstack"}, debugFlags...) + } + return debugFlags +} + // Start applies the configuration specified in opts, and starts the // state machine. // @@ -2570,14 +2585,18 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { persistv = new(persist.Persist) } - discoPublic := b.MagicConn().DiscoPublicKey() - - isNetstack := b.sys.IsNetstackRouter() - debugFlags := controlDebugFlags - if isNetstack { - debugFlags = append([]string{"netstack"}, debugFlags...) 
+ if envknob.Bool("TS_USE_CACHED_NETMAP") { + if nm, ok := b.loadDiskCacheLocked(); ok { + logf("loaded netmap from disk cache; %d peers", len(nm.Peers)) + b.setControlClientStatusLocked(nil, controlclient.Status{ + NetMap: nm, + LoggedIn: true, // sure + }) + } } + discoPublic := b.MagicConn().DiscoPublicKey() + var ccShutdownCbs []func() ccShutdown := func() { for _, cb := range ccShutdownCbs { @@ -2603,7 +2622,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { Hostinfo: b.hostInfoWithServicesLocked(), HTTPTestClient: httpTestClient, DiscoPublicKey: discoPublic, - DebugFlags: debugFlags, + DebugFlags: b.controlDebugFlags(), HealthTracker: b.health, PolicyClient: b.sys.PolicyClientOrDefault(), Pinger: b, @@ -2619,7 +2638,7 @@ func (b *LocalBackend) startLocked(opts ipn.Options) error { // Don't warn about broken Linux IP forwarding when // netstack is being used. - SkipIPForwardingCheck: isNetstack, + SkipIPForwardingCheck: b.sys.IsNetstackRouter(), }) if err != nil { return err @@ -6248,6 +6267,9 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { var login string if nm != nil { login = cmp.Or(profileFromView(nm.UserProfiles[nm.User()]).LoginName, "") + if err := b.writeNetmapToDiskLocked(nm); err != nil { + b.logf("write netmap to cache: %v", err) + } } b.currentNode().SetNetMap(nm) if ms, ok := b.sys.MagicSock.GetOK(); ok { diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index cd44acdd1fecf..259e4b6b28a83 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -41,6 +41,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnauth" + "tailscale.com/ipn/ipnlocal/netmapcache" "tailscale.com/ipn/store/mem" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" @@ -611,6 +612,105 @@ func makeExitNode(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { return makePeer(id, append([]peerOptFunc{withCap(26), withSuggest(), withExitRoutes()}, opts...)...) 
} +func TestLoadCachedNetMap(t *testing.T) { + t.Setenv("TS_USE_CACHED_NETMAP", "1") + + // Write a small network map into a cache, and verify we can load it. + varRoot := t.TempDir() + cacheDir := filepath.Join(varRoot, "profile-data", "id0", "netmap-cache") + if err := os.MkdirAll(cacheDir, 0700); err != nil { + t.Fatalf("Create cache directory: %v", err) + } + + testMap := &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Name: "example.ts.net", + User: tailcfg.UserID(1), + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.2.3.4/32"), + }, + }).View(), + UserProfiles: map[tailcfg.UserID]tailcfg.UserProfileView{ + tailcfg.UserID(1): (&tailcfg.UserProfile{ + ID: 1, + LoginName: "amelie@example.com", + DisplayName: "Amelie du Pangoline", + }).View(), + }, + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 601, + StableID: "n601FAKE", + ComputedName: "some-peer", + User: tailcfg.UserID(1), + Key: makeNodeKeyFromID(601), + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.2.3.5/32"), + }, + }).View(), + (&tailcfg.Node{ + ID: 602, + StableID: "n602FAKE", + ComputedName: "some-tagged-peer", + Tags: []string{"tag:server", "tag:test"}, + User: tailcfg.UserID(1), + Key: makeNodeKeyFromID(602), + Addresses: []netip.Prefix{ + netip.MustParsePrefix("100.2.3.6/32"), + }, + }).View(), + }, + } + dc := netmapcache.NewCache(netmapcache.FileStore(cacheDir)) + if err := dc.Store(t.Context(), testMap); err != nil { + t.Fatalf("Store netmap in cache: %v", err) + } + + // Now make a new backend and hook it up to have access to the cache created + // above, then start it to pull in the cached netmap. 
+ sys := tsd.NewSystem() + e, err := wgengine.NewFakeUserspaceEngine(logger.Discard, + sys.Set, + sys.HealthTracker.Get(), + sys.UserMetricsRegistry(), + sys.Bus.Get(), + ) + if err != nil { + t.Fatalf("Make userspace engine: %v", err) + } + t.Cleanup(e.Close) + sys.Set(e) + sys.Set(new(mem.Store)) + + logf := tstest.WhileTestRunningLogger(t) + clb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) + if err != nil { + t.Fatalf("Make local backend: %v", err) + } + t.Cleanup(clb.Shutdown) + clb.SetVarRoot(varRoot) + + pm := must.Get(newProfileManager(new(mem.Store), logf, health.NewTracker(sys.Bus.Get()))) + pm.currentProfile = (&ipn.LoginProfile{ID: "id0"}).View() + clb.pm = pm + + // Start up the node. We can't actually log in, because we have no + // controlplane, but verify that we got a network map. + if err := clb.Start(ipn.Options{}); err != nil { + t.Fatalf("Start local backend: %v", err) + } + + // Check that the network map the backend wound up with is the one we + // stored, modulo uncached fields. 
+ nm := clb.currentNode().NetMap() + if diff := cmp.Diff(nm, testMap, + cmpopts.IgnoreFields(netmap.NetworkMap{}, "Cached", "PacketFilter", "PacketFilterRules"), + cmpopts.EquateComparable(key.NodePublic{}, key.MachinePublic{}), + ); diff != "" { + t.Error(diff) + } +} + func TestConfigureExitNode(t *testing.T) { controlURL := "https://localhost:1/" exitNode1 := makeExitNode(1, withName("node-1"), withDERP(1), withAddresses(netip.MustParsePrefix("100.64.1.1/32"))) diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 46acadd1dd750..bcc00590a4d2c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -236,6 +236,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ + tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ From eb3d35c8b59c853eec0e66a48e22faacc7a82859 Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 18 Feb 2026 09:34:55 +0000 Subject: [PATCH 0987/1093] cmd/k8s-operator,k8s-operator: define ProxyGroupPolicy reconciler (#18654) This commit implements a reconciler for the new `ProxyGroupPolicy` custom resource. When created, all `ProxyGroupPolicy` resources within the same namespace are merged into two `ValidatingAdmissionPolicy` resources, one for egress and one for ingress. These policies use CEL expressions to limit the usage of the "tailscale.com/proxy-group" annotation on `Service` and `Ingress` resources on create & update. Included here is also a new e2e test that ensures that resources that violate the policy return an error on creation, and that once the policy is changed to allow them they can be created. 
Closes: https://github.com/tailscale/corp/issues/36830 Signed-off-by: David Bond --- cmd/k8s-operator/depaware.txt | 1 + .../deploy/chart/templates/.gitignore | 1 + .../deploy/chart/templates/operator-rbac.yaml | 6 + .../tailscale.com_proxygrouppolicies.yaml | 4 - .../deploy/manifests/operator.yaml | 158 +++++++ cmd/k8s-operator/e2e/doc.go | 2 +- cmd/k8s-operator/e2e/ingress_test.go | 1 + cmd/k8s-operator/e2e/main_test.go | 19 +- cmd/k8s-operator/e2e/pebble.go | 1 + cmd/k8s-operator/e2e/proxy_test.go | 1 + cmd/k8s-operator/e2e/proxygrouppolicy_test.go | 161 ++++++++ cmd/k8s-operator/e2e/setup.go | 1 + cmd/k8s-operator/e2e/ssh.go | 1 + cmd/k8s-operator/generate/main.go | 33 +- cmd/k8s-operator/operator.go | 9 + .../apis/v1alpha1/types_proxygrouppolicy.go | 4 - .../proxygrouppolicy/proxygrouppolicy.go | 391 ++++++++++++++++++ .../proxygrouppolicy/proxygrouppolicy_test.go | 217 ++++++++++ 18 files changed, 987 insertions(+), 24 deletions(-) create mode 100644 cmd/k8s-operator/e2e/proxygrouppolicy_test.go create mode 100644 k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go create mode 100644 k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 677891ad71d3f..cd87d49872028 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -832,6 +832,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/k8s-operator/apis from tailscale.com/k8s-operator/apis/v1alpha1 tailscale.com/k8s-operator/apis/v1alpha1 from tailscale.com/cmd/k8s-operator+ tailscale.com/k8s-operator/reconciler from tailscale.com/k8s-operator/reconciler/tailnet + tailscale.com/k8s-operator/reconciler/proxygrouppolicy from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/reconciler/tailnet from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/sessionrecording from tailscale.com/k8s-operator/api-proxy 
tailscale.com/k8s-operator/sessionrecording/spdy from tailscale.com/k8s-operator/sessionrecording diff --git a/cmd/k8s-operator/deploy/chart/templates/.gitignore b/cmd/k8s-operator/deploy/chart/templates/.gitignore index f480bb57d5f18..185ea9e2be316 100644 --- a/cmd/k8s-operator/deploy/chart/templates/.gitignore +++ b/cmd/k8s-operator/deploy/chart/templates/.gitignore @@ -9,3 +9,4 @@ /proxygroup.yaml /recorder.yaml /tailnet.yaml +/proxygrouppolicy.yaml diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 92decef17aab4..4d59b4aad077d 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -40,6 +40,9 @@ rules: - apiGroups: ["tailscale.com"] resources: ["tailnets", "tailnets/status"] verbs: ["get", "list", "watch", "update"] +- apiGroups: ["tailscale.com"] + resources: ["proxygrouppolicies", "proxygrouppolicies/status"] + verbs: ["get", "list", "watch", "update"] - apiGroups: ["tailscale.com"] resources: ["recorders", "recorders/status"] verbs: ["get", "list", "watch", "update"] @@ -47,6 +50,9 @@ rules: resources: ["customresourcedefinitions"] verbs: ["get", "list", "watch"] resourceNames: ["servicemonitors.monitoring.coreos.com"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingadmissionpolicies", "validatingadmissionpolicybindings"] + verbs: ["list", "create", "delete", "update", "get", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml index 51edcb56f039b..d1425fba80165 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml @@ -19,10 +19,6 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: 
date - - description: Status of the deployed ProxyGroupPolicy resources. - jsonPath: .status.conditions[?(@.type == "ProxyGroupPolicyReady")].reason - name: Status - type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index b31e45eb7befc..597641bdefecf 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -3290,6 +3290,142 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.0 + name: proxygrouppolicies.tailscale.com +spec: + group: tailscale.com + names: + kind: ProxyGroupPolicy + listKind: ProxyGroupPolicyList + plural: proxygrouppolicies + shortNames: + - pgp + singular: proxygrouppolicy + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec describes the desired state of the ProxyGroupPolicy. 
+ More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + egress: + description: |- + Names of ProxyGroup resources that can be used by Service resources within this namespace. An empty list + denotes that no egress via ProxyGroups is allowed within this namespace. + items: + type: string + type: array + ingress: + description: |- + Names of ProxyGroup resources that can be used by Ingress resources within this namespace. An empty list + denotes that no ingress via ProxyGroups is allowed within this namespace. + items: + type: string + type: array + type: object + status: + description: |- + Status describes the status of the ProxyGroupPolicy. This is set + and managed by the Tailscale operator. + properties: + conditions: + items: + description: Condition contains details for one aspect of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.17.0 @@ -5318,6 +5454,16 @@ rules: - list - watch - update + - apiGroups: + - tailscale.com + resources: + - proxygrouppolicies + - proxygrouppolicies/status + verbs: + - get + - list + - watch + - update - apiGroups: - tailscale.com resources: @@ -5338,6 +5484,18 @@ rules: - get - list - watch + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingadmissionpolicies + - validatingadmissionpolicybindings + verbs: + - list + - create + - delete + - update + - get + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/cmd/k8s-operator/e2e/doc.go b/cmd/k8s-operator/e2e/doc.go index c0cc363160f70..27d10e637c8c2 100644 --- a/cmd/k8s-operator/e2e/doc.go +++ b/cmd/k8s-operator/e2e/doc.go @@ -24,5 +24,5 @@ // // * go // * container runtime with the docker daemon API available -// * devcontrol: 
./tool/go run ./cmd/devcontrol --generate-test-devices=k8s-operator-e2e --scenario-output-dir=/tmp/k8s-operator-e2e --test-dns=http://localhost:8055 +// * devcontrol: ./tool/go run --tags=tailscale_saas ./cmd/devcontrol --generate-test-devices=k8s-operator-e2e --scenario-output-dir=/tmp/k8s-operator-e2e --test-dns=http://localhost:8055 package e2e diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go index eb05efa0cd1b8..5339b05836388 100644 --- a/cmd/k8s-operator/e2e/ingress_test.go +++ b/cmd/k8s-operator/e2e/ingress_test.go @@ -14,6 +14,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + kube "tailscale.com/k8s-operator" "tailscale.com/tstest" "tailscale.com/types/ptr" diff --git a/cmd/k8s-operator/e2e/main_test.go b/cmd/k8s-operator/e2e/main_test.go index cb5c35c0054b2..02f614014dbee 100644 --- a/cmd/k8s-operator/e2e/main_test.go +++ b/cmd/k8s-operator/e2e/main_test.go @@ -54,12 +54,29 @@ func createAndCleanup(t *testing.T, cl client.Client, obj client.Object) { t.Cleanup(func() { // Use context.Background() for cleanup, as t.Context() is cancelled // just before cleanup functions are called. 
- if err := cl.Delete(context.Background(), obj); err != nil { + if err = cl.Delete(context.Background(), obj); err != nil { t.Errorf("error cleaning up %s %s/%s: %s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err) } }) } +func createAndCleanupErr(t *testing.T, cl client.Client, obj client.Object) error { + t.Helper() + + err := cl.Create(t.Context(), obj) + if err != nil { + return err + } + + t.Cleanup(func() { + if err = cl.Delete(context.Background(), obj); err != nil { + t.Errorf("error cleaning up %s %s/%s: %s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err) + } + }) + + return nil +} + func get(ctx context.Context, cl client.Client, obj client.Object) error { return cl.Get(ctx, client.ObjectKeyFromObject(obj), obj) } diff --git a/cmd/k8s-operator/e2e/pebble.go b/cmd/k8s-operator/e2e/pebble.go index a3ccb50cd0493..5fcb35e057c3d 100644 --- a/cmd/k8s-operator/e2e/pebble.go +++ b/cmd/k8s-operator/e2e/pebble.go @@ -12,6 +12,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" + "tailscale.com/types/ptr" ) diff --git a/cmd/k8s-operator/e2e/proxy_test.go b/cmd/k8s-operator/e2e/proxy_test.go index f7d11d278ef77..2d4fa53cc2589 100644 --- a/cmd/k8s-operator/e2e/proxy_test.go +++ b/cmd/k8s-operator/e2e/proxy_test.go @@ -16,6 +16,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" + "tailscale.com/ipn" "tailscale.com/tstest" ) diff --git a/cmd/k8s-operator/e2e/proxygrouppolicy_test.go b/cmd/k8s-operator/e2e/proxygrouppolicy_test.go new file mode 100644 index 0000000000000..f8126499b0db0 --- /dev/null +++ b/cmd/k8s-operator/e2e/proxygrouppolicy_test.go @@ -0,0 +1,161 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package e2e + +import ( + "strings" + "testing" + "time" + + corev1 
"k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/types/ptr" +) + +// See [TestMain] for test requirements. +func TestProxyGroupPolicy(t *testing.T) { + if tnClient == nil { + t.Skip("TestProxyGroupPolicy requires a working tailnet client") + } + + // Apply deny-all policy + denyAllPolicy := &tsapi.ProxyGroupPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + } + + createAndCleanup(t, kubeClient, denyAllPolicy) + <-time.After(time.Second * 2) + + // Attempt to create an egress Service within the default namespace, the above policy should + // reject it. + egressService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "egress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/tailnet-fqdn": "test.something.ts.net", + "tailscale.com/proxy-group": "test", + }, + }, + Spec: corev1.ServiceSpec{ + ExternalName: "placeholder", + Type: corev1.ServiceTypeExternalName, + Ports: []corev1.ServicePort{ + { + Port: 8080, + Protocol: corev1.ProtocolTCP, + Name: "http", + }, + }, + }, + } + + err := createAndCleanupErr(t, kubeClient, egressService) + switch { + case err != nil && strings.Contains(err.Error(), "ValidatingAdmissionPolicy"): + case err != nil: + t.Fatalf("expected forbidden error, got: %v", err) + default: + t.Fatal("expected error when creating egress service") + } + + // Attempt to create an ingress Service within the default namespace, the above policy should + // reject it. 
+ ingressService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + Ports: []corev1.ServicePort{ + { + Port: 8080, + Protocol: corev1.ProtocolTCP, + Name: "http", + }, + }, + }, + } + + err = createAndCleanupErr(t, kubeClient, ingressService) + switch { + case err != nil && strings.Contains(err.Error(), "ValidatingAdmissionPolicy"): + case err != nil: + t.Fatalf("expected forbidden error, got: %v", err) + default: + t.Fatal("expected error when creating ingress service") + } + + // Attempt to create an Ingress within the default namespace, the above policy should reject it + ingress := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "nginx", + Port: networkingv1.ServiceBackendPort{ + Number: 80, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + { + Hosts: []string{"nginx"}, + }, + }, + }, + } + + err = createAndCleanupErr(t, kubeClient, ingress) + switch { + case err != nil && strings.Contains(err.Error(), "ValidatingAdmissionPolicy"): + case err != nil: + t.Fatalf("expected forbidden error, got: %v", err) + default: + t.Fatal("expected error when creating ingress") + } + + // Add policy to allow ingress/egress using the "test" proxy-group. This should be merged with the deny-all + // policy so they do not conflict. 
+ allowTestPolicy := &tsapi.ProxyGroupPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "allow-test", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{"test"}, + Egress: []string{"test"}, + }, + } + + createAndCleanup(t, kubeClient, allowTestPolicy) + <-time.After(time.Second * 2) + + // With this policy in place, the above ingress/egress resources should be allowed to be created. + createAndCleanup(t, kubeClient, egressService) + createAndCleanup(t, kubeClient, ingressService) + createAndCleanup(t, kubeClient, ingress) +} diff --git a/cmd/k8s-operator/e2e/setup.go b/cmd/k8s-operator/e2e/setup.go index baf763ac61a60..845a591453b64 100644 --- a/cmd/k8s-operator/e2e/setup.go +++ b/cmd/k8s-operator/e2e/setup.go @@ -52,6 +52,7 @@ import ( "sigs.k8s.io/kind/pkg/cluster" "sigs.k8s.io/kind/pkg/cluster/nodeutils" "sigs.k8s.io/kind/pkg/cmd" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" diff --git a/cmd/k8s-operator/e2e/ssh.go b/cmd/k8s-operator/e2e/ssh.go index 8000d13267262..371c44f9d4544 100644 --- a/cmd/k8s-operator/e2e/ssh.go +++ b/cmd/k8s-operator/e2e/ssh.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" + tailscaleroot "tailscale.com" "tailscale.com/types/ptr" ) diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 9a910da3eb945..840812ea3b248 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -20,20 +20,22 @@ import ( ) const ( - operatorDeploymentFilesPath = "cmd/k8s-operator/deploy" - connectorCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_connectors.yaml" - proxyClassCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxyclasses.yaml" - dnsConfigCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_dnsconfigs.yaml" - recorderCRDPath = operatorDeploymentFilesPath + 
"/crds/tailscale.com_recorders.yaml" - proxyGroupCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxygroups.yaml" - tailnetCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_tailnets.yaml" - helmTemplatesPath = operatorDeploymentFilesPath + "/chart/templates" - connectorCRDHelmTemplatePath = helmTemplatesPath + "/connector.yaml" - proxyClassCRDHelmTemplatePath = helmTemplatesPath + "/proxyclass.yaml" - dnsConfigCRDHelmTemplatePath = helmTemplatesPath + "/dnsconfig.yaml" - recorderCRDHelmTemplatePath = helmTemplatesPath + "/recorder.yaml" - proxyGroupCRDHelmTemplatePath = helmTemplatesPath + "/proxygroup.yaml" - tailnetCRDHelmTemplatePath = helmTemplatesPath + "/tailnet.yaml" + operatorDeploymentFilesPath = "cmd/k8s-operator/deploy" + connectorCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_connectors.yaml" + proxyClassCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxyclasses.yaml" + dnsConfigCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_dnsconfigs.yaml" + recorderCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_recorders.yaml" + proxyGroupCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxygroups.yaml" + tailnetCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_tailnets.yaml" + proxyGroupPolicyCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxygrouppolicies.yaml" + helmTemplatesPath = operatorDeploymentFilesPath + "/chart/templates" + connectorCRDHelmTemplatePath = helmTemplatesPath + "/connector.yaml" + proxyClassCRDHelmTemplatePath = helmTemplatesPath + "/proxyclass.yaml" + dnsConfigCRDHelmTemplatePath = helmTemplatesPath + "/dnsconfig.yaml" + recorderCRDHelmTemplatePath = helmTemplatesPath + "/recorder.yaml" + proxyGroupCRDHelmTemplatePath = helmTemplatesPath + "/proxygroup.yaml" + tailnetCRDHelmTemplatePath = helmTemplatesPath + "/tailnet.yaml" + proxyGroupPolicyCRDHelmTemplatePath = helmTemplatesPath + "/proxygrouppolicy.yaml" 
helmConditionalStart = "{{ if .Values.installCRDs -}}\n" helmConditionalEnd = "{{- end -}}" @@ -157,6 +159,7 @@ func generate(baseDir string) error { {recorderCRDPath, recorderCRDHelmTemplatePath}, {proxyGroupCRDPath, proxyGroupCRDHelmTemplatePath}, {tailnetCRDPath, tailnetCRDHelmTemplatePath}, + {proxyGroupPolicyCRDPath, proxyGroupPolicyCRDHelmTemplatePath}, } { if err := addCRDToHelm(crd.crdPath, crd.templatePath); err != nil { return fmt.Errorf("error adding %s CRD to Helm templates: %w", crd.crdPath, err) @@ -173,6 +176,8 @@ func cleanup(baseDir string) error { dnsConfigCRDHelmTemplatePath, recorderCRDHelmTemplatePath, proxyGroupCRDHelmTemplatePath, + tailnetCRDHelmTemplatePath, + proxyGroupPolicyCRDHelmTemplatePath, } { if err := os.Remove(filepath.Join(baseDir, path)); err != nil && !os.IsNotExist(err) { return fmt.Errorf("error cleaning up %s: %w", path, err) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 4f48c1812643a..81f62d4775671 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -54,6 +54,7 @@ import ( "tailscale.com/ipn/store/kubestore" apiproxy "tailscale.com/k8s-operator/api-proxy" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler/proxygrouppolicy" "tailscale.com/k8s-operator/reconciler/tailnet" "tailscale.com/kube/kubetypes" "tailscale.com/tsnet" @@ -337,6 +338,14 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not register tailnet reconciler: %v", err) } + proxyGroupPolicyOptions := proxygrouppolicy.ReconcilerOptions{ + Client: mgr.GetClient(), + } + + if err = proxygrouppolicy.NewReconciler(proxyGroupPolicyOptions).Register(mgr); err != nil { + startlog.Fatalf("could not register proxygrouppolicy reconciler: %v", err) + } + svcFilter := handler.EnqueueRequestsFromMapFunc(serviceHandler) svcChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("svc")) // If a ProxyClass changes, enqueue all Services 
labeled with that diff --git a/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go b/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go index dd06380c271f5..551811693f7c8 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go +++ b/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go @@ -18,7 +18,6 @@ var ProxyGroupPolicyKind = "ProxyGroupPolicy" // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Namespaced,shortName=pgp // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyGroupPolicyReady")].reason`,description="Status of the deployed ProxyGroupPolicy resources." type ProxyGroupPolicy struct { metav1.TypeMeta `json:",inline"` @@ -62,6 +61,3 @@ type ProxyGroupPolicyStatus struct { // +optional Conditions []metav1.Condition `json:"conditions"` } - -// ProxyGroupPolicyReady is set to True if the ProxyGroupPolicy is available for use by operator workloads. -const ProxyGroupPolicyReady ConditionType = "ProxyGroupPolicyReady" diff --git a/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go new file mode 100644 index 0000000000000..0541a5cf3691b --- /dev/null +++ b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go @@ -0,0 +1,391 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package proxygrouppolicy provides reconciliation logic for the ProxyGroupPolicy custom resource definition. It is +// responsible for generating ValidatingAdmissionPolicy resources that limit users to a set number of ProxyGroup +// names that can be used within Service and Ingress resources via the "tailscale.com/proxy-group" annotation. 
+package proxygrouppolicy + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + + admr "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/types/ptr" + "tailscale.com/util/set" +) + +type ( + // The Reconciler type is a reconcile.TypedReconciler implementation used to manage the reconciliation of + // ProxyGroupPolicy custom resources. + Reconciler struct { + client.Client + } + + // The ReconcilerOptions type contains configuration values for the Reconciler. + ReconcilerOptions struct { + // The client for interacting with the Kubernetes API. + Client client.Client + } +) + +const reconcilerName = "proxygrouppolicy-reconciler" + +// NewReconciler returns a new instance of the Reconciler type. It watches specifically for changes to ProxyGroupPolicy +// custom resources. The ReconcilerOptions can be used to modify the behaviour of the Reconciler. +func NewReconciler(options ReconcilerOptions) *Reconciler { + return &Reconciler{ + Client: options.Client, + } +} + +// Register the Reconciler onto the given manager.Manager implementation. +func (r *Reconciler) Register(mgr manager.Manager) error { + return builder. + ControllerManagedBy(mgr). + For(&tsapi.ProxyGroupPolicy{}). + Named(reconcilerName). + Complete(r) +} + +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + // Rather than working on a single ProxyGroupPolicy resource, we list all that exist within the + // same namespace as the one we're reconciling so that we can merge them into a single pair of + // ValidatingAdmissionPolicy resources. 
+ var policies tsapi.ProxyGroupPolicyList + if err := r.List(ctx, &policies, client.InNamespace(req.Namespace)); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to list ProxyGroupPolicy resources %q: %w", req.NamespacedName, err) + } + + if len(policies.Items) == 0 { + // If we've got no items in the list, we go and delete any policies and bindings that + // may exist. + return r.delete(ctx, req.Namespace) + } + + return r.createOrUpdate(ctx, req.Namespace, policies) +} + +func (r *Reconciler) delete(ctx context.Context, namespace string) (reconcile.Result, error) { + ingress := "ts-ingress-" + namespace + egress := "ts-egress-" + namespace + + objects := []client.Object{ + &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingress, + }, + }, + &admr.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingress, + }, + }, + &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: egress, + }, + }, + &admr.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: egress, + }, + }, + } + + for _, obj := range objects { + err := r.Delete(ctx, obj) + switch { + case apierrors.IsNotFound(err): + // A resource may have already been deleted in a previous reconciliation that failed for + // some reason, so we'll ignore it if it doesn't exist. + continue + case err != nil: + return reconcile.Result{}, fmt.Errorf("failed to delete %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err) + } + } + + return reconcile.Result{}, nil +} + +func (r *Reconciler) createOrUpdate(ctx context.Context, namespace string, policies tsapi.ProxyGroupPolicyList) (reconcile.Result, error) { + ingressNames := set.Set[string]{} + egressNames := set.Set[string]{} + + // If this namespace has multiple ProxyGroupPolicy resources, we'll reduce them down to just their distinct + // egress/ingress names. 
+ for _, policy := range policies.Items { + ingressNames.AddSlice(policy.Spec.Ingress) + egressNames.AddSlice(policy.Spec.Egress) + } + + ingress, err := r.generateIngressPolicy(ctx, namespace, ingressNames) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate ingress policy: %w", err) + } + + ingressBinding, err := r.generatePolicyBinding(ctx, namespace, ingress) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate ingress policy binding: %w", err) + } + + egress, err := r.generateEgressPolicy(ctx, namespace, egressNames) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate egress policy: %w", err) + } + + egressBinding, err := r.generatePolicyBinding(ctx, namespace, egress) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate egress policy binding: %w", err) + } + + objects := []client.Object{ + ingress, + ingressBinding, + egress, + egressBinding, + } + + for _, obj := range objects { + // Attempt to perform an update first as we'll only create these once and continually update them, so it's + // more likely that an update is needed instead of creation. If the resource does not exist, we'll + // create it. + err = r.Update(ctx, obj) + switch { + case apierrors.IsNotFound(err): + if err = r.Create(ctx, obj); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err) + } + case err != nil: + return reconcile.Result{}, fmt.Errorf("failed to update %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err) + } + } + + return reconcile.Result{}, nil +} + +const ( + // ingressCEL enforces proxy-group annotation rules for Ingress resources. 
+ // + // Logic: + // + // - If the object is NOT an Ingress → allow (this validation is irrelevant) + // - If the annotation is absent → allow (annotation is optional) + // - If the annotation is present → its value must be in the allowlist + // + // Empty allowlist behavior: + // If the list is empty, any present annotation will fail membership, + // effectively acting as "deny-all". + ingressCEL = `request.kind.kind != "Ingress" || !("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s]` + + // ingressServiceCEL enforces proxy-group annotation rules for Services + // that are using the tailscale load balancer. + // + // Logic: + // + // - If the object is NOT a Service → allow + // - If Service does NOT use loadBalancerClass "tailscale" → allow + // (egress policy will handle those) + // - If annotation is absent → allow + // - If annotation is present → must be in allowlist + // + // This makes ingress policy apply ONLY to tailscale Services. + ingressServiceCEL = `request.kind.kind != "Service" || !((has(object.spec.loadBalancerClass) && object.spec.loadBalancerClass == "tailscale") || ("tailscale.com/expose" in object.metadata.annotations && object.metadata.annotations["tailscale.com/expose"] == "true")) || (!("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s])` + // egressCEL enforces proxy-group annotation rules for Services that + // are NOT using the tailscale load balancer. + // + // Logic: + // + // - If Service uses loadBalancerClass "tailscale" → allow + // (ingress policy handles those) + // - If Service uses "tailscale.com/expose" → allow + // (ingress policy handles those) + // - If annotation is absent → allow + // - If annotation is present → must be in allowlist + // + // Empty allowlist behavior: + // Any present annotation is rejected ("deny-all"). 
+ // + // This expression is mutually exclusive with ingressServiceCEL, + // preventing policy conflicts. + egressCEL = `((has(object.spec.loadBalancerClass) && object.spec.loadBalancerClass == "tailscale") || ("tailscale.com/expose" in object.metadata.annotations && object.metadata.annotations["tailscale.com/expose"] == "true")) || !("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s]` +) + +func (r *Reconciler) generateIngressPolicy(ctx context.Context, namespace string, names set.Set[string]) (*admr.ValidatingAdmissionPolicy, error) { + name := "ts-ingress-" + namespace + + var policy admr.ValidatingAdmissionPolicy + err := r.Get(ctx, client.ObjectKey{Name: name}, &policy) + switch { + case apierrors.IsNotFound(err): + // If it's not found, we can create a new one. We only want the existing one for + // its resource version. + case err != nil: + return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicy %q: %w", name, err) + } + + return &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + ResourceVersion: policy.ResourceVersion, + }, + Spec: admr.ValidatingAdmissionPolicySpec{ + FailurePolicy: ptr.To(admr.Fail), + MatchConstraints: &admr.MatchResources{ + // The operator allows ingress via Ingress resources & Service resources (that use the "tailscale" load + // balancer class), so we have two resource rules here with multiple validation expressions that attempt + // to keep out of each other's way. 
+ ResourceRules: []admr.NamedRuleWithOperations{ + { + RuleWithOperations: admr.RuleWithOperations{ + Operations: []admr.OperationType{ + admr.Create, + admr.Update, + }, + Rule: admr.Rule{ + APIGroups: []string{"networking.k8s.io"}, + APIVersions: []string{"*"}, + Resources: []string{"ingresses"}, + }, + }, + }, + { + RuleWithOperations: admr.RuleWithOperations{ + Operations: []admr.OperationType{ + admr.Create, + admr.Update, + }, + Rule: admr.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"services"}, + }, + }, + }, + }, + }, + Validations: []admr.Validation{ + generateValidation(names, ingressCEL), + generateValidation(names, ingressServiceCEL), + }, + }, + }, nil +} + +func (r *Reconciler) generateEgressPolicy(ctx context.Context, namespace string, names set.Set[string]) (*admr.ValidatingAdmissionPolicy, error) { + name := "ts-egress-" + namespace + + var policy admr.ValidatingAdmissionPolicy + err := r.Get(ctx, client.ObjectKey{Name: name}, &policy) + switch { + case apierrors.IsNotFound(err): + // If it's not found, we can create a new one. We only want the existing one for + // its resource version. 
+ case err != nil: + return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicy %q: %w", name, err) + } + + return &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + ResourceVersion: policy.ResourceVersion, + }, + Spec: admr.ValidatingAdmissionPolicySpec{ + FailurePolicy: ptr.To(admr.Fail), + MatchConstraints: &admr.MatchResources{ + ResourceRules: []admr.NamedRuleWithOperations{ + { + RuleWithOperations: admr.RuleWithOperations{ + Operations: []admr.OperationType{ + admr.Create, + admr.Update, + }, + Rule: admr.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"services"}, + }, + }, + }, + }, + }, + Validations: []admr.Validation{ + generateValidation(names, egressCEL), + }, + }, + }, nil +} + +const ( + denyMessage = `Annotation "tailscale.com/proxy-group" cannot be used on this resource in this namespace` + messageFormat = `If set, annotation "tailscale.com/proxy-group" must be one of [%s]` +) + +func generateValidation(names set.Set[string], format string) admr.Validation { + values := names.Slice() + + // We use a sort here so that the order of the proxy-group names are consistent + // across reconciliation loops. + sort.Strings(values) + + quoted := make([]string, len(values)) + for i, v := range values { + quoted[i] = strconv.Quote(v) + } + + joined := strings.Join(quoted, ",") + message := fmt.Sprintf(messageFormat, strings.Join(values, ", ")) + if len(values) == 0 { + message = denyMessage + } + + return admr.Validation{ + Expression: fmt.Sprintf(format, joined), + Message: message, + } +} + +func (r *Reconciler) generatePolicyBinding(ctx context.Context, namespace string, policy *admr.ValidatingAdmissionPolicy) (*admr.ValidatingAdmissionPolicyBinding, error) { + var binding admr.ValidatingAdmissionPolicyBinding + err := r.Get(ctx, client.ObjectKey{Name: policy.Name}, &binding) + switch { + case apierrors.IsNotFound(err): + // If it's not found, we can create a new one. 
We only want the existing one for + // its resource version. + case err != nil: + return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicyBinding %q: %w", policy.Name, err) + } + + return &admr.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: policy.Name, + ResourceVersion: binding.ResourceVersion, + }, + Spec: admr.ValidatingAdmissionPolicyBindingSpec{ + PolicyName: policy.Name, + ValidationActions: []admr.ValidationAction{ + admr.Deny, + }, + MatchResources: &admr.MatchResources{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": namespace, + }, + }, + }, + }, + }, nil +} diff --git a/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go new file mode 100644 index 0000000000000..6710eac7406d6 --- /dev/null +++ b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go @@ -0,0 +1,217 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package proxygrouppolicy_test + +import ( + "slices" + "strings" + "testing" + + admr "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler/proxygrouppolicy" +) + +func TestReconciler_Reconcile(t *testing.T) { + t.Parallel() + + tt := []struct { + Name string + Request reconcile.Request + ExpectedPolicyCount int + ExistingResources []client.Object + ExpectsError bool + }{ + { + Name: "single policy, denies all", + ExpectedPolicyCount: 2, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + ExistingResources: []client.Object{ + 
&tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + }, + }, + }, + { + Name: "multiple policies merged", + ExpectedPolicyCount: 2, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + ExistingResources: []client.Object{ + &tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + }, + &tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "allow-one", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{ + "test-ingress", + }, + Egress: []string{}, + }, + }, + }, + }, + { + Name: "no policies, no child resources", + ExpectedPolicyCount: 0, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.Name, func(t *testing.T) { + bldr := fake.NewClientBuilder().WithScheme(tsapi.GlobalScheme) + bldr = bldr.WithObjects(tc.ExistingResources...) 
+ + fc := bldr.Build() + opts := proxygrouppolicy.ReconcilerOptions{ + Client: fc, + } + + reconciler := proxygrouppolicy.NewReconciler(opts) + _, err := reconciler.Reconcile(t.Context(), tc.Request) + if tc.ExpectsError && err == nil { + t.Fatalf("expected error, got none") + } + + if !tc.ExpectsError && err != nil { + t.Fatalf("expected no error, got %v", err) + } + + var policies admr.ValidatingAdmissionPolicyList + if err = fc.List(t.Context(), &policies); err != nil { + t.Fatal(err) + } + + if len(policies.Items) != tc.ExpectedPolicyCount { + t.Fatalf("expected %d ValidatingAdmissionPolicy resources, got %d", tc.ExpectedPolicyCount, len(policies.Items)) + } + + var bindings admr.ValidatingAdmissionPolicyBindingList + if err = fc.List(t.Context(), &bindings); err != nil { + t.Fatal(err) + } + + if len(bindings.Items) != tc.ExpectedPolicyCount { + t.Fatalf("expected %d ValidatingAdmissionPolicyBinding resources, got %d", tc.ExpectedPolicyCount, len(bindings.Items)) + } + + for _, binding := range bindings.Items { + actual, ok := binding.Spec.MatchResources.NamespaceSelector.MatchLabels["kubernetes.io/metadata.name"] + if !ok || actual != metav1.NamespaceDefault { + t.Fatalf("expected binding to be for default namespace, got %v", actual) + } + + if !slices.Contains(binding.Spec.ValidationActions, admr.Deny) { + t.Fatalf("expected binding to be deny, got %v", binding.Spec.ValidationActions) + } + } + + for _, policy := range policies.Items { + // Each ValidatingAdmissionPolicy must be set to fail (rejecting resources). 
+ if policy.Spec.FailurePolicy == nil || *policy.Spec.FailurePolicy != admr.Fail { + t.Fatalf("expected fail policy, got %v", *policy.Spec.FailurePolicy) + } + + // Each ValidatingAdmissionPolicy must have a matching ValidatingAdmissionPolicyBinding + bound := slices.ContainsFunc(bindings.Items, func(obj admr.ValidatingAdmissionPolicyBinding) bool { + return obj.Spec.PolicyName == policy.Name + }) + if !bound { + t.Fatalf("expected policy %s to be bound, but wasn't", policy.Name) + } + + // Each ValidatingAdmissionPolicy must be set to evaluate on creation and update of resources. + for _, rule := range policy.Spec.MatchConstraints.ResourceRules { + if !slices.Contains(rule.Operations, admr.Update) { + t.Fatal("expected ingress rule to act on update, but doesn't") + } + + if !slices.Contains(rule.Operations, admr.Create) { + t.Fatal("expected ingress rule to act on create, but doesn't") + } + } + + // Egress policies should only act on Service resources. + if strings.Contains(policy.Name, "egress") { + if len(policy.Spec.MatchConstraints.ResourceRules) != 1 { + t.Fatalf("expected exactly one matching resource, got %d", len(policy.Spec.MatchConstraints.ResourceRules)) + } + + rule := policy.Spec.MatchConstraints.ResourceRules[0] + + if !slices.Contains(rule.Resources, "services") { + t.Fatal("expected egress rule to act on services, but doesn't") + } + + if len(policy.Spec.Validations) != 1 { + t.Fatalf("expected exactly one validation, got %d", len(policy.Spec.Validations)) + } + } + + // Ingress policies should act on both Ingress and Service resources. 
+ if strings.Contains(policy.Name, "ingress") { + if len(policy.Spec.MatchConstraints.ResourceRules) != 2 { + t.Fatalf("expected exactly two matching resources, got %d", len(policy.Spec.MatchConstraints.ResourceRules)) + } + + ingressRule := policy.Spec.MatchConstraints.ResourceRules[0] + if !slices.Contains(ingressRule.Resources, "ingresses") { + t.Fatal("expected ingress rule to act on ingresses, but doesn't") + } + + serviceRule := policy.Spec.MatchConstraints.ResourceRules[1] + if !slices.Contains(serviceRule.Resources, "services") { + t.Fatal("expected ingress rule to act on services, but doesn't") + } + + if len(policy.Spec.Validations) != 2 { + t.Fatalf("expected exactly two validations, got %d", len(policy.Spec.Validations)) + } + } + } + }) + } +} From 299f1bf581886a6d9051d6ac60efc770db5321ab Mon Sep 17 00:00:00 2001 From: Harry Harpham Date: Thu, 12 Feb 2026 16:38:18 -0700 Subject: [PATCH 0988/1093] testcontrol: ensure Server.UpdateNode triggers netmap updates Updating a node on a testcontrol server should trigger netmap updates to all connected streaming clients. This was not the case previous to this change and consequently caused race conditions in tests. It was possible for a test to call UpdateNode and for connected nodes to never see the update propagate. 
Updates #16340 Fixes #18703 Signed-off-by: Harry Harpham --- tstest/integration/testcontrol/testcontrol.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 56664ba746204..1e24414903ae9 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -1075,9 +1075,7 @@ func sendUpdate(dst chan<- updateType, updateType updateType) bool { } } -func (s *Server) UpdateNode(n *tailcfg.Node) (peersToUpdate []tailcfg.NodeID) { - s.mu.Lock() - defer s.mu.Unlock() +func (s *Server) updateNodeLocked(n *tailcfg.Node) (peersToUpdate []tailcfg.NodeID) { if n.Key.IsZero() { panic("zero nodekey") } @@ -1085,6 +1083,15 @@ func (s *Server) UpdateNode(n *tailcfg.Node) (peersToUpdate []tailcfg.NodeID) { return s.nodeIDsLocked(n.ID) } +// UpdateNode updates or adds the input node, then triggers a netmap update for +// all attached streaming clients. +func (s *Server) UpdateNode(n *tailcfg.Node) { + s.mu.Lock() + defer s.mu.Unlock() + s.updateNodeLocked(n) + s.updateLocked("UpdateNode", s.nodeIDsLocked(0)) +} + func (s *Server) incrInServeMap(delta int) { s.mu.Lock() defer s.mu.Unlock() @@ -1143,7 +1150,9 @@ func (s *Server) serveMap(w http.ResponseWriter, r *http.Request, mkey key.Machi } } } - peersToUpdate = s.UpdateNode(node) + s.mu.Lock() + peersToUpdate = s.updateNodeLocked(node) + s.mu.Unlock() } nodeID := node.ID From 976aa940ec2b4600b1a6dc53364da8388df25ef6 Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Wed, 18 Feb 2026 11:54:09 -0500 Subject: [PATCH 0989/1093] ipn/ipnlocal, cmd/tailscale: use wildcard. prefix for cert filenames (#18748) Stop stripping the "*." prefix from wildcard domains when used as storage keys. Instead, replace "*" with "wildcard_" only at the filesystem boundary in certFile and keyFile. This prevents wildcard and non-wildcard certs from colliding in storage. 
Updates #1196 Updates #7081 Signed-off-by: Fernando Serboncini --- cmd/tailscale/cli/cert.go | 5 +++-- ipn/ipnlocal/cert.go | 32 ++++++++++++++++---------------- ipn/ipnlocal/cert_test.go | 2 +- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/cmd/tailscale/cli/cert.go b/cmd/tailscale/cli/cert.go index f38ddbacf1804..6d78a8d8abf5f 100644 --- a/cmd/tailscale/cli/cert.go +++ b/cmd/tailscale/cli/cert.go @@ -108,8 +108,9 @@ func runCert(ctx context.Context, args []string) error { log.SetFlags(0) } if certArgs.certFile == "" && certArgs.keyFile == "" { - certArgs.certFile = domain + ".crt" - certArgs.keyFile = domain + ".key" + fileBase := strings.Replace(domain, "*.", "wildcard_.", 1) + certArgs.certFile = fileBase + ".crt" + certArgs.keyFile = fileBase + ".key" } certPEM, keyPEM, err := localClient.CertPairWithValidity(ctx, domain, certArgs.minValidity) if err != nil { diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 027e7c810778b..efab9db7aad6e 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -130,8 +130,6 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string if err != nil { return nil, err } - storageKey := strings.TrimPrefix(certDomain, "*.") - logf := logger.WithPrefix(b.logf, fmt.Sprintf("cert(%q): ", domain)) now := b.clock.Now() traceACME := func(v any) { @@ -147,13 +145,13 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string return nil, err } - if pair, err := getCertPEMCached(cs, storageKey, now); err == nil { + if pair, err := getCertPEMCached(cs, certDomain, now); err == nil { if envknob.IsCertShareReadOnlyMode() { return pair, nil } // If we got here, we have a valid unexpired cert. // Check whether we should start an async renewal. 
- shouldRenew, err := b.shouldStartDomainRenewal(cs, storageKey, now, pair, minValidity) + shouldRenew, err := b.shouldStartDomainRenewal(cs, certDomain, now, pair, minValidity) if err != nil { logf("error checking for certificate renewal: %v", err) // Renewal check failed, but the current cert is valid and not @@ -501,8 +499,12 @@ func (kp TLSCertKeyPair) parseCertificate() (*x509.Certificate, error) { return x509.ParseCertificate(block.Bytes) } -func keyFile(dir, domain string) string { return filepath.Join(dir, domain+".key") } -func certFile(dir, domain string) string { return filepath.Join(dir, domain+".crt") } +func keyFile(dir, domain string) string { + return filepath.Join(dir, strings.Replace(domain, "*.", "wildcard_.", 1)+".key") +} +func certFile(dir, domain string) string { + return filepath.Join(dir, strings.Replace(domain, "*.", "wildcard_.", 1)+".crt") +} // getCertPEMCached returns a non-nil keyPair if a cached keypair for domain // exists on disk in dir that is valid at the provided now time. @@ -525,18 +527,16 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l acmeMu.Lock() defer acmeMu.Unlock() - // storageKey is used for file storage and renewal tracking. - // For wildcards, "*.node.ts.net" -> "node.ts.net" - storageKey, isWildcard := strings.CutPrefix(domain, "*.") + baseDomain, isWildcard := strings.CutPrefix(domain, "*.") // In case this method was triggered multiple times in parallel (when // serving incoming requests), check whether one of the other goroutines // already renewed the cert before us. - previous, err := getCertPEMCached(cs, storageKey, now) + previous, err := getCertPEMCached(cs, domain, now) if err == nil { // shouldStartDomainRenewal caches its result so it's OK to call this // frequently. 
- shouldRenew, err := b.shouldStartDomainRenewal(cs, storageKey, now, previous, minValidity) + shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, previous, minValidity) if err != nil { logf("error checking for certificate renewal: %v", err) } else if !shouldRenew { @@ -598,7 +598,7 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l if isWildcard { authzIDs = []acme.AuthzID{ {Type: "dns", Value: domain}, - {Type: "dns", Value: storageKey}, + {Type: "dns", Value: baseDomain}, } } else { authzIDs = []acme.AuthzID{{Type: "dns", Value: domain}} @@ -697,10 +697,10 @@ var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf l return nil, err } } - if err := cs.WriteTLSCertAndKey(storageKey, certPEM.Bytes(), privPEM.Bytes()); err != nil { + if err := cs.WriteTLSCertAndKey(domain, certPEM.Bytes(), privPEM.Bytes()); err != nil { return nil, err } - b.domainRenewed(storageKey) + b.domainRenewed(domain) return &TLSCertKeyPair{CertPEM: certPEM.Bytes(), KeyPEM: privPEM.Bytes()}, nil } @@ -924,7 +924,7 @@ func (b *LocalBackend) resolveCertDomain(domain string) (string, error) { return "", fmt.Errorf("wildcard certificates are not enabled for this node") } if !slices.Contains(certDomains, base) { - return "", fmt.Errorf("invalid domain %q; parent domain must be one of %q", domain, certDomains) + return "", fmt.Errorf("invalid domain %q; wildcard certificates are not enabled for this domain", domain) } return domain, nil } @@ -951,7 +951,7 @@ func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Requ return } - domain := strings.TrimPrefix(r.FormValue("domain"), "*.") + domain := r.FormValue("domain") if domain == "" { http.Error(w, "no 'domain'", http.StatusBadRequest) return diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index b8acb710ac7d3..cc9146ae1e055 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -139,7 +139,7 @@ func 
TestResolveCertDomain(t *testing.T) { domain: "*.unrelated.ts.net", certDomains: []string{"node.ts.net"}, hasCap: true, - wantErr: `invalid domain "*.unrelated.ts.net"; parent domain must be one of ["node.ts.net"]`, + wantErr: `invalid domain "*.unrelated.ts.net"; wildcard certificates are not enabled for this domain`, }, { name: "subdomain_unrelated_rejected", From 9acf22f9dfd993acd23d33003e05bc68124f9187 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 18 Feb 2026 14:12:16 -0500 Subject: [PATCH 0990/1093] netmon: use State AnyInterfaceUp in ChangeDelta (#18752) fixes tailscale/corp#37048 We're duplicating logic in AnyInterfaceUp in the ChangeDelta and we're duplicating it wrong. The new State has the logic for this based on the HaveV6 and HaveV4 flags. Signed-off-by: Jonathan Nobels --- net/netmon/netmon.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index c30010ee407da..1d51379d86e31 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -201,12 +201,7 @@ func (cd *ChangeDelta) AnyInterfaceUp() bool { if cd.new == nil { return false } - for _, ifi := range cd.new.Interface { - if ifi.IsUp() { - return true - } - } - return false + return cd.new.AnyInterfaceUp() } // isInterestingInterfaceChange reports whether any interfaces have changed in a meaningful way. From 7fb61e176575ce0e5c148f01b09105ea4c661429 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 19 Feb 2026 16:06:12 +0000 Subject: [PATCH 0991/1093] cmd/cigocacher: make --stats flag best-effort (#18761) --auth is already best-effort, but we saw some CI failures due to failing to fetch stats when cigocached was overwhelmed recently. Make sure it fails more gracefully in the absence of cigocached like the rest of cigocacher already does. 
Updates tailscale/corp#37059 Change-Id: I0703b30b1c5a7f8c649879a87e6bcd2278610208 Signed-off-by: Tom Proctor --- cmd/cigocacher/cigocacher.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/cmd/cigocacher/cigocacher.go b/cmd/cigocacher/cigocacher.go index 1e4326ebcb6be..74ed083679743 100644 --- a/cmd/cigocacher/cigocacher.go +++ b/cmd/cigocacher/cigocacher.go @@ -103,9 +103,19 @@ func main() { } stats, err := fetchStats(httpClient(srvHost, *srvHostDial), *srvURL, tk) if err != nil { - log.Fatalf("error fetching gocached stats: %v", err) + // Errors that are not due to misconfiguration are non-fatal so we + // don't fail builds if e.g. cigocached is down. + // + // Print error as JSON so it can still be piped through jq. + statsErr := map[string]any{ + "error": fmt.Sprintf("fetching gocached stats: %v", err), + } + b, _ := jsonv1.Marshal(statsErr) + fmt.Println(string(b)) + } else { + fmt.Println(stats) } - fmt.Println(stats) + return } From c208ba2615bd4df47fb7dada25dc6bd28a94358d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 09:31:50 -0800 Subject: [PATCH 0992/1093] .github: Bump actions/setup-go from 5.5.0 to 6.2.0 (#18455) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.5.0 to 6.2.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v5.5.0...7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5) --- updated-dependencies: - dependency-name: actions/setup-go dependency-version: 6.2.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index e66d6454a9847..9363facaa2aa2 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -49,7 +49,7 @@ jobs: # Install a more recent Go that understands modern go.mod content. - name: Install Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 with: go-version-file: go.mod diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 684a094e26560..22d9d3c467ad9 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -29,7 +29,7 @@ jobs: steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 + - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 with: go-version-file: go.mod cache: true diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index cdf8f3f5f69d1..57a638d2977da 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -245,7 +245,7 @@ jobs: path: ${{ github.workspace }}/src - name: Install Go - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 with: go-version-file: ${{ github.workspace }}/src/go.mod cache: false From 6e76db73a9830ac414b9e69195d30ffe62d8293e Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Thu, 19 Feb 2026 10:01:33 -0800 Subject: [PATCH 0993/1093] go.mod: bump filippo.io/edwards25519 (#18765) 
Pick up a fix for CVE-2026-26958. Fixes #18756 Signed-off-by: Andrew Lytvynov --- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 3 ++- shell.nix | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/flake.nix b/flake.nix index 6fc0ff28a906f..af860c09ce627 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-JD1PZPZT5clhRWIAQO8skBRN59QPiyfTc7nPYTvGbd8= +# nix-direnv cache busting line: sha256-A3mjdGE6B5t6sdkHieZZGVYlCyvhdrcqpNaHxISAPuk= diff --git a/go.mod b/go.mod index 7b062afbf4ccf..468b085a180a0 100644 --- a/go.mod +++ b/go.mod @@ -270,7 +270,7 @@ require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect 4d63.com/gochecknoglobals v0.2.1 // indirect dario.cat/mergo v1.0.1 // indirect - filippo.io/edwards25519 v1.1.0 // indirect + filippo.io/edwards25519 v1.2.0 // indirect github.com/Abirdcfly/dupword v0.0.14 // indirect github.com/AlekSi/pointer v1.2.0 github.com/Antonboom/errname v0.1.12 // indirect diff --git a/go.mod.sri b/go.mod.sri index e5d18033ae976..7cab0c4024e7e 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-JD1PZPZT5clhRWIAQO8skBRN59QPiyfTc7nPYTvGbd8= +sha256-A3mjdGE6B5t6sdkHieZZGVYlCyvhdrcqpNaHxISAPuk= diff --git a/go.sum b/go.sum index 299fe95cd84ad..10febf73a81b8 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,9 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +filippo.io/edwards25519 v1.2.0 h1:crnVqOiS4jqYleHd9vaKZ+HKtHfllngJIiOpNpoJsjo= +filippo.io/edwards25519 v1.2.0/go.mod 
h1:xzAOLCNug/yB62zG1bQ8uziwrIqIuxhctzJT18Q77mc= filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc= filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA= fyne.io/systray v1.11.1-0.20250812065214-4856ac3adc3c h1:km4PIleGtbbF1oxmFQuO93CyNCldwuRTPB8WlzNWNZs= diff --git a/shell.nix b/shell.nix index 9fab641722dca..652e41a94469b 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-JD1PZPZT5clhRWIAQO8skBRN59QPiyfTc7nPYTvGbd8= +# nix-direnv cache busting line: sha256-A3mjdGE6B5t6sdkHieZZGVYlCyvhdrcqpNaHxISAPuk= From f1509d27cc1ed6c02b8e69f183ad478c135b729f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 10:22:22 -0800 Subject: [PATCH 0994/1093] build(deps): bump lodash from 4.17.21 to 4.17.23 in /client/web (#18476) Bumps [lodash](https://github.com/lodash/lodash) from 4.17.21 to 4.17.23. - [Release notes](https://github.com/lodash/lodash/releases) - [Commits](https://github.com/lodash/lodash/compare/4.17.21...4.17.23) --- updated-dependencies: - dependency-name: lodash dependency-version: 4.17.23 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- client/web/yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/client/web/yarn.lock b/client/web/yarn.lock index e8e5f5bb66450..106a104b98d26 100644 --- a/client/web/yarn.lock +++ b/client/web/yarn.lock @@ -4088,9 +4088,9 @@ lodash.merge@^4.6.2: integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== lodash@^4.17.21: - version "4.17.21" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" - integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + version "4.17.23" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.23.tgz#f113b0378386103be4f6893388c73d0bde7f2c5a" + integrity sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w== loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: version "1.4.0" From c0446aa4e17d288085e98503cadb200e15018dc0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 10:56:39 -0800 Subject: [PATCH 0995/1093] .github: Bump DeterminateSystems/nix-installer-action from 20 to 21 (#18453) Bumps [DeterminateSystems/nix-installer-action](https://github.com/determinatesystems/nix-installer-action) from 20 to 21. - [Release notes](https://github.com/determinatesystems/nix-installer-action/releases) - [Commits](https://github.com/determinatesystems/nix-installer-action/compare/786fff0690178f1234e4e1fe9b536e94f5433196...c5a866b6ab867e88becbed4467b93592bce69f8a) --- updated-dependencies: - dependency-name: DeterminateSystems/nix-installer-action dependency-version: '21' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/flakehub-publish-tagged.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/flakehub-publish-tagged.yml b/.github/workflows/flakehub-publish-tagged.yml index 8b3f44338026a..798e1708a1c2a 100644 --- a/.github/workflows/flakehub-publish-tagged.yml +++ b/.github/workflows/flakehub-publish-tagged.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}" - - uses: DeterminateSystems/nix-installer-action@786fff0690178f1234e4e1fe9b536e94f5433196 # v20 + - uses: DeterminateSystems/nix-installer-action@c5a866b6ab867e88becbed4467b93592bce69f8a # v21 - uses: DeterminateSystems/flakehub-push@71f57208810a5d299fc6545350981de98fdbc860 # v6 with: visibility: "public" From f5d1202988556d52c4a07a1083e36a2171a061b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 11:14:58 -0800 Subject: [PATCH 0996/1093] build(deps): bump postcss from 8.4.14 to 8.4.31 in /cmd/tsconnect (#9698) Bumps [postcss](https://github.com/postcss/postcss) from 8.4.14 to 8.4.31. - [Release notes](https://github.com/postcss/postcss/releases) - [Changelog](https://github.com/postcss/postcss/blob/main/CHANGELOG.md) - [Commits](https://github.com/postcss/postcss/compare/8.4.14...8.4.31) --- updated-dependencies: - dependency-name: postcss dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cmd/tsconnect/yarn.lock | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/cmd/tsconnect/yarn.lock b/cmd/tsconnect/yarn.lock index d9d9db32f66a0..46d86c1ee4df1 100644 --- a/cmd/tsconnect/yarn.lock +++ b/cmd/tsconnect/yarn.lock @@ -348,10 +348,10 @@ minimist@^1.2.6: resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.6.tgz#8637a5b759ea0d6e98702cfb3a9283323c93af44" integrity sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q== -nanoid@^3.3.4: - version "3.3.8" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" - integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== +nanoid@^3.3.11: + version "3.3.11" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.11.tgz#4f4f112cefbe303202f2199838128936266d185b" + integrity sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w== normalize-path@^3.0.0, normalize-path@~3.0.0: version "3.0.0" @@ -397,6 +397,11 @@ picocolors@^1.0.0: resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== +picocolors@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" + integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== + picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" @@ -457,13 +462,13 @@ postcss-value-parser@^4.0.0, 
postcss-value-parser@^4.2.0: integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== postcss@^8.4.14: - version "8.4.14" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.14.tgz#ee9274d5622b4858c1007a74d76e42e56fd21caf" - integrity sha512-E398TUmfAYFPBSdzgeieK2Y1+1cpdxJx8yXbK/m57nRhKSmk1GB2tO4lbLBtlkfPQTDKfe4Xqv1ASWPpayPEig== + version "8.5.6" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.5.6.tgz#2825006615a619b4f62a9e7426cc120b349a8f3c" + integrity sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg== dependencies: - nanoid "^3.3.4" - picocolors "^1.0.0" - source-map-js "^1.0.2" + nanoid "^3.3.11" + picocolors "^1.1.1" + source-map-js "^1.2.1" preact@^10.10.0: version "10.10.0" @@ -540,10 +545,10 @@ set-blocking@^2.0.0: resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" integrity sha1-BF+XgtARrppoA93TgrJDkrPYkPc= -source-map-js@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c" - integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw== +source-map-js@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" + integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: version "4.2.3" From 03d0f6c356886e4ab4e7f0bc9a5a8c0d270f97a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 11:22:09 -0800 Subject: [PATCH 0997/1093] build(deps): bump github.com/go-git/go-git/v5 from 5.13.1 to 5.16.5 (#18667) Bumps [github.com/go-git/go-git/v5](https://github.com/go-git/go-git) 
from 5.13.1 to 5.16.5. - [Release notes](https://github.com/go-git/go-git/releases) - [Commits](https://github.com/go-git/go-git/compare/v5.13.1...v5.16.5) --- updated-dependencies: - dependency-name: github.com/go-git/go-git/v5 dependency-version: 5.16.5 dependency-type: indirect ... Signed-off-by: dependabot[bot] Signed-off-by: Andrew Lytvynov Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- flake.nix | 2 +- go.mod | 8 ++++---- go.mod.sri | 2 +- go.sum | 20 ++++++++++---------- shell.nix | 2 +- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/flake.nix b/flake.nix index af860c09ce627..726d9b7762284 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-A3mjdGE6B5t6sdkHieZZGVYlCyvhdrcqpNaHxISAPuk= +# nix-direnv cache busting line: sha256-8J1iLhnLzrLrh6MFLcCyO+iYT0jjczxNDW3O6a6f+xM= diff --git a/go.mod b/go.mod index 468b085a180a0..7d9684a129920 100644 --- a/go.mod +++ b/go.mod @@ -281,7 +281,7 @@ require ( github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect - github.com/ProtonMail/go-crypto v1.1.3 // indirect + github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect @@ -334,7 +334,7 @@ require ( github.com/go-critic/go-critic v0.11.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.2 // indirect - github.com/go-git/go-git/v5 v5.13.1 // indirect + github.com/go-git/go-git/v5 v5.16.5 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect @@ -420,7 +420,7 @@ require ( github.com/opencontainers/image-spec v1.1.1 // indirect 
github.com/pelletier/go-toml/v2 v2.2.0 // indirect github.com/pierrec/lz4/v4 v4.1.25 // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polyfloyd/go-errorlint v1.4.8 // indirect @@ -444,7 +444,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/tenv v1.7.1 // indirect - github.com/skeema/knownhosts v1.3.0 // indirect + github.com/skeema/knownhosts v1.3.1 // indirect github.com/sonatard/noctx v0.0.2 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // indirect diff --git a/go.mod.sri b/go.mod.sri index 7cab0c4024e7e..b92d32a95a925 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-A3mjdGE6B5t6sdkHieZZGVYlCyvhdrcqpNaHxISAPuk= +sha256-8J1iLhnLzrLrh6MFLcCyO+iYT0jjczxNDW3O6a6f+xM= diff --git a/go.sum b/go.sum index 10febf73a81b8..ca3d0ca21abe0 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= -github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk= -github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw= +github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k= 
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw= github.com/ProtonMail/gopenpgp/v2 v2.7.1 h1:Awsg7MPc2gD3I7IFac2qE3Gdls0lZW8SzrFZ3k1oz0s= @@ -337,8 +337,8 @@ github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40= github.com/elastic/crd-ref-docs v0.0.12 h1:F3seyncbzUz3rT3d+caeYWhumb5ojYQ6Bl0Z+zOp16M= github.com/elastic/crd-ref-docs v0.0.12/go.mod h1:X83mMBdJt05heJUYiS3T0yJ/JkCuliuhSUNav5Gjo/U= -github.com/elazarl/goproxy v1.2.3 h1:xwIyKHbaP5yfT6O9KIeYJR5549MXRQkoQMRXGztz8YQ= -github.com/elazarl/goproxy v1.2.3/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= +github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= +github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= @@ -398,8 +398,8 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= -github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc= +github.com/go-git/go-git/v5 v5.16.5 h1:mdkuqblwr57kVfXri5TTH+nMFLNUxIj9Z7F5ykFbw5s= +github.com/go-git/go-git/v5 v5.16.5/go.mod h1:QOMLpNf1qxuSY4StA/ArOdfFR2TrKEjJiye2kel2m+M= 
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -940,8 +940,8 @@ github.com/pierrec/lz4/v4 v4.1.25 h1:kocOqRffaIbU5djlIBr7Wh+cx82C0vtFb0fOurZHqD0 github.com/pierrec/lz4/v4 v4.1.25/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaASJgp0= github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= +github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -1063,8 +1063,8 @@ github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+W github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= -github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= -github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= +github.com/skeema/knownhosts v1.3.1 
h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= diff --git a/shell.nix b/shell.nix index 652e41a94469b..b5c3b82cae751 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-A3mjdGE6B5t6sdkHieZZGVYlCyvhdrcqpNaHxISAPuk= +# nix-direnv cache busting line: sha256-8J1iLhnLzrLrh6MFLcCyO+iYT0jjczxNDW3O6a6f+xM= From 03247a35d5e3d811a547214f0b336a999d603743 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 11:22:34 -0800 Subject: [PATCH 0998/1093] .github: Bump actions/create-github-app-token from 2.0.6 to 2.2.1 (#18388) Bumps [actions/create-github-app-token](https://github.com/actions/create-github-app-token) from 2.0.6 to 2.2.1. - [Release notes](https://github.com/actions/create-github-app-token/releases) - [Commits](https://github.com/actions/create-github-app-token/compare/df432ceedc7162793a195dd1713ff69aefc7379e...29824e69f54612133e76f7eaac726eef6c875baf) --- updated-dependencies: - dependency-name: actions/create-github-app-token dependency-version: 2.2.1 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/request-dataplane-review.yml | 2 +- .github/workflows/update-flake.yml | 2 +- .github/workflows/update-webclient-prebuilt.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml index 7ca3b98022ce7..2e30ba06d4629 100644 --- a/.github/workflows/request-dataplane-review.yml +++ b/.github/workflows/request-dataplane-review.yml @@ -18,7 +18,7 @@ jobs: - name: Check out code uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Get access token - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 + uses: actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2.2.1 id: generate-token with: # Get token for app: https://github.com/apps/change-visibility-bot diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 69c954384e9bc..0c40758543458 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -27,7 +27,7 @@ jobs: run: ./update-flake.sh - name: Get access token - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 + uses: actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2.2.1 id: generate-token with: # Get token for app: https://github.com/apps/tailscale-code-updater diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index c302e4f2091ca..2f4f676c5d354 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -23,7 +23,7 @@ jobs: ./tool/go mod tidy - name: Get access token - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 + uses: 
actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2.2.1 id: generate-token with: # Get token for app: https://github.com/apps/tailscale-code-updater From 9e31a68547eb3d729c11b3a7efaca48cafba5104 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 11:35:59 -0800 Subject: [PATCH 0999/1093] build(deps): bump micromatch from 4.0.5 to 4.0.8 in /cmd/tsconnect (#13335) Bumps [micromatch](https://github.com/micromatch/micromatch) from 4.0.5 to 4.0.8. - [Release notes](https://github.com/micromatch/micromatch/releases) - [Changelog](https://github.com/micromatch/micromatch/blob/master/CHANGELOG.md) - [Commits](https://github.com/micromatch/micromatch/compare/4.0.5...4.0.8) --- updated-dependencies: - dependency-name: micromatch dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- cmd/tsconnect/yarn.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/tsconnect/yarn.lock b/cmd/tsconnect/yarn.lock index 46d86c1ee4df1..5ab282dcb2ead 100644 --- a/cmd/tsconnect/yarn.lock +++ b/cmd/tsconnect/yarn.lock @@ -89,7 +89,7 @@ binary-extensions@^2.0.0: resolved "https://registry.yarnpkg.com/binary-extensions/-/binary-extensions-2.2.0.tgz#75f502eeaf9ffde42fc98829645be4ea76bd9e2d" integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA== -braces@^3.0.2, braces@~3.0.2: +braces@^3.0.3, braces@~3.0.2: version "3.0.3" resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== @@ -336,11 +336,11 @@ merge2@^1.3.0: integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== micromatch@^4.0.4: - version "4.0.5" - resolved 
"https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" - integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== + version "4.0.8" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== dependencies: - braces "^3.0.2" + braces "^3.0.3" picomatch "^2.3.1" minimist@^1.2.6: From c38d1badba578e41da4c10d3b4d2e2da61326950 Mon Sep 17 00:00:00 2001 From: Amal Bansode Date: Thu, 19 Feb 2026 11:39:16 -0800 Subject: [PATCH 1000/1093] cmd/tailscale/cli: add bind-address and bind-port flags to netcheck command (#18621) Add more explicit `--bind-address` and `--bind-port` flags to the `tailscale netcheck` CLI to give users control over UDP probes' source IP and UDP port. This was already supported in a less documented manner via the` TS_DEBUG_NETCHECK_UDP_BIND` environment variable. The environment variable reference is preserved and used as a fallback value in the absence of these new CLI flags. Updates tailscale/corp#36833 Signed-off-by: Amal Bansode --- cmd/tailscale/cli/netcheck.go | 87 ++++++++++++++++++++--- cmd/tailscale/cli/netcheck_test.go | 108 +++++++++++++++++++++++++++++ 2 files changed, 184 insertions(+), 11 deletions(-) create mode 100644 cmd/tailscale/cli/netcheck_test.go diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index c9cbce29a32cf..5e45445c79cd5 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -10,7 +10,9 @@ import ( "fmt" "io" "log" + "math" "net/http" + "net/netip" "sort" "strings" "time" @@ -26,6 +28,7 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/eventbus" + "tailscale.com/util/set" // The "netcheck" command also wants the portmapper linked. 
// @@ -41,19 +44,25 @@ var netcheckCmd = &ffcli.Command{ ShortUsage: "tailscale netcheck", ShortHelp: "Print an analysis of local network conditions", Exec: runNetcheck, - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("netcheck") - fs.StringVar(&netcheckArgs.format, "format", "", `output format; empty (for human-readable), "json" or "json-line"`) - fs.DurationVar(&netcheckArgs.every, "every", 0, "if non-zero, do an incremental report with the given frequency") - fs.BoolVar(&netcheckArgs.verbose, "verbose", false, "verbose logs") - return fs - })(), + FlagSet: netcheckFlagSet, } +var netcheckFlagSet = func() *flag.FlagSet { + fs := newFlagSet("netcheck") + fs.StringVar(&netcheckArgs.format, "format", "", `output format; empty (for human-readable), "json" or "json-line"`) + fs.DurationVar(&netcheckArgs.every, "every", 0, "if non-zero, do an incremental report with the given frequency") + fs.BoolVar(&netcheckArgs.verbose, "verbose", false, "verbose logs") + fs.StringVar(&netcheckArgs.bindAddress, "bind-address", "", "send and receive connectivity probes using this locally bound IP address; default: OS-assigned") + fs.IntVar(&netcheckArgs.bindPort, "bind-port", 0, "send and receive connectivity probes using this UDP port; default: OS-assigned") + return fs +}() + var netcheckArgs struct { - format string - every time.Duration - verbose bool + format string + every time.Duration + verbose bool + bindAddress string + bindPort int } func runNetcheck(ctx context.Context, args []string) error { @@ -73,6 +82,11 @@ func runNetcheck(ctx context.Context, args []string) error { defer pm.Close() } + flagsProvided := set.Set[string]{} + netcheckFlagSet.Visit(func(f *flag.Flag) { + flagsProvided.Add(f.Name) + }) + c := &netcheck.Client{ NetMon: netMon, PortMapper: pm, @@ -89,7 +103,17 @@ func runNetcheck(ctx context.Context, args []string) error { fmt.Fprintln(Stderr, "# Warning: this JSON format is not yet considered a stable interface") } - if err := c.Standalone(ctx, 
envknob.String("TS_DEBUG_NETCHECK_UDP_BIND")); err != nil { + bind, err := createNetcheckBindString( + netcheckArgs.bindAddress, + flagsProvided.Contains("bind-address"), + netcheckArgs.bindPort, + flagsProvided.Contains("bind-port"), + envknob.String("TS_DEBUG_NETCHECK_UDP_BIND")) + if err != nil { + return err + } + + if err := c.Standalone(ctx, bind); err != nil { fmt.Fprintln(Stderr, "netcheck: UDP test failure:", err) } @@ -265,3 +289,44 @@ func prodDERPMap(ctx context.Context, httpc *http.Client) (*tailcfg.DERPMap, err } return &derpMap, nil } + +// createNetcheckBindString determines the netcheck socket bind "address:port" string based +// on the CLI args and environment variable values used to invoke the netcheck CLI. +// Arguments cliAddressIsSet and cliPortIsSet explicitly indicate whether the +// corresponding cliAddress and cliPort were set in CLI args, instead of relying +// on in-band sentinel values. +func createNetcheckBindString(cliAddress string, cliAddressIsSet bool, cliPort int, cliPortIsSet bool, envBind string) (string, error) { + // Default to port number 0 but overwrite with a valid CLI value, if set. + var port uint16 = 0 + if cliPortIsSet { + // 0 is valid, results in OS picking port. + if cliPort >= 0 && cliPort <= math.MaxUint16 { + port = uint16(cliPort) + } else { + return "", fmt.Errorf("invalid bind port number: %d", cliPort) + } + } + + // Use CLI address, if set. + if cliAddressIsSet { + addr, err := netip.ParseAddr(cliAddress) + if err != nil { + return "", fmt.Errorf("invalid bind address: %q", cliAddress) + } + return netip.AddrPortFrom(addr, port).String(), nil + } else { + // No CLI address set, but port is set. + if cliPortIsSet { + return fmt.Sprintf(":%d", port), nil + } + } + + // Fall back to the environment variable. + // Intentionally skipping input validation here to avoid breaking legacy usage method. + if envBind != "" { + return envBind, nil + } + + // OS picks both address and port. 
+ return ":0", nil +} diff --git a/cmd/tailscale/cli/netcheck_test.go b/cmd/tailscale/cli/netcheck_test.go new file mode 100644 index 0000000000000..b2c2bceb39dc9 --- /dev/null +++ b/cmd/tailscale/cli/netcheck_test.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "testing" +) + +func TestCreateBindStr(t *testing.T) { + // Test all combinations of CLI arg address, CLI arg port, and env var string + // as inputs to create netcheck bind string. + tests := []struct { + name string + cliAddress string + cliAddressIsSet bool + cliPort int + cliPortIsSet bool + envBind string + want string + wantError string + }{ + { + name: "noAddr-noPort-noEnv", + want: ":0", + }, + { + name: "yesAddrv4-noPort-noEnv", + cliAddress: "100.123.123.123", + cliAddressIsSet: true, + want: "100.123.123.123:0", + }, + { + name: "yesAddrv6-noPort-noEnv", + cliAddress: "dead::beef", + cliAddressIsSet: true, + want: "[dead::beef]:0", + }, + { + name: "yesAddr-yesPort-noEnv", + cliAddress: "100.123.123.123", + cliAddressIsSet: true, + cliPort: 456, + cliPortIsSet: true, + want: "100.123.123.123:456", + }, + { + name: "yesAddr-yesPort-yesEnv", + cliAddress: "100.123.123.123", + cliAddressIsSet: true, + cliPort: 456, + cliPortIsSet: true, + envBind: "55.55.55.55:789", + want: "100.123.123.123:456", + }, + { + name: "noAddr-yesPort-noEnv", + cliPort: 456, + cliPortIsSet: true, + want: ":456", + }, + { + name: "noAddr-yesPort-yesEnv", + cliPort: 456, + cliPortIsSet: true, + envBind: "55.55.55.55:789", + want: ":456", + }, + { + name: "noAddr-noPort-yesEnv", + envBind: "55.55.55.55:789", + want: "55.55.55.55:789", + }, + { + name: "badAddr-noPort-noEnv-1", + cliAddress: "678.678.678.678", + cliAddressIsSet: true, + wantError: `invalid bind address: "678.678.678.678"`, + }, + { + name: "badAddr-noPort-noEnv-2", + cliAddress: "lorem ipsum", + cliAddressIsSet: true, + wantError: `invalid bind address: "lorem ipsum"`, 
+ }, + { + name: "noAddr-badPort-noEnv", + cliPort: -1, + cliPortIsSet: true, + wantError: "invalid bind port number: -1", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, gotErr := createNetcheckBindString(tt.cliAddress, tt.cliAddressIsSet, tt.cliPort, tt.cliPortIsSet, tt.envBind) + var gotErrStr string + if gotErr != nil { + gotErrStr = gotErr.Error() + } + if gotErrStr != tt.wantError { + t.Errorf("got error %q; want error %q", gotErrStr, tt.wantError) + } + if got != tt.want { + t.Errorf("got result %q; want result %q", got, tt.want) + } + }) + } +} From d9d95db0bbde36b57099fa684e439c2cfbf3b20b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 11:51:42 -0800 Subject: [PATCH 1001/1093] build(deps): bump github.com/go-viper/mapstructure/v2 (#16914) Bumps [github.com/go-viper/mapstructure/v2](https://github.com/go-viper/mapstructure) from 2.0.0-alpha.1 to 2.4.0. - [Release notes](https://github.com/go-viper/mapstructure/releases) - [Changelog](https://github.com/go-viper/mapstructure/blob/main/CHANGELOG.md) - [Commits](https://github.com/go-viper/mapstructure/compare/v2.0.0-alpha.1...v2.4.0) --- updated-dependencies: - dependency-name: github.com/go-viper/mapstructure/v2 dependency-version: 2.4.0 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.nix b/flake.nix index 726d9b7762284..37c971f1111ce 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-8J1iLhnLzrLrh6MFLcCyO+iYT0jjczxNDW3O6a6f+xM= +# nix-direnv cache busting line: sha256-phgPg9fDR/rTJaVItwxAaqNCUR3CAkTVBxnuRRt3Kts= diff --git a/go.mod b/go.mod index 7d9684a129920..507275ba1595e 100644 --- a/go.mod +++ b/go.mod @@ -180,7 +180,7 @@ require ( github.com/go-errors/errors v1.4.2 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/goccy/go-yaml v1.12.0 // indirect github.com/gokrazy/gokapi v0.0.0-20250222071133-506fdb322775 // indirect diff --git a/go.mod.sri b/go.mod.sri index b92d32a95a925..a587699c5e39c 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-8J1iLhnLzrLrh6MFLcCyO+iYT0jjczxNDW3O6a6f+xM= +sha256-phgPg9fDR/rTJaVItwxAaqNCUR3CAkTVBxnuRRt3Kts= diff --git a/go.sum b/go.sum index ca3d0ca21abe0..18db5ef22d475 100644 --- a/go.sum +++ b/go.sum @@ -461,8 +461,8 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod 
h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo= diff --git a/shell.nix b/shell.nix index b5c3b82cae751..fa19d968228ef 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-8J1iLhnLzrLrh6MFLcCyO+iYT0jjczxNDW3O6a6f+xM= +# nix-direnv cache busting line: sha256-phgPg9fDR/rTJaVItwxAaqNCUR3CAkTVBxnuRRt3Kts= From 2a60d0a007ea5803e6576b05367a207586993441 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 11:54:06 -0800 Subject: [PATCH 1002/1093] .github: Bump github/codeql-action from 3.29.8 to 4.31.10 (#18454) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.29.8 to 4.31.10. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/76621b61decf072c1cee8dd1ce2d2a82d33c17ed...cdefb33c0f6224e58673d9004f47f7cb3e328b89) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.31.10 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 9363facaa2aa2..39133dc40c3dd 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5 + uses: github/codeql-action/init@9e907b5e64f6b83e7804b09294d44122997950d6 # v4.32.3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5 + uses: github/codeql-action/autobuild@9e907b5e64f6b83e7804b09294d44122997950d6 # v4.32.3 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@76621b61decf072c1cee8dd1ce2d2a82d33c17ed # v3.29.5 + uses: github/codeql-action/analyze@9e907b5e64f6b83e7804b09294d44122997950d6 # v4.32.3 From cae54e204640b785a4a95619f714ee441997aa3b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Feb 2026 13:05:15 -0800 Subject: [PATCH 1003/1093] build(deps): bump github.com/docker/docker (#13081) Bumps [github.com/docker/docker](https://github.com/docker/docker) from 26.1.4+incompatible to 26.1.5+incompatible. 
- [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v26.1.4...v26.1.5) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- flake.nix | 2 +- go.mod | 15 ++++++++------- go.mod.sri | 2 +- go.sum | 34 ++++++++++++++++++++-------------- shell.nix | 2 +- 5 files changed, 31 insertions(+), 24 deletions(-) diff --git a/flake.nix b/flake.nix index 37c971f1111ce..e15eeca6a664f 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-phgPg9fDR/rTJaVItwxAaqNCUR3CAkTVBxnuRRt3Kts= +# nix-direnv cache busting line: sha256-4orp8iQekVbhCFpt7DXLvj6dediKxo1qkWr1oe7+RaE= diff --git a/go.mod b/go.mod index 507275ba1595e..80b453cd5a9f7 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/golangci/golangci-lint v1.57.1 github.com/google/go-cmp v0.7.0 - github.com/google/go-containerregistry v0.20.3 + github.com/google/go-containerregistry v0.20.7 github.com/google/go-tpm v0.9.4 github.com/google/gopacket v1.1.19 github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 @@ -114,7 +114,7 @@ require ( golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b golang.org/x/mod v0.30.0 golang.org/x/net v0.48.0 - golang.org/x/oauth2 v0.32.0 + golang.org/x/oauth2 v0.33.0 golang.org/x/sync v0.19.0 golang.org/x/sys v0.40.0 golang.org/x/term v0.38.0 @@ -164,6 +164,7 @@ require ( github.com/ckaznocha/intrange v0.1.0 // indirect github.com/containerd/containerd v1.7.29 // indirect github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v1.0.0-rc.2 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect @@ -313,15 +314,15 @@ require ( 
github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect github.com/cloudflare/circl v1.6.1 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect github.com/daixiang0/gci v0.12.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect - github.com/docker/cli v27.5.1+incompatible // indirect + github.com/docker/cli v29.0.3+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v27.5.1+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/docker v28.5.2+incompatible // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/ettle/strcase v0.2.0 // indirect @@ -471,7 +472,7 @@ require ( github.com/ultraware/funlen v0.1.0 // indirect github.com/ultraware/whitespace v0.1.0 // indirect github.com/uudashr/gocognit v1.1.2 // indirect - github.com/vbatts/tar-split v0.11.6 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/yagipy/maintidx v1.0.0 // indirect diff --git a/go.mod.sri b/go.mod.sri index a587699c5e39c..feea9b11b1ab0 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-phgPg9fDR/rTJaVItwxAaqNCUR3CAkTVBxnuRRt3Kts= +sha256-4orp8iQekVbhCFpt7DXLvj6dediKxo1qkWr1oe7+RaE= diff --git a/go.sum b/go.sum index 18db5ef22d475..ab4f3303623c8 100644 --- a/go.sum +++ b/go.sum @@ -262,12 +262,14 @@ github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9 github.com/containerd/containerd v1.7.29/go.mod 
h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v1.0.0-rc.2 h1:0SPgaNZPVWGEi4grZdV8VRYQn78y+nm6acgLGv/QzE4= github.com/containerd/platforms v1.0.0-rc.2/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= -github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= -github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= +github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8= +github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= @@ -317,14 +319,14 @@ github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY= -github.com/docker/cli 
v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E= +github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= -github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= +github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20250808211157-605354379745 h1:yOn6Ze6IbYI/KAw2lw/83ELYvZh6hvsygTVkD0dzMC4= @@ -562,8 +564,8 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.3 
h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= -github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= +github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I= +github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM= github.com/google/go-github/v66 v66.0.0 h1:ADJsaXj9UotwdgK8/iFZtv7MLc8E8WBl62WLd/D/9+M= github.com/google/go-github/v66 v66.0.0/go.mod h1:+4SO9Zkuyf8ytMj0csN1NR/5OTR+MfqPp8P8dVlcvY4= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -871,6 +873,10 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1189,8 +1195,8 @@ github.com/ultraware/whitespace v0.1.0 h1:O1HKYoh0kIeqE8sFqZf1o0qbORXUCOQFrlaQyZ github.com/ultraware/whitespace v0.1.0/go.mod h1:/se4r3beMFNmewJ4Xmz0nMQ941GJt+qmSHGP9emHYe0= github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= github.com/uudashr/gocognit v1.1.2/go.mod 
h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= -github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= -github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350 h1:w5OI+kArIBVksl8UGn6ARQshtPCQvDsbuA9NQie3GIg= github.com/vishvananda/netlink v1.3.1-0.20240922070040-084abd93d350/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= @@ -1409,8 +1415,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= -golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/shell.nix b/shell.nix index fa19d968228ef..07d3c1ad53bad 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# 
nix-direnv cache busting line: sha256-phgPg9fDR/rTJaVItwxAaqNCUR3CAkTVBxnuRRt3Kts= +# nix-direnv cache busting line: sha256-4orp8iQekVbhCFpt7DXLvj6dediKxo1qkWr1oe7+RaE= From 3b737edbf182631f380e6e0e2663de6df73b4bba Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 28 Jan 2026 14:07:08 -0800 Subject: [PATCH 1004/1093] appc,feature/conn25,net: Add DNS response interception for conn25 The new version of app connector (conn25) needs to read DNS responses for domains it is interested in and store and swap out IP addresses. Add a hook to dns manager to enable this. Give the conn25 updated netmaps so that it knows when to assign connecting addresses and from what pool. Assign an address when we see a DNS response for a domain we are interested in, but don't do anything with the address yet. Updates tailscale/corp#34252 Signed-off-by: Fran Bull --- appc/conn25.go | 101 ----- appc/conn25_test.go | 178 --------- cmd/derper/depaware.txt | 2 +- cmd/tailscale/depaware.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- feature/conn25/conn25.go | 471 ++++++++++++++++++++++- feature/conn25/conn25_test.go | 490 ++++++++++++++++++++++++ {appc => feature/conn25}/ippool.go | 2 +- {appc => feature/conn25}/ippool_test.go | 2 +- net/dns/manager.go | 27 +- types/appctype/appconnector.go | 15 + 11 files changed, 1000 insertions(+), 292 deletions(-) create mode 100644 feature/conn25/conn25_test.go rename {appc => feature/conn25}/ippool.go (99%) rename {appc => feature/conn25}/ippool_test.go (98%) diff --git a/appc/conn25.go b/appc/conn25.go index 08ca651fda7e9..08b2a1ade6826 100644 --- a/appc/conn25.go +++ b/appc/conn25.go @@ -5,9 +5,7 @@ package appc import ( "cmp" - "net/netip" "slices" - "sync" "tailscale.com/tailcfg" "tailscale.com/types/appctype" @@ -15,105 +13,6 @@ import ( "tailscale.com/util/set" ) -// Conn25 holds the developing state for the as yet nascent next generation app connector. -// There is currently (2025-12-08) no actual app connecting functionality. 
-type Conn25 struct { - mu sync.Mutex - transitIPs map[tailcfg.NodeID]map[netip.Addr]netip.Addr -} - -const dupeTransitIPMessage = "Duplicate transit address in ConnectorTransitIPRequest" - -// HandleConnectorTransitIPRequest creates a ConnectorTransitIPResponse in response to a ConnectorTransitIPRequest. -// It updates the connectors mapping of TransitIP->DestinationIP per peer (tailcfg.NodeID). -// If a peer has stored this mapping in the connector Conn25 will route traffic to TransitIPs to DestinationIPs for that peer. -func (c *Conn25) HandleConnectorTransitIPRequest(nid tailcfg.NodeID, ctipr ConnectorTransitIPRequest) ConnectorTransitIPResponse { - resp := ConnectorTransitIPResponse{} - seen := map[netip.Addr]bool{} - for _, each := range ctipr.TransitIPs { - if seen[each.TransitIP] { - resp.TransitIPs = append(resp.TransitIPs, TransitIPResponse{ - Code: OtherFailure, - Message: dupeTransitIPMessage, - }) - continue - } - tipresp := c.handleTransitIPRequest(nid, each) - seen[each.TransitIP] = true - resp.TransitIPs = append(resp.TransitIPs, tipresp) - } - return resp -} - -func (c *Conn25) handleTransitIPRequest(nid tailcfg.NodeID, tipr TransitIPRequest) TransitIPResponse { - c.mu.Lock() - defer c.mu.Unlock() - if c.transitIPs == nil { - c.transitIPs = make(map[tailcfg.NodeID]map[netip.Addr]netip.Addr) - } - peerMap, ok := c.transitIPs[nid] - if !ok { - peerMap = make(map[netip.Addr]netip.Addr) - c.transitIPs[nid] = peerMap - } - peerMap[tipr.TransitIP] = tipr.DestinationIP - return TransitIPResponse{} -} - -func (c *Conn25) transitIPTarget(nid tailcfg.NodeID, tip netip.Addr) netip.Addr { - c.mu.Lock() - defer c.mu.Unlock() - return c.transitIPs[nid][tip] -} - -// TransitIPRequest details a single TransitIP allocation request from a client to a -// connector. -type TransitIPRequest struct { - // TransitIP is the intermediate destination IP that will be received at this - // connector and will be replaced by DestinationIP when performing DNAT. 
- TransitIP netip.Addr `json:"transitIP,omitzero"` - - // DestinationIP is the final destination IP that connections to the TransitIP - // should be mapped to when performing DNAT. - DestinationIP netip.Addr `json:"destinationIP,omitzero"` -} - -// ConnectorTransitIPRequest is the request body for a PeerAPI request to -// /connector/transit-ip and can include zero or more TransitIP allocation requests. -type ConnectorTransitIPRequest struct { - // TransitIPs is the list of requested mappings. - TransitIPs []TransitIPRequest `json:"transitIPs,omitempty"` -} - -// TransitIPResponseCode appears in TransitIPResponse and signifies success or failure status. -type TransitIPResponseCode int - -const ( - // OK indicates that the mapping was created as requested. - OK TransitIPResponseCode = 0 - - // OtherFailure indicates that the mapping failed for a reason that does not have - // another relevant [TransitIPResponsecode]. - OtherFailure TransitIPResponseCode = 1 -) - -// TransitIPResponse is the response to a TransitIPRequest -type TransitIPResponse struct { - // Code is an error code indicating success or failure of the [TransitIPRequest]. - Code TransitIPResponseCode `json:"code,omitzero"` - // Message is an error message explaining what happened, suitable for logging but - // not necessarily suitable for displaying in a UI to non-technical users. It - // should be empty when [Code] is [OK]. - Message string `json:"message,omitzero"` -} - -// ConnectorTransitIPResponse is the response to a ConnectorTransitIPRequest -type ConnectorTransitIPResponse struct { - // TransitIPs is the list of outcomes for each requested mapping. Elements - // correspond to the order of [ConnectorTransitIPRequest.TransitIPs]. 
- TransitIPs []TransitIPResponse `json:"transitIPs,omitempty"` -} - const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental" // PickSplitDNSPeers looks at the netmap peers capabilities and finds which peers diff --git a/appc/conn25_test.go b/appc/conn25_test.go index 33f89749ca748..a9cb0fb7ebf9c 100644 --- a/appc/conn25_test.go +++ b/appc/conn25_test.go @@ -5,7 +5,6 @@ package appc import ( "encoding/json" - "net/netip" "reflect" "testing" @@ -14,183 +13,6 @@ import ( "tailscale.com/types/opt" ) -// TestHandleConnectorTransitIPRequestZeroLength tests that if sent a -// ConnectorTransitIPRequest with 0 TransitIPRequests, we respond with a -// ConnectorTransitIPResponse with 0 TransitIPResponses. -func TestHandleConnectorTransitIPRequestZeroLength(t *testing.T) { - c := &Conn25{} - req := ConnectorTransitIPRequest{} - nid := tailcfg.NodeID(1) - - resp := c.HandleConnectorTransitIPRequest(nid, req) - if len(resp.TransitIPs) != 0 { - t.Fatalf("n TransitIPs in response: %d, want 0", len(resp.TransitIPs)) - } -} - -// TestHandleConnectorTransitIPRequestStoresAddr tests that if sent a -// request with a transit addr and a destination addr we store that mapping -// and can retrieve it. If sent another req with a different dst for that transit addr -// we store that instead. 
-func TestHandleConnectorTransitIPRequestStoresAddr(t *testing.T) { - c := &Conn25{} - nid := tailcfg.NodeID(1) - tip := netip.MustParseAddr("0.0.0.1") - dip := netip.MustParseAddr("1.2.3.4") - dip2 := netip.MustParseAddr("1.2.3.5") - mr := func(t, d netip.Addr) ConnectorTransitIPRequest { - return ConnectorTransitIPRequest{ - TransitIPs: []TransitIPRequest{ - {TransitIP: t, DestinationIP: d}, - }, - } - } - - resp := c.HandleConnectorTransitIPRequest(nid, mr(tip, dip)) - if len(resp.TransitIPs) != 1 { - t.Fatalf("n TransitIPs in response: %d, want 1", len(resp.TransitIPs)) - } - got := resp.TransitIPs[0].Code - if got != TransitIPResponseCode(0) { - t.Fatalf("TransitIP Code: %d, want 0", got) - } - gotAddr := c.transitIPTarget(nid, tip) - if gotAddr != dip { - t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip) - } - - // mapping can be overwritten - resp2 := c.HandleConnectorTransitIPRequest(nid, mr(tip, dip2)) - if len(resp2.TransitIPs) != 1 { - t.Fatalf("n TransitIPs in response: %d, want 1", len(resp2.TransitIPs)) - } - got2 := resp.TransitIPs[0].Code - if got2 != TransitIPResponseCode(0) { - t.Fatalf("TransitIP Code: %d, want 0", got2) - } - gotAddr2 := c.transitIPTarget(nid, tip) - if gotAddr2 != dip2 { - t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip2) - } -} - -// TestHandleConnectorTransitIPRequestMultipleTIP tests that we can -// get a req with multiple mappings and we store them all. Including -// multiple transit addrs for the same destination. 
-func TestHandleConnectorTransitIPRequestMultipleTIP(t *testing.T) { - c := &Conn25{} - nid := tailcfg.NodeID(1) - tip := netip.MustParseAddr("0.0.0.1") - tip2 := netip.MustParseAddr("0.0.0.2") - tip3 := netip.MustParseAddr("0.0.0.3") - dip := netip.MustParseAddr("1.2.3.4") - dip2 := netip.MustParseAddr("1.2.3.5") - req := ConnectorTransitIPRequest{ - TransitIPs: []TransitIPRequest{ - {TransitIP: tip, DestinationIP: dip}, - {TransitIP: tip2, DestinationIP: dip2}, - // can store same dst addr for multiple transit addrs - {TransitIP: tip3, DestinationIP: dip}, - }, - } - resp := c.HandleConnectorTransitIPRequest(nid, req) - if len(resp.TransitIPs) != 3 { - t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs)) - } - - for i := 0; i < 3; i++ { - got := resp.TransitIPs[i].Code - if got != TransitIPResponseCode(0) { - t.Fatalf("i=%d TransitIP Code: %d, want 0", i, got) - } - } - gotAddr1 := c.transitIPTarget(nid, tip) - if gotAddr1 != dip { - t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip) - } - gotAddr2 := c.transitIPTarget(nid, tip2) - if gotAddr2 != dip2 { - t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip2) - } - gotAddr3 := c.transitIPTarget(nid, tip3) - if gotAddr3 != dip { - t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip3, gotAddr3, dip) - } -} - -// TestHandleConnectorTransitIPRequestSameTIP tests that if we get -// a req that has more than one TransitIPRequest for the same transit addr -// only the first is stored, and the subsequent ones get an error code and -// message in the response. 
-func TestHandleConnectorTransitIPRequestSameTIP(t *testing.T) { - c := &Conn25{} - nid := tailcfg.NodeID(1) - tip := netip.MustParseAddr("0.0.0.1") - tip2 := netip.MustParseAddr("0.0.0.2") - dip := netip.MustParseAddr("1.2.3.4") - dip2 := netip.MustParseAddr("1.2.3.5") - dip3 := netip.MustParseAddr("1.2.3.6") - req := ConnectorTransitIPRequest{ - TransitIPs: []TransitIPRequest{ - {TransitIP: tip, DestinationIP: dip}, - // cannot have dupe TransitIPs in one ConnectorTransitIPRequest - {TransitIP: tip, DestinationIP: dip2}, - {TransitIP: tip2, DestinationIP: dip3}, - }, - } - - resp := c.HandleConnectorTransitIPRequest(nid, req) - if len(resp.TransitIPs) != 3 { - t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs)) - } - - got := resp.TransitIPs[0].Code - if got != TransitIPResponseCode(0) { - t.Fatalf("i=0 TransitIP Code: %d, want 0", got) - } - msg := resp.TransitIPs[0].Message - if msg != "" { - t.Fatalf("i=0 TransitIP Message: \"%s\", want \"%s\"", msg, "") - } - got1 := resp.TransitIPs[1].Code - if got1 != TransitIPResponseCode(1) { - t.Fatalf("i=1 TransitIP Code: %d, want 1", got1) - } - msg1 := resp.TransitIPs[1].Message - if msg1 != dupeTransitIPMessage { - t.Fatalf("i=1 TransitIP Message: \"%s\", want \"%s\"", msg1, dupeTransitIPMessage) - } - got2 := resp.TransitIPs[2].Code - if got2 != TransitIPResponseCode(0) { - t.Fatalf("i=2 TransitIP Code: %d, want 0", got2) - } - msg2 := resp.TransitIPs[2].Message - if msg2 != "" { - t.Fatalf("i=2 TransitIP Message: \"%s\", want \"%s\"", msg, "") - } - - gotAddr1 := c.transitIPTarget(nid, tip) - if gotAddr1 != dip { - t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip) - } - gotAddr2 := c.transitIPTarget(nid, tip2) - if gotAddr2 != dip3 { - t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip3) - } -} - -// TestGetDstIPUnknownTIP tests that unknown transit addresses can be looked up without problem. 
-func TestTransitIPTargetUnknownTIP(t *testing.T) { - c := &Conn25{} - nid := tailcfg.NodeID(1) - tip := netip.MustParseAddr("0.0.0.1") - got := c.transitIPTarget(nid, tip) - want := netip.Addr{} - if got != want { - t.Fatalf("Unknown transit addr, want: %v, got %v", want, got) - } -} - func TestPickSplitDNSPeers(t *testing.T) { getBytesForAttr := func(name string, domains []string, tags []string) []byte { attr := appctype.AppConnectorAttr{ diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 7695cf598b694..d04c66eba118e 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -45,7 +45,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/tailscale/setec/types/api from github.com/tailscale/setec/client/setec github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ - go4.org/netipx from tailscale.com/net/tsaddr + go4.org/netipx from tailscale.com/net/tsaddr+ W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+ google.golang.org/protobuf/encoding/protodelim from github.com/prometheus/common/expfmt google.golang.org/protobuf/encoding/prototext from github.com/prometheus/common/expfmt+ diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 58f9e1c0bfb83..8cef9725847a3 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -149,7 +149,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/x448/float16 from github.com/fxamacker/cbor/v2 go.yaml.in/yaml/v2 from sigs.k8s.io/yaml 💣 go4.org/mem from tailscale.com/client/local+ - go4.org/netipx from tailscale.com/net/tsaddr + go4.org/netipx from tailscale.com/net/tsaddr+ W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/netmon+ k8s.io/client-go/util/homedir from tailscale.com/cmd/tailscale/cli sigs.k8s.io/yaml from tailscale.com/cmd/tailscale/cli diff --git 
a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index aa25fd75f9e9e..4128ecc4ce972 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -249,7 +249,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal+ + tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled tailscale.com/client/local from tailscale.com/client/web+ diff --git a/feature/conn25/conn25.go b/feature/conn25/conn25.go index 33ba0e486abe3..02bec132dc10c 100644 --- a/feature/conn25/conn25.go +++ b/feature/conn25/conn25.go @@ -9,13 +9,24 @@ package conn25 import ( "encoding/json" + "errors" "net/http" + "net/netip" + "strings" + "sync" - "tailscale.com/appc" + "go4.org/netipx" + "golang.org/x/net/dns/dnsmessage" "tailscale.com/feature" "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnlocal" + "tailscale.com/net/dns" + "tailscale.com/tailcfg" + "tailscale.com/types/appctype" "tailscale.com/types/logger" + "tailscale.com/util/dnsname" + "tailscale.com/util/mak" + "tailscale.com/util/set" ) // featureName is the name of the feature implemented by this package. @@ -26,7 +37,8 @@ func init() { feature.Register(featureName) newExtension := func(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { e := &extension{ - conn: &appc.Conn25{}, + conn25: newConn25(logger.WithPrefix(logf, "conn25: ")), + backend: sb, } return e, nil } @@ -46,7 +58,11 @@ func handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, // extension is an [ipnext.Extension] managing the connector on platforms // that import this package. 
type extension struct { - conn *appc.Conn25 + conn25 *Conn25 // safe for concurrent access and only set at creation + backend ipnext.SafeBackend // safe for concurrent access and only set at creation + + mu sync.Mutex // protects the fields below + isDNSHookRegistered bool } // Name implements [ipnext.Extension]. @@ -56,6 +72,7 @@ func (e *extension) Name() string { // Init implements [ipnext.Extension]. func (e *extension) Init(host ipnext.Host) error { + host.Hooks().OnSelfChange.Add(e.onSelfChange) return nil } @@ -71,13 +88,13 @@ func (e *extension) handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.R http.Error(w, "Method should be POST", http.StatusMethodNotAllowed) return } - var req appc.ConnectorTransitIPRequest + var req ConnectorTransitIPRequest err := json.NewDecoder(http.MaxBytesReader(w, r.Body, maxBodyBytes+1)).Decode(&req) if err != nil { http.Error(w, "Error decoding JSON", http.StatusBadRequest) return } - resp := e.conn.HandleConnectorTransitIPRequest(h.Peer().ID(), req) + resp := e.conn25.handleConnectorTransitIPRequest(h.Peer().ID(), req) bs, err := json.Marshal(resp) if err != nil { http.Error(w, "Error encoding JSON", http.StatusInternalServerError) @@ -85,3 +102,447 @@ func (e *extension) handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.R } w.Write(bs) } + +func (e *extension) onSelfChange(selfNode tailcfg.NodeView) { + err := e.conn25.reconfig(selfNode) + if err != nil { + e.conn25.client.logf("error during Reconfig onSelfChange: %v", err) + return + } + + if e.conn25.isConfigured() { + err = e.registerDNSHook() + } else { + err = e.unregisterDNSHook() + } + if err != nil { + e.conn25.client.logf("error managing DNS hook onSelfChange: %v", err) + } +} + +func (e *extension) registerDNSHook() error { + e.mu.Lock() + defer e.mu.Unlock() + if e.isDNSHookRegistered { + return nil + } + err := e.setDNSHookLocked(e.conn25.mapDNSResponse) + if err == nil { + e.isDNSHookRegistered = true + } + return err +} + +func (e *extension) 
unregisterDNSHook() error { + e.mu.Lock() + defer e.mu.Unlock() + if !e.isDNSHookRegistered { + return nil + } + err := e.setDNSHookLocked(nil) + if err == nil { + e.isDNSHookRegistered = false + } + return err +} + +func (e *extension) setDNSHookLocked(fx dns.ResponseMapper) error { + dnsManager, ok := e.backend.Sys().DNSManager.GetOK() + if !ok || dnsManager == nil { + return errors.New("couldn't get DNSManager from sys") + } + dnsManager.SetQueryResponseMapper(fx) + return nil +} + +type appAddr struct { + app string + addr netip.Addr +} + +// Conn25 holds state for routing traffic for a domain via a connector. +type Conn25 struct { + client *client + connector *connector +} + +func (c *Conn25) isConfigured() bool { + return c.client.isConfigured() +} + +func newConn25(logf logger.Logf) *Conn25 { + c := &Conn25{ + client: &client{logf: logf}, + connector: &connector{logf: logf}, + } + return c +} + +func ipSetFromIPRanges(rs []netipx.IPRange) (*netipx.IPSet, error) { + b := &netipx.IPSetBuilder{} + for _, r := range rs { + b.AddRange(r) + } + return b.IPSet() +} + +func (c *Conn25) reconfig(selfNode tailcfg.NodeView) error { + cfg, err := configFromNodeView(selfNode) + if err != nil { + return err + } + if err := c.client.reconfig(cfg); err != nil { + return err + } + if err := c.connector.reconfig(cfg); err != nil { + return err + } + return nil +} + +// mapDNSResponse parses and inspects the DNS response, and uses the +// contents to assign addresses for connecting. It does not yet modify +// the response. +func (c *Conn25) mapDNSResponse(buf []byte) []byte { + return c.client.mapDNSResponse(buf) +} + +const dupeTransitIPMessage = "Duplicate transit address in ConnectorTransitIPRequest" + +// handleConnectorTransitIPRequest creates a ConnectorTransitIPResponse in response to a ConnectorTransitIPRequest. +// It updates the connectors mapping of TransitIP->DestinationIP per peer (tailcfg.NodeID). 
+// If a peer has stored this mapping in the connector Conn25 will route traffic to TransitIPs to DestinationIPs for that peer. +func (c *Conn25) handleConnectorTransitIPRequest(nid tailcfg.NodeID, ctipr ConnectorTransitIPRequest) ConnectorTransitIPResponse { + resp := ConnectorTransitIPResponse{} + seen := map[netip.Addr]bool{} + for _, each := range ctipr.TransitIPs { + if seen[each.TransitIP] { + resp.TransitIPs = append(resp.TransitIPs, TransitIPResponse{ + Code: OtherFailure, + Message: dupeTransitIPMessage, + }) + continue + } + tipresp := c.connector.handleTransitIPRequest(nid, each) + seen[each.TransitIP] = true + resp.TransitIPs = append(resp.TransitIPs, tipresp) + } + return resp +} + +func (s *connector) handleTransitIPRequest(nid tailcfg.NodeID, tipr TransitIPRequest) TransitIPResponse { + s.mu.Lock() + defer s.mu.Unlock() + if s.transitIPs == nil { + s.transitIPs = make(map[tailcfg.NodeID]map[netip.Addr]appAddr) + } + peerMap, ok := s.transitIPs[nid] + if !ok { + peerMap = make(map[netip.Addr]appAddr) + s.transitIPs[nid] = peerMap + } + peerMap[tipr.TransitIP] = appAddr{addr: tipr.DestinationIP, app: tipr.App} + return TransitIPResponse{} +} + +func (s *connector) transitIPTarget(nid tailcfg.NodeID, tip netip.Addr) netip.Addr { + s.mu.Lock() + defer s.mu.Unlock() + return s.transitIPs[nid][tip].addr +} + +// TransitIPRequest details a single TransitIP allocation request from a client to a +// connector. +type TransitIPRequest struct { + // TransitIP is the intermediate destination IP that will be received at this + // connector and will be replaced by DestinationIP when performing DNAT. + TransitIP netip.Addr `json:"transitIP,omitzero"` + + // DestinationIP is the final destination IP that connections to the TransitIP + // should be mapped to when performing DNAT. + DestinationIP netip.Addr `json:"destinationIP,omitzero"` + + // App is the name of the connector application from the tailnet + // configuration. 
+ App string `json:"app,omitzero"` +} + +// ConnectorTransitIPRequest is the request body for a PeerAPI request to +// /connector/transit-ip and can include zero or more TransitIP allocation requests. +type ConnectorTransitIPRequest struct { + // TransitIPs is the list of requested mappings. + TransitIPs []TransitIPRequest `json:"transitIPs,omitempty"` +} + +// TransitIPResponseCode appears in TransitIPResponse and signifies success or failure status. +type TransitIPResponseCode int + +const ( + // OK indicates that the mapping was created as requested. + OK TransitIPResponseCode = 0 + + // OtherFailure indicates that the mapping failed for a reason that does not have + // another relevant [TransitIPResponsecode]. + OtherFailure TransitIPResponseCode = 1 +) + +// TransitIPResponse is the response to a TransitIPRequest +type TransitIPResponse struct { + // Code is an error code indicating success or failure of the [TransitIPRequest]. + Code TransitIPResponseCode `json:"code,omitzero"` + // Message is an error message explaining what happened, suitable for logging but + // not necessarily suitable for displaying in a UI to non-technical users. It + // should be empty when [Code] is [OK]. + Message string `json:"message,omitzero"` +} + +// ConnectorTransitIPResponse is the response to a ConnectorTransitIPRequest +type ConnectorTransitIPResponse struct { + // TransitIPs is the list of outcomes for each requested mapping. Elements + // correspond to the order of [ConnectorTransitIPRequest.TransitIPs]. + TransitIPs []TransitIPResponse `json:"transitIPs,omitempty"` +} + +const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental" + +// config holds the config from the policy and lookups derived from that. +// config is not safe for concurrent use. 
+type config struct { + isConfigured bool + apps []appctype.Conn25Attr + appsByDomain map[string][]string + selfRoutedDomains set.Set[string] +} + +func configFromNodeView(n tailcfg.NodeView) (config, error) { + apps, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.Conn25Attr](n.CapMap(), AppConnectorsExperimentalAttrName) + if err != nil { + return config{}, err + } + if len(apps) == 0 { + return config{}, nil + } + selfTags := set.SetOf(n.Tags().AsSlice()) + cfg := config{ + isConfigured: true, + apps: apps, + appsByDomain: map[string][]string{}, + selfRoutedDomains: set.Set[string]{}, + } + for _, app := range apps { + selfMatchesTags := false + for _, tag := range app.Connectors { + if selfTags.Contains(tag) { + selfMatchesTags = true + break + } + } + for _, d := range app.Domains { + fqdn, err := dnsname.ToFQDN(d) + if err != nil { + return config{}, err + } + key := fqdn.WithTrailingDot() + mak.Set(&cfg.appsByDomain, key, append(cfg.appsByDomain[key], app.Name)) + if selfMatchesTags { + cfg.selfRoutedDomains.Add(key) + } + } + } + return cfg, nil +} + +// client performs the conn25 functionality for clients of connectors +// It allocates magic and transit IP addresses and communicates them with +// connectors. +// It's safe for concurrent use. +type client struct { + logf logger.Logf + + mu sync.Mutex // protects the fields below + magicIPPool *ippool + transitIPPool *ippool + // map of magic IP -> (transit IP, app) + magicIPs map[netip.Addr]appAddr + config config +} + +func (c *client) isConfigured() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.config.isConfigured +} + +func (c *client) reconfig(newCfg config) error { + c.mu.Lock() + defer c.mu.Unlock() + + c.config = newCfg + + // TODO(fran) this is not the correct way to manage the pools and changes to the pools. + // We probably want to: + // * check the pools haven't changed + // * reset the whole connector if the pools change? 
or just if they've changed to exclude + // addresses we have in use? + // * have config separate from the apps for this (rather than multiple potentially conflicting places) + // but this works while we are just getting started here. + for _, app := range c.config.apps { + if c.magicIPPool != nil { // just take the first config and never reconfig + break + } + if app.MagicIPPool == nil { + continue + } + mipp, err := ipSetFromIPRanges(app.MagicIPPool) + if err != nil { + return err + } + tipp, err := ipSetFromIPRanges(app.TransitIPPool) + if err != nil { + return err + } + c.magicIPPool = newIPPool(mipp) + c.transitIPPool = newIPPool(tipp) + } + return nil +} + +func (c *client) setMagicIP(magicAddr, transitAddr netip.Addr, app string) { + c.mu.Lock() + defer c.mu.Unlock() + mak.Set(&c.magicIPs, magicAddr, appAddr{addr: transitAddr, app: app}) +} + +func (c *client) isConnectorDomain(domain string) bool { + c.mu.Lock() + defer c.mu.Unlock() + appNames, ok := c.config.appsByDomain[domain] + return ok && len(appNames) > 0 +} + +// reserveAddresses tries to make an assignment of addrs from the address pools +// for this domain+dst address, so that this client can use conn25 connectors. +// It checks that this domain should be routed and that this client is not itself a connector for the domain +// and generally if it is valid to make the assignment. 
+func (c *client) reserveAddresses(domain string, dst netip.Addr) (addrs, error) { + c.mu.Lock() + defer c.mu.Unlock() + appNames, _ := c.config.appsByDomain[domain] + // only reserve for first app + app := appNames[0] + mip, err := c.magicIPPool.next() + if err != nil { + return addrs{}, err + } + tip, err := c.transitIPPool.next() + if err != nil { + return addrs{}, err + } + addrs := addrs{ + dst: dst, + magic: mip, + transit: tip, + app: app, + } + return addrs, nil +} + +func (c *client) enqueueAddressAssignment(addrs addrs) { + c.setMagicIP(addrs.magic, addrs.transit, addrs.app) + // TODO(fran) 2026-02-03 asynchronously send peerapi req to connector to + // allocate these addresses for us. +} + +func (c *client) mapDNSResponse(buf []byte) []byte { + var p dnsmessage.Parser + if _, err := p.Start(buf); err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + if err := p.SkipAllQuestions(); err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + + if h.Class != dnsmessage.ClassINET { + if err := p.SkipAnswer(); err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + continue + } + + switch h.Type { + case dnsmessage.TypeA: + domain := strings.ToLower(h.Name.String()) + if len(domain) == 0 || !c.isConnectorDomain(domain) { + if err := p.SkipAnswer(); err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + continue + } + r, err := p.AResource() + if err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + addrs, err := c.reserveAddresses(domain, netip.AddrFrom4(r.A)) + if err != nil { + c.logf("error assigning connector addresses: %v", err) + return buf + } + if !addrs.isValid() { + c.logf("assigned connector addresses unexpectedly empty: %v", err) + return buf + } + 
c.enqueueAddressAssignment(addrs) + default: + if err := p.SkipAnswer(); err != nil { + c.logf("error parsing dns response: %v", err) + return buf + } + continue + } + } + + // TODO(fran) 2026-01-21 return a dns response with addresses + // swapped out for the magic IPs to make conn25 work. + return buf +} + +type connector struct { + logf logger.Logf + + mu sync.Mutex // protects the fields below + // transitIPs is a map of connector client peer NodeID -> client transitIPs that we update as connector client peers instruct us to, and then use to route traffic to its destination on behalf of connector clients. + transitIPs map[tailcfg.NodeID]map[netip.Addr]appAddr + config config +} + +func (s *connector) reconfig(newCfg config) error { + s.mu.Lock() + defer s.mu.Unlock() + s.config = newCfg + return nil +} + +type addrs struct { + dst netip.Addr + magic netip.Addr + transit netip.Addr + app string +} + +func (c addrs) isValid() bool { + return c.dst.IsValid() +} diff --git a/feature/conn25/conn25_test.go b/feature/conn25/conn25_test.go new file mode 100644 index 0000000000000..0489b22a14e4d --- /dev/null +++ b/feature/conn25/conn25_test.go @@ -0,0 +1,490 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package conn25 + +import ( + "encoding/json" + "net/netip" + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "go4.org/netipx" + "golang.org/x/net/dns/dnsmessage" + "tailscale.com/tailcfg" + "tailscale.com/types/appctype" + "tailscale.com/types/logger" + "tailscale.com/util/set" +) + +func mustIPSetFromPrefix(s string) *netipx.IPSet { + b := &netipx.IPSetBuilder{} + b.AddPrefix(netip.MustParsePrefix(s)) + set, err := b.IPSet() + if err != nil { + panic(err) + } + return set +} + +// TestHandleConnectorTransitIPRequestZeroLength tests that if sent a +// ConnectorTransitIPRequest with 0 TransitIPRequests, we respond with a +// ConnectorTransitIPResponse with 0 
TransitIPResponses. +func TestHandleConnectorTransitIPRequestZeroLength(t *testing.T) { + c := newConn25(logger.Discard) + req := ConnectorTransitIPRequest{} + nid := tailcfg.NodeID(1) + + resp := c.handleConnectorTransitIPRequest(nid, req) + if len(resp.TransitIPs) != 0 { + t.Fatalf("n TransitIPs in response: %d, want 0", len(resp.TransitIPs)) + } +} + +// TestHandleConnectorTransitIPRequestStoresAddr tests that if sent a +// request with a transit addr and a destination addr we store that mapping +// and can retrieve it. If sent another req with a different dst for that transit addr +// we store that instead. +func TestHandleConnectorTransitIPRequestStoresAddr(t *testing.T) { + c := newConn25(logger.Discard) + nid := tailcfg.NodeID(1) + tip := netip.MustParseAddr("0.0.0.1") + dip := netip.MustParseAddr("1.2.3.4") + dip2 := netip.MustParseAddr("1.2.3.5") + mr := func(t, d netip.Addr) ConnectorTransitIPRequest { + return ConnectorTransitIPRequest{ + TransitIPs: []TransitIPRequest{ + {TransitIP: t, DestinationIP: d}, + }, + } + } + + resp := c.handleConnectorTransitIPRequest(nid, mr(tip, dip)) + if len(resp.TransitIPs) != 1 { + t.Fatalf("n TransitIPs in response: %d, want 1", len(resp.TransitIPs)) + } + got := resp.TransitIPs[0].Code + if got != TransitIPResponseCode(0) { + t.Fatalf("TransitIP Code: %d, want 0", got) + } + gotAddr := c.connector.transitIPTarget(nid, tip) + if gotAddr != dip { + t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip) + } + + // mapping can be overwritten + resp2 := c.handleConnectorTransitIPRequest(nid, mr(tip, dip2)) + if len(resp2.TransitIPs) != 1 { + t.Fatalf("n TransitIPs in response: %d, want 1", len(resp2.TransitIPs)) + } + got2 := resp.TransitIPs[0].Code + if got2 != TransitIPResponseCode(0) { + t.Fatalf("TransitIP Code: %d, want 0", got2) + } + gotAddr2 := c.connector.transitIPTarget(nid, tip) + if gotAddr2 != dip2 { + t.Fatalf("Connector stored destination for tip: %v, want %v", gotAddr, dip2) + } +} + 
+// TestHandleConnectorTransitIPRequestMultipleTIP tests that we can +// get a req with multiple mappings and we store them all. Including +// multiple transit addrs for the same destination. +func TestHandleConnectorTransitIPRequestMultipleTIP(t *testing.T) { + c := newConn25(logger.Discard) + nid := tailcfg.NodeID(1) + tip := netip.MustParseAddr("0.0.0.1") + tip2 := netip.MustParseAddr("0.0.0.2") + tip3 := netip.MustParseAddr("0.0.0.3") + dip := netip.MustParseAddr("1.2.3.4") + dip2 := netip.MustParseAddr("1.2.3.5") + req := ConnectorTransitIPRequest{ + TransitIPs: []TransitIPRequest{ + {TransitIP: tip, DestinationIP: dip}, + {TransitIP: tip2, DestinationIP: dip2}, + // can store same dst addr for multiple transit addrs + {TransitIP: tip3, DestinationIP: dip}, + }, + } + resp := c.handleConnectorTransitIPRequest(nid, req) + if len(resp.TransitIPs) != 3 { + t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs)) + } + + for i := 0; i < 3; i++ { + got := resp.TransitIPs[i].Code + if got != TransitIPResponseCode(0) { + t.Fatalf("i=%d TransitIP Code: %d, want 0", i, got) + } + } + gotAddr1 := c.connector.transitIPTarget(nid, tip) + if gotAddr1 != dip { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip) + } + gotAddr2 := c.connector.transitIPTarget(nid, tip2) + if gotAddr2 != dip2 { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip2) + } + gotAddr3 := c.connector.transitIPTarget(nid, tip3) + if gotAddr3 != dip { + t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip3, gotAddr3, dip) + } +} + +// TestHandleConnectorTransitIPRequestSameTIP tests that if we get +// a req that has more than one TransitIPRequest for the same transit addr +// only the first is stored, and the subsequent ones get an error code and +// message in the response. 
+func TestHandleConnectorTransitIPRequestSameTIP(t *testing.T) {
+	c := newConn25(logger.Discard)
+	nid := tailcfg.NodeID(1)
+	tip := netip.MustParseAddr("0.0.0.1")
+	tip2 := netip.MustParseAddr("0.0.0.2")
+	dip := netip.MustParseAddr("1.2.3.4")
+	dip2 := netip.MustParseAddr("1.2.3.5")
+	dip3 := netip.MustParseAddr("1.2.3.6")
+	req := ConnectorTransitIPRequest{
+		TransitIPs: []TransitIPRequest{
+			{TransitIP: tip, DestinationIP: dip},
+			// cannot have dupe TransitIPs in one ConnectorTransitIPRequest
+			{TransitIP: tip, DestinationIP: dip2},
+			{TransitIP: tip2, DestinationIP: dip3},
+		},
+	}
+
+	resp := c.handleConnectorTransitIPRequest(nid, req)
+	if len(resp.TransitIPs) != 3 {
+		t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs))
+	}
+
+	got := resp.TransitIPs[0].Code
+	if got != TransitIPResponseCode(0) {
+		t.Fatalf("i=0 TransitIP Code: %d, want 0", got)
+	}
+	msg := resp.TransitIPs[0].Message
+	if msg != "" {
+		t.Fatalf("i=0 TransitIP Message: \"%s\", want \"%s\"", msg, "")
+	}
+	got1 := resp.TransitIPs[1].Code
+	if got1 != TransitIPResponseCode(1) {
+		t.Fatalf("i=1 TransitIP Code: %d, want 1", got1)
+	}
+	msg1 := resp.TransitIPs[1].Message
+	if msg1 != dupeTransitIPMessage {
+		t.Fatalf("i=1 TransitIP Message: \"%s\", want \"%s\"", msg1, dupeTransitIPMessage)
+	}
+	got2 := resp.TransitIPs[2].Code
+	if got2 != TransitIPResponseCode(0) {
+		t.Fatalf("i=2 TransitIP Code: %d, want 0", got2)
+	}
+	msg2 := resp.TransitIPs[2].Message
+	if msg2 != "" {
+		t.Fatalf("i=2 TransitIP Message: \"%s\", want \"%s\"", msg, "")
+	}
+
+	gotAddr1 := c.connector.transitIPTarget(nid, tip)
+	if gotAddr1 != dip {
+		t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip, gotAddr1, dip)
+	}
+	gotAddr2 := c.connector.transitIPTarget(nid, tip2)
+	if gotAddr2 != dip3 {
+		t.Fatalf("Connector stored destination for tip(%v): %v, want %v", tip2, gotAddr2, dip3)
+	}
+}
+
+// TestTransitIPTargetUnknownTIP tests that unknown transit addresses can be looked up without
problem. +func TestTransitIPTargetUnknownTIP(t *testing.T) { + c := newConn25(logger.Discard) + nid := tailcfg.NodeID(1) + tip := netip.MustParseAddr("0.0.0.1") + got := c.connector.transitIPTarget(nid, tip) + want := netip.Addr{} + if got != want { + t.Fatalf("Unknown transit addr, want: %v, got %v", want, got) + } +} + +func TestSetMagicIP(t *testing.T) { + c := newConn25(logger.Discard) + mip := netip.MustParseAddr("0.0.0.1") + tip := netip.MustParseAddr("0.0.0.2") + app := "a" + c.client.setMagicIP(mip, tip, app) + val, ok := c.client.magicIPs[mip] + if !ok { + t.Fatal("expected there to be a value stored for the magic IP") + } + if val.addr != tip { + t.Fatalf("want %v, got %v", tip, val.addr) + } + if val.app != app { + t.Fatalf("want %s, got %s", app, val.app) + } +} + +func TestReserveIPs(t *testing.T) { + c := newConn25(logger.Discard) + c.client.magicIPPool = newIPPool(mustIPSetFromPrefix("100.64.0.0/24")) + c.client.transitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24")) + mbd := map[string][]string{} + mbd["example.com."] = []string{"a"} + c.client.config.appsByDomain = mbd + + dst := netip.MustParseAddr("0.0.0.1") + con, err := c.client.reserveAddresses("example.com.", dst) + if err != nil { + t.Fatal(err) + } + + wantDst := netip.MustParseAddr("0.0.0.1") // same as dst we pass in + wantMagic := netip.MustParseAddr("100.64.0.0") // first from magic pool + wantTransit := netip.MustParseAddr("169.254.0.0") // first from transit pool + wantApp := "a" // the app name related to example.com. 
+ + if wantDst != con.dst { + t.Errorf("want %v, got %v", wantDst, con.dst) + } + if wantMagic != con.magic { + t.Errorf("want %v, got %v", wantMagic, con.magic) + } + if wantTransit != con.transit { + t.Errorf("want %v, got %v", wantTransit, con.transit) + } + if wantApp != con.app { + t.Errorf("want %s, got %s", wantApp, con.app) + } +} + +func TestReconfig(t *testing.T) { + rawCfg := `{"name":"app1","connectors":["tag:woo"],"domains":["example.com"]}` + capMap := tailcfg.NodeCapMap{ + tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): []tailcfg.RawMessage{ + tailcfg.RawMessage(rawCfg), + }, + } + + c := newConn25(logger.Discard) + sn := (&tailcfg.Node{ + CapMap: capMap, + }).View() + + err := c.reconfig(sn) + if err != nil { + t.Fatal(err) + } + + if len(c.client.config.apps) != 1 || c.client.config.apps[0].Name != "app1" { + t.Fatalf("want apps to have one entry 'app1', got %v", c.client.config.apps) + } +} + +func TestConfigReconfig(t *testing.T) { + for _, tt := range []struct { + name string + rawCfg string + cfg []appctype.Conn25Attr + tags []string + wantErr bool + wantAppsByDomain map[string][]string + wantSelfRoutedDomains set.Set[string] + }{ + { + name: "bad-config", + rawCfg: `bad`, + wantErr: true, + }, + { + name: "simple", + cfg: []appctype.Conn25Attr{ + {Name: "one", Domains: []string{"a.example.com"}, Connectors: []string{"tag:one"}}, + {Name: "two", Domains: []string{"b.example.com"}, Connectors: []string{"tag:two"}}, + }, + tags: []string{"tag:one"}, + wantAppsByDomain: map[string][]string{ + "a.example.com.": {"one"}, + "b.example.com.": {"two"}, + }, + wantSelfRoutedDomains: set.SetOf([]string{"a.example.com."}), + }, + { + name: "more-complex", + cfg: []appctype.Conn25Attr{ + {Name: "one", Domains: []string{"1.a.example.com", "1.b.example.com"}, Connectors: []string{"tag:one", "tag:onea"}}, + {Name: "two", Domains: []string{"2.b.example.com", "2.c.example.com"}, Connectors: []string{"tag:two", "tag:twoa"}}, + {Name: "three", Domains: 
[]string{"1.b.example.com", "1.c.example.com"}, Connectors: []string{}}, + {Name: "four", Domains: []string{"4.b.example.com", "4.d.example.com"}, Connectors: []string{"tag:four"}}, + }, + tags: []string{"tag:onea", "tag:four", "tag:unrelated"}, + wantAppsByDomain: map[string][]string{ + "1.a.example.com.": {"one"}, + "1.b.example.com.": {"one", "three"}, + "1.c.example.com.": {"three"}, + "2.b.example.com.": {"two"}, + "2.c.example.com.": {"two"}, + "4.b.example.com.": {"four"}, + "4.d.example.com.": {"four"}, + }, + wantSelfRoutedDomains: set.SetOf([]string{"1.a.example.com.", "1.b.example.com.", "4.b.example.com.", "4.d.example.com."}), + }, + } { + t.Run(tt.name, func(t *testing.T) { + cfg := []tailcfg.RawMessage{tailcfg.RawMessage(tt.rawCfg)} + if tt.cfg != nil { + cfg = []tailcfg.RawMessage{} + for _, attr := range tt.cfg { + bs, err := json.Marshal(attr) + if err != nil { + t.Fatalf("unexpected error in test setup: %v", err) + } + cfg = append(cfg, tailcfg.RawMessage(bs)) + } + } + capMap := tailcfg.NodeCapMap{ + tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): cfg, + } + sn := (&tailcfg.Node{ + CapMap: capMap, + Tags: tt.tags, + }).View() + c, err := configFromNodeView(sn) + if (err != nil) != tt.wantErr { + t.Fatalf("wantErr: %t, err: %v", tt.wantErr, err) + } + if diff := cmp.Diff(tt.wantAppsByDomain, c.appsByDomain); diff != "" { + t.Errorf("appsByDomain diff (-want, +got):\n%s", diff) + } + if diff := cmp.Diff(tt.wantSelfRoutedDomains, c.selfRoutedDomains); diff != "" { + t.Errorf("selfRoutedDomains diff (-want, +got):\n%s", diff) + } + }) + } +} + +func makeSelfNode(t *testing.T, attr appctype.Conn25Attr, tags []string) tailcfg.NodeView { + t.Helper() + bs, err := json.Marshal(attr) + if err != nil { + t.Fatalf("unexpected error in test setup: %v", err) + } + cfg := []tailcfg.RawMessage{tailcfg.RawMessage(bs)} + capMap := tailcfg.NodeCapMap{ + tailcfg.NodeCapability(AppConnectorsExperimentalAttrName): cfg, + } + return (&tailcfg.Node{ + 
CapMap: capMap, + Tags: tags, + }).View() +} + +func rangeFrom(from, to string) netipx.IPRange { + return netipx.IPRangeFrom( + netip.MustParseAddr("100.64.0."+from), + netip.MustParseAddr("100.64.0."+to), + ) +} + +func TestMapDNSResponse(t *testing.T) { + makeDNSResponse := func(domain string, addrs []dnsmessage.AResource) []byte { + b := dnsmessage.NewBuilder(nil, + dnsmessage.Header{ + ID: 1, + Response: true, + Authoritative: true, + RCode: dnsmessage.RCodeSuccess, + }) + b.EnableCompression() + + if err := b.StartQuestions(); err != nil { + t.Fatal(err) + } + + if err := b.Question(dnsmessage.Question{ + Name: dnsmessage.MustNewName(domain), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }); err != nil { + t.Fatal(err) + } + + if err := b.StartAnswers(); err != nil { + t.Fatal(err) + } + + for _, addr := range addrs { + b.AResource( + dnsmessage.ResourceHeader{ + Name: dnsmessage.MustNewName(domain), + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + }, + addr, + ) + } + + outbs, err := b.Finish() + if err != nil { + t.Fatal(err) + } + return outbs + } + + for _, tt := range []struct { + name string + domain string + addrs []dnsmessage.AResource + wantMagicIPs map[netip.Addr]appAddr + }{ + { + name: "one-ip-matches", + domain: "example.com.", + addrs: []dnsmessage.AResource{{A: [4]byte{1, 0, 0, 0}}}, + // these are 'expected' because they are the beginning of the provided pools + wantMagicIPs: map[netip.Addr]appAddr{ + netip.MustParseAddr("100.64.0.0"): {app: "app1", addr: netip.MustParseAddr("100.64.0.40")}, + }, + }, + { + name: "multiple-ip-matches", + domain: "example.com.", + addrs: []dnsmessage.AResource{ + {A: [4]byte{1, 0, 0, 0}}, + {A: [4]byte{2, 0, 0, 0}}, + }, + wantMagicIPs: map[netip.Addr]appAddr{ + netip.MustParseAddr("100.64.0.0"): {app: "app1", addr: netip.MustParseAddr("100.64.0.40")}, + netip.MustParseAddr("100.64.0.1"): {app: "app1", addr: netip.MustParseAddr("100.64.0.41")}, + }, + }, + { + name: "no-domain-match", + 
domain: "x.example.com.", + addrs: []dnsmessage.AResource{ + {A: [4]byte{1, 0, 0, 0}}, + {A: [4]byte{2, 0, 0, 0}}, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + dnsResp := makeDNSResponse(tt.domain, tt.addrs) + sn := makeSelfNode(t, appctype.Conn25Attr{ + Name: "app1", + Connectors: []string{"tag:woo"}, + Domains: []string{"example.com"}, + MagicIPPool: []netipx.IPRange{rangeFrom("0", "10"), rangeFrom("20", "30")}, + TransitIPPool: []netipx.IPRange{rangeFrom("40", "50")}, + }, []string{}) + c := newConn25(logger.Discard) + c.reconfig(sn) + + bs := c.mapDNSResponse(dnsResp) + if !reflect.DeepEqual(dnsResp, bs) { + t.Fatal("shouldn't be changing the bytes (yet)") + } + if diff := cmp.Diff(tt.wantMagicIPs, c.client.magicIPs, cmpopts.EquateComparable(appAddr{}, netip.Addr{})); diff != "" { + t.Errorf("magicIPs diff (-want, +got):\n%s", diff) + } + }) + } +} diff --git a/appc/ippool.go b/feature/conn25/ippool.go similarity index 99% rename from appc/ippool.go rename to feature/conn25/ippool.go index 702f79ddef8d8..e50186d880914 100644 --- a/appc/ippool.go +++ b/feature/conn25/ippool.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package appc +package conn25 import ( "errors" diff --git a/appc/ippool_test.go b/feature/conn25/ippool_test.go similarity index 98% rename from appc/ippool_test.go rename to feature/conn25/ippool_test.go index 8ac457c117475..ccfaad3eb71e1 100644 --- a/appc/ippool_test.go +++ b/feature/conn25/ippool_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -package appc +package conn25 import ( "errors" diff --git a/net/dns/manager.go b/net/dns/manager.go index c052055654f1d..889c542cf1f1d 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -46,6 +46,11 @@ var ( // be running. 
const maxActiveQueries = 256 +// ResponseMapper is a function that accepts the bytes representing +// a DNS response and returns bytes representing a DNS response. +// Used to observe and/or mutate DNS responses managed by this manager. +type ResponseMapper func([]byte) []byte + // We use file-ignore below instead of ignore because on some platforms, // the lint exception is necessary and on others it is not, // and plain ignore complains if the exception is unnecessary. @@ -67,8 +72,9 @@ type Manager struct { knobs *controlknobs.Knobs // or nil goos string // if empty, gets set to runtime.GOOS - mu sync.Mutex // guards following - config *Config // Tracks the last viable DNS configuration set by Set. nil on failures other than compilation failures or if set has never been called. + mu sync.Mutex // guards following + config *Config // Tracks the last viable DNS configuration set by Set. nil on failures other than compilation failures or if set has never been called. + queryResponseMapper ResponseMapper } // NewManager created a new manager from the given config. 
@@ -467,7 +473,16 @@ func (m *Manager) Query(ctx context.Context, bs []byte, family string, from neti return nil, errFullQueue } defer atomic.AddInt32(&m.activeQueriesAtomic, -1) - return m.resolver.Query(ctx, bs, family, from) + outbs, err := m.resolver.Query(ctx, bs, family, from) + if err != nil { + return outbs, err + } + m.mu.Lock() + defer m.mu.Unlock() + if m.queryResponseMapper != nil { + outbs = m.queryResponseMapper(outbs) + } + return outbs, err } const ( @@ -653,3 +668,9 @@ func CleanUp(logf logger.Logf, netMon *netmon.Monitor, bus *eventbus.Bus, health } var metricDNSQueryErrorQueue = clientmetric.NewCounter("dns_query_local_error_queue") + +func (m *Manager) SetQueryResponseMapper(fx ResponseMapper) { + m.mu.Lock() + defer m.mu.Unlock() + m.queryResponseMapper = fx +} diff --git a/types/appctype/appconnector.go b/types/appctype/appconnector.go index 5442e8290cb8a..0af5db4c38672 100644 --- a/types/appctype/appconnector.go +++ b/types/appctype/appconnector.go @@ -8,6 +8,7 @@ package appctype import ( "net/netip" + "go4.org/netipx" "tailscale.com/tailcfg" ) @@ -93,3 +94,17 @@ type RouteUpdate struct { Advertise []netip.Prefix Unadvertise []netip.Prefix } + +type Conn25Attr struct { + // Name is the name of this collection of domains. + Name string `json:"name,omitempty"` + // Domains enumerates the domains serviced by the specified app connectors. + // Domains can be of the form: example.com, or *.example.com. + Domains []string `json:"domains,omitempty"` + // Connectors enumerates the app connectors which service these domains. + // These can either be "*" to match any advertising connector, or a + // tag of the form tag:. 
+ Connectors []string `json:"connectors,omitempty"` + MagicIPPool []netipx.IPRange `json:"magicIPPool,omitempty"` + TransitIPPool []netipx.IPRange `json:"transitIPPool,omitempty"` +} From 2d64c0dab3fcf355de78bd00b856b2fa7101bf94 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Fri, 20 Feb 2026 18:06:07 +0000 Subject: [PATCH 1005/1093] cmd/k8s-operator/e2e: mark TestIngress flaky (#18773) --- cmd/k8s-operator/e2e/ingress_test.go | 67 ++++++++++++++++++++++++++++ cmd/k8s-operator/e2e/setup.go | 3 +- 2 files changed, 69 insertions(+), 1 deletion(-) diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go index 5339b05836388..47a838414d449 100644 --- a/cmd/k8s-operator/e2e/ingress_test.go +++ b/cmd/k8s-operator/e2e/ingress_test.go @@ -5,6 +5,7 @@ package e2e import ( "context" + "encoding/json" "fmt" "net/http" "testing" @@ -14,7 +15,11 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + "tailscale.com/cmd/testwrapper/flakytest" kube "tailscale.com/k8s-operator" "tailscale.com/tstest" "tailscale.com/types/ptr" @@ -23,6 +28,7 @@ import ( // See [TestMain] for test requirements. func TestIngress(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/corp/issues/37533") if tnClient == nil { t.Skip("TestIngress requires a working tailnet client") } @@ -85,8 +91,68 @@ func TestIngress(t *testing.T) { } createAndCleanup(t, kubeClient, svc) + // TODO(tomhjp): Delete once we've reproduced the flake with this extra info. + t0 := time.Now() + watcherCtx, cancelWatcher := context.WithCancel(t.Context()) + defer cancelWatcher() + go func() { + // client-go client for logs. 
+ clientGoKubeClient, err := kubernetes.NewForConfig(restCfg) + if err != nil { + t.Logf("error creating client-go Kubernetes client: %v", err) + return + } + + for { + select { + case <-watcherCtx.Done(): + t.Logf("stopping watcher after %v", time.Since(t0)) + return + case <-time.After(time.Minute): + t.Logf("dumping info after %v elapsed", time.Since(t0)) + // Service itself. + svc := &corev1.Service{ObjectMeta: objectMeta("default", "test-ingress")} + err := get(watcherCtx, kubeClient, svc) + svcYaml, _ := yaml.Marshal(svc) + t.Logf("Service: %s, error: %v\n%s", svc.Name, err, string(svcYaml)) + + // Pods in tailscale namespace. + var pods corev1.PodList + if err := kubeClient.List(watcherCtx, &pods, client.InNamespace("tailscale")); err != nil { + t.Logf("error listing Pods in tailscale namespace: %v", err) + } else { + t.Logf("%d Pods", len(pods.Items)) + for _, pod := range pods.Items { + podYaml, _ := yaml.Marshal(pod) + t.Logf("Pod: %s\n%s", pod.Name, string(podYaml)) + logs := clientGoKubeClient.CoreV1().Pods("tailscale").GetLogs(pod.Name, &corev1.PodLogOptions{}).Do(watcherCtx) + logData, err := logs.Raw() + if err != nil { + t.Logf("error reading logs for Pod %s: %v", pod.Name, err) + continue + } + t.Logf("Logs for Pod %s:\n%s", pod.Name, string(logData)) + } + } + + // Tailscale status on the tailnet. + lc, err := tnClient.LocalClient() + if err != nil { + t.Logf("error getting tailnet local client: %v", err) + } else { + status, err := lc.Status(watcherCtx) + statusJSON, _ := json.MarshalIndent(status, "", " ") + t.Logf("Tailnet status: %s, error: %v", string(statusJSON), err) + } + } + } + }() + // TODO: instead of timing out only when test times out, cancel context after 60s or so. 
if err := wait.PollUntilContextCancel(t.Context(), time.Millisecond*100, true, func(ctx context.Context) (done bool, err error) { + if time.Since(t0) > time.Minute { + t.Logf("%v elapsed waiting for Service default/test-ingress to become Ready", time.Since(t0)) + } maybeReadySvc := &corev1.Service{ObjectMeta: objectMeta("default", "test-ingress")} if err := get(ctx, kubeClient, maybeReadySvc); err != nil { return false, err @@ -99,6 +165,7 @@ func TestIngress(t *testing.T) { }); err != nil { t.Fatalf("error waiting for the Service to become Ready: %v", err) } + cancelWatcher() var resp *http.Response if err := tstest.WaitFor(time.Minute, func() error { diff --git a/cmd/k8s-operator/e2e/setup.go b/cmd/k8s-operator/e2e/setup.go index 845a591453b64..c4fd45d3e4125 100644 --- a/cmd/k8s-operator/e2e/setup.go +++ b/cmd/k8s-operator/e2e/setup.go @@ -70,6 +70,7 @@ const ( var ( tsClient *tailscale.Client // For API calls to control. tnClient *tsnet.Server // For testing real tailnet traffic. + restCfg *rest.Config // For constructing a client-go client if necessary. kubeClient client.WithWatch // For k8s API calls. //go:embed certs/pebble.minica.crt @@ -141,7 +142,7 @@ func runTests(m *testing.M) (int, error) { } // Cluster client setup. 
- restCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + restCfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig) if err != nil { return 0, fmt.Errorf("error loading kubeconfig: %w", err) } From 8890c3c413d6422c7810719efe4ff3e8c994afa9 Mon Sep 17 00:00:00 2001 From: Tom Meadows Date: Fri, 20 Feb 2026 15:52:34 -0800 Subject: [PATCH 1006/1093] cmd/containerboot,kube: enable autoadvertisement of Tailscale services on containerboot (#18527) * cmd/containerboot,kube/services: support the ability to automatically advertise services on startup Updates #17769 Signed-off-by: chaosinthecrd * cmd/containerboot: don't assume we want to use kube state store if in kubernetes Fixes #8188 Signed-off-by: chaosinthecrd --------- Signed-off-by: chaosinthecrd --- cmd/containerboot/main.go | 27 +++++- cmd/containerboot/main_test.go | 70 ++++++++++++--- cmd/containerboot/serve.go | 103 +++++++++++++++------- cmd/containerboot/serve_test.go | 125 ++++++++++++++++++++++++--- cmd/containerboot/settings.go | 7 +- cmd/containerboot/tailscaled.go | 2 +- cmd/k8s-operator/proxygroup_specs.go | 4 + cmd/k8s-operator/sts.go | 4 + cmd/k8s-operator/testutils_test.go | 2 + kube/localclient/fake-client.go | 23 +++++ kube/localclient/local-client.go | 10 +++ kube/services/services.go | 37 ++++++++ 12 files changed, 349 insertions(+), 65 deletions(-) diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 9d8d3f02328e8..6b192b41605f1 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -101,6 +101,10 @@ // cluster using the same hostname (in this case, the MagicDNS name of the ingress proxy) // as a non-cluster workload on tailnet. // This is only meant to be configured by the Kubernetes operator. +// - TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT: If set to true and if this +// containerboot instance is not running in Kubernetes, autoadvertise any services +// defined in the devices serve config, and unadvertise on shutdown. 
Defaults +// to `true`, but can be disabled to allow user specific advertisement configuration. // // When running on Kubernetes, containerboot defaults to storing state in the // "tailscale" kube secret. To store state on local disk instead, set @@ -137,6 +141,7 @@ import ( kubeutils "tailscale.com/k8s-operator" healthz "tailscale.com/kube/health" "tailscale.com/kube/kubetypes" + klc "tailscale.com/kube/localclient" "tailscale.com/kube/metrics" "tailscale.com/kube/services" "tailscale.com/tailcfg" @@ -155,6 +160,10 @@ func newNetfilterRunner(logf logger.Logf) (linuxfw.NetfilterRunner, error) { return linuxfw.New(logf, "") } +func getAutoAdvertiseBool() bool { + return defaultBool("TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", true) +} + func main() { if err := run(); err != nil && !errors.Is(err, context.Canceled) { log.Fatal(err) @@ -199,7 +208,7 @@ func run() error { defer cancel() var kc *kubeClient - if cfg.InKubernetes { + if cfg.KubeSecret != "" { kc, err = newKubeClient(cfg.Root, cfg.KubeSecret) if err != nil { return fmt.Errorf("error initializing kube client: %w", err) @@ -229,6 +238,7 @@ func run() error { ctx, cancel := context.WithTimeout(context.Background(), 25*time.Second) defer cancel() + // we are shutting down, we always want to unadvertise here if err := services.EnsureServicesNotAdvertised(ctx, client, log.Printf); err != nil { log.Printf("Error ensuring services are not advertised: %v", err) } @@ -652,9 +662,22 @@ runLoop: healthCheck.Update(len(addrs) != 0) } + var prevServeConfig *ipn.ServeConfig + if getAutoAdvertiseBool() { + prevServeConfig, err = client.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("autoadvertisement: failed to get serve config: %w", err) + } + + err = refreshAdvertiseServices(ctx, prevServeConfig, klc.New(client)) + if err != nil { + return fmt.Errorf("autoadvertisement: failed to refresh advertise services: %w", err) + } + } + if cfg.ServeConfigPath != "" { triggerWatchServeConfigChanges.Do(func() { - go 
watchServeConfigChanges(ctx, certDomainChanged, certDomain, client, kc, cfg) + go watchServeConfigChanges(ctx, certDomainChanged, certDomain, client, kc, cfg, prevServeConfig) }) } diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 6eeb59c9b2e7e..1970fb4bfa449 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -1009,6 +1009,25 @@ func TestContainerBoot(t *testing.T) { }, } }, + "serve_config_with_service_auto_advertisement": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_SERVE_CONFIG": filepath.Join(env.d, "etc/tailscaled/serve-config-with-services.json"), + "TS_AUTHKEY": "tskey-key", + }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking", + "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", + }, + }, + { + Notify: runningNotify, + }, + }, + } + }, "kube_shutdown_during_state_write": func(env *testEnv) testCase { return testCase{ Env: map[string]string{ @@ -1159,7 +1178,7 @@ func TestContainerBoot(t *testing.T) { return nil }) if err != nil { - t.Fatalf("phase %d: %v", i, err) + t.Fatalf("test: %q phase %d: %v", name, i, err) } err = tstest.WaitFor(2*time.Second, func() error { for path, want := range p.WantFiles { @@ -1340,10 +1359,16 @@ func (lc *localAPI) Notify(n *ipn.Notify) { func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case "/localapi/v0/serve-config": - if r.Method != "POST" { + switch r.Method { + case "GET": + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(&ipn.ServeConfig{}) + return + case "POST": + return + default: panic(fmt.Sprintf("unsupported method %q", r.Method)) } - return case "/localapi/v0/watch-ipn-bus": if r.Method != "GET" { panic(fmt.Sprintf("unsupported method %q", r.Method)) @@ -1355,10 +1380,19 @@ func (lc *localAPI) 
ServeHTTP(w http.ResponseWriter, r *http.Request) { w.Write([]byte("fake metrics")) return case "/localapi/v0/prefs": - if r.Method != "GET" { + switch r.Method { + case "GET": + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(&ipn.Prefs{}) + return + case "PATCH": + // EditPrefs - just return empty prefs + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(&ipn.Prefs{}) + return + default: panic(fmt.Sprintf("unsupported method %q", r.Method)) } - return default: panic(fmt.Sprintf("unsupported path %q", r.URL.Path)) } @@ -1635,6 +1669,13 @@ func newTestEnv(t *testing.T) testEnv { tailscaledConf := &ipn.ConfigVAlpha{AuthKey: ptr.To("foo"), Version: "alpha0"} serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}} + serveConfWithServices := ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:test-service-1": {}, + "svc:test-service-2": {}, + }, + } egressCfg := egressSvcConfig("foo", "foo.tailnetxyz.ts.net") dirs := []string{ @@ -1652,15 +1693,16 @@ func newTestEnv(t *testing.T) testEnv { } } files := map[string][]byte{ - "usr/bin/tailscaled": fakeTailscaled, - "usr/bin/tailscale": fakeTailscale, - "usr/bin/iptables": fakeTailscale, - "usr/bin/ip6tables": fakeTailscale, - "dev/net/tun": []byte(""), - "proc/sys/net/ipv4/ip_forward": []byte("0"), - "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), - "etc/tailscaled/cap-95.hujson": mustJSON(t, tailscaledConf), - "etc/tailscaled/serve-config.json": mustJSON(t, serveConf), + "usr/bin/tailscaled": fakeTailscaled, + "usr/bin/tailscale": fakeTailscale, + "usr/bin/iptables": fakeTailscale, + "usr/bin/ip6tables": fakeTailscale, + "dev/net/tun": []byte(""), + "proc/sys/net/ipv4/ip_forward": []byte("0"), + "proc/sys/net/ipv6/conf/all/forwarding": []byte("0"), + "etc/tailscaled/cap-95.hujson": mustJSON(t, tailscaledConf), + 
"etc/tailscaled/serve-config.json": mustJSON(t, serveConf), + "etc/tailscaled/serve-config-with-services.json": mustJSON(t, serveConfWithServices), filepath.Join("etc/tailscaled/", egressservices.KeyEgressServices): mustJSON(t, egressCfg), filepath.Join("etc/tailscaled/", egressservices.KeyHEPPings): []byte("4"), } diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index bc154c7e9f258..f64d2d24f681f 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -9,6 +9,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "log" "os" "path/filepath" @@ -22,6 +23,7 @@ import ( "tailscale.com/kube/certs" "tailscale.com/kube/kubetypes" klc "tailscale.com/kube/localclient" + "tailscale.com/kube/services" "tailscale.com/types/netmap" ) @@ -29,8 +31,9 @@ import ( // the serve config from it, replacing ${TS_CERT_DOMAIN} with certDomain, and // applies it to lc. It exits when ctx is canceled. cdChanged is a channel that // is written to when the certDomain changes, causing the serve config to be -// re-read and applied. -func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *local.Client, kc *kubeClient, cfg *settings) { +// re-read and applied. prevServeConfig is the serve config that was fetched +// during startup. This will be refreshed by the goroutine when serve config changes. 
+func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *local.Client, kc *kubeClient, cfg *settings, prevServeConfig *ipn.ServeConfig) { if certDomainAtomic == nil { panic("certDomainAtomic must not be nil") } @@ -53,11 +56,18 @@ func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDom } var certDomain string - var prevServeConfig *ipn.ServeConfig var cm *certs.CertManager if cfg.CertShareMode == "rw" { cm = certs.NewCertManager(klc.New(lc), log.Printf) } + + var err error + if prevServeConfig == nil { + prevServeConfig, err = lc.GetServeConfig(ctx) + if err != nil { + log.Fatalf("serve proxy: failed to get serve config: %v", err) + } + } for { select { case <-ctx.Done(): @@ -70,35 +80,68 @@ func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDom // k8s handles these mounts. So just re-read the file and apply it // if it's changed. } - sc, err := readServeConfig(cfg.ServeConfigPath, certDomain) - if err != nil { - log.Fatalf("serve proxy: failed to read serve config: %v", err) - } - if sc == nil { - log.Printf("serve proxy: no serve config at %q, skipping", cfg.ServeConfigPath) - continue - } - if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) { - continue - } - if err := updateServeConfig(ctx, sc, certDomain, lc); err != nil { - log.Fatalf("serve proxy: error updating serve config: %v", err) - } - if kc != nil && kc.canPatch { - if err := kc.storeHTTPSEndpoint(ctx, certDomain); err != nil { - log.Fatalf("serve proxy: error storing HTTPS endpoint: %v", err) + + var sc *ipn.ServeConfig + if cfg.ServeConfigPath != "" { + sc, err := readServeConfig(cfg.ServeConfigPath, certDomain) + if err != nil { + log.Fatalf("serve proxy: failed to read serve config: %v", err) } + if sc == nil { + log.Printf("serve proxy: no serve config at %q, skipping", cfg.ServeConfigPath) + continue + } + if prevServeConfig != nil && reflect.DeepEqual(sc, 
prevServeConfig) { + continue + } + if err := updateServeConfig(ctx, sc, certDomain, klc.New(lc)); err != nil { + log.Fatalf("serve proxy: error updating serve config: %v", err) + } + if kc != nil && kc.canPatch { + if err := kc.storeHTTPSEndpoint(ctx, certDomain); err != nil { + log.Fatalf("serve proxy: error storing HTTPS endpoint: %v", err) + } + } + prevServeConfig = sc + if cfg.CertShareMode != "rw" { + continue + } + if err := cm.EnsureCertLoops(ctx, sc); err != nil { + log.Fatalf("serve proxy: error ensuring cert loops: %v", err) + } + } else { + log.Printf("serve config path not provided.") + sc = prevServeConfig } - prevServeConfig = sc - if cfg.CertShareMode != "rw" { - continue - } - if err := cm.EnsureCertLoops(ctx, sc); err != nil { - log.Fatalf("serve proxy: error ensuring cert loops: %v", err) + + // if we are running in kubernetes, we want to leave advertisement to the operator + // to do (by updating the serve config) + if getAutoAdvertiseBool() { + if err := refreshAdvertiseServices(ctx, sc, klc.New(lc)); err != nil { + log.Fatalf("error refreshing advertised services: %v", err) + } } } } +func refreshAdvertiseServices(ctx context.Context, sc *ipn.ServeConfig, lc klc.LocalClient) error { + if sc == nil || len(sc.Services) == 0 { + return nil + } + + var svcs []string + for svc := range sc.Services { + svcs = append(svcs, svc.String()) + } + + err := services.EnsureServicesAdvertised(ctx, svcs, lc, log.Printf) + if err != nil { + return fmt.Errorf("failed to ensure services advertised: %w", err) + } + + return nil +} + func certDomainFromNetmap(nm *netmap.NetworkMap) string { if len(nm.DNS.CertDomains) == 0 { return "" @@ -106,13 +149,7 @@ func certDomainFromNetmap(nm *netmap.NetworkMap) string { return nm.DNS.CertDomains[0] } -// localClient is a subset of [local.Client] that can be mocked for testing. 
-type localClient interface { - SetServeConfig(context.Context, *ipn.ServeConfig) error - CertPair(context.Context, string) ([]byte, []byte, error) -} - -func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc localClient) error { +func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc klc.LocalClient) error { if !isValidHTTPSConfig(certDomain, sc) { return nil } diff --git a/cmd/containerboot/serve_test.go b/cmd/containerboot/serve_test.go index 0683346f7159a..5da5ef5f737c3 100644 --- a/cmd/containerboot/serve_test.go +++ b/cmd/containerboot/serve_test.go @@ -12,9 +12,10 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/kube/kubetypes" + "tailscale.com/kube/localclient" + "tailscale.com/tailcfg" ) func TestUpdateServeConfig(t *testing.T) { @@ -65,13 +66,13 @@ func TestUpdateServeConfig(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - fakeLC := &fakeLocalClient{} + fakeLC := &localclient.FakeLocalClient{} err := updateServeConfig(context.Background(), tt.sc, tt.certDomain, fakeLC) if err != nil { t.Errorf("updateServeConfig() error = %v", err) } - if fakeLC.setServeCalled != tt.wantCall { - t.Errorf("SetServeConfig() called = %v, want %v", fakeLC.setServeCalled, tt.wantCall) + if fakeLC.SetServeCalled != tt.wantCall { + t.Errorf("SetServeConfig() called = %v, want %v", fakeLC.SetServeCalled, tt.wantCall) } }) } @@ -196,18 +197,114 @@ func TestReadServeConfig(t *testing.T) { } } -type fakeLocalClient struct { - *local.Client - setServeCalled bool -} +func TestRefreshAdvertiseServices(t *testing.T) { + tests := []struct { + name string + sc *ipn.ServeConfig + wantServices []string + wantEditPrefsCalled bool + wantErr bool + }{ + { + name: "nil_serve_config", + sc: nil, + wantEditPrefsCalled: false, + }, + { + name: "empty_serve_config", + sc: &ipn.ServeConfig{}, + wantEditPrefsCalled: false, + }, + { + 
name: "no_services_defined", + sc: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + }, + wantEditPrefsCalled: false, + }, + { + name: "single_service", + sc: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-service": {}, + }, + }, + wantServices: []string{"svc:my-service"}, + wantEditPrefsCalled: true, + }, + { + name: "multiple_services", + sc: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:service-a": {}, + "svc:service-b": {}, + "svc:service-c": {}, + }, + }, + wantServices: []string{"svc:service-a", "svc:service-b", "svc:service-c"}, + wantEditPrefsCalled: true, + }, + { + name: "services_with_tcp_and_web", + sc: &ipn.ServeConfig{ + TCP: map[uint16]*ipn.TCPPortHandler{ + 80: {HTTP: true}, + }, + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "example.com:443": {}, + }, + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:frontend": {}, + "svc:backend": {}, + }, + }, + wantServices: []string{"svc:frontend", "svc:backend"}, + wantEditPrefsCalled: true, + }, + } -func (m *fakeLocalClient) SetServeConfig(ctx context.Context, cfg *ipn.ServeConfig) error { - m.setServeCalled = true - return nil -} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeLC := &localclient.FakeLocalClient{} + err := refreshAdvertiseServices(context.Background(), tt.sc, fakeLC) -func (m *fakeLocalClient) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return nil, nil, nil + if (err != nil) != tt.wantErr { + t.Errorf("refreshAdvertiseServices() error = %v, wantErr %v", err, tt.wantErr) + } + + if tt.wantEditPrefsCalled != (len(fakeLC.EditPrefsCalls) > 0) { + t.Errorf("EditPrefs called = %v, want %v", len(fakeLC.EditPrefsCalls) > 0, tt.wantEditPrefsCalled) + } + + if tt.wantEditPrefsCalled { + if len(fakeLC.EditPrefsCalls) != 1 { + t.Fatalf("expected 1 EditPrefs call, got %d", len(fakeLC.EditPrefsCalls)) + } + + mp := 
fakeLC.EditPrefsCalls[0] + if !mp.AdvertiseServicesSet { + t.Error("AdvertiseServicesSet should be true") + } + + if len(mp.AdvertiseServices) != len(tt.wantServices) { + t.Errorf("AdvertiseServices length = %d, want %d", len(mp.Prefs.AdvertiseServices), len(tt.wantServices)) + } + + advertised := make(map[string]bool) + for _, svc := range mp.AdvertiseServices { + advertised[svc] = true + } + + for _, want := range tt.wantServices { + if !advertised[want] { + t.Errorf("expected service %q to be advertised, but it wasn't", want) + } + } + } + }) + } } func TestHasHTTPSEndpoint(t *testing.T) { diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index 181a94dd71114..e6147717bb39a 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -107,7 +107,12 @@ func configFromEnv() (*settings, error) { UserspaceMode: defaultBool("TS_USERSPACE", true), StateDir: defaultEnv("TS_STATE_DIR", ""), AcceptDNS: defaultEnvBoolPointer("TS_ACCEPT_DNS"), - KubeSecret: defaultEnv("TS_KUBE_SECRET", "tailscale"), + KubeSecret: func() string { + if os.Getenv("KUBERNETES_SERVICE_HOST") != "" { + return defaultEnv("TS_KUBE_SECRET", "tailscale") + } + return defaultEnv("TS_KUBE_SECRET", "") + }(), SOCKSProxyAddr: defaultEnv("TS_SOCKS5_SERVER", ""), HTTPProxyAddr: defaultEnv("TS_OUTBOUND_HTTP_PROXY_LISTEN", ""), Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"), diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index 9990600c84c65..6f4ed77e76d72 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -69,7 +69,7 @@ func startTailscaled(ctx context.Context, cfg *settings) (*local.Client, *os.Pro func tailscaledArgs(cfg *settings) []string { args := []string{"--socket=" + cfg.Socket} switch { - case cfg.InKubernetes && cfg.KubeSecret != "": + case cfg.KubeSecret != "": args = append(args, "--state=kube:"+cfg.KubeSecret) if cfg.StateDir == "" { cfg.StateDir = "/tmp" diff --git 
a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 6bce004eaa88d..05e0ed0b26013 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -173,6 +173,10 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)", }, + { + Name: "TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", + Value: "false", + }, { // TODO(tomhjp): This is tsrecorder-specific and does nothing. Delete. Name: "TS_STATE", diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index e81fe2d66f6ed..85aab2e8a0d2a 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -692,6 +692,10 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)", }, + corev1.EnvVar{ + Name: "TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", + Value: "false", + }, corev1.EnvVar{ Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)", diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 0e4a3eee40e73..54b7ead55f7ff 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -91,6 +91,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, + {Name: "TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", Value: "false"}, {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, {Name: "TS_DEBUG_ACME_FORCE_RENEWAL", Value: "true"}, 
}, @@ -287,6 +288,7 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.name"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "POD_UID", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{APIVersion: "", FieldPath: "metadata.uid"}, ResourceFieldRef: nil, ConfigMapKeyRef: nil, SecretKeyRef: nil}}, {Name: "TS_KUBE_SECRET", Value: "$(POD_NAME)"}, + {Name: "TS_EXPERIMENTAL_SERVICE_AUTO_ADVERTISEMENT", Value: "false"}, {Name: "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR", Value: "/etc/tsconfig/$(POD_NAME)"}, {Name: "TS_DEBUG_ACME_FORCE_RENEWAL", Value: "true"}, {Name: "TS_SERVE_CONFIG", Value: "/etc/tailscaled/$(POD_NAME)/serve-config"}, diff --git a/kube/localclient/fake-client.go b/kube/localclient/fake-client.go index 1bce4bef00d6f..a244ce31a10c9 100644 --- a/kube/localclient/fake-client.go +++ b/kube/localclient/fake-client.go @@ -12,6 +12,29 @@ import ( type FakeLocalClient struct { FakeIPNBusWatcher + SetServeCalled bool + EditPrefsCalls []*ipn.MaskedPrefs + GetPrefsResult *ipn.Prefs +} + +func (m *FakeLocalClient) SetServeConfig(ctx context.Context, cfg *ipn.ServeConfig) error { + m.SetServeCalled = true + return nil +} + +func (m *FakeLocalClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) { + m.EditPrefsCalls = append(m.EditPrefsCalls, mp) + if m.GetPrefsResult == nil { + return &ipn.Prefs{}, nil + } + return m.GetPrefsResult, nil +} + +func (m *FakeLocalClient) GetPrefs(ctx context.Context) (*ipn.Prefs, error) { + if m.GetPrefsResult == nil { + return &ipn.Prefs{}, nil + } + return m.GetPrefsResult, nil } func (f *FakeLocalClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { diff --git a/kube/localclient/local-client.go b/kube/localclient/local-client.go index 8cc0d41ffe473..b8d40f4067c0e 100644 --- 
a/kube/localclient/local-client.go +++ b/kube/localclient/local-client.go @@ -17,6 +17,8 @@ import ( // for easier testing. type LocalClient interface { WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) + SetServeConfig(context.Context, *ipn.ServeConfig) error + EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) CertIssuer } @@ -40,6 +42,14 @@ type localClient struct { lc *local.Client } +func (lc *localClient) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { + return lc.lc.SetServeConfig(ctx, config) +} + +func (lc *localClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) { + return lc.lc.EditPrefs(ctx, mp) +} + func (lc *localClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (IPNBusWatcher, error) { return lc.lc.WatchIPNBus(ctx, mask) } diff --git a/kube/services/services.go b/kube/services/services.go index 36566c2855a9f..0c27f888f5f7d 100644 --- a/kube/services/services.go +++ b/kube/services/services.go @@ -12,9 +12,46 @@ import ( "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/kube/localclient" "tailscale.com/types/logger" ) +// EnsureServicesAdvertised is a function that gets called on containerboot +// startup and ensures that Services get advertised if they exist. +func EnsureServicesAdvertised(ctx context.Context, services []string, lc localclient.LocalClient, logf logger.Logf) error { + if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: services, + }, + }); err != nil { + // EditPrefs only returns an error if it fails _set_ its local prefs. + // If it fails to _persist_ the prefs in state, we don't get an error + // and we continue waiting below, as control will failover as usual. + return fmt.Errorf("error setting prefs AdvertiseServices: %w", err) + } + + // Services use the same (failover XOR regional routing) mechanism that + // HA subnet routers use. 
Unfortunately we don't yet get a reliable signal + // from control that it's responded to our unadvertisement, so the best we + // can do is wait for 20 seconds, where 15s is the approximate maximum time + // it should take for control to choose a new primary, and 5s is for buffer. + // + // Note: There is no guarantee that clients have been _informed_ of the new + // primary no matter how long we wait. We would need a mechanism to await + // netmap updates for peers to know for sure. + // + // See https://tailscale.com/kb/1115/high-availability for more details. + // TODO(tomhjp): Wait for a netmap update instead of sleeping when control + // supports that. + select { + case <-ctx.Done(): + return nil + case <-time.After(20 * time.Second): + return nil + } +} + // EnsureServicesNotAdvertised is a function that gets called on containerboot // or k8s-proxy termination and ensures that any currently advertised Services // get unadvertised to give clients time to switch to another node before this From c48b7364591d43f5760a9e4cd75828b36dd8262b Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 23 Feb 2026 15:15:24 +0000 Subject: [PATCH 1007/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/apple.md | 4 ++-- licenses/tailscale.md | 6 +++--- licenses/windows.md | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/licenses/apple.md b/licenses/apple.md index 4170a4c8bac49..93afd9385cdbe 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -11,7 +11,7 @@ See also the dependencies in the [Tailscale CLI][]. 
## Go Packages - - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) + - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.1/LICENSE)) - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.41.0/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.32.5/config/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.19.5/credentials/LICENSE.txt)) @@ -57,7 +57,7 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.22/LICENSE)) + - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.25/LICENSE)) - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) 
([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) - [github.com/safchain/ethtool](https://pkg.go.dev/github.com/safchain/ethtool) ([Apache-2.0](https://github.com/safchain/ethtool/blob/v0.3.0/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 9ccc37adb22cc..521b6ff9ce887 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -13,7 +13,7 @@ well as an [option for macOS][]. Some packages may only be included on certain architectures or operating systems. - - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) + - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.2.0/LICENSE)) - [fyne.io/systray](https://pkg.go.dev/fyne.io/systray) ([Apache-2.0](https://github.com/fyne-io/systray/blob/4856ac3adc3c/LICENSE)) - [github.com/Kodeworks/golang-image-ico](https://pkg.go.dev/github.com/Kodeworks/golang-image-ico) ([BSD-3-Clause](https://github.com/Kodeworks/golang-image-ico/blob/73f0f4cfade9/LICENSE)) - [github.com/akutz/memconn](https://pkg.go.dev/github.com/akutz/memconn) ([Apache-2.0](https://github.com/akutz/memconn/blob/v0.1.0/LICENSE)) @@ -68,7 +68,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/peterbourgon/ff/v3](https://pkg.go.dev/github.com/peterbourgon/ff/v3) ([Apache-2.0](https://github.com/peterbourgon/ff/blob/v3.4.0/LICENSE)) - - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) + - 
[github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.25/LICENSE)) - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/pkg/sftp](https://pkg.go.dev/github.com/pkg/sftp) ([BSD-2-Clause](https://github.com/pkg/sftp/blob/v1.13.6/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) @@ -90,7 +90,7 @@ Some packages may only be included on certain architectures or operating systems - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/b7579e27:LICENSE)) - [golang.org/x/image](https://pkg.go.dev/golang.org/x/image) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.48.0:LICENSE)) - - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.32.0:LICENSE)) + - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.33.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.19.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.40.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.38.0:LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 03d0ce40ef717..29581566c68ba 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -9,7 +9,7 @@ Windows][]. 
See also the dependencies in the [Tailscale CLI][]. ## Go Packages - - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) + - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.1/LICENSE)) - [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE)) - [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE)) - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) From 0ea55d37e3d3c42ef4631e8f9237fae80f089fdc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Feb 2026 12:00:20 +0000 Subject: [PATCH 1008/1093] .github: bump peter-evans/create-pull-request from 8.0.0 to 8.1.0 Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 8.0.0 to 8.1.0. - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/98357b18bf14b5342f975ff684046ec3b2a07725...c0f553fe549906ede9cf27b5156039d195d2ece0) --- updated-dependencies: - dependency-name: peter-evans/create-pull-request dependency-version: 8.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/update-flake.yml | 2 +- .github/workflows/update-webclient-prebuilt.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 0c40758543458..22ee80397fb3c 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -35,7 +35,7 @@ jobs: private-key: ${{ secrets.CODE_UPDATER_APP_PRIVATE_KEY }} - name: Send pull request - uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 #v8.0.0 + uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 #v8.1.0 with: token: ${{ steps.generate-token.outputs.token }} author: Flakes Updater diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index 2f4f676c5d354..24e6535dcc385 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -32,7 +32,7 @@ jobs: - name: Send pull request id: pull-request - uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 #v8.0.0 + uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 #v8.1.0 with: token: ${{ steps.generate-token.outputs.token }} author: OSS Updater From 98c77ac2f5b16f7f3a3c8b5396154ab2980909d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Feb 2026 12:00:30 +0000 Subject: [PATCH 1009/1093] .github: bump actions/cache from 4.2.4 to 5.0.3 Bumps [actions/cache](https://github.com/actions/cache) from 4.2.4 to 5.0.3. 
- [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/0400d5f644dc74513175e3cd8d07132dd4860809...cdf6c1fa76f9f475f3d7449005a359c84ca0f306) --- updated-dependencies: - dependency-name: actions/cache dependency-version: 5.0.3 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: Mario Minardi --- .github/workflows/test.yml | 58 +++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 57a638d2977da..5610bda10f9b8 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -58,7 +58,7 @@ jobs: # See if the cache entry already exists to avoid downloading it # and doing the cache write again. - id: check-cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache # relative to workspace; see env note at top of file key: ${{ steps.hash.outputs.key }} @@ -70,7 +70,7 @@ jobs: run: go mod download - name: Cache Go modules if: steps.check-cache.outputs.cache-hit != 'true' - uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache # relative to workspace; see env note at top of file key: ${{ steps.hash.outputs.key }} @@ -93,7 +93,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -131,14 +131,14 @@ jobs: with: path: src - name: Restore Go module cache - uses: 
actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - name: Restore Cache id: restore-cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: # Note: this is only restoring the build cache. Mod cache is shared amongst # all jobs in the workflow. @@ -209,7 +209,7 @@ jobs: - name: Save Cache # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' - uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: # Note: this is only saving the build cache. Mod cache is shared amongst # all jobs in the workflow. 
@@ -251,7 +251,7 @@ jobs: cache: false - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -309,14 +309,14 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - name: Restore Cache id: restore-cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: ~/Library/Caches/go-build key: ${{ runner.os }}-go-test-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} @@ -352,7 +352,7 @@ jobs: - name: Save Cache # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. 
if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' - uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: ~/Library/Caches/go-build key: ${{ runner.os }}-go-test-${{ hashFiles('**/go.sum') }}-${{ github.job }}-${{ github.run_id }} @@ -369,7 +369,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -392,7 +392,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -448,14 +448,14 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - name: Restore Cache id: restore-cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: # Note: this is only restoring the build cache. Mod cache is shared amongst # all jobs in the workflow. @@ -490,7 +490,7 @@ jobs: - name: Save Cache # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. 
if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' - uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: # Note: this is only saving the build cache. Mod cache is shared amongst # all jobs in the workflow. @@ -509,7 +509,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -547,14 +547,14 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - name: Restore Cache id: restore-cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: # Note: this is only restoring the build cache. Mod cache is shared amongst # all jobs in the workflow. @@ -582,7 +582,7 @@ jobs: - name: Save Cache # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' - uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: # Note: this is only saving the build cache. Mod cache is shared amongst # all jobs in the workflow. @@ -607,7 +607,7 @@ jobs: # some Android breakages early. 
# TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482 - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -628,14 +628,14 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - name: Restore Cache id: restore-cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: # Note: this is only restoring the build cache. Mod cache is shared amongst # all jobs in the workflow. @@ -668,7 +668,7 @@ jobs: - name: Save Cache # Save cache even on failure, but only on cache miss and main branch to avoid thrashing. if: always() && steps.restore-cache.outputs.cache-hit != 'true' && github.ref == 'refs/heads/main' - uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/save@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: # Note: this is only saving the build cache. Mod cache is shared amongst # all jobs in the workflow. 
@@ -686,7 +686,7 @@ jobs: - name: Set GOMODCACHE env run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -773,7 +773,7 @@ jobs: - name: Set GOMODCACHE env run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -791,7 +791,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -815,7 +815,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -837,7 +837,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} @@ -891,7 +891,7 @@ jobs: with: path: src - name: Restore Go module cache - uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 + uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} From 
3d2bb5baa82ed0bdbcc29e4283030653f47a88e0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Feb 2026 11:59:46 +0000 Subject: [PATCH 1010/1093] .github: bump actions/download-artifact from 6.0.0 to 7.0.0 Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 6.0.0 to 7.0.0. - [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/018cc2cf5baa6db3ef3c5f8a56943fffe632ef53...37930b1c2abaa49bbe596cd826c3c89aef350131) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-version: 7.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/cigocacher.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cigocacher.yml b/.github/workflows/cigocacher.yml index c4dd0c3c509a5..cc2fb87faf3b0 100644 --- a/.github/workflows/cigocacher.yml +++ b/.github/workflows/cigocacher.yml @@ -36,7 +36,7 @@ jobs: contents: write steps: - name: Download all artifacts - uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 with: pattern: 'cigocacher-*' merge-multiple: true From 8be5affa6da48f56435abc3f55f565d362282d5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Feb 2026 12:00:13 +0000 Subject: [PATCH 1011/1093] .github: bump actions/checkout from 6.0.1 to 6.0.2 Bumps [actions/checkout](https://github.com/actions/checkout) from 6.0.1 to 6.0.2. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/8e8c483db84b4bee98b60c0593521ed34d9990e8...de0fac2e4500dabe0009e67214ff5f5447ce83dd) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 6.0.2 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/checklocks.yml | 2 +- .github/workflows/cigocacher.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/docker-base.yml | 2 +- .github/workflows/docker-file-build.yml | 2 +- .github/workflows/flakehub-publish-tagged.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/govulncheck.yml | 2 +- .github/workflows/installer.yml | 2 +- .github/workflows/kubemanifests.yaml | 2 +- .github/workflows/natlab-integrationtest.yml | 2 +- .github/workflows/pin-github-actions.yml | 2 +- .../workflows/request-dataplane-review.yml | 2 +- .github/workflows/ssh-integrationtest.yml | 2 +- .github/workflows/test.yml | 38 +++++++++---------- .github/workflows/update-flake.yml | 2 +- .../workflows/update-webclient-prebuilt.yml | 2 +- .github/workflows/vet.yml | 2 +- .github/workflows/webclient.yml | 2 +- 19 files changed, 37 insertions(+), 37 deletions(-) diff --git a/.github/workflows/checklocks.yml b/.github/workflows/checklocks.yml index ee950b4fc9212..5768cf05af634 100644 --- a/.github/workflows/checklocks.yml +++ b/.github/workflows/checklocks.yml @@ -18,7 +18,7 @@ jobs: runs-on: [ ubuntu-latest ] steps: - name: Check out code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Build checklocks run: ./tool/go build -o /tmp/checklocks gvisor.dev/gvisor/tools/checklocks/cmd/checklocks diff --git a/.github/workflows/cigocacher.yml 
b/.github/workflows/cigocacher.yml index cc2fb87faf3b0..f19e004d3e726 100644 --- a/.github/workflows/cigocacher.yml +++ b/.github/workflows/cigocacher.yml @@ -17,7 +17,7 @@ jobs: GOARCH: "${{ matrix.GOARCH }}" CGO_ENABLED: "0" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Build run: | OUT="cigocacher$(./tool/go env GOEXE)" diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 39133dc40c3dd..49657de707f17 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -45,7 +45,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 # Install a more recent Go that understands modern go.mod content. - name: Install Go diff --git a/.github/workflows/docker-base.yml b/.github/workflows/docker-base.yml index a47669f6ade8a..a3eac2c24e691 100644 --- a/.github/workflows/docker-base.yml +++ b/.github/workflows/docker-base.yml @@ -9,7 +9,7 @@ jobs: build-and-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: "build and test" run: | set -e diff --git a/.github/workflows/docker-file-build.yml b/.github/workflows/docker-file-build.yml index 9a56fd05758a9..7ee2468682695 100644 --- a/.github/workflows/docker-file-build.yml +++ b/.github/workflows/docker-file-build.yml @@ -8,6 +8,6 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: "Build Docker image" run: docker build . 
diff --git a/.github/workflows/flakehub-publish-tagged.yml b/.github/workflows/flakehub-publish-tagged.yml index 798e1708a1c2a..c781e30e5154f 100644 --- a/.github/workflows/flakehub-publish-tagged.yml +++ b/.github/workflows/flakehub-publish-tagged.yml @@ -17,7 +17,7 @@ jobs: id-token: "write" contents: "read" steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}" - uses: DeterminateSystems/nix-installer-action@c5a866b6ab867e88becbed4467b93592bce69f8a # v21 diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 22d9d3c467ad9..dbabb361e14fa 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -27,7 +27,7 @@ jobs: name: lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 with: diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index c99cb11d3eff7..2b46aa9b06e57 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Check out code into the Go module directory - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Install govulncheck run: ./tool/go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/.github/workflows/installer.yml b/.github/workflows/installer.yml index d7db30782470b..6fc8913c4e19c 100644 --- a/.github/workflows/installer.yml +++ b/.github/workflows/installer.yml @@ -99,7 +99,7 @@ jobs: contains(matrix.image, 'parrotsec') || contains(matrix.image, 'kalilinux') - 
name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: run installer run: scripts/installer.sh env: diff --git a/.github/workflows/kubemanifests.yaml b/.github/workflows/kubemanifests.yaml index 6812b69d6e702..40734a015dad3 100644 --- a/.github/workflows/kubemanifests.yaml +++ b/.github/workflows/kubemanifests.yaml @@ -17,7 +17,7 @@ jobs: runs-on: [ ubuntu-latest ] steps: - name: Check out code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Build and lint Helm chart run: | eval `./tool/go run ./cmd/mkversion` diff --git a/.github/workflows/natlab-integrationtest.yml b/.github/workflows/natlab-integrationtest.yml index e10d879c3daa5..c3821db17f22f 100644 --- a/.github/workflows/natlab-integrationtest.yml +++ b/.github/workflows/natlab-integrationtest.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Install qemu run: | sudo rm -f /var/lib/man-db/auto-update diff --git a/.github/workflows/pin-github-actions.yml b/.github/workflows/pin-github-actions.yml index 7c1816d134cd6..836ae46dbfa89 100644 --- a/.github/workflows/pin-github-actions.yml +++ b/.github/workflows/pin-github-actions.yml @@ -22,7 +22,7 @@ jobs: name: pin-github-actions runs-on: ubuntu-latest steps: - - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: pin run: make pin-github-actions - name: check for changed workflow files diff --git a/.github/workflows/request-dataplane-review.yml b/.github/workflows/request-dataplane-review.yml index 2e30ba06d4629..2b66fc7899428 100644 --- 
a/.github/workflows/request-dataplane-review.yml +++ b/.github/workflows/request-dataplane-review.yml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Get access token uses: actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2.2.1 id: generate-token diff --git a/.github/workflows/ssh-integrationtest.yml b/.github/workflows/ssh-integrationtest.yml index 342b8e9362c30..afe2dd2f74683 100644 --- a/.github/workflows/ssh-integrationtest.yml +++ b/.github/workflows/ssh-integrationtest.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Run SSH integration tests run: | make sshintegrationtest \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5610bda10f9b8..3cd71097f66ba 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -49,7 +49,7 @@ jobs: cache-key: ${{ steps.hash.outputs.key }} steps: - name: Checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Compute cache key from go.{mod,sum} @@ -89,7 +89,7 @@ jobs: - shard: '4/4' steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -127,7 +127,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: 
Restore Go module cache @@ -240,7 +240,7 @@ jobs: shard: "2/2" steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: ${{ github.workspace }}/src @@ -293,7 +293,7 @@ jobs: name: Windows (win-tool-go) steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: test-tool-go @@ -305,7 +305,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -365,7 +365,7 @@ jobs: options: --privileged steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -388,7 +388,7 @@ jobs: if: github.repository == 'tailscale/tailscale' steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -444,7 +444,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -505,7 +505,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -543,7 +543,7 @@ jobs: runs-on: ubuntu-24.04 steps: - name: checkout - uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -599,7 +599,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src # Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed @@ -624,7 +624,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -682,7 +682,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Set GOMODCACHE env run: echo "GOMODCACHE=$HOME/.cache/go-mod" >> $GITHUB_ENV - name: Restore Go module cache @@ -767,7 +767,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Set GOMODCACHE env @@ -787,7 +787,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -811,7 +811,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -833,7 +833,7 @@ jobs: needs: gomod-cache steps: - name: checkout - uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache @@ -887,7 +887,7 @@ jobs: steps: - name: checkout - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src - name: Restore Go module cache diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 22ee80397fb3c..4c0da7831b5ba 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Run update-flakes run: ./update-flake.sh diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index 24e6535dcc385..a3d78e1a5b4a8 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -14,7 +14,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Run go get run: | diff --git a/.github/workflows/vet.yml b/.github/workflows/vet.yml index c85e3ec86a67f..574852e62beee 100644 --- a/.github/workflows/vet.yml +++ b/.github/workflows/vet.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: path: src diff --git a/.github/workflows/webclient.yml b/.github/workflows/webclient.yml index 4fc19901d0ef6..1a65eacf56414 100644 --- a/.github/workflows/webclient.yml +++ b/.github/workflows/webclient.yml @@ -22,7 +22,7 @@ 
jobs: steps: - name: Check out code - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Install deps run: ./tool/yarn --cwd client/web - name: Run lint From 980e1c9d5c1db2c5172a1f014fa265b164630e1a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Feb 2026 12:00:34 +0000 Subject: [PATCH 1012/1093] .github: bump actions/upload-artifact from 4.6.2 to 6.0.0 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.2 to 6.0.0. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v4.6.2...b7c566a772e6b6bfb58ed0dc250532a479d7789f) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/cigocacher.yml | 2 +- .github/workflows/test.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cigocacher.yml b/.github/workflows/cigocacher.yml index f19e004d3e726..15aec8af90904 100644 --- a/.github/workflows/cigocacher.yml +++ b/.github/workflows/cigocacher.yml @@ -24,7 +24,7 @@ jobs: ./tool/go build -o "${OUT}" ./cmd/cigocacher/ tar -zcf cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}.tar.gz "${OUT}" - - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 with: name: cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }} path: cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}.tar.gz diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3cd71097f66ba..862420f70f98d 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -756,7 +756,7 @@ jobs: run: | echo 
"artifacts_path=$(realpath .)" >> $GITHUB_ENV - name: upload crash - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 if: steps.run.outcome != 'success' && steps.build.outcome == 'success' with: name: artifacts From a58a8fc1e814364dfbd8443102bdf758de2ae68c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 23 Feb 2026 09:28:46 -0800 Subject: [PATCH 1013/1093] .: permit running binary built with TS_GO_NEXT=1 The old check was too aggressive and required TS_GO_NEXT=1 at runtime as well, which is too strict and onerous. This is a sanity check only (and an outdated one, at that); it's okay for it to be slightly loose and permit two possible values. If either is working, we're already way past the old bug that this was introduced to catch. Updates tailscale/corp#36382 Change-Id: Ib9a62e10382cd889ba590c3539e6b8535c6b19fe Signed-off-by: Brad Fitzpatrick --- assert_ts_toolchain_match.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/assert_ts_toolchain_match.go b/assert_ts_toolchain_match.go index 901dbb8ec83a1..4df0eeb1570d3 100644 --- a/assert_ts_toolchain_match.go +++ b/assert_ts_toolchain_match.go @@ -17,10 +17,10 @@ func init() { panic("binary built with tailscale_go build tag but failed to read build info or find tailscale.toolchain.rev in build info") } want := strings.TrimSpace(GoToolchainRev) - if os.Getenv("TS_GO_NEXT") == "1" { - want = strings.TrimSpace(GoToolchainNextRev) - } - if tsRev != want { + // Also permit the "next" toolchain rev, which is used in the main branch and will eventually become the new "current" rev. + // This allows building with TS_GO_NEXT=1 and then running the resulting binary without TS_GO_NEXT=1. 
+ wantAlt := strings.TrimSpace(GoToolchainNextRev) + if tsRev != want && tsRev != wantAlt { if os.Getenv("TS_PERMIT_TOOLCHAIN_MISMATCH") == "1" { fmt.Fprintf(os.Stderr, "tailscale.toolchain.rev = %q, want %q; but ignoring due to TS_PERMIT_TOOLCHAIN_MISMATCH=1\n", tsRev, want) return From dc80fd6324eb1e2e183408451761ff38a5eeafd2 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Sun, 22 Feb 2026 06:29:55 +0100 Subject: [PATCH 1014/1093] flake: fix default devShell The devshell had the wrong name expected by the flake compat package causing weird behaviour if you loaded it initiating the wrong go compiler. Updates #16637 Signed-off-by: Kristoffer Dalby --- flake.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flake.nix b/flake.nix index e15eeca6a664f..4e315a5cab7e6 100644 --- a/flake.nix +++ b/flake.nix @@ -132,7 +132,7 @@ }); devShells = eachSystem (pkgs: { - devShell = pkgs.mkShell { + default = pkgs.mkShell { packages = with pkgs; [ curl git From 811fe7d18ed832a1b48880ab8d893c7909a900e1 Mon Sep 17 00:00:00 2001 From: Michael Ben-Ami Date: Fri, 20 Feb 2026 17:36:40 +0000 Subject: [PATCH 1015/1093] ipnext,ipnlocal,wgengine/filter: add extension hooks for custom filter matchers Add PacketMatch hooks to the packet filter, allowing extensions to customize filtering decisions: - IngressAllowHooks: checked in RunIn after pre() but before the standard runIn4/runIn6 match rules. Hooks can accept packets to destinations outside the local IP set. First match wins; the returned why string is used for logging. - LinkLocalAllowHooks: checked inside pre() for both ingress and egress, providing exceptions to the default policy of dropping link-local unicast packets. First match wins. The GCP DNS address (169.254.169.254) is always allowed regardless of hooks. PacketMatch returns (match bool, why string) to provide a log reason consistent with the existing filter functions. 
Hooks are registered via the new FilterHooks struct in ipnext.Hooks and wired through to filter.Filter in LocalBackend.updateFilterLocked. Fixes tailscale/corp#35989 Fixes tailscale/corp#37207 Co-Authored-By: Claude Opus 4.6 Signed-off-by: Michael Ben-Ami --- ipn/ipnext/ipnext.go | 34 ++++++ ipn/ipnlocal/local.go | 6 +- wgengine/filter/filter.go | 76 +++++++++++-- wgengine/filter/filter_test.go | 198 +++++++++++++++++++++++++++++++-- 4 files changed, 296 insertions(+), 18 deletions(-) diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 275e28c85bddc..6dea49939af91 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -21,6 +21,7 @@ import ( "tailscale.com/tstime" "tailscale.com/types/logger" "tailscale.com/types/mapx" + "tailscale.com/wgengine/filter" ) // Extension augments LocalBackend with additional functionality. @@ -377,6 +378,39 @@ type Hooks struct { // ShouldUploadServices reports whether this node should include services // in Hostinfo from the portlist extension. ShouldUploadServices feature.Hook[func() bool] + + // Filter contains hooks for the packet filter. + // See [filter.Filter] for details on how these hooks are invoked. + Filter FilterHooks +} + +// FilterHooks contains hooks that extensions can use to customize the packet +// filter. Field names match the corresponding fields in filter.Filter. +type FilterHooks struct { + // IngressAllowHooks are hooks that allow extensions to accept inbound + // packets beyond the standard filter rules. Packets that are not dropped + // by the direction-agnostic pre-check, but would be not accepted by the + // main filter rules, including the check for destinations in the node's + // local IP set, will be accepted if they match one of these hooks. + // As of 2026-02-24, the ingress filter does not implement explicit drop + // rules, but if it does, an explicitly dropped packet will be dropped, + // and these hooks will not be evaluated. 
+	//
+	// Processing of hooks stops after the first one that returns true.
+	// The returned why string of the first match is used in logging.
+	// Returning false does not drop the packet.
+	// See also [filter.Filter.IngressAllowHooks].
+	IngressAllowHooks feature.Hooks[filter.PacketMatch]
+
+	// LinkLocalAllowHooks are hooks that provide exceptions to the default
+	// policy of dropping link-local unicast packets. They run inside the
+	// direction-agnostic pre-checks for both ingress and egress.
+	//
+	// A hook can allow a link-local packet to pass the link-local check,
+	// but the packet is still subject to all other filter rules, and could be
+	// dropped elsewhere. Matching link-local packets are not logged.
+	// See also [filter.Filter.LinkLocalAllowHooks].
+	LinkLocalAllowHooks feature.Hooks[filter.PacketMatch]
 }
 
 // NodeBackend is an interface to query the current node and its peers.
diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go
index 4221b45e5615a..3fccb4399dd13 100644
--- a/ipn/ipnlocal/local.go
+++ b/ipn/ipnlocal/local.go
@@ -2884,7 +2884,11 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) {
 		b.setFilter(filter.NewShieldsUpFilter(localNets, logNets, oldFilter, b.logf))
 	} else {
 		b.logf("[v1] netmap packet filter: %v filters", len(packetFilter))
-		b.setFilter(filter.New(packetFilter, b.srcIPHasCapForFilter, localNets, logNets, oldFilter, b.logf))
+		filt := filter.New(packetFilter, b.srcIPHasCapForFilter, localNets, logNets, oldFilter, b.logf)
+
+		filt.IngressAllowHooks = b.extHost.Hooks().Filter.IngressAllowHooks
+		filt.LinkLocalAllowHooks = b.extHost.Hooks().Filter.LinkLocalAllowHooks
+		b.setFilter(filt)
 	}
 
 	// The filter for a jailed node is the exact same as a ShieldsUp filter.
 	oldJailedFilter := b.e.GetJailedFilter()
diff --git a/wgengine/filter/filter.go b/wgengine/filter/filter.go
index 63a7aee1e461f..b2be836c73395 100644
--- a/wgengine/filter/filter.go
+++ b/wgengine/filter/filter.go
@@ -32,8 +32,9 @@ import (
 type Filter struct {
 	logf logger.Logf
 	// local4 and local6 report whether an IP is "local" to this node, for the
-	// respective address family. All packets coming in over tailscale must have
-	// a destination within local, regardless of the policy filter below.
+	// respective address family. Inbound packets that pass the direction-agnostic
+	// pre-checks and are not accepted by [Filter.IngressAllowHooks] must have a destination
+	// within local to be considered by the policy filter.
 	local4 func(netip.Addr) bool
 	local6 func(netip.Addr) bool
 
@@ -66,8 +67,38 @@ type Filter struct {
 	state *filterState
 
 	shieldsUp bool
+
+	// IngressAllowHooks are hooks that allow extensions to accept inbound
+	// packets beyond the standard filter rules. Packets that are not dropped
+	// by the direction-agnostic pre-check, but would not be accepted by the
+	// main filter rules, including the check for destinations in the node's
+	// local IP set, will be accepted if they match one of these hooks.
+	// As of 2026-02-24, the ingress filter does not implement explicit drop
+	// rules, but if it does, an explicitly dropped packet will be dropped,
+	// and these hooks will not be evaluated.
+	//
+	// Processing of hooks stops after the first one that returns true.
+	// The returned why string of the first match is used in logging.
+	// Returning false does not drop the packet.
+	// See also [filter.Filter.IngressAllowHooks].
+	IngressAllowHooks []PacketMatch
+
+	// LinkLocalAllowHooks are hooks that provide exceptions to the default
+	// policy of dropping link-local unicast packets. They run inside the
+	// direction-agnostic pre-checks for both ingress and egress.
+ // + // A hook can allow a link-local packet to pass the link-local check, + // but the packet is still subject to all other filter rules, and could be + // dropped elsewhere. Matching link-local packets are not logged. + // See also [filter.Filter.LinkLocalAllowHooks]. + LinkLocalAllowHooks []PacketMatch } +// PacketMatch is a function that inspects a packet and reports whether it +// matches a custom filter criterion. If match is true, why should be a short +// human-readable reason for the match, used in filter logging (e.g. "corp-dns ok"). +type PacketMatch func(packet.Parsed) (match bool, why string) + // filterState is a state cache of past seen packets. type filterState struct { mu sync.Mutex @@ -426,6 +457,16 @@ func (f *Filter) RunIn(q *packet.Parsed, rf RunFlags) Response { default: r, why = Drop, "not-ip" } + + if r == noVerdict { + for _, pm := range f.IngressAllowHooks { + if match, why := pm(*q); match { + f.logRateLimit(rf, q, dir, Accept, why) + return Accept + } + } + r = Drop + } f.logRateLimit(rf, q, dir, r, why) return r } @@ -439,6 +480,7 @@ func (f *Filter) RunOut(q *packet.Parsed, rf RunFlags) (Response, usermetric.Dro // already logged return r, reason } + r, why := f.runOut(q) f.logRateLimit(rf, q, dir, r, why) return r, "" @@ -455,12 +497,14 @@ func unknownProtoString(proto ipproto.Proto) string { return s } +// runIn4 returns noVerdict for unaccepted packets that may ultimately +// be accepted through [Filter.IngressAllowHooks]. func (f *Filter) runIn4(q *packet.Parsed) (r Response, why string) { // A compromised peer could try to send us packets for // destinations we didn't explicitly advertise. This check is to // prevent that. 
 	if !f.local4(q.Dst.Addr()) {
-		return Drop, "destination not allowed"
+		return noVerdict, "destination not allowed"
 	}
 
 	switch q.IPProto {
@@ -510,17 +554,19 @@ func (f *Filter) runIn4(q *packet.Parsed) (r Response, why string) {
 		if f.matches4.matchProtoAndIPsOnlyIfAllPorts(q) {
 			return Accept, "other-portless ok"
 		}
-		return Drop, unknownProtoString(q.IPProto)
+		return noVerdict, unknownProtoString(q.IPProto)
 	}
-	return Drop, "no rules matched"
+	return noVerdict, "no rules matched"
 }
 
+// runIn6 returns noVerdict for unaccepted packets that may ultimately
+// be accepted through [Filter.IngressAllowHooks].
 func (f *Filter) runIn6(q *packet.Parsed) (r Response, why string) {
 	// A compromised peer could try to send us packets for
 	// destinations we didn't explicitly advertise. This check is to
 	// prevent that.
 	if !f.local6(q.Dst.Addr()) {
-		return Drop, "destination not allowed"
+		return noVerdict, "destination not allowed"
 	}
 
 	switch q.IPProto {
@@ -570,9 +616,9 @@ func (f *Filter) runIn6(q *packet.Parsed) (r Response, why string) {
 		if f.matches6.matchProtoAndIPsOnlyIfAllPorts(q) {
 			return Accept, "other-portless ok"
 		}
-		return Drop, unknownProtoString(q.IPProto)
+		return noVerdict, unknownProtoString(q.IPProto)
 	}
-	return Drop, "no rules matched"
+	return noVerdict, "no rules matched"
 }
 
 // runOut runs the output-specific part of the filter logic.
@@ -609,6 +655,18 @@ func (d direction) String() string {
 
 var gcpDNSAddr = netaddr.IPv4(169, 254, 169, 254)
 
+func (f *Filter) isAllowedLinkLocal(q *packet.Parsed) bool {
+	if q.Dst.Addr() == gcpDNSAddr {
+		return true
+	}
+	for _, pm := range f.LinkLocalAllowHooks {
+		if match, _ := pm(*q); match {
+			return true
+		}
+	}
+	return false
+}
+
 // pre runs the direction-agnostic filter logic. dir is only used for
 // logging.
func (f *Filter) pre(q *packet.Parsed, rf RunFlags, dir direction) (Response, usermetric.DropReason) { @@ -630,7 +688,7 @@ func (f *Filter) pre(q *packet.Parsed, rf RunFlags, dir direction) (Response, us f.logRateLimit(rf, q, dir, Drop, "multicast") return Drop, usermetric.ReasonMulticast } - if q.Dst.Addr().IsLinkLocalUnicast() && q.Dst.Addr() != gcpDNSAddr { + if q.Dst.Addr().IsLinkLocalUnicast() && !f.isAllowedLinkLocal(q) { f.logRateLimit(rf, q, dir, Drop, "link-local-unicast") return Drop, usermetric.ReasonLinkLocalUnicast } diff --git a/wgengine/filter/filter_test.go b/wgengine/filter/filter_test.go index 4b364d30e85cb..c588a506e0dc9 100644 --- a/wgengine/filter/filter_test.go +++ b/wgengine/filter/filter_test.go @@ -171,12 +171,8 @@ func TestFilter(t *testing.T) { {Drop, parsed(ipproto.TCP, ipWithoutCap.String(), "1.2.3.4", 30000, 22)}, } for i, test := range tests { - aclFunc := filt.runIn4 - if test.p.IPVersion == 6 { - aclFunc = filt.runIn6 - } - if got, why := aclFunc(&test.p); test.want != got { - t.Errorf("#%d runIn got=%v want=%v why=%q packet:%v", i, got, test.want, why, test.p) + if got := filt.RunIn(&test.p, 0); test.want != got { + t.Errorf("#%d RunIn got=%v want=%v packet:%v", i, got, test.want, test.p) continue } if test.p.IPProto == ipproto.TCP { @@ -191,8 +187,8 @@ func TestFilter(t *testing.T) { } // TCP and UDP are treated equivalently in the filter - verify that. 
test.p.IPProto = ipproto.UDP - if got, why := aclFunc(&test.p); test.want != got { - t.Errorf("#%d runIn (UDP) got=%v want=%v why=%q packet:%v", i, got, test.want, why, test.p) + if got := filt.RunIn(&test.p, 0); test.want != got { + t.Errorf("#%d RunIn (UDP) got=%v want=%v packet:%v", i, got, test.want, test.p) } } // Update UDP state @@ -1071,6 +1067,192 @@ type benchOpt struct { udp, udpOpen bool } +func TestIngressAllowHooks(t *testing.T) { + matchSrc := func(ip string) PacketMatch { + return func(q packet.Parsed) (bool, string) { + return q.Src.Addr() == mustIP(ip), "match-src" + } + } + matchDst := func(ip string) PacketMatch { + return func(q packet.Parsed) (bool, string) { + return q.Dst.Addr() == mustIP(ip), "match-dst" + } + } + noMatch := func(q packet.Parsed) (bool, string) { return false, "" } + + tests := []struct { + name string + p packet.Parsed + hooks []PacketMatch + want Response + }{ + { + name: "no_hooks_denied_src", + p: parsed(ipproto.TCP, "99.99.99.99", "1.2.3.4", 0, 22), + want: Drop, + }, + { + name: "non_matching_hook", + p: parsed(ipproto.TCP, "99.99.99.99", "1.2.3.4", 0, 22), + hooks: []PacketMatch{noMatch}, + want: Drop, + }, + { + name: "matching_hook_denied_src", + p: parsed(ipproto.TCP, "99.99.99.99", "1.2.3.4", 0, 22), + hooks: []PacketMatch{matchSrc("99.99.99.99")}, + want: Accept, + }, + { + name: "non_local_dst_no_hooks", + p: parsed(ipproto.TCP, "8.1.1.1", "16.32.48.64", 0, 443), + want: Drop, + }, + { + name: "non_local_dst_with_hook", + p: parsed(ipproto.TCP, "8.1.1.1", "16.32.48.64", 0, 443), + hooks: []PacketMatch{matchDst("16.32.48.64")}, + want: Accept, + }, + { + name: "first_match_wins", + p: parsed(ipproto.TCP, "99.99.99.99", "1.2.3.4", 0, 22), + hooks: []PacketMatch{noMatch, matchSrc("99.99.99.99")}, + want: Accept, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + filt := newFilter(t.Logf) + filt.IngressAllowHooks = tt.hooks + if got := filt.RunIn(&tt.p, 0); got != tt.want { + 
t.Errorf("RunIn = %v; want %v", got, tt.want) + } + }) + } + + // Verify first-match-wins stops calling subsequent hooks. + t.Run("first_match_stops_iteration", func(t *testing.T) { + filt := newFilter(t.Logf) + p := parsed(ipproto.TCP, "99.99.99.99", "1.2.3.4", 0, 22) + var called []int + filt.IngressAllowHooks = []PacketMatch{ + func(q packet.Parsed) (bool, string) { + called = append(called, 0) + return true, "first" + }, + func(q packet.Parsed) (bool, string) { + called = append(called, 1) + return true, "second" + }, + } + filt.RunIn(&p, 0) + if len(called) != 1 || called[0] != 0 { + t.Errorf("called = %v; want [0]", called) + } + }) +} + +func TestLinkLocalAllowHooks(t *testing.T) { + matchDst := func(ip string) PacketMatch { + return func(q packet.Parsed) (bool, string) { + return q.Dst.Addr() == mustIP(ip), "match-dst" + } + } + noMatch := func(q packet.Parsed) (bool, string) { return false, "" } + + llPkt := func() packet.Parsed { + p := parsed(ipproto.UDP, "8.1.1.1", "169.254.1.2", 0, 53) + p.StuffForTesting(1024) + return p + } + gcpPkt := func() packet.Parsed { + p := parsed(ipproto.UDP, "8.1.1.1", "169.254.169.254", 0, 53) + p.StuffForTesting(1024) + return p + } + + tests := []struct { + name string + p packet.Parsed + hooks []PacketMatch + dir direction + want Response + }{ + { + name: "dropped_by_default", + p: llPkt(), + dir: in, + want: Drop, + }, + { + name: "non_matching_hook", + p: llPkt(), + hooks: []PacketMatch{noMatch}, + dir: in, + want: Drop, + }, + { + name: "matching_hook_allows", + p: llPkt(), + hooks: []PacketMatch{matchDst("169.254.1.2")}, + dir: in, + want: noVerdict, + }, + { + name: "gcp_dns_always_allowed", + p: gcpPkt(), + dir: in, + want: noVerdict, + }, + { + name: "matching_hook_allows_egress", + p: llPkt(), + hooks: []PacketMatch{matchDst("169.254.1.2")}, + dir: out, + want: noVerdict, + }, + { + name: "first_match_wins", + p: llPkt(), + hooks: []PacketMatch{noMatch, matchDst("169.254.1.2")}, + dir: in, + want: noVerdict, + 
}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + filt := newFilter(t.Logf) + filt.LinkLocalAllowHooks = tt.hooks + got, reason := filt.pre(&tt.p, 0, tt.dir) + if got != tt.want { + t.Errorf("pre = %v (%s); want %v", got, reason, tt.want) + } + }) + } + + // Verify first-match-wins stops calling subsequent hooks. + t.Run("first_match_stops_iteration", func(t *testing.T) { + filt := newFilter(t.Logf) + p := llPkt() + var called []int + filt.LinkLocalAllowHooks = []PacketMatch{ + func(q packet.Parsed) (bool, string) { + called = append(called, 0) + return true, "first" + }, + func(q packet.Parsed) (bool, string) { + called = append(called, 1) + return true, "second" + }, + } + filt.pre(&p, 0, in) + if len(called) != 1 || called[0] != 0 { + t.Errorf("called = %v; want [0]", called) + } + }) +} + func benchmarkFile(b *testing.B, file string, opt benchOpt) { var matches []Match bts, err := os.ReadFile(file) From eb819c580eb7a2b47047fd5f0a63bf29dbb423fe Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 25 Feb 2026 09:52:50 -0800 Subject: [PATCH 1016/1093] cmd/containerboot, net/dns/resolver: remove unused funcs in tests staticcheck was complaining about it on a PR I sent: https://github.com/tailscale/tailscale/actions/runs/22408882872/job/64876543467?pr=18804 And: https://github.com/tailscale/tailscale/actions/runs/22408882872/job/64876543475?pr=18804 Updates #cleanup Updates #18157 Change-Id: I6225481f3aab9e43ef1920aa1a12e86c5073a638 Signed-off-by: Brad Fitzpatrick --- cmd/containerboot/main_test.go | 6 ------ net/dns/resolver/forwarder_test.go | 6 ------ 2 files changed, 12 deletions(-) diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 1970fb4bfa449..58ab757950612 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -1603,12 +1603,6 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { } } -func mustBase64(t *testing.T, v any) string { - b := 
mustJSON(t, v) - s := base64.StdEncoding.WithPadding('=').EncodeToString(b) - return s -} - func mustJSON(t *testing.T, v any) []byte { b, err := json.Marshal(v) if err != nil { diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index 6c7459b1f619c..6fd186c25a61c 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -595,12 +595,6 @@ func beVerbose(f *forwarder) { f.verboseFwd = true } -// makeTestRequestWithEDNS returns a new TypeTXT request for the given domain with EDNS buffer size. -// Deprecated: Use makeTestRequest with queryType and ednsSize parameters instead. -func makeTestRequestWithEDNS(tb testing.TB, domain string, ednsSize uint16) []byte { - return makeTestRequest(tb, domain, dns.TypeTXT, ednsSize) -} - // makeEDNSResponse creates a DNS response of approximately the specified size // with TXT records and an OPT record. The response will NOT have the TC flag set // (simulating a non-compliant server that doesn't set TC when response exceeds EDNS buffer). From 329d2e2643b804c3666d93b3f7195c22b6ae2523 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 25 Feb 2026 13:52:01 -0500 Subject: [PATCH 1017/1093] prober: fix race condition in TestExcludeInRunAll (#18807) The test was making HTTP requests before waiting for probes to complete their initial run in "once" mode. This created a race where sometimes the probe's previous state was empty (0 results) and sometimes it had one result, causing inconsistent RecentResults and PreviousSuccessRatio values. Fixed by waiting for all probes to complete via their stopped channels before making HTTP requests, matching the pattern used in other tests like TestProberRunHandler and TestRunAllHandler. 
Fixes #18806 Signed-off-by: Mike O'Driscoll --- prober/prober_test.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/prober/prober_test.go b/prober/prober_test.go index 8da5127875859..14b75d5b5c2b1 100644 --- a/prober/prober_test.go +++ b/prober/prober_test.go @@ -793,9 +793,14 @@ func TestExcludeInRunAll(t *testing.T) { }, } - p.Run("includedProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) - p.Run("excludedProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) - p.Run("excludedOtherProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + includedProbe := p.Run("includedProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + excludedProbe := p.Run("excludedProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + excludedOtherProbe := p.Run("excludedOtherProbe", probeInterval, nil, FuncProbe(func(context.Context) error { return nil })) + + // Wait for all probes to complete their initial run + <-includedProbe.stopped + <-excludedProbe.stopped + <-excludedOtherProbe.stopped mux := http.NewServeMux() server := httptest.NewServer(mux) From fd2ebcd5bdf5a166513e7b86114dcbcb5d8c67e3 Mon Sep 17 00:00:00 2001 From: Chris Mosetick Date: Wed, 25 Feb 2026 20:30:21 +0100 Subject: [PATCH 1018/1093] cmd/k8s-operator: add exit node example (#18087) * cmd/k8s-operator/deploy/examples Adds exitnode.yaml to k8s-operator Fixes #18086 Signed-off-by: Christopher Mosetick * cmd/k8s-operator/deploy/examples: update connector and add exitnode examples - Remove exitNode: true from connector.yaml to keep it focused as a subnet router example - Update connector.yaml header comment to remove exit node reference and add pointer hint to exitnode.yaml - Clarify exitnode.yaml comments to accurately describe separate Connector deployment pattern Fixes #18086 Signed-off-by: Christopher Mosetick * Update 
cmd/k8s-operator/deploy/examples/exitnode.yaml Co-authored-by: David Bond Signed-off-by: Chris Mosetick * Update cmd/k8s-operator/deploy/examples/exitnode.yaml Co-authored-by: David Bond Signed-off-by: Chris Mosetick * Update cmd/k8s-operator/deploy/examples/exitnode.yaml Co-authored-by: David Bond Signed-off-by: Chris Mosetick * Update cmd/k8s-operator/deploy/examples/exitnode.yaml Co-authored-by: David Bond Signed-off-by: Chris Mosetick --------- Signed-off-by: Christopher Mosetick Signed-off-by: Chris Mosetick Co-authored-by: David Bond --- .../deploy/examples/connector.yaml | 4 +-- .../deploy/examples/exitnode.yaml | 26 +++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) create mode 100644 cmd/k8s-operator/deploy/examples/exitnode.yaml diff --git a/cmd/k8s-operator/deploy/examples/connector.yaml b/cmd/k8s-operator/deploy/examples/connector.yaml index f5447400e8722..a025eef98cd26 100644 --- a/cmd/k8s-operator/deploy/examples/connector.yaml +++ b/cmd/k8s-operator/deploy/examples/connector.yaml @@ -1,9 +1,10 @@ # Before applying ensure that the operator owns tag:prod. # https://tailscale.com/kb/1236/kubernetes-operator/#setting-up-the-kubernetes-operator. -# To set up autoapproval set tag:prod as approver for 10.40.0.0/14 route and exit node. +# To set up autoapproval set tag:prod as approver for 10.40.0.0/14 route. # Otherwise approve it manually in Machines panel once the # ts-prod Tailscale node has been created. 
# See https://tailscale.com/kb/1018/acls/#auto-approvers-for-routes-and-exit-nodes +# For an exit node example, see exitnode.yaml apiVersion: tailscale.com/v1alpha1 kind: Connector metadata: @@ -17,4 +18,3 @@ spec: advertiseRoutes: - "10.40.0.0/14" - "192.168.0.0/14" - exitNode: true diff --git a/cmd/k8s-operator/deploy/examples/exitnode.yaml b/cmd/k8s-operator/deploy/examples/exitnode.yaml new file mode 100644 index 0000000000000..b2ce516cd98bf --- /dev/null +++ b/cmd/k8s-operator/deploy/examples/exitnode.yaml @@ -0,0 +1,26 @@ +# Before applying ensure that the operator owns tag:k8s-operator +# To use both subnet routing and exit node on the same cluster, deploy a separate +# Connector resource for each. +# See connector.yaml for a subnet router example. +# See: https://tailscale.com/kb/1441/kubernetes-operator-connector +--- +apiVersion: tailscale.com/v1alpha1 +kind: Connector +metadata: + name: exit-node +spec: + # Exit node configuration - allows Tailscale clients to route all internet traffic through this Connector + exitNode: true + + # High availability: 2 replicas for redundancy + # Note: Must use hostnamePrefix (not hostname) when replicas > 1 + replicas: 2 + + # Hostname prefix for the exit node devices + # Devices will be named: exit-node-0, exit-node-1 + hostnamePrefix: exit-node + + # Tailscale tags for ACL policy management + tags: + - tag:k8s-operator + From 7370c24eb4989ca82f83009a0d36395bab4ea8c0 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 25 Feb 2026 17:45:51 +0000 Subject: [PATCH 1019/1093] tool/listpkgs: add --affected-by-tag For paring back build tag variant CI runs' set of packages to test. 
Updates tailscale/corp#28679 Change-Id: Iba46fd1f58c1eaee1f7888ef573bc8b14fa73208 Signed-off-by: Brad Fitzpatrick --- tool/listpkgs/listpkgs.go | 81 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 79 insertions(+), 2 deletions(-) diff --git a/tool/listpkgs/listpkgs.go b/tool/listpkgs/listpkgs.go index e2c286efc0f7d..1c2dda257a7ca 100644 --- a/tool/listpkgs/listpkgs.go +++ b/tool/listpkgs/listpkgs.go @@ -26,6 +26,7 @@ var ( withTagsAllStr = flag.String("with-tags-all", "", "if non-empty, a comma-separated list of builds tags to require (a package will only be listed if it contains all of these build tags)") withoutTagsAnyStr = flag.String("without-tags-any", "", "if non-empty, a comma-separated list of build constraints to exclude (a package will be omitted if it contains any of these build tags)") shard = flag.String("shard", "", "if non-empty, a string of the form 'N/M' to only print packages in shard N of M (e.g. '1/3', '2/3', '3/3/' for different thirds of the list)") + affectedByTag = flag.String("affected-by-tag", "", "if non-empty, only list packages whose test binary would be affected by the presence or absence of this build tag") ) func main() { @@ -41,6 +42,10 @@ func main() { Mode: packages.LoadFiles, Env: os.Environ(), } + if *affectedByTag != "" { + cfg.Mode |= packages.NeedImports + cfg.Tests = true + } if *goos != "" { cfg.Env = append(cfg.Env, "GOOS="+*goos) } @@ -62,6 +67,11 @@ func main() { withAll = strings.Split(*withTagsAllStr, ",") } + var affected map[string]bool // PkgPath → true + if *affectedByTag != "" { + affected = computeAffected(pkgs, *affectedByTag) + } + seen := map[string]bool{} matches := 0 Pkg: @@ -69,6 +79,17 @@ Pkg: if pkg.PkgPath == "" { // malformed (shouldn’t happen) continue } + if affected != nil { + // Skip synthetic packages created by Tests: true: + // - for-test variants like "foo [foo.test]" (ID != PkgPath) + // - test binary packages like "foo.test" (PkgPath ends in ".test") + if pkg.ID != pkg.PkgPath || 
strings.HasSuffix(pkg.PkgPath, ".test") {
+				continue
+			}
+			if !affected[pkg.PkgPath] {
+				continue
+			}
+		}
 		if seen[pkg.PkgPath] {
 			continue // suppress duplicates when patterns overlap
 		}
@@ -96,7 +117,7 @@ Pkg:
 		if *shard != "" {
 			var n, m int
 			if _, err := fmt.Sscanf(*shard, "%d/%d", &n, &m); err != nil || n < 1 || m < 1 {
-				log.Fatalf("invalid shard format %q; expected 'N/M'", *shard)
+				log.Fatalf("invalid shard format %q; expected 'N/M'", *shard)
 			}
 			if m > 0 && (matches-1)%m != n-1 {
 				continue // not in this shard
@@ -112,6 +133,62 @@ Pkg:
 	}
 }
 
+// computeAffected returns the set of package paths whose test binaries would
+// differ with vs without the given build tag. It finds packages that directly
+// mention the tag, then propagates transitively via reverse dependencies.
+func computeAffected(pkgs []*packages.Package, tag string) map[string]bool {
+	// Build a map from package ID to package for quick lookup.
+	byID := make(map[string]*packages.Package, len(pkgs))
+	for _, pkg := range pkgs {
+		byID[pkg.ID] = pkg
+	}
+
+	// First pass: find directly affected package IDs.
+	directlyAffected := make(map[string]bool)
+	for _, pkg := range pkgs {
+		if hasBuildTag(pkg, tag) {
+			directlyAffected[pkg.ID] = true
+		}
+	}
+
+	// Build reverse dependency graph: importedID → []importingID.
+	reverseDeps := make(map[string][]string)
+	for _, pkg := range pkgs {
+		for _, imp := range pkg.Imports {
+			reverseDeps[imp.ID] = append(reverseDeps[imp.ID], pkg.ID)
+		}
+	}
+
+	// BFS from directly affected packages through reverse deps.
+	affectedIDs := make(map[string]bool)
+	queue := make([]string, 0, len(directlyAffected))
+	for id := range directlyAffected {
+		affectedIDs[id] = true
+		queue = append(queue, id)
+	}
+	for len(queue) > 0 {
+		id := queue[0]
+		queue = queue[1:]
+		for _, rdep := range reverseDeps[id] {
+			if !affectedIDs[rdep] {
+				affectedIDs[rdep] = true
+				queue = append(queue, rdep)
+			}
+		}
+	}
+
+	// Map affected IDs back to PkgPaths.
For-test variants like + // "foo [foo.test]" share the same PkgPath as "foo", so the + // result naturally deduplicates. + affected := make(map[string]bool) + for id := range affectedIDs { + if pkg, ok := byID[id]; ok { + affected[pkg.PkgPath] = true + } + } + return affected +} + func isThirdParty(pkg string) bool { return strings.HasPrefix(pkg, "tailscale.com/tempfork/") } @@ -194,7 +271,7 @@ func getFileTags(filename string) (tagSet, error) { mu.Lock() defer mu.Unlock() fileTags[filename] = ts - return tags, nil + return ts, nil } func fileMentionsTag(filename, tag string) (bool, error) { From 518d2417003657f955b98a546987e376ad9fe740 Mon Sep 17 00:00:00 2001 From: joshua stein Date: Sun, 22 Feb 2026 17:13:58 -0600 Subject: [PATCH 1020/1093] netns,wgengine: add OpenBSD support to netns via an rtable When an exit node has been set and a new default route is added, create a new rtable in the default rdomain and add the current default route via its physical interface. When control() is requesting a connection not go through the exit-node default route, we can use the SO_RTABLE socket option to force it through the new rtable we created. 
Updates #17321 Signed-off-by: joshua stein --- ipn/ipnlocal/local.go | 2 +- net/netmon/defaultroute_bsd.go | 5 +- net/netmon/interfaces_bsd.go | 2 +- ...aces_freebsd.go => interfaces_bsdroute.go} | 4 +- net/netmon/interfaces_defaultrouteif_todo.go | 2 +- net/netns/netns_default.go | 2 +- net/netns/netns_openbsd.go | 178 ++++++++++++++++++ net/routetable/routetable_bsd.go | 2 +- ...able_freebsd.go => routetable_bsdconst.go} | 3 +- net/routetable/routetable_other.go | 2 +- wgengine/router/osrouter/router_openbsd.go | 49 ++++- 11 files changed, 231 insertions(+), 20 deletions(-) rename net/netmon/{interfaces_freebsd.go => interfaces_bsdroute.go} (87%) create mode 100644 net/netns/netns_openbsd.go rename net/routetable/{routetable_freebsd.go => routetable_bsdconst.go} (90%) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 3fccb4399dd13..bae1e66393a4b 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5601,7 +5601,7 @@ func (b *LocalBackend) routerConfigLocked(cfg *wgcfg.Config, prefs ipn.PrefsView b.logf("failed to discover interface ips: %v", err) } switch runtime.GOOS { - case "linux", "windows", "darwin", "ios", "android": + case "linux", "windows", "darwin", "ios", "android", "openbsd": rs.LocalRoutes = internalIPs // unconditionally allow access to guest VM networks if prefs.ExitNodeAllowLANAccess() { rs.LocalRoutes = append(rs.LocalRoutes, externalIPs...) diff --git a/net/netmon/defaultroute_bsd.go b/net/netmon/defaultroute_bsd.go index 88f2c8ea54be1..741948599758e 100644 --- a/net/netmon/defaultroute_bsd.go +++ b/net/netmon/defaultroute_bsd.go @@ -1,11 +1,10 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -// Common code for FreeBSD. This might also work on other -// BSD systems (e.g. OpenBSD) but has not been tested. +// Common code for FreeBSD and OpenBSD. // Not used on iOS or macOS. See defaultroute_darwin.go. 
-//go:build freebsd +//go:build freebsd || openbsd package netmon diff --git a/net/netmon/interfaces_bsd.go b/net/netmon/interfaces_bsd.go index d53e2cfc18f99..4c09aa55eeb31 100644 --- a/net/netmon/interfaces_bsd.go +++ b/net/netmon/interfaces_bsd.go @@ -4,7 +4,7 @@ // Common code for FreeBSD and Darwin. This might also work on other // BSD systems (e.g. OpenBSD) but has not been tested. -//go:build darwin || freebsd +//go:build darwin || freebsd || openbsd package netmon diff --git a/net/netmon/interfaces_freebsd.go b/net/netmon/interfaces_bsdroute.go similarity index 87% rename from net/netmon/interfaces_freebsd.go rename to net/netmon/interfaces_bsdroute.go index 5573643ca7370..7ac28c4b576fd 100644 --- a/net/netmon/interfaces_freebsd.go +++ b/net/netmon/interfaces_bsdroute.go @@ -1,9 +1,9 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -// This might work on other BSDs, but only tested on FreeBSD. +// FreeBSD and OpenBSD routing table functions. 
-//go:build freebsd +//go:build freebsd || openbsd package netmon diff --git a/net/netmon/interfaces_defaultrouteif_todo.go b/net/netmon/interfaces_defaultrouteif_todo.go index e428f16a1f946..55d284153815e 100644 --- a/net/netmon/interfaces_defaultrouteif_todo.go +++ b/net/netmon/interfaces_defaultrouteif_todo.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux && !windows && !darwin && !freebsd && !android +//go:build !linux && !windows && !darwin && !freebsd && !android && !openbsd package netmon diff --git a/net/netns/netns_default.go b/net/netns/netns_default.go index 4087e40488e60..33f4c1333e395 100644 --- a/net/netns/netns_default.go +++ b/net/netns/netns_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux && !windows && !darwin +//go:build !linux && !windows && !darwin && !openbsd package netns diff --git a/net/netns/netns_openbsd.go b/net/netns/netns_openbsd.go new file mode 100644 index 0000000000000..47968bd42f35e --- /dev/null +++ b/net/netns/netns_openbsd.go @@ -0,0 +1,178 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build openbsd + +package netns + +import ( + "fmt" + "os/exec" + "strconv" + "strings" + "sync" + "syscall" + + "golang.org/x/sys/unix" + "tailscale.com/net/netmon" + "tailscale.com/types/logger" +) + +var ( + bypassMu sync.Mutex + bypassRtable int +) + +// Called by the router when exit node routes are configured. 
+func SetBypassRtable(rtable int) { + bypassMu.Lock() + defer bypassMu.Unlock() + bypassRtable = rtable +} + +func GetBypassRtable() int { + bypassMu.Lock() + defer bypassMu.Unlock() + return bypassRtable +} + +func control(logf logger.Logf, _ *netmon.Monitor) func(network, address string, c syscall.RawConn) error { + return func(network, address string, c syscall.RawConn) error { + return controlC(logf, network, address, c) + } +} + +func controlC(logf logger.Logf, _, address string, c syscall.RawConn) error { + if isLocalhost(address) { + return nil + } + + rtable := GetBypassRtable() + if rtable == 0 { + return nil + } + + return bindToRtable(c, rtable, logf) +} + +func bindToRtable(c syscall.RawConn, rtable int, logf logger.Logf) error { + var sockErr error + err := c.Control(func(fd uintptr) { + sockErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_RTABLE, rtable) + }) + if sockErr != nil { + logf("netns: SO_RTABLE(%d): %v", rtable, sockErr) + } + if err != nil { + return fmt.Errorf("RawConn.Control: %w", err) + } + return sockErr +} + +// SetupBypassRtable creates a bypass rtable with the existing default route +// in it routing through its existing physical interface. It should be called +// by the router when exit node routes are being added. +// Returns the rtable number. 
+func SetupBypassRtable(logf logger.Logf) (int, error) { + bypassMu.Lock() + defer bypassMu.Unlock() + + if bypassRtable != 0 { + return bypassRtable, nil + } + + gw, err := getPhysicalGateway() + if err != nil { + return 0, fmt.Errorf("getPhysicalGateway: %w", err) + } + + rtable, err := findAvailableRtable() + if err != nil { + return 0, fmt.Errorf("findAvailableRtable: %w", err) + } + + // Add the existing default route interface to the new bypass rtable + out, err := exec.Command("route", "-T", strconv.Itoa(rtable), "-qn", "add", "default", gw).CombinedOutput() + if err != nil { + return 0, fmt.Errorf("route -T%d add default %s: %w\n%s", rtable, gw, err, out) + } + + bypassRtable = rtable + logf("netns: created bypass rtable %d with default route via %s", rtable, gw) + return rtable, nil +} + +func CleanupBypassRtable(logf logger.Logf) { + bypassMu.Lock() + defer bypassMu.Unlock() + + if bypassRtable == 0 { + return + } + + // Delete the default route from the bypass rtable which should clear it + out, err := exec.Command("route", "-T", strconv.Itoa(bypassRtable), "-qn", "delete", "default").CombinedOutput() + if err != nil { + logf("netns: failed to clear bypass route: %v\n%s", err, out) + } else { + logf("netns: cleared bypass rtable %d", bypassRtable) + } + + bypassRtable = 0 +} + +// getPhysicalGateway returns the default gateway IP that goes through a +// physical interface (not tun). 
+func getPhysicalGateway() (string, error) { + out, err := exec.Command("route", "-n", "show", "-inet").CombinedOutput() + if err != nil { + return "", fmt.Errorf("route show: %w", err) + } + + // Parse the routing table looking for default routes not via tun + for _, line := range strings.Split(string(out), "\n") { + fields := strings.Fields(line) + if len(fields) < 8 { + continue + } + // Format: Destination Gateway Flags Refs Use Mtu Prio Iface + dest := fields[0] + gateway := fields[1] + iface := fields[7] + + if dest == "default" && !strings.HasPrefix(iface, "tun") { + return gateway, nil + } + } + + return "", fmt.Errorf("no physical default gateway found") +} + +func findAvailableRtable() (int, error) { + for i := 1; i <= 255; i++ { + out, err := exec.Command("route", "-T", strconv.Itoa(i), "-n", "show", "-inet").CombinedOutput() + if err != nil { + // rtable doesn't exist, consider it available + return i, nil + } + // Check if the output only contains the header (no actual routes) + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + hasRoutes := false + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "Routing") || strings.HasPrefix(line, "Destination") { + continue + } + hasRoutes = true + break + } + if !hasRoutes { + return i, nil + } + } + return 0, fmt.Errorf("no available rtable") +} + +func UseSocketMark() bool { + return false +} diff --git a/net/routetable/routetable_bsd.go b/net/routetable/routetable_bsd.go index 7a6bf48cc96e8..f5306d8942a02 100644 --- a/net/routetable/routetable_bsd.go +++ b/net/routetable/routetable_bsd.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build darwin || freebsd +//go:build darwin || freebsd || openbsd package routetable diff --git a/net/routetable/routetable_freebsd.go b/net/routetable/routetable_bsdconst.go similarity index 90% rename from net/routetable/routetable_freebsd.go rename 
to net/routetable/routetable_bsdconst.go index 313febf3ca94d..9de9aad73802f 100644 --- a/net/routetable/routetable_freebsd.go +++ b/net/routetable/routetable_bsdconst.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build freebsd +//go:build freebsd || openbsd package routetable @@ -21,6 +21,7 @@ var flags = map[int]string{ unix.RTF_BROADCAST: "broadcast", unix.RTF_GATEWAY: "gateway", unix.RTF_HOST: "host", + unix.RTF_LOCAL: "local", unix.RTF_MULTICAST: "multicast", unix.RTF_REJECT: "reject", unix.RTF_STATIC: "static", diff --git a/net/routetable/routetable_other.go b/net/routetable/routetable_other.go index da162c3f8e191..25d008ccc276f 100644 --- a/net/routetable/routetable_other.go +++ b/net/routetable/routetable_other.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & contributors // SPDX-License-Identifier: BSD-3-Clause -//go:build android || (!linux && !darwin && !freebsd) +//go:build android || (!linux && !darwin && !freebsd && !openbsd) package routetable diff --git a/wgengine/router/osrouter/router_openbsd.go b/wgengine/router/osrouter/router_openbsd.go index 8807a32d5b860..1c7eed52e2e76 100644 --- a/wgengine/router/osrouter/router_openbsd.go +++ b/wgengine/router/osrouter/router_openbsd.go @@ -14,6 +14,7 @@ import ( "go4.org/netipx" "tailscale.com/health" "tailscale.com/net/netmon" + "tailscale.com/net/netns" "tailscale.com/types/logger" "tailscale.com/util/eventbus" "tailscale.com/util/set" @@ -32,12 +33,13 @@ func init() { // https://git.zx2c4.com/wireguard-openbsd. 
type openbsdRouter struct { - logf logger.Logf - netMon *netmon.Monitor - tunname string - local4 netip.Prefix - local6 netip.Prefix - routes set.Set[netip.Prefix] + logf logger.Logf + netMon *netmon.Monitor + tunname string + local4 netip.Prefix + local6 netip.Prefix + routes set.Set[netip.Prefix] + areDefaultRoute bool } func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { @@ -76,6 +78,10 @@ func inet(p netip.Prefix) string { return "inet" } +func isDefaultRoute(p netip.Prefix) bool { + return p.Bits() == 0 +} + func (r *openbsdRouter) Set(cfg *router.Config) error { if cfg == nil { cfg = &shutdownConfig @@ -219,8 +225,12 @@ func (r *openbsdRouter) Set(cfg *router.Config) error { dst = localAddr6.Addr().String() } routeadd := []string{"route", "-q", "-n", - "add", "-" + inet(route), nstr, - "-iface", dst} + "add", "-" + inet(route), nstr} + if isDefaultRoute(route) { + // 1 is reserved for kernel + routeadd = append(routeadd, "-priority", "2") + } + routeadd = append(routeadd, "-iface", dst) out, err := cmd(routeadd...).CombinedOutput() if err != nil { r.logf("addr add failed: %v: %v\n%s", routeadd, err, out) @@ -235,10 +245,33 @@ func (r *openbsdRouter) Set(cfg *router.Config) error { r.local6 = localAddr6 r.routes = newRoutes + areDefault := false + for route := range newRoutes { + if isDefaultRoute(route) { + areDefault = true + break + } + } + + // Set up or tear down the bypass rtable as needed + if areDefault && !r.areDefaultRoute { + if _, err := netns.SetupBypassRtable(r.logf); err != nil { + r.logf("router: failed to set up bypass rtable: %v", err) + } + r.areDefaultRoute = true + } else if !areDefault && r.areDefaultRoute { + netns.CleanupBypassRtable(r.logf) + r.areDefaultRoute = false + } + return errq } func (r *openbsdRouter) Close() error { + if r.areDefaultRoute { + netns.CleanupBypassRtable(r.logf) + r.areDefaultRoute = false + } cleanUp(r.logf, 
r.tunname) return nil } From 54de5daae00cd491d7c7174d400be6f0c630a5f0 Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Wed, 25 Feb 2026 17:41:51 -0500 Subject: [PATCH 1021/1093] tstest/integration/nat: use per-call timeout in natlab ping (#18811) The test ping() passed the full 60s context to each PingWithOpts call, so if the first attempt hung (DERP not yet registered), the retry loop never reached attempt 2. Use a 2s per-call timeout instead. Updates: #18810 Signed-off-by: Fernando Serboncini --- tstest/integration/nat/nat_test.go | 33 ++++++++++++++++-------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go index 56d602222cbe9..2322e243a8ee9 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -415,7 +415,7 @@ func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { return "" } - pingRes, err := ping(ctx, clients[0], sts[1].Self.TailscaleIPs[0]) + pingRes, err := ping(ctx, t, clients[0], sts[1].Self.TailscaleIPs[0]) if err != nil { t.Fatalf("ping failure: %v", err) } @@ -450,35 +450,38 @@ const ( routeNil pingRoute = "nil" // *ipnstate.PingResult is nil ) -func ping(ctx context.Context, c *vnet.NodeAgentClient, target netip.Addr) (*ipnstate.PingResult, error) { - n := 0 - var res *ipnstate.PingResult - anyPong := false - for n < 10 { - n++ - pr, err := c.PingWithOpts(ctx, target, tailcfg.PingDisco, tailscale.PingOpts{}) +func ping(ctx context.Context, t testing.TB, c *vnet.NodeAgentClient, target netip.Addr) (*ipnstate.PingResult, error) { + var lastRes *ipnstate.PingResult + for n := range 10 { + t.Logf("ping attempt %d to %v ...", n+1, target) + pingCtx, cancel := context.WithTimeout(ctx, 2*time.Second) + pr, err := c.PingWithOpts(pingCtx, target, tailcfg.PingDisco, tailscale.PingOpts{}) + cancel() if err != nil { - if anyPong { - return res, nil + t.Logf("ping attempt %d error: %v", n+1, err) + if ctx.Err() != nil { 
+ break } - return nil, err + continue } if pr.Err != "" { return nil, errors.New(pr.Err) } + t.Logf("ping attempt %d: derp=%d endpoint=%v latency=%v", n+1, pr.DERPRegionID, pr.Endpoint, pr.LatencySeconds) if pr.DERPRegionID == 0 { return pr, nil } - res = pr + lastRes = pr select { case <-ctx.Done(): + return lastRes, nil case <-time.After(time.Second): } } - if res == nil { - return nil, errors.New("no ping response") + if lastRes != nil { + return lastRes, nil } - return res, nil + return nil, fmt.Errorf("no ping response (ctx: %v)", ctx.Err()) } func up(ctx context.Context, c *vnet.NodeAgentClient) error { From 6e2677b4ad8c79dd3112e5136aae7a352e2f4414 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 25 Feb 2026 16:00:32 -0800 Subject: [PATCH 1022/1093] client/systray: open BrowseToURL from WatchIPN in a browser (#18816) This works for Tailscale SSH, but not for account logins (due to another process potentially starting that login, or `--operator` limitations). RELNOTE=The systray app now opens login links for SSH check mode in a browser. Updates #8551 Signed-off-by: Andrew Lytvynov --- client/systray/systray.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/client/systray/systray.go b/client/systray/systray.go index 7018f0f3be2be..65c1bec20a184 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -531,6 +531,15 @@ func (menu *Menu) watchIPNBusInner() error { if err != nil { return fmt.Errorf("ipnbus error: %w", err) } + if url := n.BrowseToURL; url != nil { + // Avoid opening the browser when running as root, just in case. 
+ runningAsRoot := os.Getuid() == 0 + if !runningAsRoot { + if err := webbrowser.Open(*url); err != nil { + log.Printf("failed to open BrowseToURL: %v", err) + } + } + } var rebuild bool if n.State != nil { log.Printf("new state: %v", n.State) From 15836e56245c590bfddf342a9ce77bcfbb364f00 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 26 Feb 2026 00:37:15 +0000 Subject: [PATCH 1023/1093] util/set: make Set.Slice return elements in sorted order for ordered types This makes Set.MarshalJSON produce deterministic output in many cases now. We still need to do make it deterministic for non-ordered types. Updates #18808 Change-Id: I7f341ec039c661a8e88d07d7f4dc0f15d5d4ab86 Signed-off-by: Brad Fitzpatrick --- util/set/set.go | 43 +++++++++++++++++++++++++++++++++++++++++-- util/set/set_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 2 deletions(-) diff --git a/util/set/set.go b/util/set/set.go index df4b1fa3a24ac..c3d2350a7e6ba 100644 --- a/util/set/set.go +++ b/util/set/set.go @@ -7,6 +7,8 @@ package set import ( "encoding/json" "maps" + "reflect" + "sort" ) // Set is a set of T. @@ -53,16 +55,53 @@ func (s *Set[T]) Make() { } } -// Slice returns the elements of the set as a slice. The elements will not be -// in any particular order. +// Slice returns the elements of the set as a slice. If the element type is +// ordered (integers, floats, or strings), the elements are returned in sorted +// order. Otherwise, the order is not defined. func (s Set[T]) Slice() []T { es := make([]T, 0, s.Len()) for k := range s { es = append(es, k) } + if f := genOrderedSwapper(reflect.TypeFor[T]()); f != nil { + sort.Slice(es, f(reflect.ValueOf(es))) + } return es } +// genOrderedSwapper returns a generator for a swap function that can be used to +// sort a slice of the given type. If rt is not an ordered type, +// genOrderedSwapper returns nil. 
+func genOrderedSwapper(rt reflect.Type) func(reflect.Value) func(i, j int) bool { + switch rt.Kind() { + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return func(rv reflect.Value) func(i, j int) bool { + return func(i, j int) bool { + return rv.Index(i).Uint() < rv.Index(j).Uint() + } + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return func(rv reflect.Value) func(i, j int) bool { + return func(i, j int) bool { + return rv.Index(i).Int() < rv.Index(j).Int() + } + } + case reflect.Float32, reflect.Float64: + return func(rv reflect.Value) func(i, j int) bool { + return func(i, j int) bool { + return rv.Index(i).Float() < rv.Index(j).Float() + } + } + case reflect.String: + return func(rv reflect.Value) func(i, j int) bool { + return func(i, j int) bool { + return rv.Index(i).String() < rv.Index(j).String() + } + } + } + return nil +} + // Delete removes e from the set. func (s Set[T]) Delete(e T) { delete(s, e) } diff --git a/util/set/set_test.go b/util/set/set_test.go index 4afaeea5747fc..2188cbb4ddff7 100644 --- a/util/set/set_test.go +++ b/util/set/set_test.go @@ -159,6 +159,39 @@ func TestSetJSONRoundTrip(t *testing.T) { } } +func checkSliceSorted[T comparable](t *testing.T, s Set[T], want []T) { + t.Helper() + got := s.Slice() + if !slices.Equal(got, want) { + t.Errorf("got %v; want %v", got, want) + } +} + +func TestSliceSorted(t *testing.T) { + t.Run("int", func(t *testing.T) { + checkSliceSorted(t, Of(3, 1, 4, 1, 5), []int{1, 3, 4, 5}) + }) + t.Run("int8", func(t *testing.T) { + checkSliceSorted(t, Of[int8](-1, 3, -100, 50), []int8{-100, -1, 3, 50}) + }) + t.Run("uint16", func(t *testing.T) { + checkSliceSorted(t, Of[uint16](300, 1, 65535, 0), []uint16{0, 1, 300, 65535}) + }) + t.Run("float64", func(t *testing.T) { + checkSliceSorted(t, Of(2.7, 1.0, 3.14), []float64{1.0, 2.7, 3.14}) + }) + t.Run("float32", func(t *testing.T) { + checkSliceSorted(t, Of[float32](2.5, 
1.0, 3.0), []float32{1.0, 2.5, 3.0}) + }) + t.Run("string", func(t *testing.T) { + checkSliceSorted(t, Of("banana", "apple", "cherry"), []string{"apple", "banana", "cherry"}) + }) + t.Run("named-uint", func(t *testing.T) { + type Port uint16 + checkSliceSorted(t, Of[Port](443, 80, 8080), []Port{80, 443, 8080}) + }) +} + func TestMake(t *testing.T) { var s Set[int] s.Make() From da90ea664d7a601a73cd531bc5ae1db0993033c1 Mon Sep 17 00:00:00 2001 From: Fernando Serboncini Date: Thu, 26 Feb 2026 12:36:26 -0500 Subject: [PATCH 1024/1093] wgengine/magicsock: only run derpActiveFunc after connecting to DERP (#18814) derpActiveFunc was being called immediately as a bare goroutine, before startGate was resolved. For the firstDerp case, startGate is c.derpStarted which only closes after dc.Connect() completes, so derpActiveFunc was firing before the DERP connection existed. We now block it with the same logic used by runDerpReader and by runDerpWriter. Updates: #18810 Signed-off-by: Fernando Serboncini --- wgengine/magicsock/derp.go | 9 ++++- wgengine/magicsock/magicsock_test.go | 51 ++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index b3cc5c2ce4927..f9e5050705b31 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -436,7 +436,14 @@ func (c *Conn) derpWriteChanForRegion(regionID int, peer key.NodePublic) chan de go c.runDerpReader(ctx, regionID, dc, wg, startGate) go c.runDerpWriter(ctx, dc, ch, wg, startGate) - go c.derpActiveFunc() + go func() { + select { + case <-ctx.Done(): + return + case <-startGate: + c.derpActiveFunc() + } + }() return ad.writeCh } diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 5fa177b3bce8b..9d6cae87bdcc6 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -530,6 +530,57 @@ func TestPickDERPFallback(t *testing.T) { // have fixed DERP 
fallback logic. } +// TestDERPActiveFuncCalledAfterConnect verifies that DERPActiveFunc is not +// called until the DERP connection is actually established (i.e. after +// startGate / derpStarted is closed). +func TestDERPActiveFuncCalledAfterConnect(t *testing.T) { + derpMap, cleanup := runDERPAndStun(t, t.Logf, localhostListener{}, netaddr.IPv4(127, 0, 0, 1)) + defer cleanup() + + bus := eventbustest.NewBus(t) + + netMon, err := netmon.New(bus, t.Logf) + if err != nil { + t.Fatal(err) + } + + resultCh := make(chan bool, 1) + var conn *Conn + + conn, err = NewConn(Options{ + Logf: t.Logf, + NetMon: netMon, + EventBus: bus, + HealthTracker: health.NewTracker(bus), + Metrics: new(usermetric.Registry), + DisablePortMapper: true, + TestOnlyPacketListener: localhostListener{}, + EndpointsFunc: func([]tailcfg.Endpoint) {}, + DERPActiveFunc: func() { + // derpStarted should already be closed when DERPActiveFunc is called. + select { + case <-conn.derpStarted: + resultCh <- true + default: + resultCh <- false + } + }, + }) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + conn.SetDERPMap(derpMap) + if err := conn.SetPrivateKey(key.NewNode()); err != nil { + t.Fatal(err) + } + + if ok := <-resultCh; !ok { + t.Error("DERPActiveFunc was called before DERP connection was established") + } +} + // TestDeviceStartStop exercises the startup and shutdown logic of // wireguard-go, which is intimately intertwined with magicsock's own // lifecycle. We seem to be good at generating deadlocks here, so if From 5ac35b665b5e2308faa7a48f85b45b7b29e7d551 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 26 Feb 2026 12:59:45 -0500 Subject: [PATCH 1025/1093] client/systray: add installer for a freedesktop autostart file (#18767) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds freedesktop as an option for installing autostart desktop files for starting the systray application. 
Fixes #18766 Signed-off-by: Claus Lensbøl --- client/freedesktop/freedesktop.go | 43 +++++++ client/freedesktop/freedesktop_test.go | 145 +++++++++++++++++++++++ client/systray/startup-creator.go | 142 +++++++++++++++++++++- client/systray/tailscale-systray.desktop | 13 ++ client/systray/tailscale.png | Bin 0 -> 14069 bytes client/systray/tailscale.svg | 14 +++ cmd/tailscale/cli/configure_linux.go | 2 +- cmd/tailscale/depaware.txt | 1 + 8 files changed, 358 insertions(+), 2 deletions(-) create mode 100644 client/freedesktop/freedesktop.go create mode 100644 client/freedesktop/freedesktop_test.go create mode 100644 client/systray/tailscale-systray.desktop create mode 100644 client/systray/tailscale.png create mode 100644 client/systray/tailscale.svg diff --git a/client/freedesktop/freedesktop.go b/client/freedesktop/freedesktop.go new file mode 100644 index 0000000000000..6ed1e8ccf88fc --- /dev/null +++ b/client/freedesktop/freedesktop.go @@ -0,0 +1,43 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package freedesktop provides helpers for freedesktop systems. +package freedesktop + +import "strings" + +const needsEscape = " \t\n\"'\\><~|&;$*?#()`" + +var escaper = strings.NewReplacer(`"`, `\"`, "`", "\\`", `$`, `\$`, `\`, `\\`) + +// Quote quotes according to the Desktop Entry Specification, as below: +// +// Arguments may be quoted in whole. If an argument contains a reserved +// character the argument must be quoted. The rules for quoting of arguments is +// also applicable to the executable name or path of the executable program as +// provided. +// +// Quoting must be done by enclosing the argument between double quotes and +// escaping the double quote character, backtick character ("`"), dollar sign +// ("$") and backslash character ("\") by preceding it with an additional +// backslash character. 
Implementations must undo quoting before expanding field +// codes and before passing the argument to the executable program. Reserved +// characters are space (" "), tab, newline, double quote, single quote ("'"), +// backslash character ("\"), greater-than sign (">"), less-than sign ("<"), +// tilde ("~"), vertical bar ("|"), ampersand ("&"), semicolon (";"), dollar +// sign ("$"), asterisk ("*"), question mark ("?"), hash mark ("#"), parenthesis +// ("(") and (")") and backtick character ("`"). +func Quote(s string) string { + if s == "" { + return `""` + } + if !strings.ContainsAny(s, needsEscape) { + return s + } + + var b strings.Builder + b.WriteString(`"`) + escaper.WriteString(&b, s) + b.WriteString(`"`) + return b.String() +} diff --git a/client/freedesktop/freedesktop_test.go b/client/freedesktop/freedesktop_test.go new file mode 100644 index 0000000000000..07a1104f36940 --- /dev/null +++ b/client/freedesktop/freedesktop_test.go @@ -0,0 +1,145 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package freedesktop + +import ( + "strings" + "testing" +) + +func TestEscape(t *testing.T) { + tests := []struct { + name, input, want string + }{ + { + name: "no illegal chars", + input: "/home/user", + want: "/home/user", + }, + { + name: "empty string", + input: "", + want: "\"\"", + }, + { + name: "space", + input: " ", + want: "\" \"", + }, + { + name: "tab", + input: "\t", + want: "\"\t\"", + }, + { + name: "newline", + input: "\n", + want: "\"\n\"", + }, + { + name: "double quote", + input: "\"", + want: "\"\\\"\"", + }, + { + name: "single quote", + input: "'", + want: "\"'\"", + }, + { + name: "backslash", + input: "\\", + want: "\"\\\\\"", + }, + { + name: "greater than", + input: ">", + want: "\">\"", + }, + { + name: "less than", + input: "<", + want: "\"<\"", + }, + { + name: "tilde", + input: "~", + want: "\"~\"", + }, + { + name: "pipe", + input: "|", + want: "\"|\"", + }, + { + name: "ampersand", + input: 
"&", + want: "\"&\"", + }, + { + name: "semicolon", + input: ";", + want: "\";\"", + }, + { + name: "dollar", + input: "$", + want: "\"\\$\"", + }, + { + name: "asterisk", + input: "*", + want: "\"*\"", + }, + { + name: "question mark", + input: "?", + want: "\"?\"", + }, + { + name: "hash", + input: "#", + want: "\"#\"", + }, + { + name: "open paren", + input: "(", + want: "\"(\"", + }, + { + name: "close paren", + input: ")", + want: "\")\"", + }, + { + name: "backtick", + input: "`", + want: "\"\\`\"", + }, + { + name: "char without escape", + input: "/home/user\t", + want: "\"/home/user\t\"", + }, + { + name: "char with escape", + input: "/home/user\\", + want: "\"/home/user\\\\\"", + }, + { + name: "all illegal chars", + input: "/home/user" + needsEscape, + want: "\"/home/user \t\n\\\"'\\\\><~|&;\\$*?#()\\`\"", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Quote(tt.input) + if strings.Compare(got, tt.want) != 0 { + t.Errorf("expected %s, got %s", tt.want, got) + } + }) + } +} diff --git a/client/systray/startup-creator.go b/client/systray/startup-creator.go index 34d85e6175fc6..369190012ce6c 100644 --- a/client/systray/startup-creator.go +++ b/client/systray/startup-creator.go @@ -10,20 +10,34 @@ import ( "bufio" "bytes" _ "embed" + "errors" "fmt" "os" "os/exec" "path/filepath" "strings" + + "tailscale.com/client/freedesktop" ) //go:embed tailscale-systray.service var embedSystemd string +//go:embed tailscale-systray.desktop +var embedFreedesktop string + +//go:embed tailscale.svg +var embedLogoSvg string + +//go:embed tailscale.png +var embedLogoPng string + func InstallStartupScript(initSystem string) error { switch initSystem { case "systemd": return installSystemd() + case "freedesktop": + return installFreedesktop() default: return fmt.Errorf("unsupported init system '%s'", initSystem) } @@ -58,7 +72,7 @@ func installSystemd() error { systemdDir := filepath.Join(configDir, "systemd", "user") if err := 
os.MkdirAll(systemdDir, 0o755); err != nil { - return fmt.Errorf("failed creating systemd uuser dir: %w", err) + return fmt.Errorf("failed creating systemd user dir: %w", err) } serviceFile := filepath.Join(systemdDir, "tailscale-systray.service") @@ -74,3 +88,129 @@ func installSystemd() error { return nil } + +func installFreedesktop() error { + tmpDir, err := os.MkdirTemp("", "tailscale-systray") + if err != nil { + return fmt.Errorf("unable to make tmpDir: %w", err) + } + defer os.RemoveAll(tmpDir) + + // Install icon, and use it if it works, and if not change to some generic + // network/vpn icon. + iconName := "tailscale" + if err := installIcon(tmpDir); err != nil { + iconName = "network-transmit" + fmt.Printf("unable to install icon, continuing without: %s\n", err.Error()) + } + + // Create desktop file in a tmp dir + desktopTmpPath := filepath.Join(tmpDir, "tailscale-systray.desktop") + if err := os.WriteFile(desktopTmpPath, []byte(embedFreedesktop), + 0o0755); err != nil { + return fmt.Errorf("unable to create desktop file: %w", err) + } + + // Ensure autostart dir exists and install the desktop file + configDir, err := os.UserConfigDir() + if err != nil { + homeDir, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("unable to locate user home: %w", err) + } + configDir = filepath.Join(homeDir, ".config") + } + + autostartDir := filepath.Join(configDir, "autostart") + if err := os.MkdirAll(autostartDir, 0o644); err != nil { + return fmt.Errorf("failed creating freedesktop autostart dir: %w", err) + } + + desktopCmd := exec.Command("desktop-file-install", "--dir", autostartDir, + desktopTmpPath) + if output, err := desktopCmd.Output(); err != nil { + return fmt.Errorf("unable to install desktop file: %w - %s", err, output) + } + + // Find the path to tailscale, just in case it's not where the example file + // has it placed, and replace that before writing the file. 
+ tailscaleBin, err := os.Executable() + if err != nil { + return fmt.Errorf("failed to find tailscale binary %w", err) + } + tailscaleBin = freedesktop.Quote(tailscaleBin) + + // Make possible changes to the desktop file + runEdit := func(args ...string) error { + cmd := exec.Command("desktop-file-edit", args...) + out, err := cmd.Output() + if err != nil { + return fmt.Errorf("cmd: %s: %w\n%s", cmd.String(), err, out) + } + return nil + } + + edits := [][]string{ + {"--set-key=Exec", "--set-value=" + tailscaleBin + " systray"}, + {"--set-key=TryExec", "--set-value=" + tailscaleBin}, + {"--set-icon=" + iconName}, + } + + var errs []error + desktopFile := filepath.Join(autostartDir, "tailscale-systray.desktop") + for _, args := range edits { + args = append(args, desktopFile) + if err := runEdit(args...); err != nil { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return fmt.Errorf( + "failed changing autostart file, try rebooting: %w", errors.Join(errs...)) + } + + fmt.Printf("Successfully installed freedesktop autostart service to: %s\n", desktopFile) + fmt.Println("The service will run upon logging in.") + + return nil +} + +// installIcon installs an icon using the freedesktop tools. SVG support +// is still on its way for some distros, notably missing on Ubuntu 25.10 as of +// 2026-02-19. Try to install both icons and let the DE decide from what is +// available. 
+// Reference: https://gitlab.freedesktop.org/xdg/xdg-utils/-/merge_requests/116 +func installIcon(tmpDir string) error { + svgPath := filepath.Join(tmpDir, "tailscale.svg") + if err := os.WriteFile(svgPath, []byte(embedLogoSvg), 0o0644); err != nil { + return fmt.Errorf("unable to create svg: %w", err) + } + + pngPath := filepath.Join(tmpDir, "tailscale.png") + if err := os.WriteFile(pngPath, []byte(embedLogoPng), 0o0644); err != nil { + return fmt.Errorf("unable to create png: %w", err) + } + + var errs []error + installed := false + svgCmd := exec.Command("xdg-icon-resource", "install", "--size", "scalable", + "--novendor", svgPath, "tailscale") + if output, err := svgCmd.Output(); err != nil { + errs = append(errs, fmt.Errorf("unable to install svg: %s - %s", err, output)) + } else { + installed = true + } + pngCmd := exec.Command("xdg-icon-resource", "install", "--size", "512", + "--novendor", pngPath, "tailscale") + if output, err := pngCmd.Output(); err != nil { + errs = append(errs, fmt.Errorf("unable to install png: %s - %s", err, output)) + } else { + installed = true + } + + if !installed { + return errors.Join(errs...) 
+ } + return nil +} diff --git a/client/systray/tailscale-systray.desktop b/client/systray/tailscale-systray.desktop new file mode 100644 index 0000000000000..b79b72d181ddc --- /dev/null +++ b/client/systray/tailscale-systray.desktop @@ -0,0 +1,13 @@ +[Desktop Entry] +Type=Application +Version=1.0 +Name=Tailscale System Tray +Comment=Tailscale system tray applet for managing Tailscale +Exec=/usr/bin/tailscale systray +TryExec=/usr/bin/tailscale +Terminal=false +NoDisplay=true +StartupNotify=false +Icon=tailscale +Categories=Network;System; +X-GNOME-Autostart-enabled=true diff --git a/client/systray/tailscale.png b/client/systray/tailscale.png new file mode 100644 index 0000000000000000000000000000000000000000..d476e88fc262f5409fc76b484e0550a398196df5 GIT binary patch literal 14069 zcmdUW^ zDM-i0GrT{q=a2aQvb*;^edf%Z>zs4#OI>X>8cJ450051Ky0RVsNca;8D9GT~j(7hN z0FpHt$_fVFi1i5zCyV*Sjeo>ZPHql+C$%4Kx2=7{*mUXR8Pjfbs+buM1oyqk=ulTM zV9iF;541OBDXe!?@&xB_I`B7kq=*^nOY5Z@Df2VwslAjwzg)xnh0-nJ(&w*=Wj1Pz z(XCHe`6-HqOzTVc!^_w4w!ZZ@MW$XqSmPy|sonD-)GrcmI_CYvO8oxVY&CiC$iZQL zm6xTBaHDL&D)H{Nqh6CGGH|5|qRgI7yHE5O%f0&_vjn80@7 z!uv~@#& z7363Lq5Ge!2zeM3bKmKZx0}-OreHucDDAQwnHUZ4Kf08}w0nhVx8JFNNremeweRn`AU|=D4 z&Y@UaTbtD{%W;(IcXDh;dSC^_6|xKm7oTOxjXiXKNF+YLS&p6{Q+~?t(Xk8 z5wUNNNwC5WWyu-Kr#1UnP~iIIRmvK5;}l;KEG-`nOec=y3T*m43?Qf|X_8>i{$^l! 
zov!iZ=Wr7psQVpDPJ!$=4F+57aBXccFc+408UMTq&PxlNLu)dH4{UVDpdi9^^0R2Zn3ih_=4cmoefu|#M>%MobS*_B| zB7ibq4x-O=f4|{iZ#`ll$JkEj+8s||zmEd(w_^G zB~jqs)iTo<_Dy^e!crJvUt3?-M#Lh(RY8rdCwSWDlj~HQ!rUl=A0};{2mM}0IR~#<0B~W=@j`M+&|Swc zp?_#&02q(Wur%kH_5iIB^&?_r0IPPUm}N)_Ta3WY(1Fd5t2_EQo+qLt*BJmG0brvm&muojfJ0A$`3gCp_@vlyaNwPOH;?c;)M=>; zX0xIK6(YZOb9j*8A$-j9NFL|0lekh@G9BbJ`juk}sAMMnFE zJ$^QgMxmcs&lHMlRE*OBEZL8Wn{M}f!SFxW!ffCu02@xzfJTA@0su?D8zf0-A23XB zZ50jT+u#K{dS%Gbc52t34m*e!(x`I=AmvN0guJYrbd`yP`As|j{-KLt5RU-E@9H+l zmsZaJuOqm1`prlF7;K#oqQ78Y{vH7L#D3$=DVN&Lplf$AJpeR(=#NGE5V}+91QDCw zRsi^1$jSCMrCd5Z1M=Wz_^jmj^z=M{%2~3js}l?C@hH%JPPU#uB~ugp@cDcifWXy5 zLxijuveos#=IJni>h`Xs+%-AJvUc<>O`xnPGOL_W`mX>=E5)jW@muw|F3P1}XBxJ) zwxl9YgjFB4yJCtk`wPB7L14ds^+NEMk-|snm24H*CgU6%vb-PBdWk4DzC#Wxv{eDC z)_9QV{I;wgu{1RC5)SEWdhVVk$AK0(_UIwzAqs$?*bK3ipJSJtr9qlT0Nt(TtVh3?>Y18kvZ1<2(*TDmgSvDy3PlX=^kdlmG z<&+IPkHE`mCLBvcik+XISEB*oy4@EGZ2ksGt1J-#+|2a&qr8sO8ve=vO--n`;M050 z6*{luS_^U@4Et_yHZ`kXjXJU*#U3?3FM{30MtN4Ve9Hk13vhn8=z%rVgrAi$f?Fdd zpI`&`s-oy2R;^gL!6b8Il^MX>j9gGlh7bk-mcgJ{X0kZb2AF8a6fgs@sdi3tp92Nf z;o@*H@t(O8&M0)<+vq*maow`F1p1g39V>GR0kCG$7Bt&T}ttM~+B!HV&OLD(}4z#tM z?Cp2^veRz<;2w$_w$zC4-}6LpKRgAc@-58)uCvmdv#rjd9cqPo^WfxZH)nF}2o%Hp zyN0u{68L$q;}TJ?TAl>HGO=_Q!44p>0`NVBUbY2L9|05q9j#Yv_||U^zN8O(GArV8 zlUhBNJgu$m9L-W=UioA^rf$7#Y!E+E>NvFaR!6RIb*O-C{PXP7?GC9$F305x|ARka zWaUdH6CQn9T3r%}Hqy4A9G8sr_}B9v?Cf$$-~vXlm;TmlYqN22l@}Hk;#RHEE6P06 zzuyZSwp4G_ISxFM5V^J_&vAjH*rNLByo#08xzs!c4iOF+j^h1EUlF1NsqIyd$nfpF zgQW#?29C=ifirQ-2MDcklNGCFaQim9`rEn7p-b7`S#C>8L3hegx>Dj-9k4#f>*d=8 zJu=cRUQ_;F0pouwW@%T`VN)@l{Bn<1%ihbwo@~8UcY-TILPBuEh?unH+4qNs=e7$X zrUZ4g<=)0~h$?HM-h_vdeYQM{Hxf5{2G_ie2xnPXSgx4B5b?4iM|pfCHM@gmhmU_O z?1<$`e7Tx3o4y?d&Xwmb$s1OBEXUQkm?NhtvFbEcSVr-&k4Z_LL0DV9b0U@x+(rRY zwC_f(lvO+nZ>`SG9g^#|u@m}#>KBuIw`L_@L|b$d9ryoE9ENp@JxABDZjictL0y%Q z*k#b{LxtMg>%XDgOrgREFuwasE%2@M^X%TyYQY!|SCcBij;wsD_$qNuV0swc&cXF^ZmupDzNMV28U?mG@IvGwuFK67OS2kpT1KyuDj zshvvomfKt}lw;aEJ39O?5sMCLvwvw=AWbAmI!aA3GSs&P43y>eE4yy6472t$nXZ2) 
z=V~`2xe9~@k+b?(fr?W;^+}+Zf^ctDvp;?iWUzVFSdJqcq>G=I(F*WBFFL4m)C-1 z{O85Q#P%*@wk|K^tdEsDx))m1`Q3=8|7O!dJ64aWcOEP2@JpY+)H9z%p&!FbEQr67 zMA~o>8`zs6IyKtT!AD|mG{2(y)so*fng^E~Pr7R7@RV(=JL_Q8Zr9;u9+M}P#8YmQ zhxjjN!G{kN0MVr9wgRW-=lfzh)N%YkJZtv8(CoQvnS4}$; zyqUe&Z+aO?M+d>4xaZh@$ADk-j2)`h_G1nWG|P+qS|1gMb9!C|G)D($*ODs<6G$go z`9l5K3r+I9@DV^)l7y$kv-N3-DvP3MPXnudf}y-4MU!&zO!VGx$rtUgzmC|e4?-{{ zgb_ilm^(rH;@_IDvNvDSJmnrL_#z!Rl>lnb&0=}^FFn#Zx>c-h z<+@YMbo9~x@8nPBGL@FdbU;jBLr`e8|1$3~5OJ}bd26C%rXXMan(fA}WkmXJ{aH=K zcasqaEgVLsg_vSY$kU#`4l!sEY&LzM(DbuK7`;ZSJHf_!JJ2?@3t@{ldUk@Yhc5w^yK?weMl|fh@h7X4B7`+$ z!HT`G%GK-Rl+4W0bn>|FZcg}kRo?^9^HQGjnTK{olv|ku&_M&xG0jD(VK-D@B{k$$ z=h^HycNvKdTDiJhAoVlUq_zd6+X{;@PjIuA+~SI&T@G$`J4 zb}<`u$N8xe&FAIQHLl$Pn~aw&9Qn`Z*U2%y9W~Cg-B0=sNBwWRo}oWm zjE*)oKCgtPsgzFcpkW$LHB($5HF5k?TgZcefXVJpZ7Ls3UEC>4B9#c|f%n>a`IKbt z;uD^pdm5sqUAu~nm4obwr|VCHo2opfIt4FR#m*|!FPO5t*@gD{d+`v0h!6AfvmGpTZ*IH=2$S{qPqfpac!PYt| z@8=U(na(8dDf8vhS2!}zxe!<7k!ZmD?hz`+HTFUe=~dtL(xLXtB!BPP+iY7g!)vZP zWoOljIBK1a_|;;~>U$2Y^0P(q$y%V}B!Vuh*uEXB%Xx_+hY?p8!q#pGu zWMZ(2bs7xM^Ho}vze$2^$fBax6z%C^{f%XW-~T8dp3P3a_#r>R<={ugz$QIFrdgly z(HpwQ*n2na)sRTQKPoB<^;E4LPU_mL&0CX^rxD!fCP9IImieZLb3yDLrNInak$NqH zKNDJ(7JGfKDYGhxf9O7}boa={Kp@;YGilNd01$fhq%{469?s+ z4e?9+LrTZ0Vji9LoSqwHu2XW=v#(^@4W*5wZAHn~O-_UJ>46$N==%*9sa4mreYY#S z{5vZ*Gj?Z&tS}QfYR9xOIjLrCOsNwIh=~h8SzGA$^73*j-EDrHPy1K7IN7HQw-k83 zd3L3D=g}tY4bl(Kr~Xet0F#Hh zL2t>GBDzS-hfzD4y`m-H*M)a|t(I=PgkFG5IA^%;A*RT|ijSWSb)g3Uyr8E;Apy4{g#6&VGW__z_Y?D=foyHGv~6!~#AdAAwX6QD zf33eq|JjAbZ7VB0eq+0!z_RwyVpnpPyiG+sN4W(9!{Opgg^-}&99PrhA8t%7{PHd% zho$(wn=^?-B9|Y@kvQ#%l^k(T`GtXJ|+g5+nL&lMecT8*VK1E zzPb}IzS5%I;w-@+6@^POz!| zvC?e0)uLsAcx5(D0Z!}xF+R;=`^PHcd9UT^VUKb1C%A)uHE(}Bc7&jxtA0B%^(>(@ z{1x^&m{A5Vq9cNO?y|-)l6+HyOLrDa_(2x@v^M#8zTz$TmulXW^LxkzHu^A!p zCo8i5q3_XhW`}Xr?fq39xnD785(*Hg|4;Yde(4MG@GB$`k5?Q@2AON-Jq*E`{1wbf zXC0EXcOh$Yeay_l#^K*gpB5cnKj)aOaR&DU&DUh*QaOM2cvS>5()sCz%|<=3E=2I% zjEK+1a(-~t6iUjU@*KSWr2{crTW#MNiHb`+XebY%;2EnIJS4#eII2UT^!@c>W6Nhv 
za)bjLC2w!mdnx91=1w{06$ov4MvjE&aEyZd+pc4l(Vq2KB>_T+H5X;0GC4OoJ_@%! zMaT=eod`N^D?ZX8tXUByhPyBEbnE%|J%tl);`o6M-_jEKPB{h|ErF>Rer-^$1L}3pxyknLC4iSJ*vScU!a)ax^Fie zt{bi)n3^{|>rd0oiQwjrKWq`_e!0<}RU zqaIIm-)=YNClrQAUg5M^%Q2B^cKCGv3O73m7JXn&QvQ2rByK~+Eys^W{%+Ar(Gn)> zHItBVK#v_J=j}JXXs#j~UIqI5{#j}R%7!%y9>R&8@xBi-aeG`Rb0F+XAq_G5k*ew_P7^?OIy zJX>u!#h3tv5&@!@_SolA>T=Y#Rz}@qcPVSnKFX`^46C2zc3Au4zb1vmw(t>Fvn7^~ zQhtsSee%7Ox1t*#n}zAlofI)g4+{L1+W3mwP;)bD*&X3+$-VKR2&W2RU`Sf&GI6)Q z-H^R_(45xgZrOJqLDWGMn7&e4)$^&X_zV*5C`?{1yFX!P|EjRn>VM#{jmv6fDfj2R z>ECaaN25!%%5NN_{=WDRc}GLkxvIqD&@nI+3ekG3>QMyjY;a<9tkgZTGE}j5B>&|p zcbb+w*$-+s6*z++JzwF@TNKU^bJy91jzN46`{A4kUTtq0dNDgVvZsR?HJ`OMZj`7| z4S*o?1?*z4=gvtNV$xCpPedzH#gO2+a-m8AJ&>N2*kV8cW_q$;o#sv#-N)(qk>Dyf z;~5~#1Gv?V3oIpu1O)||N6q4rr9hAZ*o=;7>VQ2jF+FF4SbmefDX%yuB4U9Rf^^|J z)laU8LgsZ&n{+U%!x+=a#~bkEUqzu{4@<$DWVoB>NlF%HR^17>XG*?n6Pt7!jxddK zvq@tpDWc#tkctc+k z&s3aQ$qoKqBw=jDRvSyw0bZawLtwY=`IV`=cRM8wWB$&A_b*|Tgym;-h{#tNG;Uk2Y6eq|CY zHY@JnCfg(*je3{daqgjTnf2#niC~EbQ%dcsO?%tTSU?|V68^HW)0peoPsP;l5}}^+ zx{3*m8{e6$T;dOl|A|1r4jiyF_^+U}Ln%IraUvnw&g5uBk zQy^I1`B~C-ORrLi)Z8o7rSP7AZ(KtQy@Kl*2@ApWNROHl^B?WbettWbY$lZ=)3PRj!F<}(`-DKx_M-FeljeaaqgbGQ<=A1!6D~2Oe=@`R{7bx z=ze6IV-^nt=BT&Nv1A$d$q-BdG^&O2gB51^jEu7Esx46sgNgX#{V{0nsdvrWWEe)S z(FD?vfH`q)Egc=YnhR)%($7;+KglyyQ7R9K+SLfHO>x(T zK0G`eUs-I&^1?^GMWacX1Hn-rz{hzbZ>H<(_~5U*bDXo(c!hpxwS!&i zq=?2o1Yl`;8u|}-_^*$_JmviWIpXfX6mc?NSa<=3f@MQ&4#GDFy~m$w-JVO1m^EV0 znW?y6;ZftlUcP4HOx(%bo*EjGt`FZ1pAGFe4`5K8=PyJj=wJ%?Tloe2_`mm)(25lv z_uKAxx|&x$3X_V2$9-B-BhKX$A25T-Omeu*AGmFp9oQ-;Ft25|h>XX2q9eiC{?E`D7wP3-C?*xohPIN$b^fFZPWCd(%iImvVuN1`kl z2xC9xI&z1+#9a5>>s>pkQz&VT|8ghs-5^ad^*N@6!)w^cKE-E$!pr&o{qK`7h<=kh zWk39R!D~F7Y{}Pd*HA^QoOa?(2k5k2px=2(c|74Ym6&WNy}_7UuDA#zrJYiVUT85A zkgLb&Porwa5j@%hcK60y7g3DG@ALG(%j5d>%j~+6Zj$V@y=1t&_ES@2{ys3%XY@~W zHKQ3`&k{@7Uhj~a;`O#+LV_p!IOz9btjIsxGJlfP2_Zn7Km0w2y!>Y06d!3hMC-6I z+m}X6m+by-qNxvn9 z*FE3+Mn$|E{P(2U+{(Rv^}Jghy3BEp#QTO=iK$9q?GIaCbNy!``uXeyN-DeWJBD;~ 
zh#=9*{m=VzF);72fO4kw!|x7#;fgD%BSIwZJ-v!cNOx&~_dD;Ar>*m5-{HcOZkIHd z2FHTh=8W+dk&#iaZ+Q7!DjIBUZH@Yz`3fC*a&zFfz3LrL?(;*%2}*n| zbi$)CKh-%TnJ~3s{;6&LnHd>tTk~zfSao`E{v;-!@bTj>SKgTm&ISh9m(Hr$ABcTA z^3LVsPa+OgF^lztt=M8tn==C!gI0?0O;$LSx{)rmqhkRN0XNJ}k6P%r(*T({Lu?WW z=z&2NTe-e9WmI3fW~`Hg!!O0mnF0$!UEh(HAP>4pe{nRKu$&y%DJ5kS`S{<8#Z|2vz9{#<8X$ zia}1TP4y`PtbUcB3fX+|vihgBiu z!V3=~x-=1mo};V|&qVuaUW=XwB-{%KjqsM-Bh^@kl@FDLdQ=lF7ugdX3qzg%-Egy! zK%&8pTXGyZ?;Wt|tlMkjZlrn2x2=7qtv$kPxE9y~*8t zsHGYdWt6KbeSwnlS1l)_RMPxJ%0+{UF~a+gZX> zsX_!hRKfJE3wjHEDRW2H(CsorvyYSIUFI1bm=fa9ti(Tv@odsI7R)WlblIMDCCP>k zY-zor^>XF&=ny@1QmP;SCNktvqoo@;{FavKB=BhmkLi#P5L$X@UEfM&ffOPKT_=aUK=lxvh#byw&lQ zY{RX$ba5tEIku{hFcshUCL$IUy4AWIa9?oC_=-lKN|4v}Wf&w!q5o~PH_@K*bsnAF z{iknqEFg&RRu^))8?#=?4E!oaI=)ph#|+O6CN*Dhp<4Sd0#7)Jz{6fyG(mv%ZRkWa zC{e4QSMP?MaT`zqCRDeVAi`r|YXSr|wD%wY+1$!NPUv2~ z@RK`duJVxplI4@Q{}6Oa-2X{D`BzJ_gM^(z3?o+Jg+KB{`2@n00`zJ4O|yR{g468h z$kReXNR@n{+d=ZT6WSf6Ag`|%&F(@luX>ry`PZH~d$b91sl5p_R1UIm0J?OC_ZNZA zYPOH<%{kUW3KhiCaHV!1uuubrr&CwzNp=>dlL6|=V${tS6Yw0Nj)F=`>2L9 zpzW5t6mpB>@*m1PwWBA$2LEmNcj>yX4U29ER%zxwE7;_5`ciuy7*JbU>)!pBFokRO zdyG4HTm_7h@xC4oyeb8%iE4jePnU)h6v(*&^{vtl%r@<$NGycq>nQQIC!k(^^WPva`(u=&mYwbrm zCa-31)bS3POQK^t0Uw*T{8_G;oSR$vhK(5|#eMYApj17}fF3r$2)aFezJG`638hUx z_rFCMZuy$QidRL2NV9gi8vW_}wO=`0LBkBFkcWlmlYcWR5rNI`l8Ly*(0g+h7$o}M zyWW8$Y72Y$%nwNG3VyWwmw-XtC0S-A&6^*R#Uz_JvEe+-@l0XkU;EH$BkJ~vuk4cj z2O-FDP3`T0|ZIc;~j`TB%j&xxTry@ov=P~$xH zB&;$Zqd@*JEku1orS`ItxliBq#wrX}Rdp*LlTC;S=eho9ETlx$G*>AU=k(M$no5Xd z$?lZIFyl@Vb)G{m1@e6Q36lqk>ZJP4#O??TPa1v7-#@0WU3wyE z%$UG13G>ydF>jb_ShJIl_27d7eI^@pRhR z+PcxD?$Fjfk?b=O*F}unJKI9qk!Gm7H|~|}JXJSgo*_&9I$+@Stltuyx5V1i@} z=NRHLbj7~@XeH<75+CjRrlzLF-KJ6C7RzJL8W$eCvC@#1vSmfwwUQ#!?H}wdWlsSq z+|%dr`2MYWw?EHQRY?tFyDM3%BsiC9)1z5@yI-lswA&)4$&a>F0W%ke;p@i*Jd{lh zX$lT74`ZH!uz&hgk}I-exz#7$BB%9H1EC#7cfF6M`(JOKzm@l&5Iz{WWK1>Na7c8_ zCkeD3-I# zF%n(rciqDB)&59Y#c7iM{F>#Agxo1{dWXH#527KM?Y)gD*RUk-#*EBNXL|A3Aw15; zFZ_PN60Y}I#b6|Qk*xW18lHG_fV^dX|6LY 
zJ!IBpGti{tCr~Tbrq8C+yTORKFDIG7S}87NB;dA=FguA5)!JmOO#P#KPPtyk-wby= zUtM|7)z!sN^ak=b>az{_MHa2j)qnH^d$t@{DQXiX9&}JYhu8T1@`Z&-?HPS(`Hq58T^e3-$gDv)-zC3V zvw8`;YToC53Q~T|`<|&asEV0_1I4pI`@aE6u}{u|8Sk=-Ww(Q38qvixTF?NskzcT7 z|9dAkWh4o~JO)mo&6PnWg&tRx6Np5!S8Um>@QLffI^8gT`D zD=`p-?qZT&b_XZ+&FTLLSs8{Ba;C?b`uh6DMIlH|Dr$hf{?$EJIL~KmY_tu$r&xmA zGy0fI#)o-Spv*Rw8&cKGM$^iqsDKA_H7V4ye?<`n$$=yi3Ftpa7=yX`fw?h$NH}|b z=O*IYo$Oyy%SM47kex@N5cA1pZ!A4y%JFG40NDNBvkAwVLUjqWupb29`&*d_gn+&c zP8Nrw4oIYw(Ym~=k+YE$|EVUiCOZ8z#&jw(=Iz=3E7lQ=g}sTtcm=C5>2i=B_9{|1GNRVy0tZ=nrn z0nxkkmg2D4YrX8*xmtJ7EkyD*?GbE6K5#3&XqOhtN%u(zg=>IqwyPY7no=AfAIhcS zT)Lj|+r3xD2*wIlIL(&QX9rz(Cp>{*J?r!{$j9rv6*FgFJ~ieETJKUqf;p&I^C+Jh zhL2LHOTQdWYWy19v2ya-AeL5bfH~ssUo$EI?qRFOamGtN4EIuRO6)$0&p6>sjz{wr zySuw5kF?eo71E;DF^cElkYNurci1Mg>i^JUZ)<5;l!yyK`7-Czm>&(<&vJmV;fJtk z3g(2zIe?(Af-R^=F4+-_vSAOzk2hW5VD@FbZ>BuR(G@TPf#n%#T(gp#Kx7fSM&L{u z=$%R#>eGR4wt=?07aFoNasr0x`ydN-70h9%_Mk>x)lKjHAhUU)Ol6?`U}?#FZ@vVo zblt6vq^>~%=7CwsE-jj1bw&2AC-N(t%h6>G{&Y^+#PZ9C6hc}{(F8noBw8G zsmG7U8*%ORX;`7K4_KzzyWrnDAVJ<2j~P44t=9xn1d+pH@V@7<-}_J+wUGg5OYsAgq#d=+dn;;O9`*v@ux$7c~mMbBPabe z|3@TTk~!Xom~?ZY+ze%gv^PkKKhh{;cWZ!2Tyx27OyxDHeJRO%u*f9+DFzF#6?(CO zMxF^d3?G8qY$j`B8Vhfi7luYCLn=cCXNu4w5?#oj^DR!t8dqL}7sGqR#(}Htbxq(o z(j}Wv3s6@3P&wiZjzdjx4^xa0;`(_;0<*KT?#&r!fF_C2TmF~3w?BUh!zQ`|xXn2! 
zz>Du!h4&qsUIOlcrl!7N7`@6ez=>E??L^*a{0O*DUc4|DAj}v5dPM-4tHNh7kA(pR z4;&n698rIr1q8z>ZhmZQvid{=6yZTXXx*I=(IyAKl)ygRL~q5R!|Es5Rx(PpOq;6j z+U7rsYhfClgAhRb>%^q7*MW>P=jJ7#eir$A*N*%hq|+MrbFO`vk#|hm38ol zj||%{DETvY+b)p~GQ2B#ojRq)0_dd%KHrxga4r5cMKXN5^e#RN+FF}@I3H@w04t(q zEWuLZ*Uh4DKI3qaz@_ck1|CUp>dh9uWUg2XzVUT{Srs>E#RJD+@&$ z*5ECWLRa*GxLudOFU6oto^of9+FNjy>}Crds+w%-2TVL?o_W=rXB#GO8j1bJ3-vt2 zsTm`E+%lWme#Az~0w|%mw=4S4aT=1=gNl%c2KL1SRRwa83QziQH19{PN5uiIe^<&` z;D1bz6Hv%zrKN;LfmiZP1;(FGfzK}J!BYnTgQw%*d0A1kTy?$p73gMJU2zM3h=97Y zekwrcp(jx#B67$JIiCBU#>o--atFQuac6*CTB;)GWv5*}NGzsM89~^;6QL3mal|xy zAp;bak?RE8&vYO}ki7jt8*<9d6o5tp5u93a6=oC-Od=rZo<43Af6SG+S)dB+1Gxc6 z%3&gW5p~&IB}1g3NCO2){4P+Td}+Y+&Aa#s9^j$jOXBqGP*jo%iA~_cu_=M!6x>%t zG0O)?vqtPz@0(_O2nyf~m4+yQncEkEXkM#7|9-%vROx48Q0`KFyAx)c0?5XM1foot zucMg(RxXPP9}E966SQVr+`d=-3Urh2b~XgA(HuXMq(e4j7#5x1)weyuD}be9>n`+J z3frQ;4T~dE00?DICwDgg5|h04M0!tVcb5%uuVE$dUI9{JkmtZX={5zokw#4WLX(`9 z-IaqNHQd!S22I)9UK$#L&Apa>bVWizDEJ=&j%_sgd*xNP1cL`dI#FuBG|VIT~W?>txjqz(t^*8L;b)D*CK9hr6f?P+pje-&}~ z){?y{M?}Kk{BUPoqZa}~1ofs0(?5x3yE%H<@m)WuN*incZv3F;aKvMp;~qR& TPdE!t8~_?B+RFKg_n!Yh!VnPt literal 0 HcmV?d00001 diff --git a/client/systray/tailscale.svg b/client/systray/tailscale.svg new file mode 100644 index 0000000000000..9e6990271e472 --- /dev/null +++ b/client/systray/tailscale.svg @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/cmd/tailscale/cli/configure_linux.go b/cmd/tailscale/cli/configure_linux.go index ccde06c72ddbc..9ba3b8e878d52 100644 --- a/cmd/tailscale/cli/configure_linux.go +++ b/cmd/tailscale/cli/configure_linux.go @@ -33,7 +33,7 @@ func systrayConfigCmd() *ffcli.Command { FlagSet: (func() *flag.FlagSet { fs := newFlagSet("systray") fs.StringVar(&systrayArgs.initSystem, "enable-startup", "", - "Install startup script for init system. Currently supported systems are [systemd].") + "Install startup script for init system. 
Currently supported systems are [systemd, freedesktop].") return fs })(), } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index 8cef9725847a3..d83ac2710a897 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -157,6 +157,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep software.sslmate.com/src/go-pkcs12/internal/rc2 from software.sslmate.com/src/go-pkcs12 tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/tailscale/cli+ + L tailscale.com/client/freedesktop from tailscale.com/client/systray tailscale.com/client/local from tailscale.com/client/tailscale+ L tailscale.com/client/systray from tailscale.com/cmd/tailscale/cli tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale From a98036b41d5f78d403f8328ccd88df9ba3aa441a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 27 Feb 2026 02:09:12 +0000 Subject: [PATCH 1026/1093] go.mod: bump gvisor Updates #8043 Change-Id: Ia229ad4f28f2ff20e0bdecb99ca9e1bd0356ad8e Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 4 ++-- cmd/tailscaled/depaware.txt | 4 ++-- cmd/tsidp/depaware.txt | 4 ++-- flake.nix | 2 +- go.mod | 4 ++-- go.mod.sri | 2 +- go.sum | 12 ++++++------ shell.nix | 2 +- tsnet/depaware.txt | 4 ++-- wgengine/netstack/netstack.go | 15 ++++++++++++++- 10 files changed, 33 insertions(+), 20 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index cd87d49872028..d801c0285ca62 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -123,7 +123,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/gogo/protobuf/proto from k8s.io/api/admission/v1+ github.com/gogo/protobuf/sortkeys from k8s.io/api/admission/v1+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + 
github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/transport/tcp github.com/google/gnostic-models/compiler from github.com/google/gnostic-models/openapiv2+ github.com/google/gnostic-models/extensions from github.com/google/gnostic-models/compiler github.com/google/gnostic-models/jsonschema from github.com/google/gnostic-models/compiler @@ -271,7 +271,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 4128ecc4ce972..48a7d09495c5f 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -115,7 +115,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/transport/tcp github.com/google/go-tpm/legacy/tpm2 from github.com/google/go-tpm/tpm2/transport+ github.com/google/go-tpm/tpm2 from tailscale.com/feature/tpm github.com/google/go-tpm/tpm2/transport from github.com/google/go-tpm/tpm2/transport/linuxtpm+ @@ -220,7 +220,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de 💣 
gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index ae13b20449b41..03f7e1f09b21d 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -103,7 +103,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/transport/tcp D github.com/google/uuid from github.com/prometheus-community/pro-bing github.com/hdevalence/ed25519consensus from tailscale.com/tka github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ @@ -170,7 +170,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from 
tailscale.com/wgengine/netstack 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ diff --git a/flake.nix b/flake.nix index 4e315a5cab7e6..0dbf74e7884aa 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-4orp8iQekVbhCFpt7DXLvj6dediKxo1qkWr1oe7+RaE= +# nix-direnv cache busting line: sha256-Lr+5B0LEFk66WahPczRcfzH8rSL5Cc2qvNJuW6B0Llc= diff --git a/go.mod b/go.mod index 80b453cd5a9f7..caa58b60833bc 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/creachadair/mds v0.25.9 github.com/creachadair/msync v0.7.1 github.com/creachadair/taskgroup v0.13.2 - github.com/creack/pty v1.1.23 + github.com/creack/pty v1.1.24 github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e github.com/distribution/reference v0.6.0 @@ -123,7 +123,7 @@ require ( golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 - gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 + gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8 helm.sh/helm/v3 v3.19.0 honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 k8s.io/api v0.34.0 diff --git a/go.mod.sri b/go.mod.sri index feea9b11b1ab0..91887e63b3c8c 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-4orp8iQekVbhCFpt7DXLvj6dediKxo1qkWr1oe7+RaE= +sha256-Lr+5B0LEFk66WahPczRcfzH8rSL5Cc2qvNJuW6B0Llc= diff --git a/go.sum b/go.sum index ab4f3303623c8..1f8195e47fff6 100644 --- a/go.sum +++ b/go.sum @@ -276,8 +276,8 @@ github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6 github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd 
v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= +github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creachadair/mds v0.25.9 h1:080Hr8laN2h+l3NeVCGMBpXtIPnl9mz8e4HLraGPqtA= github.com/creachadair/mds v0.25.9/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs= @@ -287,8 +287,8 @@ github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoi github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= -github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= @@ -1726,8 +1726,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod 
h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= -gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= +gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8 h1:Zy8IV/+FMLxy6j6p87vk/vQGKcdnbprwjTxc8UiUtsA= +gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8/go.mod h1:QkHjoMIBaYtpVufgwv3keYAbln78mBoCuShZrPrer1Q= helm.sh/helm/v3 v3.19.0 h1:krVyCGa8fa/wzTZgqw0DUiXuRT5BPdeqE/sQXujQ22k= helm.sh/helm/v3 v3.19.0/go.mod h1:Lk/SfzN0w3a3C3o+TdAKrLwJ0wcZ//t1/SDXAvfgDdc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/shell.nix b/shell.nix index 07d3c1ad53bad..a822b705a3062 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-4orp8iQekVbhCFpt7DXLvj6dediKxo1qkWr1oe7+RaE= +# nix-direnv cache busting line: sha256-Lr+5B0LEFk66WahPczRcfzH8rSL5Cc2qvNJuW6B0Llc= diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index bcc00590a4d2c..8c81aa4d70d5d 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -103,7 +103,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache - github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ + github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/transport/tcp DI github.com/google/uuid from github.com/prometheus-community/pro-bing github.com/hdevalence/ed25519consensus from tailscale.com/tka github.com/huin/goupnp from github.com/huin/goupnp/dcps/internetgateway2+ @@ -166,7 +166,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) 💣 gvisor.dev/gvisor/pkg/state from 
gvisor.dev/gvisor/pkg/atomicbitops+ gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/atomicbitops+ - 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack + 💣 gvisor.dev/gvisor/pkg/sync/locking from gvisor.dev/gvisor/pkg/tcpip/stack+ gvisor.dev/gvisor/pkg/tcpip from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/tcpip/adapters/gonet from tailscale.com/wgengine/netstack 💣 gvisor.dev/gvisor/pkg/tcpip/checksum from gvisor.dev/gvisor/pkg/buffer+ diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 42ac0ab1e4dba..bab94e2bed01b 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -613,7 +613,7 @@ func (ns *Impl) Start(b LocalBackend) error { } ns.lb = lb tcpFwd := tcp.NewForwarder(ns.ipstack, tcpRXBufDefSize, maxInFlightConnectionAttempts(), ns.acceptTCP) - udpFwd := udp.NewForwarder(ns.ipstack, ns.acceptUDP) + udpFwd := udp.NewForwarder(ns.ipstack, ns.acceptUDPNoICMP) ns.ipstack.SetTransportProtocolHandler(tcp.ProtocolNumber, ns.wrapTCPProtocolHandler(tcpFwd.HandlePacket)) ns.ipstack.SetTransportProtocolHandler(udp.ProtocolNumber, ns.wrapUDPProtocolHandler(udpFwd.HandlePacket)) go ns.inject() @@ -1769,6 +1769,19 @@ func (ns *Impl) ListenTCP(network, address string) (*gonet.TCPListener, error) { return gonet.ListenTCP(ns.ipstack, localAddress, networkProto) } +// acceptUDPNoICMP wraps acceptUDP to satisfy udp.ForwarderHandler. +// A gvisor bump from 9414b50a to 573d5e71 on 2026-02-27 changed +// udp.ForwarderHandler from func(*ForwarderRequest) to +// func(*ForwarderRequest) bool, where returning false means unhandled +// and causes gvisor to send an ICMP port unreachable. Previously there +// was no such distinction and all packets were implicitly treated as +// handled. Always returning true preserves the old behavior of silently +// dropping packets we don't service rather than sending ICMP errors. 
+func (ns *Impl) acceptUDPNoICMP(r *udp.ForwarderRequest) bool { + ns.acceptUDP(r) + return true +} + func (ns *Impl) acceptUDP(r *udp.ForwarderRequest) { sess := r.ID() if debugNetstack() { From 30e12310f19fa85a9e35fe5800b067d7b033bd33 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 30 Jan 2026 17:30:39 -0800 Subject: [PATCH 1027/1093] cmd/tailscaled/*.{target,unit}: add systemd online target Using the new wait command from #18574 provide a tailscale-online.target that has a similar usage model to the conventional `network-online.target`. Updates #3340 Updates #11504 Signed-off-by: James Tucker --- cmd/tailscaled/tailscale-online.target | 4 ++++ cmd/tailscaled/tailscale-wait-online.service | 12 ++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 cmd/tailscaled/tailscale-online.target create mode 100644 cmd/tailscaled/tailscale-wait-online.service diff --git a/cmd/tailscaled/tailscale-online.target b/cmd/tailscaled/tailscale-online.target new file mode 100644 index 0000000000000..a8ee7db475378 --- /dev/null +++ b/cmd/tailscaled/tailscale-online.target @@ -0,0 +1,4 @@ +[Unit] +Description=Tailscale is online +Requires=tailscale-wait-online.service +After=tailscale-wait-online.service diff --git a/cmd/tailscaled/tailscale-wait-online.service b/cmd/tailscaled/tailscale-wait-online.service new file mode 100644 index 0000000000000..eb46a18bf92d2 --- /dev/null +++ b/cmd/tailscaled/tailscale-wait-online.service @@ -0,0 +1,12 @@ +[Unit] +Description=Wait for Tailscale to be online +After=tailscaled.service +Requires=tailscaled.service + +[Service] +Type=oneshot +ExecStart=/usr/bin/tailscale wait +RemainAfterExit=yes + +[Install] +WantedBy=tailscale-online.target From 0fb207c3d045b888523914dce0b6c9e9a1abdd69 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 27 Feb 2026 13:49:05 -0800 Subject: [PATCH 1028/1093] wgengine/netstack: deliver self-addressed packets via loopback When a tsnet.Server dials its own Tailscale IP, TCP SYN packets are silently 
dropped. In inject(), outbound packets with dst=self fail the shouldSendToHost check and fall through to WireGuard, which has no peer for the node's own address. Fix this by detecting self-addressed packets in inject() using isLocalIP and delivering them back into gVisor's network stack as inbound packets via a new DeliverLoopback method on linkEndpoint. The outbound packet must be re-serialized into a new PacketBuffer because outbound packets have their headers parsed into separate views, but DeliverNetworkPacket expects raw unparsed data. Updates #18829 Signed-off-by: James Tucker --- tsnet/tsnet_test.go | 73 +++++++ wgengine/netstack/link_endpoint.go | 38 ++++ wgengine/netstack/netstack.go | 24 +++ wgengine/netstack/netstack_test.go | 293 +++++++++++++++++++++++++++++ 4 files changed, 428 insertions(+) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 0b6b61bd10061..e2b37a365d04d 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -2792,3 +2792,76 @@ func TestResolveAuthKey(t *testing.T) { }) } } + +// TestSelfDial verifies that a single tsnet.Server can Dial its own Listen +// address. This is a regression test for a bug where self-addressed TCP SYN +// packets were sent to WireGuard (which has no peer for the node's own IP) +// and silently dropped, causing Dial to hang indefinitely. +func TestSelfDial(t *testing.T) { + tstest.Shard(t) + tstest.ResourceCheck(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + controlURL, _ := startControl(t) + s1, s1ip, _ := startServer(t, ctx, controlURL, "s1") + + ln, err := s1.Listen("tcp", ":8081") + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + errc := make(chan error, 1) + connc := make(chan net.Conn, 1) + go func() { + c, err := ln.Accept() + if err != nil { + errc <- err + return + } + connc <- c + }() + + // Self-dial: the same server dials its own Tailscale IP. 
+ w, err := s1.Dial(ctx, "tcp", fmt.Sprintf("%s:8081", s1ip)) + if err != nil { + t.Fatalf("self-dial failed: %v", err) + } + defer w.Close() + + var accepted net.Conn + select { + case accepted = <-connc: + case err := <-errc: + t.Fatalf("accept failed: %v", err) + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for accept") + } + defer accepted.Close() + + // Verify bidirectional data exchange. + want := "hello self" + if _, err := io.WriteString(w, want); err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + if _, err := io.ReadFull(accepted, got); err != nil { + t.Fatal(err) + } + if string(got) != want { + t.Errorf("client->server: got %q, want %q", got, want) + } + + reply := "hello back" + if _, err := io.WriteString(accepted, reply); err != nil { + t.Fatal(err) + } + gotReply := make([]byte, len(reply)) + if _, err := io.ReadFull(w, gotReply); err != nil { + t.Fatal(err) + } + if string(gotReply) != reply { + t.Errorf("server->client: got %q, want %q", gotReply, reply) + } +} diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 4800ed1673d20..82b5446ac8789 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -7,6 +7,7 @@ import ( "context" "sync" + "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" @@ -198,6 +199,43 @@ func (ep *linkEndpoint) injectInbound(p *packet.Parsed) { pkt.DecRef() } +// DeliverLoopback delivers pkt back into gVisor's network stack as if it +// arrived from the network, for self-addressed (loopback) packets. It takes +// ownership of one reference count on pkt. The caller must not use pkt after +// calling this method. It returns false if the dispatcher is not attached. +// +// Outbound packets from gVisor have their headers already parsed into separate +// views (NetworkHeader, TransportHeader, Data). 
DeliverNetworkPacket expects +// a raw unparsed packet, so we must re-serialize the packet into a new +// PacketBuffer with all bytes in the payload for gVisor to parse on inbound. +func (ep *linkEndpoint) DeliverLoopback(pkt *stack.PacketBuffer) bool { + ep.mu.RLock() + d := ep.dispatcher + ep.mu.RUnlock() + if d == nil { + pkt.DecRef() + return false + } + + // Serialize the outbound packet back to raw bytes. + raw := stack.PayloadSince(pkt.NetworkHeader()).AsSlice() + proto := pkt.NetworkProtocolNumber + + // We're done with the original outbound packet. + pkt.DecRef() + + // Create a new PacketBuffer from the raw bytes for inbound delivery. + newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ + Payload: buffer.MakeWithData(raw), + }) + newPkt.NetworkProtocolNumber = proto + newPkt.RXChecksumValidated = true + + d.DeliverNetworkPacket(proto, newPkt) + newPkt.DecRef() + return true +} + // Attach saves the stack network-layer dispatcher for use later when packets // are injected. func (ep *linkEndpoint) Attach(dispatcher stack.NetworkDispatcher) { diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index bab94e2bed01b..59c2613451fa5 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -1037,6 +1037,16 @@ func (ns *Impl) inject() { return } } else { + // Self-addressed packet: deliver back into gVisor directly + // via the link endpoint's dispatcher, but only if the packet is not + // earmarked for the host. Neither the inbound path (fakeTUN Write is a + // no-op) nor the outbound path (WireGuard has no peer for our own IP) + // can handle these. 
+ if ns.isSelfDst(pkt) { + ns.linkEP.DeliverLoopback(pkt) + continue + } + if err := ns.tundev.InjectOutboundPacketBuffer(pkt); err != nil { ns.logf("netstack inject outbound: %v", err) return @@ -1116,6 +1126,20 @@ func (ns *Impl) shouldSendToHost(pkt *stack.PacketBuffer) bool { return false } +// isSelfDst reports whether pkt's destination IP is a local Tailscale IP +// assigned to this node. This is used by inject() to detect self-addressed +// packets that need loopback delivery. +func (ns *Impl) isSelfDst(pkt *stack.PacketBuffer) bool { + hdr := pkt.Network() + switch v := hdr.(type) { + case header.IPv4: + return ns.isLocalIP(netip.AddrFrom4(v.DestinationAddress().As4())) + case header.IPv6: + return ns.isLocalIP(netip.AddrFrom16(v.DestinationAddress().As16())) + } + return false +} + // isLocalIP reports whether ip is a Tailscale IP assigned to this // node directly (but not a subnet-routed IP). func (ns *Impl) isLocalIP(ip netip.Addr) bool { diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index eea598937e4cf..da262fc13acbd 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -15,6 +15,7 @@ import ( "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/envknob" @@ -1073,3 +1074,295 @@ func makeUDP6PacketBuffer(src, dst netip.AddrPort) *stack.PacketBuffer { return pkt } + +// TestIsSelfDst verifies that isSelfDst correctly identifies packets whose +// destination IP is a local Tailscale IP assigned to this node. 
+func TestIsSelfDst(t *testing.T) { + var ( + selfIP4 = netip.MustParseAddr("100.64.1.2") + selfIP6 = netip.MustParseAddr("fd7a:115c:a1e0::123") + remoteIP4 = netip.MustParseAddr("100.64.99.88") + remoteIP6 = netip.MustParseAddr("fd7a:115c:a1e0::99") + ) + + ns := makeNetstack(t, func(impl *Impl) { + impl.ProcessLocalIPs = true + impl.atomicIsLocalIPFunc.Store(func(addr netip.Addr) bool { + return addr == selfIP4 || addr == selfIP6 + }) + }) + + testCases := []struct { + name string + src, dst netip.AddrPort + want bool + }{ + { + name: "self_to_self_v4", + src: netip.AddrPortFrom(selfIP4, 12345), + dst: netip.AddrPortFrom(selfIP4, 8081), + want: true, + }, + { + name: "self_to_self_v6", + src: netip.AddrPortFrom(selfIP6, 12345), + dst: netip.AddrPortFrom(selfIP6, 8081), + want: true, + }, + { + name: "remote_to_self_v4", + src: netip.AddrPortFrom(remoteIP4, 12345), + dst: netip.AddrPortFrom(selfIP4, 8081), + want: true, + }, + { + name: "remote_to_self_v6", + src: netip.AddrPortFrom(remoteIP6, 12345), + dst: netip.AddrPortFrom(selfIP6, 8081), + want: true, + }, + { + name: "self_to_remote_v4", + src: netip.AddrPortFrom(selfIP4, 12345), + dst: netip.AddrPortFrom(remoteIP4, 8081), + want: false, + }, + { + name: "self_to_remote_v6", + src: netip.AddrPortFrom(selfIP6, 12345), + dst: netip.AddrPortFrom(remoteIP6, 8081), + want: false, + }, + { + name: "remote_to_remote_v4", + src: netip.AddrPortFrom(remoteIP4, 12345), + dst: netip.MustParseAddrPort("100.64.77.66:7777"), + want: false, + }, + { + name: "service_ip_to_self_v4", + src: netip.AddrPortFrom(serviceIP, 53), + dst: netip.AddrPortFrom(selfIP4, 9999), + want: true, + }, + { + name: "service_ip_to_self_v6", + src: netip.AddrPortFrom(serviceIPv6, 53), + dst: netip.AddrPortFrom(selfIP6, 9999), + want: true, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + var pkt *stack.PacketBuffer + if tt.src.Addr().Is4() { + pkt = makeUDP4PacketBuffer(tt.src, tt.dst) + } else { + pkt = 
makeUDP6PacketBuffer(tt.src, tt.dst) + } + defer pkt.DecRef() + + if got := ns.isSelfDst(pkt); got != tt.want { + t.Errorf("isSelfDst(%v -> %v) = %v, want %v", tt.src, tt.dst, got, tt.want) + } + }) + } +} + +// TestDeliverLoopback verifies that DeliverLoopback correctly re-serializes an +// outbound packet and delivers it back into gVisor's inbound path. +func TestDeliverLoopback(t *testing.T) { + ep := newLinkEndpoint(64, 1280, "", groNotSupported) + + // Track delivered packets via a mock dispatcher. + type delivered struct { + proto tcpip.NetworkProtocolNumber + data []byte + } + deliveredCh := make(chan delivered, 4) + ep.Attach(&mockDispatcher{ + onDeliverNetworkPacket: func(proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) { + // Capture the raw bytes from the delivered packet. At this + // point the packet is unparsed — everything is in the + // payload, no headers have been consumed yet. + buf := pkt.ToBuffer() + raw := buf.Flatten() + deliveredCh <- delivered{proto: proto, data: raw} + }, + }) + + t.Run("ipv4", func(t *testing.T) { + selfAddr := netip.MustParseAddrPort("100.64.1.2:8081") + pkt := makeUDP4PacketBuffer(selfAddr, selfAddr) + // Capture what the outbound bytes look like before loopback. 
+ wantLen := pkt.Size() + wantProto := pkt.NetworkProtocolNumber + + if !ep.DeliverLoopback(pkt) { + t.Fatal("DeliverLoopback returned false") + } + + select { + case got := <-deliveredCh: + if got.proto != wantProto { + t.Errorf("proto = %d, want %d", got.proto, wantProto) + } + if len(got.data) != wantLen { + t.Errorf("data length = %d, want %d", len(got.data), wantLen) + } + case <-time.After(time.Second): + t.Fatal("timeout waiting for loopback delivery") + } + }) + + t.Run("ipv6", func(t *testing.T) { + selfAddr := netip.MustParseAddrPort("[fd7a:115c:a1e0::123]:8081") + pkt := makeUDP6PacketBuffer(selfAddr, selfAddr) + wantLen := pkt.Size() + wantProto := pkt.NetworkProtocolNumber + + if !ep.DeliverLoopback(pkt) { + t.Fatal("DeliverLoopback returned false") + } + + select { + case got := <-deliveredCh: + if got.proto != wantProto { + t.Errorf("proto = %d, want %d", got.proto, wantProto) + } + if len(got.data) != wantLen { + t.Errorf("data length = %d, want %d", len(got.data), wantLen) + } + case <-time.After(time.Second): + t.Fatal("timeout waiting for loopback delivery") + } + }) + + t.Run("nil_dispatcher", func(t *testing.T) { + ep2 := newLinkEndpoint(64, 1280, "", groNotSupported) + // Don't attach a dispatcher. + selfAddr := netip.MustParseAddrPort("100.64.1.2:8081") + pkt := makeUDP4PacketBuffer(selfAddr, selfAddr) + if ep2.DeliverLoopback(pkt) { + t.Error("DeliverLoopback should return false with nil dispatcher") + } + // pkt refcount was consumed by DeliverLoopback, so we don't DecRef. + }) +} + +// mockDispatcher implements stack.NetworkDispatcher for testing. 
+type mockDispatcher struct { + onDeliverNetworkPacket func(tcpip.NetworkProtocolNumber, *stack.PacketBuffer) +} + +func (d *mockDispatcher) DeliverNetworkPacket(proto tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) { + if d.onDeliverNetworkPacket != nil { + d.onDeliverNetworkPacket(proto, pkt) + } +} + +func (d *mockDispatcher) DeliverLinkPacket(tcpip.NetworkProtocolNumber, *stack.PacketBuffer) {} + +// udp4raw constructs a valid raw IPv4+UDP packet with proper checksums. +func udp4raw(t testing.TB, src, dst netip.Addr, sport, dport uint16, payload []byte) []byte { + t.Helper() + totalLen := header.IPv4MinimumSize + header.UDPMinimumSize + len(payload) + buf := make([]byte, totalLen) + + ip := header.IPv4(buf) + ip.Encode(&header.IPv4Fields{ + TotalLength: uint16(totalLen), + Protocol: uint8(header.UDPProtocolNumber), + TTL: 64, + SrcAddr: tcpip.AddrFrom4Slice(src.AsSlice()), + DstAddr: tcpip.AddrFrom4Slice(dst.AsSlice()), + }) + ip.SetChecksum(^ip.CalculateChecksum()) + + // Build UDP header + payload. + u := header.UDP(buf[header.IPv4MinimumSize:]) + u.Encode(&header.UDPFields{ + SrcPort: sport, + DstPort: dport, + Length: uint16(header.UDPMinimumSize + len(payload)), + }) + copy(buf[header.IPv4MinimumSize+header.UDPMinimumSize:], payload) + + xsum := header.PseudoHeaderChecksum( + header.UDPProtocolNumber, + tcpip.AddrFrom4Slice(src.AsSlice()), + tcpip.AddrFrom4Slice(dst.AsSlice()), + uint16(header.UDPMinimumSize+len(payload)), + ) + u.SetChecksum(^header.UDP(buf[header.IPv4MinimumSize:]).CalculateChecksum(xsum)) + return buf +} + +// TestInjectLoopback verifies that the inject goroutine delivers self-addressed +// packets back into gVisor (via DeliverLoopback) instead of sending them to +// WireGuard outbound. This is a regression test for a bug where self-dial +// packets were sent to WireGuard and silently dropped. 
+func TestInjectLoopback(t *testing.T) { + selfIP4 := netip.MustParseAddr("100.64.1.2") + + ns := makeNetstack(t, func(impl *Impl) { + impl.ProcessLocalIPs = true + impl.atomicIsLocalIPFunc.Store(func(addr netip.Addr) bool { + return addr == selfIP4 + }) + }) + + // Register gVisor's NIC address so the stack accepts and routes + // packets for this IP. + protocolAddr := tcpip.ProtocolAddress{ + Protocol: header.IPv4ProtocolNumber, + AddressWithPrefix: tcpip.AddrFrom4(selfIP4.As4()).WithPrefix(), + } + if err := ns.ipstack.AddProtocolAddress(nicID, protocolAddr, stack.AddressProperties{}); err != nil { + t.Fatalf("AddProtocolAddress: %v", err) + } + + // Bind a UDP socket on the gVisor stack to receive the loopback packet. + pc, err := gonet.DialUDP(ns.ipstack, &tcpip.FullAddress{ + NIC: nicID, + Addr: tcpip.AddrFrom4(selfIP4.As4()), + Port: 8081, + }, nil, header.IPv4ProtocolNumber) + if err != nil { + t.Fatalf("DialUDP: %v", err) + } + defer pc.Close() + + // Build a valid self-addressed UDP packet from raw bytes and wrap it + // in a gVisor PacketBuffer with headers already pushed, as gVisor's + // outbound path produces. + payload := []byte("loopback test") + raw := udp4raw(t, selfIP4, selfIP4, 12345, 8081, payload) + + pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{ + ReserveHeaderBytes: header.IPv4MinimumSize + header.UDPMinimumSize, + Payload: buffer.MakeWithData(payload), + }) + copy(pkt.TransportHeader().Push(header.UDPMinimumSize), + raw[header.IPv4MinimumSize:header.IPv4MinimumSize+header.UDPMinimumSize]) + pkt.TransportProtocolNumber = header.UDPProtocolNumber + copy(pkt.NetworkHeader().Push(header.IPv4MinimumSize), raw[:header.IPv4MinimumSize]) + pkt.NetworkProtocolNumber = header.IPv4ProtocolNumber + + if err := ns.linkEP.q.Write(pkt); err != nil { + t.Fatalf("queue.Write: %v", err) + } + + // The inject goroutine should detect the self-addressed packet via + // isSelfDst and deliver it back into gVisor via DeliverLoopback. 
+ pc.SetReadDeadline(time.Now().Add(5 * time.Second)) + buf := make([]byte, 256) + n, _, err := pc.ReadFrom(buf) + if err != nil { + t.Fatalf("ReadFrom: %v (self-addressed packet was not looped back)", err) + } + if got := string(buf[:n]); got != "loopback test" { + t.Errorf("got %q, want %q", got, "loopback test") + } +} From 45305800a626929a4d5335102c6ad613cc21b8b6 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 27 Feb 2026 14:41:47 -0800 Subject: [PATCH 1029/1093] net/netmon: ignore NetBird interface on Linux Windows and macOS are not covered by this change, as neither have safely distinct names to make it easy to do so. This covers the requested case on Linux. Updates #18824 Signed-off-by: James Tucker --- net/netmon/state.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/netmon/state.go b/net/netmon/state.go index 10d68ab785edc..cdfa1d0fbe552 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -41,7 +41,12 @@ func isProblematicInterface(nif *net.Interface) bool { // DoS each other by doing traffic amplification, both of them // preferring/trying to use each other for transport. See: // https://github.com/tailscale/tailscale/issues/1208 - if strings.HasPrefix(name, "zt") || (runtime.GOOS == "windows" && strings.Contains(name, "ZeroTier")) { + // TODO(https://github.com/tailscale/tailscale/issues/18824): maybe exclude + // "WireGuard tunnel 0" as well on Windows (NetBird), but the name seems too + // generic where there is not a platform standard (on Linux wt0 is at least + // explicitly different from the WireGuard conventional default of wg0). 
+ if strings.HasPrefix(name, "zt") || name == "wt0" /* NetBird */ || + (runtime.GOOS == "windows" && strings.Contains(name, "ZeroTier")) { return true } return false From 439d84134d169db13b731a0cf515e0f5e342b984 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 27 Feb 2026 19:09:24 -0800 Subject: [PATCH 1030/1093] tsnet: fix slow test shutdown leading to flakes TestDial in particular sometimes gets stuck in CI for minutes, letting chantun drop packets during shutdown avoids blocking shutdown. Updates #18423 Signed-off-by: James Tucker --- tsnet/tsnet_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index e2b37a365d04d..9481defae94e8 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -1881,8 +1881,8 @@ type chanTUN struct { func newChanTUN() *chanTUN { t := &chanTUN{ - Inbound: make(chan []byte, 10), - Outbound: make(chan []byte, 10), + Inbound: make(chan []byte, 1024), + Outbound: make(chan []byte, 1024), closed: make(chan struct{}), events: make(chan tun.Event, 1), } @@ -1922,6 +1922,10 @@ func (t *chanTUN) Write(bufs [][]byte, offset int) (int, error) { case <-t.closed: return 0, errors.New("closed") case t.Inbound <- slices.Clone(pkt): + default: + // Drop the packet if the channel is full, like a real + // TUN under congestion. This avoids blocking the + // WireGuard send path when no goroutine is draining. } } return len(bufs), nil From fa13f83375680d223d411604b7360b0325936b19 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Sat, 28 Feb 2026 15:53:09 -0800 Subject: [PATCH 1031/1093] tsnet: fix deadlock in Server.Close during shutdown Server.Close held s.mu for the entire shutdown duration, including netstack.Close (which waits for gVisor goroutines to exit) and lb.Shutdown. gVisor callbacks like getTCPHandlerForFlow acquire s.mu via listenerForDstAddr, so any in-flight gVisor goroutine attempting that callback during stack shutdown would deadlock with Close. 
Replace the mu-guarded closed bool with a sync.Once, and release s.mu after closing listeners but before the heavy shutdown operations. Also cancel shutdownCtx before netstack.Close so pending handlers observe cancellation rather than contending on the lock. Updates #18423 Signed-off-by: James Tucker --- tsnet/tsnet.go | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index ccea22d1619f1..416c907502147 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -198,7 +198,7 @@ type Server struct { listeners map[listenKey]*listener fallbackTCPHandlers set.HandleSet[FallbackTCPHandler] dialer *tsdial.Dialer - closed bool + closeOnce sync.Once } // FallbackTCPHandler describes the callback which @@ -439,11 +439,29 @@ func (s *Server) Up(ctx context.Context) (*ipnstate.Status, error) { // // It must not be called before or concurrently with Start. func (s *Server) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - if s.closed { + didClose := false + s.closeOnce.Do(func() { + didClose = true + s.close() + }) + if !didClose { return fmt.Errorf("tsnet: %w", net.ErrClosed) } + return nil +} + +func (s *Server) close() { + // Close listeners under s.mu, then release before the heavy shutdown + // operations. We must not hold s.mu during netstack.Close, lb.Shutdown, + // etc. because callbacks from gVisor (e.g. getTCPHandlerForFlow) + // acquire s.mu, and waiting for those goroutines while holding the lock + // would deadlock. 
+ s.mu.Lock() + for _, ln := range s.listeners { + ln.closeLocked() + } + s.mu.Unlock() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() var wg sync.WaitGroup @@ -466,13 +484,12 @@ func (s *Server) Close() error { } }() - if s.netstack != nil { - s.netstack.Close() - s.netstack = nil - } if s.shutdownCancel != nil { s.shutdownCancel() } + if s.netstack != nil { + s.netstack.Close() + } if s.lb != nil { s.lb.Shutdown() } @@ -489,13 +506,8 @@ func (s *Server) Close() error { s.loopbackListener.Close() } - for _, ln := range s.listeners { - ln.closeLocked() - } wg.Wait() s.sys.Bus.Get().Close() - s.closed = true - return nil } func (s *Server) doInit() { From 142ce997cbf53f0bb0c86e96682bce75b34c10f8 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Sat, 28 Feb 2026 16:11:28 -0800 Subject: [PATCH 1032/1093] .github/workflows: rename tidy workflow to match what it is I was confused when everything I was reading in the CI failure was saying `go mod tidy`, but the thing that was actually failing was related to nix flakes. Rename the pipeline and step name to the `make tidy` that it actually runs. Updates #16637 Signed-off-by: James Tucker --- .github/workflows/test.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 862420f70f98d..e6c693188783f 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -806,7 +806,7 @@ jobs: echo git diff --name-only --exit-code || (echo "The files above need updating. 
Please run 'go generate'."; exit 1) - go_mod_tidy: + make_tidy: runs-on: ubuntu-24.04 needs: gomod-cache steps: @@ -820,7 +820,7 @@ jobs: path: gomodcache key: ${{ needs.gomod-cache.outputs.cache-key }} enableCrossOsArchive: true - - name: check that 'go mod tidy' is clean + - name: check that 'make tidy' is clean working-directory: src run: | make tidy @@ -921,7 +921,7 @@ jobs: - fuzz - depaware - go_generate - - go_mod_tidy + - make_tidy - licenses - staticcheck runs-on: ubuntu-24.04 @@ -967,7 +967,7 @@ jobs: - fuzz - depaware - go_generate - - go_mod_tidy + - make_tidy - licenses - staticcheck steps: @@ -991,7 +991,7 @@ jobs: - tailscale_go - depaware - go_generate - - go_mod_tidy + - make_tidy - licenses - staticcheck steps: From 48e0334aaca92682f1ec59962de93afd21c49ac8 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 27 Feb 2026 15:44:59 -0800 Subject: [PATCH 1033/1093] tsnet: fix Listen for unspecified addresses and ephemeral ports Normalize 0.0.0.0 and :: to wildcard in resolveListenAddr so listeners match incoming connections. Fix ephemeral port allocation across all three modes: extract assigned ports from gVisor listeners (TUN TCP and UDP), and add an ephemeral port allocator for netstack TCP. 
Updates #6815 Updates #12182 Fixes #14042 Signed-off-by: James Tucker --- tsnet/tsnet.go | 217 ++++++++++++++++++++++++++++++++-------- tsnet/tsnet_test.go | 236 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 411 insertions(+), 42 deletions(-) diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 416c907502147..776854e227926 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -196,6 +196,7 @@ type Server struct { mu sync.Mutex listeners map[listenKey]*listener + nextEphemeralPort uint16 // next port to try in ephemeral range; 0 means use ephemeralPortFirst fallbackTCPHandlers set.HandleSet[FallbackTCPHandler] dialer *tsdial.Dialer closeOnce sync.Once @@ -1099,16 +1100,27 @@ func (s *Server) ListenPacket(network, addr string) (net.PacketConn, error) { network = "udp6" } } + if err := s.Start(); err != nil { + return nil, err + } - netLn, err := s.listen(network, addr, listenOnTailnet) + // Create the gVisor PacketConn first so it can handle port 0 allocation. + pc, err := s.netstack.ListenPacket(network, ap.String()) if err != nil { return nil, err } - ln := netLn.(*listener) - pc, err := s.netstack.ListenPacket(network, ap.String()) + // If port 0 was requested, use the port gVisor assigned. + if ap.Port() == 0 { + if p := portFromAddr(pc.LocalAddr()); p != 0 { + ap = netip.AddrPortFrom(ap.Addr(), p) + addr = ap.String() + } + } + + ln, err := s.registerListener(network, addr, ap, listenOnTailnet, nil) if err != nil { - ln.Close() + pc.Close() return nil, err } @@ -1621,6 +1633,11 @@ func resolveListenAddr(network, addr string) (netip.AddrPort, error) { if err != nil { return zero, fmt.Errorf("invalid Listen addr %q; host part must be empty or IP literal", host) } + // Normalize unspecified addresses (0.0.0.0, ::) to the zero value, + // equivalent to an empty host, so they match the node's own IPs. 
+ if bindHostOrZero.IsUnspecified() { + return netip.AddrPortFrom(netip.Addr{}, uint16(port)), nil + } if strings.HasSuffix(network, "4") && !bindHostOrZero.Is4() { return zero, fmt.Errorf("invalid non-IPv4 addr %v for network %q", host, network) } @@ -1630,6 +1647,17 @@ func resolveListenAddr(network, addr string) (netip.AddrPort, error) { return netip.AddrPortFrom(bindHostOrZero, uint16(port)), nil } +// ephemeral port range for non-TUN listeners requesting port 0. This range is +// chosen to reduce the probability of collision with host listeners, avoiding +// both the typical ephemeral range, and privilege listener ranges. Collisions +// may still occur and could for example shadow host sockets in a netstack+TUN +// situation, the range here is a UX improvement, not a guarantee that +// application authors will never have to consider these cases. +const ( + ephemeralPortFirst = 10002 + ephemeralPortLast = 19999 +) + func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, error) { switch network { case "", "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6": @@ -1643,6 +1671,76 @@ func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, erro if err := s.Start(); err != nil { return nil, err } + + isTCP := network == "" || network == "tcp" || network == "tcp4" || network == "tcp6" + + // When using a TUN with TCP, create a gVisor TCP listener. + // gVisor handles port 0 allocation natively. + var gonetLn net.Listener + if s.Tun != nil && isTCP { + gonetLn, err = s.listenTCP(network, host) + if err != nil { + return nil, err + } + // If port 0 was requested, update host to the port gVisor assigned + // so that the listenKey uses the real port. 
+ if host.Port() == 0 { + if p := portFromAddr(gonetLn.Addr()); p != 0 { + host = netip.AddrPortFrom(host.Addr(), p) + addr = listenAddr(host) + } + } + } + + ln, err := s.registerListener(network, addr, host, lnOn, gonetLn) + if err != nil { + if gonetLn != nil { + gonetLn.Close() + } + return nil, err + } + return ln, nil +} + +// listenTCP creates a gVisor TCP listener for TUN mode. +func (s *Server) listenTCP(network string, host netip.AddrPort) (net.Listener, error) { + var nsNetwork string + nsAddr := host + switch { + case network == "tcp4" || network == "tcp6": + nsNetwork = network + case host.Addr().Is4(): + nsNetwork = "tcp4" + case host.Addr().Is6(): + nsNetwork = "tcp6" + default: + // Wildcard address: use tcp6 for dual-stack (accepts both v4 and v6). + nsNetwork = "tcp6" + nsAddr = netip.AddrPortFrom(netip.IPv6Unspecified(), host.Port()) + } + ln, err := s.netstack.ListenTCP(nsNetwork, nsAddr.String()) + if err != nil { + return nil, fmt.Errorf("tsnet: %w", err) + } + return ln, nil +} + +// registerListener allocates a port (if 0) and registers the listener in +// s.listeners under s.mu. +func (s *Server) registerListener(network, addr string, host netip.AddrPort, lnOn listenOn, gonetLn net.Listener) (*listener, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Allocate an ephemeral port for non-TUN listeners requesting port 0. 
+ if host.Port() == 0 && gonetLn == nil { + p, ok := s.allocEphemeralLocked(network, host.Addr(), lnOn) + if !ok { + return nil, errors.New("tsnet: no available port in ephemeral range") + } + host = netip.AddrPortFrom(host.Addr(), p) + addr = listenAddr(host) + } + var keys []listenKey switch lnOn { case listenOnTailnet: @@ -1654,58 +1752,93 @@ func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, erro keys = append(keys, listenKey{network, host.Addr(), host.Port(), true}) } - ln := &listener{ - s: s, - keys: keys, - addr: addr, - - closedc: make(chan struct{}), - conn: make(chan net.Conn), - } - - // When using a TUN with TCP, create a gVisor TCP listener. - if s.Tun != nil && (network == "" || network == "tcp" || network == "tcp4" || network == "tcp6") { - var nsNetwork string - nsAddr := host - switch { - case network == "tcp4" || network == "tcp6": - nsNetwork = network - case host.Addr().Is4(): - nsNetwork = "tcp4" - case host.Addr().Is6(): - nsNetwork = "tcp6" - default: - // Wildcard address: use tcp6 for dual-stack (accepts both v4 and v6). 
- nsNetwork = "tcp6" - nsAddr = netip.AddrPortFrom(netip.IPv6Unspecified(), host.Port()) - } - gonetLn, err := s.netstack.ListenTCP(nsNetwork, nsAddr.String()) - if err != nil { - return nil, fmt.Errorf("tsnet: %w", err) - } - ln.gonetLn = gonetLn - } - - s.mu.Lock() for _, key := range keys { if _, ok := s.listeners[key]; ok { - s.mu.Unlock() - if ln.gonetLn != nil { - ln.gonetLn.Close() - } return nil, fmt.Errorf("tsnet: listener already open for %s, %s", network, addr) } } + + ln := &listener{ + s: s, + keys: keys, + addr: addr, + closedc: make(chan struct{}), + conn: make(chan net.Conn), + gonetLn: gonetLn, + } if s.listeners == nil { s.listeners = make(map[listenKey]*listener) } for _, key := range keys { s.listeners[key] = ln } - s.mu.Unlock() return ln, nil } +// allocEphemeralLocked finds an unused port in [ephemeralPortFirst, +// ephemeralPortLast] that does not collide with any existing listener for the +// given network, host, and listenOn. s.mu must be held. +func (s *Server) allocEphemeralLocked(network string, host netip.Addr, lnOn listenOn) (uint16, bool) { + if s.nextEphemeralPort < ephemeralPortFirst || s.nextEphemeralPort > ephemeralPortLast { + s.nextEphemeralPort = ephemeralPortFirst + } + start := s.nextEphemeralPort + for { + p := s.nextEphemeralPort + s.nextEphemeralPort++ + if s.nextEphemeralPort > ephemeralPortLast { + s.nextEphemeralPort = ephemeralPortFirst + } + if !s.portInUseLocked(network, host, p, lnOn) { + return p, true + } + if s.nextEphemeralPort == start { + return 0, false + } + } +} + +// portInUseLocked reports whether any listenKey for the given network, host, +// port, and listenOn already exists in s.listeners. 
+func (s *Server) portInUseLocked(network string, host netip.Addr, port uint16, lnOn listenOn) bool { + switch lnOn { + case listenOnTailnet: + _, ok := s.listeners[listenKey{network, host, port, false}] + return ok + case listenOnFunnel: + _, ok := s.listeners[listenKey{network, host, port, true}] + return ok + case listenOnBoth: + _, ok1 := s.listeners[listenKey{network, host, port, false}] + _, ok2 := s.listeners[listenKey{network, host, port, true}] + return ok1 || ok2 + } + return false +} + +// listenAddr formats host as a listen address string. +// If host has no IP, it returns ":port". +func listenAddr(host netip.AddrPort) string { + if !host.Addr().IsValid() { + return ":" + strconv.Itoa(int(host.Port())) + } + return host.String() +} + +// portFromAddr extracts the port from a net.Addr, or returns 0. +func portFromAddr(a net.Addr) uint16 { + switch v := a.(type) { + case *net.TCPAddr: + return uint16(v.Port) + case *net.UDPAddr: + return uint16(v.Port) + } + if ap, err := netip.ParseAddrPort(a.String()); err == nil { + return ap.Port() + } + return 0 +} + // GetRootPath returns the root path of the tsnet server. // This is where the state file and other data is stored. 
func (s *Server) GetRootPath() string { diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 9481defae94e8..266a60f78c5ec 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -112,6 +112,86 @@ func TestListenerPort(t *testing.T) { } } +func TestResolveListenAddrUnspecified(t *testing.T) { + tests := []struct { + name string + network string + addr string + wantIP netip.Addr + }{ + {"empty_host", "tcp", ":80", netip.Addr{}}, + {"ipv4_unspecified", "tcp", "0.0.0.0:80", netip.Addr{}}, + {"ipv6_unspecified", "tcp", "[::]:80", netip.Addr{}}, + {"specific_ipv4", "tcp", "100.64.0.1:80", netip.MustParseAddr("100.64.0.1")}, + {"specific_ipv6", "tcp6", "[fd7a:115c:a1e0::1]:80", netip.MustParseAddr("fd7a:115c:a1e0::1")}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := resolveListenAddr(tt.network, tt.addr) + if err != nil { + t.Fatal(err) + } + if got.Addr() != tt.wantIP { + t.Errorf("Addr() = %v, want %v", got.Addr(), tt.wantIP) + } + }) + } +} + +func TestAllocEphemeral(t *testing.T) { + s := &Server{listeners: make(map[listenKey]*listener)} + + // Sequential allocations should return unique ports in range. + var ports []uint16 + for range 5 { + s.mu.Lock() + p, ok := s.allocEphemeralLocked("tcp", netip.Addr{}, listenOnTailnet) + s.mu.Unlock() + if !ok { + t.Fatal("allocEphemeralLocked failed unexpectedly") + } + if p < ephemeralPortFirst || p > ephemeralPortLast { + t.Errorf("port %d outside [%d, %d]", p, ephemeralPortFirst, ephemeralPortLast) + } + for _, prev := range ports { + if p == prev { + t.Errorf("duplicate port %d", p) + } + } + ports = append(ports, p) + // Occupy the port so the next call skips it. + s.listeners[listenKey{"tcp", netip.Addr{}, p, false}] = &listener{} + } + + // Verify skip over occupied port. 
+ s.mu.Lock() + next := s.nextEphemeralPort + if next < ephemeralPortFirst || next > ephemeralPortLast { + next = ephemeralPortFirst + } + s.listeners[listenKey{"tcp", netip.Addr{}, next, false}] = &listener{} + p, ok := s.allocEphemeralLocked("tcp", netip.Addr{}, listenOnTailnet) + s.mu.Unlock() + if !ok { + t.Fatal("allocEphemeralLocked failed after skip") + } + if p == next { + t.Errorf("should have skipped occupied port %d", next) + } + + // Wrap-around. + s.mu.Lock() + s.nextEphemeralPort = ephemeralPortLast + p, ok = s.allocEphemeralLocked("tcp", netip.Addr{}, listenOnTailnet) + s.mu.Unlock() + if !ok { + t.Fatal("allocEphemeralLocked failed at wrap") + } + if p < ephemeralPortFirst || p > ephemeralPortLast { + t.Errorf("port %d outside range after wrap", p) + } +} + var verboseDERP = flag.Bool("verbose-derp", false, "if set, print DERP and STUN logs") var verboseNodes = flag.Bool("verbose-nodes", false, "if set, print tsnet.Server logs") @@ -2869,3 +2949,159 @@ func TestSelfDial(t *testing.T) { t.Errorf("server->client: got %q, want %q", gotReply, reply) } } + +// TestListenUnspecifiedAddr verifies that listening on 0.0.0.0 or [::] works +// the same as listening on an empty host (":port"), accepting connections +// destined to the node's Tailscale IPs. 
+func TestListenUnspecifiedAddr(t *testing.T) { + testUnspec := func(t *testing.T, lt *listenTest, addr, dialPort string) { + ln, err := lt.s2.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + echoErr := make(chan error, 1) + go func() { + conn, err := ln.Accept() + if err != nil { + echoErr <- err + return + } + defer conn.Close() + buf := make([]byte, 1024) + n, err := conn.Read(buf) + if err != nil { + echoErr <- err + return + } + _, err = conn.Write(buf[:n]) + echoErr <- err + }() + + dialAddr := net.JoinHostPort(lt.s2ip4.String(), dialPort) + conn, err := lt.s1.Dial(t.Context(), "tcp", dialAddr) + if err != nil { + t.Fatalf("Dial(%q) failed: %v", dialAddr, err) + } + defer conn.Close() + want := "hello unspec" + if _, err := conn.Write([]byte(want)); err != nil { + t.Fatalf("Write failed: %v", err) + } + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + n, err := conn.Read(got) + if err != nil { + t.Fatalf("Read failed: %v", err) + } + if string(got[:n]) != want { + t.Errorf("got %q, want %q", got[:n], want) + } + if err := <-echoErr; err != nil { + t.Fatalf("echo error: %v", err) + } + } + + t.Run("Netstack", func(t *testing.T) { + lt := setupTwoClientTest(t, false) + t.Run("0.0.0.0", func(t *testing.T) { testUnspec(t, lt, "0.0.0.0:8080", "8080") }) + t.Run("::", func(t *testing.T) { testUnspec(t, lt, "[::]:8081", "8081") }) + }) + t.Run("TUN", func(t *testing.T) { + lt := setupTwoClientTest(t, true) + t.Run("0.0.0.0", func(t *testing.T) { testUnspec(t, lt, "0.0.0.0:8080", "8080") }) + t.Run("::", func(t *testing.T) { testUnspec(t, lt, "[::]:8081", "8081") }) + }) +} + +// TestListenMultipleEphemeralPorts verifies that calling Listen with port 0 +// multiple times allocates distinct ports, each of which can receive +// connections independently. 
+func TestListenMultipleEphemeralPorts(t *testing.T) { + testMultipleEphemeral := func(t *testing.T, lt *listenTest) { + const n = 3 + listeners := make([]net.Listener, n) + ports := make([]string, n) + for i := range n { + ln, err := lt.s2.Listen("tcp", ":0") + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { ln.Close() }) + _, portStr, err := net.SplitHostPort(ln.Addr().String()) + if err != nil { + t.Fatalf("parsing Addr %q: %v", ln.Addr(), err) + } + if portStr == "0" { + t.Fatal("Addr() returned port 0; expected allocated port") + } + for j := range i { + if ports[j] == portStr { + t.Fatalf("listeners %d and %d both got port %s", j, i, portStr) + } + } + listeners[i] = ln + ports[i] = portStr + } + + // Verify each listener independently accepts connections. + for i := range n { + echoErr := make(chan error, 1) + go func() { + conn, err := listeners[i].Accept() + if err != nil { + echoErr <- err + return + } + defer conn.Close() + buf := make([]byte, 1024) + rn, err := conn.Read(buf) + if err != nil { + echoErr <- err + return + } + _, err = conn.Write(buf[:rn]) + echoErr <- err + }() + + dialAddr := net.JoinHostPort(lt.s2ip4.String(), ports[i]) + conn, err := lt.s1.Dial(t.Context(), "tcp", dialAddr) + if err != nil { + t.Fatalf("listener %d: Dial(%q) failed: %v", i, dialAddr, err) + } + want := fmt.Sprintf("hello port %d", i) + if _, err := conn.Write([]byte(want)); err != nil { + conn.Close() + t.Fatalf("listener %d: Write failed: %v", i, err) + } + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + got := make([]byte, 1024) + rn, err := conn.Read(got) + conn.Close() + if err != nil { + select { + case e := <-echoErr: + t.Fatalf("listener %d: echo error: %v; read error: %v", i, e, err) + default: + t.Fatalf("listener %d: Read failed: %v", i, err) + } + } + if string(got[:rn]) != want { + t.Errorf("listener %d: got %q, want %q", i, got[:rn], want) + } + if err := <-echoErr; err != nil { + t.Fatalf("listener %d: echo error: %v", i, err) + } + } + } 
+ + t.Run("Netstack", func(t *testing.T) { + lt := setupTwoClientTest(t, false) + testMultipleEphemeral(t, lt) + }) + t.Run("TUN", func(t *testing.T) { + lt := setupTwoClientTest(t, true) + testMultipleEphemeral(t, lt) + }) +} From 2743e0b681128380f87ab0c88b43dc35ab630917 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Mon, 2 Mar 2026 16:01:48 +0000 Subject: [PATCH 1034/1093] .github/actions/go-cache: check for pre-built cigocacher (#18833) Some CI runner images now have cigocacher baked in. Skip building if it's already present. Updates tailscale/corp#35667 Change-Id: I5ea0d606d44b1373bc1c8f7bca4ab780e763e2a9 Signed-off-by: Tom Proctor --- .github/actions/go-cache/action.sh | 33 +++++++++++++++++------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/.github/actions/go-cache/action.sh b/.github/actions/go-cache/action.sh index f49d5bb779f4d..5cfafe4767fb2 100755 --- a/.github/actions/go-cache/action.sh +++ b/.github/actions/go-cache/action.sh @@ -23,22 +23,27 @@ if [ -z "${URL:-}" ]; then exit 0 fi -GOPATH=$(command -v go || true) -if [ -z "${GOPATH}" ]; then - if [ ! -f "tool/go" ]; then - echo "Go not available, unable to proceed" - exit 1 +BIN_PATH="$(PATH="$PATH:$HOME/bin" command -v cigocacher || true)" +if [ -z "${BIN_PATH}" ]; then + echo "cigocacher not found in PATH, attempting to build or fetch it" + + GOPATH=$(command -v go || true) + if [ -z "${GOPATH}" ]; then + if [ ! 
-f "tool/go" ]; then + echo "Go not available, unable to proceed" + exit 1 + fi + GOPATH="./tool/go" fi - GOPATH="./tool/go" -fi -BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(${GOPATH} env GOEXE)" -if [ -d "cmd/cigocacher" ]; then - echo "cmd/cigocacher found locally, building from local source" - "${GOPATH}" build -o "${BIN_PATH}" ./cmd/cigocacher -else - echo "cmd/cigocacher not found locally, fetching from tailscale.com/cmd/cigocacher" - "${GOPATH}" build -o "${BIN_PATH}" tailscale.com/cmd/cigocacher + BIN_PATH="${RUNNER_TEMP:-/tmp}/cigocacher$(${GOPATH} env GOEXE)" + if [ -d "cmd/cigocacher" ]; then + echo "cmd/cigocacher found locally, building from local source" + "${GOPATH}" build -o "${BIN_PATH}" ./cmd/cigocacher + else + echo "cmd/cigocacher not found locally, fetching from tailscale.com/cmd/cigocacher" + "${GOPATH}" build -o "${BIN_PATH}" tailscale.com/cmd/cigocacher + fi fi CIGOCACHER_TOKEN="$("${BIN_PATH}" --auth --cigocached-url "${URL}" --cigocached-host "${HOST}" )" From 3e8913f959c3313103a1b51c2ceb474663c9399b Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 2 Mar 2026 15:12:31 +0000 Subject: [PATCH 1035/1093] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 6 +++--- licenses/apple.md | 4 ++-- licenses/tailscale.md | 4 ++-- licenses/windows.md | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index 5c46b3cb13340..15098f0752e79 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -8,11 +8,11 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
## Go Packages - - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) + - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.2.0/LICENSE)) - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.9.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.26.1/LICENSE)) - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.3/LICENSE)) @@ -26,7 +26,7 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.2/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) + - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.25/LICENSE)) - [github.com/pires/go-proxyproto](https://pkg.go.dev/github.com/pires/go-proxyproto) ([Apache-2.0](https://github.com/pires/go-proxyproto/blob/v0.8.1/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/tailscale-android/libtailscale](https://pkg.go.dev/github.com/tailscale/tailscale-android/libtailscale) ([BSD-3-Clause](https://github.com/tailscale/tailscale-android/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 93afd9385cdbe..f7989fe250a63 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -11,7 +11,7 @@ See also the dependencies in the [Tailscale CLI][]. 
## Go Packages - - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.1/LICENSE)) + - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.2.0/LICENSE)) - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.41.0/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.32.5/config/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.19.5/credentials/LICENSE.txt)) @@ -78,7 +78,7 @@ See also the dependencies in the [Tailscale CLI][]. - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.39.0:LICENSE)) - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.33.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/573d5e7127a8/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) ## Additional Dependencies diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 521b6ff9ce887..5050b38db2178 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -38,7 +38,7 @@ Some 
packages may only be included on certain architectures or operating systems - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.24.0/internal/sync/singleflight/LICENSE)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/creachadair/msync/trigger](https://pkg.go.dev/github.com/creachadair/msync/trigger) ([BSD-3-Clause](https://github.com/creachadair/msync/blob/v0.7.1/LICENSE)) - - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.23/LICENSE)) + - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.24/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/a09d6be7affa/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) @@ -98,7 +98,7 @@ Some packages may only be included on certain architectures or operating systems - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - - 
[gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/573d5e7127a8/LICENSE)) - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.34.0/LICENSE)) - [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.6.0/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 29581566c68ba..e8bcc932f332f 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -9,7 +9,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. ## Go Packages - - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.1/LICENSE)) + - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.2.0/LICENSE)) - [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE)) - [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE)) - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) From e0ca836c9928c2d2a687f7538cd3406b92393273 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Mar 2026 12:01:37 +0000 Subject: [PATCH 1036/1093] .github: Bump github/codeql-action from 4.32.3 to 4.32.5 
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.32.3 to 4.32.5. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/9e907b5e64f6b83e7804b09294d44122997950d6...c793b717bc78562f491db7b0e93a3a178b099162) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.32.5 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 49657de707f17..e88003c769b6f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@9e907b5e64f6b83e7804b09294d44122997950d6 # v4.32.3 + uses: github/codeql-action/init@c793b717bc78562f491db7b0e93a3a178b099162 # v4.32.5 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@9e907b5e64f6b83e7804b09294d44122997950d6 # v4.32.3 + uses: github/codeql-action/autobuild@c793b717bc78562f491db7b0e93a3a178b099162 # v4.32.5 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@9e907b5e64f6b83e7804b09294d44122997950d6 # v4.32.3 + uses: github/codeql-action/analyze@c793b717bc78562f491db7b0e93a3a178b099162 # v4.32.5 From eeb1fa047bef2896d81eb01ab1a653419c129dc2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Mar 2026 12:01:08 +0000 Subject: [PATCH 1037/1093] .github: Bump actions/setup-go from 6.2.0 to 6.3.0 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 6.2.0 to 6.3.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5...4b73464bb391d4059bd26b0524d20df3927bd417) --- updated-dependencies: - dependency-name: actions/setup-go dependency-version: 6.3.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/golangci-lint.yml | 2 +- .github/workflows/test.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index e88003c769b6f..51bae5a068df5 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -49,7 +49,7 @@ jobs: # Install a more recent Go that understands modern go.mod content. 
- name: Install Go - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version-file: go.mod diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index dbabb361e14fa..6431a31d698c0 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -29,7 +29,7 @@ jobs: steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 + - uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version-file: go.mod cache: true diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e6c693188783f..064765ca2a2af 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -245,7 +245,7 @@ jobs: path: ${{ github.workspace }}/src - name: Install Go - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0 + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 with: go-version-file: ${{ github.workspace }}/src/go.mod cache: false From 5a2168da9ef6d7daff8fd0419dbfb6026b02a0ed Mon Sep 17 00:00:00 2001 From: Erisa A Date: Mon, 2 Mar 2026 18:29:49 +0000 Subject: [PATCH 1038/1093] scripts/installer.sh: handle KDE Linux (#18861) Display a message pointing to KDE Linux documentation on installing Tailscale Fixes #18306 Signed-off-by: Erisa A --- scripts/installer.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index 8ffd3f5720a2d..2c15ea6571678 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -341,6 +341,11 @@ main() { echo "https://github.com/tailscale-dev/deck-tailscale" exit 1 ;; + kde-linux) + echo "The maintainers of KDE Linux provide documentation on multiple ways to install Tailscale. 
These instructions are not officially supported by Tailscale:" + echo "https://kde.org/linux/docs/more-software/#tailscale" + exit 1 + ;; # TODO: wsl? # TODO: synology? qnap? From 8fd02bb6260c61c217589809372959c523be3a84 Mon Sep 17 00:00:00 2001 From: Amal Bansode Date: Mon, 2 Mar 2026 17:33:57 -0800 Subject: [PATCH 1039/1093] types/geo: fix floating point bug causing NaN returns in SphericalAngleTo (#18777) Subtle floating point imprecision can propagate and lead to trigonometric functions receiving inputs outside their domain, thus returning NaN. Clamp the input to the valid domain to prevent this. Also adds a fuzz test for SphericalAngleTo. Updates tailscale/corp#37518 Signed-off-by: Amal Bansode --- types/geo/point.go | 3 + types/geo/point_test.go | 147 ++++++++++++++++++++++++---------------- 2 files changed, 92 insertions(+), 58 deletions(-) diff --git a/types/geo/point.go b/types/geo/point.go index 820582b0ff6b3..d039ea1fad7e2 100644 --- a/types/geo/point.go +++ b/types/geo/point.go @@ -125,6 +125,9 @@ func (p Point) SphericalAngleTo(q Point) (Radians, error) { sLat, sLng := float64(qLat.Radians()), float64(qLng.Radians()) cosA := math.Sin(rLat)*math.Sin(sLat) + math.Cos(rLat)*math.Cos(sLat)*math.Cos(rLng-sLng) + // Subtle floating point imprecision can lead to cosA being outside + // the domain of arccosine [-1, 1]. Clamp the input to avoid NaN return. + cosA = min(max(-1.0, cosA), 1.0) return Radians(math.Acos(cosA)), nil } diff --git a/types/geo/point_test.go b/types/geo/point_test.go index f0d0cb3abba3e..32a73180add34 100644 --- a/types/geo/point_test.go +++ b/types/geo/point_test.go @@ -448,65 +448,79 @@ func TestPointMarshalUint64(t *testing.T) { }) } +const earthRadius = 6371.000 // volumetric mean radius (km) +const kmToRad = 1 / earthRadius + +// Test corpus for exercising PointSphericalAngleTo. 
+var corpusPointSphericalAngleTo = []struct { + name string + x geo.Point + y geo.Point + want geo.Radians + wantErr string +}{ + { + name: "same-point-null-island", + x: geo.MakePoint(0, 0), + y: geo.MakePoint(0, 0), + want: 0.0 * geo.Radian, + }, + { + name: "same-point-north-pole", + x: geo.MakePoint(+90, 0), + y: geo.MakePoint(+90, +90), + want: 0.0 * geo.Radian, + }, + { + name: "same-point-south-pole", + x: geo.MakePoint(-90, 0), + y: geo.MakePoint(-90, -90), + want: 0.0 * geo.Radian, + }, + { + name: "north-pole-to-south-pole", + x: geo.MakePoint(+90, 0), + y: geo.MakePoint(-90, -90), + want: math.Pi * geo.Radian, + }, + { + name: "toronto-to-montreal", + x: geo.MakePoint(+43.6532, -79.3832), + y: geo.MakePoint(+45.5019, -73.5674), + want: 504.26 * kmToRad * geo.Radian, + }, + { + name: "sydney-to-san-francisco", + x: geo.MakePoint(-33.8727, +151.2057), + y: geo.MakePoint(+37.7749, -122.4194), + want: 11948.18 * kmToRad * geo.Radian, + }, + { + name: "new-york-to-paris", + x: geo.MakePoint(+40.7128, -74.0060), + y: geo.MakePoint(+48.8575, +2.3514), + want: 5837.15 * kmToRad * geo.Radian, + }, + { + name: "seattle-to-tokyo", + x: geo.MakePoint(+47.6061, -122.3328), + y: geo.MakePoint(+35.6764, +139.6500), + want: 7700.00 * kmToRad * geo.Radian, + }, + { + // Subtle floating point imprecision can propagate and lead to + // trigonometric functions receiving inputs outside their + // domain, thus returning NaN. + // Test one such case. 
+ name: "floating-point-precision-test", + x: geo.MakePoint(-6.0, 0.0), + y: geo.MakePoint(-6.0, 0.0), + want: 0.0 * geo.Radian, + }, +} + func TestPointSphericalAngleTo(t *testing.T) { - const earthRadius = 6371.000 // volumetric mean radius (km) - const kmToRad = 1 / earthRadius - for _, tt := range []struct { - name string - x geo.Point - y geo.Point - want geo.Radians - wantErr string - }{ - { - name: "same-point-null-island", - x: geo.MakePoint(0, 0), - y: geo.MakePoint(0, 0), - want: 0.0 * geo.Radian, - }, - { - name: "same-point-north-pole", - x: geo.MakePoint(+90, 0), - y: geo.MakePoint(+90, +90), - want: 0.0 * geo.Radian, - }, - { - name: "same-point-south-pole", - x: geo.MakePoint(-90, 0), - y: geo.MakePoint(-90, -90), - want: 0.0 * geo.Radian, - }, - { - name: "north-pole-to-south-pole", - x: geo.MakePoint(+90, 0), - y: geo.MakePoint(-90, -90), - want: math.Pi * geo.Radian, - }, - { - name: "toronto-to-montreal", - x: geo.MakePoint(+43.6532, -79.3832), - y: geo.MakePoint(+45.5019, -73.5674), - want: 504.26 * kmToRad * geo.Radian, - }, - { - name: "sydney-to-san-francisco", - x: geo.MakePoint(-33.8727, +151.2057), - y: geo.MakePoint(+37.7749, -122.4194), - want: 11948.18 * kmToRad * geo.Radian, - }, - { - name: "new-york-to-paris", - x: geo.MakePoint(+40.7128, -74.0060), - y: geo.MakePoint(+48.8575, +2.3514), - want: 5837.15 * kmToRad * geo.Radian, - }, - { - name: "seattle-to-tokyo", - x: geo.MakePoint(+47.6061, -122.3328), - y: geo.MakePoint(+35.6764, +139.6500), - want: 7700.00 * kmToRad * geo.Radian, - }, - } { + for _, tt := range corpusPointSphericalAngleTo { t.Run(tt.name, func(t *testing.T) { got, err := tt.x.SphericalAngleTo(tt.y) if tt.wantErr == "" && err != nil { @@ -536,6 +550,23 @@ func TestPointSphericalAngleTo(t *testing.T) { } } +func FuzzPointSphericalAngleTo(f *testing.F) { + for _, tt := range corpusPointSphericalAngleTo { + xLat, xLng, _ := tt.x.LatLngFloat64() + yLat, yLng, _ := tt.y.LatLngFloat64() + f.Add(xLat, xLng, yLat, yLng) + 
} + + f.Fuzz(func(t *testing.T, xLat float64, xLng float64, yLat float64, yLng float64) { + x := geo.MakePoint(geo.Degrees(xLat), geo.Degrees(xLng)) + y := geo.MakePoint(geo.Degrees(yLat), geo.Degrees(yLng)) + got, _ := x.SphericalAngleTo(y) + if math.IsNaN(float64(got)) { + t.Errorf("got NaN result with xLat=%.15f xLng=%.15f yLat=%.15f yLng=%.15f", xLat, xLng, yLat, yLng) + } + }) +} + func approx[T ~float64](x, y T) bool { return math.Abs(float64(x)-float64(y)) <= 1e-5 } From 0cca3bd4170f3fd126f1ac632f095fafd00099a1 Mon Sep 17 00:00:00 2001 From: Alex Chan Date: Fri, 27 Feb 2026 16:20:11 +0000 Subject: [PATCH 1040/1093] wgengine/magicsock: improve error message for moving Mullvad node keys The "public key moved" panic has caused confusion on multiple occasions, and is a known issue for Mullvad. Add a loose heuristic to detect Mullvad nodes, and trigger distinct panics for Mullvad and non-Mullvad instances, with a link to the associated bug. When this occurs again with Mullvad, it'll be easier for somebody to find the existing bug. If it occurs again with something other than Mullvad, it'll be more obvious that it's a distinct issue. Updates tailscale/corp#27300 Change-Id: Ie47271f45f2ff28f767578fcca5e6b21731d08a1 Signed-off-by: Alex Chan --- wgengine/magicsock/magicsock.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index b2852d2e2fdbc..dd8f27b23010f 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -3080,8 +3080,18 @@ func (c *Conn) updateNodes(self tailcfg.NodeView, peers []tailcfg.NodeView) (pee // we don't get this far. If ok was false above, that means it's a key // that differs from the one the NodeID had. But double check. if ep.nodeID != n.ID() { - // Server error. - devPanicf("public key moved between nodeIDs (old=%v new=%v, key=%s)", ep.nodeID, n.ID(), n.Key().String()) + // Server error. 
This is known to be a particular issue for Mullvad + // nodes (http://go/corp/27300), so log a distinct error for the + // Mullvad and non-Mullvad cases. The error will be logged either way, + // so an approximate heuristic is fine. + // + // When #27300 is fixed, we can delete this branch and log the same + // panic for any public key moving. + if strings.HasSuffix(n.Name(), ".mullvad.ts.net.") { + devPanicf("public key moved between Mullvad nodeIDs (old=%v new=%v, key=%s); see http://go/corp/27300", ep.nodeID, n.ID(), n.Key().String()) + } else { + devPanicf("public key moved between nodeIDs (old=%v new=%v, key=%s)", ep.nodeID, n.ID(), n.Key().String()) + } } else { // Internal data structures out of sync. devPanicf("public key found in peerMap but not by nodeID") From 2d21dd46cd9fbb2fcf020d6b5e764f3d4aaf2d2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Tue, 3 Mar 2026 09:04:37 -0500 Subject: [PATCH 1041/1093] wgengine/magicsoc,net/tstun: put disco key advertisement behind a nob (#18857) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To be less spammy in stable, add a nob that disables the creation and processing of TSMPDiscoKeyAdvertisements until we have a proper rollout mechanism. 
Updates #12639 Signed-off-by: Claus Lensbøl --- net/tstun/wrap.go | 11 +++++++---- wgengine/magicsock/magicsock.go | 4 ++++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 3c1315437f510..2f5d8c1d13254 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -23,6 +23,7 @@ import ( "github.com/tailscale/wireguard-go/tun" "go4.org/mem" "tailscale.com/disco" + "tailscale.com/envknob" "tailscale.com/feature/buildfeatures" "tailscale.com/net/packet" "tailscale.com/net/packet/checksum" @@ -1157,10 +1158,12 @@ func (t *Wrapper) filterPacketInboundFromWireGuard(p *packet.Parsed, captHook pa t.injectOutboundPong(p, pingReq) return filter.DropSilently, gro } else if discoKeyAdvert, ok := p.AsTSMPDiscoAdvertisement(); ok { - t.discoKeyAdvertisementPub.Publish(DiscoKeyAdvertisement{ - Src: discoKeyAdvert.Src, - Key: discoKeyAdvert.Key, - }) + if buildfeatures.HasCacheNetMap && envknob.Bool("TS_USE_CACHED_NETMAP") { + t.discoKeyAdvertisementPub.Publish(DiscoKeyAdvertisement{ + Src: discoKeyAdvert.Src, + Key: discoKeyAdvert.Key, + }) + } return filter.DropSilently, gro } else if data, ok := p.AsTSMPPong(); ok { if f := t.OnTSMPPongReceived; f != nil { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index dd8f27b23010f..169369f4bb472 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -4309,6 +4309,10 @@ type NewDiscoKeyAvailable struct { // // We do not need the Conn to be locked, but the endpoint should be. 
func (c *Conn) maybeSendTSMPDiscoAdvert(de *endpoint) { + if !buildfeatures.HasCacheNetMap || !envknob.Bool("TS_USE_CACHED_NETMAP") { + return + } + de.mu.Lock() defer de.mu.Unlock() if !de.sentDiscoKeyAdvertisement { From 120f27f383d5501d1483c5238b591e66db500fe4 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Fri, 20 Feb 2026 08:00:17 -0800 Subject: [PATCH 1042/1093] feature/conn25: stop adding multiple entries for same domain+dst We should only add one entry to our magic ips for each domain+dst and look up any existing entry instead of always creating a new one. Fixes tailscale/corp#34252 Signed-off-by: Fran Bull --- feature/conn25/conn25.go | 84 ++++++++++++++++------- feature/conn25/conn25_test.go | 124 ++++++++++++++++++++++------------ 2 files changed, 139 insertions(+), 69 deletions(-) diff --git a/feature/conn25/conn25.go b/feature/conn25/conn25.go index 02bec132dc10c..05f087e21df46 100644 --- a/feature/conn25/conn25.go +++ b/feature/conn25/conn25.go @@ -12,7 +12,6 @@ import ( "errors" "net/http" "net/netip" - "strings" "sync" "go4.org/netipx" @@ -310,8 +309,8 @@ const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experime type config struct { isConfigured bool apps []appctype.Conn25Attr - appsByDomain map[string][]string - selfRoutedDomains set.Set[string] + appsByDomain map[dnsname.FQDN][]string + selfRoutedDomains set.Set[dnsname.FQDN] } func configFromNodeView(n tailcfg.NodeView) (config, error) { @@ -326,8 +325,8 @@ func configFromNodeView(n tailcfg.NodeView) (config, error) { cfg := config{ isConfigured: true, apps: apps, - appsByDomain: map[string][]string{}, - selfRoutedDomains: set.Set[string]{}, + appsByDomain: map[dnsname.FQDN][]string{}, + selfRoutedDomains: set.Set[dnsname.FQDN]{}, } for _, app := range apps { selfMatchesTags := false @@ -342,10 +341,9 @@ func configFromNodeView(n tailcfg.NodeView) (config, error) { if err != nil { return config{}, err } - key := fqdn.WithTrailingDot() - mak.Set(&cfg.appsByDomain, key, 
append(cfg.appsByDomain[key], app.Name)) + mak.Set(&cfg.appsByDomain, fqdn, append(cfg.appsByDomain[fqdn], app.Name)) if selfMatchesTags { - cfg.selfRoutedDomains.Add(key) + cfg.selfRoutedDomains.Add(fqdn) } } } @@ -362,9 +360,8 @@ type client struct { mu sync.Mutex // protects the fields below magicIPPool *ippool transitIPPool *ippool - // map of magic IP -> (transit IP, app) - magicIPs map[netip.Addr]appAddr - config config + assignments addrAssignments + config config } func (c *client) isConfigured() bool { @@ -407,13 +404,7 @@ func (c *client) reconfig(newCfg config) error { return nil } -func (c *client) setMagicIP(magicAddr, transitAddr netip.Addr, app string) { - c.mu.Lock() - defer c.mu.Unlock() - mak.Set(&c.magicIPs, magicAddr, appAddr{addr: transitAddr, app: app}) -} - -func (c *client) isConnectorDomain(domain string) bool { +func (c *client) isConnectorDomain(domain dnsname.FQDN) bool { c.mu.Lock() defer c.mu.Unlock() appNames, ok := c.config.appsByDomain[domain] @@ -424,9 +415,12 @@ func (c *client) isConnectorDomain(domain string) bool { // for this domain+dst address, so that this client can use conn25 connectors. // It checks that this domain should be routed and that this client is not itself a connector for the domain // and generally if it is valid to make the assignment. 
-func (c *client) reserveAddresses(domain string, dst netip.Addr) (addrs, error) { +func (c *client) reserveAddresses(domain dnsname.FQDN, dst netip.Addr) (addrs, error) { c.mu.Lock() defer c.mu.Unlock() + if existing, ok := c.assignments.lookupByDomainDst(domain, dst); ok { + return existing, nil + } appNames, _ := c.config.appsByDomain[domain] // only reserve for first app app := appNames[0] @@ -438,17 +432,20 @@ func (c *client) reserveAddresses(domain string, dst netip.Addr) (addrs, error) if err != nil { return addrs{}, err } - addrs := addrs{ + as := addrs{ dst: dst, magic: mip, transit: tip, app: app, + domain: domain, } - return addrs, nil + if err := c.assignments.insert(as); err != nil { + return addrs{}, err + } + return as, nil } func (c *client) enqueueAddressAssignment(addrs addrs) { - c.setMagicIP(addrs.magic, addrs.transit, addrs.app) // TODO(fran) 2026-02-03 asynchronously send peerapi req to connector to // allocate these addresses for us. } @@ -483,8 +480,12 @@ func (c *client) mapDNSResponse(buf []byte) []byte { switch h.Type { case dnsmessage.TypeA: - domain := strings.ToLower(h.Name.String()) - if len(domain) == 0 || !c.isConnectorDomain(domain) { + domain, err := dnsname.ToFQDN(h.Name.String()) + if err != nil { + c.logf("bad dnsname: %v", err) + return buf + } + if !c.isConnectorDomain(domain) { if err := p.SkipAnswer(); err != nil { c.logf("error parsing dns response: %v", err) return buf @@ -540,9 +541,44 @@ type addrs struct { dst netip.Addr magic netip.Addr transit netip.Addr + domain dnsname.FQDN app string } func (c addrs) isValid() bool { return c.dst.IsValid() } + +// domainDst is a key for looking up an existing address assignment by the +// DNS response domain and destination IP pair. 
+type domainDst struct { + domain dnsname.FQDN + dst netip.Addr +} + +// addrAssignments is the collection of addrs assigned by this client +// supporting lookup by magicip or domain+dst +type addrAssignments struct { + byMagicIP map[netip.Addr]addrs + byDomainDst map[domainDst]addrs +} + +func (a *addrAssignments) insert(as addrs) error { + // we likely will want to allow overwriting in the future when we + // have address expiry, but for now this should not happen + if _, ok := a.byMagicIP[as.magic]; ok { + return errors.New("byMagicIP key exists") + } + ddst := domainDst{domain: as.domain, dst: as.dst} + if _, ok := a.byDomainDst[ddst]; ok { + return errors.New("byDomainDst key exists") + } + mak.Set(&a.byMagicIP, as.magic, as) + mak.Set(&a.byDomainDst, ddst, as) + return nil +} + +func (a *addrAssignments) lookupByDomainDst(domain dnsname.FQDN, dst netip.Addr) (addrs, bool) { + v, ok := a.byDomainDst[domainDst{domain: domain, dst: dst}] + return v, ok +} diff --git a/feature/conn25/conn25_test.go b/feature/conn25/conn25_test.go index 0489b22a14e4d..d63e84e024738 100644 --- a/feature/conn25/conn25_test.go +++ b/feature/conn25/conn25_test.go @@ -16,6 +16,8 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/appctype" "tailscale.com/types/logger" + "tailscale.com/util/dnsname" + "tailscale.com/util/must" "tailscale.com/util/set" ) @@ -206,34 +208,16 @@ func TestTransitIPTargetUnknownTIP(t *testing.T) { } } -func TestSetMagicIP(t *testing.T) { - c := newConn25(logger.Discard) - mip := netip.MustParseAddr("0.0.0.1") - tip := netip.MustParseAddr("0.0.0.2") - app := "a" - c.client.setMagicIP(mip, tip, app) - val, ok := c.client.magicIPs[mip] - if !ok { - t.Fatal("expected there to be a value stored for the magic IP") - } - if val.addr != tip { - t.Fatalf("want %v, got %v", tip, val.addr) - } - if val.app != app { - t.Fatalf("want %s, got %s", app, val.app) - } -} - func TestReserveIPs(t *testing.T) { c := newConn25(logger.Discard) c.client.magicIPPool = 
newIPPool(mustIPSetFromPrefix("100.64.0.0/24")) c.client.transitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24")) - mbd := map[string][]string{} + mbd := map[dnsname.FQDN][]string{} mbd["example.com."] = []string{"a"} c.client.config.appsByDomain = mbd dst := netip.MustParseAddr("0.0.0.1") - con, err := c.client.reserveAddresses("example.com.", dst) + addrs, err := c.client.reserveAddresses("example.com.", dst) if err != nil { t.Fatal(err) } @@ -242,18 +226,22 @@ func TestReserveIPs(t *testing.T) { wantMagic := netip.MustParseAddr("100.64.0.0") // first from magic pool wantTransit := netip.MustParseAddr("169.254.0.0") // first from transit pool wantApp := "a" // the app name related to example.com. + wantDomain := must.Get(dnsname.ToFQDN("example.com.")) - if wantDst != con.dst { - t.Errorf("want %v, got %v", wantDst, con.dst) + if wantDst != addrs.dst { + t.Errorf("want %v, got %v", wantDst, addrs.dst) + } + if wantMagic != addrs.magic { + t.Errorf("want %v, got %v", wantMagic, addrs.magic) } - if wantMagic != con.magic { - t.Errorf("want %v, got %v", wantMagic, con.magic) + if wantTransit != addrs.transit { + t.Errorf("want %v, got %v", wantTransit, addrs.transit) } - if wantTransit != con.transit { - t.Errorf("want %v, got %v", wantTransit, con.transit) + if wantApp != addrs.app { + t.Errorf("want %s, got %s", wantApp, addrs.app) } - if wantApp != con.app { - t.Errorf("want %s, got %s", wantApp, con.app) + if wantDomain != addrs.domain { + t.Errorf("want %s, got %s", wantDomain, addrs.domain) } } @@ -287,8 +275,8 @@ func TestConfigReconfig(t *testing.T) { cfg []appctype.Conn25Attr tags []string wantErr bool - wantAppsByDomain map[string][]string - wantSelfRoutedDomains set.Set[string] + wantAppsByDomain map[dnsname.FQDN][]string + wantSelfRoutedDomains set.Set[dnsname.FQDN] }{ { name: "bad-config", @@ -302,11 +290,11 @@ func TestConfigReconfig(t *testing.T) { {Name: "two", Domains: []string{"b.example.com"}, Connectors: []string{"tag:two"}}, }, tags: 
[]string{"tag:one"}, - wantAppsByDomain: map[string][]string{ + wantAppsByDomain: map[dnsname.FQDN][]string{ "a.example.com.": {"one"}, "b.example.com.": {"two"}, }, - wantSelfRoutedDomains: set.SetOf([]string{"a.example.com."}), + wantSelfRoutedDomains: set.SetOf([]dnsname.FQDN{"a.example.com."}), }, { name: "more-complex", @@ -317,7 +305,7 @@ func TestConfigReconfig(t *testing.T) { {Name: "four", Domains: []string{"4.b.example.com", "4.d.example.com"}, Connectors: []string{"tag:four"}}, }, tags: []string{"tag:onea", "tag:four", "tag:unrelated"}, - wantAppsByDomain: map[string][]string{ + wantAppsByDomain: map[dnsname.FQDN][]string{ "1.a.example.com.": {"one"}, "1.b.example.com.": {"one", "three"}, "1.c.example.com.": {"three"}, @@ -326,7 +314,7 @@ func TestConfigReconfig(t *testing.T) { "4.b.example.com.": {"four"}, "4.d.example.com.": {"four"}, }, - wantSelfRoutedDomains: set.SetOf([]string{"1.a.example.com.", "1.b.example.com.", "4.b.example.com.", "4.d.example.com."}), + wantSelfRoutedDomains: set.SetOf([]dnsname.FQDN{"1.a.example.com.", "1.b.example.com.", "4.b.example.com.", "4.d.example.com."}), }, } { t.Run(tt.name, func(t *testing.T) { @@ -431,18 +419,24 @@ func TestMapDNSResponse(t *testing.T) { } for _, tt := range []struct { - name string - domain string - addrs []dnsmessage.AResource - wantMagicIPs map[netip.Addr]appAddr + name string + domain string + addrs []dnsmessage.AResource + wantByMagicIP map[netip.Addr]addrs }{ { name: "one-ip-matches", domain: "example.com.", addrs: []dnsmessage.AResource{{A: [4]byte{1, 0, 0, 0}}}, // these are 'expected' because they are the beginning of the provided pools - wantMagicIPs: map[netip.Addr]appAddr{ - netip.MustParseAddr("100.64.0.0"): {app: "app1", addr: netip.MustParseAddr("100.64.0.40")}, + wantByMagicIP: map[netip.Addr]addrs{ + netip.MustParseAddr("100.64.0.0"): { + domain: "example.com.", + dst: netip.MustParseAddr("1.0.0.0"), + magic: netip.MustParseAddr("100.64.0.0"), + transit: 
netip.MustParseAddr("100.64.0.40"), + app: "app1", + }, }, }, { @@ -452,9 +446,21 @@ func TestMapDNSResponse(t *testing.T) { {A: [4]byte{1, 0, 0, 0}}, {A: [4]byte{2, 0, 0, 0}}, }, - wantMagicIPs: map[netip.Addr]appAddr{ - netip.MustParseAddr("100.64.0.0"): {app: "app1", addr: netip.MustParseAddr("100.64.0.40")}, - netip.MustParseAddr("100.64.0.1"): {app: "app1", addr: netip.MustParseAddr("100.64.0.41")}, + wantByMagicIP: map[netip.Addr]addrs{ + netip.MustParseAddr("100.64.0.0"): { + domain: "example.com.", + dst: netip.MustParseAddr("1.0.0.0"), + magic: netip.MustParseAddr("100.64.0.0"), + transit: netip.MustParseAddr("100.64.0.40"), + app: "app1", + }, + netip.MustParseAddr("100.64.0.1"): { + domain: "example.com.", + dst: netip.MustParseAddr("2.0.0.0"), + magic: netip.MustParseAddr("100.64.0.1"), + transit: netip.MustParseAddr("100.64.0.41"), + app: "app1", + }, }, }, { @@ -482,9 +488,37 @@ func TestMapDNSResponse(t *testing.T) { if !reflect.DeepEqual(dnsResp, bs) { t.Fatal("shouldn't be changing the bytes (yet)") } - if diff := cmp.Diff(tt.wantMagicIPs, c.client.magicIPs, cmpopts.EquateComparable(appAddr{}, netip.Addr{})); diff != "" { - t.Errorf("magicIPs diff (-want, +got):\n%s", diff) + if diff := cmp.Diff(tt.wantByMagicIP, c.client.assignments.byMagicIP, cmpopts.EquateComparable(addrs{}, netip.Addr{})); diff != "" { + t.Errorf("byMagicIP diff (-want, +got):\n%s", diff) } }) } } + +func TestReserveAddressesDeduplicated(t *testing.T) { + c := newConn25(logger.Discard) + c.client.magicIPPool = newIPPool(mustIPSetFromPrefix("100.64.0.0/24")) + c.client.transitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24")) + c.client.config.appsByDomain = map[dnsname.FQDN][]string{"example.com.": {"a"}} + + dst := netip.MustParseAddr("0.0.0.1") + first, err := c.client.reserveAddresses("example.com.", dst) + if err != nil { + t.Fatal(err) + } + + second, err := c.client.reserveAddresses("example.com.", dst) + if err != nil { + t.Fatal(err) + } + + if first != second { 
+ t.Errorf("expected same addrs on repeated call, got first=%v second=%v", first, second) + } + if got := len(c.client.assignments.byMagicIP); got != 1 { + t.Errorf("want 1 entry in byMagicIP, got %d", got) + } + if got := len(c.client.assignments.byDomainDst); got != 1 { + t.Errorf("want 1 entry in byDomainDst, got %d", got) + } +} From d42b3743b72c8fd7df77945f99bf6aaec617f33d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 4 Mar 2026 03:31:13 +0000 Subject: [PATCH 1043/1093] net/porttrack: add net.Listen wrapper to help tests allocate ports race-free Updates tailscale/corp#27805 Updates tailscale/corp#27806 Updates tailscale/corp#37964 Change-Id: I7bb5ed7f258e840a8208e5d725c7b2f126d7ef96 Signed-off-by: Brad Fitzpatrick --- net/porttrack/porttrack.go | 176 ++++++++++++++++++++++++++++++++ net/porttrack/porttrack_test.go | 95 +++++++++++++++++ 2 files changed, 271 insertions(+) create mode 100644 net/porttrack/porttrack.go create mode 100644 net/porttrack/porttrack_test.go diff --git a/net/porttrack/porttrack.go b/net/porttrack/porttrack.go new file mode 100644 index 0000000000000..822e7200e19e7 --- /dev/null +++ b/net/porttrack/porttrack.go @@ -0,0 +1,176 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +// Package porttrack provides race-free ephemeral port assignment for +// subprocess tests. The parent test process creates a [Collector] that +// listens on a TCP port; the child process uses [Listen] which, when +// given a magic address, binds to localhost:0 and reports the actual +// port back to the collector. +// +// The magic address format is: +// +// testport-report:HOST:PORT/LABEL +// +// where HOST:PORT is the collector's TCP address and LABEL identifies +// which listener this is (e.g. "main", "plaintext"). +// +// When [Listen] is called with a non-magic address, it falls through to +// [net.Listen] with zero overhead beyond a single [strings.HasPrefix] +// check. 
+package porttrack + +import ( + "bufio" + "context" + "fmt" + "net" + "strconv" + "strings" + "sync" + + "tailscale.com/util/testenv" +) + +const magicPrefix = "testport-report:" + +// Collector is the parent/test side of the porttrack protocol. It +// listens for port reports from child processes that used [Listen] +// with a magic address obtained from [Collector.Addr]. +type Collector struct { + ln net.Listener + mu sync.Mutex + cond *sync.Cond + ports map[string]int + err error // non-nil if a context passed to Port was cancelled +} + +// NewCollector creates a new Collector. The collector's TCP listener is +// closed when t finishes. +func NewCollector(t testenv.TB) *Collector { + t.Helper() + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("porttrack.NewCollector: %v", err) + } + c := &Collector{ + ln: ln, + ports: make(map[string]int), + } + c.cond = sync.NewCond(&c.mu) + go c.accept(t) + t.Cleanup(func() { ln.Close() }) + return c +} + +// accept runs in a goroutine, accepting connections and parsing port +// reports until the listener is closed. +func (c *Collector) accept(t testenv.TB) { + for { + conn, err := c.ln.Accept() + if err != nil { + return // listener closed + } + go c.handleConn(t, conn) + } +} + +func (c *Collector) handleConn(t testenv.TB, conn net.Conn) { + defer conn.Close() + scanner := bufio.NewScanner(conn) + for scanner.Scan() { + line := scanner.Text() + label, portStr, ok := strings.Cut(line, "\t") + if !ok { + t.Errorf("porttrack: malformed report line: %q", line) + return + } + port, err := strconv.Atoi(portStr) + if err != nil { + t.Errorf("porttrack: bad port in report %q: %v", line, err) + return + } + c.mu.Lock() + c.ports[label] = port + c.cond.Broadcast() + c.mu.Unlock() + } +} + +// Addr returns a magic address string that, when passed to [Listen], +// causes the child to bind to localhost:0 and report its actual port +// back to this collector under the given label. 
+func (c *Collector) Addr(label string) string { + return magicPrefix + c.ln.Addr().String() + "/" + label +} + +// Port blocks until the child process has reported the port for the +// given label, then returns it. If ctx is cancelled before a port is +// reported, Port returns the context's cause as an error. +func (c *Collector) Port(ctx context.Context, label string) (int, error) { + stop := context.AfterFunc(ctx, func() { + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.err = context.Cause(ctx) + } + c.cond.Broadcast() + }) + defer stop() + + c.mu.Lock() + defer c.mu.Unlock() + for { + if p, ok := c.ports[label]; ok { + return p, nil + } + if c.err != nil { + return 0, c.err + } + c.cond.Wait() + } +} + +// Listen is the child/production side of the porttrack protocol. +// +// If address has the magic prefix (as returned by [Collector.Addr]), +// Listen binds to localhost:0 on the given network, then TCP-connects +// to the collector and writes "LABEL\tPORT\n" to report the actual +// port. The collector connection is closed before returning. +// +// If address does not have the magic prefix, Listen is simply +// [net.Listen](network, address). 
+func Listen(network, address string) (net.Listener, error) { + rest, ok := strings.CutPrefix(address, magicPrefix) + if !ok { + return net.Listen(network, address) + } + + // rest is "HOST:PORT/LABEL" + slashIdx := strings.LastIndex(rest, "/") + if slashIdx < 0 { + return nil, fmt.Errorf("porttrack: malformed magic address %q: missing /LABEL", address) + } + collectorAddr := rest[:slashIdx] + label := rest[slashIdx+1:] + + ln, err := net.Listen(network, "localhost:0") + if err != nil { + return nil, err + } + + port := ln.Addr().(*net.TCPAddr).Port + + conn, err := net.Dial("tcp", collectorAddr) + if err != nil { + ln.Close() + return nil, fmt.Errorf("porttrack: failed to connect to collector at %s: %v", collectorAddr, err) + } + _, err = fmt.Fprintf(conn, "%s\t%d\n", label, port) + conn.Close() + if err != nil { + ln.Close() + return nil, fmt.Errorf("porttrack: failed to report port to collector: %v", err) + } + + return ln, nil +} diff --git a/net/porttrack/porttrack_test.go b/net/porttrack/porttrack_test.go new file mode 100644 index 0000000000000..06412d87554fc --- /dev/null +++ b/net/porttrack/porttrack_test.go @@ -0,0 +1,95 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package porttrack + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "testing" +) + +func TestCollectorAndListen(t *testing.T) { + c := NewCollector(t) + + labels := []string{"main", "plaintext", "debug"} + ports := make([]int, len(labels)) + + for i, label := range labels { + ln, err := Listen("tcp", c.Addr(label)) + if err != nil { + t.Fatalf("Listen(%q): %v", label, err) + } + defer ln.Close() + p, err := c.Port(t.Context(), label) + if err != nil { + t.Fatalf("Port(%q): %v", label, err) + } + ports[i] = p + } + + // All ports should be distinct non-zero values. 
+ seen := map[int]string{} + for i, label := range labels { + if ports[i] == 0 { + t.Errorf("Port(%q) = 0", label) + } + if prev, ok := seen[ports[i]]; ok { + t.Errorf("Port(%q) = Port(%q) = %d", label, prev, ports[i]) + } + seen[ports[i]] = label + } +} + +func TestListenPassthrough(t *testing.T) { + ln, err := Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Listen passthrough: %v", err) + } + defer ln.Close() + if ln.Addr().(*net.TCPAddr).Port == 0 { + t.Fatal("expected non-zero port") + } +} + +func TestRoundTrip(t *testing.T) { + c := NewCollector(t) + + ln, err := Listen("tcp", c.Addr("http")) + if err != nil { + t.Fatalf("Listen: %v", err) + } + defer ln.Close() + + // Start a server on the listener. + go http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + })) + + port, err := c.Port(t.Context(), "http") + if err != nil { + t.Fatalf("Port: %v", err) + } + resp, err := http.Get(fmt.Sprintf("http://localhost:%d/", port)) + if err != nil { + t.Fatalf("http.Get: %v", err) + } + resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("status = %d, want %d", resp.StatusCode, http.StatusNoContent) + } +} + +func TestPortContextCancelled(t *testing.T) { + c := NewCollector(t) + // Nobody will ever report "never", so Port should block until ctx is done. 
+ ctx, cancel := context.WithCancel(t.Context()) + cancel() + _, err := c.Port(ctx, "never") + if !errors.Is(err, context.Canceled) { + t.Fatalf("Port with cancelled context: got %v, want %v", err, context.Canceled) + } +} From dab8922fcfeec7f5d944ea10ad3816c3ae1e51dd Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Wed, 4 Mar 2026 10:59:43 -0800 Subject: [PATCH 1044/1093] go.mod: bump github.com/cloudflare/circl version (#18878) Pick up a fix in https://pkg.go.dev/vuln/GO-2026-4550 Updates #cleanup Signed-off-by: Andrew Lytvynov --- flake.nix | 2 +- go.mod | 2 +- go.mod.sri | 2 +- go.sum | 4 ++-- shell.nix | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.nix b/flake.nix index 0dbf74e7884aa..64956a97fef55 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-Lr+5B0LEFk66WahPczRcfzH8rSL5Cc2qvNJuW6B0Llc= +# nix-direnv cache busting line: sha256-rhuWEEN+CtumVxOw6Dy/IRxWIrZ2x6RJb6ULYwXCQc4= diff --git a/go.mod b/go.mod index caa58b60833bc..202ad894bdaff 100644 --- a/go.mod +++ b/go.mod @@ -313,7 +313,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect - github.com/cloudflare/circl v1.6.1 // indirect + github.com/cloudflare/circl v1.6.3 // indirect github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect github.com/daixiang0/gci v0.12.3 // indirect diff --git a/go.mod.sri b/go.mod.sri index 91887e63b3c8c..a307075942f64 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-Lr+5B0LEFk66WahPczRcfzH8rSL5Cc2qvNJuW6B0Llc= +sha256-rhuWEEN+CtumVxOw6Dy/IRxWIrZ2x6RJb6ULYwXCQc4= diff --git a/go.sum b/go.sum index 1f8195e47fff6..b61f1d24a1db1 100644 --- a/go.sum +++ b/go.sum @@ -253,8 +253,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp github.com/ckaznocha/intrange v0.1.0 
h1:ZiGBhvrdsKpoEfzh9CjBfDSZof6QB0ORY5tXasUtiew= github.com/ckaznocha/intrange v0.1.0/go.mod h1:Vwa9Ekex2BrEQMg6zlrWwbs/FtYw7eS5838Q7UjK7TQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= -github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= +github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= +github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= diff --git a/shell.nix b/shell.nix index a822b705a3062..7ddf62c52df5c 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-Lr+5B0LEFk66WahPczRcfzH8rSL5Cc2qvNJuW6B0Llc= +# nix-direnv cache busting line: sha256-rhuWEEN+CtumVxOw6Dy/IRxWIrZ2x6RJb6ULYwXCQc4= From 26ef46bf8196f5ab36e94aeeda458dcf65868fcf Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 4 Mar 2026 14:09:11 -0500 Subject: [PATCH 1045/1093] util/linuxfw,wgengine/router: add connmark rules for rp_filter workaround (#18860) When a Linux system acts as an exit node or subnet router with strict reverse path filtering (rp_filter=1), reply packets may be dropped because they fail the RPF check. Reply packets arrive on the WAN interface but the routing table indicates they should have arrived on the Tailscale interface, causing the kernel to drop them. This adds firewall rules in the mangle table to save outbound packet marks to conntrack and restore them on reply packets before the routing decision. 
When reply packets have their marks restored, the kernel uses the correct routing table (based on the mark) and the packets pass the rp_filter check. Implementation adds two rules per address family (IPv4/IPv6): - mangle/OUTPUT: Save packet marks to conntrack for NEW connections with non-zero marks in the Tailscale fwmark range (0xff0000) - mangle/PREROUTING: Restore marks from conntrack to packets for ESTABLISHED,RELATED connections before routing decision and rp_filter check The workaround is automatically enabled when UseConnmarkForRPFilter is set in the router configuration, which happens when subnet routes are advertised on Linux systems. Both iptables and nftables implementations are provided, with automatic backend detection. Fixes #3310 Fixes #14409 Fixes #12022 Fixes #15815 Fixes #9612 Signed-off-by: Mike O'Driscoll --- util/linuxfw/fake_netfilter.go | 2 + util/linuxfw/iptables_runner.go | 98 +++++++ util/linuxfw/nftables_runner.go | 245 ++++++++++++++++++ util/linuxfw/nftables_runner_test.go | 243 +++++++++++++++++ wgengine/router/osrouter/router_linux.go | 36 +++ wgengine/router/osrouter/router_linux_test.go | 202 ++++++++++++++- 6 files changed, 814 insertions(+), 12 deletions(-) diff --git a/util/linuxfw/fake_netfilter.go b/util/linuxfw/fake_netfilter.go index d760edfcf757e..eac5d904cff3a 100644 --- a/util/linuxfw/fake_netfilter.go +++ b/util/linuxfw/fake_netfilter.go @@ -71,6 +71,8 @@ func (f *FakeNetfilterRunner) AddHooks() error { retur func (f *FakeNetfilterRunner) DelHooks(logf logger.Logf) error { return nil } func (f *FakeNetfilterRunner) AddSNATRule() error { return nil } func (f *FakeNetfilterRunner) DelSNATRule() error { return nil } +func (f *FakeNetfilterRunner) AddConnmarkSaveRule() error { return nil } +func (f *FakeNetfilterRunner) DelConnmarkSaveRule() error { return nil } func (f *FakeNetfilterRunner) AddStatefulRule(tunname string) error { return nil } func (f *FakeNetfilterRunner) DelStatefulRule(tunname string) error { return nil } 
func (f *FakeNetfilterRunner) AddLoopbackRule(addr netip.Addr) error { return nil } diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index ed55960b36d7c..b8eb39f219be9 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -527,6 +527,104 @@ func (i *iptablesRunner) DelStatefulRule(tunname string) error { return nil } +// AddConnmarkSaveRule adds conntrack marking rules to save and restore marks. +// These rules run in mangle/PREROUTING (to restore marks from conntrack) and +// mangle/OUTPUT (to save marks to conntrack) before rp_filter checks, enabling +// proper routing table lookups for exit nodes and subnet routers. +func (i *iptablesRunner) AddConnmarkSaveRule() error { + // Check if rules already exist (idempotency) + for _, ipt := range []iptablesInterface{i.ipt4, i.ipt6} { + rules, err := ipt.List("mangle", "PREROUTING") + if err != nil { + continue + } + // Look for existing connmark restore rule + for _, rule := range rules { + if strings.Contains(rule, "CONNMARK") && + strings.Contains(rule, "restore-mark") && + strings.Contains(rule, "ctmask 0xff0000") { + // Rules already exist, skip adding + return nil + } + } + } + + // mangle/PREROUTING: Restore mark from conntrack for ESTABLISHED/RELATED connections + // This runs BEFORE routing decision and rp_filter check + for _, ipt := range []iptablesInterface{i.ipt4, i.ipt6} { + args := []string{ + "-m", "conntrack", + "--ctstate", "ESTABLISHED,RELATED", + "-j", "CONNMARK", + "--restore-mark", + "--nfmask", fwmarkMask, + "--ctmask", fwmarkMask, + } + if err := ipt.Insert("mangle", "PREROUTING", 1, args...); err != nil { + return fmt.Errorf("adding %v in mangle/PREROUTING: %w", args, err) + } + } + + // mangle/OUTPUT: Save mark to conntrack for NEW connections with non-zero marks + for _, ipt := range []iptablesInterface{i.ipt4, i.ipt6} { + args := []string{ + "-m", "conntrack", + "--ctstate", "NEW", + "-m", "mark", + "!", "--mark", "0x0/" + 
fwmarkMask, + "-j", "CONNMARK", + "--save-mark", + "--nfmask", fwmarkMask, + "--ctmask", fwmarkMask, + } + if err := ipt.Insert("mangle", "OUTPUT", 1, args...); err != nil { + return fmt.Errorf("adding %v in mangle/OUTPUT: %w", args, err) + } + } + + return nil +} + +// DelConnmarkSaveRule removes conntrack marking rules added by AddConnmarkSaveRule. +func (i *iptablesRunner) DelConnmarkSaveRule() error { + for _, ipt := range []iptablesInterface{i.ipt4, i.ipt6} { + // Delete PREROUTING rule + args := []string{ + "-m", "conntrack", + "--ctstate", "ESTABLISHED,RELATED", + "-j", "CONNMARK", + "--restore-mark", + "--nfmask", fwmarkMask, + "--ctmask", fwmarkMask, + } + if err := ipt.Delete("mangle", "PREROUTING", args...); err != nil { + if !isNotExistError(err) { + return fmt.Errorf("deleting connmark rule in mangle/PREROUTING: %w", err) + } + // Rule doesn't exist - this is fine for idempotency + } + + // Delete OUTPUT rule + args = []string{ + "-m", "conntrack", + "--ctstate", "NEW", + "-m", "mark", + "!", "--mark", "0x0/" + fwmarkMask, + "-j", "CONNMARK", + "--save-mark", + "--nfmask", fwmarkMask, + "--ctmask", fwmarkMask, + } + if err := ipt.Delete("mangle", "OUTPUT", args...); err != nil { + if !isNotExistError(err) { + return fmt.Errorf("deleting connmark rule in mangle/OUTPUT: %w", err) + } + // Rule doesn't exist - this is fine for idempotency + } + } + return nil +} + // buildMagicsockPortRule generates the string slice containing the arguments // to describe a rule accepting traffic on a particular port to iptables. It is // separated out here to avoid repetition in AddMagicsockPortRule and diff --git a/util/linuxfw/nftables_runner.go b/util/linuxfw/nftables_runner.go index 2c44a6218e76e..7496e7034a98a 100644 --- a/util/linuxfw/nftables_runner.go +++ b/util/linuxfw/nftables_runner.go @@ -521,6 +521,15 @@ type NetfilterRunner interface { // using conntrack. 
DelStatefulRule(tunname string) error + // AddConnmarkSaveRule adds conntrack marking rules to save marks from packets. + // These rules run in mangle/PREROUTING and mangle/OUTPUT to mark connections + // and restore marks on reply packets before rp_filter checks, enabling proper + // routing table lookups for exit nodes and subnet routers. + AddConnmarkSaveRule() error + + // DelConnmarkSaveRule removes conntrack marking rules added by AddConnmarkSaveRule. + DelConnmarkSaveRule() error + // HasIPV6 reports true if the system supports IPv6. HasIPV6() bool @@ -1950,6 +1959,242 @@ func (n *nftablesRunner) DelStatefulRule(tunname string) error { return nil } +// makeConnmarkRestoreExprs creates nftables expressions to restore mark from conntrack. +// Implements: ct state established,related ct mark & 0xff0000 != 0 meta mark set ct mark & 0xff0000 +func makeConnmarkRestoreExprs() []expr.Any { + return []expr.Any{ + // Load conntrack state into register 1 + &expr.Ct{ + Register: 1, + Key: expr.CtKeySTATE, + }, + // Check if state is ESTABLISHED or RELATED + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: nativeUint32( + expr.CtStateBitESTABLISHED | + expr.CtStateBitRELATED), + Xor: nativeUint32(0), + }, + &expr.Cmp{ + Op: expr.CmpOpNeq, + Register: 1, + Data: []byte{0, 0, 0, 0}, + }, + // Load conntrack mark into register 1 + &expr.Ct{ + Register: 1, + Key: expr.CtKeyMARK, + }, + // Mask to Tailscale mark bits (0xff0000) + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: getTailscaleFwmarkMask(), + Xor: []byte{0x00, 0x00, 0x00, 0x00}, + }, + // Set packet mark from register 1 + &expr.Meta{ + Key: expr.MetaKeyMARK, + SourceRegister: true, + Register: 1, + }, + } +} + +// makeConnmarkSaveExprs creates nftables expressions to save mark to conntrack. 
+// Implements: ct state new meta mark & 0xff0000 != 0 ct mark set meta mark & 0xff0000 +func makeConnmarkSaveExprs() []expr.Any { + return []expr.Any{ + // Load conntrack state into register 1 + &expr.Ct{ + Register: 1, + Key: expr.CtKeySTATE, + }, + // Check if state is NEW + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: nativeUint32(expr.CtStateBitNEW), + Xor: nativeUint32(0), + }, + &expr.Cmp{ + Op: expr.CmpOpNeq, + Register: 1, + Data: []byte{0, 0, 0, 0}, + }, + // Load packet mark into register 1 + &expr.Meta{ + Key: expr.MetaKeyMARK, + Register: 1, + }, + // Mask to Tailscale mark bits (0xff0000) + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: getTailscaleFwmarkMask(), + Xor: []byte{0x00, 0x00, 0x00, 0x00}, + }, + // Check if mark is non-zero + &expr.Cmp{ + Op: expr.CmpOpNeq, + Register: 1, + Data: []byte{0, 0, 0, 0}, + }, + // Load packet mark again for saving + &expr.Meta{ + Key: expr.MetaKeyMARK, + Register: 1, + }, + // Mask again + &expr.Bitwise{ + SourceRegister: 1, + DestRegister: 1, + Len: 4, + Mask: getTailscaleFwmarkMask(), + Xor: []byte{0x00, 0x00, 0x00, 0x00}, + }, + // Set conntrack mark from register 1 + &expr.Ct{ + Key: expr.CtKeyMARK, + SourceRegister: true, + Register: 1, + }, + } +} + +// AddConnmarkSaveRule adds conntrack marking rules to save and restore marks. +// These rules run in mangle/PREROUTING (to restore marks from conntrack) and +// mangle/OUTPUT (to save marks to conntrack) before rp_filter checks, enabling +// proper routing table lookups for exit nodes and subnet routers. 
+func (n *nftablesRunner) AddConnmarkSaveRule() error { + conn := n.conn + + // Check if rules already exist (idempotency) + for _, table := range n.getTables() { + mangleTable := &nftables.Table{ + Family: table.Proto, + Name: "mangle", + } + + // Check PREROUTING chain for restore rule + preroutingChain, err := getChainFromTable(conn, mangleTable, "PREROUTING") + if err == nil { + rules, _ := conn.GetRules(preroutingChain.Table, preroutingChain) + for _, rule := range rules { + if string(rule.UserData) == "ts-connmark-restore" { + // Rules already exist, skip adding + return nil + } + } + } + } + + // Add rules for both IPv4 and IPv6 + for _, table := range n.getTables() { + // Get or create mangle table + mangleTable := &nftables.Table{ + Family: table.Proto, + Name: "mangle", + } + conn.AddTable(mangleTable) + + // Get or create PREROUTING chain + preroutingChain, err := getChainFromTable(conn, mangleTable, "PREROUTING") + if err != nil { + // Chain doesn't exist, create it + preroutingChain = conn.AddChain(&nftables.Chain{ + Name: "PREROUTING", + Table: mangleTable, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookPrerouting, + Priority: nftables.ChainPriorityMangle, + }) + } + + // Add PREROUTING rule to restore mark from conntrack + conn.InsertRule(&nftables.Rule{ + Table: mangleTable, + Chain: preroutingChain, + Exprs: makeConnmarkRestoreExprs(), + UserData: []byte("ts-connmark-restore"), + }) + + // Get or create OUTPUT chain + outputChain, err := getChainFromTable(conn, mangleTable, "OUTPUT") + if err != nil { + // Chain doesn't exist, create it + outputChain = conn.AddChain(&nftables.Chain{ + Name: "OUTPUT", + Table: mangleTable, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookOutput, + Priority: nftables.ChainPriorityMangle, + }) + } + + // Add OUTPUT rule to save mark to conntrack + conn.InsertRule(&nftables.Rule{ + Table: mangleTable, + Chain: outputChain, + Exprs: makeConnmarkSaveExprs(), + UserData: 
[]byte("ts-connmark-save"), + }) + } + + if err := conn.Flush(); err != nil { + return fmt.Errorf("flush add connmark rules: %w", err) + } + + return nil +} + +// DelConnmarkSaveRule removes conntrack marking rules added by AddConnmarkSaveRule. +func (n *nftablesRunner) DelConnmarkSaveRule() error { + conn := n.conn + + for _, table := range n.getTables() { + mangleTable := &nftables.Table{ + Family: table.Proto, + Name: "mangle", + } + + // Remove PREROUTING rule - look for restore-mark rule by UserData + preroutingChain, err := getChainFromTable(conn, mangleTable, "PREROUTING") + if err == nil { + rules, _ := conn.GetRules(preroutingChain.Table, preroutingChain) + for _, rule := range rules { + if string(rule.UserData) == "ts-connmark-restore" { + conn.DelRule(rule) + break + } + } + } + + // Remove OUTPUT rule - look for save-mark rule by UserData + outputChain, err := getChainFromTable(conn, mangleTable, "OUTPUT") + if err == nil { + rules, _ := conn.GetRules(outputChain.Table, outputChain) + for _, rule := range rules { + if string(rule.UserData) == "ts-connmark-save" { + conn.DelRule(rule) + break + } + } + } + } + + // Ignore errors during deletion - rules might not exist + conn.Flush() + + return nil +} + // cleanupChain removes a jump rule from hookChainName to tsChainName, and then // the entire chain tsChainName. Errors are logged, but attempts to remove both // the jump rule and chain continue even if one errors. diff --git a/util/linuxfw/nftables_runner_test.go b/util/linuxfw/nftables_runner_test.go index dc4d3194a23ba..8299a9cbd72da 100644 --- a/util/linuxfw/nftables_runner_test.go +++ b/util/linuxfw/nftables_runner_test.go @@ -1070,3 +1070,246 @@ func checkSNATRule_nft(t *testing.T, runner *nftablesRunner, fam nftables.TableF wantsRule := snatRule(chain.Table, chain, src, dst, meta) checkRule(t, wantsRule, runner.conn) } + +// TestNFTAddAndDelConnmarkRules tests adding and removing connmark rules +// in a real network namespace. 
This verifies the rules are correctly created +// and cleaned up. +func TestNFTAddAndDelConnmarkRules(t *testing.T) { + conn := newSysConn(t) + runner := newFakeNftablesRunnerWithConn(t, conn, true) + + // Helper to get mangle chains + getMangleChains := func(fam nftables.TableFamily) (prerouting, output *nftables.Chain, err error) { + chains, err := conn.ListChainsOfTableFamily(fam) + if err != nil { + return nil, nil, err + } + for _, ch := range chains { + if ch.Table.Name != "mangle" { + continue + } + if ch.Name == "PREROUTING" { + prerouting = ch + } else if ch.Name == "OUTPUT" { + output = ch + } + } + return prerouting, output, nil + } + + // Check initial state - mangle chains might not exist yet + prerouting4Before, output4Before, _ := getMangleChains(nftables.TableFamilyIPv4) + prerouting6Before, output6Before, _ := getMangleChains(nftables.TableFamilyIPv6) + + var prerouting4RulesBefore, output4RulesBefore, prerouting6RulesBefore, output6RulesBefore int + if prerouting4Before != nil { + rules, _ := conn.GetRules(prerouting4Before.Table, prerouting4Before) + prerouting4RulesBefore = len(rules) + } + if output4Before != nil { + rules, _ := conn.GetRules(output4Before.Table, output4Before) + output4RulesBefore = len(rules) + } + if prerouting6Before != nil { + rules, _ := conn.GetRules(prerouting6Before.Table, prerouting6Before) + prerouting6RulesBefore = len(rules) + } + if output6Before != nil { + rules, _ := conn.GetRules(output6Before.Table, output6Before) + output6RulesBefore = len(rules) + } + + // Add connmark rules + if err := runner.AddConnmarkSaveRule(); err != nil { + t.Fatalf("AddConnmarkSaveRule() failed: %v", err) + } + + // Verify rules were added + prerouting4After, output4After, err := getMangleChains(nftables.TableFamilyIPv4) + if err != nil { + t.Fatalf("Failed to get IPv4 mangle chains: %v", err) + } + if prerouting4After == nil || output4After == nil { + t.Fatal("IPv4 mangle chains not created") + } + + prerouting4Rules, err := 
conn.GetRules(prerouting4After.Table, prerouting4After) + if err != nil { + t.Fatalf("GetRules(PREROUTING) failed: %v", err) + } + output4Rules, err := conn.GetRules(output4After.Table, output4After) + if err != nil { + t.Fatalf("GetRules(OUTPUT) failed: %v", err) + } + + // Should have added 1 rule to each chain + if len(prerouting4Rules) != prerouting4RulesBefore+1 { + t.Fatalf("PREROUTING rules: got %d, want %d", len(prerouting4Rules), prerouting4RulesBefore+1) + } + if len(output4Rules) != output4RulesBefore+1 { + t.Fatalf("OUTPUT rules: got %d, want %d", len(output4Rules), output4RulesBefore+1) + } + + // Verify IPv6 rules + prerouting6After, output6After, err := getMangleChains(nftables.TableFamilyIPv6) + if err != nil { + t.Fatalf("Failed to get IPv6 mangle chains: %v", err) + } + if prerouting6After == nil || output6After == nil { + t.Fatal("IPv6 mangle chains not created") + } + + prerouting6Rules, err := conn.GetRules(prerouting6After.Table, prerouting6After) + if err != nil { + t.Fatalf("GetRules(IPv6 PREROUTING) failed: %v", err) + } + output6Rules, err := conn.GetRules(output6After.Table, output6After) + if err != nil { + t.Fatalf("GetRules(IPv6 OUTPUT) failed: %v", err) + } + + if len(prerouting6Rules) != prerouting6RulesBefore+1 { + t.Fatalf("IPv6 PREROUTING rules: got %d, want %d", len(prerouting6Rules), prerouting6RulesBefore+1) + } + if len(output6Rules) != output6RulesBefore+1 { + t.Fatalf("IPv6 OUTPUT rules: got %d, want %d", len(output6Rules), output6RulesBefore+1) + } + + // Verify the rules contain conntrack expressions + foundCtInPrerouting := false + foundCtInOutput := false + for _, e := range prerouting4Rules[0].Exprs { + if _, ok := e.(*expr.Ct); ok { + foundCtInPrerouting = true + break + } + } + for _, e := range output4Rules[0].Exprs { + if _, ok := e.(*expr.Ct); ok { + foundCtInOutput = true + break + } + } + if !foundCtInPrerouting { + t.Error("PREROUTING rule doesn't contain conntrack expression") + } + if !foundCtInOutput { + 
t.Error("OUTPUT rule doesn't contain conntrack expression") + } + + // Delete connmark rules + if err := runner.DelConnmarkSaveRule(); err != nil { + t.Fatalf("DelConnmarkSaveRule() failed: %v", err) + } + + // Verify rules were deleted + prerouting4After, output4After, _ = getMangleChains(nftables.TableFamilyIPv4) + if prerouting4After != nil { + rules, _ := conn.GetRules(prerouting4After.Table, prerouting4After) + if len(rules) != prerouting4RulesBefore { + t.Fatalf("IPv4 PREROUTING rules after delete: got %d, want %d", len(rules), prerouting4RulesBefore) + } + } + if output4After != nil { + rules, _ := conn.GetRules(output4After.Table, output4After) + if len(rules) != output4RulesBefore { + t.Fatalf("IPv4 OUTPUT rules after delete: got %d, want %d", len(rules), output4RulesBefore) + } + } + + prerouting6After, output6After, _ = getMangleChains(nftables.TableFamilyIPv6) + if prerouting6After != nil { + rules, _ := conn.GetRules(prerouting6After.Table, prerouting6After) + if len(rules) != prerouting6RulesBefore { + t.Fatalf("IPv6 PREROUTING rules after delete: got %d, want %d", len(rules), prerouting6RulesBefore) + } + } + if output6After != nil { + rules, _ := conn.GetRules(output6After.Table, output6After) + if len(rules) != output6RulesBefore { + t.Fatalf("IPv6 OUTPUT rules after delete: got %d, want %d", len(rules), output6RulesBefore) + } + } +} + +// TestMakeConnmarkRestoreExprs tests the nftables expressions for restoring +// marks from conntrack. This is a regression test that ensures the byte encoding +// doesn't change unexpectedly. 
+func TestMakeConnmarkRestoreExprs(t *testing.T) { + // Expected netlink bytes for the restore rule + // Generated by running makeConnmarkRestoreExprs() and capturing the output + want := [][]byte{ + // batch begin + []byte("\x00\x00\x00\x0a"), + // nft add table ip mangle + []byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x08\x00\x02\x00\x00\x00\x00\x00"), + // nft add chain ip mangle PREROUTING { type filter hook prerouting priority mangle; } + []byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x0f\x00\x03\x00\x50\x52\x45\x52\x4f\x55\x54\x49\x4e\x47\x00\x00\x14\x00\x04\x80\x08\x00\x01\x00\x00\x00\x00\x00\x08\x00\x02\x00\xff\xff\xff\x6a\x0b\x00\x07\x00\x66\x69\x6c\x74\x65\x72\x00\x00"), + // nft add rule ip mangle PREROUTING ct state established,related ct mark & 0xff0000 != 0 meta mark set ct mark & 0xff0000 + []byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x0f\x00\x02\x00\x50\x52\x45\x52\x4f\x55\x54\x49\x4e\x47\x00\x00\x1c\x01\x04\x80\x20\x00\x01\x80\x07\x00\x01\x00\x63\x74\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x00\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x06\x00\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x2c\x00\x01\x80\x08\x00\x01\x00\x63\x6d\x70\x00\x20\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x0c\x00\x03\x80\x08\x00\x01\x00\x00\x00\x00\x00\x20\x00\x01\x80\x07\x00\x01\x00\x63\x74\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x00\xff\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x
24\x00\x01\x80\x09\x00\x01\x00\x6d\x65\x74\x61\x00\x00\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x03\x00\x00\x00\x00\x01"), + // batch end + []byte("\x00\x00\x00\x0a"), + } + + testConn := newTestConn(t, want, nil) + table := testConn.AddTable(&nftables.Table{ + Family: nftables.TableFamilyIPv4, + Name: "mangle", + }) + chain := testConn.AddChain(&nftables.Chain{ + Name: "PREROUTING", + Table: table, + Type: nftables.ChainTypeFilter, + Hooknum: nftables.ChainHookPrerouting, + Priority: nftables.ChainPriorityMangle, + }) + testConn.InsertRule(&nftables.Rule{ + Table: table, + Chain: chain, + Exprs: makeConnmarkRestoreExprs(), + }) + if err := testConn.Flush(); err != nil { + t.Fatalf("Flush() failed: %v", err) + } +} + +// TestMakeConnmarkSaveExprs tests the nftables expressions for saving marks +// to conntrack. This is a regression test that ensures the byte encoding +// doesn't change unexpectedly. +func TestMakeConnmarkSaveExprs(t *testing.T) { + // Expected netlink bytes for the save rule + // Generated by running makeConnmarkSaveExprs() and capturing the output + want := [][]byte{ + // batch begin + []byte("\x00\x00\x00\x0a"), + // nft add table ip mangle + []byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x08\x00\x02\x00\x00\x00\x00\x00"), + // nft add chain ip mangle OUTPUT { type route hook output priority mangle; } + []byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x0b\x00\x03\x00\x4f\x55\x54\x50\x55\x54\x00\x00\x14\x00\x04\x80\x08\x00\x01\x00\x00\x00\x00\x03\x08\x00\x02\x00\xff\xff\xff\x6a\x0a\x00\x07\x00\x72\x6f\x75\x74\x65\x00\x00\x00"), + // nft add rule ip mangle OUTPUT ct state new meta mark & 0xff0000 != 0 ct mark set meta mark & 0xff0000 + 
[]byte("\x02\x00\x00\x00\x0b\x00\x01\x00\x6d\x61\x6e\x67\x6c\x65\x00\x00\x0b\x00\x02\x00\x4f\x55\x54\x50\x55\x54\x00\x00\xb0\x01\x04\x80\x20\x00\x01\x80\x07\x00\x01\x00\x63\x74\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x00\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x08\x00\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x2c\x00\x01\x80\x08\x00\x01\x00\x63\x6d\x70\x00\x20\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x0c\x00\x03\x80\x08\x00\x01\x00\x00\x00\x00\x00\x24\x00\x01\x80\x09\x00\x01\x00\x6d\x65\x74\x61\x00\x00\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x00\xff\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x2c\x00\x01\x80\x08\x00\x01\x00\x63\x6d\x70\x00\x20\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x0c\x00\x03\x80\x08\x00\x01\x00\x00\x00\x00\x00\x24\x00\x01\x80\x09\x00\x01\x00\x6d\x65\x74\x61\x00\x00\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x01\x00\x00\x00\x00\x01\x44\x00\x01\x80\x0c\x00\x01\x00\x62\x69\x74\x77\x69\x73\x65\x00\x34\x00\x02\x80\x08\x00\x01\x00\x00\x00\x00\x01\x08\x00\x02\x00\x00\x00\x00\x01\x08\x00\x03\x00\x00\x00\x00\x04\x0c\x00\x04\x80\x08\x00\x01\x00\x00\xff\x00\x00\x0c\x00\x05\x80\x08\x00\x01\x00\x00\x00\x00\x00\x20\x00\x01\x80\x07\x00\x01\x00\x63\x74\x00\x00\x14\x00\x02\x80\x08\x00\x02\x00\x00\x00\x00\x03\x08\x00\x04\x00\x00\x00\x00\x01"), + // batch end + []byte("\x00\x00\x00\x0a"), + } + + testConn := newTestConn(t, want, nil) + table := testConn.AddTable(&nftables.Table{ + Family: 
nftables.TableFamilyIPv4, + Name: "mangle", + }) + chain := testConn.AddChain(&nftables.Chain{ + Name: "OUTPUT", + Table: table, + Type: nftables.ChainTypeRoute, + Hooknum: nftables.ChainHookOutput, + Priority: nftables.ChainPriorityMangle, + }) + testConn.InsertRule(&nftables.Rule{ + Table: table, + Chain: chain, + Exprs: makeConnmarkSaveExprs(), + }) + if err := testConn.Flush(); err != nil { + t.Fatalf("Flush() failed: %v", err) + } +} diff --git a/wgengine/router/osrouter/router_linux.go b/wgengine/router/osrouter/router_linux.go index 8ca38f9ecd15d..3c261c9120785 100644 --- a/wgengine/router/osrouter/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -86,6 +86,7 @@ type linuxRouter struct { localRoutes map[netip.Prefix]bool snatSubnetRoutes bool statefulFiltering bool + connmarkEnabled bool // whether connmark rules are currently enabled netfilterMode preftype.NetfilterMode netfilterKind string magicsockPortV4 uint16 @@ -370,6 +371,12 @@ func (r *linuxRouter) Close() error { r.unregNetMon() } r.eventClient.Close() + + // Clean up connmark rules + if err := r.nfr.DelConnmarkSaveRule(); err != nil { + r.logf("warning: failed to delete connmark rules: %v", err) + } + if err := r.downInterface(); err != nil { return err } @@ -479,6 +486,35 @@ func (r *linuxRouter) Set(cfg *router.Config) error { r.statefulFiltering = cfg.StatefulFiltering r.updateStatefulFilteringWithDockerWarning(cfg) + // Connmark rules for rp_filter compatibility. + // Always enabled when netfilter is ON to handle all rp_filter=1 scenarios + // (normal operation, exit nodes, subnet routers, and clients using exit nodes). + netfilterOn := cfg.NetfilterMode == netfilterOn + switch { + case netfilterOn == r.connmarkEnabled: + // state already correct, nothing to do. 
+ case netfilterOn: + r.logf("enabling connmark-based rp_filter workaround") + if err := r.nfr.AddConnmarkSaveRule(); err != nil { + r.logf("warning: failed to add connmark rules (rp_filter workaround may not work): %v", err) + errs = append(errs, fmt.Errorf("enabling connmark rules: %w", err)) + } else { + // Only update state on success to keep it in sync with actual rules + r.connmarkEnabled = true + } + default: + r.logf("disabling connmark-based rp_filter workaround") + if err := r.nfr.DelConnmarkSaveRule(); err != nil { + // Deletion errors are only logged, not returned, because: + // 1. Rules may not exist (e.g., first run or after manual deletion) + // 2. Failure to delete is less critical than failure to add + // 3. We still want to update state to attempt re-add on next enable + r.logf("warning: failed to delete connmark rules: %v", err) + } + // Always clear state when disabling, even if delete failed + r.connmarkEnabled = false + } + // Issue 11405: enable IP forwarding on gokrazy. advertisingRoutes := len(cfg.SubnetRoutes) > 0 if getDistroFunc() == distro.Gokrazy && advertisingRoutes { diff --git a/wgengine/router/osrouter/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go index bce0ea09275e3..bae997e331d55 100644 --- a/wgengine/router/osrouter/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -124,6 +124,8 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v4/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE v6/filter/FORWARD -j ts-forward @@ -132,6 +134,8 @@ v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -m conntrack ! --ctstate ESTABLISHED,RELATED -j DROP v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting v6/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE `, @@ -160,6 +164,8 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v4/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE v6/filter/FORWARD -j ts-forward @@ -167,6 +173,8 @@ v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting v6/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE `, @@ -192,12 +200,16 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v6/filter/FORWARD -j ts-forward v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting `, }, @@ -225,12 +237,16 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v6/filter/FORWARD -j ts-forward v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting `, }, @@ -255,12 +271,16 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v6/filter/FORWARD -j ts-forward v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting `, }, @@ -310,12 +330,16 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v6/filter/FORWARD -j ts-forward v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting `, }, @@ -342,12 +366,16 @@ v4/filter/ts-forward -o tailscale0 -j ACCEPT v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v4/nat/POSTROUTING -j ts-postrouting v6/filter/FORWARD -j ts-forward v6/filter/INPUT -j ts-input v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 v6/nat/POSTROUTING -j ts-postrouting `, }, @@ -367,6 +395,120 @@ ip route add 100.100.100.100/32 dev tailscale0 table 52 ip route add throw 10.0.0.0/8 table 52 ip route add throw 192.168.0.0/24 table 52` + basic, }, + { + name: "subnet routes with connmark for rp_filter", + in: &Config{ + LocalAddrs: mustCIDRs("100.101.102.104/10"), + Routes: mustCIDRs("100.100.100.100/32"), + SubnetRoutes: mustCIDRs("10.0.0.0/16"), + SNATSubnetRoutes: true, + NetfilterMode: netfilterOn, + }, + want: ` +up +ip addr add 100.101.102.104/10 dev tailscale0 +ip route add 100.100.100.100/32 dev tailscale0 table 52` + basic + + `v4/filter/FORWARD -j ts-forward +v4/filter/INPUT -j ts-input +v4/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v4/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v4/filter/ts-forward -o tailscale0 -s 100.64.0.0/10 -j DROP +v4/filter/ts-forward -o tailscale0 -j ACCEPT +v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT +v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN +v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/nat/POSTROUTING -j ts-postrouting +v4/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +v6/filter/FORWARD -j ts-forward +v6/filter/INPUT -j ts-input +v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/nat/POSTROUTING -j ts-postrouting +v6/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +`, + }, + { + name: "subnet routes (connmark always enabled)", + in: &Config{ + LocalAddrs: mustCIDRs("100.101.102.104/10"), + Routes: mustCIDRs("100.100.100.100/32"), + SubnetRoutes: mustCIDRs("10.0.0.0/16"), + SNATSubnetRoutes: true, + NetfilterMode: netfilterOn, + }, + want: ` +up +ip addr add 100.101.102.104/10 dev tailscale0 +ip route add 100.100.100.100/32 dev tailscale0 table 52` + basic + + `v4/filter/FORWARD -j ts-forward +v4/filter/INPUT -j ts-input +v4/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v4/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v4/filter/ts-forward -o tailscale0 -s 100.64.0.0/10 -j DROP +v4/filter/ts-forward -o tailscale0 -j ACCEPT +v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT +v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN +v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/nat/POSTROUTING -j ts-postrouting +v4/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +v6/filter/FORWARD -j ts-forward +v6/filter/INPUT -j ts-input +v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/nat/POSTROUTING -j ts-postrouting +v6/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +`, + }, + { + name: "connmark with stateful filtering", + in: &Config{ + LocalAddrs: mustCIDRs("100.101.102.104/10"), + Routes: mustCIDRs("100.100.100.100/32"), + SubnetRoutes: mustCIDRs("10.0.0.0/16"), + SNATSubnetRoutes: true, + StatefulFiltering: true, + NetfilterMode: netfilterOn, + }, + want: ` +up +ip addr add 100.101.102.104/10 dev tailscale0 +ip route add 100.100.100.100/32 dev tailscale0 table 52` + basic + + `v4/filter/FORWARD -j ts-forward +v4/filter/INPUT -j ts-input +v4/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v4/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v4/filter/ts-forward -o tailscale0 -s 100.64.0.0/10 -j DROP +v4/filter/ts-forward -o tailscale0 -m conntrack ! --ctstate ESTABLISHED,RELATED -j DROP +v4/filter/ts-forward -o tailscale0 -j ACCEPT +v4/filter/ts-input -i lo -s 100.101.102.104 -j ACCEPT +v4/filter/ts-input ! -i tailscale0 -s 100.115.92.0/23 -j RETURN +v4/filter/ts-input ! -i tailscale0 -s 100.64.0.0/10 -j DROP +v4/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! 
--mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v4/nat/POSTROUTING -j ts-postrouting +v4/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +v6/filter/FORWARD -j ts-forward +v6/filter/INPUT -j ts-input +v6/filter/ts-forward -i tailscale0 -j MARK --set-mark 0x40000/0xff0000 +v6/filter/ts-forward -m mark --mark 0x40000/0xff0000 -j ACCEPT +v6/filter/ts-forward -o tailscale0 -m conntrack ! --ctstate ESTABLISHED,RELATED -j DROP +v6/filter/ts-forward -o tailscale0 -j ACCEPT +v6/mangle/OUTPUT -m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/mangle/PREROUTING -m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000 +v6/nat/POSTROUTING -j ts-postrouting +v6/nat/ts-postrouting -m mark --mark 0x40000/0xff0000 -j MASQUERADE +`, + }, } bus := eventbus.New() @@ -426,20 +568,24 @@ func newIPTablesRunner(t *testing.T) linuxfw.NetfilterRunner { return &fakeIPTablesRunner{ t: t, ipt4: map[string][]string{ - "filter/INPUT": nil, - "filter/OUTPUT": nil, - "filter/FORWARD": nil, - "nat/PREROUTING": nil, - "nat/OUTPUT": nil, - "nat/POSTROUTING": nil, + "filter/INPUT": nil, + "filter/OUTPUT": nil, + "filter/FORWARD": nil, + "nat/PREROUTING": nil, + "nat/OUTPUT": nil, + "nat/POSTROUTING": nil, + "mangle/PREROUTING": nil, + "mangle/OUTPUT": nil, }, ipt6: map[string][]string{ - "filter/INPUT": nil, - "filter/OUTPUT": nil, - "filter/FORWARD": nil, - "nat/PREROUTING": nil, - "nat/OUTPUT": nil, - "nat/POSTROUTING": nil, + "filter/INPUT": nil, + "filter/OUTPUT": nil, + "filter/FORWARD": nil, + "nat/PREROUTING": nil, + "nat/OUTPUT": nil, + "nat/POSTROUTING": nil, + "mangle/PREROUTING": nil, + "mangle/OUTPUT": nil, }, } } @@ -775,6 +921,38 @@ func (n *fakeIPTablesRunner) DelMagicsockPortRule(port uint16, 
network string) e return nil } +func (n *fakeIPTablesRunner) AddConnmarkSaveRule() error { + // PREROUTING rule: restore mark from conntrack + prerouteRule := "-m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000" + for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { + if err := insertRule(n, ipt, "mangle/PREROUTING", prerouteRule); err != nil { + return err + } + } + + // OUTPUT rule: save mark to conntrack for NEW connections + outputRule := "-m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000" + for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { + if err := insertRule(n, ipt, "mangle/OUTPUT", outputRule); err != nil { + return err + } + } + return nil +} + +func (n *fakeIPTablesRunner) DelConnmarkSaveRule() error { + prerouteRule := "-m conntrack --ctstate ESTABLISHED,RELATED -j CONNMARK --restore-mark --nfmask 0xff0000 --ctmask 0xff0000" + for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { + deleteRule(n, ipt, "mangle/PREROUTING", prerouteRule) // ignore errors + } + + outputRule := "-m conntrack --ctstate NEW -m mark ! --mark 0x0/0xff0000 -j CONNMARK --save-mark --nfmask 0xff0000 --ctmask 0xff0000" + for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { + deleteRule(n, ipt, "mangle/OUTPUT", outputRule) // ignore errors + } + return nil +} + func (n *fakeIPTablesRunner) HasIPV6() bool { return true } func (n *fakeIPTablesRunner) HasIPV6NAT() bool { return true } func (n *fakeIPTablesRunner) HasIPV6Filter() bool { return true } From 2c9ffdd188bd53ce43c8389f42594b2a8be6c390 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Wed, 4 Mar 2026 14:09:19 -0500 Subject: [PATCH 1046/1093] cmd/tailscale,ipn,net/netutil: remove rp_filter strict mode warnings (#18863) PR #18860 adds firewall rules in the mangle table to save outbound packet marks to conntrack and restore them on reply packets before the routing decision. 
When reply packets have their marks restored, the kernel uses the correct routing table (based on the mark) and the packets pass the rp_filter check. This makes the risk check and reverse path filtering warnings unnecessary. Updates #3310 Fixes tailscale/corp#37846 Signed-off-by: Mike O'Driscoll --- client/local/local.go | 19 ------ cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscale/cli/risks.go | 19 ------ cmd/tailscale/cli/set.go | 4 +- cmd/tailscale/cli/up.go | 4 -- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- health/healthmsg/healthmsg.go | 2 - ipn/ipnlocal/local.go | 30 --------- ipn/localapi/localapi.go | 30 --------- net/netutil/ip_forward.go | 104 -------------------------------- net/netutil/netutil_test.go | 21 ------- tsnet/depaware.txt | 2 +- 14 files changed, 6 insertions(+), 237 deletions(-) diff --git a/client/local/local.go b/client/local/local.go index 5794734f27133..a7b8b83b10a77 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -818,25 +818,6 @@ func (lc *Client) CheckUDPGROForwarding(ctx context.Context) error { return nil } -// CheckReversePathFiltering asks the local Tailscale daemon whether strict -// reverse path filtering is enabled, which would break exit node usage on Linux. -func (lc *Client) CheckReversePathFiltering(ctx context.Context) error { - body, err := lc.get200(ctx, "/localapi/v0/check-reverse-path-filtering") - if err != nil { - return err - } - var jres struct { - Warning string - } - if err := json.Unmarshal(body, &jres); err != nil { - return fmt.Errorf("invalid JSON from check-reverse-path-filtering: %w", err) - } - if jres.Warning != "" { - return errors.New(jres.Warning) - } - return nil -} - // SetUDPGROForwarding enables UDP GRO forwarding for the main interface of this // node. This can be done to improve performance of tailnet nodes acting as exit // nodes or subnet routers. 
diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d801c0285ca62..c0cf0fd7cd35e 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -813,7 +813,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator+ tailscale.com/ipn from tailscale.com/client/local+ diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index 058eff1f8501a..1bd128d566125 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -4,13 +4,10 @@ package cli import ( - "context" "errors" "flag" - "runtime" "strings" - "tailscale.com/ipn" "tailscale.com/util/prompt" "tailscale.com/util/testenv" ) @@ -19,7 +16,6 @@ var ( riskTypes []string riskLoseSSH = registerRiskType("lose-ssh") riskMacAppConnector = registerRiskType("mac-app-connector") - riskStrictRPFilter = registerRiskType("linux-strict-rp-filter") riskAll = registerRiskType("all") ) @@ -72,18 +68,3 @@ func presentRiskToUser(riskType, riskMessage, acceptedRisks string) error { return errAborted } - -// checkExitNodeRisk checks if the user is using an exit node on Linux and -// whether reverse path filtering is enabled. If so, it presents a risk message. 
-func checkExitNodeRisk(ctx context.Context, prefs *ipn.Prefs, acceptedRisks string) error { - if runtime.GOOS != "linux" { - return nil - } - if !prefs.ExitNodeIP.IsValid() && prefs.ExitNodeID == "" { - return nil - } - if err := localClient.CheckReversePathFiltering(ctx); err != nil { - return presentRiskToUser(riskStrictRPFilter, err.Error(), acceptedRisks) - } - return nil -} diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 615900833596c..22d78641f38a9 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -193,9 +193,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { } warnOnAdvertiseRoutes(ctx, &maskedPrefs.Prefs) - if err := checkExitNodeRisk(ctx, &maskedPrefs.Prefs, setArgs.acceptedRisks); err != nil { - return err - } + var advertiseExitNodeSet, advertiseRoutesSet bool setFlagSet.Visit(func(f *flag.Flag) { updateMaskedPrefsFromUpOrSetFlag(maskedPrefs, f.Name) diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index d78cb2d44bfb2..79cc60ca2347f 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -543,9 +543,6 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } warnOnAdvertiseRoutes(ctx, prefs) - if err := checkExitNodeRisk(ctx, prefs, upArgs.acceptedRisks); err != nil { - return err - } curPrefs, err := localClient.GetPrefs(ctx) if err != nil { @@ -834,7 +831,6 @@ func upWorthyWarning(s string) bool { return strings.Contains(s, healthmsg.TailscaleSSHOnBut) || strings.Contains(s, healthmsg.WarnAcceptRoutesOff) || strings.Contains(s, healthmsg.LockedOut) || - strings.Contains(s, healthmsg.WarnExitNodeUsage) || strings.Contains(s, healthmsg.InMemoryTailnetLockState) || strings.Contains(strings.ToLower(s), "update available: ") } diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index f97e0368c0d12..fc39a980b7741 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -64,7 +64,7 @@ 
tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 48a7d09495c5f..efd1ea1090ed8 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -309,7 +309,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/ipn from tailscale.com/client/local+ W tailscale.com/ipn/auditlog from tailscale.com/cmd/tailscaled diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 03f7e1f09b21d..5016e568aa334 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -232,7 +232,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/health/healthmsg from 
tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ tailscale.com/ipn from tailscale.com/client/local+ diff --git a/health/healthmsg/healthmsg.go b/health/healthmsg/healthmsg.go index 3de885d53a61a..c6efb0d574db4 100644 --- a/health/healthmsg/healthmsg.go +++ b/health/healthmsg/healthmsg.go @@ -11,7 +11,5 @@ const ( WarnAcceptRoutesOff = "Some peers are advertising routes but --accept-routes is false" TailscaleSSHOnBut = "Tailscale SSH enabled, but " // + ... something from caller LockedOut = "this node is locked out; it will not have connectivity until it is signed. For more info, see https://tailscale.com/s/locked-out" - WarnExitNodeUsage = "The following issues on your machine will likely make usage of exit nodes impossible" - DisableRPFilter = "Please set rp_filter=2 instead of rp_filter=1; see https://github.com/tailscale/tailscale/issues/3310" InMemoryTailnetLockState = "Tailnet Lock state is only being stored in-memory. Set --statedir to store state on disk, which is more secure. See https://tailscale.com/kb/1226/tailnet-lock#tailnet-lock-state" ) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index bae1e66393a4b..9cb86642fa1f0 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -1031,7 +1031,6 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { // If the local network configuration has changed, our filter may // need updating to tweak default routes. 
b.updateFilterLocked(prefs) - updateExitNodeUsageWarning(prefs, delta.CurrentState(), b.health) if buildfeatures.HasPeerAPIServer { cn := b.currentNode() @@ -4213,35 +4212,6 @@ func (b *LocalBackend) isDefaultServerLocked() bool { return prefs.ControlURLOrDefault(b.polc) == ipn.DefaultControlURL } -var exitNodeMisconfigurationWarnable = health.Register(&health.Warnable{ - Code: "exit-node-misconfiguration", - Title: "Exit node misconfiguration", - Severity: health.SeverityMedium, - Text: func(args health.Args) string { - return "Exit node misconfiguration: " + args[health.ArgError] - }, -}) - -// updateExitNodeUsageWarning updates a warnable meant to notify users of -// configuration issues that could break exit node usage. -func updateExitNodeUsageWarning(p ipn.PrefsView, state *netmon.State, healthTracker *health.Tracker) { - if !buildfeatures.HasUseExitNode { - return - } - var msg string - if p.ExitNodeIP().IsValid() || p.ExitNodeID() != "" { - warn, _ := netutil.CheckReversePathFiltering(state) - if len(warn) > 0 { - msg = fmt.Sprintf("%s: %v, %s", healthmsg.WarnExitNodeUsage, warn, healthmsg.DisableRPFilter) - } - } - if len(msg) > 0 { - healthTracker.SetUnhealthy(exitNodeMisconfigurationWarnable, health.Args{health.ArgError: msg}) - } else { - healthTracker.SetHealthy(exitNodeMisconfigurationWarnable) - } -} - func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error { tryingToUseExitNode := p.ExitNodeIP.IsValid() || p.ExitNodeID != "" if !tryingToUseExitNode { diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index dc558b36e61d9..ed25e875da409 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -28,7 +28,6 @@ import ( "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/feature/buildfeatures" - "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" @@ -100,9 +99,6 @@ func init() { Register("check-udp-gro-forwarding", 
(*Handler).serveCheckUDPGROForwarding) Register("set-udp-gro-forwarding", (*Handler).serveSetUDPGROForwarding) } - if buildfeatures.HasUseExitNode && runtime.GOOS == "linux" { - Register("check-reverse-path-filtering", (*Handler).serveCheckReversePathFiltering) - } if buildfeatures.HasClientMetrics { Register("upload-client-metrics", (*Handler).serveUploadClientMetrics) } @@ -780,32 +776,6 @@ func (h *Handler) serveCheckSOMarkInUse(w http.ResponseWriter, r *http.Request) }) } -func (h *Handler) serveCheckReversePathFiltering(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "reverse path filtering check access denied", http.StatusForbidden) - return - } - var warning string - - state := h.b.Sys().NetMon.Get().InterfaceState() - warn, err := netutil.CheckReversePathFiltering(state) - if err == nil && len(warn) > 0 { - var msg strings.Builder - msg.WriteString(healthmsg.WarnExitNodeUsage + ":\n") - for _, w := range warn { - msg.WriteString("- " + w + "\n") - } - msg.WriteString(healthmsg.DisableRPFilter) - warning = msg.String() - } - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(struct { - Warning string - }{ - Warning: warning, - }) -} - func (h *Handler) serveCheckUDPGROForwarding(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { http.Error(w, "UDP GRO forwarding check access denied", http.StatusForbidden) diff --git a/net/netutil/ip_forward.go b/net/netutil/ip_forward.go index 0711953f52e68..bc0f1961dfcbc 100644 --- a/net/netutil/ip_forward.go +++ b/net/netutil/ip_forward.go @@ -5,7 +5,6 @@ package netutil import ( "bytes" - "errors" "fmt" "net/netip" "os" @@ -146,64 +145,6 @@ func CheckIPForwarding(routes []netip.Prefix, state *netmon.State) (warn, err er return nil, nil } -// CheckReversePathFiltering reports whether reverse path filtering is either -// disabled or set to 'loose' mode for exit node functionality on any -// interface. 
-// -// The routes should only be advertised routes, and should not contain the -// node's Tailscale IPs. -// -// This function returns an error if it is unable to determine whether reverse -// path filtering is enabled, or a warning describing configuration issues if -// reverse path fitering is non-functional or partly functional. -func CheckReversePathFiltering(state *netmon.State) (warn []string, err error) { - if runtime.GOOS != "linux" { - return nil, nil - } - - if state == nil { - return nil, errors.New("no link state") - } - - // The kernel uses the maximum value for rp_filter between the 'all' - // setting and each per-interface config, so we need to fetch both. - allSetting, err := reversePathFilterValueLinux("all") - if err != nil { - return nil, fmt.Errorf("reading global rp_filter value: %w", err) - } - - const ( - filtOff = 0 - filtStrict = 1 - filtLoose = 2 - ) - - // Because the kernel use the max rp_filter value, each interface will use 'loose', so we - // can abort early. - if allSetting == filtLoose { - return nil, nil - } - - for _, iface := range state.Interface { - if iface.IsLoopback() { - continue - } - - iSetting, err := reversePathFilterValueLinux(iface.Name) - if err != nil { - return nil, fmt.Errorf("reading interface rp_filter value for %q: %w", iface.Name, err) - } - // Perform the same max() that the kernel does - if allSetting > iSetting { - iSetting = allSetting - } - if iSetting == filtStrict { - warn = append(warn, fmt.Sprintf("interface %q has strict reverse-path filtering enabled", iface.Name)) - } - } - return warn, nil -} - // ipForwardSysctlKey returns the sysctl key for the given protocol and iface. // When the dotFormat parameter is true the output is formatted as `net.ipv4.ip_forward`, // else it is `net/ipv4/ip_forward` @@ -235,25 +176,6 @@ func ipForwardSysctlKey(format sysctlFormat, p protocol, iface string) string { return fmt.Sprintf(k, iface) } -// rpFilterSysctlKey returns the sysctl key for the given iface. 
-// -// Format controls whether the output is formatted as -// `net.ipv4.conf.iface.rp_filter` or `net/ipv4/conf/iface/rp_filter`. -func rpFilterSysctlKey(format sysctlFormat, iface string) string { - // No iface means all interfaces - if iface == "" { - iface = "all" - } - - k := "net/ipv4/conf/%s/rp_filter" - if format == dotFormat { - // Swap the delimiters. - iface = strings.ReplaceAll(iface, ".", "/") - k = strings.ReplaceAll(k, "/", ".") - } - return fmt.Sprintf(k, iface) -} - type sysctlFormat int const ( @@ -305,32 +227,6 @@ func ipForwardingEnabledLinux(p protocol, iface string) (bool, error) { return on, nil } -// reversePathFilterValueLinux reports the reverse path filter setting on Linux -// for the given interface. -// -// The iface param determines which interface to check against; the empty -// string means to check the global config. -// -// This function tries to look up the value directly from `/proc/sys`, and -// falls back to using the `sysctl` command on failure. -func reversePathFilterValueLinux(iface string) (int, error) { - k := rpFilterSysctlKey(slashFormat, iface) - bs, err := os.ReadFile(filepath.Join("/proc/sys", k)) - if err != nil { - // Fall back to the sysctl command - k := rpFilterSysctlKey(dotFormat, iface) - bs, err = exec.Command("sysctl", "-n", k).Output() - if err != nil { - return -1, fmt.Errorf("couldn't check %s (%v)", k, err) - } - } - v, err := strconv.Atoi(string(bytes.TrimSpace(bs))) - if err != nil { - return -1, fmt.Errorf("couldn't parse %s (%v)", k, err) - } - return v, nil -} - func ipForwardingEnabledSunOS(p protocol, iface string) (bool, error) { var proto string if p == ipv4 { diff --git a/net/netutil/netutil_test.go b/net/netutil/netutil_test.go index a512238d5f5ee..2c40d8d9ee68e 100644 --- a/net/netutil/netutil_test.go +++ b/net/netutil/netutil_test.go @@ -8,9 +8,6 @@ import ( "net" "runtime" "testing" - - "tailscale.com/net/netmon" - "tailscale.com/util/eventbus" ) type conn struct { @@ -68,21 +65,3 @@ func 
TestIPForwardingEnabledLinux(t *testing.T) { t.Errorf("got true; want false") } } - -func TestCheckReversePathFiltering(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skipf("skipping on %s", runtime.GOOS) - } - bus := eventbus.New() - defer bus.Close() - - netMon, err := netmon.New(bus, t.Logf) - if err != nil { - t.Fatal(err) - } - defer netMon.Close() - - warn, err := CheckReversePathFiltering(netMon.InterfaceState()) - t.Logf("err: %v", err) - t.Logf("warnings: %v", warn) -} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 8c81aa4d70d5d..b61545d2487ac 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -228,7 +228,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/feature/syspolicy from tailscale.com/logpolicy tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ - tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ tailscale.com/ipn from tailscale.com/client/local+ From 30adf4527b9e55175a3391d04d6d4fbe751791d8 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 4 Mar 2026 09:32:14 -0800 Subject: [PATCH 1047/1093] feature/portlist: address case where poller misses CollectServices updates This is a minimal hacky fix for a case where the portlist poller extension could miss updates to NetMap's CollectServices bool. 
Updates tailscale/corp#36813 Change-Id: I9b50de8ba8b09e4a44f9fbfe90c9df4d8ab4d586 Signed-off-by: Brad Fitzpatrick --- feature/portlist/portlist.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/feature/portlist/portlist.go b/feature/portlist/portlist.go index b651c64cb6afa..4d2908962bca8 100644 --- a/feature/portlist/portlist.go +++ b/feature/portlist/portlist.go @@ -122,6 +122,19 @@ func (e *Extension) runPollLoop() { return } + // Before we do potentially expensive work below (polling what might be + // a ton of ports), double check that we actually need to do it. + // TODO(bradfitz): the onSelfChange, and onChangeProfile hooks above are + // not enough, because CollectServices is a NetMap-level thing and not a + // change to the local self node. We should add an eventbus topic for + // when CollectServices changes probably, or move the CollectServices + // thing into a local node self cap (at least logically, if not on the + // wire) so then the onSelfChange hook would cover it. In the meantime, + // we'll just end up doing some extra checks every PollInterval, which + // is not the end of the world, and fixes the problem of picking up + // changes to CollectServices that come down in the netmap. + e.updateShouldUploadServices() + if !e.shouldUploadServicesAtomic.Load() { continue } From ea1f1616b9099f8ffae43ae2a461f59b90ae8be0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 26 Feb 2026 10:15:58 -0800 Subject: [PATCH 1048/1093] .github/workflows: enable natlab in CI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After fixing the flakey tests in #18811 and #18814 we can enable running the natlab testsuite running on CI generally. 
Fixes #18810 Signed-off-by: Claus Lensbøl --- .github/workflows/natlab-integrationtest.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/natlab-integrationtest.yml b/.github/workflows/natlab-integrationtest.yml index c3821db17f22f..162153cb23293 100644 --- a/.github/workflows/natlab-integrationtest.yml +++ b/.github/workflows/natlab-integrationtest.yml @@ -7,9 +7,15 @@ concurrency: cancel-in-progress: true on: + push: + branches: + - "main" + - "release-branch/*" pull_request: - paths: - - "tstest/integration/nat/nat_test.go" + # all PRs on all branches + merge_group: + branches: + - "main" jobs: natlab-integrationtest: runs-on: ubuntu-latest From 26951a1cbbabdcc09782007b3dce38ed82dfe585 Mon Sep 17 00:00:00 2001 From: "M. J. Fromberger" Date: Wed, 4 Mar 2026 15:13:30 -0800 Subject: [PATCH 1049/1093] ipn/ipnlocal: skip writing netmaps to disk when disabled (#18883) We use the TS_USE_CACHED_NETMAP knob to condition loading a cached netmap, but were hitherto writing the map out to disk even when it was disabled. Let's not do that; the two should travel together. Updates #12639 Change-Id: Iee5aa828e2c59937d5b95093ea1ac26c9536721e Signed-off-by: M. J. 
Fromberger --- ipn/ipnlocal/local.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9cb86642fa1f0..ec16f6a80aff6 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6241,8 +6241,10 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { var login string if nm != nil { login = cmp.Or(profileFromView(nm.UserProfiles[nm.User()]).LoginName, "") - if err := b.writeNetmapToDiskLocked(nm); err != nil { - b.logf("write netmap to cache: %v", err) + if envknob.Bool("TS_USE_CACHED_NETMAP") { + if err := b.writeNetmapToDiskLocked(nm); err != nil { + b.logf("write netmap to cache: %v", err) + } } } b.currentNode().SetNetMap(nm) From d58bfb8a1b519afffa6796d16f49b9de7c4fef8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Pa=C3=B1eda?= Date: Wed, 4 Mar 2026 17:51:01 +0100 Subject: [PATCH 1050/1093] net/udprelay: use GOMAXPROCS instead of NumCPU for socket count MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit runtime.NumCPU() returns the number of CPUs on the host, which in containerized environments is the node's CPU count rather than the container's CPU limit. This causes excessive memory allocation in pods with low CPU requests running on large nodes, as each socket's packetReadLoop allocates significant buffer memory. Use runtime.GOMAXPROCS(0) instead, which is container-aware since Go 1.25 and respects CPU limits set via cgroups. Fixes #18774 Signed-off-by: Daniel Pañeda --- net/udprelay/server.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 3d870904493ec..03d8e3dc3050d 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -651,8 +651,9 @@ func trySetSOMark(logf logger.Logf, netMon *netmon.Monitor, network, address str // single packet syscall operations. 
func (s *Server) bindSockets(desiredPort uint16) error { // maxSocketsPerAF is a conservative starting point, but is somewhat - // arbitrary. - maxSocketsPerAF := min(16, runtime.NumCPU()) + // arbitrary. Use GOMAXPROCS rather than NumCPU as it is container-aware + // and respects CPU limits/quotas set via cgroups. + maxSocketsPerAF := min(16, runtime.GOMAXPROCS(0)) listenConfig := &net.ListenConfig{ Control: func(network, address string, c syscall.RawConn) error { trySetReusePort(network, address, c) From 87bf76de89a81c540103c05009d4be07158ea2a4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 4 Mar 2026 21:30:02 -0800 Subject: [PATCH 1051/1093] net/porttrack: change magic listen address format for Go 1.26 Go 1.26's url.Parser is stricter and made our tests elsewhere fail with this scheme because when these listen addresses get shoved into a URL, it can't parse back out. I verified this makes tests elsewhere pass with Go 1.26. Updates #18682 Change-Id: I04dd3cee591aa85a9417a0bbae2b6f699d8302fa Signed-off-by: Brad Fitzpatrick --- net/porttrack/porttrack.go | 42 +++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/net/porttrack/porttrack.go b/net/porttrack/porttrack.go index 822e7200e19e7..f71154f78e631 100644 --- a/net/porttrack/porttrack.go +++ b/net/porttrack/porttrack.go @@ -9,9 +9,9 @@ // // The magic address format is: // -// testport-report:HOST:PORT/LABEL +// testport-report-LABEL:PORT // -// where HOST:PORT is the collector's TCP address and LABEL identifies +// where localhost:PORT is the collector's TCP address and LABEL identifies // which listener this is (e.g. "main", "plaintext"). // // When [Listen] is called with a non-magic address, it falls through to @@ -31,17 +31,18 @@ import ( "tailscale.com/util/testenv" ) -const magicPrefix = "testport-report:" +const magicPrefix = "testport-report-" // Collector is the parent/test side of the porttrack protocol. 
It // listens for port reports from child processes that used [Listen] // with a magic address obtained from [Collector.Addr]. type Collector struct { - ln net.Listener - mu sync.Mutex - cond *sync.Cond - ports map[string]int - err error // non-nil if a context passed to Port was cancelled + ln net.Listener + lnPort int + mu sync.Mutex + cond *sync.Cond + ports map[string]int + err error // non-nil if a context passed to Port was cancelled } // NewCollector creates a new Collector. The collector's TCP listener is @@ -53,8 +54,9 @@ func NewCollector(t testenv.TB) *Collector { t.Fatalf("porttrack.NewCollector: %v", err) } c := &Collector{ - ln: ln, - ports: make(map[string]int), + ln: ln, + lnPort: ln.Addr().(*net.TCPAddr).Port, + ports: make(map[string]int), } c.cond = sync.NewCond(&c.mu) go c.accept(t) @@ -100,7 +102,14 @@ func (c *Collector) handleConn(t testenv.TB, conn net.Conn) { // causes the child to bind to localhost:0 and report its actual port // back to this collector under the given label. func (c *Collector) Addr(label string) string { - return magicPrefix + c.ln.Addr().String() + "/" + label + for _, c := range label { + switch { + case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9', c == '-': + default: + panic(fmt.Sprintf("invalid label %q: only letters, digits, and hyphens are allowed", label)) + } + } + return fmt.Sprintf("%s%s:%d", magicPrefix, label, c.lnPort) } // Port blocks until the child process has reported the port for the @@ -145,13 +154,11 @@ func Listen(network, address string) (net.Listener, error) { return net.Listen(network, address) } - // rest is "HOST:PORT/LABEL" - slashIdx := strings.LastIndex(rest, "/") - if slashIdx < 0 { - return nil, fmt.Errorf("porttrack: malformed magic address %q: missing /LABEL", address) + // rest is LABEL:PORT. 
+ label, collectorPort, ok := strings.Cut(rest, ":") + if !ok { + return nil, fmt.Errorf("porttrack: malformed magic address %q: missing :PORT", address) } - collectorAddr := rest[:slashIdx] - label := rest[slashIdx+1:] ln, err := net.Listen(network, "localhost:0") if err != nil { @@ -160,6 +167,7 @@ func Listen(network, address string) (net.Listener, error) { port := ln.Addr().(*net.TCPAddr).Port + collectorAddr := net.JoinHostPort("localhost", collectorPort) conn, err := net.Dial("tcp", collectorAddr) if err != nil { ln.Close() From d784dcc61bf4e43a610a58feebb2ac693af81f01 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 4 Mar 2026 11:36:08 -0800 Subject: [PATCH 1052/1093] go.toolchain.branch: switch to Go 1.26 Updates #18682 Change-Id: I1eadfab950e55d004484af880a5d8df6893e85e8 Signed-off-by: Brad Fitzpatrick --- .github/workflows/golangci-lint.yml | 4 +-- Dockerfile | 2 +- cmd/derper/depaware.txt | 38 +++++++++++++++------------ cmd/k8s-operator/depaware.txt | 38 +++++++++++++++------------ cmd/stund/depaware.txt | 40 +++++++++++++++++------------ cmd/tailscale/cli/network-lock.go | 4 +-- cmd/tailscale/cli/serve_v2.go | 2 +- cmd/tailscale/cli/status.go | 10 ++++---- cmd/tailscale/depaware.txt | 36 +++++++++++++++----------- cmd/tailscaled/depaware-min.txt | 39 ++++++++++++++++------------ cmd/tailscaled/depaware-minbox.txt | 39 ++++++++++++++++------------ cmd/tailscaled/depaware.txt | 35 ++++++++++++++----------- cmd/tailscaled/deps_test.go | 3 ++- cmd/tsconnect/common.go | 2 +- cmd/tsidp/depaware.txt | 37 ++++++++++++++------------ flake.nix | 4 +-- go.mod | 2 +- go.toolchain.branch | 2 +- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- go.toolchain.version | 2 +- tsnet/depaware.txt | 37 ++++++++++++++------------ tsnet/tsnet_test.go | 2 +- tstest/nettest/nettest.go | 4 +-- version/mkversion/mkversion.go | 2 +- 25 files changed, 219 insertions(+), 169 deletions(-) diff --git a/.github/workflows/golangci-lint.yml 
b/.github/workflows/golangci-lint.yml index 6431a31d698c0..66b8497e65441 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -35,9 +35,9 @@ jobs: cache: true - name: golangci-lint - uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0 + uses: golangci/golangci-lint-action@b7bcab6379029e905e3f389a6bf301f1bc220662 # head as of 2026-03-04 with: - version: v2.4.0 + version: v2.10.1 # Show only new issues if it's a pull request. only-new-issues: true diff --git a/Dockerfile b/Dockerfile index 413a4b8211465..ee12922f2d719 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,7 +36,7 @@ # $ docker exec tailscaled tailscale status -FROM golang:1.25-alpine AS build-env +FROM golang:1.26-alpine AS build-env WORKDIR /go/src/tailscale diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index d04c66eba118e..a0eb4a29e259c 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -1,5 +1,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depaware) + 💣 crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 github.com/axiomhq/hyperloglog from tailscale.com/derp/derpserver @@ -203,7 +204,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/cmd/derper+ vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 - vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ @@ -226,7 +227,7 @@ 
tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -234,13 +235,14 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls - crypto/hkdf from crypto/internal/hpke+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -255,7 +257,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -269,19 +271,21 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ 
crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from golang.org/x/crypto/acme+ @@ -322,9 +326,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -337,14 +340,17 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/runtime/atomic from internal/runtime/exithook+ L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime - internal/runtime/gc from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ - internal/runtime/strconv from internal/runtime/cgroup+ + 
internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ internal/saferio from encoding/asn1 internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ internal/synctest from sync @@ -387,7 +393,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa W os/user from tailscale.com/util/winutil path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ - reflect from crypto/x509+ + reflect from encoding/asn1+ regexp from github.com/prometheus/client_golang/prometheus/internal+ regexp/syntax from regexp runtime from crypto/internal/fips140+ diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index c0cf0fd7cd35e..77739350b199c 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1,5 +1,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/depaware) + 💣 crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ @@ -1050,7 +1051,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 - vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ vendor/golang.org/x/crypto/cryptobyte/asn1 
from crypto/ecdsa+ vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ @@ -1075,7 +1076,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509+ @@ -1084,12 +1085,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/fips140 from crypto/tls/internal/fips140tls+ - crypto/hkdf from crypto/internal/hpke+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -1104,7 +1106,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls+ + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -1118,20 +1120,21 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from 
crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - LD crypto/mlkem from golang.org/x/crypto/ssh + crypto/mlkem from golang.org/x/crypto/ssh+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from github.com/prometheus-community/pro-bing+ @@ -1162,6 +1165,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ go/build/constraint from go/parser go/doc from k8s.io/apimachinery/pkg/runtime go/doc/comment from go/doc + go/internal/scannerhooks from go/parser+ go/parser from k8s.io/apimachinery/pkg/runtime go/scanner from go/ast+ go/token from go/ast+ @@ -1185,9 +1189,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/lazyregexp from go/doc internal/msan from internal/runtime/maps+ internal/nettrace from net+ @@ -1201,14 +1204,17 @@ 
tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/runtime/atomic from internal/runtime/exithook+ L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime - internal/runtime/gc from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ - internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ internal/saferio from debug/pe+ internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ internal/synctest from sync @@ -1255,7 +1261,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ os/user from github.com/godbus/dbus/v5+ path from debug/dwarf+ path/filepath from crypto/x509+ - reflect from crypto/x509+ + reflect from database/sql+ regexp from github.com/davecgh/go-spew/spew+ regexp/syntax from regexp runtime from crypto/internal/fips140+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 7b945dd77ea79..d25974b2df424 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -1,5 +1,6 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depaware) + 💣 crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus github.com/go-json-experiment/json from tailscale.com/types/opt+ @@ -100,7 +101,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar LD golang.org/x/sys/unix from 
github.com/prometheus/procfs+ W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 - vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ @@ -118,12 +119,12 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar bufio from compress/flate+ bytes from bufio+ cmp from slices+ - compress/flate from compress/gzip + compress/flate from compress/gzip+ compress/gzip from google.golang.org/protobuf/internal/impl+ container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -131,13 +132,14 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls - crypto/hkdf from crypto/internal/hpke+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -152,7 +154,7 @@ tailscale.com/cmd/stund dependencies: 
(generated by github.com/tailscale/depawar crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -166,19 +168,21 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from net/http+ @@ -218,9 +222,8 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + 
internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -233,14 +236,17 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/runtime/atomic from internal/runtime/exithook+ L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime - internal/runtime/gc from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ - internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ internal/saferio from encoding/asn1 internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ internal/synctest from sync @@ -280,7 +286,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar os/signal from tailscale.com/cmd/stund path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ - reflect from crypto/x509+ + reflect from encoding/asn1+ regexp from github.com/prometheus/client_golang/prometheus/internal+ regexp/syntax from regexp runtime from crypto/internal/fips140+ diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index d8cff4aca402d..9ec0e1d7fe819 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -224,7 +224,7 @@ func runNetworkLockStatus(ctx context.Context, args []string) error { if nlStatusArgs.json.Value == 1 { return jsonoutput.PrintNetworkLockStatusJSONV1(os.Stdout, st) } else { - return 
fmt.Errorf("unrecognised version: %q", nlStatusArgs.json.Value) + return fmt.Errorf("unrecognised version: %d", nlStatusArgs.json.Value) } } @@ -717,7 +717,7 @@ func printNetworkLockLog(updates []ipnstate.NetworkLockUpdate, out io.Writer, js if jsonSchema.Value == 1 { return jsonoutput.PrintNetworkLockLogJSONV1(out, updates) } else { - return fmt.Errorf("unrecognised version: %q", jsonSchema.Value) + return fmt.Errorf("unrecognised version: %d", jsonSchema.Value) } } diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 06a4ce1bbde3e..840c47ac66dd1 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -219,7 +219,7 @@ var errHelpFunc = func(m serveMode) error { // newServeV2Command returns a new "serve" subcommand using e as its environment. func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { if subcmd != serve && subcmd != funnel { - log.Fatalf("newServeDevCommand called with unknown subcmd %q", subcmd) + log.Fatalf("newServeDevCommand called with unknown subcmd %v", subcmd) } info := infoMap[subcmd] diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 49c565febb9cc..9ce4debda8dea 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -176,13 +176,13 @@ func runStatus(ctx context.Context, args []string) error { } if !ps.Active { if ps.ExitNode { - f("idle; exit node" + offline) + f("idle; exit node%s", offline) } else if ps.ExitNodeOption { - f("idle; offers exit node" + offline) + f("idle; offers exit node%s", offline) } else if anyTraffic { - f("idle" + offline) + f("idle%s", offline) } else if !ps.Online { - f("offline" + lastSeenFmt(ps.LastSeen)) + f("offline%s", lastSeenFmt(ps.LastSeen)) } else { f("-") } @@ -201,7 +201,7 @@ func runStatus(ctx context.Context, args []string) error { f("peer-relay %s", ps.PeerRelay) } if !ps.Online { - f(offline) + f("%s", offline) } } if anyTraffic { diff --git a/cmd/tailscale/depaware.txt 
b/cmd/tailscale/depaware.txt index d83ac2710a897..b4605f9f2e926 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -1,5 +1,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/depaware) + 💣 crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 L fyne.io/systray from tailscale.com/client/systray @@ -352,7 +353,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/cmd/tailscale/cli+ vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 - vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ @@ -377,7 +378,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -385,13 +386,14 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls - crypto/hkdf from crypto/internal/hpke+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ 
crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -406,7 +408,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -420,19 +422,21 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from 
crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from net/http+ @@ -482,9 +486,8 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -497,14 +500,17 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/runtime/atomic from internal/runtime/exithook+ L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime - internal/runtime/gc from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ - internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ internal/saferio from debug/pe+ internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ internal/synctest from sync diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index fc39a980b7741..2ad5cbca7b3af 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -1,5 +1,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + 💣 crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg github.com/gaissmai/bart from tailscale.com/net/ipset+ 
github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ @@ -230,7 +231,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/derp vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 - vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ @@ -248,12 +249,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ - compress/flate from compress/gzip + compress/flate from compress/gzip+ compress/gzip from net/http container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -261,13 +262,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls - crypto/hkdf from crypto/internal/hpke+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from 
crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/fips140+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -282,7 +284,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/hkdf+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/ecdsa+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -296,19 +298,21 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from net/http+ @@ -344,9 +348,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by 
github.com/tailscale/de internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from runtime internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -357,14 +360,16 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/atomic from internal/runtime/exithook+ internal/runtime/cgroup from runtime internal/runtime/exithook from runtime - internal/runtime/gc from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ - internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/pprof/label from runtime internal/runtime/sys from crypto/subtle+ - internal/runtime/syscall from internal/runtime/cgroup+ + internal/runtime/syscall/linux from internal/runtime/cgroup+ internal/saferio from encoding/asn1 internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ internal/synctest from sync @@ -402,7 +407,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de os/user from tailscale.com/ipn/ipnauth+ path from io/fs+ path/filepath from crypto/x509+ - reflect from crypto/x509+ + reflect from encoding/asn1+ runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ slices from crypto/tls+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 8dfa00af75a68..9b09604875446 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -1,5 +1,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + 💣 
crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/allot from github.com/gaissmai/bart/internal/nodes github.com/gaissmai/bart/internal/art from github.com/gaissmai/bart+ @@ -250,7 +251,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/derp vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 - vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ @@ -268,12 +269,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de bufio from compress/flate+ bytes from bufio+ cmp from encoding/json+ - compress/flate from compress/gzip + compress/flate from compress/gzip+ compress/gzip from net/http+ container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509 @@ -281,13 +282,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ - crypto/fips140 from crypto/tls/internal/fips140tls - crypto/hkdf from crypto/internal/hpke+ + crypto/fips140 from crypto/tls/internal/fips140tls+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring 
- crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/fips140+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -302,7 +304,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/hkdf+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/ecdsa+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -316,19 +318,21 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls 
from net/http+ @@ -364,9 +368,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from runtime internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -377,14 +380,16 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/atomic from internal/runtime/exithook+ internal/runtime/cgroup from runtime internal/runtime/exithook from runtime - internal/runtime/gc from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ - internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/pprof/label from runtime internal/runtime/sys from crypto/subtle+ - internal/runtime/syscall from internal/runtime/cgroup+ + internal/runtime/syscall/linux from internal/runtime/cgroup+ internal/saferio from encoding/asn1 internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ internal/synctest from sync @@ -423,7 +428,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de os/user from tailscale.com/ipn/ipnauth+ path from io/fs+ path/filepath from crypto/x509+ - reflect from crypto/x509+ + reflect from encoding/asn1+ runtime from crypto/internal/fips140+ runtime/debug from github.com/klauspost/compress/zstd+ slices from crypto/tls+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index efd1ea1090ed8..207d86243b607 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -1,5 +1,6 @@ tailscale.com/cmd/tailscaled 
dependencies: (generated by github.com/tailscale/depaware) + 💣 crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ @@ -546,7 +547,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 - vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ @@ -572,7 +573,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509+ @@ -581,12 +582,13 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/fips140 from crypto/tls/internal/fips140tls+ - crypto/hkdf from crypto/internal/hpke+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from 
crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -601,7 +603,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls+ + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -615,20 +617,21 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - LD crypto/mlkem from golang.org/x/crypto/ssh + crypto/mlkem from golang.org/x/crypto/ssh+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ @@ -672,9 
+675,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -687,14 +689,17 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/atomic from internal/runtime/exithook+ L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime - internal/runtime/gc from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ - internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ internal/saferio from debug/pe+ internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ internal/synctest from sync diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index c7ab01298f223..be4f65a7dd576 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -265,7 +265,6 @@ func TestMinTailscaledWithCLI(t *testing.T) { badSubstrs := []string{ "cbor", "hujson", - "pprof", "multierr", // https://github.com/tailscale/tailscale/pull/17379 "tailscale.com/metrics", "tailscale.com/tsweb/varz", @@ -287,6 +286,8 @@ func TestMinTailscaledWithCLI(t *testing.T) { BadDeps: map[string]string{ "golang.org/x/net/http2": "unexpected x/net/http2 dep; 
tailscale/tailscale#17305", "expvar": "unexpected expvar dep", + "runtime/pprof": "unexpected runtime/pprof dep", + "net/http/pprof": "unexpected net/http/pprof dep", "github.com/mdlayher/genetlink": "unexpected genetlink dep", "tailscale.com/clientupdate": "unexpected clientupdate dep", "filippo.io/edwards25519": "unexpected edwards25519 dep", diff --git a/cmd/tsconnect/common.go b/cmd/tsconnect/common.go index 9daa402692c04..bc9e1ed4ff532 100644 --- a/cmd/tsconnect/common.go +++ b/cmd/tsconnect/common.go @@ -269,7 +269,7 @@ func runWasmOpt(path string) error { return fmt.Errorf("Cannot stat %v: %w", path, err) } startSize := stat.Size() - cmd := exec.Command("../../tool/wasm-opt", "--enable-bulk-memory", "-Oz", path, "-o", path) + cmd := exec.Command("../../tool/wasm-opt", "--enable-bulk-memory", "--enable-nontrapping-float-to-int", "-Oz", path, "-o", path) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr err = cmd.Run() diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 5016e568aa334..bb991383c8a06 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -1,5 +1,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depaware) + 💣 crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ @@ -448,7 +449,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 - vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ 
vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ @@ -473,7 +474,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509+ @@ -482,12 +483,13 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/fips140 from crypto/tls/internal/fips140tls+ - crypto/hkdf from crypto/internal/hpke+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -502,7 +504,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls+ + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -516,20 +518,21 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ 
crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - LD crypto/mlkem from golang.org/x/crypto/ssh + crypto/mlkem from golang.org/x/crypto/ssh+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from github.com/prometheus-community/pro-bing+ @@ -573,9 +576,8 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -588,14 +590,17 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/runtime/atomic from internal/runtime/exithook+ L internal/runtime/cgroup from runtime internal/runtime/exithook from runtime - internal/runtime/gc from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime 
internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ - internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - L internal/runtime/syscall from runtime+ + L internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ internal/saferio from debug/pe+ internal/singleflight from net + internal/strconv from internal/poll+ internal/stringslite from embed+ internal/sync from sync+ internal/synctest from sync @@ -639,7 +644,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar os/user from github.com/godbus/dbus/v5+ path from debug/dwarf+ path/filepath from crypto/x509+ - reflect from crypto/x509+ + reflect from database/sql/driver+ regexp from github.com/huin/goupnp/httpu+ regexp/syntax from regexp runtime from crypto/internal/fips140+ diff --git a/flake.nix b/flake.nix index 64956a97fef55..c9e3b50a1ad73 100644 --- a/flake.nix +++ b/flake.nix @@ -55,7 +55,7 @@ system = system; overlays = [ (final: prev: { - go_1_25 = prev.go_1_25.overrideAttrs { + go_1_26 = prev.go_1_26.overrideAttrs { version = goVersion; src = prev.fetchFromGitHub { owner = "tailscale"; @@ -140,7 +140,7 @@ gotools graphviz perl - go_1_25 + go_1_26 yarn # qemu and e2fsprogs are needed for natlab diff --git a/go.mod b/go.mod index 202ad894bdaff..24c39a4cf3d91 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.7 +go 1.26.0 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.branch b/go.toolchain.branch index a2bebbeb7858e..6022b95593bbe 100644 --- a/go.toolchain.branch +++ b/go.toolchain.branch @@ -1 +1 @@ -tailscale.go1.25 +tailscale.go1.26 diff --git a/go.toolchain.rev b/go.toolchain.rev index 05e37f312da5c..ea3d3c773f779 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -692441891e061f8ae2cb2f8f2c898f86bb1c5dca 
+5b5cb0db47535a0a8d2f450cb1bf83af8e70f164 diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index b7a7163f79f68..34a9b157d33d6 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-gWKrpBTXfsQmgOWoMrbvCaWGsBXCt5X12BAcwfAPMQY= +sha256-f12BE5+H8wHZNKaD6pv9nJJym+1QwxkFNpBtnNcltdc= diff --git a/go.toolchain.version b/go.toolchain.version index f1968aa8818d5..5ff8c4f5d2ad2 100644 --- a/go.toolchain.version +++ b/go.toolchain.version @@ -1 +1 @@ -1.25.7 +1.26.0 diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index b61545d2487ac..cb6b6996b7a87 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -1,5 +1,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) + 💣 crypto/internal/entropy/v1.0.0 from crypto/internal/fips140/drbg filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ @@ -441,7 +442,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 - vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/hpke+ vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ @@ -466,7 +467,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) container/list from crypto/tls+ context from crypto/tls+ crypto from crypto/ecdh+ - crypto/aes from crypto/internal/hpke+ + crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ crypto/dsa from crypto/x509+ @@ 
-475,12 +476,13 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/ed25519 from crypto/tls+ crypto/elliptic from crypto/ecdsa+ crypto/fips140 from crypto/tls/internal/fips140tls+ - crypto/hkdf from crypto/internal/hpke+ + crypto/hkdf from crypto/hpke+ crypto/hmac from crypto/tls+ + crypto/hpke from crypto/tls crypto/internal/boring from crypto/aes+ crypto/internal/boring/bbig from crypto/ecdsa+ crypto/internal/boring/sig from crypto/internal/boring - crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/constanttime from crypto/internal/fips140/edwards25519+ crypto/internal/fips140 from crypto/internal/fips140/aes+ crypto/internal/fips140/aes from crypto/aes+ crypto/internal/fips140/aes/gcm from crypto/cipher+ @@ -495,7 +497,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/internal/fips140/edwards25519/field from crypto/ecdh+ crypto/internal/fips140/hkdf from crypto/internal/fips140/tls13+ crypto/internal/fips140/hmac from crypto/hmac+ - crypto/internal/fips140/mlkem from crypto/tls+ + crypto/internal/fips140/mlkem from crypto/mlkem crypto/internal/fips140/nistec from crypto/elliptic+ crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec crypto/internal/fips140/rsa from crypto/rsa @@ -509,20 +511,21 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140deps/time from crypto/internal/entropy/v1.0.0 crypto/internal/fips140hash from crypto/ecdsa+ crypto/internal/fips140only from crypto/cipher+ - crypto/internal/hpke from crypto/tls crypto/internal/impl from crypto/internal/fips140/aes+ - crypto/internal/randutil from crypto/dsa+ - crypto/internal/sysrand from crypto/internal/entropy+ + 
crypto/internal/rand from crypto/dsa+ + crypto/internal/randutil from crypto/internal/rand + crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - LD crypto/mlkem from golang.org/x/crypto/ssh + crypto/mlkem from golang.org/x/crypto/ssh+ crypto/rand from crypto/ed25519+ crypto/rc4 from crypto/tls+ crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ - crypto/sha3 from crypto/internal/fips140hash + crypto/sha3 from crypto/internal/fips140hash+ crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ crypto/tls from github.com/prometheus-community/pro-bing+ @@ -566,9 +569,8 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from hash/maphash+ + internal/goexperiment from net/http/pprof+ internal/goos from crypto/x509+ - internal/itoa from internal/poll+ internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ @@ -581,14 +583,17 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/runtime/atomic from internal/runtime/exithook+ LA internal/runtime/cgroup from runtime internal/runtime/exithook from runtime - internal/runtime/gc from runtime + internal/runtime/gc from runtime+ + internal/runtime/gc/scan from runtime internal/runtime/maps from reflect+ internal/runtime/math from internal/runtime/maps+ - internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/pprof/label from runtime+ internal/runtime/sys from crypto/subtle+ - LA internal/runtime/syscall from runtime+ + LA internal/runtime/syscall/linux from internal/runtime/cgroup+ + W internal/runtime/syscall/windows from internal/syscall/windows+ internal/saferio from debug/pe+ internal/singleflight from net + internal/strconv from 
internal/poll+ internal/stringslite from embed+ internal/sync from sync+ internal/synctest from sync @@ -631,7 +636,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) os/user from github.com/godbus/dbus/v5+ path from debug/dwarf+ path/filepath from crypto/x509+ - reflect from crypto/x509+ + reflect from database/sql/driver+ regexp from github.com/huin/goupnp/httpu+ regexp/syntax from regexp runtime from crypto/internal/fips140+ diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 266a60f78c5ec..1cf4bf48fe5bd 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -445,7 +445,7 @@ func TestConn(t *testing.T) { for { c, err := ln.Accept() if err != nil { - if ctx.Err() != nil { + if ctx.Err() != nil || errors.Is(err, net.ErrClosed) { return } t.Errorf("s1.Accept: %v", err) diff --git a/tstest/nettest/nettest.go b/tstest/nettest/nettest.go index 0ceef463d8160..cfb0a921904eb 100644 --- a/tstest/nettest/nettest.go +++ b/tstest/nettest/nettest.go @@ -91,8 +91,8 @@ func NewUnstartedHTTPServer(nw netx.Network, handler http.Handler) *httptest.Ser c.Transport = &http.Transport{} } tr := c.Transport.(*http.Transport) - if tr.Dial != nil || tr.DialContext != nil { - panic("unexpected non-nil Dial or DialContext in httptest.Server.Client.Transport") + if tr.Dial != nil { + panic("unexpected non-nil Dial in httptest.Server.Client.Transport") } tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { return nw.Dial(ctx, network, addr) diff --git a/version/mkversion/mkversion.go b/version/mkversion/mkversion.go index f42b3ad036de3..45576e4c1161b 100644 --- a/version/mkversion/mkversion.go +++ b/version/mkversion/mkversion.go @@ -384,7 +384,7 @@ func infoFromCache(ref string, runner dirRunner) (verInfo, error) { } changeCount, err := strconv.Atoi(s) if err != nil { - return verInfo{}, fmt.Errorf("infoFromCache: parsing changeCount %q: %w", changeCount, err) + return verInfo{}, fmt.Errorf("infoFromCache: 
parsing changeCount %q: %w", s, err) } return verInfo{ From faf7f2bc45880cf4abf4bd94d35a3e4a72136ecc Mon Sep 17 00:00:00 2001 From: BeckyPauley <64131207+BeckyPauley@users.noreply.github.com> Date: Thu, 5 Mar 2026 12:09:11 +0000 Subject: [PATCH 1053/1093] cmd/k8s-operator: remove deprecated TS_EXPERIMENTAL_KUBE_API_EVENTS (#18893) Remove the TS_EXPERIMENTAL_KUBE_API_EVENTS env var from the operator and its helm chart. This has already been marked as deprecated, and has been scheduled to be removed in release 1.96. Add a check in helm chart to fail if the removed variable is set to true, prompting users to move to ACLs instead. Fixes: #18875 Signed-off-by: Becky Pauley --- .../deploy/chart/templates/NOTES.txt | 10 ++++++++ k8s-operator/api-proxy/proxy.go | 25 +++---------------- 2 files changed, 13 insertions(+), 22 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/NOTES.txt b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt index 1bee6704616e6..a1a351c5e526a 100644 --- a/cmd/k8s-operator/deploy/chart/templates/NOTES.txt +++ b/cmd/k8s-operator/deploy/chart/templates/NOTES.txt @@ -1,3 +1,13 @@ +{{/* +Fail on presence of removed TS_EXPERIMENTAL_KUBE_API_EVENTS extraEnv var. +*/}} +{{- $removed := "TS_EXPERIMENTAL_KUBE_API_EVENTS" -}} +{{- range .Values.operatorConfig.extraEnv }} + {{- if and .name (eq .name $removed) (eq .value "true") -}} + {{- fail (printf "ERROR: operatorConfig.extraEnv.%s has been removed. Use ACLs instead." $removed) -}} + {{- end -}} +{{- end -}} + You have successfully installed the Tailscale Kubernetes Operator! 
Once connected, the operator should appear as a device within the Tailscale admin console: diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index c4c651b1fb029..cbcad1582e673 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -28,7 +28,6 @@ import ( "k8s.io/client-go/transport" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" - "tailscale.com/envknob" ksr "tailscale.com/k8s-operator/sessionrecording" "tailscale.com/kube/kubetypes" "tailscale.com/net/netx" @@ -43,13 +42,7 @@ import ( var ( // counterNumRequestsproxies counts the number of API server requests proxied via this proxy. counterNumRequestsProxied = clientmetric.NewCounter("k8s_auth_proxy_requests_proxied") - // NOTE: adding this metric so we can keep track of users during deprecation - counterExperimentalEventsVarUsed = clientmetric.NewCounter("ts_experimental_kube_api_events_var_used") - whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) -) - -const ( - eventsEnabledVar = "TS_EXPERIMENTAL_KUBE_API_EVENTS" + whoIsKey = ctxkey.New("", (*apitype.WhoIsResponse)(nil)) ) // NewAPIServerProxy creates a new APIServerProxy that's ready to start once Run @@ -103,7 +96,6 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn upstreamURL: u, ts: ts, sendEventFunc: sessionrecording.SendEvent, - eventsEnabled: envknob.Bool(eventsEnabledVar), } ap.rp = &httputil.ReverseProxy{ Rewrite: func(pr *httputil.ProxyRequest) { @@ -134,11 +126,6 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), } - if ap.eventsEnabled { - counterExperimentalEventsVarUsed.Add(1) - ap.log.Warnf("DEPRECATED: %q environment variable is deprecated, and will be removed in v1.96. 
See documentation for more detail.", eventsEnabledVar) - } - mode := "noauth" if ap.authMode { mode = "auth" @@ -205,10 +192,6 @@ type APIServerProxy struct { upstreamURL *url.URL sendEventFunc func(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error - - // Flag used to enable sending API requests as events to tsrecorder. - // Deprecated: events are now set via ACLs (see https://tailscale.com/kb/1246/tailscale-ssh-session-recording#turn-on-session-recording-in-your-tailnet-policy-file) - eventsEnabled bool } // serveDefault is the default handler for Kubernetes API server requests. @@ -237,8 +220,7 @@ func (ap *APIServerProxy) serveDefault(w http.ResponseWriter, r *http.Request) { return } - // NOTE: (ChaosInTheCRD) ap.eventsEnabled deprecated, remove in v1.96 - if c.enableEvents || ap.eventsEnabled { + if c.enableEvents { if err = ap.recordRequestAsEvent(r, who, c.recorderAddresses, c.failOpen); err != nil { msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) ap.log.Errorf(msg) @@ -308,8 +290,7 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request return } - // NOTE: (ChaosInTheCRD) ap.eventsEnabled deprecated, remove in v1.96 - if c.enableEvents || ap.eventsEnabled { + if c.enableEvents { if err = ap.recordRequestAsEvent(r, who, c.recorderAddresses, c.failOpen); err != nil { msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) ap.log.Errorf(msg) From d82e478dbcae0f01ab1cfa15f900544729ad75a6 Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Wed, 18 Feb 2026 15:36:13 +0100 Subject: [PATCH 1054/1093] cli: `--json` for `tailscale dns status|query` This commit adds `--json` output mode to dns debug commands. It defines structs for the data that is returned from: `tailscale dns status` and `tailscale dns query ` and populates that as it runs the diagnostics. When all the information is collected, it is serialised to JSON or string built into an output and returned to the user. 
The structs are defined and exported to golang consumers of this command can use them for unmarshalling. Updates #13326 Signed-off-by: Kristoffer Dalby --- cmd/tailscale/cli/dns-query.go | 158 +++++++++---- cmd/tailscale/cli/dns-status.go | 337 ++++++++++++++++++---------- cmd/tailscale/cli/dns_test.go | 65 ++++++ cmd/tailscale/cli/jsonoutput/dns.go | 116 ++++++++++ cmd/tailscaled/depaware-minbox.txt | 1 + 5 files changed, 514 insertions(+), 163 deletions(-) create mode 100644 cmd/tailscale/cli/dns_test.go create mode 100644 cmd/tailscale/cli/jsonoutput/dns.go diff --git a/cmd/tailscale/cli/dns-query.go b/cmd/tailscale/cli/dns-query.go index 88a897f21ed8d..2993441b3d2fc 100644 --- a/cmd/tailscale/cli/dns-query.go +++ b/cmd/tailscale/cli/dns-query.go @@ -5,93 +5,165 @@ package cli import ( "context" + "encoding/json" + "errors" "flag" "fmt" "net/netip" - "os" "strings" "text/tabwriter" "github.com/peterbourgon/ff/v3/ffcli" "golang.org/x/net/dns/dnsmessage" - "tailscale.com/types/dnstype" + "tailscale.com/cmd/tailscale/cli/jsonoutput" ) +var dnsQueryArgs struct { + json bool +} + var dnsQueryCmd = &ffcli.Command{ Name: "query", - ShortUsage: "tailscale dns query [a|aaaa|cname|mx|ns|opt|ptr|srv|txt]", + ShortUsage: "tailscale dns query [--json] [type]", Exec: runDNSQuery, ShortHelp: "Perform a DNS query", LongHelp: strings.TrimSpace(` The 'tailscale dns query' subcommand performs a DNS query for the specified name using the internal DNS forwarder (100.100.100.100). -By default, the DNS query will request an A record. Another DNS record type can -be specified as the second parameter. +By default, the DNS query will request an A record. Specify the record type as +a second argument after the name (e.g. AAAA, CNAME, MX, NS, PTR, SRV, TXT). The output also provides information about the resolver(s) used to resolve the query. 
`), + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("query") + fs.BoolVar(&dnsQueryArgs.json, "json", false, "output in JSON format") + return fs + })(), } func runDNSQuery(ctx context.Context, args []string) error { - if len(args) < 1 { - return flag.ErrHelp + if len(args) == 0 { + return errors.New("missing required argument: name") + } + if len(args) > 1 { + var flags []string + for _, a := range args[1:] { + if strings.HasPrefix(a, "-") { + flags = append(flags, a) + } + } + if len(flags) > 0 { + return fmt.Errorf("unexpected flags after query name: %s; see 'tailscale dns query --help'", strings.Join(flags, ", ")) + } + if len(args) > 2 { + return fmt.Errorf("unexpected extra arguments: %s", strings.Join(args[2:], " ")) + } } name := args[0] queryType := "A" - if len(args) >= 2 { - queryType = args[1] + if len(args) > 1 { + queryType = strings.ToUpper(args[1]) } - fmt.Printf("DNS query for %q (%s) using internal resolver:\n", name, queryType) - fmt.Println() - bytes, resolvers, err := localClient.QueryDNS(ctx, name, queryType) + + rawBytes, resolvers, err := localClient.QueryDNS(ctx, name, queryType) if err != nil { - fmt.Printf("failed to query DNS: %v\n", err) - return nil + return fmt.Errorf("failed to query DNS: %w", err) } - if len(resolvers) == 1 { - fmt.Printf("Forwarding to resolver: %v\n", makeResolverString(*resolvers[0])) - } else { - fmt.Println("Multiple resolvers available:") - for _, r := range resolvers { - fmt.Printf(" - %v\n", makeResolverString(*r)) - } + data := &jsonoutput.DNSQueryResult{ + Name: name, + QueryType: queryType, + } + + for _, r := range resolvers { + data.Resolvers = append(data.Resolvers, makeDNSResolverInfo(r)) } - fmt.Println() + var p dnsmessage.Parser - header, err := p.Start(bytes) + header, err := p.Start(rawBytes) if err != nil { - fmt.Printf("failed to parse DNS response: %v\n", err) - return err + return fmt.Errorf("failed to parse DNS response: %w", err) } - fmt.Printf("Response code: %v\n", 
header.RCode.String()) - fmt.Println() + data.ResponseCode = header.RCode.String() + p.SkipAllQuestions() - if header.RCode != dnsmessage.RCodeSuccess { - fmt.Println("No answers were returned.") + + if header.RCode == dnsmessage.RCodeSuccess { + answers, err := p.AllAnswers() + if err != nil { + return fmt.Errorf("failed to parse DNS answers: %w", err) + } + data.Answers = make([]jsonoutput.DNSAnswer, 0, len(answers)) + for _, a := range answers { + data.Answers = append(data.Answers, jsonoutput.DNSAnswer{ + Name: a.Header.Name.String(), + TTL: a.Header.TTL, + Class: a.Header.Class.String(), + Type: a.Header.Type.String(), + Body: makeAnswerBody(a), + }) + } + } + + if dnsQueryArgs.json { + j, err := json.MarshalIndent(data, "", " ") + if err != nil { + return err + } + printf("%s\n", j) return nil } - answers, err := p.AllAnswers() - if err != nil { - fmt.Printf("failed to parse DNS answers: %v\n", err) - return err + printf("%s", formatDNSQueryText(data)) + return nil +} + +func formatDNSQueryText(data *jsonoutput.DNSQueryResult) string { + var sb strings.Builder + + fmt.Fprintf(&sb, "DNS query for %q (%s) using internal resolver:\n", data.Name, data.QueryType) + fmt.Fprintf(&sb, "\n") + if len(data.Resolvers) == 1 { + fmt.Fprintf(&sb, "Forwarding to resolver: %v\n", formatResolverString(data.Resolvers[0])) + } else { + fmt.Fprintf(&sb, "Multiple resolvers available:\n") + for _, r := range data.Resolvers { + fmt.Fprintf(&sb, " - %v\n", formatResolverString(r)) + } } - if len(answers) == 0 { - fmt.Println(" (no answers found)") + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "Response code: %v\n", data.ResponseCode) + fmt.Fprintf(&sb, "\n") + + if data.Answers == nil { + fmt.Fprintf(&sb, "No answers were returned.\n") + return sb.String() } - w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + if len(data.Answers) == 0 { + fmt.Fprintf(&sb, " (no answers found)\n") + } + + w := tabwriter.NewWriter(&sb, 0, 0, 2, ' ', 0) fmt.Fprintln(w, 
"Name\tTTL\tClass\tType\tBody") fmt.Fprintln(w, "----\t---\t-----\t----\t----") - for _, a := range answers { - fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\n", a.Header.Name.String(), a.Header.TTL, a.Header.Class.String(), a.Header.Type.String(), makeAnswerBody(a)) + for _, a := range data.Answers { + fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\n", a.Name, a.TTL, a.Class, a.Type, a.Body) } w.Flush() - fmt.Println() - return nil + fmt.Fprintf(&sb, "\n") + return sb.String() +} + +// formatResolverString formats a jsonoutput.DNSResolverInfo for human-readable text output. +func formatResolverString(r jsonoutput.DNSResolverInfo) string { + if len(r.BootstrapResolution) > 0 { + return fmt.Sprintf("%s (bootstrap: %v)", r.Addr, r.BootstrapResolution) + } + return r.Addr } // makeAnswerBody returns a string with the DNS answer body in a human-readable format. @@ -174,9 +246,3 @@ func makeTXTBody(txt dnsmessage.ResourceBody) string { } return "" } -func makeResolverString(r dnstype.Resolver) string { - if len(r.BootstrapResolution) > 0 { - return fmt.Sprintf("%s (bootstrap: %v)", r.Addr, r.BootstrapResolution) - } - return fmt.Sprintf("%s", r.Addr) -} diff --git a/cmd/tailscale/cli/dns-status.go b/cmd/tailscale/cli/dns-status.go index f63f418281987..66a5e21d89700 100644 --- a/cmd/tailscale/cli/dns-status.go +++ b/cmd/tailscale/cli/dns-status.go @@ -5,6 +5,7 @@ package cli import ( "context" + "encoding/json" "flag" "fmt" "maps" @@ -12,13 +13,15 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/cmd/tailscale/cli/jsonoutput" "tailscale.com/ipn" + "tailscale.com/types/dnstype" "tailscale.com/types/netmap" ) var dnsStatusCmd = &ffcli.Command{ Name: "status", - ShortUsage: "tailscale dns status [--all]", + ShortUsage: "tailscale dns status [--all] [--json]", Exec: runDNSStatus, ShortHelp: "Print the current DNS status and configuration", LongHelp: strings.TrimSpace(` @@ -72,17 +75,30 @@ https://tailscale.com/kb/1054/dns. 
FlagSet: (func() *flag.FlagSet { fs := newFlagSet("status") fs.BoolVar(&dnsStatusArgs.all, "all", false, "outputs advanced debugging information") + fs.BoolVar(&dnsStatusArgs.json, "json", false, "output in JSON format") return fs })(), } // dnsStatusArgs are the arguments for the "dns status" subcommand. var dnsStatusArgs struct { - all bool + all bool + json bool +} + +// makeDNSResolverInfo converts a dnstype.Resolver to a jsonoutput.DNSResolverInfo. +func makeDNSResolverInfo(r *dnstype.Resolver) jsonoutput.DNSResolverInfo { + info := jsonoutput.DNSResolverInfo{Addr: r.Addr} + if r.BootstrapResolution != nil { + info.BootstrapResolution = make([]string, 0, len(r.BootstrapResolution)) + for _, a := range r.BootstrapResolution { + info.BootstrapResolution = append(info.BootstrapResolution, a.String()) + } + } + return info } func runDNSStatus(ctx context.Context, args []string) error { - all := dnsStatusArgs.all s, err := localClient.Status(ctx) if err != nil { return err @@ -92,167 +108,254 @@ func runDNSStatus(ctx context.Context, args []string) error { if err != nil { return err } - enabledStr := "disabled.\n\n(Run 'tailscale set --accept-dns=true' to start sending DNS queries to the Tailscale DNS resolver)" - if prefs.CorpDNS { - enabledStr = "enabled.\n\nTailscale is configured to handle DNS queries on this device.\nRun 'tailscale set --accept-dns=false' to revert to your system default DNS resolver." 
+ + data := &jsonoutput.DNSStatusResult{ + TailscaleDNS: prefs.CorpDNS, } - fmt.Print("\n") - fmt.Println("=== 'Use Tailscale DNS' status ===") - fmt.Print("\n") - fmt.Printf("Tailscale DNS: %s\n", enabledStr) - fmt.Print("\n") - fmt.Println("=== MagicDNS configuration ===") - fmt.Print("\n") - fmt.Println("This is the DNS configuration provided by the coordination server to this device.") - fmt.Print("\n") - if s.CurrentTailnet == nil { - fmt.Println("No tailnet information available; make sure you're logged in to a tailnet.") + + if s.CurrentTailnet != nil { + data.CurrentTailnet = &jsonoutput.DNSTailnetInfo{ + MagicDNSEnabled: s.CurrentTailnet.MagicDNSEnabled, + MagicDNSSuffix: s.CurrentTailnet.MagicDNSSuffix, + SelfDNSName: s.Self.DNSName, + } + + netMap, err := fetchNetMap() + if err != nil { + return fmt.Errorf("failed to fetch network map: %w", err) + } + dnsConfig := netMap.DNS + + for _, r := range dnsConfig.Resolvers { + data.Resolvers = append(data.Resolvers, makeDNSResolverInfo(r)) + } + + data.SplitDNSRoutes = make(map[string][]jsonoutput.DNSResolverInfo) + for k, v := range dnsConfig.Routes { + for _, r := range v { + data.SplitDNSRoutes[k] = append(data.SplitDNSRoutes[k], makeDNSResolverInfo(r)) + } + } + + for _, r := range dnsConfig.FallbackResolvers { + data.FallbackResolvers = append(data.FallbackResolvers, makeDNSResolverInfo(r)) + } + + domains := slices.Clone(dnsConfig.Domains) + slices.Sort(domains) + data.SearchDomains = domains + + for _, a := range dnsConfig.Nameservers { + data.Nameservers = append(data.Nameservers, a.String()) + } + + data.CertDomains = dnsConfig.CertDomains + + for _, er := range dnsConfig.ExtraRecords { + data.ExtraRecords = append(data.ExtraRecords, jsonoutput.DNSExtraRecord{ + Name: er.Name, + Type: er.Type, + Value: er.Value, + }) + } + + data.ExitNodeFilteredSet = dnsConfig.ExitNodeFilteredSet + + osCfg, err := localClient.GetDNSOSConfig(ctx) + if err != nil { + if strings.Contains(err.Error(), "not supported") { + 
data.SystemDNSError = "not supported on this platform" + } else { + data.SystemDNSError = err.Error() + } + } else if osCfg != nil { + data.SystemDNS = &jsonoutput.DNSSystemConfig{ + Nameservers: osCfg.Nameservers, + SearchDomains: osCfg.SearchDomains, + MatchDomains: osCfg.MatchDomains, + } + } + } + + if dnsStatusArgs.json { + j, err := json.MarshalIndent(data, "", " ") + if err != nil { + return err + } + printf("%s\n", j) return nil - } else if s.CurrentTailnet.MagicDNSEnabled { - fmt.Printf("MagicDNS: enabled tailnet-wide (suffix = %s)", s.CurrentTailnet.MagicDNSSuffix) - fmt.Print("\n\n") - fmt.Printf("Other devices in your tailnet can reach this device at %s\n", s.Self.DNSName) + } + printf("%s", formatDNSStatusText(data, dnsStatusArgs.all)) + return nil +} + +func formatDNSStatusText(data *jsonoutput.DNSStatusResult, all bool) string { + var sb strings.Builder + + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "=== 'Use Tailscale DNS' status ===\n") + fmt.Fprintf(&sb, "\n") + if data.TailscaleDNS { + fmt.Fprintf(&sb, "Tailscale DNS: enabled.\n\nTailscale is configured to handle DNS queries on this device.\nRun 'tailscale set --accept-dns=false' to revert to your system default DNS resolver.\n") } else { - fmt.Printf("MagicDNS: disabled tailnet-wide.\n") + fmt.Fprintf(&sb, "Tailscale DNS: disabled.\n\n(Run 'tailscale set --accept-dns=true' to start sending DNS queries to the Tailscale DNS resolver)\n") + } + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "=== MagicDNS configuration ===\n") + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "This is the DNS configuration provided by the coordination server to this device.\n") + fmt.Fprintf(&sb, "\n") + if data.CurrentTailnet == nil { + fmt.Fprintf(&sb, "No tailnet information available; make sure you're logged in to a tailnet.\n") + return sb.String() } - fmt.Print("\n") - netMap, err := fetchNetMap() - if err != nil { - fmt.Printf("Failed to fetch network map: %v\n", err) - return err + if data.CurrentTailnet.MagicDNSEnabled 
{ + fmt.Fprintf(&sb, "MagicDNS: enabled tailnet-wide (suffix = %s)", data.CurrentTailnet.MagicDNSSuffix) + fmt.Fprintf(&sb, "\n\n") + fmt.Fprintf(&sb, "Other devices in your tailnet can reach this device at %s\n", data.CurrentTailnet.SelfDNSName) + } else { + fmt.Fprintf(&sb, "MagicDNS: disabled tailnet-wide.\n") } - dnsConfig := netMap.DNS - fmt.Println("Resolvers (in preference order):") - if len(dnsConfig.Resolvers) == 0 { - fmt.Println(" (no resolvers configured, system default will be used: see 'System DNS configuration' below)") + fmt.Fprintf(&sb, "\n") + + fmt.Fprintf(&sb, "Resolvers (in preference order):\n") + if len(data.Resolvers) == 0 { + fmt.Fprintf(&sb, " (no resolvers configured, system default will be used: see 'System DNS configuration' below)\n") } - for _, r := range dnsConfig.Resolvers { - fmt.Printf(" - %v", r.Addr) + for _, r := range data.Resolvers { + fmt.Fprintf(&sb, " - %v", r.Addr) if r.BootstrapResolution != nil { - fmt.Printf(" (bootstrap: %v)", r.BootstrapResolution) + fmt.Fprintf(&sb, " (bootstrap: %v)", r.BootstrapResolution) } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") } - fmt.Print("\n") - fmt.Println("Split DNS Routes:") - if len(dnsConfig.Routes) == 0 { - fmt.Println(" (no routes configured: split DNS disabled)") + fmt.Fprintf(&sb, "\n") + + fmt.Fprintf(&sb, "Split DNS Routes:\n") + if len(data.SplitDNSRoutes) == 0 { + fmt.Fprintf(&sb, " (no routes configured: split DNS disabled)\n") } - for _, k := range slices.Sorted(maps.Keys(dnsConfig.Routes)) { - v := dnsConfig.Routes[k] - for _, r := range v { - fmt.Printf(" - %-30s -> %v", k, r.Addr) + for _, k := range slices.Sorted(maps.Keys(data.SplitDNSRoutes)) { + for _, r := range data.SplitDNSRoutes[k] { + fmt.Fprintf(&sb, " - %-30s -> %v", k, r.Addr) if r.BootstrapResolution != nil { - fmt.Printf(" (bootstrap: %v)", r.BootstrapResolution) + fmt.Fprintf(&sb, " (bootstrap: %v)", r.BootstrapResolution) } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") } } - fmt.Print("\n") + 
fmt.Fprintf(&sb, "\n") + if all { - fmt.Println("Fallback Resolvers:") - if len(dnsConfig.FallbackResolvers) == 0 { - fmt.Println(" (no fallback resolvers configured)") + fmt.Fprintf(&sb, "Fallback Resolvers:\n") + if len(data.FallbackResolvers) == 0 { + fmt.Fprintf(&sb, " (no fallback resolvers configured)\n") } - for i, r := range dnsConfig.FallbackResolvers { - fmt.Printf(" %d: %v\n", i, r) + for i, r := range data.FallbackResolvers { + fmt.Fprintf(&sb, " %d: %v", i, r.Addr) + if r.BootstrapResolution != nil { + fmt.Fprintf(&sb, " (bootstrap: %v)", r.BootstrapResolution) + } + fmt.Fprintf(&sb, "\n") } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") } - fmt.Println("Search Domains:") - if len(dnsConfig.Domains) == 0 { - fmt.Println(" (no search domains configured)") + + fmt.Fprintf(&sb, "Search Domains:\n") + if len(data.SearchDomains) == 0 { + fmt.Fprintf(&sb, " (no search domains configured)\n") } - domains := dnsConfig.Domains - slices.Sort(domains) - for _, r := range domains { - fmt.Printf(" - %v\n", r) + for _, r := range data.SearchDomains { + fmt.Fprintf(&sb, " - %v\n", r) } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") + if all { - fmt.Println("Nameservers IP Addresses:") - if len(dnsConfig.Nameservers) == 0 { - fmt.Println(" (none were provided)") + fmt.Fprintf(&sb, "Nameservers IP Addresses:\n") + if len(data.Nameservers) == 0 { + fmt.Fprintf(&sb, " (none were provided)\n") } - for _, r := range dnsConfig.Nameservers { - fmt.Printf(" - %v\n", r) + for _, r := range data.Nameservers { + fmt.Fprintf(&sb, " - %v\n", r) } - fmt.Print("\n") - fmt.Println("Certificate Domains:") - if len(dnsConfig.CertDomains) == 0 { - fmt.Println(" (no certificate domains are configured)") + fmt.Fprintf(&sb, "\n") + + fmt.Fprintf(&sb, "Certificate Domains:\n") + if len(data.CertDomains) == 0 { + fmt.Fprintf(&sb, " (no certificate domains are configured)\n") } - for _, r := range dnsConfig.CertDomains { - fmt.Printf(" - %v\n", r) + for _, r := range data.CertDomains { + 
fmt.Fprintf(&sb, " - %v\n", r) } - fmt.Print("\n") - fmt.Println("Additional DNS Records:") - if len(dnsConfig.ExtraRecords) == 0 { - fmt.Println(" (no extra records are configured)") + fmt.Fprintf(&sb, "\n") + + fmt.Fprintf(&sb, "Additional DNS Records:\n") + if len(data.ExtraRecords) == 0 { + fmt.Fprintf(&sb, " (no extra records are configured)\n") } - for _, er := range dnsConfig.ExtraRecords { + for _, er := range data.ExtraRecords { if er.Type == "" { - fmt.Printf(" - %-50s -> %v\n", er.Name, er.Value) + fmt.Fprintf(&sb, " - %-50s -> %v\n", er.Name, er.Value) } else { - fmt.Printf(" - [%s] %-50s -> %v\n", er.Type, er.Name, er.Value) + fmt.Fprintf(&sb, " - [%s] %-50s -> %v\n", er.Type, er.Name, er.Value) } } - fmt.Print("\n") - fmt.Println("Filtered suffixes when forwarding DNS queries as an exit node:") - if len(dnsConfig.ExitNodeFilteredSet) == 0 { - fmt.Println(" (no suffixes are filtered)") + fmt.Fprintf(&sb, "\n") + + fmt.Fprintf(&sb, "Filtered suffixes when forwarding DNS queries as an exit node:\n") + if len(data.ExitNodeFilteredSet) == 0 { + fmt.Fprintf(&sb, " (no suffixes are filtered)\n") } - for _, s := range dnsConfig.ExitNodeFilteredSet { - fmt.Printf(" - %s\n", s) + for _, s := range data.ExitNodeFilteredSet { + fmt.Fprintf(&sb, " - %s\n", s) } - fmt.Print("\n") + fmt.Fprintf(&sb, "\n") } - fmt.Println("=== System DNS configuration ===") - fmt.Print("\n") - fmt.Println("This is the DNS configuration that Tailscale believes your operating system is using.\nTailscale may use this configuration if 'Override Local DNS' is disabled in the admin console,\nor if no resolvers are provided by the coordination server.") - fmt.Print("\n") - osCfg, err := localClient.GetDNSOSConfig(ctx) - if err != nil { - if strings.Contains(err.Error(), "not supported") { - // avoids showing the HTTP error code which would be odd here - fmt.Println(" (reading the system DNS configuration is not supported on this platform)") + fmt.Fprintf(&sb, "=== System DNS configuration 
===\n") + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "This is the DNS configuration that Tailscale believes your operating system is using.\nTailscale may use this configuration if 'Override Local DNS' is disabled in the admin console,\nor if no resolvers are provided by the coordination server.\n") + fmt.Fprintf(&sb, "\n") + + if data.SystemDNSError != "" { + if strings.Contains(data.SystemDNSError, "not supported") { + fmt.Fprintf(&sb, " (reading the system DNS configuration is not supported on this platform)\n") } else { - fmt.Printf(" (failed to read system DNS configuration: %v)\n", err) + fmt.Fprintf(&sb, " (failed to read system DNS configuration: %s)\n", data.SystemDNSError) } - } else if osCfg == nil { - fmt.Println(" (no OS DNS configuration available)") + } else if data.SystemDNS == nil { + fmt.Fprintf(&sb, " (no OS DNS configuration available)\n") } else { - fmt.Println("Nameservers:") - if len(osCfg.Nameservers) == 0 { - fmt.Println(" (no nameservers found, DNS queries might fail\nunless the coordination server is providing a nameserver)") + fmt.Fprintf(&sb, "Nameservers:\n") + if len(data.SystemDNS.Nameservers) == 0 { + fmt.Fprintf(&sb, " (no nameservers found, DNS queries might fail\nunless the coordination server is providing a nameserver)\n") } - for _, ns := range osCfg.Nameservers { - fmt.Printf(" - %v\n", ns) + for _, ns := range data.SystemDNS.Nameservers { + fmt.Fprintf(&sb, " - %v\n", ns) } - fmt.Print("\n") - fmt.Println("Search domains:") - if len(osCfg.SearchDomains) == 0 { - fmt.Println(" (no search domains found)") + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "Search domains:\n") + if len(data.SystemDNS.SearchDomains) == 0 { + fmt.Fprintf(&sb, " (no search domains found)\n") } - for _, sd := range osCfg.SearchDomains { - fmt.Printf(" - %v\n", sd) + for _, sd := range data.SystemDNS.SearchDomains { + fmt.Fprintf(&sb, " - %v\n", sd) } if all { - fmt.Print("\n") - fmt.Println("Match domains:") - if len(osCfg.MatchDomains) == 0 { - 
fmt.Println(" (no match domains found)") + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "Match domains:\n") + if len(data.SystemDNS.MatchDomains) == 0 { + fmt.Fprintf(&sb, " (no match domains found)\n") } - for _, md := range osCfg.MatchDomains { - fmt.Printf(" - %v\n", md) + for _, md := range data.SystemDNS.MatchDomains { + fmt.Fprintf(&sb, " - %v\n", md) } } } - fmt.Print("\n") - fmt.Println("[this is a preliminary version of this command; the output format may change in the future]") - return nil + fmt.Fprintf(&sb, "\n") + fmt.Fprintf(&sb, "[this is a preliminary version of this command; the output format may change in the future]\n") + return sb.String() } func fetchNetMap() (netMap *netmap.NetworkMap, err error) { diff --git a/cmd/tailscale/cli/dns_test.go b/cmd/tailscale/cli/dns_test.go new file mode 100644 index 0000000000000..cc01a52702fac --- /dev/null +++ b/cmd/tailscale/cli/dns_test.go @@ -0,0 +1,65 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "strings" + "testing" +) + +func TestRunDNSQueryArgs(t *testing.T) { + tests := []struct { + name string + args []string + wantErr string + }{ + { + name: "no_args", + args: []string{}, + wantErr: "missing required argument: name", + }, + { + name: "flag_after_name", + args: []string{"example.com", "--json"}, + wantErr: "unexpected flags after query name: --json", + }, + { + name: "flag_after_name_and_type", + args: []string{"example.com", "AAAA", "--json"}, + wantErr: "unexpected flags after query name: --json", + }, + { + name: "extra_args_after_type", + args: []string{"example.com", "AAAA", "extra"}, + wantErr: "unexpected extra arguments: extra", + }, + { + name: "multiple_extra_args", + args: []string{"example.com", "AAAA", "extra1", "extra2"}, + wantErr: "unexpected extra arguments: extra1 extra2", + }, + { + name: "non_flag_then_flag", + args: []string{"example.com", "AAAA", "foo", "--json"}, + wantErr: "unexpected flags 
after query name: --json", + }, + { + name: "multiple_misplaced_flags", + args: []string{"example.com", "--json", "--verbose"}, + wantErr: "unexpected flags after query name: --json, --verbose", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := runDNSQuery(context.Background(), tt.args) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("error = %q, want it to contain %q", err.Error(), tt.wantErr) + } + }) + } +} diff --git a/cmd/tailscale/cli/jsonoutput/dns.go b/cmd/tailscale/cli/jsonoutput/dns.go new file mode 100644 index 0000000000000..d9d3cc0bbb3b6 --- /dev/null +++ b/cmd/tailscale/cli/jsonoutput/dns.go @@ -0,0 +1,116 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package jsonoutput + +// DNSResolverInfo is the JSON form of [dnstype.Resolver]. +type DNSResolverInfo struct { + // Addr is a plain IP, IP:port, DoH URL, or HTTP-over-WireGuard URL. + Addr string + + // BootstrapResolution is optional pre-resolved IPs for DoT/DoH + // resolvers whose address is not already an IP. + BootstrapResolution []string `json:",omitempty"` +} + +// DNSExtraRecord is the JSON form of [tailcfg.DNSRecord]. +type DNSExtraRecord struct { + Name string + Type string `json:",omitempty"` // empty means A or AAAA, depending on Value + Value string // typically an IP address +} + +// DNSSystemConfig is the OS DNS configuration as observed by Tailscale, +// mirroring [net/dns.OSConfig]. +type DNSSystemConfig struct { + Nameservers []string `json:",omitzero"` + SearchDomains []string `json:",omitzero"` + + // MatchDomains are DNS suffixes restricting which queries use + // these Nameservers. Empty means Nameservers is the primary + // resolver. + MatchDomains []string `json:",omitzero"` +} + +// DNSTailnetInfo describes MagicDNS configuration for the tailnet, +// combining [ipnstate.TailnetStatus] and [ipnstate.PeerStatus]. 
+type DNSTailnetInfo struct { + // MagicDNSEnabled is whether MagicDNS is enabled for the + // tailnet. The device may still not use it if + // --accept-dns=false. + MagicDNSEnabled bool + + // MagicDNSSuffix is the tailnet's MagicDNS suffix + // (e.g. "tail1234.ts.net"), without surrounding dots. + MagicDNSSuffix string `json:",omitempty"` + + // SelfDNSName is this device's FQDN + // (e.g. "host.tail1234.ts.net."), with trailing dot. + SelfDNSName string `json:",omitempty"` +} + +// DNSStatusResult is the full DNS status collected from the local +// Tailscale daemon. +type DNSStatusResult struct { + // TailscaleDNS is whether the Tailscale DNS configuration is + // installed on this device (the --accept-dns setting). + TailscaleDNS bool + + // CurrentTailnet describes MagicDNS configuration for the tailnet. + CurrentTailnet *DNSTailnetInfo `json:",omitzero"` // nil if not connected + + // Resolvers are the DNS resolvers, in preference order. If + // empty, the system defaults are used. + Resolvers []DNSResolverInfo `json:",omitzero"` + + // SplitDNSRoutes maps domain suffixes to dedicated resolvers. + // An empty resolver slice means the suffix is handled by + // Tailscale's built-in resolver (100.100.100.100). + SplitDNSRoutes map[string][]DNSResolverInfo `json:",omitzero"` + + // FallbackResolvers are like Resolvers but only used when + // split DNS needs explicit default resolvers. + FallbackResolvers []DNSResolverInfo `json:",omitzero"` + + SearchDomains []string `json:",omitzero"` + + // Nameservers are nameserver IPs. + // + // Deprecated: old protocol versions only. Use Resolvers. + Nameservers []string `json:",omitzero"` + + // CertDomains are FQDNs for which the coordination server + // provisions TLS certificates via dns-01 ACME challenges. + CertDomains []string `json:",omitzero"` + + // ExtraRecords contains extra DNS records in the MagicDNS config. 
+ ExtraRecords []DNSExtraRecord `json:",omitzero"` + + // ExitNodeFilteredSet are DNS suffixes this node won't resolve + // when acting as an exit node DNS proxy. Period-prefixed + // entries are suffix matches; others are exact. Always + // lowercase, no trailing dots. + ExitNodeFilteredSet []string `json:",omitzero"` + + SystemDNS *DNSSystemConfig `json:",omitzero"` // nil if unavailable + SystemDNSError string `json:",omitempty"` +} + +// DNSAnswer is a single DNS resource record from a query response. +type DNSAnswer struct { + Name string + TTL uint32 + Class string // e.g. "ClassINET" + Type string // e.g. "TypeA", "TypeAAAA" + Body string // human-readable record data +} + +// DNSQueryResult is the result of a DNS query via the Tailscale +// internal forwarder (100.100.100.100). +type DNSQueryResult struct { + Name string + QueryType string // e.g. "A", "AAAA" + Resolvers []DNSResolverInfo `json:",omitzero"` + ResponseCode string // e.g. "RCodeSuccess", "RCodeNameError" + Answers []DNSAnswer `json:",omitzero"` +} diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 9b09604875446..64911d9318f03 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -54,6 +54,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscaled tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete + tailscale.com/cmd/tailscale/cli/jsonoutput from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ From 1b53c00f2bd10ce99e9d7148a292e344bfd72768 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= 
Date: Thu, 5 Mar 2026 13:39:07 -0500 Subject: [PATCH 1055/1093] clientupdate,net/tstun: add support for OpenWrt 25.12.0 using apk (#18545) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit OpenWrt is changing to using alpine like `apk` for package installation over its previous opkg. Additionally, they are not using the same repo files as alpine making installation fail. Add support for the new repository files and ensure that the required package detection system uses apk. Updates #18535 Signed-off-by: Claus Lensbøl --- clientupdate/clientupdate.go | 59 +++++++++----- clientupdate/clientupdate_test.go | 124 ++++++++++++++++++++++++++++++ net/tstun/tun_linux.go | 34 ++++++-- 3 files changed, 188 insertions(+), 29 deletions(-) diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index 1ed7894bf3d43..d52241483812a 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -661,7 +661,7 @@ func updateYUMRepoTrack(repoFile, dstTrack string) (rewrote bool, err error) { func (up *Updater) updateAlpineLike() (err error) { if up.Version != "" { - return errors.New("installing a specific version on Alpine-based distros is not supported") + return errors.New("installing a specific version on apk-based distros is not supported") } if err := requireRoot(); err != nil { return err @@ -691,7 +691,7 @@ func (up *Updater) updateAlpineLike() (err error) { return fmt.Errorf(`failed to parse latest version from "apk info tailscale": %w`, err) } if !up.confirm(ver) { - if err := checkOutdatedAlpineRepo(up.Logf, ver, up.Track); err != nil { + if err := checkOutdatedAlpineRepo(up.Logf, apkDirPaths, ver, up.Track); err != nil { up.Logf("failed to check whether Alpine release is outdated: %v", err) } return nil @@ -731,9 +731,12 @@ func parseAlpinePackageVersion(out []byte) (string, error) { return "", errors.New("tailscale version not found in output") } -var apkRepoVersionRE = 
regexp.MustCompile(`v[0-9]+\.[0-9]+`) +var ( + apkRepoVersionRE = regexp.MustCompile(`v[0-9]+\.[0-9]+`) + apkDirPaths = []string{"/etc/apk/repositories", "/etc/apk/repositories.d/distfeeds.list"} +) -func checkOutdatedAlpineRepo(logf logger.Logf, apkVer, track string) error { +func checkOutdatedAlpineRepo(logf logger.Logf, filePaths []string, apkVer, track string) error { latest, err := LatestTailscaleVersion(track) if err != nil { return err @@ -742,22 +745,34 @@ func checkOutdatedAlpineRepo(logf logger.Logf, apkVer, track string) error { // Actually on latest release. return nil } - f, err := os.Open("/etc/apk/repositories") - if err != nil { - return err - } - defer f.Close() - // Read the first repo line. Typically, there are multiple repos that all - // contain the same version in the path, like: - // https://dl-cdn.alpinelinux.org/alpine/v3.20/main - // https://dl-cdn.alpinelinux.org/alpine/v3.20/community - s := bufio.NewScanner(f) - if !s.Scan() { - return s.Err() - } - alpineVer := apkRepoVersionRE.FindString(s.Text()) - if alpineVer != "" { - logf("The latest Tailscale release for Linux is %q, but your apk repository only provides %q.\nYour Alpine version is %q, you may need to upgrade the system to get the latest Tailscale version: https://wiki.alpinelinux.org/wiki/Upgrading_Alpine", latest, apkVer, alpineVer) + + // OpenWrt uses a different repo file in repositories.d, check for that as well. + for _, repoFile := range filePaths { + f, err := os.Open(repoFile) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + continue + } else { + return err + } + } + defer f.Close() + // Read the first repo line. 
Typically, there are multiple repos that all + // contain the same version in the path, like: + // https://dl-cdn.alpinelinux.org/alpine/v3.20/main + // https://dl-cdn.alpinelinux.org/alpine/v3.20/community + s := bufio.NewScanner(f) + if !s.Scan() { + if s.Err() != nil { + return s.Err() + } + logf("The latest Tailscale release for Linux is %q, but your apk repository only provides %q.\nYou may need to upgrade your Alpine system to get the latest Tailscale version: https://wiki.alpinelinux.org/wiki/Upgrading_Alpine", latest, apkVer) + } + alpineVer := apkRepoVersionRE.FindString(s.Text()) + if alpineVer != "" { + logf("The latest Tailscale release for Linux is %q, but your apk repository only provides %q.\nYour Alpine version is %q, you may need to upgrade the system to get the latest Tailscale version: https://wiki.alpinelinux.org/wiki/Upgrading_Alpine", latest, apkVer, alpineVer) + } + return nil } return nil } @@ -1246,8 +1261,10 @@ type trackPackages struct { SPKsVersion string } +var tailscaleHTTPEndpoint = "https://pkgs.tailscale.com" + func latestPackages(track string) (*trackPackages, error) { - url := fmt.Sprintf("https://pkgs.tailscale.com/%s/?mode=json&os=%s", track, runtime.GOOS) + url := fmt.Sprintf("%s/%s/?mode=json&os=%s", tailscaleHTTPEndpoint, track, runtime.GOOS) res, err := http.Get(url) if err != nil { return nil, fmt.Errorf("fetching latest tailscale version: %w", err) diff --git a/clientupdate/clientupdate_test.go b/clientupdate/clientupdate_test.go index 7487026355326..13fc8f08a6a2e 100644 --- a/clientupdate/clientupdate_test.go +++ b/clientupdate/clientupdate_test.go @@ -6,9 +6,12 @@ package clientupdate import ( "archive/tar" "compress/gzip" + "encoding/json" "fmt" "io/fs" "maps" + "net/http" + "net/http/httptest" "os" "path/filepath" "slices" @@ -299,6 +302,127 @@ tailscale-1.58.2-r0 installed size: } } +func TestCheckOutdatedAlpineRepo(t *testing.T) { + anyToString := func(a any) string { + str, ok := a.(string) + if !ok { + 
panic("failed to parse param as string") + } + return str + } + + tests := []struct { + name string + fileContent string + latestHTTPVersion string + latestApkVersion string + wantHTTPVersion string + wantApkVersion string + wantAlpineVersion string + track string + }{ + { + name: "Up to date", + fileContent: "https://dl-cdn.alpinelinux.org/alpine/v3.20/main", + latestHTTPVersion: "1.95.3", + latestApkVersion: "1.95.3", + track: "unstable", + }, + { + name: "Behind unstable", + fileContent: "https://dl-cdn.alpinelinux.org/alpine/v3.20/main", + latestHTTPVersion: "1.95.4", + latestApkVersion: "1.95.3", + wantHTTPVersion: "1.95.4", + wantApkVersion: "1.95.3", + wantAlpineVersion: "v3.20", + track: "unstable", + }, + { + name: "Behind stable", + fileContent: "https://dl-cdn.alpinelinux.org/alpine/v2.40/main", + latestHTTPVersion: "1.94.3", + latestApkVersion: "1.92.1", + wantHTTPVersion: "1.94.3", + wantApkVersion: "1.92.1", + wantAlpineVersion: "v2.40", + track: "stable", + }, + { + name: "Nothing in dist file", + fileContent: "", + latestHTTPVersion: "1.94.3", + latestApkVersion: "1.92.1", + wantHTTPVersion: "1.94.3", + wantApkVersion: "1.92.1", + track: "stable", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir, err := os.MkdirTemp("", "example") + if err != nil { + t.Fatalf("error creating temp dir: %v", err) + } + t.Cleanup(func() { os.RemoveAll(dir) }) // clean up + + file := filepath.Join(dir, "distfile") + if err := os.WriteFile(file, []byte(tt.fileContent), 0o666); err != nil { + t.Fatalf("error creating dist file: %v", err) + } + + testServ := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, _ *http.Request) { + version := trackPackages{ + MSIsVersion: tt.latestHTTPVersion, + MacZipsVersion: tt.latestHTTPVersion, + TarballsVersion: tt.latestHTTPVersion, + SPKsVersion: tt.latestHTTPVersion, + } + jsonData, err := json.Marshal(version) + if err != nil { + t.Errorf("failed to marshal version string: %v", err) + 
} + w.Header().Set("Content-Type", "application/json") + if _, err := w.Write(jsonData); err != nil { + t.Errorf("failed to write json blob: %v", err) + } + }, + )) + defer testServ.Close() + + oldEndpoint := tailscaleHTTPEndpoint + tailscaleHTTPEndpoint = testServ.URL + defer func() { tailscaleHTTPEndpoint = oldEndpoint }() + + var paramLatest string + var paramApkVer string + var paramAlpineVer string + logf := func(_ string, params ...any) { + paramLatest = anyToString(params[0]) + paramApkVer = anyToString(params[1]) + if len(params) > 2 { + paramAlpineVer = anyToString(params[2]) + } + } + + err = checkOutdatedAlpineRepo(logf, []string{file}, tt.latestApkVersion, tt.track) + if err != nil { + t.Errorf("did not expect error, got: %v", err) + } + if paramLatest != tt.wantHTTPVersion { + t.Errorf("expected HTTP version '%s', got '%s'", tt.wantHTTPVersion, paramLatest) + } + if paramApkVer != tt.wantApkVersion { + t.Errorf("expected APK version '%s', got '%s'", tt.wantApkVersion, paramApkVer) + } + if paramAlpineVer != tt.wantAlpineVersion { + t.Errorf("expected alpine version '%s', got '%s'", tt.wantAlpineVersion, paramAlpineVer) + } + }) + } +} + func TestSynoArch(t *testing.T) { tests := []struct { goarch string diff --git a/net/tstun/tun_linux.go b/net/tstun/tun_linux.go index 028e0a14b5bd8..fb4a8a415dac7 100644 --- a/net/tstun/tun_linux.go +++ b/net/tstun/tun_linux.go @@ -86,14 +86,32 @@ func diagnoseLinuxTUNFailure(tunName string, logf logger.Logf, createErr error) logf("kernel/drivers/net/tun.ko found on disk, but not for current kernel; are you in middle of a system update and haven't rebooted? 
found: %s", findOut) } case distro.OpenWrt: - out, err := exec.Command("opkg", "list-installed").CombinedOutput() - if err != nil { - logf("error querying OpenWrt installed packages: %s", out) - return - } - for _, pkg := range []string{"kmod-tun", "ca-bundle"} { - if !bytes.Contains(out, []byte(pkg+" - ")) { - logf("Missing required package %s; run: opkg install %s", pkg, pkg) + // OpenWRT switched to using apk as a package manager as of OpenWrt 25.12.0. + // Find out what is used on this system and use that, Maybe we can get rid + // of opkg in the future but for now keep checking. + + if path, err := exec.LookPath("apk"); err == nil && path != "" { + // Test with apk + out, err := exec.Command("apk", "info").CombinedOutput() + if err != nil { + logf("error querying OpenWrt installed packages with apk: %s", out) + return + } + for _, pkg := range []string{"kmod-tun", "ca-bundle"} { + if !bytes.Contains(out, []byte(pkg)) { + logf("Missing required package %s; run: apk add %s", pkg, pkg) + } + } + } else { // Check for package with opkg (legacy) + out, err := exec.Command("opkg", "list-installed").CombinedOutput() + if err != nil { + logf("error querying OpenWrt installed packages with opkg: %s", out) + return + } + for _, pkg := range []string{"kmod-tun", "ca-bundle"} { + if !bytes.Contains(out, []byte(pkg+" - ")) { + logf("Missing required package %s; run: opkg install %s", pkg, pkg) + } } } } From 19e2c8c49f64c2cf7c8ac52fe1d8a01eedf23c0d Mon Sep 17 00:00:00 2001 From: Raj Singh Date: Thu, 5 Mar 2026 13:47:54 -0500 Subject: [PATCH 1056/1093] cmd/k8s-proxy: use L4 TCPForward instead of L7 HTTP proxy (#18179) considerable latency was seen when using k8s-proxy with ProxyGroup in the kubernetes operator. Switching to L4 TCPForward solves this. 
Fixes tailscale#18171 Signed-off-by: chaosinthecrd Co-authored-by: chaosinthecrd --- cmd/k8s-operator/depaware.txt | 2 +- cmd/k8s-proxy/k8s-proxy.go | 22 ++++++++++------------ k8s-operator/api-proxy/proxy.go | 11 ++++++++++- 3 files changed, 21 insertions(+), 14 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 77739350b199c..8718127b6e75f 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -161,7 +161,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/modern-go/reflect2 from github.com/json-iterator/go github.com/munnerz/goautoneg from k8s.io/kube-openapi/pkg/handler3+ github.com/opencontainers/go-digest from github.com/distribution/reference - github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal + github.com/pires/go-proxyproto from tailscale.com/ipn/ipnlocal+ github.com/pkg/errors from github.com/evanphx/json-patch/v5+ github.com/pmezard/go-difflib/difflib from k8s.io/apimachinery/pkg/util/diff D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index e00d43a948dba..38a86a5e0ade5 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -50,6 +50,12 @@ import ( "tailscale.com/tsnet" ) +const ( + // proxyProtocolV2 enables PROXY protocol v2 to preserve original client + // connection info after TLS termination. 
+ proxyProtocolV2 = 2 +) + func main() { encoderCfg := zap.NewProductionEncoderConfig() encoderCfg.EncodeTime = zapcore.RFC3339TimeEncoder @@ -441,24 +447,16 @@ func setServeConfig(ctx context.Context, lc *local.Client, cm *certs.CertManager if err != nil { return fmt.Errorf("error getting local client status: %w", err) } - serviceHostPort := ipn.HostPort(fmt.Sprintf("%s.%s:443", name.WithoutPrefix(), status.CurrentTailnet.MagicDNSSuffix)) + serviceSNI := fmt.Sprintf("%s.%s", name.WithoutPrefix(), status.CurrentTailnet.MagicDNSSuffix) serveConfig := ipn.ServeConfig{ - // Configure for the Service hostname. Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ name: { TCP: map[uint16]*ipn.TCPPortHandler{ 443: { - HTTPS: true, - }, - }, - Web: map[ipn.HostPort]*ipn.WebServerConfig{ - serviceHostPort: { - Handlers: map[string]*ipn.HTTPHandler{ - "/": { - Proxy: "http://localhost:80", - }, - }, + TCPForward: "localhost:80", + TerminateTLS: serviceSNI, + ProxyProtocol: proxyProtocolV2, }, }, }, diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index cbcad1582e673..acc7b62341b83 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -21,6 +21,7 @@ import ( "strings" "time" + "github.com/pires/go-proxyproto" "go.uber.org/zap" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/endpoints/request" @@ -150,10 +151,18 @@ func (ap *APIServerProxy) Run(ctx context.Context) error { } } else { var err error - proxyLn, err = net.Listen("tcp", "localhost:80") + baseLn, err := net.Listen("tcp", "localhost:80") if err != nil { return fmt.Errorf("could not listen on :80: %w", err) } + proxyLn = &proxyproto.Listener{ + Listener: baseLn, + ReadHeaderTimeout: 10 * time.Second, + ConnPolicy: proxyproto.ConnPolicyFunc(func(opts proxyproto.ConnPolicyOptions) (proxyproto.Policy, + error) { + return proxyproto.REQUIRE, nil + }), + } serve = ap.hs.Serve } From c17ec8ce1cde3d17172131a05b249fe4df5562ea Mon Sep 17 00:00:00 2001 
From: Jonathan Nobels Date: Thu, 5 Mar 2026 15:24:48 -0500 Subject: [PATCH 1057/1093] VERSION.txt: this is v1.97.0 (#18898) Signed-off-by: Jonathan Nobels --- VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION.txt b/VERSION.txt index 55f6ae93382d1..acbb747ac540f 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.95.0 +1.97.0 From 9657a9321795dc7aa837da347b453099767c76d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Claus=20Lensb=C3=B8l?= Date: Thu, 5 Mar 2026 16:00:36 -0500 Subject: [PATCH 1058/1093] tstest/natlab: add test for no control and rotated disco key (#18261) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates #12639 Signed-off-by: Claus Lensbøl --- tstest/integration/nat/nat_test.go | 56 ++++++++++++++++++++++-- tstest/natlab/vnet/conf.go | 68 +++++++++++++++++++++++++++--- tstest/natlab/vnet/vnet.go | 44 +++++++++++-------- wgengine/magicsock/magicsock.go | 1 + 4 files changed, 140 insertions(+), 29 deletions(-) diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go index 2322e243a8ee9..1f62436fff341 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -25,6 +25,7 @@ import ( "golang.org/x/mod/modfile" "golang.org/x/sync/errgroup" "tailscale.com/client/tailscale" + "tailscale.com/envknob" "tailscale.com/ipn/ipnstate" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -133,6 +134,24 @@ func easyAnd6(c *vnet.Config) *vnet.Node { vnet.EasyNAT)) } +// easyNoControlDiscoRotate sets up a node with easy NAT, cuts traffic to +// control after connecting, and then rotates the disco key to simulate a newly +// started node (from a disco perspective). 
+func easyNoControlDiscoRotate(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + nw := c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("192.168.%d.1/24", n), + vnet.EasyNAT) + nw.SetPostConnectControlBlackhole(true) + return c.AddNode( + vnet.TailscaledEnv{ + Key: "TS_USE_CACHED_NETMAP", + Value: "true", + }, + vnet.RotateDisco, vnet.PreICMPPing, nw) +} + func v6AndBlackholedIPv4(c *vnet.Config) *vnet.Node { n := c.NumNodes() + 1 nw := c.AddNetwork( @@ -364,7 +383,9 @@ func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { var clients []*vnet.NodeAgentClient for _, n := range nodes { - clients = append(clients, nt.vnet.NodeAgentClient(n)) + client := nt.vnet.NodeAgentClient(n) + n.SetClient(client) + clients = append(clients, client) } sts := make([]*ipnstate.Status, len(nodes)) @@ -415,7 +436,27 @@ func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { return "" } - pingRes, err := ping(ctx, t, clients[0], sts[1].Self.TailscaleIPs[0]) + preICMPPing := false + for _, node := range c.Nodes() { + node.Network().PostConnectedToControl() + if err := node.PostConnectedToControl(ctx); err != nil { + t.Fatalf("post control error: %s", err) + } + if node.PreICMPPing() { + preICMPPing = true + } + } + + // Should we send traffic across the nodes before starting disco? + // For nodes that rotated disco keys after control going away. 
+ if preICMPPing { + _, err := ping(ctx, t, clients[0], sts[1].Self.TailscaleIPs[0], tailcfg.PingICMP) + if err != nil { + t.Fatalf("ICMP ping failure: %v", err) + } + } + + pingRes, err := ping(ctx, t, clients[0], sts[1].Self.TailscaleIPs[0], tailcfg.PingDisco) if err != nil { t.Fatalf("ping failure: %v", err) } @@ -450,12 +491,12 @@ const ( routeNil pingRoute = "nil" // *ipnstate.PingResult is nil ) -func ping(ctx context.Context, t testing.TB, c *vnet.NodeAgentClient, target netip.Addr) (*ipnstate.PingResult, error) { +func ping(ctx context.Context, t testing.TB, c *vnet.NodeAgentClient, target netip.Addr, pType tailcfg.PingType) (*ipnstate.PingResult, error) { var lastRes *ipnstate.PingResult for n := range 10 { t.Logf("ping attempt %d to %v ...", n+1, target) pingCtx, cancel := context.WithTimeout(ctx, 2*time.Second) - pr, err := c.PingWithOpts(pingCtx, target, tailcfg.PingDisco, tailscale.PingOpts{}) + pr, err := c.PingWithOpts(pingCtx, target, pType, tailscale.PingOpts{}) cancel() if err != nil { t.Logf("ping attempt %d error: %v", n+1, err) @@ -529,6 +570,13 @@ func TestEasyEasy(t *testing.T) { nt.want(routeDirect) } +func TestTwoEasyNoControlDiscoRotate(t *testing.T) { + envknob.Setenv("TS_USE_CACHED_NETMAP", "1") + nt := newNatTest(t) + nt.runTest(easyNoControlDiscoRotate, easyNoControlDiscoRotate) + nt.want(routeDirect) +} + // Issue tailscale/corp#26438: use learned DERP route as send path of last // resort // diff --git a/tstest/natlab/vnet/conf.go b/tstest/natlab/vnet/conf.go index 3f83e35c09ba3..33a9bd7e54330 100644 --- a/tstest/natlab/vnet/conf.go +++ b/tstest/natlab/vnet/conf.go @@ -5,6 +5,7 @@ package vnet import ( "cmp" + "context" "fmt" "iter" "net/netip" @@ -114,6 +115,10 @@ func (c *Config) AddNode(opts ...any) *Node { switch o { case HostFirewall: n.hostFW = true + case RotateDisco: + n.rotateDisco = true + case PreICMPPing: + n.preICMPPing = true case VerboseSyslog: n.verboseSyslog = true default: @@ -137,6 +142,8 @@ type NodeOption string 
const ( HostFirewall NodeOption = "HostFirewall" + RotateDisco NodeOption = "RotateDisco" + PreICMPPing NodeOption = "PreICMPPing" VerboseSyslog NodeOption = "VerboseSyslog" ) @@ -197,12 +204,15 @@ func (c *Config) AddNetwork(opts ...any) *Network { // Node is the configuration of a node in the virtual network. type Node struct { - err error - num int // 1-based node number - n *node // nil until NewServer called + err error + num int // 1-based node number + n *node // nil until NewServer called + client *NodeAgentClient env []TailscaledEnv hostFW bool + rotateDisco bool + preICMPPing bool verboseSyslog bool // TODO(bradfitz): this is halfway converted to supporting multiple NICs @@ -243,6 +253,33 @@ func (n *Node) SetVerboseSyslog(v bool) { n.verboseSyslog = v } +func (n *Node) SetClient(c *NodeAgentClient) { + n.client = c +} + +// PostConnectedToControl should be called after the clients have connected to +// control to modify the client behaviour after getting the network maps. +// Currently, the only implemented behavior is rotating disco keys. +func (n *Node) PostConnectedToControl(ctx context.Context) error { + if n.rotateDisco { + if err := n.client.DebugAction(ctx, "rotate-disco-key"); err != nil { + return err + } + } + return nil +} + +// PreICMPPing reports whether node should send an ICMP Ping sent before +// the disco ping. This is important for the nodes having rotated their +// disco keys while control is down. Disco pings deliberately does not +// trigger a TSMPDiscoKeyAdvertisement, making the need for other traffic (here +// simlulated as an ICMP ping) needed first. Any traffic could trigger this key +// exchange, the ICMP Ping is used as a handy existing way of sending some +// non-disco traffic. +func (n *Node) PreICMPPing() bool { + return n.preICMPPing +} + // IsV6Only reports whether this node is only connected to IPv6 networks. 
func (n *Node) IsV6Only() bool { for _, net := range n.nets { @@ -275,10 +312,12 @@ type Network struct { wanIP6 netip.Prefix // global unicast router in host bits; CIDR is /64 delegated to LAN - wanIP4 netip.Addr // IPv4 WAN IP, if any - lanIP4 netip.Prefix - nodes []*Node - breakWAN4 bool // whether to break WAN IPv4 connectivity + wanIP4 netip.Addr // IPv4 WAN IP, if any + lanIP4 netip.Prefix + nodes []*Node + breakWAN4 bool // whether to break WAN IPv4 connectivity + postConnectBlackholeControl bool // whether to break control connectivity after nodes have connected + network *network svcs set.Set[NetworkService] @@ -310,6 +349,12 @@ func (n *Network) SetBlackholedIPv4(v bool) { n.breakWAN4 = v } +// SetPostConnectControlBlackhole sets wether the network should blackhole all +// traffic to the control server after the clients have connected. +func (n *Network) SetPostConnectControlBlackhole(v bool) { + n.postConnectBlackholeControl = v +} + func (n *Network) CanV4() bool { return n.lanIP4.IsValid() || n.wanIP4.IsValid() } @@ -325,6 +370,13 @@ func (n *Network) CanTakeMoreNodes() bool { return len(n.nodes) < 150 } +// PostConnectedToControl should be called after the clients have connected to +// the control server to modify network behaviors. Currently the only +// implemented behavior is to conditionally blackhole traffic to control. +func (n *Network) PostConnectedToControl() { + n.network.SetControlBlackholed(n.postConnectBlackholeControl) +} + // NetworkService is a service that can be added to a network. 
type NetworkService string @@ -390,6 +442,8 @@ func (s *Server) initFromConfig(c *Config) error { } netOfConf[conf] = n s.networks.Add(n) + + conf.network = n if conf.wanIP4.IsValid() { if conf.wanIP4.Is6() { return fmt.Errorf("invalid IPv6 address in wanIP") diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 357fe213c8c28..43d370c61b83c 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -506,23 +506,24 @@ func (nw networkWriter) write(b []byte) { } type network struct { - s *Server - num int // 1-based - mac MAC // of router - portmap bool - lanInterfaceID int - wanInterfaceID int - v4 bool // network supports IPv4 - v6 bool // network support IPv6 - wanIP6 netip.Prefix // router's WAN IPv6, if any, as a /64. - wanIP4 netip.Addr // router's LAN IPv4, if any - lanIP4 netip.Prefix // router's LAN IP + CIDR (e.g. 192.168.2.1/24) - breakWAN4 bool // break WAN IPv4 connectivity - latency time.Duration // latency applied to interface writes - lossRate float64 // probability of dropping a packet (0.0 to 1.0) - nodesByIP4 map[netip.Addr]*node // by LAN IPv4 - nodesByMAC map[MAC]*node - logf func(format string, args ...any) + s *Server + num int // 1-based + mac MAC // of router + portmap bool + lanInterfaceID int + wanInterfaceID int + v4 bool // network supports IPv4 + v6 bool // network support IPv6 + wanIP6 netip.Prefix // router's WAN IPv6, if any, as a /64. + wanIP4 netip.Addr // router's LAN IPv4, if any + lanIP4 netip.Prefix // router's LAN IP + CIDR (e.g. 
192.168.2.1/24) + breakWAN4 bool // break WAN IPv4 connectivity + blackholeControl bool // blackhole control connectivity + latency time.Duration // latency applied to interface writes + lossRate float64 // probability of dropping a packet (0.0 to 1.0) + nodesByIP4 map[netip.Addr]*node // by LAN IPv4 + nodesByMAC map[MAC]*node + logf func(format string, args ...any) ns *stack.Stack linkEP *channel.Endpoint @@ -578,6 +579,12 @@ func (n *network) MACOfIP(ip netip.Addr) (_ MAC, ok bool) { return MAC{}, false } +// SetControlBlackholed sets wether traffic to control should be blackholed for the +// network. +func (n *network) SetControlBlackholed(v bool) { + n.blackholeControl = v +} + type node struct { mac MAC num int // 1-based node number @@ -1263,7 +1270,8 @@ func (n *network) HandleEthernetPacketForRouter(ep EthernetPacket) { } if toForward && n.s.shouldInterceptTCP(packet) { - if flow.dst.Is4() && n.breakWAN4 { + if (flow.dst.Is4() && n.breakWAN4) || + (n.blackholeControl && fakeControl.Match(flow.dst)) { // Blackhole the packet. return } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 169369f4bb472..1f02d84c7c608 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -4266,6 +4266,7 @@ func (c *Conn) HandleDiscoKeyAdvertisement(node tailcfg.NodeView, update packet. // If the key did not change, count it and return. if oldDiscoKey.Compare(discoKey) == 0 { metricTSMPDiscoKeyAdvertisementUnchanged.Add(1) + c.logf("magicsock: disco key did not change for node %v", nodeKey.ShortString()) return } c.discoInfoForKnownPeerLocked(discoKey) From 2810f0c6f192775ea89da79f5182cf513516250c Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 5 Mar 2026 21:41:12 +0000 Subject: [PATCH 1059/1093] all: fix typos in comments Fix its/it's, who's/whose, wether/whether, missing apostrophes in contractions, and other misspellings across the codebase. 
Updates #cleanup Change-Id: I20453b81a7aceaa14ea2a551abba08a2e7f0a1d8 Signed-off-by: Brad Fitzpatrick --- cmd/tailscale/cli/serve_v2.go | 2 +- drive/driveimpl/remote_impl.go | 2 +- ipn/ipnlocal/network-lock.go | 2 +- ipn/ipnserver/server.go | 2 +- net/socks5/socks5_test.go | 2 +- safesocket/safesocket_darwin.go | 4 ++-- tka/chaintest_test.go | 4 ++-- tka/tailchonk.go | 2 +- tka/tailchonk_test.go | 2 +- tstest/natlab/vnet/conf.go | 2 +- tstest/natlab/vnet/vnet.go | 2 +- tstest/tailmac/README.md | 2 +- types/key/nl.go | 2 +- wgengine/magicsock/endpoint.go | 2 +- 14 files changed, 16 insertions(+), 16 deletions(-) diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 840c47ac66dd1..9ac303c791b37 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -1096,7 +1096,7 @@ func isRemote(target string) bool { target = "tmp://" + target } - // make sure we can parse the target, wether it's a full URL or just a host:port + // make sure we can parse the target, whether it's a full URL or just a host:port u, err := url.ParseRequestURI(target) if err != nil { // If we can't parse the target, it doesn't matter if it's remote or not diff --git a/drive/driveimpl/remote_impl.go b/drive/driveimpl/remote_impl.go index df27ba71627df..0ff27dc643efe 100644 --- a/drive/driveimpl/remote_impl.go +++ b/drive/driveimpl/remote_impl.go @@ -415,7 +415,7 @@ var writeMethods = map[string]bool{ "DELETE": true, } -// canSudo checks wether we can sudo -u the configured executable as the +// canSudo checks whether we can sudo -u the configured executable as the // configured user by attempting to call the executable with the '-h' flag to // print help. 
func (s *userServer) canSudo() bool { diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 242fec0287c65..276fde5860bd2 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -407,7 +407,7 @@ func (b *LocalBackend) tkaSyncLocked(ourNodeKey key.NodePublic) error { // has updates for us, or we have updates for the control plane. // // TODO(tom): Do we want to keep processing even if the Inform fails? Need - // to think through if theres holdback concerns here or not. + // to think through if there's holdback concerns here or not. if len(offerResp.MissingAUMs) > 0 { aums := make([]tka.AUM, len(offerResp.MissingAUMs)) for i, a := range offerResp.MissingAUMs { diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index 1f8abf0e20128..19efaf9895b94 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -429,7 +429,7 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o if len(s.activeReqs) == 1 { if envknob.GOOS() == "windows" && !actor.IsLocalSystem() { // Tell the LocalBackend about the identity we're now running as, - // unless its the SYSTEM user. That user is not a real account and + // unless it's the SYSTEM user. That user is not a real account and // doesn't have a home directory. 
lb.SetCurrentUser(actor) } diff --git a/net/socks5/socks5_test.go b/net/socks5/socks5_test.go index 9fbc11f8c0dfb..e6ca4b68e9967 100644 --- a/net/socks5/socks5_test.go +++ b/net/socks5/socks5_test.go @@ -222,7 +222,7 @@ func TestUDP(t *testing.T) { if err != nil { t.Fatal(err) } - _, err = conn.Write(append([]byte{socks5Version, byte(udpAssociate), 0x00}, targetAddrPkt...)) // client reqeust + _, err = conn.Write(append([]byte{socks5Version, byte(udpAssociate), 0x00}, targetAddrPkt...)) // client request if err != nil { t.Fatal(err) } diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index 8cbabff63364e..aa67baaf82596 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -102,8 +102,8 @@ func SetCredentials(token string, port int) { // InitListenerDarwin initializes the listener for the CLI commands // and localapi HTTP server and sets the port/token. This will override -// any credentials set explicitly via SetCredentials(). Calling this mulitple times -// has no effect. The listener and it's corresponding token/port is initialized only once. +// any credentials set explicitly via SetCredentials(). Calling this multiple times +// has no effect. The listener and its corresponding token/port is initialized only once. func InitListenerDarwin(sharedDir string) (*net.Listener, error) { ssd.mu.Lock() defer ssd.mu.Unlock() diff --git a/tka/chaintest_test.go b/tka/chaintest_test.go index c370bf60a2e4c..5ca68afa8f8ba 100644 --- a/tka/chaintest_test.go +++ b/tka/chaintest_test.go @@ -203,9 +203,9 @@ func (c *testChain) buildChain() { } // AUMs with a parent need to know their hash, so we - // only compute AUMs who's parents have been computed + // only compute AUMs whose parents have been computed // each iteration. Since at least the genesis AUM - // had no parent, theres always a path to completion + // had no parent, there's always a path to completion // in O(n+1) where n is the number of AUMs. 
c.AUMs = make(map[string]AUM, len(c.Nodes)) c.AUMHashes = make(map[string]AUMHash, len(c.Nodes)) diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 256faaea2b8b9..3b083f327f3e7 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -715,7 +715,7 @@ func markActiveChain(storage Chonk, verdict map[AUMHash]retainState, minChain in parent, hasParent := next.Parent() if !hasParent { - // Genesis AUM (beginning of time). The chain isnt long enough to need truncating. + // Genesis AUM (beginning of time). The chain isn't long enough to need truncating. return h, nil } diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index d40e4b09da769..be638c56e8022 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -309,7 +309,7 @@ func TestMarkDescendantAUMs(t *testing.T) { } for _, h := range []AUMHash{hs["genesis"], hs["B"], hs["D"]} { if (verdict[h] & retainStateLeaf) != 0 { - t.Errorf("%v was marked as a descendant and shouldnt be", h) + t.Errorf("%v was marked as a descendant and shouldn't be", h) } } } diff --git a/tstest/natlab/vnet/conf.go b/tstest/natlab/vnet/conf.go index 33a9bd7e54330..eec8a4731c2fe 100644 --- a/tstest/natlab/vnet/conf.go +++ b/tstest/natlab/vnet/conf.go @@ -349,7 +349,7 @@ func (n *Network) SetBlackholedIPv4(v bool) { n.breakWAN4 = v } -// SetPostConnectControlBlackhole sets wether the network should blackhole all +// SetPostConnectControlBlackhole sets whether the network should blackhole all // traffic to the control server after the clients have connected. 
func (n *Network) SetPostConnectControlBlackhole(v bool) { n.postConnectBlackholeControl = v diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 43d370c61b83c..ea119bad7bb10 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -579,7 +579,7 @@ func (n *network) MACOfIP(ip netip.Addr) (_ MAC, ok bool) { return MAC{}, false } -// SetControlBlackholed sets wether traffic to control should be blackholed for the +// SetControlBlackholed sets whether traffic to control should be blackholed for the // network. func (n *network) SetControlBlackholed(v bool) { n.blackholeControl = v diff --git a/tstest/tailmac/README.md b/tstest/tailmac/README.md index a8b9f2598dde3..6c62d24318119 100644 --- a/tstest/tailmac/README.md +++ b/tstest/tailmac/README.md @@ -53,7 +53,7 @@ All vm images, restore images, block device files, save states, and other suppor Each vm gets its own directory. These can be archived for posterity to preserve a particular image and/or state. The mere existence of a directory containing all of the required files in ~/VM.bundle is sufficient for tailmac to -be able to see and run it. ~/VM.bundle and it's contents *is* tailmac's state. No other state is maintained elsewhere. +be able to see and run it. ~/VM.bundle and its contents *is* tailmac's state. No other state is maintained elsewhere. Each vm has its own custom configuration which can be modified while the vm is idle. It's simple JSON - you may modify this directly, or using 'tailmac configure'. diff --git a/types/key/nl.go b/types/key/nl.go index fc11d5b20ff64..0e8c5ed966260 100644 --- a/types/key/nl.go +++ b/types/key/nl.go @@ -119,7 +119,7 @@ type NLPublic struct { // NLPublicFromEd25519Unsafe converts an ed25519 public key into // a type of NLPublic. // -// New uses of this function should be avoided, as its possible to +// New uses of this function should be avoided, as it's possible to // accidentally construct an NLPublic from a non network-lock key. 
func NLPublicFromEd25519Unsafe(public ed25519.PublicKey) NLPublic { var out NLPublic diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 1f99f57ec2d16..5f493027be945 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -80,7 +80,7 @@ type endpoint struct { lastSendAny mono.Time // last time there were outgoing packets sent this peer from any trigger, internal or external to magicsock lastFullPing mono.Time // last time we pinged all disco or wireguard only endpoints lastUDPRelayPathDiscovery mono.Time // last time we ran UDP relay path discovery - sentDiscoKeyAdvertisement bool // wether we sent a TSMPDiscoAdvertisement or not to this endpoint + sentDiscoKeyAdvertisement bool // whether we sent a TSMPDiscoAdvertisement or not to this endpoint derpAddr netip.AddrPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients) bestAddr addrQuality // best non-DERP path; zero if none; mutate via setBestAddrLocked() From 8cfbaa717d9670e2d9e356ca989387fe18611419 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 5 Mar 2026 14:08:30 -0800 Subject: [PATCH 1060/1093] go.mod: bump staticcheck to version that supports Go 1.26 Otherwise it gets confused on new(123) etc. 
Updates #18682 Change-Id: I9e2e93ea24f2b952b2396dceaf094b4db64424b0 Signed-off-by: Brad Fitzpatrick --- flake.nix | 2 +- go.mod | 8 ++++---- go.mod.sri | 2 +- go.sum | 16 ++++++++-------- shell.nix | 2 +- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/flake.nix b/flake.nix index c9e3b50a1ad73..5ac0726dab25c 100644 --- a/flake.nix +++ b/flake.nix @@ -151,4 +151,4 @@ }); }; } -# nix-direnv cache busting line: sha256-rhuWEEN+CtumVxOw6Dy/IRxWIrZ2x6RJb6ULYwXCQc4= +# nix-direnv cache busting line: sha256-dx+SJyDx+eZptFaMatoyM6w1E3nJKY+hKs7nuR997bE= diff --git a/go.mod b/go.mod index 24c39a4cf3d91..ba9c64061da4d 100644 --- a/go.mod +++ b/go.mod @@ -112,20 +112,20 @@ require ( go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/crypto v0.46.0 golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b - golang.org/x/mod v0.30.0 + golang.org/x/mod v0.31.0 golang.org/x/net v0.48.0 golang.org/x/oauth2 v0.33.0 golang.org/x/sync v0.19.0 golang.org/x/sys v0.40.0 golang.org/x/term v0.38.0 golang.org/x/time v0.12.0 - golang.org/x/tools v0.39.0 + golang.org/x/tools v0.40.1-0.20260108161641-ca281cf95054 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wireguard/windows v0.5.3 gopkg.in/square/go-jose.v2 v2.6.0 gvisor.dev/gvisor v0.0.0-20260224225140-573d5e7127a8 helm.sh/helm/v3 v3.19.0 - honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 + honnef.co/go/tools v0.7.0 k8s.io/api v0.34.0 k8s.io/apimachinery v0.34.0 k8s.io/apiserver v0.34.0 @@ -250,7 +250,7 @@ require ( go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto/x509roots/fallback v0.0.0-20260113154411-7d0074ccc6f1 // indirect - golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect + golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc // indirect golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect 
google.golang.org/genproto/googleapis/api v0.0.0-20251213004720-97cd9d5aeac2 // indirect diff --git a/go.mod.sri b/go.mod.sri index a307075942f64..0e0a6fdece5ee 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-rhuWEEN+CtumVxOw6Dy/IRxWIrZ2x6RJb6ULYwXCQc4= +sha256-dx+SJyDx+eZptFaMatoyM6w1E3nJKY+hKs7nuR997bE= diff --git a/go.sum b/go.sum index b61f1d24a1db1..48b1e9379006f 100644 --- a/go.sum +++ b/go.sum @@ -1366,8 +1366,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1499,8 +1499,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 h1:E2/AqCUMZGgd73TQkxUMcMla25GB9i/5HOdLr+uH7Vo= -golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= +golang.org/x/telemetry 
v0.0.0-20251203150158-8fff8a5912fc h1:bH6xUXay0AIFMElXG2rQ4uiE+7ncwtiOdPfYK1NK2XA= +golang.org/x/telemetry v0.0.0-20251203150158-8fff8a5912fc/go.mod h1:hKdjCMrbv9skySur+Nek8Hd0uJ0GuxJIoIX2payrIdQ= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1590,8 +1590,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools v0.40.1-0.20260108161641-ca281cf95054 h1:CHVDrNHx9ZoOrNN9kKWYIbT5Rj+WF2rlwPkhbQQ5V4U= +golang.org/x/tools v0.40.1-0.20260108161641-ca281cf95054/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= @@ -1737,8 +1737,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0 
h1:5SXjd4ET5dYijLaf0O3aOenC0Z4ZafIWSpjUzsQaNho= -honnef.co/go/tools v0.7.0-0.dev.0.20251022135355-8273271481d0/go.mod h1:EPDDhEZqVHhWuPI5zPAsjU0U7v9xNIWjoOVyZ5ZcniQ= +honnef.co/go/tools v0.7.0 h1:w6WUp1VbkqPEgLz4rkBzH/CSU6HkoqNLp6GstyTx3lU= +honnef.co/go/tools v0.7.0/go.mod h1:pm29oPxeP3P82ISxZDgIYeOaf9ta6Pi0EWvCFoLG2vc= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= diff --git a/shell.nix b/shell.nix index 7ddf62c52df5c..7e965bb11082a 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-rhuWEEN+CtumVxOw6Dy/IRxWIrZ2x6RJb6ULYwXCQc4= +# nix-direnv cache busting line: sha256-dx+SJyDx+eZptFaMatoyM6w1E3nJKY+hKs7nuR997bE= From 2a64c03c9554a7397ef6965036da1246e8c01f7d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 5 Mar 2026 22:48:46 +0000 Subject: [PATCH 1061/1093] types/ptr: deprecate ptr.To, use Go 1.26 new Updates #18682 Change-Id: I62f6aa0de2a15ef8c1435032c6aa74a181c25f8f Signed-off-by: Brad Fitzpatrick --- cmd/cloner/cloner.go | 17 ++-- cmd/cloner/clonerex/clonerex_clone.go | 6 +- cmd/containerboot/main.go | 3 +- cmd/containerboot/main_test.go | 25 +++--- cmd/derper/depaware.txt | 1 - cmd/k8s-operator/api-server-proxy-pg_test.go | 7 +- cmd/k8s-operator/api-server-proxy.go | 7 +- cmd/k8s-operator/connector_test.go | 13 ++- cmd/k8s-operator/depaware.txt | 1 - cmd/k8s-operator/dnsrecords_test.go | 21 +++-- cmd/k8s-operator/e2e/ingress_test.go | 3 +- cmd/k8s-operator/e2e/pebble.go | 4 +- cmd/k8s-operator/e2e/proxygrouppolicy_test.go | 5 +- cmd/k8s-operator/e2e/ssh.go | 5 +- cmd/k8s-operator/egress-eps.go | 7 +- cmd/k8s-operator/egress-pod-readiness_test.go | 3 +- cmd/k8s-operator/ingress-for-pg_test.go | 21 +++-- cmd/k8s-operator/ingress_test.go | 26 +++--- cmd/k8s-operator/nameserver.go | 3 +- 
cmd/k8s-operator/nameserver_test.go | 5 +- cmd/k8s-operator/operator_test.go | 63 +++++++------- cmd/k8s-operator/proxygroup.go | 15 ++-- cmd/k8s-operator/proxygroup_specs.go | 11 ++- cmd/k8s-operator/proxygroup_test.go | 59 +++++++------ cmd/k8s-operator/sts.go | 7 +- cmd/k8s-operator/sts_test.go | 17 ++-- cmd/k8s-operator/svc-for-pg_test.go | 9 +- cmd/k8s-operator/testutils_test.go | 17 ++-- cmd/k8s-operator/tsrecorder_specs.go | 3 +- cmd/k8s-operator/tsrecorder_specs_test.go | 7 +- cmd/k8s-operator/tsrecorder_test.go | 3 +- cmd/k8s-proxy/internal/config/config.go | 5 +- cmd/k8s-proxy/internal/config/config_test.go | 11 ++- cmd/stund/depaware.txt | 1 - cmd/tailscale/cli/set.go | 3 +- cmd/tailscale/cli/set_test.go | 33 ++++---- cmd/tailscale/depaware.txt | 1 - cmd/tailscaled/depaware-min.txt | 1 - cmd/tailscaled/depaware-minbox.txt | 1 - cmd/tailscaled/depaware.txt | 1 - cmd/tsidp/depaware.txt | 1 - cmd/tta/fw_linux.go | 3 +- cmd/viewer/tests/tests.go | 3 +- cmd/viewer/tests/tests_clone.go | 37 ++++----- control/controlclient/direct.go | 3 +- control/controlclient/map.go | 19 ++--- control/controlclient/map_test.go | 45 +++++----- feature/relayserver/relayserver.go | 5 +- feature/relayserver/relayserver_test.go | 49 ++++++----- hostinfo/hostinfo.go | 5 +- hostinfo/hostinfo_darwin.go | 3 +- hostinfo/hostinfo_freebsd.go | 5 +- hostinfo/hostinfo_linux.go | 5 +- hostinfo/hostinfo_uname.go | 3 +- hostinfo/hostinfo_windows.go | 7 +- ipn/ipn_clone.go | 7 +- ipn/ipnlocal/local.go | 15 ++-- ipn/ipnlocal/local_test.go | 83 +++++++++---------- ipn/ipnlocal/netstack.go | 3 +- ipn/ipnlocal/node_backend.go | 3 +- ipn/ipnlocal/node_backend_test.go | 9 +- ipn/ipnserver/server_test.go | 7 +- ipn/ipnstate/ipnstate.go | 3 +- ipn/lapitest/server.go | 3 +- ipn/localapi/localapi.go | 5 +- .../proxygrouppolicy/proxygrouppolicy.go | 5 +- kube/k8s-proxy/conf/conf_test.go | 7 +- net/packet/geneve_test.go | 9 +- net/tstun/wrap_test.go | 5 +- ssh/tailssh/tailssh_test.go | 3 +- 
tailcfg/tailcfg_clone.go | 23 +++-- tailcfg/tailcfg_test.go | 13 ++- tsnet/depaware.txt | 1 - tstest/integration/integration_test.go | 7 +- tstest/integration/testcontrol/testcontrol.go | 5 +- tstest/reflect.go | 4 +- types/jsonx/json_test.go | 3 +- types/lazy/deferred.go | 4 +- types/lazy/lazy.go | 8 +- types/netmap/nodemut.go | 9 +- types/netmap/nodemut_test.go | 7 +- types/prefs/item.go | 3 +- types/prefs/list.go | 3 +- types/prefs/map.go | 3 +- types/prefs/prefs_clone_test.go | 4 +- types/prefs/struct_list.go | 3 +- types/prefs/struct_map.go | 3 +- types/ptr/ptr.go | 11 ++- types/views/views.go | 3 +- util/deephash/deephash_test.go | 9 +- util/linuxfw/nftables_runner.go | 3 +- util/pool/pool.go | 4 +- util/syspolicy/setting/errors.go | 6 +- util/syspolicy/setting/setting_test.go | 3 +- wgengine/magicsock/magicsock_test.go | 3 +- wgengine/wgcfg/wgcfg_clone.go | 5 +- 96 files changed, 429 insertions(+), 532 deletions(-) diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index a3f0684faa589..9a5ff3de2bbdd 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -141,14 +141,12 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("if src.%s[i] == nil { dst.%s[i] = nil } else {", fname, fname) if codegen.ContainsPointers(ptr.Elem()) { if _, isIface := ptr.Elem().Underlying().(*types.Interface); isIface { - it.Import("", "tailscale.com/types/ptr") - writef("\tdst.%s[i] = ptr.To((*src.%s[i]).Clone())", fname, fname) + writef("\tdst.%s[i] = new((*src.%s[i]).Clone())", fname, fname) } else { writef("\tdst.%s[i] = src.%s[i].Clone()", fname, fname) } } else { - it.Import("", "tailscale.com/types/ptr") - writef("\tdst.%s[i] = ptr.To(*src.%s[i])", fname, fname) + writef("\tdst.%s[i] = new(*src.%s[i])", fname, fname) } writef("}") } else if ft.Elem().String() == "encoding/json.RawMessage" { @@ -170,12 +168,11 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { writef("dst.%s = src.%s.Clone()", fname, 
fname) continue } - it.Import("", "tailscale.com/types/ptr") writef("if dst.%s != nil {", fname) if _, isIface := base.Underlying().(*types.Interface); isIface && hasPtrs { - writef("\tdst.%s = ptr.To((*src.%s).Clone())", fname, fname) + writef("\tdst.%s = new((*src.%s).Clone())", fname, fname) } else if !hasPtrs { - writef("\tdst.%s = ptr.To(*src.%s)", fname, fname) + writef("\tdst.%s = new(*src.%s)", fname, fname) } else { writef("\t" + `panic("TODO pointers in pointers")`) } @@ -293,14 +290,12 @@ func writeMapValueClone(params mapValueCloneParams) { writef("if %s == nil { %s = nil } else {", params.SrcExpr, params.DstExpr) if base := elem.Elem().Underlying(); codegen.ContainsPointers(base) { if _, isIface := base.(*types.Interface); isIface { - params.It.Import("", "tailscale.com/types/ptr") - writef("\t%s = ptr.To((*%s).Clone())", params.DstExpr, params.SrcExpr) + writef("\t%s = new((*%s).Clone())", params.DstExpr, params.SrcExpr) } else { writef("\t%s = %s.Clone()", params.DstExpr, params.SrcExpr) } } else { - params.It.Import("", "tailscale.com/types/ptr") - writef("\t%s = ptr.To(*%s)", params.DstExpr, params.SrcExpr) + writef("\t%s = new(*%s)", params.DstExpr, params.SrcExpr) } writef("}") diff --git a/cmd/cloner/clonerex/clonerex_clone.go b/cmd/cloner/clonerex/clonerex_clone.go index 5c161239fc992..ad776538113dd 100644 --- a/cmd/cloner/clonerex/clonerex_clone.go +++ b/cmd/cloner/clonerex/clonerex_clone.go @@ -7,8 +7,6 @@ package clonerex import ( "maps" - - "tailscale.com/types/ptr" ) // Clone makes a deep copy of SliceContainer. 
@@ -25,7 +23,7 @@ func (src *SliceContainer) Clone() *SliceContainer { if src.Slice[i] == nil { dst.Slice[i] = nil } else { - dst.Slice[i] = ptr.To(*src.Slice[i]) + dst.Slice[i] = new(*src.Slice[i]) } } } @@ -70,7 +68,7 @@ func (src *MapWithPointers) Clone() *MapWithPointers { if v == nil { dst.Nested[k] = nil } else { - dst.Nested[k] = ptr.To(*v) + dst.Nested[k] = new(*v) } } } diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index 6b192b41605f1..ba47111fd797f 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -147,7 +147,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" "tailscale.com/util/deephash" "tailscale.com/util/dnsname" "tailscale.com/util/linuxfw" @@ -612,7 +611,7 @@ runLoop: if cd == "" { cd = kubetypes.ValueNoHTTPS } - prev := certDomain.Swap(ptr.To(cd)) + prev := certDomain.Swap(new(cd)) if prev == nil || *prev != cd { select { case certDomainChanged <- true: diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 58ab757950612..cc5629f99ca0b 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -38,7 +38,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" ) func TestContainerBoot(t *testing.T) { @@ -95,7 +94,7 @@ func TestContainerBoot(t *testing.T) { EndpointStatuses map[string]int } runningNotify := &ipn.Notify{ - State: ptr.To(ipn.Running), + State: new(ipn.Running), NetMap: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ StableID: tailcfg.StableNodeID("myID"), @@ -373,7 +372,7 @@ func TestContainerBoot(t *testing.T) { }, { Notify: &ipn.Notify{ - State: ptr.To(ipn.Running), + State: new(ipn.Running), NetMap: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ StableID: tailcfg.StableNodeID("myID"), @@ -390,7 +389,7 @@ func TestContainerBoot(t *testing.T) { }, }, WantLog: "no forwarding rules for egress addresses 
[::1/128], host supports IPv6: false", - WantExitCode: ptr.To(1), + WantExitCode: new(1), }, }, } @@ -409,7 +408,7 @@ func TestContainerBoot(t *testing.T) { }, { Notify: &ipn.Notify{ - State: ptr.To(ipn.NeedsLogin), + State: new(ipn.NeedsLogin), }, WantCmds: []string{ "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", @@ -440,7 +439,7 @@ func TestContainerBoot(t *testing.T) { }, { Notify: &ipn.Notify{ - State: ptr.To(ipn.NeedsLogin), + State: new(ipn.NeedsLogin), }, WantCmds: []string{ "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=true --authkey=tskey-key", @@ -564,7 +563,7 @@ func TestContainerBoot(t *testing.T) { }, { Notify: &ipn.Notify{ - State: ptr.To(ipn.NeedsLogin), + State: new(ipn.NeedsLogin), }, WantCmds: []string{ "/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-key", @@ -621,7 +620,7 @@ func TestContainerBoot(t *testing.T) { }, { Notify: &ipn.Notify{ - State: ptr.To(ipn.Running), + State: new(ipn.Running), NetMap: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ StableID: tailcfg.StableNodeID("newID"), @@ -964,7 +963,7 @@ func TestContainerBoot(t *testing.T) { }, { Notify: &ipn.Notify{ - State: ptr.To(ipn.Running), + State: new(ipn.Running), NetMap: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ StableID: tailcfg.StableNodeID("myID"), @@ -1004,7 +1003,7 @@ func TestContainerBoot(t *testing.T) { Phases: []phase{ { WantLog: "TS_EGRESS_PROXIES_CONFIG_PATH is only supported for Tailscale running on Kubernetes", - WantExitCode: ptr.To(1), + WantExitCode: new(1), }, }, } @@ -1053,7 +1052,7 @@ func TestContainerBoot(t *testing.T) { { // SIGTERM before state is finished writing, should wait for // consistent state before propagating SIGTERM to tailscaled. 
- Signal: ptr.To(unix.SIGTERM), + Signal: new(unix.SIGTERM), UpdateKubeSecret: map[string]string{ "_machinekey": "foo", "_profiles": "foo", @@ -1083,7 +1082,7 @@ func TestContainerBoot(t *testing.T) { kubetypes.KeyCapVer: capver, }, WantLog: "HTTP server at [::]:9002 closed", - WantExitCode: ptr.To(0), + WantExitCode: new(0), }, }, } @@ -1661,7 +1660,7 @@ func newTestEnv(t *testing.T) testEnv { kube.Start(t) t.Cleanup(kube.Close) - tailscaledConf := &ipn.ConfigVAlpha{AuthKey: ptr.To("foo"), Version: "alpha0"} + tailscaledConf := &ipn.ConfigVAlpha{AuthKey: new("foo"), Version: "alpha0"} serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}} serveConfWithServices := ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index a0eb4a29e259c..30d19b78179c5 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -138,7 +138,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/opt from tailscale.com/envknob+ tailscale.com/types/persist from tailscale.com/ipn+ tailscale.com/types/preftype from tailscale.com/ipn - tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/client/local+ diff --git a/cmd/k8s-operator/api-server-proxy-pg_test.go b/cmd/k8s-operator/api-server-proxy-pg_test.go index d7e88123fb28b..8fb18c818edb1 100644 --- a/cmd/k8s-operator/api-server-proxy-pg_test.go +++ b/cmd/k8s-operator/api-server-proxy-pg_test.go @@ -25,7 +25,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/opt" - "tailscale.com/types/ptr" ) func TestAPIServerProxyReconciler(t *testing.T) { @@ -57,7 +56,7 @@ func TestAPIServerProxyReconciler(t *testing.T) { initialCfg := &conf.VersionedConfig{ Version: "v1alpha1", ConfigV1Alpha1: 
&conf.ConfigV1Alpha1{ - AuthKey: ptr.To("test-key"), + AuthKey: new("test-key"), APIServerProxy: &conf.APIServerProxyConfig{ Enabled: opt.NewBool(true), }, @@ -174,7 +173,7 @@ func TestAPIServerProxyReconciler(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) expectEqual(t, fc, pg, omitPGStatusConditionMessages) - expectedCfg.APIServerProxy.ServiceName = ptr.To(tailcfg.ServiceName("svc:" + pgName)) + expectedCfg.APIServerProxy.ServiceName = new(tailcfg.ServiceName("svc:" + pgName)) expectCfg(&expectedCfg) expectEqual(t, fc, certSecret(pgName, ns, defaultDomain, pg)) @@ -230,7 +229,7 @@ func TestAPIServerProxyReconciler(t *testing.T) { t.Fatalf("expected Tailscale Service to be %+v, got %+v", expectedTSSvc, tsSvc) } // Check cfg and status reset until TLS certs are available again. - expectedCfg.APIServerProxy.ServiceName = ptr.To(updatedServiceName) + expectedCfg.APIServerProxy.ServiceName = new(updatedServiceName) expectedCfg.AdvertiseServices = nil expectCfg(&expectedCfg) tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionFalse, reasonKubeAPIServerProxyNoBackends, "", 1, r.clock, r.logger) diff --git a/cmd/k8s-operator/api-server-proxy.go b/cmd/k8s-operator/api-server-proxy.go index 492590c9fecd6..b8d87cf0aa38a 100644 --- a/cmd/k8s-operator/api-server-proxy.go +++ b/cmd/k8s-operator/api-server-proxy.go @@ -11,7 +11,6 @@ import ( "os" "tailscale.com/kube/kubetypes" - "tailscale.com/types/ptr" ) func parseAPIProxyMode() *kubetypes.APIServerProxyMode { @@ -23,18 +22,18 @@ func parseAPIProxyMode() *kubetypes.APIServerProxyMode { case haveAuthProxyEnv: var authProxyEnv = defaultBool("AUTH_PROXY", false) // deprecated if authProxyEnv { - return ptr.To(kubetypes.APIServerProxyModeAuth) + return new(kubetypes.APIServerProxyModeAuth) } return nil case haveAPIProxyEnv: var apiProxyEnv = 
defaultEnv("APISERVER_PROXY", "") // true, false or "noauth" switch apiProxyEnv { case "true": - return ptr.To(kubetypes.APIServerProxyModeAuth) + return new(kubetypes.APIServerProxyModeAuth) case "false", "": return nil case "noauth": - return ptr.To(kubetypes.APIServerProxyModeNoAuth) + return new(kubetypes.APIServerProxyModeNoAuth) default: panic(fmt.Sprintf("unknown APISERVER_PROXY value %q", apiProxyEnv)) } diff --git a/cmd/k8s-operator/connector_test.go b/cmd/k8s-operator/connector_test.go index 7866f3e002921..110ad1bf14305 100644 --- a/cmd/k8s-operator/connector_test.go +++ b/cmd/k8s-operator/connector_test.go @@ -22,7 +22,6 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" - "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -39,7 +38,7 @@ func TestConnector(t *testing.T) { APIVersion: "tailscale.com/v1alpha1", }, Spec: tsapi.ConnectorSpec{ - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, @@ -166,7 +165,7 @@ func TestConnector(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, @@ -229,7 +228,7 @@ func TestConnectorWithProxyClass(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), SubnetRouter: &tsapi.SubnetRouter{ AdvertiseRoutes: []tsapi.Route{"10.40.0.0/14"}, }, @@ -326,7 +325,7 @@ func TestConnectorWithAppConnector(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), AppConnector: &tsapi.AppConnector{}, }, } @@ -425,7 +424,7 @@ func TestConnectorWithMultipleReplicas(t *testing.T) { APIVersion: "tailscale.io/v1alpha1", }, Spec: tsapi.ConnectorSpec{ - 
Replicas: ptr.To[int32](3), + Replicas: new(int32(3)), AppConnector: &tsapi.AppConnector{}, HostnamePrefix: "test-connector", }, @@ -496,7 +495,7 @@ func TestConnectorWithMultipleReplicas(t *testing.T) { // 5. We'll scale the connector down by 1 replica and make sure its secret is cleaned up mustUpdate[tsapi.Connector](t, fc, "", "test", func(conn *tsapi.Connector) { - conn.Spec.Replicas = ptr.To[int32](2) + conn.Spec.Replicas = new(int32(2)) }) expectReconciled(t, cr, "", "test") names = findGenNames(t, fc, "", "test", "connector") diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 8718127b6e75f..356f1f6c438a2 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -927,7 +927,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/opt from tailscale.com/client/tailscale+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ - tailscale.com/types/ptr from tailscale.com/cmd/k8s-operator+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 0d89c4a863e4d..c6c5ee0296ca3 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ b/cmd/k8s-operator/dnsrecords_test.go @@ -25,7 +25,6 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" - "tailscale.com/types/ptr" ) func TestDNSRecordsReconciler(t *testing.T) { @@ -44,7 +43,7 @@ func TestDNSRecordsReconciler(t *testing.T) { Namespace: "test", }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), }, Status: networkingv1.IngressStatus{ LoadBalancer: networkingv1.IngressLoadBalancerStatus{ @@ -150,7 +149,7 @@ 
func TestDNSRecordsReconciler(t *testing.T) { // 7. A not-ready Endpoint is removed from DNS config. mustUpdate(t, fc, ep.Namespace, ep.Name, func(ep *discoveryv1.EndpointSlice) { - ep.Endpoints[0].Conditions.Ready = ptr.To(false) + ep.Endpoints[0].Conditions.Ready = new(false) ep.Endpoints = append(ep.Endpoints, discoveryv1.Endpoint{ Addresses: []string{"1.2.3.4"}, }) @@ -220,13 +219,13 @@ func TestDNSRecordsReconciler(t *testing.T) { Endpoints: []discoveryv1.Endpoint{{ Addresses: []string{"10.1.0.100", "10.1.0.101", "10.1.0.102"}, // Pod IPs that should NOT be used Conditions: discoveryv1.EndpointConditions{ - Ready: ptr.To(true), - Serving: ptr.To(true), - Terminating: ptr.To(false), + Ready: new(true), + Serving: new(true), + Terminating: new(false), }, }}, Ports: []discoveryv1.EndpointPort{{ - Port: ptr.To(int32(10443)), + Port: new(int32(10443)), }}, } @@ -316,7 +315,7 @@ func TestDNSRecordsReconcilerDualStack(t *testing.T) { Namespace: "test", }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), }, Status: networkingv1.IngressStatus{ LoadBalancer: networkingv1.IngressLoadBalancerStatus{ @@ -447,9 +446,9 @@ func endpointSliceForService(svc *corev1.Service, ip string, fam discoveryv1.Add Endpoints: []discoveryv1.Endpoint{{ Addresses: []string{ip}, Conditions: discoveryv1.EndpointConditions{ - Ready: ptr.To(true), - Serving: ptr.To(true), - Terminating: ptr.To(false), + Ready: new(true), + Serving: new(true), + Terminating: new(false), }, }}, } diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go index 47a838414d449..95fbbab9df697 100644 --- a/cmd/k8s-operator/e2e/ingress_test.go +++ b/cmd/k8s-operator/e2e/ingress_test.go @@ -22,7 +22,6 @@ import ( "tailscale.com/cmd/testwrapper/flakytest" kube "tailscale.com/k8s-operator" "tailscale.com/tstest" - "tailscale.com/types/ptr" "tailscale.com/util/httpm" ) @@ -44,7 +43,7 @@ func TestIngress(t *testing.T) { }, }, Spec: 
appsv1.DeploymentSpec{ - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app.kubernetes.io/name": "nginx", diff --git a/cmd/k8s-operator/e2e/pebble.go b/cmd/k8s-operator/e2e/pebble.go index 5fcb35e057c3d..7abe3416ef7dc 100644 --- a/cmd/k8s-operator/e2e/pebble.go +++ b/cmd/k8s-operator/e2e/pebble.go @@ -12,8 +12,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" - - "tailscale.com/types/ptr" ) func applyPebbleResources(ctx context.Context, cl client.Client) error { @@ -46,7 +44,7 @@ func pebbleDeployment(tag string) *appsv1.Deployment { Namespace: ns, }, Spec: appsv1.DeploymentSpec{ - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app": "pebble", diff --git a/cmd/k8s-operator/e2e/proxygrouppolicy_test.go b/cmd/k8s-operator/e2e/proxygrouppolicy_test.go index f8126499b0db0..0e73394d539da 100644 --- a/cmd/k8s-operator/e2e/proxygrouppolicy_test.go +++ b/cmd/k8s-operator/e2e/proxygrouppolicy_test.go @@ -13,7 +13,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" - "tailscale.com/types/ptr" ) // See [TestMain] for test requirements. 
@@ -82,7 +81,7 @@ func TestProxyGroupPolicy(t *testing.T) { }, Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), Ports: []corev1.ServicePort{ { Port: 8080, @@ -112,7 +111,7 @@ func TestProxyGroupPolicy(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "nginx", diff --git a/cmd/k8s-operator/e2e/ssh.go b/cmd/k8s-operator/e2e/ssh.go index 371c44f9d4544..9adcce6e3eee0 100644 --- a/cmd/k8s-operator/e2e/ssh.go +++ b/cmd/k8s-operator/e2e/ssh.go @@ -26,7 +26,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" tailscaleroot "tailscale.com" - "tailscale.com/types/ptr" ) const ( @@ -206,7 +205,7 @@ func applySSHResources(ctx context.Context, cl client.Client, alpineTag string, func cleanupSSHResources(ctx context.Context, cl client.Client) error { noGrace := &client.DeleteOptions{ - GracePeriodSeconds: ptr.To[int64](0), + GracePeriodSeconds: new(int64(0)), } if err := cl.Delete(ctx, sshDeployment("", nil), noGrace); err != nil { return fmt.Errorf("failed to delete ssh-server Deployment: %w", err) @@ -232,7 +231,7 @@ func sshDeployment(tag string, pubKey []byte) *appsv1.Deployment { Namespace: ns, }, Spec: appsv1.DeploymentSpec{ - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "app": "ssh-server", diff --git a/cmd/k8s-operator/egress-eps.go b/cmd/k8s-operator/egress-eps.go index 5181edf49a26c..a248ed8883b56 100644 --- a/cmd/k8s-operator/egress-eps.go +++ b/cmd/k8s-operator/egress-eps.go @@ -21,7 +21,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "tailscale.com/kube/egressservices" - "tailscale.com/types/ptr" ) // egressEpsReconciler reconciles EndpointSlices for tailnet 
services exposed to cluster via egress ProxyGroup proxies. @@ -120,9 +119,9 @@ func (er *egressEpsReconciler) Reconcile(ctx context.Context, req reconcile.Requ Hostname: (*string)(&pod.UID), Addresses: []string{podIP}, Conditions: discoveryv1.EndpointConditions{ - Ready: ptr.To(true), - Serving: ptr.To(true), - Terminating: ptr.To(false), + Ready: new(true), + Serving: new(true), + Terminating: new(false), }, }) } diff --git a/cmd/k8s-operator/egress-pod-readiness_test.go b/cmd/k8s-operator/egress-pod-readiness_test.go index baa1442671907..0cf9108f5cd20 100644 --- a/cmd/k8s-operator/egress-pod-readiness_test.go +++ b/cmd/k8s-operator/egress-pod-readiness_test.go @@ -24,7 +24,6 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" - "tailscale.com/types/ptr" ) func TestEgressPodReadiness(t *testing.T) { @@ -48,7 +47,7 @@ func TestEgressPodReadiness(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: "egress", - Replicas: ptr.To(int32(3)), + Replicas: new(int32(3)), }, } mustCreate(t, fc, pg) diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index f285bd8ee947d..e93d0184e8412 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -33,7 +33,6 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" - "tailscale.com/types/ptr" ) func TestIngressPGReconciler(t *testing.T) { @@ -50,7 +49,7 @@ func TestIngressPGReconciler(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -117,7 +116,7 @@ func TestIngressPGReconciler(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: 
&networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -242,7 +241,7 @@ func TestIngressPGReconciler(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -286,7 +285,7 @@ func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -341,7 +340,7 @@ func TestValidateIngress(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), TLS: []networkingv1.IngressTLS{ {Hosts: []string{"test"}}, }, @@ -475,7 +474,7 @@ func TestValidateIngress(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), TLS: []networkingv1.IngressTLS{ {Hosts: []string{"test"}}, }, @@ -522,7 +521,7 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -651,7 +650,7 @@ func TestIngressPGReconciler_HTTPRedirect(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -775,7 +774,7 @@ func TestIngressPGReconciler_HTTPEndpointAndRedirectConflict(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: 
&networkingv1.IngressBackend{ Service: &networkingv1.IngressServiceBackend{ Name: "test", @@ -864,7 +863,7 @@ func TestIngressPGReconciler_MultiCluster(t *testing.T) { }, }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), TLS: []networkingv1.IngressTLS{ {Hosts: []string{"my-svc"}}, }, diff --git a/cmd/k8s-operator/ingress_test.go b/cmd/k8s-operator/ingress_test.go index aac40897cc88e..1381193065093 100644 --- a/cmd/k8s-operator/ingress_test.go +++ b/cmd/k8s-operator/ingress_test.go @@ -25,7 +25,6 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" - "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -59,7 +58,7 @@ func TestTailscaleIngress(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "ingress") opts := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -109,7 +108,7 @@ func TestTailscaleIngress(t *testing.T) { // 4. 
Resources get cleaned up when Ingress class is unset mustUpdate(t, fc, "default", "test", func(ing *networkingv1.Ingress) { - ing.Spec.IngressClassName = ptr.To("nginx") + ing.Spec.IngressClassName = new("nginx") }) expectReconciled(t, ingR, "default", "test") expectReconciled(t, ingR, "default", "test") // deleting Ingress STS requires two reconciles @@ -639,7 +638,7 @@ func TestEmptyPath(t *testing.T) { name: "empty_path_with_prefix_type", paths: []networkingv1.HTTPIngressPath{ { - PathType: ptrPathType(networkingv1.PathTypePrefix), + PathType: new(networkingv1.PathTypePrefix), Path: "", Backend: *backend(), }, @@ -652,7 +651,7 @@ func TestEmptyPath(t *testing.T) { name: "empty_path_with_implementation_specific_type", paths: []networkingv1.HTTPIngressPath{ { - PathType: ptrPathType(networkingv1.PathTypeImplementationSpecific), + PathType: new(networkingv1.PathTypeImplementationSpecific), Path: "", Backend: *backend(), }, @@ -665,7 +664,7 @@ func TestEmptyPath(t *testing.T) { name: "empty_path_with_exact_type", paths: []networkingv1.HTTPIngressPath{ { - PathType: ptrPathType(networkingv1.PathTypeExact), + PathType: new(networkingv1.PathTypeExact), Path: "", Backend: *backend(), }, @@ -679,12 +678,12 @@ func TestEmptyPath(t *testing.T) { name: "two_competing_but_not_identical_paths_including_one_empty", paths: []networkingv1.HTTPIngressPath{ { - PathType: ptrPathType(networkingv1.PathTypeImplementationSpecific), + PathType: new(networkingv1.PathTypeImplementationSpecific), Path: "", Backend: *backend(), }, { - PathType: ptrPathType(networkingv1.PathTypeImplementationSpecific), + PathType: new(networkingv1.PathTypeImplementationSpecific), Path: "/", Backend: *backend(), }, @@ -760,11 +759,6 @@ func TestEmptyPath(t *testing.T) { } } -// ptrPathType is a helper function to return a pointer to the pathtype string (required for TestEmptyPath) -func ptrPathType(p networkingv1.PathType) *networkingv1.PathType { - return &p -} - func ingressClass() 
*networkingv1.IngressClass { return &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, @@ -799,7 +793,7 @@ func ingress() *networkingv1.Ingress { UID: "1234-UID", }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: backend(), TLS: []networkingv1.IngressTLS{ {Hosts: []string{"default-test"}}, @@ -817,7 +811,7 @@ func ingressWithPaths(paths []networkingv1.HTTPIngressPath) *networkingv1.Ingres UID: types.UID("1234-UID"), }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), Rules: []networkingv1.IngressRule{ { Host: "foo.tailnetxyz.ts.net", @@ -878,7 +872,7 @@ func TestTailscaleIngressWithHTTPRedirect(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "ingress") opts := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 522b460031530..869e5bb264cc3 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -31,7 +31,6 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstime" - "tailscale.com/types/ptr" "tailscale.com/util/clientmetric" "tailscale.com/util/set" ) @@ -245,7 +244,7 @@ var ( if err := yaml.Unmarshal(deployYaml, &d); err != nil { return fmt.Errorf("error unmarshalling Deployment yaml: %w", err) } - d.Spec.Replicas = ptr.To(cfg.replicas) + d.Spec.Replicas = new(cfg.replicas) d.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%s:%s", cfg.imageRepo, cfg.imageTag) d.ObjectMeta.Namespace = cfg.namespace d.ObjectMeta.Labels = cfg.labels diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index 531190cf21dc2..b374c114fc799 100644 --- a/cmd/k8s-operator/nameserver_test.go +++ 
b/cmd/k8s-operator/nameserver_test.go @@ -23,7 +23,6 @@ import ( operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstest" - "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -35,7 +34,7 @@ func TestNameserverReconciler(t *testing.T) { }, Spec: tsapi.DNSConfigSpec{ Nameserver: &tsapi.Nameserver{ - Replicas: ptr.To[int32](3), + Replicas: new(int32(3)), Image: &tsapi.NameserverImage{ Repo: "test", Tag: "v0.0.1", @@ -87,7 +86,7 @@ func TestNameserverReconciler(t *testing.T) { } wantsDeploy.OwnerReferences = []metav1.OwnerReference{*ownerReference} wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.1" - wantsDeploy.Spec.Replicas = ptr.To[int32](3) + wantsDeploy.Spec.Replicas = new(int32(3)) wantsDeploy.Namespace = tsNamespace wantsDeploy.ObjectMeta.Labels = nameserverLabels wantsDeploy.Spec.Template.Spec.Tolerations = []corev1.Toleration{ diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 53d16fbd225f3..305b1738cbf81 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -30,7 +30,6 @@ import ( "tailscale.com/net/dns/resolvconffile" "tailscale.com/tstest" "tailscale.com/tstime" - "tailscale.com/types/ptr" "tailscale.com/util/dnsname" "tailscale.com/util/mak" ) @@ -71,7 +70,7 @@ func TestLoadBalancerClass(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) @@ -94,7 +93,7 @@ func TestLoadBalancerClass(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, Status: corev1.ServiceStatus{ Conditions: []metav1.Condition{{ @@ -119,7 +118,7 @@ func TestLoadBalancerClass(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := 
configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -259,7 +258,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -369,7 +368,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -466,7 +465,7 @@ func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) @@ -486,7 +485,7 @@ func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, Status: corev1.ServiceStatus{ Conditions: []metav1.Condition{{ @@ -534,7 +533,7 @@ func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) @@ -554,7 +553,7 @@ func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, Status: corev1.ServiceStatus{ Conditions: []metav1.Condition{{ @@ -612,7 +611,7 @@ func TestAnnotations(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := 
configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -716,7 +715,7 @@ func TestAnnotationIntoLB(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -767,7 +766,7 @@ func TestAnnotationIntoLB(t *testing.T) { mustUpdate(t, fc, "default", "test", func(s *corev1.Service) { delete(s.ObjectMeta.Annotations, "tailscale.com/expose") s.Spec.Type = corev1.ServiceTypeLoadBalancer - s.Spec.LoadBalancerClass = ptr.To("tailscale") + s.Spec.LoadBalancerClass = new("tailscale") }) expectReconciled(t, sr, "default", "test") // None of the proxy machinery should have changed... @@ -785,7 +784,7 @@ func TestAnnotationIntoLB(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, Status: corev1.ServiceStatus{ LoadBalancer: corev1.LoadBalancerStatus{ @@ -836,7 +835,7 @@ func TestLBIntoAnnotation(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) @@ -844,7 +843,7 @@ func TestLBIntoAnnotation(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -880,7 +879,7 @@ func TestLBIntoAnnotation(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, Status: corev1.ServiceStatus{ LoadBalancer: corev1.LoadBalancerStatus{ @@ -982,7 +981,7 @@ func TestCustomHostname(t 
*testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1092,7 +1091,7 @@ func TestCustomPriorityClassName(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1332,13 +1331,13 @@ func TestProxyClassForService(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) expectReconciled(t, sr, "default", "test") fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1431,7 +1430,7 @@ func TestDefaultLoadBalancer(t *testing.T) { expectEqual(t, fc, expectedHeadlessService(shortName, "svc")) o := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1484,7 +1483,7 @@ func TestProxyFirewallMode(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") o := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1596,7 +1595,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { Name: "ing-1", Namespace: "ns-1", }, - Spec: networkingv1.IngressSpec{IngressClassName: ptr.To(tailscaleIngressClassName)}, + Spec: networkingv1.IngressSpec{IngressClassName: new(tailscaleIngressClassName)}, }) svc1 := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -1628,7 +1627,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { DefaultBackend: &networkingv1.IngressBackend{ 
Service: &networkingv1.IngressServiceBackend{Name: "def-backend"}, }, - IngressClassName: ptr.To(tailscaleIngressClassName), + IngressClassName: new(tailscaleIngressClassName), }, }) backendSvc := &corev1.Service{ @@ -1652,7 +1651,7 @@ func Test_serviceHandlerForIngress(t *testing.T) { Namespace: "ns-3", }, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To(tailscaleIngressClassName), + IngressClassName: new(tailscaleIngressClassName), Rules: []networkingv1.IngressRule{{IngressRuleValue: networkingv1.IngressRuleValue{HTTP: &networkingv1.HTTPIngressRuleValue{ Paths: []networkingv1.HTTPIngressPath{ {Backend: networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}}, @@ -1727,7 +1726,7 @@ func Test_serviceHandlerForIngress_multipleIngressClasses(t *testing.T) { mustCreate(t, fc, &networkingv1.Ingress{ ObjectMeta: metav1.ObjectMeta{Name: "nginx-ing", Namespace: "default"}, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("nginx"), + IngressClassName: new("nginx"), DefaultBackend: &networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}, }, }) @@ -1735,7 +1734,7 @@ func Test_serviceHandlerForIngress_multipleIngressClasses(t *testing.T) { mustCreate(t, fc, &networkingv1.Ingress{ ObjectMeta: metav1.ObjectMeta{Name: "ts-ing", Namespace: "default"}, Spec: networkingv1.IngressSpec{ - IngressClassName: ptr.To("tailscale"), + IngressClassName: new("tailscale"), DefaultBackend: &networkingv1.IngressBackend{Service: &networkingv1.IngressServiceBackend{Name: "backend"}}, }, }) @@ -1844,7 +1843,7 @@ func Test_authKeyRemoval(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, }) @@ -1859,7 +1858,7 @@ func Test_authKeyRemoval(t *testing.T) { hostname: "default-test", clusterTargetIP: "10.20.30.40", app: kubetypes.AppIngressProxy, - replicas: ptr.To[int32](1), + 
replicas: new(int32(1)), } expectEqual(t, fc, expectedSecret(t, fc, opts)) @@ -1924,7 +1923,7 @@ func Test_externalNameService(t *testing.T) { fullName, shortName := findGenName(t, fc, "default", "test", "svc") opts := configOpts{ - replicas: ptr.To[int32](1), + replicas: new(int32(1)), stsName: shortName, secretName: fullName, namespace: "default", @@ -1969,7 +1968,7 @@ func Test_metricsResourceCreation(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "10.20.30.40", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, } crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 13c3d7b715e50..db8733f9095b8 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -42,7 +42,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/util/clientmetric" "tailscale.com/util/mak" "tailscale.com/util/set" @@ -624,7 +623,7 @@ func (r *ProxyGroupReconciler) ensureNodePortServiceCreated(ctx context.Context, } } - return svcToNodePorts, ptr.To(tailscaledPort), nil + return svcToNodePorts, new(tailscaledPort), nil } // cleanupDanglingResources ensures we don't leak config secrets, state secrets, and @@ -837,9 +836,9 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( Version: "v1alpha1", ConfigV1Alpha1: &conf.ConfigV1Alpha1{ AuthKey: authKey, - State: ptr.To(fmt.Sprintf("kube:%s", pgPodName(pg.Name, i))), - App: ptr.To(kubetypes.AppProxyGroupKubeAPIServer), - LogLevel: ptr.To(logger.Level().String()), + State: new(fmt.Sprintf("kube:%s", pgPodName(pg.Name, i))), + App: new(kubetypes.AppProxyGroupKubeAPIServer), + LogLevel: new(logger.Level().String()), // Reloadable fields. 
Hostname: &hostname, @@ -850,7 +849,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( // as containerboot does for ingress-pg-reconciler. IssueCerts: opt.NewBool(i == 0), }, - LocalPort: ptr.To(uint16(9002)), + LocalPort: new(uint16(9002)), HealthCheckEnabled: opt.NewBool(true), }, } @@ -1021,7 +1020,7 @@ func getStaticEndpointAddress(a *corev1.NodeAddress, port uint16) *netip.AddrPor return nil } - return ptr.To(netip.AddrPortFrom(addr, port)) + return new(netip.AddrPortFrom(addr, port)) } // ensureAddedToGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource is updated when the ProxyGroup @@ -1062,7 +1061,7 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass, idx int32, a AcceptDNS: "false", AcceptRoutes: "false", // AcceptRoutes defaults to true Locked: "false", - Hostname: ptr.To(pgHostname(pg, idx)), + Hostname: new(pgHostname(pg, idx)), AdvertiseServices: oldAdvertiseServices, AuthKey: authKey, } diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 05e0ed0b26013..69b5b109a0129 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -22,7 +22,6 @@ import ( "tailscale.com/kube/egressservices" "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubetypes" - "tailscale.com/types/ptr" ) const ( @@ -87,7 +86,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Labels: pgLabels(pg.Name, nil), OwnerReferences: pgOwnerReference(pg), } - ss.Spec.Replicas = ptr.To(pgReplicas(pg)) + ss.Spec.Replicas = new(pgReplicas(pg)) ss.Spec.Selector = &metav1.LabelSelector{ MatchLabels: pgLabels(pg.Name, nil), } @@ -98,7 +97,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string Name: pg.Name, Namespace: namespace, Labels: pgLabels(pg.Name, nil), - DeletionGracePeriodSeconds: ptr.To[int64](10), + DeletionGracePeriodSeconds: new(int64(10)), } tmpl.Spec.ServiceAccountName = pg.Name 
tmpl.Spec.InitContainers[0].Image = image @@ -282,7 +281,7 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string } // Set the deletion grace period to 6 minutes to ensure that the pre-stop hook has enough time to terminate // gracefully. - ss.Spec.Template.DeletionGracePeriodSeconds = ptr.To(deletionGracePeriodSeconds) + ss.Spec.Template.DeletionGracePeriodSeconds = new(deletionGracePeriodSeconds) } return ss, nil @@ -297,7 +296,7 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, por OwnerReferences: pgOwnerReference(pg), }, Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To(pgReplicas(pg)), + Replicas: new(pgReplicas(pg)), Selector: &metav1.LabelSelector{ MatchLabels: pgLabels(pg.Name, nil), }, @@ -306,7 +305,7 @@ func kubeAPIServerStatefulSet(pg *tsapi.ProxyGroup, namespace, image string, por Name: pg.Name, Namespace: namespace, Labels: pgLabels(pg.Name, nil), - DeletionGracePeriodSeconds: ptr.To[int64](10), + DeletionGracePeriodSeconds: new(int64(10)), }, Spec: corev1.PodSpec{ ServiceAccountName: pgServiceAccountName(pg), diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index c58bd2bb71dc5..2d46e3d5bf16b 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -36,7 +36,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/opt" - "tailscale.com/types/ptr" ) const ( @@ -49,7 +48,7 @@ var ( "some-annotation": "from-the-proxy-class", } - defaultReplicas = ptr.To(int32(2)) + defaultReplicas = new(int32(2)) defaultStaticEndpointConfig = &tsapi.StaticEndpointsConfig{ NodePort: &tsapi.NodePortConfig{ Ports: []tsapi.PortRange{ @@ -107,7 +106,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, }, - replicas: ptr.To(int32(4)), + replicas: new(int32(4)), nodes: []testNode{ { name: "foobar", @@ -150,7 +149,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, }, - replicas: 
ptr.To(int32(4)), + replicas: new(int32(4)), nodes: []testNode{ { name: "foobar", @@ -192,7 +191,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, }, - replicas: ptr.To(int32(4)), + replicas: new(int32(4)), nodes: []testNode{ { name: "foobar", @@ -234,7 +233,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, }, - replicas: ptr.To(int32(3)), + replicas: new(int32(3)), nodes: []testNode{ {name: "node1", addresses: []testNodeAddr{{ip: "10.0.0.1", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, {name: "node2", addresses: []testNodeAddr{{ip: "10.0.0.2", addrType: corev1.NodeExternalIP}}, labels: map[string]string{"foo/bar": "baz"}}, @@ -294,7 +293,7 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { }, }, }, - replicas: ptr.To(int32(4)), + replicas: new(int32(4)), nodes: []testNode{ { name: "foobar", @@ -942,7 +941,7 @@ func TestProxyGroup(t *testing.T) { }) t.Run("scale_up_to_3", func(t *testing.T) { - pg.Spec.Replicas = ptr.To[int32](3) + pg.Spec.Replicas = new(int32(3)) mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { p.Spec = pg.Spec }) @@ -965,7 +964,7 @@ func TestProxyGroup(t *testing.T) { }) t.Run("scale_down_to_1", func(t *testing.T) { - pg.Spec.Replicas = ptr.To[int32](1) + pg.Spec.Replicas = new(int32(1)) mustUpdate(t, fc, "", pg.Name, func(p *tsapi.ProxyGroup) { p.Spec = pg.Spec }) @@ -1062,7 +1061,7 @@ func TestProxyGroupTypes(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeEgress, - Replicas: ptr.To[int32](0), + Replicas: new(int32(0)), }, } mustCreate(t, fc, pg) @@ -1137,7 +1136,7 @@ func TestProxyGroupTypes(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeEgress, - Replicas: ptr.To[int32](0), + Replicas: new(int32(0)), ProxyClass: "test", }, } @@ -1174,7 +1173,7 @@ func TestProxyGroupTypes(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeIngress, - Replicas: ptr.To[int32](0), + Replicas: new(int32(0)), 
}, } if err := fc.Create(t.Context(), pg); err != nil { @@ -1228,9 +1227,9 @@ func TestProxyGroupTypes(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeKubernetesAPIServer, - Replicas: ptr.To[int32](2), + Replicas: new(int32(2)), KubeAPIServer: &tsapi.KubeAPIServerConfig{ - Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), + Mode: new(tsapi.APIServerProxyModeNoAuth), }, }, } @@ -1268,9 +1267,9 @@ func TestKubeAPIServerStatusConditionFlow(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeKubernetesAPIServer, - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), KubeAPIServer: &tsapi.KubeAPIServerConfig{ - Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), + Mode: new(tsapi.APIServerProxyModeNoAuth), }, }, } @@ -1354,9 +1353,9 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeKubernetesAPIServer, - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), KubeAPIServer: &tsapi.KubeAPIServerConfig{ - Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), // Avoid needing to pre-create the static ServiceAccount. + Mode: new(tsapi.APIServerProxyModeNoAuth), // Avoid needing to pre-create the static ServiceAccount. 
}, }, } @@ -1368,18 +1367,18 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { cfg := conf.VersionedConfig{ Version: "v1alpha1", ConfigV1Alpha1: &conf.ConfigV1Alpha1{ - AuthKey: ptr.To("secret-authkey"), - State: ptr.To(fmt.Sprintf("kube:%s", pgPodName(pg.Name, 0))), - App: ptr.To(kubetypes.AppProxyGroupKubeAPIServer), - LogLevel: ptr.To("debug"), + AuthKey: new("secret-authkey"), + State: new(fmt.Sprintf("kube:%s", pgPodName(pg.Name, 0))), + App: new(kubetypes.AppProxyGroupKubeAPIServer), + LogLevel: new("debug"), - Hostname: ptr.To("test-k8s-apiserver-0"), + Hostname: new("test-k8s-apiserver-0"), APIServerProxy: &conf.APIServerProxyConfig{ Enabled: opt.NewBool(true), - Mode: ptr.To(kubetypes.APIServerProxyModeNoAuth), + Mode: new(kubetypes.APIServerProxyModeNoAuth), IssueCerts: opt.NewBool(true), }, - LocalPort: ptr.To(uint16(9002)), + LocalPort: new(uint16(9002)), HealthCheckEnabled: opt.NewBool(true), }, } @@ -1403,7 +1402,7 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { // Now simulate the kube-apiserver services reconciler updating config, // then check the proxygroup reconciler doesn't overwrite it. 
- cfg.APIServerProxy.ServiceName = ptr.To(tailcfg.ServiceName("svc:some-svc-name")) + cfg.APIServerProxy.ServiceName = new(tailcfg.ServiceName("svc:some-svc-name")) cfg.AdvertiseServices = []string{"svc:should-not-be-overwritten"} cfgB, err = json.Marshal(cfg) if err != nil { @@ -1459,7 +1458,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tsapi.ProxyGroupTypeIngress, - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), }, }) expectReconciled(t, reconciler, "", pgName) @@ -1473,7 +1472,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { AcceptDNS: "false", AcceptRoutes: "false", Locked: "false", - Hostname: ptr.To(fmt.Sprintf("%s-%d", pgName, 0)), + Hostname: new(fmt.Sprintf("%s-%d", pgName, 0)), }) if err != nil { t.Fatal(err) @@ -1609,7 +1608,7 @@ func TestValidateProxyGroup(t *testing.T) { } if tc.noauth { pg.Spec.KubeAPIServer = &tsapi.KubeAPIServerConfig{ - Mode: ptr.To(tsapi.APIServerProxyModeNoAuth), + Mode: new(tsapi.APIServerProxyModeNoAuth), } } @@ -1875,7 +1874,7 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { }, Spec: tsapi.ProxyGroupSpec{ Type: tt.pgType, - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), ProxyClass: tt.proxyClassPerResource, }, } diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 85aab2e8a0d2a..2a63ede4efe2b 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -38,7 +38,6 @@ import ( "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -378,7 +377,7 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l Selector: map[string]string{ "app": sts.ParentResourceUID, }, - IPFamilyPolicy: ptr.To(corev1.IPFamilyPolicyPreferDualStack), + IPFamilyPolicy: new(corev1.IPFamilyPolicyPreferDualStack), }, } logger.Debugf("reconciling headless service for StatefulSet") @@ -526,7 +525,7 @@ func sanitizeConfig(c 
ipn.ConfigVAlpha) ipn.ConfigVAlpha { // Explicitly redact AuthKey because we never want it appearing in logs. Never populate this with the // actual auth key. if c.AuthKey != nil { - c.AuthKey = ptr.To("**redacted**") + c.AuthKey = new("**redacted**") } return c @@ -683,7 +682,7 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S } if sts.Replicas > 0 { - ss.Spec.Replicas = ptr.To(sts.Replicas) + ss.Spec.Replicas = new(sts.Replicas) } // Generic containerboot configuration options. diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index 81c0d25ec0ba4..f44de8481bf76 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -22,7 +22,6 @@ import ( "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" - "tailscale.com/types/ptr" ) // Test_statefulSetNameBase tests that parent name portion in a StatefulSet name @@ -69,7 +68,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { Labels: tsapi.Labels{"bar": "foo"}, Annotations: map[string]string{"bar.io/foo": "foo"}, SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: ptr.To(int64(0)), + RunAsUser: new(int64(0)), }, ImagePullSecrets: []corev1.LocalObjectReference{{Name: "docker-creds"}}, NodeName: "some-node", @@ -87,18 +86,18 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { }, }, }, - DNSPolicy: ptr.To(corev1.DNSClusterFirstWithHostNet), + DNSPolicy: new(corev1.DNSClusterFirstWithHostNet), DNSConfig: &corev1.PodDNSConfig{ Nameservers: []string{"1.1.1.1", "8.8.8.8"}, Searches: []string{"example.com", "test.local"}, Options: []corev1.PodDNSConfigOption{ - {Name: "ndots", Value: ptr.To("2")}, + {Name: "ndots", Value: new("2")}, {Name: "edns0"}, }, }, TailscaleContainer: &tsapi.Container{ SecurityContext: &corev1.SecurityContext{ - Privileged: ptr.To(true), + Privileged: new(true), }, Resources: corev1.ResourceRequirements{ Limits: corev1.ResourceList{corev1.ResourceCPU: 
resource.MustParse("1000m"), corev1.ResourceMemory: resource.MustParse("128Mi")}, @@ -110,8 +109,8 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { }, TailscaleInitContainer: &tsapi.Container{ SecurityContext: &corev1.SecurityContext{ - Privileged: ptr.To(true), - RunAsUser: ptr.To(int64(0)), + Privileged: new(true), + RunAsUser: new(int64(0)), }, Resources: corev1.ResourceRequirements{ Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("1000m"), corev1.ResourceMemory: resource.MustParse("128Mi")}, @@ -293,7 +292,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { corev1.EnvVar{Name: "TS_ENABLE_METRICS", Value: "true"}, ) wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "metrics", Protocol: "TCP", ContainerPort: 9002}} - gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(true, ptr.To(false)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) + gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(true, new(false)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a StatefulSet (-got +want):\n%s", diff) } @@ -305,7 +304,7 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { corev1.EnvVar{Name: "TS_TAILSCALED_EXTRA_ARGS", Value: "--debug=$(TS_DEBUG_ADDR_PORT)"}, ) wantSS.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{Name: "debug", Protocol: "TCP", ContainerPort: 9001}} - gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(false, ptr.To(true)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) + gotSS = applyProxyClassToStatefulSet(proxyClassWithMetricsDebug(false, new(true)), nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with metrics enabled to a 
StatefulSet (-got +want):\n%s", diff) } diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index d01f8e983ad75..3c478a90c8e21 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -28,7 +28,6 @@ import ( "tailscale.com/kube/ingressservices" "tailscale.com/kube/kubetypes" "tailscale.com/tstest" - "tailscale.com/types/ptr" "tailscale.com/util/mak" "tailscale.com/tailcfg" @@ -235,7 +234,7 @@ func TestValidateService(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "1.2.3.4", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, } svc2 := &corev1.Service{ @@ -252,7 +251,7 @@ func TestValidateService(t *testing.T) { Spec: corev1.ServiceSpec{ ClusterIP: "1.2.3.5", Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), }, } wantSvc := &corev1.Service{ @@ -392,7 +391,7 @@ func setupTestService(t *testing.T, svcName string, hostname string, clusterIP s }, Spec: corev1.ServiceSpec{ Type: corev1.ServiceTypeLoadBalancer, - LoadBalancerClass: ptr.To("tailscale"), + LoadBalancerClass: new("tailscale"), ClusterIP: clusterIP, ClusterIPs: []string{clusterIP}, }, @@ -412,7 +411,7 @@ func setupTestService(t *testing.T, svcName string, hostname string, clusterIP s { Addresses: []string{"4.3.2.1"}, Conditions: discoveryv1.EndpointConditions{ - Ready: ptr.To(true), + Ready: new(true), }, }, }, diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 54b7ead55f7ff..e13478d7167a0 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -36,7 +36,6 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" - "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -96,7 +95,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef 
{Name: "TS_DEBUG_ACME_FORCE_RENEWAL", Value: "true"}, }, SecurityContext: &corev1.SecurityContext{ - Privileged: ptr.To(true), + Privileged: new(true), }, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ @@ -231,7 +230,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Annotations: annots, - DeletionGracePeriodSeconds: ptr.To[int64](10), + DeletionGracePeriodSeconds: new(int64(10)), Labels: map[string]string{ "tailscale.com/managed": "true", "tailscale.com/parent-resource": "test", @@ -250,7 +249,7 @@ func expectedSTS(t *testing.T, cl client.Client, opts configOpts) *appsv1.Statef Command: []string{"/bin/sh", "-c"}, Args: []string{"sysctl -w net.ipv4.ip_forward=1 && if sysctl net.ipv6.conf.all.forwarding; then sysctl -w net.ipv6.conf.all.forwarding=1; fi"}, SecurityContext: &corev1.SecurityContext{ - Privileged: ptr.To(true), + Privileged: new(true), }, }, }, @@ -364,14 +363,14 @@ func expectedSTSUserspace(t *testing.T, cl client.Client, opts configOpts) *apps }, }, Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To[int32](1), + Replicas: new(int32(1)), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"app": "1234-UID"}, }, ServiceName: opts.stsName, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - DeletionGracePeriodSeconds: ptr.To[int64](10), + DeletionGracePeriodSeconds: new(int64(10)), Labels: map[string]string{ "tailscale.com/managed": "true", "tailscale.com/parent-resource": "test", @@ -420,7 +419,7 @@ func expectedHeadlessService(name string, parentType string) *corev1.Service { "app": "1234-UID", }, ClusterIP: "None", - IPFamilyPolicy: ptr.To(corev1.IPFamilyPolicyPreferDualStack), + IPFamilyPolicy: new(corev1.IPFamilyPolicyPreferDualStack), }, } } @@ -480,7 +479,7 @@ func expectedServiceMonitor(t *testing.T, opts configOpts) *unstructured.Unstruc Namespace: opts.tailscaleNamespace, Labels: smLabels, 
ResourceVersion: opts.resourceVersion, - OwnerReferences: []metav1.OwnerReference{{APIVersion: "v1", Kind: "Service", Name: name, BlockOwnerDeletion: ptr.To(true), Controller: ptr.To(true)}}, + OwnerReferences: []metav1.OwnerReference{{APIVersion: "v1", Kind: "Service", Name: name, BlockOwnerDeletion: new(true), Controller: new(true)}}, }, TypeMeta: metav1.TypeMeta{ Kind: "ServiceMonitor", @@ -529,7 +528,7 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec AcceptDNS: "false", Hostname: &opts.hostname, Locked: "false", - AuthKey: ptr.To("secret-authkey"), + AuthKey: new("secret-authkey"), AcceptRoutes: "false", AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, NoStatefulFiltering: "true", diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index ab06c01f81b7d..101f68405d001 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -14,7 +14,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" - "tailscale.com/types/ptr" "tailscale.com/version" ) @@ -33,7 +32,7 @@ func tsrStatefulSet(tsr *tsapi.Recorder, namespace string, loginServer string) * Annotations: tsr.Spec.StatefulSet.Annotations, }, Spec: appsv1.StatefulSetSpec{ - Replicas: ptr.To(replicas), + Replicas: new(replicas), Selector: &metav1.LabelSelector{ MatchLabels: tsrLabels("recorder", tsr.Name, tsr.Spec.StatefulSet.Pod.Labels), }, diff --git a/cmd/k8s-operator/tsrecorder_specs_test.go b/cmd/k8s-operator/tsrecorder_specs_test.go index 47997d1d31b0f..151391956781b 100644 --- a/cmd/k8s-operator/tsrecorder_specs_test.go +++ b/cmd/k8s-operator/tsrecorder_specs_test.go @@ -14,7 +14,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" - "tailscale.com/types/ptr" ) func TestRecorderSpecs(t *testing.T) { @@ -24,7 +23,7 @@ func TestRecorderSpecs(t *testing.T) { Name: "test", }, Spec: 
tsapi.RecorderSpec{ - Replicas: ptr.To[int32](3), + Replicas: new(int32(3)), StatefulSet: tsapi.RecorderStatefulSet{ Labels: map[string]string{ "ss-label-key": "ss-label-value", @@ -51,7 +50,7 @@ func TestRecorderSpecs(t *testing.T) { }, }, SecurityContext: &corev1.PodSecurityContext{ - RunAsUser: ptr.To[int64](1000), + RunAsUser: new(int64(1000)), }, ImagePullSecrets: []corev1.LocalObjectReference{{ Name: "img-pull", @@ -62,7 +61,7 @@ func TestRecorderSpecs(t *testing.T) { Tolerations: []corev1.Toleration{{ Key: "key", Value: "value", - TolerationSeconds: ptr.To[int64](60), + TolerationSeconds: new(int64(60)), }}, Container: tsapi.RecorderContainer{ Env: []tsapi.Env{{ diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index bea734d865f66..0e1641243c937 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ b/cmd/k8s-operator/tsrecorder_test.go @@ -25,7 +25,6 @@ import ( tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstest" - "tailscale.com/types/ptr" ) const ( @@ -40,7 +39,7 @@ func TestRecorder(t *testing.T) { Finalizers: []string{"tailscale.com/finalizer"}, }, Spec: tsapi.RecorderSpec{ - Replicas: ptr.To[int32](3), + Replicas: new(int32(3)), }, } diff --git a/cmd/k8s-proxy/internal/config/config.go b/cmd/k8s-proxy/internal/config/config.go index 91b4c54a5c32d..c12383d45c470 100644 --- a/cmd/k8s-proxy/internal/config/config.go +++ b/cmd/k8s-proxy/internal/config/config.go @@ -27,7 +27,6 @@ import ( clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" - "tailscale.com/types/ptr" "tailscale.com/util/testenv" ) @@ -178,7 +177,7 @@ func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretName }, // Re-watch regularly to avoid relying on long-lived connections. 
// See https://github.com/kubernetes-client/javascript/issues/596#issuecomment-786419380 - TimeoutSeconds: ptr.To(int64(600)), + TimeoutSeconds: new(int64(600)), FieldSelector: fmt.Sprintf("metadata.name=%s", secretName), Watch: true, }) @@ -216,7 +215,7 @@ func (ld *configLoader) watchConfigSecretChanges(ctx context.Context, secretName Kind: "Secret", APIVersion: "v1", }, - TimeoutSeconds: ptr.To(int64(600)), + TimeoutSeconds: new(int64(600)), FieldSelector: fmt.Sprintf("metadata.name=%s", secretName), Watch: true, }) diff --git a/cmd/k8s-proxy/internal/config/config_test.go b/cmd/k8s-proxy/internal/config/config_test.go index ac6c6cf93f623..aedd29d4e1877 100644 --- a/cmd/k8s-proxy/internal/config/config_test.go +++ b/cmd/k8s-proxy/internal/config/config_test.go @@ -20,7 +20,6 @@ import ( ktesting "k8s.io/client-go/testing" "tailscale.com/kube/k8s-proxy/conf" "tailscale.com/kube/kubetypes" - "tailscale.com/types/ptr" ) func TestWatchConfig(t *testing.T) { @@ -52,7 +51,7 @@ func TestWatchConfig(t *testing.T) { initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, phases: []phase{{ expectedConf: &conf.ConfigV1Alpha1{ - AuthKey: ptr.To("abc123"), + AuthKey: new("abc123"), }, }}, }, @@ -62,7 +61,7 @@ func TestWatchConfig(t *testing.T) { phases: []phase{ { expectedConf: &conf.ConfigV1Alpha1{ - AuthKey: ptr.To("abc123"), + AuthKey: new("abc123"), }, }, { @@ -76,13 +75,13 @@ func TestWatchConfig(t *testing.T) { phases: []phase{ { expectedConf: &conf.ConfigV1Alpha1{ - AuthKey: ptr.To("abc123"), + AuthKey: new("abc123"), }, }, { config: `{"version": "v1alpha1", "authKey": "def456"}`, expectedConf: &conf.ConfigV1Alpha1{ - AuthKey: ptr.To("def456"), + AuthKey: new("def456"), }, }, }, @@ -93,7 +92,7 @@ func TestWatchConfig(t *testing.T) { phases: []phase{ { expectedConf: &conf.ConfigV1Alpha1{ - AuthKey: ptr.To("abc123"), + AuthKey: new("abc123"), }, }, { diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index d25974b2df424..0b42072d9beb7 100644 --- 
a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -71,7 +71,6 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/logger from tailscale.com/tsweb+ tailscale.com/types/opt from tailscale.com/envknob+ tailscale.com/types/persist from tailscale.com/feature - tailscale.com/types/ptr from tailscale.com/tailcfg+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/tailcfg+ tailscale.com/types/tkatype from tailscale.com/tailcfg+ diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index 22d78641f38a9..feccf6d12d026 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -24,7 +24,6 @@ import ( "tailscale.com/safesocket" "tailscale.com/tsconst" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/set" "tailscale.com/version" @@ -247,7 +246,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { if err != nil { return fmt.Errorf("failed to set relay server port: %v", err) } - maskedPrefs.Prefs.RelayServerPort = ptr.To(uint16(uport)) + maskedPrefs.Prefs.RelayServerPort = new(uint16(uport)) } if setArgs.relayServerStaticEndpoints != "" { diff --git a/cmd/tailscale/cli/set_test.go b/cmd/tailscale/cli/set_test.go index 63fa3c05c48b3..e2c3ae5f64116 100644 --- a/cmd/tailscale/cli/set_test.go +++ b/cmd/tailscale/cli/set_test.go @@ -11,7 +11,6 @@ import ( "tailscale.com/ipn" "tailscale.com/net/tsaddr" - "tailscale.com/types/ptr" ) func TestCalcAdvertiseRoutesForSet(t *testing.T) { @@ -28,80 +27,80 @@ func TestCalcAdvertiseRoutesForSet(t *testing.T) { }, { name: "advertise-exit", - setExit: ptr.To(true), + setExit: new(true), want: tsaddr.ExitRoutes(), }, { name: "advertise-exit/already-routes", was: []netip.Prefix{pfx("34.0.0.0/16")}, - setExit: ptr.To(true), + setExit: new(true), want: []netip.Prefix{pfx("34.0.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, { name: 
"advertise-exit/already-exit", was: tsaddr.ExitRoutes(), - setExit: ptr.To(true), + setExit: new(true), want: tsaddr.ExitRoutes(), }, { name: "stop-advertise-exit", was: tsaddr.ExitRoutes(), - setExit: ptr.To(false), + setExit: new(false), want: nil, }, { name: "stop-advertise-exit/with-routes", was: []netip.Prefix{pfx("34.0.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, - setExit: ptr.To(false), + setExit: new(false), want: []netip.Prefix{pfx("34.0.0.0/16")}, }, { name: "advertise-routes", - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16")}, }, { name: "advertise-routes/already-exit", was: tsaddr.ExitRoutes(), - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, { name: "advertise-routes/already-diff-routes", was: []netip.Prefix{pfx("34.0.0.0/16")}, - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16")}, }, { name: "stop-advertise-routes", was: []netip.Prefix{pfx("34.0.0.0/16")}, - setRoutes: ptr.To(""), + setRoutes: new(""), want: nil, }, { name: "stop-advertise-routes/already-exit", was: []netip.Prefix{pfx("34.0.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, - setRoutes: ptr.To(""), + setRoutes: new(""), want: tsaddr.ExitRoutes(), }, { name: "advertise-routes-and-exit", - setExit: ptr.To(true), - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setExit: new(true), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, { name: "advertise-routes-and-exit/already-exit", was: tsaddr.ExitRoutes(), - setExit: ptr.To(true), - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setExit: new(true), + setRoutes: 
new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, { name: "advertise-routes-and-exit/already-routes", was: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16")}, - setExit: ptr.To(true), - setRoutes: ptr.To("10.0.0.0/24,192.168.0.0/16"), + setExit: new(true), + setRoutes: new("10.0.0.0/24,192.168.0.0/16"), want: []netip.Prefix{pfx("10.0.0.0/24"), pfx("192.168.0.0/16"), tsaddr.AllIPv4(), tsaddr.AllIPv6()}, }, } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index b4605f9f2e926..4d6a1efb6a282 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -253,7 +253,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/types/opt from tailscale.com/client/tailscale+ tailscale.com/types/persist from tailscale.com/ipn+ tailscale.com/types/preftype from tailscale.com/cmd/tailscale/cli+ - tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/ipn+ tailscale.com/types/tkatype from tailscale.com/types/key+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index 2ad5cbca7b3af..e485e3397fe11 100644 --- a/cmd/tailscaled/depaware-min.txt +++ b/cmd/tailscaled/depaware-min.txt @@ -145,7 +145,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ - tailscale.com/types/ptr from tailscale.com/control/controlclient+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware-minbox.txt 
b/cmd/tailscaled/depaware-minbox.txt index 64911d9318f03..2696e17ec2713 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -164,7 +164,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ - tailscale.com/types/ptr from tailscale.com/control/controlclient+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/control/controlclient+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 207d86243b607..3f3d343de07ed 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -420,7 +420,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ - tailscale.com/types/ptr from tailscale.com/control/controlclient+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/tka+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index bb991383c8a06..d16a96f932324 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -328,7 +328,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/opt from tailscale.com/cmd/tsidp+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ - tailscale.com/types/ptr from tailscale.com/control/controlclient+ tailscale.com/types/result from 
tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ diff --git a/cmd/tta/fw_linux.go b/cmd/tta/fw_linux.go index 49d8d41ea4b4d..66888a45b30c9 100644 --- a/cmd/tta/fw_linux.go +++ b/cmd/tta/fw_linux.go @@ -8,7 +8,6 @@ import ( "github.com/google/nftables" "github.com/google/nftables/expr" - "tailscale.com/types/ptr" ) func init() { @@ -35,7 +34,7 @@ func addFirewallLinux() error { Type: nftables.ChainTypeFilter, Hooknum: nftables.ChainHookInput, Priority: nftables.ChainPriorityFilter, - Policy: ptr.To(nftables.ChainPolicyDrop), + Policy: new(nftables.ChainPolicyDrop), } c.AddChain(inputChain) diff --git a/cmd/viewer/tests/tests.go b/cmd/viewer/tests/tests.go index cbffd38845ec3..5f6218ad3b42e 100644 --- a/cmd/viewer/tests/tests.go +++ b/cmd/viewer/tests/tests.go @@ -9,7 +9,6 @@ import ( "net/netip" "golang.org/x/exp/constraints" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -135,7 +134,7 @@ func (c *Container[T]) Clone() *Container[T] { return &Container[T]{cloner.Clone()} } if !views.ContainsPointers[T]() { - return ptr.To(*c) + return new(*c) } panic(fmt.Errorf("%T contains pointers, but is not cloneable", c.Item)) } diff --git a/cmd/viewer/tests/tests_clone.go b/cmd/viewer/tests/tests_clone.go index cbf5ec2653d98..545b9546b0400 100644 --- a/cmd/viewer/tests/tests_clone.go +++ b/cmd/viewer/tests/tests_clone.go @@ -10,7 +10,6 @@ import ( "net/netip" "golang.org/x/exp/constraints" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -23,13 +22,13 @@ func (src *StructWithPtrs) Clone() *StructWithPtrs { dst := new(StructWithPtrs) *dst = *src if dst.Value != nil { - dst.Value = ptr.To(*src.Value) + dst.Value = new(*src.Value) } if dst.Int != nil { - dst.Int = ptr.To(*src.Int) + dst.Int = new(*src.Int) } if dst.NoView != nil { - dst.NoView = ptr.To(*src.NoView) + dst.NoView = new(*src.NoView) } return dst } @@ -90,7 +89,7 @@ func (src *Map) 
Clone() *Map { if v == nil { dst.StructPtrWithoutPtr[k] = nil } else { - dst.StructPtrWithoutPtr[k] = ptr.To(*v) + dst.StructPtrWithoutPtr[k] = new(*v) } } } @@ -156,7 +155,7 @@ func (src *StructWithSlices) Clone() *StructWithSlices { if src.ValuePointers[i] == nil { dst.ValuePointers[i] = nil } else { - dst.ValuePointers[i] = ptr.To(*src.ValuePointers[i]) + dst.ValuePointers[i] = new(*src.ValuePointers[i]) } } } @@ -185,7 +184,7 @@ func (src *StructWithSlices) Clone() *StructWithSlices { if src.Ints[i] == nil { dst.Ints[i] = nil } else { - dst.Ints[i] = ptr.To(*src.Ints[i]) + dst.Ints[i] = new(*src.Ints[i]) } } } @@ -248,7 +247,7 @@ func (src *GenericIntStruct[T]) Clone() *GenericIntStruct[T] { dst := new(GenericIntStruct[T]) *dst = *src if dst.Pointer != nil { - dst.Pointer = ptr.To(*src.Pointer) + dst.Pointer = new(*src.Pointer) } dst.Slice = append(src.Slice[:0:0], src.Slice...) dst.Map = maps.Clone(src.Map) @@ -258,7 +257,7 @@ func (src *GenericIntStruct[T]) Clone() *GenericIntStruct[T] { if src.PtrSlice[i] == nil { dst.PtrSlice[i] = nil } else { - dst.PtrSlice[i] = ptr.To(*src.PtrSlice[i]) + dst.PtrSlice[i] = new(*src.PtrSlice[i]) } } } @@ -269,7 +268,7 @@ func (src *GenericIntStruct[T]) Clone() *GenericIntStruct[T] { if v == nil { dst.PtrValueMap[k] = nil } else { - dst.PtrValueMap[k] = ptr.To(*v) + dst.PtrValueMap[k] = new(*v) } } } @@ -305,7 +304,7 @@ func (src *GenericNoPtrsStruct[T]) Clone() *GenericNoPtrsStruct[T] { dst := new(GenericNoPtrsStruct[T]) *dst = *src if dst.Pointer != nil { - dst.Pointer = ptr.To(*src.Pointer) + dst.Pointer = new(*src.Pointer) } dst.Slice = append(src.Slice[:0:0], src.Slice...) 
dst.Map = maps.Clone(src.Map) @@ -315,7 +314,7 @@ func (src *GenericNoPtrsStruct[T]) Clone() *GenericNoPtrsStruct[T] { if src.PtrSlice[i] == nil { dst.PtrSlice[i] = nil } else { - dst.PtrSlice[i] = ptr.To(*src.PtrSlice[i]) + dst.PtrSlice[i] = new(*src.PtrSlice[i]) } } } @@ -326,7 +325,7 @@ func (src *GenericNoPtrsStruct[T]) Clone() *GenericNoPtrsStruct[T] { if v == nil { dst.PtrValueMap[k] = nil } else { - dst.PtrValueMap[k] = ptr.To(*v) + dst.PtrValueMap[k] = new(*v) } } } @@ -375,7 +374,7 @@ func (src *GenericCloneableStruct[T, V]) Clone() *GenericCloneableStruct[T, V] { } } if dst.Pointer != nil { - dst.Pointer = ptr.To((*src.Pointer).Clone()) + dst.Pointer = new((*src.Pointer).Clone()) } if src.PtrSlice != nil { dst.PtrSlice = make([]*T, len(src.PtrSlice)) @@ -383,7 +382,7 @@ func (src *GenericCloneableStruct[T, V]) Clone() *GenericCloneableStruct[T, V] { if src.PtrSlice[i] == nil { dst.PtrSlice[i] = nil } else { - dst.PtrSlice[i] = ptr.To((*src.PtrSlice[i]).Clone()) + dst.PtrSlice[i] = new((*src.PtrSlice[i]).Clone()) } } } @@ -394,7 +393,7 @@ func (src *GenericCloneableStruct[T, V]) Clone() *GenericCloneableStruct[T, V] { if v == nil { dst.PtrValueMap[k] = nil } else { - dst.PtrValueMap[k] = ptr.To((*v).Clone()) + dst.PtrValueMap[k] = new((*v).Clone()) } } } @@ -457,7 +456,7 @@ func (src *StructWithTypeAliasFields) Clone() *StructWithTypeAliasFields { dst.WithPtr = *src.WithPtr.Clone() dst.WithPtrByPtr = src.WithPtrByPtr.Clone() if dst.WithoutPtrByPtr != nil { - dst.WithoutPtrByPtr = ptr.To(*src.WithoutPtrByPtr) + dst.WithoutPtrByPtr = new(*src.WithoutPtrByPtr) } if src.SliceWithPtrs != nil { dst.SliceWithPtrs = make([]*StructWithPtrsAlias, len(src.SliceWithPtrs)) @@ -475,7 +474,7 @@ func (src *StructWithTypeAliasFields) Clone() *StructWithTypeAliasFields { if src.SliceWithoutPtrs[i] == nil { dst.SliceWithoutPtrs[i] = nil } else { - dst.SliceWithoutPtrs[i] = ptr.To(*src.SliceWithoutPtrs[i]) + dst.SliceWithoutPtrs[i] = new(*src.SliceWithoutPtrs[i]) } } } @@ 
-495,7 +494,7 @@ func (src *StructWithTypeAliasFields) Clone() *StructWithTypeAliasFields { if v == nil { dst.MapWithoutPtrs[k] = nil } else { - dst.MapWithoutPtrs[k] = ptr.To(*v) + dst.MapWithoutPtrs[k] = new(*v) } } } diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 6f3393b18dfdf..965523f956f94 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -51,7 +51,6 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/persist" - "tailscale.com/types/ptr" "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -383,7 +382,7 @@ func (c *Direct) SetHostinfo(hi *tailcfg.Hostinfo) bool { if hi == nil { panic("nil Hostinfo") } - hi = ptr.To(*hi) + hi = new(*hi) hi.NetInfo = nil c.mu.Lock() defer c.mu.Unlock() diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 18bd420ebaae3..29b0a034877c7 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -28,7 +28,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/mak" @@ -504,7 +503,7 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s if vp, ok := ms.peers[nodeID]; ok { mut := vp.AsStruct() if seen { - mut.LastSeen = ptr.To(clock.Now()) + mut.LastSeen = new(clock.Now()) } else { mut.LastSeen = nil } @@ -516,7 +515,7 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s for nodeID, online := range resp.OnlineChange { if vp, ok := ms.peers[nodeID]; ok { mut := vp.AsStruct() - mut.Online = ptr.To(online) + mut.Online = new(online) ms.peers[nodeID] = mut.View() stats.changed++ } @@ -550,11 +549,11 @@ func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (s patchDiscoKey.Add(1) } if v := pc.Online; v != nil 
{ - mut.Online = ptr.To(*v) + mut.Online = new(*v) patchOnline.Add(1) } if v := pc.LastSeen; v != nil { - mut.LastSeen = ptr.To(*v) + mut.LastSeen = new(*v) patchLastSeen.Add(1) } if v := pc.KeyExpiry; v != nil { @@ -688,11 +687,11 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang } case "Key": if was.Key() != n.Key { - pc().Key = ptr.To(n.Key) + pc().Key = new(n.Key) } case "KeyExpiry": if !was.KeyExpiry().Equal(n.KeyExpiry) { - pc().KeyExpiry = ptr.To(n.KeyExpiry) + pc().KeyExpiry = new(n.KeyExpiry) } case "KeySignature": if !was.KeySignature().Equal(n.KeySignature) { @@ -704,7 +703,7 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang } case "DiscoKey": if was.DiscoKey() != n.DiscoKey { - pc().DiscoKey = ptr.To(n.DiscoKey) + pc().DiscoKey = new(n.DiscoKey) } case "Addresses": if !views.SliceEqual(was.Addresses(), views.SliceOf(n.Addresses)) { @@ -773,11 +772,11 @@ func peerChangeDiff(was tailcfg.NodeView, n *tailcfg.Node) (_ *tailcfg.PeerChang } case "Online": if wasOnline, ok := was.Online().GetOk(); ok && n.Online != nil && *n.Online != wasOnline { - pc().Online = ptr.To(*n.Online) + pc().Online = new(*n.Online) } case "LastSeen": if wasSeen, ok := was.LastSeen().GetOk(); ok && n.LastSeen != nil && !wasSeen.Equal(*n.LastSeen) { - pc().LastSeen = ptr.To(*n.LastSeen) + pc().LastSeen = new(*n.LastSeen) } case "MachineAuthorized": if was.MachineAuthorized() != n.MachineAuthorized { diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index 11d4593f03fae..5a0ccfd823f35 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -30,7 +30,6 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/persist" - "tailscale.com/types/ptr" "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" @@ -250,7 +249,7 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { mapRes: 
&tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, - Key: ptr.To(key.NodePublicFromRaw32(mem.B(append(make([]byte, 31), 'A')))), + Key: new(key.NodePublicFromRaw32(mem.B(append(make([]byte, 31), 'A')))), }}, }, want: peers(&tailcfg.Node{ ID: 1, @@ -281,7 +280,7 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, - DiscoKey: ptr.To(key.DiscoPublicFromRaw32(mem.B(append(make([]byte, 31), 'A')))), + DiscoKey: new(key.DiscoPublicFromRaw32(mem.B(append(make([]byte, 31), 'A')))), }}, }, want: peers(&tailcfg.Node{ @@ -297,13 +296,13 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, - Online: ptr.To(true), + Online: new(true), }}, }, want: peers(&tailcfg.Node{ ID: 1, Name: "foo", - Online: ptr.To(true), + Online: new(true), }), wantStats: updateStats{changed: 1}, }, @@ -313,13 +312,13 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, - LastSeen: ptr.To(time.Unix(123, 0).UTC()), + LastSeen: new(time.Unix(123, 0).UTC()), }}, }, want: peers(&tailcfg.Node{ ID: 1, Name: "foo", - LastSeen: ptr.To(time.Unix(123, 0).UTC()), + LastSeen: new(time.Unix(123, 0).UTC()), }), wantStats: updateStats{changed: 1}, }, @@ -329,7 +328,7 @@ func TestUpdatePeersStateFromResponse(t *testing.T) { mapRes: &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ NodeID: 1, - KeyExpiry: ptr.To(time.Unix(123, 0).UTC()), + KeyExpiry: new(time.Unix(123, 0).UTC()), }}, }, want: peers(&tailcfg.Node{ @@ -770,21 +769,21 @@ func TestPeerChangeDiff(t *testing.T) { }, { name: "patch-lastseen", - a: &tailcfg.Node{ID: 1, LastSeen: ptr.To(time.Unix(1, 0))}, - b: &tailcfg.Node{ID: 1, LastSeen: ptr.To(time.Unix(2, 0))}, - want: &tailcfg.PeerChange{NodeID: 1, LastSeen: ptr.To(time.Unix(2, 0))}, + a: &tailcfg.Node{ID: 
1, LastSeen: new(time.Unix(1, 0))}, + b: &tailcfg.Node{ID: 1, LastSeen: new(time.Unix(2, 0))}, + want: &tailcfg.PeerChange{NodeID: 1, LastSeen: new(time.Unix(2, 0))}, }, { name: "patch-online-to-true", - a: &tailcfg.Node{ID: 1, Online: ptr.To(false)}, - b: &tailcfg.Node{ID: 1, Online: ptr.To(true)}, - want: &tailcfg.PeerChange{NodeID: 1, Online: ptr.To(true)}, + a: &tailcfg.Node{ID: 1, Online: new(false)}, + b: &tailcfg.Node{ID: 1, Online: new(true)}, + want: &tailcfg.PeerChange{NodeID: 1, Online: new(true)}, }, { name: "patch-online-to-false", - a: &tailcfg.Node{ID: 1, Online: ptr.To(true)}, - b: &tailcfg.Node{ID: 1, Online: ptr.To(false)}, - want: &tailcfg.PeerChange{NodeID: 1, Online: ptr.To(false)}, + a: &tailcfg.Node{ID: 1, Online: new(true)}, + b: &tailcfg.Node{ID: 1, Online: new(false)}, + want: &tailcfg.PeerChange{NodeID: 1, Online: new(false)}, }, { name: "mix-patchable-and-not", @@ -818,14 +817,14 @@ func TestPeerChangeDiff(t *testing.T) { }, { name: "miss-change-masq-v4", - a: &tailcfg.Node{ID: 1, SelfNodeV4MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("100.64.0.1"))}, - b: &tailcfg.Node{ID: 1, SelfNodeV4MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("100.64.0.2"))}, + a: &tailcfg.Node{ID: 1, SelfNodeV4MasqAddrForThisPeer: new(netip.MustParseAddr("100.64.0.1"))}, + b: &tailcfg.Node{ID: 1, SelfNodeV4MasqAddrForThisPeer: new(netip.MustParseAddr("100.64.0.2"))}, want: nil, }, { name: "miss-change-masq-v6", - a: &tailcfg.Node{ID: 1, SelfNodeV6MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("2001::3456"))}, - b: &tailcfg.Node{ID: 1, SelfNodeV6MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("2001::3006"))}, + a: &tailcfg.Node{ID: 1, SelfNodeV6MasqAddrForThisPeer: new(netip.MustParseAddr("2001::3456"))}, + b: &tailcfg.Node{ID: 1, SelfNodeV6MasqAddrForThisPeer: new(netip.MustParseAddr("2001::3006"))}, want: nil, }, { @@ -1079,7 +1078,7 @@ func TestUpgradeNode(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var got *tailcfg.Node if tt.in != nil { - got = 
ptr.To(*tt.in) // shallow clone + got = new(*tt.in) // shallow clone } upgradeNode(got) if diff := cmp.Diff(tt.want, got); diff != "" { @@ -1122,7 +1121,7 @@ func BenchmarkMapSessionDelta(b *testing.B) { {Proto: "peerapi-dns-proxy", Port: 1}, }, }).View(), - LastSeen: ptr.To(time.Unix(int64(i), 0)), + LastSeen: new(time.Unix(int64(i), 0)), }) } ms.HandleNonKeepAliveMapResponse(ctx, res) diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index 45d6abcc1d3d6..4f52a7ca748e7 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -23,7 +23,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/eventbus" "tailscale.com/wgengine/magicsock" @@ -225,7 +224,7 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV e.stopRelayServerLocked() e.port = nil if ok { - e.port = ptr.To(newPort) + e.port = new(newPort) } } e.handleRelayServerLifetimeLocked() @@ -264,7 +263,7 @@ func (e *extension) serverStatus() status.ServerStatus { if e.rs == nil { return st } - st.UDPPort = ptr.To(*e.port) + st.UDPPort = new(*e.port) st.Sessions = e.rs.GetSessions() return st } diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index 730e25a00d0d3..bd61a4fc3df3e 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -18,15 +18,14 @@ import ( "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) func Test_extension_profileStateChanged(t *testing.T) { - prefsWithPortOne := ipn.Prefs{RelayServerPort: ptr.To(uint16(1))} + prefsWithPortOne := ipn.Prefs{RelayServerPort: new(uint16(1))} prefsWithNilPort := ipn.Prefs{RelayServerPort: nil} prefsWithPortOneRelayEndpoints := ipn.Prefs{ - RelayServerPort: ptr.To(uint16(1)), + 
RelayServerPort: new(uint16(1)), RelayServerStaticEndpoints: []netip.AddrPort{netip.MustParseAddrPort("127.0.0.1:7777")}, } @@ -51,28 +50,28 @@ func Test_extension_profileStateChanged(t *testing.T) { { name: "no changes non-nil port previously running", fields: fields{ - port: ptr.To(uint16(1)), + port: new(uint16(1)), rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(uint16(1)), + wantPort: new(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: false, }, { name: "set addr ports unchanged port previously running", fields: fields{ - port: ptr.To(uint16(1)), + port: new(uint16(1)), rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOneRelayEndpoints.View(), sameNode: true, }, - wantPort: ptr.To(uint16(1)), + wantPort: new(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: false, wantEndpoints: prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints, @@ -87,7 +86,7 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOneRelayEndpoints.View(), sameNode: true, }, - wantPort: ptr.To(uint16(1)), + wantPort: new(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, wantEndpoints: prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints, @@ -95,7 +94,7 @@ func Test_extension_profileStateChanged(t *testing.T) { { name: "clear addr ports unchanged port previously running", fields: fields{ - port: ptr.To(uint16(1)), + port: new(uint16(1)), staticEndpoints: views.SliceOf(prefsWithPortOneRelayEndpoints.RelayServerStaticEndpoints), rs: mockRelayServerNotZeroVal(), }, @@ -103,7 +102,7 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(uint16(1)), + wantPort: new(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: false, wantEndpoints: nil, @@ -111,7 +110,7 @@ func 
Test_extension_profileStateChanged(t *testing.T) { { name: "prefs port nil", fields: fields{ - port: ptr.To(uint16(1)), + port: new(uint16(1)), }, args: args{ prefs: prefsWithNilPort.View(), @@ -124,7 +123,7 @@ func Test_extension_profileStateChanged(t *testing.T) { { name: "prefs port nil previously running", fields: fields{ - port: ptr.To(uint16(1)), + port: new(uint16(1)), rs: mockRelayServerNotZeroVal(), }, args: args{ @@ -138,54 +137,54 @@ func Test_extension_profileStateChanged(t *testing.T) { { name: "prefs port changed", fields: fields{ - port: ptr.To(uint16(2)), + port: new(uint16(2)), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(uint16(1)), + wantPort: new(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, }, { name: "prefs port changed previously running", fields: fields{ - port: ptr.To(uint16(2)), + port: new(uint16(2)), rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: true, }, - wantPort: ptr.To(uint16(1)), + wantPort: new(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, }, { name: "sameNode false", fields: fields{ - port: ptr.To(uint16(1)), + port: new(uint16(1)), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(uint16(1)), + wantPort: new(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, }, { name: "sameNode false previously running", fields: fields{ - port: ptr.To(uint16(1)), + port: new(uint16(1)), rs: mockRelayServerNotZeroVal(), }, args: args{ prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(uint16(1)), + wantPort: new(uint16(1)), wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, }, @@ -198,7 +197,7 @@ func Test_extension_profileStateChanged(t *testing.T) { prefs: prefsWithPortOne.View(), sameNode: false, }, - wantPort: ptr.To(uint16(1)), + wantPort: new(uint16(1)), wantRelayServerFieldNonNil: true, 
wantRelayServerFieldMutated: true, }, @@ -280,7 +279,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { { name: "want running", shutdown: false, - port: ptr.To(uint16(1)), + port: new(uint16(1)), hasNodeAttrDisableRelayServer: false, wantRelayServerFieldNonNil: true, wantRelayServerFieldMutated: true, @@ -288,7 +287,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { { name: "want running previously running", shutdown: false, - port: ptr.To(uint16(1)), + port: new(uint16(1)), rs: mockRelayServerNotZeroVal(), hasNodeAttrDisableRelayServer: false, wantRelayServerFieldNonNil: true, @@ -297,7 +296,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { { name: "shutdown true", shutdown: true, - port: ptr.To(uint16(1)), + port: new(uint16(1)), hasNodeAttrDisableRelayServer: false, wantRelayServerFieldNonNil: false, wantRelayServerFieldMutated: false, @@ -305,7 +304,7 @@ func Test_extension_handleRelayServerLifetimeLocked(t *testing.T) { { name: "shutdown true previously running", shutdown: true, - port: ptr.To(uint16(1)), + port: new(uint16(1)), rs: mockRelayServerNotZeroVal(), hasNodeAttrDisableRelayServer: false, wantRelayServerFieldNonNil: false, diff --git a/hostinfo/hostinfo.go b/hostinfo/hostinfo.go index f91f52ec0c3d8..11b0a25ccc238 100644 --- a/hostinfo/hostinfo.go +++ b/hostinfo/hostinfo.go @@ -23,7 +23,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/util/cloudenv" "tailscale.com/util/dnsname" "tailscale.com/util/lineiter" @@ -93,8 +92,8 @@ func condCall[T any](fn func() T) T { } var ( - lazyInContainer = &lazyAtomicValue[opt.Bool]{f: ptr.To(inContainer)} - lazyGoArchVar = &lazyAtomicValue[string]{f: ptr.To(goArchVar)} + lazyInContainer = &lazyAtomicValue[opt.Bool]{f: new(inContainer)} + lazyGoArchVar = &lazyAtomicValue[string]{f: new(goArchVar)} ) type lazyAtomicValue[T any] struct { diff --git 
a/hostinfo/hostinfo_darwin.go b/hostinfo/hostinfo_darwin.go index cd551ca425790..338ab9792c215 100644 --- a/hostinfo/hostinfo_darwin.go +++ b/hostinfo/hostinfo_darwin.go @@ -10,7 +10,6 @@ import ( "path/filepath" "golang.org/x/sys/unix" - "tailscale.com/types/ptr" ) func init() { @@ -19,7 +18,7 @@ func init() { } var ( - lazyOSVersion = &lazyAtomicValue[string]{f: ptr.To(osVersionDarwin)} + lazyOSVersion = &lazyAtomicValue[string]{f: new(osVersionDarwin)} ) func packageTypeDarwin() string { diff --git a/hostinfo/hostinfo_freebsd.go b/hostinfo/hostinfo_freebsd.go index 3a214ed2463cb..580d97a6d1027 100644 --- a/hostinfo/hostinfo_freebsd.go +++ b/hostinfo/hostinfo_freebsd.go @@ -11,7 +11,6 @@ import ( "os/exec" "golang.org/x/sys/unix" - "tailscale.com/types/ptr" "tailscale.com/version/distro" ) @@ -22,8 +21,8 @@ func init() { } var ( - lazyVersionMeta = &lazyAtomicValue[versionMeta]{f: ptr.To(freebsdVersionMeta)} - lazyOSVersion = &lazyAtomicValue[string]{f: ptr.To(osVersionFreeBSD)} + lazyVersionMeta = &lazyAtomicValue[versionMeta]{f: new(freebsdVersionMeta)} + lazyOSVersion = &lazyAtomicValue[string]{f: new(osVersionFreeBSD)} ) func distroNameFreeBSD() string { diff --git a/hostinfo/hostinfo_linux.go b/hostinfo/hostinfo_linux.go index bb9a5c58c1bb0..77f47ffe2fe7c 100644 --- a/hostinfo/hostinfo_linux.go +++ b/hostinfo/hostinfo_linux.go @@ -11,7 +11,6 @@ import ( "strings" "golang.org/x/sys/unix" - "tailscale.com/types/ptr" "tailscale.com/util/lineiter" "tailscale.com/version/distro" ) @@ -26,8 +25,8 @@ func init() { } var ( - lazyVersionMeta = &lazyAtomicValue[versionMeta]{f: ptr.To(linuxVersionMeta)} - lazyOSVersion = &lazyAtomicValue[string]{f: ptr.To(osVersionLinux)} + lazyVersionMeta = &lazyAtomicValue[versionMeta]{f: new(linuxVersionMeta)} + lazyOSVersion = &lazyAtomicValue[string]{f: new(osVersionLinux)} ) type versionMeta struct { diff --git a/hostinfo/hostinfo_uname.go b/hostinfo/hostinfo_uname.go index b358c0e2cb108..0185da49d8bc9 100644 --- 
a/hostinfo/hostinfo_uname.go +++ b/hostinfo/hostinfo_uname.go @@ -9,14 +9,13 @@ import ( "runtime" "golang.org/x/sys/unix" - "tailscale.com/types/ptr" ) func init() { unameMachine = lazyUnameMachine.Get } -var lazyUnameMachine = &lazyAtomicValue[string]{f: ptr.To(unameMachineUnix)} +var lazyUnameMachine = &lazyAtomicValue[string]{f: new(unameMachineUnix)} func unameMachineUnix() string { switch runtime.GOOS { diff --git a/hostinfo/hostinfo_windows.go b/hostinfo/hostinfo_windows.go index 5e0b340919e34..59b57433e0c65 100644 --- a/hostinfo/hostinfo_windows.go +++ b/hostinfo/hostinfo_windows.go @@ -11,7 +11,6 @@ import ( "golang.org/x/sys/windows" "golang.org/x/sys/windows/registry" - "tailscale.com/types/ptr" "tailscale.com/util/winutil" "tailscale.com/util/winutil/winenv" ) @@ -23,9 +22,9 @@ func init() { } var ( - lazyDistroName = &lazyAtomicValue[string]{f: ptr.To(distroNameWindows)} - lazyOSVersion = &lazyAtomicValue[string]{f: ptr.To(osVersionWindows)} - lazyPackageType = &lazyAtomicValue[string]{f: ptr.To(packageTypeWindows)} + lazyDistroName = &lazyAtomicValue[string]{f: new(distroNameWindows)} + lazyOSVersion = &lazyAtomicValue[string]{f: new(osVersionWindows)} + lazyPackageType = &lazyAtomicValue[string]{f: new(packageTypeWindows)} ) func distroNameWindows() string { diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 94aebefdfd73d..3e6cbbb823a4f 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -14,7 +14,6 @@ import ( "tailscale.com/types/opt" "tailscale.com/types/persist" "tailscale.com/types/preftype" - "tailscale.com/types/ptr" ) // Clone makes a deep copy of LoginProfile. @@ -62,7 +61,7 @@ func (src *Prefs) Clone() *Prefs { } } if dst.RelayServerPort != nil { - dst.RelayServerPort = ptr.To(*src.RelayServerPort) + dst.RelayServerPort = new(*src.RelayServerPort) } dst.RelayServerStaticEndpoints = append(src.RelayServerStaticEndpoints[:0:0], src.RelayServerStaticEndpoints...) 
dst.Persist = src.Persist.Clone() @@ -122,7 +121,7 @@ func (src *ServeConfig) Clone() *ServeConfig { if v == nil { dst.TCP[k] = nil } else { - dst.TCP[k] = ptr.To(*v) + dst.TCP[k] = new(*v) } } } @@ -184,7 +183,7 @@ func (src *ServiceConfig) Clone() *ServiceConfig { if v == nil { dst.TCP[k] = nil } else { - dst.TCP[k] = ptr.To(*v) + dst.TCP[k] = new(*v) } } } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ec16f6a80aff6..596a51bd7ce3a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -81,7 +81,6 @@ import ( "tailscale.com/types/opt" "tailscale.com/types/persist" "tailscale.com/types/preftype" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" @@ -1738,7 +1737,7 @@ func (b *LocalBackend) setControlClientStatusLocked(c controlclient.Client, st c b.logf("Failed to save new controlclient state: %v", err) } - b.sendToLocked(ipn.Notify{Prefs: ptr.To(prefs.View())}, allClients) + b.sendToLocked(ipn.Notify{Prefs: new(prefs.View())}, allClients) } // initTKALocked is dependent on CurrentProfile.ID, which is initialized @@ -3139,13 +3138,13 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A ini = &ipn.Notify{Version: version.Long()} if mask&ipn.NotifyInitialState != 0 { ini.SessionID = sessionID - ini.State = ptr.To(b.state) + ini.State = new(b.state) if b.state == ipn.NeedsLogin && b.authURL != "" { - ini.BrowseToURL = ptr.To(b.authURL) + ini.BrowseToURL = new(b.authURL) } } if mask&ipn.NotifyInitialPrefs != 0 { - ini.Prefs = ptr.To(b.sanitizedPrefsLocked()) + ini.Prefs = new(b.sanitizedPrefsLocked()) } if mask&ipn.NotifyInitialNetMap != 0 { ini.NetMap = cn.NetMap() @@ -3397,7 +3396,7 @@ func (b *LocalBackend) sendTo(n ipn.Notify, recipient notificationTarget) { // sendToLocked is like [LocalBackend.sendTo], but assumes b.mu is already held. 
func (b *LocalBackend) sendToLocked(n ipn.Notify, recipient notificationTarget) { if n.Prefs != nil { - n.Prefs = ptr.To(stripKeysFromPrefs(*n.Prefs)) + n.Prefs = new(stripKeysFromPrefs(*n.Prefs)) } if n.Version == "" { n.Version = version.Long() @@ -4415,7 +4414,7 @@ func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change // First, apply the adjustments to a copy of the changes, // e.g., clear AutoExitNode if ExitNodeID is set. - tmpChange := ptr.To(*change) + tmpChange := new(*change) tmpChange.Prefs = *change.Prefs.Clone() b.adjustEditPrefsLocked(prefs, tmpChange) @@ -6185,7 +6184,7 @@ func (b *LocalBackend) resolveExitNodeLocked() (changed bool) { b.goTracker.Go(b.doSetHostinfoFilterServices) } - b.sendToLocked(ipn.Notify{Prefs: ptr.To(prefs.View())}, allClients) + b.sendToLocked(ipn.Notify{Prefs: new(prefs.View())}, allClients) return true } diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 259e4b6b28a83..b9d8da046aea0 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -61,7 +61,6 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/opt" "tailscale.com/types/persist" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" @@ -877,7 +876,7 @@ func TestConfigureExitNode(t *testing.T) { Prefs: ipn.Prefs{AutoExitNode: "any"}, AutoExitNodeSet: true, }, - useExitNodeEnabled: ptr.To(false), + useExitNodeEnabled: new(false), wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: "", @@ -894,7 +893,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: clientNetmap, report: report, - useExitNodeEnabled: ptr.To(true), + useExitNodeEnabled: new(true), wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), @@ -909,7 +908,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, }, netMap: clientNetmap, - exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + exitNodeIDPolicy: 
new(exitNode1.StableID()), wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), @@ -922,7 +921,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, }, netMap: clientNetmap, - exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + exitNodeIDPolicy: new(exitNode1.StableID()), changePrefs: &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ ExitNodeID: exitNode2.StableID(), // this should be ignored @@ -942,7 +941,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, }, netMap: clientNetmap, - exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + exitNodeIDPolicy: new(exitNode1.StableID()), changePrefs: &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ ExitNodeIP: exitNode2.Addresses().At(0).Addr(), // this should be ignored @@ -962,7 +961,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, }, netMap: clientNetmap, - exitNodeIDPolicy: ptr.To(exitNode1.StableID()), + exitNodeIDPolicy: new(exitNode1.StableID()), changePrefs: &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ AutoExitNode: "any", // this should be ignored @@ -982,7 +981,7 @@ func TestConfigureExitNode(t *testing.T) { ControlURL: controlURL, }, netMap: clientNetmap, - exitNodeIPPolicy: ptr.To(exitNode2.Addresses().At(0).Addr()), + exitNodeIPPolicy: new(exitNode2.Addresses().At(0).Addr()), wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: exitNode2.StableID(), @@ -996,7 +995,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: clientNetmap, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), @@ -1011,7 +1010,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: clientNetmap, report: nil, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: unresolvedExitNodeID, @@ -1026,7 
+1025,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: nil, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: unresolvedExitNodeID, @@ -1042,7 +1041,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: nil, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), exitNodeAllowedIDs: nil, // not configured, so all exit node IDs are implicitly allowed wantPrefs: ipn.Prefs{ ControlURL: controlURL, @@ -1059,7 +1058,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: nil, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), exitNodeAllowedIDs: []tailcfg.StableNodeID{ exitNode2.StableID(), // the current exit node ID is allowed }, @@ -1078,7 +1077,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: nil, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), exitNodeAllowedIDs: []tailcfg.StableNodeID{ exitNode1.StableID(), // a different exit node ID; the current one is not allowed }, @@ -1097,7 +1096,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: clientNetmap, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), exitNodeAllowedIDs: []tailcfg.StableNodeID{ exitNode2.StableID(), // a different exit node ID; the current one is not allowed }, @@ -1116,7 +1115,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: clientNetmap, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), // switch to the best 
exit node @@ -1131,7 +1130,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: clientNetmap, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:foo")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:foo")), wantPrefs: ipn.Prefs{ ControlURL: controlURL, ExitNodeID: exitNode1.StableID(), // unknown exit node expressions should work as "any" @@ -1164,8 +1163,8 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: clientNetmap, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), - useExitNodeEnabled: ptr.To(false), // should fail with an error + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + useExitNodeEnabled: new(false), // should fail with an error wantExitNodeToggleErr: errManagedByPolicy, wantPrefs: ipn.Prefs{ ControlURL: controlURL, @@ -1182,7 +1181,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: clientNetmap, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), exitNodeAllowOverride: true, // allow changing the exit node changePrefs: &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ @@ -1204,7 +1203,7 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: clientNetmap, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), exitNodeAllowOverride: true, // allow changing, but not disabling, the exit node changePrefs: &ipn.MaskedPrefs{ Prefs: ipn.Prefs{ @@ -1228,9 +1227,9 @@ func TestConfigureExitNode(t *testing.T) { }, netMap: clientNetmap, report: report, - exitNodeIDPolicy: ptr.To(tailcfg.StableNodeID("auto:any")), - exitNodeAllowOverride: true, // allow changing, but not disabling, the exit node - useExitNodeEnabled: ptr.To(false), // should fail with an error + exitNodeIDPolicy: new(tailcfg.StableNodeID("auto:any")), + exitNodeAllowOverride: true, // allow changing, but not disabling, the exit node + useExitNodeEnabled: 
new(false), // should fail with an error wantExitNodeToggleErr: errManagedByPolicy, wantPrefs: ipn.Prefs{ ControlURL: controlURL, @@ -1992,15 +1991,15 @@ func TestUpdateNetmapDelta(t *testing.T) { }, { NodeID: 2, - Online: ptr.To(true), + Online: new(true), }, { NodeID: 3, - Online: ptr.To(false), + Online: new(false), }, { NodeID: 4, - LastSeen: ptr.To(someTime), + LastSeen: new(someTime), }, }, }, someTime) @@ -2021,17 +2020,17 @@ func TestUpdateNetmapDelta(t *testing.T) { { ID: 2, Key: makeNodeKeyFromID(2), - Online: ptr.To(true), + Online: new(true), }, { ID: 3, Key: makeNodeKeyFromID(3), - Online: ptr.To(false), + Online: new(false), }, { ID: 4, Key: makeNodeKeyFromID(4), - LastSeen: ptr.To(someTime), + LastSeen: new(someTime), }, } for _, want := range wants { @@ -3149,11 +3148,11 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { muts: []*tailcfg.PeerChange{ { NodeID: 1, - Online: ptr.To(true), + Online: new(true), }, { NodeID: 2, - Online: ptr.To(false), // the selected exit node goes offline + Online: new(false), // the selected exit node goes offline }, }, exitNodeIDWant: peer1.StableID(), @@ -3173,11 +3172,11 @@ func TestUpdateNetmapDeltaAutoExitNode(t *testing.T) { muts: []*tailcfg.PeerChange{ { NodeID: 1, - Online: ptr.To(false), // a different exit node goes offline + Online: new(false), // a different exit node goes offline }, { NodeID: 2, - Online: ptr.To(true), + Online: new(true), }, }, exitNodeIDWant: peer2.StableID(), @@ -4326,7 +4325,7 @@ func TestDriveManageShares(t *testing.T) { b.driveSetSharesLocked(tt.existing) } if !tt.disabled { - nm := ptr.To(*b.currentNode().NetMap()) + nm := new(*b.currentNode().NetMap()) self := nm.SelfNode.AsStruct() self.CapMap = tailcfg.NodeCapMap{tailcfg.NodeAttrsTaildriveShare: nil} nm.SelfNode = self.View() @@ -4476,7 +4475,7 @@ func makePeer(id tailcfg.NodeID, opts ...peerOptFunc) tailcfg.NodeView { DiscoKey: makeDiscoKeyFromID(id), StableID: tailcfg.StableNodeID(fmt.Sprintf("stable%d", id)), Name: 
fmt.Sprintf("peer%d", id), - Online: ptr.To(true), + Online: new(true), MachineAuthorized: true, HomeDERP: int(id), } @@ -6399,13 +6398,13 @@ func TestConfigFileReload(t *testing.T) { initial: &conffile.Config{ Parsed: ipn.ConfigVAlpha{ Version: "alpha0", - Hostname: ptr.To("initial-host"), + Hostname: new("initial-host"), }, }, updated: &conffile.Config{ Parsed: ipn.ConfigVAlpha{ Version: "alpha0", - Hostname: ptr.To("updated-host"), + Hostname: new("updated-host"), }, }, checkFn: func(t *testing.T, b *LocalBackend) { @@ -7362,28 +7361,28 @@ func TestStripKeysFromPrefs(t *testing.T) { genNotify := map[string]func() ipn.Notify{ "Notify.Prefs.ж.Persist.PrivateNodeKey": func() ipn.Notify { return ipn.Notify{ - Prefs: ptr.To((&ipn.Prefs{ + Prefs: new((&ipn.Prefs{ Persist: &persist.Persist{PrivateNodeKey: key.NewNode()}, }).View()), } }, "Notify.Prefs.ж.Persist.OldPrivateNodeKey": func() ipn.Notify { return ipn.Notify{ - Prefs: ptr.To((&ipn.Prefs{ + Prefs: new((&ipn.Prefs{ Persist: &persist.Persist{OldPrivateNodeKey: key.NewNode()}, }).View()), } }, "Notify.Prefs.ж.Persist.NetworkLockKey": func() ipn.Notify { return ipn.Notify{ - Prefs: ptr.To((&ipn.Prefs{ + Prefs: new((&ipn.Prefs{ Persist: &persist.Persist{NetworkLockKey: key.NewNLPrivate()}, }).View()), } }, "Notify.Prefs.ж.Persist.AttestationKey": func() ipn.Notify { return ipn.Notify{ - Prefs: ptr.To((&ipn.Prefs{ + Prefs: new((&ipn.Prefs{ Persist: &persist.Persist{AttestationKey: new(fakeAttestationKey)}, }).View()), } diff --git a/ipn/ipnlocal/netstack.go b/ipn/ipnlocal/netstack.go index b331d93e329de..eac9568b7f765 100644 --- a/ipn/ipnlocal/netstack.go +++ b/ipn/ipnlocal/netstack.go @@ -11,7 +11,6 @@ import ( "time" "gvisor.dev/gvisor/pkg/tcpip" - "tailscale.com/types/ptr" ) // TCPHandlerForDst returns a TCP handler for connections to dst, or nil if @@ -52,7 +51,7 @@ func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c // tell the difference between a long lived connection that is idle 
// vs a connection that is dead because the peer has gone away. // We pick 72h as that is typically sufficient for a long weekend. - opts = append(opts, ptr.To(tcpip.KeepaliveIdleOption(72*time.Hour))) + opts = append(opts, new(tcpip.KeepaliveIdleOption(72*time.Hour))) return b.handleSSHConn, opts } // TODO(will,sonia): allow customizing web client port ? diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index b70d71cb934f2..fcc45097c7b2b 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -24,7 +24,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" @@ -414,7 +413,7 @@ func (nb *nodeBackend) netMapWithPeers() *netmap.NetworkMap { if nb.netMap == nil { return nil } - nm := ptr.To(*nb.netMap) // shallow clone + nm := new(*nb.netMap) // shallow clone nm.Peers = slicesx.MapValues(nb.peers) slices.SortFunc(nm.Peers, func(a, b tailcfg.NodeView) int { return cmp.Compare(a.ID(), b.ID()) diff --git a/ipn/ipnlocal/node_backend_test.go b/ipn/ipnlocal/node_backend_test.go index f1f38dae6aee1..ca61624b8419b 100644 --- a/ipn/ipnlocal/node_backend_test.go +++ b/ipn/ipnlocal/node_backend_test.go @@ -12,7 +12,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" "tailscale.com/util/eventbus" ) @@ -146,7 +145,7 @@ func TestNodeBackendReachability(t *testing.T) { name: "disabled/offline", cap: false, peer: tailcfg.Node{ - Online: ptr.To(false), + Online: new(false), }, want: false, }, @@ -154,7 +153,7 @@ func TestNodeBackendReachability(t *testing.T) { name: "disabled/online", cap: false, peer: tailcfg.Node{ - Online: ptr.To(true), + Online: new(true), }, want: true, }, @@ -162,7 +161,7 @@ func TestNodeBackendReachability(t *testing.T) { name: "enabled/offline", cap: true, peer: tailcfg.Node{ - Online: 
ptr.To(false), + Online: new(false), }, want: true, }, @@ -170,7 +169,7 @@ func TestNodeBackendReachability(t *testing.T) { name: "enabled/online", cap: true, peer: tailcfg.Node{ - Online: ptr.To(true), + Online: new(true), }, want: true, }, diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index 9aa9c4c015f23..45a8d622d3e73 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -16,7 +16,6 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/lapitest" "tailscale.com/tsd" - "tailscale.com/types/ptr" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policytest" ) @@ -49,7 +48,7 @@ func TestUserConnectDisconnectNonWindows(t *testing.T) { // And if we send a notification, both users should receive it. wantErrMessage := "test error" - testNotify := ipn.Notify{ErrMessage: ptr.To(wantErrMessage)} + testNotify := ipn.Notify{ErrMessage: new(wantErrMessage)} server.Backend().DebugNotify(testNotify) if n, err := watcherA.Next(); err != nil { @@ -274,12 +273,12 @@ func TestShutdownViaLocalAPI(t *testing.T) { }, { name: "AllowTailscaledRestart/False", - allowTailscaledRestart: ptr.To(false), + allowTailscaledRestart: new(false), wantErr: errAccessDeniedByPolicy, }, { name: "AllowTailscaledRestart/True", - allowTailscaledRestart: ptr.To(true), + allowTailscaledRestart: new(true), wantErr: nil, // shutdown should be allowed }, } diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index 4d219d131d528..17e6ac870bead 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -20,7 +20,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tka" "tailscale.com/types/key" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/version" @@ -535,7 +534,7 @@ func (sb *StatusBuilder) AddPeer(peer key.NodePublic, st *PeerStatus) { e.Expired = true } if t := st.KeyExpiry; t != nil { - e.KeyExpiry = ptr.To(*t) + e.KeyExpiry = new(*t) } if v := st.CapMap; v != nil 
{ e.CapMap = v diff --git a/ipn/lapitest/server.go b/ipn/lapitest/server.go index 8fd3c8cdd361f..2686682af15c9 100644 --- a/ipn/lapitest/server.go +++ b/ipn/lapitest/server.go @@ -22,7 +22,6 @@ import ( "tailscale.com/ipn/ipnserver" "tailscale.com/types/logger" "tailscale.com/types/logid" - "tailscale.com/types/ptr" "tailscale.com/util/mak" "tailscale.com/util/rands" ) @@ -153,7 +152,7 @@ func (s *Server) MakeTestActor(name string, clientID string) *ipnauth.TestActor } // Create a shallow copy of the base actor and assign it the new client ID. - actor := ptr.To(*baseActor) + actor := new(*baseActor) actor.CID = ipnauth.ClientIDFrom(clientID) return actor } diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index ed25e875da409..5eec66e64f8f4 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -43,7 +43,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" - "tailscale.com/types/ptr" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/httpm" @@ -845,8 +844,8 @@ func InUseOtherUserIPNStream(w http.ResponseWriter, r *http.Request, err error) } js, err := json.Marshal(&ipn.Notify{ Version: version.Long(), - State: ptr.To(ipn.InUseOtherUser), - ErrMessage: ptr.To(err.Error()), + State: new(ipn.InUseOtherUser), + ErrMessage: new(err.Error()), }) if err != nil { return false diff --git a/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go index 0541a5cf3691b..b4c311046bc7c 100644 --- a/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go +++ b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go @@ -24,7 +24,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" - "tailscale.com/types/ptr" "tailscale.com/util/set" ) @@ -243,7 +242,7 @@ func (r *Reconciler) generateIngressPolicy(ctx context.Context, namespace string 
ResourceVersion: policy.ResourceVersion, }, Spec: admr.ValidatingAdmissionPolicySpec{ - FailurePolicy: ptr.To(admr.Fail), + FailurePolicy: new(admr.Fail), MatchConstraints: &admr.MatchResources{ // The operator allows ingress via Ingress resources & Service resources (that use the "tailscale" load // balancer class), so we have two resource rules here with multiple validation expressions that attempt @@ -304,7 +303,7 @@ func (r *Reconciler) generateEgressPolicy(ctx context.Context, namespace string, ResourceVersion: policy.ResourceVersion, }, Spec: admr.ValidatingAdmissionPolicySpec{ - FailurePolicy: ptr.To(admr.Fail), + FailurePolicy: new(admr.Fail), MatchConstraints: &admr.MatchResources{ ResourceRules: []admr.NamedRuleWithOperations{ { diff --git a/kube/k8s-proxy/conf/conf_test.go b/kube/k8s-proxy/conf/conf_test.go index 4034bf3cb7752..0c26b4242e92f 100644 --- a/kube/k8s-proxy/conf/conf_test.go +++ b/kube/k8s-proxy/conf/conf_test.go @@ -10,7 +10,6 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "tailscale.com/types/ptr" ) // Test that the config file can be at the root of the object, or in a versioned sub-object. @@ -23,17 +22,17 @@ func TestVersionedConfig(t *testing.T) { }{ "root_config_v1alpha1": { inputConfig: `{"version": "v1alpha1", "authKey": "abc123"}`, - expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")}, + expectedConfig: ConfigV1Alpha1{AuthKey: new("abc123")}, }, "backwards_compat_v1alpha1_config": { // Client doesn't know about v1beta1, so it should read in v1alpha1. inputConfig: `{"version": "v1beta1", "beta-key": "beta-value", "authKey": "def456", "v1alpha1": {"authKey": "abc123"}}`, - expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")}, + expectedConfig: ConfigV1Alpha1{AuthKey: new("abc123")}, }, "unknown_key_allowed": { // Adding new keys to the config doesn't require a version bump. 
inputConfig: `{"version": "v1alpha1", "unknown-key": "unknown-value", "authKey": "abc123"}`, - expectedConfig: ConfigV1Alpha1{AuthKey: ptr.To("abc123")}, + expectedConfig: ConfigV1Alpha1{AuthKey: new("abc123")}, }, "version_only_no_authkey": { inputConfig: `{"version": "v1alpha1"}`, diff --git a/net/packet/geneve_test.go b/net/packet/geneve_test.go index bd673cd0d963a..43a64efde0e80 100644 --- a/net/packet/geneve_test.go +++ b/net/packet/geneve_test.go @@ -9,7 +9,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "tailscale.com/types/ptr" ) func TestGeneveHeader(t *testing.T) { @@ -47,22 +46,22 @@ func TestVirtualNetworkID(t *testing.T) { }, { "Set 0", - ptr.To(uint32(0)), + new(uint32(0)), 0, }, { "Set 1", - ptr.To(uint32(1)), + new(uint32(1)), 1, }, { "Set math.MaxUint32", - ptr.To(uint32(math.MaxUint32)), + new(uint32(math.MaxUint32)), 1<<24 - 1, }, { "Set max 3-byte value", - ptr.To(uint32(1<<24 - 1)), + new(uint32(1<<24 - 1)), 1<<24 - 1, }, } diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index 1744fc30266a9..bd29489a83d22 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -34,7 +34,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netlogtype" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" @@ -655,9 +654,9 @@ func TestPeerCfg_NAT(t *testing.T) { }, } if masqIP.Is4() { - p.V4MasqAddr = ptr.To(masqIP) + p.V4MasqAddr = new(masqIP) } else { - p.V6MasqAddr = ptr.To(masqIP) + p.V6MasqAddr = new(masqIP) } p.AllowedIPs = append(p.AllowedIPs, otherAllowedIPs...) 
return p diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 6d9d859a22d91..df80235006353 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -51,7 +51,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logid" "tailscale.com/types/netmap" - "tailscale.com/types/ptr" "tailscale.com/util/cibuild" "tailscale.com/util/lineiter" "tailscale.com/util/must" @@ -96,7 +95,7 @@ func TestMatchRule(t *testing.T) { name: "expired", rule: &tailcfg.SSHRule{ Action: someAction, - RuleExpires: ptr.To(time.Unix(100, 0)), + RuleExpires: new(time.Unix(100, 0)), }, ci: &sshConnInfo{}, wantErr: errRuleExpired, diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index a60f301d763c7..1911707235b87 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -13,7 +13,6 @@ import ( "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/structs" "tailscale.com/types/tkatype" ) @@ -53,10 +52,10 @@ func (src *Node) Clone() *Node { dst.Tags = append(src.Tags[:0:0], src.Tags...) dst.PrimaryRoutes = append(src.PrimaryRoutes[:0:0], src.PrimaryRoutes...) if dst.LastSeen != nil { - dst.LastSeen = ptr.To(*src.LastSeen) + dst.LastSeen = new(*src.LastSeen) } if dst.Online != nil { - dst.Online = ptr.To(*src.Online) + dst.Online = new(*src.Online) } dst.Capabilities = append(src.Capabilities[:0:0], src.Capabilities...) 
if dst.CapMap != nil { @@ -66,10 +65,10 @@ func (src *Node) Clone() *Node { } } if dst.SelfNodeV4MasqAddrForThisPeer != nil { - dst.SelfNodeV4MasqAddrForThisPeer = ptr.To(*src.SelfNodeV4MasqAddrForThisPeer) + dst.SelfNodeV4MasqAddrForThisPeer = new(*src.SelfNodeV4MasqAddrForThisPeer) } if dst.SelfNodeV6MasqAddrForThisPeer != nil { - dst.SelfNodeV6MasqAddrForThisPeer = ptr.To(*src.SelfNodeV6MasqAddrForThisPeer) + dst.SelfNodeV6MasqAddrForThisPeer = new(*src.SelfNodeV6MasqAddrForThisPeer) } if src.ExitNodeDNSResolvers != nil { dst.ExitNodeDNSResolvers = make([]*dnstype.Resolver, len(src.ExitNodeDNSResolvers)) @@ -139,10 +138,10 @@ func (src *Hostinfo) Clone() *Hostinfo { dst.NetInfo = src.NetInfo.Clone() dst.SSH_HostKeys = append(src.SSH_HostKeys[:0:0], src.SSH_HostKeys...) if dst.Location != nil { - dst.Location = ptr.To(*src.Location) + dst.Location = new(*src.Location) } if dst.TPM != nil { - dst.TPM = ptr.To(*src.TPM) + dst.TPM = new(*src.TPM) } return dst } @@ -331,7 +330,7 @@ func (src *RegisterResponseAuth) Clone() *RegisterResponseAuth { dst := new(RegisterResponseAuth) *dst = *src if dst.Oauth2Token != nil { - dst.Oauth2Token = ptr.To(*src.Oauth2Token) + dst.Oauth2Token = new(*src.Oauth2Token) } return dst } @@ -355,7 +354,7 @@ func (src *RegisterRequest) Clone() *RegisterRequest { dst.Hostinfo = src.Hostinfo.Clone() dst.NodeKeySignature = append(src.NodeKeySignature[:0:0], src.NodeKeySignature...) if dst.Timestamp != nil { - dst.Timestamp = ptr.To(*src.Timestamp) + dst.Timestamp = new(*src.Timestamp) } dst.DeviceCert = append(src.DeviceCert[:0:0], src.DeviceCert...) dst.Signature = append(src.Signature[:0:0], src.Signature...) 
@@ -413,7 +412,7 @@ func (src *DERPRegion) Clone() *DERPRegion { if src.Nodes[i] == nil { dst.Nodes[i] = nil } else { - dst.Nodes[i] = ptr.To(*src.Nodes[i]) + dst.Nodes[i] = new(*src.Nodes[i]) } } } @@ -497,7 +496,7 @@ func (src *SSHRule) Clone() *SSHRule { dst := new(SSHRule) *dst = *src if dst.RuleExpires != nil { - dst.RuleExpires = ptr.To(*src.RuleExpires) + dst.RuleExpires = new(*src.RuleExpires) } if src.Principals != nil { dst.Principals = make([]*SSHPrincipal, len(src.Principals)) @@ -534,7 +533,7 @@ func (src *SSHAction) Clone() *SSHAction { *dst = *src dst.Recorders = append(src.Recorders[:0:0], src.Recorders...) if dst.OnRecordingFailure != nil { - dst.OnRecordingFailure = ptr.To(*src.OnRecordingFailure) + dst.OnRecordingFailure = new(*src.OnRecordingFailure) } return dst } diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index f649e43ab57b8..9ed7c1e147a7f 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -17,7 +17,6 @@ import ( "tailscale.com/tstest/deptest" "tailscale.com/types/key" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/util/must" ) @@ -539,22 +538,22 @@ func TestNodeEqual(t *testing.T) { }, { &Node{}, - &Node{SelfNodeV4MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("100.64.0.1"))}, + &Node{SelfNodeV4MasqAddrForThisPeer: new(netip.MustParseAddr("100.64.0.1"))}, false, }, { - &Node{SelfNodeV4MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("100.64.0.1"))}, - &Node{SelfNodeV4MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("100.64.0.1"))}, + &Node{SelfNodeV4MasqAddrForThisPeer: new(netip.MustParseAddr("100.64.0.1"))}, + &Node{SelfNodeV4MasqAddrForThisPeer: new(netip.MustParseAddr("100.64.0.1"))}, true, }, { &Node{}, - &Node{SelfNodeV6MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("2001::3456"))}, + &Node{SelfNodeV6MasqAddrForThisPeer: new(netip.MustParseAddr("2001::3456"))}, false, }, { - &Node{SelfNodeV6MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("2001::3456"))}, - 
&Node{SelfNodeV6MasqAddrForThisPeer: ptr.To(netip.MustParseAddr("2001::3456"))}, + &Node{SelfNodeV6MasqAddrForThisPeer: new(netip.MustParseAddr("2001::3456"))}, + &Node{SelfNodeV6MasqAddrForThisPeer: new(netip.MustParseAddr("2001::3456"))}, true, }, { diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index cb6b6996b7a87..9da63feb4bb0c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -323,7 +323,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/opt from tailscale.com/control/controlknobs+ tailscale.com/types/persist from tailscale.com/control/controlclient+ tailscale.com/types/preftype from tailscale.com/ipn+ - tailscale.com/types/ptr from tailscale.com/control/controlclient+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 779cba6290cfe..2d21942789858 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -50,7 +50,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/util/must" "tailscale.com/util/set" ) @@ -730,8 +729,8 @@ func TestConfigFileAuthKey(t *testing.T) { must.Do(os.WriteFile(authKeyFile, fmt.Appendf(nil, "%s\n", authKey), 0666)) must.Do(os.WriteFile(n1.configFile, must.Get(json.Marshal(ipn.ConfigVAlpha{ Version: "alpha0", - AuthKey: ptr.To("file:" + authKeyFile), - ServerURL: ptr.To(n1.env.ControlServer.URL), + AuthKey: new("file:" + authKeyFile), + ServerURL: new(n1.env.ControlServer.URL), })), 0644)) d1 := n1.StartDaemon() @@ -2232,7 +2231,7 @@ func TestC2NDebugNetmap(t *testing.T) { // Send a delta update to n1, marking node 0 as online. 
env.Control.AddRawMapResponse(nodes[1].Key, &tailcfg.MapResponse{ PeersChangedPatch: []*tailcfg.PeerChange{{ - NodeID: nodes[0].ID, Online: ptr.To(true), + NodeID: nodes[0].ID, Online: new(true), }}, }) diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 1e24414903ae9..8bfe446ad5ee2 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -38,7 +38,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/must" @@ -1337,9 +1336,9 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, } if masqIP := nodeMasqs[p.Key]; masqIP.IsValid() { if masqIP.Is6() { - p.SelfNodeV6MasqAddrForThisPeer = ptr.To(masqIP) + p.SelfNodeV6MasqAddrForThisPeer = new(masqIP) } else { - p.SelfNodeV4MasqAddrForThisPeer = ptr.To(masqIP) + p.SelfNodeV4MasqAddrForThisPeer = new(masqIP) } } p.IsJailed = jailed[p.Key] diff --git a/tstest/reflect.go b/tstest/reflect.go index 22903e7e9fca2..4ba1f96c39666 100644 --- a/tstest/reflect.go +++ b/tstest/reflect.go @@ -8,8 +8,6 @@ import ( "reflect" "testing" "time" - - "tailscale.com/types/ptr" ) // IsZeroable is the interface for things with an IsZero method. 
@@ -60,7 +58,7 @@ func CheckIsZero[T IsZeroable](t testing.TB, nonzeroValues map[reflect.Type]any) case timeType: return reflect.ValueOf(time.Unix(1704067200, 0)) case timePtrType: - return reflect.ValueOf(ptr.To(time.Unix(1704067200, 0))) + return reflect.ValueOf(new(time.Unix(1704067200, 0))) } switch ty.Kind() { diff --git a/types/jsonx/json_test.go b/types/jsonx/json_test.go index 5c302d9746c3e..8b0abbab64686 100644 --- a/types/jsonx/json_test.go +++ b/types/jsonx/json_test.go @@ -10,7 +10,6 @@ import ( "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" "github.com/google/go-cmp/cmp" - "tailscale.com/types/ptr" ) type Interface interface { @@ -72,7 +71,7 @@ func TestInterfaceCoders(t *testing.T) { wantJSON: `{"Foo":"hello"}`, }, { label: "BarPointer", - wantVal: InterfaceWrapper{ptr.To(Bar(5))}, + wantVal: InterfaceWrapper{new(Bar(5))}, wantJSON: `{"Bar":5}`, }, { label: "BarValue", diff --git a/types/lazy/deferred.go b/types/lazy/deferred.go index 582090ab93112..6e96f61e7af04 100644 --- a/types/lazy/deferred.go +++ b/types/lazy/deferred.go @@ -6,8 +6,6 @@ package lazy import ( "sync" "sync/atomic" - - "tailscale.com/types/ptr" ) // DeferredInit allows one or more funcs to be deferred @@ -91,7 +89,7 @@ func (d *DeferredInit) doSlow() (err *error) { }() for _, f := range d.funcs { if err := f(); err != nil { - return ptr.To(err) + return new(err) } } return nilErrPtr diff --git a/types/lazy/lazy.go b/types/lazy/lazy.go index 915ae2002c135..a24139fe1ab07 100644 --- a/types/lazy/lazy.go +++ b/types/lazy/lazy.go @@ -7,13 +7,11 @@ package lazy import ( "sync" "sync/atomic" - - "tailscale.com/types/ptr" ) // nilErrPtr is a sentinel *error value for SyncValue.err to signal // that SyncValue.v is valid. -var nilErrPtr = ptr.To[error](nil) +var nilErrPtr = new(error(nil)) // SyncValue is a lazily computed value. 
// @@ -80,7 +78,7 @@ func (z *SyncValue[T]) GetErr(fill func() (T, error)) (T, error) { // Update z.err after z.v; see field docs. if err != nil { - z.err.Store(ptr.To(err)) + z.err.Store(new(err)) } else { z.err.Store(nilErrPtr) } @@ -145,7 +143,7 @@ func (z *SyncValue[T]) SetForTest(tb testing_TB, val T, err error) { z.v = val if err != nil { - z.err.Store(ptr.To(err)) + z.err.Store(new(err)) } else { z.err.Store(nilErrPtr) } diff --git a/types/netmap/nodemut.go b/types/netmap/nodemut.go index 5c9000d56ef38..901296b1fc337 100644 --- a/types/netmap/nodemut.go +++ b/types/netmap/nodemut.go @@ -12,7 +12,6 @@ import ( "time" "tailscale.com/tailcfg" - "tailscale.com/types/ptr" ) // NodeMutation is the common interface for types that describe @@ -55,7 +54,7 @@ type NodeMutationOnline struct { } func (m NodeMutationOnline) Apply(n *tailcfg.Node) { - n.Online = ptr.To(m.Online) + n.Online = new(m.Online) } // NodeMutationLastSeen is a NodeMutation that says a node's LastSeen @@ -66,14 +65,14 @@ type NodeMutationLastSeen struct { } func (m NodeMutationLastSeen) Apply(n *tailcfg.Node) { - n.LastSeen = ptr.To(m.LastSeen) + n.LastSeen = new(m.LastSeen) } var peerChangeFields = sync.OnceValue(func() []reflect.StructField { var fields []reflect.StructField rt := reflect.TypeFor[tailcfg.PeerChange]() - for i := range rt.NumField() { - fields = append(fields, rt.Field(i)) + for field := range rt.Fields() { + fields = append(fields, field) } return fields }) diff --git a/types/netmap/nodemut_test.go b/types/netmap/nodemut_test.go index f7302d48df097..a03dee49c7a76 100644 --- a/types/netmap/nodemut_test.go +++ b/types/netmap/nodemut_test.go @@ -14,7 +14,6 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/types/opt" - "tailscale.com/types/ptr" ) // tests mapResponseContainsNonPatchFields @@ -117,7 +116,7 @@ func TestMutationsFromMapResponse(t *testing.T) { name: "patch-online", mr: fromChanges(&tailcfg.PeerChange{ NodeID: 1, - Online: ptr.To(true), + 
Online: new(true), }), want: muts(NodeMutationOnline{1, true}), }, @@ -125,7 +124,7 @@ func TestMutationsFromMapResponse(t *testing.T) { name: "patch-online-false", mr: fromChanges(&tailcfg.PeerChange{ NodeID: 1, - Online: ptr.To(false), + Online: new(false), }), want: muts(NodeMutationOnline{1, false}), }, @@ -133,7 +132,7 @@ func TestMutationsFromMapResponse(t *testing.T) { name: "patch-lastseen", mr: fromChanges(&tailcfg.PeerChange{ NodeID: 1, - LastSeen: ptr.To(time.Unix(12345, 0)), + LastSeen: new(time.Unix(12345, 0)), }), want: muts(NodeMutationLastSeen{1, time.Unix(12345, 0)}), }, diff --git a/types/prefs/item.go b/types/prefs/item.go index fdb9301f9fdf8..564e8ffde7d0f 100644 --- a/types/prefs/item.go +++ b/types/prefs/item.go @@ -9,7 +9,6 @@ import ( jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/must" ) @@ -47,7 +46,7 @@ func (i *Item[T]) SetManagedValue(val T) { // It is a runtime error to call [Item.Clone] if T contains pointers // but does not implement [views.Cloner]. func (i Item[T]) Clone() *Item[T] { - res := ptr.To(i) + res := new(i) if v, ok := i.ValueOk(); ok { res.s.Value.Set(must.Get(deepClone(v))) } diff --git a/types/prefs/list.go b/types/prefs/list.go index 20e4dad463135..c6881991ad769 100644 --- a/types/prefs/list.go +++ b/types/prefs/list.go @@ -12,7 +12,6 @@ import ( "github.com/go-json-experiment/json/jsontext" "golang.org/x/exp/constraints" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -62,7 +61,7 @@ func (ls *List[T]) View() ListView[T] { // Clone returns a copy of l that aliases no memory with l. 
func (ls List[T]) Clone() *List[T] { - res := ptr.To(ls) + res := new(ls) if v, ok := ls.s.Value.GetOk(); ok { res.s.Value.Set(append(v[:0:0], v...)) } diff --git a/types/prefs/map.go b/types/prefs/map.go index 6bf1948b87ab4..07cb84f0da56a 100644 --- a/types/prefs/map.go +++ b/types/prefs/map.go @@ -11,7 +11,6 @@ import ( "github.com/go-json-experiment/json/jsontext" "golang.org/x/exp/constraints" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -44,7 +43,7 @@ func (m *Map[K, V]) View() MapView[K, V] { // Clone returns a copy of m that aliases no memory with m. func (m Map[K, V]) Clone() *Map[K, V] { - res := ptr.To(m) + res := new(m) if v, ok := m.s.Value.GetOk(); ok { res.s.Value.Set(maps.Clone(v)) } diff --git a/types/prefs/prefs_clone_test.go b/types/prefs/prefs_clone_test.go index 07dc24fdc7361..1914a0c2551f6 100644 --- a/types/prefs/prefs_clone_test.go +++ b/types/prefs/prefs_clone_test.go @@ -7,8 +7,6 @@ package prefs import ( "net/netip" - - "tailscale.com/types/ptr" ) // Clone makes a deep copy of TestPrefs. @@ -67,7 +65,7 @@ func (src *TestBundle) Clone() *TestBundle { dst := new(TestBundle) *dst = *src if dst.Nested != nil { - dst.Nested = ptr.To(*src.Nested) + dst.Nested = new(*src.Nested) } return dst } diff --git a/types/prefs/struct_list.go b/types/prefs/struct_list.go index 09aa808ccc37e..e1c1863fc5dc1 100644 --- a/types/prefs/struct_list.go +++ b/types/prefs/struct_list.go @@ -11,7 +11,6 @@ import ( jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -45,7 +44,7 @@ func (ls *StructList[T]) SetManagedValue(val []T) { // Clone returns a copy of l that aliases no memory with l. 
func (ls StructList[T]) Clone() *StructList[T] { - res := ptr.To(ls) + res := new(ls) if v, ok := ls.s.Value.GetOk(); ok { res.s.Value.Set(deepCloneSlice(v)) } diff --git a/types/prefs/struct_map.go b/types/prefs/struct_map.go index 2f2715a62a94a..374d8a92ee925 100644 --- a/types/prefs/struct_map.go +++ b/types/prefs/struct_map.go @@ -9,7 +9,6 @@ import ( jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" "tailscale.com/types/opt" - "tailscale.com/types/ptr" "tailscale.com/types/views" ) @@ -43,7 +42,7 @@ func (m *StructMap[K, V]) SetManagedValue(val map[K]V) { // Clone returns a copy of m that aliases no memory with m. func (m StructMap[K, V]) Clone() *StructMap[K, V] { - res := ptr.To(m) + res := new(m) if v, ok := m.s.Value.GetOk(); ok { res.s.Value.Set(deepCloneMap(v)) } diff --git a/types/ptr/ptr.go b/types/ptr/ptr.go index 5b65a0e1c13e7..ba2b9e5857e8f 100644 --- a/types/ptr/ptr.go +++ b/types/ptr/ptr.go @@ -2,9 +2,18 @@ // SPDX-License-Identifier: BSD-3-Clause // Package ptr contains the ptr.To function. +// +// Deprecated: Use Go 1.26's new(value) expression instead. +// See https://go.dev/doc/go1.26#language. package ptr // To returns a pointer to a shallow copy of v. +// +// Deprecated: Use Go 1.26's new(value) expression instead. +// For example, ptr.To(42) can be written as new(42). +// See https://go.dev/doc/go1.26#language. +// +//go:fix inline func To[T any](v T) *T { - return &v + return new(v) } diff --git a/types/views/views.go b/types/views/views.go index 9260311edc29a..4e17ac952ab49 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -19,7 +19,6 @@ import ( jsonv2 "github.com/go-json-experiment/json" "github.com/go-json-experiment/json/jsontext" "go4.org/mem" - "tailscale.com/types/ptr" ) // ByteSlice is a read-only accessor for types that are backed by a []byte. 
@@ -901,7 +900,7 @@ func (p ValuePointer[T]) Clone() *T { if p.ж == nil { return nil } - return ptr.To(*p.ж) + return new(*p.ж) } // String implements [fmt.Stringer]. diff --git a/util/deephash/deephash_test.go b/util/deephash/deephash_test.go index c50d70bc6ed7f..ace285b6de5ee 100644 --- a/util/deephash/deephash_test.go +++ b/util/deephash/deephash_test.go @@ -24,7 +24,6 @@ import ( "go4.org/netipx" "tailscale.com/tailcfg" "tailscale.com/types/key" - "tailscale.com/types/ptr" "tailscale.com/util/deephash/testtype" "tailscale.com/util/hashx" "tailscale.com/version" @@ -382,7 +381,7 @@ func TestGetTypeHasher(t *testing.T) { }, { name: "time_ptr", // addressable, as opposed to "time" test above - val: ptr.To(time.Unix(1234, 5678).In(time.UTC)), + val: new(time.Unix(1234, 5678).In(time.UTC)), out: u8(1) + u64(1234) + u32(5678) + u32(0), }, { @@ -412,7 +411,7 @@ func TestGetTypeHasher(t *testing.T) { }, { name: "array_ptr_memhash", - val: ptr.To([4]byte{1, 2, 3, 4}), + val: new([4]byte{1, 2, 3, 4}), out: "\x01\x01\x02\x03\x04", }, { @@ -640,7 +639,7 @@ var filterRules = []tailcfg.FilterRule{ SrcIPs: []string{"*", "10.1.3.4/32", "10.0.0.0/24"}, DstPorts: []tailcfg.NetPortRange{{ IP: "1.2.3.4/32", - Bits: ptr.To(32), + Bits: new(32), Ports: tailcfg.PortRange{First: 1, Last: 2}, }}, IPProto: []int{1, 2, 3, 4}, @@ -823,7 +822,7 @@ func TestHashThroughView(t *testing.T) { SSHPolicy: &sshPolicyOut{ Rules: []tailcfg.SSHRuleView{ (&tailcfg.SSHRule{ - RuleExpires: ptr.To(time.Unix(123, 0)), + RuleExpires: new(time.Unix(123, 0)), }).View(), }, }, diff --git a/util/linuxfw/nftables_runner.go b/util/linuxfw/nftables_runner.go index 7496e7034a98a..cdb1c5bfbbfb2 100644 --- a/util/linuxfw/nftables_runner.go +++ b/util/linuxfw/nftables_runner.go @@ -20,7 +20,6 @@ import ( "golang.org/x/sys/unix" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" - "tailscale.com/types/ptr" ) const ( @@ -955,7 +954,7 @@ const ( // via netfilter via nftables, as a last resort measure to detect that 
nftables // can be used. It cleans up the dummy chains after creation. func (n *nftablesRunner) createDummyPostroutingChains() (retErr error) { - polAccept := ptr.To(nftables.ChainPolicyAccept) + polAccept := new(nftables.ChainPolicyAccept) for _, table := range n.getTables() { nat, err := createTableIfNotExist(n.conn, table.Proto, tsDummyTableName) if err != nil { diff --git a/util/pool/pool.go b/util/pool/pool.go index 7042fb893a59e..2e223e57713eb 100644 --- a/util/pool/pool.go +++ b/util/pool/pool.go @@ -12,8 +12,6 @@ package pool import ( "fmt" "math/rand/v2" - - "tailscale.com/types/ptr" ) // consistencyCheck enables additional runtime checks to ensure that the pool @@ -77,7 +75,7 @@ func (p *Pool[V]) AppendTakeAll(dst []V) []V { func (p *Pool[V]) Add(item V) Handle[V] { // Store the index in a pointer, so that we can pass it to both the // handle and store it in the itemAndIndex. - idx := ptr.To(len(p.s)) + idx := new(len(p.s)) p.s = append(p.s, itemAndIndex[V]{ item: item, index: idx, diff --git a/util/syspolicy/setting/errors.go b/util/syspolicy/setting/errors.go index 655018d4b5aff..c8e0d8121ec2a 100644 --- a/util/syspolicy/setting/errors.go +++ b/util/syspolicy/setting/errors.go @@ -5,8 +5,6 @@ package setting import ( "errors" - - "tailscale.com/types/ptr" ) var ( @@ -39,7 +37,7 @@ type ErrorText string // NewErrorText returns a [ErrorText] with the specified error message. func NewErrorText(text string) *ErrorText { - return ptr.To(ErrorText(text)) + return new(ErrorText(text)) } // MaybeErrorText returns an [ErrorText] with the text of the specified error, @@ -51,7 +49,7 @@ func MaybeErrorText(err error) *ErrorText { if err, ok := err.(*ErrorText); ok { return err } - return ptr.To(ErrorText(err.Error())) + return new(ErrorText(err.Error())) } // Error implements error. 
diff --git a/util/syspolicy/setting/setting_test.go b/util/syspolicy/setting/setting_test.go index 3ccd2ef606c50..885491b679ba8 100644 --- a/util/syspolicy/setting/setting_test.go +++ b/util/syspolicy/setting/setting_test.go @@ -9,7 +9,6 @@ import ( "testing" "tailscale.com/types/lazy" - "tailscale.com/types/ptr" "tailscale.com/util/syspolicy/internal" "tailscale.com/util/syspolicy/pkey" ) @@ -138,7 +137,7 @@ func TestSettingDefinition(t *testing.T) { if !tt.setting.Equal(tt.setting) { t.Errorf("the setting should be equal to itself") } - if tt.setting != nil && !tt.setting.Equal(ptr.To(*tt.setting)) { + if tt.setting != nil && !tt.setting.Equal(new(*tt.setting)) { t.Errorf("the setting should be equal to its shallow copy") } if gotKey := tt.setting.Key(); gotKey != tt.wantKey { diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 9d6cae87bdcc6..dfd9d395d00f9 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -63,7 +63,6 @@ import ( "tailscale.com/types/netlogtype" "tailscale.com/types/netmap" "tailscale.com/types/nettype" - "tailscale.com/types/ptr" "tailscale.com/types/views" "tailscale.com/util/cibuild" "tailscale.com/util/clientmetric" @@ -2309,7 +2308,7 @@ func TestIsWireGuardOnlyPeerWithMasquerade(t *testing.T) { IsWireGuardOnly: true, Addresses: []netip.Prefix{wgaip}, AllowedIPs: []netip.Prefix{wgaip}, - SelfNodeV4MasqAddrForThisPeer: ptr.To(masqip.Addr()), + SelfNodeV4MasqAddrForThisPeer: new(masqip.Addr()), }, }), } diff --git a/wgengine/wgcfg/wgcfg_clone.go b/wgengine/wgcfg/wgcfg_clone.go index 5c771a2288fce..9e8de7b6f10c0 100644 --- a/wgengine/wgcfg/wgcfg_clone.go +++ b/wgengine/wgcfg/wgcfg_clone.go @@ -10,7 +10,6 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logid" - "tailscale.com/types/ptr" ) // Clone makes a deep copy of Config. 
@@ -56,10 +55,10 @@ func (src *Peer) Clone() *Peer { *dst = *src dst.AllowedIPs = append(src.AllowedIPs[:0:0], src.AllowedIPs...) if dst.V4MasqAddr != nil { - dst.V4MasqAddr = ptr.To(*src.V4MasqAddr) + dst.V4MasqAddr = new(*src.V4MasqAddr) } if dst.V6MasqAddr != nil { - dst.V6MasqAddr = ptr.To(*src.V6MasqAddr) + dst.V6MasqAddr = new(*src.V6MasqAddr) } return dst } From bb45b2ebbd191346a214fe5c130a19f1a48f9c3a Mon Sep 17 00:00:00 2001 From: Kristoffer Dalby Date: Fri, 6 Mar 2026 12:53:11 +0100 Subject: [PATCH 1062/1093] nix: update flakes to get a nixpkgs version with go 1.26 We override 1.26, but its not in the old commit we are tracking. Updates #18682 Signed-off-by: Kristoffer Dalby --- flake.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flake.lock b/flake.lock index 1623342c62407..243188e431835 100644 --- a/flake.lock +++ b/flake.lock @@ -3,11 +3,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1696426674, - "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "lastModified": 1767039857, + "narHash": "sha256-vNpUSpF5Nuw8xvDLj2KCwwksIbjua2LZCqhV1LNRDns=", "owner": "edolstra", "repo": "flake-compat", - "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "rev": "5edf11c44bc78a0d334f6334cdaf7d60d732daab", "type": "github" }, "original": { @@ -18,11 +18,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1753151930, - "narHash": "sha256-XSQy6wRKHhRe//iVY5lS/ZpI/Jn6crWI8fQzl647wCg=", + "lastModified": 1772736753, + "narHash": "sha256-au/m3+EuBLoSzWUCb64a/MZq6QUtOV8oC0D9tY2scPQ=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "83e677f31c84212343f4cc553bab85c2efcad60a", + "rev": "917fec990948658ef1ccd07cef2a1ef060786846", "type": "github" }, "original": { From 8e3d176f1cdedf5ddf0d943c6a470a8401037313 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 6 Mar 2026 04:23:40 +0000 Subject: [PATCH 1063/1093] control/controlbase: deflake, speed up TestConnMemoryOverhead This had gotten flaky 
with Go 1.26. Use synctest + AllocsPerRun to make it fast and deterministic. Updates #18682 Change-Id: If673d6ecd8c1177f59c1b9c0f3fca42309375dff Signed-off-by: Brad Fitzpatrick --- control/controlbase/conn_test.go | 97 ++++++++------------------------ 1 file changed, 24 insertions(+), 73 deletions(-) diff --git a/control/controlbase/conn_test.go b/control/controlbase/conn_test.go index a1e2b313de5b6..202d39efae9e7 100644 --- a/control/controlbase/conn_test.go +++ b/control/controlbase/conn_test.go @@ -11,12 +11,11 @@ import ( "fmt" "io" "net" - "runtime" "strings" "sync" "testing" "testing/iotest" - "time" + "testing/synctest" chp "golang.org/x/crypto/chacha20poly1305" "golang.org/x/net/nettest" @@ -226,79 +225,31 @@ func TestConnStd(t *testing.T) { }) } -// tests that the idle memory overhead of a Conn blocked in a read is -// reasonable (under 2K). It was previously over 8KB with two 4KB -// buffers for rx/tx. This make sure we don't regress. Hopefully it -// doesn't turn into a flaky test. If so, const max can be adjusted, -// or it can be deleted or reworked. +// tests that the memory overhead of a Conn blocked in a read is +// reasonable. It was previously over 8KB with two 4KB buffers for +// rx/tx. This makes sure we don't regress. 
func TestConnMemoryOverhead(t *testing.T) { - num := 1000 - if testing.Short() { - num = 100 - } - ng0 := runtime.NumGoroutine() - - runtime.GC() - var ms0 runtime.MemStats - runtime.ReadMemStats(&ms0) - - var closers []io.Closer - closeAll := func() { - for _, c := range closers { - c.Close() - } - closers = nil - } - defer closeAll() - - for range num { - client, server := pair(t) - closers = append(closers, client, server) - go func() { - var buf [1]byte - client.Read(buf[:]) - }() - } - - t0 := time.Now() - deadline := t0.Add(3 * time.Second) - var ngo int - for time.Now().Before(deadline) { - runtime.GC() - ngo = runtime.NumGoroutine() - if ngo >= num { - break + synctest.Test(t, func(t *testing.T) { + // AllocsPerRun runs the function once for warmup (filling + // allocator slab caches, etc.) and then measures over the + // remaining runs, returning the average allocation count. + allocs := testing.AllocsPerRun(100, func() { + client, server := pair(t) + go func() { + var buf [1]byte + client.Read(buf[:]) + }() + synctest.Wait() + client.Close() + server.Close() + synctest.Wait() + }) + t.Logf("allocs per blocked-conn pair: %v", allocs) + const max = 400 + if allocs > max { + t.Errorf("allocs per blocked-conn pair = %v, want <= %v", allocs, max) } - time.Sleep(10 * time.Millisecond) - } - if ngo < num { - t.Fatalf("only %v goroutines; expected %v+", ngo, num) - } - runtime.GC() - var ms runtime.MemStats - runtime.ReadMemStats(&ms) - growthTotal := int64(ms.HeapAlloc) - int64(ms0.HeapAlloc) - growthEach := float64(growthTotal) / float64(num) - t.Logf("Alloced %v bytes, %.2f B/each", growthTotal, growthEach) - const max = 2048 - if growthEach > max { - t.Errorf("allocated more than expected; want max %v bytes/each", max) - } - - closeAll() - - // And make sure our goroutines go away too. 
- deadline = time.Now().Add(3 * time.Second) - for time.Now().Before(deadline) { - ngo = runtime.NumGoroutine() - if ngo < ng0+num/10 { - break - } - time.Sleep(10 * time.Millisecond) - } - if ngo >= ng0+num/10 { - t.Errorf("goroutines didn't go back down; started at %v, now %v", ng0, ngo) - } + }) } type readSink struct { From 40858a61fe1c6a0e90add476e5c9aa8d366dfb24 Mon Sep 17 00:00:00 2001 From: Michael Ben-Ami Date: Mon, 2 Mar 2026 11:18:08 -0500 Subject: [PATCH 1064/1093] ipnext,ipnlocal: add ExtraWireGuardAllowedIPs hook This hook addition is motivated by the Connectors 2025 work, in which NATed "Transit IPs" are used to route interesting traffic to the appropriate peer, without advertising the actual real IPs. It overlaps with #17858, and specifically with the WIP PR #17861. If that work completes, this hook may be replaced by other ones that fit the new WireGuard configuration paradigm. Fixes tailscale/corp#37146 Signed-off-by: Michael Ben-Ami --- ipn/ipnext/ipnext.go | 26 ++++++++++++++++++++++++++ ipn/ipnlocal/local.go | 10 ++++++++++ 2 files changed, 36 insertions(+) diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 6dea49939af91..bf8d8a7a69463 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -19,8 +19,10 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/tstime" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/mapx" + "tailscale.com/types/views" "tailscale.com/wgengine/filter" ) @@ -382,6 +384,30 @@ type Hooks struct { // Filter contains hooks for the packet filter. // See [filter.Filter] for details on how these hooks are invoked. Filter FilterHooks + + // ExtraWireGuardAllowedIPs is called with each peer's public key + // from the initial [wgcfg.Config], and returns a view of prefixes to + // append to each peer's AllowedIPs. 
+ // + // The extra AllowedIPs are added after the [router.Config] is generated, but + // before the WireGuard config is sent to the engine, so the extra IPs are + // given to WireGuard, but not the OS routing table. + // + // The prefixes returned from the hook should not contain duplicates, either + // internally, or with netmap peer prefixes. Returned prefixes should only + // contain host routes, and not contain default or subnet routes. + // Subsequent calls that return an unchanged set of prefixes for a given peer, + // should return the prefixes in the same order for that peer, + // to prevent configuration churn. + // + // The returned slice should not be mutated by the extension after it is returned. + // + // The hook is called with LocalBackend's mutex locked. + // + // TODO(#17858): This hook may not be needed and can possibly be replaced by + // new hooks that fit into the new architecture that make use of new + // WireGuard APIs. + ExtraWireGuardAllowedIPs feature.Hook[func(key.NodePublic) views.Slice[netip.Prefix]] } // FilterHooks contains hooks that extensions can use to customize the packet diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 596a51bd7ce3a..b8f3550395493 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5129,6 +5129,16 @@ func (b *LocalBackend) authReconfigLocked() { oneCGNATRoute := shouldUseOneCGNATRoute(b.logf, b.sys.NetMon.Get(), b.sys.ControlKnobs(), version.OS()) rcfg := b.routerConfigLocked(cfg, prefs, nm, oneCGNATRoute) + // Add these extra Allowed IPs after router configuration, because the expected + // extension (features/conn25), does not want these routes installed on the OS. + // See also [Hooks.ExtraWireGuardAllowedIPs]. 
+ if extraAllowedIPsFn, ok := b.extHost.hooks.ExtraWireGuardAllowedIPs.GetOk(); ok { + for i := range cfg.Peers { + extras := extraAllowedIPsFn(cfg.Peers[i].PublicKey) + cfg.Peers[i].AllowedIPs = extras.AppendTo(cfg.Peers[i].AllowedIPs) + } + } + err = b.e.Reconfig(cfg, rcfg, dcfg) if err == wgengine.ErrNoChanges { return From 3cc7b8530c7fd0dd069ebb5ec9863a8cf1e0deee Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Fri, 6 Mar 2026 09:54:25 -0500 Subject: [PATCH 1065/1093] prober: fix queuing delay probe txRecords overflow under high DERP server load (#18803) The txRecords buffer had two compounding bugs that caused the overflow guard to fire on every send tick under high DERP server load, spamming logs at the full send rate (e.g. 100x/second). First, int(packetTimeout.Seconds()) truncates fractional-second timeouts, under-allocating the buffer. Second, the capacity was sized to exactly the theoretical maximum number of in-flight records with no headroom, and the expiry check used strict > rather than >=, so records at exactly the timeout boundary were never evicted by applyTimeouts, leaving len==cap on the very next tick. Fixes tailscale/corp#37696 Signed-off-by: Mike O'Driscoll --- prober/derp.go | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/prober/derp.go b/prober/derp.go index 73ea02cf5ad4f..dadda6fce2208 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -17,6 +17,7 @@ import ( "io" "log" "maps" + "math" "net" "net/http" "net/netip" @@ -423,7 +424,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. // for packets up to their timeout. As records age out of the front of this // list, if the associated packet arrives, we won't have a txRecord for it // and will consider it to have timed out. 
- txRecords := make([]txRecord, 0, packetsPerSecond*int(packetTimeout.Seconds())) + txRecords := make([]txRecord, 0, int(math.Ceil(float64(packetsPerSecond)*packetTimeout.Seconds()))+1) var txRecordsMu sync.Mutex // applyTimeouts walks over txRecords and expires any records that are older @@ -435,7 +436,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. now := time.Now() recs := txRecords[:0] for _, r := range txRecords { - if now.Sub(r.at) > packetTimeout { + if now.Sub(r.at) >= packetTimeout { packetsDropped.Add(1) } else { recs = append(recs, r) @@ -451,9 +452,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. pkt := make([]byte, 260) // the same size as a CallMeMaybe packet observed on a Tailscale client. crand.Read(pkt) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { t := time.NewTicker(time.Second / time.Duration(packetsPerSecond)) defer t.Stop() @@ -481,13 +480,11 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. } } } - }() + }) // Receive the packets. recvFinishedC := make(chan error, 1) - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { defer close(recvFinishedC) // to break out of 'select' below. fromDERPPubKey := fromc.SelfPublicKey() for { @@ -531,7 +528,7 @@ func runDerpProbeQueuingDelayContinously(ctx context.Context, from, to *tailcfg. // Loop. 
} } - }() + }) select { case <-ctx.Done(): From 4453cc5f531f1570904e7e0633647fe5418a67d4 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 6 Mar 2026 09:55:57 -0800 Subject: [PATCH 1066/1093] go.mod: bump to Go 1.26.1 Updates #18682 Change-Id: I855c0dfa4c61eb33123bbb7b00c1ab5506e80b09 Signed-off-by: Brad Fitzpatrick --- go.mod | 2 +- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- go.toolchain.version | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index ba9c64061da4d..533ef04489cc6 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.26.0 +go 1.26.1 require ( filippo.io/mkcert v1.4.4 diff --git a/go.toolchain.rev b/go.toolchain.rev index ea3d3c773f779..753deba47a297 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -5b5cb0db47535a0a8d2f450cb1bf83af8e70f164 +0f1a3326f30508521e7b8322f4e0f084560c1404 diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index 34a9b157d33d6..d6105252b02d0 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-f12BE5+H8wHZNKaD6pv9nJJym+1QwxkFNpBtnNcltdc= +sha256-zyo1dIQnrwq8TVxwKCjJ3PfiShjAXO4wMQb/F7ze/mU= diff --git a/go.toolchain.version b/go.toolchain.version index 5ff8c4f5d2ad2..dd43a143f0217 100644 --- a/go.toolchain.version +++ b/go.toolchain.version @@ -1 +1 @@ -1.26.0 +1.26.1 From bd2a2d53d3a4d632c9fae7c1b6426c3b5ef34010 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Thu, 5 Mar 2026 21:13:57 +0000 Subject: [PATCH 1067/1093] all: use Go 1.26 things, run most gofix modernizers I omitted a lot of the min/max modernizers because they didn't result in more clear code. Some of it's older "for x := range 123". Also: errors.AsType, any, fmt.Appendf, etc. 
Updates #18682 Change-Id: I83a451577f33877f962766a5b65ce86f7696471c Signed-off-by: Brad Fitzpatrick --- appc/appconnector_test.go | 4 +- client/local/local.go | 10 ++--- client/systray/logo.go | 4 +- clientupdate/clientupdate.go | 4 +- clientupdate/clientupdate_test.go | 2 +- cmd/containerboot/forwarding.go | 2 +- cmd/containerboot/main_test.go | 7 ++-- cmd/derper/mesh.go | 2 +- cmd/k8s-operator/egress-eps_test.go | 9 ++--- .../egress-services-readiness_test.go | 7 ++-- cmd/k8s-operator/egress-services_test.go | 2 +- cmd/k8s-operator/ingress-for-pg.go | 10 ++--- cmd/k8s-operator/ingress-for-pg_test.go | 2 +- cmd/k8s-operator/metrics_resources.go | 9 ++--- cmd/k8s-operator/proxygroup.go | 9 ++--- cmd/k8s-operator/proxygroup_specs.go | 5 +-- cmd/k8s-operator/proxygroup_test.go | 4 +- cmd/k8s-operator/sts.go | 16 +++----- cmd/k8s-operator/sts_test.go | 5 +-- cmd/k8s-operator/svc-for-pg.go | 3 +- cmd/k8s-operator/testutils_test.go | 12 +++--- cmd/k8s-operator/tsrecorder.go | 10 ++--- cmd/k8s-operator/tsrecorder_specs.go | 5 +-- cmd/mkpkg/main.go | 4 +- cmd/natc/ippool/ippool_test.go | 2 +- cmd/natc/natc.go | 5 +-- cmd/sniproxy/sniproxy.go | 4 +- cmd/speedtest/speedtest.go | 3 +- cmd/stunstamp/stunstamp.go | 3 +- cmd/tailscale/cli/appcroutes.go | 6 +-- cmd/tailscale/cli/cli.go | 2 +- cmd/tailscale/cli/cli_test.go | 4 +- cmd/tailscale/cli/configure-synology-cert.go | 8 ++-- cmd/tailscale/cli/file.go | 7 ++-- cmd/tailscale/cli/ip.go | 13 +++---- cmd/tailscale/cli/risks.go | 2 +- cmd/tailscale/cli/serve_v2.go | 4 +- cmd/tailscale/cli/set.go | 7 ++-- cmd/tailscale/cli/ssh.go | 13 +++---- cmd/tailscale/cli/ssh_exec_windows.go | 3 +- cmd/tailscale/cli/ssh_unix.go | 2 +- cmd/tailscale/cli/up.go | 5 +-- cmd/testwrapper/testwrapper.go | 3 +- cmd/testwrapper/testwrapper_test.go | 3 +- cmd/tta/tta.go | 2 +- cmd/viewer/viewer.go | 5 +-- control/controlclient/controlclient_test.go | 8 ++-- control/controlclient/direct.go | 2 +- control/controlclient/map.go | 8 ++-- 
control/controlhttp/http_test.go | 4 +- control/controlknobs/controlknobs.go | 12 +++--- derp/derp_test.go | 12 ++---- derp/derphttp/derphttp_test.go | 12 ++---- derp/derpserver/derpserver.go | 22 +++++------ derp/derpserver/derpserver_test.go | 24 ++++++------ derp/xdp/xdp_linux.go | 3 +- disco/disco.go | 8 ++-- docs/webhooks/example.go | 6 +-- drive/driveimpl/compositedav/rewriting.go | 2 +- feature/conn25/conn25.go | 9 +---- feature/conn25/conn25_test.go | 2 +- .../identityfederation/identityfederation.go | 3 +- feature/linuxdnsfight/linuxdnsfight_test.go | 2 +- feature/taildrop/fileops_fs.go | 2 +- health/health_test.go | 5 +-- hostinfo/hostinfo_linux.go | 8 ++-- ipn/auditlog/auditlog.go | 7 +++- ipn/ipnlocal/breaktcp_linux.go | 2 +- ipn/ipnlocal/bus_test.go | 7 ++-- ipn/ipnlocal/extension_host.go | 2 +- ipn/ipnlocal/extension_host_test.go | 5 +-- ipn/ipnlocal/local.go | 13 ++----- ipn/ipnlocal/netmapcache/netmapcache_test.go | 2 +- ipn/ipnlocal/peerapi.go | 2 +- ipn/ipnlocal/serve.go | 4 +- ipn/ipnlocal/ssh.go | 6 +-- ipn/localapi/debug.go | 7 +--- ipn/prefs.go | 20 +++------- ipn/prefs_test.go | 4 +- ipn/serve.go | 2 +- ipn/store/awsstore/store_aws.go | 3 +- k8s-operator/sessionrecording/spdy/frame.go | 4 +- logtail/logtail.go | 4 +- logtail/logtail_test.go | 6 +-- metrics/multilabelmap.go | 8 ++-- net/art/stride_table_test.go | 2 +- net/captivedetection/captivedetection_test.go | 6 +-- net/dns/manager_linux.go | 2 +- net/dns/openresolv.go | 2 +- net/dns/resolver/forwarder.go | 3 +- net/dns/resolver/forwarder_test.go | 16 +++----- net/dns/resolver/tsdns.go | 4 +- net/dns/wsl_windows.go | 3 +- net/netcheck/netcheck.go | 14 +++---- net/netcheck/netcheck_test.go | 9 ++--- net/neterror/neterror_linux.go | 3 +- net/netmon/state.go | 7 +--- net/netutil/routes.go | 4 +- net/socks5/socks5.go | 7 ++-- net/socks5/socks5_test.go | 8 ++-- net/stunserver/stunserver_test.go | 3 +- net/tstun/wrap.go | 4 +- net/tstun/wrap_test.go | 10 ++--- net/udprelay/server.go | 2 +- 
net/udprelay/server_test.go | 4 +- prober/prober.go | 8 +--- ssh/tailssh/incubator.go | 5 +-- ssh/tailssh/privs_test.go | 12 ++---- ssh/tailssh/tailssh.go | 19 +++------ ssh/tailssh/tailssh_test.go | 28 +++++-------- syncs/shardedint_test.go | 4 +- syncs/shardvalue_test.go | 4 +- syncs/syncs_test.go | 9 ++--- tailcfg/tailcfg_test.go | 4 +- tka/chaintest_test.go | 5 +-- tka/key.go | 5 +-- tka/scenario_test.go | 5 +-- tka/sync.go | 2 +- tool/gocross/exec_other.go | 3 +- tsconsensus/monitor.go | 4 +- tsconsensus/tsconsensus_test.go | 8 ++-- tsd/tsd.go | 6 +-- tsnet/tsnet.go | 12 ++---- tsnet/tsnet_test.go | 2 +- tstest/deptest/deptest.go | 2 +- tstest/integration/integration_test.go | 2 +- tstest/integration/nat/nat_test.go | 7 +--- tstest/integration/vms/vms_test.go | 2 +- tstest/natlab/natlab.go | 16 +++----- tstest/natlab/vnet/vnet.go | 2 +- tstest/resource_test.go | 2 +- tstest/typewalk/typewalk.go | 5 +-- tsweb/tsweb.go | 9 ++--- tsweb/varz/varz.go | 13 +++---- types/ipproto/ipproto_test.go | 6 +-- types/lazy/deferred_test.go | 18 +++------ types/netmap/nodemut_test.go | 5 +-- types/persist/persist_test.go | 4 +- types/views/views.go | 4 +- util/dnsname/dnsname.go | 2 +- util/goroutines/goroutines.go | 2 +- util/hashx/block512_test.go | 2 +- util/httphdr/httphdr.go | 2 +- util/httpm/httpm_test.go | 2 +- util/linuxfw/fake.go | 7 ++-- util/linuxfw/iptables.go | 4 +- util/linuxfw/nftables_for_svcs.go | 4 +- util/linuxfw/nftables_runner_test.go | 2 +- util/pool/pool_test.go | 6 +-- util/set/intset.go | 2 +- util/singleflight/singleflight.go | 4 +- util/singleflight/singleflight_test.go | 39 ++++++++----------- util/slicesx/slicesx_test.go | 2 +- util/syspolicy/policytest/policytest.go | 7 +--- util/topk/topk_test.go | 2 +- util/vizerror/vizerror.go | 3 +- util/zstdframe/zstd_test.go | 32 +++++++-------- version/cmdname.go | 2 +- version/version_test.go | 2 +- wgengine/filter/filter_test.go | 8 ++-- wgengine/magicsock/magicsock.go | 3 +- 
wgengine/magicsock/magicsock_test.go | 6 +-- wgengine/pendopen.go | 2 +- wgengine/router/osrouter/router_linux_test.go | 8 ++-- wgengine/router/osrouter/runner.go | 11 ++---- wgengine/router/router_test.go | 4 +- wgengine/wgcfg/config_test.go | 6 +-- wif/wif.go | 3 +- 168 files changed, 431 insertions(+), 618 deletions(-) diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index a860da6a7c737..d14ef68fcdc15 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -698,7 +698,7 @@ func TestRateLogger(t *testing.T) { wasCalled = true }) - for i := 0; i < 3; i++ { + for range 3 { clock.Advance(1 * time.Millisecond) rl.update(0) if wasCalled { @@ -720,7 +720,7 @@ func TestRateLogger(t *testing.T) { wasCalled = true }) - for i := 0; i < 3; i++ { + for range 3 { clock.Advance(1 * time.Minute) rl.update(0) if wasCalled { diff --git a/client/local/local.go b/client/local/local.go index a7b8b83b10a77..e7258930696aa 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -192,8 +192,8 @@ func (e *AccessDeniedError) Unwrap() error { return e.err } // IsAccessDeniedError reports whether err is or wraps an AccessDeniedError. func IsAccessDeniedError(err error) bool { - var ae *AccessDeniedError - return errors.As(err, &ae) + _, ok := errors.AsType[*AccessDeniedError](err) + return ok } // PreconditionsFailedError is returned when the server responds @@ -210,8 +210,8 @@ func (e *PreconditionsFailedError) Unwrap() error { return e.err } // IsPreconditionsFailedError reports whether err is or wraps an PreconditionsFailedError. 
func IsPreconditionsFailedError(err error) bool { - var ae *PreconditionsFailedError - return errors.As(err, &ae) + _, ok := errors.AsType[*PreconditionsFailedError](err) + return ok } // bestError returns either err, or if body contains a valid JSON @@ -1071,7 +1071,7 @@ func tailscaledConnectHint() string { // ActiveState=inactive // SubState=dead st := map[string]string{} - for _, line := range strings.Split(string(out), "\n") { + for line := range strings.SplitSeq(string(out), "\n") { if k, v, ok := strings.Cut(line, "="); ok { st[k] = strings.TrimSpace(v) } diff --git a/client/systray/logo.go b/client/systray/logo.go index 4cd19778dc3a7..a0f8bf7d045a9 100644 --- a/client/systray/logo.go +++ b/client/systray/logo.go @@ -233,8 +233,8 @@ func (logo tsLogo) renderWithBorder(borderUnits int) *bytes.Buffer { dc.InvertMask() } - for y := 0; y < 3; y++ { - for x := 0; x < 3; x++ { + for y := range 3 { + for x := range 3 { px := (borderUnits + 1 + 3*x) * radius py := (borderUnits + 1 + 3*y) * radius col := fg diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index d52241483812a..6d034b342d1cf 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -1292,6 +1292,6 @@ func requireRoot() error { } func isExitError(err error) bool { - var exitErr *exec.ExitError - return errors.As(err, &exitErr) + _, ok := errors.AsType[*exec.ExitError](err) + return ok } diff --git a/clientupdate/clientupdate_test.go b/clientupdate/clientupdate_test.go index 13fc8f08a6a2e..100339ce7730b 100644 --- a/clientupdate/clientupdate_test.go +++ b/clientupdate/clientupdate_test.go @@ -451,7 +451,7 @@ func TestSynoArch(t *testing.T) { synoinfoConfPath := filepath.Join(t.TempDir(), "synoinfo.conf") if err := os.WriteFile( synoinfoConfPath, - []byte(fmt.Sprintf("unique=%q\n", tt.synoinfoUnique)), + fmt.Appendf(nil, "unique=%q\n", tt.synoinfoUnique), 0600, ); err != nil { t.Fatal(err) diff --git a/cmd/containerboot/forwarding.go 
b/cmd/containerboot/forwarding.go index 0ec9c36c0bd30..6d90fbaaa9723 100644 --- a/cmd/containerboot/forwarding.go +++ b/cmd/containerboot/forwarding.go @@ -51,7 +51,7 @@ func ensureIPForwarding(root, clusterProxyTargetIP, tailnetTargetIP, tailnetTarg v4Forwarding = true } if routes != nil && *routes != "" { - for _, route := range strings.Split(*routes, ",") { + for route := range strings.SplitSeq(*routes, ",") { cidr, err := netip.ParsePrefix(route) if err != nil { return fmt.Errorf("invalid subnet route: %v", err) diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index cc5629f99ca0b..365cf218424de 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -15,6 +15,7 @@ import ( "fmt" "io" "io/fs" + "maps" "net" "net/http" "net/http/httptest" @@ -1249,7 +1250,7 @@ func (b *lockingBuffer) String() string { func waitLogLine(t *testing.T, timeout time.Duration, b *lockingBuffer, want string) { deadline := time.Now().Add(timeout) for time.Now().Before(deadline) { - for _, line := range strings.Split(b.String(), "\n") { + for line := range strings.SplitSeq(b.String(), "\n") { if !strings.HasPrefix(line, "boot: ") { continue } @@ -1438,9 +1439,7 @@ func (k *kubeServer) Secret() map[string]string { k.Lock() defer k.Unlock() ret := map[string]string{} - for k, v := range k.secret { - ret[k] = v - } + maps.Copy(ret, k.secret) return ret } diff --git a/cmd/derper/mesh.go b/cmd/derper/mesh.go index 34ea7da856220..c07cfe969d9e3 100644 --- a/cmd/derper/mesh.go +++ b/cmd/derper/mesh.go @@ -25,7 +25,7 @@ func startMesh(s *derpserver.Server) error { if !s.HasMeshKey() { return errors.New("--mesh-with requires --mesh-psk-file") } - for _, hostTuple := range strings.Split(*meshWith, ",") { + for hostTuple := range strings.SplitSeq(*meshWith, ",") { if err := startMeshWithHost(s, hostTuple); err != nil { return err } diff --git a/cmd/k8s-operator/egress-eps_test.go b/cmd/k8s-operator/egress-eps_test.go index 
47acb64f27458..6335b4eb8454b 100644 --- a/cmd/k8s-operator/egress-eps_test.go +++ b/cmd/k8s-operator/egress-eps_test.go @@ -11,7 +11,6 @@ import ( "math/rand/v2" "testing" - "github.com/AlekSi/pointer" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -106,11 +105,11 @@ func TestTailscaleEgressEndpointSlices(t *testing.T) { expectReconciled(t, er, "operator-ns", "foo") eps.Endpoints = append(eps.Endpoints, discoveryv1.Endpoint{ Addresses: []string{"10.0.0.1"}, - Hostname: pointer.To("foo"), + Hostname: new("foo"), Conditions: discoveryv1.EndpointConditions{ - Serving: pointer.ToBool(true), - Ready: pointer.ToBool(true), - Terminating: pointer.ToBool(false), + Serving: new(true), + Ready: new(true), + Terminating: new(false), }, }) expectEqual(t, fc, eps) diff --git a/cmd/k8s-operator/egress-services-readiness_test.go b/cmd/k8s-operator/egress-services-readiness_test.go index ba89903df2f29..96d76cc4e7252 100644 --- a/cmd/k8s-operator/egress-services-readiness_test.go +++ b/cmd/k8s-operator/egress-services-readiness_test.go @@ -9,7 +9,6 @@ import ( "fmt" "testing" - "github.com/AlekSi/pointer" "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -145,9 +144,9 @@ func setEndpointForReplica(pg *tsapi.ProxyGroup, ordinal int32, eps *discoveryv1 eps.Endpoints = append(eps.Endpoints, discoveryv1.Endpoint{ Addresses: []string{p.Status.PodIPs[0].IP}, Conditions: discoveryv1.EndpointConditions{ - Ready: pointer.ToBool(true), - Serving: pointer.ToBool(true), - Terminating: pointer.ToBool(false), + Ready: new(true), + Serving: new(true), + Terminating: new(false), }, }) } diff --git a/cmd/k8s-operator/egress-services_test.go b/cmd/k8s-operator/egress-services_test.go index 45861449191cb..d38284690d64d 100644 --- a/cmd/k8s-operator/egress-services_test.go +++ b/cmd/k8s-operator/egress-services_test.go @@ -243,7 +243,7 @@ func portsForEndpointSlice(svc *corev1.Service) []discoveryv1.EndpointPort { ports = append(ports, 
discoveryv1.EndpointPort{ Name: &p.Name, Protocol: &p.Protocol, - Port: pointer.ToInt32(p.TargetPort.IntVal), + Port: new(p.TargetPort.IntVal), }) } return ports diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 5966ace3c388e..4b140a8aedd72 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -10,6 +10,7 @@ import ( "encoding/json" "errors" "fmt" + "maps" "math/rand/v2" "net/http" "reflect" @@ -914,9 +915,7 @@ func ownerAnnotations(operatorID string, svc *tailscale.VIPService) (map[string] } newAnnots := make(map[string]string, len(svc.Annotations)+1) - for k, v := range svc.Annotations { - newAnnots[k] = v - } + maps.Copy(newAnnots, svc.Annotations) newAnnots[ownerAnnotation] = string(json) return newAnnots, nil } @@ -1129,8 +1128,7 @@ func hasCerts(ctx context.Context, cl client.Client, lc localClient, ns string, } func isErrorTailscaleServiceNotFound(err error) bool { - var errResp tailscale.ErrResponse - ok := errors.As(err, &errResp) + errResp, ok := errors.AsType[tailscale.ErrResponse](err) return ok && errResp.Status == http.StatusNotFound } @@ -1144,7 +1142,7 @@ func tagViolations(obj client.Object) []string { return nil } - for _, tag := range strings.Split(tags, ",") { + for tag := range strings.SplitSeq(tags, ",") { tag = strings.TrimSpace(tag) if err := tailcfg.CheckTag(tag); err != nil { violations = append(violations, fmt.Sprintf("invalid tag %q: %v", tag, err)) diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index e93d0184e8412..480e6a26ec65e 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -1102,7 +1102,7 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, pgName string, expec Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig), }, Data: map[string][]byte{ - tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): 
[]byte(fmt.Sprintf(`{"Version":""%s}`, expected)), + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): fmt.Appendf(nil, `{"Version":""%s}`, expected), }, }) } diff --git a/cmd/k8s-operator/metrics_resources.go b/cmd/k8s-operator/metrics_resources.go index afb055018bb13..c7c329a7e01d6 100644 --- a/cmd/k8s-operator/metrics_resources.go +++ b/cmd/k8s-operator/metrics_resources.go @@ -8,6 +8,7 @@ package main import ( "context" "fmt" + "maps" "reflect" "go.uber.org/zap" @@ -286,11 +287,7 @@ func isNamespacedProxyType(typ string) bool { func mergeMapKeys(a, b map[string]string) map[string]string { m := make(map[string]string, len(a)+len(b)) - for key, val := range b { - m[key] = val - } - for key, val := range a { - m[key] = val - } + maps.Copy(m, b) + maps.Copy(m, a) return m } diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index db8733f9095b8..2007824988fc7 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -308,8 +308,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClie var err error svcToNodePorts, tailscaledPort, err = r.ensureNodePortServiceCreated(ctx, pg, proxyClass) if err != nil { - var allocatePortErr *allocatePortsErr - if errors.As(err, &allocatePortErr) { + if _, ok := errors.AsType[*allocatePortsErr](err); ok { reason := reasonProxyGroupCreationFailed msg := fmt.Sprintf("error provisioning NodePort Services for static endpoints: %v", err) r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) @@ -321,8 +320,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClie staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, tailscaleClient, pg, proxyClass, svcToNodePorts) if err != nil { - var selectorErr *FindStaticEndpointErr - if errors.As(err, &selectorErr) { + if _, ok := errors.AsType[*FindStaticEndpointErr](err); ok { reason := reasonProxyGroupCreationFailed msg := fmt.Sprintf("error provisioning config Secrets: 
%v", err) r.recorder.Event(pg, corev1.EventTypeWarning, reason, msg) @@ -718,8 +716,7 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, tailscaleClient func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, tailscaleClient tsClient, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { logger.Debugf("deleting device %s from control", string(id)) if err := tailscaleClient.DeleteDevice(ctx, string(id)); err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { + if errResp, ok := errors.AsType[tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { logger.Debugf("device %s not found, likely because it has already been deleted from control", string(id)) } else { return fmt.Errorf("error deleting device: %w", err) diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 69b5b109a0129..60b4bddd5613c 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -7,6 +7,7 @@ package main import ( "fmt" + "maps" "slices" "strconv" "strings" @@ -544,9 +545,7 @@ func pgSecretLabels(pgName, secretType string) map[string]string { func pgLabels(pgName string, customLabels map[string]string) map[string]string { labels := make(map[string]string, len(customLabels)+3) - for k, v := range customLabels { - labels[k] = v - } + maps.Copy(labels, customLabels) labels[kubetypes.LabelManaged] = "true" labels[LabelParentType] = "proxygroup" diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 2d46e3d5bf16b..9b3ee0e0fd30f 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -1826,10 +1826,10 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG currentProfileKey: []byte(key), key: bytes, kubetypes.KeyDeviceIPs: []byte(`["1.2.3.4", "::1"]`), - kubetypes.KeyDeviceFQDN: 
[]byte(fmt.Sprintf("hostname-nodeid-%d.tails-scales.ts.net", i)), + kubetypes.KeyDeviceFQDN: fmt.Appendf(nil, "hostname-nodeid-%d.tails-scales.ts.net", i), // TODO(tomhjp): We have two different mechanisms to retrieve device IDs. // Consolidate on this one. - kubetypes.KeyDeviceID: []byte(fmt.Sprintf("nodeid-%d", i)), + kubetypes.KeyDeviceID: fmt.Appendf(nil, "nodeid-%d", i), kubetypes.KeyPodUID: []byte(podUID), } }) diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 2a63ede4efe2b..c88a6df173647 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -11,6 +11,7 @@ import ( "encoding/json" "errors" "fmt" + "maps" "net/http" "os" "path" @@ -304,8 +305,7 @@ func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, tailnet string, lo if dev.id != "" { logger.Debugf("deleting device %s from control", string(dev.id)) if err = tailscaleClient.DeleteDevice(ctx, string(dev.id)); err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { + if errResp, ok := errors.AsType[tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { logger.Debugf("device %s not found, likely because it has already been deleted from control", string(dev.id)) } else { return false, fmt.Errorf("deleting device: %w", err) @@ -499,14 +499,11 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscale } if dev != nil && dev.id != "" { - var errResp *tailscale.ErrResponse - err = tailscaleClient.DeleteDevice(ctx, string(dev.id)) - switch { - case errors.As(err, &errResp) && errResp.Status == http.StatusNotFound: + if errResp, ok := errors.AsType[*tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { // This device has possibly already been deleted in the admin console. So we can ignore this // and move on to removing the secret. 
- case err != nil: + } else if err != nil { return nil, err } } @@ -677,9 +674,8 @@ func (a *tailscaleSTSReconciler) reconcileSTS(ctx context.Context, logger *zap.S }, } mak.Set(&pod.Labels, "app", sts.ParentResourceUID) - for key, val := range sts.ChildResourceLabels { - pod.Labels[key] = val // sync StatefulSet labels to Pod to make it easier for users to select the Pod - } + // sync StatefulSet labels to Pod to make it easier for users to select the Pod + maps.Copy(pod.Labels, sts.ChildResourceLabels) if sts.Replicas > 0 { ss.Spec.Replicas = new(sts.Replicas) diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index f44de8481bf76..0ceec5791dc83 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -8,6 +8,7 @@ package main import ( _ "embed" "fmt" + "maps" "reflect" "regexp" "strings" @@ -408,7 +409,5 @@ func Test_mergeStatefulSetLabelsOrAnnots(t *testing.T) { // updateMap updates map a with the values from map b. func updateMap(a, b map[string]string) { - for key, val := range b { - a[key] = val - } + maps.Copy(a, b) } diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index e0383824a6313..7cbbaebaa89ac 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -526,8 +526,7 @@ func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, er func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcfg.ServiceName, operatorID string, logger *zap.SugaredLogger) (updated bool, err error) { svc, err := tsClient.GetVIPService(ctx, name) if err != nil { - errResp := &tailscale.ErrResponse{} - ok := errors.As(err, errResp) + errResp, ok := errors.AsType[tailscale.ErrResponse](err) if ok && errResp.Status == http.StatusNotFound { return false, nil } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index e13478d7167a0..191a31723eea9 100644 --- a/cmd/k8s-operator/testutils_test.go +++ 
b/cmd/k8s-operator/testutils_test.go @@ -13,6 +13,7 @@ import ( "net/netip" "path" "reflect" + "slices" "strings" "sync" "testing" @@ -555,7 +556,7 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec if opts.isExitNode { r = "0.0.0.0/0,::/0," + r } - for _, rr := range strings.Split(r, ",") { + for rr := range strings.SplitSeq(r, ",") { prefix, err := netip.ParsePrefix(rr) if err != nil { t.Fatal(err) @@ -822,12 +823,9 @@ func expectEvents(t *testing.T, rec *record.FakeRecorder, wantsEvents []string) select { case gotEvent := <-rec.Events: found := false - for _, wantEvent := range wantsEvents { - if wantEvent == gotEvent { - found = true - seenEvents = append(seenEvents, gotEvent) - break - } + if slices.Contains(wantsEvents, gotEvent) { + found = true + seenEvents = append(seenEvents, gotEvent) } if !found { t.Errorf("got unexpected event %q, expected events: %+#v", gotEvent, wantsEvents) diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 60ed24a7006b1..0a497a46ed955 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -363,15 +363,12 @@ func (r *RecorderReconciler) maybeCleanupSecrets(ctx context.Context, tailscaleC } if ok { - var errResp *tailscale.ErrResponse - r.log.Debugf("deleting device %s", devicePrefs.Config.NodeID) err = tailscaleClient.DeleteDevice(ctx, string(devicePrefs.Config.NodeID)) - switch { - case errors.As(err, &errResp) && errResp.Status == http.StatusNotFound: + if errResp, ok := errors.AsType[*tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { // This device has possibly already been deleted in the admin console. So we can ignore this // and move on to removing the secret. 
- case err != nil: + } else if err != nil { return err } } @@ -412,8 +409,7 @@ func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Record nodeID := string(devicePrefs.Config.NodeID) logger.Debugf("deleting device %s from control", nodeID) if err = tailscaleClient.DeleteDevice(ctx, nodeID); err != nil { - errResp := &tailscale.ErrResponse{} - if errors.As(err, errResp) && errResp.Status == http.StatusNotFound { + if errResp, ok := errors.AsType[tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { logger.Debugf("device %s not found, likely because it has already been deleted from control", nodeID) continue } diff --git a/cmd/k8s-operator/tsrecorder_specs.go b/cmd/k8s-operator/tsrecorder_specs.go index 101f68405d001..5a93bc22b546c 100644 --- a/cmd/k8s-operator/tsrecorder_specs.go +++ b/cmd/k8s-operator/tsrecorder_specs.go @@ -7,6 +7,7 @@ package main import ( "fmt" + "maps" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -312,9 +313,7 @@ func tsrEnv(tsr *tsapi.Recorder, loginServer string) []corev1.EnvVar { func tsrLabels(app, instance string, customLabels map[string]string) map[string]string { labels := make(map[string]string, len(customLabels)+3) - for k, v := range customLabels { - labels[k] = v - } + maps.Copy(labels, customLabels) // ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ labels["app.kubernetes.io/name"] = app diff --git a/cmd/mkpkg/main.go b/cmd/mkpkg/main.go index 6f4de7e299b50..ecf108c2ec236 100644 --- a/cmd/mkpkg/main.go +++ b/cmd/mkpkg/main.go @@ -24,7 +24,7 @@ func parseFiles(s string, typ string) (files.Contents, error) { return nil, nil } var contents files.Contents - for _, f := range strings.Split(s, ",") { + for f := range strings.SplitSeq(s, ",") { fs := strings.Split(f, ":") if len(fs) != 2 { return nil, fmt.Errorf("unparseable file field %q", f) @@ -41,7 +41,7 @@ func parseEmptyDirs(s string) files.Contents { return nil } var contents files.Contents 
- for _, d := range strings.Split(s, ",") { + for d := range strings.SplitSeq(s, ",") { contents = append(contents, &files.Content{Type: files.TypeDir, Destination: d}) } return contents diff --git a/cmd/natc/ippool/ippool_test.go b/cmd/natc/ippool/ippool_test.go index 405ec61564ed8..af0053c2f54d8 100644 --- a/cmd/natc/ippool/ippool_test.go +++ b/cmd/natc/ippool/ippool_test.go @@ -30,7 +30,7 @@ func TestIPPoolExhaustion(t *testing.T) { from := tailcfg.NodeID(12345) - for i := 0; i < 5; i++ { + for range 5 { for _, domain := range domains { addr, err := pool.IPForDomain(from, domain) if err != nil { diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 11975b7d2e1a6..339c42ccd81d2 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -149,7 +149,7 @@ func main() { } var prefixes []netip.Prefix - for _, s := range strings.Split(*v4PfxStr, ",") { + for s := range strings.SplitSeq(*v4PfxStr, ",") { p := netip.MustParsePrefix(strings.TrimSpace(s)) if p.Masked() != p { log.Fatalf("v4 prefix %v is not a masked prefix", p) @@ -372,8 +372,7 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP addrQCount++ if _, ok := resolves[q.Name.String()]; !ok { addrs, err := c.resolver.LookupNetIP(ctx, "ip", q.Name.String()) - var dnsErr *net.DNSError - if errors.As(err, &dnsErr) && dnsErr.IsNotFound { + if dnsErr, ok := errors.AsType[*net.DNSError](err); ok && dnsErr.IsNotFound { continue } if err != nil { diff --git a/cmd/sniproxy/sniproxy.go b/cmd/sniproxy/sniproxy.go index 45503feca8718..bd95cc113b59d 100644 --- a/cmd/sniproxy/sniproxy.go +++ b/cmd/sniproxy/sniproxy.go @@ -225,7 +225,7 @@ func (s *sniproxy) mergeConfigFromFlags(out *appctype.AppConnectorConfig, ports, Addrs: []netip.Addr{ip4, ip6}, } if ports != "" { - for _, portStr := range strings.Split(ports, ",") { + for portStr := range strings.SplitSeq(ports, ",") { port, err := strconv.ParseUint(portStr, 10, 16) if err != nil { log.Fatalf("invalid port: %s", portStr) @@ -238,7 +238,7 @@ 
func (s *sniproxy) mergeConfigFromFlags(out *appctype.AppConnectorConfig, ports, } var forwardConfigFromFlags []appctype.DNATConfig - for _, forwStr := range strings.Split(forwards, ",") { + for forwStr := range strings.SplitSeq(forwards, ",") { if forwStr == "" { continue } diff --git a/cmd/speedtest/speedtest.go b/cmd/speedtest/speedtest.go index 2cea97b1edef1..e11c4ad1d90bb 100644 --- a/cmd/speedtest/speedtest.go +++ b/cmd/speedtest/speedtest.go @@ -72,8 +72,7 @@ var speedtestArgs struct { func runSpeedtest(ctx context.Context, args []string) error { if _, _, err := net.SplitHostPort(speedtestArgs.host); err != nil { - var addrErr *net.AddrError - if errors.As(err, &addrErr) && addrErr.Err == "missing port in address" { + if addrErr, ok := errors.AsType[*net.AddrError](err); ok && addrErr.Err == "missing port in address" { // if no port is provided, append the default port speedtestArgs.host = net.JoinHostPort(speedtestArgs.host, strconv.Itoa(speedtest.DefaultPort)) } diff --git a/cmd/stunstamp/stunstamp.go b/cmd/stunstamp/stunstamp.go index cfedd82bdd5cc..743d6aec3c9d8 100644 --- a/cmd/stunstamp/stunstamp.go +++ b/cmd/stunstamp/stunstamp.go @@ -889,8 +889,7 @@ func remoteWriteTimeSeries(client *remoteWriteClient, tsCh chan []prompb.TimeSer reqCtx, cancel := context.WithTimeout(context.Background(), time.Second*30) writeErr = client.write(reqCtx, ts) cancel() - var re recoverableErr - recoverable := errors.As(writeErr, &re) + _, recoverable := errors.AsType[recoverableErr](writeErr) if writeErr != nil { log.Printf("remote write error(recoverable=%v): %v", recoverable, writeErr) } diff --git a/cmd/tailscale/cli/appcroutes.go b/cmd/tailscale/cli/appcroutes.go index 2ea001aec9c84..04cbcdd832258 100644 --- a/cmd/tailscale/cli/appcroutes.go +++ b/cmd/tailscale/cli/appcroutes.go @@ -102,12 +102,12 @@ func getSummarizeLearnedOutput(ri *appctype.RouteInfo) string { } return 0 }) - s := "" + var s strings.Builder fmtString := fmt.Sprintf("%%-%ds %%d\n", maxDomainWidth) 
// eg "%-10s %d\n" for _, dc := range x { - s += fmt.Sprintf(fmtString, dc.domain, dc.count) + s.WriteString(fmt.Sprintf(fmtString, dc.domain, dc.count)) } - return s + return s.String() } func runAppcRoutesInfo(ctx context.Context, args []string) error { diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index fda6b4546324a..8a2c2b9ef147f 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -124,7 +124,7 @@ func Run(args []string) (err error) { if errors.Is(err, flag.ErrHelp) { return nil } - if noexec := (ffcli.NoExecError{}); errors.As(err, &noexec) { + if noexec, ok := errors.AsType[ffcli.NoExecError](err); ok { // When the user enters an unknown subcommand, ffcli tries to run // the closest valid parent subcommand with everything else as args, // returning NoExecError if it doesn't have an Exec function. diff --git a/cmd/tailscale/cli/cli_test.go b/cmd/tailscale/cli/cli_test.go index 537e641fc4160..bdf9116a01d7c 100644 --- a/cmd/tailscale/cli/cli_test.go +++ b/cmd/tailscale/cli/cli_test.go @@ -962,8 +962,8 @@ func TestPrefFlagMapping(t *testing.T) { } prefType := reflect.TypeFor[ipn.Prefs]() - for i := range prefType.NumField() { - prefName := prefType.Field(i).Name + for field := range prefType.Fields() { + prefName := field.Name if prefHasFlag[prefName] { continue } diff --git a/cmd/tailscale/cli/configure-synology-cert.go b/cmd/tailscale/cli/configure-synology-cert.go index 0f38f2df2941c..32f5bbd70593c 100644 --- a/cmd/tailscale/cli/configure-synology-cert.go +++ b/cmd/tailscale/cli/configure-synology-cert.go @@ -16,6 +16,7 @@ import ( "os/exec" "path" "runtime" + "slices" "strings" "github.com/peterbourgon/ff/v3/ffcli" @@ -85,11 +86,8 @@ func runConfigureSynologyCert(ctx context.Context, args []string) error { domain = st.CertDomains[0] } else { var found bool - for _, d := range st.CertDomains { - if d == domain { - found = true - break - } + if slices.Contains(st.CertDomains, domain) { + found = true } if !found { return 
fmt.Errorf("Domain %q was not one of the valid domain options: %q.", domain, st.CertDomains) diff --git a/cmd/tailscale/cli/file.go b/cmd/tailscale/cli/file.go index 94b36f535bcab..e7406bee38035 100644 --- a/cmd/tailscale/cli/file.go +++ b/cmd/tailscale/cli/file.go @@ -19,6 +19,7 @@ import ( "os" "path" "path/filepath" + "slices" "strings" "sync" "sync/atomic" @@ -126,10 +127,8 @@ func runCp(ctx context.Context, args []string) error { if cpArgs.name != "" { return errors.New("can't use --name= with multiple files") } - for _, fileArg := range files { - if fileArg == "-" { - return errors.New("can't use '-' as STDIN file when providing filename arguments") - } + if slices.Contains(files, "-") { + return errors.New("can't use '-' as STDIN file when providing filename arguments") } } diff --git a/cmd/tailscale/cli/ip.go b/cmd/tailscale/cli/ip.go index 7159904c732d6..b76ef0a708b3a 100644 --- a/cmd/tailscale/cli/ip.go +++ b/cmd/tailscale/cli/ip.go @@ -9,6 +9,7 @@ import ( "flag" "fmt" "net/netip" + "slices" "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/ipn/ipnstate" @@ -114,17 +115,13 @@ func peerMatchingIP(st *ipnstate.Status, ipStr string) (ps *ipnstate.PeerStatus, return } for _, ps = range st.Peer { - for _, pip := range ps.TailscaleIPs { - if ip == pip { - return ps, true - } + if slices.Contains(ps.TailscaleIPs, ip) { + return ps, true } } if ps := st.Self; ps != nil { - for _, pip := range ps.TailscaleIPs { - if ip == pip { - return ps, true - } + if slices.Contains(ps.TailscaleIPs, ip) { + return ps, true } } return nil, false diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index 1bd128d566125..6f3ebf37bbebe 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -39,7 +39,7 @@ func registerAcceptRiskFlag(f *flag.FlagSet, acceptedRisks *string) { // isRiskAccepted reports whether riskType is in the comma-separated list of // risks in acceptedRisks. 
func isRiskAccepted(riskType, acceptedRisks string) bool { - for _, r := range strings.Split(acceptedRisks, ",") { + for r := range strings.SplitSeq(acceptedRisks, ",") { if r == riskType || r == riskAll { return true } diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 9ac303c791b37..13f5c09b8eac5 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -114,8 +114,8 @@ func (u *acceptAppCapsFlag) Set(s string) error { if s == "" { return nil } - appCaps := strings.Split(s, ",") - for _, appCap := range appCaps { + appCaps := strings.SplitSeq(s, ",") + for appCap := range appCaps { appCap = strings.TrimSpace(appCap) if !validAppCap.MatchString(appCap) { return fmt.Errorf("%q does not match the form {domain}/{name}, where domain must be a fully qualified domain name", appCap) diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index feccf6d12d026..b12b7de491726 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -183,8 +183,7 @@ func runSet(ctx context.Context, args []string) (retErr error) { maskedPrefs.AutoExitNode = expr maskedPrefs.AutoExitNodeSet = true } else if err := maskedPrefs.Prefs.SetExitNodeIP(setArgs.exitNodeIP, st); err != nil { - var e ipn.ExitNodeLocalIPError - if errors.As(err, &e) { + if _, ok := errors.AsType[ipn.ExitNodeLocalIPError](err); ok { return fmt.Errorf("%w; did you mean --advertise-exit-node?", err) } return err @@ -251,8 +250,8 @@ func runSet(ctx context.Context, args []string) (retErr error) { if setArgs.relayServerStaticEndpoints != "" { endpointsSet := make(set.Set[netip.AddrPort]) - endpointsSplit := strings.Split(setArgs.relayServerStaticEndpoints, ",") - for _, s := range endpointsSplit { + endpointsSplit := strings.SplitSeq(setArgs.relayServerStaticEndpoints, ",") + for s := range endpointsSplit { ap, err := netip.ParseAddrPort(s) if err != nil { return fmt.Errorf("failed to set relay server static endpoints: %q is not a valid IP:port", s) 
diff --git a/cmd/tailscale/cli/ssh.go b/cmd/tailscale/cli/ssh.go index bea18f7abf6ac..9efab8cf7e47e 100644 --- a/cmd/tailscale/cli/ssh.go +++ b/cmd/tailscale/cli/ssh.go @@ -14,6 +14,7 @@ import ( "os/user" "path/filepath" "runtime" + "slices" "strings" "github.com/peterbourgon/ff/v3/ffcli" @@ -202,10 +203,8 @@ func peerStatusFromArg(st *ipnstate.Status, arg string) (*ipnstate.PeerStatus, b argIP, _ := netip.ParseAddr(arg) for _, ps := range st.Peer { if argIP.IsValid() { - for _, ip := range ps.TailscaleIPs { - if ip == argIP { - return ps, true - } + if slices.Contains(ps.TailscaleIPs, argIP) { + return ps, true } continue } @@ -230,10 +229,8 @@ func nodeDNSNameFromArg(st *ipnstate.Status, arg string) (dnsName string, ok boo for _, ps := range st.Peer { dnsName = ps.DNSName if argIP.IsValid() { - for _, ip := range ps.TailscaleIPs { - if ip == argIP { - return dnsName, true - } + if slices.Contains(ps.TailscaleIPs, argIP) { + return dnsName, true } continue } diff --git a/cmd/tailscale/cli/ssh_exec_windows.go b/cmd/tailscale/cli/ssh_exec_windows.go index 85e1518175609..f9d306463c635 100644 --- a/cmd/tailscale/cli/ssh_exec_windows.go +++ b/cmd/tailscale/cli/ssh_exec_windows.go @@ -28,9 +28,8 @@ func execSSH(ssh string, argv []string) error { cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr - var ee *exec.ExitError err := cmd.Run() - if errors.As(err, &ee) { + if ee, ok := errors.AsType[*exec.ExitError](err); ok { os.Exit(ee.ExitCode()) } return err diff --git a/cmd/tailscale/cli/ssh_unix.go b/cmd/tailscale/cli/ssh_unix.go index 768d71116cf2c..1cc3ccbe8c66f 100644 --- a/cmd/tailscale/cli/ssh_unix.go +++ b/cmd/tailscale/cli/ssh_unix.go @@ -39,7 +39,7 @@ func init() { return "" } prefix := []byte("SSH_CLIENT=") - for _, env := range bytes.Split(b, []byte{0}) { + for env := range bytes.SplitSeq(b, []byte{0}) { if bytes.HasPrefix(env, prefix) { return string(env[len(prefix):]) } diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 
79cc60ca2347f..81c67d6622915 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -334,8 +334,7 @@ func prefsFromUpArgs(upArgs upArgsT, warnf logger.Logf, st *ipnstate.Status, goo if expr, useAutoExitNode := ipn.ParseAutoExitNodeString(upArgs.exitNodeIP); useAutoExitNode { prefs.AutoExitNode = expr } else if err := prefs.SetExitNodeIP(upArgs.exitNodeIP, st); err != nil { - var e ipn.ExitNodeLocalIPError - if errors.As(err, &e) { + if _, ok := errors.AsType[ipn.ExitNodeLocalIPError](err); ok { return nil, fmt.Errorf("%w; did you mean --advertise-exit-node?", err) } return nil, err @@ -912,7 +911,7 @@ func addPrefFlagMapping(flagName string, prefNames ...string) { prefType := reflect.TypeFor[ipn.Prefs]() for _, pref := range prefNames { t := prefType - for _, name := range strings.Split(pref, ".") { + for name := range strings.SplitSeq(pref, ".") { // Crash at runtime if there's a typo in the prefName. f, ok := t.FieldByName(name) if !ok { diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index e35b83407bbb8..204409a630383 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -352,8 +352,7 @@ func main() { // If there's nothing to retry and no non-retryable tests have // failed then we've probably hit a build error. 
if err := <-runErr; len(toRetry) == 0 && err != nil { - var exit *exec.ExitError - if errors.As(err, &exit) { + if exit, ok := errors.AsType[*exec.ExitError](err); ok { if code := exit.ExitCode(); code > -1 { os.Exit(exit.ExitCode()) } diff --git a/cmd/testwrapper/testwrapper_test.go b/cmd/testwrapper/testwrapper_test.go index cf023f4367483..7ad78a3d003ca 100644 --- a/cmd/testwrapper/testwrapper_test.go +++ b/cmd/testwrapper/testwrapper_test.go @@ -273,8 +273,7 @@ func TestCached(t *testing.T) {} } func errExitCode(err error) (int, bool) { - var exit *exec.ExitError - if errors.As(err, &exit) { + if exit, ok := errors.AsType[*exec.ExitError](err); ok { return exit.ExitCode(), true } return 0, false diff --git a/cmd/tta/tta.go b/cmd/tta/tta.go index 377d01c9487f7..dbdbf5ddfbe40 100644 --- a/cmd/tta/tta.go +++ b/cmd/tta/tta.go @@ -91,7 +91,7 @@ func main() { if distro.Get() == distro.Gokrazy { cmdLine, _ := os.ReadFile("/proc/cmdline") explicitNS := false - for _, s := range strings.Fields(string(cmdLine)) { + for s := range strings.FieldsSeq(string(cmdLine)) { if ns, ok := strings.CutPrefix(s, "tta.nameserver="); ok { err := atomicfile.WriteFile("/tmp/resolv.conf", []byte("nameserver "+ns+"\n"), 0644) log.Printf("Wrote /tmp/resolv.conf: %v", err) diff --git a/cmd/viewer/viewer.go b/cmd/viewer/viewer.go index 56b999f5f50fe..1c04dbb2be334 100644 --- a/cmd/viewer/viewer.go +++ b/cmd/viewer/viewer.go @@ -500,8 +500,7 @@ func genView(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named, fie } writeTemplateWithComment("unsupportedField", fname) } - for i := range typ.NumMethods() { - f := typ.Method(i) + for f := range typ.Methods() { if !f.Exported() { continue } @@ -720,7 +719,7 @@ func main() { fieldComments := getFieldComments(pkg.Syntax) cloneOnlyType := map[string]bool{} - for _, t := range strings.Split(*flagCloneOnlyTypes, ",") { + for t := range strings.SplitSeq(*flagCloneOnlyTypes, ",") { cloneOnlyType[t] = true } diff --git 
a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index dca1d8ddf2f8b..2205a0eb3641c 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -38,8 +38,8 @@ import ( ) func fieldsOf(t reflect.Type) (fields []string) { - for i := range t.NumField() { - if name := t.Field(i).Name; name != "_" { + for field := range t.Fields() { + if name := field.Name; name != "_" { fields = append(fields, name) } } @@ -214,12 +214,12 @@ func TestRetryableErrors(t *testing.T) { } type retryableForTest interface { + error Retryable() bool } func isRetryableErrorForTest(err error) bool { - var ae retryableForTest - if errors.As(err, &ae) { + if ae, ok := errors.AsType[retryableForTest](err); ok { return ae.Retryable() } return false diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 965523f956f94..db46a956f9dba 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1484,7 +1484,7 @@ func (c *Direct) answerPing(pr *tailcfg.PingRequest) { } return } - for _, t := range strings.Split(pr.Types, ",") { + for t := range strings.SplitSeq(pr.Types, ",") { switch pt := tailcfg.PingType(t); pt { case tailcfg.PingTSMP, tailcfg.PingDisco, tailcfg.PingICMP, tailcfg.PingPeerAPI: go doPingerPing(c.logf, httpc, pr, c.pinger, pt) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 29b0a034877c7..f33620edd5add 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -617,12 +617,12 @@ func (ms *mapSession) patchifyPeersChanged(resp *tailcfg.MapResponse) { var nodeFields = sync.OnceValue(getNodeFields) -// getNodeFields returns the fails of tailcfg.Node. +// getNodeFields returns the fields of tailcfg.Node. 
func getNodeFields() []string { rt := reflect.TypeFor[tailcfg.Node]() - ret := make([]string, rt.NumField()) - for i := range rt.NumField() { - ret[i] = rt.Field(i).Name + ret := make([]string, 0, rt.NumField()) + for f := range rt.Fields() { + ret = append(ret, f.Name) } return ret } diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index c02ac758ebf16..7f0203cd051df 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -814,8 +814,8 @@ func runDialPlanTest(t *testing.T, plan *tailcfg.ControlDialPlan, want []netip.A // split on "|" first to remove memnet pipe suffix addrPart := raddrStr - if idx := strings.Index(raddrStr, "|"); idx >= 0 { - addrPart = raddrStr[:idx] + if before, _, ok := strings.Cut(raddrStr, "|"); ok { + addrPart = before } host, _, err2 := net.SplitHostPort(addrPart) diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index 1861a122e2f9e..14f30d9ce47ab 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -205,17 +205,15 @@ func (k *Knobs) AsDebugJSON() map[string]any { return nil } ret := map[string]any{} - rt := reflect.TypeFor[Knobs]() rv := reflect.ValueOf(k).Elem() // of *k - for i := 0; i < rt.NumField(); i++ { - name := rt.Field(i).Name - switch v := rv.Field(i).Addr().Interface().(type) { + for sf, fv := range rv.Fields() { + switch v := fv.Addr().Interface().(type) { case *atomic.Bool: - ret[name] = v.Load() + ret[sf.Name] = v.Load() case *syncs.AtomicValue[opt.Bool]: - ret[name] = v.Load() + ret[sf.Name] = v.Load() default: - panic(fmt.Sprintf("unknown field type %T for %v", v, name)) + panic(fmt.Sprintf("unknown field type %T for %v", v, sf.Name)) } } return ret diff --git a/derp/derp_test.go b/derp/derp_test.go index cff069dd4470c..f2ccefc9fafd6 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -121,8 +121,7 @@ func TestSendRecv(t *testing.T) { } defer cin.Close() - ctx, cancel := 
context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() brwServer := bufio.NewReadWriter(bufio.NewReader(cin), bufio.NewWriter(cin)) go s.Accept(ctx, cin, brwServer, fmt.Sprintf("[abc::def]:%v", i)) @@ -331,8 +330,7 @@ func TestSendFreeze(t *testing.T) { return c, c2 } - ctx, clientCtxCancel := context.WithCancel(context.Background()) - defer clientCtxCancel() + ctx := t.Context() aliceKey := key.NewNode() aliceClient, aliceConn := newClient(ctx, "alice", aliceKey) @@ -716,8 +714,7 @@ func (c *testClient) close(t *testing.T) { // TestWatch tests the connection watcher mechanism used by regional // DERP nodes to mesh up with each other. func TestWatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() ts := newTestServer(t, ctx) defer ts.close(t) @@ -764,8 +761,7 @@ func waitConnect(t testing.TB, c *Client) { } func TestServerRepliesToPing(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() ts := newTestServer(t, ctx) defer ts.close(t) diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index ae530c93a31c0..304906b749257 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -299,9 +299,7 @@ func TestBreakWatcherConnRecv(t *testing.T) { errChan := make(chan error, 1) // Start the watcher thread (which connects to the watched server) - wg.Add(1) // To avoid using t.Logf after the test ends. 
See https://golang.org/issue/40343 - go func() { - defer wg.Done() + wg.Go(func() { var peers int add := func(m derp.PeerPresentMessage) { t.Logf("add: %v", m.Key.ShortString()) @@ -318,7 +316,7 @@ func TestBreakWatcherConnRecv(t *testing.T) { } watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) - }() + }) synctest.Wait() @@ -381,9 +379,7 @@ func TestBreakWatcherConn(t *testing.T) { errorChan := make(chan error, 1) // Start the watcher thread (which connects to the watched server) - wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 - go func() { - defer wg.Done() + wg.Go(func() { var peers int add := func(m derp.PeerPresentMessage) { t.Logf("add: %v", m.Key.ShortString()) @@ -403,7 +399,7 @@ func TestBreakWatcherConn(t *testing.T) { } watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyError) - }() + }) synctest.Wait() diff --git a/derp/derpserver/derpserver.go b/derp/derpserver/derpserver.go index f311eb25d9817..343543c55c0cf 100644 --- a/derp/derpserver/derpserver.go +++ b/derp/derpserver/derpserver.go @@ -30,6 +30,7 @@ import ( "os" "os/exec" "runtime" + "slices" "strconv" "strings" "sync" @@ -71,7 +72,7 @@ func init() { if keys == "" { return } - for _, keyStr := range strings.Split(keys, ",") { + for keyStr := range strings.SplitSeq(keys, ",") { k, err := key.ParseNodePublicUntyped(mem.S(keyStr)) if err != nil { log.Printf("ignoring invalid debug key %q: %v", keyStr, err) @@ -1287,7 +1288,7 @@ func (c *sclient) sendPkt(dst *sclient, p pkt) error { if disco.LooksLikeDiscoWrapper(p.bs) { sendQueue = dst.discoSendQueue } - for attempt := 0; attempt < 3; attempt++ { + for attempt := range 3 { select { case <-dst.done: s.recordDrop(p.bs, c.key, dstKey, dropReasonGoneDisconnected) @@ -1484,16 +1485,13 @@ func (s *Server) noteClientActivity(c *sclient) { // If we saw this connection send previously, then consider // the group fighting and 
disable them all. if s.dupPolicy == disableFighters { - for _, prior := range dup.sendHistory { - if prior == c { - cs.ForeachClient(func(c *sclient) { - c.isDisabled.Store(true) - if cs.activeClient.Load() == c { - cs.activeClient.Store(nil) - } - }) - break - } + if slices.Contains(dup.sendHistory, c) { + cs.ForeachClient(func(c *sclient) { + c.isDisabled.Store(true) + if cs.activeClient.Load() == c { + cs.activeClient.Store(nil) + } + }) } } diff --git a/derp/derpserver/derpserver_test.go b/derp/derpserver/derpserver_test.go index 3a778d59fb009..7f956ba7809c0 100644 --- a/derp/derpserver/derpserver_test.go +++ b/derp/derpserver/derpserver_test.go @@ -627,22 +627,17 @@ func BenchmarkConcurrentStreams(b *testing.B) { if err != nil { b.Fatal(err) } - defer ln.Close() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := b.Context() + acceptDone := make(chan struct{}) go func() { - for ctx.Err() == nil { + defer close(acceptDone) + for { connIn, err := ln.Accept() if err != nil { - if ctx.Err() != nil { - return - } - b.Error(err) return } - brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) go s.Accept(ctx, connIn, brwServer, "test-client") } @@ -680,6 +675,9 @@ func BenchmarkConcurrentStreams(b *testing.B) { } } }) + + ln.Close() + <-acceptDone } func BenchmarkSendRecv(b *testing.B) { @@ -769,7 +767,7 @@ func TestServeDebugTrafficUniqueSenders(t *testing.T) { senderCardinality: hyperloglog.New(), } - for i := 0; i < 5; i++ { + for range 5 { c.senderCardinality.Insert(key.NewNode().Public().AppendTo(nil)) } @@ -845,7 +843,7 @@ func TestSenderCardinality(t *testing.T) { t.Errorf("EstimatedUniqueSenders() = %d, want ~10 (8-12 range)", estimate) } - for i := 0; i < 5; i++ { + for i := range 5 { c.senderCardinality.Insert(senders[i].AppendTo(nil)) } @@ -869,7 +867,7 @@ func TestSenderCardinality100(t *testing.T) { } numSenders := 100 - for i := 0; i < numSenders; i++ { + for range numSenders { 
c.senderCardinality.Insert(key.NewNode().Public().AppendTo(nil)) } @@ -945,7 +943,7 @@ func BenchmarkHyperLogLogInsertUnique(b *testing.B) { func BenchmarkHyperLogLogEstimate(b *testing.B) { hll := hyperloglog.New() - for i := 0; i < 100; i++ { + for range 100 { hll.Insert(key.NewNode().Public().AppendTo(nil)) } diff --git a/derp/xdp/xdp_linux.go b/derp/xdp/xdp_linux.go index 5d22716be4f16..7ab23bd2e9eed 100644 --- a/derp/xdp/xdp_linux.go +++ b/derp/xdp/xdp_linux.go @@ -62,8 +62,7 @@ func NewSTUNServer(config *STUNServerConfig, opts ...STUNServerOption) (*STUNSer objs := new(bpfObjects) err = loadBpfObjects(objs, nil) if err != nil { - var ve *ebpf.VerifierError - if config.FullVerifierErr && errors.As(err, &ve) { + if ve, ok := errors.AsType[*ebpf.VerifierError](err); config.FullVerifierErr && ok { err = fmt.Errorf("verifier error: %+v", ve) } return nil, fmt.Errorf("error loading XDP program: %w", err) diff --git a/disco/disco.go b/disco/disco.go index 2147529d175d4..8f667b26273e7 100644 --- a/disco/disco.go +++ b/disco/disco.go @@ -475,7 +475,7 @@ const allocateUDPRelayEndpointRequestLen = key.DiscoPublicRawLen*2 + // ClientDi func (m *AllocateUDPRelayEndpointRequest) AppendMarshal(b []byte) []byte { ret, p := appendMsgHeader(b, TypeAllocateUDPRelayEndpointRequest, v0, allocateUDPRelayEndpointRequestLen) - for i := 0; i < len(m.ClientDisco); i++ { + for i := range len(m.ClientDisco) { disco := m.ClientDisco[i].AppendTo(nil) copy(p, disco) p = p[key.DiscoPublicRawLen:] @@ -492,7 +492,7 @@ func parseAllocateUDPRelayEndpointRequest(ver uint8, p []byte) (m *AllocateUDPRe if len(p) < allocateUDPRelayEndpointRequestLen { return m, errShort } - for i := 0; i < len(m.ClientDisco); i++ { + for i := range len(m.ClientDisco) { m.ClientDisco[i] = key.DiscoPublicFromRaw32(mem.B(p[:key.DiscoPublicRawLen])) p = p[key.DiscoPublicRawLen:] } @@ -565,7 +565,7 @@ func (m *UDPRelayEndpoint) encode(b []byte) { disco := m.ServerDisco.AppendTo(nil) copy(b, disco) b = 
b[key.DiscoPublicRawLen:] - for i := 0; i < len(m.ClientDisco); i++ { + for i := range len(m.ClientDisco) { disco = m.ClientDisco[i].AppendTo(nil) copy(b, disco) b = b[key.DiscoPublicRawLen:] @@ -594,7 +594,7 @@ func (m *UDPRelayEndpoint) decode(b []byte) error { } m.ServerDisco = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) b = b[key.DiscoPublicRawLen:] - for i := 0; i < len(m.ClientDisco); i++ { + for i := range len(m.ClientDisco) { m.ClientDisco[i] = key.DiscoPublicFromRaw32(mem.B(b[:key.DiscoPublicRawLen])) b = b[key.DiscoPublicRawLen:] } diff --git a/docs/webhooks/example.go b/docs/webhooks/example.go index 53ec1c8b74b52..d93c425d2c3d2 100644 --- a/docs/webhooks/example.go +++ b/docs/webhooks/example.go @@ -87,7 +87,7 @@ func verifyWebhookSignature(req *http.Request, secret string) (events []event, e return nil, err } mac := hmac.New(sha256.New, []byte(secret)) - mac.Write([]byte(fmt.Sprint(timestamp.Unix()))) + mac.Write(fmt.Append(nil, timestamp.Unix())) mac.Write([]byte(".")) mac.Write(b) want := hex.EncodeToString(mac.Sum(nil)) @@ -120,8 +120,8 @@ func parseSignatureHeader(header string) (timestamp time.Time, signatures map[st } signatures = make(map[string][]string) - pairs := strings.Split(header, ",") - for _, pair := range pairs { + pairs := strings.SplitSeq(header, ",") + for pair := range pairs { parts := strings.Split(pair, "=") if len(parts) != 2 { return time.Time{}, nil, errNotSigned diff --git a/drive/driveimpl/compositedav/rewriting.go b/drive/driveimpl/compositedav/rewriting.go index 47f020461b77d..1f0a69d75978e 100644 --- a/drive/driveimpl/compositedav/rewriting.go +++ b/drive/driveimpl/compositedav/rewriting.go @@ -63,7 +63,7 @@ func (h *Handler) delegateRewriting(w http.ResponseWriter, r *http.Request, path // Fixup paths to add the requested path as a prefix, escaped for inclusion in XML. 
pp := shared.EscapeForXML(shared.Join(pathComponents[0:mpl]...)) - b := responseHrefRegex.ReplaceAll(bw.buf.Bytes(), []byte(fmt.Sprintf("$1%s/$3", pp))) + b := responseHrefRegex.ReplaceAll(bw.buf.Bytes(), fmt.Appendf(nil, "$1%s/$3", pp)) return bw.status, b } diff --git a/feature/conn25/conn25.go b/feature/conn25/conn25.go index 05f087e21df46..64fa93394d307 100644 --- a/feature/conn25/conn25.go +++ b/feature/conn25/conn25.go @@ -12,6 +12,7 @@ import ( "errors" "net/http" "net/netip" + "slices" "sync" "go4.org/netipx" @@ -329,13 +330,7 @@ func configFromNodeView(n tailcfg.NodeView) (config, error) { selfRoutedDomains: set.Set[dnsname.FQDN]{}, } for _, app := range apps { - selfMatchesTags := false - for _, tag := range app.Connectors { - if selfTags.Contains(tag) { - selfMatchesTags = true - break - } - } + selfMatchesTags := slices.ContainsFunc(app.Connectors, selfTags.Contains) for _, d := range app.Domains { fqdn, err := dnsname.ToFQDN(d) if err != nil { diff --git a/feature/conn25/conn25_test.go b/feature/conn25/conn25_test.go index d63e84e024738..7ed5c13b28ea9 100644 --- a/feature/conn25/conn25_test.go +++ b/feature/conn25/conn25_test.go @@ -115,7 +115,7 @@ func TestHandleConnectorTransitIPRequestMultipleTIP(t *testing.T) { t.Fatalf("n TransitIPs in response: %d, want 3", len(resp.TransitIPs)) } - for i := 0; i < 3; i++ { + for i := range 3 { got := resp.TransitIPs[i].Code if got != TransitIPResponseCode(0) { t.Fatalf("i=%d TransitIP Code: %d, want 0", i, got) diff --git a/feature/identityfederation/identityfederation.go b/feature/identityfederation/identityfederation.go index 4b96fd6a2020c..51a8018d8644d 100644 --- a/feature/identityfederation/identityfederation.go +++ b/feature/identityfederation/identityfederation.go @@ -128,8 +128,7 @@ func exchangeJWTForToken(ctx context.Context, baseURL, clientID, idToken string) }).Exchange(ctx, "", oauth2.SetAuthURLParam("client_id", clientID), oauth2.SetAuthURLParam("jwt", idToken)) if err != nil { // Try to extract 
more detailed error message - var retrieveErr *oauth2.RetrieveError - if errors.As(err, &retrieveErr) { + if retrieveErr, ok := errors.AsType[*oauth2.RetrieveError](err); ok { return "", fmt.Errorf("token exchange failed with status %d: %s", retrieveErr.Response.StatusCode, string(retrieveErr.Body)) } return "", fmt.Errorf("unexpected token exchange request error: %w", err) diff --git a/feature/linuxdnsfight/linuxdnsfight_test.go b/feature/linuxdnsfight/linuxdnsfight_test.go index 661ba7f6f3a00..ce67353db297c 100644 --- a/feature/linuxdnsfight/linuxdnsfight_test.go +++ b/feature/linuxdnsfight/linuxdnsfight_test.go @@ -42,7 +42,7 @@ func TestWatchFile(t *testing.T) { // Keep writing until we get a callback. func() { for i := range 10000 { - if err := os.WriteFile(filepath, []byte(fmt.Sprintf("write%d", i)), 0644); err != nil { + if err := os.WriteFile(filepath, fmt.Appendf(nil, "write%d", i), 0644); err != nil { t.Fatal(err) } select { diff --git a/feature/taildrop/fileops_fs.go b/feature/taildrop/fileops_fs.go index 4a5b3e71a0f55..3ddf95d0314cd 100644 --- a/feature/taildrop/fileops_fs.go +++ b/feature/taildrop/fileops_fs.go @@ -101,7 +101,7 @@ func (f fsFileOps) Rename(oldPath, newName string) (newPath string, err error) { wantSize := st.Size() const maxRetries = 10 - for i := 0; i < maxRetries; i++ { + for range maxRetries { renameMu.Lock() fi, statErr := os.Stat(dst) // Atomically rename the partial file as the destination file if it doesn't exist. 
diff --git a/health/health_test.go b/health/health_test.go index 953c4dca26ea3..824d53f7aba8d 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -82,8 +82,7 @@ func TestAppendWarnableDebugFlags(t *testing.T) { func TestNilMethodsDontCrash(t *testing.T) { var nilt *Tracker rv := reflect.ValueOf(nilt) - for i := 0; i < rv.NumMethod(); i++ { - mt := rv.Type().Method(i) + for mt, method := range rv.Methods() { t.Logf("calling Tracker.%s ...", mt.Name) var args []reflect.Value for j := 0; j < mt.Type.NumIn(); j++ { @@ -92,7 +91,7 @@ func TestNilMethodsDontCrash(t *testing.T) { } args = append(args, reflect.Zero(mt.Type.In(j))) } - rv.Method(i).Call(args) + method.Call(args) } } diff --git a/hostinfo/hostinfo_linux.go b/hostinfo/hostinfo_linux.go index 77f47ffe2fe7c..6b21d81529264 100644 --- a/hostinfo/hostinfo_linux.go +++ b/hostinfo/hostinfo_linux.go @@ -68,7 +68,7 @@ func deviceModelLinux() string { } func getQnapQtsVersion(versionInfo string) string { - for _, field := range strings.Fields(versionInfo) { + for field := range strings.FieldsSeq(versionInfo) { if suffix, ok := strings.CutPrefix(field, "QTSFW_"); ok { return suffix } @@ -110,11 +110,11 @@ func linuxVersionMeta() (meta versionMeta) { if err != nil { break } - eq := bytes.IndexByte(line, '=') - if eq == -1 { + before, after, ok := bytes.Cut(line, []byte{'='}) + if !ok { continue } - k, v := string(line[:eq]), strings.Trim(string(line[eq+1:]), `"'`) + k, v := string(before), strings.Trim(string(after), `"'`) m[k] = v } diff --git a/ipn/auditlog/auditlog.go b/ipn/auditlog/auditlog.go index cc6b43cbdba08..0d6bd278d1996 100644 --- a/ipn/auditlog/auditlog.go +++ b/ipn/auditlog/auditlog.go @@ -69,8 +69,11 @@ type Opts struct { // IsRetryableError returns true if the given error is retryable // See [controlclient.apiResponseError]. Potentially retryable errors implement the Retryable() method. 
func IsRetryableError(err error) bool { - var retryable interface{ Retryable() bool } - return errors.As(err, &retryable) && retryable.Retryable() + retryable, ok := errors.AsType[interface { + error + Retryable() bool + }](err) + return ok && retryable.Retryable() } type backoffOpts struct { diff --git a/ipn/ipnlocal/breaktcp_linux.go b/ipn/ipnlocal/breaktcp_linux.go index 0ba9ed6d78f19..1d7ea0f314b11 100644 --- a/ipn/ipnlocal/breaktcp_linux.go +++ b/ipn/ipnlocal/breaktcp_linux.go @@ -15,7 +15,7 @@ func init() { func breakTCPConnsLinux() error { var matched int - for fd := 0; fd < 1000; fd++ { + for fd := range 1000 { _, err := unix.GetsockoptTCPInfo(fd, unix.IPPROTO_TCP, unix.TCP_INFO) if err == nil { matched++ diff --git a/ipn/ipnlocal/bus_test.go b/ipn/ipnlocal/bus_test.go index 27ffebcdd570e..47d13f305dd39 100644 --- a/ipn/ipnlocal/bus_test.go +++ b/ipn/ipnlocal/bus_test.go @@ -36,9 +36,8 @@ func TestIsNotableNotify(t *testing.T) { // We use reflect to catch fields that might be added in the future without // remembering to update the [isNotableNotify] function. rt := reflect.TypeFor[ipn.Notify]() - for i := range rt.NumField() { + for sf := range rt.Fields() { n := &ipn.Notify{} - sf := rt.Field(i) switch sf.Name { case "_", "NetMap", "Engine", "Version": // Already covered above or not applicable. 
@@ -46,7 +45,7 @@ func TestIsNotableNotify(t *testing.T) { case "DriveShares": n.DriveShares = views.SliceOfViews[*drive.Share, drive.ShareView](make([]*drive.Share, 1)) default: - rf := reflect.ValueOf(n).Elem().Field(i) + rf := reflect.ValueOf(n).Elem().FieldByIndex(sf.Index) switch rf.Kind() { case reflect.Pointer: rf.Set(reflect.New(rf.Type().Elem())) @@ -64,7 +63,7 @@ func TestIsNotableNotify(t *testing.T) { notify *ipn.Notify want bool }{ - name: "field-" + rt.Field(i).Name, + name: "field-" + sf.Name, notify: n, want: true, }) diff --git a/ipn/ipnlocal/extension_host.go b/ipn/ipnlocal/extension_host.go index 125a2329447a3..7264d7407561f 100644 --- a/ipn/ipnlocal/extension_host.go +++ b/ipn/ipnlocal/extension_host.go @@ -339,7 +339,7 @@ func (h *ExtensionHost) FindMatchingExtension(target any) bool { val := reflect.ValueOf(target) typ := val.Type() - if typ.Kind() != reflect.Ptr || val.IsNil() { + if typ.Kind() != reflect.Pointer || val.IsNil() { panic("ipnext: target must be a non-nil pointer") } targetType := typ.Elem() diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index 3bd302aeab93d..a22c5156cc476 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -1010,9 +1010,8 @@ func TestNilExtensionHostMethodCall(t *testing.T) { t.Parallel() var h *ExtensionHost - typ := reflect.TypeOf(h) - for i := range typ.NumMethod() { - m := typ.Method(i) + typ := reflect.TypeFor[*ExtensionHost]() + for m := range typ.Methods() { if strings.HasSuffix(m.Name, "ForTest") { // Skip methods that are only for testing. 
continue diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index b8f3550395493..5f694e915c59c 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -3687,12 +3687,7 @@ func generateInterceptTCPPortFunc(ports []uint16) func(uint16) bool { f = func(p uint16) bool { return m[p] } } else { f = func(p uint16) bool { - for _, x := range ports { - if p == x { - return true - } - } - return false + return slices.Contains(ports, p) } } } @@ -7387,10 +7382,8 @@ var ( // allowedAutoRoute determines if the route being added via AdvertiseRoute (the app connector featuge) should be allowed. func allowedAutoRoute(ipp netip.Prefix) bool { // Note: blocking the addrs for globals, not solely the prefixes. - for _, addr := range disallowedAddrs { - if ipp.Addr() == addr { - return false - } + if slices.Contains(disallowedAddrs, ipp.Addr()) { + return false } for _, pfx := range disallowedRanges { if pfx.Overlaps(ipp) { diff --git a/ipn/ipnlocal/netmapcache/netmapcache_test.go b/ipn/ipnlocal/netmapcache/netmapcache_test.go index b5a46d2982a04..ca66a17133a5e 100644 --- a/ipn/ipnlocal/netmapcache/netmapcache_test.go +++ b/ipn/ipnlocal/netmapcache/netmapcache_test.go @@ -275,7 +275,7 @@ var skippedMapFields = []string{ func checkFieldCoverage(t *testing.T, nm *netmap.NetworkMap) { t.Helper() - mt := reflect.TypeOf(nm).Elem() + mt := reflect.TypeFor[netmap.NetworkMap]() mv := reflect.ValueOf(nm).Elem() for i := 0; i < mt.NumField(); i++ { f := mt.Field(i) diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index aa4c1ef527c6c..322884fc76c91 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -103,7 +103,7 @@ func (s *peerAPIServer) listen(ip netip.Addr, tunIfIndex int) (ln net.Listener, // deterministic that people will bake this into clients. // We try a few times just in case something's already // listening on that port (on all interfaces, probably). 
- for try := uint8(0); try < 5; try++ { + for try := range uint8(5) { a16 := ip.As16() hashData := a16[len(a16)-3:] hashData[0] += try diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index d25251accd797..9460896ad8d4a 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -835,8 +835,8 @@ func (b *LocalBackend) proxyHandlerForBackend(backend string) (http.Handler, err targetURL, insecure := expandProxyArg(backend) // Handle unix: scheme specially - if strings.HasPrefix(targetURL, "unix:") { - socketPath := strings.TrimPrefix(targetURL, "unix:") + if after, ok := strings.CutPrefix(targetURL, "unix:"); ok { + socketPath := after if socketPath == "" { return nil, fmt.Errorf("empty unix socket path") } diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index 52b3066584e08..56a6d60ccb043 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -101,9 +101,9 @@ func (b *LocalBackend) getSSHUsernames(req *tailcfg.C2NSSHUsernamesRequest) (*ta mem.HasSuffix(mem.B(line), mem.S("/false")) { continue } - colon := bytes.IndexByte(line, ':') - if colon != -1 { - add(string(line[:colon])) + before, _, ok := bytes.Cut(line, []byte{':'}) + if ok { + add(string(before)) } } } diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go index d1348abaafef5..36fce16acbefe 100644 --- a/ipn/localapi/debug.go +++ b/ipn/localapi/debug.go @@ -142,14 +142,11 @@ func (h *Handler) serveDebugDialTypes(w http.ResponseWriter, r *http.Request) { var wg sync.WaitGroup for _, dialer := range dialers { - dialer := dialer // loop capture - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { conn, err := dialer.dial(ctx, network, addr) results <- result{dialer.name, conn, err} - }() + }) } wg.Wait() diff --git a/ipn/prefs.go b/ipn/prefs.go index 72e0cf8b78424..1492bae383d38 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -439,12 +439,11 @@ func applyPrefsEdits(src, dst reflect.Value, mask map[string]reflect.Value) { func maskFields(v reflect.Value) 
map[string]reflect.Value { mask := make(map[string]reflect.Value) - for i := range v.NumField() { - f := v.Type().Field(i).Name - if !strings.HasSuffix(f, "Set") { + for sf, fv := range v.Fields() { + if !strings.HasSuffix(sf.Name, "Set") { continue } - mask[strings.TrimSuffix(f, "Set")] = v.Field(i) + mask[strings.TrimSuffix(sf.Name, "Set")] = fv } return mask } @@ -845,22 +844,15 @@ func (p *Prefs) SetAdvertiseExitNode(runExit bool) { // Tailscale IP. func peerWithTailscaleIP(st *ipnstate.Status, ip netip.Addr) (ps *ipnstate.PeerStatus, ok bool) { for _, ps := range st.Peer { - for _, ip2 := range ps.TailscaleIPs { - if ip == ip2 { - return ps, true - } + if slices.Contains(ps.TailscaleIPs, ip) { + return ps, true } } return nil, false } func isRemoteIP(st *ipnstate.Status, ip netip.Addr) bool { - for _, selfIP := range st.TailscaleIPs { - if ip == selfIP { - return false - } - } - return true + return !slices.Contains(st.TailscaleIPs, ip) } // ClearExitNode sets the ExitNodeID and ExitNodeIP to their zero values. 
diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 347a91e50739c..24c8f194e8596 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -27,8 +27,8 @@ import ( ) func fieldsOf(t reflect.Type) (fields []string) { - for i := range t.NumField() { - fields = append(fields, t.Field(i).Name) + for field := range t.Fields() { + fields = append(fields, field.Name) } return } diff --git a/ipn/serve.go b/ipn/serve.go index 911b408b65026..21d15ab818fc9 100644 --- a/ipn/serve.go +++ b/ipn/serve.go @@ -673,7 +673,7 @@ func CheckFunnelPort(wantedPort uint16, node *ipnstate.PeerStatus) error { return deny("") } wantedPortString := strconv.Itoa(int(wantedPort)) - for _, ps := range strings.Split(portsStr, ",") { + for ps := range strings.SplitSeq(portsStr, ",") { if ps == "" { continue } diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index e06e00eb3d3dd..feb86e457805a 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -189,8 +189,7 @@ func (s *awsStore) LoadState() error { ) if err != nil { - var pnf *ssmTypes.ParameterNotFound - if errors.As(err, &pnf) { + if _, ok := errors.AsType[*ssmTypes.ParameterNotFound](err); ok { // Create the parameter as it does not exist yet // and return directly as it is defacto empty return s.persistState() diff --git a/k8s-operator/sessionrecording/spdy/frame.go b/k8s-operator/sessionrecording/spdy/frame.go index 7087db3c32166..3ca661e0b6a2a 100644 --- a/k8s-operator/sessionrecording/spdy/frame.go +++ b/k8s-operator/sessionrecording/spdy/frame.go @@ -211,7 +211,7 @@ func parseHeaders(decompressor io.Reader, log *zap.SugaredLogger) (http.Header, return nil, fmt.Errorf("error determining num headers: %v", err) } h := make(http.Header, numHeaders) - for i := uint32(0); i < numHeaders; i++ { + for range numHeaders { name, err := readLenBytes() if err != nil { return nil, err @@ -224,7 +224,7 @@ func parseHeaders(decompressor io.Reader, log *zap.SugaredLogger) (http.Header, if 
err != nil { return nil, fmt.Errorf("error reading header data: %w", err) } - for _, v := range bytes.Split(val, headerSep) { + for v := range bytes.SplitSeq(val, headerSep) { h.Add(ns, string(v)) } } diff --git a/logtail/logtail.go b/logtail/logtail.go index ef296568da957..ed3872e79bbf4 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -902,8 +902,8 @@ func parseAndRemoveLogLevel(buf []byte) (level int, cleanBuf []byte) { if bytes.Contains(buf, v2) { return 2, bytes.ReplaceAll(buf, v2, nil) } - if i := bytes.Index(buf, vJSON); i != -1 { - rest := buf[i+len(vJSON):] + if _, after, ok := bytes.Cut(buf, vJSON); ok { + rest := after if len(rest) >= 2 { v := rest[0] if v >= '0' && v <= '9' { diff --git a/logtail/logtail_test.go b/logtail/logtail_test.go index 67250ae0db03f..19e1eeb7ac31e 100644 --- a/logtail/logtail_test.go +++ b/logtail/logtail_test.go @@ -86,10 +86,10 @@ func TestDrainPendingMessages(t *testing.T) { } // all of the "log line" messages usually arrive at once, but poll if needed. 
- body := "" + var body strings.Builder for i := 0; i <= logLines; i++ { - body += string(<-ts.uploaded) - count := strings.Count(body, "log line") + body.WriteString(string(<-ts.uploaded)) + count := strings.Count(body.String(), "log line") if count == logLines { break } diff --git a/metrics/multilabelmap.go b/metrics/multilabelmap.go index 54d41bbae9ef2..fa31819d9c3f8 100644 --- a/metrics/multilabelmap.go +++ b/metrics/multilabelmap.go @@ -63,16 +63,16 @@ func LabelString(k any) string { var sb strings.Builder sb.WriteString("{") - for i := range t.NumField() { - if i > 0 { + first := true + for ft, fv := range rv.Fields() { + if !first { sb.WriteString(",") } - ft := t.Field(i) + first = false label := ft.Tag.Get("prom") if label == "" { label = strings.ToLower(ft.Name) } - fv := rv.Field(i) switch fv.Kind() { case reflect.String: fmt.Fprintf(&sb, "%s=%q", label, fv.String()) diff --git a/net/art/stride_table_test.go b/net/art/stride_table_test.go index e797f40ee0ddc..8279a545e132d 100644 --- a/net/art/stride_table_test.go +++ b/net/art/stride_table_test.go @@ -19,7 +19,7 @@ import ( func TestInversePrefix(t *testing.T) { t.Parallel() for i := range 256 { - for len := 0; len < 9; len++ { + for len := range 9 { addr := i & (0xFF << (8 - len)) idx := prefixIndex(uint8(addr), len) addr2, len2 := inversePrefixIndex(idx) diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go index 2aa660d88d0a4..6b09ca0cc9672 100644 --- a/net/captivedetection/captivedetection_test.go +++ b/net/captivedetection/captivedetection_test.go @@ -94,8 +94,7 @@ func TestCaptivePortalRequest(t *testing.T) { now := time.Now() d.clock = func() time.Time { return now } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method != "GET" { @@ -133,8 +132,7 @@ func TestCaptivePortalRequest(t *testing.T) { func 
TestAgainstDERPHandler(t *testing.T) { d := NewDetector(t.Logf) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() s := httptest.NewServer(http.HandlerFunc(derpserver.ServeNoContent)) defer s.Close() diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index e68b2e7f9e266..392b64ba989ca 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -380,7 +380,7 @@ func isLibnssResolveUsed(env newOSConfigEnv) error { if err != nil { return fmt.Errorf("reading /etc/resolv.conf: %w", err) } - for _, line := range strings.Split(string(bs), "\n") { + for line := range strings.SplitSeq(string(bs), "\n") { fields := strings.Fields(line) if len(fields) < 2 || fields[0] != "hosts:" { continue diff --git a/net/dns/openresolv.go b/net/dns/openresolv.go index c3aaf3a6948c8..2a4ed174e3f09 100644 --- a/net/dns/openresolv.go +++ b/net/dns/openresolv.go @@ -82,7 +82,7 @@ func (m openresolvManager) GetBaseConfig() (OSConfig, error) { // Remove the "tailscale" snippet from the list. args := []string{"-l"} - for _, f := range strings.Split(strings.TrimSpace(string(bs)), " ") { + for f := range strings.SplitSeq(strings.TrimSpace(string(bs)), " ") { if f == "tailscale" { continue } diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index 6fec32d6a2685..ca15995894243 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -727,8 +727,7 @@ func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDe } // If we got a truncated UDP response, return that instead of an error. 
- var trErr truncatedResponseError - if errors.As(err, &trErr) { + if trErr, ok := errors.AsType[truncatedResponseError](err); ok { return trErr.res, nil } return nil, err diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index 6fd186c25a61c..3ddb47433e858 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -328,7 +328,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on udpLn *net.UDPConn err error ) - for try := 0; try < tries; try++ { + for range tries { if tcpLn != nil { tcpLn.Close() tcpLn = nil @@ -392,9 +392,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on var wg sync.WaitGroup if opts == nil || !opts.SkipTCP { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { for { conn, err := tcpLn.Accept() if err != nil { @@ -402,7 +400,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on } go handleConn(conn) } - }() + }) } handleUDP := func(addr netip.AddrPort, req []byte) { @@ -413,9 +411,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on } if opts == nil || !opts.SkipUDP { - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { for { buf := make([]byte, 65535) n, addr, err := udpLn.ReadFromUDPAddrPort(buf) @@ -425,7 +421,7 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on buf = buf[:n] go handleUDP(addr, buf) } - }() + }) } tb.Cleanup(func() { @@ -684,7 +680,7 @@ func makeResponseOfSize(tb testing.TB, domain string, targetSize int, includeOPT var response []byte var err error - for attempt := 0; attempt < 10; attempt++ { + for range 10 { testBuilder := dns.NewBuilder(nil, dns.Header{ Response: true, Authoritative: true, diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index d0601de7bfe25..53f130a8ae3fc 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -16,7 +16,7 @@ 
import ( "net/netip" "os" "runtime" - "sort" + "slices" "strconv" "strings" "sync" @@ -172,7 +172,7 @@ func WriteRoutes(w *bufio.Writer, routes map[dnsname.FQDN][]*dnstype.Resolver) { } kk = append(kk, k) } - sort.Slice(kk, func(i, j int) bool { return kk[i] < kk[j] }) + slices.Sort(kk) w.WriteByte('{') for i, k := range kk { if i > 0 { diff --git a/net/dns/wsl_windows.go b/net/dns/wsl_windows.go index c2400746b8a2d..1b93142f5ffb0 100644 --- a/net/dns/wsl_windows.go +++ b/net/dns/wsl_windows.go @@ -172,8 +172,7 @@ func (fs wslFS) Truncate(name string) error { return fs.WriteFile(name, nil, 064 func (fs wslFS) ReadFile(name string) ([]byte, error) { b, err := wslCombinedOutput(fs.cmd("cat", "--", name)) - var ee *exec.ExitError - if errors.As(err, &ee) && ee.ExitCode() == 1 { + if ee, ok := errors.AsType[*exec.ExitError](err); ok && ee.ExitCode() == 1 { return nil, os.ErrNotExist } return b, err diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index ebcdc4eaca4e3..a64c358c5c09f 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -545,7 +545,7 @@ func makeProbePlanInitial(dm *tailcfg.DERPMap, ifState *netmon.State) (plan prob var p4 []probe var p6 []probe - for try := 0; try < 3; try++ { + for try := range 3 { n := reg.Nodes[try%len(reg.Nodes)] delay := time.Duration(try) * defaultInitialRetransmitTime if n.IPv4 != "none" && ((ifState.HaveV4 && nodeMight4(n)) || n.IsTestNode()) { @@ -975,13 +975,11 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe // need to close the underlying Pinger after a timeout // or when all ICMP probes are done, regardless of // whether the HTTPS probes have finished. 
- wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { if err := c.measureAllICMPLatency(ctx, rs, need); err != nil { c.logf("[v1] measureAllICMPLatency: %v", err) } - }() + }) } wg.Add(len(need)) c.logf("netcheck: UDP is blocked, trying HTTPS") @@ -1072,9 +1070,7 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report if len(rg.Nodes) == 0 { continue } - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { node := rg.Nodes[0] req, _ := http.NewRequestWithContext(ctx, "HEAD", "https://"+node.HostName+"/derp/probe", nil) // One warm-up one to get HTTP connection set @@ -1099,7 +1095,7 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report } d := c.timeNow().Sub(t0) rs.addNodeLatency(node, netip.AddrPort{}, d) - }() + }) } wg.Wait() return nil diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index ab7f58febcb3b..bc8f4a744dda5 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -42,8 +42,7 @@ func TestBasic(t *testing.T) { c := newTestClient(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() if err := c.Standalone(ctx, "127.0.0.1:0"); err != nil { t.Fatal(err) @@ -124,8 +123,7 @@ func TestWorksWhenUDPBlocked(t *testing.T) { c := newTestClient(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() r, err := c.GetReport(ctx, dm, nil) if err != nil { @@ -1038,8 +1036,7 @@ func TestNoUDPNilGetReportOpts(t *testing.T) { } c := newTestClient(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() r, err := c.GetReport(ctx, dm, nil) if err != nil { diff --git a/net/neterror/neterror_linux.go b/net/neterror/neterror_linux.go index 9add4fd1d213c..a99452de5d3b8 100644 --- a/net/neterror/neterror_linux.go +++ b/net/neterror/neterror_linux.go @@ -12,8 +12,7 @@ import ( func init() { shouldDisableUDPGSO = func(err error) bool 
{ - var serr *os.SyscallError - if errors.As(err, &serr) { + if serr, ok := errors.AsType[*os.SyscallError](err); ok { // EIO is returned by udp_send_skb() if the device driver does not // have tx checksumming enabled, which is a hard requirement of // UDP_SEGMENT. See: diff --git a/net/netmon/state.go b/net/netmon/state.go index cdfa1d0fbe552..98ed52e5e4c41 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -812,11 +812,8 @@ func (m *Monitor) HasCGNATInterface() (bool, error) { if hasCGNATInterface || !i.IsUp() || isTailscaleInterface(i.Name, pfxs) { return } - for _, pfx := range pfxs { - if cgnatRange.Overlaps(pfx) { - hasCGNATInterface = true - break - } + if slices.ContainsFunc(pfxs, cgnatRange.Overlaps) { + hasCGNATInterface = true } }) if err != nil { diff --git a/net/netutil/routes.go b/net/netutil/routes.go index c8212b9af66dd..26f2de97c5767 100644 --- a/net/netutil/routes.go +++ b/net/netutil/routes.go @@ -41,8 +41,8 @@ func CalcAdvertiseRoutes(advertiseRoutes string, advertiseDefaultRoute bool) ([] routeMap := map[netip.Prefix]bool{} if advertiseRoutes != "" { var default4, default6 bool - advroutes := strings.Split(advertiseRoutes, ",") - for _, s := range advroutes { + advroutes := strings.SplitSeq(advertiseRoutes, ",") + for s := range advroutes { ipp, err := netip.ParsePrefix(s) if err != nil { return nil, fmt.Errorf("%q is not a valid IP address or CIDR prefix", s) diff --git a/net/socks5/socks5.go b/net/socks5/socks5.go index 729fc8e882cf1..f67dc1ecc202a 100644 --- a/net/socks5/socks5.go +++ b/net/socks5/socks5.go @@ -21,6 +21,7 @@ import ( "io" "log" "net" + "slices" "strconv" "time" @@ -488,10 +489,8 @@ func parseClientGreeting(r io.Reader, authMethod byte) error { if err != nil { return fmt.Errorf("could not read methods") } - for _, m := range methods { - if m == authMethod { - return nil - } + if slices.Contains(methods, authMethod) { + return nil } return fmt.Errorf("no acceptable auth methods") } diff --git 
a/net/socks5/socks5_test.go b/net/socks5/socks5_test.go index e6ca4b68e9967..84ef4be7bc651 100644 --- a/net/socks5/socks5_test.go +++ b/net/socks5/socks5_test.go @@ -180,11 +180,11 @@ func TestUDP(t *testing.T) { const echoServerNumber = 3 echoServerListener := make([]net.PacketConn, echoServerNumber) - for i := 0; i < echoServerNumber; i++ { + for i := range echoServerNumber { echoServerListener[i] = newUDPEchoServer() } defer func() { - for i := 0; i < echoServerNumber; i++ { + for i := range echoServerNumber { _ = echoServerListener[i].Close() } }() @@ -277,10 +277,10 @@ func TestUDP(t *testing.T) { } defer socks5UDPConn.Close() - for i := 0; i < echoServerNumber; i++ { + for i := range echoServerNumber { port := echoServerListener[i].LocalAddr().(*net.UDPAddr).Port addr := socksAddr{addrType: ipv4, addr: "127.0.0.1", port: uint16(port)} - requestBody := []byte(fmt.Sprintf("Test %d", i)) + requestBody := fmt.Appendf(nil, "Test %d", i) responseBody := sendUDPAndWaitResponse(socks5UDPConn, addr, requestBody) if !bytes.Equal(requestBody, responseBody) { t.Fatalf("got: %q want: %q", responseBody, requestBody) diff --git a/net/stunserver/stunserver_test.go b/net/stunserver/stunserver_test.go index c96aea4d15973..f9efe21f30494 100644 --- a/net/stunserver/stunserver_test.go +++ b/net/stunserver/stunserver_test.go @@ -60,8 +60,7 @@ func TestSTUNServer(t *testing.T) { func BenchmarkServerSTUN(b *testing.B) { b.ReportAllocs() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := b.Context() s := New(ctx) s.Listen("localhost:0") diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 2f5d8c1d13254..6fe992575c2c2 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -1406,11 +1406,11 @@ func (t *Wrapper) InjectInboundPacketBuffer(pkt *netstack_PacketBuffer, buffs [] return err } } - for i := 0; i < n; i++ { + for i := range n { buffs[i] = buffs[i][:PacketStartOffset+sizes[i]] } defer func() { - for i := 0; i < n; i++ { + for i := range n { 
buffs[i] = buffs[i][:cap(buffs[i])] } }() diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index bd29489a83d22..57b300513fec8 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -95,7 +95,7 @@ func tcp4syn(src, dst string, sport, dport uint16) []byte { func nets(nets ...string) (ret []netip.Prefix) { for _, s := range nets { - if i := strings.IndexByte(s, '/'); i == -1 { + if found := strings.Contains(s, "/"); !found { ip, err := netip.ParseAddr(s) if err != nil { panic(err) @@ -122,13 +122,13 @@ func ports(s string) filter.PortRange { } var fs, ls string - i := strings.IndexByte(s, '-') - if i == -1 { + before, after, ok := strings.Cut(s, "-") + if !ok { fs = s ls = fs } else { - fs = s[:i] - ls = s[i+1:] + fs = before + ls = after } first, err := strconv.ParseInt(fs, 10, 16) if err != nil { diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 03d8e3dc3050d..7dd89920ee3cd 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -977,7 +977,7 @@ func (e ErrServerNotReady) Error() string { // For now, we favor simplicity and reducing VNI re-use over more complex // ephemeral port (VNI) selection algorithms. 
func (s *Server) getNextVNILocked() (uint32, error) { - for i := uint32(0); i < totalPossibleVNI; i++ { + for range totalPossibleVNI { vni := s.nextVNI if vni == maxVNI { s.nextVNI = minVNI diff --git a/net/udprelay/server_test.go b/net/udprelay/server_test.go index 66de0d88a7d0d..204e365bc33e8 100644 --- a/net/udprelay/server_test.go +++ b/net/udprelay/server_test.go @@ -265,7 +265,7 @@ func TestServer(t *testing.T) { tcB := newTestClient(t, endpoint.VNI, tcBServerEndpointAddr, discoB, discoA.Public(), endpoint.ServerDisco) defer tcB.close() - for i := 0; i < 2; i++ { + for range 2 { // We handshake both clients twice to guarantee server-side // packet reading goroutines, which are independent across // address families, have seen an answer from both clients @@ -345,7 +345,7 @@ func TestServer_getNextVNILocked(t *testing.T) { s := &Server{ nextVNI: minVNI, } - for i := uint64(0); i < uint64(totalPossibleVNI); i++ { + for range uint64(totalPossibleVNI) { vni, err := s.getNextVNILocked() if err != nil { // using quicktest here triples test time t.Fatal(err) diff --git a/prober/prober.go b/prober/prober.go index 3a43401a14ac3..40eef2faf43b1 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -122,12 +122,8 @@ func (p *Prober) Run(name string, interval time.Duration, labels Labels, pc Prob "name": name, "class": pc.Class, } - for k, v := range pc.Labels { - lb[k] = v - } - for k, v := range labels { - lb[k] = v - } + maps.Copy(lb, pc.Labels) + maps.Copy(lb, labels) probe := newProbe(p, name, interval, lb, pc) p.probes[name] = probe diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index b414ce3fbf42a..28316b04d1f2f 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -158,8 +158,7 @@ func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd, err cmd.Dir = "/" case errors.Is(err, fs.ErrPermission) || errors.Is(err, fs.ErrNotExist): // Ensure that cmd.Dir is the source of the error. 
- var pathErr *fs.PathError - if errors.As(err, &pathErr) && pathErr.Path == cmd.Dir { + if pathErr, ok := errors.AsType[*fs.PathError](err); ok && pathErr.Path == cmd.Dir { // If we cannot run loginShell in localUser.HomeDir, // we will try to run this command in the root directory. cmd.Dir = "/" @@ -312,7 +311,7 @@ func parseIncubatorArgs(args []string) (incubatorArgs, error) { flags.StringVar(&ia.encodedEnv, "encoded-env", "", "JSON encoded array of environment variables in '['key=value']' format") flags.Parse(args) - for _, g := range strings.Split(groups, ",") { + for g := range strings.SplitSeq(groups, ",") { gid, err := strconv.Atoi(g) if err != nil { return ia, fmt.Errorf("unable to parse group id %q: %w", g, err) diff --git a/ssh/tailssh/privs_test.go b/ssh/tailssh/privs_test.go index f0ec66c64e581..7ddc9c8610a04 100644 --- a/ssh/tailssh/privs_test.go +++ b/ssh/tailssh/privs_test.go @@ -262,12 +262,10 @@ func maybeValidUID(id int) bool { return true } - var u1 user.UnknownUserIdError - if errors.As(err, &u1) { + if _, ok := errors.AsType[user.UnknownUserIdError](err); ok { return false } - var u2 user.UnknownUserError - if errors.As(err, &u2) { + if _, ok := errors.AsType[user.UnknownUserError](err); ok { return false } @@ -281,12 +279,10 @@ func maybeValidGID(id int) bool { return true } - var u1 user.UnknownGroupIdError - if errors.As(err, &u1) { + if _, ok := errors.AsType[user.UnknownGroupIdError](err); ok { return false } - var u2 user.UnknownGroupError - if errors.As(err, &u2) { + if _, ok := errors.AsType[user.UnknownGroupError](err); ok { return false } diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index cb56f701b5e68..debad2b5cf195 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -14,6 +14,7 @@ import ( "errors" "fmt" "io" + "maps" "net" "net/http" "net/netip" @@ -500,15 +501,9 @@ func (srv *server) newConn() (*conn, error) { }, } ss := c.Server - for k, v := range ssh.DefaultRequestHandlers { - 
ss.RequestHandlers[k] = v - } - for k, v := range ssh.DefaultChannelHandlers { - ss.ChannelHandlers[k] = v - } - for k, v := range ssh.DefaultSubsystemHandlers { - ss.SubsystemHandlers[k] = v - } + maps.Copy(ss.RequestHandlers, ssh.DefaultRequestHandlers) + maps.Copy(ss.ChannelHandlers, ssh.DefaultChannelHandlers) + maps.Copy(ss.SubsystemHandlers, ssh.DefaultSubsystemHandlers) keys, err := srv.lb.GetSSH_HostKeys() if err != nil { return nil, err @@ -964,8 +959,7 @@ func (ss *sshSession) run() { var err error rec, err = ss.startNewRecording() if err != nil { - var uve userVisibleError - if errors.As(err, &uve) { + if uve, ok := errors.AsType[userVisibleError](err); ok { fmt.Fprintf(ss, "%s\r\n", uve.SSHTerminationMessage()) } else { fmt.Fprintf(ss, "can't start new recording\r\n") @@ -986,8 +980,7 @@ func (ss *sshSession) run() { logf("start failed: %v", err.Error()) if errors.Is(err, context.Canceled) { err := context.Cause(ss.ctx) - var uve userVisibleError - if errors.As(err, &uve) { + if uve, ok := errors.AsType[userVisibleError](err); ok { fmt.Fprintf(ss, "%s\r\n", uve) } } diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index df80235006353..4d6f2172d90f4 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -571,9 +571,7 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { tstest.Replace(t, &handler, tt.handler) sc, dc := memnet.NewTCPConn(src, dst, 1024) var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { t.Errorf("client: %v", err) @@ -603,7 +601,7 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { t.Errorf("client output must not contain %q", x) } } - }() + }) if err := s.HandleSSHConn(dc); err != nil { t.Errorf("unexpected error: %v", err) } @@ -666,9 +664,7 @@ func TestMultipleRecorders(t *testing.T) { } var wg sync.WaitGroup - wg.Add(1) - go 
func() { - defer wg.Done() + wg.Go(func() { c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { t.Errorf("client: %v", err) @@ -690,7 +686,7 @@ func TestMultipleRecorders(t *testing.T) { if string(out) != "Ran echo!\n" { t.Errorf("client: unexpected output: %q", out) } - }() + }) if err := s.HandleSSHConn(dc); err != nil { t.Errorf("unexpected error: %v", err) } @@ -757,9 +753,7 @@ func TestSSHRecordingNonInteractive(t *testing.T) { } var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { t.Errorf("client: %v", err) @@ -778,7 +772,7 @@ func TestSSHRecordingNonInteractive(t *testing.T) { if err != nil { t.Errorf("client: %v", err) } - }() + }) if err := s.HandleSSHConn(dc); err != nil { t.Errorf("unexpected error: %v", err) } @@ -988,9 +982,7 @@ func TestSSHAuthFlow(t *testing.T) { } var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { c, chans, reqs, err := testssh.NewClientConn(sc, sc.RemoteAddr().String(), cfg) if err != nil { if !tc.authErr { @@ -1014,7 +1006,7 @@ func TestSSHAuthFlow(t *testing.T) { if err != nil { t.Errorf("client: %v", err) } - }() + }) if err := s.HandleSSHConn(dc); err != nil { t.Errorf("unexpected error: %v", err) } @@ -1228,8 +1220,8 @@ func TestSSH(t *testing.T) { func parseEnv(out []byte) map[string]string { e := map[string]string{} for line := range lineiter.Bytes(out) { - if i := bytes.IndexByte(line, '='); i != -1 { - e[string(line[:i])] = string(line[i+1:]) + if before, after, ok := bytes.Cut(line, []byte{'='}); ok { + e[string(before)] = string(after) } } return e diff --git a/syncs/shardedint_test.go b/syncs/shardedint_test.go index 8c3f7ef7bd915..ac298e62686a9 100644 --- a/syncs/shardedint_test.go +++ b/syncs/shardedint_test.go @@ -66,10 +66,10 @@ func TestShardedInt(t *testing.T) { numWorkers := 1000 numIncrements := 1000 
wg.Add(numWorkers) - for i := 0; i < numWorkers; i++ { + for range numWorkers { go func() { defer wg.Done() - for i := 0; i < numIncrements; i++ { + for range numIncrements { m.Add(1) } }() diff --git a/syncs/shardvalue_test.go b/syncs/shardvalue_test.go index 1dd0a542e60c2..ab34527abd77f 100644 --- a/syncs/shardvalue_test.go +++ b/syncs/shardvalue_test.go @@ -66,10 +66,10 @@ func TestShardValue(t *testing.T) { iterations := 10000 var wg sync.WaitGroup wg.Add(goroutines) - for i := 0; i < goroutines; i++ { + for range goroutines { go func() { defer wg.Done() - for i := 0; i < iterations; i++ { + for range iterations { sv.One(func(v *intVal) { v.Add(1) }) diff --git a/syncs/syncs_test.go b/syncs/syncs_test.go index 81fcccbf63aca..1e79448ad961e 100644 --- a/syncs/syncs_test.go +++ b/syncs/syncs_test.go @@ -6,6 +6,7 @@ package syncs import ( "context" "io" + "maps" "os" "sync" "testing" @@ -226,9 +227,7 @@ func TestMap(t *testing.T) { } got := map[string]int{} want := map[string]int{"one": 1, "two": 2, "three": 3} - for k, v := range m.All() { - got[k] = v - } + maps.Insert(got, m.All()) if d := cmp.Diff(got, want); d != "" { t.Errorf("Range mismatch (-got +want):\n%s", d) } @@ -243,9 +242,7 @@ func TestMap(t *testing.T) { m.Delete("noexist") got = map[string]int{} want = map[string]int{} - for k, v := range m.All() { - got[k] = v - } + maps.Insert(got, m.All()) if d := cmp.Diff(got, want); d != "" { t.Errorf("Range mismatch (-got +want):\n%s", d) } diff --git a/tailcfg/tailcfg_test.go b/tailcfg/tailcfg_test.go index 9ed7c1e147a7f..6d4d4e8e951c4 100644 --- a/tailcfg/tailcfg_test.go +++ b/tailcfg/tailcfg_test.go @@ -21,8 +21,8 @@ import ( ) func fieldsOf(t reflect.Type) (fields []string) { - for i := range t.NumField() { - fields = append(fields, t.Field(i).Name) + for field := range t.Fields() { + fields = append(fields, field.Name) } return } diff --git a/tka/chaintest_test.go b/tka/chaintest_test.go index 5ca68afa8f8ba..467880e2c7dc2 100644 --- 
a/tka/chaintest_test.go +++ b/tka/chaintest_test.go @@ -7,6 +7,7 @@ import ( "bytes" "crypto/ed25519" "fmt" + "maps" "strconv" "strings" "testing" @@ -198,9 +199,7 @@ func (c *testChain) recordParent(t *testing.T, child, parent string) { // This method populates c.AUMs and c.AUMHashes. func (c *testChain) buildChain() { pending := make(map[string]*testchainNode, len(c.Nodes)) - for k, v := range c.Nodes { - pending[k] = v - } + maps.Copy(pending, c.Nodes) // AUMs with a parent need to know their hash, so we // only compute AUMs whose parents have been computed diff --git a/tka/key.go b/tka/key.go index bc946156eb9be..005a104333a31 100644 --- a/tka/key.go +++ b/tka/key.go @@ -7,6 +7,7 @@ import ( "crypto/ed25519" "errors" "fmt" + "maps" "tailscale.com/types/tkatype" ) @@ -64,9 +65,7 @@ func (k Key) Clone() Key { if k.Meta != nil { out.Meta = make(map[string]string, len(k.Meta)) - for k, v := range k.Meta { - out.Meta[k] = v - } + maps.Copy(out.Meta, k.Meta) } return out diff --git a/tka/scenario_test.go b/tka/scenario_test.go index cf4ee2d5b2582..ad3742dbf7164 100644 --- a/tka/scenario_test.go +++ b/tka/scenario_test.go @@ -5,6 +5,7 @@ package tka import ( "crypto/ed25519" + "maps" "sort" "testing" ) @@ -36,9 +37,7 @@ func (s *scenarioTest) mkNode(name string) *scenarioNode { } aums := make(map[string]AUM, len(s.initial.AUMs)) - for k, v := range s.initial.AUMs { - aums[k] = v - } + maps.Copy(aums, s.initial.AUMs) n := &scenarioNode{ A: authority, diff --git a/tka/sync.go b/tka/sync.go index 27e1c0e633329..18a99138482e9 100644 --- a/tka/sync.go +++ b/tka/sync.go @@ -107,7 +107,7 @@ func (a *Authority) SyncOffer(storage Chonk) (SyncOffer, error) { skipAmount uint64 = ancestorsSkipStart curs AUMHash = a.Head() ) - for i := uint64(0); i < maxSyncHeadIntersectionIter; i++ { + for i := range uint64(maxSyncHeadIntersectionIter) { if i > 0 && (i%skipAmount) == 0 { out.Ancestors = append(out.Ancestors, curs) skipAmount = skipAmount << ancestorsSkipShift diff --git 
a/tool/gocross/exec_other.go b/tool/gocross/exec_other.go index 20e52aa8f9496..b9004b8d52c70 100644 --- a/tool/gocross/exec_other.go +++ b/tool/gocross/exec_other.go @@ -21,8 +21,7 @@ func doExec(cmd string, args []string, env []string) error { // Propagate ExitErrors within this func to give us similar semantics to // the Unix variant. - var ee *exec.ExitError - if errors.As(err, &ee) { + if ee, ok := errors.AsType[*exec.ExitError](err); ok { os.Exit(ee.ExitCode()) } diff --git a/tsconsensus/monitor.go b/tsconsensus/monitor.go index cc5ac812c49d9..b937926a651dd 100644 --- a/tsconsensus/monitor.go +++ b/tsconsensus/monitor.go @@ -85,7 +85,7 @@ func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) { lines = append(lines, fmt.Sprintf("%s\t\t%d\t%d\t%t", name, p.RxBytes, p.TxBytes, p.Active)) } } - _, err = w.Write([]byte(fmt.Sprintf("RaftState: %s\n", s.RaftState))) + _, err = w.Write(fmt.Appendf(nil, "RaftState: %s\n", s.RaftState)) if err != nil { log.Printf("monitor: error writing status: %v", err) return @@ -93,7 +93,7 @@ func (m *monitor) handleSummaryStatus(w http.ResponseWriter, r *http.Request) { slices.Sort(lines) for _, ln := range lines { - _, err = w.Write([]byte(fmt.Sprintf("%s\n", ln))) + _, err = w.Write(fmt.Appendf(nil, "%s\n", ln)) if err != nil { log.Printf("monitor: error writing status: %v", err) return diff --git a/tsconsensus/tsconsensus_test.go b/tsconsensus/tsconsensus_test.go index 8897db119c467..3236ef680a8e9 100644 --- a/tsconsensus/tsconsensus_test.go +++ b/tsconsensus/tsconsensus_test.go @@ -296,7 +296,7 @@ func startNodesAndWaitForPeerStatus(t testing.TB, ctx context.Context, clusterTa keysToTag := make([]key.NodePublic, nNodes) localClients := make([]*tailscale.LocalClient, nNodes) control, controlURL := startControl(t) - for i := 0; i < nNodes; i++ { + for i := range nNodes { ts, key, _ := startNode(t, ctx, controlURL, fmt.Sprintf("node %d", i)) ps[i] = &participant{ts: ts, key: key} keysToTag[i] = key @@ -353,7 
+353,7 @@ func createConsensusCluster(t testing.TB, ctx context.Context, clusterTag string } fxRaftConfigContainsAll := func() bool { - for i := 0; i < len(participants); i++ { + for i := range participants { fut := participants[i].c.raft.GetConfiguration() err = fut.Error() if err != nil { @@ -618,8 +618,8 @@ func TestOnlyTaggedPeersCanDialRaftPort(t *testing.T) { } isNetErr := func(err error) bool { - var netErr net.Error - return errors.As(err, &netErr) + _, ok := errors.AsType[net.Error](err) + return ok } err := getErrorFromTryingToSend(untaggedNode) diff --git a/tsd/tsd.go b/tsd/tsd.go index 9d79334d68e2b..57437ddcc2373 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -226,8 +226,7 @@ func (p *SubSystem[T]) Set(v T) { return } - var z *T - panic(fmt.Sprintf("%v is already set", reflect.TypeOf(z).Elem().String())) + panic(fmt.Sprintf("%v is already set", reflect.TypeFor[T]().String())) } p.v = v p.set = true @@ -236,8 +235,7 @@ func (p *SubSystem[T]) Set(v T) { // Get returns the value of p, panicking if it hasn't been set. func (p *SubSystem[T]) Get() T { if !p.set { - var z *T - panic(fmt.Sprintf("%v is not set", reflect.TypeOf(z).Elem().String())) + panic(fmt.Sprintf("%v is not set", reflect.TypeFor[T]().String())) } return p.v } diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 776854e227926..4a116cf3467f7 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -466,9 +466,7 @@ func (s *Server) close() { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { // Perform a best-effort final flush. 
if s.logtail != nil { s.logtail.Shutdown(ctx) @@ -476,14 +474,12 @@ func (s *Server) close() { if s.logbuffer != nil { s.logbuffer.Close() } - }() - wg.Add(1) - go func() { - defer wg.Done() + }) + wg.Go(func() { if s.localAPIServer != nil { s.localAPIServer.Shutdown(ctx) } - }() + }) if s.shutdownCancel != nil { s.shutdownCancel() diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index 1cf4bf48fe5bd..a2bf76e18c765 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -2598,7 +2598,7 @@ func buildDNSQuery(name string, srcIP netip.Addr) []byte { 0x00, 0x01, // QDCOUNT: 1 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ANCOUNT, NSCOUNT, ARCOUNT } - for _, label := range strings.Split(name, ".") { + for label := range strings.SplitSeq(name, ".") { dns = append(dns, byte(len(label))) dns = append(dns, label...) } diff --git a/tstest/deptest/deptest.go b/tstest/deptest/deptest.go index 3117af2fffa01..59672761ef06b 100644 --- a/tstest/deptest/deptest.go +++ b/tstest/deptest/deptest.go @@ -124,7 +124,7 @@ func ImportAliasCheck(t testing.TB, relDir string) { } badRx := regexp.MustCompile(`^([^:]+:\d+):\s+"golang\.org/x/exp/(slices|maps)"`) if s := strings.TrimSpace(string(matches)); s != "" { - for _, line := range strings.Split(s, "\n") { + for line := range strings.SplitSeq(s, "\n") { if m := badRx.FindStringSubmatch(line); m != nil { t.Errorf("%s: the x/exp/%s package should be imported as x%s", m[1], m[2], m[2]) } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 2d21942789858..0482e4b533333 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -1673,7 +1673,7 @@ func TestNetstackTCPLoopback(t *testing.T) { defer lis.Close() writeFn := func(conn net.Conn) error { - for i := 0; i < writeBufIterations; i++ { + for range writeBufIterations { toWrite := make([]byte, writeBufSize) var wrote int for { diff --git a/tstest/integration/nat/nat_test.go 
b/tstest/integration/nat/nat_test.go index 1f62436fff341..2ac16bf587ed3 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -317,9 +317,7 @@ func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { } defer srv.Close() - wg.Add(1) - go func() { - defer wg.Done() + wg.Go(func() { for { c, err := srv.Accept() if err != nil { @@ -327,7 +325,7 @@ func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { } go nt.vnet.ServeUnixConn(c.(*net.UnixConn), vnet.ProtocolQEMU) } - }() + }) for i, node := range nodes { disk := fmt.Sprintf("%s/node-%d.qcow2", nt.tempDir, i) @@ -391,7 +389,6 @@ func (nt *natTest) runTest(addNode ...addNodeFunc) pingRoute { var eg errgroup.Group for i, c := range clients { - i, c := i, c eg.Go(func() error { node := nodes[i] t.Logf("%v calling Status...", node) diff --git a/tstest/integration/vms/vms_test.go b/tstest/integration/vms/vms_test.go index 5ebb12b71032b..bdfba1e273ecf 100644 --- a/tstest/integration/vms/vms_test.go +++ b/tstest/integration/vms/vms_test.go @@ -364,7 +364,7 @@ func (h *Harness) testDistro(t *testing.T, d Distro, ipm ipMapping) { // starts with testcontrol sometimes there can be up to a few seconds where // tailscaled is in an unknown state on these virtual machines. This exponential // delay loop should delay long enough for tailscaled to be ready. - for count := 0; count < 10; count++ { + for range 10 { sess := getSession(t, cli) outp, err = sess.CombinedOutput("tailscale status") diff --git a/tstest/natlab/natlab.go b/tstest/natlab/natlab.go index add812d8fe6e3..b66779eebe7a3 100644 --- a/tstest/natlab/natlab.go +++ b/tstest/natlab/natlab.go @@ -18,6 +18,7 @@ import ( "net" "net/netip" "os" + "slices" "sort" "strconv" "sync" @@ -247,12 +248,7 @@ func (f *Interface) String() string { // Contains reports whether f contains ip as an IP. 
func (f *Interface) Contains(ip netip.Addr) bool { - for _, v := range f.ips { - if ip == v { - return true - } - } - return false + return slices.Contains(f.ips, ip) } type routeEntry struct { @@ -348,10 +344,8 @@ func (m *Machine) isLocalIP(ip netip.Addr) bool { m.mu.Lock() defer m.mu.Unlock() for _, intf := range m.interfaces { - for _, iip := range intf.ips { - if ip == iip { - return true - } + if slices.Contains(intf.ips, ip) { + return true } } return false @@ -565,7 +559,7 @@ func (m *Machine) interfaceForIP(ip netip.Addr) (*Interface, error) { func (m *Machine) pickEphemPort() (port uint16, err error) { m.mu.Lock() defer m.mu.Unlock() - for tries := 0; tries < 500; tries++ { + for range 500 { port := uint16(rand.IntN(32<<10) + 32<<10) if !m.portInUseLocked(port) { return port, nil diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index ea119bad7bb10..9eb81520cd7b0 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -1917,7 +1917,7 @@ func (n *network) doPortMap(src netip.Addr, dstLANPort, wantExtPort uint16, sec } } - for try := 0; try < 20_000; try++ { + for range 20_000 { if wanAP.Port() > 0 && !n.natTable.IsPublicPortUsed(wanAP) { mak.Set(&n.portMap, wanAP, portMapping{ dst: dst, diff --git a/tstest/resource_test.go b/tstest/resource_test.go index ecef91cf60b08..fc868d5f502c7 100644 --- a/tstest/resource_test.go +++ b/tstest/resource_test.go @@ -245,7 +245,7 @@ func TestParseGoroutines(t *testing.T) { t.Errorf("sort field has different number of words: got %d, want %d", len(sorted), len(original)) continue } - for i := 0; i < len(original); i++ { + for i := range original { if original[i] != sorted[len(sorted)-1-i] { t.Errorf("sort field word mismatch at position %d: got %q, want %q", i, sorted[len(sorted)-1-i], original[i]) } diff --git a/tstest/typewalk/typewalk.go b/tstest/typewalk/typewalk.go index f989b4c180394..dea87a8e927fc 100644 --- a/tstest/typewalk/typewalk.go +++ b/tstest/typewalk/typewalk.go @@ 
-54,14 +54,13 @@ func MatchingPaths(rt reflect.Type, match func(reflect.Type) bool) iter.Seq[Path return } switch t.Kind() { - case reflect.Ptr, reflect.Slice, reflect.Array: + case reflect.Pointer, reflect.Slice, reflect.Array: walk(t.Elem(), func(root reflect.Value) reflect.Value { v := getV(root) return v.Elem() }) case reflect.Struct: - for i := range t.NumField() { - sf := t.Field(i) + for sf := range t.Fields() { fieldName := sf.Name if fieldName == "_" { continue diff --git a/tsweb/tsweb.go b/tsweb/tsweb.go index f464e7af2141e..c730107837441 100644 --- a/tsweb/tsweb.go +++ b/tsweb/tsweb.go @@ -13,6 +13,7 @@ import ( "expvar" "fmt" "io" + "maps" "net" "net/http" "net/netip" @@ -734,8 +735,8 @@ func (h errorHandler) handleError(w http.ResponseWriter, r *http.Request, lw *lo // Extract a presentable, loggable error. var hOK bool - var hErr HTTPError - if errors.As(err, &hErr) { + hErr, hAsOK := errors.AsType[HTTPError](err) + if hAsOK { hOK = true if hErr.Code == 0 { lw.logf("[unexpected] HTTPError %v did not contain an HTTP status code, sending internal server error", hErr) @@ -854,9 +855,7 @@ func WriteHTTPError(w http.ResponseWriter, r *http.Request, e HTTPError) { h.Set("X-Content-Type-Options", "nosniff") // Custom headers from the error. - for k, vs := range e.Header { - h[k] = vs - } + maps.Copy(h, e.Header) // Write the msg back to the user. 
w.WriteHeader(e.Code) diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index a2286c7603be3..0df6e57751a7e 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -93,8 +93,8 @@ func prometheusMetric(prefix string, key string) (string, string, string) { typ = "histogram" key = strings.TrimPrefix(key, histogramPrefix) } - if strings.HasPrefix(key, labelMapPrefix) { - key = strings.TrimPrefix(key, labelMapPrefix) + if after, ok := strings.CutPrefix(key, labelMapPrefix); ok { + key = after if a, b, ok := strings.Cut(key, "_"); ok { label, key = a, b } @@ -154,7 +154,7 @@ func writePromExpVar(w io.Writer, prefix string, kv expvar.KeyValue) { case PrometheusMetricsReflectRooter: root := v.PrometheusMetricsReflectRoot() rv := reflect.ValueOf(root) - if rv.Type().Kind() == reflect.Ptr { + if rv.Type().Kind() == reflect.Pointer { if rv.IsNil() { return } @@ -419,8 +419,7 @@ func structTypeSortedFields(t reflect.Type) []sortedStructField { return v.([]sortedStructField) } fields := make([]sortedStructField, 0, t.NumField()) - for i, n := 0, t.NumField(); i < n; i++ { - sf := t.Field(i) + for sf := range t.Fields() { name := sf.Name if v := sf.Tag.Get("json"); v != "" { v, _, _ = strings.Cut(v, ",") @@ -433,7 +432,7 @@ func structTypeSortedFields(t reflect.Type) []sortedStructField { } } fields = append(fields, sortedStructField{ - Index: i, + Index: sf.Index[0], Name: name, SortName: removeTypePrefixes(name), MetricType: sf.Tag.Get("metrictype"), @@ -467,7 +466,7 @@ func foreachExportedStructField(rv reflect.Value, f func(fieldOrJSONName, metric sf := ssf.StructFieldType if ssf.MetricType != "" || sf.Type.Kind() == reflect.Struct { f(ssf.Name, ssf.MetricType, rv.Field(ssf.Index)) - } else if sf.Type.Kind() == reflect.Ptr && sf.Type.Elem().Kind() == reflect.Struct { + } else if sf.Type.Kind() == reflect.Pointer && sf.Type.Elem().Kind() == reflect.Struct { fv := rv.Field(ssf.Index) if !fv.IsNil() { f(ssf.Name, ssf.MetricType, fv.Elem()) diff --git 
a/types/ipproto/ipproto_test.go b/types/ipproto/ipproto_test.go index 8bfeb13fa4246..6d8be47a9046c 100644 --- a/types/ipproto/ipproto_test.go +++ b/types/ipproto/ipproto_test.go @@ -69,7 +69,7 @@ func TestProtoUnmarshalText(t *testing.T) { for i := range 256 { var p Proto - must.Do(p.UnmarshalText([]byte(fmt.Sprintf("%d", i)))) + must.Do(p.UnmarshalText(fmt.Appendf(nil, "%d", i))) if got, want := p, Proto(i); got != want { t.Errorf("Proto(%d) = %v, want %v", i, got, want) } @@ -122,7 +122,7 @@ func TestProtoUnmarshalJSON(t *testing.T) { var p Proto for i := range 256 { - j := []byte(fmt.Sprintf(`%d`, i)) + j := fmt.Appendf(nil, `%d`, i) must.Do(json.Unmarshal(j, &p)) if got, want := p, Proto(i); got != want { t.Errorf("Proto(%d) = %v, want %v", i, got, want) @@ -130,7 +130,7 @@ func TestProtoUnmarshalJSON(t *testing.T) { } for name, wantProto := range acceptedNames { - must.Do(json.Unmarshal([]byte(fmt.Sprintf(`"%s"`, name)), &p)) + must.Do(json.Unmarshal(fmt.Appendf(nil, `"%s"`, name), &p)) if got, want := p, wantProto; got != want { t.Errorf("Proto(%q) = %v, want %v", name, got, want) } diff --git a/types/lazy/deferred_test.go b/types/lazy/deferred_test.go index 61cc8f8ac6c27..4b2bb07ee2fbd 100644 --- a/types/lazy/deferred_test.go +++ b/types/lazy/deferred_test.go @@ -145,13 +145,11 @@ func TestDeferredInit(t *testing.T) { // Call [DeferredInit.Do] concurrently. 
const N = 10000 for range N { - wg.Add(1) - go func() { + wg.Go(func() { gotErr := di.Do() checkError(t, gotErr, nil, false) checkCalls() - wg.Done() - }() + }) } wg.Wait() }) @@ -193,12 +191,10 @@ func TestDeferredErr(t *testing.T) { var wg sync.WaitGroup N := 10000 for range N { - wg.Add(1) - go func() { + wg.Go(func() { gotErr := di.Do() checkError(t, gotErr, tt.wantErr, false) - wg.Done() - }() + }) } wg.Wait() }) @@ -254,11 +250,9 @@ func TestDeferAfterDo(t *testing.T) { const N = 10000 var wg sync.WaitGroup for range N { - wg.Add(1) - go func() { + wg.Go(func() { deferOnce() - wg.Done() - }() + }) } if err := di.Do(); err != nil { diff --git a/types/netmap/nodemut_test.go b/types/netmap/nodemut_test.go index a03dee49c7a76..1ae2ab1f98bdd 100644 --- a/types/netmap/nodemut_test.go +++ b/types/netmap/nodemut_test.go @@ -34,7 +34,7 @@ func TestMapResponseContainsNonPatchFields(t *testing.T) { return reflect.ValueOf(int64(1)).Convert(t) case reflect.Slice: return reflect.MakeSlice(t, 1, 1) - case reflect.Ptr: + case reflect.Pointer: return reflect.New(t.Elem()) case reflect.Map: return reflect.MakeMap(t) @@ -43,8 +43,7 @@ func TestMapResponseContainsNonPatchFields(t *testing.T) { } rt := reflect.TypeFor[tailcfg.MapResponse]() - for i := range rt.NumField() { - f := rt.Field(i) + for f := range rt.Fields() { var want bool switch f.Name { diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index b25af5a0b2066..33773013d667f 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -12,8 +12,8 @@ import ( ) func fieldsOf(t reflect.Type) (fields []string) { - for i := range t.NumField() { - if name := t.Field(i).Name; name != "_" { + for field := range t.Fields() { + if name := field.Name; name != "_" { fields = append(fields, name) } } diff --git a/types/views/views.go b/types/views/views.go index 4e17ac952ab49..fe70e227fc64c 100644 --- a/types/views/views.go +++ b/types/views/views.go @@ -968,8 +968,8 @@ func 
containsPointers(typ reflect.Type) bool { if isWellKnownImmutableStruct(typ) { return false } - for i := range typ.NumField() { - if containsPointers(typ.Field(i).Type) { + for field := range typ.Fields() { + if containsPointers(field.Type) { return true } } diff --git a/util/dnsname/dnsname.go b/util/dnsname/dnsname.go index 263c376aac674..cf1ae62000956 100644 --- a/util/dnsname/dnsname.go +++ b/util/dnsname/dnsname.go @@ -234,7 +234,7 @@ func ValidHostname(hostname string) error { return err } - for _, label := range strings.Split(fqdn.WithoutTrailingDot(), ".") { + for label := range strings.SplitSeq(fqdn.WithoutTrailingDot(), ".") { if err := ValidLabel(label); err != nil { return err } diff --git a/util/goroutines/goroutines.go b/util/goroutines/goroutines.go index fd0a4dd7eb321..f184fcd6c9e73 100644 --- a/util/goroutines/goroutines.go +++ b/util/goroutines/goroutines.go @@ -52,7 +52,7 @@ func scrubHex(buf []byte) []byte { in[0] = '?' return } - v := []byte(fmt.Sprintf("v%d%%%d", len(saw)+1, u64%8)) + v := fmt.Appendf(nil, "v%d%%%d", len(saw)+1, u64%8) saw[inStr] = v copy(in, v) }) diff --git a/util/hashx/block512_test.go b/util/hashx/block512_test.go index 91d5d9ee67749..03c77eabbecc3 100644 --- a/util/hashx/block512_test.go +++ b/util/hashx/block512_test.go @@ -47,7 +47,7 @@ type hasher interface { func hashSuite(h hasher) { for i := range 10 { - for j := 0; j < 10; j++ { + for range 10 { h.HashUint8(0x01) h.HashUint8(0x23) h.HashUint32(0x456789ab) diff --git a/util/httphdr/httphdr.go b/util/httphdr/httphdr.go index 01e8eddc67ac1..852b3f5c74138 100644 --- a/util/httphdr/httphdr.go +++ b/util/httphdr/httphdr.go @@ -44,7 +44,7 @@ func ParseRange(hdr string) (ranges []Range, ok bool) { hdr = strings.Trim(hdr, ows) // per RFC 7230, section 3.2 units, elems, hasUnits := strings.Cut(hdr, "=") elems = strings.TrimLeft(elems, ","+ows) - for _, elem := range strings.Split(elems, ",") { + for elem := range strings.SplitSeq(elems, ",") { elem = strings.Trim(elem, ows) 
// per RFC 7230, section 7 switch { case strings.HasPrefix(elem, "-"): // i.e., "-" suffix-length diff --git a/util/httpm/httpm_test.go b/util/httpm/httpm_test.go index 4e7f7b5ab277c..4a36a38e1b3e0 100644 --- a/util/httpm/httpm_test.go +++ b/util/httpm/httpm_test.go @@ -27,7 +27,7 @@ func TestUsedConsistently(t *testing.T) { cmd := exec.Command("git", "grep", "-l", "-F", "http.Method") cmd.Dir = rootDir matches, _ := cmd.Output() - for _, fn := range strings.Split(strings.TrimSpace(string(matches)), "\n") { + for fn := range strings.SplitSeq(strings.TrimSpace(string(matches)), "\n") { switch fn { case "util/httpm/httpm.go", "util/httpm/httpm_test.go": continue diff --git a/util/linuxfw/fake.go b/util/linuxfw/fake.go index 1886e25429537..166d80401e52e 100644 --- a/util/linuxfw/fake.go +++ b/util/linuxfw/fake.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "os" + "slices" "strconv" "strings" ) @@ -60,10 +61,8 @@ func (n *fakeIPTables) Append(table, chain string, args ...string) error { func (n *fakeIPTables) Exists(table, chain string, args ...string) (bool, error) { k := table + "/" + chain if rules, ok := n.n[k]; ok { - for _, rule := range rules { - if rule == strings.Join(args, " ") { - return true, nil - } + if slices.Contains(rules, strings.Join(args, " ")) { + return true, nil } return false, nil } else { diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index f054e7abe1718..3bd2c288699e4 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ -21,8 +21,8 @@ import ( func init() { isNotExistError = func(err error) bool { - var e *iptables.Error - return errors.As(err, &e) && e.IsNotExist() + e, ok := errors.AsType[*iptables.Error](err) + return ok && e.IsNotExist() } } diff --git a/util/linuxfw/nftables_for_svcs.go b/util/linuxfw/nftables_for_svcs.go index c2425e2ff285b..35764a2bde5da 100644 --- a/util/linuxfw/nftables_for_svcs.go +++ b/util/linuxfw/nftables_for_svcs.go @@ -236,7 +236,7 @@ func portMapRule(t *nftables.Table, ch 
*nftables.Chain, tun string, targetIP net // This metadata can then be used to find the rule. // https://github.com/google/nftables/issues/48 func svcPortMapRuleMeta(svcName string, targetIP netip.Addr, pm PortMap) []byte { - return []byte(fmt.Sprintf("svc:%s,targetIP:%s:matchPort:%v,targetPort:%v,proto:%v", svcName, targetIP.String(), pm.MatchPort, pm.TargetPort, pm.Protocol)) + return fmt.Appendf(nil, "svc:%s,targetIP:%s:matchPort:%v,targetPort:%v,proto:%v", svcName, targetIP.String(), pm.MatchPort, pm.TargetPort, pm.Protocol) } func (n *nftablesRunner) findRuleByMetadata(t *nftables.Table, ch *nftables.Chain, meta []byte) (*nftables.Rule, error) { @@ -305,5 +305,5 @@ func protoFromString(s string) (uint8, error) { // This metadata can then be used to find the rule. // https://github.com/google/nftables/issues/48 func svcRuleMeta(svcName string, origDst, dst netip.Addr) []byte { - return []byte(fmt.Sprintf("svc:%s,VIP:%s,ClusterIP:%s", svcName, origDst.String(), dst.String())) + return fmt.Appendf(nil, "svc:%s,VIP:%s,ClusterIP:%s", svcName, origDst.String(), dst.String()) } diff --git a/util/linuxfw/nftables_runner_test.go b/util/linuxfw/nftables_runner_test.go index 8299a9cbd72da..19e869a046529 100644 --- a/util/linuxfw/nftables_runner_test.go +++ b/util/linuxfw/nftables_runner_test.go @@ -1066,7 +1066,7 @@ func checkSNATRule_nft(t *testing.T, runner *nftablesRunner, fam nftables.TableF if chain == nil { t.Fatal("POSTROUTING chain does not exist") } - meta := []byte(fmt.Sprintf("dst:%s,src:%s", dst.String(), src.String())) + meta := fmt.Appendf(nil, "dst:%s,src:%s", dst.String(), src.String()) wantsRule := snatRule(chain.Table, chain, src, dst, meta) checkRule(t, wantsRule, runner.conn) } diff --git a/util/pool/pool_test.go b/util/pool/pool_test.go index ac7cf86be3ef7..ad509a5632994 100644 --- a/util/pool/pool_test.go +++ b/util/pool/pool_test.go @@ -94,12 +94,12 @@ func TestPool(t *testing.T) { func TestTakeRandom(t *testing.T) { p := Pool[int]{} - for i := 0; 
i < 10; i++ { + for i := range 10 { p.Add(i + 100) } seen := make(map[int]bool) - for i := 0; i < 10; i++ { + for range 10 { item, ok := p.TakeRandom() if !ok { t.Errorf("unexpected empty pool") @@ -116,7 +116,7 @@ func TestTakeRandom(t *testing.T) { t.Errorf("expected empty pool") } - for i := 0; i < 10; i++ { + for i := range 10 { want := 100 + i if !seen[want] { t.Errorf("item %v not seen", want) diff --git a/util/set/intset.go b/util/set/intset.go index 04f614742e796..29a634516a510 100644 --- a/util/set/intset.go +++ b/util/set/intset.go @@ -152,7 +152,7 @@ func (s bitSet) values() iter.Seq[uint64] { return func(yield func(uint64) bool) { // Hyrum-proofing: randomly iterate in forwards or reverse. if rand.Uint64()%2 == 0 { - for i := 0; i < bits.UintSize; i++ { + for i := range bits.UintSize { if s.contains(uint64(i)) && !yield(uint64(i)) { return } diff --git a/util/singleflight/singleflight.go b/util/singleflight/singleflight.go index 23cf7e21fec15..e6d859178140b 100644 --- a/util/singleflight/singleflight.go +++ b/util/singleflight/singleflight.go @@ -36,7 +36,7 @@ var errGoexit = errors.New("runtime.Goexit was called") // A panicError is an arbitrary value recovered from a panic // with the stack trace during the execution of given function. 
type panicError struct { - value interface{} + value any stack []byte } @@ -45,7 +45,7 @@ func (p *panicError) Error() string { return fmt.Sprintf("%v\n\n%s", p.value, p.stack) } -func newPanicError(v interface{}) error { +func newPanicError(v any) error { stack := debug.Stack() // The first line of the stack trace is of the form "goroutine N [status]:" diff --git a/util/singleflight/singleflight_test.go b/util/singleflight/singleflight_test.go index 9f0ca7f1de853..4e8500cc3c3d6 100644 --- a/util/singleflight/singleflight_test.go +++ b/util/singleflight/singleflight_test.go @@ -25,7 +25,7 @@ import ( func TestDo(t *testing.T) { var g Group[string, any] - v, err, _ := g.Do("key", func() (interface{}, error) { + v, err, _ := g.Do("key", func() (any, error) { return "bar", nil }) if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want { @@ -39,7 +39,7 @@ func TestDo(t *testing.T) { func TestDoErr(t *testing.T) { var g Group[string, any] someErr := errors.New("Some error") - v, err, _ := g.Do("key", func() (interface{}, error) { + v, err, _ := g.Do("key", func() (any, error) { return nil, someErr }) if err != someErr { @@ -55,7 +55,7 @@ func TestDoDupSuppress(t *testing.T) { var wg1, wg2 sync.WaitGroup c := make(chan string, 1) var calls int32 - fn := func() (interface{}, error) { + fn := func() (any, error) { if atomic.AddInt32(&calls, 1) == 1 { // First invocation. 
wg1.Done() @@ -72,9 +72,7 @@ func TestDoDupSuppress(t *testing.T) { wg1.Add(1) for range n { wg1.Add(1) - wg2.Add(1) - go func() { - defer wg2.Done() + wg2.Go(func() { wg1.Done() v, err, _ := g.Do("key", fn) if err != nil { @@ -84,7 +82,7 @@ func TestDoDupSuppress(t *testing.T) { if s, _ := v.(string); s != "bar" { t.Errorf("Do = %T %v; want %q", v, v, "bar") } - }() + }) } wg1.Wait() // At least one goroutine is in fn now and all of them have at @@ -108,7 +106,7 @@ func TestForget(t *testing.T) { ) go func() { - g.Do("key", func() (i interface{}, e error) { + g.Do("key", func() (i any, e error) { close(firstStarted) <-unblockFirst close(firstFinished) @@ -119,7 +117,7 @@ func TestForget(t *testing.T) { g.Forget("key") unblockSecond := make(chan struct{}) - secondResult := g.DoChan("key", func() (i interface{}, e error) { + secondResult := g.DoChan("key", func() (i any, e error) { <-unblockSecond return 2, nil }) @@ -127,7 +125,7 @@ func TestForget(t *testing.T) { close(unblockFirst) <-firstFinished - thirdResult := g.DoChan("key", func() (i interface{}, e error) { + thirdResult := g.DoChan("key", func() (i any, e error) { return 3, nil }) @@ -141,7 +139,7 @@ func TestForget(t *testing.T) { func TestDoChan(t *testing.T) { var g Group[string, any] - ch := g.DoChan("key", func() (interface{}, error) { + ch := g.DoChan("key", func() (any, error) { return "bar", nil }) @@ -160,7 +158,7 @@ func TestDoChan(t *testing.T) { // See https://github.com/golang/go/issues/41133 func TestPanicDo(t *testing.T) { var g Group[string, any] - fn := func() (interface{}, error) { + fn := func() (any, error) { panic("invalid memory address or nil pointer dereference") } @@ -197,7 +195,7 @@ func TestPanicDo(t *testing.T) { func TestGoexitDo(t *testing.T) { var g Group[string, any] - fn := func() (interface{}, error) { + fn := func() (any, error) { runtime.Goexit() return nil, nil } @@ -238,7 +236,7 @@ func TestPanicDoChan(t *testing.T) { }() g := new(Group[string, any]) - ch := 
g.DoChan("", func() (interface{}, error) { + ch := g.DoChan("", func() (any, error) { panic("Panicking in DoChan") }) <-ch @@ -283,7 +281,7 @@ func TestPanicDoSharedByDoChan(t *testing.T) { defer func() { recover() }() - g.Do("", func() (interface{}, error) { + g.Do("", func() (any, error) { close(blocked) <-unblock panic("Panicking in Do") @@ -291,7 +289,7 @@ func TestPanicDoSharedByDoChan(t *testing.T) { }() <-blocked - ch := g.DoChan("", func() (interface{}, error) { + ch := g.DoChan("", func() (any, error) { panic("DoChan unexpectedly executed callback") }) close(unblock) @@ -325,8 +323,7 @@ func TestPanicDoSharedByDoChan(t *testing.T) { func TestDoChanContext(t *testing.T) { t.Run("Basic", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() var g Group[string, int] ch := g.DoChanContext(ctx, "key", func(_ context.Context) (int, error) { @@ -337,8 +334,7 @@ func TestDoChanContext(t *testing.T) { }) t.Run("DoesNotPropagateValues", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() key := new(int) const value = "hello world" @@ -364,8 +360,7 @@ func TestDoChanContext(t *testing.T) { ctx1, cancel1 := context.WithCancel(context.Background()) defer cancel1() - ctx2, cancel2 := context.WithCancel(context.Background()) - defer cancel2() + ctx2 := t.Context() fn := func(ctx context.Context) (int, error) { select { diff --git a/util/slicesx/slicesx_test.go b/util/slicesx/slicesx_test.go index d5c87a3727748..6b28c29b47382 100644 --- a/util/slicesx/slicesx_test.go +++ b/util/slicesx/slicesx_test.go @@ -53,7 +53,7 @@ func TestShuffle(t *testing.T) { } var wasShuffled bool - for try := 0; try < 10; try++ { + for range 10 { shuffled := slices.Clone(sl) Shuffle(shuffled) if !reflect.DeepEqual(shuffled, sl) { diff --git a/util/syspolicy/policytest/policytest.go b/util/syspolicy/policytest/policytest.go index ef5ce889dd2de..9879a0fd3c69c 100644 --- 
a/util/syspolicy/policytest/policytest.go +++ b/util/syspolicy/policytest/policytest.go @@ -89,12 +89,7 @@ func (pc policyChanges) HasChanged(v pkey.Key) bool { return ok } func (pc policyChanges) HasChangedAnyOf(keys ...pkey.Key) bool { - for _, k := range keys { - if pc.HasChanged(k) { - return true - } - } - return false + return slices.ContainsFunc(keys, pc.HasChanged) } const watchersKey = "_policytest_watchers" diff --git a/util/topk/topk_test.go b/util/topk/topk_test.go index 06656c4204fe6..7679f59a303ab 100644 --- a/util/topk/topk_test.go +++ b/util/topk/topk_test.go @@ -43,7 +43,7 @@ func TestTopK(t *testing.T) { got []int want = []int{5, 6, 7, 8, 9} ) - for try := 0; try < 10; try++ { + for range 10 { topk := NewWithParams[int](5, func(in []byte, val int) []byte { return binary.LittleEndian.AppendUint64(in, uint64(val)) }, 4, 1000) diff --git a/util/vizerror/vizerror.go b/util/vizerror/vizerror.go index 479bd2de9e7c8..e0abe8f97d15e 100644 --- a/util/vizerror/vizerror.go +++ b/util/vizerror/vizerror.go @@ -77,6 +77,5 @@ func WrapWithMessage(wrapped error, publicMsg string) error { // As returns the first vizerror.Error in err's chain. func As(err error) (e Error, ok bool) { - ok = errors.As(err, &e) - return + return errors.AsType[Error](err) } diff --git a/util/zstdframe/zstd_test.go b/util/zstdframe/zstd_test.go index 302090b9951b8..c006a06fd9d39 100644 --- a/util/zstdframe/zstd_test.go +++ b/util/zstdframe/zstd_test.go @@ -128,7 +128,7 @@ func BenchmarkEncode(b *testing.B) { b.Run(bb.name, func(b *testing.B) { b.ReportAllocs() b.SetBytes(int64(len(src))) - for range b.N { + for b.Loop() { dst = AppendEncode(dst[:0], src, bb.opts...) 
} }) @@ -153,7 +153,7 @@ func BenchmarkDecode(b *testing.B) { b.Run(bb.name, func(b *testing.B) { b.ReportAllocs() b.SetBytes(int64(len(src))) - for range b.N { + for b.Loop() { dst = must.Get(AppendDecode(dst[:0], src, bb.opts...)) } }) @@ -169,16 +169,14 @@ func BenchmarkEncodeParallel(b *testing.B) { } b.Run(coder.name, func(b *testing.B) { b.ReportAllocs() - for range b.N { - var group sync.WaitGroup - for j := 0; j < numCPU; j++ { - group.Add(1) - go func(j int) { - defer group.Done() + for b.Loop() { + var wg sync.WaitGroup + for j := range numCPU { + wg.Go(func() { dsts[j] = coder.appendEncode(dsts[j][:0], src) - }(j) + }) } - group.Wait() + wg.Wait() } }) } @@ -194,16 +192,14 @@ func BenchmarkDecodeParallel(b *testing.B) { } b.Run(coder.name, func(b *testing.B) { b.ReportAllocs() - for range b.N { - var group sync.WaitGroup - for j := 0; j < numCPU; j++ { - group.Add(1) - go func(j int) { - defer group.Done() + for b.Loop() { + var wg sync.WaitGroup + for j := range numCPU { + wg.Go(func() { dsts[j] = must.Get(coder.appendDecode(dsts[j][:0], src)) - }(j) + }) } - group.Wait() + wg.Wait() } }) } diff --git a/version/cmdname.go b/version/cmdname.go index 8a4040f9718b9..5a0b8487509e5 100644 --- a/version/cmdname.go +++ b/version/cmdname.go @@ -39,7 +39,7 @@ func cmdName(exe string) string { } // v is like: // "path\ttailscale.com/cmd/tailscale\nmod\ttailscale.com\t(devel)\t\ndep\tgithub.com/apenwarr/fixconsole\tv0.0.0-20191012055117-5a9f6489cc29\th1:muXWUcay7DDy1/hEQWrYlBy+g0EuwT70sBHg65SeUc4=\ndep\tgithub.... 
- for _, line := range strings.Split(info, "\n") { + for line := range strings.SplitSeq(info, "\n") { if goPkg, ok := strings.CutPrefix(line, "path\t"); ok { // like "tailscale.com/cmd/tailscale" ret = path.Base(goPkg) // goPkg is always forward slashes; use path, not filepath break diff --git a/version/version_test.go b/version/version_test.go index ebae7f177613a..42bcf21634487 100644 --- a/version/version_test.go +++ b/version/version_test.go @@ -30,7 +30,7 @@ func readAlpineTag(t *testing.T, file string) string { if err != nil { t.Fatal(err) } - for _, line := range bytes.Split(f, []byte{'\n'}) { + for line := range bytes.SplitSeq(f, []byte{'\n'}) { line = bytes.TrimSpace(line) _, suf, ok := bytes.Cut(line, []byte("FROM alpine:")) if !ok { diff --git a/wgengine/filter/filter_test.go b/wgengine/filter/filter_test.go index c588a506e0dc9..a3b9a8e001e60 100644 --- a/wgengine/filter/filter_test.go +++ b/wgengine/filter/filter_test.go @@ -751,13 +751,13 @@ func ports(s string) PortRange { } var fs, ls string - i := strings.IndexByte(s, '-') - if i == -1 { + before, after, ok := strings.Cut(s, "-") + if !ok { fs = s ls = fs } else { - fs = s[:i] - ls = s[i+1:] + fs = before + ls = after } first, err := strconv.ParseInt(fs, 10, 16) if err != nil { diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 1f02d84c7c608..78ffd0cd0e0f5 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -1474,8 +1474,7 @@ func (c *Conn) sendUDPBatch(addr epAddr, buffs [][]byte, offset int) (sent bool, err = c.pconn4.WriteWireGuardBatchTo(buffs, addr, offset) } if err != nil { - var errGSO neterror.ErrUDPGSODisabled - if errors.As(err, &errGSO) { + if errGSO, ok := errors.AsType[neterror.ErrUDPGSODisabled](err); ok { c.logf("magicsock: %s", errGSO.Error()) err = errGSO.RetryErr } else { diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index dfd9d395d00f9..7a8a6374cd1bc 100644 --- 
a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -2007,7 +2007,7 @@ func TestStressSetNetworkMap(t *testing.T) { const iters = 1000 // approx 0.5s on an m1 mac for range iters { - for j := 0; j < npeers; j++ { + for j := range npeers { // Randomize which peers are present. if prng.Int()&1 == 0 { present[j] = !present[j] @@ -2196,7 +2196,7 @@ func newWireguard(t *testing.T, uapi string, aips []netip.Prefix) (*device.Devic if err != nil { t.Fatal(err) } - for _, line := range strings.Split(s, "\n") { + for line := range strings.SplitSeq(s, "\n") { line = strings.TrimSpace(line) if len(line) == 0 { continue @@ -4311,7 +4311,7 @@ func TestRotateDiscoKeyMultipleTimes(t *testing.T) { keys := make([]key.DiscoPublic, 0, 5) keys = append(keys, c.discoAtomic.Public()) - for i := 0; i < 4; i++ { + for i := range 4 { c.RotateDiscoKey() newKey := c.discoAtomic.Public() diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 77cb4a7b9b451..116deffa98296 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -102,7 +102,7 @@ var ( canonicalIPs = sync.OnceValue(func() (checkIPFunc func(netip.Addr) bool) { // https://bgp.he.net/AS41231#_prefixes t := &bart.Table[bool]{} - for _, s := range strings.Fields(` + for s := range strings.FieldsSeq(` 91.189.89.0/24 91.189.91.0/24 91.189.92.0/24 diff --git a/wgengine/router/osrouter/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go index bae997e331d55..8c2514eb57d73 100644 --- a/wgengine/router/osrouter/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -1073,11 +1073,9 @@ func (o *fakeOS) run(args ...string) error { switch args[2] { case "add": - for _, el := range *ls { - if el == rest { - o.t.Errorf("can't add %q, already present", rest) - return errors.New("already exists") - } + if slices.Contains(*ls, rest) { + o.t.Errorf("can't add %q, already present", rest) + return errors.New("already exists") } *ls = append(*ls, rest) sort.Strings(*ls) 
diff --git a/wgengine/router/osrouter/runner.go b/wgengine/router/osrouter/runner.go index bdc710a8d369a..82b2680e67277 100644 --- a/wgengine/router/osrouter/runner.go +++ b/wgengine/router/osrouter/runner.go @@ -10,6 +10,7 @@ import ( "fmt" "os" "os/exec" + "slices" "strconv" "strings" "syscall" @@ -42,8 +43,7 @@ func errCode(err error) int { if err == nil { return 0 } - var e *exec.ExitError - if ok := errors.As(err, &e); ok { + if e, ok := errors.AsType[*exec.ExitError](err); ok { return e.ExitCode() } s := err.Error() @@ -96,12 +96,7 @@ func newRunGroup(okCode []int, runner commandRunner) *runGroup { func (rg *runGroup) okCode(err error) bool { got := errCode(err) - for _, want := range rg.OkCode { - if got == want { - return true - } - } - return false + return slices.Contains(rg.OkCode, got) } func (rg *runGroup) Output(args ...string) []byte { diff --git a/wgengine/router/router_test.go b/wgengine/router/router_test.go index 28750e115a9e3..f6176f1d000aa 100644 --- a/wgengine/router/router_test.go +++ b/wgengine/router/router_test.go @@ -19,8 +19,8 @@ func TestConfigEqual(t *testing.T) { } configType := reflect.TypeFor[Config]() configFields := []string{} - for i := range configType.NumField() { - configFields = append(configFields, configType.Field(i).Name) + for field := range configType.Fields() { + configFields = append(configFields, field.Name) } if !reflect.DeepEqual(configFields, testedFields) { t.Errorf("Config.Equal check might be out of sync\nfields: %q\nhandled: %q\n", diff --git a/wgengine/wgcfg/config_test.go b/wgengine/wgcfg/config_test.go index b15b8cbf56f8b..7059b17b2dafe 100644 --- a/wgengine/wgcfg/config_test.go +++ b/wgengine/wgcfg/config_test.go @@ -12,8 +12,7 @@ import ( // that might get added in the future. 
func TestConfigEqual(t *testing.T) { rt := reflect.TypeFor[Config]() - for i := range rt.NumField() { - sf := rt.Field(i) + for sf := range rt.Fields() { switch sf.Name { case "Name", "NodeID", "PrivateKey", "MTU", "Addresses", "DNS", "Peers", "NetworkLogging": @@ -28,8 +27,7 @@ func TestConfigEqual(t *testing.T) { // that might get added in the future. func TestPeerEqual(t *testing.T) { rt := reflect.TypeFor[Peer]() - for i := range rt.NumField() { - sf := rt.Field(i) + for sf := range rt.Fields() { switch sf.Name { case "PublicKey", "DiscoKey", "AllowedIPs", "IsJailed", "PersistentKeepalive", "V4MasqAddr", "V6MasqAddr", "WGEndpoint": diff --git a/wif/wif.go b/wif/wif.go index bb2e760f2c7b7..bc479fad1785d 100644 --- a/wif/wif.go +++ b/wif/wif.go @@ -190,8 +190,7 @@ func acquireAWSWebIdentityToken(ctx context.Context, audience string) (string, e out, err := stsClient.GetWebIdentityToken(ctx, in) if err != nil { - var apiErr smithy.APIError - if errors.As(err, &apiErr) { + if apiErr, ok := errors.AsType[smithy.APIError](err); ok { return "", fmt.Errorf("aws sts:GetWebIdentityToken failed (%s): %w", apiErr.ErrorCode(), err) } return "", fmt.Errorf("aws sts:GetWebIdentityToken failed: %w", err) From e400d5aa7b22ef131d2b05a1df89177f87dd3922 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 6 Mar 2026 13:26:09 -0800 Subject: [PATCH 1068/1093] cmd/testwrapper: make test tolerant of a GOEXPERIMENT being set Otherwise it generates an syntactically invalid go.mod file and subsequently fails. 
Updates #18884 Change-Id: I1a0ea17a57b2a37bde3770187e1a6e2d8aa55bfe Signed-off-by: Brad Fitzpatrick --- cmd/testwrapper/testwrapper_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/testwrapper/testwrapper_test.go b/cmd/testwrapper/testwrapper_test.go index 7ad78a3d003ca..46400fd1c0a67 100644 --- a/cmd/testwrapper/testwrapper_test.go +++ b/cmd/testwrapper/testwrapper_test.go @@ -220,11 +220,14 @@ func TestCached(t *testing.T) { // Construct our trivial package. pkgDir := t.TempDir() + goVersion := runtime.Version() + goVersion = strings.TrimPrefix(goVersion, "go") + goVersion, _, _ = strings.Cut(goVersion, "-X:") // map 1.26.1-X:nogreenteagc to 1.26.1 + goMod := fmt.Sprintf(`module example.com go %s -`, runtime.Version()[2:]) // strip leading "go" - +`, goVersion) test := `package main import "testing" From ac74dfa5cd8ba3153a90a84381dad9500162c746 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Thu, 5 Mar 2026 15:58:18 +0000 Subject: [PATCH 1069/1093] util/osuser: extend id command fallback for group IDs to freebsd Users on FreeBSD run into a similar problem as has been reported for Linux #11682 and fixed in #11682: because the tailscaled binaries that we distribute are static and don't link cgo tailscaled fails to fetch group IDs that are returned via NSS when spawning an ssh child process. This change extends the fallback on the 'id' command that was put in place as part of #11682 to FreeBSD. More precisely, we try to fetch the group IDs with the 'id' command first, and only if that fails do we fall back on the logic in the os/user package. 
Updates #14025 Signed-off-by: Gesa Stupperich --- util/osuser/group_ids.go | 17 ++++++++++++++--- util/osuser/group_ids_test.go | 4 +++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/util/osuser/group_ids.go b/util/osuser/group_ids.go index 2a1f147d87b00..34d15c926ae98 100644 --- a/util/osuser/group_ids.go +++ b/util/osuser/group_ids.go @@ -23,7 +23,7 @@ func GetGroupIds(user *user.User) ([]string, error) { return nil, nil } - if runtime.GOOS != "linux" { + if runtime.GOOS != "linux" && runtime.GOOS != "freebsd" { return user.GroupIds() } @@ -46,13 +46,24 @@ func getGroupIdsWithId(usernameOrUID string) ([]string, error) { defer cancel() cmd := exec.CommandContext(ctx, "id", "-Gz", usernameOrUID) - out, err := cmd.Output() + if runtime.GOOS == "freebsd" { + cmd = exec.CommandContext(ctx, "id", "-G", usernameOrUID) + } + + out, err := cmd.CombinedOutput() if err != nil { return nil, fmt.Errorf("running 'id' command: %w", err) } + return parseGroupIds(out), nil } func parseGroupIds(cmdOutput []byte) []string { - return strings.Split(strings.Trim(string(cmdOutput), "\n\x00"), "\x00") + s := strings.TrimSpace(string(cmdOutput)) + // Parse NUL-delimited output. + if strings.ContainsRune(s, '\x00') { + return strings.Split(strings.Trim(s, "\x00"), "\x00") + } + // Parse whitespace-delimited output. 
+ return strings.Fields(s) } diff --git a/util/osuser/group_ids_test.go b/util/osuser/group_ids_test.go index 79e189ed8c866..fee86029bf4dc 100644 --- a/util/osuser/group_ids_test.go +++ b/util/osuser/group_ids_test.go @@ -15,7 +15,9 @@ func TestParseGroupIds(t *testing.T) { }{ {"5000\x005001\n", []string{"5000", "5001"}}, {"5000\n", []string{"5000"}}, - {"\n", []string{""}}, + {"\n", []string{}}, + {"5000 5001 5002\n", []string{"5000", "5001", "5002"}}, + {"5000\t5001\n", []string{"5000", "5001"}}, } for _, test := range tests { actual := parseGroupIds([]byte(test.in)) From 6a19995f136686be74cdd3c4d720dc4e92159b6a Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Tue, 17 Feb 2026 13:15:02 +0000 Subject: [PATCH 1070/1093] tailcfg: reintroduce UserProfile.Groups This change reintroduces UserProfile.Groups, a slice that contains the ACL-defined and synced groups that a user is a member of. The slice will only be non-nil for clients with the node attribute see-groups, and will only contain groups that the client is allowed to see as per the app payload of the see-groups node attribute. For example: ``` "nodeAttrs": [ { "target": ["tag:dev"], "app": { "tailscale.com/see-groups": [{"groups": ["group:dev"]}] } }, [...] ] ``` UserProfile.Groups will also be gated by a feature flag for the time being. 
Updates tailscale/corp#31529 Signed-off-by: Gesa Stupperich --- feature/taildrop/ext.go | 4 ++-- ipn/ipn_clone.go | 1 + ipn/ipn_view.go | 2 +- ipn/ipnlocal/local.go | 2 +- ipn/ipnlocal/profiles.go | 8 ++++---- ipn/ipnlocal/state_test.go | 6 +++--- tailcfg/tailcfg.go | 10 +++++++++- tailcfg/tailcfg_clone.go | 2 ++ tailcfg/tailcfg_view.go | 12 ++++++++++-- types/persist/persist_clone.go | 1 + types/persist/persist_view.go | 2 +- 11 files changed, 35 insertions(+), 15 deletions(-) diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index 3a4ed456d2269..abf574ebc5407 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -139,8 +139,8 @@ func (e *Extension) onChangeProfile(profile ipn.LoginProfileView, _ ipn.PrefsVie e.mu.Lock() defer e.mu.Unlock() - uid := profile.UserProfile().ID - activeLogin := profile.UserProfile().LoginName + uid := profile.UserProfile().ID() + activeLogin := profile.UserProfile().LoginName() if uid == 0 { e.setMgrLocked(nil) diff --git a/ipn/ipn_clone.go b/ipn/ipn_clone.go index 3e6cbbb823a4f..e179438cdcfcb 100644 --- a/ipn/ipn_clone.go +++ b/ipn/ipn_clone.go @@ -24,6 +24,7 @@ func (src *LoginProfile) Clone() *LoginProfile { } dst := new(LoginProfile) *dst = *src + dst.UserProfile = *src.UserProfile.Clone() return dst } diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 90560cec0e195..4e9d46bda30a0 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -113,7 +113,7 @@ func (v LoginProfileView) Key() StateKey { return v.ж.Key } // UserProfile is the server provided UserProfile for this profile. // This is updated whenever the server provides a new UserProfile. -func (v LoginProfileView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } +func (v LoginProfileView) UserProfile() tailcfg.UserProfileView { return v.ж.UserProfile.View() } // NodeID is the NodeID of the node that this profile is logged into. // This should be stable across tagging and untagging nodes. 
diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 5f694e915c59c..77bb14f368db9 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -4689,7 +4689,7 @@ func (b *LocalBackend) setPrefsLocked(newp *ipn.Prefs) ipn.PrefsView { if !oldp.Persist().Valid() { b.logf("active login: %s", newLoginName) } else { - oldLoginName := oldp.Persist().UserProfile().LoginName + oldLoginName := oldp.Persist().UserProfile().LoginName() if oldLoginName != newLoginName { b.logf("active login: %q (changed from %q)", newLoginName, oldLoginName) } diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 430fa63152a77..4e073e5c9aeba 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -274,7 +274,7 @@ func (pm *profileManager) matchingProfiles(uid ipn.WindowsUserID, f func(ipn.Log func (pm *profileManager) findMatchingProfiles(uid ipn.WindowsUserID, prefs ipn.PrefsView) []ipn.LoginProfileView { return pm.matchingProfiles(uid, func(p ipn.LoginProfileView) bool { return p.ControlURL() == prefs.ControlURL() && - (p.UserProfile().ID == prefs.Persist().UserProfile().ID || + (p.UserProfile().ID() == prefs.Persist().UserProfile().ID() || p.NodeID() == prefs.Persist().NodeID()) }) } @@ -337,7 +337,7 @@ func (pm *profileManager) setUnattendedModeAsConfigured() error { // across user switches to disambiguate the same account but a different tailnet. func (pm *profileManager) SetPrefs(prefsIn ipn.PrefsView, np ipn.NetworkProfile) error { cp := pm.currentProfile - if persist := prefsIn.Persist(); !persist.Valid() || persist.NodeID() == "" || persist.UserProfile().LoginName == "" { + if persist := prefsIn.Persist(); !persist.Valid() || persist.NodeID() == "" || persist.UserProfile().LoginName() == "" { // We don't know anything about this profile, so ignore it for now. 
return pm.setProfilePrefsNoPermCheck(pm.currentProfile, prefsIn.AsStruct().View()) } @@ -410,7 +410,7 @@ func (pm *profileManager) setProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.Pref // and it hasn't been persisted yet. We'll generate both an ID and [ipn.StateKey] // once the information is available and needs to be persisted. if lp.ID == "" { - if persist := prefsIn.Persist(); persist.Valid() && persist.NodeID() != "" && persist.UserProfile().LoginName != "" { + if persist := prefsIn.Persist(); persist.Valid() && persist.NodeID() != "" && persist.UserProfile().LoginName() != "" { // Generate an ID and [ipn.StateKey] now that we have the node info. lp.ID, lp.Key = newUnusedID(pm.knownProfiles) } @@ -425,7 +425,7 @@ func (pm *profileManager) setProfilePrefs(lp *ipn.LoginProfile, prefsIn ipn.Pref var up tailcfg.UserProfile if persist := prefsIn.Persist(); persist.Valid() { - up = persist.UserProfile() + up = *persist.UserProfile().AsStruct() if up.DisplayName == "" { up.DisplayName = up.LoginName } diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 39796ec325367..ab09e0a09934b 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -606,7 +606,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { cc.assertCalls() c.Assert(nn[0].LoginFinished, qt.IsNotNil) c.Assert(nn[1].Prefs, qt.IsNotNil) - c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user1") + c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName(), qt.Equals, "user1") // nn[2] is a state notification after login // Verify login finished but need machine auth using backend state c.Assert(isFullyAuthenticated(b), qt.IsTrue) @@ -818,7 +818,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { c.Assert(nn[1].Prefs, qt.IsNotNil) c.Assert(nn[1].Prefs.Persist(), qt.IsNotNil) // Prefs after finishing the login, so LoginName updated. 
- c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user2") + c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName(), qt.Equals, "user2") c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) // If a user initiates an interactive login, they also expect WantRunning to become true. c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) @@ -964,7 +964,7 @@ func runTestStateMachine(t *testing.T, seamless bool) { c.Assert(nn[0].LoginFinished, qt.IsNotNil) c.Assert(nn[1].Prefs, qt.IsNotNil) // Prefs after finishing the login, so LoginName updated. - c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName, qt.Equals, "user3") + c.Assert(nn[1].Prefs.Persist().UserProfile().LoginName(), qt.Equals, "user3") c.Assert(nn[1].Prefs.LoggedOut(), qt.IsFalse) c.Assert(nn[1].Prefs.WantRunning(), qt.IsTrue) // nn[2] is state notification (Starting) - verify using backend state diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index b49791be6fb39..1efa6c959214e 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -282,6 +282,13 @@ type UserProfile struct { LoginName string // "alice@smith.com"; for display purposes only (provider is not listed) DisplayName string // "Alice Smith" ProfilePicURL string `json:",omitzero"` + + // Groups is a subset of SCIM groups (e.g. "engineering@example.com") + // or group names in the tailnet policy document (e.g. "group:eng") + // that contain this user and that the coordination server was + // configured to report to this node. + // The list is always sorted when loaded from storage. + Groups []string `json:",omitempty"` } func (p *UserProfile) Equal(p2 *UserProfile) bool { @@ -294,7 +301,8 @@ func (p *UserProfile) Equal(p2 *UserProfile) bool { return p.ID == p2.ID && p.LoginName == p2.LoginName && p.DisplayName == p2.DisplayName && - p.ProfilePicURL == p2.ProfilePicURL + p.ProfilePicURL == p2.ProfilePicURL && + slices.Equal(p.Groups, p2.Groups) } // RawMessage is a raw encoded JSON value. 
It implements Marshaler and diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 1911707235b87..8b966b621820a 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -620,6 +620,7 @@ func (src *UserProfile) Clone() *UserProfile { } dst := new(UserProfile) *dst = *src + dst.Groups = append(src.Groups[:0:0], src.Groups...) return dst } @@ -629,6 +630,7 @@ var _UserProfileCloneNeedsRegeneration = UserProfile(struct { LoginName string DisplayName string ProfilePicURL string + Groups []string }{}) // Clone makes a deep copy of VIPService. diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 7960000fd3d6a..9900efbcc3d63 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -2505,8 +2505,15 @@ func (v UserProfileView) ID() UserID { return v.ж.ID } func (v UserProfileView) LoginName() string { return v.ж.LoginName } // "Alice Smith" -func (v UserProfileView) DisplayName() string { return v.ж.DisplayName } -func (v UserProfileView) ProfilePicURL() string { return v.ж.ProfilePicURL } +func (v UserProfileView) DisplayName() string { return v.ж.DisplayName } +func (v UserProfileView) ProfilePicURL() string { return v.ж.ProfilePicURL } + +// Groups is a subset of SCIM groups (e.g. "engineering@example.com") +// or group names in the tailnet policy document (e.g. "group:eng") +// that contain this user and that the coordination server was +// configured to report to this node. +// The list is always sorted when loaded from storage. +func (v UserProfileView) Groups() views.Slice[string] { return views.SliceOf(v.ж.Groups) } func (v UserProfileView) Equal(v2 UserProfileView) bool { return v.ж.Equal(v2.ж) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. 
@@ -2515,6 +2522,7 @@ var _UserProfileViewNeedsRegeneration = UserProfile(struct { LoginName string DisplayName string ProfilePicURL string + Groups []string }{}) // View returns a read-only view of VIPService. diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index f5fa36b6da0fc..b43dcc7fd979e 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -19,6 +19,7 @@ func (src *Persist) Clone() *Persist { } dst := new(Persist) *dst = *src + dst.UserProfile = *src.UserProfile.Clone() if src.AttestationKey != nil { dst.AttestationKey = src.AttestationKey.Clone() } diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index b18634917c651..f33d222c6fb8d 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -90,7 +90,7 @@ func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeK // needed to request key rotation func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } -func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } +func (v PersistView) UserProfile() tailcfg.UserProfileView { return v.ж.UserProfile.View() } func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } func (v PersistView) AttestationKey() tailcfg.StableNodeID { panic("unsupported") } From a4614d7d17506ca2ed9d007ec77189d9c00f7519 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Fri, 27 Feb 2026 14:31:43 -0800 Subject: [PATCH 1071/1093] appc,feature/conn25: conn25: send address assignments to connector After we intercept a DNS response and assign magic and transit addresses we must communicate the assignment to our connector so that it can direct traffic when it arrives. Use the recently added peerapi endpoint to send the addresses. 
Updates tailscale/corp#34258 Signed-off-by: Fran Bull --- appc/conn25.go | 50 +++++++-- appc/conn25_test.go | 156 +++++++++++++++++++++++++++ cmd/k8s-operator/depaware.txt | 2 +- cmd/tailscaled/depaware-min.txt | 2 +- cmd/tailscaled/depaware-minbox.txt | 2 +- cmd/tailscaled/depaware.txt | 2 +- cmd/tsidp/depaware.txt | 2 +- feature/conn25/conn25.go | 167 +++++++++++++++++++++++++---- feature/conn25/conn25_test.go | 138 +++++++++++++++++++++++- tsnet/depaware.txt | 2 +- 10 files changed, 486 insertions(+), 37 deletions(-) diff --git a/appc/conn25.go b/appc/conn25.go index 08b2a1ade6826..fd1748fa6cb81 100644 --- a/appc/conn25.go +++ b/appc/conn25.go @@ -7,6 +7,7 @@ import ( "cmp" "slices" + "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" "tailscale.com/types/appctype" "tailscale.com/util/mak" @@ -15,6 +16,43 @@ import ( const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental" +func isEligibleConnector(peer tailcfg.NodeView) bool { + if !peer.Valid() || !peer.Hostinfo().Valid() { + return false + } + isConn, _ := peer.Hostinfo().AppConnector().Get() + return isConn +} + +func sortByPreference(ns []tailcfg.NodeView) { + // The ordering of the nodes is semantic (callers use the first node they can + // get a peer api url for). We don't (currently 2026-02-27) have any + // preference over which node is chosen as long as it's consistent. In the + // future we anticipate integrating with traffic steering. + slices.SortFunc(ns, func(a, b tailcfg.NodeView) int { + return cmp.Compare(a.ID(), b.ID()) + }) +} + +// PickConnector returns peers the backend knows about that match the app, in order of preference to use as +// a connector. 
+func PickConnector(nb ipnext.NodeBackend, app appctype.Conn25Attr) []tailcfg.NodeView { + appTagsSet := set.SetOf(app.Connectors) + matches := nb.AppendMatchingPeers(nil, func(n tailcfg.NodeView) bool { + if !isEligibleConnector(n) { + return false + } + for _, t := range n.Tags().All() { + if appTagsSet.Contains(t) { + return true + } + } + return false + }) + sortByPreference(matches) + return matches +} + // PickSplitDNSPeers looks at the netmap peers capabilities and finds which peers // want to be connectors for which domains. func PickSplitDNSPeers(hasCap func(c tailcfg.NodeCapability) bool, self tailcfg.NodeView, peers map[tailcfg.NodeID]tailcfg.NodeView) map[string][]tailcfg.NodeView { @@ -36,10 +74,7 @@ func PickSplitDNSPeers(hasCap func(c tailcfg.NodeCapability) bool, self tailcfg. // use a Set of NodeIDs to deduplicate, and populate into a []NodeView later. var work map[string]set.Set[tailcfg.NodeID] for _, peer := range peers { - if !peer.Valid() || !peer.Hostinfo().Valid() { - continue - } - if isConn, _ := peer.Hostinfo().AppConnector().Get(); !isConn { + if !isEligibleConnector(peer) { continue } for _, t := range peer.Tags().All() { @@ -60,12 +95,7 @@ func PickSplitDNSPeers(hasCap func(c tailcfg.NodeCapability) bool, self tailcfg. for id := range ids { nodes = append(nodes, peers[id]) } - // The ordering of the nodes in the map vals is semantic (dnsConfigForNetmap uses the first node it can - // get a peer api url for as its split dns target). We can think of it as a preference order, except that - // we don't (currently 2026-01-14) have any preference over which node is chosen. 
- slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int { - return cmp.Compare(a.ID(), b.ID()) - }) + sortByPreference(nodes) mak.Set(&m, domain, nodes) } return m diff --git a/appc/conn25_test.go b/appc/conn25_test.go index a9cb0fb7ebf9c..fc14caf36d5e9 100644 --- a/appc/conn25_test.go +++ b/appc/conn25_test.go @@ -8,6 +8,8 @@ import ( "reflect" "testing" + "github.com/google/go-cmp/cmp" + "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" "tailscale.com/types/appctype" "tailscale.com/types/opt" @@ -131,3 +133,157 @@ func TestPickSplitDNSPeers(t *testing.T) { }) } } + +type testNodeBackend struct { + ipnext.NodeBackend + peers []tailcfg.NodeView +} + +func (nb *testNodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { + for _, p := range nb.peers { + if pred(p) { + base = append(base, p) + } + } + return base +} + +func (nb *testNodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool { + return true +} + +func TestPickConnector(t *testing.T) { + exampleApp := appctype.Conn25Attr{ + Name: "example", + Connectors: []string{"tag:example"}, + Domains: []string{"example.com"}, + } + + nvWithConnectorSet := func(id tailcfg.NodeID, isConnector bool, tags ...string) tailcfg.NodeView { + return (&tailcfg.Node{ + ID: id, + Tags: tags, + Hostinfo: (&tailcfg.Hostinfo{AppConnector: opt.NewBool(isConnector)}).View(), + }).View() + } + + nv := func(id tailcfg.NodeID, tags ...string) tailcfg.NodeView { + return nvWithConnectorSet(id, true, tags...) 
+ } + + for _, tt := range []struct { + name string + candidates []tailcfg.NodeView + app appctype.Conn25Attr + want []tailcfg.NodeView + }{ + { + name: "empty-everything", + candidates: []tailcfg.NodeView{}, + app: appctype.Conn25Attr{}, + want: nil, + }, + { + name: "empty-candidates", + candidates: []tailcfg.NodeView{}, + app: exampleApp, + want: nil, + }, + { + name: "empty-app", + candidates: []tailcfg.NodeView{nv(1, "tag:example")}, + app: appctype.Conn25Attr{}, + want: nil, + }, + { + name: "one-matches", + candidates: []tailcfg.NodeView{nv(1, "tag:example")}, + app: exampleApp, + want: []tailcfg.NodeView{nv(1, "tag:example")}, + }, + { + name: "invalid-candidate", + candidates: []tailcfg.NodeView{ + {}, + nv(1, "tag:example"), + }, + app: exampleApp, + want: []tailcfg.NodeView{ + nv(1, "tag:example"), + }, + }, + { + name: "no-host-info", + candidates: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 1, + Tags: []string{"tag:example"}, + }).View(), + nv(2, "tag:example"), + }, + app: exampleApp, + want: []tailcfg.NodeView{nv(2, "tag:example")}, + }, + { + name: "not-a-connector", + candidates: []tailcfg.NodeView{nvWithConnectorSet(1, false, "tag:example.com"), nv(2, "tag:example")}, + app: exampleApp, + want: []tailcfg.NodeView{nv(2, "tag:example")}, + }, + { + name: "without-matches", + candidates: []tailcfg.NodeView{nv(1, "tag:woo"), nv(2, "tag:example")}, + app: exampleApp, + want: []tailcfg.NodeView{nv(2, "tag:example")}, + }, + { + name: "multi-tags", + candidates: []tailcfg.NodeView{nv(1, "tag:woo", "tag:hoo"), nv(2, "tag:woo", "tag:example")}, + app: exampleApp, + want: []tailcfg.NodeView{nv(2, "tag:woo", "tag:example")}, + }, + { + name: "multi-matches", + candidates: []tailcfg.NodeView{nv(1, "tag:woo", "tag:hoo"), nv(2, "tag:woo", "tag:example"), nv(3, "tag:example1", "tag:example")}, + app: appctype.Conn25Attr{ + Name: "example2", + Connectors: []string{"tag:example1", "tag:example"}, + Domains: []string{"example.com"}, + }, + want: 
[]tailcfg.NodeView{nv(2, "tag:woo", "tag:example"), nv(3, "tag:example1", "tag:example")}, + }, + { + name: "bit-of-everything", + candidates: []tailcfg.NodeView{ + nv(3, "tag:woo", "tag:hoo"), + {}, + nv(2, "tag:woo", "tag:example"), + nvWithConnectorSet(4, false, "tag:example"), + nv(1, "tag:example1", "tag:example"), + nv(7, "tag:example1", "tag:example"), + nvWithConnectorSet(5, false), + nv(6), + nvWithConnectorSet(8, false, "tag:example"), + nvWithConnectorSet(9, false), + nvWithConnectorSet(10, false), + }, + app: appctype.Conn25Attr{ + Name: "example2", + Connectors: []string{"tag:example1", "tag:example", "tag:example2"}, + Domains: []string{"example.com"}, + }, + want: []tailcfg.NodeView{ + nv(1, "tag:example1", "tag:example"), + nv(2, "tag:woo", "tag:example"), + nv(7, "tag:example1", "tag:example"), + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + got := PickConnector(&testNodeBackend{peers: tt.candidates}, tt.app) + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Fatalf("PickConnectors (-want, +got):\n%s", diff) + } + }) + } +} diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 356f1f6c438a2..436202216eef8 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -820,7 +820,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnstate from tailscale.com/client/local+ diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt index e485e3397fe11..c2c2f730023c8 100644 --- a/cmd/tailscaled/depaware-min.txt 
+++ b/cmd/tailscaled/depaware-min.txt @@ -70,7 +70,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt index 2696e17ec2713..c7f77a3c33bc0 100644 --- a/cmd/tailscaled/depaware-minbox.txt +++ b/cmd/tailscaled/depaware-minbox.txt @@ -85,7 +85,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 3f3d343de07ed..e36c975e5790b 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -250,7 +250,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de gvisor.dev/gvisor/pkg/tcpip/transport/udp from gvisor.dev/gvisor/pkg/tcpip/adapters/gonet+ gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context+ tailscale.com from tailscale.com/version - tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/appc from 
tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/atomicfile from tailscale.com/ipn+ LD tailscale.com/chirp from tailscale.com/cmd/tailscaled tailscale.com/client/local from tailscale.com/client/web+ diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index d16a96f932324..14239cfa20ff7 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -239,7 +239,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnstate from tailscale.com/client/local+ diff --git a/feature/conn25/conn25.go b/feature/conn25/conn25.go index 64fa93394d307..b5d0dc9dfe155 100644 --- a/feature/conn25/conn25.go +++ b/feature/conn25/conn25.go @@ -8,8 +8,12 @@ package conn25 import ( + "bytes" + "context" "encoding/json" "errors" + "fmt" + "io" "net/http" "net/netip" "slices" @@ -17,6 +21,7 @@ import ( "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" + "tailscale.com/appc" "tailscale.com/feature" "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnlocal" @@ -33,16 +38,30 @@ import ( // It is also the [extension] name and the log prefix. const featureName = "conn25" +const maxBodyBytes = 1024 * 1024 + +// jsonDecode decodes all of a io.ReadCloser (eg an http.Request Body) into one pointer with best practices. +// It limits the size of bytes it will read. +// It either decodes all of the bytes into the pointer, or errors (unlike json.Decoder.Decode). +// It closes the ReadCloser after reading. 
+func jsonDecode(target any, rc io.ReadCloser) error { + defer rc.Close() + respBs, err := io.ReadAll(io.LimitReader(rc, maxBodyBytes+1)) + if err != nil { + return err + } + err = json.Unmarshal(respBs, &target) + return err +} + func init() { feature.Register(featureName) - newExtension := func(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { - e := &extension{ + ipnext.RegisterExtension(featureName, func(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + return &extension{ conn25: newConn25(logger.WithPrefix(logf, "conn25: ")), backend: sb, - } - return e, nil - } - ipnext.RegisterExtension(featureName, newExtension) + }, nil + }) ipnlocal.RegisterPeerAPIHandler("/v0/connector/transit-ip", handleConnectorTransitIP) } @@ -61,6 +80,9 @@ type extension struct { conn25 *Conn25 // safe for concurrent access and only set at creation backend ipnext.SafeBackend // safe for concurrent access and only set at creation + host ipnext.Host // set in Init, read-only after + ctxCancel context.CancelCauseFunc // cancels sendLoop goroutine + mu sync.Mutex // protects the fields below isDNSHookRegistered bool } @@ -72,17 +94,32 @@ func (e *extension) Name() string { // Init implements [ipnext.Extension]. func (e *extension) Init(host ipnext.Host) error { + //Init only once + e.mu.Lock() + defer e.mu.Unlock() + if e.ctxCancel != nil { + return nil + } + e.host = host host.Hooks().OnSelfChange.Add(e.onSelfChange) + ctx, cancel := context.WithCancelCause(context.Background()) + e.ctxCancel = cancel + go e.sendLoop(ctx) return nil } // Shutdown implements [ipnlocal.Extension]. 
func (e *extension) Shutdown() error { + if e.ctxCancel != nil { + e.ctxCancel(errors.New("extension shutdown")) + } + if e.conn25 != nil { + close(e.conn25.client.addrsCh) + } return nil } func (e *extension) handleConnectorTransitIP(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { - const maxBodyBytes = 1024 * 1024 defer r.Body.Close() if r.Method != "POST" { http.Error(w, "Method should be POST", http.StatusMethodNotAllowed) @@ -172,7 +209,10 @@ func (c *Conn25) isConfigured() bool { func newConn25(logf logger.Logf) *Conn25 { c := &Conn25{ - client: &client{logf: logf}, + client: &client{ + logf: logf, + addrsCh: make(chan addrs, 64), + }, connector: &connector{logf: logf}, } return c @@ -310,7 +350,8 @@ const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experime type config struct { isConfigured bool apps []appctype.Conn25Attr - appsByDomain map[dnsname.FQDN][]string + appsByName map[string]appctype.Conn25Attr + appNamesByDomain map[dnsname.FQDN][]string selfRoutedDomains set.Set[dnsname.FQDN] } @@ -326,7 +367,8 @@ func configFromNodeView(n tailcfg.NodeView) (config, error) { cfg := config{ isConfigured: true, apps: apps, - appsByDomain: map[dnsname.FQDN][]string{}, + appsByName: map[string]appctype.Conn25Attr{}, + appNamesByDomain: map[dnsname.FQDN][]string{}, selfRoutedDomains: set.Set[dnsname.FQDN]{}, } for _, app := range apps { @@ -336,11 +378,12 @@ func configFromNodeView(n tailcfg.NodeView) (config, error) { if err != nil { return config{}, err } - mak.Set(&cfg.appsByDomain, fqdn, append(cfg.appsByDomain[fqdn], app.Name)) + mak.Set(&cfg.appNamesByDomain, fqdn, append(cfg.appNamesByDomain[fqdn], app.Name)) if selfMatchesTags { cfg.selfRoutedDomains.Add(fqdn) } } + mak.Set(&cfg.appsByName, app.Name, app) } return cfg, nil } @@ -350,7 +393,8 @@ func configFromNodeView(n tailcfg.NodeView) (config, error) { // connectors. // It's safe for concurrent use. 
type client struct { - logf logger.Logf + logf logger.Logf + addrsCh chan addrs mu sync.Mutex // protects the fields below magicIPPool *ippool @@ -402,7 +446,7 @@ func (c *client) reconfig(newCfg config) error { func (c *client) isConnectorDomain(domain dnsname.FQDN) bool { c.mu.Lock() defer c.mu.Unlock() - appNames, ok := c.config.appsByDomain[domain] + appNames, ok := c.config.appNamesByDomain[domain] return ok && len(appNames) > 0 } @@ -416,7 +460,7 @@ func (c *client) reserveAddresses(domain dnsname.FQDN, dst netip.Addr) (addrs, e if existing, ok := c.assignments.lookupByDomainDst(domain, dst); ok { return existing, nil } - appNames, _ := c.config.appsByDomain[domain] + appNames, _ := c.config.appNamesByDomain[domain] // only reserve for first app app := appNames[0] mip, err := c.magicIPPool.next() @@ -437,12 +481,100 @@ func (c *client) reserveAddresses(domain dnsname.FQDN, dst netip.Addr) (addrs, e if err := c.assignments.insert(as); err != nil { return addrs{}, err } + err = c.enqueueAddressAssignment(as) + if err != nil { + return addrs{}, err + } return as, nil } -func (c *client) enqueueAddressAssignment(addrs addrs) { - // TODO(fran) 2026-02-03 asynchronously send peerapi req to connector to - // allocate these addresses for us. 
+func (e *extension) sendLoop(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case as := <-e.conn25.client.addrsCh: + if err := e.sendAddressAssignment(ctx, as); err != nil { + e.conn25.client.logf("error sending transit IP assignment (app: %s, mip: %v, src: %v): %v", as.app, as.magic, as.dst, err) + } + } + } +} + +func (c *client) enqueueAddressAssignment(addrs addrs) error { + select { + // TODO(fran) investigate the value of waiting for multiple addresses and sending them + // in one ConnectorTransitIPRequest + case c.addrsCh <- addrs: + return nil + default: + c.logf("address assignment queue full, dropping transit assignment for %v", addrs.domain) + return errors.New("queue full") + } +} + +func makePeerAPIReq(ctx context.Context, httpClient *http.Client, urlBase string, as addrs) error { + url := urlBase + "/v0/connector/transit-ip" + + reqBody := ConnectorTransitIPRequest{ + TransitIPs: []TransitIPRequest{{ + TransitIP: as.transit, + DestinationIP: as.dst, + App: as.app, + }}, + } + bs, err := json.Marshal(reqBody) + if err != nil { + return fmt.Errorf("marshalling request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bs)) + if err != nil { + return fmt.Errorf("creating request: %w", err) + } + + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("sending request: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("connector returned HTTP %d", resp.StatusCode) + } + + var respBody ConnectorTransitIPResponse + err = jsonDecode(&respBody, resp.Body) + if err != nil { + return fmt.Errorf("decoding response: %w", err) + } + + if len(respBody.TransitIPs) > 0 && respBody.TransitIPs[0].Code != OK { + return fmt.Errorf("connector error: %s", respBody.TransitIPs[0].Message) + } + return nil +} + +func (e *extension) sendAddressAssignment(ctx context.Context, as addrs) error { + app, ok := 
e.conn25.client.config.appsByName[as.app] + if !ok { + e.conn25.client.logf("App not found for app: %s (domain: %s)", as.app, as.domain) + return errors.New("app not found") + } + + nb := e.host.NodeBackend() + peers := appc.PickConnector(nb, app) + var urlBase string + for _, p := range peers { + urlBase = nb.PeerAPIBase(p) + if urlBase != "" { + break + } + } + if urlBase == "" { + return errors.New("no connector peer found to handle address assignment") + } + client := e.backend.Sys().Dialer.Get().PeerAPIHTTPClient() + return makePeerAPIReq(ctx, client, urlBase, as) } func (c *client) mapDNSResponse(buf []byte) []byte { @@ -501,7 +633,6 @@ func (c *client) mapDNSResponse(buf []byte) []byte { c.logf("assigned connector addresses unexpectedly empty: %v", err) return buf } - c.enqueueAddressAssignment(addrs) default: if err := p.SkipAnswer(); err != nil { c.logf("error parsing dns response: %v", err) diff --git a/feature/conn25/conn25_test.go b/feature/conn25/conn25_test.go index 7ed5c13b28ea9..97a22c50017df 100644 --- a/feature/conn25/conn25_test.go +++ b/feature/conn25/conn25_test.go @@ -5,17 +5,24 @@ package conn25 import ( "encoding/json" + "net/http" + "net/http/httptest" "net/netip" "reflect" "testing" + "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" + "tailscale.com/ipn/ipnext" + "tailscale.com/net/tsdial" "tailscale.com/tailcfg" + "tailscale.com/tsd" "tailscale.com/types/appctype" "tailscale.com/types/logger" + "tailscale.com/types/opt" "tailscale.com/util/dnsname" "tailscale.com/util/must" "tailscale.com/util/set" @@ -214,7 +221,7 @@ func TestReserveIPs(t *testing.T) { c.client.transitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24")) mbd := map[dnsname.FQDN][]string{} mbd["example.com."] = []string{"a"} - c.client.config.appsByDomain = mbd + c.client.config.appNamesByDomain = mbd dst := netip.MustParseAddr("0.0.0.1") addrs, err := 
c.client.reserveAddresses("example.com.", dst) @@ -340,7 +347,7 @@ func TestConfigReconfig(t *testing.T) { if (err != nil) != tt.wantErr { t.Fatalf("wantErr: %t, err: %v", tt.wantErr, err) } - if diff := cmp.Diff(tt.wantAppsByDomain, c.appsByDomain); diff != "" { + if diff := cmp.Diff(tt.wantAppsByDomain, c.appNamesByDomain); diff != "" { t.Errorf("appsByDomain diff (-want, +got):\n%s", diff) } if diff := cmp.Diff(tt.wantSelfRoutedDomains, c.selfRoutedDomains); diff != "" { @@ -499,7 +506,7 @@ func TestReserveAddressesDeduplicated(t *testing.T) { c := newConn25(logger.Discard) c.client.magicIPPool = newIPPool(mustIPSetFromPrefix("100.64.0.0/24")) c.client.transitIPPool = newIPPool(mustIPSetFromPrefix("169.254.0.0/24")) - c.client.config.appsByDomain = map[dnsname.FQDN][]string{"example.com.": {"a"}} + c.client.config.appNamesByDomain = map[dnsname.FQDN][]string{"example.com.": {"a"}} dst := netip.MustParseAddr("0.0.0.1") first, err := c.client.reserveAddresses("example.com.", dst) @@ -522,3 +529,128 @@ func TestReserveAddressesDeduplicated(t *testing.T) { t.Errorf("want 1 entry in byDomainDst, got %d", got) } } + +type testNodeBackend struct { + ipnext.NodeBackend + peers []tailcfg.NodeView + peerAPIURL string // should be per peer but there's only one peer in our test so this is ok for now +} + +func (nb *testNodeBackend) AppendMatchingPeers(base []tailcfg.NodeView, pred func(tailcfg.NodeView) bool) []tailcfg.NodeView { + for _, p := range nb.peers { + if pred(p) { + base = append(base, p) + } + } + return base +} + +func (nb *testNodeBackend) PeerHasPeerAPI(p tailcfg.NodeView) bool { + return true +} + +func (nb *testNodeBackend) PeerAPIBase(p tailcfg.NodeView) string { + return nb.peerAPIURL +} + +type testHost struct { + ipnext.Host + nb ipnext.NodeBackend + hooks ipnext.Hooks +} + +func (h *testHost) NodeBackend() ipnext.NodeBackend { return h.nb } +func (h *testHost) Hooks() *ipnext.Hooks { return &h.hooks } + +type testSafeBackend struct { + 
ipnext.SafeBackend + sys *tsd.System +} + +func (b *testSafeBackend) Sys() *tsd.System { return b.sys } + +// TestEnqueueAddress tests that after enqueueAddress has been called a +// peerapi request is made to a peer. +func TestEnqueueAddress(t *testing.T) { + // make a fake peer to test against + received := make(chan ConnectorTransitIPRequest, 1) + peersAPI := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v0/connector/transit-ip" { + http.Error(w, "unexpected path", http.StatusNotFound) + return + } + var req ConnectorTransitIPRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "bad body", http.StatusBadRequest) + return + } + received <- req + resp := ConnectorTransitIPResponse{ + TransitIPs: []TransitIPResponse{{Code: OK}}, + } + json.NewEncoder(w).Encode(resp) + })) + defer peersAPI.Close() + + connectorPeer := (&tailcfg.Node{ + ID: tailcfg.NodeID(1), + Tags: []string{"tag:woo"}, + Hostinfo: (&tailcfg.Hostinfo{AppConnector: opt.NewBool(true)}).View(), + }).View() + + // make extension to test + sys := &tsd.System{} + sys.Dialer.Set(&tsdial.Dialer{Logf: logger.Discard}) + + ext := &extension{ + conn25: newConn25(logger.Discard), + backend: &testSafeBackend{sys: sys}, + } + if err := ext.Init(&testHost{ + nb: &testNodeBackend{ + peers: []tailcfg.NodeView{connectorPeer}, + peerAPIURL: peersAPI.URL, + }, + }); err != nil { + t.Fatal(err) + } + defer ext.Shutdown() + + sn := makeSelfNode(t, appctype.Conn25Attr{ + Name: "app1", + Connectors: []string{"tag:woo"}, + Domains: []string{"example.com"}, + }, []string{}) + err := ext.conn25.reconfig(sn) + if err != nil { + t.Fatal(err) + } + + as := addrs{ + dst: netip.MustParseAddr("1.2.3.4"), + magic: netip.MustParseAddr("100.64.0.0"), + transit: netip.MustParseAddr("169.254.0.1"), + domain: "example.com.", + app: "app1", + } + ext.conn25.client.enqueueAddressAssignment(as) + + select { + case got := <-received: + if 
len(got.TransitIPs) != 1 { + t.Fatalf("want 1 TransitIP in request, got %d", len(got.TransitIPs)) + } + tip := got.TransitIPs[0] + if tip.TransitIP != as.transit { + t.Errorf("TransitIP: got %v, want %v", tip.TransitIP, as.transit) + } + if tip.DestinationIP != as.dst { + t.Errorf("DestinationIP: got %v, want %v", tip.DestinationIP, as.dst) + } + if tip.App != as.app { + t.Errorf("App: got %q, want %q", tip.App, as.app) + } + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for connector to receive request") + } +} diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 9da63feb4bb0c..2df729c21d3f0 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -235,7 +235,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ - tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnlocal/netmapcache from tailscale.com/ipn/ipnlocal tailscale.com/ipn/ipnstate from tailscale.com/client/local+ From 633e892164596bfc69bf064e00a139538c3e2b91 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 9 Mar 2026 15:47:33 +0000 Subject: [PATCH 1072/1093] ssh/tailssh: fix race between termination message write and session teardown When a recording upload fails mid-session, killProcessOnContextDone writes the termination message to ss.Stderr() and kills the process. Meanwhile, run() takes the ss.ctx.Done() path and proceeds to ss.Exit(), which tears down the SSH channel. The termination message write races with the channel teardown, so the client sometimes never receives it. Fix by adding an exitHandled channel that killProcessOnContextDone closes when done. 
run() now waits on this channel after ctx.Done() fires, ensuring the termination message is fully written before the SSH channel is torn down. Fixes #7707 Change-Id: Ib60116c928d3af46d553a4186a72963c2c731e3e Signed-off-by: Brad Fitzpatrick --- ssh/tailssh/tailssh.go | 12 ++++++++++++ ssh/tailssh/tailssh_test.go | 6 ------ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index debad2b5cf195..96f9c826c3a70 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -712,6 +712,12 @@ type sshSession struct { // We use this sync.Once to ensure that we only terminate the process once, // either it exits itself or is terminated exitOnce sync.Once + + // exitHandled is closed when killProcessOnContextDone finishes writing any + // termination message to the client. run() waits on this before calling + // ss.Exit to ensure the message is flushed before the SSH channel is torn + // down. It is initialized by run() before starting killProcessOnContextDone. + exitHandled chan struct{} } func (ss *sshSession) vlogf(format string, args ...any) { @@ -807,6 +813,7 @@ func (c *conn) fetchSSHAction(ctx context.Context, url string) (*tailcfg.SSHActi // killProcessOnContextDone waits for ss.ctx to be done and kills the process, // unless the process has already exited. func (ss *sshSession) killProcessOnContextDone() { + defer close(ss.exitHandled) <-ss.ctx.Done() // Either the process has already exited, in which case this does nothing. // Or, the process is still running in which case this will kill it. 
@@ -987,6 +994,7 @@ func (ss *sshSession) run() { ss.Exit(1) return } + ss.exitHandled = make(chan struct{}) go ss.killProcessOnContextDone() var processDone atomic.Bool @@ -1049,6 +1057,10 @@ func (ss *sshSession) run() { select { case <-outputDone: case <-ss.ctx.Done(): + // Wait for killProcessOnContextDone to finish writing any + // termination message to the client before we call ss.Exit, + // which tears down the SSH channel. + <-ss.exitHandled } if err == nil { diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 4d6f2172d90f4..ec577461660f8 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -37,7 +37,6 @@ import ( gossh "golang.org/x/crypto/ssh" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" - "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/net/memnet" @@ -490,14 +489,9 @@ func newSSHRule(action *tailcfg.SSHAction) *tailcfg.SSHRule { } func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7707") - if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS) } - if runtime.GOOS == "darwin" && cibuild.On() { - t.Skipf("this fails on CI on macOS; see https://github.com/tailscale/tailscale/issues/7707") - } var handler http.HandlerFunc recordingServer := mockRecordingServer(t, func(w http.ResponseWriter, r *http.Request) { From 8d3efd488dd512afa95af0927e45b3c608e7ae31 Mon Sep 17 00:00:00 2001 From: Nick Khyl Date: Mon, 9 Mar 2026 16:23:43 -0500 Subject: [PATCH 1073/1093] go.mod: bump for internal/poll: move rsan to heap on windows This picks up the change in tailscale/go@5cce30e20c1fc6d8463b0a99acdd9777c4ad124b Updates #18884 Updates tailscale/go#158 Updates golang/go#77975 Signed-off-by: Nick Khyl --- go.toolchain.rev | 2 +- go.toolchain.rev.sri | 2 +- 2 files changed, 2 insertions(+), 2 
deletions(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index 753deba47a297..0b07150d516a0 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -0f1a3326f30508521e7b8322f4e0f084560c1404 +5cce30e20c1fc6d8463b0a99acdd9777c4ad124b diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri index d6105252b02d0..39306739de25f 100644 --- a/go.toolchain.rev.sri +++ b/go.toolchain.rev.sri @@ -1 +1 @@ -sha256-zyo1dIQnrwq8TVxwKCjJ3PfiShjAXO4wMQb/F7ze/mU= +sha256-nYXUQfKPoHgKCvK5BCh0BKOgPh6n90XX+iUNETLETBA= From 0023f1a969dfa91ddbe573432a2b790e4b9bdf17 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 9 Mar 2026 22:02:46 +0000 Subject: [PATCH 1074/1093] .github/workflows: use tailscale/go for Windows CI too We did so for Linux and macOS already, so also do so for Windows. We only didn't already because originally we never produced binaries for it (due to our corp repo not needing them), and later because we had no ./tool/go wrapper. But we have both of those things now. Updates #18884 Signed-off-by: Brad Fitzpatrick --- .github/workflows/test.yml | 27 ++++++--------------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 064765ca2a2af..4f6068e6e33cd 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -244,12 +244,6 @@ jobs: with: path: ${{ github.workspace }}/src - - name: Install Go - uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 - with: - go-version-file: ${{ github.workspace }}/src/go.mod - cache: false - - name: Restore Go module cache uses: actions/cache/restore@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3 with: @@ -269,7 +263,9 @@ jobs: - name: test if: matrix.key != 'win-bench' # skip on bench builder working-directory: src - run: go run ./cmd/testwrapper sharded:${{ matrix.shard }} + run: ./tool/go run ./cmd/testwrapper sharded:${{ matrix.shard }} + env: + NOPWSHDEBUG: "true" # to quiet 
tool/gocross/gocross-wrapper.ps1 in CI - name: bench all if: matrix.key == 'win-bench' @@ -277,7 +273,9 @@ jobs: # Don't use -bench=. -benchtime=1x. # Somewhere in the layers (powershell?) # the equals signs cause great confusion. - run: go test ./... -bench . -benchtime 1x -run "^$" + run: ./tool/go test ./... -bench . -benchtime 1x -run "^$" + env: + NOPWSHDEBUG: "true" # to quiet tool/gocross/gocross-wrapper.ps1 in CI - name: Print stats shell: pwsh @@ -287,19 +285,6 @@ jobs: run: | Invoke-Expression "$env:GOCACHEPROG --stats" | jq . - win-tool-go: - runs-on: windows-latest - needs: gomod-cache - name: Windows (win-tool-go) - steps: - - name: checkout - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - with: - path: src - - name: test-tool-go - working-directory: src - run: ./tool/go version - macos: runs-on: macos-latest needs: gomod-cache From 9522619031287c6dcda68cda11dbcf2baf326ed3 Mon Sep 17 00:00:00 2001 From: David Bond Date: Tue, 10 Mar 2026 10:33:55 +0000 Subject: [PATCH 1075/1093] cmd/k8s-operator: use correct tailnet client for L7 & L3 ingresses (#18749) * cmd/k8s-operator: use correct tailnet client for L7 & L3 ingresses This commit fixes a bug when using multi-tailnet within the operator to spin up L7 & L3 ingresses where the client used to create the tailscale services was not switching depending on the tailnet used by the proxygroup backing the service/ingress. 
Updates: https://github.com/tailscale/corp/issues/34561 Signed-off-by: David Bond * cmd/k8s-operator: adding server url to proxygroups when a custom tailnet has been specified Signed-off-by: chaosinthecrd (cherry picked from commit 3b21ac5504e713e32dfcd43d9ee21e7e712ac200) --------- Signed-off-by: David Bond Signed-off-by: chaosinthecrd Co-authored-by: chaosinthecrd --- cmd/k8s-operator/api-server-proxy-pg.go | 57 ++++--- cmd/k8s-operator/api-server-proxy-pg_test.go | 47 +++--- cmd/k8s-operator/ingress-for-pg.go | 154 ++++++++----------- cmd/k8s-operator/ingress-for-pg_test.go | 105 ++++++++----- cmd/k8s-operator/operator.go | 3 - cmd/k8s-operator/proxygroup.go | 73 +++++---- cmd/k8s-operator/sts.go | 48 ++++-- cmd/k8s-operator/svc-for-pg.go | 144 +++++++++-------- cmd/k8s-operator/svc-for-pg_test.go | 14 +- cmd/k8s-operator/tailnet.go | 23 ++- cmd/k8s-operator/testutils_test.go | 17 +- cmd/k8s-operator/tsrecorder.go | 40 +++-- 12 files changed, 403 insertions(+), 322 deletions(-) diff --git a/cmd/k8s-operator/api-server-proxy-pg.go b/cmd/k8s-operator/api-server-proxy-pg.go index ff04d553a7da3..0900fd0aaa264 100644 --- a/cmd/k8s-operator/api-server-proxy-pg.go +++ b/cmd/k8s-operator/api-server-proxy-pg.go @@ -23,6 +23,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/internal/client/tailscale" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" @@ -52,7 +53,6 @@ type KubeAPIServerTSServiceReconciler struct { logger *zap.SugaredLogger tsClient tsClient tsNamespace string - lc localClient defaultTags []string operatorID string // stableID of the operator's Tailscale device @@ -78,9 +78,14 @@ func (r *KubeAPIServerTSServiceReconciler) Reconcile(ctx context.Context, req re serviceName := serviceNameForAPIServerProxy(pg) logger = logger.With("Tailscale Service", serviceName) + tailscaleClient, err := r.getClient(ctx, 
pg.Spec.Tailnet) + if err != nil { + return res, fmt.Errorf("failed to get tailscale client: %w", err) + } + if markedForDeletion(pg) { logger.Debugf("ProxyGroup is being deleted, ensuring any created resources are cleaned up") - if err = r.maybeCleanup(ctx, serviceName, pg, logger); err != nil && strings.Contains(err.Error(), optimisticLockErrorMsg) { + if err = r.maybeCleanup(ctx, serviceName, pg, logger, tailscaleClient); err != nil && strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) return res, nil } @@ -88,7 +93,7 @@ func (r *KubeAPIServerTSServiceReconciler) Reconcile(ctx context.Context, req re return res, err } - err = r.maybeProvision(ctx, serviceName, pg, logger) + err = r.maybeProvision(ctx, serviceName, pg, logger, tailscaleClient) if err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) @@ -100,11 +105,27 @@ func (r *KubeAPIServerTSServiceReconciler) Reconcile(ctx context.Context, req re return reconcile.Result{}, nil } +// getClient returns the appropriate Tailscale client for the given tailnet. +// If no tailnet is specified, returns the default client. +func (r *KubeAPIServerTSServiceReconciler) getClient(ctx context.Context, tailnetName string) (tsClient, + error) { + if tailnetName == "" { + return r.tsClient, nil + } + + tc, _, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tailnetName) + if err != nil { + return nil, err + } + + return tc, nil +} + // maybeProvision ensures that a Tailscale Service for this ProxyGroup exists // and is up to date. // // Returns true if the operation resulted in a Tailscale Service update. 
-func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (err error) { +func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) (err error) { var dnsName string oldPGStatus := pg.Status.DeepCopy() defer func() { @@ -156,7 +177,7 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s // 1. Check there isn't a Tailscale Service with the same hostname // already created and not owned by this ProxyGroup. - existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + existingTSSvc, err := tsClient.GetVIPService(ctx, serviceName) if err != nil && !isErrorTailscaleServiceNotFound(err) { return fmt.Errorf("error getting Tailscale Service %q: %w", serviceName, err) } @@ -198,17 +219,17 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s !ownersAreSetAndEqual(tsSvc, existingTSSvc) || !slices.Equal(tsSvc.Ports, existingTSSvc.Ports) { logger.Infof("Ensuring Tailscale Service exists and is up to date") - if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + if err = tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { return fmt.Errorf("error creating Tailscale Service: %w", err) } } // 3. Ensure that TLS Secret and RBAC exists. - tcd, err := tailnetCertDomain(ctx, r.lc) + dnsName, err = dnsNameForService(ctx, r.Client, serviceName, pg, r.tsNamespace) if err != nil { - return fmt.Errorf("error determining DNS name base: %w", err) + return fmt.Errorf("error determining service DNS name: %w", err) } - dnsName = serviceName.WithoutPrefix() + "." 
+ tcd + if err = r.ensureCertResources(ctx, pg, dnsName); err != nil { return fmt.Errorf("error ensuring cert resources: %w", err) } @@ -219,7 +240,7 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s } // 5. Clean up any stale Tailscale Services from previous resource versions. - if err = r.maybeDeleteStaleServices(ctx, pg, logger); err != nil { + if err = r.maybeDeleteStaleServices(ctx, pg, logger, tsClient); err != nil { return fmt.Errorf("failed to delete stale Tailscale Services: %w", err) } @@ -230,7 +251,7 @@ func (r *KubeAPIServerTSServiceReconciler) maybeProvision(ctx context.Context, s // Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only // deleted if it does not contain any other owner references. If it does, the cleanup only removes the owner reference // corresponding to this Service. -func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (err error) { +func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) (err error) { ix := slices.Index(pg.Finalizers, proxyPGFinalizerName) if ix < 0 { logger.Debugf("no finalizer, nothing to do") @@ -244,11 +265,11 @@ func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, ser } }() - if _, err = cleanupTailscaleService(ctx, r.tsClient, serviceName, r.operatorID, logger); err != nil { + if _, err = cleanupTailscaleService(ctx, tsClient, serviceName, r.operatorID, logger); err != nil { return fmt.Errorf("error deleting Tailscale Service: %w", err) } - if err = cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg.Name, serviceName); err != nil { + if err = cleanupCertResources(ctx, r.Client, r.tsNamespace, serviceName, pg); err != nil { return fmt.Errorf("failed to 
clean up cert resources: %w", err) } @@ -257,10 +278,10 @@ func (r *KubeAPIServerTSServiceReconciler) maybeCleanup(ctx context.Context, ser // maybeDeleteStaleServices deletes Services that have previously been created for // this ProxyGroup but are no longer needed. -func (r *KubeAPIServerTSServiceReconciler) maybeDeleteStaleServices(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) error { +func (r *KubeAPIServerTSServiceReconciler) maybeDeleteStaleServices(ctx context.Context, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) error { serviceName := serviceNameForAPIServerProxy(pg) - svcs, err := r.tsClient.ListVIPServices(ctx) + svcs, err := tsClient.ListVIPServices(ctx) if err != nil { return fmt.Errorf("error listing Tailscale Services: %w", err) } @@ -285,11 +306,11 @@ func (r *KubeAPIServerTSServiceReconciler) maybeDeleteStaleServices(ctx context. } logger.Infof("Deleting Tailscale Service %s", svc.Name) - if err := r.tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { + if err = tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { return fmt.Errorf("error deleting Tailscale Service %s: %w", svc.Name, err) } - if err = cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg.Name, svc.Name); err != nil { + if err = cleanupCertResources(ctx, r.Client, r.tsNamespace, svc.Name, pg); err != nil { return fmt.Errorf("failed to clean up cert resources: %w", err) } } @@ -343,7 +364,7 @@ func (r *KubeAPIServerTSServiceReconciler) maybeAdvertiseServices(ctx context.Co // Only advertise a Tailscale Service once the TLS certs required for // serving it are available. 
- shouldBeAdvertised, err := hasCerts(ctx, r.Client, r.lc, r.tsNamespace, serviceName) + shouldBeAdvertised, err := hasCerts(ctx, r.Client, r.tsNamespace, serviceName, pg) if err != nil { return fmt.Errorf("error checking TLS credentials provisioned for Tailscale Service %q: %w", serviceName, err) } diff --git a/cmd/k8s-operator/api-server-proxy-pg_test.go b/cmd/k8s-operator/api-server-proxy-pg_test.go index 8fb18c818edb1..52dda93e515ee 100644 --- a/cmd/k8s-operator/api-server-proxy-pg_test.go +++ b/cmd/k8s-operator/api-server-proxy-pg_test.go @@ -16,8 +16,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/internal/client/tailscale" - "tailscale.com/ipn/ipnstate" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/k8s-proxy/conf" @@ -107,14 +107,6 @@ func TestAPIServerProxyReconciler(t *testing.T) { } ft.CreateOrUpdateVIPService(t.Context(), ingressTSSvc) - lc := &fakeLocalClient{ - status: &ipnstate.Status{ - CurrentTailnet: &ipnstate.TailnetStatus{ - MagicDNSSuffix: "ts.net", - }, - }, - } - r := &KubeAPIServerTSServiceReconciler{ Client: fc, tsClient: ft, @@ -122,7 +114,6 @@ func TestAPIServerProxyReconciler(t *testing.T) { tsNamespace: ns, logger: zap.Must(zap.NewDevelopment()).Sugar(), recorder: record.NewFakeRecorder(10), - lc: lc, clock: tstest.NewClock(tstest.ClockOpts{}), operatorID: "self-id", } @@ -147,6 +138,20 @@ func TestAPIServerProxyReconciler(t *testing.T) { if err := ft.DeleteVIPService(t.Context(), "svc:"+pgName); err != nil { t.Fatalf("deleting initial Tailscale Service: %v", err) } + + // Create the state secret for the ProxyGroup without services being advertised. 
+ mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-0", + Namespace: ns, + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeState), + }, + Data: map[string][]byte{ + "_current-profile": []byte("test"), + "test": []byte(`{"Config":{"NodeID":"node-foo", "UserProfile": {"LoginName": "test-pg.ts.net" }}}`), + }, + }) + expectReconciled(t, r, "", pgName) tsSvc, err := ft.GetVIPService(t.Context(), "svc:"+pgName) @@ -190,17 +195,19 @@ func TestAPIServerProxyReconciler(t *testing.T) { expectEqual(t, fc, pg, omitPGStatusConditionMessages) // Unchanged status. // Simulate Pod prefs updated with advertised services; should see Configured condition updated to true. - mustCreate(t, fc, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pg-0", - Namespace: ns, - Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeState), - }, - Data: map[string][]byte{ - "_current-profile": []byte("profile-foo"), - "profile-foo": []byte(`{"AdvertiseServices":["svc:test-pg"],"Config":{"NodeID":"node-foo"}}`), - }, + mustUpdate(t, fc, ns, "test-pg-0", func(o *corev1.Secret) { + var p prefs + if err = json.Unmarshal(o.Data["test"], &p); err != nil { + t.Errorf("failed to unmarshal preferences: %v", err) + } + + p.AdvertiseServices = []string{"svc:test-pg"} + o.Data["test"], err = json.Marshal(p) + if err != nil { + t.Errorf("failed to marshal preferences: %v", err) + } }) + expectReconciled(t, r, "", pgName) tsoperator.SetProxyGroupCondition(pg, tsapi.KubeAPIServerProxyConfigured, metav1.ConditionTrue, reasonKubeAPIServerProxyConfigured, "", 1, r.clock, r.logger) pg.Status.URL = "https://" + defaultDomain diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 4b140a8aedd72..28a836e975273 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -33,7 +33,6 @@ import ( "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" - "tailscale.com/ipn/ipnstate" tsoperator 
"tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -45,22 +44,15 @@ import ( ) const ( - serveConfigKey = "serve-config.json" - TailscaleSvcOwnerRef = "tailscale.com/k8s-operator:owned-by:%s" + serveConfigKey = "serve-config.json" // FinalizerNamePG is the finalizer used by the IngressPGReconciler - FinalizerNamePG = "tailscale.com/ingress-pg-finalizer" - + FinalizerNamePG = "tailscale.com/ingress-pg-finalizer" indexIngressProxyGroup = ".metadata.annotations.ingress-proxy-group" // annotationHTTPEndpoint can be used to configure the Ingress to expose an HTTP endpoint to tailnet (as // well as the default HTTPS endpoint). - annotationHTTPEndpoint = "tailscale.com/http-endpoint" - - labelDomain = "tailscale.com/domain" - msgFeatureFlagNotEnabled = "Tailscale Service feature flag is not enabled for this tailnet, skipping provisioning. " + - "Please contact Tailscale support through https://tailscale.com/contact/support to enable the feature flag, then recreate the operator's Pod." 
- - warningTailscaleServiceFeatureFlagNotEnabled = "TailscaleServiceFeatureFlagNotEnabled" - managedTSServiceComment = "This Tailscale Service is managed by the Tailscale Kubernetes Operator, do not modify" + annotationHTTPEndpoint = "tailscale.com/http-endpoint" + labelDomain = "tailscale.com/domain" + managedTSServiceComment = "This Tailscale Service is managed by the Tailscale Kubernetes Operator, do not modify" ) var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount) @@ -75,7 +67,6 @@ type HAIngressReconciler struct { tsClient tsClient tsnetServer tsnetServer tsNamespace string - lc localClient defaultTags []string operatorID string // stableID of the operator's Tailscale device ingressClassName string @@ -109,11 +100,12 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque ing := new(networkingv1.Ingress) err = r.Get(ctx, req.NamespacedName, ing) - if apierrors.IsNotFound(err) { + switch { + case apierrors.IsNotFound(err): // Request object not found, could have been deleted after reconcile request. logger.Debugf("Ingress not found, assuming it was deleted") return res, nil - } else if err != nil { + case err != nil: return res, fmt.Errorf("failed to get Ingress: %w", err) } @@ -123,6 +115,23 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque hostname := hostnameForIngress(ing) logger = logger.With("hostname", hostname) + pgName := ing.Annotations[AnnotationProxyGroup] + pg := &tsapi.ProxyGroup{} + + err = r.Get(ctx, client.ObjectKey{Name: pgName}, pg) + switch { + case apierrors.IsNotFound(err): + logger.Infof("ProxyGroup %q does not exist, it may have been deleted. 
Reconciliation for ingress %q will be skipped until the ProxyGroup is found", pgName, ing.Name) + return res, nil + case err != nil: + return res, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) + } + + tailscaleClient, err := clientFromProxyGroup(ctx, r.Client, pg, r.tsNamespace, r.tsClient) + if err != nil { + return res, fmt.Errorf("failed to get tailscale client: %w", err) + } + // needsRequeue is set to true if the underlying Tailscale Service has // changed as a result of this reconcile. If that is the case, we // reconcile the Ingress one more time to ensure that concurrent updates @@ -130,9 +139,9 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque // resulted in another actor overwriting our Tailscale Service update. needsRequeue := false if !ing.DeletionTimestamp.IsZero() || !r.shouldExpose(ing) { - needsRequeue, err = r.maybeCleanup(ctx, hostname, ing, logger) + needsRequeue, err = r.maybeCleanup(ctx, hostname, ing, logger, tailscaleClient, pg) } else { - needsRequeue, err = r.maybeProvision(ctx, hostname, ing, logger) + needsRequeue, err = r.maybeProvision(ctx, hostname, ing, logger, tailscaleClient, pg) } if err != nil { return res, err @@ -151,16 +160,16 @@ func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Reque // If a Tailscale Service exists, but does not have an owner reference from any operator, we error // out assuming that this is an owner reference created by an unknown actor. // Returns true if the operation resulted in a Tailscale Service update. 
-func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) (svcsChanged bool, err error) { +func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger, tsClient tsClient, pg *tsapi.ProxyGroup) (svcsChanged bool, err error) { // Currently (2025-05) Tailscale Services are behind an alpha feature flag that // needs to be explicitly enabled for a tailnet to be able to use them. serviceName := tailcfg.ServiceName("svc:" + hostname) - existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + existingTSSvc, err := tsClient.GetVIPService(ctx, serviceName) if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } - if err := validateIngressClass(ctx, r.Client, r.ingressClassName); err != nil { + if err = validateIngressClass(ctx, r.Client, r.ingressClassName); err != nil { logger.Infof("error validating tailscale IngressClass: %v.", err) return false, nil } @@ -172,14 +181,6 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin } logger = logger.With("ProxyGroup", pgName) - pg := &tsapi.ProxyGroup{} - if err := r.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { - if apierrors.IsNotFound(err) { - logger.Infof("ProxyGroup does not exist") - return false, nil - } - return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) - } if !tsoperator.ProxyGroupAvailable(pg) { logger.Infof("ProxyGroup is not (yet) ready") return false, nil @@ -220,7 +221,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // that in edge cases (a single update changed both hostname and removed // ProxyGroup annotation) the Tailscale Service is more likely to be // (eventually) removed. 
- svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pgName, logger) + svcsChanged, err = r.maybeCleanupProxyGroup(ctx, logger, tsClient, pg) if err != nil { return false, fmt.Errorf("failed to cleanup Tailscale Service resources for ProxyGroup: %w", err) } @@ -245,12 +246,12 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, nil } // 3. Ensure that TLS Secret and RBAC exists - tcd, err := tailnetCertDomain(ctx, r.lc) + dnsName, err := dnsNameForService(ctx, r.Client, serviceName, pg, r.tsNamespace) if err != nil { - return false, fmt.Errorf("error determining DNS name base: %w", err) + return false, fmt.Errorf("error determining DNS name for service: %w", err) } - dnsName := hostname + "." + tcd - if err := r.ensureCertResources(ctx, pg, dnsName, ing); err != nil { + + if err = r.ensureCertResources(ctx, pg, dnsName, ing); err != nil { return false, fmt.Errorf("error ensuring cert resources: %w", err) } @@ -358,7 +359,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin !reflect.DeepEqual(tsSvc.Ports, existingTSSvc.Ports) || !ownersAreSetAndEqual(tsSvc, existingTSSvc) { logger.Infof("Ensuring Tailscale Service exists and is up to date") - if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + if err := tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { return false, fmt.Errorf("error creating Tailscale Service: %w", err) } } @@ -369,7 +370,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin if isHTTPEndpointEnabled(ing) || isHTTPRedirectEnabled(ing) { mode = serviceAdvertisementHTTPAndHTTPS } - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, mode, logger); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, serviceName, mode, pg); err != nil { return false, fmt.Errorf("failed to update tailscaled config: %w", err) } @@ -386,7 +387,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx 
context.Context, hostname strin ing.Status.LoadBalancer.Ingress = nil default: var ports []networkingv1.IngressPortStatus - hasCerts, err := hasCerts(ctx, r.Client, r.lc, r.tsNamespace, serviceName) + hasCerts, err := hasCerts(ctx, r.Client, r.tsNamespace, serviceName, pg) if err != nil { return false, fmt.Errorf("error checking TLS credentials provisioned for Ingress: %w", err) } @@ -426,9 +427,10 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin logger.Infof("%s. %d Pod(s) advertising Tailscale Service", prefix, count) } - if err := r.Status().Update(ctx, ing); err != nil { + if err = r.Status().Update(ctx, ing); err != nil { return false, fmt.Errorf("failed to update Ingress status: %w", err) } + return svcsChanged, nil } @@ -438,9 +440,9 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin // operator instances, else the owner reference is cleaned up. Returns true if // the operation resulted in an existing Tailscale Service updates (owner // reference removal). 
-func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) (svcsChanged bool, err error) { +func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, logger *zap.SugaredLogger, tsClient tsClient, pg *tsapi.ProxyGroup) (svcsChanged bool, err error) { // Get serve config for the ProxyGroup - cm, cfg, err := r.proxyGroupServeConfig(ctx, proxyGroupName) + cm, cfg, err := r.proxyGroupServeConfig(ctx, pg.Name) if err != nil { return false, fmt.Errorf("getting serve config: %w", err) } @@ -468,7 +470,7 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG if !found { logger.Infof("Tailscale Service %q is not owned by any Ingress, cleaning up", tsSvcName) - tsService, err := r.tsClient.GetVIPService(ctx, tsSvcName) + tsService, err := tsClient.GetVIPService(ctx, tsSvcName) if isErrorTailscaleServiceNotFound(err) { return false, nil } @@ -477,22 +479,24 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG } // Delete the Tailscale Service from control if necessary. - svcsChanged, err = r.cleanupTailscaleService(ctx, tsService, logger) + svcsChanged, err = r.cleanupTailscaleService(ctx, tsService, logger, tsClient) if err != nil { return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err) } // Make sure the Tailscale Service is not advertised in tailscaled or serve config. 
- if err = r.maybeUpdateAdvertiseServicesConfig(ctx, proxyGroupName, tsSvcName, serviceAdvertisementOff, logger); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, tsSvcName, serviceAdvertisementOff, pg); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } + _, ok := cfg.Services[tsSvcName] if ok { logger.Infof("Removing Tailscale Service %q from serve config", tsSvcName) delete(cfg.Services, tsSvcName) serveConfigChanged = true } - if err := cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, proxyGroupName, tsSvcName); err != nil { + + if err = cleanupCertResources(ctx, r.Client, r.tsNamespace, tsSvcName, pg); err != nil { return false, fmt.Errorf("failed to clean up cert resources: %w", err) } } @@ -515,7 +519,7 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG // Ingress is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only // deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference // corresponding to this Ingress. 
-func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) (svcChanged bool, err error) { +func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger, tsClient tsClient, pg *tsapi.ProxyGroup) (svcChanged bool, err error) { logger.Debugf("Ensuring any resources for Ingress are cleaned up") ix := slices.Index(ing.Finalizers, FinalizerNamePG) if ix < 0 { @@ -524,7 +528,7 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } logger.Infof("Ensuring that Tailscale Service %q configuration is cleaned up", hostname) serviceName := tailcfg.ServiceName("svc:" + hostname) - svc, err := r.tsClient.GetVIPService(ctx, serviceName) + svc, err := tsClient.GetVIPService(ctx, serviceName) if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service: %w", err) } @@ -538,8 +542,7 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, }() // 1. Check if there is a Tailscale Service associated with this Ingress. - pg := ing.Annotations[AnnotationProxyGroup] - cm, cfg, err := r.proxyGroupServeConfig(ctx, pg) + cm, cfg, err := r.proxyGroupServeConfig(ctx, pg.Name) if err != nil { return false, fmt.Errorf("error getting ProxyGroup serve config: %w", err) } @@ -553,13 +556,13 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } // 2. Clean up the Tailscale Service resources. - svcChanged, err = r.cleanupTailscaleService(ctx, svc, logger) + svcChanged, err = r.cleanupTailscaleService(ctx, svc, logger, tsClient) if err != nil { return false, fmt.Errorf("error deleting Tailscale Service: %w", err) } // 3. 
Clean up any cluster resources - if err := cleanupCertResources(ctx, r.Client, r.lc, r.tsNamespace, pg, serviceName); err != nil { + if err = cleanupCertResources(ctx, r.Client, r.tsNamespace, serviceName, pg); err != nil { return false, fmt.Errorf("failed to clean up cert resources: %w", err) } @@ -568,12 +571,12 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, } // 4. Unadvertise the Tailscale Service in tailscaled config. - if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, serviceAdvertisementOff, logger); err != nil { + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, serviceName, serviceAdvertisementOff, pg); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } // 5. Remove the Tailscale Service from the serve config for the ProxyGroup. - logger.Infof("Removing TailscaleService %q from serve config for ProxyGroup %q", hostname, pg) + logger.Infof("Removing TailscaleService %q from serve config for ProxyGroup %q", hostname, pg.Name) delete(cfg.Services, serviceName) cfgBytes, err := json.Marshal(cfg) if err != nil { @@ -631,19 +634,6 @@ func (r *HAIngressReconciler) proxyGroupServeConfig(ctx context.Context, pg stri return cm, cfg, nil } -type localClient interface { - StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) -} - -// tailnetCertDomain returns the base domain (TCD) of the current tailnet. -func tailnetCertDomain(ctx context.Context, lc localClient) (string, error) { - st, err := lc.StatusWithoutPeers(ctx) - if err != nil { - return "", fmt.Errorf("error getting tailscale status: %w", err) - } - return st.CurrentTailnet.MagicDNSSuffix, nil -} - // shouldExpose returns true if the Ingress should be exposed over Tailscale in HA mode (on a ProxyGroup). 
func (r *HAIngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { isTSIngress := ing != nil && @@ -708,7 +698,7 @@ func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networki // If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference. // If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing. // It returns true if an existing Tailscale Service was updated to remove owner reference, as well as any error that occurred. -func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc *tailscale.VIPService, logger *zap.SugaredLogger) (updated bool, _ error) { +func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc *tailscale.VIPService, logger *zap.SugaredLogger, tsClient tsClient) (updated bool, _ error) { if svc == nil { return false, nil } @@ -731,7 +721,7 @@ func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc * } if len(o.OwnerRefs) == 1 { logger.Infof("Deleting Tailscale Service %q", svc.Name) - if err = r.tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { + if err = tsClient.DeleteVIPService(ctx, svc.Name); err != nil && !isErrorTailscaleServiceNotFound(err) { return false, err } @@ -745,7 +735,7 @@ func (r *HAIngressReconciler) cleanupTailscaleService(ctx context.Context, svc * return false, fmt.Errorf("error marshalling updated Tailscale Service owner reference: %w", err) } svc.Annotations[ownerAnnotation] = string(json) - return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) + return true, tsClient.CreateOrUpdateVIPService(ctx, svc) } // isHTTPEndpointEnabled returns true if the Ingress has been configured to expose an HTTP endpoint to tailnet. 
@@ -765,10 +755,10 @@ const ( serviceAdvertisementHTTPAndHTTPS // Both ports 80 and 443 should be advertised ) -func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, mode serviceAdvertisementMode, logger *zap.SugaredLogger) (err error) { +func (r *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, serviceName tailcfg.ServiceName, mode serviceAdvertisementMode, pg *tsapi.ProxyGroup) (err error) { // Get all config Secrets for this ProxyGroup. secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig))); err != nil { + if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeConfig))); err != nil { return fmt.Errorf("failed to list config Secrets: %w", err) } @@ -780,7 +770,7 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con // The only exception is Ingresses with an HTTP endpoint enabled - if an // Ingress has an HTTP endpoint enabled, it will be advertised even if the // TLS cert is not yet provisioned. 
- hasCert, err := hasCerts(ctx, a.Client, a.lc, a.tsNamespace, serviceName) + hasCert, err := hasCerts(ctx, r.Client, r.tsNamespace, serviceName, pg) if err != nil { return fmt.Errorf("error checking TLS credentials provisioned for service %q: %w", serviceName, err) } @@ -820,7 +810,7 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con } if updated { - if err := a.Update(ctx, &secret); err != nil { + if err := r.Update(ctx, &secret); err != nil { return fmt.Errorf("error updating ProxyGroup config Secret: %w", err) } } @@ -978,12 +968,12 @@ func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pg *tsapi // cleanupCertResources ensures that the TLS Secret and associated RBAC // resources that allow proxies to read/write to the Secret are deleted. -func cleanupCertResources(ctx context.Context, cl client.Client, lc localClient, tsNamespace, pgName string, serviceName tailcfg.ServiceName) error { - domainName, err := dnsNameForService(ctx, lc, serviceName) +func cleanupCertResources(ctx context.Context, cl client.Client, tsNamespace string, serviceName tailcfg.ServiceName, pg *tsapi.ProxyGroup) error { + domainName, err := dnsNameForService(ctx, cl, serviceName, pg, tsNamespace) if err != nil { return fmt.Errorf("error getting DNS name for Tailscale Service %s: %w", serviceName, err) } - labels := certResourceLabels(pgName, domainName) + labels := certResourceLabels(pg.Name, domainName) if err := cl.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, client.InNamespace(tsNamespace), client.MatchingLabels(labels)); err != nil { return fmt.Errorf("error deleting RoleBinding for domain name %s: %w", domainName, err) } @@ -1093,19 +1083,9 @@ func certResourceLabels(pgName, domain string) map[string]string { } } -// dnsNameForService returns the DNS name for the given Tailscale Service's name. 
-func dnsNameForService(ctx context.Context, lc localClient, svc tailcfg.ServiceName) (string, error) { - s := svc.WithoutPrefix() - tcd, err := tailnetCertDomain(ctx, lc) - if err != nil { - return "", fmt.Errorf("error determining DNS name base: %w", err) - } - return s + "." + tcd, nil -} - // hasCerts checks if the TLS Secret for the given service has non-zero cert and key data. -func hasCerts(ctx context.Context, cl client.Client, lc localClient, ns string, svc tailcfg.ServiceName) (bool, error) { - domain, err := dnsNameForService(ctx, lc, svc) +func hasCerts(ctx context.Context, cl client.Client, ns string, svc tailcfg.ServiceName, pg *tsapi.ProxyGroup) (bool, error) { + domain, err := dnsNameForService(ctx, cl, svc, pg, ns) if err != nil { return false, fmt.Errorf("failed to get DNS name for service: %w", err) } diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 480e6a26ec65e..33e27ef371d90 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -28,7 +28,6 @@ import ( "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" - "tailscale.com/ipn/ipnstate" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -562,16 +561,18 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { } // Add the Tailscale Service to prefs to have the Ingress recognised as ready. 
- mustCreate(t, fc, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pg-0", - Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeState), - }, - Data: map[string][]byte{ - "_current-profile": []byte("profile-foo"), - "profile-foo": []byte(`{"AdvertiseServices":["svc:my-svc"],"Config":{"NodeID":"node-foo"}}`), - }, + mustUpdate(t, fc, "operator-ns", "test-pg-0", func(o *corev1.Secret) { + var p prefs + var err error + if err = json.Unmarshal(o.Data["test"], &p); err != nil { + t.Errorf("failed to unmarshal preferences: %v", err) + } + + p.AdvertiseServices = []string{"svc:my-svc"} + o.Data["test"], err = json.Marshal(p) + if err != nil { + t.Errorf("failed to marshal preferences: %v", err) + } }) // Reconcile and re-fetch Ingress. @@ -685,17 +686,19 @@ func TestIngressPGReconciler_HTTPRedirect(t *testing.T) { t.Fatal(err) } - // Add the Tailscale Service to prefs to have the Ingress recognised as ready - mustCreate(t, fc, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pg-0", - Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeState), - }, - Data: map[string][]byte{ - "_current-profile": []byte("profile-foo"), - "profile-foo": []byte(`{"AdvertiseServices":["svc:my-svc"],"Config":{"NodeID":"node-foo"}}`), - }, + // Add the Tailscale Service to prefs to have the Ingress recognised as ready. 
+ mustUpdate(t, fc, "operator-ns", "test-pg-0", func(o *corev1.Secret) { + var p prefs + var err error + if err = json.Unmarshal(o.Data["test"], &p); err != nil { + t.Errorf("failed to unmarshal preferences: %v", err) + } + + p.AdvertiseServices = []string{"svc:my-svc"} + o.Data["test"], err = json.Marshal(p) + if err != nil { + t.Errorf("failed to marshal preferences: %v", err) + } }) // Reconcile and re-fetch Ingress @@ -818,17 +821,19 @@ func TestIngressPGReconciler_HTTPEndpointAndRedirectConflict(t *testing.T) { t.Fatal(err) } - // Add the Tailscale Service to prefs to have the Ingress recognised as ready - mustCreate(t, fc, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pg-0", - Namespace: "operator-ns", - Labels: pgSecretLabels("test-pg", kubetypes.LabelSecretTypeState), - }, - Data: map[string][]byte{ - "_current-profile": []byte("profile-foo"), - "profile-foo": []byte(`{"AdvertiseServices":["svc:my-svc"],"Config":{"NodeID":"node-foo"}}`), - }, + // Add the Tailscale Service to prefs to have the Ingress recognised as ready. 
+ mustUpdate(t, fc, "operator-ns", "test-pg-0", func(o *corev1.Secret) { + var p prefs + var err error + if err = json.Unmarshal(o.Data["test"], &p); err != nil { + t.Errorf("failed to unmarshal preferences: %v", err) + } + + p.AdvertiseServices = []string{"svc:my-svc"} + o.Data["test"], err = json.Marshal(p) + if err != nil { + t.Errorf("failed to marshal preferences: %v", err) + } }) // Reconcile and re-fetch Ingress @@ -1109,6 +1114,7 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, pgName string, expec func createPGResources(t *testing.T, fc client.Client, pgName string) { t.Helper() + // Pre-create the ProxyGroup pg := &tsapi.ProxyGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -1145,6 +1151,30 @@ func createPGResources(t *testing.T, fc client.Client, pgName string) { }, } mustCreate(t, fc, pgCfgSecret) + + pr := prefs{} + pr.Config.UserProfile.LoginName = "test.ts.net" + pr.Config.NodeID = "test" + + p, err := json.Marshal(pr) + if err != nil { + t.Fatalf("marshaling prefs: %v", err) + } + + // Pre-create a state secret for the ProxyGroup + pgStateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pgName, 0), + Namespace: "operator-ns", + Labels: pgSecretLabels(pgName, kubetypes.LabelSecretTypeState), + }, + Data: map[string][]byte{ + currentProfileKey: []byte("test"), + "test": p, + }, + } + mustCreate(t, fc, pgStateSecret) + pg.Status.Conditions = []metav1.Condition{ { Type: string(tsapi.ProxyGroupAvailable), @@ -1179,14 +1209,6 @@ func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeT t.Fatal(err) } - lc := &fakeLocalClient{ - status: &ipnstate.Status{ - CurrentTailnet: &ipnstate.TailnetStatus{ - MagicDNSSuffix: "ts.net", - }, - }, - } - ingPGR := &HAIngressReconciler{ Client: fc, tsClient: ft, @@ -1195,7 +1217,6 @@ func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeT tsnetServer: fakeTsnetServer, logger: zl.Sugar(), recorder: record.NewFakeRecorder(10), - lc: 
lc, ingressClassName: tsIngressClass.Name, } diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 81f62d4775671..ef55d27481266 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -441,7 +441,6 @@ func runReconcilers(opts reconcilerOpts) { defaultTags: strings.Split(opts.proxyTags, ","), Client: mgr.GetClient(), logger: opts.log.Named("ingress-pg-reconciler"), - lc: lc, operatorID: id, tsNamespace: opts.tailscaleNamespace, ingressClassName: opts.ingressClassName, @@ -467,7 +466,6 @@ func runReconcilers(opts reconcilerOpts) { defaultTags: strings.Split(opts.proxyTags, ","), Client: mgr.GetClient(), logger: opts.log.Named("service-pg-reconciler"), - lc: lc, clock: tstime.DefaultClock{}, operatorID: id, tsNamespace: opts.tailscaleNamespace, @@ -686,7 +684,6 @@ func runReconcilers(opts reconcilerOpts) { logger: opts.log.Named("kube-apiserver-ts-service-reconciler"), tsClient: opts.tsClient, tsNamespace: opts.tailscaleNamespace, - lc: lc, defaultTags: strings.Split(opts.proxyTags, ","), operatorID: id, clock: tstime.DefaultClock{}, diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 2007824988fc7..538933f14dbe1 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -118,20 +118,15 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ return reconcile.Result{}, fmt.Errorf("failed to get tailscale.com ProxyGroup: %w", err) } - tailscaleClient := r.tsClient - if pg.Spec.Tailnet != "" { - tc, err := clientForTailnet(ctx, r.Client, r.tsNamespace, pg.Spec.Tailnet) - if err != nil { - oldPGStatus := pg.Status.DeepCopy() - nrr := &notReadyReason{ - reason: reasonProxyGroupTailnetUnavailable, - message: err.Error(), - } - - return reconcile.Result{}, errors.Join(err, r.maybeUpdateStatus(ctx, logger, pg, oldPGStatus, nrr, make(map[string][]netip.AddrPort))) + tailscaleClient, loginUrl, err := r.getClientAndLoginURL(ctx, pg.Spec.Tailnet) + if 
err != nil { + oldPGStatus := pg.Status.DeepCopy() + nrr := &notReadyReason{ + reason: reasonProxyGroupTailnetUnavailable, + message: fmt.Errorf("failed to get tailscale client and loginUrl: %w", err).Error(), } - tailscaleClient = tc + return reconcile.Result{}, errors.Join(err, r.maybeUpdateStatus(ctx, logger, pg, oldPGStatus, nrr, make(map[string][]netip.AddrPort))) } if markedForDeletion(pg) { @@ -161,7 +156,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ } oldPGStatus := pg.Status.DeepCopy() - staticEndpoints, nrr, err := r.reconcilePG(ctx, tailscaleClient, pg, logger) + staticEndpoints, nrr, err := r.reconcilePG(ctx, tailscaleClient, loginUrl, pg, logger) return reconcile.Result{}, errors.Join(err, r.maybeUpdateStatus(ctx, logger, pg, oldPGStatus, nrr, staticEndpoints)) } @@ -169,7 +164,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ // for deletion. It is separated out from Reconcile to make a clear separation // between reconciling the ProxyGroup, and posting the status of its created // resources onto the ProxyGroup status field. -func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (map[string][]netip.AddrPort, *notReadyReason, error) { +func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, tailscaleClient tsClient, loginUrl string, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger) (map[string][]netip.AddrPort, *notReadyReason, error) { if !slices.Contains(pg.Finalizers, FinalizerName) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. 
So, @@ -210,7 +205,7 @@ func (r *ProxyGroupReconciler) reconcilePG(ctx context.Context, tailscaleClient return notReady(reasonProxyGroupInvalid, fmt.Sprintf("invalid ProxyGroup spec: %v", err)) } - staticEndpoints, nrr, err := r.maybeProvision(ctx, tailscaleClient, pg, proxyClass) + staticEndpoints, nrr, err := r.maybeProvision(ctx, tailscaleClient, loginUrl, pg, proxyClass) if err != nil { return nil, nrr, err } @@ -296,7 +291,7 @@ func (r *ProxyGroupReconciler) validate(ctx context.Context, pg *tsapi.ProxyGrou return errors.Join(errs...) } -func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { +func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClient tsClient, loginUrl string, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { logger := r.logger(pg.Name) r.mu.Lock() r.ensureAddedToGaugeForProxyGroup(pg) @@ -318,7 +313,7 @@ func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClie } } - staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, tailscaleClient, pg, proxyClass, svcToNodePorts) + staticEndpoints, err := r.ensureConfigSecretsCreated(ctx, tailscaleClient, loginUrl, pg, proxyClass, svcToNodePorts) if err != nil { if _, ok := errors.AsType[*FindStaticEndpointErr](err); ok { reason := reasonProxyGroupCreationFailed @@ -628,7 +623,7 @@ func (r *ProxyGroupReconciler) ensureNodePortServiceCreated(ctx context.Context, // tailnet devices when the number of replicas specified is reduced. 
func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, pc *tsapi.ProxyClass) error { logger := r.logger(pg.Name) - metadata, err := r.getNodeMetadata(ctx, pg) + metadata, err := getNodeMetadata(ctx, pg, r.Client, r.tsNamespace) if err != nil { return err } @@ -686,7 +681,7 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, tai func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup) (bool, error) { logger := r.logger(pg.Name) - metadata, err := r.getNodeMetadata(ctx, pg) + metadata, err := getNodeMetadata(ctx, pg, r.Client, r.tsNamespace) if err != nil { return false, err } @@ -731,6 +726,7 @@ func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, tailscal func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( ctx context.Context, tailscaleClient tsClient, + loginUrl string, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass, svcToNodePorts map[string]uint16, @@ -866,8 +862,8 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( } } - if r.loginServer != "" { - cfg.ServerURL = &r.loginServer + if loginUrl != "" { + cfg.ServerURL = &loginUrl } if proxyClass != nil && proxyClass.Spec.TailscaleConfig != nil { @@ -895,7 +891,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( return nil, err } - configs, err := pgTailscaledConfig(pg, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices, r.loginServer) + configs, err := pgTailscaledConfig(pg, loginUrl, proxyClass, i, authKey, endpoints[nodePortSvcName], existingAdvertiseServices) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) } @@ -1052,7 +1048,7 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len())) } -func pgTailscaledConfig(pg *tsapi.ProxyGroup, pc 
*tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string, loginServer string) (tailscaledConfigs, error) { +func pgTailscaledConfig(pg *tsapi.ProxyGroup, loginServer string, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", @@ -1103,10 +1099,10 @@ func extractAdvertiseServicesConfig(cfgSecret *corev1.Secret) ([]string, error) // some pods have failed to write state. // // The returned metadata will contain an entry for each state Secret that exists. -func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup) (metadata []nodeMetadata, _ error) { +func getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup, cl client.Client, tsNamespace string) (metadata []nodeMetadata, _ error) { // List all state Secrets owned by this ProxyGroup. secrets := &corev1.SecretList{} - if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeState))); err != nil { + if err := cl.List(ctx, secrets, client.InNamespace(tsNamespace), client.MatchingLabels(pgSecretLabels(pg.Name, kubetypes.LabelSecretTypeState))); err != nil { return nil, fmt.Errorf("failed to list state Secrets: %w", err) } for _, secret := range secrets.Items { @@ -1130,7 +1126,7 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr } pod := &corev1.Pod{} - if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: fmt.Sprintf("%s-%d", pg.Name, ordinal)}, pod); err != nil && !apierrors.IsNotFound(err) { + if err := cl.Get(ctx, client.ObjectKey{Namespace: tsNamespace, Name: fmt.Sprintf("%s-%d", pg.Name, ordinal)}, pod); err != nil && !apierrors.IsNotFound(err) { return nil, err } else if err == nil { nm.podUID = string(pod.UID) @@ -1149,7 +1145,7 @@ func (r 
*ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr // getRunningProxies will return status for all proxy Pods whose state Secret // has an up to date Pod UID and at least a hostname. func (r *ProxyGroupReconciler) getRunningProxies(ctx context.Context, pg *tsapi.ProxyGroup, staticEndpoints map[string][]netip.AddrPort) (devices []tsapi.TailnetDevice, _ error) { - metadata, err := r.getNodeMetadata(ctx, pg) + metadata, err := getNodeMetadata(ctx, pg, r.Client, r.tsNamespace) if err != nil { return nil, err } @@ -1193,6 +1189,29 @@ func (r *ProxyGroupReconciler) getRunningProxies(ctx context.Context, pg *tsapi. return devices, nil } +// getClientAndLoginURL returns the appropriate Tailscale client and resolved login URL +// for the given tailnet name. If no tailnet is specified, returns the default client +// and login server. Applies fallback to the operator's login server if the tailnet +// doesn't specify a custom login URL. +func (r *ProxyGroupReconciler) getClientAndLoginURL(ctx context.Context, tailnetName string) (tsClient, + string, error) { + if tailnetName == "" { + return r.tsClient, r.loginServer, nil + } + + tc, loginUrl, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tailnetName) + if err != nil { + return nil, "", err + } + + // Apply fallback if tailnet doesn't specify custom login URL + if loginUrl == "" { + loginUrl = r.loginServer + } + + return tc, loginUrl, nil +} + type nodeMetadata struct { ordinal int stateSecret *corev1.Secret diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index c88a6df173647..5f33a94905785 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -198,14 +198,9 @@ func IsHTTPSEnabledOnTailnet(tsnetServer tsnetServer) bool { // Provision ensures that the StatefulSet for the given service is running and // up to date. 
func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.SugaredLogger, sts *tailscaleSTSConfig) (*corev1.Service, error) { - tailscaleClient := a.tsClient - if sts.Tailnet != "" { - tc, err := clientForTailnet(ctx, a.Client, a.operatorNamespace, sts.Tailnet) - if err != nil { - return nil, err - } - - tailscaleClient = tc + tailscaleClient, loginUrl, err := a.getClientAndLoginURL(ctx, sts.Tailnet) + if err != nil { + return nil, fmt.Errorf("failed to get tailscale client and loginUrl: %w", err) } // Do full reconcile. @@ -227,7 +222,7 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga } sts.ProxyClass = proxyClass - secretNames, err := a.provisionSecrets(ctx, tailscaleClient, logger, sts, hsvc) + secretNames, err := a.provisionSecrets(ctx, tailscaleClient, loginUrl, sts, hsvc, logger) if err != nil { return nil, fmt.Errorf("failed to create or get API key secret: %w", err) } @@ -248,13 +243,36 @@ func (a *tailscaleSTSReconciler) Provision(ctx context.Context, logger *zap.Suga return hsvc, nil } +// getClientAndLoginURL returns the appropriate Tailscale client and resolved login URL +// for the given tailnet name. If no tailnet is specified, returns the default client +// and login server. Applies fallback to the operator's login server if the tailnet +// doesn't specify a custom login URL. +func (a *tailscaleSTSReconciler) getClientAndLoginURL(ctx context.Context, tailnetName string) (tsClient, + string, error) { + if tailnetName == "" { + return a.tsClient, a.loginServer, nil + } + + tc, loginUrl, err := clientForTailnet(ctx, a.Client, a.operatorNamespace, tailnetName) + if err != nil { + return nil, "", err + } + + // Apply fallback if tailnet doesn't specify custom login URL + if loginUrl == "" { + loginUrl = a.loginServer + } + + return tc, loginUrl, nil +} + // Cleanup removes all resources associated that were created by Provision with // the given labels. 
It returns true when all resources have been removed, // otherwise it returns false and the caller should retry later. func (a *tailscaleSTSReconciler) Cleanup(ctx context.Context, tailnet string, logger *zap.SugaredLogger, labels map[string]string, typ string) (done bool, _ error) { tailscaleClient := a.tsClient if tailnet != "" { - tc, err := clientForTailnet(ctx, a.Client, a.operatorNamespace, tailnet) + tc, _, err := clientForTailnet(ctx, a.Client, a.operatorNamespace, tailnet) if err != nil { logger.Errorf("failed to get tailscale client: %v", err) return false, nil @@ -384,7 +402,7 @@ func (a *tailscaleSTSReconciler) reconcileHeadlessService(ctx context.Context, l return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec }) } -func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscaleClient tsClient, logger *zap.SugaredLogger, stsC *tailscaleSTSConfig, hsvc *corev1.Service) ([]string, error) { +func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscaleClient tsClient, loginUrl string, stsC *tailscaleSTSConfig, hsvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { secretNames := make([]string, stsC.Replicas) // Start by ensuring we have Secrets for the desired number of replicas. This will handle both creating and scaling @@ -433,7 +451,7 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, tailscale } } - configs, err := tailscaledConfig(stsC, authKey, orig, hostname) + configs, err := tailscaledConfig(stsC, loginUrl, authKey, orig, hostname) if err != nil { return nil, fmt.Errorf("error creating tailscaled config: %w", err) } @@ -1062,7 +1080,7 @@ func isMainContainer(c *corev1.Container) bool { // tailscaledConfig takes a proxy config, a newly generated auth key if generated and a Secret with the previous proxy // state and auth key and returns tailscaled config files for currently supported proxy versions. 
-func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *corev1.Secret, hostname string) (tailscaledConfigs, error) { +func tailscaledConfig(stsC *tailscaleSTSConfig, loginUrl string, newAuthkey string, oldSecret *corev1.Secret, hostname string) (tailscaledConfigs, error) { conf := &ipn.ConfigVAlpha{ Version: "alpha0", AcceptDNS: "false", @@ -1101,6 +1119,10 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, newAuthkey string, oldSecret *co conf.AuthKey = key } + if loginUrl != "" { + conf.ServerURL = &loginUrl + } + capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) capVerConfigs[107] = *conf diff --git a/cmd/k8s-operator/svc-for-pg.go b/cmd/k8s-operator/svc-for-pg.go index 7cbbaebaa89ac..e1891a4a9f359 100644 --- a/cmd/k8s-operator/svc-for-pg.go +++ b/cmd/k8s-operator/svc-for-pg.go @@ -27,6 +27,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" @@ -41,13 +42,10 @@ import ( ) const ( - svcPGFinalizerName = "tailscale.com/service-pg-finalizer" - + svcPGFinalizerName = "tailscale.com/service-pg-finalizer" reasonIngressSvcInvalid = "IngressSvcInvalid" - reasonIngressSvcValid = "IngressSvcValid" reasonIngressSvcConfigured = "IngressSvcConfigured" reasonIngressSvcNoBackendsConfigured = "IngressSvcNoBackendsConfigured" - reasonIngressSvcCreationFailed = "IngressSvcCreationFailed" ) var gaugePGServiceResources = clientmetric.NewGauge(kubetypes.MetricServicePGResourceCount) @@ -61,7 +59,6 @@ type HAServiceReconciler struct { logger *zap.SugaredLogger tsClient tsClient tsNamespace string - lc localClient defaultTags []string operatorID string // stableID of the operator's Tailscale device @@ -100,12 +97,41 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque return res, fmt.Errorf("failed to get Service: %w", 
err) } + pgName := svc.Annotations[AnnotationProxyGroup] + if pgName == "" { + logger.Infof("[unexpected] no ProxyGroup annotation, skipping Tailscale Service provisioning") + return res, nil + } + + logger = logger.With("ProxyGroup", pgName) + + pg := &tsapi.ProxyGroup{} + err = r.Get(ctx, client.ObjectKey{Name: pgName}, pg) + switch { + case apierrors.IsNotFound(err): + logger.Infof("ProxyGroup %q does not exist, it may have been deleted. Reconciliation for service %q will be skipped until the ProxyGroup is found", pgName, svc.Name) + r.recorder.Event(svc, corev1.EventTypeWarning, "ProxyGroupNotFound", "ProxyGroup not found") + return res, nil + case err != nil: + return res, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) + } + + if !tsoperator.ProxyGroupAvailable(pg) { + logger.Infof("ProxyGroup is not (yet) ready") + return res, nil + } + + tailscaleClient, err := clientFromProxyGroup(ctx, r.Client, pg, r.tsNamespace, r.tsClient) + if err != nil { + return res, fmt.Errorf("failed to get tailscale client: %w", err) + } + hostname := nameForService(svc) logger = logger.With("hostname", hostname) if !svc.DeletionTimestamp.IsZero() || !r.isTailscaleService(svc) { logger.Debugf("Service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up") - _, err = r.maybeCleanup(ctx, hostname, svc, logger) + _, err = r.maybeCleanup(ctx, hostname, svc, logger, tailscaleClient) return res, err } @@ -113,7 +139,7 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque // is the case, we reconcile the Ingress one more time to ensure that concurrent updates to the Tailscale Service in a // multi-cluster Ingress setup have not resulted in another actor overwriting our Tailscale Service update. 
needsRequeue := false - needsRequeue, err = r.maybeProvision(ctx, hostname, svc, logger) + needsRequeue, err = r.maybeProvision(ctx, hostname, svc, pg, logger, tailscaleClient) if err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) @@ -136,7 +162,7 @@ func (r *HAServiceReconciler) Reconcile(ctx context.Context, req reconcile.Reque // If a Tailscale Service exists, but does not have an owner reference from any operator, we error // out assuming that this is an owner reference created by an unknown actor. // Returns true if the operation resulted in a Tailscale Service update. -func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcsChanged bool, err error) { +func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname string, svc *corev1.Service, pg *tsapi.ProxyGroup, logger *zap.SugaredLogger, tsClient tsClient) (svcsChanged bool, err error) { oldSvcStatus := svc.Status.DeepCopy() defer func() { if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) { @@ -145,30 +171,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin } }() - pgName := svc.Annotations[AnnotationProxyGroup] - if pgName == "" { - logger.Infof("[unexpected] no ProxyGroup annotation, skipping Tailscale Service provisioning") - return false, nil - } - - logger = logger.With("ProxyGroup", pgName) - - pg := &tsapi.ProxyGroup{} - if err := r.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { - if apierrors.IsNotFound(err) { - msg := fmt.Sprintf("ProxyGroup %q does not exist", pgName) - logger.Warnf(msg) - r.recorder.Event(svc, corev1.EventTypeWarning, "ProxyGroupNotFound", msg) - return false, nil - } - return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) - } - if !tsoperator.ProxyGroupAvailable(pg) { - logger.Infof("ProxyGroup is not (yet) ready") - return false, nil - } - - 
if err := r.validateService(ctx, svc, pg); err != nil { + if err = r.validateService(ctx, svc, pg); err != nil { r.recorder.Event(svc, corev1.EventTypeWarning, reasonIngressSvcInvalid, err.Error()) tsoperator.SetServiceCondition(svc, tsapi.IngressSvcValid, metav1.ConditionFalse, reasonIngressSvcInvalid, err.Error(), r.clock, logger) return false, nil @@ -198,7 +201,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // that in edge cases (a single update changed both hostname and removed // ProxyGroup annotation) the Tailscale Service is more likely to be // (eventually) removed. - svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pgName, logger) + svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pg.Name, logger, tsClient) if err != nil { return false, fmt.Errorf("failed to cleanup Tailscale Service resources for ProxyGroup: %w", err) } @@ -206,7 +209,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // 2. Ensure that there isn't a Tailscale Service with the same hostname // already created and not owned by this Service. 
serviceName := tailcfg.ServiceName("svc:" + hostname) - existingTSSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + existingTSSvc, err := tsClient.GetVIPService(ctx, serviceName) if err != nil && !isErrorTailscaleServiceNotFound(err) { return false, fmt.Errorf("error getting Tailscale Service %q: %w", hostname, err) } @@ -248,13 +251,13 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin !reflect.DeepEqual(tsSvc.Tags, existingTSSvc.Tags) || !ownersAreSetAndEqual(tsSvc, existingTSSvc) { logger.Infof("Ensuring Tailscale Service exists and is up to date") - if err := r.tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { + if err := tsClient.CreateOrUpdateVIPService(ctx, tsSvc); err != nil { return false, fmt.Errorf("error creating Tailscale Service: %w", err) } existingTSSvc = tsSvc } - cm, cfgs, err := ingressSvcsConfigs(ctx, r.Client, pgName, r.tsNamespace) + cm, cfgs, err := ingressSvcsConfigs(ctx, r.Client, pg.Name, r.tsNamespace) if err != nil { return false, fmt.Errorf("error retrieving ingress services configuration: %w", err) } @@ -264,7 +267,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin } if existingTSSvc.Addrs == nil { - existingTSSvc, err = r.tsClient.GetVIPService(ctx, tsSvc.Name) + existingTSSvc, err = tsClient.GetVIPService(ctx, tsSvc.Name) if err != nil { return false, fmt.Errorf("error getting Tailscale Service: %w", err) } @@ -329,7 +332,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin return false, fmt.Errorf("failed to update tailscaled config: %w", err) } - count, err := r.numberPodsAdvertising(ctx, pgName, serviceName) + count, err := r.numberPodsAdvertising(ctx, pg.Name, serviceName) if err != nil { return false, fmt.Errorf("failed to get number of advertised Pods: %w", err) } @@ -345,7 +348,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin conditionReason := 
reasonIngressSvcNoBackendsConfigured conditionMessage := fmt.Sprintf("%d/%d proxy backends ready and advertising", count, pgReplicas(pg)) if count != 0 { - dnsName, err := r.dnsNameForService(ctx, serviceName) + dnsName, err := dnsNameForService(ctx, r.Client, serviceName, pg, r.tsNamespace) if err != nil { return false, fmt.Errorf("error getting DNS name for Service: %w", err) } @@ -371,7 +374,7 @@ func (r *HAServiceReconciler) maybeProvision(ctx context.Context, hostname strin // Service is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the Tailscale Service is only // deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference // corresponding to this Service. -func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger) (svcChanged bool, err error) { +func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, svc *corev1.Service, logger *zap.SugaredLogger, tsClient tsClient) (svcChanged bool, err error) { logger.Debugf("Ensuring any resources for Service are cleaned up") ix := slices.Index(svc.Finalizers, svcPGFinalizerName) if ix < 0 { @@ -389,7 +392,7 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, serviceName := tailcfg.ServiceName("svc:" + hostname) // 1. Clean up the Tailscale Service. 
- svcChanged, err = cleanupTailscaleService(ctx, r.tsClient, serviceName, r.operatorID, logger) + svcChanged, err = cleanupTailscaleService(ctx, tsClient, serviceName, r.operatorID, logger) if err != nil { return false, fmt.Errorf("error deleting Tailscale Service: %w", err) } @@ -422,14 +425,14 @@ func (r *HAServiceReconciler) maybeCleanup(ctx context.Context, hostname string, // Tailscale Services that are associated with the provided ProxyGroup and no longer managed this operator's instance are deleted, if not owned by other operator instances, else the owner reference is cleaned up. // Returns true if the operation resulted in existing Tailscale Service updates (owner reference removal). -func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) (svcsChanged bool, err error) { +func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger, tsClient tsClient) (svcsChanged bool, err error) { cm, config, err := ingressSvcsConfigs(ctx, r.Client, proxyGroupName, r.tsNamespace) if err != nil { return false, fmt.Errorf("failed to get ingress service config: %s", err) } svcList := &corev1.ServiceList{} - if err := r.Client.List(ctx, svcList, client.MatchingFields{indexIngressProxyGroup: proxyGroupName}); err != nil { + if err = r.Client.List(ctx, svcList, client.MatchingFields{indexIngressProxyGroup: proxyGroupName}); err != nil { return false, fmt.Errorf("failed to find Services for ProxyGroup %q: %w", proxyGroupName, err) } @@ -450,7 +453,7 @@ func (r *HAServiceReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - svcsChanged, err = cleanupTailscaleService(ctx, r.tsClient, tailcfg.ServiceName(tsSvcName), r.operatorID, logger) + svcsChanged, err = cleanupTailscaleService(ctx, tsClient, tailcfg.ServiceName(tsSvcName), r.operatorID, logger) if err != 
nil { return false, fmt.Errorf("deleting Tailscale Service %q: %w", tsSvcName, err) } @@ -510,15 +513,6 @@ func (r *HAServiceReconciler) shouldExposeClusterIP(svc *corev1.Service) bool { return isTailscaleLoadBalancerService(svc, r.isDefaultLoadBalancer) || hasExposeAnnotation(svc) } -// tailnetCertDomain returns the base domain (TCD) of the current tailnet. -func (r *HAServiceReconciler) tailnetCertDomain(ctx context.Context) (string, error) { - st, err := r.lc.StatusWithoutPeers(ctx) - if err != nil { - return "", fmt.Errorf("error getting tailscale status: %w", err) - } - return st.CurrentTailnet.MagicDNSSuffix, nil -} - // cleanupTailscaleService deletes any Tailscale Service by the provided name if it is not owned by operator instances other than this one. // If a Tailscale Service is found, but contains other owner references, only removes this operator's owner reference. // If a Tailscale Service by the given name is not found or does not contain this operator's owner reference, do nothing. 
@@ -570,10 +564,10 @@ func cleanupTailscaleService(ctx context.Context, tsClient tsClient, name tailcf return true, tsClient.CreateOrUpdateVIPService(ctx, svc) } -func (a *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceName, replicaName, pgName string, wantsCfg *ingressservices.Config, logger *zap.SugaredLogger) (bool, error) { +func (r *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceName, replicaName string, wantsCfg *ingressservices.Config, logger *zap.SugaredLogger) (bool, error) { logger.Debugf("checking backend routes for service '%s'", serviceName) pod := &corev1.Pod{} - err := a.Get(ctx, client.ObjectKey{Namespace: a.tsNamespace, Name: replicaName}, pod) + err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: replicaName}, pod) if apierrors.IsNotFound(err) { logger.Debugf("Pod %q not found", replicaName) return false, nil @@ -582,7 +576,7 @@ func (a *HAServiceReconciler) backendRoutesSetup(ctx context.Context, serviceNam return false, fmt.Errorf("failed to get Pod: %w", err) } secret := &corev1.Secret{} - err = a.Get(ctx, client.ObjectKey{Namespace: a.tsNamespace, Name: replicaName}, secret) + err = r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: replicaName}, secret) if apierrors.IsNotFound(err) { logger.Debugf("Secret %q not found", replicaName) return false, nil @@ -637,17 +631,17 @@ func isCurrentStatus(gotCfgs ingressservices.Status, pod *corev1.Pod, logger *za return true, nil } -func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, svc *corev1.Service, pgName string, serviceName tailcfg.ServiceName, cfg *ingressservices.Config, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { +func (r *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, svc *corev1.Service, pgName string, serviceName tailcfg.ServiceName, cfg *ingressservices.Config, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { 
logger.Debugf("checking advertisement for service '%s'", serviceName) // Get all config Secrets for this ProxyGroup. // Get all Pods secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig))); err != nil { + if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeConfig))); err != nil { return fmt.Errorf("failed to list config Secrets: %w", err) } if svc != nil && shouldBeAdvertised { - shouldBeAdvertised, err = a.checkEndpointsReady(ctx, svc, logger) + shouldBeAdvertised, err = r.checkEndpointsReady(ctx, svc, logger) if err != nil { return fmt.Errorf("failed to check readiness of Service '%s' endpoints: %w", svc.Name, err) } @@ -679,7 +673,7 @@ func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con logger.Infof("[unexpected] unable to determine replica name from config Secret name %q, unable to determine if backend routing has been configured", secret.Name) return nil } - ready, err := a.backendRoutesSetup(ctx, serviceName.String(), replicaName, pgName, cfg, logger) + ready, err := r.backendRoutesSetup(ctx, serviceName.String(), replicaName, cfg, logger) if err != nil { return fmt.Errorf("error checking backend routes: %w", err) } @@ -698,7 +692,7 @@ func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con updated = true } if updated { - if err := a.Update(ctx, &secret); err != nil { + if err := r.Update(ctx, &secret); err != nil { return fmt.Errorf("error updating ProxyGroup config Secret: %w", err) } } @@ -706,10 +700,10 @@ func (a *HAServiceReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return nil } -func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { +func (r *HAServiceReconciler) numberPodsAdvertising(ctx 
context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { // Get all state Secrets for this ProxyGroup. secrets := &corev1.SecretList{} - if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeState))); err != nil { + if err := r.List(ctx, secrets, client.InNamespace(r.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, kubetypes.LabelSecretTypeState))); err != nil { return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err) } @@ -731,12 +725,28 @@ func (a *HAServiceReconciler) numberPodsAdvertising(ctx context.Context, pgName } // dnsNameForService returns the DNS name for the given Tailscale Service name. -func (r *HAServiceReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { +func dnsNameForService(ctx context.Context, cl client.Client, svc tailcfg.ServiceName, pg *tsapi.ProxyGroup, namespace string) (string, error) { s := svc.WithoutPrefix() - tcd, err := r.tailnetCertDomain(ctx) - if err != nil { - return "", fmt.Errorf("error determining DNS name base: %w", err) + + md, err := getNodeMetadata(ctx, pg, cl, namespace) + switch { + case err != nil: + return "", fmt.Errorf("error getting node metadata: %w", err) + case len(md) == 0: + return "", fmt.Errorf("failed to find node metadata for ProxyGroup %q", pg.Name) + } + + // To determine the appropriate magic DNS name we take the first dns name we can find that is not empty and + // contains a period. + idx := slices.IndexFunc(md, func(metadata nodeMetadata) bool { + return metadata.dnsName != "" && strings.ContainsRune(metadata.dnsName, '.') + }) + if idx == -1 { + return "", fmt.Errorf("failed to find dns name for ProxyGroup %q", pg.Name) } + + tcd := strings.SplitN(md[idx].dnsName, ".", 2)[1] + return s + "." 
+ tcd, nil } diff --git a/cmd/k8s-operator/svc-for-pg_test.go b/cmd/k8s-operator/svc-for-pg_test.go index 3c478a90c8e21..7a767a9b898fa 100644 --- a/cmd/k8s-operator/svc-for-pg_test.go +++ b/cmd/k8s-operator/svc-for-pg_test.go @@ -22,7 +22,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "tailscale.com/ipn/ipnstate" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/ingressservices" @@ -194,14 +194,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien t.Fatal(err) } - lc := &fakeLocalClient{ - status: &ipnstate.Status{ - CurrentTailnet: &ipnstate.TailnetStatus{ - MagicDNSSuffix: "ts.net", - }, - }, - } - cl := tstest.NewClock(tstest.ClockOpts{}) svcPGR := &HAServiceReconciler{ Client: fc, @@ -211,7 +203,6 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien tsNamespace: "operator-ns", logger: zl.Sugar(), recorder: record.NewFakeRecorder(10), - lc: lc, } return svcPGR, pgStateSecret, fc, ft, cl @@ -279,15 +270,12 @@ func TestValidateService(t *testing.T) { func TestServicePGReconciler_MultiCluster(t *testing.T) { var ft *fakeTSClient - var lc localClient for i := 0; i <= 10; i++ { pgr, stateSecret, fc, fti, _ := setupServiceTest(t) if i == 0 { ft = fti - lc = pgr.lc } else { pgr.tsClient = ft - pgr.lc = lc } svc, _ := setupTestService(t, "test-multi-cluster", "", "4.3.2.1", fc, stateSecret) diff --git a/cmd/k8s-operator/tailnet.go b/cmd/k8s-operator/tailnet.go index 57c749bec31ee..439489f750665 100644 --- a/cmd/k8s-operator/tailnet.go +++ b/cmd/k8s-operator/tailnet.go @@ -20,19 +20,19 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" ) -func clientForTailnet(ctx context.Context, cl client.Client, namespace, name string) (tsClient, error) { +func clientForTailnet(ctx context.Context, cl client.Client, namespace, name string) (tsClient, string, 
error) { var tn tsapi.Tailnet if err := cl.Get(ctx, client.ObjectKey{Name: name}, &tn); err != nil { - return nil, fmt.Errorf("failed to get tailnet %q: %w", name, err) + return nil, "", fmt.Errorf("failed to get tailnet %q: %w", name, err) } if !operatorutils.TailnetIsReady(&tn) { - return nil, fmt.Errorf("tailnet %q is not ready", name) + return nil, "", fmt.Errorf("tailnet %q is not ready", name) } var secret corev1.Secret if err := cl.Get(ctx, client.ObjectKey{Name: tn.Spec.Credentials.SecretName, Namespace: namespace}, &secret); err != nil { - return nil, fmt.Errorf("failed to get Secret %q in namespace %q: %w", tn.Spec.Credentials.SecretName, namespace, err) + return nil, "", fmt.Errorf("failed to get Secret %q in namespace %q: %w", tn.Spec.Credentials.SecretName, namespace, err) } baseURL := ipn.DefaultControlURL @@ -54,5 +54,18 @@ func clientForTailnet(ctx context.Context, cl client.Client, namespace, name str ts.HTTPClient = httpClient ts.BaseURL = baseURL - return ts, nil + return ts, baseURL, nil +} + +func clientFromProxyGroup(ctx context.Context, cl client.Client, pg *tsapi.ProxyGroup, namespace string, def tsClient) (tsClient, error) { + if pg.Spec.Tailnet == "" { + return def, nil + } + + tailscaleClient, _, err := clientForTailnet(ctx, cl, namespace, pg.Spec.Tailnet) + if err != nil { + return nil, err + } + + return tailscaleClient, nil } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 191a31723eea9..d418f01284b95 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -31,9 +31,9 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" - "tailscale.com/ipn/ipnstate" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" @@ -985,18 +985,3 @@ func (c *fakeTSClient) 
DeleteVIPService(ctx context.Context, name tailcfg.Servic } return nil } - -type fakeLocalClient struct { - status *ipnstate.Status -} - -func (f *fakeLocalClient) StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { - if f.status == nil { - return &ipnstate.Status{ - Self: &ipnstate.PeerStatus{ - DNSName: "test-node.test.ts.net.", - }, - }, nil - } - return f.status, nil -} diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 0a497a46ed955..0a7dbda58c028 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -99,14 +99,9 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return reconcile.Result{}, nil } - tailscaleClient := r.tsClient - if tsr.Spec.Tailnet != "" { - tc, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tsr.Spec.Tailnet) - if err != nil { - return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderTailnetUnavailable, err.Error()) - } - - tailscaleClient = tc + tailscaleClient, loginUrl, err := r.getClientAndLoginURL(ctx, tsr.Spec.Tailnet) + if err != nil { + return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderTailnetUnavailable, err.Error()) } if markedForDeletion(tsr) { @@ -149,7 +144,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return setStatusReady(tsr, metav1.ConditionFalse, reasonRecorderInvalid, message) } - if err = r.maybeProvision(ctx, tailscaleClient, tsr); err != nil { + if err = r.maybeProvision(ctx, tailscaleClient, loginUrl, tsr); err != nil { reason := reasonRecorderCreationFailed message := fmt.Sprintf("failed creating Recorder: %s", err) if strings.Contains(err.Error(), optimisticLockErrorMsg) { @@ -167,7 +162,30 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques return setStatusReady(tsr, metav1.ConditionTrue, reasonRecorderCreated, reasonRecorderCreated) } -func (r *RecorderReconciler) maybeProvision(ctx context.Context, 
tailscaleClient tsClient, tsr *tsapi.Recorder) error { +// getClientAndLoginURL returns the appropriate Tailscale client and resolved login URL +// for the given tailnet name. If no tailnet is specified, returns the default client +// and login server. Applies fallback to the operator's login server if the tailnet +// doesn't specify a custom login URL. +func (r *RecorderReconciler) getClientAndLoginURL(ctx context.Context, tailnetName string) (tsClient, + string, error) { + if tailnetName == "" { + return r.tsClient, r.loginServer, nil + } + + tc, loginUrl, err := clientForTailnet(ctx, r.Client, r.tsNamespace, tailnetName) + if err != nil { + return nil, "", err + } + + // Apply fallback if tailnet doesn't specify custom login URL + if loginUrl == "" { + loginUrl = r.loginServer + } + + return tc, loginUrl, nil +} + +func (r *RecorderReconciler) maybeProvision(ctx context.Context, tailscaleClient tsClient, loginUrl string, tsr *tsapi.Recorder) error { logger := r.logger(tsr.Name) r.mu.Lock() @@ -234,7 +252,7 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tailscaleClient return fmt.Errorf("error creating RoleBinding: %w", err) } - ss := tsrStatefulSet(tsr, r.tsNamespace, r.loginServer) + ss := tsrStatefulSet(tsr, r.tsNamespace, loginUrl) _, err = createOrUpdate(ctx, r.Client, r.tsNamespace, ss, func(s *appsv1.StatefulSet) { s.ObjectMeta.Labels = ss.ObjectMeta.Labels s.ObjectMeta.Annotations = ss.ObjectMeta.Annotations From 16c4780f0a0354f137eae201952785d32745438f Mon Sep 17 00:00:00 2001 From: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com> Date: Tue, 10 Mar 2026 08:23:01 -0400 Subject: [PATCH 1076/1093] go.toolchain.next.rev: update to final Go 1.26.1 [next] (#18939) This updates the TS_GO_NEXT=1 (testing) toolchain to Go 1.26.1 The default one is still Go 1.26.0. 
Updates #18682 Signed-off-by: Jason O'Donnell <2160810+jasonodonnell@users.noreply.github.com> --- go.toolchain.next.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.next.rev b/go.toolchain.next.rev index ea3d3c773f779..0b07150d516a0 100644 --- a/go.toolchain.next.rev +++ b/go.toolchain.next.rev @@ -1 +1 @@ -5b5cb0db47535a0a8d2f450cb1bf83af8e70f164 +5cce30e20c1fc6d8463b0a99acdd9777c4ad124b From 32adca78f1cbf0b0bdb5db5840bfe4303607b64f Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 10 Mar 2026 13:45:29 +0000 Subject: [PATCH 1077/1093] pull-toolchain.sh: advance the next hash if it's behind Updates tailscale/corp#36382 Change-Id: Ida55b7b1a2cdd0a4653bb41852008e7088fc4a48 Signed-off-by: Brad Fitzpatrick --- pull-toolchain.sh | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/pull-toolchain.sh b/pull-toolchain.sh index c80c913bb17b2..effeca669cd67 100755 --- a/pull-toolchain.sh +++ b/pull-toolchain.sh @@ -20,6 +20,32 @@ if [ "$upstream" != "$current" ]; then echo "$upstream" >"$go_toolchain_rev_file" fi +# When updating the regular (non-next) toolchain, also bump go.toolchain.next.rev +# if it has fallen behind on the same branch. This happens when "next" was tracking +# a release candidate (e.g. Go 1.26.0rc2) and the regular toolchain later gets +# bumped to a newer release (e.g. Go 1.26.2) on the same branch. At that point +# the "next" rev shouldn't still point at the older RC. +if [ "${TS_GO_NEXT:-}" != "1" ]; then + read -r next_branch /dev/null; then + if git -C "$tmpdir" merge-base --is-ancestor "$next_rev" "$new_rev" 2>/dev/null; then + echo "$new_rev" >go.toolchain.next.rev + echo "pull-toolchain.sh: also bumped go.toolchain.next.rev to match (was behind on same branch)" >&2 + fi + fi + rm -rf "$tmpdir" + fi + fi +fi + # Only update go.toolchain.version and go.toolchain.rev.sri for the main toolchain, # skipping it if TS_GO_NEXT=1. 
Those two files are only used by Nix, and as of 2026-01-26 # don't yet support TS_GO_NEXT=1 with flake.nix or in our corp CI. @@ -29,6 +55,6 @@ if [ "${TS_GO_NEXT:-}" != "1" ]; then ./update-flake.sh fi -if [ -n "$(git diff-index --name-only HEAD -- "$go_toolchain_rev_file" go.toolchain.rev.sri go.toolchain.version)" ]; then +if [ -n "$(git diff-index --name-only HEAD -- "$go_toolchain_rev_file" go.toolchain.next.rev go.toolchain.rev.sri go.toolchain.version)" ]; then echo "pull-toolchain.sh: changes imported. Use git commit to make them permanent." >&2 fi From 525f7a1e47060309a2b9dd98ae8ee81374da743a Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 9 Mar 2026 23:47:30 +0000 Subject: [PATCH 1078/1093] types/key: add NodePrivate.Raw32 and DiscoPrivateFromRaw32 Raw byte accessors for key types, mirroring existing patterns (NodePublic.Raw32 and DiscoPublicFromRaw32 already exist). NodePrivate.Raw32 returns the raw 32 bytes of a node private key. DiscoPrivateFromRaw32 parses a 32-byte raw value as a DiscoPrivate. Updates tailscale/corp#24454 Change-Id: Ibc08bed14ab359eddefbebd811c375b6365c7919 Signed-off-by: Brad Fitzpatrick --- types/key/disco.go | 10 ++++++++++ types/key/node.go | 3 +++ 2 files changed, 13 insertions(+) diff --git a/types/key/disco.go b/types/key/disco.go index f46347c919ebb..7fa476dc35ec0 100644 --- a/types/key/disco.go +++ b/types/key/disco.go @@ -42,6 +42,16 @@ func NewDisco() DiscoPrivate { return ret } +// DiscoPrivateFromRaw32 parses a 32-byte raw value as a DiscoPrivate. +func DiscoPrivateFromRaw32(raw mem.RO) DiscoPrivate { + if raw.Len() != 32 { + panic("input has wrong size") + } + var ret DiscoPrivate + raw.Copy(ret.k[:]) + return ret +} + // IsZero reports whether k is the zero value. 
func (k DiscoPrivate) IsZero() bool { return k.Equal(DiscoPrivate{}) diff --git a/types/key/node.go b/types/key/node.go index 1402aad361870..83be593afece0 100644 --- a/types/key/node.go +++ b/types/key/node.go @@ -61,6 +61,9 @@ func NewNode() NodePrivate { return ret } +// Raw32 returns k as 32 raw bytes. +func (k NodePrivate) Raw32() [32]byte { return k.k } + // NodePrivateFromRaw32 parses a 32-byte raw value as a NodePrivate. // // Deprecated: only needed to cast from legacy node private key types, From 021de2e1bc8d5d4ab66d4f4f5c560dc585ae3ae0 Mon Sep 17 00:00:00 2001 From: Mike O'Driscoll Date: Tue, 10 Mar 2026 15:19:15 -0400 Subject: [PATCH 1079/1093] util/linuxfw: fix nil pointer panic in connmark rules without IPv6 (#18946) When IPv6 is unavailable on a system, AddConnmarkSaveRule() and DelConnmarkSaveRule() would panic with a nil pointer dereference. Both methods directly iterated over []iptablesInterface{i.ipt4, i.ipt6} without checking if ipt6 was nil. Use `getTables()` instead to properly retrieve the available tables on a given system Fixes #3310 Signed-off-by: Mike O'Driscoll --- util/linuxfw/fake.go | 18 ++-- util/linuxfw/iptables_runner.go | 8 +- util/linuxfw/iptables_runner_test.go | 140 +++++++++++++++++++++++++++ 3 files changed, 154 insertions(+), 12 deletions(-) diff --git a/util/linuxfw/fake.go b/util/linuxfw/fake.go index 166d80401e52e..b902b93c1a66b 100644 --- a/util/linuxfw/fake.go +++ b/util/linuxfw/fake.go @@ -26,13 +26,15 @@ type fakeRule struct { func newFakeIPTables() *fakeIPTables { return &fakeIPTables{ n: map[string][]string{ - "filter/INPUT": nil, - "filter/OUTPUT": nil, - "filter/FORWARD": nil, - "nat/PREROUTING": nil, - "nat/OUTPUT": nil, - "nat/POSTROUTING": nil, - "mangle/FORWARD": nil, + "filter/INPUT": nil, + "filter/OUTPUT": nil, + "filter/FORWARD": nil, + "nat/PREROUTING": nil, + "nat/OUTPUT": nil, + "nat/POSTROUTING": nil, + "mangle/FORWARD": nil, + "mangle/PREROUTING": nil, + "mangle/OUTPUT": nil, }, } } @@ -80,7 +82,7 @@ 
func (n *fakeIPTables) Delete(table, chain string, args ...string) error { return nil } } - return fmt.Errorf("delete of unknown rule %q from %s", strings.Join(args, " "), k) + return errors.New("exitcode:1") } else { return fmt.Errorf("unknown table/chain %s", k) } diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index b8eb39f219be9..0d50bdd61de38 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -533,7 +533,7 @@ func (i *iptablesRunner) DelStatefulRule(tunname string) error { // proper routing table lookups for exit nodes and subnet routers. func (i *iptablesRunner) AddConnmarkSaveRule() error { // Check if rules already exist (idempotency) - for _, ipt := range []iptablesInterface{i.ipt4, i.ipt6} { + for _, ipt := range i.getTables() { rules, err := ipt.List("mangle", "PREROUTING") if err != nil { continue @@ -551,7 +551,7 @@ func (i *iptablesRunner) AddConnmarkSaveRule() error { // mangle/PREROUTING: Restore mark from conntrack for ESTABLISHED/RELATED connections // This runs BEFORE routing decision and rp_filter check - for _, ipt := range []iptablesInterface{i.ipt4, i.ipt6} { + for _, ipt := range i.getTables() { args := []string{ "-m", "conntrack", "--ctstate", "ESTABLISHED,RELATED", @@ -566,7 +566,7 @@ func (i *iptablesRunner) AddConnmarkSaveRule() error { } // mangle/OUTPUT: Save mark to conntrack for NEW connections with non-zero marks - for _, ipt := range []iptablesInterface{i.ipt4, i.ipt6} { + for _, ipt := range i.getTables() { args := []string{ "-m", "conntrack", "--ctstate", "NEW", @@ -587,7 +587,7 @@ func (i *iptablesRunner) AddConnmarkSaveRule() error { // DelConnmarkSaveRule removes conntrack marking rules added by AddConnmarkSaveRule. 
func (i *iptablesRunner) DelConnmarkSaveRule() error { - for _, ipt := range []iptablesInterface{i.ipt4, i.ipt6} { + for _, ipt := range i.getTables() { // Delete PREROUTING rule args := []string{ "-m", "conntrack", diff --git a/util/linuxfw/iptables_runner_test.go b/util/linuxfw/iptables_runner_test.go index 0dcade35188fc..77c753004a770 100644 --- a/util/linuxfw/iptables_runner_test.go +++ b/util/linuxfw/iptables_runner_test.go @@ -364,3 +364,143 @@ func checkSNATRuleCount(t *testing.T, iptr *iptablesRunner, ip netip.Addr, wants t.Fatalf("wants %d rules, got %d", wantsRules, len(rules)) } } + +func TestAddAndDelConnmarkSaveRule(t *testing.T) { + preroutingArgs := []string{ + "-m", "conntrack", + "--ctstate", "ESTABLISHED,RELATED", + "-j", "CONNMARK", + "--restore-mark", + "--nfmask", "0xff0000", + "--ctmask", "0xff0000", + } + + outputArgs := []string{ + "-m", "conntrack", + "--ctstate", "NEW", + "-m", "mark", + "!", "--mark", "0x0/0xff0000", + "-j", "CONNMARK", + "--save-mark", + "--nfmask", "0xff0000", + "--ctmask", "0xff0000", + } + + t.Run("with_ipv6", func(t *testing.T) { + iptr := newFakeIPTablesRunner() + + // Add connmark rules + if err := iptr.AddConnmarkSaveRule(); err != nil { + t.Fatalf("AddConnmarkSaveRule failed: %v", err) + } + + // Verify rules exist in both IPv4 and IPv6 + for _, proto := range []iptablesInterface{iptr.ipt4, iptr.ipt6} { + if exists, err := proto.Exists("mangle", "PREROUTING", preroutingArgs...); err != nil { + t.Fatalf("error checking PREROUTING rule: %v", err) + } else if !exists { + t.Errorf("PREROUTING connmark rule doesn't exist") + } + + if exists, err := proto.Exists("mangle", "OUTPUT", outputArgs...); err != nil { + t.Fatalf("error checking OUTPUT rule: %v", err) + } else if !exists { + t.Errorf("OUTPUT connmark rule doesn't exist") + } + } + + // Test idempotency - calling AddConnmarkSaveRule again should not fail or duplicate + if err := iptr.AddConnmarkSaveRule(); err != nil { + t.Fatalf("AddConnmarkSaveRule (second 
call) failed: %v", err) + } + + // Verify rules still exist and weren't duplicated + for _, proto := range []iptablesInterface{iptr.ipt4, iptr.ipt6} { + preroutingRules, err := proto.List("mangle", "PREROUTING") + if err != nil { + t.Fatalf("error listing PREROUTING rules: %v", err) + } + connmarkCount := 0 + for _, rule := range preroutingRules { + if strings.Contains(rule, "CONNMARK") && strings.Contains(rule, "restore-mark") { + connmarkCount++ + } + } + if connmarkCount != 1 { + t.Errorf("expected 1 PREROUTING connmark rule, got %d", connmarkCount) + } + } + + // Delete connmark rules + if err := iptr.DelConnmarkSaveRule(); err != nil { + t.Fatalf("DelConnmarkSaveRule failed: %v", err) + } + + // Verify rules are deleted + for _, proto := range []iptablesInterface{iptr.ipt4, iptr.ipt6} { + if exists, err := proto.Exists("mangle", "PREROUTING", preroutingArgs...); err != nil { + t.Fatalf("error checking PREROUTING rule: %v", err) + } else if exists { + t.Errorf("PREROUTING connmark rule still exists after deletion") + } + + if exists, err := proto.Exists("mangle", "OUTPUT", outputArgs...); err != nil { + t.Fatalf("error checking OUTPUT rule: %v", err) + } else if exists { + t.Errorf("OUTPUT connmark rule still exists after deletion") + } + } + + // Test idempotency of deletion + if err := iptr.DelConnmarkSaveRule(); err != nil { + t.Fatalf("DelConnmarkSaveRule (second call) failed: %v", err) + } + }) + + t.Run("without_ipv6", func(t *testing.T) { + // Create an iptables runner with only IPv4 (simulating system without IPv6) + iptr := &iptablesRunner{ + ipt4: newFakeIPTables(), + ipt6: nil, // IPv6 not available + v6Available: false, + v6NATAvailable: false, + v6FilterAvailable: false, + } + + // Add connmark rules should NOT panic with nil ipt6 + if err := iptr.AddConnmarkSaveRule(); err != nil { + t.Fatalf("AddConnmarkSaveRule failed with IPv6 disabled: %v", err) + } + + // Verify rules exist ONLY in IPv4 + if exists, err := iptr.ipt4.Exists("mangle", 
"PREROUTING", preroutingArgs...); err != nil { + t.Fatalf("error checking IPv4 PREROUTING rule: %v", err) + } else if !exists { + t.Errorf("IPv4 PREROUTING connmark rule doesn't exist") + } + + if exists, err := iptr.ipt4.Exists("mangle", "OUTPUT", outputArgs...); err != nil { + t.Fatalf("error checking IPv4 OUTPUT rule: %v", err) + } else if !exists { + t.Errorf("IPv4 OUTPUT connmark rule doesn't exist") + } + + // Delete connmark rules should NOT panic with nil ipt6 + if err := iptr.DelConnmarkSaveRule(); err != nil { + t.Fatalf("DelConnmarkSaveRule failed with IPv6 disabled: %v", err) + } + + // Verify rules are deleted from IPv4 + if exists, err := iptr.ipt4.Exists("mangle", "PREROUTING", preroutingArgs...); err != nil { + t.Fatalf("error checking IPv4 PREROUTING rule: %v", err) + } else if exists { + t.Errorf("IPv4 PREROUTING connmark rule still exists after deletion") + } + + if exists, err := iptr.ipt4.Exists("mangle", "OUTPUT", outputArgs...); err != nil { + t.Fatalf("error checking IPv4 OUTPUT rule: %v", err) + } else if exists { + t.Errorf("IPv4 OUTPUT connmark rule still exists after deletion") + } + }) +} From 99bde5a40670e28f589dcf5ca7c7e77dfcb424d5 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 10 Mar 2026 03:44:45 +0000 Subject: [PATCH 1080/1093] tstest/integration: deflake TestCollectPanic Two issues caused TestCollectPanic to flake: 1. ETXTBSY: The test exec'd the tailscaled binary directly without going through StartDaemon/awaitTailscaledRunnable, so it lacked the retry loop that other tests use to work around a mysterious ETXTBSY on GitHub Actions. 2. Shared filch files: The test didn't pass --statedir or TS_LOGS_DIR, so all parallel test instances wrote panic logs to the shared system state directory (~/.local/share/tailscale). Concurrent runs would clobber each other's filch log files, causing the second run to not find the panic data from the first. 
Fix both by adding awaitTailscaledRunnable before the first exec, and passing --statedir and TS_LOGS_DIR to isolate each test's log files, matching what StartDaemon does. It now passes x/tools/cmd/stress. Fixes #15865 Change-Id: If18b9acf8dbe9a986446a42c5d98de7ad8aae098 Signed-off-by: Brad Fitzpatrick --- tstest/integration/integration_test.go | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index 0482e4b533333..19f9fa159a348 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -200,23 +200,34 @@ func TestExpectedFeaturesLinked(t *testing.T) { } func TestCollectPanic(t *testing.T) { - flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/15865") tstest.Shard(t) tstest.Parallel(t) env := NewTestEnv(t) n := NewTestNode(t, env) - cmd := exec.Command(env.daemon, "--cleanup") + // Wait for the binary to be executable, working around a + // mysterious ETXTBSY on GitHub Actions. + // See https://github.com/tailscale/tailscale/issues/15868. + if err := n.awaitTailscaledRunnable(); err != nil { + t.Fatal(err) + } + + logsDir := t.TempDir() + cmd := exec.Command(env.daemon, "--cleanup", "--statedir="+n.dir) cmd.Env = append(os.Environ(), "TS_PLEASE_PANIC=1", "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, + "TS_LOGS_DIR="+logsDir, ) got, _ := cmd.CombinedOutput() // we expect it to fail, ignore err t.Logf("initial run: %s", got) // Now we run it again, and on start, it will upload the logs to logcatcher. 
- cmd = exec.Command(env.daemon, "--cleanup") - cmd.Env = append(os.Environ(), "TS_LOG_TARGET="+n.env.LogCatcherServer.URL) + cmd = exec.Command(env.daemon, "--cleanup", "--statedir="+n.dir) + cmd.Env = append(os.Environ(), + "TS_LOG_TARGET="+n.env.LogCatcherServer.URL, + "TS_LOGS_DIR="+logsDir, + ) if out, err := cmd.CombinedOutput(); err != nil { t.Fatalf("cleanup failed: %v: %q", err, out) } From 99e3e9af51f065688d2287bd1727f16b45176b48 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 17 Sep 2025 11:11:35 +0100 Subject: [PATCH 1081/1093] ssh/tailssh: mark TestSSHRecordingCancelsSessionsOnUploadFailure as flaky again Updates #7707 Change-Id: I98cdace78cd5060643894fb0c9be02574edb2894 Signed-off-by: Brad Fitzpatrick --- ssh/tailssh/tailssh_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index ec577461660f8..4ef3cbd461812 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -37,6 +37,7 @@ import ( gossh "golang.org/x/crypto/ssh" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" + "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/net/memnet" @@ -489,6 +490,8 @@ func newSSHRule(action *tailcfg.SSHAction) *tailcfg.SSHRule { } func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7707") + if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS) } From f905871fb1b10ae7c75c5850b04e18b7bea09b36 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 10 Mar 2026 21:33:12 +0000 Subject: [PATCH 1082/1093] ipn/ipnlocal, feature/ssh: move SSH code out of LocalBackend to feature This makes tsnet apps not depend on x/crypto/ssh and locks that in with a test. 
It also paves the wave for tsnet apps to opt-in to SSH support via a blank feature import in the future. Updates #12614 Change-Id: Ica85628f89c8f015413b074f5001b82b27c953a9 Signed-off-by: Brad Fitzpatrick --- cmd/k8s-operator/depaware.txt | 13 +- cmd/tailscaled/depaware.txt | 3 +- cmd/tailscaled/ssh.go | 4 +- cmd/tsidp/depaware.txt | 11 +- feature/hooks.go | 5 + feature/ssh/ssh.go | 11 + ipn/ipnlocal/c2n.go | 23 -- ipn/ipnlocal/local.go | 16 +- ipn/ipnlocal/ssh.go | 234 ------------------ ipn/ipnlocal/ssh_stub.go | 20 -- ipn/ipnlocal/ssh_test.go | 62 ----- ssh/tailssh/c2n.go | 109 ++++++++ ssh/tailssh/hostkeys.go | 155 ++++++++++++ ssh/tailssh/hostkeys_test.go | 39 +++ ssh/tailssh/tailssh.go | 5 +- ssh/tailssh/tailssh_integration_test.go | 23 -- ssh/tailssh/tailssh_test.go | 38 ++- tsnet/depaware.txt | 11 +- tsnet/tsnet_test.go | 4 + .../tailscaled_deps_test_darwin.go | 2 +- .../tailscaled_deps_test_freebsd.go | 2 +- .../integration/tailscaled_deps_test_linux.go | 2 +- .../tailscaled_deps_test_openbsd.go | 2 +- 23 files changed, 371 insertions(+), 423 deletions(-) create mode 100644 feature/ssh/ssh.go delete mode 100644 ipn/ipnlocal/ssh.go delete mode 100644 ipn/ipnlocal/ssh_stub.go delete mode 100644 ipn/ipnlocal/ssh_test.go create mode 100644 ssh/tailssh/c2n.go create mode 100644 ssh/tailssh/hostkeys.go create mode 100644 ssh/tailssh/hostkeys_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 436202216eef8..cbb4738d7b09a 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1000,10 +1000,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf - golang.org/x/crypto/chacha20 from golang.org/x/crypto/ssh+ 
+ golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ - golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ @@ -1011,8 +1010,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal - LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ @@ -1078,7 +1075,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ - crypto/dsa from crypto/x509+ + crypto/dsa from crypto/x509 crypto/ecdh from crypto/ecdsa+ crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ @@ -1127,9 +1124,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/internal/randutil from crypto/internal/rand crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - crypto/mlkem from golang.org/x/crypto/ssh+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ - crypto/rc4 from crypto/tls+ + crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from 
crypto/tls+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index e36c975e5790b..c34bd490a6f85 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -303,6 +303,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/feature/posture from tailscale.com/feature/condregister tailscale.com/feature/relayserver from tailscale.com/feature/condregister L tailscale.com/feature/sdnotify from tailscale.com/feature/condregister + LD tailscale.com/feature/ssh from tailscale.com/cmd/tailscaled tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister @@ -387,7 +388,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ LD tailscale.com/sessionrecording from tailscale.com/ssh/tailssh - LD 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/tailscaled + LD 💣 tailscale.com/ssh/tailssh from tailscale.com/feature/ssh tailscale.com/syncs from tailscale.com/cmd/tailscaled+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal diff --git a/cmd/tailscaled/ssh.go b/cmd/tailscaled/ssh.go index e69cbd5dce086..8de3117944431 100644 --- a/cmd/tailscaled/ssh.go +++ b/cmd/tailscaled/ssh.go @@ -5,5 +5,5 @@ package main -// Force registration of tailssh with LocalBackend. -import _ "tailscale.com/ssh/tailssh" +// Register implementations of various SSH hooks. 
+import _ "tailscale.com/feature/ssh" diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index 14239cfa20ff7..2f11903936e33 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -401,8 +401,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/ed25519 from gopkg.in/square/go-jose.v2 @@ -414,8 +413,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/pbkdf2 from gopkg.in/square/go-jose.v2 golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal - LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ @@ -476,7 +473,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ - crypto/dsa from crypto/x509+ + crypto/dsa from crypto/x509 crypto/ecdh from crypto/ecdsa+ crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ @@ -525,9 +522,9 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar 
crypto/internal/randutil from crypto/internal/rand crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - crypto/mlkem from golang.org/x/crypto/ssh+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ - crypto/rc4 from crypto/tls+ + crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ diff --git a/feature/hooks.go b/feature/hooks.go index 5cd3c0d818ca6..7611499a19011 100644 --- a/feature/hooks.go +++ b/feature/hooks.go @@ -67,6 +67,11 @@ func TPMAvailable() bool { return false } +// HookGetSSHHostKeyPublicStrings is a hook for the ssh/hostkeys package to +// provide SSH host key public strings to ipn/ipnlocal without ipnlocal needing +// to import golang.org/x/crypto/ssh. +var HookGetSSHHostKeyPublicStrings Hook[func(varRoot string, logf logger.Logf) ([]string, error)] + // HookHardwareAttestationAvailable is a hook that reports whether hardware // attestation is supported and available. var HookHardwareAttestationAvailable Hook[func() bool] diff --git a/feature/ssh/ssh.go b/feature/ssh/ssh.go new file mode 100644 index 0000000000000..bd22005916d60 --- /dev/null +++ b/feature/ssh/ssh.go @@ -0,0 +1,11 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ((linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9) && !ts_omit_ssh + +// Package ssh registers the Tailscale SSH feature, including host key +// management and the SSH server. +package ssh + +// Register implementations of various SSH hooks. +import _ "tailscale.com/ssh/tailssh" diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index ccce2a65d99e6..8284872b9a86e 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -44,9 +44,6 @@ func init() { // several candidate nodes is reachable and actually alive. 
RegisterC2N("/echo", handleC2NEcho) } - if buildfeatures.HasSSH { - RegisterC2N("/ssh/usernames", handleC2NSSHUsernames) - } if buildfeatures.HasLogTail { RegisterC2N("POST /logtail/flush", handleC2NLogtailFlush) } @@ -290,26 +287,6 @@ func handleC2NPprof(b *LocalBackend, w http.ResponseWriter, r *http.Request) { c2nPprof(w, r, profile) } -func handleC2NSSHUsernames(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - if !buildfeatures.HasSSH { - http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) - return - } - var req tailcfg.C2NSSHUsernamesRequest - if r.Method == "POST" { - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - } - res, err := b.getSSHUsernames(&req) - if err != nil { - http.Error(w, err.Error(), 500) - return - } - writeJSON(w, res) -} - func handleC2NSockStats(b *LocalBackend, w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/plain") if b.sockstatLogger == nil { diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 77bb14f368db9..ea5af0897a54a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -5646,10 +5646,12 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // TODO(bradfitz): this is called with b.mu held. Not ideal. // If the filesystem gets wedged or something we could block for // a long time. But probably fine. 
- var err error - sshHostKeys, err = b.getSSHHostKeyPublicStrings() - if err != nil { - b.logf("warning: unable to get SSH host keys, SSH will appear as disabled for this node: %v", err) + if f, ok := feature.HookGetSSHHostKeyPublicStrings.GetOk(); ok { + var err error + sshHostKeys, err = f(b.TailscaleVarRoot(), b.logf) + if err != nil { + b.logf("warning: unable to get SSH host keys, SSH will appear as disabled for this node: %v", err) + } } } hi.SSH_HostKeys = sshHostKeys @@ -6439,9 +6441,9 @@ func (b *LocalBackend) maybeSentHostinfoIfChangedLocked(prefs ipn.PrefsView) { } } -// operatorUserName returns the current pref's OperatorUser's name, or the +// OperatorUserName returns the current pref's OperatorUser's name, or the // empty string if none. -func (b *LocalBackend) operatorUserName() string { +func (b *LocalBackend) OperatorUserName() string { b.mu.Lock() defer b.mu.Unlock() prefs := b.pm.CurrentPrefs() @@ -6454,7 +6456,7 @@ func (b *LocalBackend) operatorUserName() string { // OperatorUserID returns the current pref's OperatorUser's ID (in // os/user.User.Uid string form), or the empty string if none. 
func (b *LocalBackend) OperatorUserID() string { - opUserName := b.operatorUserName() + opUserName := b.OperatorUserName() if opUserName == "" { return "" } diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go deleted file mode 100644 index 56a6d60ccb043..0000000000000 --- a/ipn/ipnlocal/ssh.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright (c) Tailscale Inc & contributors -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ((linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9) && !ts_omit_ssh - -package ipnlocal - -import ( - "bytes" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "slices" - "strings" - "sync" - - "go4.org/mem" - "golang.org/x/crypto/ssh" - "tailscale.com/tailcfg" - "tailscale.com/util/lineiter" - "tailscale.com/util/mak" -) - -// keyTypes are the SSH key types that we either try to read from the -// system's OpenSSH keys or try to generate for ourselves when not -// running as root. -var keyTypes = []string{"rsa", "ecdsa", "ed25519"} - -// getSSHUsernames discovers and returns the list of usernames that are -// potential Tailscale SSH user targets. -// -// Invariant: must not be called with b.mu held. -func (b *LocalBackend) getSSHUsernames(req *tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) { - res := new(tailcfg.C2NSSHUsernamesResponse) - if !b.tailscaleSSHEnabled() { - return res, nil - } - - max := 10 - if req != nil && req.Max != 0 { - max = req.Max - } - - add := func(u string) { - if req != nil && req.Exclude[u] { - return - } - switch u { - case "nobody", "daemon", "sync": - return - } - if slices.Contains(res.Usernames, u) { - return - } - if len(res.Usernames) > max { - // Enough for a hint. 
- return - } - res.Usernames = append(res.Usernames, u) - } - - if opUser := b.operatorUserName(); opUser != "" { - add(opUser) - } - - // Check popular usernames and see if they exist with a real shell. - switch runtime.GOOS { - case "darwin": - out, err := exec.Command("dscl", ".", "list", "/Users").Output() - if err != nil { - return nil, err - } - for line := range lineiter.Bytes(out) { - line = bytes.TrimSpace(line) - if len(line) == 0 || line[0] == '_' { - continue - } - add(string(line)) - } - default: - for lr := range lineiter.File("/etc/passwd") { - line, err := lr.Value() - if err != nil { - break - } - line = bytes.TrimSpace(line) - if len(line) == 0 || line[0] == '#' || line[0] == '_' { - continue - } - if mem.HasSuffix(mem.B(line), mem.S("/nologin")) || - mem.HasSuffix(mem.B(line), mem.S("/false")) { - continue - } - before, _, ok := bytes.Cut(line, []byte{':'}) - if ok { - add(string(before)) - } - } - } - return res, nil -} - -func (b *LocalBackend) GetSSH_HostKeys() (keys []ssh.Signer, err error) { - var existing map[string]ssh.Signer - if os.Geteuid() == 0 { - existing = b.getSystemSSH_HostKeys() - } - return b.getTailscaleSSH_HostKeys(existing) -} - -// getTailscaleSSH_HostKeys returns the three (rsa, ecdsa, ed25519) SSH host -// keys, reusing the provided ones in existing if present in the map. -func (b *LocalBackend) getTailscaleSSH_HostKeys(existing map[string]ssh.Signer) (keys []ssh.Signer, err error) { - var keyDir string // lazily initialized $TAILSCALE_VAR/ssh dir. 
- for _, typ := range keyTypes { - if s, ok := existing[typ]; ok { - keys = append(keys, s) - continue - } - if keyDir == "" { - root := b.TailscaleVarRoot() - if root == "" { - return nil, errors.New("no var root for ssh keys") - } - keyDir = filepath.Join(root, "ssh") - if err := os.MkdirAll(keyDir, 0700); err != nil { - return nil, err - } - } - hostKey, err := b.hostKeyFileOrCreate(keyDir, typ) - if err != nil { - return nil, fmt.Errorf("error creating SSH host key type %q in %q: %w", typ, keyDir, err) - } - signer, err := ssh.ParsePrivateKey(hostKey) - if err != nil { - return nil, fmt.Errorf("error parsing SSH host key type %q from %q: %w", typ, keyDir, err) - } - keys = append(keys, signer) - } - return keys, nil -} - -var keyGenMu sync.Mutex - -func (b *LocalBackend) hostKeyFileOrCreate(keyDir, typ string) ([]byte, error) { - keyGenMu.Lock() - defer keyGenMu.Unlock() - - path := filepath.Join(keyDir, "ssh_host_"+typ+"_key") - v, err := os.ReadFile(path) - if err == nil { - return v, nil - } - if !os.IsNotExist(err) { - return nil, err - } - var priv any - switch typ { - default: - return nil, fmt.Errorf("unsupported key type %q", typ) - case "ed25519": - _, priv, err = ed25519.GenerateKey(rand.Reader) - case "ecdsa": - // curve is arbitrary. We pick whatever will at - // least pacify clients as the actual encryption - // doesn't matter: it's all over WireGuard anyway. - curve := elliptic.P256() - priv, err = ecdsa.GenerateKey(curve, rand.Reader) - case "rsa": - // keySize is arbitrary. We pick whatever will at - // least pacify clients as the actual encryption - // doesn't matter: it's all over WireGuard anyway. 
- const keySize = 2048 - priv, err = rsa.GenerateKey(rand.Reader, keySize) - } - if err != nil { - return nil, err - } - mk, err := x509.MarshalPKCS8PrivateKey(priv) - if err != nil { - return nil, err - } - pemGen := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: mk}) - err = os.WriteFile(path, pemGen, 0700) - return pemGen, err -} - -func (b *LocalBackend) getSystemSSH_HostKeys() (ret map[string]ssh.Signer) { - for _, typ := range keyTypes { - filename := "/etc/ssh/ssh_host_" + typ + "_key" - hostKey, err := os.ReadFile(filename) - if err != nil || len(bytes.TrimSpace(hostKey)) == 0 { - continue - } - signer, err := ssh.ParsePrivateKey(hostKey) - if err != nil { - b.logf("warning: error reading host key %s: %v (generating one instead)", filename, err) - continue - } - mak.Set(&ret, typ, signer) - } - return ret -} - -func (b *LocalBackend) getSSHHostKeyPublicStrings() ([]string, error) { - signers, err := b.GetSSH_HostKeys() - if err != nil { - return nil, err - } - var keyStrings []string - for _, signer := range signers { - keyStrings = append(keyStrings, strings.TrimSpace(string(ssh.MarshalAuthorizedKey(signer.PublicKey())))) - } - return keyStrings, nil -} - -// tailscaleSSHEnabled reports whether Tailscale SSH is currently enabled based -// on prefs. It returns false if there are no prefs set. 
-func (b *LocalBackend) tailscaleSSHEnabled() bool { - b.mu.Lock() - defer b.mu.Unlock() - p := b.pm.CurrentPrefs() - return p.Valid() && p.RunSSH() -} diff --git a/ipn/ipnlocal/ssh_stub.go b/ipn/ipnlocal/ssh_stub.go deleted file mode 100644 index 9a997c9143f7b..0000000000000 --- a/ipn/ipnlocal/ssh_stub.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Tailscale Inc & contributors -// SPDX-License-Identifier: BSD-3-Clause - -//go:build ts_omit_ssh || ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9) - -package ipnlocal - -import ( - "errors" - - "tailscale.com/tailcfg" -) - -func (b *LocalBackend) getSSHHostKeyPublicStrings() ([]string, error) { - return nil, nil -} - -func (b *LocalBackend) getSSHUsernames(*tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) { - return nil, errors.New("not implemented") -} diff --git a/ipn/ipnlocal/ssh_test.go b/ipn/ipnlocal/ssh_test.go deleted file mode 100644 index bb293d10ac4d6..0000000000000 --- a/ipn/ipnlocal/ssh_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) Tailscale Inc & contributors -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux || (darwin && !ios) - -package ipnlocal - -import ( - "encoding/json" - "reflect" - "testing" - - "tailscale.com/health" - "tailscale.com/ipn/store/mem" - "tailscale.com/tailcfg" - "tailscale.com/util/eventbus/eventbustest" - "tailscale.com/util/must" -) - -func TestSSHKeyGen(t *testing.T) { - dir := t.TempDir() - lb := &LocalBackend{varRoot: dir} - keys, err := lb.getTailscaleSSH_HostKeys(nil) - if err != nil { - t.Fatal(err) - } - got := map[string]bool{} - for _, k := range keys { - got[k.PublicKey().Type()] = true - } - want := map[string]bool{ - "ssh-rsa": true, - "ecdsa-sha2-nistp256": true, - "ssh-ed25519": true, - } - if !reflect.DeepEqual(got, want) { - t.Fatalf("keys = %v; want %v", got, want) - } - - keys2, err := lb.getTailscaleSSH_HostKeys(nil) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(keys, 
keys2) { - t.Errorf("got different keys on second call") - } -} - -type fakeSSHServer struct { - SSHServer -} - -func TestGetSSHUsernames(t *testing.T) { - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) - b := &LocalBackend{pm: pm, store: pm.Store()} - b.sshServer = fakeSSHServer{} - res, err := b.getSSHUsernames(new(tailcfg.C2NSSHUsernamesRequest)) - if err != nil { - t.Fatal(err) - } - t.Logf("Got: %s", must.Get(json.Marshal(res))) -} diff --git a/ssh/tailssh/c2n.go b/ssh/tailssh/c2n.go new file mode 100644 index 0000000000000..621be74d4baba --- /dev/null +++ b/ssh/tailssh/c2n.go @@ -0,0 +1,109 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 + +package tailssh + +import ( + "bytes" + "encoding/json" + "net/http" + "os/exec" + "runtime" + "slices" + + "go4.org/mem" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tailcfg" + "tailscale.com/util/lineiter" +) + +func handleC2NSSHUsernames(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + var req tailcfg.C2NSSHUsernamesRequest + if r.Method == "POST" { + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + } + res, err := getSSHUsernames(b, &req) + if err != nil { + http.Error(w, err.Error(), 500) + return + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +// getSSHUsernames discovers and returns the list of usernames that are +// potential Tailscale SSH user targets. 
+func getSSHUsernames(b *ipnlocal.LocalBackend, req *tailcfg.C2NSSHUsernamesRequest) (*tailcfg.C2NSSHUsernamesResponse, error) { + res := new(tailcfg.C2NSSHUsernamesResponse) + if b == nil || !b.ShouldRunSSH() { + return res, nil + } + + max := 10 + if req != nil && req.Max != 0 { + max = req.Max + } + + add := func(u string) { + if req != nil && req.Exclude[u] { + return + } + switch u { + case "nobody", "daemon", "sync": + return + } + if slices.Contains(res.Usernames, u) { + return + } + if len(res.Usernames) > max { + // Enough for a hint. + return + } + res.Usernames = append(res.Usernames, u) + } + + if opUser := b.OperatorUserName(); opUser != "" { + add(opUser) + } + + // Check popular usernames and see if they exist with a real shell. + switch runtime.GOOS { + case "darwin": + out, err := exec.Command("dscl", ".", "list", "/Users").Output() + if err != nil { + return nil, err + } + for line := range lineiter.Bytes(out) { + line = bytes.TrimSpace(line) + if len(line) == 0 || line[0] == '_' { + continue + } + add(string(line)) + } + default: + for lr := range lineiter.File("/etc/passwd") { + line, err := lr.Value() + if err != nil { + break + } + line = bytes.TrimSpace(line) + if len(line) == 0 || line[0] == '#' || line[0] == '_' { + continue + } + if mem.HasSuffix(mem.B(line), mem.S("/nologin")) || + mem.HasSuffix(mem.B(line), mem.S("/false")) { + continue + } + before, _, ok := bytes.Cut(line, []byte{':'}) + if ok { + add(string(before)) + } + } + } + return res, nil +} diff --git a/ssh/tailssh/hostkeys.go b/ssh/tailssh/hostkeys.go new file mode 100644 index 0000000000000..f14d99c467d92 --- /dev/null +++ b/ssh/tailssh/hostkeys.go @@ -0,0 +1,155 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 + +package tailssh + +import ( + "bytes" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + 
"crypto/x509" + "encoding/pem" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + gossh "golang.org/x/crypto/ssh" + "tailscale.com/types/logger" + "tailscale.com/util/mak" +) + +// keyTypes are the SSH key types that we either try to read from the +// system's OpenSSH keys or try to generate for ourselves when not +// running as root. +var keyTypes = []string{"rsa", "ecdsa", "ed25519"} + +// getHostKeys returns the SSH host keys, using system keys when running as root +// and generating Tailscale-specific keys as needed. +func getHostKeys(varRoot string, logf logger.Logf) ([]gossh.Signer, error) { + var existing map[string]gossh.Signer + if os.Geteuid() == 0 { + existing = getSystemHostKeys(logf) + } + return getTailscaleHostKeys(varRoot, existing) +} + +// getHostKeyPublicStrings returns the SSH host key public key strings. +func getHostKeyPublicStrings(varRoot string, logf logger.Logf) ([]string, error) { + signers, err := getHostKeys(varRoot, logf) + if err != nil { + return nil, err + } + var keyStrings []string + for _, signer := range signers { + keyStrings = append(keyStrings, strings.TrimSpace(string(gossh.MarshalAuthorizedKey(signer.PublicKey())))) + } + return keyStrings, nil +} + +// getTailscaleHostKeys returns the three (rsa, ecdsa, ed25519) SSH host +// keys, reusing the provided ones in existing if present in the map. +func getTailscaleHostKeys(varRoot string, existing map[string]gossh.Signer) (keys []gossh.Signer, err error) { + var keyDir string // lazily initialized $TAILSCALE_VAR/ssh dir. 
+ for _, typ := range keyTypes { + if s, ok := existing[typ]; ok { + keys = append(keys, s) + continue + } + if keyDir == "" { + if varRoot == "" { + return nil, errors.New("no var root for ssh keys") + } + keyDir = filepath.Join(varRoot, "ssh") + if err := os.MkdirAll(keyDir, 0700); err != nil { + return nil, err + } + } + hostKey, err := hostKeyFileOrCreate(keyDir, typ) + if err != nil { + return nil, fmt.Errorf("error creating SSH host key type %q in %q: %w", typ, keyDir, err) + } + signer, err := gossh.ParsePrivateKey(hostKey) + if err != nil { + return nil, fmt.Errorf("error parsing SSH host key type %q from %q: %w", typ, keyDir, err) + } + keys = append(keys, signer) + } + return keys, nil +} + +// keyGenMu protects concurrent generation of host keys with +// [hostKeyFileOrCreate], making sure two callers don't try to concurrently find +// a missing key and generate it at the same time, returning different keys to +// their callers. +// +// Technically we actually want to have a mutex per directory (the keyDir +// passed), but that's overkill for how rarely keys are loaded or generated. +var keyGenMu sync.Mutex + +func hostKeyFileOrCreate(keyDir, typ string) ([]byte, error) { + keyGenMu.Lock() + defer keyGenMu.Unlock() + + path := filepath.Join(keyDir, "ssh_host_"+typ+"_key") + v, err := os.ReadFile(path) + if err == nil { + return v, nil + } + if !os.IsNotExist(err) { + return nil, err + } + var priv any + switch typ { + default: + return nil, fmt.Errorf("unsupported key type %q", typ) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + case "ecdsa": + // curve is arbitrary. We pick whatever will at + // least pacify clients as the actual encryption + // doesn't matter: it's all over WireGuard anyway. + curve := elliptic.P256() + priv, err = ecdsa.GenerateKey(curve, rand.Reader) + case "rsa": + // keySize is arbitrary. 
We pick whatever will at + // least pacify clients as the actual encryption + // doesn't matter: it's all over WireGuard anyway. + const keySize = 2048 + priv, err = rsa.GenerateKey(rand.Reader, keySize) + } + if err != nil { + return nil, err + } + mk, err := x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + return nil, err + } + pemGen := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: mk}) + err = os.WriteFile(path, pemGen, 0700) + return pemGen, err +} + +func getSystemHostKeys(logf logger.Logf) (ret map[string]gossh.Signer) { + for _, typ := range keyTypes { + filename := "/etc/ssh/ssh_host_" + typ + "_key" + hostKey, err := os.ReadFile(filename) + if err != nil || len(bytes.TrimSpace(hostKey)) == 0 { + continue + } + signer, err := gossh.ParsePrivateKey(hostKey) + if err != nil { + logf("warning: error reading host key %s: %v (generating one instead)", filename, err) + continue + } + mak.Set(&ret, typ, signer) + } + return ret +} diff --git a/ssh/tailssh/hostkeys_test.go b/ssh/tailssh/hostkeys_test.go new file mode 100644 index 0000000000000..24a876454ea6e --- /dev/null +++ b/ssh/tailssh/hostkeys_test.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux || (darwin && !ios) + +package tailssh + +import ( + "reflect" + "testing" +) + +func TestSSHKeyGen(t *testing.T) { + dir := t.TempDir() + keys, err := getTailscaleHostKeys(dir, nil) + if err != nil { + t.Fatal(err) + } + got := map[string]bool{} + for _, k := range keys { + got[k.PublicKey().Type()] = true + } + want := map[string]bool{ + "ssh-rsa": true, + "ecdsa-sha2-nistp256": true, + "ssh-ed25519": true, + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("keys = %v; want %v", got, want) + } + + keys2, err := getTailscaleHostKeys(dir, nil) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(keys, keys2) { + t.Errorf("got different keys on second call") + } +} diff --git a/ssh/tailssh/tailssh.go 
b/ssh/tailssh/tailssh.go index 96f9c826c3a70..2be133267c3d1 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -74,7 +74,6 @@ const ( // ipnLocalBackend is the subset of ipnlocal.LocalBackend that we use. // It is used for testing. type ipnLocalBackend interface { - GetSSH_HostKeys() ([]gossh.Signer, error) ShouldRunSSH() bool NetMap() *netmap.NetworkMap WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeView, u tailcfg.UserProfile, ok bool) @@ -107,6 +106,8 @@ func (srv *server) now() time.Time { } func init() { + feature.HookGetSSHHostKeyPublicStrings.Set(getHostKeyPublicStrings) + ipnlocal.RegisterC2N("/ssh/usernames", handleC2NSSHUsernames) ipnlocal.RegisterNewSSHServer(func(logf logger.Logf, lb *ipnlocal.LocalBackend) (ipnlocal.SSHServer, error) { tsd, err := os.Executable() if err != nil { @@ -504,7 +505,7 @@ func (srv *server) newConn() (*conn, error) { maps.Copy(ss.RequestHandlers, ssh.DefaultRequestHandlers) maps.Copy(ss.ChannelHandlers, ssh.DefaultChannelHandlers) maps.Copy(ss.SubsystemHandlers, ssh.DefaultSubsystemHandlers) - keys, err := srv.lb.GetSSH_HostKeys() + keys, err := getHostKeys(srv.lb.TailscaleVarRoot(), srv.logf) if err != nil { return nil, err } diff --git a/ssh/tailssh/tailssh_integration_test.go b/ssh/tailssh/tailssh_integration_test.go index 1135bebbc2a5b..ea2fe85776b7c 100644 --- a/ssh/tailssh/tailssh_integration_test.go +++ b/ssh/tailssh/tailssh_integration_test.go @@ -32,7 +32,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/pkg/sftp" "golang.org/x/crypto/ssh" - gossh "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" @@ -631,28 +630,6 @@ type testBackend struct { allowSendEnv bool } -func (tb *testBackend) GetSSH_HostKeys() ([]gossh.Signer, error) { - var result []gossh.Signer - var priv any - var err error - const keySize = 2048 - priv, err = rsa.GenerateKey(rand.Reader, keySize) - if err != nil { - return nil, err - } - mk, err := 
x509.MarshalPKCS8PrivateKey(priv) - if err != nil { - return nil, err - } - hostKey := pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: mk}) - signer, err := gossh.ParsePrivateKey(hostKey) - if err != nil { - return nil, err - } - result = append(result, signer) - return result, nil -} - func (tb *testBackend) ShouldRunSSH() bool { return true } diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 4ef3cbd461812..3bf6a72c3ba2b 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -9,7 +9,6 @@ import ( "bytes" "context" "crypto/ecdsa" - "crypto/ed25519" "crypto/elliptic" "crypto/rand" "encoding/json" @@ -34,7 +33,6 @@ import ( "testing/synctest" "time" - gossh "golang.org/x/crypto/ssh" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" "tailscale.com/cmd/testwrapper/flakytest" @@ -381,6 +379,7 @@ func TestEvalSSHPolicy(t *testing.T) { type localState struct { sshEnabled bool matchingRule *tailcfg.SSHRule + varRoot string // if empty, TailscaleVarRoot returns "" // serverActions is a map of the action name to the action. // It is served for paths like https://unused/ssh-action/. @@ -388,31 +387,12 @@ type localState struct { serverActions map[string]*tailcfg.SSHAction } -var ( - currentUser = os.Getenv("USER") // Use the current user for the test. - testSigner gossh.Signer - testSignerOnce sync.Once -) +var currentUser = os.Getenv("USER") // Use the current user for the test. 
func (ts *localState) Dialer() *tsdial.Dialer { return &tsdial.Dialer{} } -func (ts *localState) GetSSH_HostKeys() ([]gossh.Signer, error) { - testSignerOnce.Do(func() { - _, priv, err := ed25519.GenerateKey(rand.Reader) - if err != nil { - panic(err) - } - s, err := gossh.NewSignerFromSigner(priv) - if err != nil { - panic(err) - } - testSigner = s - }) - return []gossh.Signer{testSigner}, nil -} - func (ts *localState) ShouldRunSSH() bool { return ts.sshEnabled } @@ -468,7 +448,7 @@ func (ts *localState) DoNoiseRequest(req *http.Request) (*http.Response, error) } func (ts *localState) TailscaleVarRoot() string { - return "" + return ts.varRoot } func (ts *localState) NodeKey() key.NodePublic { @@ -505,6 +485,7 @@ func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { logf: tstest.WhileTestRunningLogger(t), lb: &localState{ sshEnabled: true, + varRoot: t.TempDir(), matchingRule: newSSHRule( &tailcfg.SSHAction{ Accept: true, @@ -633,6 +614,7 @@ func TestMultipleRecorders(t *testing.T) { logf: tstest.WhileTestRunningLogger(t), lb: &localState{ sshEnabled: true, + varRoot: t.TempDir(), matchingRule: newSSHRule( &tailcfg.SSHAction{ Accept: true, @@ -724,6 +706,7 @@ func TestSSHRecordingNonInteractive(t *testing.T) { logf: tstest.WhileTestRunningLogger(t), lb: &localState{ sshEnabled: true, + varRoot: t.TempDir(), matchingRule: newSSHRule( &tailcfg.SSHAction{ Accept: true, @@ -792,6 +775,7 @@ func TestSSHAuthFlow(t *testing.T) { if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS) } + varRoot := t.TempDir() acceptRule := newSSHRule(&tailcfg.SSHAction{ Accept: true, Message: "Welcome to Tailscale SSH!", @@ -818,6 +802,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "no-policy", state: &localState{ sshEnabled: true, + varRoot: varRoot, }, authErr: true, wantBanners: []string{"tailscale: tailnet policy does not permit you to SSH to this node\n"}, @@ -826,6 +811,7 @@ func 
TestSSHAuthFlow(t *testing.T) { name: "user-mismatch", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: bobRule, }, authErr: true, @@ -835,6 +821,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "accept", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: acceptRule, }, wantBanners: []string{"Welcome to Tailscale SSH!"}, @@ -843,6 +830,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "reject", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: rejectRule, }, wantBanners: []string{"Go Away!"}, @@ -852,6 +840,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "simple-check", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: newSSHRule(&tailcfg.SSHAction{ HoldAndDelegate: "https://unused/ssh-action/accept", }), @@ -865,6 +854,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "multi-check", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: newSSHRule(&tailcfg.SSHAction{ Message: "First", HoldAndDelegate: "https://unused/ssh-action/check1", @@ -883,6 +873,7 @@ func TestSSHAuthFlow(t *testing.T) { name: "check-reject", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: newSSHRule(&tailcfg.SSHAction{ Message: "First", HoldAndDelegate: "https://unused/ssh-action/reject", @@ -899,6 +890,7 @@ func TestSSHAuthFlow(t *testing.T) { sshUser: "alice+password", state: &localState{ sshEnabled: true, + varRoot: varRoot, matchingRule: acceptRule, }, usesPassword: true, diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 2df729c21d3f0..79700c713618c 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -396,8 +396,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from 
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase @@ -407,8 +406,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ - LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal - LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ @@ -469,7 +466,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/aes from crypto/tls+ crypto/cipher from crypto/aes+ crypto/des from crypto/tls+ - crypto/dsa from crypto/x509+ + crypto/dsa from crypto/x509 crypto/ecdh from crypto/ecdsa+ crypto/ecdsa from crypto/tls+ crypto/ed25519 from crypto/tls+ @@ -518,9 +515,9 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/internal/randutil from crypto/internal/rand crypto/internal/sysrand from crypto/internal/fips140/drbg crypto/md5 from crypto/tls+ - crypto/mlkem from golang.org/x/crypto/ssh+ + crypto/mlkem from crypto/hpke+ crypto/rand from crypto/ed25519+ - crypto/rc4 from crypto/tls+ + crypto/rc4 from crypto/tls crypto/rsa from crypto/tls+ crypto/sha1 from crypto/tls+ crypto/sha256 from crypto/tls+ diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index a2bf76e18c765..f9342d7a710fe 100644 --- a/tsnet/tsnet_test.go 
+++ b/tsnet/tsnet_test.go @@ -2631,6 +2631,10 @@ func TestDeps(t *testing.T) { deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", + BadDeps: map[string]string{ + "golang.org/x/crypto/ssh": "tsnet should not depend on SSH", + "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf": "tsnet should not depend on SSH", + }, OnDep: func(dep string) { if strings.Contains(dep, "portlist") { t.Errorf("unexpected dep: %q", dep) diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index 112f04767c89d..70e0d75faf3eb 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -20,6 +20,7 @@ import ( _ "tailscale.com/feature" _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" + _ "tailscale.com/feature/ssh" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" @@ -40,7 +41,6 @@ import ( _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" - _ "tailscale.com/ssh/tailssh" _ "tailscale.com/syncs" _ "tailscale.com/tailcfg" _ "tailscale.com/tsd" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index 112f04767c89d..70e0d75faf3eb 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -20,6 +20,7 @@ import ( _ "tailscale.com/feature" _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" + _ "tailscale.com/feature/ssh" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" @@ -40,7 +41,6 @@ import ( _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" - _ "tailscale.com/ssh/tailssh" _ "tailscale.com/syncs" _ "tailscale.com/tailcfg" _ "tailscale.com/tsd" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index 
112f04767c89d..70e0d75faf3eb 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -20,6 +20,7 @@ import ( _ "tailscale.com/feature" _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" + _ "tailscale.com/feature/ssh" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" @@ -40,7 +41,6 @@ import ( _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" - _ "tailscale.com/ssh/tailssh" _ "tailscale.com/syncs" _ "tailscale.com/tailcfg" _ "tailscale.com/tsd" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index 112f04767c89d..70e0d75faf3eb 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -20,6 +20,7 @@ import ( _ "tailscale.com/feature" _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" + _ "tailscale.com/feature/ssh" _ "tailscale.com/health" _ "tailscale.com/hostinfo" _ "tailscale.com/ipn" @@ -40,7 +41,6 @@ import ( _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" - _ "tailscale.com/ssh/tailssh" _ "tailscale.com/syncs" _ "tailscale.com/tailcfg" _ "tailscale.com/tsd" From 16fa81e8047c51c7f25caaff485d5a91c08f10f2 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 10 Mar 2026 17:52:55 +0000 Subject: [PATCH 1083/1093] wgengine: add API to force a disco key for experiments, testing Updates #12639 Updates tailscale/corp#24454 Change-Id: I2361206aec197a7eecbdf29d87b1b75335ee8eec Signed-off-by: Brad Fitzpatrick --- wgengine/magicsock/magicsock.go | 10 ++++++++++ wgengine/userspace.go | 8 ++++++++ 2 files changed, 18 insertions(+) diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 78ffd0cd0e0f5..f61e85b37fcec 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -495,6 
+495,13 @@ type Options struct { // DisablePortMapper, if true, disables the portmapper. // This is primarily useful in tests. DisablePortMapper bool + + // ForceDiscoKey, if non-zero, forces the use of a specific disco + // private key. This should only be used for special cases and + // experiments, not for production. The recommended normal path is to + // leave it zero, in which case a new disco key is generated per + // Tailscale start and kept only in memory. + ForceDiscoKey key.DiscoPrivate } func (o *Options) logf() logger.Logf { @@ -622,6 +629,9 @@ func NewConn(opts Options) (*Conn, error) { } c := newConn(opts.logf()) + if !opts.ForceDiscoKey.IsZero() { + c.discoAtomic.Set(opts.ForceDiscoKey) + } c.eventBus = opts.EventBus c.port.Store(uint32(opts.Port)) c.controlKnobs = opts.ControlKnobs diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 245ce421fbe5a..705555d4446a6 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -265,6 +265,13 @@ type Config struct { // Conn25PacketHooks, if non-nil, is used to hook packets for Connectors 2025 // app connector handling logic. Conn25PacketHooks Conn25PacketHooks + + // ForceDiscoKey, if non-zero, forces the use of a specific disco + // private key. This should only be used for special cases and + // experiments, not for production. The recommended normal path is to + // leave it zero, in which case a new disco key is generated per + // Tailscale start and kept only in memory. + ForceDiscoKey key.DiscoPrivate } // NewFakeUserspaceEngine returns a new userspace engine for testing. 
@@ -433,6 +440,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) Metrics: conf.Metrics, ControlKnobs: conf.ControlKnobs, PeerByKeyFunc: e.PeerByKey, + ForceDiscoKey: conf.ForceDiscoKey, } if buildfeatures.HasLazyWG { magicsockOpts.NoteRecvActivity = e.noteRecvActivity From 7a43e41a27eeb792c13a0a865a7381bfb7c51e87 Mon Sep 17 00:00:00 2001 From: Gesa Stupperich Date: Wed, 11 Mar 2026 08:15:21 +0000 Subject: [PATCH 1084/1093] client/web: signal need to wait for auth across tabs This amends the session creation and auth status querying logic of the device UI backend. On creation of new browser sessions we now store a PendingAuth flag as part of the session that indicates a pending auth process that needs to be awaited. On auth status queries, the server initiates a polling for the auth result if it finds this flag to be true. Once the polling is completes, the flag is set to false. Why this change was necessary: with regular browser settings, the device UI frontend opens the control auth URL in a new tab and starts polling for the results of the auth flow in the current tab. With certain browser settings (that we still want to support), however, the auth URL opens in the same tab, thus aborting the subsequent call to auth/session/wait that initiates the polling, and preventing successful registration of the auth results in the session status. The new logic ensures the polling happens on the next call to /api/auth in these kinds of scenarios. In addition to ensuring the auth wait happens, we now also revalidate the auth state whenever an open tab regains focus, so that auth changes effected in one tab propagate to other tabs without the need to refresh. This improves the experience for all users of the web client when they've got multiple tabs open, regardless of their browser settings. 
Fixes #11905 Signed-off-by: Gesa Stupperich --- client/web/auth.go | 17 ++++++-- client/web/src/api.ts | 11 +++-- client/web/src/hooks/auth.ts | 78 +++++++++++++++--------------------- client/web/web.go | 13 ++++++ client/web/web_test.go | 51 ++++++++++++++--------- 5 files changed, 99 insertions(+), 71 deletions(-) diff --git a/client/web/auth.go b/client/web/auth.go index 4e25b049b30ac..916f24782d55a 100644 --- a/client/web/auth.go +++ b/client/web/auth.go @@ -37,6 +37,7 @@ type browserSession struct { AuthURL string // from tailcfg.WebClientAuthResponse Created time.Time Authenticated bool + PendingAuth bool } // isAuthorized reports true if the given session is authorized @@ -172,12 +173,14 @@ func (s *Server) newSession(ctx context.Context, src *apitype.WhoIsResponse) (*b } session.AuthID = a.ID session.AuthURL = a.URL + session.PendingAuth = true } else { // control does not support check mode, so there is no additional auth we can do. session.Authenticated = true } s.browserSessions.Store(sid, session) + return session, nil } @@ -206,16 +209,24 @@ func (s *Server) awaitUserAuth(ctx context.Context, session *browserSession) err if session.isAuthorized(s.timeNow()) { return nil // already authorized } + a, err := s.waitAuthURL(ctx, session.AuthID, session.SrcNode) if err != nil { - // Clean up the session. Doing this on any error from control - // server to avoid the user getting stuck with a bad session - // cookie. + // Don't delete the session on context cancellation, as this is expected + // when users navigate away or refresh the page. + if errors.Is(err, context.Canceled) { + return err + } + + // Clean up the session for non-cancellation errors from control server + // to avoid the user getting stuck with a bad session cookie. 
s.browserSessions.Delete(session.ID) return err } + if a.Complete { session.Authenticated = a.Complete + session.PendingAuth = false s.browserSessions.Store(session.ID, session) } return nil diff --git a/client/web/src/api.ts b/client/web/src/api.ts index 246f74ff231c2..ea64742cdd339 100644 --- a/client/web/src/api.ts +++ b/client/web/src/api.ts @@ -123,7 +123,10 @@ export function useAPI() { return apiFetch<{ url?: string }>("/up", "POST", t.data) .then((d) => d.url && window.open(d.url, "_blank")) // "up" login step .then(() => incrementMetric("web_client_node_connect")) - .then(() => mutate("/data")) + .then(() => { + mutate("/data") + mutate("/auth") + }) .catch(handlePostError("Failed to login")) /** @@ -134,9 +137,9 @@ export function useAPI() { // For logout, must increment metric before running api call, // as tailscaled will be unreachable after the call completes. incrementMetric("web_client_node_disconnect") - return apiFetch("/local/v0/logout", "POST").catch( - handlePostError("Failed to logout") - ) + return apiFetch("/local/v0/logout", "POST") + .then(() => mutate("/auth")) + .catch(handlePostError("Failed to logout")) /** * "new-auth-session" handles creating a new check mode session to diff --git a/client/web/src/hooks/auth.ts b/client/web/src/hooks/auth.ts index c3d0cdc877022..c676647ca0b7e 100644 --- a/client/web/src/hooks/auth.ts +++ b/client/web/src/hooks/auth.ts @@ -3,6 +3,7 @@ import { useCallback, useEffect, useState } from "react" import { apiFetch, setSynoToken } from "src/api" +import useSWR from "swr" export type AuthResponse = { serverMode: AuthServerMode @@ -49,33 +50,26 @@ export function hasAnyEditCapabilities(auth: AuthResponse): boolean { * useAuth reports and refreshes Tailscale auth status for the web client. 
*/ export default function useAuth() { - const [data, setData] = useState() - const [loading, setLoading] = useState(true) + const { data, error, mutate } = useSWR("/auth") const [ranSynoAuth, setRanSynoAuth] = useState(false) - const loadAuth = useCallback(() => { - setLoading(true) - return apiFetch("/auth", "GET") - .then((d) => { - setData(d) - if (d.needsSynoAuth) { - fetch("/webman/login.cgi") - .then((r) => r.json()) - .then((a) => { - setSynoToken(a.SynoToken) - setRanSynoAuth(true) - setLoading(false) - }) - } else { - setLoading(false) - } - return d - }) - .catch((error) => { - setLoading(false) - console.error(error) - }) - }, []) + const loading = !data && !error + + // Start Synology auth flow if needed. + useEffect(() => { + if (data?.needsSynoAuth && !ranSynoAuth) { + fetch("/webman/login.cgi") + .then((r) => r.json()) + .then((a) => { + setSynoToken(a.SynoToken) + setRanSynoAuth(true) + mutate() + }) + .catch((error) => { + console.error("Synology auth error:", error) + }) + } + }, [data?.needsSynoAuth, ranSynoAuth, mutate]) const newSession = useCallback(() => { return apiFetch<{ authUrl?: string }>("/auth/session/new", "GET") @@ -86,34 +80,26 @@ export default function useAuth() { } }) .then(() => { - loadAuth() + mutate() }) .catch((error) => { console.error(error) }) - }, [loadAuth]) + }, [mutate]) + // Start regular auth flow. useEffect(() => { - loadAuth().then((d) => { - if (!d) { - return - } - if ( - !d.authorized && - hasAnyEditCapabilities(d) && - // Start auth flow immediately if browser has requested it. 
- new URLSearchParams(window.location.search).get("check") === "now" - ) { - newSession() - } - }) - // eslint-disable-next-line react-hooks/exhaustive-deps - }, []) + const needsAuth = + data && + !loading && + !data.authorized && + hasAnyEditCapabilities(data) && + new URLSearchParams(window.location.search).get("check") === "now" - useEffect(() => { - loadAuth() // Refresh auth state after syno auth runs - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [ranSynoAuth]) + if (needsAuth) { + newSession() + } + }, [data, loading, newSession]) return { data, diff --git a/client/web/web.go b/client/web/web.go index f8a9e7c1769a2..3e5fa4b54cbcf 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -771,6 +771,19 @@ func (s *Server) serveAPIAuth(w http.ResponseWriter, r *http.Request) { } } + // We might have a session for which we haven't awaited the result yet. + // This can happen when the AuthURL opens in the same browser tab instead + // of a new one due to browser settings. + // (See https://github.com/tailscale/tailscale/issues/11905) + // We therefore set a PendingAuth flag when creating a new session, check + // it here and call awaitUserAuth if we find it to be true. Once the auth + // wait completes, awaitUserAuth will set PendingAuth to false. 
+ if sErr == nil && session.PendingAuth == true { + if err := s.awaitUserAuth(r.Context(), session); err != nil { + sErr = err + } + } + switch { case sErr != nil && errors.Is(sErr, errNotUsingTailscale): s.lc.IncrementCounter(r.Context(), "web_client_viewing_local", 1) diff --git a/client/web/web_test.go b/client/web/web_test.go index 6b9a51002b33b..fa44e55452a2a 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -582,12 +582,23 @@ func TestServeAuth(t *testing.T) { successCookie := "ts-cookie-success" s.browserSessions.Store(successCookie, &browserSession{ - ID: successCookie, - SrcNode: remoteNode.Node.ID, - SrcUser: user.ID, - Created: oneHourAgo, - AuthID: testAuthPathSuccess, - AuthURL: *testControlURL + testAuthPathSuccess, + ID: successCookie, + SrcNode: remoteNode.Node.ID, + SrcUser: user.ID, + Created: oneHourAgo, + AuthID: testAuthPathSuccess, + AuthURL: *testControlURL + testAuthPathSuccess, + PendingAuth: true, + }) + successCookie2 := "ts-cookie-success-2" + s.browserSessions.Store(successCookie2, &browserSession{ + ID: successCookie2, + SrcNode: remoteNode.Node.ID, + SrcUser: user.ID, + Created: oneHourAgo, + AuthID: testAuthPathSuccess, + AuthURL: *testControlURL + testAuthPathSuccess, + PendingAuth: true, }) failureCookie := "ts-cookie-failure" s.browserSessions.Store(failureCookie, &browserSession{ @@ -642,14 +653,15 @@ func TestServeAuth(t *testing.T) { AuthID: testAuthPath, AuthURL: *testControlURL + testAuthPath, Authenticated: false, + PendingAuth: true, }, }, { - name: "query-existing-incomplete-session", - path: "/api/auth", + name: "existing-session-used", + path: "/api/auth/session/new", // should not create new session cookie: successCookie, wantStatus: http.StatusOK, - wantResp: &authResponse{ViewerIdentity: vi, ServerMode: ManageServerMode}, + wantResp: &newSessionAuthResponse{AuthURL: *testControlURL + testAuthPathSuccess}, wantSession: &browserSession{ ID: successCookie, SrcNode: remoteNode.Node.ID, @@ -658,14 +670,15 
@@ func TestServeAuth(t *testing.T) { AuthID: testAuthPathSuccess, AuthURL: *testControlURL + testAuthPathSuccess, Authenticated: false, + PendingAuth: true, }, }, { - name: "existing-session-used", - path: "/api/auth/session/new", // should not create new session + name: "transition-to-successful-session-via-api-auth-session-wait", + path: "/api/auth/session/wait", cookie: successCookie, wantStatus: http.StatusOK, - wantResp: &newSessionAuthResponse{AuthURL: *testControlURL + testAuthPathSuccess}, + wantResp: nil, wantSession: &browserSession{ ID: successCookie, SrcNode: remoteNode.Node.ID, @@ -673,17 +686,17 @@ func TestServeAuth(t *testing.T) { Created: oneHourAgo, AuthID: testAuthPathSuccess, AuthURL: *testControlURL + testAuthPathSuccess, - Authenticated: false, + Authenticated: true, }, }, { - name: "transition-to-successful-session", - path: "/api/auth/session/wait", - cookie: successCookie, + name: "transition-to-successful-session-via-api-auth", + path: "/api/auth", + cookie: successCookie2, wantStatus: http.StatusOK, - wantResp: nil, + wantResp: &authResponse{Authorized: true, ViewerIdentity: vi, ServerMode: ManageServerMode}, wantSession: &browserSession{ - ID: successCookie, + ID: successCookie2, SrcNode: remoteNode.Node.ID, SrcUser: user.ID, Created: oneHourAgo, @@ -731,6 +744,7 @@ func TestServeAuth(t *testing.T) { AuthID: testAuthPath, AuthURL: *testControlURL + testAuthPath, Authenticated: false, + PendingAuth: true, }, }, { @@ -748,6 +762,7 @@ func TestServeAuth(t *testing.T) { AuthID: testAuthPath, AuthURL: *testControlURL + testAuthPath, Authenticated: false, + PendingAuth: true, }, }, { From 95a135ead10db7378d7cf22aa21deba7e5ce1bc7 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 11 Mar 2026 10:25:57 +0000 Subject: [PATCH 1085/1093] cmd/{containerboot,k8s-operator}: reissue auth keys for broken proxies (#16450) Adds logic for containerboot to signal that it can't auth, so the operator can reissue a new auth key. 
This only applies when running with a config file and with a kube state store. If the operator sees reissue_authkey in a state Secret, it will create a new auth key iff the config has no auth key or its auth key matches the value of reissue_authkey from the state Secret. This is to ensure we don't reissue auth keys in a tight loop if the proxy is slow to start or failing for some other reason. The reissue logic also uses a burstable rate limiter to ensure there's no way a terminally misconfigured or buggy operator can automatically generate new auth keys in a tight loop. Additional implementation details (ChaosInTheCRD): - Added `ipn.NotifyInitialHealthState` to ipn watcher, to ensure that `n.Health` is populated when notify's are returned. - on auth failure, containerboot: - Disconnects from control server - Sets reissue_authkey marker in state Secret with the failing key - Polls config file for new auth key (10 minute timeout) - Restarts after receiving new key to apply it - modified operator's reissue logic slightly: - Deletes old device from tailnet before creating new key - Rate limiting: 1 key per 30s with initial burst equal to replica count - In-flight tracking (authKeyReissuing map) prevents duplicate API calls across reconcile loops Updates #14080 Change-Id: I6982f8e741932a6891f2f48a2936f7f6a455317f (cherry picked from commit 969927c47c3d4de05e90f5b26a6d8d931c5ceed4) Signed-off-by: Tom Proctor Co-authored-by: chaosinthecrd --- cmd/containerboot/kube.go | 148 ++++++++++++-- cmd/containerboot/kube_test.go | 72 ++++++- cmd/containerboot/main.go | 66 ++++++- cmd/containerboot/main_test.go | 189 ++++++++++++++++-- cmd/k8s-operator/operator.go | 3 + cmd/k8s-operator/proxygroup.go | 216 +++++++++++++++----- cmd/k8s-operator/proxygroup_test.go | 297 ++++++++++++++++++++++++---- cmd/k8s-operator/sts.go | 14 +- cmd/k8s-operator/testutils_test.go | 4 +- cmd/k8s-operator/tsrecorder_test.go | 2 +- kube/kubetypes/types.go | 16 +- 11 files changed, 873 insertions(+), 
154 deletions(-) diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 4943bddba7ad4..73f5819b406db 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -14,9 +14,12 @@ import ( "net/http" "net/netip" "os" + "path/filepath" "strings" "time" + "github.com/fsnotify/fsnotify" + "tailscale.com/client/local" "tailscale.com/ipn" "tailscale.com/kube/egressservices" "tailscale.com/kube/ingressservices" @@ -26,9 +29,11 @@ import ( "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/backoff" - "tailscale.com/util/set" ) +const fieldManager = "tailscale-container" +const kubeletMountedConfigLn = "..data" + // kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. We use // this rather than any of the upstream Kubernetes client libaries to avoid extra imports. type kubeClient struct { @@ -46,7 +51,7 @@ func newKubeClient(root string, stateSecret string) (*kubeClient, error) { var err error kc, err := kubeclient.New("tailscale-container") if err != nil { - return nil, fmt.Errorf("Error creating kube client: %w", err) + return nil, fmt.Errorf("error creating kube client: %w", err) } if (root != "/") || os.Getenv("TS_KUBERNETES_READ_API_SERVER_ADDRESS_FROM_ENV") == "true" { // Derive the API server address from the environment variables @@ -63,7 +68,7 @@ func (kc *kubeClient) storeDeviceID(ctx context.Context, deviceID tailcfg.Stable kubetypes.KeyDeviceID: []byte(deviceID), }, } - return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) } // storeDeviceEndpoints writes device's tailnet IPs and MagicDNS name to fields 'device_ips', 'device_fqdn' of client's @@ -84,7 +89,7 @@ func (kc *kubeClient) storeDeviceEndpoints(ctx context.Context, fqdn string, add kubetypes.KeyDeviceIPs: deviceIPs, }, } - return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, 
"tailscale-container") + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) } // storeHTTPSEndpoint writes an HTTPS endpoint exposed by this device via 'tailscale serve' to the client's state @@ -96,7 +101,7 @@ func (kc *kubeClient) storeHTTPSEndpoint(ctx context.Context, ep string) error { kubetypes.KeyHTTPSEndpoint: []byte(ep), }, } - return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) } // deleteAuthKey deletes the 'authkey' field of the given kube @@ -122,7 +127,7 @@ func (kc *kubeClient) deleteAuthKey(ctx context.Context) error { // resetContainerbootState resets state from previous runs of containerboot to // ensure the operator doesn't use stale state when a Pod is first recreated. -func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string) error { +func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string, tailscaledConfigAuthkey string) error { existingSecret, err := kc.GetSecret(ctx, kc.stateSecret) switch { case kubeclient.IsNotFoundErr(err): @@ -131,32 +136,135 @@ func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string case err != nil: return fmt.Errorf("failed to read state Secret %q to reset state: %w", kc.stateSecret, err) } + s := &kubeapi.Secret{ Data: map[string][]byte{ kubetypes.KeyCapVer: fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion), + + // TODO(tomhjp): Perhaps shouldn't clear device ID and use a different signal, as this could leak tailnet devices. 
+ kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, }, } if podUID != "" { s.Data[kubetypes.KeyPodUID] = []byte(podUID) } - toClear := set.SetOf([]string{ - kubetypes.KeyDeviceID, - kubetypes.KeyDeviceFQDN, - kubetypes.KeyDeviceIPs, - kubetypes.KeyHTTPSEndpoint, - egressservices.KeyEgressServices, - ingressservices.IngressConfigKey, - }) - for key := range existingSecret.Data { - if toClear.Contains(key) { - // It's fine to leave the key in place as a debugging breadcrumb, - // it should get a new value soon. - s.Data[key] = nil + // Only clear reissue_authkey if the operator has actioned it. + brokenAuthkey, ok := existingSecret.Data[kubetypes.KeyReissueAuthkey] + if ok && tailscaledConfigAuthkey != "" && string(brokenAuthkey) != tailscaledConfigAuthkey { + s.Data[kubetypes.KeyReissueAuthkey] = nil + } + + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) +} + +func (kc *kubeClient) setAndWaitForAuthKeyReissue(ctx context.Context, client *local.Client, cfg *settings, tailscaledConfigAuthKey string) error { + err := client.DisconnectControl(ctx) + if err != nil { + return fmt.Errorf("error disconnecting from control: %w", err) + } + + err = kc.setReissueAuthKey(ctx, tailscaledConfigAuthKey) + if err != nil { + return fmt.Errorf("failed to set reissue_authkey in Kubernetes Secret: %w", err) + } + + err = kc.waitForAuthKeyReissue(ctx, cfg.TailscaledConfigFilePath, tailscaledConfigAuthKey, 10*time.Minute) + if err != nil { + return fmt.Errorf("failed to receive new auth key: %w", err) + } + + return nil +} + +func (kc *kubeClient) setReissueAuthKey(ctx context.Context, authKey string) error { + s := &kubeapi.Secret{ + Data: map[string][]byte{ + kubetypes.KeyReissueAuthkey: []byte(authKey), + }, + } + + log.Printf("Requesting a new auth key from operator") + return 
kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) +} + +func (kc *kubeClient) waitForAuthKeyReissue(ctx context.Context, configPath string, oldAuthKey string, maxWait time.Duration) error { + log.Printf("Waiting for operator to provide new auth key (max wait: %v)", maxWait) + + ctx, cancel := context.WithTimeout(ctx, maxWait) + defer cancel() + + tailscaledCfgDir := filepath.Dir(configPath) + toWatch := filepath.Join(tailscaledCfgDir, kubeletMountedConfigLn) + + var ( + pollTicker <-chan time.Time + eventChan <-chan fsnotify.Event + ) + + pollInterval := 5 * time.Second + + // Try to use fsnotify for faster notification + if w, err := fsnotify.NewWatcher(); err != nil { + log.Printf("auth key reissue: fsnotify unavailable, using polling: %v", err) + } else if err := w.Add(tailscaledCfgDir); err != nil { + w.Close() + log.Printf("auth key reissue: fsnotify watch failed, using polling: %v", err) + } else { + defer w.Close() + log.Printf("auth key reissue: watching for config changes via fsnotify") + eventChan = w.Events + } + + // still keep polling if using fsnotify, for logging and in case fsnotify fails + pt := time.NewTicker(pollInterval) + defer pt.Stop() + pollTicker = pt.C + + start := time.Now() + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout waiting for auth key reissue after %v", maxWait) + case <-pollTicker: // Waits for polling tick, continues when received + case event := <-eventChan: + if event.Name != toWatch { + continue + } + } + + newAuthKey := authkeyFromTailscaledConfig(configPath) + if newAuthKey != "" && newAuthKey != oldAuthKey { + log.Printf("New auth key received from operator after %v", time.Since(start).Round(time.Second)) + + if err := kc.clearReissueAuthKeyRequest(ctx); err != nil { + log.Printf("Warning: failed to clear reissue request: %v", err) + } + + return nil + } + + if eventChan == nil && pollTicker != nil { + log.Printf("Waiting for new auth key from operator (%v elapsed)", 
time.Since(start).Round(time.Second)) } } +} - return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, "tailscale-container") +// clearReissueAuthKeyRequest removes the reissue_authkey marker from the Secret +// to signal to the operator that we've successfully received the new key. +func (kc *kubeClient) clearReissueAuthKeyRequest(ctx context.Context) error { + s := &kubeapi.Secret{ + Data: map[string][]byte{ + kubetypes.KeyReissueAuthkey: nil, + }, + } + return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager) } // waitForConsistentState waits for tailscaled to finish writing state if it diff --git a/cmd/containerboot/kube_test.go b/cmd/containerboot/kube_test.go index bc80e9cdf2cb3..6acaa60e1588e 100644 --- a/cmd/containerboot/kube_test.go +++ b/cmd/containerboot/kube_test.go @@ -248,25 +248,42 @@ func TestResetContainerbootState(t *testing.T) { capver := fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion) for name, tc := range map[string]struct { podUID string + authkey string initial map[string][]byte expected map[string][]byte }{ "empty_initial": { podUID: "1234", + authkey: "new-authkey", initial: map[string][]byte{}, expected: map[string][]byte{ kubetypes.KeyCapVer: capver, kubetypes.KeyPodUID: []byte("1234"), + // Cleared keys. + kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, }, }, "empty_initial_no_pod_uid": { initial: map[string][]byte{}, expected: map[string][]byte{ kubetypes.KeyCapVer: capver, + // Cleared keys. 
+ kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, }, }, "only_relevant_keys_updated": { - podUID: "1234", + podUID: "1234", + authkey: "new-authkey", initial: map[string][]byte{ kubetypes.KeyCapVer: []byte("1"), kubetypes.KeyPodUID: []byte("5678"), @@ -295,6 +312,57 @@ func TestResetContainerbootState(t *testing.T) { // Tailscaled keys not included in patch. }, }, + "new_authkey_issued": { + initial: map[string][]byte{ + kubetypes.KeyReissueAuthkey: []byte("old-authkey"), + }, + authkey: "new-authkey", + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyReissueAuthkey: nil, + // Cleared keys. + kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + }, + }, + "authkey_not_yet_updated": { + initial: map[string][]byte{ + kubetypes.KeyReissueAuthkey: []byte("old-authkey"), + }, + authkey: "old-authkey", + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + // reissue_authkey not cleared. + // Cleared keys. + kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + }, + }, + "authkey_deleted_from_config": { + initial: map[string][]byte{ + kubetypes.KeyReissueAuthkey: []byte("old-authkey"), + }, + authkey: "", + expected: map[string][]byte{ + kubetypes.KeyCapVer: capver, + // reissue_authkey not cleared. + // Cleared keys. 
+ kubetypes.KeyDeviceID: nil, + kubetypes.KeyDeviceFQDN: nil, + kubetypes.KeyDeviceIPs: nil, + kubetypes.KeyHTTPSEndpoint: nil, + egressservices.KeyEgressServices: nil, + ingressservices.IngressConfigKey: nil, + }, + }, } { t.Run(name, func(t *testing.T) { var actual map[string][]byte @@ -309,7 +377,7 @@ func TestResetContainerbootState(t *testing.T) { return nil }, }} - if err := kc.resetContainerbootState(context.Background(), tc.podUID); err != nil { + if err := kc.resetContainerbootState(context.Background(), tc.podUID, tc.authkey); err != nil { t.Fatalf("resetContainerbootState() error = %v", err) } if diff := cmp.Diff(tc.expected, actual); diff != "" { diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index ba47111fd797f..76c6e910a9dbc 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -137,7 +137,9 @@ import ( "golang.org/x/sys/unix" "tailscale.com/client/tailscale" + "tailscale.com/health" "tailscale.com/ipn" + "tailscale.com/ipn/conffile" kubeutils "tailscale.com/k8s-operator" healthz "tailscale.com/kube/health" "tailscale.com/kube/kubetypes" @@ -206,6 +208,11 @@ func run() error { bootCtx, cancel := context.WithTimeout(ctx, 60*time.Second) defer cancel() + var tailscaledConfigAuthkey string + if isOneStepConfig(cfg) { + tailscaledConfigAuthkey = authkeyFromTailscaledConfig(cfg.TailscaledConfigFilePath) + } + var kc *kubeClient if cfg.KubeSecret != "" { kc, err = newKubeClient(cfg.Root, cfg.KubeSecret) @@ -219,7 +226,7 @@ func run() error { // hasKubeStateStore because although we know we're in kube, that // doesn't guarantee the state store is properly configured. 
if hasKubeStateStore(cfg) { - if err := kc.resetContainerbootState(bootCtx, cfg.PodUID); err != nil { + if err := kc.resetContainerbootState(bootCtx, cfg.PodUID, tailscaledConfigAuthkey); err != nil { return fmt.Errorf("error clearing previous state from Secret: %w", err) } } @@ -299,7 +306,7 @@ func run() error { } } - w, err := client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialPrefs|ipn.NotifyInitialState) + w, err := client.WatchIPNBus(bootCtx, ipn.NotifyInitialNetMap|ipn.NotifyInitialPrefs|ipn.NotifyInitialState|ipn.NotifyInitialHealthState) if err != nil { return fmt.Errorf("failed to watch tailscaled for updates: %w", err) } @@ -365,8 +372,23 @@ authLoop: if isOneStepConfig(cfg) { // This could happen if this is the first time tailscaled was run for this // device and the auth key was not passed via the configfile. - return fmt.Errorf("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file.") + if hasKubeStateStore(cfg) { + log.Printf("Auth key missing or invalid (NeedsLogin state), disconnecting from control and requesting new key from operator") + + err := kc.setAndWaitForAuthKeyReissue(bootCtx, client, cfg, tailscaledConfigAuthkey) + if err != nil { + return fmt.Errorf("failed to get a reissued authkey: %w", err) + } + + log.Printf("Successfully received new auth key, restarting to apply configuration") + + // we don't return an error here since we have handled the reissue gracefully. 
+ return nil + } + + return errors.New("invalid state: tailscaled daemon started with a config file, but tailscale is not logged in: ensure you pass a valid auth key in the config file") } + if err := authTailscale(); err != nil { return fmt.Errorf("failed to auth tailscale: %w", err) } @@ -384,6 +406,27 @@ authLoop: log.Printf("tailscaled in state %q, waiting", *n.State) } } + + if n.Health != nil { + // This can happen if the config has an auth key but it's invalid, + // for example if it was single-use and already got used, but the + // device state was lost. + if _, ok := n.Health.Warnings[health.LoginStateWarnable.Code]; ok { + if isOneStepConfig(cfg) && hasKubeStateStore(cfg) { + log.Printf("Auth key failed to authenticate (may be expired or single-use), disconnecting from control and requesting new key from operator") + + err := kc.setAndWaitForAuthKeyReissue(bootCtx, client, cfg, tailscaledConfigAuthkey) + if err != nil { + return fmt.Errorf("failed to get a reissued authkey: %w", err) + } + + // we don't return an error here since we have handled the reissue gracefully. + log.Printf("Successfully received new auth key, restarting to apply configuration") + + return nil + } + } + } } w.Close() @@ -409,9 +452,9 @@ authLoop: // We were told to only auth once, so any secret-bound // authkey is no longer needed. We don't strictly need to // wipe it, but it's good hygiene. - log.Printf("Deleting authkey from kube secret") + log.Printf("Deleting authkey from Kubernetes Secret") if err := kc.deleteAuthKey(ctx); err != nil { - return fmt.Errorf("deleting authkey from kube secret: %w", err) + return fmt.Errorf("deleting authkey from Kubernetes Secret: %w", err) } } @@ -422,8 +465,10 @@ authLoop: // If tailscaled config was read from a mounted file, watch the file for updates and reload. 
cfgWatchErrChan := make(chan error) + cfgWatchCtx, cfgWatchCancel := context.WithCancel(ctx) + defer cfgWatchCancel() if cfg.TailscaledConfigFilePath != "" { - go watchTailscaledConfigChanges(ctx, cfg.TailscaledConfigFilePath, client, cfgWatchErrChan) + go watchTailscaledConfigChanges(cfgWatchCtx, cfg.TailscaledConfigFilePath, client, cfgWatchErrChan) } var ( @@ -523,6 +568,7 @@ runLoop: case err := <-cfgWatchErrChan: return fmt.Errorf("failed to watch tailscaled config: %w", err) case n := <-notifyChan: + // TODO: (ChaosInTheCRD) Add node removed check when supported by ipn if n.State != nil && *n.State != ipn.Running { // Something's gone wrong and we've left the authenticated state. // Our container image never recovered gracefully from this, and the @@ -979,3 +1025,11 @@ func serviceIPsFromNetMap(nm *netmap.NetworkMap, fqdn dnsname.FQDN) []netip.Pref return prefixes } + +func authkeyFromTailscaledConfig(path string) string { + if cfg, err := conffile.Load(path); err == nil && cfg.Parsed.AuthKey != nil { + return *cfg.Parsed.AuthKey + } + + return "" +} diff --git a/cmd/containerboot/main_test.go b/cmd/containerboot/main_test.go index 365cf218424de..5ea402f6678c9 100644 --- a/cmd/containerboot/main_test.go +++ b/cmd/containerboot/main_test.go @@ -32,6 +32,7 @@ import ( "github.com/google/go-cmp/cmp" "golang.org/x/sys/unix" + "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/kube/egressservices" "tailscale.com/kube/kubeclient" @@ -41,6 +42,8 @@ import ( "tailscale.com/types/netmap" ) +const configFileAuthKey = "some-auth-key" + func TestContainerBoot(t *testing.T) { boot := filepath.Join(t.TempDir(), "containerboot") if err := exec.Command("go", "build", "-ldflags", "-X main.testSleepDuration=1ms", "-o", boot, "tailscale.com/cmd/containerboot").Run(); err != nil { @@ -77,6 +80,10 @@ func TestContainerBoot(t *testing.T) { // phase (simulates our fake tailscaled doing it). 
UpdateKubeSecret map[string]string + // Update files with these paths/contents at the beginning of the phase + // (simulates the operator updating mounted config files). + UpdateFiles map[string]string + // WantFiles files that should exist in the container and their // contents. WantFiles map[string]string @@ -781,6 +788,127 @@ func TestContainerBoot(t *testing.T) { }, } }, + "sets_reissue_authkey_if_needs_login": func(env *testEnv) testCase { + newAuthKey := "new-reissued-auth-key" + return testCase{ + Env: map[string]string{ + "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(env.d, "etc/tailscaled/"), + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + }, + Phases: []phase{ + { + UpdateFiles: map[string]string{ + "etc/tailscaled/..data": "", + }, + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled/cap-95.hujson", + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + }, + }, { + Notify: &ipn.Notify{ + State: new(ipn.NeedsLogin), + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyReissueAuthkey: configFileAuthKey, + }, + WantLog: "watching for config changes via fsnotify", + }, { + UpdateFiles: map[string]string{ + "etc/tailscaled/cap-95.hujson": fmt.Sprintf(`{"Version":"alpha0","AuthKey":"%s"}`, newAuthKey), + "etc/tailscaled/..data": "updated", + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + }, + WantExitCode: new(0), + WantLog: "Successfully received new auth key, restarting to apply configuration", + }, + }, + } + }, + "sets_reissue_authkey_if_auth_fails": func(env *testEnv) testCase { + newAuthKey := "new-reissued-auth-key" + return testCase{ + Env: map[string]string{ + "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(env.d, "etc/tailscaled/"), + "KUBERNETES_SERVICE_HOST": env.kube.Host, + 
"KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + }, + Phases: []phase{ + { + UpdateFiles: map[string]string{ + "etc/tailscaled/..data": "", + }, + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled/cap-95.hujson", + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + }, + }, { + Notify: &ipn.Notify{ + Health: &health.State{ + Warnings: map[health.WarnableCode]health.UnhealthyState{ + health.LoginStateWarnable.Code: {}, + }, + }, + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + kubetypes.KeyReissueAuthkey: configFileAuthKey, + }, + WantLog: "watching for config changes via fsnotify", + }, { + UpdateFiles: map[string]string{ + "etc/tailscaled/cap-95.hujson": fmt.Sprintf(`{"Version":"alpha0","AuthKey":"%s"}`, newAuthKey), + "etc/tailscaled/..data": "updated", + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + }, + WantExitCode: new(0), + WantLog: "Successfully received new auth key, restarting to apply configuration", + }, + }, + } + }, + "clears_reissue_authkey_on_change": func(env *testEnv) testCase { + return testCase{ + Env: map[string]string{ + "TS_EXPERIMENTAL_VERSIONED_CONFIG_DIR": filepath.Join(env.d, "etc/tailscaled/"), + "KUBERNETES_SERVICE_HOST": env.kube.Host, + "KUBERNETES_SERVICE_PORT_HTTPS": env.kube.Port, + }, + KubeSecret: map[string]string{ + kubetypes.KeyReissueAuthkey: "some-older-authkey", + "foo": "bar", // Check not everything is cleared. 
+ }, + Phases: []phase{ + { + WantCmds: []string{ + "/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking --config=/etc/tailscaled/cap-95.hujson", + }, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + "foo": "bar", + }, + }, { + Notify: runningNotify, + WantKubeSecret: map[string]string{ + kubetypes.KeyCapVer: capver, + "foo": "bar", + kubetypes.KeyDeviceFQDN: "test-node.test.ts.net.", + kubetypes.KeyDeviceID: "myID", + kubetypes.KeyDeviceIPs: `["100.64.0.1"]`, + }, + }, + }, + } + }, "metrics_enabled": func(env *testEnv) testCase { return testCase{ Env: map[string]string{ @@ -1134,19 +1262,22 @@ func TestContainerBoot(t *testing.T) { for k, v := range p.UpdateKubeSecret { env.kube.SetSecret(k, v) } + for path, content := range p.UpdateFiles { + fullPath := filepath.Join(env.d, path) + if err := os.WriteFile(fullPath, []byte(content), 0700); err != nil { + t.Fatalf("phase %d: updating file %q: %v", i, path, err) + } + // Explicitly update mtime to ensure fsnotify detects the change. + // Without this, file operations can be buffered and fsnotify events may not trigger. + now := time.Now() + if err := os.Chtimes(fullPath, now, now); err != nil { + t.Fatalf("phase %d: updating mtime for %q: %v", i, path, err) + } + } env.lapi.Notify(p.Notify) if p.Signal != nil { cmd.Process.Signal(*p.Signal) } - if p.WantLog != "" { - err := tstest.WaitFor(2*time.Second, func() error { - waitLogLine(t, time.Second, cbOut, p.WantLog) - return nil - }) - if err != nil { - t.Fatal(err) - } - } if p.WantExitCode != nil { state, err := cmd.Process.Wait() @@ -1156,14 +1287,19 @@ func TestContainerBoot(t *testing.T) { if state.ExitCode() != *p.WantExitCode { t.Fatalf("phase %d: want exit code %d, got %d", i, *p.WantExitCode, state.ExitCode()) } + } - // Early test return, we don't expect the successful startup log message. 
- return + if p.WantLog != "" { + err := tstest.WaitFor(5*time.Second, func() error { + waitLogLine(t, 5*time.Second, cbOut, p.WantLog) + return nil + }) + if err != nil { + t.Fatal(err) + } } - wantCmds = append(wantCmds, p.WantCmds...) - waitArgs(t, 2*time.Second, env.d, env.argFile, strings.Join(wantCmds, "\n")) - err := tstest.WaitFor(2*time.Second, func() error { + err := tstest.WaitFor(5*time.Second, func() error { if p.WantKubeSecret != nil { got := env.kube.Secret() if diff := cmp.Diff(got, p.WantKubeSecret); diff != "" { @@ -1180,6 +1316,16 @@ func TestContainerBoot(t *testing.T) { if err != nil { t.Fatalf("test: %q phase %d: %v", name, i, err) } + + // if we provide a wanted exit code, we expect that the process is finished, + // so should return from the test. + if p.WantExitCode != nil { + return + } + + wantCmds = append(wantCmds, p.WantCmds...) + waitArgs(t, 2*time.Second, env.d, env.argFile, strings.Join(wantCmds, "\n")) + err = tstest.WaitFor(2*time.Second, func() error { for path, want := range p.WantFiles { gotBs, err := os.ReadFile(filepath.Join(env.d, path)) @@ -1393,6 +1539,13 @@ func (lc *localAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { default: panic(fmt.Sprintf("unsupported method %q", r.Method)) } + // In the localAPI ServeHTTP method + case "/localapi/v0/disconnect-control": + if r.Method != "POST" { + panic(fmt.Sprintf("unsupported method %q", r.Method)) + } + w.WriteHeader(http.StatusOK) + return default: panic(fmt.Sprintf("unsupported path %q", r.URL.Path)) } @@ -1591,7 +1744,11 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) { panic(fmt.Sprintf("json decode failed: %v. 
Body:\n\n%s", err, string(bs))) } for key, val := range req.Data { - k.secret[key] = string(val) + if val == nil { + delete(k.secret, key) + } else { + k.secret[key] = string(val) + } } default: panic(fmt.Sprintf("unknown content type %q", r.Header.Get("Content-Type"))) @@ -1659,7 +1816,7 @@ func newTestEnv(t *testing.T) testEnv { kube.Start(t) t.Cleanup(kube.Close) - tailscaledConf := &ipn.ConfigVAlpha{AuthKey: new("foo"), Version: "alpha0"} + tailscaledConf := &ipn.ConfigVAlpha{AuthKey: new(configFileAuthKey), Version: "alpha0"} serveConf := ipn.ServeConfig{TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}} serveConfWithServices := ipn.ServeConfig{ TCP: map[uint16]*ipn.TCPPortHandler{80: {HTTP: true}}, diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index ef55d27481266..1060c6f3da9e7 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -20,6 +20,7 @@ import ( "github.com/go-logr/zapr" "go.uber.org/zap" "go.uber.org/zap/zapcore" + "golang.org/x/time/rate" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -723,6 +724,8 @@ func runReconcilers(opts reconcilerOpts) { tsFirewallMode: opts.proxyFirewallMode, defaultProxyClass: opts.defaultProxyClass, loginServer: opts.tsServer.ControlURL, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), }) if err != nil { startlog.Fatalf("could not create ProxyGroup reconciler: %v", err) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 538933f14dbe1..4d5a795d79796 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -16,10 +16,12 @@ import ( "sort" "strings" "sync" + "time" dockerref "github.com/distribution/reference" "go.uber.org/zap" xslices "golang.org/x/exp/slices" + "golang.org/x/time/rate" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -94,10 +96,12 @@ type ProxyGroupReconciler 
struct { defaultProxyClass string loginServer string - mu sync.Mutex // protects following - egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge - ingressProxyGroups set.Slice[types.UID] // for ingress proxygroups gauge - apiServerProxyGroups set.Slice[types.UID] // for kube-apiserver proxygroups gauge + mu sync.Mutex // protects following + egressProxyGroups set.Slice[types.UID] // for egress proxygroups gauge + ingressProxyGroups set.Slice[types.UID] // for ingress proxygroups gauge + apiServerProxyGroups set.Slice[types.UID] // for kube-apiserver proxygroups gauge + authKeyRateLimits map[string]*rate.Limiter // per-ProxyGroup rate limiters for auth key re-issuance. + authKeyReissuing map[string]bool } func (r *ProxyGroupReconciler) logger(name string) *zap.SugaredLogger { @@ -294,7 +298,7 @@ func (r *ProxyGroupReconciler) validate(ctx context.Context, pg *tsapi.ProxyGrou func (r *ProxyGroupReconciler) maybeProvision(ctx context.Context, tailscaleClient tsClient, loginUrl string, pg *tsapi.ProxyGroup, proxyClass *tsapi.ProxyClass) (map[string][]netip.AddrPort, *notReadyReason, error) { logger := r.logger(pg.Name) r.mu.Lock() - r.ensureAddedToGaugeForProxyGroup(pg) + r.ensureStateAddedForProxyGroup(pg) r.mu.Unlock() svcToNodePorts := make(map[string]uint16) @@ -629,13 +633,13 @@ func (r *ProxyGroupReconciler) cleanupDanglingResources(ctx context.Context, tai } for _, m := range metadata { - if m.ordinal+1 <= int(pgReplicas(pg)) { + if m.ordinal+1 <= pgReplicas(pg) { continue } // Dangling resource, delete the config + state Secrets, as well as // deleting the device from the tailnet. 
- if err := r.deleteTailnetDevice(ctx, tailscaleClient, m.tsID, logger); err != nil { + if err := r.ensureDeviceDeleted(ctx, tailscaleClient, m.tsID, logger); err != nil { return err } if err := r.Delete(ctx, m.stateSecret); err != nil && !apierrors.IsNotFound(err) { @@ -687,7 +691,7 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, tailscaleClient } for _, m := range metadata { - if err := r.deleteTailnetDevice(ctx, tailscaleClient, m.tsID, logger); err != nil { + if err := r.ensureDeviceDeleted(ctx, tailscaleClient, m.tsID, logger); err != nil { return false, err } } @@ -703,12 +707,12 @@ func (r *ProxyGroupReconciler) maybeCleanup(ctx context.Context, tailscaleClient logger.Infof("cleaned up ProxyGroup resources") r.mu.Lock() - r.ensureRemovedFromGaugeForProxyGroup(pg) + r.ensureStateRemovedForProxyGroup(pg) r.mu.Unlock() return true, nil } -func (r *ProxyGroupReconciler) deleteTailnetDevice(ctx context.Context, tailscaleClient tsClient, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { +func (r *ProxyGroupReconciler) ensureDeviceDeleted(ctx context.Context, tailscaleClient tsClient, id tailcfg.StableNodeID, logger *zap.SugaredLogger) error { logger.Debugf("deleting device %s from control", string(id)) if err := tailscaleClient.DeleteDevice(ctx, string(id)); err != nil { if errResp, ok := errors.AsType[tailscale.ErrResponse](err); ok && errResp.Status == http.StatusNotFound { @@ -734,6 +738,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( logger := r.logger(pg.Name) endpoints = make(map[string][]netip.AddrPort, pgReplicas(pg)) // keyed by Service name. 
for i := range pgReplicas(pg) { + logger = logger.With("Pod", fmt.Sprintf("%s-%d", pg.Name, i)) cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: pgConfigSecretName(pg.Name, i), @@ -751,38 +756,9 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( return nil, err } - var authKey *string - if existingCfgSecret == nil { - logger.Debugf("Creating authkey for new ProxyGroup proxy") - tags := pg.Spec.Tags.Stringify() - if len(tags) == 0 { - tags = r.defaultTags - } - key, err := newAuthKey(ctx, tailscaleClient, tags) - if err != nil { - return nil, err - } - authKey = &key - } - - if authKey == nil { - // Get state Secret to check if it's already authed. - stateSecret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: pgStateSecretName(pg.Name, i), - Namespace: r.tsNamespace, - }, - } - if err = r.Get(ctx, client.ObjectKeyFromObject(stateSecret), stateSecret); err != nil && !apierrors.IsNotFound(err) { - return nil, err - } - - if shouldRetainAuthKey(stateSecret) && existingCfgSecret != nil { - authKey, err = authKeyFromSecret(existingCfgSecret) - if err != nil { - return nil, fmt.Errorf("error retrieving auth key from existing config Secret: %w", err) - } - } + authKey, err := r.getAuthKey(ctx, tailscaleClient, pg, existingCfgSecret, i, logger) + if err != nil { + return nil, err } nodePortSvcName := pgNodePortServiceName(pg.Name, i) @@ -918,11 +894,137 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated( return nil, err } } + } return endpoints, nil } +// getAuthKey returns an auth key for the proxy, or nil if none is needed. +// A new key is created if the config Secret doesn't exist yet, or if the +// proxy has requested a reissue via its state Secret. An existing key is +// retained while the device hasn't authed or a reissue is in progress. 
+func (r *ProxyGroupReconciler) getAuthKey(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, existingCfgSecret *corev1.Secret, ordinal int32, logger *zap.SugaredLogger) (*string, error) { + // Get state Secret to check if it's already authed or has requested + // a fresh auth key. + stateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pg.Name, ordinal), + Namespace: r.tsNamespace, + }, + } + if err := r.Get(ctx, client.ObjectKeyFromObject(stateSecret), stateSecret); err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + + var createAuthKey bool + var cfgAuthKey *string + if existingCfgSecret == nil { + createAuthKey = true + } else { + var err error + cfgAuthKey, err = authKeyFromSecret(existingCfgSecret) + if err != nil { + return nil, fmt.Errorf("error retrieving auth key from existing config Secret: %w", err) + } + } + + if !createAuthKey { + var err error + createAuthKey, err = r.shouldReissueAuthKey(ctx, tailscaleClient, pg, stateSecret, cfgAuthKey) + if err != nil { + return nil, err + } + } + + var authKey *string + if createAuthKey { + logger.Debugf("creating auth key for ProxyGroup proxy %q", stateSecret.Name) + + tags := pg.Spec.Tags.Stringify() + if len(tags) == 0 { + tags = r.defaultTags + } + key, err := newAuthKey(ctx, tailscaleClient, tags) + if err != nil { + return nil, err + } + authKey = &key + } else { + // Retain auth key if the device hasn't authed yet, or if a + // reissue is in progress (device_id is stale during reissue). + _, reissueRequested := stateSecret.Data[kubetypes.KeyReissueAuthkey] + if !deviceAuthed(stateSecret) || reissueRequested { + authKey = cfgAuthKey + } + } + + return authKey, nil +} + +// shouldReissueAuthKey returns true if the proxy needs a new auth key. It +// tracks in-flight reissues via authKeyReissuing to avoid duplicate API calls +// across reconciles. 
+func (r *ProxyGroupReconciler) shouldReissueAuthKey(ctx context.Context, tailscaleClient tsClient, pg *tsapi.ProxyGroup, stateSecret *corev1.Secret, cfgAuthKey *string) (shouldReissue bool, err error) { + r.mu.Lock() + reissuing := r.authKeyReissuing[stateSecret.Name] + r.mu.Unlock() + + if reissuing { + // Check if reissue is complete by seeing if request was cleared + _, requestStillPresent := stateSecret.Data[kubetypes.KeyReissueAuthkey] + if !requestStillPresent { + // Containerboot cleared the request, reissue is complete + r.mu.Lock() + r.authKeyReissuing[stateSecret.Name] = false + r.mu.Unlock() + r.log.Debugf("auth key reissue completed for %q", stateSecret.Name) + return false, nil + } + + // Reissue still in-flight; waiting for containerboot to pick up new key + r.log.Debugf("auth key already in process of re-issuance, waiting for secret to be updated") + return false, nil + } + + defer func() { + r.mu.Lock() + r.authKeyReissuing[stateSecret.Name] = shouldReissue + r.mu.Unlock() + }() + + brokenAuthkey, ok := stateSecret.Data[kubetypes.KeyReissueAuthkey] + if !ok { + // reissue hasn't been requested since the key in the secret hasn't been populated + return false, nil + } + + empty := cfgAuthKey == nil || *cfgAuthKey == "" + broken := cfgAuthKey != nil && *cfgAuthKey == string(brokenAuthkey) + + // A new key has been written but the proxy hasn't picked it up yet. 
+ if !empty && !broken { + return false, nil + } + + lim := r.authKeyRateLimits[pg.Name] + if !lim.Allow() { + r.log.Debugf("auth key re-issuance rate limit exceeded, limit: %.2f, burst: %d, tokens: %.2f", + lim.Limit(), lim.Burst(), lim.Tokens()) + return false, fmt.Errorf("auth key re-issuance rate limit exceeded for ProxyGroup %q, will retry with backoff", pg.Name) + } + + r.log.Infof("Proxy failing to auth; attempting cleanup and new key") + if tsID := stateSecret.Data[kubetypes.KeyDeviceID]; len(tsID) > 0 { + id := tailcfg.StableNodeID(tsID) + if err := r.ensureDeviceDeleted(ctx, tailscaleClient, id, r.log); err != nil { + return false, err + } + } + + return true, nil +} + type FindStaticEndpointErr struct { msg string } @@ -1016,9 +1118,9 @@ func getStaticEndpointAddress(a *corev1.NodeAddress, port uint16) *netip.AddrPor return new(netip.AddrPortFrom(addr, port)) } -// ensureAddedToGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource is updated when the ProxyGroup -// is created. r.mu must be held. -func (r *ProxyGroupReconciler) ensureAddedToGaugeForProxyGroup(pg *tsapi.ProxyGroup) { +// ensureStateAddedForProxyGroup ensures the gauge metric for the ProxyGroup resource is updated when the ProxyGroup +// is created, and initialises per-ProxyGroup rate limits on re-issuing auth keys. r.mu must be held. 
+func (r *ProxyGroupReconciler) ensureStateAddedForProxyGroup(pg *tsapi.ProxyGroup) { switch pg.Spec.Type { case tsapi.ProxyGroupTypeEgress: r.egressProxyGroups.Add(pg.UID) @@ -1030,11 +1132,24 @@ func (r *ProxyGroupReconciler) ensureAddedToGaugeForProxyGroup(pg *tsapi.ProxyGr gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len())) gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len())) + + if _, ok := r.authKeyRateLimits[pg.Name]; !ok { + // Allow every replica to have its auth key re-issued quickly the first + // time, but with an overall limit of 1 every 30s after a burst. + r.authKeyRateLimits[pg.Name] = rate.NewLimiter(rate.Every(30*time.Second), int(pgReplicas(pg))) + } + + for i := range pgReplicas(pg) { + rep := pgStateSecretName(pg.Name, i) + if _, ok := r.authKeyReissuing[rep]; !ok { + r.authKeyReissuing[rep] = false + } + } } -// ensureRemovedFromGaugeForProxyGroup ensures the gauge metric for the ProxyGroup resource type is updated when the -// ProxyGroup is deleted. r.mu must be held. -func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.ProxyGroup) { +// ensureStateRemovedForProxyGroup ensures the gauge metric for the ProxyGroup resource type is updated when the +// ProxyGroup is deleted, and deletes the per-ProxyGroup rate limiter to free memory. r.mu must be held. 
+func (r *ProxyGroupReconciler) ensureStateRemovedForProxyGroup(pg *tsapi.ProxyGroup) { switch pg.Spec.Type { case tsapi.ProxyGroupTypeEgress: r.egressProxyGroups.Remove(pg.UID) @@ -1046,6 +1161,7 @@ func (r *ProxyGroupReconciler) ensureRemovedFromGaugeForProxyGroup(pg *tsapi.Pro gaugeEgressProxyGroupResources.Set(int64(r.egressProxyGroups.Len())) gaugeIngressProxyGroupResources.Set(int64(r.ingressProxyGroups.Len())) gaugeAPIServerProxyGroupResources.Set(int64(r.apiServerProxyGroups.Len())) + delete(r.authKeyRateLimits, pg.Name) } func pgTailscaledConfig(pg *tsapi.ProxyGroup, loginServer string, pc *tsapi.ProxyClass, idx int32, authKey *string, staticEndpoints []netip.AddrPort, oldAdvertiseServices []string) (tailscaledConfigs, error) { @@ -1106,7 +1222,7 @@ func getNodeMetadata(ctx context.Context, pg *tsapi.ProxyGroup, cl client.Client return nil, fmt.Errorf("failed to list state Secrets: %w", err) } for _, secret := range secrets.Items { - var ordinal int + var ordinal int32 if _, err := fmt.Sscanf(secret.Name, pg.Name+"-%d", &ordinal); err != nil { return nil, fmt.Errorf("unexpected secret %s was labelled as owned by the ProxyGroup %s: %w", secret.Name, pg.Name, err) } @@ -1213,7 +1329,7 @@ func (r *ProxyGroupReconciler) getClientAndLoginURL(ctx context.Context, tailnet } type nodeMetadata struct { - ordinal int + ordinal int32 stateSecret *corev1.Secret podUID string // or empty if the Pod no longer exists. 
tsID tailcfg.StableNodeID diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 9b3ee0e0fd30f..1a50ee1f05f44 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -6,15 +6,19 @@ package main import ( + "context" "encoding/json" "fmt" "net/netip" + "reflect" "slices" + "strings" "testing" "time" "github.com/google/go-cmp/cmp" "go.uber.org/zap" + "golang.org/x/time/rate" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -28,7 +32,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/client/tailscale" "tailscale.com/ipn" - kube "tailscale.com/k8s-operator" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/k8s-proxy/conf" @@ -637,10 +640,12 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { tsFirewallMode: "auto", defaultProxyClass: "default-pc", - Client: fc, - tsClient: tsClient, - recorder: fr, - clock: cl, + Client: fc, + tsClient: tsClient, + recorder: fr, + clock: cl, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } for i, r := range tt.reconciles { @@ -780,11 +785,13 @@ func TestProxyGroupWithStaticEndpoints(t *testing.T) { tsFirewallMode: "auto", defaultProxyClass: "default-pc", - Client: fc, - tsClient: tsClient, - recorder: fr, - log: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), - clock: cl, + Client: fc, + tsClient: tsClient, + recorder: fr, + log: zl.Sugar().With("TestName", tt.name).With("Reconcile", "cleanup"), + clock: cl, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } if err := fc.Delete(t.Context(), pg); err != nil { @@ -841,12 +848,15 @@ func TestProxyGroup(t *testing.T) { tsFirewallMode: "auto", defaultProxyClass: "default-pc", - Client: fc, - tsClient: tsClient, - recorder: fr, - log: zl.Sugar(), - clock: cl, + Client: fc, + 
tsClient: tsClient, + recorder: fr, + log: zl.Sugar(), + clock: cl, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } + crd := &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: serviceMonitorCRD}} opts := configOpts{ proxyType: "proxygroup", @@ -863,7 +873,7 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, metav1.ConditionFalse, reasonProxyGroupCreating, "the ProxyGroup's ProxyClass \"default-pc\" is not yet in a ready state, waiting...", 1, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, false, pc) - if kube.ProxyGroupAvailable(pg) { + if tsoperator.ProxyGroupAvailable(pg) { t.Fatal("expected ProxyGroup to not be available") } }) @@ -891,7 +901,7 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionFalse, reasonProxyGroupCreating, "0/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) - if kube.ProxyGroupAvailable(pg) { + if tsoperator.ProxyGroupAvailable(pg) { t.Fatal("expected ProxyGroup to not be available") } if expected := 1; reconciler.egressProxyGroups.Len() != expected { @@ -935,7 +945,7 @@ func TestProxyGroup(t *testing.T) { tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupAvailable, metav1.ConditionTrue, reasonProxyGroupAvailable, "2/2 ProxyGroup pods running", 0, cl, zl.Sugar()) expectEqual(t, fc, pg) expectProxyGroupResources(t, fc, pg, true, pc) - if !kube.ProxyGroupAvailable(pg) { + if !tsoperator.ProxyGroupAvailable(pg) { t.Fatal("expected ProxyGroup to be available") } }) @@ -1045,12 +1055,14 @@ func TestProxyGroupTypes(t *testing.T) { zl, _ := zap.NewDevelopment() reconciler := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - tsProxyImage: testProxyImage, - Client: fc, - log: zl.Sugar(), - tsClient: &fakeTSClient{}, - clock: 
tstest.NewClock(tstest.ClockOpts{}), + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + log: zl.Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } t.Run("egress_type", func(t *testing.T) { @@ -1285,12 +1297,14 @@ func TestKubeAPIServerStatusConditionFlow(t *testing.T) { WithStatusSubresource(pg). Build() r := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - tsProxyImage: testProxyImage, - Client: fc, - log: zap.Must(zap.NewDevelopment()).Sugar(), - tsClient: &fakeTSClient{}, - clock: tstest.NewClock(tstest.ClockOpts{}), + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + log: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } expectReconciled(t, r, "", pg.Name) @@ -1338,12 +1352,14 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { Build() reconciler := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - tsProxyImage: testProxyImage, - Client: fc, - log: zap.Must(zap.NewDevelopment()).Sugar(), - tsClient: &fakeTSClient{}, - clock: tstest.NewClock(tstest.ClockOpts{}), + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + log: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } pg := &tsapi.ProxyGroup{ @@ -1367,7 +1383,7 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) { cfg := conf.VersionedConfig{ Version: "v1alpha1", ConfigV1Alpha1: &conf.ConfigV1Alpha1{ - AuthKey: new("secret-authkey"), + AuthKey: new("new-authkey"), State: new(fmt.Sprintf("kube:%s", pgPodName(pg.Name, 0))), App: 
new(kubetypes.AppProxyGroupKubeAPIServer), LogLevel: new("debug"), @@ -1423,12 +1439,14 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { WithStatusSubresource(&tsapi.ProxyGroup{}). Build() reconciler := &ProxyGroupReconciler{ - tsNamespace: tsNamespace, - tsProxyImage: testProxyImage, - Client: fc, - log: zap.Must(zap.NewDevelopment()).Sugar(), - tsClient: &fakeTSClient{}, - clock: tstest.NewClock(tstest.ClockOpts{}), + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + Client: fc, + log: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } existingServices := []string{"svc1", "svc2"} @@ -1653,6 +1671,197 @@ func TestValidateProxyGroup(t *testing.T) { } } +func TestProxyGroupGetAuthKey(t *testing.T) { + pg := &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Finalizers: []string{"tailscale.com/finalizer"}, + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeEgress, + Replicas: new(int32(1)), + }, + } + tsClient := &fakeTSClient{} + + // Variables to reference in test cases. + existingAuthKey := new("existing-auth-key") + newAuthKey := new("new-authkey") + configWith := func(authKey *string) map[string][]byte { + value := []byte("{}") + if authKey != nil { + value = fmt.Appendf(nil, `{"AuthKey": "%s"}`, *authKey) + } + return map[string][]byte{ + tsoperator.TailscaledConfigFileName(pgMinCapabilityVersion): value, + } + } + + initTest := func() (*ProxyGroupReconciler, client.WithWatch) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(pg). + WithStatusSubresource(pg). 
+ Build() + zl, _ := zap.NewDevelopment() + fr := record.NewFakeRecorder(1) + cl := tstest.NewClock(tstest.ClockOpts{}) + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + tsProxyImage: testProxyImage, + defaultTags: []string{"tag:test-tag"}, + tsFirewallMode: "auto", + + Client: fc, + tsClient: tsClient, + recorder: fr, + log: zl.Sugar(), + clock: cl, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), + } + reconciler.ensureStateAddedForProxyGroup(pg) + + return reconciler, fc + } + + // Config Secret: exists or not, has key or not. + // State Secret: has device ID or not, requested reissue or not. + for name, tc := range map[string]struct { + configData map[string][]byte + stateData map[string][]byte + expectedAuthKey *string + expectReissue bool + }{ + "no_secrets_needs_new": { + expectedAuthKey: newAuthKey, // New ProxyGroup or manually cleared Pod. + }, + "no_config_secret_state_authed_ok": { + stateData: map[string][]byte{ + kubetypes.KeyDeviceID: []byte("nodeid-0"), + }, + expectedAuthKey: newAuthKey, // Always create an auth key if we're creating the config Secret. + }, + "config_secret_without_key_state_authed_with_reissue_needs_new": { + configData: configWith(nil), + stateData: map[string][]byte{ + kubetypes.KeyDeviceID: []byte("nodeid-0"), + kubetypes.KeyReissueAuthkey: []byte(""), + }, + expectedAuthKey: newAuthKey, + expectReissue: true, // Device is authed but reissue was requested. + }, + "config_secret_with_key_state_with_reissue_stale_ok": { + configData: configWith(existingAuthKey), + stateData: map[string][]byte{ + kubetypes.KeyReissueAuthkey: []byte("some-older-authkey"), + }, + expectedAuthKey: existingAuthKey, // Config's auth key is different from the one marked for reissue. 
+ }, + "config_secret_with_key_state_with_reissue_existing_key_needs_new": { + configData: configWith(existingAuthKey), + stateData: map[string][]byte{ + kubetypes.KeyDeviceID: []byte("nodeid-0"), + kubetypes.KeyReissueAuthkey: []byte(*existingAuthKey), + }, + expectedAuthKey: newAuthKey, + expectReissue: true, // Current config's auth key is marked for reissue. + }, + "config_secret_without_key_no_state_ok": { + configData: configWith(nil), + expectedAuthKey: nil, // Proxy will set reissue_authkey and then next reconcile will reissue. + }, + "config_secret_without_key_state_authed_ok": { + configData: configWith(nil), + stateData: map[string][]byte{ + kubetypes.KeyDeviceID: []byte("nodeid-0"), + }, + expectedAuthKey: nil, // Device is already authed. + }, + "config_secret_with_key_state_authed_ok": { + configData: configWith(existingAuthKey), + stateData: map[string][]byte{ + kubetypes.KeyDeviceID: []byte("nodeid-0"), + }, + expectedAuthKey: nil, // Auth key getting removed because device is authed. + }, + "config_secret_with_key_no_state_keeps_existing": { + configData: configWith(existingAuthKey), + expectedAuthKey: existingAuthKey, // No state, waiting for containerboot to try the auth key. + }, + } { + t.Run(name, func(t *testing.T) { + tsClient.deleted = tsClient.deleted[:0] // Reset deleted devices for each test case. 
+ reconciler, fc := initTest() + var cfgSecret *corev1.Secret + if tc.configData != nil { + cfgSecret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pg.Name, 0), + Namespace: tsNamespace, + }, + Data: tc.configData, + } + } + if tc.stateData != nil { + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgStateSecretName(pg.Name, 0), + Namespace: tsNamespace, + }, + Data: tc.stateData, + }) + } + + authKey, err := reconciler.getAuthKey(t.Context(), tsClient, pg, cfgSecret, 0, reconciler.log.With("TestName", t.Name())) + if err != nil { + t.Fatalf("unexpected error getting auth key: %v", err) + } + if !reflect.DeepEqual(authKey, tc.expectedAuthKey) { + deref := func(s *string) string { + if s == nil { + return "" + } + return *s + } + t.Errorf("expected auth key %v, got %v", deref(tc.expectedAuthKey), deref(authKey)) + } + + // Use the device deletion as a proxy for the fact the new auth key + // was due to a reissue. + switch { + case tc.expectReissue && len(tsClient.deleted) != 1: + t.Errorf("expected 1 deleted device, got %v", tsClient.deleted) + case !tc.expectReissue && len(tsClient.deleted) != 0: + t.Errorf("expected no deleted devices, got %v", tsClient.deleted) + } + + if tc.expectReissue { + // Trigger the rate limit in a tight loop. Up to 100 iterations + // to allow for CI that is extremely slow, but should happen on + // first try for any reasonable machine. + stateSecretName := pgStateSecretName(pg.Name, 0) + for range 100 { + //NOTE: (ChaosInTheCRD) we added some protection here to avoid + // trying to reissue when already reissung. This overrides it. 
+ reconciler.mu.Lock() + reconciler.authKeyReissuing[stateSecretName] = false + reconciler.mu.Unlock() + _, err := reconciler.getAuthKey(context.Background(), tsClient, pg, cfgSecret, 0, + reconciler.log.With("TestName", t.Name())) + if err != nil { + if !strings.Contains(err.Error(), "rate limit exceeded") { + t.Fatalf("unexpected error getting auth key: %v", err) + } + return // Expected rate limit error. + } + } + t.Fatal("expected rate limit error, but got none") + } + }) + } +} + func proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsapi.ProxyClass) { pcLEStaging := &tsapi.ProxyClass{ ObjectMeta: metav1.ObjectMeta{ @@ -1903,6 +2112,8 @@ func TestProxyGroupLetsEncryptStaging(t *testing.T) { tsClient: &fakeTSClient{}, log: zl.Sugar(), clock: cl, + authKeyRateLimits: make(map[string]*rate.Limiter), + authKeyReissuing: make(map[string]bool), } expectReconciled(t, reconciler, "", pg.Name) diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 5f33a94905785..519f81fe0db29 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -1111,7 +1111,7 @@ func tailscaledConfig(stsC *tailscaleSTSConfig, loginUrl string, newAuthkey stri if newAuthkey != "" { conf.AuthKey = &newAuthkey - } else if shouldRetainAuthKey(oldSecret) { + } else if !deviceAuthed(oldSecret) { key, err := authKeyFromSecret(oldSecret) if err != nil { return nil, fmt.Errorf("error retrieving auth key from Secret: %w", err) @@ -1164,6 +1164,8 @@ func latestConfigFromSecret(s *corev1.Secret) (*ipn.ConfigVAlpha, error) { return conf, nil } +// authKeyFromSecret returns the auth key from the latest config version if +// found, or else nil. 
func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { conf, err := latestConfigFromSecret(s) if err != nil { @@ -1180,13 +1182,13 @@ func authKeyFromSecret(s *corev1.Secret) (key *string, err error) { return key, nil } -// shouldRetainAuthKey returns true if the state stored in a proxy's state Secret suggests that auth key should be -// retained (because the proxy has not yet successfully authenticated). -func shouldRetainAuthKey(s *corev1.Secret) bool { +// deviceAuthed returns true if the state stored in a proxy's state Secret +// suggests that the proxy has successfully authenticated. +func deviceAuthed(s *corev1.Secret) bool { if s == nil { - return false // nothing to retain here + return false // No state Secret means no device state. } - return len(s.Data["device_id"]) == 0 // proxy has not authed yet + return len(s.Data["device_id"]) > 0 } func shouldAcceptRoutes(pc *tsapi.ProxyClass) bool { diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index d418f01284b95..36b608ef6f4fd 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -529,7 +529,7 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec AcceptDNS: "false", Hostname: &opts.hostname, Locked: "false", - AuthKey: new("secret-authkey"), + AuthKey: new("new-authkey"), AcceptRoutes: "false", AppConnector: &ipn.AppConnectorPrefs{Advertise: false}, NoStatefulFiltering: "true", @@ -859,7 +859,7 @@ func (c *fakeTSClient) CreateKey(ctx context.Context, caps tailscale.KeyCapabili Created: time.Now(), Capabilities: caps, } - return "secret-authkey", k, nil + return "new-authkey", k, nil } func (c *fakeTSClient) Device(ctx context.Context, deviceID string, fields *tailscale.DeviceFieldsOpts) (*tailscale.Device, error) { diff --git a/cmd/k8s-operator/tsrecorder_test.go b/cmd/k8s-operator/tsrecorder_test.go index 0e1641243c937..d3ebc3bd5eaa8 100644 --- a/cmd/k8s-operator/tsrecorder_test.go +++ 
b/cmd/k8s-operator/tsrecorder_test.go @@ -284,7 +284,7 @@ func expectRecorderResources(t *testing.T, fc client.WithWatch, tsr *tsapi.Recor } for replica := range replicas { - auth := tsrAuthSecret(tsr, tsNamespace, "secret-authkey", replica) + auth := tsrAuthSecret(tsr, tsNamespace, "new-authkey", replica) state := tsrStateSecret(tsr, tsNamespace, replica) if shouldExist { diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 187f54f3481f8..9f1b29064acca 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -38,17 +38,17 @@ const ( // Keys that containerboot writes to state file that can be used to determine its state. // fields set in Tailscale state Secret. These are mostly used by the Tailscale Kubernetes operator to determine // the state of this tailscale device. - KeyDeviceID string = "device_id" // node stable ID of the device - KeyDeviceFQDN string = "device_fqdn" // device's tailnet hostname - KeyDeviceIPs string = "device_ips" // device's tailnet IPs - KeyPodUID string = "pod_uid" // Pod UID - // KeyCapVer contains Tailscale capability version of this proxy instance. - KeyCapVer string = "tailscale_capver" + KeyDeviceID = "device_id" // node stable ID of the device + KeyDeviceFQDN = "device_fqdn" // device's tailnet hostname + KeyDeviceIPs = "device_ips" // device's tailnet IPs + KeyPodUID = "pod_uid" // Pod UID + KeyCapVer = "tailscale_capver" // tailcfg.CurrentCapabilityVersion of this proxy instance. + KeyReissueAuthkey = "reissue_authkey" // Proxies will set this to the authkey that failed, or "no-authkey", if they can't log in. // KeyHTTPSEndpoint is a name of a field that can be set to the value of any HTTPS endpoint currently exposed by // this device to the tailnet. This is used by the Kubernetes operator Ingress proxy to communicate to the operator // that cluster workloads behind the Ingress can now be accessed via the given DNS name over HTTPS. 
- KeyHTTPSEndpoint string = "https_endpoint" - ValueNoHTTPS string = "no-https" + KeyHTTPSEndpoint = "https_endpoint" + ValueNoHTTPS = "no-https" // Pod's IPv4 address header key as returned by containerboot health check endpoint. PodIPv4Header string = "Pod-IPv4" From 70de1113948dd03e72e10277be60411faf72d459 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 9 Mar 2026 21:26:36 +0000 Subject: [PATCH 1086/1093] wgengine/magicsock: fix three race conditions in TestTwoDevicePing Fix three independent flake sources, at least as debugged by Claude, though empirically no longer flaking as it was before: 1. Poll for connection counter data instead of reading immediately. The conncount callback fires asynchronously on received WireGuard traffic, so after counts.Reset() there is no guarantee the counter has been repopulated before checkStats reads it. Use tstest.WaitFor with a 5s timeout to retry until a matching connection appears. 2. Replace the *2 symmetry assumption in global metric assertions. metricSendUDP and friends are AggregateCounters that sum per-conn expvars from both magicsock instances. The old assertion assumed both instances had identical packet counts, which breaks under asymmetric background WireGuard activity (handshake retries, etc). The new assertGlobalMetricsMatchPerConn computes the actual sum of both conns' expvars and compares against the AggregateCounter value. 3. Tolerate physical stats being 0 when user metrics are non-zero. A rebind event replaces the socket mid-measurement, resetting the physical connection counter while user metrics still reflect packets processed before the rebind. Log instead of failing in this case. Also move counts.Reset() after metric reads and reorder the reset sequence (counts before metrics) to minimize the race window. 
Fixes tailscale/tailscale#13420 Change-Id: I7b090a4dc229a862c1a52161b3f2547ec1d1f23f Signed-off-by: Brad Fitzpatrick --- wgengine/magicsock/magicsock_test.go | 103 ++++++++++++++++++--------- 1 file changed, 71 insertions(+), 32 deletions(-) diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 7a8a6374cd1bc..4ecea8b18a586 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -1191,15 +1191,19 @@ func testTwoDevicePing(t *testing.T, d *devices) { m2.conn.SetConnectionCounter(m2.counts.Add) checkStats := func(t *testing.T, m *magicStack, wantConns []netlogtype.Connection) { + t.Helper() defer m.counts.Reset() - counts := m.counts.Clone() - for _, conn := range wantConns { - if _, ok := counts[conn]; ok { - return + if err := tstest.WaitFor(5*time.Second, func() error { + counts := m.counts.Clone() + for _, conn := range wantConns { + if _, ok := counts[conn]; ok { + return nil + } } + return fmt.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(counts)) + }); err != nil { + t.Error(err) } - t.Helper() - t.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(counts)) } addrPort := netip.MustParseAddrPort @@ -1261,15 +1265,16 @@ func testTwoDevicePing(t *testing.T, d *devices) { t.Run("compare-metrics-stats", func(t *testing.T) { setT(t) defer setT(outerT) - m1.conn.resetMetricsForTest() m1.counts.Reset() - m2.conn.resetMetricsForTest() m2.counts.Reset() + m1.conn.resetMetricsForTest() + m2.conn.resetMetricsForTest() t.Logf("Metrics before: %s\n", m1.metrics.String()) ping1(t) ping2(t) assertConnStatsAndUserMetricsEqual(t, m1) assertConnStatsAndUserMetricsEqual(t, m2) + assertGlobalMetricsMatchPerConn(t, m1, m2) t.Logf("Metrics after: %s\n", m1.metrics.String()) }) } @@ -1290,6 +1295,7 @@ func (c *Conn) resetMetricsForTest() { } func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { + t.Helper() physIPv4RxBytes := int64(0) 
physIPv4TxBytes := int64(0) physDERPRxBytes := int64(0) @@ -1312,7 +1318,6 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { physIPv4TxPackets += int64(count.TxPackets) } } - ms.counts.Reset() metricIPv4RxBytes := ms.conn.metrics.inboundBytesIPv4Total.Value() metricIPv4RxPackets := ms.conn.metrics.inboundPacketsIPv4Total.Value() @@ -1324,30 +1329,64 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { metricDERPTxBytes := ms.conn.metrics.outboundBytesDERPTotal.Value() metricDERPTxPackets := ms.conn.metrics.outboundPacketsDERPTotal.Value() + // Reset counts after reading all values to minimize the window where a + // background packet could increment metrics but miss the cloned counts. + ms.counts.Reset() + + // Compare physical connection stats with per-conn user metrics. + // A rebind during the measurement window can reset the physical connection + // counter, causing physical stats to show 0 while user metrics recorded + // packets normally. Tolerate this by logging instead of failing. 
+ checkPhysVsMetric := func(phys, metric int64, name string) { + if phys == metric { + return + } + if phys == 0 && metric > 0 { + t.Logf("%s: physical counter is 0 but metric is %d (possible rebind during measurement)", name, metric) + return + } + t.Errorf("%s: physical=%d, metric=%d", name, phys, metric) + } + checkPhysVsMetric(physDERPRxBytes, metricDERPRxBytes, "DERPRxBytes") + checkPhysVsMetric(physDERPTxBytes, metricDERPTxBytes, "DERPTxBytes") + checkPhysVsMetric(physIPv4RxBytes, metricIPv4RxBytes, "IPv4RxBytes") + checkPhysVsMetric(physIPv4TxBytes, metricIPv4TxBytes, "IPv4TxBytes") + checkPhysVsMetric(physDERPRxPackets, metricDERPRxPackets, "DERPRxPackets") + checkPhysVsMetric(physDERPTxPackets, metricDERPTxPackets, "DERPTxPackets") + checkPhysVsMetric(physIPv4RxPackets, metricIPv4RxPackets, "IPv4RxPackets") + checkPhysVsMetric(physIPv4TxPackets, metricIPv4TxPackets, "IPv4TxPackets") +} + +// assertGlobalMetricsMatchPerConn validates that the global clientmetric +// AggregateCounters match the sum of per-conn user metrics from both magicsock +// instances. This tests the metric registration wiring rather than assuming +// symmetric traffic between the two instances. 
+func assertGlobalMetricsMatchPerConn(t *testing.T, m1, m2 *magicStack) { + t.Helper() c := qt.New(t) - c.Assert(physDERPRxBytes, qt.Equals, metricDERPRxBytes) - c.Assert(physDERPTxBytes, qt.Equals, metricDERPTxBytes) - c.Assert(physIPv4RxBytes, qt.Equals, metricIPv4RxBytes) - c.Assert(physIPv4TxBytes, qt.Equals, metricIPv4TxBytes) - c.Assert(physDERPRxPackets, qt.Equals, metricDERPRxPackets) - c.Assert(physDERPTxPackets, qt.Equals, metricDERPTxPackets) - c.Assert(physIPv4RxPackets, qt.Equals, metricIPv4RxPackets) - c.Assert(physIPv4TxPackets, qt.Equals, metricIPv4TxPackets) - - // Validate that the usermetrics and clientmetrics are in sync - // Note: the clientmetrics are global, this means that when they are registering with the - // wgengine, multiple in-process nodes used by this test will be updating the same metrics. This is why we need to multiply - // the metrics by 2 to get the expected value. - // TODO(kradalby): https://github.com/tailscale/tailscale/issues/13420 - c.Assert(metricSendUDP.Value(), qt.Equals, metricIPv4TxPackets*2) - c.Assert(metricSendDataPacketsIPv4.Value(), qt.Equals, metricIPv4TxPackets*2) - c.Assert(metricSendDataPacketsDERP.Value(), qt.Equals, metricDERPTxPackets*2) - c.Assert(metricSendDataBytesIPv4.Value(), qt.Equals, metricIPv4TxBytes*2) - c.Assert(metricSendDataBytesDERP.Value(), qt.Equals, metricDERPTxBytes*2) - c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, metricIPv4RxPackets*2) - c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2) - c.Assert(metricRecvDataBytesIPv4.Value(), qt.Equals, metricIPv4RxBytes*2) - c.Assert(metricRecvDataBytesDERP.Value(), qt.Equals, metricDERPRxBytes*2) + m1m := m1.conn.metrics + m2m := m2.conn.metrics + + // metricSendUDP aggregates outboundPacketsIPv4Total + outboundPacketsIPv6Total + c.Assert(metricSendUDP.Value(), qt.Equals, + m1m.outboundPacketsIPv4Total.Value()+m1m.outboundPacketsIPv6Total.Value()+ + 
m2m.outboundPacketsIPv4Total.Value()+m2m.outboundPacketsIPv6Total.Value()) + c.Assert(metricSendDataPacketsIPv4.Value(), qt.Equals, + m1m.outboundPacketsIPv4Total.Value()+m2m.outboundPacketsIPv4Total.Value()) + c.Assert(metricSendDataPacketsDERP.Value(), qt.Equals, + m1m.outboundPacketsDERPTotal.Value()+m2m.outboundPacketsDERPTotal.Value()) + c.Assert(metricSendDataBytesIPv4.Value(), qt.Equals, + m1m.outboundBytesIPv4Total.Value()+m2m.outboundBytesIPv4Total.Value()) + c.Assert(metricSendDataBytesDERP.Value(), qt.Equals, + m1m.outboundBytesDERPTotal.Value()+m2m.outboundBytesDERPTotal.Value()) + c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, + m1m.inboundPacketsIPv4Total.Value()+m2m.inboundPacketsIPv4Total.Value()) + c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, + m1m.inboundPacketsDERPTotal.Value()+m2m.inboundPacketsDERPTotal.Value()) + c.Assert(metricRecvDataBytesIPv4.Value(), qt.Equals, + m1m.inboundBytesIPv4Total.Value()+m2m.inboundBytesIPv4Total.Value()) + c.Assert(metricRecvDataBytesDERP.Value(), qt.Equals, + m1m.inboundBytesDERPTotal.Value()+m2m.inboundBytesDERPTotal.Value()) } // tests that having a endpoint.String prevents wireguard-go's From 607d01cdaee918d801157ac110530b4b92d3d11c Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Wed, 11 Mar 2026 10:13:49 -0700 Subject: [PATCH 1087/1093] net/batching: clarify & simplify single packet read limitations ReadFromUDPAddrPort worked if UDP GRO was unsupported, but we don't actually want attempted usage, nor does any exist today. Future work on tailscale/corp#37679 would have required more complexity in this method, vs clarifying the API intents. 
Updates tailscale/corp#37679 Signed-off-by: Jordan Whited --- net/batching/conn.go | 12 +++++++++++- net/batching/conn_linux.go | 11 +---------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/net/batching/conn.go b/net/batching/conn.go index 1631c33cfe448..1843a2cfced5a 100644 --- a/net/batching/conn.go +++ b/net/batching/conn.go @@ -19,14 +19,24 @@ var ( _ ipv6.Message = ipv4.Message{} ) -// Conn is a nettype.PacketConn that provides batched i/o using +// Conn is a [nettype.PacketConn] that provides batched i/o using // platform-specific optimizations, e.g. {recv,send}mmsg & UDP GSO/GRO. // +// Conn does not support single packet reads (see ReadFromUDPAddrPort docs). It +// is the caller's responsibility to use the appropriate read API where a +// [nettype.PacketConn] has been upgraded to support batched i/o. +// // Conn originated from (and is still used by) magicsock where its API was // strongly influenced by [wireguard-go/conn.Bind] constraints, namely // wireguard-go's ownership of packet memory. type Conn interface { nettype.PacketConn + // ReadFromUDPAddrPort always returns an error, as UDP GRO is incompatible + // with single packet reads. A single datagram may be multiple, coalesced + // datagrams, and this API lacks the ability to pass that context. + // + // TODO: consider detaching Conn from [nettype.PacketConn] + ReadFromUDPAddrPort([]byte) (int, netip.AddrPort, error) // ReadBatch reads messages from [Conn] into msgs. It returns the number of // messages the caller should evaluate for nonzero len, as a zero len // message may fall on either side of a nonzero. 
diff --git a/net/batching/conn_linux.go b/net/batching/conn_linux.go
index 373625b772738..70f91cfb6847c 100644
--- a/net/batching/conn_linux.go
+++ b/net/batching/conn_linux.go
@@ -61,16 +61,7 @@ type linuxBatchingConn struct {
 }
 
 func (c *linuxBatchingConn) ReadFromUDPAddrPort(p []byte) (n int, addr netip.AddrPort, err error) {
-	if c.rxOffload {
-	// UDP_GRO is opt-in on Linux via setsockopt(). Once enabled you may
-	// receive a "monster datagram" from any read call. The ReadFrom() API
-	// does not support passing the GSO size and is unsafe to use in such a
-	// case. Other platforms may vary in behavior, but we go with the most
-	// conservative approach to prevent this from becoming a footgun in the
-	// future.
-	return 0, netip.AddrPort{}, errors.New("rx UDP offload is enabled on this socket, single packet reads are unavailable")
-	}
-	return c.pc.ReadFromUDPAddrPort(p)
+	return 0, netip.AddrPort{}, errors.New("single packet reads are unsupported")
 }
 
 func (c *linuxBatchingConn) SetDeadline(t time.Time) error {

From dd1da0b38921de5a8091f4e9e83845ac997d2c83 Mon Sep 17 00:00:00 2001
From: kari-ts <135075563+kari-ts@users.noreply.github.com>
Date: Wed, 11 Mar 2026 12:21:50 -0700
Subject: [PATCH 1088/1093] wgengine: search randomly for unused port instead of in contiguous range (#18974)

In TestUserspaceEnginePortReconfig, when selecting a port, use a random offset rather than searching in a contiguous range in case there is a range that is blocked

Updates tailscale/tailscale#2855

Signed-off-by: kari-ts
---
 wgengine/userspace_test.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/wgengine/userspace_test.go b/wgengine/userspace_test.go
index b06ea527b27ba..18d870af1e6dc 100644
--- a/wgengine/userspace_test.go
+++ b/wgengine/userspace_test.go
@@ -5,6 +5,7 @@ package wgengine
 import (
 	"fmt"
+	"math/rand"
 	"net/netip"
 	"os"
 	"reflect"
@@ -175,8 +176,8 @@ func TestUserspaceEnginePortReconfig(t *testing.T) {
 	var ue *userspaceEngine
 	ht =
health.NewTracker(bus) reg := new(usermetric.Registry) - for i := range 100 { - attempt := uint16(defaultPort + i) + for range 100 { + attempt := uint16(defaultPort + rand.Intn(1000)) e, err := NewFakeUserspaceEngine(t.Logf, attempt, &knobs, ht, reg, bus) if err != nil { t.Fatal(err) From 4c7c1091ba5c623031df289affe2337d26585fcc Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Wed, 11 Mar 2026 12:28:28 -0700 Subject: [PATCH 1089/1093] netns: add Android callback to bind socket to network (#18915) After switching from cellular to wifi without ipv6, ForeachInterface still sees rmnet prefixes, so HaveV6 stays true, and magicsock keeps attempting ipv6 connections that either route through cellular or time out for users on wifi without ipv6 This: -Adds SetAndroidBindToNetworkFunc, a callback to bind the socket to the selected Android Network object Updates tailscale/tailscale#6152 Signed-off-by: kari-ts --- ipn/ipnlocal/local.go | 3 +++ net/netns/netns.go | 12 ++++++++++++ net/netns/netns_android.go | 39 +++++++++++++++++++++++++++++++++++--- tailcfg/tailcfg.go | 9 ++++++++- 4 files changed, 59 insertions(+), 4 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index ea5af0897a54a..da126ed0f8ca0 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6299,6 +6299,9 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { // See the netns package for documentation on what these capability do. 
netns.SetBindToInterfaceByRoute(b.logf, nm.HasCap(tailcfg.CapabilityBindToInterfaceByRoute)) + if runtime.GOOS == "android" { + netns.SetDisableAndroidBindToActiveNetwork(b.logf, nm.HasCap(tailcfg.NodeAttrDisableAndroidBindToActiveNetwork)) + } netns.SetDisableBindConnToInterface(b.logf, nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface)) netns.SetDisableBindConnToInterfaceAppleExt(b.logf, nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterfaceAppleExt)) diff --git a/net/netns/netns.go b/net/netns/netns.go index 5d692c787eae8..fe7ff4dcbadd8 100644 --- a/net/netns/netns.go +++ b/net/netns/netns.go @@ -46,6 +46,18 @@ func SetBindToInterfaceByRoute(logf logger.Logf, v bool) { } } +// When true, disableAndroidBindToActiveNetwork skips binding sockets to the currently +// active network on Android. +var disableAndroidBindToActiveNetwork atomic.Bool + +// SetDisableAndroidBindToActiveNetwork disables the default behavior of binding +// sockets to the currently active network on Android. +func SetDisableAndroidBindToActiveNetwork(logf logger.Logf, v bool) { + if runtime.GOOS == "android" && disableAndroidBindToActiveNetwork.Swap(v) != v { + logf("netns: disableAndroidBindToActiveNetwork changed to %v", v) + } +} + var disableBindConnToInterface atomic.Bool // SetDisableBindConnToInterface disables the (normal) behavior of binding diff --git a/net/netns/netns_android.go b/net/netns/netns_android.go index e747f61f40e50..7c5fe3214dcbf 100644 --- a/net/netns/netns_android.go +++ b/net/netns/netns_android.go @@ -17,6 +17,9 @@ import ( var ( androidProtectFuncMu sync.Mutex androidProtectFunc func(fd int) error + + androidBindToNetworkFuncMu sync.Mutex + androidBindToNetworkFunc func(fd int) error ) // UseSocketMark reports whether SO_MARK is in use. Android does not use SO_MARK. 
@@ -50,6 +53,14 @@ func SetAndroidProtectFunc(f func(fd int) error) { androidProtectFunc = f } +// SetAndroidBindToNetworkFunc registers a func provided by Android that binds +// the socket FD to the currently selected underlying network. +func SetAndroidBindToNetworkFunc(f func(fd int) error) { + androidBindToNetworkFuncMu.Lock() + defer androidBindToNetworkFuncMu.Unlock() + androidBindToNetworkFunc = f +} + func control(logger.Logf, *netmon.Monitor) func(network, address string, c syscall.RawConn) error { return controlC } @@ -60,14 +71,36 @@ func control(logger.Logf, *netmon.Monitor) func(network, address string, c sysca // and net.ListenConfig.Control. func controlC(network, address string, c syscall.RawConn) error { var sockErr error + err := c.Control(func(fd uintptr) { + fdInt := int(fd) + + // Protect from VPN loops androidProtectFuncMu.Lock() - f := androidProtectFunc + pf := androidProtectFunc androidProtectFuncMu.Unlock() - if f != nil { - sockErr = f(int(fd)) + if pf != nil { + if err := pf(fdInt); err != nil { + sockErr = err + return + } + } + + if disableAndroidBindToActiveNetwork.Load() { + return + } + + androidBindToNetworkFuncMu.Lock() + bf := androidBindToNetworkFunc + androidBindToNetworkFuncMu.Unlock() + if bf != nil { + if err := bf(fdInt); err != nil { + sockErr = err + return + } } }) + if err != nil { return fmt.Errorf("RawConn.Control on %T: %w", c, err) } diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 1efa6c959214e..04389fabaded8 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -180,7 +180,8 @@ type CapabilityVersion int // - 131: 2025-11-25: client respects [NodeAttrDefaultAutoUpdate] // - 132: 2026-02-13: client respects [NodeAttrDisableHostsFileUpdates] // - 133: 2026-02-17: client understands [NodeAttrForceRegisterMagicDNSIPv4Only]; MagicDNS IPv6 registered w/ OS by default -const CurrentCapabilityVersion CapabilityVersion = 133 +// - 134: 2026-03-09: Client understands 
[NodeAttrDisableAndroidBindToActiveNetwork] +const CurrentCapabilityVersion CapabilityVersion = 134 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -2463,6 +2464,12 @@ const ( // details on the behaviour of this capability. CapabilityBindToInterfaceByRoute NodeCapability = "https://tailscale.com/cap/bind-to-interface-by-route" + // NodeAttrDisableAndroidBindToActiveNetwork disables binding sockets to the + // currently active network on Android, which is enabled by default. + // This allows the control plane to turn off the behavior if it causes + // problems. + NodeAttrDisableAndroidBindToActiveNetwork NodeCapability = "disable-android-bind-to-active-network" + // CapabilityDebugDisableAlternateDefaultRouteInterface changes how Darwin // nodes get the default interface. There is an optional hook (used by the // macOS and iOS clients) to override the default interface, this capability From 073a9a8c9ed449c1a620106084e43b0d38d1c5cb Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 11 Mar 2026 13:29:06 +0000 Subject: [PATCH 1090/1093] wgengine{,/magicsock}: add DERP hooks for filtering+sending packets Add two small APIs to support out-of-tree projects to exchange custom signaling messages over DERP without requiring disco protocol extensions: - OnDERPRecv callback on magicsock.Options / wgengine.Config: called for every non-disco DERP packet before the peer map lookup, allowing callers to intercept packets from unknown peers that would otherwise be dropped. - SendDERPPacketTo method on magicsock.Conn: sends arbitrary bytes to a node key via a DERP region, creating the connection if needed. Thin wrapper around the existing internal sendAddr. Also allow netstack.Start to accept a nil LocalBackend for use cases that wire up TCP/UDP handlers directly without a full LocalBackend. 
Updates tailscale/corp#24454 Change-Id: I99a523ef281625b8c0024a963f5f5bf5d8792c17 Signed-off-by: Brad Fitzpatrick --- wgengine/magicsock/derp.go | 13 +++++++++++++ wgengine/magicsock/magicsock.go | 17 +++++++++++++---- wgengine/netstack/netstack.go | 24 +++++++++++++++++------- wgengine/userspace.go | 8 ++++++++ 4 files changed, 51 insertions(+), 11 deletions(-) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index f9e5050705b31..17e3cfa82ebe6 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -725,6 +725,10 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en return 0, nil } + if c.onDERPRecv != nil && c.onDERPRecv(regionID, dm.src, b[:n]) { + return 0, nil + } + var ok bool c.mu.Lock() ep, ok = c.peerMap.endpointForNodeKey(dm.src) @@ -745,6 +749,15 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en return n, ep } +// SendDERPPacketTo sends an arbitrary packet to the given node key via +// the DERP relay for the given region. It creates the DERP connection +// to the region if one doesn't already exist. +func (c *Conn) SendDERPPacketTo(dstKey key.NodePublic, regionID int, pkt []byte) (sent bool, err error) { + return c.sendAddr( + netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(regionID)), + dstKey, pkt, false, false) +} + // SetOnlyTCP443 set whether the magicsock connection is restricted // to only using TCP port 443 outbound. If true, no UDP is allowed, // no STUN checks are performend, etc. 
diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index f61e85b37fcec..5c16750f7e8a0 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -163,10 +163,11 @@ type Conn struct { derpActiveFunc func() idleFunc func() time.Duration // nil means unknown testOnlyPacketListener nettype.PacketListener - noteRecvActivity func(key.NodePublic) // or nil, see Options.NoteRecvActivity - netMon *netmon.Monitor // must be non-nil - health *health.Tracker // or nil - controlKnobs *controlknobs.Knobs // or nil + noteRecvActivity func(key.NodePublic) // or nil, see Options.NoteRecvActivity + onDERPRecv func(int, key.NodePublic, []byte) bool // or nil, see Options.OnDERPRecv + netMon *netmon.Monitor // must be non-nil + health *health.Tracker // or nil + controlKnobs *controlknobs.Knobs // or nil // ================================================================ // No locking required to access these fields, either because @@ -502,6 +503,13 @@ type Options struct { // leave it zero, in which case a new disco key is generated per // Tailscale start and kept only in memory. ForceDiscoKey key.DiscoPrivate + + // OnDERPRecv, if non-nil, is called for every non-disco packet + // received from DERP before the peer map lookup. If it returns + // true, the packet is considered handled and is not passed to + // WireGuard. The pkt slice is borrowed and must be copied if + // the callee needs to retain it. + OnDERPRecv func(regionID int, src key.NodePublic, pkt []byte) bool } func (o *Options) logf() logger.Logf { @@ -640,6 +648,7 @@ func NewConn(opts Options) (*Conn, error) { c.idleFunc = opts.IdleFunc c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity + c.onDERPRecv = opts.OnDERPRecv // Set up publishers and subscribers. Subscribe calls must return before // NewConn otherwise published events can be missed. 
diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 59c2613451fa5..ae77a1dac1787 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -603,15 +603,25 @@ type LocalBackend = any // Start sets up all the handlers so netstack can start working. Implements // wgengine.FakeImpl. +// +// The provided LocalBackend interface can be either nil, for special case users +// of netstack that don't have a LocalBackend, or a non-nil +// *ipnlocal.LocalBackend. Any other type will cause Start to panic. +// +// Start currently (2026-03-11) never returns a non-nil error, but maybe it did +// in the past and maybe it will in the future. func (ns *Impl) Start(b LocalBackend) error { - if b == nil { - panic("nil LocalBackend interface") - } - lb := b.(*ipnlocal.LocalBackend) - if lb == nil { - panic("nil LocalBackend") + switch b := b.(type) { + case nil: + // No backend, so just continue with ns.lb unset. + case *ipnlocal.LocalBackend: + if b == nil { + panic("nil LocalBackend") + } + ns.lb = b + default: + panic(fmt.Sprintf("unexpected type for LocalBackend: %T", b)) } - ns.lb = lb tcpFwd := tcp.NewForwarder(ns.ipstack, tcpRXBufDefSize, maxInFlightConnectionAttempts(), ns.acceptTCP) udpFwd := udp.NewForwarder(ns.ipstack, ns.acceptUDPNoICMP) ns.ipstack.SetTransportProtocolHandler(tcp.ProtocolNumber, ns.wrapTCPProtocolHandler(tcpFwd.HandlePacket)) diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 705555d4446a6..ecf3c22983aa4 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -272,6 +272,13 @@ type Config struct { // leave it zero, in which case a new disco key is generated per // Tailscale start and kept only in memory. ForceDiscoKey key.DiscoPrivate + + // OnDERPRecv, if non-nil, is called for every non-disco packet + // received from DERP before the peer map lookup. If it returns + // true, the packet is considered handled and is not passed to + // WireGuard. 
The pkt slice is borrowed and must be copied if + // the callee needs to retain it. + OnDERPRecv func(regionID int, src key.NodePublic, pkt []byte) (handled bool) } // NewFakeUserspaceEngine returns a new userspace engine for testing. @@ -441,6 +448,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) ControlKnobs: conf.ControlKnobs, PeerByKeyFunc: e.PeerByKey, ForceDiscoKey: conf.ForceDiscoKey, + OnDERPRecv: conf.OnDERPRecv, } if buildfeatures.HasLazyWG { magicsockOpts.NoteRecvActivity = e.noteRecvActivity From 0c53cf7ad9bc065b55e62bd45fda454a665e0726 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Mar 2026 12:01:14 +0000 Subject: [PATCH 1091/1093] .github: Bump actions/upload-artifact from 6.0.0 to 7.0.0 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 6.0.0 to 7.0.0. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/b7c566a772e6b6bfb58ed0dc250532a479d7789f...bbbca2ddaa5d8feaa63e36b76fdaad77386f024f) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: 7.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/cigocacher.yml | 2 +- .github/workflows/test.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cigocacher.yml b/.github/workflows/cigocacher.yml index 15aec8af90904..8a13474f2d92c 100644 --- a/.github/workflows/cigocacher.yml +++ b/.github/workflows/cigocacher.yml @@ -24,7 +24,7 @@ jobs: ./tool/go build -o "${OUT}" ./cmd/cigocacher/ tar -zcf cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}.tar.gz "${OUT}" - - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + - uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: name: cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }} path: cigocacher-${{ matrix.GOOS }}-${{ matrix.GOARCH }}.tar.gz diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4f6068e6e33cd..317052229676e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -741,7 +741,7 @@ jobs: run: | echo "artifacts_path=$(realpath .)" >> $GITHUB_ENV - name: upload crash - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0 + uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 if: steps.run.outcome != 'success' && steps.build.outcome == 'success' with: name: artifacts From 224305b57710bb6131afe8f469bdc3f91c0b570a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Mar 2026 12:01:20 +0000 Subject: [PATCH 1092/1093] .github: Bump actions/download-artifact from 7.0.0 to 8.0.0 Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 7.0.0 to 8.0.0. 
- [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/37930b1c2abaa49bbe596cd826c3c89aef350131...70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3) --- updated-dependencies: - dependency-name: actions/download-artifact dependency-version: 8.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/cigocacher.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cigocacher.yml b/.github/workflows/cigocacher.yml index 8a13474f2d92c..fea1f6a0dc988 100644 --- a/.github/workflows/cigocacher.yml +++ b/.github/workflows/cigocacher.yml @@ -36,7 +36,7 @@ jobs: contents: write steps: - name: Download all artifacts - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0 + uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0 with: pattern: 'cigocacher-*' merge-multiple: true From 0a4e0e2940c0938697305d1c87a38f53b5aefefd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Mar 2026 11:47:36 +0000 Subject: [PATCH 1093/1093] .github: Bump github/codeql-action from 4.32.5 to 4.32.6 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 4.32.5 to 4.32.6. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/c793b717bc78562f491db7b0e93a3a178b099162...0d579ffd059c29b07949a3cce3983f0780820c98) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 4.32.6 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 51bae5a068df5..9e1e518f666fc 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@c793b717bc78562f491db7b0e93a3a178b099162 # v4.32.5 + uses: github/codeql-action/init@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@c793b717bc78562f491db7b0e93a3a178b099162 # v4.32.5 + uses: github/codeql-action/autobuild@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@c793b717bc78562f491db7b0e93a3a178b099162 # v4.32.5 + uses: github/codeql-action/analyze@0d579ffd059c29b07949a3cce3983f0780820c98 # v4.32.6
      KeyTokens